Oct 11 00:47:34 localhost kernel: Linux version 5.14.0-621.el9.x86_64 (mockbuild@x86-05.stream.rdu2.redhat.com) (gcc (GCC) 11.5.0 20240719 (Red Hat 11.5.0-11), GNU ld version 2.35.2-67.el9) #1 SMP PREEMPT_DYNAMIC Tue Sep 30 07:37:35 UTC 2025
Oct 11 00:47:34 localhost kernel: The list of certified hardware and cloud instances for Red Hat Enterprise Linux 9 can be viewed at the Red Hat Ecosystem Catalog, https://catalog.redhat.com.
Oct 11 00:47:34 localhost kernel: Command line: BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-621.el9.x86_64 root=UUID=9839e2e1-98a2-4594-b609-79d514deb0a3 ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M
Oct 11 00:47:34 localhost kernel: BIOS-provided physical RAM map:
Oct 11 00:47:34 localhost kernel: BIOS-e820: [mem 0x0000000000000000-0x000000000009fbff] usable
Oct 11 00:47:34 localhost kernel: BIOS-e820: [mem 0x000000000009fc00-0x000000000009ffff] reserved
Oct 11 00:47:34 localhost kernel: BIOS-e820: [mem 0x00000000000f0000-0x00000000000fffff] reserved
Oct 11 00:47:34 localhost kernel: BIOS-e820: [mem 0x0000000000100000-0x00000000bffdafff] usable
Oct 11 00:47:34 localhost kernel: BIOS-e820: [mem 0x00000000bffdb000-0x00000000bfffffff] reserved
Oct 11 00:47:34 localhost kernel: BIOS-e820: [mem 0x00000000feffc000-0x00000000feffffff] reserved
Oct 11 00:47:34 localhost kernel: BIOS-e820: [mem 0x00000000fffc0000-0x00000000ffffffff] reserved
Oct 11 00:47:34 localhost kernel: BIOS-e820: [mem 0x0000000100000000-0x000000023fffffff] usable
Oct 11 00:47:34 localhost kernel: NX (Execute Disable) protection: active
Oct 11 00:47:34 localhost kernel: APIC: Static calls initialized
Oct 11 00:47:34 localhost kernel: SMBIOS 2.8 present.
Oct 11 00:47:34 localhost kernel: DMI: OpenStack Foundation OpenStack Nova, BIOS 1.15.0-1 04/01/2014
Oct 11 00:47:34 localhost kernel: Hypervisor detected: KVM
Oct 11 00:47:34 localhost kernel: kvm-clock: Using msrs 4b564d01 and 4b564d00
Oct 11 00:47:34 localhost kernel: kvm-clock: using sched offset of 3859042740 cycles
Oct 11 00:47:34 localhost kernel: clocksource: kvm-clock: mask: 0xffffffffffffffff max_cycles: 0x1cd42e4dffb, max_idle_ns: 881590591483 ns
Oct 11 00:47:34 localhost kernel: tsc: Detected 2800.000 MHz processor
Oct 11 00:47:34 localhost kernel: e820: update [mem 0x00000000-0x00000fff] usable ==> reserved
Oct 11 00:47:34 localhost kernel: e820: remove [mem 0x000a0000-0x000fffff] usable
Oct 11 00:47:34 localhost kernel: last_pfn = 0x240000 max_arch_pfn = 0x400000000
Oct 11 00:47:34 localhost kernel: MTRR map: 4 entries (3 fixed + 1 variable; max 19), built from 8 variable MTRRs
Oct 11 00:47:34 localhost kernel: x86/PAT: Configuration [0-7]: WB  WC  UC- UC  WB  WP  UC- WT  
Oct 11 00:47:34 localhost kernel: last_pfn = 0xbffdb max_arch_pfn = 0x400000000
Oct 11 00:47:34 localhost kernel: found SMP MP-table at [mem 0x000f5ae0-0x000f5aef]
Oct 11 00:47:34 localhost kernel: Using GB pages for direct mapping
Oct 11 00:47:34 localhost kernel: RAMDISK: [mem 0x2d858000-0x32c23fff]
Oct 11 00:47:34 localhost kernel: ACPI: Early table checksum verification disabled
Oct 11 00:47:34 localhost kernel: ACPI: RSDP 0x00000000000F5AA0 000014 (v00 BOCHS )
Oct 11 00:47:34 localhost kernel: ACPI: RSDT 0x00000000BFFE16BD 000030 (v01 BOCHS  BXPC     00000001 BXPC 00000001)
Oct 11 00:47:34 localhost kernel: ACPI: FACP 0x00000000BFFE1571 000074 (v01 BOCHS  BXPC     00000001 BXPC 00000001)
Oct 11 00:47:34 localhost kernel: ACPI: DSDT 0x00000000BFFDFC80 0018F1 (v01 BOCHS  BXPC     00000001 BXPC 00000001)
Oct 11 00:47:34 localhost kernel: ACPI: FACS 0x00000000BFFDFC40 000040
Oct 11 00:47:34 localhost kernel: ACPI: APIC 0x00000000BFFE15E5 0000B0 (v01 BOCHS  BXPC     00000001 BXPC 00000001)
Oct 11 00:47:34 localhost kernel: ACPI: WAET 0x00000000BFFE1695 000028 (v01 BOCHS  BXPC     00000001 BXPC 00000001)
Oct 11 00:47:34 localhost kernel: ACPI: Reserving FACP table memory at [mem 0xbffe1571-0xbffe15e4]
Oct 11 00:47:34 localhost kernel: ACPI: Reserving DSDT table memory at [mem 0xbffdfc80-0xbffe1570]
Oct 11 00:47:34 localhost kernel: ACPI: Reserving FACS table memory at [mem 0xbffdfc40-0xbffdfc7f]
Oct 11 00:47:34 localhost kernel: ACPI: Reserving APIC table memory at [mem 0xbffe15e5-0xbffe1694]
Oct 11 00:47:34 localhost kernel: ACPI: Reserving WAET table memory at [mem 0xbffe1695-0xbffe16bc]
Oct 11 00:47:34 localhost kernel: No NUMA configuration found
Oct 11 00:47:34 localhost kernel: Faking a node at [mem 0x0000000000000000-0x000000023fffffff]
Oct 11 00:47:34 localhost kernel: NODE_DATA(0) allocated [mem 0x23ffd3000-0x23fffdfff]
Oct 11 00:47:34 localhost kernel: crashkernel reserved: 0x00000000af000000 - 0x00000000bf000000 (256 MB)
Oct 11 00:47:34 localhost kernel: Zone ranges:
Oct 11 00:47:34 localhost kernel:   DMA      [mem 0x0000000000001000-0x0000000000ffffff]
Oct 11 00:47:34 localhost kernel:   DMA32    [mem 0x0000000001000000-0x00000000ffffffff]
Oct 11 00:47:34 localhost kernel:   Normal   [mem 0x0000000100000000-0x000000023fffffff]
Oct 11 00:47:34 localhost kernel:   Device   empty
Oct 11 00:47:34 localhost kernel: Movable zone start for each node
Oct 11 00:47:34 localhost kernel: Early memory node ranges
Oct 11 00:47:34 localhost kernel:   node   0: [mem 0x0000000000001000-0x000000000009efff]
Oct 11 00:47:34 localhost kernel:   node   0: [mem 0x0000000000100000-0x00000000bffdafff]
Oct 11 00:47:34 localhost kernel:   node   0: [mem 0x0000000100000000-0x000000023fffffff]
Oct 11 00:47:34 localhost kernel: Initmem setup node 0 [mem 0x0000000000001000-0x000000023fffffff]
Oct 11 00:47:34 localhost kernel: On node 0, zone DMA: 1 pages in unavailable ranges
Oct 11 00:47:34 localhost kernel: On node 0, zone DMA: 97 pages in unavailable ranges
Oct 11 00:47:34 localhost kernel: On node 0, zone Normal: 37 pages in unavailable ranges
Oct 11 00:47:34 localhost kernel: ACPI: PM-Timer IO Port: 0x608
Oct 11 00:47:34 localhost kernel: ACPI: LAPIC_NMI (acpi_id[0xff] dfl dfl lint[0x1])
Oct 11 00:47:34 localhost kernel: IOAPIC[0]: apic_id 0, version 17, address 0xfec00000, GSI 0-23
Oct 11 00:47:34 localhost kernel: ACPI: INT_SRC_OVR (bus 0 bus_irq 0 global_irq 2 dfl dfl)
Oct 11 00:47:34 localhost kernel: ACPI: INT_SRC_OVR (bus 0 bus_irq 5 global_irq 5 high level)
Oct 11 00:47:34 localhost kernel: ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 high level)
Oct 11 00:47:34 localhost kernel: ACPI: INT_SRC_OVR (bus 0 bus_irq 10 global_irq 10 high level)
Oct 11 00:47:34 localhost kernel: ACPI: INT_SRC_OVR (bus 0 bus_irq 11 global_irq 11 high level)
Oct 11 00:47:34 localhost kernel: ACPI: Using ACPI (MADT) for SMP configuration information
Oct 11 00:47:34 localhost kernel: TSC deadline timer available
Oct 11 00:47:34 localhost kernel: CPU topo: Max. logical packages:   8
Oct 11 00:47:34 localhost kernel: CPU topo: Max. logical dies:       8
Oct 11 00:47:34 localhost kernel: CPU topo: Max. dies per package:   1
Oct 11 00:47:34 localhost kernel: CPU topo: Max. threads per core:   1
Oct 11 00:47:34 localhost kernel: CPU topo: Num. cores per package:     1
Oct 11 00:47:34 localhost kernel: CPU topo: Num. threads per package:   1
Oct 11 00:47:34 localhost kernel: CPU topo: Allowing 8 present CPUs plus 0 hotplug CPUs
Oct 11 00:47:34 localhost kernel: kvm-guest: APIC: eoi() replaced with kvm_guest_apic_eoi_write()
Oct 11 00:47:34 localhost kernel: PM: hibernation: Registered nosave memory: [mem 0x00000000-0x00000fff]
Oct 11 00:47:34 localhost kernel: PM: hibernation: Registered nosave memory: [mem 0x0009f000-0x0009ffff]
Oct 11 00:47:34 localhost kernel: PM: hibernation: Registered nosave memory: [mem 0x000a0000-0x000effff]
Oct 11 00:47:34 localhost kernel: PM: hibernation: Registered nosave memory: [mem 0x000f0000-0x000fffff]
Oct 11 00:47:34 localhost kernel: PM: hibernation: Registered nosave memory: [mem 0xbffdb000-0xbfffffff]
Oct 11 00:47:34 localhost kernel: PM: hibernation: Registered nosave memory: [mem 0xc0000000-0xfeffbfff]
Oct 11 00:47:34 localhost kernel: PM: hibernation: Registered nosave memory: [mem 0xfeffc000-0xfeffffff]
Oct 11 00:47:34 localhost kernel: PM: hibernation: Registered nosave memory: [mem 0xff000000-0xfffbffff]
Oct 11 00:47:34 localhost kernel: PM: hibernation: Registered nosave memory: [mem 0xfffc0000-0xffffffff]
Oct 11 00:47:34 localhost kernel: [mem 0xc0000000-0xfeffbfff] available for PCI devices
Oct 11 00:47:34 localhost kernel: Booting paravirtualized kernel on KVM
Oct 11 00:47:34 localhost kernel: clocksource: refined-jiffies: mask: 0xffffffff max_cycles: 0xffffffff, max_idle_ns: 1910969940391419 ns
Oct 11 00:47:34 localhost kernel: setup_percpu: NR_CPUS:8192 nr_cpumask_bits:8 nr_cpu_ids:8 nr_node_ids:1
Oct 11 00:47:34 localhost kernel: percpu: Embedded 64 pages/cpu s225280 r8192 d28672 u262144
Oct 11 00:47:34 localhost kernel: pcpu-alloc: s225280 r8192 d28672 u262144 alloc=1*2097152
Oct 11 00:47:34 localhost kernel: pcpu-alloc: [0] 0 1 2 3 4 5 6 7 
Oct 11 00:47:34 localhost kernel: kvm-guest: PV spinlocks disabled, no host support
Oct 11 00:47:34 localhost kernel: Kernel command line: BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-621.el9.x86_64 root=UUID=9839e2e1-98a2-4594-b609-79d514deb0a3 ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M
Oct 11 00:47:34 localhost kernel: Unknown kernel command line parameters "BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-621.el9.x86_64", will be passed to user space.
Oct 11 00:47:34 localhost kernel: random: crng init done
Oct 11 00:47:34 localhost kernel: Dentry cache hash table entries: 1048576 (order: 11, 8388608 bytes, linear)
Oct 11 00:47:34 localhost kernel: Inode-cache hash table entries: 524288 (order: 10, 4194304 bytes, linear)
Oct 11 00:47:34 localhost kernel: Fallback order for Node 0: 0 
Oct 11 00:47:34 localhost kernel: Built 1 zonelists, mobility grouping on.  Total pages: 2064091
Oct 11 00:47:34 localhost kernel: Policy zone: Normal
Oct 11 00:47:34 localhost kernel: mem auto-init: stack:off, heap alloc:off, heap free:off
Oct 11 00:47:34 localhost kernel: software IO TLB: area num 8.
Oct 11 00:47:34 localhost kernel: SLUB: HWalign=64, Order=0-3, MinObjects=0, CPUs=8, Nodes=1
Oct 11 00:47:34 localhost kernel: ftrace: allocating 49162 entries in 193 pages
Oct 11 00:47:34 localhost kernel: ftrace: allocated 193 pages with 3 groups
Oct 11 00:47:34 localhost kernel: Dynamic Preempt: voluntary
Oct 11 00:47:34 localhost kernel: rcu: Preemptible hierarchical RCU implementation.
Oct 11 00:47:34 localhost kernel: rcu:         RCU event tracing is enabled.
Oct 11 00:47:34 localhost kernel: rcu:         RCU restricting CPUs from NR_CPUS=8192 to nr_cpu_ids=8.
Oct 11 00:47:34 localhost kernel:         Trampoline variant of Tasks RCU enabled.
Oct 11 00:47:34 localhost kernel:         Rude variant of Tasks RCU enabled.
Oct 11 00:47:34 localhost kernel:         Tracing variant of Tasks RCU enabled.
Oct 11 00:47:34 localhost kernel: rcu: RCU calculated value of scheduler-enlistment delay is 100 jiffies.
Oct 11 00:47:34 localhost kernel: rcu: Adjusting geometry for rcu_fanout_leaf=16, nr_cpu_ids=8
Oct 11 00:47:34 localhost kernel: RCU Tasks: Setting shift to 3 and lim to 1 rcu_task_cb_adjust=1 rcu_task_cpu_ids=8.
Oct 11 00:47:34 localhost kernel: RCU Tasks Rude: Setting shift to 3 and lim to 1 rcu_task_cb_adjust=1 rcu_task_cpu_ids=8.
Oct 11 00:47:34 localhost kernel: RCU Tasks Trace: Setting shift to 3 and lim to 1 rcu_task_cb_adjust=1 rcu_task_cpu_ids=8.
Oct 11 00:47:34 localhost kernel: NR_IRQS: 524544, nr_irqs: 488, preallocated irqs: 16
Oct 11 00:47:34 localhost kernel: rcu: srcu_init: Setting srcu_struct sizes based on contention.
Oct 11 00:47:34 localhost kernel: kfence: initialized - using 2097152 bytes for 255 objects at 0x(____ptrval____)-0x(____ptrval____)
Oct 11 00:47:34 localhost kernel: Console: colour VGA+ 80x25
Oct 11 00:47:34 localhost kernel: printk: console [ttyS0] enabled
Oct 11 00:47:34 localhost kernel: ACPI: Core revision 20230331
Oct 11 00:47:34 localhost kernel: APIC: Switch to symmetric I/O mode setup
Oct 11 00:47:34 localhost kernel: x2apic enabled
Oct 11 00:47:34 localhost kernel: APIC: Switched APIC routing to: physical x2apic
Oct 11 00:47:34 localhost kernel: tsc: Marking TSC unstable due to TSCs unsynchronized
Oct 11 00:47:34 localhost kernel: Calibrating delay loop (skipped) preset value.. 5600.00 BogoMIPS (lpj=2800000)
Oct 11 00:47:34 localhost kernel: x86/cpu: User Mode Instruction Prevention (UMIP) activated
Oct 11 00:47:34 localhost kernel: Last level iTLB entries: 4KB 512, 2MB 255, 4MB 127
Oct 11 00:47:34 localhost kernel: Last level dTLB entries: 4KB 512, 2MB 255, 4MB 127, 1GB 0
Oct 11 00:47:34 localhost kernel: Spectre V1 : Mitigation: usercopy/swapgs barriers and __user pointer sanitization
Oct 11 00:47:34 localhost kernel: Spectre V2 : Mitigation: Retpolines
Oct 11 00:47:34 localhost kernel: Spectre V2 : Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT
Oct 11 00:47:34 localhost kernel: Spectre V2 : Enabling Speculation Barrier for firmware calls
Oct 11 00:47:34 localhost kernel: RETBleed: Mitigation: untrained return thunk
Oct 11 00:47:34 localhost kernel: Spectre V2 : mitigation: Enabling conditional Indirect Branch Prediction Barrier
Oct 11 00:47:34 localhost kernel: Speculative Store Bypass: Mitigation: Speculative Store Bypass disabled via prctl
Oct 11 00:47:34 localhost kernel: Speculative Return Stack Overflow: IBPB-extending microcode not applied!
Oct 11 00:47:34 localhost kernel: Speculative Return Stack Overflow: WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options.
Oct 11 00:47:34 localhost kernel: x86/bugs: return thunk changed
Oct 11 00:47:34 localhost kernel: Speculative Return Stack Overflow: Vulnerable: Safe RET, no microcode
Oct 11 00:47:34 localhost kernel: x86/fpu: Supporting XSAVE feature 0x001: 'x87 floating point registers'
Oct 11 00:47:34 localhost kernel: x86/fpu: Supporting XSAVE feature 0x002: 'SSE registers'
Oct 11 00:47:34 localhost kernel: x86/fpu: Supporting XSAVE feature 0x004: 'AVX registers'
Oct 11 00:47:34 localhost kernel: x86/fpu: xstate_offset[2]:  576, xstate_sizes[2]:  256
Oct 11 00:47:34 localhost kernel: x86/fpu: Enabled xstate features 0x7, context size is 832 bytes, using 'compacted' format.
Oct 11 00:47:34 localhost kernel: Freeing SMP alternatives memory: 40K
Oct 11 00:47:34 localhost kernel: pid_max: default: 32768 minimum: 301
Oct 11 00:47:34 localhost kernel: LSM: initializing lsm=lockdown,capability,landlock,yama,integrity,selinux,bpf
Oct 11 00:47:34 localhost kernel: landlock: Up and running.
Oct 11 00:47:34 localhost kernel: Yama: becoming mindful.
Oct 11 00:47:34 localhost kernel: SELinux:  Initializing.
Oct 11 00:47:34 localhost kernel: LSM support for eBPF active
Oct 11 00:47:34 localhost kernel: Mount-cache hash table entries: 16384 (order: 5, 131072 bytes, linear)
Oct 11 00:47:34 localhost kernel: Mountpoint-cache hash table entries: 16384 (order: 5, 131072 bytes, linear)
Oct 11 00:47:34 localhost kernel: smpboot: CPU0: AMD EPYC-Rome Processor (family: 0x17, model: 0x31, stepping: 0x0)
Oct 11 00:47:34 localhost kernel: Performance Events: Fam17h+ core perfctr, AMD PMU driver.
Oct 11 00:47:34 localhost kernel: ... version:                0
Oct 11 00:47:34 localhost kernel: ... bit width:              48
Oct 11 00:47:34 localhost kernel: ... generic registers:      6
Oct 11 00:47:34 localhost kernel: ... value mask:             0000ffffffffffff
Oct 11 00:47:34 localhost kernel: ... max period:             00007fffffffffff
Oct 11 00:47:34 localhost kernel: ... fixed-purpose events:   0
Oct 11 00:47:34 localhost kernel: ... event mask:             000000000000003f
Oct 11 00:47:34 localhost kernel: signal: max sigframe size: 1776
Oct 11 00:47:34 localhost kernel: rcu: Hierarchical SRCU implementation.
Oct 11 00:47:34 localhost kernel: rcu:         Max phase no-delay instances is 400.
Oct 11 00:47:34 localhost kernel: smp: Bringing up secondary CPUs ...
Oct 11 00:47:34 localhost kernel: smpboot: x86: Booting SMP configuration:
Oct 11 00:47:34 localhost kernel: .... node  #0, CPUs:      #1 #2 #3 #4 #5 #6 #7
Oct 11 00:47:34 localhost kernel: smp: Brought up 1 node, 8 CPUs
Oct 11 00:47:34 localhost kernel: smpboot: Total of 8 processors activated (44800.00 BogoMIPS)
Oct 11 00:47:34 localhost kernel: node 0 deferred pages initialised in 9ms
Oct 11 00:47:34 localhost kernel: Memory: 7765928K/8388068K available (16384K kernel code, 5784K rwdata, 13864K rodata, 4188K init, 7196K bss, 616216K reserved, 0K cma-reserved)
Oct 11 00:47:34 localhost kernel: devtmpfs: initialized
Oct 11 00:47:34 localhost kernel: x86/mm: Memory block size: 128MB
Oct 11 00:47:34 localhost kernel: clocksource: jiffies: mask: 0xffffffff max_cycles: 0xffffffff, max_idle_ns: 1911260446275000 ns
Oct 11 00:47:34 localhost kernel: futex hash table entries: 2048 (order: 5, 131072 bytes, linear)
Oct 11 00:47:34 localhost kernel: pinctrl core: initialized pinctrl subsystem
Oct 11 00:47:34 localhost kernel: NET: Registered PF_NETLINK/PF_ROUTE protocol family
Oct 11 00:47:34 localhost kernel: DMA: preallocated 1024 KiB GFP_KERNEL pool for atomic allocations
Oct 11 00:47:34 localhost kernel: DMA: preallocated 1024 KiB GFP_KERNEL|GFP_DMA pool for atomic allocations
Oct 11 00:47:34 localhost kernel: DMA: preallocated 1024 KiB GFP_KERNEL|GFP_DMA32 pool for atomic allocations
Oct 11 00:47:34 localhost kernel: audit: initializing netlink subsys (disabled)
Oct 11 00:47:34 localhost kernel: audit: type=2000 audit(1760143652.620:1): state=initialized audit_enabled=0 res=1
Oct 11 00:47:34 localhost kernel: thermal_sys: Registered thermal governor 'fair_share'
Oct 11 00:47:34 localhost kernel: thermal_sys: Registered thermal governor 'step_wise'
Oct 11 00:47:34 localhost kernel: thermal_sys: Registered thermal governor 'user_space'
Oct 11 00:47:34 localhost kernel: cpuidle: using governor menu
Oct 11 00:47:34 localhost kernel: acpiphp: ACPI Hot Plug PCI Controller Driver version: 0.5
Oct 11 00:47:34 localhost kernel: PCI: Using configuration type 1 for base access
Oct 11 00:47:34 localhost kernel: PCI: Using configuration type 1 for extended access
Oct 11 00:47:34 localhost kernel: kprobes: kprobe jump-optimization is enabled. All kprobes are optimized if possible.
Oct 11 00:47:34 localhost kernel: HugeTLB: registered 1.00 GiB page size, pre-allocated 0 pages
Oct 11 00:47:34 localhost kernel: HugeTLB: 16380 KiB vmemmap can be freed for a 1.00 GiB page
Oct 11 00:47:34 localhost kernel: HugeTLB: registered 2.00 MiB page size, pre-allocated 0 pages
Oct 11 00:47:34 localhost kernel: HugeTLB: 28 KiB vmemmap can be freed for a 2.00 MiB page
Oct 11 00:47:34 localhost kernel: Demotion targets for Node 0: null
Oct 11 00:47:34 localhost kernel: cryptd: max_cpu_qlen set to 1000
Oct 11 00:47:34 localhost kernel: ACPI: Added _OSI(Module Device)
Oct 11 00:47:34 localhost kernel: ACPI: Added _OSI(Processor Device)
Oct 11 00:47:34 localhost kernel: ACPI: Added _OSI(3.0 _SCP Extensions)
Oct 11 00:47:34 localhost kernel: ACPI: Added _OSI(Processor Aggregator Device)
Oct 11 00:47:34 localhost kernel: ACPI: 1 ACPI AML tables successfully acquired and loaded
Oct 11 00:47:34 localhost kernel: ACPI: _OSC evaluation for CPUs failed, trying _PDC
Oct 11 00:47:34 localhost kernel: ACPI: Interpreter enabled
Oct 11 00:47:34 localhost kernel: ACPI: PM: (supports S0 S3 S4 S5)
Oct 11 00:47:34 localhost kernel: ACPI: Using IOAPIC for interrupt routing
Oct 11 00:47:34 localhost kernel: PCI: Using host bridge windows from ACPI; if necessary, use "pci=nocrs" and report a bug
Oct 11 00:47:34 localhost kernel: PCI: Using E820 reservations for host bridge windows
Oct 11 00:47:34 localhost kernel: ACPI: Enabled 2 GPEs in block 00 to 0F
Oct 11 00:47:34 localhost kernel: ACPI: PCI Root Bridge [PCI0] (domain 0000 [bus 00-ff])
Oct 11 00:47:34 localhost kernel: acpi PNP0A03:00: _OSC: OS supports [ExtendedConfig ASPM ClockPM Segments MSI EDR HPX-Type3]
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [3] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [4] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [5] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [6] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [7] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [8] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [9] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [10] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [11] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [12] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [13] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [14] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [15] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [16] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [17] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [18] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [19] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [20] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [21] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [22] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [23] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [24] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [25] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [26] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [27] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [28] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [29] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [30] registered
Oct 11 00:47:34 localhost kernel: acpiphp: Slot [31] registered
Oct 11 00:47:34 localhost kernel: PCI host bridge to bus 0000:00
Oct 11 00:47:34 localhost kernel: pci_bus 0000:00: root bus resource [io  0x0000-0x0cf7 window]
Oct 11 00:47:34 localhost kernel: pci_bus 0000:00: root bus resource [io  0x0d00-0xffff window]
Oct 11 00:47:34 localhost kernel: pci_bus 0000:00: root bus resource [mem 0x000a0000-0x000bffff window]
Oct 11 00:47:34 localhost kernel: pci_bus 0000:00: root bus resource [mem 0xc0000000-0xfebfffff window]
Oct 11 00:47:34 localhost kernel: pci_bus 0000:00: root bus resource [mem 0x240000000-0x2bfffffff window]
Oct 11 00:47:34 localhost kernel: pci_bus 0000:00: root bus resource [bus 00-ff]
Oct 11 00:47:34 localhost kernel: pci 0000:00:00.0: [8086:1237] type 00 class 0x060000 conventional PCI endpoint
Oct 11 00:47:34 localhost kernel: pci 0000:00:01.0: [8086:7000] type 00 class 0x060100 conventional PCI endpoint
Oct 11 00:47:34 localhost kernel: pci 0000:00:01.1: [8086:7010] type 00 class 0x010180 conventional PCI endpoint
Oct 11 00:47:34 localhost kernel: pci 0000:00:01.1: BAR 4 [io  0xc140-0xc14f]
Oct 11 00:47:34 localhost kernel: pci 0000:00:01.1: BAR 0 [io  0x01f0-0x01f7]: legacy IDE quirk
Oct 11 00:47:34 localhost kernel: pci 0000:00:01.1: BAR 1 [io  0x03f6]: legacy IDE quirk
Oct 11 00:47:34 localhost kernel: pci 0000:00:01.1: BAR 2 [io  0x0170-0x0177]: legacy IDE quirk
Oct 11 00:47:34 localhost kernel: pci 0000:00:01.1: BAR 3 [io  0x0376]: legacy IDE quirk
Oct 11 00:47:34 localhost kernel: pci 0000:00:01.2: [8086:7020] type 00 class 0x0c0300 conventional PCI endpoint
Oct 11 00:47:34 localhost kernel: pci 0000:00:01.2: BAR 4 [io  0xc100-0xc11f]
Oct 11 00:47:34 localhost kernel: pci 0000:00:01.3: [8086:7113] type 00 class 0x068000 conventional PCI endpoint
Oct 11 00:47:34 localhost kernel: pci 0000:00:01.3: quirk: [io  0x0600-0x063f] claimed by PIIX4 ACPI
Oct 11 00:47:34 localhost kernel: pci 0000:00:01.3: quirk: [io  0x0700-0x070f] claimed by PIIX4 SMB
Oct 11 00:47:34 localhost kernel: pci 0000:00:02.0: [1af4:1050] type 00 class 0x030000 conventional PCI endpoint
Oct 11 00:47:34 localhost kernel: pci 0000:00:02.0: BAR 0 [mem 0xfe000000-0xfe7fffff pref]
Oct 11 00:47:34 localhost kernel: pci 0000:00:02.0: BAR 2 [mem 0xfe800000-0xfe803fff 64bit pref]
Oct 11 00:47:34 localhost kernel: pci 0000:00:02.0: BAR 4 [mem 0xfeb90000-0xfeb90fff]
Oct 11 00:47:34 localhost kernel: pci 0000:00:02.0: ROM [mem 0xfeb80000-0xfeb8ffff pref]
Oct 11 00:47:34 localhost kernel: pci 0000:00:02.0: Video device with shadowed ROM at [mem 0x000c0000-0x000dffff]
Oct 11 00:47:34 localhost kernel: pci 0000:00:03.0: [1af4:1000] type 00 class 0x020000 conventional PCI endpoint
Oct 11 00:47:34 localhost kernel: pci 0000:00:03.0: BAR 0 [io  0xc080-0xc0bf]
Oct 11 00:47:34 localhost kernel: pci 0000:00:03.0: BAR 1 [mem 0xfeb91000-0xfeb91fff]
Oct 11 00:47:34 localhost kernel: pci 0000:00:03.0: BAR 4 [mem 0xfe804000-0xfe807fff 64bit pref]
Oct 11 00:47:34 localhost kernel: pci 0000:00:03.0: ROM [mem 0xfeb00000-0xfeb7ffff pref]
Oct 11 00:47:34 localhost kernel: pci 0000:00:04.0: [1af4:1001] type 00 class 0x010000 conventional PCI endpoint
Oct 11 00:47:34 localhost kernel: pci 0000:00:04.0: BAR 0 [io  0xc000-0xc07f]
Oct 11 00:47:34 localhost kernel: pci 0000:00:04.0: BAR 1 [mem 0xfeb92000-0xfeb92fff]
Oct 11 00:47:34 localhost kernel: pci 0000:00:04.0: BAR 4 [mem 0xfe808000-0xfe80bfff 64bit pref]
Oct 11 00:47:34 localhost kernel: pci 0000:00:05.0: [1af4:1002] type 00 class 0x00ff00 conventional PCI endpoint
Oct 11 00:47:34 localhost kernel: pci 0000:00:05.0: BAR 0 [io  0xc0c0-0xc0ff]
Oct 11 00:47:34 localhost kernel: pci 0000:00:05.0: BAR 4 [mem 0xfe80c000-0xfe80ffff 64bit pref]
Oct 11 00:47:34 localhost kernel: pci 0000:00:06.0: [1af4:1005] type 00 class 0x00ff00 conventional PCI endpoint
Oct 11 00:47:34 localhost kernel: pci 0000:00:06.0: BAR 0 [io  0xc120-0xc13f]
Oct 11 00:47:34 localhost kernel: pci 0000:00:06.0: BAR 4 [mem 0xfe810000-0xfe813fff 64bit pref]
Oct 11 00:47:34 localhost kernel: ACPI: PCI: Interrupt link LNKA configured for IRQ 10
Oct 11 00:47:34 localhost kernel: ACPI: PCI: Interrupt link LNKB configured for IRQ 10
Oct 11 00:47:34 localhost kernel: ACPI: PCI: Interrupt link LNKC configured for IRQ 11
Oct 11 00:47:34 localhost kernel: ACPI: PCI: Interrupt link LNKD configured for IRQ 11
Oct 11 00:47:34 localhost kernel: ACPI: PCI: Interrupt link LNKS configured for IRQ 9
Oct 11 00:47:34 localhost kernel: iommu: Default domain type: Translated
Oct 11 00:47:34 localhost kernel: iommu: DMA domain TLB invalidation policy: lazy mode
Oct 11 00:47:34 localhost kernel: SCSI subsystem initialized
Oct 11 00:47:34 localhost kernel: ACPI: bus type USB registered
Oct 11 00:47:34 localhost kernel: usbcore: registered new interface driver usbfs
Oct 11 00:47:34 localhost kernel: usbcore: registered new interface driver hub
Oct 11 00:47:34 localhost kernel: usbcore: registered new device driver usb
Oct 11 00:47:34 localhost kernel: pps_core: LinuxPPS API ver. 1 registered
Oct 11 00:47:34 localhost kernel: pps_core: Software ver. 5.3.6 - Copyright 2005-2007 Rodolfo Giometti <giometti@linux.it>
Oct 11 00:47:34 localhost kernel: PTP clock support registered
Oct 11 00:47:34 localhost kernel: EDAC MC: Ver: 3.0.0
Oct 11 00:47:34 localhost kernel: NetLabel: Initializing
Oct 11 00:47:34 localhost kernel: NetLabel:  domain hash size = 128
Oct 11 00:47:34 localhost kernel: NetLabel:  protocols = UNLABELED CIPSOv4 CALIPSO
Oct 11 00:47:34 localhost kernel: NetLabel:  unlabeled traffic allowed by default
Oct 11 00:47:34 localhost kernel: PCI: Using ACPI for IRQ routing
Oct 11 00:47:34 localhost kernel: PCI: pci_cache_line_size set to 64 bytes
Oct 11 00:47:34 localhost kernel: e820: reserve RAM buffer [mem 0x0009fc00-0x0009ffff]
Oct 11 00:47:34 localhost kernel: e820: reserve RAM buffer [mem 0xbffdb000-0xbfffffff]
Oct 11 00:47:34 localhost kernel: pci 0000:00:02.0: vgaarb: setting as boot VGA device
Oct 11 00:47:34 localhost kernel: pci 0000:00:02.0: vgaarb: bridge control possible
Oct 11 00:47:34 localhost kernel: pci 0000:00:02.0: vgaarb: VGA device added: decodes=io+mem,owns=io+mem,locks=none
Oct 11 00:47:34 localhost kernel: vgaarb: loaded
Oct 11 00:47:34 localhost kernel: clocksource: Switched to clocksource kvm-clock
Oct 11 00:47:34 localhost kernel: VFS: Disk quotas dquot_6.6.0
Oct 11 00:47:34 localhost kernel: VFS: Dquot-cache hash table entries: 512 (order 0, 4096 bytes)
Oct 11 00:47:34 localhost kernel: pnp: PnP ACPI init
Oct 11 00:47:34 localhost kernel: pnp 00:03: [dma 2]
Oct 11 00:47:34 localhost kernel: pnp: PnP ACPI: found 5 devices
Oct 11 00:47:34 localhost kernel: clocksource: acpi_pm: mask: 0xffffff max_cycles: 0xffffff, max_idle_ns: 2085701024 ns
Oct 11 00:47:34 localhost kernel: NET: Registered PF_INET protocol family
Oct 11 00:47:34 localhost kernel: IP idents hash table entries: 131072 (order: 8, 1048576 bytes, linear)
Oct 11 00:47:34 localhost kernel: tcp_listen_portaddr_hash hash table entries: 4096 (order: 4, 65536 bytes, linear)
Oct 11 00:47:34 localhost kernel: Table-perturb hash table entries: 65536 (order: 6, 262144 bytes, linear)
Oct 11 00:47:34 localhost kernel: TCP established hash table entries: 65536 (order: 7, 524288 bytes, linear)
Oct 11 00:47:34 localhost kernel: TCP bind hash table entries: 65536 (order: 8, 1048576 bytes, linear)
Oct 11 00:47:34 localhost kernel: TCP: Hash tables configured (established 65536 bind 65536)
Oct 11 00:47:34 localhost kernel: MPTCP token hash table entries: 8192 (order: 5, 196608 bytes, linear)
Oct 11 00:47:34 localhost kernel: UDP hash table entries: 4096 (order: 5, 131072 bytes, linear)
Oct 11 00:47:34 localhost kernel: UDP-Lite hash table entries: 4096 (order: 5, 131072 bytes, linear)
Oct 11 00:47:34 localhost kernel: NET: Registered PF_UNIX/PF_LOCAL protocol family
Oct 11 00:47:34 localhost kernel: NET: Registered PF_XDP protocol family
Oct 11 00:47:34 localhost kernel: pci_bus 0000:00: resource 4 [io  0x0000-0x0cf7 window]
Oct 11 00:47:34 localhost kernel: pci_bus 0000:00: resource 5 [io  0x0d00-0xffff window]
Oct 11 00:47:34 localhost kernel: pci_bus 0000:00: resource 6 [mem 0x000a0000-0x000bffff window]
Oct 11 00:47:34 localhost kernel: pci_bus 0000:00: resource 7 [mem 0xc0000000-0xfebfffff window]
Oct 11 00:47:34 localhost kernel: pci_bus 0000:00: resource 8 [mem 0x240000000-0x2bfffffff window]
Oct 11 00:47:34 localhost kernel: pci 0000:00:01.0: PIIX3: Enabling Passive Release
Oct 11 00:47:34 localhost kernel: pci 0000:00:00.0: Limiting direct PCI/PCI transfers
Oct 11 00:47:34 localhost kernel: ACPI: \_SB_.LNKD: Enabled at IRQ 11
Oct 11 00:47:34 localhost kernel: pci 0000:00:01.2: quirk_usb_early_handoff+0x0/0x160 took 110976 usecs
Oct 11 00:47:34 localhost kernel: PCI: CLS 0 bytes, default 64
Oct 11 00:47:34 localhost kernel: PCI-DMA: Using software bounce buffering for IO (SWIOTLB)
Oct 11 00:47:34 localhost kernel: software IO TLB: mapped [mem 0x00000000ab000000-0x00000000af000000] (64MB)
Oct 11 00:47:34 localhost kernel: ACPI: bus type thunderbolt registered
Oct 11 00:47:34 localhost kernel: Trying to unpack rootfs image as initramfs...
Oct 11 00:47:34 localhost kernel: Initialise system trusted keyrings
Oct 11 00:47:34 localhost kernel: Key type blacklist registered
Oct 11 00:47:34 localhost kernel: workingset: timestamp_bits=36 max_order=21 bucket_order=0
Oct 11 00:47:34 localhost kernel: zbud: loaded
Oct 11 00:47:34 localhost kernel: integrity: Platform Keyring initialized
Oct 11 00:47:34 localhost kernel: integrity: Machine keyring initialized
Oct 11 00:47:34 localhost kernel: Freeing initrd memory: 85808K
Oct 11 00:47:34 localhost kernel: NET: Registered PF_ALG protocol family
Oct 11 00:47:34 localhost kernel: xor: automatically using best checksumming function   avx       
Oct 11 00:47:34 localhost kernel: Key type asymmetric registered
Oct 11 00:47:34 localhost kernel: Asymmetric key parser 'x509' registered
Oct 11 00:47:34 localhost kernel: Block layer SCSI generic (bsg) driver version 0.4 loaded (major 246)
Oct 11 00:47:34 localhost kernel: io scheduler mq-deadline registered
Oct 11 00:47:34 localhost kernel: io scheduler kyber registered
Oct 11 00:47:34 localhost kernel: io scheduler bfq registered
Oct 11 00:47:34 localhost kernel: atomic64_test: passed for x86-64 platform with CX8 and with SSE
Oct 11 00:47:34 localhost kernel: shpchp: Standard Hot Plug PCI Controller Driver version: 0.4
Oct 11 00:47:34 localhost kernel: input: Power Button as /devices/LNXSYSTM:00/LNXPWRBN:00/input/input0
Oct 11 00:47:34 localhost kernel: ACPI: button: Power Button [PWRF]
Oct 11 00:47:34 localhost kernel: ACPI: \_SB_.LNKB: Enabled at IRQ 10
Oct 11 00:47:34 localhost kernel: ACPI: \_SB_.LNKC: Enabled at IRQ 11
Oct 11 00:47:34 localhost kernel: ACPI: \_SB_.LNKA: Enabled at IRQ 10
Oct 11 00:47:34 localhost kernel: Serial: 8250/16550 driver, 4 ports, IRQ sharing enabled
Oct 11 00:47:34 localhost kernel: 00:00: ttyS0 at I/O 0x3f8 (irq = 4, base_baud = 115200) is a 16550A
Oct 11 00:47:34 localhost kernel: Non-volatile memory driver v1.3
Oct 11 00:47:34 localhost kernel: rdac: device handler registered
Oct 11 00:47:34 localhost kernel: hp_sw: device handler registered
Oct 11 00:47:34 localhost kernel: emc: device handler registered
Oct 11 00:47:34 localhost kernel: alua: device handler registered
Oct 11 00:47:34 localhost kernel: uhci_hcd 0000:00:01.2: UHCI Host Controller
Oct 11 00:47:34 localhost kernel: uhci_hcd 0000:00:01.2: new USB bus registered, assigned bus number 1
Oct 11 00:47:34 localhost kernel: uhci_hcd 0000:00:01.2: detected 2 ports
Oct 11 00:47:34 localhost kernel: uhci_hcd 0000:00:01.2: irq 11, io port 0x0000c100
Oct 11 00:47:34 localhost kernel: usb usb1: New USB device found, idVendor=1d6b, idProduct=0001, bcdDevice= 5.14
Oct 11 00:47:34 localhost kernel: usb usb1: New USB device strings: Mfr=3, Product=2, SerialNumber=1
Oct 11 00:47:34 localhost kernel: usb usb1: Product: UHCI Host Controller
Oct 11 00:47:34 localhost kernel: usb usb1: Manufacturer: Linux 5.14.0-621.el9.x86_64 uhci_hcd
Oct 11 00:47:34 localhost kernel: usb usb1: SerialNumber: 0000:00:01.2
Oct 11 00:47:34 localhost kernel: hub 1-0:1.0: USB hub found
Oct 11 00:47:34 localhost kernel: hub 1-0:1.0: 2 ports detected
Oct 11 00:47:34 localhost kernel: usbcore: registered new interface driver usbserial_generic
Oct 11 00:47:34 localhost kernel: usbserial: USB Serial support registered for generic
Oct 11 00:47:34 localhost kernel: i8042: PNP: PS/2 Controller [PNP0303:KBD,PNP0f13:MOU] at 0x60,0x64 irq 1,12
Oct 11 00:47:34 localhost kernel: serio: i8042 KBD port at 0x60,0x64 irq 1
Oct 11 00:47:34 localhost kernel: serio: i8042 AUX port at 0x60,0x64 irq 12
Oct 11 00:47:34 localhost kernel: mousedev: PS/2 mouse device common for all mice
Oct 11 00:47:34 localhost kernel: rtc_cmos 00:04: RTC can wake from S4
Oct 11 00:47:34 localhost kernel: rtc_cmos 00:04: registered as rtc0
Oct 11 00:47:34 localhost kernel: rtc_cmos 00:04: setting system clock to 2025-10-11T00:47:33 UTC (1760143653)
Oct 11 00:47:34 localhost kernel: rtc_cmos 00:04: alarms up to one day, y3k, 242 bytes nvram
Oct 11 00:47:34 localhost kernel: input: AT Translated Set 2 keyboard as /devices/platform/i8042/serio0/input/input1
Oct 11 00:47:34 localhost kernel: amd_pstate: the _CPC object is not present in SBIOS or ACPI disabled
Oct 11 00:47:34 localhost kernel: hid: raw HID events driver (C) Jiri Kosina
Oct 11 00:47:34 localhost kernel: usbcore: registered new interface driver usbhid
Oct 11 00:47:34 localhost kernel: usbhid: USB HID core driver
Oct 11 00:47:34 localhost kernel: drop_monitor: Initializing network drop monitor service
Oct 11 00:47:34 localhost kernel: input: VirtualPS/2 VMware VMMouse as /devices/platform/i8042/serio1/input/input4
Oct 11 00:47:34 localhost kernel: input: VirtualPS/2 VMware VMMouse as /devices/platform/i8042/serio1/input/input3
Oct 11 00:47:34 localhost kernel: Initializing XFRM netlink socket
Oct 11 00:47:34 localhost kernel: NET: Registered PF_INET6 protocol family
Oct 11 00:47:34 localhost kernel: Segment Routing with IPv6
Oct 11 00:47:34 localhost kernel: NET: Registered PF_PACKET protocol family
Oct 11 00:47:34 localhost kernel: mpls_gso: MPLS GSO support
Oct 11 00:47:34 localhost kernel: IPI shorthand broadcast: enabled
Oct 11 00:47:34 localhost kernel: AVX2 version of gcm_enc/dec engaged.
Oct 11 00:47:34 localhost kernel: AES CTR mode by8 optimization enabled
Oct 11 00:47:34 localhost kernel: sched_clock: Marking stable (1323005100, 140750500)->(1585879290, -122123690)
Oct 11 00:47:34 localhost kernel: registered taskstats version 1
Oct 11 00:47:34 localhost kernel: Loading compiled-in X.509 certificates
Oct 11 00:47:34 localhost kernel: Loaded X.509 cert 'The CentOS Project: CentOS Stream kernel signing key: 72f99a463516b0dfb027e50caab189f607ef1bc9'
Oct 11 00:47:34 localhost kernel: Loaded X.509 cert 'Red Hat Enterprise Linux Driver Update Program (key 3): bf57f3e87362bc7229d9f465321773dfd1f77a80'
Oct 11 00:47:34 localhost kernel: Loaded X.509 cert 'Red Hat Enterprise Linux kpatch signing key: 4d38fd864ebe18c5f0b72e3852e2014c3a676fc8'
Oct 11 00:47:34 localhost kernel: Loaded X.509 cert 'RH-IMA-CA: Red Hat IMA CA: fb31825dd0e073685b264e3038963673f753959a'
Oct 11 00:47:34 localhost kernel: Loaded X.509 cert 'Nvidia GPU OOT signing 001: 55e1cef88193e60419f0b0ec379c49f77545acf0'
Oct 11 00:47:34 localhost kernel: Demotion targets for Node 0: null
Oct 11 00:47:34 localhost kernel: page_owner is disabled
Oct 11 00:47:34 localhost kernel: Key type .fscrypt registered
Oct 11 00:47:34 localhost kernel: Key type fscrypt-provisioning registered
Oct 11 00:47:34 localhost kernel: Key type big_key registered
Oct 11 00:47:34 localhost kernel: Key type encrypted registered
Oct 11 00:47:34 localhost kernel: ima: No TPM chip found, activating TPM-bypass!
Oct 11 00:47:34 localhost kernel: Loading compiled-in module X.509 certificates
Oct 11 00:47:34 localhost kernel: Loaded X.509 cert 'The CentOS Project: CentOS Stream kernel signing key: 72f99a463516b0dfb027e50caab189f607ef1bc9'
Oct 11 00:47:34 localhost kernel: ima: Allocated hash algorithm: sha256
Oct 11 00:47:34 localhost kernel: ima: No architecture policies found
Oct 11 00:47:34 localhost kernel: evm: Initialising EVM extended attributes:
Oct 11 00:47:34 localhost kernel: evm: security.selinux
Oct 11 00:47:34 localhost kernel: evm: security.SMACK64 (disabled)
Oct 11 00:47:34 localhost kernel: evm: security.SMACK64EXEC (disabled)
Oct 11 00:47:34 localhost kernel: evm: security.SMACK64TRANSMUTE (disabled)
Oct 11 00:47:34 localhost kernel: evm: security.SMACK64MMAP (disabled)
Oct 11 00:47:34 localhost kernel: evm: security.apparmor (disabled)
Oct 11 00:47:34 localhost kernel: evm: security.ima
Oct 11 00:47:34 localhost kernel: evm: security.capability
Oct 11 00:47:34 localhost kernel: evm: HMAC attrs: 0x1
Oct 11 00:47:34 localhost kernel: usb 1-1: new full-speed USB device number 2 using uhci_hcd
Oct 11 00:47:34 localhost kernel: usb 1-1: New USB device found, idVendor=0627, idProduct=0001, bcdDevice= 0.00
Oct 11 00:47:34 localhost kernel: usb 1-1: New USB device strings: Mfr=1, Product=3, SerialNumber=10
Oct 11 00:47:34 localhost kernel: usb 1-1: Product: QEMU USB Tablet
Oct 11 00:47:34 localhost kernel: usb 1-1: Manufacturer: QEMU
Oct 11 00:47:34 localhost kernel: usb 1-1: SerialNumber: 28754-0000:00:01.2-1
Oct 11 00:47:34 localhost kernel: input: QEMU QEMU USB Tablet as /devices/pci0000:00/0000:00:01.2/usb1/1-1/1-1:1.0/0003:0627:0001.0001/input/input5
Oct 11 00:47:34 localhost kernel: hid-generic 0003:0627:0001.0001: input,hidraw0: USB HID v0.01 Mouse [QEMU QEMU USB Tablet] on usb-0000:00:01.2-1/input0
Oct 11 00:47:34 localhost kernel: Running certificate verification RSA selftest
Oct 11 00:47:34 localhost kernel: Loaded X.509 cert 'Certificate verification self-testing key: f58703bb33ce1b73ee02eccdee5b8817518fe3db'
Oct 11 00:47:34 localhost kernel: Running certificate verification ECDSA selftest
Oct 11 00:47:34 localhost kernel: Loaded X.509 cert 'Certificate verification ECDSA self-testing key: 2900bcea1deb7bc8479a84a23d758efdfdd2b2d3'
Oct 11 00:47:34 localhost kernel: clk: Disabling unused clocks
Oct 11 00:47:34 localhost kernel: Freeing unused decrypted memory: 2028K
Oct 11 00:47:34 localhost kernel: Freeing unused kernel image (initmem) memory: 4188K
Oct 11 00:47:34 localhost kernel: Write protecting the kernel read-only data: 30720k
Oct 11 00:47:34 localhost kernel: Freeing unused kernel image (rodata/data gap) memory: 472K
Oct 11 00:47:34 localhost kernel: x86/mm: Checked W+X mappings: passed, no W+X pages found.
Oct 11 00:47:34 localhost kernel: Run /init as init process
Oct 11 00:47:34 localhost kernel:   with arguments:
Oct 11 00:47:34 localhost kernel:     /init
Oct 11 00:47:34 localhost kernel:   with environment:
Oct 11 00:47:34 localhost kernel:     HOME=/
Oct 11 00:47:34 localhost kernel:     TERM=linux
Oct 11 00:47:34 localhost kernel:     BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-621.el9.x86_64
Oct 11 00:47:34 localhost systemd[1]: systemd 252-57.el9 running in system mode (+PAM +AUDIT +SELINUX -APPARMOR +IMA +SMACK +SECCOMP +GCRYPT +GNUTLS +OPENSSL +ACL +BLKID +CURL +ELFUTILS +FIDO2 +IDN2 -IDN -IPTC +KMOD +LIBCRYPTSETUP +LIBFDISK +PCRE2 -PWQUALITY +P11KIT -QRENCODE +TPM2 +BZIP2 +LZ4 +XZ +ZLIB +ZSTD -BPF_FRAMEWORK +XKBCOMMON +UTMP +SYSVINIT default-hierarchy=unified)
Oct 11 00:47:34 localhost systemd[1]: Detected virtualization kvm.
Oct 11 00:47:34 localhost systemd[1]: Detected architecture x86-64.
Oct 11 00:47:34 localhost systemd[1]: Running in initrd.
Oct 11 00:47:34 localhost systemd[1]: No hostname configured, using default hostname.
Oct 11 00:47:34 localhost systemd[1]: Hostname set to <localhost>.
Oct 11 00:47:34 localhost systemd[1]: Initializing machine ID from VM UUID.
Oct 11 00:47:34 localhost systemd[1]: Queued start job for default target Initrd Default Target.
Oct 11 00:47:34 localhost systemd[1]: Started Dispatch Password Requests to Console Directory Watch.
Oct 11 00:47:34 localhost systemd[1]: Reached target Local Encrypted Volumes.
Oct 11 00:47:34 localhost systemd[1]: Reached target Initrd /usr File System.
Oct 11 00:47:34 localhost systemd[1]: Reached target Local File Systems.
Oct 11 00:47:34 localhost systemd[1]: Reached target Path Units.
Oct 11 00:47:34 localhost systemd[1]: Reached target Slice Units.
Oct 11 00:47:34 localhost systemd[1]: Reached target Swaps.
Oct 11 00:47:34 localhost systemd[1]: Reached target Timer Units.
Oct 11 00:47:34 localhost systemd[1]: Listening on D-Bus System Message Bus Socket.
Oct 11 00:47:34 localhost systemd[1]: Listening on Journal Socket (/dev/log).
Oct 11 00:47:34 localhost systemd[1]: Listening on Journal Socket.
Oct 11 00:47:34 localhost systemd[1]: Listening on udev Control Socket.
Oct 11 00:47:34 localhost systemd[1]: Listening on udev Kernel Socket.
Oct 11 00:47:34 localhost systemd[1]: Reached target Socket Units.
Oct 11 00:47:34 localhost systemd[1]: Starting Create List of Static Device Nodes...
Oct 11 00:47:34 localhost systemd[1]: Starting Journal Service...
Oct 11 00:47:34 localhost systemd[1]: Load Kernel Modules was skipped because no trigger condition checks were met.
Oct 11 00:47:34 localhost systemd[1]: Starting Apply Kernel Variables...
Oct 11 00:47:34 localhost systemd[1]: Starting Create System Users...
Oct 11 00:47:34 localhost systemd[1]: Starting Setup Virtual Console...
Oct 11 00:47:34 localhost systemd[1]: Finished Create List of Static Device Nodes.
Oct 11 00:47:34 localhost systemd[1]: Finished Apply Kernel Variables.
Oct 11 00:47:34 localhost systemd[1]: Finished Create System Users.
Oct 11 00:47:34 localhost systemd-journald[303]: Journal started
Oct 11 00:47:34 localhost systemd-journald[303]: Runtime Journal (/run/log/journal/c0909b4b08604b28ab6b0ab32acb5a0f) is 8.0M, max 153.6M, 145.6M free.
Oct 11 00:47:34 localhost systemd-sysusers[307]: Creating group 'users' with GID 100.
Oct 11 00:47:34 localhost systemd-sysusers[307]: Creating group 'dbus' with GID 81.
Oct 11 00:47:34 localhost systemd-sysusers[307]: Creating user 'dbus' (System Message Bus) with UID 81 and GID 81.
Oct 11 00:47:34 localhost systemd[1]: Started Journal Service.
Oct 11 00:47:34 localhost systemd[1]: Starting Create Static Device Nodes in /dev...
Oct 11 00:47:34 localhost systemd[1]: Starting Create Volatile Files and Directories...
Oct 11 00:47:34 localhost systemd[1]: Finished Create Static Device Nodes in /dev.
Oct 11 00:47:34 localhost systemd[1]: Finished Create Volatile Files and Directories.
Oct 11 00:47:34 localhost systemd[1]: Finished Setup Virtual Console.
Oct 11 00:47:34 localhost systemd[1]: dracut ask for additional cmdline parameters was skipped because no trigger condition checks were met.
Oct 11 00:47:34 localhost systemd[1]: Starting dracut cmdline hook...
Oct 11 00:47:34 localhost dracut-cmdline[323]: dracut-9 dracut-057-102.git20250818.el9
Oct 11 00:47:34 localhost dracut-cmdline[323]: Using kernel command line parameters:    BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-621.el9.x86_64 root=UUID=9839e2e1-98a2-4594-b609-79d514deb0a3 ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M
Oct 11 00:47:34 localhost systemd[1]: Finished dracut cmdline hook.
Oct 11 00:47:34 localhost systemd[1]: Starting dracut pre-udev hook...
Oct 11 00:47:34 localhost kernel: device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
Oct 11 00:47:34 localhost kernel: device-mapper: uevent: version 1.0.3
Oct 11 00:47:34 localhost kernel: device-mapper: ioctl: 4.50.0-ioctl (2025-04-28) initialised: dm-devel@lists.linux.dev
Oct 11 00:47:35 localhost kernel: RPC: Registered named UNIX socket transport module.
Oct 11 00:47:35 localhost kernel: RPC: Registered udp transport module.
Oct 11 00:47:35 localhost kernel: RPC: Registered tcp transport module.
Oct 11 00:47:35 localhost kernel: RPC: Registered tcp-with-tls transport module.
Oct 11 00:47:35 localhost kernel: RPC: Registered tcp NFSv4.1 backchannel transport module.
Oct 11 00:47:35 localhost rpc.statd[439]: Version 2.5.4 starting
Oct 11 00:47:35 localhost rpc.statd[439]: Initializing NSM state
Oct 11 00:47:35 localhost rpc.idmapd[444]: Setting log level to 0
Oct 11 00:47:35 localhost systemd[1]: Finished dracut pre-udev hook.
Oct 11 00:47:35 localhost systemd[1]: Starting Rule-based Manager for Device Events and Files...
Oct 11 00:47:35 localhost systemd-udevd[457]: Using default interface naming scheme 'rhel-9.0'.
Oct 11 00:47:35 localhost systemd[1]: Started Rule-based Manager for Device Events and Files.
Oct 11 00:47:35 localhost systemd[1]: Starting dracut pre-trigger hook...
Oct 11 00:47:35 localhost systemd[1]: Finished dracut pre-trigger hook.
Oct 11 00:47:35 localhost systemd[1]: Starting Coldplug All udev Devices...
Oct 11 00:47:35 localhost systemd[1]: Created slice Slice /system/modprobe.
Oct 11 00:47:35 localhost systemd[1]: Starting Load Kernel Module configfs...
Oct 11 00:47:35 localhost systemd[1]: Finished Coldplug All udev Devices.
Oct 11 00:47:35 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Oct 11 00:47:35 localhost systemd[1]: Finished Load Kernel Module configfs.
Oct 11 00:47:35 localhost systemd[1]: Mounting Kernel Configuration File System...
Oct 11 00:47:35 localhost systemd[1]: nm-initrd.service was skipped because of an unmet condition check (ConditionPathExists=/run/NetworkManager/initrd/neednet).
Oct 11 00:47:35 localhost systemd[1]: Reached target Network.
Oct 11 00:47:35 localhost systemd[1]: nm-wait-online-initrd.service was skipped because of an unmet condition check (ConditionPathExists=/run/NetworkManager/initrd/neednet).
Oct 11 00:47:35 localhost systemd[1]: Starting dracut initqueue hook...
Oct 11 00:47:35 localhost systemd[1]: Mounted Kernel Configuration File System.
Oct 11 00:47:35 localhost systemd[1]: Reached target System Initialization.
Oct 11 00:47:35 localhost kernel: virtio_blk virtio2: 8/0/0 default/read/poll queues
Oct 11 00:47:35 localhost kernel: virtio_blk virtio2: [vda] 167772160 512-byte logical blocks (85.9 GB/80.0 GiB)
Oct 11 00:47:35 localhost kernel:  vda: vda1
Oct 11 00:47:35 localhost systemd[1]: Reached target Basic System.
Oct 11 00:47:35 localhost systemd-udevd[493]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 00:47:35 localhost kernel: libata version 3.00 loaded.
Oct 11 00:47:35 localhost kernel: ata_piix 0000:00:01.1: version 2.13
Oct 11 00:47:35 localhost kernel: scsi host0: ata_piix
Oct 11 00:47:35 localhost kernel: scsi host1: ata_piix
Oct 11 00:47:35 localhost kernel: ata1: PATA max MWDMA2 cmd 0x1f0 ctl 0x3f6 bmdma 0xc140 irq 14 lpm-pol 0
Oct 11 00:47:35 localhost kernel: ata2: PATA max MWDMA2 cmd 0x170 ctl 0x376 bmdma 0xc148 irq 15 lpm-pol 0
Oct 11 00:47:35 localhost systemd[1]: Found device /dev/disk/by-uuid/9839e2e1-98a2-4594-b609-79d514deb0a3.
Oct 11 00:47:35 localhost systemd[1]: Reached target Initrd Root Device.
Oct 11 00:47:35 localhost kernel: ata1: found unknown device (class 0)
Oct 11 00:47:35 localhost kernel: ata1.00: ATAPI: QEMU DVD-ROM, 2.5+, max UDMA/100
Oct 11 00:47:35 localhost kernel: scsi 0:0:0:0: CD-ROM            QEMU     QEMU DVD-ROM     2.5+ PQ: 0 ANSI: 5
Oct 11 00:47:35 localhost kernel: scsi 0:0:0:0: Attached scsi generic sg0 type 5
Oct 11 00:47:35 localhost kernel: sr 0:0:0:0: [sr0] scsi3-mmc drive: 4x/4x cd/rw xa/form2 tray
Oct 11 00:47:35 localhost kernel: cdrom: Uniform CD-ROM driver Revision: 3.20
Oct 11 00:47:35 localhost kernel: sr 0:0:0:0: Attached scsi CD-ROM sr0
Oct 11 00:47:35 localhost systemd[1]: Finished dracut initqueue hook.
Oct 11 00:47:35 localhost systemd[1]: Reached target Preparation for Remote File Systems.
Oct 11 00:47:35 localhost systemd[1]: Reached target Remote Encrypted Volumes.
Oct 11 00:47:35 localhost systemd[1]: Reached target Remote File Systems.
Oct 11 00:47:35 localhost systemd[1]: Starting dracut pre-mount hook...
Oct 11 00:47:35 localhost systemd[1]: Finished dracut pre-mount hook.
Oct 11 00:47:35 localhost systemd[1]: Starting File System Check on /dev/disk/by-uuid/9839e2e1-98a2-4594-b609-79d514deb0a3...
Oct 11 00:47:36 localhost systemd-fsck[551]: /usr/sbin/fsck.xfs: XFS file system.
Oct 11 00:47:36 localhost systemd[1]: Finished File System Check on /dev/disk/by-uuid/9839e2e1-98a2-4594-b609-79d514deb0a3.
Oct 11 00:47:36 localhost systemd[1]: Mounting /sysroot...
Oct 11 00:47:36 localhost kernel: SGI XFS with ACLs, security attributes, scrub, quota, no debug enabled
Oct 11 00:47:36 localhost kernel: XFS (vda1): Mounting V5 Filesystem 9839e2e1-98a2-4594-b609-79d514deb0a3
Oct 11 00:47:36 localhost kernel: XFS (vda1): Ending clean mount
Oct 11 00:47:36 localhost systemd[1]: Mounted /sysroot.
Oct 11 00:47:36 localhost systemd[1]: Reached target Initrd Root File System.
Oct 11 00:47:36 localhost systemd[1]: Starting Mountpoints Configured in the Real Root...
Oct 11 00:47:36 localhost systemd[1]: initrd-parse-etc.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Finished Mountpoints Configured in the Real Root.
Oct 11 00:47:36 localhost systemd[1]: Reached target Initrd File Systems.
Oct 11 00:47:36 localhost systemd[1]: Reached target Initrd Default Target.
Oct 11 00:47:36 localhost systemd[1]: Starting dracut mount hook...
Oct 11 00:47:36 localhost systemd[1]: Finished dracut mount hook.
Oct 11 00:47:36 localhost systemd[1]: Starting dracut pre-pivot and cleanup hook...
Oct 11 00:47:36 localhost rpc.idmapd[444]: exiting on signal 15
Oct 11 00:47:36 localhost systemd[1]: var-lib-nfs-rpc_pipefs.mount: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Finished dracut pre-pivot and cleanup hook.
Oct 11 00:47:36 localhost systemd[1]: Starting Cleaning Up and Shutting Down Daemons...
Oct 11 00:47:36 localhost systemd[1]: Stopped target Network.
Oct 11 00:47:36 localhost systemd[1]: Stopped target Remote Encrypted Volumes.
Oct 11 00:47:36 localhost systemd[1]: Stopped target Timer Units.
Oct 11 00:47:36 localhost systemd[1]: dbus.socket: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Closed D-Bus System Message Bus Socket.
Oct 11 00:47:36 localhost systemd[1]: dracut-pre-pivot.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Stopped dracut pre-pivot and cleanup hook.
Oct 11 00:47:36 localhost systemd[1]: Stopped target Initrd Default Target.
Oct 11 00:47:36 localhost systemd[1]: Stopped target Basic System.
Oct 11 00:47:36 localhost systemd[1]: Stopped target Initrd Root Device.
Oct 11 00:47:36 localhost systemd[1]: Stopped target Initrd /usr File System.
Oct 11 00:47:36 localhost systemd[1]: Stopped target Path Units.
Oct 11 00:47:36 localhost systemd[1]: Stopped target Remote File Systems.
Oct 11 00:47:36 localhost systemd[1]: Stopped target Preparation for Remote File Systems.
Oct 11 00:47:36 localhost systemd[1]: Stopped target Slice Units.
Oct 11 00:47:36 localhost systemd[1]: Stopped target Socket Units.
Oct 11 00:47:36 localhost systemd[1]: Stopped target System Initialization.
Oct 11 00:47:36 localhost systemd[1]: Stopped target Local File Systems.
Oct 11 00:47:36 localhost systemd[1]: Stopped target Swaps.
Oct 11 00:47:36 localhost systemd[1]: dracut-mount.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Stopped dracut mount hook.
Oct 11 00:47:36 localhost systemd[1]: dracut-pre-mount.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Stopped dracut pre-mount hook.
Oct 11 00:47:36 localhost systemd[1]: Stopped target Local Encrypted Volumes.
Oct 11 00:47:36 localhost systemd[1]: systemd-ask-password-console.path: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Stopped Dispatch Password Requests to Console Directory Watch.
Oct 11 00:47:36 localhost systemd[1]: dracut-initqueue.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Stopped dracut initqueue hook.
Oct 11 00:47:36 localhost systemd[1]: systemd-sysctl.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Stopped Apply Kernel Variables.
Oct 11 00:47:36 localhost systemd[1]: systemd-tmpfiles-setup.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Stopped Create Volatile Files and Directories.
Oct 11 00:47:36 localhost systemd[1]: systemd-udev-trigger.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Stopped Coldplug All udev Devices.
Oct 11 00:47:36 localhost systemd[1]: dracut-pre-trigger.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Stopped dracut pre-trigger hook.
Oct 11 00:47:36 localhost systemd[1]: Stopping Rule-based Manager for Device Events and Files...
Oct 11 00:47:36 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Stopped Setup Virtual Console.
Oct 11 00:47:36 localhost systemd[1]: run-credentials-systemd\x2dtmpfiles\x2dsetup.service.mount: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: run-credentials-systemd\x2dsysctl.service.mount: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: systemd-udevd.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Stopped Rule-based Manager for Device Events and Files.
Oct 11 00:47:36 localhost systemd[1]: systemd-udevd.service: Consumed 1.078s CPU time.
Oct 11 00:47:36 localhost systemd[1]: systemd-udevd-control.socket: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Closed udev Control Socket.
Oct 11 00:47:36 localhost systemd[1]: systemd-udevd-kernel.socket: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Closed udev Kernel Socket.
Oct 11 00:47:36 localhost systemd[1]: dracut-pre-udev.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Stopped dracut pre-udev hook.
Oct 11 00:47:36 localhost systemd[1]: dracut-cmdline.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Stopped dracut cmdline hook.
Oct 11 00:47:36 localhost systemd[1]: Starting Cleanup udev Database...
Oct 11 00:47:36 localhost systemd[1]: systemd-tmpfiles-setup-dev.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Stopped Create Static Device Nodes in /dev.
Oct 11 00:47:36 localhost systemd[1]: kmod-static-nodes.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Stopped Create List of Static Device Nodes.
Oct 11 00:47:36 localhost systemd[1]: systemd-sysusers.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Stopped Create System Users.
Oct 11 00:47:36 localhost systemd[1]: run-credentials-systemd\x2dtmpfiles\x2dsetup\x2ddev.service.mount: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: run-credentials-systemd\x2dsysusers.service.mount: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: initrd-cleanup.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Finished Cleaning Up and Shutting Down Daemons.
Oct 11 00:47:36 localhost systemd[1]: initrd-udevadm-cleanup-db.service: Deactivated successfully.
Oct 11 00:47:36 localhost systemd[1]: Finished Cleanup udev Database.
Oct 11 00:47:36 localhost systemd[1]: Reached target Switch Root.
Oct 11 00:47:36 localhost systemd[1]: Starting Switch Root...
Oct 11 00:47:36 localhost systemd[1]: Switching root.
Oct 11 00:47:36 localhost systemd-journald[303]: Journal stopped
Oct 11 00:47:38 localhost systemd-journald[303]: Received SIGTERM from PID 1 (systemd).
Oct 11 00:47:38 localhost kernel: audit: type=1404 audit(1760143657.072:2): enforcing=1 old_enforcing=0 auid=4294967295 ses=4294967295 enabled=1 old-enabled=1 lsm=selinux res=1
Oct 11 00:47:38 localhost kernel: SELinux:  policy capability network_peer_controls=1
Oct 11 00:47:38 localhost kernel: SELinux:  policy capability open_perms=1
Oct 11 00:47:38 localhost kernel: SELinux:  policy capability extended_socket_class=1
Oct 11 00:47:38 localhost kernel: SELinux:  policy capability always_check_network=0
Oct 11 00:47:38 localhost kernel: SELinux:  policy capability cgroup_seclabel=1
Oct 11 00:47:38 localhost kernel: SELinux:  policy capability nnp_nosuid_transition=1
Oct 11 00:47:38 localhost kernel: SELinux:  policy capability genfs_seclabel_symlinks=1
Oct 11 00:47:38 localhost kernel: audit: type=1403 audit(1760143657.238:3): auid=4294967295 ses=4294967295 lsm=selinux res=1
Oct 11 00:47:38 localhost systemd[1]: Successfully loaded SELinux policy in 170.520ms.
Oct 11 00:47:38 localhost systemd[1]: Relabelled /dev, /dev/shm, /run, /sys/fs/cgroup in 25.754ms.
Oct 11 00:47:38 localhost systemd[1]: systemd 252-57.el9 running in system mode (+PAM +AUDIT +SELINUX -APPARMOR +IMA +SMACK +SECCOMP +GCRYPT +GNUTLS +OPENSSL +ACL +BLKID +CURL +ELFUTILS +FIDO2 +IDN2 -IDN -IPTC +KMOD +LIBCRYPTSETUP +LIBFDISK +PCRE2 -PWQUALITY +P11KIT -QRENCODE +TPM2 +BZIP2 +LZ4 +XZ +ZLIB +ZSTD -BPF_FRAMEWORK +XKBCOMMON +UTMP +SYSVINIT default-hierarchy=unified)
Oct 11 00:47:38 localhost systemd[1]: Detected virtualization kvm.
Oct 11 00:47:38 localhost systemd[1]: Detected architecture x86-64.
Oct 11 00:47:38 localhost systemd-rc-local-generator[633]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 00:47:38 localhost systemd[1]: initrd-switch-root.service: Deactivated successfully.
Oct 11 00:47:38 localhost systemd[1]: Stopped Switch Root.
Oct 11 00:47:38 localhost systemd[1]: systemd-journald.service: Scheduled restart job, restart counter is at 1.
Oct 11 00:47:38 localhost systemd[1]: Created slice Slice /system/getty.
Oct 11 00:47:38 localhost systemd[1]: Created slice Slice /system/serial-getty.
Oct 11 00:47:38 localhost systemd[1]: Created slice Slice /system/sshd-keygen.
Oct 11 00:47:38 localhost systemd[1]: Created slice User and Session Slice.
Oct 11 00:47:38 localhost systemd[1]: Started Dispatch Password Requests to Console Directory Watch.
Oct 11 00:47:38 localhost systemd[1]: Started Forward Password Requests to Wall Directory Watch.
Oct 11 00:47:38 localhost systemd[1]: Set up automount Arbitrary Executable File Formats File System Automount Point.
Oct 11 00:47:38 localhost systemd[1]: Reached target Local Encrypted Volumes.
Oct 11 00:47:38 localhost systemd[1]: Stopped target Switch Root.
Oct 11 00:47:38 localhost systemd[1]: Stopped target Initrd File Systems.
Oct 11 00:47:38 localhost systemd[1]: Stopped target Initrd Root File System.
Oct 11 00:47:38 localhost systemd[1]: Reached target Local Integrity Protected Volumes.
Oct 11 00:47:38 localhost systemd[1]: Reached target Path Units.
Oct 11 00:47:38 localhost systemd[1]: Reached target rpc_pipefs.target.
Oct 11 00:47:38 localhost systemd[1]: Reached target Slice Units.
Oct 11 00:47:38 localhost systemd[1]: Reached target Swaps.
Oct 11 00:47:38 localhost systemd[1]: Reached target Local Verity Protected Volumes.
Oct 11 00:47:38 localhost systemd[1]: Listening on RPCbind Server Activation Socket.
Oct 11 00:47:38 localhost systemd[1]: Reached target RPC Port Mapper.
Oct 11 00:47:38 localhost systemd[1]: Listening on Process Core Dump Socket.
Oct 11 00:47:38 localhost systemd[1]: Listening on initctl Compatibility Named Pipe.
Oct 11 00:47:38 localhost systemd[1]: Listening on udev Control Socket.
Oct 11 00:47:38 localhost systemd[1]: Listening on udev Kernel Socket.
Oct 11 00:47:38 localhost systemd[1]: Mounting Huge Pages File System...
Oct 11 00:47:38 localhost systemd[1]: Mounting POSIX Message Queue File System...
Oct 11 00:47:38 localhost systemd[1]: Mounting Kernel Debug File System...
Oct 11 00:47:38 localhost systemd[1]: Mounting Kernel Trace File System...
Oct 11 00:47:38 localhost systemd[1]: Kernel Module supporting RPCSEC_GSS was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab).
Oct 11 00:47:38 localhost systemd[1]: Starting Create List of Static Device Nodes...
Oct 11 00:47:38 localhost systemd[1]: Starting Load Kernel Module configfs...
Oct 11 00:47:38 localhost systemd[1]: Starting Load Kernel Module drm...
Oct 11 00:47:38 localhost systemd[1]: Starting Load Kernel Module efi_pstore...
Oct 11 00:47:38 localhost systemd[1]: Starting Load Kernel Module fuse...
Oct 11 00:47:38 localhost systemd[1]: Starting Read and set NIS domainname from /etc/sysconfig/network...
Oct 11 00:47:38 localhost systemd[1]: systemd-fsck-root.service: Deactivated successfully.
Oct 11 00:47:38 localhost systemd[1]: Stopped File System Check on Root Device.
Oct 11 00:47:38 localhost systemd[1]: Stopped Journal Service.
Oct 11 00:47:38 localhost systemd[1]: Starting Journal Service...
Oct 11 00:47:38 localhost systemd[1]: Load Kernel Modules was skipped because no trigger condition checks were met.
Oct 11 00:47:38 localhost systemd[1]: Starting Generate network units from Kernel command line...
Oct 11 00:47:38 localhost systemd[1]: TPM2 PCR Machine ID Measurement was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).
Oct 11 00:47:38 localhost systemd[1]: Starting Remount Root and Kernel File Systems...
Oct 11 00:47:38 localhost systemd[1]: Repartition Root Disk was skipped because no trigger condition checks were met.
Oct 11 00:47:38 localhost systemd[1]: Starting Apply Kernel Variables...
Oct 11 00:47:38 localhost systemd[1]: Starting Coldplug All udev Devices...
Oct 11 00:47:38 localhost kernel: fuse: init (API version 7.37)
Oct 11 00:47:38 localhost systemd[1]: Mounted Huge Pages File System.
Oct 11 00:47:38 localhost kernel: xfs filesystem being remounted at / supports timestamps until 2038 (0x7fffffff)
Oct 11 00:47:38 localhost systemd[1]: Mounted POSIX Message Queue File System.
Oct 11 00:47:38 localhost systemd[1]: Mounted Kernel Debug File System.
Oct 11 00:47:38 localhost systemd[1]: Mounted Kernel Trace File System.
Oct 11 00:47:38 localhost systemd-journald[675]: Journal started
Oct 11 00:47:38 localhost systemd-journald[675]: Runtime Journal (/run/log/journal/a1727ec20198bc6caf436a6e13c4ff5e) is 8.0M, max 153.6M, 145.6M free.
Oct 11 00:47:37 localhost systemd[1]: Queued start job for default target Multi-User System.
Oct 11 00:47:37 localhost systemd[1]: systemd-journald.service: Deactivated successfully.
Oct 11 00:47:38 localhost systemd[1]: Started Journal Service.
Oct 11 00:47:38 localhost systemd[1]: Finished Create List of Static Device Nodes.
Oct 11 00:47:38 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Oct 11 00:47:38 localhost systemd[1]: Finished Load Kernel Module configfs.
Oct 11 00:47:38 localhost systemd[1]: modprobe@efi_pstore.service: Deactivated successfully.
Oct 11 00:47:38 localhost systemd[1]: Finished Load Kernel Module efi_pstore.
Oct 11 00:47:38 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully.
Oct 11 00:47:38 localhost systemd[1]: Finished Load Kernel Module fuse.
Oct 11 00:47:38 localhost systemd[1]: Finished Read and set NIS domainname from /etc/sysconfig/network.
Oct 11 00:47:38 localhost systemd[1]: Finished Generate network units from Kernel command line.
Oct 11 00:47:38 localhost systemd[1]: Finished Remount Root and Kernel File Systems.
Oct 11 00:47:38 localhost systemd[1]: Finished Apply Kernel Variables.
Oct 11 00:47:38 localhost systemd[1]: Mounting FUSE Control File System...
Oct 11 00:47:38 localhost systemd[1]: First Boot Wizard was skipped because of an unmet condition check (ConditionFirstBoot=yes).
Oct 11 00:47:38 localhost kernel: ACPI: bus type drm_connector registered
Oct 11 00:47:38 localhost systemd[1]: Starting Rebuild Hardware Database...
Oct 11 00:47:38 localhost systemd[1]: Starting Flush Journal to Persistent Storage...
Oct 11 00:47:38 localhost systemd[1]: Platform Persistent Storage Archival was skipped because of an unmet condition check (ConditionDirectoryNotEmpty=/sys/fs/pstore).
Oct 11 00:47:38 localhost systemd[1]: Starting Load/Save OS Random Seed...
Oct 11 00:47:38 localhost systemd[1]: Starting Create System Users...
Oct 11 00:47:38 localhost systemd[1]: modprobe@drm.service: Deactivated successfully.
Oct 11 00:47:38 localhost systemd[1]: Finished Load Kernel Module drm.
Oct 11 00:47:38 localhost systemd[1]: Mounted FUSE Control File System.
Oct 11 00:47:38 localhost systemd-journald[675]: Runtime Journal (/run/log/journal/a1727ec20198bc6caf436a6e13c4ff5e) is 8.0M, max 153.6M, 145.6M free.
Oct 11 00:47:38 localhost systemd-journald[675]: Received client request to flush runtime journal.
Oct 11 00:47:38 localhost systemd[1]: Finished Flush Journal to Persistent Storage.
Oct 11 00:47:38 localhost systemd[1]: Finished Load/Save OS Random Seed.
Oct 11 00:47:38 localhost systemd[1]: First Boot Complete was skipped because of an unmet condition check (ConditionFirstBoot=yes).
Oct 11 00:47:38 localhost systemd[1]: Finished Create System Users.
Oct 11 00:47:38 localhost systemd[1]: Starting Create Static Device Nodes in /dev...
Oct 11 00:47:38 localhost systemd[1]: Finished Coldplug All udev Devices.
Oct 11 00:47:38 localhost systemd[1]: Finished Create Static Device Nodes in /dev.
Oct 11 00:47:38 localhost systemd[1]: Reached target Preparation for Local File Systems.
Oct 11 00:47:38 localhost systemd[1]: Reached target Local File Systems.
Oct 11 00:47:38 localhost systemd[1]: Starting Rebuild Dynamic Linker Cache...
Oct 11 00:47:38 localhost systemd[1]: Mark the need to relabel after reboot was skipped because of an unmet condition check (ConditionSecurity=!selinux).
Oct 11 00:47:38 localhost systemd[1]: Set Up Additional Binary Formats was skipped because no trigger condition checks were met.
Oct 11 00:47:38 localhost systemd[1]: Update Boot Loader Random Seed was skipped because no trigger condition checks were met.
Oct 11 00:47:38 localhost systemd[1]: Starting Automatic Boot Loader Update...
Oct 11 00:47:38 localhost systemd[1]: Commit a transient machine-id on disk was skipped because of an unmet condition check (ConditionPathIsMountPoint=/etc/machine-id).
Oct 11 00:47:38 localhost systemd[1]: Starting Create Volatile Files and Directories...
Oct 11 00:47:38 localhost bootctl[693]: Couldn't find EFI system partition, skipping.
Oct 11 00:47:38 localhost systemd[1]: Finished Automatic Boot Loader Update.
Oct 11 00:47:38 localhost systemd[1]: Finished Create Volatile Files and Directories.
Oct 11 00:47:38 localhost systemd[1]: Starting Security Auditing Service...
Oct 11 00:47:38 localhost systemd[1]: Starting RPC Bind...
Oct 11 00:47:38 localhost systemd[1]: Starting Rebuild Journal Catalog...
Oct 11 00:47:38 localhost auditd[699]: audit dispatcher initialized with q_depth=2000 and 1 active plugins
Oct 11 00:47:38 localhost auditd[699]: Init complete, auditd 3.1.5 listening for events (startup state enable)
Oct 11 00:47:38 localhost systemd[1]: Started RPC Bind.
Oct 11 00:47:38 localhost systemd[1]: Finished Rebuild Journal Catalog.
Oct 11 00:47:38 localhost augenrules[705]: /sbin/augenrules: No change
Oct 11 00:47:38 localhost augenrules[720]: No rules
Oct 11 00:47:38 localhost augenrules[720]: enabled 1
Oct 11 00:47:38 localhost augenrules[720]: failure 1
Oct 11 00:47:38 localhost augenrules[720]: pid 699
Oct 11 00:47:38 localhost augenrules[720]: rate_limit 0
Oct 11 00:47:38 localhost augenrules[720]: backlog_limit 8192
Oct 11 00:47:38 localhost augenrules[720]: lost 0
Oct 11 00:47:38 localhost augenrules[720]: backlog 3
Oct 11 00:47:38 localhost augenrules[720]: backlog_wait_time 60000
Oct 11 00:47:38 localhost augenrules[720]: backlog_wait_time_actual 0
Oct 11 00:47:38 localhost augenrules[720]: enabled 1
Oct 11 00:47:38 localhost augenrules[720]: failure 1
Oct 11 00:47:38 localhost augenrules[720]: pid 699
Oct 11 00:47:38 localhost augenrules[720]: rate_limit 0
Oct 11 00:47:38 localhost augenrules[720]: backlog_limit 8192
Oct 11 00:47:38 localhost augenrules[720]: lost 0
Oct 11 00:47:38 localhost augenrules[720]: backlog 4
Oct 11 00:47:38 localhost augenrules[720]: backlog_wait_time 60000
Oct 11 00:47:38 localhost augenrules[720]: backlog_wait_time_actual 0
Oct 11 00:47:38 localhost augenrules[720]: enabled 1
Oct 11 00:47:38 localhost augenrules[720]: failure 1
Oct 11 00:47:38 localhost augenrules[720]: pid 699
Oct 11 00:47:38 localhost augenrules[720]: rate_limit 0
Oct 11 00:47:38 localhost augenrules[720]: backlog_limit 8192
Oct 11 00:47:38 localhost augenrules[720]: lost 0
Oct 11 00:47:38 localhost augenrules[720]: backlog 1
Oct 11 00:47:38 localhost augenrules[720]: backlog_wait_time 60000
Oct 11 00:47:38 localhost augenrules[720]: backlog_wait_time_actual 0
Oct 11 00:47:38 localhost systemd[1]: Started Security Auditing Service.
Oct 11 00:47:38 localhost systemd[1]: Starting Record System Boot/Shutdown in UTMP...
Oct 11 00:47:38 localhost systemd[1]: Finished Record System Boot/Shutdown in UTMP.
Oct 11 00:47:38 localhost systemd[1]: Finished Rebuild Dynamic Linker Cache.
Oct 11 00:47:38 localhost systemd[1]: Finished Rebuild Hardware Database.
Oct 11 00:47:38 localhost systemd[1]: Starting Rule-based Manager for Device Events and Files...
Oct 11 00:47:38 localhost systemd[1]: Starting Update is Completed...
Oct 11 00:47:38 localhost systemd[1]: Finished Update is Completed.
Oct 11 00:47:38 localhost systemd-udevd[728]: Using default interface naming scheme 'rhel-9.0'.
Oct 11 00:47:38 localhost systemd[1]: Started Rule-based Manager for Device Events and Files.
Oct 11 00:47:38 localhost systemd[1]: Reached target System Initialization.
Oct 11 00:47:38 localhost systemd[1]: Started dnf makecache --timer.
Oct 11 00:47:38 localhost systemd[1]: Started Daily rotation of log files.
Oct 11 00:47:38 localhost systemd[1]: Started Daily Cleanup of Temporary Directories.
Oct 11 00:47:38 localhost systemd[1]: Reached target Timer Units.
Oct 11 00:47:38 localhost systemd[1]: Listening on D-Bus System Message Bus Socket.
Oct 11 00:47:38 localhost systemd[1]: Listening on SSSD Kerberos Cache Manager responder socket.
Oct 11 00:47:38 localhost systemd[1]: Reached target Socket Units.
Oct 11 00:47:38 localhost systemd[1]: Starting D-Bus System Message Bus...
Oct 11 00:47:38 localhost systemd[1]: TPM2 PCR Barrier (Initialization) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).
Oct 11 00:47:38 localhost systemd[1]: Starting Load Kernel Module configfs...
Oct 11 00:47:38 localhost systemd-udevd[736]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 00:47:38 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Oct 11 00:47:38 localhost systemd[1]: Finished Load Kernel Module configfs.
Oct 11 00:47:38 localhost systemd[1]: Condition check resulted in /dev/ttyS0 being skipped.
Oct 11 00:47:38 localhost systemd[1]: Started D-Bus System Message Bus.
Oct 11 00:47:38 localhost systemd[1]: Reached target Basic System.
Oct 11 00:47:38 localhost kernel: input: PC Speaker as /devices/platform/pcspkr/input/input6
Oct 11 00:47:38 localhost dbus-broker-lau[764]: Ready
Oct 11 00:47:38 localhost systemd[1]: Starting NTP client/server...
Oct 11 00:47:38 localhost systemd[1]: Starting Cloud-init: Local Stage (pre-network)...
Oct 11 00:47:38 localhost systemd[1]: Starting Restore /run/initramfs on shutdown...
Oct 11 00:47:38 localhost kernel: piix4_smbus 0000:00:01.3: SMBus Host Controller at 0x700, revision 0
Oct 11 00:47:38 localhost kernel: i2c i2c-0: 1/1 memory slots populated (from DMI)
Oct 11 00:47:38 localhost kernel: i2c i2c-0: Memory type 0x07 not supported yet, not instantiating SPD
Oct 11 00:47:38 localhost chronyd[784]: chronyd version 4.6.1 starting (+CMDMON +NTP +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +ASYNCDNS +NTS +SECHASH +IPV6 +DEBUG)
Oct 11 00:47:38 localhost chronyd[784]: Loaded 0 symmetric keys
Oct 11 00:47:38 localhost chronyd[784]: Using right/UTC timezone to obtain leap second data
Oct 11 00:47:38 localhost chronyd[784]: Loaded seccomp filter (level 2)
Oct 11 00:47:39 localhost systemd[1]: Starting IPv4 firewall with iptables...
Oct 11 00:47:39 localhost systemd[1]: Started irqbalance daemon.
Oct 11 00:47:39 localhost kernel: [drm] pci: virtio-vga detected at 0000:00:02.0
Oct 11 00:47:39 localhost kernel: virtio-pci 0000:00:02.0: vgaarb: deactivate vga console
Oct 11 00:47:39 localhost kernel: kvm_amd: TSC scaling supported
Oct 11 00:47:39 localhost kernel: kvm_amd: Nested Virtualization enabled
Oct 11 00:47:39 localhost kernel: kvm_amd: Nested Paging enabled
Oct 11 00:47:39 localhost kernel: kvm_amd: LBR virtualization supported
Oct 11 00:47:39 localhost systemd[1]: Load CPU microcode update was skipped because of an unmet condition check (ConditionPathExists=/sys/devices/system/cpu/microcode/reload).
Oct 11 00:47:39 localhost systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Oct 11 00:47:39 localhost systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Oct 11 00:47:39 localhost systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Oct 11 00:47:39 localhost systemd[1]: Reached target sshd-keygen.target.
Oct 11 00:47:39 localhost kernel: Console: switching to colour dummy device 80x25
Oct 11 00:47:39 localhost kernel: [drm] features: -virgl +edid -resource_blob -host_visible
Oct 11 00:47:39 localhost kernel: [drm] features: -context_init
Oct 11 00:47:39 localhost kernel: [drm] number of scanouts: 1
Oct 11 00:47:39 localhost kernel: [drm] number of cap sets: 0
Oct 11 00:47:39 localhost kernel: [drm] Initialized virtio_gpu 0.1.0 for 0000:00:02.0 on minor 0
Oct 11 00:47:39 localhost kernel: fbcon: virtio_gpudrmfb (fb0) is primary device
Oct 11 00:47:39 localhost kernel: Console: switching to colour frame buffer device 128x48
Oct 11 00:47:39 localhost kernel: virtio-pci 0000:00:02.0: [drm] fb0: virtio_gpudrmfb frame buffer device
Oct 11 00:47:39 localhost systemd[1]: System Security Services Daemon was skipped because no trigger condition checks were met.
Oct 11 00:47:39 localhost systemd[1]: Reached target User and Group Name Lookups.
Oct 11 00:47:39 localhost kernel: Warning: Deprecated Driver is detected: nft_compat will not be maintained in a future major release and may be disabled
Oct 11 00:47:39 localhost kernel: Warning: Deprecated Driver is detected: nft_compat_module_init will not be maintained in a future major release and may be disabled
Oct 11 00:47:39 localhost systemd[1]: Starting User Login Management...
Oct 11 00:47:39 localhost systemd[1]: Started NTP client/server.
Oct 11 00:47:39 localhost systemd[1]: Finished Restore /run/initramfs on shutdown.
Oct 11 00:47:39 localhost systemd-logind[804]: New seat seat0.
Oct 11 00:47:39 localhost systemd-logind[804]: Watching system buttons on /dev/input/event0 (Power Button)
Oct 11 00:47:39 localhost systemd-logind[804]: Watching system buttons on /dev/input/event1 (AT Translated Set 2 keyboard)
Oct 11 00:47:39 localhost systemd[1]: Started User Login Management.
Oct 11 00:47:39 localhost iptables.init[780]: iptables: Applying firewall rules: [  OK  ]
Oct 11 00:47:39 localhost systemd[1]: Finished IPv4 firewall with iptables.
Oct 11 00:47:39 localhost cloud-init[836]: Cloud-init v. 24.4-7.el9 running 'init-local' at Sat, 11 Oct 2025 00:47:39 +0000. Up 7.31 seconds.
Oct 11 00:47:39 localhost kernel: ISO 9660 Extensions: Microsoft Joliet Level 3
Oct 11 00:47:39 localhost kernel: ISO 9660 Extensions: RRIP_1991A
Oct 11 00:47:39 localhost systemd[1]: run-cloud\x2dinit-tmp-tmpb5myvk_r.mount: Deactivated successfully.
Oct 11 00:47:39 localhost systemd[1]: Starting Hostname Service...
Oct 11 00:47:39 localhost systemd[1]: Started Hostname Service.
Oct 11 00:47:39 np0005480793.novalocal systemd-hostnamed[850]: Hostname set to <np0005480793.novalocal> (static)
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: Finished Cloud-init: Local Stage (pre-network).
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: Reached target Preparation for Network.
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: Starting Network Manager...
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2153] NetworkManager (version 1.54.1-1.el9) is starting... (boot:eb68ea8c-2b5a-452e-9a83-23761d4fd4c0)
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2158] Read config: /etc/NetworkManager/NetworkManager.conf, /run/NetworkManager/conf.d/15-carrier-timeout.conf
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2375] manager[0x5633a4585080]: monitoring kernel firmware directory '/lib/firmware'.
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2443] hostname: hostname: using hostnamed
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2444] hostname: static hostname changed from (none) to "np0005480793.novalocal"
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2450] dns-mgr: init: dns=default,systemd-resolved rc-manager=symlink (auto)
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2612] manager[0x5633a4585080]: rfkill: Wi-Fi hardware radio set enabled
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2613] manager[0x5633a4585080]: rfkill: WWAN hardware radio set enabled
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: Listening on Load/Save RF Kill Switch Status /dev/rfkill Watch.
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2720] Loaded device plugin: NMTeamFactory (/usr/lib64/NetworkManager/1.54.1-1.el9/libnm-device-plugin-team.so)
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2722] manager: rfkill: Wi-Fi enabled by radio killswitch; enabled by state file
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2723] manager: rfkill: WWAN enabled by radio killswitch; enabled by state file
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2724] manager: Networking is enabled by state file
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2727] settings: Loaded settings plugin: keyfile (internal)
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2761] settings: Loaded settings plugin: ifcfg-rh ("/usr/lib64/NetworkManager/1.54.1-1.el9/libnm-settings-plugin-ifcfg-rh.so")
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2803] Warning: the ifcfg-rh plugin is deprecated, please migrate connections to the keyfile format using "nmcli connection migrate"
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2834] dhcp: init: Using DHCP client 'internal'
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2838] manager: (lo): new Loopback device (/org/freedesktop/NetworkManager/Devices/1)
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2859] device (lo): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2876] device (lo): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external')
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2887] device (lo): Activation: starting connection 'lo' (33081159-a34e-4514-87f3-ab50b6bb8250)
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2903] manager: (eth0): new Ethernet device (/org/freedesktop/NetworkManager/Devices/2)
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2908] device (eth0): state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: Starting Network Manager Script Dispatcher Service...
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: Started Network Manager.
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2956] bus-manager: acquired D-Bus service "org.freedesktop.NetworkManager"
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2962] device (lo): state change: disconnected -> prepare (reason 'none', managed-type: 'external')
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2966] device (lo): state change: prepare -> config (reason 'none', managed-type: 'external')
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2968] device (lo): state change: config -> ip-config (reason 'none', managed-type: 'external')
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2972] device (eth0): carrier: link connected
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: Reached target Network.
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2977] device (lo): state change: ip-config -> ip-check (reason 'none', managed-type: 'external')
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2987] device (eth0): state change: unavailable -> disconnected (reason 'carrier-changed', managed-type: 'full')
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.2995] policy: auto-activating connection 'System eth0' (5fb06bd0-0bb0-7ffb-45f1-d6edd65f3e03)
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3002] device (eth0): Activation: starting connection 'System eth0' (5fb06bd0-0bb0-7ffb-45f1-d6edd65f3e03)
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3004] device (eth0): state change: disconnected -> prepare (reason 'none', managed-type: 'full')
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: Starting Network Manager Wait Online...
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3008] manager: NetworkManager state is now CONNECTING
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3010] device (eth0): state change: prepare -> config (reason 'none', managed-type: 'full')
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3021] device (eth0): state change: config -> ip-config (reason 'none', managed-type: 'full')
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3025] dhcp4 (eth0): activation: beginning transaction (timeout in 45 seconds)
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: Starting GSSAPI Proxy Daemon...
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3079] dhcp4 (eth0): state changed new lease, address=38.102.83.82
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3092] policy: set 'System eth0' (eth0) as default for IPv4 routing and DNS
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3130] device (eth0): state change: ip-config -> ip-check (reason 'none', managed-type: 'full')
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: Started Network Manager Script Dispatcher Service.
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3232] device (lo): state change: ip-check -> secondaries (reason 'none', managed-type: 'external')
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3236] device (lo): state change: secondaries -> activated (reason 'none', managed-type: 'external')
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3248] device (lo): Activation: successful, device activated.
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3281] device (eth0): state change: ip-check -> secondaries (reason 'none', managed-type: 'full')
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3284] device (eth0): state change: secondaries -> activated (reason 'none', managed-type: 'full')
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3290] manager: NetworkManager state is now CONNECTED_SITE
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3298] device (eth0): Activation: successful, device activated.
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3306] manager: NetworkManager state is now CONNECTED_GLOBAL
Oct 11 00:47:40 np0005480793.novalocal NetworkManager[854]: <info>  [1760143660.3312] manager: startup complete
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: Started GSSAPI Proxy Daemon.
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab).
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: Reached target NFS client services.
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: Reached target Preparation for Remote File Systems.
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: Reached target Remote File Systems.
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: TPM2 PCR Barrier (User) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: Finished Network Manager Wait Online.
Oct 11 00:47:40 np0005480793.novalocal systemd[1]: Starting Cloud-init: Network Stage...
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: Cloud-init v. 24.4-7.el9 running 'init' at Sat, 11 Oct 2025 00:47:40 +0000. Up 8.42 seconds.
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: +++++++++++++++++++++++++++++++++++++++Net device info+++++++++++++++++++++++++++++++++++++++
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: +--------+------+------------------------------+---------------+--------+-------------------+
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: | Device |  Up  |           Address            |      Mask     | Scope  |     Hw-Address    |
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: +--------+------+------------------------------+---------------+--------+-------------------+
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: |  eth0  | True |         38.102.83.82         | 255.255.255.0 | global | fa:16:3e:56:95:61 |
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: |  eth0  | True | fe80::f816:3eff:fe56:9561/64 |       .       |  link  | fa:16:3e:56:95:61 |
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: |   lo   | True |          127.0.0.1           |   255.0.0.0   |  host  |         .         |
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: |   lo   | True |           ::1/128            |       .       |  host  |         .         |
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: +--------+------+------------------------------+---------------+--------+-------------------+
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: +++++++++++++++++++++++++++++++++Route IPv4 info+++++++++++++++++++++++++++++++++
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: +-------+-----------------+---------------+-----------------+-----------+-------+
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: | Route |   Destination   |    Gateway    |     Genmask     | Interface | Flags |
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: +-------+-----------------+---------------+-----------------+-----------+-------+
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: |   0   |     0.0.0.0     |  38.102.83.1  |     0.0.0.0     |    eth0   |   UG  |
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: |   1   |   38.102.83.0   |    0.0.0.0    |  255.255.255.0  |    eth0   |   U   |
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: |   2   | 169.254.169.254 | 38.102.83.126 | 255.255.255.255 |    eth0   |  UGH  |
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: +-------+-----------------+---------------+-----------------+-----------+-------+
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: +++++++++++++++++++Route IPv6 info+++++++++++++++++++
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: +-------+-------------+---------+-----------+-------+
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: | Route | Destination | Gateway | Interface | Flags |
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: +-------+-------------+---------+-----------+-------+
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: |   1   |  fe80::/64  |    ::   |    eth0   |   U   |
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: |   3   |  multicast  |    ::   |    eth0   |   U   |
Oct 11 00:47:40 np0005480793.novalocal cloud-init[915]: ci-info: +-------+-------------+---------+-----------+-------+
Oct 11 00:47:41 np0005480793.novalocal useradd[982]: new group: name=cloud-user, GID=1001
Oct 11 00:47:41 np0005480793.novalocal useradd[982]: new user: name=cloud-user, UID=1001, GID=1001, home=/home/cloud-user, shell=/bin/bash, from=none
Oct 11 00:47:41 np0005480793.novalocal useradd[982]: add 'cloud-user' to group 'adm'
Oct 11 00:47:41 np0005480793.novalocal useradd[982]: add 'cloud-user' to group 'systemd-journal'
Oct 11 00:47:41 np0005480793.novalocal useradd[982]: add 'cloud-user' to shadow group 'adm'
Oct 11 00:47:41 np0005480793.novalocal useradd[982]: add 'cloud-user' to shadow group 'systemd-journal'
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: Generating public/private rsa key pair.
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: Your identification has been saved in /etc/ssh/ssh_host_rsa_key
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: Your public key has been saved in /etc/ssh/ssh_host_rsa_key.pub
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: The key fingerprint is:
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: SHA256:gTL0bcCRCPEGEc6cuBtazVnE6iBuOsL43CXb26lE8bA root@np0005480793.novalocal
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: The key's randomart image is:
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: +---[RSA 3072]----+
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |  *=.++o         |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: | = =.o++         |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |. = =.* +        |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |...+.= * .       |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |+..o+ E S        |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |.*  ..           |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |B   . o          |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |=o . * . .       |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |.oo o +oo        |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: +----[SHA256]-----+
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: Generating public/private ecdsa key pair.
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: Your identification has been saved in /etc/ssh/ssh_host_ecdsa_key
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: Your public key has been saved in /etc/ssh/ssh_host_ecdsa_key.pub
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: The key fingerprint is:
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: SHA256:mmkyc0iYz53X9KFPkpQhd8RCd2NDubC0eibFbDPICkE root@np0005480793.novalocal
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: The key's randomart image is:
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: +---[ECDSA 256]---+
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |     .E  ...o.*. |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |      .   .o+o.o |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |       ...o*.+ . |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |   o  .  oo+X .  |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |  o .  .S.++.o   |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |   + o =.+o+o.   |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |    B O . =+o    |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |     * .   +     |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |            .    |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: +----[SHA256]-----+
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: Generating public/private ed25519 key pair.
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: Your identification has been saved in /etc/ssh/ssh_host_ed25519_key
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: Your public key has been saved in /etc/ssh/ssh_host_ed25519_key.pub
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: The key fingerprint is:
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: SHA256:iLQiXJfvU4e/Z2u8TbFLRts1Ol05nbPBvv3LpwFCXGE root@np0005480793.novalocal
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: The key's randomart image is:
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: +--[ED25519 256]--+
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |            E.   |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |      .  . o     |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |   ..o    o      |
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |. ...o.. ..   . +|
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |... o ..So...  @+|
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: | . .  . . o. .= &|
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |       o   ..o.X.|
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |        .   .=B.=|
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: |           .+o+B*|
Oct 11 00:47:42 np0005480793.novalocal cloud-init[915]: +----[SHA256]-----+
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Finished Cloud-init: Network Stage.
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Reached target Cloud-config availability.
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Reached target Network is Online.
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Starting Cloud-init: Config Stage...
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Starting Notify NFS peers of a restart...
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Starting System Logging Service...
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Starting OpenSSH server daemon...
Oct 11 00:47:42 np0005480793.novalocal sm-notify[997]: Version 2.5.4 starting
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Starting Permit User Sessions...
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Started Notify NFS peers of a restart.
Oct 11 00:47:42 np0005480793.novalocal sshd[999]: Server listening on 0.0.0.0 port 22.
Oct 11 00:47:42 np0005480793.novalocal sshd[999]: Server listening on :: port 22.
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Started OpenSSH server daemon.
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Finished Permit User Sessions.
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Started Command Scheduler.
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Started Getty on tty1.
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Started Serial Getty on ttyS0.
Oct 11 00:47:42 np0005480793.novalocal crond[1001]: (CRON) STARTUP (1.5.7)
Oct 11 00:47:42 np0005480793.novalocal crond[1001]: (CRON) INFO (Syslog will be used instead of sendmail.)
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Reached target Login Prompts.
Oct 11 00:47:42 np0005480793.novalocal crond[1001]: (CRON) INFO (RANDOM_DELAY will be scaled with factor 43% if used.)
Oct 11 00:47:42 np0005480793.novalocal crond[1001]: (CRON) INFO (running with inotify support)
Oct 11 00:47:42 np0005480793.novalocal rsyslogd[998]: [origin software="rsyslogd" swVersion="8.2506.0-2.el9" x-pid="998" x-info="https://www.rsyslog.com"] start
Oct 11 00:47:42 np0005480793.novalocal rsyslogd[998]: imjournal: No statefile exists, /var/lib/rsyslog/imjournal.state will be created (ignore if this is first run): No such file or directory [v8.2506.0-2.el9 try https://www.rsyslog.com/e/2040 ]
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Started System Logging Service.
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Reached target Multi-User System.
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Starting Record Runlevel Change in UTMP...
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully.
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Finished Record Runlevel Change in UTMP.
Oct 11 00:47:42 np0005480793.novalocal rsyslogd[998]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 00:47:42 np0005480793.novalocal cloud-init[1011]: Cloud-init v. 24.4-7.el9 running 'modules:config' at Sat, 11 Oct 2025 00:47:42 +0000. Up 10.53 seconds.
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Finished Cloud-init: Config Stage.
Oct 11 00:47:42 np0005480793.novalocal systemd[1]: Starting Cloud-init: Final Stage...
Oct 11 00:47:43 np0005480793.novalocal cloud-init[1015]: Cloud-init v. 24.4-7.el9 running 'modules:final' at Sat, 11 Oct 2025 00:47:43 +0000. Up 10.87 seconds.
Oct 11 00:47:43 np0005480793.novalocal cloud-init[1017]: #############################################################
Oct 11 00:47:43 np0005480793.novalocal cloud-init[1018]: -----BEGIN SSH HOST KEY FINGERPRINTS-----
Oct 11 00:47:43 np0005480793.novalocal cloud-init[1020]: 256 SHA256:mmkyc0iYz53X9KFPkpQhd8RCd2NDubC0eibFbDPICkE root@np0005480793.novalocal (ECDSA)
Oct 11 00:47:43 np0005480793.novalocal cloud-init[1022]: 256 SHA256:iLQiXJfvU4e/Z2u8TbFLRts1Ol05nbPBvv3LpwFCXGE root@np0005480793.novalocal (ED25519)
Oct 11 00:47:43 np0005480793.novalocal cloud-init[1024]: 3072 SHA256:gTL0bcCRCPEGEc6cuBtazVnE6iBuOsL43CXb26lE8bA root@np0005480793.novalocal (RSA)
Oct 11 00:47:43 np0005480793.novalocal cloud-init[1025]: -----END SSH HOST KEY FINGERPRINTS-----
Oct 11 00:47:43 np0005480793.novalocal cloud-init[1026]: #############################################################
Oct 11 00:47:43 np0005480793.novalocal cloud-init[1015]: Cloud-init v. 24.4-7.el9 finished at Sat, 11 Oct 2025 00:47:43 +0000. Datasource DataSourceConfigDrive [net,ver=2][source=/dev/sr0].  Up 11.10 seconds
Oct 11 00:47:43 np0005480793.novalocal systemd[1]: Finished Cloud-init: Final Stage.
Oct 11 00:47:43 np0005480793.novalocal systemd[1]: Reached target Cloud-init target.
Oct 11 00:47:43 np0005480793.novalocal systemd[1]: Startup finished in 1.874s (kernel) + 2.964s (initrd) + 6.336s (userspace) = 11.174s.
Oct 11 00:47:43 np0005480793.novalocal sshd-session[1030]: Connection reset by 38.102.83.114 port 53826 [preauth]
Oct 11 00:47:43 np0005480793.novalocal sshd-session[1032]: Unable to negotiate with 38.102.83.114 port 53832: no matching host key type found. Their offer: ssh-ed25519,ssh-ed25519-cert-v01@openssh.com [preauth]
Oct 11 00:47:43 np0005480793.novalocal sshd-session[1034]: Connection reset by 38.102.83.114 port 53846 [preauth]
Oct 11 00:47:43 np0005480793.novalocal sshd-session[1036]: Unable to negotiate with 38.102.83.114 port 53862: no matching host key type found. Their offer: ecdsa-sha2-nistp384,ecdsa-sha2-nistp384-cert-v01@openssh.com [preauth]
Oct 11 00:47:43 np0005480793.novalocal sshd-session[1038]: Unable to negotiate with 38.102.83.114 port 53866: no matching host key type found. Their offer: ecdsa-sha2-nistp521,ecdsa-sha2-nistp521-cert-v01@openssh.com [preauth]
Oct 11 00:47:43 np0005480793.novalocal sshd-session[1040]: Connection reset by 38.102.83.114 port 53870 [preauth]
Oct 11 00:47:43 np0005480793.novalocal sshd-session[1042]: Connection reset by 38.102.83.114 port 53886 [preauth]
Oct 11 00:47:43 np0005480793.novalocal sshd-session[1044]: Unable to negotiate with 38.102.83.114 port 53898: no matching host key type found. Their offer: ssh-rsa,ssh-rsa-cert-v01@openssh.com [preauth]
Oct 11 00:47:43 np0005480793.novalocal sshd-session[1046]: Unable to negotiate with 38.102.83.114 port 53908: no matching host key type found. Their offer: ssh-dss,ssh-dss-cert-v01@openssh.com [preauth]
Oct 11 00:47:45 np0005480793.novalocal chronyd[784]: Selected source 167.160.187.12 (2.centos.pool.ntp.org)
Oct 11 00:47:45 np0005480793.novalocal chronyd[784]: System clock TAI offset set to 37 seconds
Oct 11 00:47:49 np0005480793.novalocal irqbalance[789]: Cannot change IRQ 25 affinity: Operation not permitted
Oct 11 00:47:49 np0005480793.novalocal irqbalance[789]: IRQ 25 affinity is now unmanaged
Oct 11 00:47:49 np0005480793.novalocal irqbalance[789]: Cannot change IRQ 31 affinity: Operation not permitted
Oct 11 00:47:49 np0005480793.novalocal irqbalance[789]: IRQ 31 affinity is now unmanaged
Oct 11 00:47:49 np0005480793.novalocal irqbalance[789]: Cannot change IRQ 28 affinity: Operation not permitted
Oct 11 00:47:49 np0005480793.novalocal irqbalance[789]: IRQ 28 affinity is now unmanaged
Oct 11 00:47:49 np0005480793.novalocal irqbalance[789]: Cannot change IRQ 32 affinity: Operation not permitted
Oct 11 00:47:49 np0005480793.novalocal irqbalance[789]: IRQ 32 affinity is now unmanaged
Oct 11 00:47:49 np0005480793.novalocal irqbalance[789]: Cannot change IRQ 30 affinity: Operation not permitted
Oct 11 00:47:49 np0005480793.novalocal irqbalance[789]: IRQ 30 affinity is now unmanaged
Oct 11 00:47:49 np0005480793.novalocal irqbalance[789]: Cannot change IRQ 29 affinity: Operation not permitted
Oct 11 00:47:49 np0005480793.novalocal irqbalance[789]: IRQ 29 affinity is now unmanaged
Oct 11 00:47:50 np0005480793.novalocal systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.
Oct 11 00:48:00 np0005480793.novalocal sshd-session[1049]: Accepted publickey for zuul from 38.102.83.114 port 60460 ssh2: RSA SHA256:zhs3MiW0JhxzckYcMHQES8SMYHj1iGcomnyzmbiwor8
Oct 11 00:48:00 np0005480793.novalocal systemd[1]: Created slice User Slice of UID 1000.
Oct 11 00:48:00 np0005480793.novalocal systemd[1]: Starting User Runtime Directory /run/user/1000...
Oct 11 00:48:00 np0005480793.novalocal systemd-logind[804]: New session 1 of user zuul.
Oct 11 00:48:00 np0005480793.novalocal systemd[1]: Finished User Runtime Directory /run/user/1000.
Oct 11 00:48:00 np0005480793.novalocal systemd[1]: Starting User Manager for UID 1000...
Oct 11 00:48:00 np0005480793.novalocal systemd[1053]: pam_unix(systemd-user:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 00:48:00 np0005480793.novalocal systemd[1053]: Queued start job for default target Main User Target.
Oct 11 00:48:01 np0005480793.novalocal systemd[1053]: Created slice User Application Slice.
Oct 11 00:48:01 np0005480793.novalocal systemd[1053]: Started Mark boot as successful after the user session has run 2 minutes.
Oct 11 00:48:01 np0005480793.novalocal systemd[1053]: Started Daily Cleanup of User's Temporary Directories.
Oct 11 00:48:01 np0005480793.novalocal systemd[1053]: Reached target Paths.
Oct 11 00:48:01 np0005480793.novalocal systemd[1053]: Reached target Timers.
Oct 11 00:48:01 np0005480793.novalocal systemd[1053]: Starting D-Bus User Message Bus Socket...
Oct 11 00:48:01 np0005480793.novalocal systemd[1053]: Starting Create User's Volatile Files and Directories...
Oct 11 00:48:01 np0005480793.novalocal systemd[1053]: Listening on D-Bus User Message Bus Socket.
Oct 11 00:48:01 np0005480793.novalocal systemd[1053]: Reached target Sockets.
Oct 11 00:48:01 np0005480793.novalocal systemd[1053]: Finished Create User's Volatile Files and Directories.
Oct 11 00:48:01 np0005480793.novalocal systemd[1053]: Reached target Basic System.
Oct 11 00:48:01 np0005480793.novalocal systemd[1053]: Reached target Main User Target.
Oct 11 00:48:01 np0005480793.novalocal systemd[1053]: Startup finished in 158ms.
Oct 11 00:48:01 np0005480793.novalocal systemd[1]: Started User Manager for UID 1000.
Oct 11 00:48:01 np0005480793.novalocal systemd[1]: Started Session 1 of User zuul.
Oct 11 00:48:01 np0005480793.novalocal sshd-session[1049]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 00:48:01 np0005480793.novalocal python3[1135]: ansible-setup Invoked with gather_subset=['!all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 00:48:04 np0005480793.novalocal python3[1163]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 00:48:10 np0005480793.novalocal systemd[1]: systemd-hostnamed.service: Deactivated successfully.
Oct 11 00:48:10 np0005480793.novalocal python3[1222]: ansible-setup Invoked with gather_subset=['network'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 00:48:11 np0005480793.novalocal python3[1264]: ansible-zuul_console Invoked with path=/tmp/console-{log_uuid}.log port=19885 state=present
Oct 11 00:48:13 np0005480793.novalocal python3[1290]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCzNgnF4mZ8mGvkuWQlBiJ4fRZgU5jhXlyBQ9G7S0I2euBlpTtfH0ieG3E+4wNLQyuD2g9pazrs+IRhIBnrwoDfISry5i5XmY0THnn1WlOQYciZKBQoeWGmG5LNPtYS7g6TgS3fKU1rwSTIQMjdEf8TPoY4FURHw9NW5dDR9xJhJXeGfRJJvClbkqBZGf3A8sq4z9T/gwUQx+JpiJKiHYsSwpAy/MnSlXBE5GJhcM2mkWbsSXQUggh4Xmq2MX5C3AMgtuhi/yTdJU1YrQFeGEA4lTAk5g48ryikcHEFluQjIc6OZJlexfWohgDdOW0YuuPdDIzxbYhi9cXE0nnhMzBaymB5HKZCzhglBkb75ez2B+ubqiiTqm6QQ0duzRZJWcBB0kh6wF280blZt5a9LI1ysv3yYyfXrtiGJbThINBP3bUmfBmebmhikj+XO5zhZliTzV3FBLd9acUs/WMAsHcdVTd1IibsWSR+VFlLr8KVtJzTotECx8bTTheHDq0h+pc= zuul-build-sshkey manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:13 np0005480793.novalocal python3[1314]: ansible-file Invoked with state=directory path=/home/zuul/.ssh mode=448 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:48:14 np0005480793.novalocal python3[1413]: ansible-ansible.legacy.stat Invoked with path=/home/zuul/.ssh/id_rsa follow=False get_checksum=False checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 00:48:14 np0005480793.novalocal python3[1484]: ansible-ansible.legacy.copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760143693.8695197-207-211410211495296/source dest=/home/zuul/.ssh/id_rsa mode=384 force=False _original_basename=ea86fa080eb14bc184e5824c428ad2b6_id_rsa follow=False checksum=64f011c0441234cdf017c56cd34916acabdc0d22 backup=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:48:15 np0005480793.novalocal python3[1607]: ansible-ansible.legacy.stat Invoked with path=/home/zuul/.ssh/id_rsa.pub follow=False get_checksum=False checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 00:48:15 np0005480793.novalocal python3[1678]: ansible-ansible.legacy.copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760143694.935962-240-137234460411839/source dest=/home/zuul/.ssh/id_rsa.pub mode=420 force=False _original_basename=ea86fa080eb14bc184e5824c428ad2b6_id_rsa.pub follow=False checksum=0cdfe6926dd4a68a281eff1063100f88ff313646 backup=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:48:17 np0005480793.novalocal python3[1726]: ansible-ping Invoked with data=pong
Oct 11 00:48:18 np0005480793.novalocal python3[1750]: ansible-setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 00:48:19 np0005480793.novalocal python3[1808]: ansible-zuul_debug_info Invoked with ipv4_route_required=False ipv6_route_required=False image_manifest_files=['/etc/dib-builddate.txt', '/etc/image-hostname.txt'] image_manifest=None traceroute_host=None
Oct 11 00:48:20 np0005480793.novalocal python3[1840]: ansible-file Invoked with path=/home/zuul/zuul-output/logs state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:48:21 np0005480793.novalocal python3[1864]: ansible-file Invoked with path=/home/zuul/zuul-output/artifacts state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:48:21 np0005480793.novalocal python3[1888]: ansible-file Invoked with path=/home/zuul/zuul-output/docs state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:48:21 np0005480793.novalocal python3[1912]: ansible-file Invoked with path=/home/zuul/zuul-output/logs state=directory mode=493 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:48:21 np0005480793.novalocal python3[1936]: ansible-file Invoked with path=/home/zuul/zuul-output/artifacts state=directory mode=493 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:48:22 np0005480793.novalocal python3[1960]: ansible-file Invoked with path=/home/zuul/zuul-output/docs state=directory mode=493 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:48:23 np0005480793.novalocal sudo[1984]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jrjjbwlakiqnipmtakhpeguzsanphqdx ; /usr/bin/python3'
Oct 11 00:48:23 np0005480793.novalocal sudo[1984]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:48:23 np0005480793.novalocal python3[1986]: ansible-file Invoked with path=/etc/ci state=directory owner=root group=root mode=493 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:48:23 np0005480793.novalocal sudo[1984]: pam_unix(sudo:session): session closed for user root
Oct 11 00:48:24 np0005480793.novalocal sudo[2062]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pqwwaqxedbhydppqlxscrmdrwfdryury ; /usr/bin/python3'
Oct 11 00:48:24 np0005480793.novalocal sudo[2062]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:48:24 np0005480793.novalocal python3[2064]: ansible-ansible.legacy.stat Invoked with path=/etc/ci/mirror_info.sh follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 00:48:24 np0005480793.novalocal sudo[2062]: pam_unix(sudo:session): session closed for user root
Oct 11 00:48:24 np0005480793.novalocal sudo[2135]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gpkkainkcyhsgejuoxookpvaeyzxdzta ; /usr/bin/python3'
Oct 11 00:48:24 np0005480793.novalocal sudo[2135]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:48:25 np0005480793.novalocal python3[2137]: ansible-ansible.legacy.copy Invoked with dest=/etc/ci/mirror_info.sh owner=root group=root mode=420 src=/home/zuul/.ansible/tmp/ansible-tmp-1760143704.0416665-21-20109796640917/source follow=False _original_basename=mirror_info.sh.j2 checksum=92d92a03afdddee82732741071f662c729080c35 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:48:25 np0005480793.novalocal sudo[2135]: pam_unix(sudo:session): session closed for user root
Oct 11 00:48:25 np0005480793.novalocal python3[2185]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4Z/c9osaGGtU6X8fgELwfj/yayRurfcKA0HMFfdpPxev2dbwljysMuzoVp4OZmW1gvGtyYPSNRvnzgsaabPNKNo2ym5NToCP6UM+KSe93aln4BcM/24mXChYAbXJQ5Bqq/pIzsGs/pKetQN+vwvMxLOwTvpcsCJBXaa981RKML6xj9l/UZ7IIq1HSEKMvPLxZMWdu0Ut8DkCd5F4nOw9Wgml2uYpDCj5LLCrQQ9ChdOMz8hz6SighhNlRpPkvPaet3OXxr/ytFMu7j7vv06CaEnuMMiY2aTWN1Imin9eHAylIqFHta/3gFfQSWt9jXM7owkBLKL7ATzhaAn+fjNupw== arxcruz@redhat.com manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:25 np0005480793.novalocal python3[2209]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDS4Fn6k4deCnIlOtLWqZJyksbepjQt04j8Ed8CGx9EKkj0fKiAxiI4TadXQYPuNHMixZy4Nevjb6aDhL5Z906TfvNHKUrjrG7G26a0k8vdc61NEQ7FmcGMWRLwwc6ReDO7lFpzYKBMk4YqfWgBuGU/K6WLKiVW2cVvwIuGIaYrE1OiiX0iVUUk7KApXlDJMXn7qjSYynfO4mF629NIp8FJal38+Kv+HA+0QkE5Y2xXnzD4Lar5+keymiCHRntPppXHeLIRzbt0gxC7v3L72hpQ3BTBEzwHpeS8KY+SX1y5lRMN45thCHfJqGmARJREDjBvWG8JXOPmVIKQtZmVcD5b mandreou@redhat.com manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:26 np0005480793.novalocal python3[2233]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9MiLfy30deHA7xPOAlew5qUq3UP2gmRMYJi8PtkjFB20/DKeWwWNnkZPqP9AayruRoo51SIiVg870gbZE2jYl+Ncx/FYDe56JeC3ySZsXoAVkC9bP7gkOGqOmJjirvAgPMI7bogVz8i+66Q4Ar7OKTp3762G4IuWPPEg4ce4Y7lx9qWocZapHYq4cYKMxrOZ7SEbFSATBbe2bPZAPKTw8do/Eny+Hq/LkHFhIeyra6cqTFQYShr+zPln0Cr+ro/pDX3bB+1ubFgTpjpkkkQsLhDfR6cCdCWM2lgnS3BTtYj5Ct9/JRPR5YOphqZz+uB+OEu2IL68hmU9vNTth1KeX rlandy@redhat.com manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:26 np0005480793.novalocal python3[2257]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFCbgz8gdERiJlk2IKOtkjQxEXejrio6ZYMJAVJYpOIp raukadah@gmail.com manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:26 np0005480793.novalocal python3[2281]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBqb3Q/9uDf4LmihQ7xeJ9gA/STIQUFPSfyyV0m8AoQi bshewale@redhat.com manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:27 np0005480793.novalocal python3[2305]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0I8QqQx0Az2ysJt2JuffucLijhBqnsXKEIx5GyHwxVULROa8VtNFXUDH6ZKZavhiMcmfHB2+TBTda+lDP4FldYj06dGmzCY+IYGa+uDRdxHNGYjvCfLFcmLlzRK6fNbTcui+KlUFUdKe0fb9CRoGKyhlJD5GRkM1Dv+Yb6Bj+RNnmm1fVGYxzmrD2utvffYEb0SZGWxq2R9gefx1q/3wCGjeqvufEV+AskPhVGc5T7t9eyZ4qmslkLh1/nMuaIBFcr9AUACRajsvk6mXrAN1g3HlBf2gQlhi1UEyfbqIQvzzFtsbLDlSum/KmKjy818GzvWjERfQ0VkGzCd9bSLVL dviroel@redhat.com manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:27 np0005480793.novalocal python3[2329]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDLOQd4ZLtkZXQGY6UwAr/06ppWQK4fDO3HaqxPk98csyOCBXsliSKK39Bso828+5srIXiW7aI6aC9P5mwi4mUZlGPfJlQbfrcGvY+b/SocuvaGK+1RrHLoJCT52LBhwgrzlXio2jeksZeein8iaTrhsPrOAs7KggIL/rB9hEiB3NaOPWhhoCP4vlW6MEMExGcqB/1FVxXFBPnLkEyW0Lk7ycVflZl2ocRxbfjZi0+tI1Wlinp8PvSQSc/WVrAcDgKjc/mB4ODPOyYy3G8FHgfMsrXSDEyjBKgLKMsdCrAUcqJQWjkqXleXSYOV4q3pzL+9umK+q/e3P/bIoSFQzmJKTU1eDfuvPXmow9F5H54fii/Da7ezlMJ+wPGHJrRAkmzvMbALy7xwswLhZMkOGNtRcPqaKYRmIBKpw3o6bCTtcNUHOtOQnzwY8JzrM2eBWJBXAANYw+9/ho80JIiwhg29CFNpVBuHbql2YxJQNrnl90guN65rYNpDxdIluweyUf8= anbanerj@kaermorhen manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:27 np0005480793.novalocal python3[2353]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC3VwV8Im9kRm49lt3tM36hj4Zv27FxGo4C1Q/0jqhzFmHY7RHbmeRr8ObhwWoHjXSozKWg8FL5ER0z3hTwL0W6lez3sL7hUaCmSuZmG5Hnl3x4vTSxDI9JZ/Y65rtYiiWQo2fC5xJhU/4+0e5e/pseCm8cKRSu+SaxhO+sd6FDojA2x1BzOzKiQRDy/1zWGp/cZkxcEuB1wHI5LMzN03c67vmbu+fhZRAUO4dQkvcnj2LrhQtpa+ytvnSjr8icMDosf1OsbSffwZFyHB/hfWGAfe0eIeSA2XPraxiPknXxiPKx2MJsaUTYbsZcm3EjFdHBBMumw5rBI74zLrMRvCO9GwBEmGT4rFng1nP+yw5DB8sn2zqpOsPg1LYRwCPOUveC13P6pgsZZPh812e8v5EKnETct+5XI3dVpdw6CnNiLwAyVAF15DJvBGT/u1k0Myg/bQn+Gv9k2MSj6LvQmf6WbZu2Wgjm30z3FyCneBqTL7mLF19YXzeC0ufHz5pnO1E= dasm@fedora manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:27 np0005480793.novalocal python3[2377]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHUnwjB20UKmsSed9X73eGNV5AOEFccQ3NYrRW776pEk cjeanner manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:28 np0005480793.novalocal python3[2401]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDercCMGn8rW1C4P67tHgtflPdTeXlpyUJYH+6XDd2lR jgilaber@redhat.com manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:28 np0005480793.novalocal python3[2425]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAMI6kkg9Wg0sG7jIJmyZemEBwUn1yzNpQQd3gnulOmZ adrianfuscoarnejo@gmail.com manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:28 np0005480793.novalocal python3[2449]: ansible-authorized_key Invoked with user=zuul state=present key=ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPijwpQu/3jhhhBZInXNOLEH57DrknPc3PLbsRvYyJIFzwYjX+WD4a7+nGnMYS42MuZk6TJcVqgnqofVx4isoD4= ramishra@redhat.com manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:29 np0005480793.novalocal python3[2473]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGpU/BepK3qX0NRf5Np+dOBDqzQEefhNrw2DCZaH3uWW rebtoor@monolith manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:29 np0005480793.novalocal python3[2497]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDK0iKdi8jQTpQrDdLVH/AAgLVYyTXF7AQ1gjc/5uT3t ykarel@yatinkarel manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:29 np0005480793.novalocal python3[2521]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIF/V/cLotA6LZeO32VL45Hd78skuA2lJA425Sm2LlQeZ fmount@horcrux manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:30 np0005480793.novalocal python3[2545]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDa7QCjuDMVmRPo1rREbGwzYeBCYVN+Ou/3WKXZEC6Sr manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:30 np0005480793.novalocal python3[2569]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCfNtF7NvKl915TGsGGoseUb06Hj8L/S4toWf0hExeY+F00woL6NvBlJD0nDct+P5a22I4EhvoQCRQ8reaPCm1lybR3uiRIJsj+8zkVvLwby9LXzfZorlNG9ofjd00FEmB09uW/YvTl6Q9XwwwX6tInzIOv3TMqTHHGOL74ibbj8J/FJR0cFEyj0z4WQRvtkh32xAHl83gbuINryMt0sqRI+clj2381NKL55DRLQrVw0gsfqqxiHAnXg21qWmc4J+b9e9kiuAFQjcjwTVkwJCcg3xbPwC/qokYRby/Y5S40UUd7/jEARGXT7RZgpzTuDd1oZiCVrnrqJNPaMNdVv5MLeFdf1B7iIe5aa/fGouX7AO4SdKhZUdnJmCFAGvjC6S3JMZ2wAcUl+OHnssfmdj7XL50cLo27vjuzMtLAgSqi6N99m92WCF2s8J9aVzszX7Xz9OKZCeGsiVJp3/NdABKzSEAyM9xBD/5Vho894Sav+otpySHe3p6RUTgbB5Zu8VyZRZ/UtB3ueXxyo764yrc6qWIDqrehm84Xm9g+/jpIBzGPl07NUNJpdt/6Sgf9RIKXw/7XypO5yZfUcuFNGTxLfqjTNrtgLZNcjfav6sSdVXVcMPL//XNuRdKmVFaO76eV/oGMQGr1fGcCD+N+CpI7+Q+fCNB6VFWG4nZFuI/Iuw== averdagu@redhat.com manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:30 np0005480793.novalocal python3[2593]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDq8l27xI+QlQVdS4djp9ogSoyrNE2+Ox6vKPdhSNL1J3PE5w+WCSvMz9A5gnNuH810zwbekEApbxTze/gLQJwBHA52CChfURpXrFaxY7ePXRElwKAL3mJfzBWY/c5jnNL9TCVmFJTGZkFZP3Nh+BMgZvL6xBkt3WKm6Uq18qzd9XeKcZusrA+O+uLv1fVeQnadY9RIqOCyeFYCzLWrUfTyE8x/XG0hAWIM7qpnF2cALQS2h9n4hW5ybiUN790H08wf9hFwEf5nxY9Z9dVkPFQiTSGKNBzmnCXU9skxS/xhpFjJ5duGSZdtAHe9O+nGZm9c67hxgtf8e5PDuqAdXEv2cf6e3VBAt+Bz8EKI3yosTj0oZHfwr42Yzb1l/SKy14Rggsrc9KAQlrGXan6+u2jcQqqx7l+SWmnpFiWTV9u5cWj2IgOhApOitmRBPYqk9rE2usfO0hLn/Pj/R/Nau4803e1/EikdLE7Ps95s9mX5jRDjAoUa2JwFF5RsVFyL910= ashigupt@ashigupt.remote.csb manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:30 np0005480793.novalocal python3[2617]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOKLl0NYKwoZ/JY5KeZU8VwRAggeOxqQJeoqp3dsAaY9 manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:31 np0005480793.novalocal python3[2641]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIASASQOH2BcOyLKuuDOdWZlPi2orcjcA8q4400T73DLH evallesp@fedora manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:31 np0005480793.novalocal python3[2665]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILeBWlamUph+jRKV2qrx1PGU7vWuGIt5+z9k96I8WehW amsinha@amsinha-mac manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:31 np0005480793.novalocal python3[2689]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIANvVgvJBlK3gb1yz5uef/JqIGq4HLEmY2dYA8e37swb morenod@redhat-laptop manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:32 np0005480793.novalocal python3[2713]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDZdI7t1cxYx65heVI24HTV4F7oQLW1zyfxHreL2TIJKxjyrUUKIFEUmTutcBlJRLNT2Eoix6x1sOw9YrchloCLcn//SGfTElr9mSc5jbjb7QXEU+zJMhtxyEJ1Po3CUGnj7ckiIXw7wcawZtrEOAQ9pH3ExYCJcEMiyNjRQZCxT3tPK+S4B95EWh5Fsrz9CkwpjNRPPH7LigCeQTM3Wc7r97utAslBUUvYceDSLA7rMgkitJE38b7rZBeYzsGQ8YYUBjTCtehqQXxCRjizbHWaaZkBU+N3zkKB6n/iCNGIO690NK7A/qb6msTijiz1PeuM8ThOsi9qXnbX5v0PoTpcFSojV7NHAQ71f0XXuS43FhZctT+Dcx44dT8Fb5vJu2cJGrk+qF8ZgJYNpRS7gPg0EG2EqjK7JMf9ULdjSu0r+KlqIAyLvtzT4eOnQipoKlb/WG5D/0ohKv7OMQ352ggfkBFIQsRXyyTCT98Ft9juqPuahi3CAQmP4H9dyE+7+Kz437PEtsxLmfm6naNmWi7Ee1DqWPwS8rEajsm4sNM4wW9gdBboJQtc0uZw0DfLj1I9r3Mc8Ol0jYtz0yNQDSzVLrGCaJlC311trU70tZ+ZkAVV6Mn8lOhSbj1cK0lvSr6ZK4dgqGl3I1eTZJJhbLNdg7UOVaiRx9543+C/p/As7w== brjackma@redhat.com manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:32 np0005480793.novalocal python3[2737]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKwedoZ0TWPJX/z/4TAbO/kKcDZOQVgRH0hAqrL5UCI1 vcastell@redhat.com manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:32 np0005480793.novalocal python3[2761]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEmv8sE8GCk6ZTPIqF0FQrttBdL3mq7rCm/IJy0xDFh7 michburk@redhat.com manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:33 np0005480793.novalocal python3[2785]: ansible-authorized_key Invoked with user=zuul state=present key=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICy6GpGEtwevXEEn4mmLR5lmSLe23dGgAvzkB9DMNbkf rsafrono@rsafrono manage_dir=True exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:48:35 np0005480793.novalocal sudo[2809]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ylhkdiyvgvvxvezvqczubfcmnicoyyjn ; /usr/bin/python3'
Oct 11 00:48:35 np0005480793.novalocal sudo[2809]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:48:35 np0005480793.novalocal python3[2811]: ansible-community.general.timezone Invoked with name=UTC hwclock=None
Oct 11 00:48:35 np0005480793.novalocal systemd[1]: Starting Time & Date Service...
Oct 11 00:48:35 np0005480793.novalocal systemd[1]: Started Time & Date Service.
Oct 11 00:48:35 np0005480793.novalocal systemd-timedated[2813]: Changed time zone to 'UTC' (UTC).
Oct 11 00:48:35 np0005480793.novalocal sudo[2809]: pam_unix(sudo:session): session closed for user root
Oct 11 00:48:36 np0005480793.novalocal sudo[2840]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hhkgbckkocirrmrplxmqhogeyhhmpfqa ; /usr/bin/python3'
Oct 11 00:48:36 np0005480793.novalocal sudo[2840]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:48:36 np0005480793.novalocal python3[2842]: ansible-file Invoked with path=/etc/nodepool state=directory mode=511 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:48:36 np0005480793.novalocal sudo[2840]: pam_unix(sudo:session): session closed for user root
Oct 11 00:48:36 np0005480793.novalocal python3[2918]: ansible-ansible.legacy.stat Invoked with path=/etc/nodepool/sub_nodes follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 00:48:37 np0005480793.novalocal python3[2989]: ansible-ansible.legacy.copy Invoked with dest=/etc/nodepool/sub_nodes src=/home/zuul/.ansible/tmp/ansible-tmp-1760143716.5423892-153-161061578422279/source _original_basename=tmp4n3afp7r follow=False checksum=da39a3ee5e6b4b0d3255bfef95601890afd80709 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:48:37 np0005480793.novalocal python3[3089]: ansible-ansible.legacy.stat Invoked with path=/etc/nodepool/sub_nodes_private follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 00:48:38 np0005480793.novalocal python3[3160]: ansible-ansible.legacy.copy Invoked with dest=/etc/nodepool/sub_nodes_private src=/home/zuul/.ansible/tmp/ansible-tmp-1760143717.4926138-183-247883882498470/source _original_basename=tmpfk7hxsu6 follow=False checksum=da39a3ee5e6b4b0d3255bfef95601890afd80709 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:48:38 np0005480793.novalocal sudo[3260]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lrhtgpsfipayfvvbchtthmqylwqwxhpn ; /usr/bin/python3'
Oct 11 00:48:38 np0005480793.novalocal sudo[3260]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:48:39 np0005480793.novalocal python3[3262]: ansible-ansible.legacy.stat Invoked with path=/etc/nodepool/node_private follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 00:48:39 np0005480793.novalocal sudo[3260]: pam_unix(sudo:session): session closed for user root
Oct 11 00:48:39 np0005480793.novalocal sudo[3333]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-diaziytvtejktswjamuiyetgtvrgsnwb ; /usr/bin/python3'
Oct 11 00:48:39 np0005480793.novalocal sudo[3333]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:48:39 np0005480793.novalocal python3[3335]: ansible-ansible.legacy.copy Invoked with dest=/etc/nodepool/node_private src=/home/zuul/.ansible/tmp/ansible-tmp-1760143718.6923277-231-200088174655232/source _original_basename=tmpv1oyei7d follow=False checksum=d300ef2a9a28a235d7b76ee497641bd17d004fed backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:48:39 np0005480793.novalocal sudo[3333]: pam_unix(sudo:session): session closed for user root
Oct 11 00:48:40 np0005480793.novalocal python3[3383]: ansible-ansible.legacy.command Invoked with _raw_params=cp .ssh/id_rsa /etc/nodepool/id_rsa zuul_log_id=in-loop-ignore zuul_ansible_split_streams=False _uses_shell=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 00:48:40 np0005480793.novalocal python3[3409]: ansible-ansible.legacy.command Invoked with _raw_params=cp .ssh/id_rsa.pub /etc/nodepool/id_rsa.pub zuul_log_id=in-loop-ignore zuul_ansible_split_streams=False _uses_shell=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 00:48:40 np0005480793.novalocal sudo[3487]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vxhhrbyfyfzjqtbuibfuzitkccehpujf ; /usr/bin/python3'
Oct 11 00:48:40 np0005480793.novalocal sudo[3487]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:48:40 np0005480793.novalocal python3[3489]: ansible-ansible.legacy.stat Invoked with path=/etc/sudoers.d/zuul-sudo-grep follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 00:48:40 np0005480793.novalocal sudo[3487]: pam_unix(sudo:session): session closed for user root
Oct 11 00:48:41 np0005480793.novalocal sudo[3560]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rtjpceikfywtmxpbwozcvetwbfneogmn ; /usr/bin/python3'
Oct 11 00:48:41 np0005480793.novalocal sudo[3560]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:48:41 np0005480793.novalocal python3[3562]: ansible-ansible.legacy.copy Invoked with dest=/etc/sudoers.d/zuul-sudo-grep mode=288 src=/home/zuul/.ansible/tmp/ansible-tmp-1760143720.5781064-273-64964598755352/source _original_basename=tmp6u7nabp2 follow=False checksum=bdca1a77493d00fb51567671791f4aa30f66c2f0 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:48:41 np0005480793.novalocal sudo[3560]: pam_unix(sudo:session): session closed for user root
Oct 11 00:48:41 np0005480793.novalocal sudo[3611]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cliopfaxlucaqumkwjufunkvpjhemkuw ; /usr/bin/python3'
Oct 11 00:48:41 np0005480793.novalocal sudo[3611]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:48:42 np0005480793.novalocal python3[3613]: ansible-ansible.legacy.command Invoked with _raw_params=/usr/sbin/visudo -c zuul_log_id=fa163e3b-3c83-c343-1a78-00000000001d-1-compute0 zuul_ansible_split_streams=False _uses_shell=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 00:48:42 np0005480793.novalocal sudo[3611]: pam_unix(sudo:session): session closed for user root
Oct 11 00:48:42 np0005480793.novalocal python3[3641]: ansible-ansible.legacy.command Invoked with executable=/bin/bash _raw_params=env _uses_shell=True zuul_log_id=fa163e3b-3c83-c343-1a78-00000000001e-1-compute0 zuul_ansible_split_streams=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None creates=None removes=None stdin=None
Oct 11 00:48:43 np0005480793.novalocal python3[3669]: ansible-file Invoked with path=/home/zuul/workspace state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:49:00 np0005480793.novalocal sudo[3693]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-taspmarbjmyyzjsohfltjagsejnrpxas ; /usr/bin/python3'
Oct 11 00:49:00 np0005480793.novalocal sudo[3693]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:49:00 np0005480793.novalocal python3[3695]: ansible-ansible.builtin.file Invoked with path=/etc/ci/env state=directory mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:49:00 np0005480793.novalocal sudo[3693]: pam_unix(sudo:session): session closed for user root
Oct 11 00:49:05 np0005480793.novalocal systemd[1]: systemd-timedated.service: Deactivated successfully.
Oct 11 00:49:31 np0005480793.novalocal kernel: pci 0000:00:07.0: [1af4:1000] type 00 class 0x020000 conventional PCI endpoint
Oct 11 00:49:31 np0005480793.novalocal kernel: pci 0000:00:07.0: BAR 0 [io  0x0000-0x003f]
Oct 11 00:49:31 np0005480793.novalocal kernel: pci 0000:00:07.0: BAR 1 [mem 0x00000000-0x00000fff]
Oct 11 00:49:31 np0005480793.novalocal kernel: pci 0000:00:07.0: BAR 4 [mem 0x00000000-0x00003fff 64bit pref]
Oct 11 00:49:31 np0005480793.novalocal kernel: pci 0000:00:07.0: ROM [mem 0x00000000-0x0007ffff pref]
Oct 11 00:49:31 np0005480793.novalocal kernel: pci 0000:00:07.0: ROM [mem 0xc0000000-0xc007ffff pref]: assigned
Oct 11 00:49:31 np0005480793.novalocal kernel: pci 0000:00:07.0: BAR 4 [mem 0x240000000-0x240003fff 64bit pref]: assigned
Oct 11 00:49:31 np0005480793.novalocal kernel: pci 0000:00:07.0: BAR 1 [mem 0xc0080000-0xc0080fff]: assigned
Oct 11 00:49:31 np0005480793.novalocal kernel: pci 0000:00:07.0: BAR 0 [io  0x1000-0x103f]: assigned
Oct 11 00:49:31 np0005480793.novalocal kernel: virtio-pci 0000:00:07.0: enabling device (0000 -> 0003)
Oct 11 00:49:32 np0005480793.novalocal NetworkManager[854]: <info>  [1760143772.0000] manager: (eth1): new Ethernet device (/org/freedesktop/NetworkManager/Devices/3)
Oct 11 00:49:32 np0005480793.novalocal systemd-udevd[3698]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 00:49:32 np0005480793.novalocal NetworkManager[854]: <info>  [1760143772.0207] device (eth1): state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Oct 11 00:49:32 np0005480793.novalocal NetworkManager[854]: <info>  [1760143772.0251] settings: (eth1): created default wired connection 'Wired connection 1'
Oct 11 00:49:32 np0005480793.novalocal NetworkManager[854]: <info>  [1760143772.0258] device (eth1): carrier: link connected
Oct 11 00:49:32 np0005480793.novalocal NetworkManager[854]: <info>  [1760143772.0261] device (eth1): state change: unavailable -> disconnected (reason 'carrier-changed', managed-type: 'full')
Oct 11 00:49:32 np0005480793.novalocal NetworkManager[854]: <info>  [1760143772.0272] policy: auto-activating connection 'Wired connection 1' (0495e8e2-a84b-3e37-a3c0-1eb40b77fe9f)
Oct 11 00:49:32 np0005480793.novalocal NetworkManager[854]: <info>  [1760143772.0279] device (eth1): Activation: starting connection 'Wired connection 1' (0495e8e2-a84b-3e37-a3c0-1eb40b77fe9f)
Oct 11 00:49:32 np0005480793.novalocal NetworkManager[854]: <info>  [1760143772.0280] device (eth1): state change: disconnected -> prepare (reason 'none', managed-type: 'full')
Oct 11 00:49:32 np0005480793.novalocal NetworkManager[854]: <info>  [1760143772.0287] device (eth1): state change: prepare -> config (reason 'none', managed-type: 'full')
Oct 11 00:49:32 np0005480793.novalocal NetworkManager[854]: <info>  [1760143772.0295] device (eth1): state change: config -> ip-config (reason 'none', managed-type: 'full')
Oct 11 00:49:32 np0005480793.novalocal NetworkManager[854]: <info>  [1760143772.0304] dhcp4 (eth1): activation: beginning transaction (timeout in 45 seconds)
Oct 11 00:49:32 np0005480793.novalocal python3[3725]: ansible-ansible.legacy.command Invoked with _raw_params=ip -j link zuul_log_id=fa163e3b-3c83-d5e2-2e00-0000000000fc-0-controller zuul_ansible_split_streams=False _uses_shell=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 00:49:42 np0005480793.novalocal sudo[3803]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lyexdjywzepwdevjoziyjckszebpjqsd ; OS_CLOUD=vexxhost /usr/bin/python3'
Oct 11 00:49:42 np0005480793.novalocal sudo[3803]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:49:43 np0005480793.novalocal python3[3805]: ansible-ansible.legacy.stat Invoked with path=/etc/NetworkManager/system-connections/ci-private-network.nmconnection follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 00:49:43 np0005480793.novalocal sudo[3803]: pam_unix(sudo:session): session closed for user root
Oct 11 00:49:43 np0005480793.novalocal sudo[3876]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dnwrkexjynhxeoqqxkarjdhwvhxwvgtd ; OS_CLOUD=vexxhost /usr/bin/python3'
Oct 11 00:49:43 np0005480793.novalocal sudo[3876]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:49:43 np0005480793.novalocal python3[3878]: ansible-ansible.legacy.copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760143782.7497683-102-144864088526029/source dest=/etc/NetworkManager/system-connections/ci-private-network.nmconnection mode=0600 owner=root group=root follow=False _original_basename=bootstrap-ci-network-nm-connection.nmconnection.j2 checksum=c2f2a53e0f8866db25ad7a7c37caaf4c37ba5011 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:49:43 np0005480793.novalocal sudo[3876]: pam_unix(sudo:session): session closed for user root
Oct 11 00:49:44 np0005480793.novalocal sudo[3926]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wksrpsbinhthcckzycjqeauesyyirbgd ; OS_CLOUD=vexxhost /usr/bin/python3'
Oct 11 00:49:44 np0005480793.novalocal sudo[3926]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:49:44 np0005480793.novalocal python3[3928]: ansible-ansible.builtin.systemd Invoked with name=NetworkManager state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 00:49:44 np0005480793.novalocal systemd[1]: NetworkManager-wait-online.service: Deactivated successfully.
Oct 11 00:49:44 np0005480793.novalocal systemd[1]: Stopped Network Manager Wait Online.
Oct 11 00:49:44 np0005480793.novalocal systemd[1]: Stopping Network Manager Wait Online...
Oct 11 00:49:44 np0005480793.novalocal systemd[1]: Stopping Network Manager...
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[854]: <info>  [1760143784.4640] caught SIGTERM, shutting down normally.
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[854]: <info>  [1760143784.4651] dhcp4 (eth0): canceled DHCP transaction
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[854]: <info>  [1760143784.4652] dhcp4 (eth0): activation: beginning transaction (timeout in 45 seconds)
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[854]: <info>  [1760143784.4652] dhcp4 (eth0): state changed no lease
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[854]: <info>  [1760143784.4656] manager: NetworkManager state is now CONNECTING
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[854]: <info>  [1760143784.4773] dhcp4 (eth1): canceled DHCP transaction
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[854]: <info>  [1760143784.4773] dhcp4 (eth1): state changed no lease
Oct 11 00:49:44 np0005480793.novalocal systemd[1]: Starting Network Manager Script Dispatcher Service...
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[854]: <info>  [1760143784.4838] exiting (success)
Oct 11 00:49:44 np0005480793.novalocal systemd[1]: Started Network Manager Script Dispatcher Service.
Oct 11 00:49:44 np0005480793.novalocal systemd[1]: NetworkManager.service: Deactivated successfully.
Oct 11 00:49:44 np0005480793.novalocal systemd[1]: Stopped Network Manager.
Oct 11 00:49:44 np0005480793.novalocal systemd[1]: Starting Network Manager...
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.5640] NetworkManager (version 1.54.1-1.el9) is starting... (after a restart, boot:eb68ea8c-2b5a-452e-9a83-23761d4fd4c0)
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.5646] Read config: /etc/NetworkManager/NetworkManager.conf, /run/NetworkManager/conf.d/15-carrier-timeout.conf
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.5736] manager[0x55c4eba81070]: monitoring kernel firmware directory '/lib/firmware'.
Oct 11 00:49:44 np0005480793.novalocal systemd[1]: Starting Hostname Service...
Oct 11 00:49:44 np0005480793.novalocal systemd[1]: Started Hostname Service.
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6788] hostname: hostname: using hostnamed
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6789] hostname: static hostname changed from (none) to "np0005480793.novalocal"
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6797] dns-mgr: init: dns=default,systemd-resolved rc-manager=symlink (auto)
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6803] manager[0x55c4eba81070]: rfkill: Wi-Fi hardware radio set enabled
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6803] manager[0x55c4eba81070]: rfkill: WWAN hardware radio set enabled
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6849] Loaded device plugin: NMTeamFactory (/usr/lib64/NetworkManager/1.54.1-1.el9/libnm-device-plugin-team.so)
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6850] manager: rfkill: Wi-Fi enabled by radio killswitch; enabled by state file
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6851] manager: rfkill: WWAN enabled by radio killswitch; enabled by state file
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6852] manager: Networking is enabled by state file
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6856] settings: Loaded settings plugin: keyfile (internal)
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6862] settings: Loaded settings plugin: ifcfg-rh ("/usr/lib64/NetworkManager/1.54.1-1.el9/libnm-settings-plugin-ifcfg-rh.so")
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6904] Warning: the ifcfg-rh plugin is deprecated, please migrate connections to the keyfile format using "nmcli connection migrate"
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6921] dhcp: init: Using DHCP client 'internal'
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6926] manager: (lo): new Loopback device (/org/freedesktop/NetworkManager/Devices/1)
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6935] device (lo): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6944] device (lo): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6957] device (lo): Activation: starting connection 'lo' (33081159-a34e-4514-87f3-ab50b6bb8250)
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6968] device (eth0): carrier: link connected
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6975] manager: (eth0): new Ethernet device (/org/freedesktop/NetworkManager/Devices/2)
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6982] manager: (eth0): assume: will attempt to assume matching connection 'System eth0' (5fb06bd0-0bb0-7ffb-45f1-d6edd65f3e03) (indicated)
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6983] device (eth0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'assume')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.6993] device (eth0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'assume')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7004] device (eth0): Activation: starting connection 'System eth0' (5fb06bd0-0bb0-7ffb-45f1-d6edd65f3e03)
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7014] device (eth1): carrier: link connected
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7020] manager: (eth1): new Ethernet device (/org/freedesktop/NetworkManager/Devices/3)
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7029] manager: (eth1): assume: will attempt to assume matching connection 'Wired connection 1' (0495e8e2-a84b-3e37-a3c0-1eb40b77fe9f) (indicated)
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7029] device (eth1): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'assume')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7039] device (eth1): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'assume')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7049] device (eth1): Activation: starting connection 'Wired connection 1' (0495e8e2-a84b-3e37-a3c0-1eb40b77fe9f)
Oct 11 00:49:44 np0005480793.novalocal systemd[1]: Started Network Manager.
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7062] bus-manager: acquired D-Bus service "org.freedesktop.NetworkManager"
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7071] device (lo): state change: disconnected -> prepare (reason 'none', managed-type: 'external')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7077] device (lo): state change: prepare -> config (reason 'none', managed-type: 'external')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7084] device (lo): state change: config -> ip-config (reason 'none', managed-type: 'external')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7088] device (eth0): state change: disconnected -> prepare (reason 'none', managed-type: 'assume')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7092] device (eth0): state change: prepare -> config (reason 'none', managed-type: 'assume')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7096] device (eth1): state change: disconnected -> prepare (reason 'none', managed-type: 'assume')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7100] device (eth1): state change: prepare -> config (reason 'none', managed-type: 'assume')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7105] device (lo): state change: ip-config -> ip-check (reason 'none', managed-type: 'external')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7117] device (eth0): state change: config -> ip-config (reason 'none', managed-type: 'assume')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7122] dhcp4 (eth0): activation: beginning transaction (timeout in 45 seconds)
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7134] device (eth1): state change: config -> ip-config (reason 'none', managed-type: 'assume')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7141] dhcp4 (eth1): activation: beginning transaction (timeout in 45 seconds)
Oct 11 00:49:44 np0005480793.novalocal systemd[1]: Starting Network Manager Wait Online...
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7177] device (lo): state change: ip-check -> secondaries (reason 'none', managed-type: 'external')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7179] device (lo): state change: secondaries -> activated (reason 'none', managed-type: 'external')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7187] device (lo): Activation: successful, device activated.
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7199] dhcp4 (eth0): state changed new lease, address=38.102.83.82
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7216] policy: set 'System eth0' (eth0) as default for IPv4 routing and DNS
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7309] device (eth0): state change: ip-config -> ip-check (reason 'none', managed-type: 'assume')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7340] device (eth0): state change: ip-check -> secondaries (reason 'none', managed-type: 'assume')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7342] device (eth0): state change: secondaries -> activated (reason 'none', managed-type: 'assume')
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7345] manager: NetworkManager state is now CONNECTED_SITE
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7348] device (eth0): Activation: successful, device activated.
Oct 11 00:49:44 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143784.7359] manager: NetworkManager state is now CONNECTED_GLOBAL
Oct 11 00:49:44 np0005480793.novalocal sudo[3926]: pam_unix(sudo:session): session closed for user root
Oct 11 00:49:45 np0005480793.novalocal python3[4012]: ansible-ansible.legacy.command Invoked with _raw_params=ip route zuul_log_id=fa163e3b-3c83-d5e2-2e00-0000000000a7-0-controller zuul_ansible_split_streams=False _uses_shell=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 00:49:54 np0005480793.novalocal systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.
Oct 11 00:49:58 np0005480793.novalocal sshd-session[4015]: Received disconnect from 193.46.255.7 port 22764:11:  [preauth]
Oct 11 00:49:58 np0005480793.novalocal sshd-session[4015]: Disconnected from authenticating user root 193.46.255.7 port 22764 [preauth]
Oct 11 00:50:14 np0005480793.novalocal systemd[1]: systemd-hostnamed.service: Deactivated successfully.
Oct 11 00:50:22 np0005480793.novalocal systemd[1053]: Starting Mark boot as successful...
Oct 11 00:50:22 np0005480793.novalocal systemd[1053]: Finished Mark boot as successful.
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2215] device (eth1): state change: ip-config -> ip-check (reason 'none', managed-type: 'assume')
Oct 11 00:50:30 np0005480793.novalocal systemd[1]: Starting Network Manager Script Dispatcher Service...
Oct 11 00:50:30 np0005480793.novalocal systemd[1]: Started Network Manager Script Dispatcher Service.
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2622] device (eth1): state change: ip-check -> secondaries (reason 'none', managed-type: 'assume')
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2626] device (eth1): state change: secondaries -> activated (reason 'none', managed-type: 'assume')
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2638] device (eth1): Activation: successful, device activated.
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2648] manager: startup complete
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2651] device (eth1): state change: activated -> failed (reason 'ip-config-unavailable', managed-type: 'full')
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <warn>  [1760143830.2659] device (eth1): Activation: failed for connection 'Wired connection 1'
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2670] device (eth1): state change: failed -> disconnected (reason 'none', managed-type: 'full')
Oct 11 00:50:30 np0005480793.novalocal systemd[1]: Finished Network Manager Wait Online.
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2763] dhcp4 (eth1): canceled DHCP transaction
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2765] dhcp4 (eth1): activation: beginning transaction (timeout in 45 seconds)
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2766] dhcp4 (eth1): state changed no lease
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2793] policy: auto-activating connection 'ci-private-network' (35c95777-b4d1-53c3-bd1c-f3dcadb92093)
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2805] device (eth1): Activation: starting connection 'ci-private-network' (35c95777-b4d1-53c3-bd1c-f3dcadb92093)
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2808] device (eth1): state change: disconnected -> prepare (reason 'none', managed-type: 'full')
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2819] device (eth1): state change: prepare -> config (reason 'none', managed-type: 'full')
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2834] device (eth1): state change: config -> ip-config (reason 'none', managed-type: 'full')
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2854] device (eth1): state change: ip-config -> ip-check (reason 'none', managed-type: 'full')
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2908] device (eth1): state change: ip-check -> secondaries (reason 'none', managed-type: 'full')
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2912] device (eth1): state change: secondaries -> activated (reason 'none', managed-type: 'full')
Oct 11 00:50:30 np0005480793.novalocal NetworkManager[3938]: <info>  [1760143830.2922] device (eth1): Activation: successful, device activated.
Oct 11 00:50:40 np0005480793.novalocal systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.
Oct 11 00:50:43 np0005480793.novalocal sudo[4118]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-auwldxttakwsmwffiecqwiccxryfnowh ; OS_CLOUD=vexxhost /usr/bin/python3'
Oct 11 00:50:43 np0005480793.novalocal sudo[4118]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:50:43 np0005480793.novalocal python3[4120]: ansible-ansible.legacy.stat Invoked with path=/etc/ci/env/networking-info.yml follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 00:50:43 np0005480793.novalocal sudo[4118]: pam_unix(sudo:session): session closed for user root
Oct 11 00:50:43 np0005480793.novalocal sudo[4191]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-llstkybeukzwzpdbclvqxqvdwghdepxl ; OS_CLOUD=vexxhost /usr/bin/python3'
Oct 11 00:50:43 np0005480793.novalocal sudo[4191]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:50:43 np0005480793.novalocal python3[4193]: ansible-ansible.legacy.copy Invoked with dest=/etc/ci/env/networking-info.yml owner=root group=root mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760143843.2595704-267-272547938792018/source _original_basename=tmp1037hkys follow=False checksum=b31ca454b060a8965d78bedc546f97a7c92351d7 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:50:43 np0005480793.novalocal sudo[4191]: pam_unix(sudo:session): session closed for user root
Oct 11 00:51:44 np0005480793.novalocal sshd-session[1062]: Received disconnect from 38.102.83.114 port 60460:11: disconnected by user
Oct 11 00:51:44 np0005480793.novalocal sshd-session[1062]: Disconnected from user zuul 38.102.83.114 port 60460
Oct 11 00:51:44 np0005480793.novalocal sshd-session[1049]: pam_unix(sshd:session): session closed for user zuul
Oct 11 00:51:44 np0005480793.novalocal systemd-logind[804]: Session 1 logged out. Waiting for processes to exit.
Oct 11 00:53:22 np0005480793.novalocal systemd[1053]: Created slice User Background Tasks Slice.
Oct 11 00:53:22 np0005480793.novalocal systemd[1053]: Starting Cleanup of User's Temporary Files and Directories...
Oct 11 00:53:22 np0005480793.novalocal systemd[1053]: Finished Cleanup of User's Temporary Files and Directories.
Oct 11 00:55:57 np0005480793.novalocal sshd-session[4222]: Accepted publickey for zuul from 38.102.83.114 port 42892 ssh2: RSA SHA256:sxgyqRujXfGvMV2Eq7ZlGcFGCGFr/dtz6dk2ZJwy3W4
Oct 11 00:55:57 np0005480793.novalocal systemd-logind[804]: New session 3 of user zuul.
Oct 11 00:55:57 np0005480793.novalocal systemd[1]: Started Session 3 of User zuul.
Oct 11 00:55:57 np0005480793.novalocal sshd-session[4222]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 00:55:57 np0005480793.novalocal sudo[4249]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bspobywbmtskejikmxifpjexfismziah ; /usr/bin/python3'
Oct 11 00:55:57 np0005480793.novalocal sudo[4249]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:55:57 np0005480793.novalocal python3[4251]: ansible-ansible.legacy.command Invoked with _raw_params=lsblk -nd -o MAJ:MIN /dev/vda _uses_shell=True zuul_log_id=fa163e3b-3c83-2b40-e31c-000000001ce8-1-compute0 zuul_ansible_split_streams=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 00:55:57 np0005480793.novalocal sudo[4249]: pam_unix(sudo:session): session closed for user root
Oct 11 00:55:57 np0005480793.novalocal sudo[4277]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-atorcjwmlfaawwnsiiegbcgqiormjngf ; /usr/bin/python3'
Oct 11 00:55:57 np0005480793.novalocal sudo[4277]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:55:58 np0005480793.novalocal python3[4279]: ansible-ansible.builtin.file Invoked with path=/sys/fs/cgroup/init.scope state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:55:58 np0005480793.novalocal sudo[4277]: pam_unix(sudo:session): session closed for user root
Oct 11 00:55:58 np0005480793.novalocal sudo[4304]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ddweyfjzamgedoiakcbuitptjagpbkum ; /usr/bin/python3'
Oct 11 00:55:58 np0005480793.novalocal sudo[4304]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:55:58 np0005480793.novalocal python3[4306]: ansible-ansible.builtin.file Invoked with path=/sys/fs/cgroup/machine.slice state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:55:58 np0005480793.novalocal sudo[4304]: pam_unix(sudo:session): session closed for user root
Oct 11 00:55:58 np0005480793.novalocal sudo[4330]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jmcrbrgfajmxvafulqiayxozcyosuhqf ; /usr/bin/python3'
Oct 11 00:55:58 np0005480793.novalocal sudo[4330]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:55:58 np0005480793.novalocal python3[4332]: ansible-ansible.builtin.file Invoked with path=/sys/fs/cgroup/system.slice state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:55:58 np0005480793.novalocal sudo[4330]: pam_unix(sudo:session): session closed for user root
Oct 11 00:55:58 np0005480793.novalocal sudo[4356]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jcwnqdeipwwhkmehvwtmpeabfmgtntbc ; /usr/bin/python3'
Oct 11 00:55:58 np0005480793.novalocal sudo[4356]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:55:58 np0005480793.novalocal python3[4358]: ansible-ansible.builtin.file Invoked with path=/sys/fs/cgroup/user.slice state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:55:58 np0005480793.novalocal sudo[4356]: pam_unix(sudo:session): session closed for user root
Oct 11 00:55:59 np0005480793.novalocal sudo[4382]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-twcednsdtrurozxwtqeggkhcdtvsemjx ; /usr/bin/python3'
Oct 11 00:55:59 np0005480793.novalocal sudo[4382]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:55:59 np0005480793.novalocal irqbalance[789]: Cannot change IRQ 27 affinity: Operation not permitted
Oct 11 00:55:59 np0005480793.novalocal irqbalance[789]: IRQ 27 affinity is now unmanaged
Oct 11 00:55:59 np0005480793.novalocal python3[4384]: ansible-ansible.builtin.lineinfile Invoked with path=/etc/systemd/system.conf regexp=^#DefaultIOAccounting=no line=DefaultIOAccounting=yes state=present backrefs=False create=False backup=False firstmatch=False unsafe_writes=False search_string=None insertafter=None insertbefore=None validate=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:55:59 np0005480793.novalocal python3[4384]: ansible-ansible.builtin.lineinfile [WARNING] Module remote_tmp /root/.ansible/tmp did not exist and was created with a mode of 0700, this may cause issues when running as another user. To avoid this, create the remote_tmp dir with the correct permissions manually
Oct 11 00:55:59 np0005480793.novalocal sudo[4382]: pam_unix(sudo:session): session closed for user root
Oct 11 00:55:59 np0005480793.novalocal sudo[4408]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eeqxhqlnmwlbykodstnvlgvtmsrozelf ; /usr/bin/python3'
Oct 11 00:55:59 np0005480793.novalocal sudo[4408]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:56:00 np0005480793.novalocal python3[4410]: ansible-ansible.builtin.systemd_service Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 00:56:00 np0005480793.novalocal systemd[1]: Reloading.
Oct 11 00:56:00 np0005480793.novalocal systemd-rc-local-generator[4431]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 00:56:00 np0005480793.novalocal sudo[4408]: pam_unix(sudo:session): session closed for user root
Oct 11 00:56:01 np0005480793.novalocal sudo[4466]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mxrosuoebwcfvwchkdoszssbqlvqooxg ; /usr/bin/python3'
Oct 11 00:56:01 np0005480793.novalocal sudo[4466]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:56:01 np0005480793.novalocal python3[4468]: ansible-ansible.builtin.wait_for Invoked with path=/sys/fs/cgroup/system.slice/io.max state=present timeout=30 host=127.0.0.1 connect_timeout=5 delay=0 active_connection_states=['ESTABLISHED', 'FIN_WAIT1', 'FIN_WAIT2', 'SYN_RECV', 'SYN_SENT', 'TIME_WAIT'] sleep=1 port=None search_regex=None exclude_hosts=None msg=None
Oct 11 00:56:01 np0005480793.novalocal sudo[4466]: pam_unix(sudo:session): session closed for user root
Oct 11 00:56:02 np0005480793.novalocal sudo[4492]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jyzwxvrxruahspxzauryllgnnjhqgqlj ; /usr/bin/python3'
Oct 11 00:56:02 np0005480793.novalocal sudo[4492]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:56:02 np0005480793.novalocal python3[4494]: ansible-ansible.legacy.command Invoked with _raw_params=echo "252:0   riops=18000 wiops=18000 rbps=262144000 wbps=262144000" > /sys/fs/cgroup/init.scope/io.max _uses_shell=True zuul_log_id=in-loop-ignore zuul_ansible_split_streams=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 00:56:02 np0005480793.novalocal sudo[4492]: pam_unix(sudo:session): session closed for user root
Oct 11 00:56:02 np0005480793.novalocal sudo[4520]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ptthspkdwcrgzbxyyetgokirysvtvaxu ; /usr/bin/python3'
Oct 11 00:56:02 np0005480793.novalocal sudo[4520]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:56:02 np0005480793.novalocal python3[4522]: ansible-ansible.legacy.command Invoked with _raw_params=echo "252:0   riops=18000 wiops=18000 rbps=262144000 wbps=262144000" > /sys/fs/cgroup/machine.slice/io.max
                                                       _uses_shell=True zuul_log_id=in-loop-ignore zuul_ansible_split_streams=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 00:56:02 np0005480793.novalocal sudo[4520]: pam_unix(sudo:session): session closed for user root
Oct 11 00:56:02 np0005480793.novalocal sudo[4548]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rzuuiutfvnmgnnwnuzwqwzobjxaardka ; /usr/bin/python3'
Oct 11 00:56:02 np0005480793.novalocal sudo[4548]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:56:02 np0005480793.novalocal python3[4550]: ansible-ansible.legacy.command Invoked with _raw_params=echo "252:0   riops=18000 wiops=18000 rbps=262144000 wbps=262144000" > /sys/fs/cgroup/system.slice/io.max
                                                       _uses_shell=True zuul_log_id=in-loop-ignore zuul_ansible_split_streams=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 00:56:02 np0005480793.novalocal sudo[4548]: pam_unix(sudo:session): session closed for user root
Oct 11 00:56:02 np0005480793.novalocal sudo[4576]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-thgpfhvnanppfghhxhnoslhvtjlnidan ; /usr/bin/python3'
Oct 11 00:56:02 np0005480793.novalocal sudo[4576]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:56:03 np0005480793.novalocal python3[4578]: ansible-ansible.legacy.command Invoked with _raw_params=echo "252:0   riops=18000 wiops=18000 rbps=262144000 wbps=262144000" > /sys/fs/cgroup/user.slice/io.max
                                                       _uses_shell=True zuul_log_id=in-loop-ignore zuul_ansible_split_streams=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 00:56:03 np0005480793.novalocal sudo[4576]: pam_unix(sudo:session): session closed for user root
Oct 11 00:56:03 np0005480793.novalocal python3[4605]: ansible-ansible.legacy.command Invoked with _raw_params=echo "init";    cat /sys/fs/cgroup/init.scope/io.max; echo "machine"; cat /sys/fs/cgroup/machine.slice/io.max; echo "system";  cat /sys/fs/cgroup/system.slice/io.max; echo "user";    cat /sys/fs/cgroup/user.slice/io.max;
                                                       _uses_shell=True zuul_log_id=fa163e3b-3c83-2b40-e31c-000000001cee-1-compute0 zuul_ansible_split_streams=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
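
The four writes above throttle block device 252:0 (on this KVM guest, presumably the virtio system disk) to 18k read/write IOPS and 250 MiB/s (262144000 bytes/s) in each top-level slice, then read every io.max back as a check. cgroup v2 io.max takes "MAJ:MIN key=value ..." pairs, with "max" clearing a limit; one of the writes, by hand (a sketch):

    echo "252:0 riops=18000 wiops=18000 rbps=262144000 wbps=262144000" \
        > /sys/fs/cgroup/system.slice/io.max
    cat /sys/fs/cgroup/system.slice/io.max    # kernel echoes back the active limits
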
Oct 11 00:56:04 np0005480793.novalocal python3[4635]: ansible-ansible.builtin.stat Invoked with path=/sys/fs/cgroup/kubepods.slice/io.max follow=False get_md5=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 00:56:05 np0005480793.novalocal sshd-session[4225]: Connection closed by 38.102.83.114 port 42892
Oct 11 00:56:05 np0005480793.novalocal sshd-session[4222]: pam_unix(sshd:session): session closed for user zuul
Oct 11 00:56:05 np0005480793.novalocal systemd[1]: session-3.scope: Deactivated successfully.
Oct 11 00:56:05 np0005480793.novalocal systemd[1]: session-3.scope: Consumed 4.026s CPU time.
Oct 11 00:56:05 np0005480793.novalocal systemd-logind[804]: Session 3 logged out. Waiting for processes to exit.
Oct 11 00:56:05 np0005480793.novalocal systemd-logind[804]: Removed session 3.
Oct 11 00:56:07 np0005480793.novalocal sshd-session[4641]: Accepted publickey for zuul from 38.102.83.114 port 55096 ssh2: RSA SHA256:sxgyqRujXfGvMV2Eq7ZlGcFGCGFr/dtz6dk2ZJwy3W4
Oct 11 00:56:07 np0005480793.novalocal systemd-logind[804]: New session 4 of user zuul.
Oct 11 00:56:07 np0005480793.novalocal systemd[1]: Started Session 4 of User zuul.
Oct 11 00:56:07 np0005480793.novalocal sshd-session[4641]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 00:56:07 np0005480793.novalocal sudo[4668]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aijdkqrartdclewjenirqyvwjcjykupd ; /usr/bin/python3'
Oct 11 00:56:07 np0005480793.novalocal sudo[4668]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:56:07 np0005480793.novalocal python3[4670]: ansible-ansible.legacy.dnf Invoked with name=['podman', 'buildah'] state=present allow_downgrade=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 allowerasing=False nobest=False use_backend=auto conf_file=None disable_excludes=None download_dir=None list=None releasever=None
Oct 11 00:56:21 np0005480793.novalocal kernel: SELinux:  Converting 363 SID table entries...
Oct 11 00:56:21 np0005480793.novalocal kernel: SELinux:  policy capability network_peer_controls=1
Oct 11 00:56:21 np0005480793.novalocal kernel: SELinux:  policy capability open_perms=1
Oct 11 00:56:21 np0005480793.novalocal kernel: SELinux:  policy capability extended_socket_class=1
Oct 11 00:56:21 np0005480793.novalocal kernel: SELinux:  policy capability always_check_network=0
Oct 11 00:56:21 np0005480793.novalocal kernel: SELinux:  policy capability cgroup_seclabel=1
Oct 11 00:56:21 np0005480793.novalocal kernel: SELinux:  policy capability nnp_nosuid_transition=1
Oct 11 00:56:21 np0005480793.novalocal kernel: SELinux:  policy capability genfs_seclabel_symlinks=1
Oct 11 00:56:30 np0005480793.novalocal kernel: SELinux:  Converting 363 SID table entries...
Oct 11 00:56:30 np0005480793.novalocal kernel: SELinux:  policy capability network_peer_controls=1
Oct 11 00:56:30 np0005480793.novalocal kernel: SELinux:  policy capability open_perms=1
Oct 11 00:56:30 np0005480793.novalocal kernel: SELinux:  policy capability extended_socket_class=1
Oct 11 00:56:30 np0005480793.novalocal kernel: SELinux:  policy capability always_check_network=0
Oct 11 00:56:30 np0005480793.novalocal kernel: SELinux:  policy capability cgroup_seclabel=1
Oct 11 00:56:30 np0005480793.novalocal kernel: SELinux:  policy capability nnp_nosuid_transition=1
Oct 11 00:56:30 np0005480793.novalocal kernel: SELinux:  policy capability genfs_seclabel_symlinks=1
Oct 11 00:56:39 np0005480793.novalocal kernel: SELinux:  Converting 363 SID table entries...
Oct 11 00:56:39 np0005480793.novalocal kernel: SELinux:  policy capability network_peer_controls=1
Oct 11 00:56:39 np0005480793.novalocal kernel: SELinux:  policy capability open_perms=1
Oct 11 00:56:39 np0005480793.novalocal kernel: SELinux:  policy capability extended_socket_class=1
Oct 11 00:56:39 np0005480793.novalocal kernel: SELinux:  policy capability always_check_network=0
Oct 11 00:56:39 np0005480793.novalocal kernel: SELinux:  policy capability cgroup_seclabel=1
Oct 11 00:56:39 np0005480793.novalocal kernel: SELinux:  policy capability nnp_nosuid_transition=1
Oct 11 00:56:39 np0005480793.novalocal kernel: SELinux:  policy capability genfs_seclabel_symlinks=1
Oct 11 00:56:40 np0005480793.novalocal setsebool[4737]: The virt_use_nfs policy boolean was changed to 1 by root
Oct 11 00:56:40 np0005480793.novalocal setsebool[4737]: The virt_sandbox_use_all_caps policy boolean was changed to 1 by root
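
Each persistent boolean change rebuilds and reloads the SELinux policy, which is what the recurring "Converting ... SID table entries" kernel bursts around these lines are. Both flips in one persistent call, by hand (a sketch):

    setsebool -P virt_use_nfs=1 virt_sandbox_use_all_caps=1
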
Oct 11 00:56:53 np0005480793.novalocal kernel: SELinux:  Converting 366 SID table entries...
Oct 11 00:56:53 np0005480793.novalocal kernel: SELinux:  policy capability network_peer_controls=1
Oct 11 00:56:53 np0005480793.novalocal kernel: SELinux:  policy capability open_perms=1
Oct 11 00:56:53 np0005480793.novalocal kernel: SELinux:  policy capability extended_socket_class=1
Oct 11 00:56:53 np0005480793.novalocal kernel: SELinux:  policy capability always_check_network=0
Oct 11 00:56:53 np0005480793.novalocal kernel: SELinux:  policy capability cgroup_seclabel=1
Oct 11 00:56:53 np0005480793.novalocal kernel: SELinux:  policy capability nnp_nosuid_transition=1
Oct 11 00:56:53 np0005480793.novalocal kernel: SELinux:  policy capability genfs_seclabel_symlinks=1
Oct 11 00:57:11 np0005480793.novalocal dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=6 res=1
Oct 11 00:57:11 np0005480793.novalocal systemd[1]: Started /usr/bin/systemctl start man-db-cache-update.
Oct 11 00:57:11 np0005480793.novalocal systemd[1]: Starting man-db-cache-update.service...
Oct 11 00:57:11 np0005480793.novalocal systemd[1]: Reloading.
Oct 11 00:57:11 np0005480793.novalocal systemd-rc-local-generator[5491]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 00:57:11 np0005480793.novalocal systemd[1]: Queuing reload/restart jobs for marked units…
Oct 11 00:57:12 np0005480793.novalocal systemd[1]: Starting PackageKit Daemon...
Oct 11 00:57:12 np0005480793.novalocal PackageKit[6139]: daemon start
Oct 11 00:57:12 np0005480793.novalocal systemd[1]: Starting Authorization Manager...
Oct 11 00:57:12 np0005480793.novalocal polkitd[6240]: Started polkitd version 0.117
Oct 11 00:57:12 np0005480793.novalocal polkitd[6240]: Loading rules from directory /etc/polkit-1/rules.d
Oct 11 00:57:12 np0005480793.novalocal polkitd[6240]: Loading rules from directory /usr/share/polkit-1/rules.d
Oct 11 00:57:12 np0005480793.novalocal polkitd[6240]: Finished loading, compiling and executing 3 rules
Oct 11 00:57:12 np0005480793.novalocal polkitd[6240]: Acquired the name org.freedesktop.PolicyKit1 on the system bus
Oct 11 00:57:12 np0005480793.novalocal systemd[1]: Started Authorization Manager.
Oct 11 00:57:12 np0005480793.novalocal systemd[1]: Started PackageKit Daemon.
Oct 11 00:57:12 np0005480793.novalocal sudo[4668]: pam_unix(sudo:session): session closed for user root
Oct 11 00:57:14 np0005480793.novalocal python3[7718]: ansible-ansible.legacy.command Invoked with _raw_params=echo "openstack-k8s-operators+cirobot"
                                                       _uses_shell=True zuul_log_id=fa163e3b-3c83-a773-48b9-00000000000a-1-compute0 zuul_ansible_split_streams=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 00:57:15 np0005480793.novalocal kernel: evm: overlay not supported
Oct 11 00:57:15 np0005480793.novalocal systemd[1053]: Starting D-Bus User Message Bus...
Oct 11 00:57:15 np0005480793.novalocal dbus-broker-launch[8453]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored
Oct 11 00:57:15 np0005480793.novalocal dbus-broker-launch[8453]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored
Oct 11 00:57:15 np0005480793.novalocal systemd[1053]: Started D-Bus User Message Bus.
Oct 11 00:57:15 np0005480793.novalocal dbus-broker-lau[8453]: Ready
Oct 11 00:57:15 np0005480793.novalocal systemd[1053]: selinux: avc:  op=load_policy lsm=selinux seqno=6 res=1
Oct 11 00:57:15 np0005480793.novalocal systemd[1053]: Created slice Slice /user.
Oct 11 00:57:15 np0005480793.novalocal systemd[1053]: podman-8337.scope: unit configures an IP firewall, but not running as root.
Oct 11 00:57:15 np0005480793.novalocal systemd[1053]: (This warning is only shown for the first unit using IP firewalling.)
Oct 11 00:57:15 np0005480793.novalocal systemd[1053]: Started podman-8337.scope.
Oct 11 00:57:15 np0005480793.novalocal systemd[1053]: Started podman-pause-ce17db89.scope.
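
The podman-*.scope units under systemd[1053] are rootless podman at work: the first invocation asks the zuul user's systemd instance for transient scopes, and the "IP firewall, but not running as root" warning is expected in that unprivileged setup. To inspect one of them later (a sketch):

    systemctl --user status podman-pause-ce17db89.scope
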
Oct 11 00:57:16 np0005480793.novalocal sudo[8831]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uwfwgfmrqctblvegcrnzqlokhmqcvtwg ; /usr/bin/python3'
Oct 11 00:57:16 np0005480793.novalocal sudo[8831]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:57:16 np0005480793.novalocal python3[8846]: ansible-ansible.builtin.blockinfile Invoked with state=present insertafter=EOF dest=/etc/containers/registries.conf content=[[registry]]
                                                      location = "38.102.83.145:5001"
                                                      insecure = true path=/etc/containers/registries.conf block=[[registry]]
                                                      location = "38.102.83.145:5001"
                                                      insecure = true marker=# {mark} ANSIBLE MANAGED BLOCK create=False backup=False marker_begin=BEGIN marker_end=END unsafe_writes=False insertbefore=None validate=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:57:16 np0005480793.novalocal sudo[8831]: pam_unix(sudo:session): session closed for user root
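
The blockinfile task above appends an insecure-registry entry for the CI registry at 38.102.83.145:5001. Given the defaults logged (marker "# {mark} ANSIBLE MANAGED BLOCK", insertafter=EOF), the resulting append is roughly:

    cat >> /etc/containers/registries.conf <<'EOF'
    # BEGIN ANSIBLE MANAGED BLOCK
    [[registry]]
    location = "38.102.83.145:5001"
    insecure = true
    # END ANSIBLE MANAGED BLOCK
    EOF
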
Oct 11 00:57:16 np0005480793.novalocal sshd-session[4644]: Connection closed by 38.102.83.114 port 55096
Oct 11 00:57:16 np0005480793.novalocal sshd-session[4641]: pam_unix(sshd:session): session closed for user zuul
Oct 11 00:57:16 np0005480793.novalocal systemd[1]: session-4.scope: Deactivated successfully.
Oct 11 00:57:16 np0005480793.novalocal systemd[1]: session-4.scope: Consumed 1min 2.144s CPU time.
Oct 11 00:57:16 np0005480793.novalocal systemd-logind[804]: Session 4 logged out. Waiting for processes to exit.
Oct 11 00:57:16 np0005480793.novalocal systemd-logind[804]: Removed session 4.
Oct 11 00:57:36 np0005480793.novalocal sshd-session[15795]: Unable to negotiate with 38.102.83.70 port 33314: no matching host key type found. Their offer: ssh-ed25519 [preauth]
Oct 11 00:57:36 np0005480793.novalocal sshd-session[15798]: Connection closed by 38.102.83.70 port 33290 [preauth]
Oct 11 00:57:36 np0005480793.novalocal sshd-session[15800]: Unable to negotiate with 38.102.83.70 port 33328: no matching host key type found. Their offer: sk-ecdsa-sha2-nistp256@openssh.com [preauth]
Oct 11 00:57:36 np0005480793.novalocal sshd-session[15793]: Unable to negotiate with 38.102.83.70 port 33344: no matching host key type found. Their offer: sk-ssh-ed25519@openssh.com [preauth]
Oct 11 00:57:36 np0005480793.novalocal sshd-session[15801]: Connection closed by 38.102.83.70 port 33304 [preauth]
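
These "no matching host key type found" probes (ed25519 and FIDO sk-* offers) mean the scanning client only accepts host key types this server does not serve; the key types actually installed on the host can be listed with (a sketch):

    ls /etc/ssh/ssh_host_*_key.pub
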
Oct 11 00:57:40 np0005480793.novalocal sshd-session[17194]: Accepted publickey for zuul from 38.102.83.114 port 37068 ssh2: RSA SHA256:sxgyqRujXfGvMV2Eq7ZlGcFGCGFr/dtz6dk2ZJwy3W4
Oct 11 00:57:40 np0005480793.novalocal systemd-logind[804]: New session 5 of user zuul.
Oct 11 00:57:40 np0005480793.novalocal systemd[1]: Started Session 5 of User zuul.
Oct 11 00:57:40 np0005480793.novalocal sshd-session[17194]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 00:57:41 np0005480793.novalocal python3[17289]: ansible-ansible.posix.authorized_key Invoked with user=zuul key=ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJESShMrDVu6CO8We6aK0R1gnbUkxsZljxorR+buFgpUlN2sBdpveNKeae2iB3wECIKV09nfVoUYoVGmnw6VJ2g= zuul@np0005480792.novalocal
                                                        manage_dir=True state=present exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:57:41 np0005480793.novalocal sudo[17417]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ksgmsjshnmywpjelllyuwccyldpsjvwn ; /usr/bin/python3'
Oct 11 00:57:41 np0005480793.novalocal sudo[17417]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:57:41 np0005480793.novalocal python3[17430]: ansible-ansible.posix.authorized_key Invoked with user=root key=ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJESShMrDVu6CO8We6aK0R1gnbUkxsZljxorR+buFgpUlN2sBdpveNKeae2iB3wECIKV09nfVoUYoVGmnw6VJ2g= zuul@np0005480792.novalocal
                                                        manage_dir=True state=present exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:57:41 np0005480793.novalocal sudo[17417]: pam_unix(sudo:session): session closed for user root
Oct 11 00:57:42 np0005480793.novalocal sudo[17710]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-neczatvgvklrqjzyfihsdpelkngcpait ; /usr/bin/python3'
Oct 11 00:57:42 np0005480793.novalocal sudo[17710]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:57:42 np0005480793.novalocal python3[17720]: ansible-ansible.builtin.user Invoked with name=cloud-admin shell=/bin/bash state=present non_unique=False force=False remove=False create_home=True system=False move_home=False append=False ssh_key_bits=0 ssh_key_type=rsa ssh_key_comment=ansible-generated on np0005480793.novalocal update_password=always uid=None group=None groups=None comment=None home=None password=NOT_LOGGING_PARAMETER login_class=None password_expire_max=None password_expire_min=None hidden=None seuser=None skeleton=None generate_ssh_key=None ssh_key_file=None ssh_key_passphrase=NOT_LOGGING_PARAMETER expires=None password_lock=None local=None profile=None authorization=None role=None umask=None
Oct 11 00:57:42 np0005480793.novalocal useradd[17785]: new group: name=cloud-admin, GID=1002
Oct 11 00:57:42 np0005480793.novalocal useradd[17785]: new user: name=cloud-admin, UID=1002, GID=1002, home=/home/cloud-admin, shell=/bin/bash, from=none
Oct 11 00:57:42 np0005480793.novalocal sudo[17710]: pam_unix(sudo:session): session closed for user root
Oct 11 00:57:42 np0005480793.novalocal sudo[17892]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hpkzbashscqkexmhblojiocaqqgmsnot ; /usr/bin/python3'
Oct 11 00:57:42 np0005480793.novalocal sudo[17892]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:57:42 np0005480793.novalocal python3[17903]: ansible-ansible.posix.authorized_key Invoked with user=cloud-admin key=ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJESShMrDVu6CO8We6aK0R1gnbUkxsZljxorR+buFgpUlN2sBdpveNKeae2iB3wECIKV09nfVoUYoVGmnw6VJ2g= zuul@np0005480792.novalocal
                                                        manage_dir=True state=present exclusive=False validate_certs=True follow=False path=None key_options=None comment=None
Oct 11 00:57:42 np0005480793.novalocal sudo[17892]: pam_unix(sudo:session): session closed for user root
Oct 11 00:57:43 np0005480793.novalocal sudo[18119]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oemukzxkqrostbegebbjmtxhonvvduhf ; /usr/bin/python3'
Oct 11 00:57:43 np0005480793.novalocal sudo[18119]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:57:43 np0005480793.novalocal python3[18129]: ansible-ansible.legacy.stat Invoked with path=/etc/sudoers.d/cloud-admin follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 00:57:43 np0005480793.novalocal sudo[18119]: pam_unix(sudo:session): session closed for user root
Oct 11 00:57:43 np0005480793.novalocal sudo[18357]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hnnyyzssajlzuzwnlihtfmgjrpsvzbcy ; /usr/bin/python3'
Oct 11 00:57:43 np0005480793.novalocal sudo[18357]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:57:43 np0005480793.novalocal python3[18368]: ansible-ansible.legacy.copy Invoked with dest=/etc/sudoers.d/cloud-admin mode=0640 src=/home/zuul/.ansible/tmp/ansible-tmp-1760144262.9661977-135-177566822548759/source _original_basename=tmp6dgjc56d follow=False checksum=e7614e5ad3ab06eaae55b8efaa2ed81b63ea5634 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 00:57:43 np0005480793.novalocal sudo[18357]: pam_unix(sudo:session): session closed for user root
Oct 11 00:57:44 np0005480793.novalocal sudo[18630]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-llxztrfzlwjwohawmxrgnglcejhguxnf ; /usr/bin/python3'
Oct 11 00:57:44 np0005480793.novalocal sudo[18630]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 00:57:44 np0005480793.novalocal python3[18637]: ansible-ansible.builtin.hostname Invoked with name=compute-0 use=systemd
Oct 11 00:57:44 np0005480793.novalocal systemd[1]: Starting Hostname Service...
Oct 11 00:57:44 np0005480793.novalocal systemd[1]: Started Hostname Service.
Oct 11 00:57:44 np0005480793.novalocal systemd-hostnamed[18719]: Changed pretty hostname to 'compute-0'
Oct 11 00:57:44 compute-0 systemd-hostnamed[18719]: Hostname set to <compute-0> (static)
Oct 11 00:57:44 compute-0 NetworkManager[3938]: <info>  [1760144264.9314] hostname: static hostname changed from "np0005480793.novalocal" to "compute-0"
Oct 11 00:57:44 compute-0 systemd[1]: Starting Network Manager Script Dispatcher Service...
Oct 11 00:57:44 compute-0 systemd[1]: Started Network Manager Script Dispatcher Service.
Oct 11 00:57:45 compute-0 sudo[18630]: pam_unix(sudo:session): session closed for user root
Oct 11 00:57:45 compute-0 sshd-session[17236]: Connection closed by 38.102.83.114 port 37068
Oct 11 00:57:45 compute-0 sshd-session[17194]: pam_unix(sshd:session): session closed for user zuul
Oct 11 00:57:45 compute-0 systemd[1]: session-5.scope: Deactivated successfully.
Oct 11 00:57:45 compute-0 systemd[1]: session-5.scope: Consumed 2.776s CPU time.
Oct 11 00:57:45 compute-0 systemd-logind[804]: Session 5 logged out. Waiting for processes to exit.
Oct 11 00:57:45 compute-0 systemd-logind[804]: Removed session 5.
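
The hostname task (use=systemd) drives systemd-hostnamed over D-Bus, which is why the log prefix flips from np0005480793.novalocal to compute-0 mid-stream and NetworkManager reports the static hostname change. The by-hand equivalent (a sketch):

    hostnamectl set-hostname compute-0
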
Oct 11 00:57:55 compute-0 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.
Oct 11 00:58:12 compute-0 systemd[1]: man-db-cache-update.service: Deactivated successfully.
Oct 11 00:58:12 compute-0 systemd[1]: Finished man-db-cache-update.service.
Oct 11 00:58:12 compute-0 systemd[1]: man-db-cache-update.service: Consumed 1min 15.621s CPU time.
Oct 11 00:58:12 compute-0 systemd[1]: run-r7aa5f0b2903f4697b64c9e37f6f0d8e1.service: Deactivated successfully.
Oct 11 00:58:14 compute-0 systemd[1]: systemd-hostnamed.service: Deactivated successfully.
Oct 11 00:58:24 compute-0 sshd-session[26537]: Received disconnect from 80.94.93.233 port 45278:11:  [preauth]
Oct 11 00:58:24 compute-0 sshd-session[26537]: Disconnected from authenticating user root 80.94.93.233 port 45278 [preauth]
Oct 11 01:01:01 compute-0 CROND[26544]: (root) CMD (run-parts /etc/cron.hourly)
Oct 11 01:01:01 compute-0 run-parts[26547]: (/etc/cron.hourly) starting 0anacron
Oct 11 01:01:01 compute-0 anacron[26555]: Anacron started on 2025-10-11
Oct 11 01:01:01 compute-0 anacron[26555]: Will run job `cron.daily' in 45 min.
Oct 11 01:01:01 compute-0 anacron[26555]: Will run job `cron.weekly' in 65 min.
Oct 11 01:01:01 compute-0 anacron[26555]: Will run job `cron.monthly' in 85 min.
Oct 11 01:01:01 compute-0 anacron[26555]: Jobs will be executed sequentially
Oct 11 01:01:01 compute-0 run-parts[26557]: (/etc/cron.hourly) finished 0anacron
Oct 11 01:01:01 compute-0 CROND[26543]: (root) CMDEND (run-parts /etc/cron.hourly)
Oct 11 01:02:18 compute-0 PackageKit[6139]: daemon quit
Oct 11 01:02:18 compute-0 systemd[1]: packagekit.service: Deactivated successfully.
Oct 11 01:02:19 compute-0 sshd-session[26558]: Accepted publickey for zuul from 38.102.83.70 port 49606 ssh2: RSA SHA256:sxgyqRujXfGvMV2Eq7ZlGcFGCGFr/dtz6dk2ZJwy3W4
Oct 11 01:02:19 compute-0 systemd-logind[804]: New session 6 of user zuul.
Oct 11 01:02:19 compute-0 systemd[1]: Started Session 6 of User zuul.
Oct 11 01:02:19 compute-0 sshd-session[26558]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:02:19 compute-0 python3[26634]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:02:21 compute-0 sudo[26748]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xpecroskakwhrrxchahzvewaaxilbqtr ; /usr/bin/python3'
Oct 11 01:02:21 compute-0 sudo[26748]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:02:21 compute-0 python3[26750]: ansible-ansible.legacy.stat Invoked with path=/etc/yum.repos.d/delorean.repo follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 01:02:21 compute-0 sudo[26748]: pam_unix(sudo:session): session closed for user root
Oct 11 01:02:22 compute-0 sudo[26821]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mvjuezrazzhpojfttwodrinzmunqgvap ; /usr/bin/python3'
Oct 11 01:02:22 compute-0 sudo[26821]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:02:22 compute-0 python3[26823]: ansible-ansible.legacy.copy Invoked with dest=/etc/yum.repos.d/ src=/home/zuul/.ansible/tmp/ansible-tmp-1760144541.3154464-30235-84122535871710/source mode=0755 _original_basename=delorean.repo follow=False checksum=f3fabc627b4c59ab3d10213193ffdeeed080e354 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:02:22 compute-0 sudo[26821]: pam_unix(sudo:session): session closed for user root
Oct 11 01:02:22 compute-0 sudo[26847]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fvaujjgiabxdkevslfzglrtdwhvnwoer ; /usr/bin/python3'
Oct 11 01:02:22 compute-0 sudo[26847]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:02:22 compute-0 python3[26849]: ansible-ansible.legacy.stat Invoked with path=/etc/yum.repos.d/delorean-antelope-testing.repo follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 01:02:22 compute-0 sudo[26847]: pam_unix(sudo:session): session closed for user root
Oct 11 01:02:22 compute-0 sudo[26920]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-amfgsowigfeddcowtsfeeoikeuaxsyxm ; /usr/bin/python3'
Oct 11 01:02:22 compute-0 sudo[26920]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:02:23 compute-0 python3[26922]: ansible-ansible.legacy.copy Invoked with dest=/etc/yum.repos.d/ src=/home/zuul/.ansible/tmp/ansible-tmp-1760144541.3154464-30235-84122535871710/source mode=0755 _original_basename=delorean-antelope-testing.repo follow=False checksum=0bdbb813b840548359ae77c28d76ca272ccaf31b backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:02:23 compute-0 sudo[26920]: pam_unix(sudo:session): session closed for user root
Oct 11 01:02:23 compute-0 sudo[26946]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ebnhppckyimpwphuaovpbrcpjhyigyvz ; /usr/bin/python3'
Oct 11 01:02:23 compute-0 sudo[26946]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:02:23 compute-0 python3[26948]: ansible-ansible.legacy.stat Invoked with path=/etc/yum.repos.d/repo-setup-centos-highavailability.repo follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 01:02:23 compute-0 sudo[26946]: pam_unix(sudo:session): session closed for user root
Oct 11 01:02:23 compute-0 sudo[27019]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-teurddnsmmtpdxfuxxpmcsbjskdnbzsv ; /usr/bin/python3'
Oct 11 01:02:23 compute-0 sudo[27019]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:02:23 compute-0 python3[27021]: ansible-ansible.legacy.copy Invoked with dest=/etc/yum.repos.d/ src=/home/zuul/.ansible/tmp/ansible-tmp-1760144541.3154464-30235-84122535871710/source mode=0755 _original_basename=repo-setup-centos-highavailability.repo follow=False checksum=55d0f695fd0d8f47cbc3044ce0dcf5f88862490f backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:02:23 compute-0 sudo[27019]: pam_unix(sudo:session): session closed for user root
Oct 11 01:02:23 compute-0 sudo[27045]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yisnftymscdtyoeeotzflkgojgjkqmvv ; /usr/bin/python3'
Oct 11 01:02:23 compute-0 sudo[27045]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:02:24 compute-0 python3[27047]: ansible-ansible.legacy.stat Invoked with path=/etc/yum.repos.d/repo-setup-centos-powertools.repo follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 01:02:24 compute-0 sudo[27045]: pam_unix(sudo:session): session closed for user root
Oct 11 01:02:24 compute-0 sudo[27118]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kteojhshfqjtnzxszhzzcbcrsmpdbzov ; /usr/bin/python3'
Oct 11 01:02:24 compute-0 sudo[27118]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:02:24 compute-0 python3[27120]: ansible-ansible.legacy.copy Invoked with dest=/etc/yum.repos.d/ src=/home/zuul/.ansible/tmp/ansible-tmp-1760144541.3154464-30235-84122535871710/source mode=0755 _original_basename=repo-setup-centos-powertools.repo follow=False checksum=4b0cf99aa89c5c5be0151545863a7a7568f67568 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:02:24 compute-0 sudo[27118]: pam_unix(sudo:session): session closed for user root
Oct 11 01:02:24 compute-0 sudo[27144]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-objjsdujhzipfclmemiaxxvnemfepyns ; /usr/bin/python3'
Oct 11 01:02:24 compute-0 sudo[27144]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:02:24 compute-0 python3[27146]: ansible-ansible.legacy.stat Invoked with path=/etc/yum.repos.d/repo-setup-centos-appstream.repo follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 01:02:24 compute-0 sudo[27144]: pam_unix(sudo:session): session closed for user root
Oct 11 01:02:25 compute-0 sudo[27217]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dusxwdyhoynxnbqiiciwoekeegpktblj ; /usr/bin/python3'
Oct 11 01:02:25 compute-0 sudo[27217]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:02:25 compute-0 python3[27219]: ansible-ansible.legacy.copy Invoked with dest=/etc/yum.repos.d/ src=/home/zuul/.ansible/tmp/ansible-tmp-1760144541.3154464-30235-84122535871710/source mode=0755 _original_basename=repo-setup-centos-appstream.repo follow=False checksum=e89244d2503b2996429dda1857290c1e91e393a1 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:02:25 compute-0 sudo[27217]: pam_unix(sudo:session): session closed for user root
Oct 11 01:02:25 compute-0 sudo[27243]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dfwktwggwwhtgouptxyqwgheraoiohqc ; /usr/bin/python3'
Oct 11 01:02:25 compute-0 sudo[27243]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:02:25 compute-0 python3[27245]: ansible-ansible.legacy.stat Invoked with path=/etc/yum.repos.d/repo-setup-centos-baseos.repo follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 01:02:25 compute-0 sudo[27243]: pam_unix(sudo:session): session closed for user root
Oct 11 01:02:25 compute-0 sudo[27316]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gzbpgruyvjolnxlbbtyvduiiylvfeynk ; /usr/bin/python3'
Oct 11 01:02:25 compute-0 sudo[27316]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:02:26 compute-0 python3[27318]: ansible-ansible.legacy.copy Invoked with dest=/etc/yum.repos.d/ src=/home/zuul/.ansible/tmp/ansible-tmp-1760144541.3154464-30235-84122535871710/source mode=0755 _original_basename=repo-setup-centos-baseos.repo follow=False checksum=36d926db23a40dbfa5c84b5e4d43eac6fa2301d6 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:02:26 compute-0 sudo[27316]: pam_unix(sudo:session): session closed for user root
Oct 11 01:02:26 compute-0 sudo[27342]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-amduorplkfpjbihdshfnbbcfhlszhbrv ; /usr/bin/python3'
Oct 11 01:02:26 compute-0 sudo[27342]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:02:26 compute-0 python3[27344]: ansible-ansible.legacy.stat Invoked with path=/etc/yum.repos.d/delorean.repo.md5 follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 01:02:26 compute-0 sudo[27342]: pam_unix(sudo:session): session closed for user root
Oct 11 01:02:26 compute-0 sudo[27415]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-onkuyrgbsamxkllpwislbdrbphskcgnz ; /usr/bin/python3'
Oct 11 01:02:26 compute-0 sudo[27415]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:02:26 compute-0 python3[27417]: ansible-ansible.legacy.copy Invoked with dest=/etc/yum.repos.d/ src=/home/zuul/.ansible/tmp/ansible-tmp-1760144541.3154464-30235-84122535871710/source mode=0755 _original_basename=delorean.repo.md5 follow=False checksum=5e44558a2b46929660a6b5bfc8824fb4521580a4 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:02:26 compute-0 sudo[27415]: pam_unix(sudo:session): session closed for user root
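
Each repo file above lands as an Ansible stat/copy pair: the stat checksums the target, and the copy uploads only when the SHA-1 differs, so unchanged files are skipped on reruns. The same check by hand (a sketch; delorean.repo stands in for any of the files):

    sha1sum /etc/yum.repos.d/delorean.repo    # compare against the source checksum
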
Oct 11 01:02:29 compute-0 sshd-session[27444]: Connection closed by 192.168.122.11 port 60386 [preauth]
Oct 11 01:02:29 compute-0 sshd-session[27443]: Connection closed by 192.168.122.11 port 60390 [preauth]
Oct 11 01:02:29 compute-0 sshd-session[27442]: Unable to negotiate with 192.168.122.11 port 60394: no matching host key type found. Their offer: ssh-ed25519 [preauth]
Oct 11 01:02:29 compute-0 sshd-session[27446]: Unable to negotiate with 192.168.122.11 port 60396: no matching host key type found. Their offer: sk-ecdsa-sha2-nistp256@openssh.com [preauth]
Oct 11 01:02:29 compute-0 sshd-session[27445]: Unable to negotiate with 192.168.122.11 port 60402: no matching host key type found. Their offer: sk-ssh-ed25519@openssh.com [preauth]
Oct 11 01:03:22 compute-0 systemd[1]: Starting Cleanup of Temporary Directories...
Oct 11 01:03:22 compute-0 systemd[1]: systemd-tmpfiles-clean.service: Deactivated successfully.
Oct 11 01:03:22 compute-0 systemd[1]: Finished Cleanup of Temporary Directories.
Oct 11 01:03:22 compute-0 systemd[1]: run-credentials-systemd\x2dtmpfiles\x2dclean.service.mount: Deactivated successfully.
Oct 11 01:05:05 compute-0 python3[27479]: ansible-ansible.legacy.command Invoked with _raw_params=hostname _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:07:10 compute-0 sshd-session[27481]: Received disconnect from 193.46.255.20 port 60570:11:  [preauth]
Oct 11 01:07:10 compute-0 sshd-session[27481]: Disconnected from authenticating user root 193.46.255.20 port 60570 [preauth]
Oct 11 01:10:05 compute-0 sshd-session[26561]: Received disconnect from 38.102.83.70 port 49606:11: disconnected by user
Oct 11 01:10:05 compute-0 sshd-session[26561]: Disconnected from user zuul 38.102.83.70 port 49606
Oct 11 01:10:05 compute-0 sshd-session[26558]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:10:05 compute-0 systemd[1]: session-6.scope: Deactivated successfully.
Oct 11 01:10:05 compute-0 systemd[1]: session-6.scope: Consumed 6.175s CPU time.
Oct 11 01:10:05 compute-0 systemd-logind[804]: Session 6 logged out. Waiting for processes to exit.
Oct 11 01:10:05 compute-0 systemd-logind[804]: Removed session 6.
Oct 11 01:16:29 compute-0 sshd-session[27487]: Received disconnect from 193.46.255.217 port 18616:11:  [preauth]
Oct 11 01:16:29 compute-0 sshd-session[27487]: Disconnected from authenticating user root 193.46.255.217 port 18616 [preauth]
Oct 11 01:17:20 compute-0 sshd-session[27489]: Accepted publickey for zuul from 192.168.122.30 port 34634 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:17:20 compute-0 systemd-logind[804]: New session 7 of user zuul.
Oct 11 01:17:20 compute-0 systemd[1]: Started Session 7 of User zuul.
Oct 11 01:17:20 compute-0 sshd-session[27489]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:17:21 compute-0 python3.9[27642]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:17:22 compute-0 sudo[27821]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kglwwlrsjrcpdobceemcqnkmzisatxiw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145442.3499207-32-225294745445148/AnsiballZ_command.py'
Oct 11 01:17:22 compute-0 sudo[27821]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:17:23 compute-0 python3.9[27823]: ansible-ansible.legacy.command Invoked with _raw_params=set -euxo pipefail
                                            pushd /var/tmp
                                            curl -sL https://github.com/openstack-k8s-operators/repo-setup/archive/refs/heads/main.tar.gz | tar -xz
                                            pushd repo-setup-main
                                            python3 -m venv ./venv
                                            PBR_VERSION=0.0.0 ./venv/bin/pip install ./
                                            ./venv/bin/repo-setup current-podified -b antelope
                                            popd
                                            rm -rf repo-setup-main
                                             _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
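
One detail in the script above: PBR_VERSION=0.0.0 is needed because the GitHub tarball carries no git metadata, so pbr would otherwise fail to derive a version during the pip install. The pinned install step in isolation (a sketch):

    PBR_VERSION=0.0.0 ./venv/bin/pip install ./
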
Oct 11 01:17:30 compute-0 sudo[27821]: pam_unix(sudo:session): session closed for user root
Oct 11 01:17:30 compute-0 sshd-session[27492]: Connection closed by 192.168.122.30 port 34634
Oct 11 01:17:30 compute-0 sshd-session[27489]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:17:30 compute-0 systemd[1]: session-7.scope: Deactivated successfully.
Oct 11 01:17:30 compute-0 systemd[1]: session-7.scope: Consumed 8.276s CPU time.
Oct 11 01:17:30 compute-0 systemd-logind[804]: Session 7 logged out. Waiting for processes to exit.
Oct 11 01:17:30 compute-0 systemd-logind[804]: Removed session 7.
Oct 11 01:17:45 compute-0 sshd-session[27880]: Accepted publickey for zuul from 192.168.122.30 port 45120 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:17:45 compute-0 systemd-logind[804]: New session 8 of user zuul.
Oct 11 01:17:45 compute-0 systemd[1]: Started Session 8 of User zuul.
Oct 11 01:17:45 compute-0 sshd-session[27880]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:17:46 compute-0 python3.9[28033]: ansible-ansible.legacy.ping Invoked with data=pong
Oct 11 01:17:48 compute-0 python3.9[28207]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:17:48 compute-0 sudo[28357]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vjfzzecpyidhrwxnvmeggdiqvudwkpkg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145468.4705448-45-50333850392925/AnsiballZ_command.py'
Oct 11 01:17:48 compute-0 sudo[28357]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:17:49 compute-0 python3.9[28359]: ansible-ansible.legacy.command Invoked with _raw_params=PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin which growvols
                                             _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:17:49 compute-0 sudo[28357]: pam_unix(sudo:session): session closed for user root
Oct 11 01:17:50 compute-0 sudo[28510]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-msiorjihtsdqsuithnlijyemdwtpkkbr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145469.5921628-57-199500661921985/AnsiballZ_stat.py'
Oct 11 01:17:50 compute-0 sudo[28510]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:17:50 compute-0 python3.9[28512]: ansible-ansible.builtin.stat Invoked with path=/etc/ansible/facts.d/bootc.fact follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:17:50 compute-0 sudo[28510]: pam_unix(sudo:session): session closed for user root
Oct 11 01:17:51 compute-0 sudo[28662]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kyagavfasjbuqilfpouyfekpqcnkmnvl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145470.5346138-65-192487396372158/AnsiballZ_file.py'
Oct 11 01:17:51 compute-0 sudo[28662]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:17:51 compute-0 python3.9[28664]: ansible-ansible.builtin.file Invoked with mode=755 path=/etc/ansible/facts.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:17:51 compute-0 sudo[28662]: pam_unix(sudo:session): session closed for user root
Oct 11 01:17:51 compute-0 sudo[28814]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gbfisesvwimhdrqkhxnluvapibnszicb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145471.5445533-73-74256462612838/AnsiballZ_stat.py'
Oct 11 01:17:51 compute-0 sudo[28814]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:17:52 compute-0 python3.9[28816]: ansible-ansible.legacy.stat Invoked with path=/etc/ansible/facts.d/bootc.fact follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:17:52 compute-0 sudo[28814]: pam_unix(sudo:session): session closed for user root
Oct 11 01:17:52 compute-0 sudo[28937]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bqenjevdwguxdlunpqsuueapitwzkkec ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145471.5445533-73-74256462612838/AnsiballZ_copy.py'
Oct 11 01:17:52 compute-0 sudo[28937]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:17:53 compute-0 python3.9[28939]: ansible-ansible.legacy.copy Invoked with dest=/etc/ansible/facts.d/bootc.fact mode=755 src=/home/zuul/.ansible/tmp/ansible-tmp-1760145471.5445533-73-74256462612838/.source.fact _original_basename=bootc.fact follow=False checksum=eb4122ce7fc50a38407beb511c4ff8c178005b12 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:17:53 compute-0 sudo[28937]: pam_unix(sudo:session): session closed for user root
Oct 11 01:17:53 compute-0 sudo[29089]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kgakfolnjaskuxtgvqyeiydeihsqmeli ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145473.3518007-88-165628050668491/AnsiballZ_setup.py'
Oct 11 01:17:53 compute-0 sudo[29089]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:17:54 compute-0 python3.9[29091]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:17:54 compute-0 sudo[29089]: pam_unix(sudo:session): session closed for user root
Oct 11 01:17:54 compute-0 sudo[29245]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nvcvbzsedjanpsouzcxoriqyzlsspivn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145474.4682698-96-217580166459241/AnsiballZ_file.py'
Oct 11 01:17:54 compute-0 sudo[29245]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:17:55 compute-0 python3.9[29247]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/log/journal setype=var_log_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:17:55 compute-0 sudo[29245]: pam_unix(sudo:session): session closed for user root
Oct 11 01:17:56 compute-0 python3.9[29397]: ansible-ansible.builtin.service_facts Invoked
Oct 11 01:18:01 compute-0 python3.9[29652]: ansible-ansible.builtin.lineinfile Invoked with line=cloud-init=disabled path=/proc/cmdline state=present backrefs=False create=False backup=False firstmatch=False unsafe_writes=False regexp=None search_string=None insertafter=None insertbefore=None validate=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:18:02 compute-0 python3.9[29802]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:18:03 compute-0 python3.9[29956]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local', 'distribution'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:18:04 compute-0 sudo[30112]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-imfcpaeauxucsyqhobubtxlcwmziodly ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145484.238784-144-119636706600490/AnsiballZ_setup.py'
Oct 11 01:18:04 compute-0 sudo[30112]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:18:04 compute-0 python3.9[30114]: ansible-ansible.legacy.setup Invoked with filter=['ansible_pkg_mgr'] gather_subset=['!all'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:18:05 compute-0 sudo[30112]: pam_unix(sudo:session): session closed for user root
Oct 11 01:18:05 compute-0 sudo[30196]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ljekaexxjmolbxfvkehqfungevsnsrnz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145484.238784-144-119636706600490/AnsiballZ_dnf.py'
Oct 11 01:18:05 compute-0 sudo[30196]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:18:05 compute-0 python3.9[30198]: ansible-ansible.legacy.dnf Invoked with name=['driverctl', 'lvm2', 'crudini', 'jq', 'nftables', 'NetworkManager', 'openstack-selinux', 'python3-libselinux', 'python3-pyyaml', 'rsync', 'tmpwatch', 'sysstat', 'iproute-tc', 'ksmtuned', 'systemd-container', 'crypto-policies-scripts', 'grubby', 'sos'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:18:09 compute-0 irqbalance[789]: Cannot change IRQ 26 affinity: Operation not permitted
Oct 11 01:18:09 compute-0 irqbalance[789]: IRQ 26 affinity is now unmanaged
Oct 11 01:18:49 compute-0 systemd[1]: Reloading.
Oct 11 01:18:49 compute-0 systemd-rc-local-generator[30391]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:18:49 compute-0 systemd[1]: Listening on Device-mapper event daemon FIFOs.
Oct 11 01:18:49 compute-0 systemd[1]: Reloading.
Oct 11 01:18:49 compute-0 systemd-rc-local-generator[30429]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:18:50 compute-0 systemd[1]: Starting Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling...
Oct 11 01:18:50 compute-0 systemd[1]: Finished Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling.
Oct 11 01:18:50 compute-0 systemd[1]: Reloading.
Oct 11 01:18:50 compute-0 systemd-rc-local-generator[30470]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:18:50 compute-0 systemd[1]: Listening on LVM2 poll daemon socket.
Oct 11 01:18:50 compute-0 dbus-broker-launch[764]: Noticed file-system modification, trigger reload.
Oct 11 01:18:50 compute-0 dbus-broker-launch[764]: Noticed file-system modification, trigger reload.
Oct 11 01:18:50 compute-0 dbus-broker-launch[764]: Noticed file-system modification, trigger reload.
Oct 11 01:19:53 compute-0 kernel: SELinux:  Converting 2713 SID table entries...
Oct 11 01:19:53 compute-0 kernel: SELinux:  policy capability network_peer_controls=1
Oct 11 01:19:53 compute-0 kernel: SELinux:  policy capability open_perms=1
Oct 11 01:19:53 compute-0 kernel: SELinux:  policy capability extended_socket_class=1
Oct 11 01:19:53 compute-0 kernel: SELinux:  policy capability always_check_network=0
Oct 11 01:19:53 compute-0 kernel: SELinux:  policy capability cgroup_seclabel=1
Oct 11 01:19:53 compute-0 kernel: SELinux:  policy capability nnp_nosuid_transition=1
Oct 11 01:19:53 compute-0 kernel: SELinux:  policy capability genfs_seclabel_symlinks=1
Oct 11 01:19:53 compute-0 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=8 res=1
Oct 11 01:19:53 compute-0 systemd[1]: Started /usr/bin/systemctl start man-db-cache-update.
Oct 11 01:19:53 compute-0 systemd[1]: Starting man-db-cache-update.service...
Oct 11 01:19:53 compute-0 systemd[1]: Reloading.
Oct 11 01:19:53 compute-0 systemd-rc-local-generator[30777]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:19:54 compute-0 systemd[1]: Starting dnf makecache...
Oct 11 01:19:54 compute-0 systemd[1]: Queuing reload/restart jobs for marked units…
Oct 11 01:19:54 compute-0 dnf[30821]: Failed determining last makecache time.
Oct 11 01:19:54 compute-0 systemd[1]: Starting PackageKit Daemon...
Oct 11 01:19:54 compute-0 PackageKit[31003]: daemon start
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-openstack-barbican-42b4c41831408a8e323  92 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-python-glean-10df0bd91b9bc5c9fd9cc02d7 169 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-openstack-cinder-1c00d6490d88e436f26ef 150 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 systemd[1]: Started PackageKit Daemon.
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-python-stevedore-c4acc5639fd2329372142 144 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-python-observabilityclient-2f31846d73c 153 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-diskimage-builder-7d793e664cf892461c55 131 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-openstack-nova-6f8decf0b4f1aa2e96292b6 101 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-python-designate-tests-tempest-347fdbc 124 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-openstack-glance-1fd12c29b339f30fe823e 130 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-openstack-keystone-e4b40af0ae3698fbbbb 116 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-openstack-manila-3c01b7181572c95dac462 135 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-python-vmware-nsxlib-458234972d1428ac9 126 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 sudo[30196]: pam_unix(sudo:session): session closed for user root
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-openstack-octavia-ba397f07a7331190208c 123 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-openstack-watcher-c014f81a8647287f6dcc 127 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-python-tcib-ff70d03bf5bc0bb6f3540a02d3 150 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-puppet-ceph-91ba84bc002c318a7f961d084e 163 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-openstack-swift-dc98a8463506ac520c469a 163 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-python-tempestconf-8515371b7cceebd4282 166 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 dnf[30821]: delorean-openstack-heat-ui-013accbfd179753bc3f0 157 kB/s | 3.0 kB     00:00
Oct 11 01:19:54 compute-0 dnf[30821]: CentOS Stream 9 - BaseOS                         51 kB/s | 6.7 kB     00:00
Oct 11 01:19:55 compute-0 systemd[1]: man-db-cache-update.service: Deactivated successfully.
Oct 11 01:19:55 compute-0 systemd[1]: Finished man-db-cache-update.service.
Oct 11 01:19:55 compute-0 systemd[1]: man-db-cache-update.service: Consumed 1.608s CPU time.
Oct 11 01:19:55 compute-0 systemd[1]: run-r08c1d71c392f477a8378f0626bfedd11.service: Deactivated successfully.
Oct 11 01:19:55 compute-0 sudo[31718]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iahxllnqfdbrtyfwjqxqnamitxfsmlhq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145594.8646114-156-30940106197734/AnsiballZ_command.py'
Oct 11 01:19:55 compute-0 sudo[31718]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:19:55 compute-0 dnf[30821]: CentOS Stream 9 - AppStream                      28 kB/s | 6.8 kB     00:00
Oct 11 01:19:55 compute-0 python3.9[31720]: ansible-ansible.legacy.command Invoked with _raw_params=rpm -V driverctl lvm2 crudini jq nftables NetworkManager openstack-selinux python3-libselinux python3-pyyaml rsync tmpwatch sysstat iproute-tc ksmtuned systemd-container crypto-policies-scripts grubby sos _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:19:55 compute-0 dnf[30821]: CentOS Stream 9 - CRB                            62 kB/s | 6.6 kB     00:00
Oct 11 01:19:55 compute-0 dnf[30821]: CentOS Stream 9 - Extras packages                74 kB/s | 8.0 kB     00:00
Oct 11 01:19:55 compute-0 dnf[30821]: dlrn-antelope-testing                            98 kB/s | 3.0 kB     00:00
Oct 11 01:19:55 compute-0 dnf[30821]: dlrn-antelope-build-deps                         97 kB/s | 3.0 kB     00:00
Oct 11 01:19:55 compute-0 dnf[30821]: centos9-rabbitmq                                 58 kB/s | 3.0 kB     00:00
Oct 11 01:19:55 compute-0 dnf[30821]: centos9-storage                                  78 kB/s | 3.0 kB     00:00
Oct 11 01:19:55 compute-0 dnf[30821]: centos9-opstools                                 99 kB/s | 3.0 kB     00:00
Oct 11 01:19:55 compute-0 dnf[30821]: NFV SIG OpenvSwitch                              76 kB/s | 3.0 kB     00:00
Oct 11 01:19:55 compute-0 dnf[30821]: repo-setup-centos-appstream                     150 kB/s | 4.4 kB     00:00
Oct 11 01:19:56 compute-0 dnf[30821]: repo-setup-centos-baseos                        169 kB/s | 3.9 kB     00:00
Oct 11 01:19:56 compute-0 dnf[30821]: repo-setup-centos-highavailability               89 kB/s | 3.9 kB     00:00
Oct 11 01:19:56 compute-0 dnf[30821]: repo-setup-centos-powertools                    141 kB/s | 4.3 kB     00:00
Oct 11 01:19:56 compute-0 dnf[30821]: Extra Packages for Enterprise Linux 9 - x86_64  288 kB/s |  35 kB     00:00
Oct 11 01:19:56 compute-0 sudo[31718]: pam_unix(sudo:session): session closed for user root
Oct 11 01:19:56 compute-0 dnf[30821]: Metadata cache created.
Oct 11 01:19:56 compute-0 systemd[1]: dnf-makecache.service: Deactivated successfully.
Oct 11 01:19:56 compute-0 systemd[1]: Finished dnf makecache.
Oct 11 01:19:56 compute-0 systemd[1]: dnf-makecache.service: Consumed 1.929s CPU time.
Oct 11 01:19:57 compute-0 sudo[32021]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rmridtjxttbpyohxtcyrvgxdvedzplti ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145596.800459-164-152252143924770/AnsiballZ_selinux.py'
Oct 11 01:19:57 compute-0 sudo[32021]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:19:57 compute-0 python3.9[32023]: ansible-ansible.posix.selinux Invoked with policy=targeted state=enforcing configfile=/etc/selinux/config update_kernel_param=False
Oct 11 01:19:57 compute-0 sudo[32021]: pam_unix(sudo:session): session closed for user root
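The ansible.posix.selinux call above pins the host to the targeted policy in enforcing mode, both for the running kernel and, via /etc/selinux/config, across reboots. A minimal sketch of the task as it was presumably written, reconstructed from the logged module arguments (the task name is invented):

    # Reconstruction from the logged arguments; the task name is an assumption.
    - name: Keep SELinux enforcing with the targeted policy
      ansible.posix.selinux:
        policy: targeted
        state: enforcing
        configfile: /etc/selinux/config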
Oct 11 01:19:58 compute-0 sudo[32173]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vlljpzmxqzcxwqhchonydkwziwlmlceg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145598.2985885-175-59161949913846/AnsiballZ_command.py'
Oct 11 01:19:58 compute-0 sudo[32173]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:19:58 compute-0 python3.9[32175]: ansible-ansible.legacy.command Invoked with cmd=dd if=/dev/zero of=/swap count=1024 bs=1M creates=/swap _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None removes=None stdin=None
Oct 11 01:20:00 compute-0 sudo[32173]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:00 compute-0 sudo[32326]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jwvipitcyvpyfkwaciuvbkptenbpfoot ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145600.3373008-183-127760228014490/AnsiballZ_file.py'
Oct 11 01:20:00 compute-0 sudo[32326]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:01 compute-0 python3.9[32328]: ansible-ansible.builtin.file Invoked with group=root mode=0600 owner=root path=/swap recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False state=None _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:20:01 compute-0 sudo[32326]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:01 compute-0 sudo[32478]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jwnvukrfmojvzojhwmlwivjkntunhorf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145601.300111-191-48062943668898/AnsiballZ_mount.py'
Oct 11 01:20:01 compute-0 sudo[32478]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:02 compute-0 python3.9[32480]: ansible-ansible.posix.mount Invoked with dump=0 fstype=swap name=none opts=sw passno=0 src=/swap state=present path=none boot=True opts_no_log=False backup=False fstab=None
Oct 11 01:20:02 compute-0 sudo[32478]: pam_unix(sudo:session): session closed for user root
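Between 01:19:58 and 01:20:02 the play stages a 1 GiB swap file: dd allocates /swap (guarded by creates=/swap), the file module locks it to root:root 0600, and ansible.posix.mount records the fstab entry without activating anything; mkswap and swapon follow at 01:20:41. A sketch of the three tasks, reconstructed from the logged arguments (task names are assumptions):

    - name: Allocate a 1 GiB swap file (skipped when /swap already exists)
      ansible.builtin.command:
        cmd: dd if=/dev/zero of=/swap count=1024 bs=1M
        creates: /swap

    - name: Restrict the swap file to root
      ansible.builtin.file:
        path: /swap
        owner: root
        group: root
        mode: "0600"

    - name: Persist the swap entry in /etc/fstab without mounting it
      ansible.posix.mount:
        src: /swap
        fstype: swap
        opts: sw
        path: none
        state: present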
Oct 11 01:20:03 compute-0 sudo[32630]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ynurdoavlaygqupokncroakcedeycuqq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145602.909369-219-125467599574422/AnsiballZ_file.py'
Oct 11 01:20:03 compute-0 sudo[32630]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:03 compute-0 python3.9[32632]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/pki/ca-trust/source/anchors setype=cert_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:20:03 compute-0 sudo[32630]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:04 compute-0 sudo[32782]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ivnutnrgdptwcdozhmbcnmplpbhkcagi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145603.747123-227-58504389044048/AnsiballZ_stat.py'
Oct 11 01:20:04 compute-0 sudo[32782]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:04 compute-0 python3.9[32784]: ansible-ansible.legacy.stat Invoked with path=/etc/pki/ca-trust/source/anchors/tls-ca-bundle.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:20:04 compute-0 sudo[32782]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:04 compute-0 sudo[32905]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ubvtesapdkjkujueybnznwnhnsmzyxvf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145603.747123-227-58504389044048/AnsiballZ_copy.py'
Oct 11 01:20:04 compute-0 sudo[32905]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:05 compute-0 python3.9[32907]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/ca-trust/source/anchors/tls-ca-bundle.pem group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145603.747123-227-58504389044048/.source.pem _original_basename=tls-ca-bundle.pem follow=False checksum=f57cfa4065467101f7ba494c2f61c0e2e8a6dad5 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:20:05 compute-0 sudo[32905]: pam_unix(sudo:session): session closed for user root
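The stat/copy pair above drops a CA bundle into the system-wide trust anchors; the directory is labelled cert_t first, and the bundle only reaches OpenSSL/NSS consumers once /usr/bin/update-ca-trust runs at 01:20:43 below. A reconstructed sketch (task names assumed):

    - name: Ensure the trust-anchor directory exists with the cert_t label
      ansible.builtin.file:
        path: /etc/pki/ca-trust/source/anchors
        state: directory
        owner: root
        group: root
        mode: "0755"
        setype: cert_t

    - name: Install the CA bundle
      ansible.builtin.copy:
        src: tls-ca-bundle.pem
        dest: /etc/pki/ca-trust/source/anchors/tls-ca-bundle.pem
        owner: root
        group: root
        mode: "0644"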
Oct 11 01:20:06 compute-0 sudo[33057]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ycxdthafyuyahqeukftlbvtnuntzuoom ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145605.7621386-254-152915199128653/AnsiballZ_getent.py'
Oct 11 01:20:06 compute-0 sudo[33057]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:08 compute-0 python3.9[33059]: ansible-ansible.builtin.getent Invoked with database=passwd key=qemu fail_key=True service=None split=None
Oct 11 01:20:08 compute-0 sudo[33057]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:09 compute-0 sudo[33210]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fkolyrissqkdqqclwxjntmxbnmjvbhuf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145608.7974932-262-140994162190749/AnsiballZ_group.py'
Oct 11 01:20:09 compute-0 sudo[33210]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:09 compute-0 python3.9[33212]: ansible-ansible.builtin.group Invoked with gid=107 name=qemu state=present force=False system=False local=False non_unique=False gid_min=None gid_max=None
Oct 11 01:20:09 compute-0 groupadd[33213]: group added to /etc/group: name=qemu, GID=107
Oct 11 01:20:09 compute-0 rsyslogd[998]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 01:20:09 compute-0 groupadd[33213]: group added to /etc/gshadow: name=qemu
Oct 11 01:20:09 compute-0 groupadd[33213]: new group: name=qemu, GID=107
Oct 11 01:20:09 compute-0 sudo[33210]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:10 compute-0 sudo[33369]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ifhbbuqdjdlyrvfithkznaqkeujixgjk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145609.851736-270-264508901495458/AnsiballZ_user.py'
Oct 11 01:20:10 compute-0 sudo[33369]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:10 compute-0 python3.9[33371]: ansible-ansible.builtin.user Invoked with comment=qemu user group=qemu groups=[''] name=qemu shell=/sbin/nologin state=present uid=107 non_unique=False force=False remove=False create_home=True system=False move_home=False append=False ssh_key_bits=0 ssh_key_type=rsa ssh_key_comment=ansible-generated on compute-0 update_password=always home=None password=NOT_LOGGING_PARAMETER login_class=None password_expire_max=None password_expire_min=None password_expire_warn=None hidden=None seuser=None skeleton=None generate_ssh_key=None ssh_key_file=None ssh_key_passphrase=NOT_LOGGING_PARAMETER expires=None password_lock=None local=None profile=None authorization=None role=None umask=None password_expire_account_disable=None uid_min=None uid_max=None
Oct 11 01:20:10 compute-0 useradd[33373]: new user: name=qemu, UID=107, GID=107, home=/home/qemu, shell=/sbin/nologin, from=/dev/pts/0
Oct 11 01:20:10 compute-0 sudo[33369]: pam_unix(sudo:session): session closed for user root
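The getent/group/user trio above pre-creates the qemu account with UID and GID pinned to 107, presumably so that later package installs adopt the fixed IDs instead of allocating their own. Sketch from the logged arguments:

    - name: Pin the qemu group to GID 107
      ansible.builtin.group:
        name: qemu
        gid: 107
        state: present

    - name: Pin the qemu user to UID 107
      ansible.builtin.user:
        name: qemu
        uid: 107
        group: qemu
        comment: qemu user
        shell: /sbin/nologin
        state: present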
Oct 11 01:20:11 compute-0 sudo[33529]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xanjayokzdszmrvztvoovglqjdgtxdcc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145610.996608-278-37376642747503/AnsiballZ_getent.py'
Oct 11 01:20:11 compute-0 sudo[33529]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:11 compute-0 python3.9[33531]: ansible-ansible.builtin.getent Invoked with database=passwd key=hugetlbfs fail_key=True service=None split=None
Oct 11 01:20:11 compute-0 sudo[33529]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:12 compute-0 sudo[33682]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nfrbrknnnboimxrtahncyjzurktotryl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145611.8850899-286-276160031099827/AnsiballZ_group.py'
Oct 11 01:20:12 compute-0 sudo[33682]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:12 compute-0 python3.9[33684]: ansible-ansible.builtin.group Invoked with gid=42477 name=hugetlbfs state=present force=False system=False local=False non_unique=False gid_min=None gid_max=None
Oct 11 01:20:12 compute-0 groupadd[33685]: group added to /etc/group: name=hugetlbfs, GID=42477
Oct 11 01:20:12 compute-0 groupadd[33685]: group added to /etc/gshadow: name=hugetlbfs
Oct 11 01:20:12 compute-0 groupadd[33685]: new group: name=hugetlbfs, GID=42477
Oct 11 01:20:12 compute-0 sudo[33682]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:13 compute-0 sudo[33840]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fdqvevodnpqvavhkbzhtyjdygbxcoino ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145612.849511-295-79831502922279/AnsiballZ_file.py'
Oct 11 01:20:13 compute-0 sudo[33840]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:13 compute-0 python3.9[33842]: ansible-ansible.builtin.file Invoked with group=qemu mode=0755 owner=qemu path=/var/lib/vhost_sockets setype=virt_cache_t seuser=system_u state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None serole=None selevel=None attributes=None
Oct 11 01:20:13 compute-0 sudo[33840]: pam_unix(sudo:session): session closed for user root
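/var/lib/vhost_sockets is prepared here, owned by qemu and labelled virt_cache_t; the name suggests it will hold vhost-user sockets shared between QEMU and a userspace datapath. A sketch of the logged task:

    - name: Create the vhost-user socket directory
      ansible.builtin.file:
        path: /var/lib/vhost_sockets
        state: directory
        owner: qemu
        group: qemu
        mode: "0755"
        seuser: system_u
        setype: virt_cache_t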
Oct 11 01:20:14 compute-0 sudo[33992]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jyvsuzbeswzbddlxagioutknhxfvxsvq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145613.8182282-306-246514048961190/AnsiballZ_dnf.py'
Oct 11 01:20:14 compute-0 sudo[33992]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:14 compute-0 python3.9[33994]: ansible-ansible.legacy.dnf Invoked with name=['dracut-config-generic'] state=absent allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:20:16 compute-0 sudo[33992]: pam_unix(sudo:session): session closed for user root
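dracut-config-generic ships a drop-in that forces generic (non-hostonly) initramfs builds; removing it, as the dnf task above does, presumably returns dracut to smaller host-only images. Equivalent sketch:

    - name: Remove the generic dracut configuration
      ansible.builtin.dnf:
        name: dracut-config-generic
        state: absent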
Oct 11 01:20:16 compute-0 sudo[34145]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pnpjpikallrrlvsamysssmnvktckecqx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145616.3003128-314-55373741593082/AnsiballZ_file.py'
Oct 11 01:20:16 compute-0 sudo[34145]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:16 compute-0 python3.9[34147]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/modules-load.d setype=etc_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:20:16 compute-0 sudo[34145]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:17 compute-0 sudo[34297]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zhbffctjbnmmktdyntoqegscdhubydhk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145617.139638-322-60613520633666/AnsiballZ_stat.py'
Oct 11 01:20:17 compute-0 sudo[34297]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:17 compute-0 python3.9[34299]: ansible-ansible.legacy.stat Invoked with path=/etc/modules-load.d/99-edpm.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:20:17 compute-0 sudo[34297]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:18 compute-0 sudo[34420]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bzvolgshinsnfhanjuwaswiixprnijud ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145617.139638-322-60613520633666/AnsiballZ_copy.py'
Oct 11 01:20:18 compute-0 sudo[34420]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:18 compute-0 python3.9[34422]: ansible-ansible.legacy.copy Invoked with dest=/etc/modules-load.d/99-edpm.conf group=root mode=0644 owner=root setype=etc_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760145617.139638-322-60613520633666/.source.conf follow=False _original_basename=edpm-modprobe.conf.j2 checksum=8021efe01721d8fa8cab46b95c00ec1be6dbb9d0 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:20:18 compute-0 sudo[34420]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:19 compute-0 sudo[34572]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zfkluivrxqrewsjdwovdeeapmdkfmnzf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145618.6679015-337-7647821600890/AnsiballZ_systemd.py'
Oct 11 01:20:19 compute-0 sudo[34572]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:19 compute-0 python3.9[34574]: ansible-ansible.builtin.systemd Invoked with name=systemd-modules-load.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:20:20 compute-0 systemd[1]: Starting Load Kernel Modules...
Oct 11 01:20:20 compute-0 kernel: bridge: filtering via arp/ip/ip6tables is no longer available by default. Update your scripts to load br_netfilter if you need this.
Oct 11 01:20:20 compute-0 kernel: Bridge firewalling registered
Oct 11 01:20:20 compute-0 systemd-modules-load[34578]: Inserted module 'br_netfilter'
Oct 11 01:20:20 compute-0 systemd[1]: Finished Load Kernel Modules.
Oct 11 01:20:20 compute-0 sudo[34572]: pam_unix(sudo:session): session closed for user root
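The copy of 99-edpm.conf (rendered from edpm-modprobe.conf.j2, per the logged _original_basename) plus the restart of systemd-modules-load.service is what pulls br_netfilter in; the kernel's bridge-firewalling lines above confirm the module landed. A sketch, assuming a template task produced the drop-in:

    # The .j2 source name comes from the log; the file contents are inferred
    # from the "Inserted module 'br_netfilter'" message.
    - name: Install the EDPM modules-load drop-in
      ansible.builtin.template:
        src: edpm-modprobe.conf.j2
        dest: /etc/modules-load.d/99-edpm.conf
        owner: root
        group: root
        mode: "0644"
        setype: etc_t

    - name: Load the listed modules immediately
      ansible.builtin.systemd:
        name: systemd-modules-load.service
        state: restarted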
Oct 11 01:20:21 compute-0 sudo[34731]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gbmnahnccxbwuvuxrqmzvtzgmvygpcca ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145621.1424387-345-80393696254459/AnsiballZ_stat.py'
Oct 11 01:20:21 compute-0 sudo[34731]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:21 compute-0 python3.9[34733]: ansible-ansible.legacy.stat Invoked with path=/etc/sysctl.d/99-edpm.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:20:21 compute-0 sudo[34731]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:22 compute-0 sudo[34854]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mkugbjrdssyruvnciikyycxequzscqbv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145621.1424387-345-80393696254459/AnsiballZ_copy.py'
Oct 11 01:20:22 compute-0 sudo[34854]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:22 compute-0 python3.9[34856]: ansible-ansible.legacy.copy Invoked with dest=/etc/sysctl.d/99-edpm.conf group=root mode=0644 owner=root setype=etc_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760145621.1424387-345-80393696254459/.source.conf follow=False _original_basename=edpm-sysctl.conf.j2 checksum=2a366439721b855adcfe4d7f152babb68596a007 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:20:22 compute-0 sudo[34854]: pam_unix(sudo:session): session closed for user root
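Likewise for sysctl: 99-edpm.conf is staged under /etc/sysctl.d from edpm-sysctl.conf.j2 and only takes effect when systemd-sysctl.service is restarted at 01:20:46 below. A sketch under the same template assumption:

    - name: Install the EDPM sysctl drop-in (applied by a later restart)
      ansible.builtin.template:
        src: edpm-sysctl.conf.j2
        dest: /etc/sysctl.d/99-edpm.conf
        owner: root
        group: root
        mode: "0644"
        setype: etc_t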
Oct 11 01:20:23 compute-0 sudo[35006]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rgvpqmuoktxannvzwtqtioziqpvpkxee ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145622.791802-363-120886090456676/AnsiballZ_dnf.py'
Oct 11 01:20:23 compute-0 sudo[35006]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:23 compute-0 python3.9[35008]: ansible-ansible.legacy.dnf Invoked with name=['tuned', 'tuned-profiles-cpu-partitioning'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:20:26 compute-0 dbus-broker-launch[764]: Noticed file-system modification, trigger reload.
Oct 11 01:20:26 compute-0 dbus-broker-launch[764]: Noticed file-system modification, trigger reload.
Oct 11 01:20:27 compute-0 systemd[1]: Started /usr/bin/systemctl start man-db-cache-update.
Oct 11 01:20:27 compute-0 systemd[1]: Starting man-db-cache-update.service...
Oct 11 01:20:27 compute-0 systemd[1]: Reloading.
Oct 11 01:20:27 compute-0 systemd-rc-local-generator[35072]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:20:27 compute-0 systemd[1]: Queuing reload/restart jobs for marked units…
Oct 11 01:20:27 compute-0 sudo[35006]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:28 compute-0 python3.9[36084]: ansible-ansible.builtin.stat Invoked with path=/etc/tuned/active_profile follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:20:29 compute-0 python3.9[36869]: ansible-ansible.builtin.slurp Invoked with src=/etc/tuned/active_profile
Oct 11 01:20:30 compute-0 python3.9[37527]: ansible-ansible.builtin.stat Invoked with path=/etc/tuned/throughput-performance-variables.conf follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:20:31 compute-0 sudo[38242]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rhcbiqgutwlikibvzvdkslhgscwpvrqj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145630.9052854-402-50465435594588/AnsiballZ_command.py'
Oct 11 01:20:31 compute-0 sudo[38242]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:31 compute-0 python3.9[38268]: ansible-ansible.legacy.command Invoked with _raw_params=/usr/sbin/tuned-adm profile throughput-performance _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:20:31 compute-0 systemd[1]: Starting Dynamic System Tuning Daemon...
Oct 11 01:20:32 compute-0 systemd[1]: Started Dynamic System Tuning Daemon.
Oct 11 01:20:32 compute-0 sudo[38242]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:32 compute-0 systemd[1]: man-db-cache-update.service: Deactivated successfully.
Oct 11 01:20:32 compute-0 systemd[1]: Finished man-db-cache-update.service.
Oct 11 01:20:32 compute-0 systemd[1]: man-db-cache-update.service: Consumed 7.252s CPU time.
Oct 11 01:20:32 compute-0 systemd[1]: run-r47ef87cba96e428daab6cc8b08175b27.service: Deactivated successfully.
Oct 11 01:20:32 compute-0 sudo[39551]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aavwutkctegurhcmbkkfffsrlqcljdel ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145632.5574133-411-8103523822473/AnsiballZ_systemd.py'
Oct 11 01:20:32 compute-0 sudo[39551]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:33 compute-0 python3.9[39554]: ansible-ansible.builtin.systemd Invoked with enabled=True name=tuned state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:20:34 compute-0 systemd[1]: Stopping Dynamic System Tuning Daemon...
Oct 11 01:20:34 compute-0 systemd[1]: tuned.service: Deactivated successfully.
Oct 11 01:20:34 compute-0 systemd[1]: Stopped Dynamic System Tuning Daemon.
Oct 11 01:20:34 compute-0 systemd[1]: Starting Dynamic System Tuning Daemon...
Oct 11 01:20:34 compute-0 systemd[1]: Started Dynamic System Tuning Daemon.
Oct 11 01:20:34 compute-0 sudo[39551]: pam_unix(sudo:session): session closed for user root
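The tuned sequence above installs the daemon plus the cpu-partitioning profile set, switches the active profile to throughput-performance with tuned-adm, then enables and restarts the service so the choice survives reboots. Reconstructed sketch of the three logged tasks:

    - name: Install tuned and the CPU-partitioning profiles
      ansible.builtin.dnf:
        name:
          - tuned
          - tuned-profiles-cpu-partitioning
        state: present

    - name: Activate the throughput-performance profile
      ansible.builtin.command:
        cmd: /usr/sbin/tuned-adm profile throughput-performance

    - name: Enable tuned persistently
      ansible.builtin.systemd:
        name: tuned
        enabled: true
        state: restarted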
Oct 11 01:20:35 compute-0 python3.9[39716]: ansible-ansible.builtin.slurp Invoked with src=/proc/cmdline
Oct 11 01:20:37 compute-0 sudo[39866]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qkrmegaowadjvuqaioctxrvxjcxvbnju ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145637.4592423-468-223806836947307/AnsiballZ_systemd.py'
Oct 11 01:20:37 compute-0 sudo[39866]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:38 compute-0 python3.9[39868]: ansible-ansible.builtin.systemd Invoked with enabled=False name=ksm.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:20:38 compute-0 systemd[1]: Reloading.
Oct 11 01:20:38 compute-0 systemd-rc-local-generator[39898]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:20:38 compute-0 sudo[39866]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:39 compute-0 sudo[40055]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dkyuldpzveekewwrlchrzqjuwjwzglcn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145638.6292236-468-229268813485685/AnsiballZ_systemd.py'
Oct 11 01:20:39 compute-0 sudo[40055]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:39 compute-0 python3.9[40057]: ansible-ansible.builtin.systemd Invoked with enabled=False name=ksmtuned.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:20:40 compute-0 systemd[1]: Reloading.
Oct 11 01:20:40 compute-0 systemd-rc-local-generator[40088]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:20:40 compute-0 sudo[40055]: pam_unix(sudo:session): session closed for user root
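ksm.service and ksmtuned.service are stopped and disabled one after the other; kernel same-page merging is typically switched off on compute hosts to avoid cross-VM page sharing, and the write to /sys/kernel/mm/ksm/run at 01:20:45 below complements this at the kernel level. A compact sketch of the two logged tasks, folded into a loop:

    - name: Stop and disable the KSM services
      ansible.builtin.systemd:
        name: "{{ item }}"
        state: stopped
        enabled: false
      loop:
        - ksm.service
        - ksmtuned.service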
Oct 11 01:20:41 compute-0 sudo[40245]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vryalfvircryzthgrsswtxoepgbgewxu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145640.9537659-484-236419105435922/AnsiballZ_command.py'
Oct 11 01:20:41 compute-0 sudo[40245]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:41 compute-0 python3.9[40247]: ansible-ansible.legacy.command Invoked with _raw_params=mkswap "/swap" _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:20:41 compute-0 sudo[40245]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:42 compute-0 sudo[40398]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-swwsglwrobiwysczanhqyfdnftwcilcp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145641.7743957-492-138611242372639/AnsiballZ_command.py'
Oct 11 01:20:42 compute-0 sudo[40398]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:42 compute-0 python3.9[40400]: ansible-ansible.legacy.command Invoked with _raw_params=swapon "/swap" _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:20:42 compute-0 kernel: Adding 1048572k swap on /swap.  Priority:-2 extents:1 across:1048572k 
Oct 11 01:20:42 compute-0 sudo[40398]: pam_unix(sudo:session): session closed for user root
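With the fstab entry in place since 01:20:02, mkswap formats /swap and swapon activates it; the kernel line confirms 1048572 KiB came online (the 1 GiB file minus one 4 KiB page for the swap signature). A sketch of the two logged commands; note the log shows no creates guard on either, so they would re-run on every pass:

    - name: Format the swap file
      ansible.builtin.command:
        cmd: mkswap /swap

    - name: Activate the swap file
      ansible.builtin.command:
        cmd: swapon /swap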
Oct 11 01:20:43 compute-0 sudo[40551]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gnkxxupcclegdcemusosfswuwopwneyt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145642.6305153-500-126043161597700/AnsiballZ_command.py'
Oct 11 01:20:43 compute-0 sudo[40551]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:43 compute-0 python3.9[40553]: ansible-ansible.legacy.command Invoked with _raw_params=/usr/bin/update-ca-trust _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:20:44 compute-0 sudo[40551]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:45 compute-0 sudo[40713]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iowdtxsdmmjmktbavydjmhjdebqtcsxq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145644.8919067-508-253944387562730/AnsiballZ_command.py'
Oct 11 01:20:45 compute-0 sudo[40713]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:45 compute-0 python3.9[40715]: ansible-ansible.legacy.command Invoked with _raw_params=echo 2 >/sys/kernel/mm/ksm/run _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:20:45 compute-0 sudo[40713]: pam_unix(sudo:session): session closed for user root
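One quirk worth flagging: the command above was invoked with _uses_shell=False, so no shell performs the `>` redirection and the tokens after echo are passed to it as literal arguments. If the intent was to write 2 into the KSM control file (which stops KSM and un-merges already-shared pages), a shell task would express it directly; this is a hypothetical correction, not what the log shows:

    # Hypothetical; the logged invocation used the command module without a shell.
    - name: Disable KSM and unmerge shared pages
      ansible.builtin.shell:
        cmd: echo 2 > /sys/kernel/mm/ksm/run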
Oct 11 01:20:46 compute-0 sudo[40866]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cbgkxtoewttjwcbshmfziclrbxlojjjr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145645.629169-516-170741513741882/AnsiballZ_systemd.py'
Oct 11 01:20:46 compute-0 sudo[40866]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:46 compute-0 python3.9[40868]: ansible-ansible.builtin.systemd Invoked with name=systemd-sysctl.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:20:46 compute-0 systemd[1]: systemd-sysctl.service: Deactivated successfully.
Oct 11 01:20:46 compute-0 systemd[1]: Stopped Apply Kernel Variables.
Oct 11 01:20:46 compute-0 systemd[1]: Stopping Apply Kernel Variables...
Oct 11 01:20:46 compute-0 systemd[1]: Starting Apply Kernel Variables...
Oct 11 01:20:46 compute-0 systemd[1]: run-credentials-systemd\x2dsysctl.service.mount: Deactivated successfully.
Oct 11 01:20:46 compute-0 systemd[1]: Finished Apply Kernel Variables.
Oct 11 01:20:46 compute-0 sudo[40866]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:46 compute-0 sshd-session[27883]: Connection closed by 192.168.122.30 port 45120
Oct 11 01:20:46 compute-0 sshd-session[27880]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:20:46 compute-0 systemd[1]: session-8.scope: Deactivated successfully.
Oct 11 01:20:46 compute-0 systemd[1]: session-8.scope: Consumed 2min 20.006s CPU time.
Oct 11 01:20:46 compute-0 systemd-logind[804]: Session 8 logged out. Waiting for processes to exit.
Oct 11 01:20:46 compute-0 systemd-logind[804]: Removed session 8.
Oct 11 01:20:52 compute-0 sshd-session[40898]: Accepted publickey for zuul from 192.168.122.30 port 54826 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:20:52 compute-0 systemd-logind[804]: New session 9 of user zuul.
Oct 11 01:20:52 compute-0 systemd[1]: Started Session 9 of User zuul.
Oct 11 01:20:52 compute-0 sshd-session[40898]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:20:53 compute-0 python3.9[41051]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:20:55 compute-0 sudo[41205]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qupbkdbedkcxmhprrxxxrfubjchwliwt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145654.6129885-36-279732935744341/AnsiballZ_getent.py'
Oct 11 01:20:55 compute-0 sudo[41205]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:55 compute-0 python3.9[41207]: ansible-ansible.builtin.getent Invoked with database=passwd key=openvswitch fail_key=True service=None split=None
Oct 11 01:20:55 compute-0 sudo[41205]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:56 compute-0 sudo[41358]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ajuqgsfggwkfmsqgbehijisqhpftjmbs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145655.6434135-44-66660748579577/AnsiballZ_group.py'
Oct 11 01:20:56 compute-0 sudo[41358]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:56 compute-0 python3.9[41360]: ansible-ansible.builtin.group Invoked with gid=42476 name=openvswitch state=present force=False system=False local=False non_unique=False gid_min=None gid_max=None
Oct 11 01:20:56 compute-0 groupadd[41361]: group added to /etc/group: name=openvswitch, GID=42476
Oct 11 01:20:56 compute-0 groupadd[41361]: group added to /etc/gshadow: name=openvswitch
Oct 11 01:20:56 compute-0 groupadd[41361]: new group: name=openvswitch, GID=42476
Oct 11 01:20:56 compute-0 sudo[41358]: pam_unix(sudo:session): session closed for user root
Oct 11 01:20:57 compute-0 sudo[41516]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jcyoovttyiopzinboowpbhnybesmnpcq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145656.9328516-52-82744710558721/AnsiballZ_user.py'
Oct 11 01:20:57 compute-0 sudo[41516]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:20:57 compute-0 python3.9[41518]: ansible-ansible.builtin.user Invoked with comment=openvswitch user group=openvswitch groups=['hugetlbfs'] name=openvswitch shell=/sbin/nologin state=present uid=42476 non_unique=False force=False remove=False create_home=True system=False move_home=False append=False ssh_key_bits=0 ssh_key_type=rsa ssh_key_comment=ansible-generated on compute-0 update_password=always home=None password=NOT_LOGGING_PARAMETER login_class=None password_expire_max=None password_expire_min=None password_expire_warn=None hidden=None seuser=None skeleton=None generate_ssh_key=None ssh_key_file=None ssh_key_passphrase=NOT_LOGGING_PARAMETER expires=None password_lock=None local=None profile=None authorization=None role=None umask=None password_expire_account_disable=None uid_min=None uid_max=None
Oct 11 01:20:58 compute-0 useradd[41520]: new user: name=openvswitch, UID=42476, GID=42476, home=/home/openvswitch, shell=/sbin/nologin, from=/dev/pts/0
Oct 11 01:20:58 compute-0 useradd[41520]: add 'openvswitch' to group 'hugetlbfs'
Oct 11 01:20:58 compute-0 useradd[41520]: add 'openvswitch' to shadow group 'hugetlbfs'
Oct 11 01:20:59 compute-0 sudo[41516]: pam_unix(sudo:session): session closed for user root
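The openvswitch account mirrors the qemu pattern: UID/GID pinned to 42476 plus membership in the hugetlbfs group created at 01:20:12, which plausibly lets ovs-vswitchd and QEMU share hugepage-backed memory for vhost-user. Sketch from the logged arguments:

    - name: Pin the openvswitch group
      ansible.builtin.group:
        name: openvswitch
        gid: 42476
        state: present

    - name: Pin the openvswitch user and grant hugetlbfs membership
      ansible.builtin.user:
        name: openvswitch
        uid: 42476
        group: openvswitch
        groups:
          - hugetlbfs
        comment: openvswitch user
        shell: /sbin/nologin
        state: present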
Oct 11 01:20:59 compute-0 sudo[41676]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bcvohlchapimhfggregqsgqvwruoptyl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145659.2824996-62-54032663065825/AnsiballZ_setup.py'
Oct 11 01:20:59 compute-0 sudo[41676]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:00 compute-0 python3.9[41678]: ansible-ansible.legacy.setup Invoked with filter=['ansible_pkg_mgr'] gather_subset=['!all'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:21:00 compute-0 sudo[41676]: pam_unix(sudo:session): session closed for user root
Oct 11 01:21:00 compute-0 sudo[41760]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bfjhyybgidutasdkuwwhjdkqjzyqwlpf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145659.2824996-62-54032663065825/AnsiballZ_dnf.py'
Oct 11 01:21:00 compute-0 sudo[41760]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:01 compute-0 python3.9[41762]: ansible-ansible.legacy.dnf Invoked with download_only=True name=['openvswitch'] allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None state=None
Oct 11 01:21:03 compute-0 sudo[41760]: pam_unix(sudo:session): session closed for user root
Oct 11 01:21:04 compute-0 sudo[41923]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fgqaviaxhbvlykzyeazwzkpcmgjeyjdy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145664.3631425-76-20461213939889/AnsiballZ_dnf.py'
Oct 11 01:21:04 compute-0 sudo[41923]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:05 compute-0 python3.9[41925]: ansible-ansible.legacy.dnf Invoked with name=['openvswitch'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:21:16 compute-0 kernel: SELinux:  Converting 2724 SID table entries...
Oct 11 01:21:16 compute-0 kernel: SELinux:  policy capability network_peer_controls=1
Oct 11 01:21:16 compute-0 kernel: SELinux:  policy capability open_perms=1
Oct 11 01:21:16 compute-0 kernel: SELinux:  policy capability extended_socket_class=1
Oct 11 01:21:16 compute-0 kernel: SELinux:  policy capability always_check_network=0
Oct 11 01:21:16 compute-0 kernel: SELinux:  policy capability cgroup_seclabel=1
Oct 11 01:21:16 compute-0 kernel: SELinux:  policy capability nnp_nosuid_transition=1
Oct 11 01:21:16 compute-0 kernel: SELinux:  policy capability genfs_seclabel_symlinks=1
Oct 11 01:21:16 compute-0 groupadd[41948]: group added to /etc/group: name=unbound, GID=993
Oct 11 01:21:16 compute-0 groupadd[41948]: group added to /etc/gshadow: name=unbound
Oct 11 01:21:16 compute-0 groupadd[41948]: new group: name=unbound, GID=993
Oct 11 01:21:16 compute-0 useradd[41955]: new user: name=unbound, UID=993, GID=993, home=/var/lib/unbound, shell=/sbin/nologin, from=none
Oct 11 01:21:16 compute-0 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=9 res=1
Oct 11 01:21:16 compute-0 systemd[1]: Started daily update of the root trust anchor for DNSSEC.
Oct 11 01:21:18 compute-0 systemd[1]: Started /usr/bin/systemctl start man-db-cache-update.
Oct 11 01:21:18 compute-0 systemd[1]: Starting man-db-cache-update.service...
Oct 11 01:21:18 compute-0 systemd[1]: Reloading.
Oct 11 01:21:18 compute-0 systemd-rc-local-generator[42453]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:21:18 compute-0 systemd-sysv-generator[42457]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:21:18 compute-0 systemd[1]: Queuing reload/restart jobs for marked units…
Oct 11 01:21:18 compute-0 sudo[41923]: pam_unix(sudo:session): session closed for user root
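openvswitch is handled in two dnf passes: a download_only=True fetch at 01:21:01, then the actual install, which keeps the network-dependent step separate from the rpm transaction. Reconstructed sketch of the pair:

    - name: Pre-fetch the openvswitch packages
      ansible.builtin.dnf:
        name: openvswitch
        download_only: true

    - name: Install from the already-downloaded packages
      ansible.builtin.dnf:
        name: openvswitch
        state: present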
Oct 11 01:21:19 compute-0 systemd[1]: man-db-cache-update.service: Deactivated successfully.
Oct 11 01:21:19 compute-0 systemd[1]: Finished man-db-cache-update.service.
Oct 11 01:21:19 compute-0 systemd[1]: man-db-cache-update.service: Consumed 1.133s CPU time.
Oct 11 01:21:19 compute-0 systemd[1]: run-r84620854c9964ea19daeb03bb9f42566.service: Deactivated successfully.
Oct 11 01:21:19 compute-0 sudo[43027]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tqoxlnapsnbnwlnofodvdmgjonprqpxq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145678.9588056-84-152262830961063/AnsiballZ_systemd.py'
Oct 11 01:21:19 compute-0 sudo[43027]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:19 compute-0 python3.9[43029]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=openvswitch.service state=started daemon_reload=False daemon_reexec=False scope=system no_block=False force=None
Oct 11 01:21:20 compute-0 systemd[1]: Reloading.
Oct 11 01:21:20 compute-0 systemd-rc-local-generator[43062]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:21:20 compute-0 systemd-sysv-generator[43067]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:21:20 compute-0 systemd[1]: Starting Open vSwitch Database Unit...
Oct 11 01:21:20 compute-0 chown[43072]: /usr/bin/chown: cannot access '/run/openvswitch': No such file or directory
Oct 11 01:21:20 compute-0 ovs-ctl[43078]: /etc/openvswitch/conf.db does not exist ... (warning).
Oct 11 01:21:20 compute-0 ovs-ctl[43078]: Creating empty database /etc/openvswitch/conf.db [  OK  ]
Oct 11 01:21:20 compute-0 ovs-ctl[43078]: Starting ovsdb-server [  OK  ]
Oct 11 01:21:20 compute-0 ovs-vsctl[43127]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait -- init -- set Open_vSwitch . db-version=8.5.1
Oct 11 01:21:20 compute-0 ovs-vsctl[43146]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait set Open_vSwitch . ovs-version=3.3.5-115.el9s "external-ids:system-id=\"47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6\"" "external-ids:rundir=\"/var/run/openvswitch\"" "system-type=\"centos\"" "system-version=\"9\""
Oct 11 01:21:20 compute-0 ovs-ctl[43078]: Configuring Open vSwitch system IDs [  OK  ]
Oct 11 01:21:20 compute-0 ovs-vsctl[43152]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Oct 11 01:21:20 compute-0 ovs-ctl[43078]: Enabling remote OVSDB managers [  OK  ]
Oct 11 01:21:20 compute-0 systemd[1]: Started Open vSwitch Database Unit.
Oct 11 01:21:20 compute-0 systemd[1]: Starting Open vSwitch Delete Transient Ports...
Oct 11 01:21:20 compute-0 systemd[1]: Finished Open vSwitch Delete Transient Ports.
Oct 11 01:21:20 compute-0 systemd[1]: Starting Open vSwitch Forwarding Unit...
Oct 11 01:21:21 compute-0 kernel: openvswitch: Open vSwitch switching datapath
Oct 11 01:21:21 compute-0 ovs-ctl[43197]: Inserting openvswitch module [  OK  ]
Oct 11 01:21:21 compute-0 ovs-ctl[43166]: Starting ovs-vswitchd [  OK  ]
Oct 11 01:21:21 compute-0 ovs-vsctl[43217]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Oct 11 01:21:21 compute-0 ovs-ctl[43166]: Enabling remote OVSDB managers [  OK  ]
Oct 11 01:21:21 compute-0 systemd[1]: Started Open vSwitch Forwarding Unit.
Oct 11 01:21:21 compute-0 systemd[1]: Starting Open vSwitch...
Oct 11 01:21:21 compute-0 systemd[1]: Finished Open vSwitch.
Oct 11 01:21:21 compute-0 sudo[43027]: pam_unix(sudo:session): session closed for user root
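A single systemd task drives the whole start-up above: openvswitch.service pulls in ovsdb-server (creating /etc/openvswitch/conf.db on first start, hence the harmless chown and conf.db warnings) and ovs-vswitchd, while ovs-vsctl seeds db-version, system-id and the hostname into the Open_vSwitch table. The task itself reduces to:

    - name: Enable and start Open vSwitch
      ansible.builtin.systemd:
        name: openvswitch.service
        enabled: true
        masked: false
        state: started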
Oct 11 01:21:22 compute-0 python3.9[43369]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'selinux'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:21:23 compute-0 sudo[43519]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dboghrnrjpyteqtxqwojmgmixrxihumk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145682.576007-102-248547528682527/AnsiballZ_sefcontext.py'
Oct 11 01:21:23 compute-0 sudo[43519]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:23 compute-0 python3.9[43521]: ansible-community.general.sefcontext Invoked with selevel=s0 setype=container_file_t state=present target=/var/lib/edpm-config(/.*)? ignore_selinux_state=False ftype=a reload=True substitute=None seuser=None
Oct 11 01:21:24 compute-0 kernel: SELinux:  Converting 2738 SID table entries...
Oct 11 01:21:24 compute-0 kernel: SELinux:  policy capability network_peer_controls=1
Oct 11 01:21:24 compute-0 kernel: SELinux:  policy capability open_perms=1
Oct 11 01:21:24 compute-0 kernel: SELinux:  policy capability extended_socket_class=1
Oct 11 01:21:24 compute-0 kernel: SELinux:  policy capability always_check_network=0
Oct 11 01:21:24 compute-0 kernel: SELinux:  policy capability cgroup_seclabel=1
Oct 11 01:21:24 compute-0 kernel: SELinux:  policy capability nnp_nosuid_transition=1
Oct 11 01:21:24 compute-0 kernel: SELinux:  policy capability genfs_seclabel_symlinks=1
Oct 11 01:21:24 compute-0 sudo[43519]: pam_unix(sudo:session): session closed for user root
Oct 11 01:21:25 compute-0 python3.9[43676]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local', 'distribution'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:21:26 compute-0 sudo[43832]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pltzppgsnnxhtmzlfibyilaolwxbessl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145686.3551066-120-235412159010738/AnsiballZ_dnf.py'
Oct 11 01:21:26 compute-0 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=10 res=1
Oct 11 01:21:26 compute-0 sudo[43832]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:26 compute-0 python3.9[43834]: ansible-ansible.legacy.dnf Invoked with name=['driverctl', 'lvm2', 'crudini', 'jq', 'nftables', 'NetworkManager', 'openstack-selinux', 'python3-libselinux', 'python3-pyyaml', 'rsync', 'tmpwatch', 'sysstat', 'iproute-tc', 'ksmtuned', 'systemd-container', 'crypto-policies-scripts', 'grubby', 'sos'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:21:28 compute-0 sudo[43832]: pam_unix(sudo:session): session closed for user root
Oct 11 01:21:28 compute-0 sudo[43985]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sxkumvdvzvkffzeusfhrfrufepgyazgw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145688.3807042-128-128432603463512/AnsiballZ_command.py'
Oct 11 01:21:28 compute-0 sudo[43985]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:29 compute-0 python3.9[43987]: ansible-ansible.legacy.command Invoked with _raw_params=rpm -V driverctl lvm2 crudini jq nftables NetworkManager openstack-selinux python3-libselinux python3-pyyaml rsync tmpwatch sysstat iproute-tc ksmtuned systemd-container crypto-policies-scripts grubby sos _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:21:29 compute-0 sudo[43985]: pam_unix(sudo:session): session closed for user root
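The dnf task above installs the EDPM host baseline in one transaction, and the follow-up rpm -V pass verifies the files of exactly that package set against the rpm database. Reconstructed sketch of the install task:

    - name: Install the EDPM host baseline
      ansible.builtin.dnf:
        name:
          - driverctl
          - lvm2
          - crudini
          - jq
          - nftables
          - NetworkManager
          - openstack-selinux
          - python3-libselinux
          - python3-pyyaml
          - rsync
          - tmpwatch
          - sysstat
          - iproute-tc
          - ksmtuned
          - systemd-container
          - crypto-policies-scripts
          - grubby
          - sos
        state: present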
Oct 11 01:21:30 compute-0 sudo[44272]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lmhvodmgefhqaywthshzolcjqnlmfuji ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145690.2011073-136-67540870442978/AnsiballZ_file.py'
Oct 11 01:21:30 compute-0 sudo[44272]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:30 compute-0 python3.9[44274]: ansible-ansible.builtin.file Invoked with mode=0750 path=/var/lib/edpm-config selevel=s0 setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None attributes=None
Oct 11 01:21:31 compute-0 sudo[44272]: pam_unix(sudo:session): session closed for user root
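The sefcontext task registers a persistent file-context rule for /var/lib/edpm-config (the policy reload it triggers is visible in the SELinux kernel lines at 01:21:24), after which the directory is created already carrying that label. Sketch from the logged arguments:

    - name: Register the SELinux file context for edpm-config
      community.general.sefcontext:
        target: '/var/lib/edpm-config(/.*)?'
        setype: container_file_t
        selevel: s0
        state: present
        reload: true

    - name: Create the directory with the container_file_t label
      ansible.builtin.file:
        path: /var/lib/edpm-config
        state: directory
        mode: "0750"
        setype: container_file_t
        selevel: s0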
Oct 11 01:21:32 compute-0 python3.9[44424]: ansible-ansible.builtin.stat Invoked with path=/etc/cloud/cloud.cfg.d follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:21:32 compute-0 sudo[44576]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cvetvrlkdaohimcplgncnasgrfncxcgs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145692.2969627-152-151167112237970/AnsiballZ_dnf.py'
Oct 11 01:21:32 compute-0 sudo[44576]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:32 compute-0 python3.9[44578]: ansible-ansible.legacy.dnf Invoked with name=['NetworkManager-ovs'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:21:34 compute-0 systemd[1]: Started /usr/bin/systemctl start man-db-cache-update.
Oct 11 01:21:34 compute-0 systemd[1]: Starting man-db-cache-update.service...
Oct 11 01:21:34 compute-0 systemd[1]: Reloading.
Oct 11 01:21:34 compute-0 systemd-rc-local-generator[44615]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:21:34 compute-0 systemd-sysv-generator[44619]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:21:35 compute-0 systemd[1]: Queuing reload/restart jobs for marked units…
Oct 11 01:21:35 compute-0 systemd[1]: man-db-cache-update.service: Deactivated successfully.
Oct 11 01:21:35 compute-0 systemd[1]: Finished man-db-cache-update.service.
Oct 11 01:21:35 compute-0 systemd[1]: run-rbd8399fc1c324577a7fb8c4bc9d1994f.service: Deactivated successfully.
Oct 11 01:21:35 compute-0 sudo[44576]: pam_unix(sudo:session): session closed for user root
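NetworkManager-ovs supplies the OVS device plugin, and NetworkManager has to be restarted before it can manage ovs-bridge, ovs-port and ovs-interface connections; the restarted daemon indeed logs loading libnm-device-plugin-ovs.so at 01:21:36 below. Sketch of the pair:

    - name: Install the NetworkManager OVS plugin
      ansible.builtin.dnf:
        name: NetworkManager-ovs
        state: present

    - name: Restart NetworkManager so the plugin is loaded
      ansible.builtin.systemd:
        name: NetworkManager
        state: restarted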
Oct 11 01:21:36 compute-0 sudo[44893]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vzqpedongkidasmvkxcevbqxibehkqjc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145695.8742778-160-247532119539304/AnsiballZ_systemd.py'
Oct 11 01:21:36 compute-0 sudo[44893]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:36 compute-0 python3.9[44895]: ansible-ansible.builtin.systemd Invoked with name=NetworkManager state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:21:36 compute-0 systemd[1]: NetworkManager-wait-online.service: Deactivated successfully.
Oct 11 01:21:36 compute-0 systemd[1]: Stopped Network Manager Wait Online.
Oct 11 01:21:36 compute-0 systemd[1]: Stopping Network Manager Wait Online...
Oct 11 01:21:36 compute-0 systemd[1]: Stopping Network Manager...
Oct 11 01:21:36 compute-0 NetworkManager[3938]: <info>  [1760145696.6631] caught SIGTERM, shutting down normally.
Oct 11 01:21:36 compute-0 NetworkManager[3938]: <info>  [1760145696.6658] dhcp4 (eth0): canceled DHCP transaction
Oct 11 01:21:36 compute-0 NetworkManager[3938]: <info>  [1760145696.6658] dhcp4 (eth0): activation: beginning transaction (timeout in 45 seconds)
Oct 11 01:21:36 compute-0 NetworkManager[3938]: <info>  [1760145696.6658] dhcp4 (eth0): state changed no lease
Oct 11 01:21:36 compute-0 NetworkManager[3938]: <info>  [1760145696.6663] manager: NetworkManager state is now CONNECTED_SITE
Oct 11 01:21:36 compute-0 NetworkManager[3938]: <info>  [1760145696.6736] exiting (success)
Oct 11 01:21:36 compute-0 systemd[1]: Starting Network Manager Script Dispatcher Service...
Oct 11 01:21:36 compute-0 systemd[1]: Started Network Manager Script Dispatcher Service.
Oct 11 01:21:36 compute-0 systemd[1]: NetworkManager.service: Deactivated successfully.
Oct 11 01:21:36 compute-0 systemd[1]: Stopped Network Manager.
Oct 11 01:21:36 compute-0 systemd[1]: NetworkManager.service: Consumed 11.291s CPU time, 4.3M memory peak, read 0B from disk, written 16.5K to disk.
Oct 11 01:21:36 compute-0 systemd[1]: Starting Network Manager...
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.7681] NetworkManager (version 1.54.1-1.el9) is starting... (after a restart, boot:eb68ea8c-2b5a-452e-9a83-23761d4fd4c0)
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.7683] Read config: /etc/NetworkManager/NetworkManager.conf, /run/NetworkManager/conf.d/15-carrier-timeout.conf
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.7754] manager[0x55a3b8bd4090]: monitoring kernel firmware directory '/lib/firmware'.
Oct 11 01:21:36 compute-0 systemd[1]: Starting Hostname Service...
Oct 11 01:21:36 compute-0 systemd[1]: Started Hostname Service.
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.8865] hostname: hostname: using hostnamed
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.8867] hostname: static hostname changed from (none) to "compute-0"
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.8877] dns-mgr: init: dns=default,systemd-resolved rc-manager=symlink (auto)
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.8886] manager[0x55a3b8bd4090]: rfkill: Wi-Fi hardware radio set enabled
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.8886] manager[0x55a3b8bd4090]: rfkill: WWAN hardware radio set enabled
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.8921] Loaded device plugin: NMOvsFactory (/usr/lib64/NetworkManager/1.54.1-1.el9/libnm-device-plugin-ovs.so)
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.8937] Loaded device plugin: NMTeamFactory (/usr/lib64/NetworkManager/1.54.1-1.el9/libnm-device-plugin-team.so)
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.8938] manager: rfkill: Wi-Fi enabled by radio killswitch; enabled by state file
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.8939] manager: rfkill: WWAN enabled by radio killswitch; enabled by state file
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.8940] manager: Networking is enabled by state file
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.8944] settings: Loaded settings plugin: keyfile (internal)
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.8950] settings: Loaded settings plugin: ifcfg-rh ("/usr/lib64/NetworkManager/1.54.1-1.el9/libnm-settings-plugin-ifcfg-rh.so")
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.8988] Warning: the ifcfg-rh plugin is deprecated, please migrate connections to the keyfile format using "nmcli connection migrate"
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9005] dhcp: init: Using DHCP client 'internal'
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9009] manager: (lo): new Loopback device (/org/freedesktop/NetworkManager/Devices/1)
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9019] device (lo): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9028] device (lo): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9041] device (lo): Activation: starting connection 'lo' (33081159-a34e-4514-87f3-ab50b6bb8250)
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9050] device (eth0): carrier: link connected
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9057] manager: (eth0): new Ethernet device (/org/freedesktop/NetworkManager/Devices/2)
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9065] manager: (eth0): assume: will attempt to assume matching connection 'System eth0' (5fb06bd0-0bb0-7ffb-45f1-d6edd65f3e03) (indicated)
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9066] device (eth0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'assume')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9078] device (eth0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'assume')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9088] device (eth0): Activation: starting connection 'System eth0' (5fb06bd0-0bb0-7ffb-45f1-d6edd65f3e03)
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9097] device (eth1): carrier: link connected
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9104] manager: (eth1): new Ethernet device (/org/freedesktop/NetworkManager/Devices/3)
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9113] manager: (eth1): assume: will attempt to assume matching connection 'ci-private-network' (35c95777-b4d1-53c3-bd1c-f3dcadb92093) (indicated)
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9114] device (eth1): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'assume')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9123] device (eth1): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'assume')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9134] device (eth1): Activation: starting connection 'ci-private-network' (35c95777-b4d1-53c3-bd1c-f3dcadb92093)
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9142] bus-manager: acquired D-Bus service "org.freedesktop.NetworkManager"
Oct 11 01:21:36 compute-0 systemd[1]: Started Network Manager.
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9153] device (lo): state change: disconnected -> prepare (reason 'none', managed-type: 'external')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9176] device (lo): state change: prepare -> config (reason 'none', managed-type: 'external')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9179] device (lo): state change: config -> ip-config (reason 'none', managed-type: 'external')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9183] device (eth0): state change: disconnected -> prepare (reason 'none', managed-type: 'assume')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9188] device (eth0): state change: prepare -> config (reason 'none', managed-type: 'assume')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9191] device (eth1): state change: disconnected -> prepare (reason 'none', managed-type: 'assume')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9195] device (eth1): state change: prepare -> config (reason 'none', managed-type: 'assume')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9202] device (lo): state change: ip-config -> ip-check (reason 'none', managed-type: 'external')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9215] device (eth0): state change: config -> ip-config (reason 'none', managed-type: 'assume')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9228] dhcp4 (eth0): activation: beginning transaction (timeout in 45 seconds)
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9244] device (eth1): state change: config -> ip-config (reason 'none', managed-type: 'assume')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9258] device (eth1): state change: ip-config -> ip-check (reason 'none', managed-type: 'assume')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9265] device (lo): state change: ip-check -> secondaries (reason 'none', managed-type: 'external')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9266] device (lo): state change: secondaries -> activated (reason 'none', managed-type: 'external')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9271] device (lo): Activation: successful, device activated.
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9279] dhcp4 (eth0): state changed new lease, address=38.102.83.82
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9285] policy: set 'System eth0' (eth0) as default for IPv4 routing and DNS
Oct 11 01:21:36 compute-0 systemd[1]: Starting Network Manager Wait Online...
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9374] device (eth0): state change: ip-config -> ip-check (reason 'none', managed-type: 'assume')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9389] device (eth1): state change: ip-check -> secondaries (reason 'none', managed-type: 'assume')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9396] device (eth1): state change: secondaries -> activated (reason 'none', managed-type: 'assume')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9404] manager: NetworkManager state is now CONNECTED_LOCAL
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9415] device (eth1): Activation: successful, device activated.
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9444] device (eth0): state change: ip-check -> secondaries (reason 'none', managed-type: 'assume')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9448] device (eth0): state change: secondaries -> activated (reason 'none', managed-type: 'assume')
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9457] manager: NetworkManager state is now CONNECTED_SITE
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9461] device (eth0): Activation: successful, device activated.
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9467] manager: NetworkManager state is now CONNECTED_GLOBAL
Oct 11 01:21:36 compute-0 NetworkManager[44908]: <info>  [1760145696.9470] manager: startup complete
Oct 11 01:21:36 compute-0 systemd[1]: Finished Network Manager Wait Online.
Oct 11 01:21:36 compute-0 sudo[44893]: pam_unix(sudo:session): session closed for user root
Oct 11 01:21:37 compute-0 sudo[45119]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lcewgfmmiwrcjyquzssqbdvoqjusqtbb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145697.205646-168-123227006267786/AnsiballZ_dnf.py'
Oct 11 01:21:37 compute-0 sudo[45119]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:37 compute-0 python3.9[45121]: ansible-ansible.legacy.dnf Invoked with name=['os-net-config'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:21:42 compute-0 systemd[1]: Started /usr/bin/systemctl start man-db-cache-update.
Oct 11 01:21:42 compute-0 systemd[1]: Starting man-db-cache-update.service...
Oct 11 01:21:42 compute-0 systemd[1]: Reloading.
Oct 11 01:21:42 compute-0 systemd-sysv-generator[45179]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:21:42 compute-0 systemd-rc-local-generator[45176]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:21:42 compute-0 systemd[1]: Queuing reload/restart jobs for marked units…
Oct 11 01:21:43 compute-0 systemd[1]: man-db-cache-update.service: Deactivated successfully.
Oct 11 01:21:43 compute-0 systemd[1]: Finished man-db-cache-update.service.
Oct 11 01:21:43 compute-0 systemd[1]: run-rda08da69233d44f5b7229779b604e55e.service: Deactivated successfully.
Oct 11 01:21:43 compute-0 sudo[45119]: pam_unix(sudo:session): session closed for user root
Oct 11 01:21:44 compute-0 sudo[45583]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ilbceoxaenxkpmpniaomdhnbooejtoth ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145704.2546597-180-17257935293944/AnsiballZ_stat.py'
Oct 11 01:21:44 compute-0 sudo[45583]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:44 compute-0 python3.9[45585]: ansible-ansible.builtin.stat Invoked with path=/var/lib/edpm-config/os-net-config.returncode follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:21:44 compute-0 sudo[45583]: pam_unix(sudo:session): session closed for user root
Oct 11 01:21:45 compute-0 sudo[45735]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mdhtronuikexgyvfazbwayehrrjfcqjh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145705.074371-189-107947141954209/AnsiballZ_ini_file.py'
Oct 11 01:21:45 compute-0 sudo[45735]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:45 compute-0 python3.9[45737]: ansible-community.general.ini_file Invoked with backup=True mode=0644 no_extra_spaces=True option=no-auto-default path=/etc/NetworkManager/NetworkManager.conf section=main state=present value=* exclusive=True ignore_spaces=False allow_no_value=False modify_inactive_option=True create=True follow=False unsafe_writes=False section_has_values=None values=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:21:45 compute-0 sudo[45735]: pam_unix(sudo:session): session closed for user root
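
Note: with section=main, option=no-auto-default, value=* and no_extra_spaces=True, the ini_file task leaves /etc/NetworkManager/NetworkManager.conf containing (any other existing [main] keys are preserved):

    [main]
    no-auto-default=*

no-auto-default=* stops NetworkManager from generating "Wired connection N" auto-default profiles on any interface, which matters here because os-net-config is about to take over interface configuration.
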
Oct 11 01:21:46 compute-0 sudo[45889]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-effuqojptpksihdcqyllzegnnlxpjvuh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145706.2349303-199-254782362553097/AnsiballZ_ini_file.py'
Oct 11 01:21:46 compute-0 sudo[45889]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:46 compute-0 python3.9[45891]: ansible-community.general.ini_file Invoked with backup=True mode=0644 no_extra_spaces=True option=dns path=/etc/NetworkManager/NetworkManager.conf section=main state=absent value=none exclusive=True ignore_spaces=False allow_no_value=False modify_inactive_option=True create=True follow=False unsafe_writes=False section_has_values=None values=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:21:46 compute-0 sudo[45889]: pam_unix(sudo:session): session closed for user root
Oct 11 01:21:47 compute-0 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.
Oct 11 01:21:47 compute-0 sudo[46041]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-occbdomvcjsnkdkmlczftadlrbocccey ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145707.086525-199-119128090098804/AnsiballZ_ini_file.py'
Oct 11 01:21:47 compute-0 sudo[46041]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:47 compute-0 python3.9[46043]: ansible-community.general.ini_file Invoked with backup=True mode=0644 no_extra_spaces=True option=dns path=/etc/NetworkManager/conf.d/99-cloud-init.conf section=main state=absent value=none exclusive=True ignore_spaces=False allow_no_value=False modify_inactive_option=True create=True follow=False unsafe_writes=False section_has_values=None values=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:21:47 compute-0 sudo[46041]: pam_unix(sudo:session): session closed for user root
Oct 11 01:21:48 compute-0 sudo[46193]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dwuclgfsfovebtppvltcttwsfekjduht ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145707.9319823-214-200302318975366/AnsiballZ_ini_file.py'
Oct 11 01:21:48 compute-0 sudo[46193]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:48 compute-0 python3.9[46195]: ansible-community.general.ini_file Invoked with backup=True mode=0644 no_extra_spaces=True option=rc-manager path=/etc/NetworkManager/NetworkManager.conf section=main state=absent value=unmanaged exclusive=True ignore_spaces=False allow_no_value=False modify_inactive_option=True create=True follow=False unsafe_writes=False section_has_values=None values=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:21:48 compute-0 sudo[46193]: pam_unix(sudo:session): session closed for user root
Oct 11 01:21:49 compute-0 sudo[46345]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pvawrinjxedwmmenntzgvhrivtvuvohw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145708.8178387-214-269134466617987/AnsiballZ_ini_file.py'
Oct 11 01:21:49 compute-0 sudo[46345]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:49 compute-0 python3.9[46347]: ansible-community.general.ini_file Invoked with backup=True mode=0644 no_extra_spaces=True option=rc-manager path=/etc/NetworkManager/conf.d/99-cloud-init.conf section=main state=absent value=unmanaged exclusive=True ignore_spaces=False allow_no_value=False modify_inactive_option=True create=True follow=False unsafe_writes=False section_has_values=None values=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:21:49 compute-0 sudo[46345]: pam_unix(sudo:session): session closed for user root
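
Note: the four state=absent tasks strip dns=none and rc-manager=unmanaged from both NetworkManager.conf and the cloud-init drop-in, so NetworkManager falls back to the defaults already visible at startup (dns=default,systemd-resolved rc-manager=symlink), i.e. it manages /etc/resolv.conf again. The merged result of all configuration files and drop-ins can be checked with:

    # print the effective NetworkManager configuration after all drop-ins
    NetworkManager --print-config
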
Oct 11 01:21:50 compute-0 sudo[46497]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rwtbavlhcbdjfxxjqscpjevtzlpbfgit ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145709.7053769-229-51600313224590/AnsiballZ_stat.py'
Oct 11 01:21:50 compute-0 sudo[46497]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:50 compute-0 python3.9[46499]: ansible-ansible.legacy.stat Invoked with path=/etc/dhcp/dhclient-enter-hooks follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:21:50 compute-0 sudo[46497]: pam_unix(sudo:session): session closed for user root
Oct 11 01:21:51 compute-0 sudo[46620]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-upyjizruxywmpzwfjmkrpopdwmygujrm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145709.7053769-229-51600313224590/AnsiballZ_copy.py'
Oct 11 01:21:51 compute-0 sudo[46620]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:51 compute-0 python3.9[46622]: ansible-ansible.legacy.copy Invoked with dest=/etc/dhcp/dhclient-enter-hooks mode=0755 src=/home/zuul/.ansible/tmp/ansible-tmp-1760145709.7053769-229-51600313224590/.source _original_basename=.1cn0jdxw follow=False checksum=f6278a40de79a9841f6ed1fc584538225566990c backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:21:51 compute-0 sudo[46620]: pam_unix(sudo:session): session closed for user root
Oct 11 01:21:51 compute-0 sudo[46772]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lgykupmuzwehuhgouwwbnnatcwmhcbln ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145711.48077-244-186856808783691/AnsiballZ_file.py'
Oct 11 01:21:51 compute-0 sudo[46772]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:52 compute-0 python3.9[46774]: ansible-ansible.builtin.file Invoked with mode=0755 path=/etc/os-net-config state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:21:52 compute-0 sudo[46772]: pam_unix(sudo:session): session closed for user root
Oct 11 01:21:52 compute-0 sudo[46924]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wfxpeekvfmukpqyrylcwzcwosnxddebl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145712.364994-252-7723148313752/AnsiballZ_edpm_os_net_config_mappings.py'
Oct 11 01:21:52 compute-0 sudo[46924]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:53 compute-0 python3.9[46926]: ansible-edpm_os_net_config_mappings Invoked with net_config_data_lookup={}
Oct 11 01:21:53 compute-0 sudo[46924]: pam_unix(sudo:session): session closed for user root
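
Note: net_config_data_lookup={} means no per-node NIC aliasing is requested, so nicN names resolve by default ordering. When non-empty, the structure maps node identifiers to nicN assignments by device name or MAC; a sketch of the format (node key and MAC below are invented for illustration):

    # hypothetical non-empty mapping: pin nic1/nic2 aliases on one node
    net_config_data_lookup:
      compute-0:
        nic1: eth0
        nic2: "aa:bb:cc:dd:ee:ff"   # entries may be device names or MAC addresses
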
Oct 11 01:21:53 compute-0 sudo[47076]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-slqsaypetzapqxnllvdyukjrugrxpbjl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145713.3971424-261-251985399954957/AnsiballZ_file.py'
Oct 11 01:21:53 compute-0 sudo[47076]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:53 compute-0 python3.9[47078]: ansible-ansible.builtin.file Invoked with path=/var/lib/edpm-config/scripts state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:21:54 compute-0 sudo[47076]: pam_unix(sudo:session): session closed for user root
Oct 11 01:21:54 compute-0 sudo[47229]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kzuoticbmytqhswhmdeovwbdnpxrqtbh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145714.3814125-271-160043093200488/AnsiballZ_stat.py'
Oct 11 01:21:54 compute-0 sudo[47229]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:54 compute-0 sudo[47229]: pam_unix(sudo:session): session closed for user root
Oct 11 01:21:55 compute-0 sudo[47352]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eqpjpyoxdjtlepgoskkhtazdbyqkvjbf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145714.3814125-271-160043093200488/AnsiballZ_copy.py'
Oct 11 01:21:55 compute-0 sudo[47352]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:55 compute-0 sudo[47352]: pam_unix(sudo:session): session closed for user root
Oct 11 01:21:56 compute-0 sudo[47504]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kefgtgpsdmknwkbldpbgtqzysutarltl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145715.8914611-286-56453846407883/AnsiballZ_slurp.py'
Oct 11 01:21:56 compute-0 sudo[47504]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:56 compute-0 python3.9[47506]: ansible-ansible.builtin.slurp Invoked with path=/etc/os-net-config/config.yaml src=/etc/os-net-config/config.yaml
Oct 11 01:21:56 compute-0 sudo[47504]: pam_unix(sudo:session): session closed for user root
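
Note: the slurped /etc/os-net-config/config.yaml is not echoed into the log, but its shape can be inferred from the connection-add audit events that follow: an OVS bridge br-ex carrying eth1 as uplink plus internal VLAN interfaces 20-23. A plausible reconstruction, not the actual file (addresses, routes, and MTU settings omitted):

    # hypothetical /etc/os-net-config/config.yaml consistent with the events below
    network_config:
      - type: ovs_bridge
        name: br-ex
        use_dhcp: false
        members:
          - type: interface
            name: eth1
            primary: true
          - type: vlan
            vlan_id: 20
          - type: vlan
            vlan_id: 21
          - type: vlan
            vlan_id: 22
          - type: vlan
            vlan_id: 23
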
Oct 11 01:21:57 compute-0 sudo[47679]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-utclunnnjvjsjhtnqkydpofcuwixmylh ; ANSIBLE_ASYNC_DIR=\'~/.ansible_async\' /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145716.9514124-295-141356714374578/async_wrapper.py j728238082002 300 /home/zuul/.ansible/tmp/ansible-tmp-1760145716.9514124-295-141356714374578/AnsiballZ_edpm_os_net_config.py _'
Oct 11 01:21:57 compute-0 sudo[47679]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:21:57 compute-0 ansible-async_wrapper.py[47681]: Invoked with j728238082002 300 /home/zuul/.ansible/tmp/ansible-tmp-1760145716.9514124-295-141356714374578/AnsiballZ_edpm_os_net_config.py _
Oct 11 01:21:57 compute-0 ansible-async_wrapper.py[47684]: Starting module and watcher
Oct 11 01:21:57 compute-0 ansible-async_wrapper.py[47684]: Start watching 47685 (300)
Oct 11 01:21:57 compute-0 ansible-async_wrapper.py[47685]: Start module (47685)
Oct 11 01:21:57 compute-0 ansible-async_wrapper.py[47681]: Return async_wrapper task started.
Oct 11 01:21:58 compute-0 sudo[47679]: pam_unix(sudo:session): session closed for user root
Oct 11 01:21:58 compute-0 python3.9[47686]: ansible-edpm_os_net_config Invoked with cleanup=True config_file=/etc/os-net-config/config.yaml debug=True detailed_exit_codes=True safe_defaults=False use_nmstate=True
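
Note: the module arguments map closely onto os-net-config's own CLI; the same apply could be run by hand as roughly the following, where the flag spellings come from the os-net-config CLI and the nmstate backend selection is implied by use_nmstate=True rather than stated in the log:

    # approximate manual equivalent of the edpm_os_net_config invocation
    os-net-config --config-file /etc/os-net-config/config.yaml \
                  --cleanup --detailed-exit-codes --debug
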
Oct 11 01:21:58 compute-0 kernel: cfg80211: Loading compiled-in X.509 certificates for regulatory database
Oct 11 01:21:58 compute-0 kernel: Loaded X.509 cert 'sforshee: 00b28ddf47aef9cea7'
Oct 11 01:21:58 compute-0 kernel: Loaded X.509 cert 'wens: 61c038651aabdcf94bd0ac7ff06c7248db18c600'
Oct 11 01:21:58 compute-0 kernel: platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
Oct 11 01:21:58 compute-0 kernel: cfg80211: failed to load regulatory.db
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.7164] audit: op="checkpoint-create" arg="/org/freedesktop/NetworkManager/Checkpoint/1" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.7188] audit: op="checkpoint-adjust-rollback-timeout" arg="/org/freedesktop/NetworkManager/Checkpoint/1" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8212] manager: (br-ex): new Open vSwitch Bridge device (/org/freedesktop/NetworkManager/Devices/4)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8215] audit: op="connection-add" uuid="01782f19-69bc-44c7-9a8f-eeadad0c1a6d" name="br-ex-br" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8243] manager: (br-ex): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/5)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8245] audit: op="connection-add" uuid="cd165b1d-0dbf-4caf-a6b3-e17a85169d5b" name="br-ex-port" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8271] manager: (eth1): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/6)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8273] audit: op="connection-add" uuid="fb816f0c-fd8b-4584-a4f3-873ac1ba1ee1" name="eth1-port" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8299] manager: (vlan20): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/7)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8301] audit: op="connection-add" uuid="56c5970b-3827-4d7c-b7d4-f856f6a265f7" name="vlan20-port" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8327] manager: (vlan21): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/8)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8329] audit: op="connection-add" uuid="6e106b93-7e39-4ee1-9307-84e4aadfa465" name="vlan21-port" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8354] manager: (vlan22): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/9)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8356] audit: op="connection-add" uuid="361e76a1-1e8b-4881-ad72-084e87b8bc45" name="vlan22-port" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8382] manager: (vlan23): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/10)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8384] audit: op="connection-add" uuid="e73caa14-5962-44b7-a1ed-6c53e6cc671c" name="vlan23-port" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8422] audit: op="connection-update" uuid="5fb06bd0-0bb0-7ffb-45f1-d6edd65f3e03" name="System eth0" args="ipv4.dhcp-client-id,ipv4.dhcp-timeout,802-3-ethernet.mtu,connection.autoconnect-priority,connection.timestamp,ipv6.dhcp-timeout,ipv6.addr-gen-mode,ipv6.method" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8454] manager: (br-ex): new Open vSwitch Interface device (/org/freedesktop/NetworkManager/Devices/11)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8457] audit: op="connection-add" uuid="369aa8e7-89c1-4a52-802e-a5dbf8313ebb" name="br-ex-if" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8514] audit: op="connection-update" uuid="35c95777-b4d1-53c3-bd1c-f3dcadb92093" name="ci-private-network" args="ovs-interface.type,ipv4.addresses,ipv4.never-default,ipv4.routes,ipv4.method,ipv4.dns,ipv4.routing-rules,ipv6.addr-gen-mode,ipv6.addresses,ipv6.routes,ipv6.method,ipv6.dns,ipv6.routing-rules,connection.slave-type,connection.timestamp,connection.master,connection.controller,connection.port-type,ovs-external-ids.data" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8549] manager: (vlan20): new Open vSwitch Interface device (/org/freedesktop/NetworkManager/Devices/12)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8554] audit: op="connection-add" uuid="83313be4-66a4-401d-85ce-407b83aa35fc" name="vlan20-if" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8586] manager: (vlan21): new Open vSwitch Interface device (/org/freedesktop/NetworkManager/Devices/13)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8590] audit: op="connection-add" uuid="24065ca2-dc43-4448-9e1b-e3ea2befe307" name="vlan21-if" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8621] manager: (vlan22): new Open vSwitch Interface device (/org/freedesktop/NetworkManager/Devices/14)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8625] audit: op="connection-add" uuid="1d0bfaf4-48f6-4777-acae-5cc5fea85bb1" name="vlan22-if" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8655] manager: (vlan23): new Open vSwitch Interface device (/org/freedesktop/NetworkManager/Devices/15)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8660] audit: op="connection-add" uuid="91a7ece8-5263-4580-9f6d-6e7ae1f1f57b" name="vlan23-if" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8682] audit: op="connection-delete" uuid="0495e8e2-a84b-3e37-a3c0-1eb40b77fe9f" name="Wired connection 1" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8705] device (br-ex)[Open vSwitch Bridge]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8726] device (br-ex)[Open vSwitch Bridge]: state change: unavailable -> disconnected (reason 'user-requested', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8735] device (br-ex)[Open vSwitch Bridge]: Activation: starting connection 'br-ex-br' (01782f19-69bc-44c7-9a8f-eeadad0c1a6d)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8738] audit: op="connection-activate" uuid="01782f19-69bc-44c7-9a8f-eeadad0c1a6d" name="br-ex-br" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8742] device (br-ex)[Open vSwitch Port]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8756] device (br-ex)[Open vSwitch Port]: state change: unavailable -> disconnected (reason 'user-requested', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8764] device (br-ex)[Open vSwitch Port]: Activation: starting connection 'br-ex-port' (cd165b1d-0dbf-4caf-a6b3-e17a85169d5b)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8768] device (eth1)[Open vSwitch Port]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8779] device (eth1)[Open vSwitch Port]: state change: unavailable -> disconnected (reason 'user-requested', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8788] device (eth1)[Open vSwitch Port]: Activation: starting connection 'eth1-port' (fb816f0c-fd8b-4584-a4f3-873ac1ba1ee1)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8793] device (vlan20)[Open vSwitch Port]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8805] device (vlan20)[Open vSwitch Port]: state change: unavailable -> disconnected (reason 'user-requested', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8815] device (vlan20)[Open vSwitch Port]: Activation: starting connection 'vlan20-port' (56c5970b-3827-4d7c-b7d4-f856f6a265f7)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8819] device (vlan21)[Open vSwitch Port]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8831] device (vlan21)[Open vSwitch Port]: state change: unavailable -> disconnected (reason 'user-requested', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8840] device (vlan21)[Open vSwitch Port]: Activation: starting connection 'vlan21-port' (6e106b93-7e39-4ee1-9307-84e4aadfa465)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8845] device (vlan22)[Open vSwitch Port]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8857] device (vlan22)[Open vSwitch Port]: state change: unavailable -> disconnected (reason 'user-requested', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8866] device (vlan22)[Open vSwitch Port]: Activation: starting connection 'vlan22-port' (361e76a1-1e8b-4881-ad72-084e87b8bc45)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8873] device (vlan23)[Open vSwitch Port]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8885] device (vlan23)[Open vSwitch Port]: state change: unavailable -> disconnected (reason 'user-requested', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8894] device (vlan23)[Open vSwitch Port]: Activation: starting connection 'vlan23-port' (e73caa14-5962-44b7-a1ed-6c53e6cc671c)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8897] device (br-ex)[Open vSwitch Bridge]: state change: disconnected -> prepare (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8901] device (br-ex)[Open vSwitch Bridge]: state change: prepare -> config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8906] device (br-ex)[Open vSwitch Bridge]: state change: config -> ip-config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8919] device (br-ex)[Open vSwitch Interface]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8929] device (br-ex)[Open vSwitch Interface]: state change: unavailable -> disconnected (reason 'user-requested', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8938] device (br-ex)[Open vSwitch Interface]: Activation: starting connection 'br-ex-if' (369aa8e7-89c1-4a52-802e-a5dbf8313ebb)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8940] device (br-ex)[Open vSwitch Port]: state change: disconnected -> prepare (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8947] device (br-ex)[Open vSwitch Port]: state change: prepare -> config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8952] device (br-ex)[Open vSwitch Port]: state change: config -> ip-config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8955] device (br-ex)[Open vSwitch Port]: Activation: connection 'br-ex-port' attached as port, continuing activation
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8958] device (eth1): state change: activated -> deactivating (reason 'new-activation', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8977] device (eth1): disconnecting for new activation request.
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8978] device (eth1)[Open vSwitch Port]: state change: disconnected -> prepare (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8983] device (eth1)[Open vSwitch Port]: state change: prepare -> config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8986] device (eth1)[Open vSwitch Port]: state change: config -> ip-config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8989] device (eth1)[Open vSwitch Port]: Activation: connection 'eth1-port' attached as port, continuing activation
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.8994] device (vlan20)[Open vSwitch Interface]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9000] device (vlan20)[Open vSwitch Interface]: state change: unavailable -> disconnected (reason 'user-requested', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9007] device (vlan20)[Open vSwitch Interface]: Activation: starting connection 'vlan20-if' (83313be4-66a4-401d-85ce-407b83aa35fc)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9009] device (vlan20)[Open vSwitch Port]: state change: disconnected -> prepare (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9012] device (vlan20)[Open vSwitch Port]: state change: prepare -> config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9016] device (vlan20)[Open vSwitch Port]: state change: config -> ip-config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9018] device (vlan20)[Open vSwitch Port]: Activation: connection 'vlan20-port' attached as port, continuing activation
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9022] device (vlan21)[Open vSwitch Interface]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9029] device (vlan21)[Open vSwitch Interface]: state change: unavailable -> disconnected (reason 'user-requested', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9035] device (vlan21)[Open vSwitch Interface]: Activation: starting connection 'vlan21-if' (24065ca2-dc43-4448-9e1b-e3ea2befe307)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9037] device (vlan21)[Open vSwitch Port]: state change: disconnected -> prepare (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9041] device (vlan21)[Open vSwitch Port]: state change: prepare -> config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9044] device (vlan21)[Open vSwitch Port]: state change: config -> ip-config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9047] device (vlan21)[Open vSwitch Port]: Activation: connection 'vlan21-port' attached as port, continuing activation
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9051] device (vlan22)[Open vSwitch Interface]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9057] device (vlan22)[Open vSwitch Interface]: state change: unavailable -> disconnected (reason 'user-requested', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9063] device (vlan22)[Open vSwitch Interface]: Activation: starting connection 'vlan22-if' (1d0bfaf4-48f6-4777-acae-5cc5fea85bb1)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9065] device (vlan22)[Open vSwitch Port]: state change: disconnected -> prepare (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9069] device (vlan22)[Open vSwitch Port]: state change: prepare -> config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9072] device (vlan22)[Open vSwitch Port]: state change: config -> ip-config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9074] device (vlan22)[Open vSwitch Port]: Activation: connection 'vlan22-port' attached as port, continuing activation
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9078] device (vlan23)[Open vSwitch Interface]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9085] device (vlan23)[Open vSwitch Interface]: state change: unavailable -> disconnected (reason 'user-requested', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9091] device (vlan23)[Open vSwitch Interface]: Activation: starting connection 'vlan23-if' (91a7ece8-5263-4580-9f6d-6e7ae1f1f57b)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9093] device (vlan23)[Open vSwitch Port]: state change: disconnected -> prepare (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9097] device (vlan23)[Open vSwitch Port]: state change: prepare -> config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9100] device (vlan23)[Open vSwitch Port]: state change: config -> ip-config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9103] device (vlan23)[Open vSwitch Port]: Activation: connection 'vlan23-port' attached as port, continuing activation
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9105] device (br-ex)[Open vSwitch Bridge]: state change: ip-config -> ip-check (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9121] audit: op="device-reapply" interface="eth0" ifindex=2 args="ipv4.dhcp-client-id,ipv4.dhcp-timeout,802-3-ethernet.mtu,connection.autoconnect-priority,ipv6.addr-gen-mode,ipv6.method" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9125] device (br-ex)[Open vSwitch Interface]: state change: disconnected -> prepare (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9129] device (br-ex)[Open vSwitch Interface]: state change: prepare -> config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9132] device (br-ex)[Open vSwitch Interface]: state change: config -> ip-config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9147] device (br-ex)[Open vSwitch Port]: state change: ip-config -> ip-check (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9154] device (eth1)[Open vSwitch Port]: state change: ip-config -> ip-check (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 kernel: ovs-system: entered promiscuous mode
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9160] device (vlan20)[Open vSwitch Interface]: state change: disconnected -> prepare (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9165] device (vlan20)[Open vSwitch Interface]: state change: prepare -> config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9168] device (vlan20)[Open vSwitch Interface]: state change: config -> ip-config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9179] device (vlan20)[Open vSwitch Port]: state change: ip-config -> ip-check (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9189] device (vlan21)[Open vSwitch Interface]: state change: disconnected -> prepare (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 kernel: Timeout policy base is empty
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9197] device (vlan21)[Open vSwitch Interface]: state change: prepare -> config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9201] device (vlan21)[Open vSwitch Interface]: state change: config -> ip-config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9213] device (vlan21)[Open vSwitch Port]: state change: ip-config -> ip-check (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 systemd-udevd[47691]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9224] device (vlan22)[Open vSwitch Interface]: state change: disconnected -> prepare (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9235] device (vlan22)[Open vSwitch Interface]: state change: prepare -> config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9239] device (vlan22)[Open vSwitch Interface]: state change: config -> ip-config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9251] device (vlan22)[Open vSwitch Port]: state change: ip-config -> ip-check (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 systemd[1]: Starting Network Manager Script Dispatcher Service...
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9261] device (vlan23)[Open vSwitch Interface]: state change: disconnected -> prepare (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9266] device (vlan23)[Open vSwitch Interface]: state change: prepare -> config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9269] device (vlan23)[Open vSwitch Interface]: state change: config -> ip-config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9280] device (vlan23)[Open vSwitch Port]: state change: ip-config -> ip-check (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9288] dhcp4 (eth0): canceled DHCP transaction
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9289] dhcp4 (eth0): activation: beginning transaction (timeout in 45 seconds)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9289] dhcp4 (eth0): state changed no lease
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9293] dhcp4 (eth0): activation: beginning transaction (no timeout)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9311] device (br-ex)[Open vSwitch Interface]: Activation: connection 'br-ex-if' attached as port, continuing activation
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9318] audit: op="device-reapply" interface="eth1" ifindex=3 pid=47687 uid=0 result="fail" reason="Device is not activated"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9324] device (vlan20)[Open vSwitch Interface]: Activation: connection 'vlan20-if' attached as port, continuing activation
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9332] device (vlan21)[Open vSwitch Interface]: Activation: connection 'vlan21-if' attached as port, continuing activation
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9341] device (vlan22)[Open vSwitch Interface]: Activation: connection 'vlan22-if' attached as port, continuing activation
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9347] dhcp4 (eth0): state changed new lease, address=38.102.83.82
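
Note: reapplying the eth0 profile cancels the running DHCP transaction and starts a new one, which immediately re-acquires the same lease (38.102.83.82), so connectivity on the provisioning interface is never really interrupted. The lease details can be inspected with:

    # show the DHCP options NetworkManager received on eth0
    nmcli -f DHCP4 device show eth0
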
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9353] device (vlan23)[Open vSwitch Interface]: Activation: connection 'vlan23-if' attached as port, continuing activation
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9363] device (eth1): disconnecting for new activation request.
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9364] audit: op="connection-activate" uuid="35c95777-b4d1-53c3-bd1c-f3dcadb92093" name="ci-private-network" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9445] audit: op="checkpoint-adjust-rollback-timeout" arg="/org/freedesktop/NetworkManager/Checkpoint/1" pid=47687 uid=0 result="success"
Oct 11 01:22:00 compute-0 systemd[1]: Started Network Manager Script Dispatcher Service.
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9514] device (eth1): state change: deactivating -> disconnected (reason 'new-activation', managed-type: 'full')
Oct 11 01:22:00 compute-0 kernel: br-ex: entered promiscuous mode
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9625] device (eth1): Activation: starting connection 'ci-private-network' (35c95777-b4d1-53c3-bd1c-f3dcadb92093)
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9634] device (br-ex)[Open vSwitch Bridge]: state change: ip-check -> secondaries (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9650] device (eth1): state change: disconnected -> prepare (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9656] device (eth1): state change: prepare -> config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9667] device (br-ex)[Open vSwitch Bridge]: state change: secondaries -> activated (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9675] device (br-ex)[Open vSwitch Bridge]: Activation: successful, device activated.
Oct 11 01:22:00 compute-0 kernel: vlan22: entered promiscuous mode
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9687] device (br-ex)[Open vSwitch Port]: state change: ip-check -> secondaries (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9688] device (eth1)[Open vSwitch Port]: state change: ip-check -> secondaries (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9690] device (vlan20)[Open vSwitch Port]: state change: ip-check -> secondaries (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9691] device (vlan21)[Open vSwitch Port]: state change: ip-check -> secondaries (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9693] device (vlan22)[Open vSwitch Port]: state change: ip-check -> secondaries (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9695] device (vlan23)[Open vSwitch Port]: state change: ip-check -> secondaries (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 systemd-udevd[47693]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9700] device (eth1): state change: config -> ip-config (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9710] device (br-ex)[Open vSwitch Port]: state change: secondaries -> activated (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9717] device (br-ex)[Open vSwitch Port]: Activation: successful, device activated.
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9724] device (eth1)[Open vSwitch Port]: state change: secondaries -> activated (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9730] device (eth1)[Open vSwitch Port]: Activation: successful, device activated.
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9735] device (vlan20)[Open vSwitch Port]: state change: secondaries -> activated (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9740] device (vlan20)[Open vSwitch Port]: Activation: successful, device activated.
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9745] device (vlan21)[Open vSwitch Port]: state change: secondaries -> activated (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9752] device (vlan21)[Open vSwitch Port]: Activation: successful, device activated.
Oct 11 01:22:00 compute-0 kernel: vlan23: entered promiscuous mode
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9758] device (vlan22)[Open vSwitch Port]: state change: secondaries -> activated (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9764] device (vlan22)[Open vSwitch Port]: Activation: successful, device activated.
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9771] device (vlan23)[Open vSwitch Port]: state change: secondaries -> activated (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9778] device (vlan23)[Open vSwitch Port]: Activation: successful, device activated.
Oct 11 01:22:00 compute-0 systemd-udevd[47692]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9793] device (eth1): Activation: connection 'ci-private-network' attached as port, continuing activation
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9799] device (eth1): state change: ip-config -> ip-check (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 kernel: vlan21: entered promiscuous mode
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9917] device (br-ex)[Open vSwitch Interface]: carrier: link connected
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9929] device (vlan22)[Open vSwitch Interface]: carrier: link connected
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9976] device (br-ex)[Open vSwitch Interface]: state change: ip-config -> ip-check (reason 'none', managed-type: 'full')
Oct 11 01:22:00 compute-0 systemd-udevd[47793]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 01:22:00 compute-0 kernel: vlan20: entered promiscuous mode
Oct 11 01:22:00 compute-0 NetworkManager[44908]: <info>  [1760145720.9993] device (vlan22)[Open vSwitch Interface]: state change: ip-config -> ip-check (reason 'none', managed-type: 'full')
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0014] device (vlan23)[Open vSwitch Interface]: carrier: link connected
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0015] device (eth1): state change: ip-check -> secondaries (reason 'none', managed-type: 'full')
Oct 11 01:22:01 compute-0 kernel: virtio_net virtio5 eth1: entered promiscuous mode
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0031] device (eth1): state change: secondaries -> activated (reason 'none', managed-type: 'full')
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0040] device (eth1): Activation: successful, device activated.
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0056] device (vlan23)[Open vSwitch Interface]: state change: ip-config -> ip-check (reason 'none', managed-type: 'full')
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0067] device (br-ex)[Open vSwitch Interface]: state change: ip-check -> secondaries (reason 'none', managed-type: 'full')
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0068] device (vlan22)[Open vSwitch Interface]: state change: ip-check -> secondaries (reason 'none', managed-type: 'full')
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0069] device (br-ex)[Open vSwitch Interface]: state change: secondaries -> activated (reason 'none', managed-type: 'full')
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0075] device (br-ex)[Open vSwitch Interface]: Activation: successful, device activated.
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0081] device (vlan22)[Open vSwitch Interface]: state change: secondaries -> activated (reason 'none', managed-type: 'full')
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0087] device (vlan22)[Open vSwitch Interface]: Activation: successful, device activated.
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0097] device (vlan23)[Open vSwitch Interface]: state change: ip-check -> secondaries (reason 'none', managed-type: 'full')
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0098] device (vlan23)[Open vSwitch Interface]: state change: secondaries -> activated (reason 'none', managed-type: 'full')
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0105] device (vlan23)[Open vSwitch Interface]: Activation: successful, device activated.
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0120] device (vlan21)[Open vSwitch Interface]: carrier: link connected
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0123] device (vlan20)[Open vSwitch Interface]: carrier: link connected
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0142] device (vlan21)[Open vSwitch Interface]: state change: ip-config -> ip-check (reason 'none', managed-type: 'full')
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0149] device (vlan20)[Open vSwitch Interface]: state change: ip-config -> ip-check (reason 'none', managed-type: 'full')
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0192] device (vlan21)[Open vSwitch Interface]: state change: ip-check -> secondaries (reason 'none', managed-type: 'full')
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0194] device (vlan20)[Open vSwitch Interface]: state change: ip-check -> secondaries (reason 'none', managed-type: 'full')
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0195] device (vlan21)[Open vSwitch Interface]: state change: secondaries -> activated (reason 'none', managed-type: 'full')
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0202] device (vlan21)[Open vSwitch Interface]: Activation: successful, device activated.
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0209] device (vlan20)[Open vSwitch Interface]: state change: secondaries -> activated (reason 'none', managed-type: 'full')
Oct 11 01:22:01 compute-0 NetworkManager[44908]: <info>  [1760145721.0216] device (vlan20)[Open vSwitch Interface]: Activation: successful, device activated.
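At this point every interface the job manages (eth1, br-ex, vlan20-vlan23) has reached the activated state. A minimal way to verify the same thing by hand, assuming only that nmcli is available on the host:

    import subprocess

    # Terse nmcli output is colon-separated: DEVICE:STATE, one device per line.
    out = subprocess.run(
        ["nmcli", "-t", "-f", "DEVICE,STATE", "device"],
        capture_output=True, text=True, check=True,
    ).stdout
    for line in out.splitlines():
        device, state = line.split(":", 1)
        if device in ("eth1", "br-ex", "vlan20", "vlan21", "vlan22", "vlan23"):
            print(device, state)  # each should report "connected"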
Oct 11 01:22:01 compute-0 sudo[48044]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fypfsncmflhiezpacscqwbjlyopverok ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145721.1883717-295-123194434805462/AnsiballZ_async_status.py'
Oct 11 01:22:01 compute-0 sudo[48044]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:02 compute-0 python3.9[48046]: ansible-ansible.legacy.async_status Invoked with jid=j728238082002.47681 mode=status _async_dir=/root/.ansible_async
Oct 11 01:22:02 compute-0 sudo[48044]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:02 compute-0 NetworkManager[44908]: <info>  [1760145722.2109] audit: op="checkpoint-adjust-rollback-timeout" arg="/org/freedesktop/NetworkManager/Checkpoint/1" pid=47687 uid=0 result="success"
Oct 11 01:22:02 compute-0 NetworkManager[44908]: <info>  [1760145722.5043] checkpoint[0x55a3b8baa950]: destroy /org/freedesktop/NetworkManager/Checkpoint/1
Oct 11 01:22:02 compute-0 NetworkManager[44908]: <info>  [1760145722.5051] audit: op="checkpoint-destroy" arg="/org/freedesktop/NetworkManager/Checkpoint/1" pid=47687 uid=0 result="success"
Oct 11 01:22:02 compute-0 ansible-async_wrapper.py[47684]: 47685 still running (300)
Oct 11 01:22:03 compute-0 NetworkManager[44908]: <info>  [1760145723.0421] audit: op="checkpoint-create" arg="/org/freedesktop/NetworkManager/Checkpoint/2" pid=47687 uid=0 result="success"
Oct 11 01:22:03 compute-0 NetworkManager[44908]: <info>  [1760145723.0445] audit: op="checkpoint-adjust-rollback-timeout" arg="/org/freedesktop/NetworkManager/Checkpoint/2" pid=47687 uid=0 result="success"
Oct 11 01:22:03 compute-0 NetworkManager[44908]: <info>  [1760145723.3867] audit: op="networking-control" arg="global-dns-configuration" pid=47687 uid=0 result="success"
Oct 11 01:22:03 compute-0 NetworkManager[44908]: <info>  [1760145723.3895] config: signal: SET_VALUES,values,values-intern,global-dns-config (/etc/NetworkManager/NetworkManager.conf, /run/NetworkManager/conf.d/15-carrier-timeout.conf)
Oct 11 01:22:03 compute-0 NetworkManager[44908]: <info>  [1760145723.3925] audit: op="networking-control" arg="global-dns-configuration" pid=47687 uid=0 result="success"
Oct 11 01:22:03 compute-0 NetworkManager[44908]: <info>  [1760145723.3943] audit: op="checkpoint-adjust-rollback-timeout" arg="/org/freedesktop/NetworkManager/Checkpoint/2" pid=47687 uid=0 result="success"
Oct 11 01:22:03 compute-0 NetworkManager[44908]: <info>  [1760145723.5354] checkpoint[0x55a3b8baaa20]: destroy /org/freedesktop/NetworkManager/Checkpoint/2
Oct 11 01:22:03 compute-0 NetworkManager[44908]: <info>  [1760145723.5362] audit: op="checkpoint-destroy" arg="/org/freedesktop/NetworkManager/Checkpoint/2" pid=47687 uid=0 result="success"
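The checkpoint-create / adjust-rollback-timeout / checkpoint-destroy audit entries are NetworkManager's transactional safety net: the caller (pid 47687) snapshots network state, applies changes, and destroys the snapshot only once connectivity is confirmed; if the rollback timeout expires first, NetworkManager restores the snapshot automatically. A hedged sketch of that D-Bus API, assuming the dbus-python bindings are installed:

    import dbus

    bus = dbus.SystemBus()
    nm = dbus.Interface(
        bus.get_object("org.freedesktop.NetworkManager",
                       "/org/freedesktop/NetworkManager"),
        "org.freedesktop.NetworkManager",
    )
    # Empty device list = checkpoint all devices; 60 s until automatic rollback.
    checkpoint = nm.CheckpointCreate([], dbus.UInt32(60), dbus.UInt32(0))
    # ... apply and verify network changes here ...
    nm.CheckpointDestroy(checkpoint)  # commit; skipping this lets NM roll back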
Oct 11 01:22:03 compute-0 ansible-async_wrapper.py[47685]: Module complete (47685)
Oct 11 01:22:05 compute-0 sudo[48151]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-taqzqemgixwxceaepwzllkbwzhmhyocl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145721.1883717-295-123194434805462/AnsiballZ_async_status.py'
Oct 11 01:22:05 compute-0 sudo[48151]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:05 compute-0 python3.9[48153]: ansible-ansible.legacy.async_status Invoked with jid=j728238082002.47681 mode=status _async_dir=/root/.ansible_async
Oct 11 01:22:05 compute-0 sudo[48151]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:06 compute-0 sudo[48250]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yuxxupiytmmkwpqnmtimttqcxcvaboiy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145721.1883717-295-123194434805462/AnsiballZ_async_status.py'
Oct 11 01:22:06 compute-0 sudo[48250]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:06 compute-0 python3.9[48252]: ansible-ansible.legacy.async_status Invoked with jid=j728238082002.47681 mode=cleanup _async_dir=/root/.ansible_async
Oct 11 01:22:06 compute-0 sudo[48250]: pam_unix(sudo:session): session closed for user root
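The repeated async_status calls above are the controller polling a fire-and-forget task (jid j728238082002.47681) until it reports finished, after which a final mode=cleanup call removes the job file. What gets polled is just a JSON status file under _async_dir; a sketch of the mechanism (the file name is assumed here to match the jid):

    import json, pathlib, time

    job = pathlib.Path("/root/.ansible_async/j728238082002.47681")
    while True:
        status = json.loads(job.read_text())
        if status.get("finished"):
            break
        time.sleep(2)  # the controller polls on roughly this cadence
    print(status.get("rc"), status.get("changed"))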
Oct 11 01:22:06 compute-0 systemd[1]: systemd-hostnamed.service: Deactivated successfully.
Oct 11 01:22:06 compute-0 sudo[48405]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uggsilktvwyvugvdghtebflyvucrfuep ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145726.615519-322-227262963804727/AnsiballZ_stat.py'
Oct 11 01:22:07 compute-0 sudo[48405]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:07 compute-0 python3.9[48407]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/os-net-config.returncode follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:22:07 compute-0 sudo[48405]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:07 compute-0 sudo[48528]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ljutxqmmpfthvmaxzarakddhjbonxlem ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145726.615519-322-227262963804727/AnsiballZ_copy.py'
Oct 11 01:22:07 compute-0 sudo[48528]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:07 compute-0 python3.9[48530]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/edpm-config/os-net-config.returncode mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760145726.615519-322-227262963804727/.source.returncode _original_basename=.8y3ufoen follow=False checksum=b6589fc6ab0dc82cf12099d1c2d40ab994e8410c backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:22:07 compute-0 sudo[48528]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:07 compute-0 ansible-async_wrapper.py[47684]: Done in kid B.
Oct 11 01:22:08 compute-0 sudo[48681]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hfmfqdzrecrqvytkyhsayszarqhvekbv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145728.1751258-338-96895782713064/AnsiballZ_stat.py'
Oct 11 01:22:08 compute-0 sudo[48681]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:08 compute-0 python3.9[48683]: ansible-ansible.legacy.stat Invoked with path=/etc/cloud/cloud.cfg.d/99-edpm-disable-network-config.cfg follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:22:08 compute-0 sudo[48681]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:09 compute-0 sudo[48804]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pukordxncayggwglngkybvzhkaeumwug ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145728.1751258-338-96895782713064/AnsiballZ_copy.py'
Oct 11 01:22:09 compute-0 sudo[48804]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:09 compute-0 python3.9[48806]: ansible-ansible.legacy.copy Invoked with dest=/etc/cloud/cloud.cfg.d/99-edpm-disable-network-config.cfg mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760145728.1751258-338-96895782713064/.source.cfg _original_basename=.8kn2held follow=False checksum=f3c5952a9cd4c6c31b314b25eb897168971cc86e backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:22:09 compute-0 sudo[48804]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:10 compute-0 sudo[48956]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rvqxiokjijdauliyvcgododnyxwirgwo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145729.7990127-353-132095170383969/AnsiballZ_systemd.py'
Oct 11 01:22:10 compute-0 sudo[48956]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:10 compute-0 python3.9[48958]: ansible-ansible.builtin.systemd Invoked with name=NetworkManager state=reloaded daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:22:10 compute-0 systemd[1]: Reloading Network Manager...
Oct 11 01:22:10 compute-0 NetworkManager[44908]: <info>  [1760145730.6873] audit: op="reload" arg="0" pid=48962 uid=0 result="success"
Oct 11 01:22:10 compute-0 NetworkManager[44908]: <info>  [1760145730.6884] config: signal: SIGHUP,config-files,values,values-user,no-auto-default (/etc/NetworkManager/NetworkManager.conf, /usr/lib/NetworkManager/conf.d/00-server.conf, /run/NetworkManager/conf.d/15-carrier-timeout.conf, /var/lib/NetworkManager/NetworkManager-intern.conf)
Oct 11 01:22:10 compute-0 systemd[1]: Reloaded Network Manager.
Oct 11 01:22:10 compute-0 sudo[48956]: pam_unix(sudo:session): session closed for user root
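state=reloaded on the systemd module translates to systemctl reload NetworkManager, i.e. the SIGHUP config re-read logged above (picking up /run/NetworkManager/conf.d/15-carrier-timeout.conf) without restarting the daemon or bouncing any connections. The hand-run equivalent, as a one-line sketch:

    import subprocess

    subprocess.run(["systemctl", "reload", "NetworkManager"], check=True)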
Oct 11 01:22:11 compute-0 sshd-session[40901]: Connection closed by 192.168.122.30 port 54826
Oct 11 01:22:11 compute-0 sshd-session[40898]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:22:11 compute-0 systemd[1]: session-9.scope: Deactivated successfully.
Oct 11 01:22:11 compute-0 systemd[1]: session-9.scope: Consumed 57.662s CPU time.
Oct 11 01:22:11 compute-0 systemd-logind[804]: Session 9 logged out. Waiting for processes to exit.
Oct 11 01:22:11 compute-0 systemd-logind[804]: Removed session 9.
Oct 11 01:22:16 compute-0 sshd-session[48993]: Accepted publickey for zuul from 192.168.122.30 port 58112 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:22:16 compute-0 systemd-logind[804]: New session 10 of user zuul.
Oct 11 01:22:16 compute-0 systemd[1]: Started Session 10 of User zuul.
Oct 11 01:22:16 compute-0 sshd-session[48993]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:22:17 compute-0 python3.9[49146]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:22:18 compute-0 python3.9[49301]: ansible-ansible.builtin.setup Invoked with filter=['ansible_default_ipv4'] gather_subset=['!all', '!min', 'network'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:22:20 compute-0 python3.9[49494]: ansible-ansible.legacy.command Invoked with _raw_params=hostname -f _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:22:20 compute-0 sshd-session[48996]: Connection closed by 192.168.122.30 port 58112
Oct 11 01:22:20 compute-0 sshd-session[48993]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:22:20 compute-0 systemd[1]: session-10.scope: Deactivated successfully.
Oct 11 01:22:20 compute-0 systemd[1]: session-10.scope: Consumed 3.208s CPU time.
Oct 11 01:22:20 compute-0 systemd-logind[804]: Session 10 logged out. Waiting for processes to exit.
Oct 11 01:22:20 compute-0 systemd-logind[804]: Removed session 10.
Oct 11 01:22:20 compute-0 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.
Oct 11 01:22:26 compute-0 sshd-session[49523]: Accepted publickey for zuul from 192.168.122.30 port 35870 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:22:26 compute-0 systemd-logind[804]: New session 11 of user zuul.
Oct 11 01:22:26 compute-0 systemd[1]: Started Session 11 of User zuul.
Oct 11 01:22:26 compute-0 sshd-session[49523]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:22:27 compute-0 python3.9[49676]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:22:28 compute-0 python3.9[49831]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:22:29 compute-0 sudo[49985]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-etatcakcbyyuaqgsyxkwoktinpjkedjj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145749.0744903-40-16994250154221/AnsiballZ_setup.py'
Oct 11 01:22:29 compute-0 sudo[49985]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:29 compute-0 python3.9[49987]: ansible-ansible.legacy.setup Invoked with filter=['ansible_pkg_mgr'] gather_subset=['!all'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:22:30 compute-0 sudo[49985]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:30 compute-0 sudo[50069]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lhchjdokdshneaupwgamarcijbfqfadn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145749.0744903-40-16994250154221/AnsiballZ_dnf.py'
Oct 11 01:22:30 compute-0 sudo[50069]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:30 compute-0 python3.9[50071]: ansible-ansible.legacy.dnf Invoked with name=['podman'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:22:32 compute-0 sudo[50069]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:32 compute-0 sudo[50223]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-msdrlhadhqkmkuoofhccfazkqtacbgvr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145752.3830676-52-170902361709151/AnsiballZ_setup.py'
Oct 11 01:22:32 compute-0 sudo[50223]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:33 compute-0 python3.9[50225]: ansible-ansible.builtin.setup Invoked with filter=['ansible_interfaces'] gather_subset=['!all', '!min', 'network'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:22:33 compute-0 sudo[50223]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:34 compute-0 sudo[50418]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tbsaitlnohmdqvfquitirgbbzdssdkav ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145753.9137602-63-28609406271077/AnsiballZ_file.py'
Oct 11 01:22:34 compute-0 sudo[50418]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:34 compute-0 python3.9[50420]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/containers/networks recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:22:34 compute-0 sudo[50418]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:35 compute-0 sudo[50570]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tnsjvcihijiimzurbdvffpdtkmafalry ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145754.9281764-71-164171674444054/AnsiballZ_command.py'
Oct 11 01:22:35 compute-0 sudo[50570]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:35 compute-0 python3.9[50572]: ansible-ansible.legacy.command Invoked with _raw_params=podman network inspect podman _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:22:35 compute-0 podman[50573]: 2025-10-11 01:22:35.780510437 +0000 UTC m=+0.075433929 system refresh
Oct 11 01:22:35 compute-0 sudo[50570]: pam_unix(sudo:session): session closed for user root
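The podman network inspect podman call appears to capture the existing default network definition before the following tasks replace it with the role's own /etc/containers/networks/podman.json. Reproducing the check and reading the result, assuming podman 4.x JSON output:

    import json, subprocess

    raw = subprocess.run(
        ["podman", "network", "inspect", "podman"],
        capture_output=True, text=True, check=True,
    ).stdout
    net = json.loads(raw)[0]  # inspect returns a JSON array of networks
    print(net["name"], net.get("driver"), net.get("subnets"))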
Oct 11 01:22:36 compute-0 sudo[50733]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uwvkqquymbxcvhvbixfcahawgavwvpah ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145756.0382679-79-120773282895879/AnsiballZ_stat.py'
Oct 11 01:22:36 compute-0 sudo[50733]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:36 compute-0 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.
Oct 11 01:22:36 compute-0 python3.9[50735]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/networks/podman.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:22:36 compute-0 sudo[50733]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:37 compute-0 sudo[50856]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ntzyzcevhlqsaybllzkqmplbwtphjclv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145756.0382679-79-120773282895879/AnsiballZ_copy.py'
Oct 11 01:22:37 compute-0 sudo[50856]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:37 compute-0 python3.9[50858]: ansible-ansible.legacy.copy Invoked with dest=/etc/containers/networks/podman.json group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145756.0382679-79-120773282895879/.source.json follow=False _original_basename=podman_network_config.j2 checksum=7b48fc83890a5fe656c7a2a49143331928a89194 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:22:37 compute-0 sudo[50856]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:38 compute-0 sudo[51008]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-snradauvkknbljooxhnhkysgghlhyhgb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145758.0276191-94-7030364109744/AnsiballZ_stat.py'
Oct 11 01:22:38 compute-0 sudo[51008]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:38 compute-0 python3.9[51010]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/registries.conf.d/20-edpm-podman-registries.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:22:38 compute-0 sudo[51008]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:39 compute-0 sudo[51131]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zssvggdtnqwsvmftrnjtzzfdltofksrq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145758.0276191-94-7030364109744/AnsiballZ_copy.py'
Oct 11 01:22:39 compute-0 sudo[51131]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:39 compute-0 python3.9[51133]: ansible-ansible.legacy.copy Invoked with dest=/etc/containers/registries.conf.d/20-edpm-podman-registries.conf group=root mode=0644 owner=root setype=etc_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760145758.0276191-94-7030364109744/.source.conf follow=False _original_basename=registries.conf.j2 checksum=d987b949eaca6ee61c2461c1b8dc7f701ea74149 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:22:39 compute-0 sudo[51131]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:40 compute-0 sudo[51283]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ptjlmsaaklypvsozfupclpkleoemwzbl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145759.5725594-110-235298972301559/AnsiballZ_ini_file.py'
Oct 11 01:22:40 compute-0 sudo[51283]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:40 compute-0 python3.9[51285]: ansible-community.general.ini_file Invoked with create=True group=root mode=0644 option=pids_limit owner=root path=/etc/containers/containers.conf section=containers setype=etc_t value=4096 backup=False state=present exclusive=True no_extra_spaces=False ignore_spaces=False allow_no_value=False modify_inactive_option=True follow=False unsafe_writes=False section_has_values=None values=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:22:40 compute-0 sudo[51283]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:40 compute-0 sudo[51435]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lttfmzvipwcxzxtjjajiywcvnscojuqc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145760.4804919-110-158902754787560/AnsiballZ_ini_file.py'
Oct 11 01:22:40 compute-0 sudo[51435]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:41 compute-0 python3.9[51437]: ansible-community.general.ini_file Invoked with create=True group=root mode=0644 option=events_logger owner=root path=/etc/containers/containers.conf section=engine setype=etc_t value="journald" backup=False state=present exclusive=True no_extra_spaces=False ignore_spaces=False allow_no_value=False modify_inactive_option=True follow=False unsafe_writes=False section_has_values=None values=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:22:41 compute-0 sudo[51435]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:41 compute-0 sudo[51587]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ouaexiahmnnjzjfrweaqxjpfnyxmtglw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145761.2075522-110-277316946960371/AnsiballZ_ini_file.py'
Oct 11 01:22:41 compute-0 sudo[51587]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:41 compute-0 python3.9[51589]: ansible-community.general.ini_file Invoked with create=True group=root mode=0644 option=runtime owner=root path=/etc/containers/containers.conf section=engine setype=etc_t value="crun" backup=False state=present exclusive=True no_extra_spaces=False ignore_spaces=False allow_no_value=False modify_inactive_option=True follow=False unsafe_writes=False section_has_values=None values=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:22:41 compute-0 sudo[51587]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:42 compute-0 sudo[51739]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sdzfxvjhtnkwrmrpoxycpdlywnwiafha ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145761.983795-110-39771726780417/AnsiballZ_ini_file.py'
Oct 11 01:22:42 compute-0 sudo[51739]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:42 compute-0 python3.9[51741]: ansible-community.general.ini_file Invoked with create=True group=root mode=0644 option=network_backend owner=root path=/etc/containers/containers.conf section=network setype=etc_t value="netavark" backup=False state=present exclusive=True no_extra_spaces=False ignore_spaces=False allow_no_value=False modify_inactive_option=True follow=False unsafe_writes=False section_has_values=None values=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:22:42 compute-0 sudo[51739]: pam_unix(sudo:session): session closed for user root
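The four ini_file tasks in this stretch converge on a single file; after they run, /etc/containers/containers.conf should contain at least these settings, taken directly from the logged module arguments:

    [containers]
    pids_limit = 4096

    [engine]
    events_logger = "journald"
    runtime = "crun"

    [network]
    network_backend = "netavark"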
Oct 11 01:22:43 compute-0 sudo[51891]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yvcnocniearnwthgwgjbjzhadlbufwvl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145762.8884594-141-89797652885210/AnsiballZ_dnf.py'
Oct 11 01:22:43 compute-0 sudo[51891]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:43 compute-0 python3.9[51893]: ansible-ansible.legacy.dnf Invoked with name=['openssh-server'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:22:44 compute-0 sudo[51891]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:45 compute-0 sudo[52044]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xmfjzceruasjjuhnqocxiltbmvngqenm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145765.1943014-152-25823662134782/AnsiballZ_setup.py'
Oct 11 01:22:45 compute-0 sudo[52044]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:45 compute-0 python3.9[52046]: ansible-setup Invoked with gather_subset=['!all', '!min', 'distribution', 'distribution_major_version', 'distribution_version', 'os_family'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:22:45 compute-0 sudo[52044]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:46 compute-0 sudo[52198]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hpbnmwyukayxjkhsrhtdwhdhnmvbdiuz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145766.2382474-160-32680390778448/AnsiballZ_stat.py'
Oct 11 01:22:46 compute-0 sudo[52198]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:46 compute-0 python3.9[52200]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:22:46 compute-0 sudo[52198]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:47 compute-0 sudo[52350]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yqdeakacabjjuahvqdiyxfkdngjpymsg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145767.1476946-169-277394050301738/AnsiballZ_stat.py'
Oct 11 01:22:47 compute-0 sudo[52350]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:47 compute-0 python3.9[52352]: ansible-stat Invoked with path=/sbin/transactional-update follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:22:47 compute-0 sudo[52350]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:48 compute-0 sudo[52502]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kdpgikmpibadcituujbdkzicjoihddpo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145768.0636845-179-9938733381603/AnsiballZ_service_facts.py'
Oct 11 01:22:48 compute-0 sudo[52502]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:48 compute-0 python3.9[52504]: ansible-service_facts Invoked
Oct 11 01:22:48 compute-0 network[52521]: You are using 'network' service provided by 'network-scripts', which are now deprecated.
Oct 11 01:22:48 compute-0 network[52522]: 'network-scripts' will be removed from distribution in near future.
Oct 11 01:22:48 compute-0 network[52523]: It is advised to switch to 'NetworkManager' instead for network management.
Oct 11 01:22:52 compute-0 sudo[52502]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:53 compute-0 sudo[52808]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-plugqcuptrdyghbesltcflbrzesnakbe ; /bin/bash /home/zuul/.ansible/tmp/ansible-tmp-1760145773.4677396-192-191067120153507/AnsiballZ_timesync_provider.sh /home/zuul/.ansible/tmp/ansible-tmp-1760145773.4677396-192-191067120153507/args'
Oct 11 01:22:53 compute-0 sudo[52808]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:54 compute-0 sudo[52808]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:54 compute-0 sudo[52975]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-seedxzqrhzqsmihrnallxgeaadisgmdf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145774.4596734-203-246435081606185/AnsiballZ_dnf.py'
Oct 11 01:22:54 compute-0 sudo[52975]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:55 compute-0 python3.9[52977]: ansible-ansible.legacy.dnf Invoked with name=['chrony'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:22:56 compute-0 sudo[52975]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:57 compute-0 sudo[53128]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kfycsyhfxvsqrknghvieiuqenaoatozj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145776.6237745-216-152033889463489/AnsiballZ_package_facts.py'
Oct 11 01:22:57 compute-0 sudo[53128]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:57 compute-0 python3.9[53130]: ansible-package_facts Invoked with manager=['auto'] strategy=first
Oct 11 01:22:57 compute-0 sudo[53128]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:58 compute-0 sudo[53280]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pnyuuikjaidztbtumjeqxbtipkcpdgmm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145778.3302114-226-186358854423117/AnsiballZ_stat.py'
Oct 11 01:22:58 compute-0 sudo[53280]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:58 compute-0 python3.9[53282]: ansible-ansible.legacy.stat Invoked with path=/etc/chrony.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:22:59 compute-0 sudo[53280]: pam_unix(sudo:session): session closed for user root
Oct 11 01:22:59 compute-0 sudo[53405]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ihdarmroazkxosnwelkmwxztgrpxxuzl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145778.3302114-226-186358854423117/AnsiballZ_copy.py'
Oct 11 01:22:59 compute-0 sudo[53405]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:22:59 compute-0 python3.9[53407]: ansible-ansible.legacy.copy Invoked with backup=True dest=/etc/chrony.conf mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760145778.3302114-226-186358854423117/.source.conf follow=False _original_basename=chrony.conf.j2 checksum=cfb003e56d02d0d2c65555452eb1a05073fecdad force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:22:59 compute-0 sudo[53405]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:00 compute-0 sudo[53559]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qdlvxtwkvrmsjeqhkklwrdquedgpmvir ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145780.020607-241-14135028423027/AnsiballZ_stat.py'
Oct 11 01:23:00 compute-0 sudo[53559]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:00 compute-0 python3.9[53561]: ansible-ansible.legacy.stat Invoked with path=/etc/sysconfig/chronyd follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:23:00 compute-0 sudo[53559]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:01 compute-0 sudo[53684]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jcurzvgioqbjzeanmcwrafeylpnmzsjj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145780.020607-241-14135028423027/AnsiballZ_copy.py'
Oct 11 01:23:01 compute-0 sudo[53684]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:01 compute-0 python3.9[53686]: ansible-ansible.legacy.copy Invoked with backup=True dest=/etc/sysconfig/chronyd mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760145780.020607-241-14135028423027/.source follow=False _original_basename=chronyd.sysconfig.j2 checksum=dd196b1ff1f915b23eebc37ec77405b5dd3df76c force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:23:01 compute-0 sudo[53684]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:02 compute-0 sudo[53838]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fbbgbqgaefutsgbsynjtnfyhebnqcorn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145781.8773606-262-99925463883790/AnsiballZ_lineinfile.py'
Oct 11 01:23:02 compute-0 sudo[53838]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:02 compute-0 python3.9[53840]: ansible-lineinfile Invoked with backup=True create=True dest=/etc/sysconfig/network line=PEERNTP=no mode=0644 regexp=^PEERNTP= state=present path=/etc/sysconfig/network backrefs=False firstmatch=False unsafe_writes=False search_string=None insertafter=None insertbefore=None validate=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:23:02 compute-0 sudo[53838]: pam_unix(sudo:session): session closed for user root
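The lineinfile task boils down to ensuring one line in /etc/sysconfig/network:

    PEERNTP=no

which keeps the DHCP client hooks from feeding DHCP-supplied NTP servers to chronyd and overriding the chrony.conf written a moment earlier.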
Oct 11 01:23:03 compute-0 sudo[53992]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dpkdlvzatooaxiolhfkluwlcswzgpypq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145783.1878903-277-39932008991162/AnsiballZ_setup.py'
Oct 11 01:23:03 compute-0 sudo[53992]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:03 compute-0 python3.9[53994]: ansible-ansible.legacy.setup Invoked with gather_subset=['!all'] filter=['ansible_service_mgr'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:23:04 compute-0 sudo[53992]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:04 compute-0 sudo[54076]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mwcijsrvyvhxyqmlebwjgpuwcutiezdl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145783.1878903-277-39932008991162/AnsiballZ_systemd.py'
Oct 11 01:23:04 compute-0 sudo[54076]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:05 compute-0 python3.9[54078]: ansible-ansible.legacy.systemd Invoked with enabled=True name=chronyd state=started daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:23:05 compute-0 sudo[54076]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:05 compute-0 sudo[54230]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xuwpxykabgzfurwlcmpwlxzzphyqtzlg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145785.576844-293-240026512099425/AnsiballZ_setup.py'
Oct 11 01:23:05 compute-0 sudo[54230]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:06 compute-0 python3.9[54232]: ansible-ansible.legacy.setup Invoked with gather_subset=['!all'] filter=['ansible_service_mgr'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:23:06 compute-0 sudo[54230]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:06 compute-0 sudo[54314]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-efhqnthnmwwbnejjkvehdgvvpzguitqv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145785.576844-293-240026512099425/AnsiballZ_systemd.py'
Oct 11 01:23:06 compute-0 sudo[54314]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:07 compute-0 python3.9[54316]: ansible-ansible.legacy.systemd Invoked with name=chronyd state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:23:07 compute-0 chronyd[784]: chronyd exiting
Oct 11 01:23:07 compute-0 systemd[1]: Stopping NTP client/server...
Oct 11 01:23:07 compute-0 systemd[1]: chronyd.service: Deactivated successfully.
Oct 11 01:23:07 compute-0 systemd[1]: Stopped NTP client/server.
Oct 11 01:23:07 compute-0 systemd[1]: Starting NTP client/server...
Oct 11 01:23:07 compute-0 chronyd[54325]: chronyd version 4.6.1 starting (+CMDMON +NTP +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +ASYNCDNS +NTS +SECHASH +IPV6 +DEBUG)
Oct 11 01:23:07 compute-0 chronyd[54325]: Frequency -27.834 +/- 0.592 ppm read from /var/lib/chrony/drift
Oct 11 01:23:07 compute-0 chronyd[54325]: Loaded seccomp filter (level 2)
Oct 11 01:23:07 compute-0 systemd[1]: Started NTP client/server.
Oct 11 01:23:07 compute-0 sudo[54314]: pam_unix(sudo:session): session closed for user root
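chronyd comes back up with the new configuration, preserving the frequency estimate from /var/lib/chrony/drift. A quick post-restart sanity check, sketched with the chronyc utility that ships in the chrony package installed above:

    import subprocess

    # "chronyc tracking" summarizes the selected source, offset and skew.
    print(subprocess.run(["chronyc", "tracking"],
                         capture_output=True, text=True, check=True).stdout)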
Oct 11 01:23:07 compute-0 sshd-session[49526]: Connection closed by 192.168.122.30 port 35870
Oct 11 01:23:07 compute-0 sshd-session[49523]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:23:07 compute-0 systemd[1]: session-11.scope: Deactivated successfully.
Oct 11 01:23:07 compute-0 systemd[1]: session-11.scope: Consumed 31.518s CPU time.
Oct 11 01:23:07 compute-0 systemd-logind[804]: Session 11 logged out. Waiting for processes to exit.
Oct 11 01:23:07 compute-0 systemd-logind[804]: Removed session 11.
Oct 11 01:23:12 compute-0 sshd-session[54351]: Accepted publickey for zuul from 192.168.122.30 port 45206 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:23:12 compute-0 systemd-logind[804]: New session 12 of user zuul.
Oct 11 01:23:13 compute-0 systemd[1]: Started Session 12 of User zuul.
Oct 11 01:23:13 compute-0 sshd-session[54351]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:23:13 compute-0 sudo[54504]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cxkuccrwgkuqxbbfzgsncfbogypbpltv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145793.1380916-22-69120391257344/AnsiballZ_file.py'
Oct 11 01:23:13 compute-0 sudo[54504]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:13 compute-0 python3.9[54506]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:23:14 compute-0 sudo[54504]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:14 compute-0 sudo[54656]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wbbuineyhgearhlehubsowiijaymgvna ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145794.1928365-34-273690861657179/AnsiballZ_stat.py'
Oct 11 01:23:14 compute-0 sudo[54656]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:14 compute-0 python3.9[54658]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/ceph-networks.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:23:14 compute-0 sudo[54656]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:15 compute-0 sudo[54779]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dfnusluqwmyaeujwirjdknmwmsvprlvs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145794.1928365-34-273690861657179/AnsiballZ_copy.py'
Oct 11 01:23:15 compute-0 sudo[54779]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:15 compute-0 python3.9[54781]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/edpm-config/firewall/ceph-networks.yaml mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760145794.1928365-34-273690861657179/.source.yaml follow=False _original_basename=firewall.yaml.j2 checksum=729ea8396013e3343245d6e934e0dcef55029ad2 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:23:15 compute-0 sudo[54779]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:16 compute-0 sshd-session[54354]: Connection closed by 192.168.122.30 port 45206
Oct 11 01:23:16 compute-0 sshd-session[54351]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:23:16 compute-0 systemd[1]: session-12.scope: Deactivated successfully.
Oct 11 01:23:16 compute-0 systemd[1]: session-12.scope: Consumed 2.235s CPU time.
Oct 11 01:23:16 compute-0 systemd-logind[804]: Session 12 logged out. Waiting for processes to exit.
Oct 11 01:23:16 compute-0 systemd-logind[804]: Removed session 12.
Oct 11 01:23:22 compute-0 sshd-session[54806]: Accepted publickey for zuul from 192.168.122.30 port 55892 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:23:22 compute-0 systemd-logind[804]: New session 13 of user zuul.
Oct 11 01:23:22 compute-0 systemd[1]: Started Session 13 of User zuul.
Oct 11 01:23:22 compute-0 sshd-session[54806]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:23:23 compute-0 python3.9[54959]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:23:24 compute-0 sudo[55113]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tfgycqjyhglyqlzzvnfsmukkroyhrpzq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145803.9191093-33-89231565628946/AnsiballZ_file.py'
Oct 11 01:23:24 compute-0 sudo[55113]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:24 compute-0 python3.9[55115]: ansible-ansible.builtin.file Invoked with group=zuul mode=0770 owner=zuul path=/root/.config/containers recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:23:24 compute-0 sudo[55113]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:25 compute-0 sudo[55288]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qacbpkmdsipjlttmsjhsrgsaozalntzj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145804.9824421-41-159299565206185/AnsiballZ_stat.py'
Oct 11 01:23:25 compute-0 sudo[55288]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:25 compute-0 python3.9[55290]: ansible-ansible.legacy.stat Invoked with path=/root/.config/containers/auth.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:23:25 compute-0 sudo[55288]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:26 compute-0 sudo[55411]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-duipkrhdpmfmvweciqlkwglehtizzvfd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145804.9824421-41-159299565206185/AnsiballZ_copy.py'
Oct 11 01:23:26 compute-0 sudo[55411]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:26 compute-0 python3.9[55413]: ansible-ansible.legacy.copy Invoked with dest=/root/.config/containers/auth.json group=zuul mode=0660 owner=zuul src=/home/zuul/.ansible/tmp/ansible-tmp-1760145804.9824421-41-159299565206185/.source.json _original_basename=.swad87cn follow=False checksum=bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:23:26 compute-0 sudo[55411]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:27 compute-0 sudo[55563]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-naajltyprbpfzlqvmhrmjzozchvtjfnz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145807.033018-64-248976307218774/AnsiballZ_stat.py'
Oct 11 01:23:27 compute-0 sudo[55563]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:27 compute-0 python3.9[55565]: ansible-ansible.legacy.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:23:27 compute-0 sudo[55563]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:28 compute-0 sudo[55686]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hwvqkcifjxoxtrwgbeeutxvuhgvwnzeh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145807.033018-64-248976307218774/AnsiballZ_copy.py'
Oct 11 01:23:28 compute-0 sudo[55686]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:28 compute-0 python3.9[55688]: ansible-ansible.legacy.copy Invoked with dest=/etc/sysconfig/podman_drop_in mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760145807.033018-64-248976307218774/.source _original_basename=.zu_f8gkt follow=False checksum=125299ce8dea7711a76292961206447f0043248b backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:23:28 compute-0 sudo[55686]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:28 compute-0 sudo[55838]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jivfbfcsixitqdhjtqeazoywqtlccdiv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145808.4454694-80-273543398389970/AnsiballZ_file.py'
Oct 11 01:23:28 compute-0 sudo[55838]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:28 compute-0 python3.9[55840]: ansible-ansible.builtin.file Invoked with path=/var/local/libexec recurse=True setype=container_file_t state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:23:29 compute-0 sudo[55838]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:29 compute-0 sudo[55990]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eetlasfxkdgpxqcskhflnszxlwbxtlsi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145809.241057-88-232151176418557/AnsiballZ_stat.py'
Oct 11 01:23:29 compute-0 sudo[55990]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:29 compute-0 python3.9[55992]: ansible-ansible.legacy.stat Invoked with path=/var/local/libexec/edpm-container-shutdown follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:23:29 compute-0 sudo[55990]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:30 compute-0 sudo[56113]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cwfcavilysyhrwtvulbumalswrautojd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145809.241057-88-232151176418557/AnsiballZ_copy.py'
Oct 11 01:23:30 compute-0 sudo[56113]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:30 compute-0 python3.9[56115]: ansible-ansible.legacy.copy Invoked with dest=/var/local/libexec/edpm-container-shutdown group=root mode=0700 owner=root setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760145809.241057-88-232151176418557/.source _original_basename=edpm-container-shutdown follow=False checksum=632c3792eb3dce4288b33ae7b265b71950d69f13 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:23:30 compute-0 sudo[56113]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:31 compute-0 sudo[56265]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-irjcjyguqrdirjrjaagavxlyowkccxfo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145810.8275104-88-11590089189615/AnsiballZ_stat.py'
Oct 11 01:23:31 compute-0 sudo[56265]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:31 compute-0 python3.9[56267]: ansible-ansible.legacy.stat Invoked with path=/var/local/libexec/edpm-start-podman-container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:23:31 compute-0 sudo[56265]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:32 compute-0 sudo[56388]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bwxslqdxqnvjzhpzeehzvzamnangdngs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145810.8275104-88-11590089189615/AnsiballZ_copy.py'
Oct 11 01:23:32 compute-0 sudo[56388]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:32 compute-0 python3.9[56390]: ansible-ansible.legacy.copy Invoked with dest=/var/local/libexec/edpm-start-podman-container group=root mode=0700 owner=root setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760145810.8275104-88-11590089189615/.source _original_basename=edpm-start-podman-container follow=False checksum=b963c569d75a655c0ccae95d9bb4a2a9a4df27d1 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:23:32 compute-0 sudo[56388]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:32 compute-0 sudo[56540]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-skckwunsegiwtjoocadwiudyolqaqolf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145812.6084545-117-158706293234205/AnsiballZ_file.py'
Oct 11 01:23:32 compute-0 sudo[56540]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:33 compute-0 python3.9[56542]: ansible-ansible.builtin.file Invoked with mode=420 path=/etc/systemd/system-preset state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:23:33 compute-0 sudo[56540]: pam_unix(sudo:session): session closed for user root
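A detail worth flagging in that file task: mode=420 is not a typo. An unquoted 0644 in YAML parses as an octal integer, and the journal prints it back in decimal; the permission bits are identical:

    print(oct(420))  # -> '0o644', i.e. the familiar rw-r--r--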
Oct 11 01:23:33 compute-0 sudo[56692]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-upwysdhkvqghyeeeyktvqkptjyhjwmmc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145813.4607375-125-248913589451105/AnsiballZ_stat.py'
Oct 11 01:23:33 compute-0 sudo[56692]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:34 compute-0 python3.9[56694]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/edpm-container-shutdown.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:23:34 compute-0 sudo[56692]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:34 compute-0 sudo[56815]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-clyuzssuugdxxuraxduittvphdwifzon ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145813.4607375-125-248913589451105/AnsiballZ_copy.py'
Oct 11 01:23:34 compute-0 sudo[56815]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:34 compute-0 python3.9[56817]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/edpm-container-shutdown.service group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145813.4607375-125-248913589451105/.source.service _original_basename=edpm-container-shutdown-service follow=False checksum=6336835cb0f888670cc99de31e19c8c071444d33 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:23:34 compute-0 sudo[56815]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:35 compute-0 sudo[56967]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bmlniuryxgtbjfxuqnbeicgtoiujozqy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145815.0164936-140-46502124452586/AnsiballZ_stat.py'
Oct 11 01:23:35 compute-0 sudo[56967]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:35 compute-0 python3.9[56969]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system-preset/91-edpm-container-shutdown.preset follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:23:35 compute-0 sudo[56967]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:36 compute-0 sudo[57090]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ryysbzbzbnkgxddwmexcyacvjupktpph ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145815.0164936-140-46502124452586/AnsiballZ_copy.py'
Oct 11 01:23:36 compute-0 sudo[57090]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:36 compute-0 python3.9[57092]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system-preset/91-edpm-container-shutdown.preset group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145815.0164936-140-46502124452586/.source.preset _original_basename=91-edpm-container-shutdown-preset follow=False checksum=b275e4375287528cb63464dd32f622c4f142a915 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:23:36 compute-0 sudo[57090]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:37 compute-0 sudo[57242]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xtwdexiktidgnhvdmfxuapenqqsvznze ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145816.559191-155-79074554385213/AnsiballZ_systemd.py'
Oct 11 01:23:37 compute-0 sudo[57242]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:37 compute-0 python3.9[57244]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=edpm-container-shutdown state=started daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:23:37 compute-0 systemd[1]: Reloading.
Oct 11 01:23:37 compute-0 systemd-sysv-generator[57275]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:23:37 compute-0 systemd-rc-local-generator[57272]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:23:37 compute-0 systemd[1]: Reloading.
Oct 11 01:23:38 compute-0 systemd-rc-local-generator[57310]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:23:38 compute-0 systemd-sysv-generator[57314]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:23:38 compute-0 systemd[1]: Starting EDPM Container Shutdown...
Oct 11 01:23:38 compute-0 systemd[1]: Finished EDPM Container Shutdown.
Oct 11 01:23:38 compute-0 sudo[57242]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:38 compute-0 sudo[57470]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qiudwkoodstzjjtyobmwmrdskggdnomt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145818.5005028-163-16240409455301/AnsiballZ_stat.py'
Oct 11 01:23:38 compute-0 sudo[57470]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:39 compute-0 python3.9[57472]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/netns-placeholder.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:23:39 compute-0 sudo[57470]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:39 compute-0 sudo[57593]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qkngijreakatyzehwevursrhmdiiczku ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145818.5005028-163-16240409455301/AnsiballZ_copy.py'
Oct 11 01:23:39 compute-0 sudo[57593]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:39 compute-0 python3.9[57595]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/netns-placeholder.service group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145818.5005028-163-16240409455301/.source.service _original_basename=netns-placeholder-service follow=False checksum=b61b1b5918c20c877b8b226fbf34ff89a082d972 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:23:39 compute-0 sudo[57593]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:40 compute-0 sudo[57745]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pgaiitkplcjkxxnumuydxtadxrpqffdi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145820.1024146-178-152842374430348/AnsiballZ_stat.py'
Oct 11 01:23:40 compute-0 sudo[57745]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:40 compute-0 python3.9[57747]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system-preset/91-netns-placeholder.preset follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:23:40 compute-0 sudo[57745]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:41 compute-0 sudo[57868]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jdbrefohampvhyhidfangcjillclfhay ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145820.1024146-178-152842374430348/AnsiballZ_copy.py'
Oct 11 01:23:41 compute-0 sudo[57868]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:41 compute-0 python3.9[57870]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system-preset/91-netns-placeholder.preset group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145820.1024146-178-152842374430348/.source.preset _original_basename=91-netns-placeholder-preset follow=False checksum=28b7b9aa893525d134a1eeda8a0a48fb25b736b9 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:23:41 compute-0 sudo[57868]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:42 compute-0 sudo[58020]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hpiepgonfwdmgxfqquihzxdyzfqxdzen ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145821.7888198-193-186039263835564/AnsiballZ_systemd.py'
Oct 11 01:23:42 compute-0 sudo[58020]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:42 compute-0 python3.9[58022]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=netns-placeholder state=started daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:23:42 compute-0 systemd[1]: Reloading.
Oct 11 01:23:42 compute-0 systemd-sysv-generator[58052]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:23:42 compute-0 systemd-rc-local-generator[58046]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:23:42 compute-0 systemd[1]: Reloading.
Oct 11 01:23:43 compute-0 systemd-rc-local-generator[58086]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:23:43 compute-0 systemd-sysv-generator[58090]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:23:43 compute-0 systemd[1]: Starting Create netns directory...
Oct 11 01:23:43 compute-0 systemd[1]: run-netns-placeholder.mount: Deactivated successfully.
Oct 11 01:23:43 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Oct 11 01:23:43 compute-0 systemd[1]: Finished Create netns directory.
Oct 11 01:23:43 compute-0 sudo[58020]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:44 compute-0 python3.9[58247]: ansible-ansible.builtin.service_facts Invoked
Oct 11 01:23:44 compute-0 network[58264]: You are using 'network' service provided by 'network-scripts', which are now deprecated.
Oct 11 01:23:44 compute-0 network[58265]: 'network-scripts' will be removed from distribution in near future.
Oct 11 01:23:44 compute-0 network[58266]: It is advised to switch to 'NetworkManager' instead for network management.
Oct 11 01:23:48 compute-0 sudo[58528]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cgxrukulwjlhwbavketjroiredbuzkwy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145828.3669336-209-191071525122988/AnsiballZ_systemd.py'
Oct 11 01:23:48 compute-0 sudo[58528]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:49 compute-0 python3.9[58530]: ansible-ansible.builtin.systemd Invoked with enabled=False name=iptables.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:23:49 compute-0 systemd[1]: Reloading.
Oct 11 01:23:49 compute-0 systemd-sysv-generator[58562]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:23:49 compute-0 systemd-rc-local-generator[58557]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:23:49 compute-0 systemd[1]: Stopping IPv4 firewall with iptables...
Oct 11 01:23:49 compute-0 iptables.init[58569]: iptables: Setting chains to policy ACCEPT: raw mangle filter nat [  OK  ]
Oct 11 01:23:49 compute-0 iptables.init[58569]: iptables: Flushing firewall rules: [  OK  ]
Oct 11 01:23:49 compute-0 systemd[1]: iptables.service: Deactivated successfully.
Oct 11 01:23:49 compute-0 systemd[1]: Stopped IPv4 firewall with iptables.
Oct 11 01:23:49 compute-0 sudo[58528]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:50 compute-0 sudo[58763]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-quivutgbqpfcqpkvmsrwlsmdliudiqnp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145830.0382965-209-124368829366014/AnsiballZ_systemd.py'
Oct 11 01:23:50 compute-0 sudo[58763]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:50 compute-0 python3.9[58765]: ansible-ansible.builtin.systemd Invoked with enabled=False name=ip6tables.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:23:50 compute-0 sudo[58763]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:51 compute-0 sudo[58917]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xktnpcgfaqkkakicnyxukcilaxwafpgn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145831.12244-225-123999646890659/AnsiballZ_systemd.py'
Oct 11 01:23:51 compute-0 sudo[58917]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:51 compute-0 python3.9[58919]: ansible-ansible.builtin.systemd Invoked with enabled=True name=nftables state=started daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:23:51 compute-0 systemd[1]: Reloading.
Oct 11 01:23:52 compute-0 systemd-rc-local-generator[58943]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:23:52 compute-0 systemd-sysv-generator[58949]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:23:52 compute-0 systemd[1]: Starting Netfilter Tables...
Oct 11 01:23:52 compute-0 systemd[1]: Finished Netfilter Tables.
Oct 11 01:23:52 compute-0 sudo[58917]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:53 compute-0 sudo[59109]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nmnsulxtzbtbradmkzukqywpqsqabqqq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145832.5114064-233-103738048739857/AnsiballZ_command.py'
Oct 11 01:23:53 compute-0 sudo[59109]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:53 compute-0 python3.9[59111]: ansible-ansible.legacy.command Invoked with _raw_params=nft flush ruleset _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:23:53 compute-0 sudo[59109]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:54 compute-0 sudo[59262]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jdvzqhgxsgwpfkvbhoemmzzryqilljqx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145833.8561957-247-246401426014420/AnsiballZ_stat.py'
Oct 11 01:23:54 compute-0 sudo[59262]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:54 compute-0 python3.9[59264]: ansible-ansible.legacy.stat Invoked with path=/etc/ssh/sshd_config follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:23:54 compute-0 sudo[59262]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:54 compute-0 sudo[59387]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ksdlgnyzakgqxdxeclixqbvxsaokgvkh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145833.8561957-247-246401426014420/AnsiballZ_copy.py'
Oct 11 01:23:54 compute-0 sudo[59387]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:23:55 compute-0 python3.9[59389]: ansible-ansible.legacy.copy Invoked with dest=/etc/ssh/sshd_config mode=0600 src=/home/zuul/.ansible/tmp/ansible-tmp-1760145833.8561957-247-246401426014420/.source validate=/usr/sbin/sshd -T -f %s follow=False _original_basename=sshd_config_block.j2 checksum=4729b6ffc5b555fa142bf0b6e6dc15609cb89a22 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:23:55 compute-0 sudo[59387]: pam_unix(sudo:session): session closed for user root
Oct 11 01:23:56 compute-0 python3.9[59540]: ansible-ansible.builtin.systemd Invoked with name=sshd state=reloaded daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:23:56 compute-0 polkitd[6240]: Registered Authentication Agent for unix-process:59542:218394 (system bus name :1.522 [/usr/bin/pkttyagent --notify-fd 5 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Oct 11 01:24:21 compute-0 polkitd[6240]: Unregistered Authentication Agent for unix-process:59542:218394 (system bus name :1.522, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Oct 11 01:24:21 compute-0 polkit-agent-helper-1[59554]: pam_unix(polkit-1:auth): conversation failed
Oct 11 01:24:21 compute-0 polkit-agent-helper-1[59554]: pam_unix(polkit-1:auth): auth could not identify password for [root]
Oct 11 01:24:21 compute-0 polkitd[6240]: Operator of unix-process:59542:218394 FAILED to authenticate to gain authorization for action org.freedesktop.systemd1.manage-units for system-bus-name::1.521 [<unknown>] (owned by unix-user:zuul)
Oct 11 01:24:21 compute-0 sshd-session[54809]: Connection closed by 192.168.122.30 port 55892
Oct 11 01:24:21 compute-0 sshd-session[54806]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:24:21 compute-0 systemd[1]: session-13.scope: Deactivated successfully.
Oct 11 01:24:21 compute-0 systemd[1]: session-13.scope: Consumed 25.663s CPU time.
Oct 11 01:24:21 compute-0 systemd-logind[804]: Session 13 logged out. Waiting for processes to exit.
Oct 11 01:24:21 compute-0 systemd-logind[804]: Removed session 13.
Oct 11 01:24:33 compute-0 sshd-session[59580]: Accepted publickey for zuul from 192.168.122.30 port 51398 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:24:33 compute-0 systemd-logind[804]: New session 14 of user zuul.
Oct 11 01:24:33 compute-0 systemd[1]: Started Session 14 of User zuul.
Oct 11 01:24:33 compute-0 sshd-session[59580]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:24:35 compute-0 python3.9[59733]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:24:36 compute-0 sudo[59887]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bupjewngwxsfbpkavfwctmcdrotqvunj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145875.63854-33-113114323209748/AnsiballZ_file.py'
Oct 11 01:24:36 compute-0 sudo[59887]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:36 compute-0 python3.9[59889]: ansible-ansible.builtin.file Invoked with group=zuul mode=0770 owner=zuul path=/root/.config/containers recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:24:36 compute-0 sudo[59887]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:37 compute-0 sudo[60062]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jzedisekacztztfzppabevusyisistod ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145876.6232855-41-244536720034674/AnsiballZ_stat.py'
Oct 11 01:24:37 compute-0 sudo[60062]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:37 compute-0 python3.9[60064]: ansible-ansible.legacy.stat Invoked with path=/root/.config/containers/auth.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:24:37 compute-0 sudo[60062]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:37 compute-0 sudo[60140]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yvzswfvspxognvnidhltaerwdfaqaoaq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145876.6232855-41-244536720034674/AnsiballZ_file.py'
Oct 11 01:24:37 compute-0 sudo[60140]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:38 compute-0 python3.9[60142]: ansible-ansible.legacy.file Invoked with group=zuul mode=0660 owner=zuul dest=/root/.config/containers/auth.json _original_basename=.fyi39ec7 recurse=False state=file path=/root/.config/containers/auth.json force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:24:38 compute-0 sudo[60140]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:38 compute-0 sudo[60292]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lgczsmbjmrfuwqvukbuwdcweejuuknju ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145878.4625864-61-101287747980231/AnsiballZ_stat.py'
Oct 11 01:24:38 compute-0 sudo[60292]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:39 compute-0 python3.9[60294]: ansible-ansible.legacy.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:24:39 compute-0 sudo[60292]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:39 compute-0 sudo[60370]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lrnhdgvpvcpqhhdsillcscuyufesmvji ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145878.4625864-61-101287747980231/AnsiballZ_file.py'
Oct 11 01:24:39 compute-0 sudo[60370]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:39 compute-0 python3.9[60372]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/etc/sysconfig/podman_drop_in _original_basename=.ghbuhk9x recurse=False state=file path=/etc/sysconfig/podman_drop_in force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:24:39 compute-0 sudo[60370]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:40 compute-0 sudo[60522]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nbvrzeebvtqnnslraaschqglbiiqfnkj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145880.0480106-74-182155505461545/AnsiballZ_file.py'
Oct 11 01:24:40 compute-0 sudo[60522]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:40 compute-0 python3.9[60524]: ansible-ansible.builtin.file Invoked with path=/var/local/libexec recurse=True setype=container_file_t state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:24:40 compute-0 sudo[60522]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:41 compute-0 sudo[60674]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-exxnthklabpoixfgixqwoqhfwvmczvyu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145880.8253605-82-138650316330492/AnsiballZ_stat.py'
Oct 11 01:24:41 compute-0 sudo[60674]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:41 compute-0 python3.9[60676]: ansible-ansible.legacy.stat Invoked with path=/var/local/libexec/edpm-container-shutdown follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:24:41 compute-0 sudo[60674]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:41 compute-0 sudo[60754]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cadgctovoixledtyrbzaocfdewdsugrg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145880.8253605-82-138650316330492/AnsiballZ_file.py'
Oct 11 01:24:41 compute-0 sudo[60754]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:41 compute-0 python3.9[60756]: ansible-ansible.legacy.file Invoked with group=root mode=0700 owner=root setype=container_file_t dest=/var/local/libexec/edpm-container-shutdown _original_basename=edpm-container-shutdown recurse=False state=file path=/var/local/libexec/edpm-container-shutdown force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:24:41 compute-0 sudo[60754]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:42 compute-0 sshd-session[60677]: Invalid user ubuntu from 103.14.32.75 port 41218
Oct 11 01:24:42 compute-0 sshd-session[60677]: Received disconnect from 103.14.32.75 port 41218:11:  [preauth]
Oct 11 01:24:42 compute-0 sshd-session[60677]: Disconnected from invalid user ubuntu 103.14.32.75 port 41218 [preauth]
Oct 11 01:24:42 compute-0 sudo[60906]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ppwtrxddpugddxwdyqodiozceqowaufh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145882.1439388-82-267884563880075/AnsiballZ_stat.py'
Oct 11 01:24:42 compute-0 sudo[60906]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:42 compute-0 python3.9[60908]: ansible-ansible.legacy.stat Invoked with path=/var/local/libexec/edpm-start-podman-container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:24:42 compute-0 sudo[60906]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:43 compute-0 sudo[60984]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-shjozbksstwxovfefvyrzmidnvramldm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145882.1439388-82-267884563880075/AnsiballZ_file.py'
Oct 11 01:24:43 compute-0 sudo[60984]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:43 compute-0 python3.9[60986]: ansible-ansible.legacy.file Invoked with group=root mode=0700 owner=root setype=container_file_t dest=/var/local/libexec/edpm-start-podman-container _original_basename=edpm-start-podman-container recurse=False state=file path=/var/local/libexec/edpm-start-podman-container force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:24:43 compute-0 sudo[60984]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:43 compute-0 sudo[61136]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bnimryupdygwarwcvszuqjxwttowsvig ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145883.5887659-105-12934288835056/AnsiballZ_file.py'
Oct 11 01:24:43 compute-0 sudo[61136]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:44 compute-0 python3.9[61138]: ansible-ansible.builtin.file Invoked with mode=420 path=/etc/systemd/system-preset state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:24:44 compute-0 sudo[61136]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:44 compute-0 sudo[61288]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ygbxjzeqcxijypvdcmmtolmnpcymzqzj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145884.471951-113-215016080156396/AnsiballZ_stat.py'
Oct 11 01:24:44 compute-0 sudo[61288]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:45 compute-0 python3.9[61290]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/edpm-container-shutdown.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:24:45 compute-0 sudo[61288]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:45 compute-0 sudo[61366]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kajvirchojsgsydilymcccwhjlwrssmt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145884.471951-113-215016080156396/AnsiballZ_file.py'
Oct 11 01:24:45 compute-0 sudo[61366]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:45 compute-0 python3.9[61368]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/edpm-container-shutdown.service _original_basename=edpm-container-shutdown-service recurse=False state=file path=/etc/systemd/system/edpm-container-shutdown.service force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:24:45 compute-0 sudo[61366]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:46 compute-0 sudo[61518]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jkfcutoarojzghwcwxmytnrfgmcohevy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145885.7933445-125-244993773127983/AnsiballZ_stat.py'
Oct 11 01:24:46 compute-0 sudo[61518]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:46 compute-0 python3.9[61520]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system-preset/91-edpm-container-shutdown.preset follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:24:46 compute-0 sudo[61518]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:46 compute-0 sudo[61596]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pqvbjubcmvhmggergisjbhfimmbksgoi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145885.7933445-125-244993773127983/AnsiballZ_file.py'
Oct 11 01:24:46 compute-0 sudo[61596]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:47 compute-0 python3.9[61598]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system-preset/91-edpm-container-shutdown.preset _original_basename=91-edpm-container-shutdown-preset recurse=False state=file path=/etc/systemd/system-preset/91-edpm-container-shutdown.preset force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:24:47 compute-0 sudo[61596]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:47 compute-0 sudo[61748]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fijqjonjrtllwvbjaguiheufqezwuldx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145887.2905223-137-73793617928303/AnsiballZ_systemd.py'
Oct 11 01:24:47 compute-0 sudo[61748]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:48 compute-0 python3.9[61750]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=edpm-container-shutdown state=started daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:24:48 compute-0 systemd[1]: Reloading.
Oct 11 01:24:48 compute-0 systemd-sysv-generator[61783]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:24:48 compute-0 systemd-rc-local-generator[61777]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:24:48 compute-0 sudo[61748]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:49 compute-0 sudo[61939]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fqzegchchdvxkgwrvrietfemdxvrsygd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145888.8585536-145-47777360286770/AnsiballZ_stat.py'
Oct 11 01:24:49 compute-0 sudo[61939]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:49 compute-0 python3.9[61941]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/netns-placeholder.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:24:49 compute-0 sudo[61939]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:49 compute-0 sudo[62017]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nsaixovyxirjyosgzxcyowtggkwfcwdw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145888.8585536-145-47777360286770/AnsiballZ_file.py'
Oct 11 01:24:49 compute-0 sudo[62017]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:49 compute-0 python3.9[62019]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/netns-placeholder.service _original_basename=netns-placeholder-service recurse=False state=file path=/etc/systemd/system/netns-placeholder.service force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:24:50 compute-0 sudo[62017]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:50 compute-0 sudo[62169]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tiijadhqsuazmzahbgohfamuyjqaqdzl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145890.2112164-157-138261257430773/AnsiballZ_stat.py'
Oct 11 01:24:50 compute-0 sudo[62169]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:50 compute-0 python3.9[62171]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system-preset/91-netns-placeholder.preset follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:24:50 compute-0 sudo[62169]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:51 compute-0 sudo[62247]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qiiiwvnmpxvmoxbzcnrdufxzwhsogedl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145890.2112164-157-138261257430773/AnsiballZ_file.py'
Oct 11 01:24:51 compute-0 sudo[62247]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:51 compute-0 python3.9[62249]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system-preset/91-netns-placeholder.preset _original_basename=91-netns-placeholder-preset recurse=False state=file path=/etc/systemd/system-preset/91-netns-placeholder.preset force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:24:51 compute-0 sudo[62247]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:52 compute-0 sudo[62399]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mfsmdttfhdaiwlhwtrnfsscjjybvasdi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145891.736424-169-73643769563813/AnsiballZ_systemd.py'
Oct 11 01:24:52 compute-0 sudo[62399]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:52 compute-0 python3.9[62401]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=netns-placeholder state=started daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:24:52 compute-0 systemd[1]: Reloading.
Oct 11 01:24:52 compute-0 systemd-sysv-generator[62430]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:24:52 compute-0 systemd-rc-local-generator[62427]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:24:52 compute-0 systemd[1]: Starting Create netns directory...
Oct 11 01:24:52 compute-0 systemd[1]: run-netns-placeholder.mount: Deactivated successfully.
Oct 11 01:24:52 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Oct 11 01:24:52 compute-0 systemd[1]: Finished Create netns directory.
Oct 11 01:24:52 compute-0 sudo[62399]: pam_unix(sudo:session): session closed for user root
Oct 11 01:24:53 compute-0 python3.9[62592]: ansible-ansible.builtin.service_facts Invoked
Oct 11 01:24:53 compute-0 network[62609]: You are using 'network' service provided by 'network-scripts', which are now deprecated.
Oct 11 01:24:53 compute-0 network[62610]: 'network-scripts' will be removed from distribution in near future.
Oct 11 01:24:53 compute-0 network[62611]: It is advised to switch to 'NetworkManager' instead for network management.
Oct 11 01:24:59 compute-0 sudo[62872]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-isghgdechgnpcbwaubcsfhwtswjkvofj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145899.1679523-195-179407715492884/AnsiballZ_stat.py'
Oct 11 01:24:59 compute-0 sudo[62872]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:24:59 compute-0 python3.9[62874]: ansible-ansible.legacy.stat Invoked with path=/etc/ssh/sshd_config follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:24:59 compute-0 sudo[62872]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:00 compute-0 sudo[62950]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ljuizvmdhrmyrvmmxkzmcczhcpnsvlux ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145899.1679523-195-179407715492884/AnsiballZ_file.py'
Oct 11 01:25:00 compute-0 sudo[62950]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:00 compute-0 python3.9[62952]: ansible-ansible.legacy.file Invoked with mode=0600 dest=/etc/ssh/sshd_config _original_basename=sshd_config_block.j2 recurse=False state=file path=/etc/ssh/sshd_config force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:00 compute-0 sudo[62950]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:00 compute-0 sudo[63102]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-amznxgvczwwhtxoituiqkuulinutjoid ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145900.6030302-208-277475628206917/AnsiballZ_file.py'
Oct 11 01:25:00 compute-0 sudo[63102]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:01 compute-0 python3.9[63104]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:01 compute-0 sudo[63102]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:01 compute-0 sudo[63254]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tuszvvedjrcwtptlkbofsvlckaxzcbrv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145901.3965461-216-142266902226530/AnsiballZ_stat.py'
Oct 11 01:25:01 compute-0 sudo[63254]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:02 compute-0 python3.9[63256]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/sshd-networks.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:25:02 compute-0 sudo[63254]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:02 compute-0 sudo[63377]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wfozavbtgviracqstcrefbdpbpzlxadl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145901.3965461-216-142266902226530/AnsiballZ_copy.py'
Oct 11 01:25:02 compute-0 sudo[63377]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:02 compute-0 python3.9[63379]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/edpm-config/firewall/sshd-networks.yaml group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145901.3965461-216-142266902226530/.source.yaml follow=False _original_basename=firewall.yaml.j2 checksum=0bfc8440fd8f39002ab90252479fb794f51b5ae8 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:03 compute-0 sudo[63377]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:03 compute-0 sudo[63529]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lbtxieshfhtcgzfoccssmmgkueysaaeq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145903.2981098-234-41612144288210/AnsiballZ_timezone.py'
Oct 11 01:25:03 compute-0 sudo[63529]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:04 compute-0 python3.9[63531]: ansible-community.general.timezone Invoked with name=UTC hwclock=None
Oct 11 01:25:04 compute-0 systemd[1]: Starting Time & Date Service...
Oct 11 01:25:04 compute-0 systemd[1]: Started Time & Date Service.
Oct 11 01:25:04 compute-0 sudo[63529]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:04 compute-0 sudo[63685]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jvpavfpbhjekwznedtzvukckhriqyoyt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145904.527424-243-23587542455524/AnsiballZ_file.py'
Oct 11 01:25:04 compute-0 sudo[63685]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:05 compute-0 python3.9[63687]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:05 compute-0 sudo[63685]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:05 compute-0 sudo[63837]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kmcqerqgfanulobyepwjwhjawabmucku ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145905.3385582-251-169354199577632/AnsiballZ_stat.py'
Oct 11 01:25:05 compute-0 sudo[63837]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:05 compute-0 python3.9[63839]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:25:05 compute-0 sudo[63837]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:06 compute-0 sudo[63960]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ucdiygtelyozfjskgwkxavovcgavarnw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145905.3385582-251-169354199577632/AnsiballZ_copy.py'
Oct 11 01:25:06 compute-0 sudo[63960]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:06 compute-0 python3.9[63962]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760145905.3385582-251-169354199577632/.source.yaml follow=False _original_basename=base-rules.yaml.j2 checksum=450456afcafded6d4bdecceec7a02e806eebd8b3 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:06 compute-0 sudo[63960]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:07 compute-0 sudo[64112]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rbkeufakfmbmwfgorszjaslhjkqoqflg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145906.9041076-266-122888082460884/AnsiballZ_stat.py'
Oct 11 01:25:07 compute-0 sudo[64112]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:07 compute-0 python3.9[64114]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:25:07 compute-0 sudo[64112]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:08 compute-0 sudo[64235]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gldovelnhneuhigycxaqjwbbnkrrdlud ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145906.9041076-266-122888082460884/AnsiballZ_copy.py'
Oct 11 01:25:08 compute-0 sudo[64235]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:08 compute-0 python3.9[64237]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760145906.9041076-266-122888082460884/.source.yaml _original_basename=.g0v02v1x follow=False checksum=97d170e1550eee4afc0af065b78cda302a97674c backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:08 compute-0 sudo[64235]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:08 compute-0 sudo[64387]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yvhlvaaysuzknafsmizeteszdryjganq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145908.5052903-281-169748530247171/AnsiballZ_stat.py'
Oct 11 01:25:08 compute-0 sudo[64387]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:09 compute-0 python3.9[64389]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/iptables.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:25:09 compute-0 sudo[64387]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:09 compute-0 sudo[64510]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-smwimkooeajyelicyniyifkvztmhnmwa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145908.5052903-281-169748530247171/AnsiballZ_copy.py'
Oct 11 01:25:09 compute-0 sudo[64510]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:09 compute-0 python3.9[64512]: ansible-ansible.legacy.copy Invoked with dest=/etc/nftables/iptables.nft group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145908.5052903-281-169748530247171/.source.nft _original_basename=iptables.nft follow=False checksum=3e02df08f1f3ab4a513e94056dbd390e3d38fe30 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:09 compute-0 sudo[64510]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:10 compute-0 sudo[64662]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sxznhqmjngqopzmfmmyrnqenkpcvqidf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145910.1161053-296-226228293364529/AnsiballZ_command.py'
Oct 11 01:25:10 compute-0 sudo[64662]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:10 compute-0 python3.9[64664]: ansible-ansible.legacy.command Invoked with _raw_params=nft -f /etc/nftables/iptables.nft _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:25:10 compute-0 sudo[64662]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:11 compute-0 sudo[64815]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-onemdzvsyefpucxkxlmevlwrbhydhgsr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145911.1291897-304-142455182331599/AnsiballZ_command.py'
Oct 11 01:25:11 compute-0 sudo[64815]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:11 compute-0 python3.9[64817]: ansible-ansible.legacy.command Invoked with _raw_params=nft -j list ruleset _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:25:11 compute-0 sudo[64815]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:12 compute-0 sudo[64968]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oqjusdhxuwfpxsbixsdfhupbsfizxort ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760145912.0678895-312-240008631100677/AnsiballZ_edpm_nftables_from_files.py'
Oct 11 01:25:12 compute-0 sudo[64968]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:12 compute-0 python3[64970]: ansible-edpm_nftables_from_files Invoked with src=/var/lib/edpm-config/firewall
Oct 11 01:25:12 compute-0 sudo[64968]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:13 compute-0 sudo[65120]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fcnqtgmvqprmcupdungfwzchovpzvdxb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145913.0558405-320-196849512203096/AnsiballZ_stat.py'
Oct 11 01:25:13 compute-0 sudo[65120]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:13 compute-0 python3.9[65122]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:25:13 compute-0 sudo[65120]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:14 compute-0 sudo[65243]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mjjdxdyffuwhgsfcrpyyeyzaqmdsssim ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145913.0558405-320-196849512203096/AnsiballZ_copy.py'
Oct 11 01:25:14 compute-0 sudo[65243]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:14 compute-0 python3.9[65245]: ansible-ansible.legacy.copy Invoked with dest=/etc/nftables/edpm-jumps.nft group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145913.0558405-320-196849512203096/.source.nft follow=False _original_basename=jump-chain.j2 checksum=4c6f036d2d5808f109acc0880c19aa74ca48c961 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:14 compute-0 sudo[65243]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:14 compute-0 sudo[65395]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vfuqgqoneovuqxfqjbobncpsbgjmhpqt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145914.5549552-335-229114624177484/AnsiballZ_stat.py'
Oct 11 01:25:14 compute-0 sudo[65395]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:15 compute-0 python3.9[65397]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-update-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:25:15 compute-0 sudo[65395]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:15 compute-0 sudo[65518]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jbeqwulxkuegcdgcnydfjfuwybkavreh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145914.5549552-335-229114624177484/AnsiballZ_copy.py'
Oct 11 01:25:15 compute-0 sudo[65518]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:15 compute-0 python3.9[65520]: ansible-ansible.legacy.copy Invoked with dest=/etc/nftables/edpm-update-jumps.nft group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145914.5549552-335-229114624177484/.source.nft follow=False _original_basename=jump-chain.j2 checksum=4c6f036d2d5808f109acc0880c19aa74ca48c961 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:15 compute-0 sudo[65518]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:16 compute-0 sudo[65670]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-igphhqigsgzpybjxflybbkwqmlvsrlqq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145916.1110456-350-208378396515910/AnsiballZ_stat.py'
Oct 11 01:25:16 compute-0 sudo[65670]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:16 compute-0 python3.9[65672]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-flushes.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:25:16 compute-0 sudo[65670]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:17 compute-0 chronyd[54325]: Selected source 198.50.127.72 (pool.ntp.org)
Oct 11 01:25:17 compute-0 sudo[65793]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ckgwikiwfrhkrpkjvqxbofefqpqlzexm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145916.1110456-350-208378396515910/AnsiballZ_copy.py'
Oct 11 01:25:17 compute-0 sudo[65793]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:17 compute-0 python3.9[65795]: ansible-ansible.legacy.copy Invoked with dest=/etc/nftables/edpm-flushes.nft group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145916.1110456-350-208378396515910/.source.nft follow=False _original_basename=flush-chain.j2 checksum=d16337256a56373421842284fe09e4e6c7df417e backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:17 compute-0 sudo[65793]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:18 compute-0 sudo[65947]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gfuhpkmkusvehlhvhqilczkndahxbpqw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145917.768901-365-109051221651928/AnsiballZ_stat.py'
Oct 11 01:25:18 compute-0 sudo[65947]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:18 compute-0 python3.9[65949]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-chains.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:25:18 compute-0 sudo[65947]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:18 compute-0 sshd-session[65919]: Received disconnect from 193.46.255.159 port 22722:11:  [preauth]
Oct 11 01:25:18 compute-0 sshd-session[65919]: Disconnected from authenticating user root 193.46.255.159 port 22722 [preauth]
Oct 11 01:25:18 compute-0 sudo[66070]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hkjmyqpzoihzyumpvgkuadymgeffufnb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145917.768901-365-109051221651928/AnsiballZ_copy.py'
Oct 11 01:25:18 compute-0 sudo[66070]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:19 compute-0 python3.9[66072]: ansible-ansible.legacy.copy Invoked with dest=/etc/nftables/edpm-chains.nft group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145917.768901-365-109051221651928/.source.nft follow=False _original_basename=chains.j2 checksum=2079f3b60590a165d1d502e763170876fc8e2984 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:19 compute-0 sudo[66070]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:19 compute-0 sudo[66222]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zexvrbbzfqlomhzihstsuxqmmfpqoozh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145919.4088511-380-267013803359375/AnsiballZ_stat.py'
Oct 11 01:25:19 compute-0 sudo[66222]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:20 compute-0 python3.9[66224]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-rules.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:25:20 compute-0 sudo[66222]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:20 compute-0 sudo[66345]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-efioumghdxagywypvkofzbpeudhxlawj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145919.4088511-380-267013803359375/AnsiballZ_copy.py'
Oct 11 01:25:20 compute-0 sudo[66345]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:20 compute-0 python3.9[66347]: ansible-ansible.legacy.copy Invoked with dest=/etc/nftables/edpm-rules.nft group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145919.4088511-380-267013803359375/.source.nft follow=False _original_basename=ruleset.j2 checksum=693377dc03e5b6b24713cb537b18b88774724e35 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:20 compute-0 sudo[66345]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:21 compute-0 sudo[66497]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cvehgihvruzgashdpjsztmuywyzvzwgs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145921.0458796-395-186754224964054/AnsiballZ_file.py'
Oct 11 01:25:21 compute-0 sudo[66497]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:21 compute-0 python3.9[66499]: ansible-ansible.builtin.file Invoked with group=root mode=0600 owner=root path=/etc/nftables/edpm-rules.nft.changed state=touch recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:21 compute-0 sudo[66497]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:22 compute-0 sudo[66649]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rexhueolibmcbuotajgmxezrpgjymmzb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145921.9414995-403-851975225370/AnsiballZ_command.py'
Oct 11 01:25:22 compute-0 sudo[66649]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:22 compute-0 python3.9[66651]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail; cat /etc/nftables/edpm-chains.nft /etc/nftables/edpm-flushes.nft /etc/nftables/edpm-rules.nft /etc/nftables/edpm-update-jumps.nft /etc/nftables/edpm-jumps.nft | nft -c -f - _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:25:22 compute-0 sudo[66649]: pam_unix(sudo:session): session closed for user root
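
The command task above dry-runs the full EDPM ruleset before anything touches the live firewall: the five fragment files are concatenated in dependency order and fed to nft in check-only mode. A minimal shell sketch of the same check, using only the file names and flags that appear in the logged command:

    set -o pipefail
    # -c parses and validates the ruleset without applying it; a non-zero
    # exit status fails the Ansible task before the live rules are changed.
    cat /etc/nftables/edpm-chains.nft \
        /etc/nftables/edpm-flushes.nft \
        /etc/nftables/edpm-rules.nft \
        /etc/nftables/edpm-update-jumps.nft \
        /etc/nftables/edpm-jumps.nft | nft -c -f -
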
Oct 11 01:25:23 compute-0 sudo[66808]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bmlpujtmsmmsewldzciwunbxwykiiump ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145922.9378881-411-67324152411971/AnsiballZ_blockinfile.py'
Oct 11 01:25:23 compute-0 sudo[66808]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:23 compute-0 python3.9[66810]: ansible-ansible.builtin.blockinfile Invoked with backup=False block=include "/etc/nftables/iptables.nft"
                                            include "/etc/nftables/edpm-chains.nft"
                                            include "/etc/nftables/edpm-rules.nft"
                                            include "/etc/nftables/edpm-jumps.nft"
                                             path=/etc/sysconfig/nftables.conf validate=nft -c -f %s state=present marker=# {mark} ANSIBLE MANAGED BLOCK create=False marker_begin=BEGIN marker_end=END append_newline=False prepend_newline=False unsafe_writes=False insertafter=None insertbefore=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:23 compute-0 sudo[66808]: pam_unix(sudo:session): session closed for user root
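
The blockinfile task above maintains a marker-delimited include block in /etc/sysconfig/nftables.conf, validated with nft -c -f %s before being written. Reconstructed from the logged block content and markers, the managed section of that file should read:

    # BEGIN ANSIBLE MANAGED BLOCK
    include "/etc/nftables/iptables.nft"
    include "/etc/nftables/edpm-chains.nft"
    include "/etc/nftables/edpm-rules.nft"
    include "/etc/nftables/edpm-jumps.nft"
    # END ANSIBLE MANAGED BLOCK
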
Oct 11 01:25:24 compute-0 sudo[66961]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fuvjigwfzjlqvehdnnphugramyzsqete ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145924.0508323-420-118871839927022/AnsiballZ_file.py'
Oct 11 01:25:24 compute-0 sudo[66961]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:24 compute-0 python3.9[66963]: ansible-ansible.builtin.file Invoked with group=hugetlbfs mode=0775 owner=zuul path=/dev/hugepages1G state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:24 compute-0 sudo[66961]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:25 compute-0 sudo[67113]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ggroxzadocpbpymnxfvrrwomucdjzmdz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145924.8809173-420-202715166775834/AnsiballZ_file.py'
Oct 11 01:25:25 compute-0 sudo[67113]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:25 compute-0 python3.9[67115]: ansible-ansible.builtin.file Invoked with group=hugetlbfs mode=0775 owner=zuul path=/dev/hugepages2M state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:25 compute-0 sudo[67113]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:26 compute-0 sudo[67265]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-duhmjxelvtjvfrxkzhshujmrdohqxnbj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145925.745517-435-221961217657140/AnsiballZ_mount.py'
Oct 11 01:25:26 compute-0 sudo[67265]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:26 compute-0 python3.9[67267]: ansible-ansible.posix.mount Invoked with fstype=hugetlbfs opts=pagesize=1G path=/dev/hugepages1G src=none state=mounted boot=True dump=0 opts_no_log=False passno=0 backup=False fstab=None
Oct 11 01:25:26 compute-0 sudo[67265]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:27 compute-0 sudo[67418]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gubntcssmqrztscsmepbfyessybiksqs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145926.803256-435-100603229470090/AnsiballZ_mount.py'
Oct 11 01:25:27 compute-0 sudo[67418]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:27 compute-0 python3.9[67420]: ansible-ansible.posix.mount Invoked with fstype=hugetlbfs opts=pagesize=2M path=/dev/hugepages2M src=none state=mounted boot=True dump=0 opts_no_log=False passno=0 backup=False fstab=None
Oct 11 01:25:27 compute-0 sudo[67418]: pam_unix(sudo:session): session closed for user root
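
The two file tasks and two ansible.posix.mount tasks above set up persistent 1G and 2M hugepage mounts; state=mounted both mounts the filesystem and records it in /etc/fstab (boot=True, dump and passno 0). A rough shell equivalent, assuming the zuul user and hugetlbfs group from the log already exist:

    # Directories as created by the file tasks (owner zuul, group hugetlbfs, mode 0775).
    install -d -m 0775 -o zuul -g hugetlbfs /dev/hugepages1G /dev/hugepages2M
    # Mounts as performed by ansible.posix.mount (src=none, fstype=hugetlbfs).
    mount -t hugetlbfs -o pagesize=1G none /dev/hugepages1G
    mount -t hugetlbfs -o pagesize=2M none /dev/hugepages2M
    # state=mounted also persists fstab entries roughly equivalent to:
    #   none /dev/hugepages1G hugetlbfs pagesize=1G 0 0
    #   none /dev/hugepages2M hugetlbfs pagesize=2M 0 0
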
Oct 11 01:25:28 compute-0 sshd-session[59583]: Connection closed by 192.168.122.30 port 51398
Oct 11 01:25:28 compute-0 sshd-session[59580]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:25:28 compute-0 systemd[1]: session-14.scope: Deactivated successfully.
Oct 11 01:25:28 compute-0 systemd[1]: session-14.scope: Consumed 41.408s CPU time.
Oct 11 01:25:28 compute-0 systemd-logind[804]: Session 14 logged out. Waiting for processes to exit.
Oct 11 01:25:28 compute-0 systemd-logind[804]: Removed session 14.
Oct 11 01:25:33 compute-0 sshd-session[67446]: Accepted publickey for zuul from 192.168.122.30 port 39418 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:25:33 compute-0 systemd-logind[804]: New session 15 of user zuul.
Oct 11 01:25:33 compute-0 systemd[1]: Started Session 15 of User zuul.
Oct 11 01:25:33 compute-0 sshd-session[67446]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:25:34 compute-0 systemd[1]: systemd-timedated.service: Deactivated successfully.
Oct 11 01:25:34 compute-0 sudo[67601]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cyfwsbkkcyyhwojkomydquvpdvrdiicz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145933.949825-16-206830653339236/AnsiballZ_tempfile.py'
Oct 11 01:25:34 compute-0 sudo[67601]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:34 compute-0 python3.9[67603]: ansible-ansible.builtin.tempfile Invoked with state=file prefix=ansible. suffix= path=None
Oct 11 01:25:34 compute-0 sudo[67601]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:35 compute-0 sudo[67753]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tkjzwnslibdnlyxtwclphwfvwallctyv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145934.9911175-28-132781518040977/AnsiballZ_stat.py'
Oct 11 01:25:35 compute-0 sudo[67753]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:35 compute-0 python3.9[67755]: ansible-ansible.builtin.stat Invoked with path=/etc/ssh/ssh_known_hosts follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:25:35 compute-0 sudo[67753]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:36 compute-0 sudo[67905]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rkpdppambfmvqzzmjhruzjibjzlhxvue ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145936.030473-38-141664859274498/AnsiballZ_setup.py'
Oct 11 01:25:36 compute-0 sudo[67905]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:37 compute-0 python3.9[67907]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'ssh_host_key_rsa_public', 'ssh_host_key_ed25519_public', 'ssh_host_key_ecdsa_public'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:25:37 compute-0 sudo[67905]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:37 compute-0 sudo[68057]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hnpnmsouazpkxwfrbicwusbcjphjgpzx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145937.3174849-47-140966656717827/AnsiballZ_blockinfile.py'
Oct 11 01:25:37 compute-0 sudo[68057]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:38 compute-0 python3.9[68059]: ansible-ansible.builtin.blockinfile Invoked with block=compute-0.ctlplane.example.com,192.168.122.100,compute-0* ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDCIdnNJXEzty4CiVEsQZKTsrvDMbMIfWnM6SmxDoDiaBSKjE0gIdIyszcedOqlaH5lXEVeZ6fus0sCQfkBMbju4l7W6IePjHdglc1yQ217jhLN1W7FgWyQ1JAkOEeNwnCwt5yGoqPA4fqm3z+EQ2MyCrpiYZ3y3GF1AT0EOJY+BlcEteWbn8iSJi/MnErCUbyN2BLQKNR+S5HUIlOztpgfDXDYHBzZXoWzBrL5yUtkr3R2lcz7vpcVZZkkq6xH00zMoBtbqcZWJ0Aj21Luo3oo/wnyBcuD9+hRyj9/C6KnM6jtVxWUo09w3S8IyG9y5GBBJ+uzOZbQ9piGsxnTFwC9B7IRFzx2H+QcKWFK+i+HUSiK1KOvXBF+a/owkP83YlGBYvNoiUq/c+vl31EAdDUtbu/bBHk2N00CRhrKWme2O8A1dc7PNhaW/rUzoZL87Hp49wdreMb5uxWHlA0vUJweOxVVHVwkAKdnLpKyeV6JqLzVvWH2tm5a4G4vhUlLIqM=
                                            compute-0.ctlplane.example.com,192.168.122.100,compute-0* ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGo7eC2eXuR6HNsLolVokRdWYpxD4UNnKEqOwYb+hR8L
                                            compute-0.ctlplane.example.com,192.168.122.100,compute-0* ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKNcUmGzkxmH914R7kTLqjEeDLEAUeCYxOthKxvUCNxgbJBfHePifvSlCMmzlBeBDi+9zUqh+p6KJjBIHHSgO5I=
                                             create=True mode=0644 path=/tmp/ansible.rjrw6fed state=present marker=# {mark} ANSIBLE MANAGED BLOCK backup=False marker_begin=BEGIN marker_end=END append_newline=False prepend_newline=False unsafe_writes=False insertafter=None insertbefore=None validate=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:38 compute-0 sudo[68057]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:38 compute-0 sudo[68209]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bjmqnzdwsitjhoqpdofufkhdouznbzdm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145938.2653334-55-279757945298745/AnsiballZ_command.py'
Oct 11 01:25:38 compute-0 sudo[68209]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:39 compute-0 python3.9[68211]: ansible-ansible.legacy.command Invoked with _raw_params=cat '/tmp/ansible.rjrw6fed' > /etc/ssh/ssh_known_hosts _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:25:39 compute-0 sudo[68209]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:39 compute-0 sudo[68363]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rvhyoqplzitgycafyvfmkfqfsrisokht ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145939.3029914-63-66145772653035/AnsiballZ_file.py'
Oct 11 01:25:39 compute-0 sudo[68363]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:40 compute-0 python3.9[68365]: ansible-ansible.builtin.file Invoked with path=/tmp/ansible.rjrw6fed state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:40 compute-0 sudo[68363]: pam_unix(sudo:session): session closed for user root
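
Session 15 exists only to rebuild the system-wide known_hosts file: a temp file is created, the gathered RSA/Ed25519/ECDSA host keys are written between ANSIBLE MANAGED BLOCK markers, the result is copied over /etc/ssh/ssh_known_hosts, and the temp file is removed. A condensed sketch of that flow; the key material is elided and the echo lines merely stand in for the blockinfile module:

    tmp=$(mktemp /tmp/ansible.XXXXXXXX)
    {
      echo '# BEGIN ANSIBLE MANAGED BLOCK'
      # One line per gathered host key, e.g.:
      # compute-0.ctlplane.example.com,192.168.122.100,compute-0* ssh-ed25519 AAAA...
      echo '# END ANSIBLE MANAGED BLOCK'
    } > "$tmp"
    cat "$tmp" > /etc/ssh/ssh_known_hosts
    rm -f "$tmp"
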
Oct 11 01:25:40 compute-0 sshd-session[67449]: Connection closed by 192.168.122.30 port 39418
Oct 11 01:25:40 compute-0 sshd-session[67446]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:25:40 compute-0 systemd[1]: session-15.scope: Deactivated successfully.
Oct 11 01:25:40 compute-0 systemd[1]: session-15.scope: Consumed 4.548s CPU time.
Oct 11 01:25:40 compute-0 systemd-logind[804]: Session 15 logged out. Waiting for processes to exit.
Oct 11 01:25:40 compute-0 systemd-logind[804]: Removed session 15.
Oct 11 01:25:45 compute-0 sshd-session[68390]: Accepted publickey for zuul from 192.168.122.30 port 45940 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:25:46 compute-0 systemd-logind[804]: New session 16 of user zuul.
Oct 11 01:25:46 compute-0 systemd[1]: Started Session 16 of User zuul.
Oct 11 01:25:46 compute-0 sshd-session[68390]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:25:47 compute-0 python3.9[68543]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:25:48 compute-0 sudo[68697]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rrlqzjnbzvzmackzrvcpwmzujdbrgvkv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145947.7952712-32-280510390492371/AnsiballZ_systemd.py'
Oct 11 01:25:48 compute-0 sudo[68697]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:48 compute-0 python3.9[68699]: ansible-ansible.builtin.systemd Invoked with enabled=True name=sshd daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None masked=None
Oct 11 01:25:48 compute-0 sudo[68697]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:49 compute-0 sudo[68851]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-biwwoxhalvekcjdeefmkaszwzqvnnepi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145949.1631768-40-120632989095218/AnsiballZ_systemd.py'
Oct 11 01:25:49 compute-0 sudo[68851]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:49 compute-0 python3.9[68853]: ansible-ansible.builtin.systemd Invoked with name=sshd state=started daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:25:49 compute-0 sudo[68851]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:50 compute-0 sudo[69004]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rbdlwumkresizghgrellsxeodfjkwoyj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145950.2179832-49-78219129958333/AnsiballZ_command.py'
Oct 11 01:25:50 compute-0 sudo[69004]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:51 compute-0 python3.9[69006]: ansible-ansible.legacy.command Invoked with _raw_params=nft -f /etc/nftables/edpm-chains.nft _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:25:51 compute-0 sudo[69004]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:51 compute-0 sudo[69158]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hoshdmevevdnqnofeykqsluhkkqrdrvq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145951.3394945-57-63307784236498/AnsiballZ_stat.py'
Oct 11 01:25:51 compute-0 sudo[69158]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:52 compute-0 python3.9[69160]: ansible-ansible.builtin.stat Invoked with path=/etc/nftables/edpm-rules.nft.changed follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:25:52 compute-0 sudo[69158]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:52 compute-0 sudo[69312]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bxyuzmmagqdagpzjmusbkkyuhdcolwkr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145952.3047724-65-211874888922241/AnsiballZ_command.py'
Oct 11 01:25:52 compute-0 sudo[69312]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:52 compute-0 python3.9[69314]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail; cat /etc/nftables/edpm-flushes.nft /etc/nftables/edpm-rules.nft /etc/nftables/edpm-update-jumps.nft | nft -f - _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:25:52 compute-0 sudo[69312]: pam_unix(sudo:session): session closed for user root
Oct 11 01:25:53 compute-0 sudo[69467]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vtjvouawupaqhvhzdxyriunkwhwctvmu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145953.177135-73-238273529773102/AnsiballZ_file.py'
Oct 11 01:25:53 compute-0 sudo[69467]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:25:53 compute-0 python3.9[69469]: ansible-ansible.builtin.file Invoked with path=/etc/nftables/edpm-rules.nft.changed state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:25:53 compute-0 sudo[69467]: pam_unix(sudo:session): session closed for user root
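
Session 16 applies the ruleset that session 14 staged: edpm-chains.nft is loaded first (chain definitions are safe to re-load), then the edpm-rules.nft.changed marker touched at 01:25:21 is stat-ed, the flush/rules/update-jumps fragments are applied, and the marker is deleted. The stat strongly suggests the apply step is gated on that marker; a sketch under that assumption:

    nft -f /etc/nftables/edpm-chains.nft
    if [ -e /etc/nftables/edpm-rules.nft.changed ]; then
        # Flush the EDPM chains, reinstall the rules, refresh the jump rules.
        cat /etc/nftables/edpm-flushes.nft \
            /etc/nftables/edpm-rules.nft \
            /etc/nftables/edpm-update-jumps.nft | nft -f -
        rm -f /etc/nftables/edpm-rules.nft.changed
    fi
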
Oct 11 01:25:54 compute-0 sshd-session[68393]: Connection closed by 192.168.122.30 port 45940
Oct 11 01:25:54 compute-0 sshd-session[68390]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:25:54 compute-0 systemd[1]: session-16.scope: Deactivated successfully.
Oct 11 01:25:54 compute-0 systemd[1]: session-16.scope: Consumed 5.922s CPU time.
Oct 11 01:25:54 compute-0 systemd-logind[804]: Session 16 logged out. Waiting for processes to exit.
Oct 11 01:25:54 compute-0 systemd-logind[804]: Removed session 16.
Oct 11 01:26:00 compute-0 sshd-session[69494]: Accepted publickey for zuul from 192.168.122.30 port 43386 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:26:00 compute-0 systemd-logind[804]: New session 17 of user zuul.
Oct 11 01:26:00 compute-0 systemd[1]: Started Session 17 of User zuul.
Oct 11 01:26:00 compute-0 sshd-session[69494]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:26:01 compute-0 python3.9[69647]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:26:02 compute-0 sudo[69801]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pgqzkojrgedkyybjwgveqihoiikemepq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145961.8908193-34-39704479457562/AnsiballZ_setup.py'
Oct 11 01:26:02 compute-0 sudo[69801]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:02 compute-0 python3.9[69803]: ansible-ansible.legacy.setup Invoked with filter=['ansible_pkg_mgr'] gather_subset=['!all'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:26:02 compute-0 sudo[69801]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:03 compute-0 sudo[69885]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qwnsfbvruhebhcpjrbdxjgcyumbcympg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145961.8908193-34-39704479457562/AnsiballZ_dnf.py'
Oct 11 01:26:03 compute-0 sudo[69885]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:03 compute-0 python3.9[69887]: ansible-ansible.legacy.dnf Invoked with name=['yum-utils'] allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None state=None
Oct 11 01:26:04 compute-0 sudo[69885]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:05 compute-0 python3.9[70038]: ansible-ansible.legacy.command Invoked with _raw_params=needs-restarting -r _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:26:07 compute-0 python3.9[70189]: ansible-ansible.builtin.find Invoked with paths=['/var/lib/openstack/reboot_required/'] patterns=[] read_whole_file=False file_type=file age_stamp=mtime recurse=False hidden=False follow=False get_checksum=False checksum_algorithm=sha1 use_regex=False exact_mode=True excludes=None contains=None age=None size=None depth=None mode=None encoding=None limit=None
Oct 11 01:26:08 compute-0 python3.9[70339]: ansible-ansible.builtin.stat Invoked with path=/var/lib/config-data/puppet-generated follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:26:08 compute-0 rsyslogd[998]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 01:26:09 compute-0 python3.9[70490]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/config follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:26:09 compute-0 sshd-session[69497]: Connection closed by 192.168.122.30 port 43386
Oct 11 01:26:09 compute-0 sshd-session[69494]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:26:09 compute-0 systemd[1]: session-17.scope: Deactivated successfully.
Oct 11 01:26:09 compute-0 systemd[1]: session-17.scope: Consumed 7.045s CPU time.
Oct 11 01:26:09 compute-0 systemd-logind[804]: Session 17 logged out. Waiting for processes to exit.
Oct 11 01:26:09 compute-0 systemd-logind[804]: Removed session 17.
Oct 11 01:26:15 compute-0 sshd-session[70515]: Accepted publickey for zuul from 192.168.122.30 port 52858 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:26:15 compute-0 systemd-logind[804]: New session 18 of user zuul.
Oct 11 01:26:15 compute-0 systemd[1]: Started Session 18 of User zuul.
Oct 11 01:26:15 compute-0 sshd-session[70515]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:26:16 compute-0 python3.9[70668]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:26:18 compute-0 sudo[70822]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ffjpweaiyilmzkymrqkrcnbvpkvuxbov ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145977.8622055-50-71528451954662/AnsiballZ_file.py'
Oct 11 01:26:18 compute-0 sudo[70822]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:18 compute-0 python3.9[70824]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/telemetry/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:26:18 compute-0 sudo[70822]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:19 compute-0 sudo[70974]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-assvdmpasfkuwgclwyhtvmtmnstlaumm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145978.8417177-50-241508190118318/AnsiballZ_file.py'
Oct 11 01:26:19 compute-0 sudo[70974]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:19 compute-0 python3.9[70976]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/telemetry/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:26:19 compute-0 sudo[70974]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:20 compute-0 sudo[71126]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tupvwhrdpjcxeifydlzdnimvznrfmidh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145979.9316523-65-277711530377794/AnsiballZ_stat.py'
Oct 11 01:26:20 compute-0 sudo[71126]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:20 compute-0 python3.9[71128]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/telemetry/default/tls.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:20 compute-0 sudo[71126]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:21 compute-0 sudo[71249]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kzfjfkirkbdhaadhwynjuvlsimdypquj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145979.9316523-65-277711530377794/AnsiballZ_copy.py'
Oct 11 01:26:21 compute-0 sudo[71249]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:21 compute-0 python3.9[71251]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/certs/telemetry/default/tls.crt group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145979.9316523-65-277711530377794/.source.crt _original_basename=compute-0.ctlplane.example.com-tls.crt follow=False checksum=17da0d015f8fc6bd12554db5835ff227087a31f3 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:21 compute-0 sudo[71249]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:22 compute-0 sudo[71401]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tuqytsorcyrqtaysfairkmhzyqncgsxv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145981.8245878-65-87844144989083/AnsiballZ_stat.py'
Oct 11 01:26:22 compute-0 sudo[71401]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:22 compute-0 python3.9[71403]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/telemetry/default/ca.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:22 compute-0 sudo[71401]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:22 compute-0 sudo[71524]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pxlaprlrbppbhrdjfngkuxtlrvctwdtj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145981.8245878-65-87844144989083/AnsiballZ_copy.py'
Oct 11 01:26:22 compute-0 sudo[71524]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:23 compute-0 python3.9[71526]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/certs/telemetry/default/ca.crt group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145981.8245878-65-87844144989083/.source.crt _original_basename=compute-0.ctlplane.example.com-ca.crt follow=False checksum=8e17555fab3c7800adbc0bc0b78007dc4d3f875c backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:23 compute-0 sudo[71524]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:23 compute-0 sudo[71676]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-itndecxgjfilnmhhxyudrpgyhhbwjnck ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145983.3723664-65-21498718669320/AnsiballZ_stat.py'
Oct 11 01:26:23 compute-0 sudo[71676]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:24 compute-0 python3.9[71678]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/telemetry/default/tls.key follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:24 compute-0 sudo[71676]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:24 compute-0 sudo[71799]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-twkjepnajhfnpftvrsamjiknkswdujra ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145983.3723664-65-21498718669320/AnsiballZ_copy.py'
Oct 11 01:26:24 compute-0 sudo[71799]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:24 compute-0 python3.9[71801]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/certs/telemetry/default/tls.key group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145983.3723664-65-21498718669320/.source.key _original_basename=compute-0.ctlplane.example.com-tls.key follow=False checksum=0f44ec9dcbd9aae2be965b899da8c25fa06fdfa7 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:24 compute-0 sudo[71799]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:25 compute-0 sudo[71951]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mhudzmwmuqxbpnfxnsijnxbygjkymtcw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145985.2128446-109-252949515477613/AnsiballZ_file.py'
Oct 11 01:26:25 compute-0 sudo[71951]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:25 compute-0 python3.9[71953]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/telemetry-power-monitoring/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:26:25 compute-0 sudo[71951]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:26 compute-0 sudo[72103]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-drgutdwrjfhsytfvdsoarfunccryjaap ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145985.991405-109-218042636939555/AnsiballZ_file.py'
Oct 11 01:26:26 compute-0 sudo[72103]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:26 compute-0 python3.9[72105]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/telemetry-power-monitoring/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:26:26 compute-0 sudo[72103]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:27 compute-0 sudo[72255]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-inxskrlldlzphaatspxjpvahisykstbk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145986.829134-124-204674391850129/AnsiballZ_stat.py'
Oct 11 01:26:27 compute-0 sudo[72255]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:27 compute-0 python3.9[72257]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:27 compute-0 sudo[72255]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:27 compute-0 sudo[72378]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nypsszjangxgnoucfyssmbpuletjdnrn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145986.829134-124-204674391850129/AnsiballZ_copy.py'
Oct 11 01:26:27 compute-0 sudo[72378]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:28 compute-0 python3.9[72380]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.crt group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145986.829134-124-204674391850129/.source.crt _original_basename=compute-0.ctlplane.example.com-tls.crt follow=False checksum=c279e75d39a037b3ab493bbd0ccec0106fc12601 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:28 compute-0 sudo[72378]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:28 compute-0 sudo[72530]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dvfudztkisftbxvqmwbwcuxaiuytsrsq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145988.411002-124-243336102871210/AnsiballZ_stat.py'
Oct 11 01:26:28 compute-0 sudo[72530]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:29 compute-0 python3.9[72532]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/telemetry-power-monitoring/default/ca.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:29 compute-0 sudo[72530]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:29 compute-0 sudo[72653]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zamrhsdkxwdtkytteezvizewfouedkco ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145988.411002-124-243336102871210/AnsiballZ_copy.py'
Oct 11 01:26:29 compute-0 sudo[72653]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:29 compute-0 python3.9[72655]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/certs/telemetry-power-monitoring/default/ca.crt group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145988.411002-124-243336102871210/.source.crt _original_basename=compute-0.ctlplane.example.com-ca.crt follow=False checksum=8e17555fab3c7800adbc0bc0b78007dc4d3f875c backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:29 compute-0 sudo[72653]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:30 compute-0 sudo[72805]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lzaqsxmookpmgjhhoesmgharyfdqedmd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145990.1723022-124-202983706145010/AnsiballZ_stat.py'
Oct 11 01:26:30 compute-0 sudo[72805]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:30 compute-0 python3.9[72807]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.key follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:30 compute-0 sudo[72805]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:31 compute-0 sudo[72928]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tsnjeiptmywnreeqmknzgagqryjbtpdd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145990.1723022-124-202983706145010/AnsiballZ_copy.py'
Oct 11 01:26:31 compute-0 sudo[72928]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:31 compute-0 python3.9[72930]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.key group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145990.1723022-124-202983706145010/.source.key _original_basename=compute-0.ctlplane.example.com-tls.key follow=False checksum=64e6e0c887a56ed107615daddc477a7c67ce1b27 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:31 compute-0 sudo[72928]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:32 compute-0 sudo[73080]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xylbdwebzeboylzsnqggwxyaogavxujw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145991.8836215-168-235548034800280/AnsiballZ_file.py'
Oct 11 01:26:32 compute-0 sudo[73080]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:32 compute-0 python3.9[73082]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/libvirt/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:26:32 compute-0 sudo[73080]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:33 compute-0 sudo[73232]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-njfpgfbrjfgixttpsmuleoejsoupozhj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145992.705348-168-36891158367206/AnsiballZ_file.py'
Oct 11 01:26:33 compute-0 sudo[73232]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:33 compute-0 python3.9[73234]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/libvirt/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:26:33 compute-0 sudo[73232]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:33 compute-0 sudo[73384]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uvzkjagrzdddvkddxfvozkkdrjwvzlsn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145993.5726082-183-87345906711856/AnsiballZ_stat.py'
Oct 11 01:26:33 compute-0 sudo[73384]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:34 compute-0 python3.9[73386]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/libvirt/default/tls.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:34 compute-0 sudo[73384]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:34 compute-0 sudo[73507]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yzncpnzmdtuiwgggumuliuiarvdixkkm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145993.5726082-183-87345906711856/AnsiballZ_copy.py'
Oct 11 01:26:34 compute-0 sudo[73507]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:35 compute-0 python3.9[73509]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/certs/libvirt/default/tls.crt group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145993.5726082-183-87345906711856/.source.crt _original_basename=compute-0.ctlplane.example.com-tls.crt follow=False checksum=462fefd069deee1ab3956e4d7cb7188ca5e716dc backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:35 compute-0 sudo[73507]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:35 compute-0 sudo[73659]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kcqhwangpxyelbexvizalaascqogbnqx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145995.2055016-183-277035377786824/AnsiballZ_stat.py'
Oct 11 01:26:35 compute-0 sudo[73659]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:35 compute-0 python3.9[73661]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/libvirt/default/ca.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:35 compute-0 sudo[73659]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:36 compute-0 sudo[73782]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ovkwpgolhbqtrlakxtgbtkhvdgdiaadn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145995.2055016-183-277035377786824/AnsiballZ_copy.py'
Oct 11 01:26:36 compute-0 sudo[73782]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:36 compute-0 python3.9[73784]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/certs/libvirt/default/ca.crt group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145995.2055016-183-277035377786824/.source.crt _original_basename=compute-0.ctlplane.example.com-ca.crt follow=False checksum=fb9d385d3d3b6328b13756660d1a03cd8e9a07a0 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:36 compute-0 sudo[73782]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:37 compute-0 sudo[73934]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bjrocqumikhmpojowcjhvzjlruywjdrh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145996.7696695-183-63422726185669/AnsiballZ_stat.py'
Oct 11 01:26:37 compute-0 sudo[73934]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:37 compute-0 python3.9[73936]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/libvirt/default/tls.key follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:37 compute-0 sudo[73934]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:37 compute-0 sudo[74057]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pykgykpfahkbvlfxocypgkeuyidkqjwr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145996.7696695-183-63422726185669/AnsiballZ_copy.py'
Oct 11 01:26:37 compute-0 sudo[74057]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:38 compute-0 python3.9[74059]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/certs/libvirt/default/tls.key group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760145996.7696695-183-63422726185669/.source.key _original_basename=compute-0.ctlplane.example.com-tls.key follow=False checksum=0890df465cb3672581ee006283c6c3bd0bbd9f1e backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:38 compute-0 sudo[74057]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:38 compute-0 sudo[74209]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sblctwbazkkkaykggzaocvotyjtrevxy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145998.3585057-227-173048565631404/AnsiballZ_file.py'
Oct 11 01:26:38 compute-0 sudo[74209]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:38 compute-0 python3.9[74211]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/ovn/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:26:39 compute-0 sudo[74209]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:39 compute-0 sudo[74361]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dfxddvbeyxdvlhiyxkxzoupvuxintpgb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760145999.2328918-227-173079420972537/AnsiballZ_file.py'
Oct 11 01:26:39 compute-0 sudo[74361]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:39 compute-0 python3.9[74363]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/ovn/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:26:39 compute-0 sudo[74361]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:40 compute-0 sudo[74514]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ajddpjhscxgtzqvcakcxbdflneplczws ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146000.2193353-242-45544356695633/AnsiballZ_stat.py'
Oct 11 01:26:40 compute-0 sudo[74514]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:40 compute-0 python3.9[74516]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/ovn/default/tls.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:40 compute-0 sudo[74514]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:41 compute-0 sudo[74637]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-garkxkuczozyqscjstwguzmdqrbqslup ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146000.2193353-242-45544356695633/AnsiballZ_copy.py'
Oct 11 01:26:41 compute-0 sudo[74637]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:41 compute-0 python3.9[74639]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/certs/ovn/default/tls.crt group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146000.2193353-242-45544356695633/.source.crt _original_basename=compute-0.ctlplane.example.com-tls.crt follow=False checksum=72c0ea96e8fa729c9574a2f8cc36a993b535d21a backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:41 compute-0 sudo[74637]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:42 compute-0 sudo[74789]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jwvfgujwsxoghevkffgzrtoktsxtsxcd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146001.7627752-242-136176354623465/AnsiballZ_stat.py'
Oct 11 01:26:42 compute-0 sudo[74789]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:42 compute-0 python3.9[74791]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/ovn/default/ca.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:42 compute-0 sudo[74789]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:42 compute-0 sudo[74912]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gjcmprlrrokicdykyztkslrblefyojxz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146001.7627752-242-136176354623465/AnsiballZ_copy.py'
Oct 11 01:26:42 compute-0 sudo[74912]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:43 compute-0 python3.9[74914]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/certs/ovn/default/ca.crt group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146001.7627752-242-136176354623465/.source.crt _original_basename=compute-0.ctlplane.example.com-ca.crt follow=False checksum=dd1ca38573e4329f99bc06f928c8186c0e5d69d9 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:43 compute-0 sudo[74912]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:43 compute-0 sudo[75064]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lkegiasygzfdddkalqddstyciogtweww ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146003.3021162-242-265073932830905/AnsiballZ_stat.py'
Oct 11 01:26:43 compute-0 sudo[75064]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:43 compute-0 python3.9[75066]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/ovn/default/tls.key follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:43 compute-0 sudo[75064]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:44 compute-0 sudo[75187]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-azwgaiqdwutizqozwhcnycmtgwozsvqn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146003.3021162-242-265073932830905/AnsiballZ_copy.py'
Oct 11 01:26:44 compute-0 sudo[75187]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:44 compute-0 python3.9[75189]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/certs/ovn/default/tls.key group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146003.3021162-242-265073932830905/.source.key _original_basename=compute-0.ctlplane.example.com-tls.key follow=False checksum=b52a2e4720dd388510c4e1624ac7ef51cc963330 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:44 compute-0 sudo[75187]: pam_unix(sudo:session): session closed for user root
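[annotation] The three copy invocations above install the per-node OVN TLS material (tls.crt, ca.crt, tls.key) under /var/lib/openstack/certs/ovn/default, root-owned and mode 0600. A minimal sketch of the equivalent task; the src filenames are illustrative placeholders, not the real Zuul staging paths:

    # Sketch only: reproduces the copy pattern logged above.
    # src paths are hypothetical stand-ins for the controller-side files.
    - name: Install OVN TLS material with root-only permissions
      ansible.builtin.copy:
        src: "{{ item.src }}"
        dest: "/var/lib/openstack/certs/ovn/default/{{ item.dest }}"
        owner: root
        group: root
        mode: "0600"
      loop:
        - { src: compute-0-tls.crt, dest: tls.crt }
        - { src: compute-0-ca.crt, dest: ca.crt }
        - { src: compute-0-tls.key, dest: tls.key }
      become: true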
Oct 11 01:26:45 compute-0 sudo[75339]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yecoxidofqsjctmmqqappsfhmcewhegc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146005.428081-302-4286280306734/AnsiballZ_file.py'
Oct 11 01:26:45 compute-0 sudo[75339]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:46 compute-0 python3.9[75341]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/cacerts/ovn setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:26:46 compute-0 sudo[75339]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:46 compute-0 sudo[75491]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lwimlpzyploieymqzvsyvjzhgundnitu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146006.2317488-310-57759393455914/AnsiballZ_stat.py'
Oct 11 01:26:46 compute-0 sudo[75491]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:46 compute-0 python3.9[75493]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:46 compute-0 sudo[75491]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:47 compute-0 sudo[75614]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ergutwpwmiyfziyfjkorwfnmeujagjnn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146006.2317488-310-57759393455914/AnsiballZ_copy.py'
Oct 11 01:26:47 compute-0 sudo[75614]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:47 compute-0 python3.9[75616]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146006.2317488-310-57759393455914/.source.pem _original_basename=tls-ca-bundle.pem follow=False checksum=f57cfa4065467101f7ba494c2f61c0e2e8a6dad5 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:47 compute-0 sudo[75614]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:48 compute-0 sudo[75766]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yqitpkadwzfasmtqoouujqkzaqvcntjv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146007.8825834-326-10617526416295/AnsiballZ_file.py'
Oct 11 01:26:48 compute-0 sudo[75766]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:48 compute-0 python3.9[75768]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/cacerts/telemetry setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:26:48 compute-0 sudo[75766]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:49 compute-0 sudo[75918]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-syckocsjjgcxcklpuilniawgahsodwud ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146008.7250946-334-184377466447005/AnsiballZ_stat.py'
Oct 11 01:26:49 compute-0 sudo[75918]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:49 compute-0 PackageKit[31003]: daemon quit
Oct 11 01:26:49 compute-0 systemd[1]: packagekit.service: Deactivated successfully.
Oct 11 01:26:49 compute-0 python3.9[75920]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:49 compute-0 sudo[75918]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:49 compute-0 sudo[76042]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fjdoctlakmlqttghnxkhrccqupobcnon ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146008.7250946-334-184377466447005/AnsiballZ_copy.py'
Oct 11 01:26:49 compute-0 sudo[76042]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:50 compute-0 python3.9[76044]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146008.7250946-334-184377466447005/.source.pem _original_basename=tls-ca-bundle.pem follow=False checksum=f57cfa4065467101f7ba494c2f61c0e2e8a6dad5 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:50 compute-0 sudo[76042]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:50 compute-0 sudo[76194]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-slsotcftdjfialnykfnqenejljiyhupq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146010.4097986-350-198565344281169/AnsiballZ_file.py'
Oct 11 01:26:50 compute-0 sudo[76194]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:51 compute-0 python3.9[76196]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/cacerts/repo-setup setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:26:51 compute-0 sudo[76194]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:51 compute-0 sudo[76346]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aqjyvomoovflyxmpnedpimzeocvgsnca ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146011.2937343-358-103615770237716/AnsiballZ_stat.py'
Oct 11 01:26:51 compute-0 sudo[76346]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:51 compute-0 python3.9[76348]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/cacerts/repo-setup/tls-ca-bundle.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:51 compute-0 sudo[76346]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:52 compute-0 sudo[76469]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gzmptbnignflyadydwcuugsllbpwjvil ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146011.2937343-358-103615770237716/AnsiballZ_copy.py'
Oct 11 01:26:52 compute-0 sudo[76469]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:52 compute-0 python3.9[76471]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/cacerts/repo-setup/tls-ca-bundle.pem group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146011.2937343-358-103615770237716/.source.pem _original_basename=tls-ca-bundle.pem follow=False checksum=f57cfa4065467101f7ba494c2f61c0e2e8a6dad5 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:52 compute-0 sudo[76469]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:53 compute-0 sudo[76621]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rkuovipilmyhmwapnnuuphskdwswxenh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146012.915567-374-262443204453650/AnsiballZ_file.py'
Oct 11 01:26:53 compute-0 sudo[76621]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:53 compute-0 python3.9[76623]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/cacerts/libvirt setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:26:53 compute-0 sudo[76621]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:54 compute-0 sudo[76773]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ltwiqpmkfaumuagccgxodlllbfkjqwga ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146013.7807355-382-154233295670269/AnsiballZ_stat.py'
Oct 11 01:26:54 compute-0 sudo[76773]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:54 compute-0 python3.9[76775]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/cacerts/libvirt/tls-ca-bundle.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:54 compute-0 sudo[76773]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:54 compute-0 sudo[76896]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ybxqegiwbowlpvufrnwebnsyiezmaluj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146013.7807355-382-154233295670269/AnsiballZ_copy.py'
Oct 11 01:26:54 compute-0 sudo[76896]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:55 compute-0 python3.9[76898]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/cacerts/libvirt/tls-ca-bundle.pem group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146013.7807355-382-154233295670269/.source.pem _original_basename=tls-ca-bundle.pem follow=False checksum=f57cfa4065467101f7ba494c2f61c0e2e8a6dad5 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:55 compute-0 sudo[76896]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:55 compute-0 sudo[77048]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jcmfbkezbbflaaeodtqukypdknswbevy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146015.3529775-398-179995906690086/AnsiballZ_file.py'
Oct 11 01:26:55 compute-0 sudo[77048]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:55 compute-0 python3.9[77050]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/cacerts/bootstrap setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:26:55 compute-0 sudo[77048]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:56 compute-0 sudo[77200]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tyysxkkysckzkqvhpznkpnzecqdzgeqm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146016.170065-406-176557747722999/AnsiballZ_stat.py'
Oct 11 01:26:56 compute-0 sudo[77200]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:56 compute-0 python3.9[77202]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/cacerts/bootstrap/tls-ca-bundle.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:56 compute-0 sudo[77200]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:57 compute-0 sudo[77323]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wlycwxczdhacxwawsscbjstreqzxepmw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146016.170065-406-176557747722999/AnsiballZ_copy.py'
Oct 11 01:26:57 compute-0 sudo[77323]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:57 compute-0 python3.9[77325]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/cacerts/bootstrap/tls-ca-bundle.pem group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146016.170065-406-176557747722999/.source.pem _original_basename=tls-ca-bundle.pem follow=False checksum=f57cfa4065467101f7ba494c2f61c0e2e8a6dad5 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:57 compute-0 sudo[77323]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:58 compute-0 sudo[77475]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xlaymflmpbwsdejynnwreydjpdgfhfjs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146017.77892-422-236008620563891/AnsiballZ_file.py'
Oct 11 01:26:58 compute-0 sudo[77475]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:58 compute-0 python3.9[77477]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/cacerts/telemetry-power-monitoring setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:26:58 compute-0 sudo[77475]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:58 compute-0 sudo[77627]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sulvdrbwxqlfomnufqliwaqzksoagwyy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146018.5834434-430-80447419731767/AnsiballZ_stat.py'
Oct 11 01:26:58 compute-0 sudo[77627]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:59 compute-0 python3.9[77629]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:26:59 compute-0 sudo[77627]: pam_unix(sudo:session): session closed for user root
Oct 11 01:26:59 compute-0 sudo[77750]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wdwsinsewqmxjiuzsohijgxjzjdwumzp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146018.5834434-430-80447419731767/AnsiballZ_copy.py'
Oct 11 01:26:59 compute-0 sudo[77750]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:26:59 compute-0 python3.9[77752]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146018.5834434-430-80447419731767/.source.pem _original_basename=tls-ca-bundle.pem follow=False checksum=f57cfa4065467101f7ba494c2f61c0e2e8a6dad5 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:26:59 compute-0 sudo[77750]: pam_unix(sudo:session): session closed for user root
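[annotation] The same stat-then-copy pattern repeats for every service that consumes the CA bundle: a 0755 directory per service under /var/lib/openstack/cacerts (labeled container_file_t so containers can bind-mount it), then the identical tls-ca-bundle.pem (checksum f57cfa40…) at mode 0644. A loop sketch covering the six services seen above:

    # Sketch: collapses the repeated per-service tasks logged above.
    # tls-ca-bundle.pem is assumed staged on the controller side.
    - name: Create per-service CA directories
      ansible.builtin.file:
        path: "/var/lib/openstack/cacerts/{{ item }}"
        state: directory
        owner: root
        group: root
        mode: "0755"
        setype: container_file_t
      loop: &cacert_services
        - ovn
        - telemetry
        - repo-setup
        - libvirt
        - bootstrap
        - telemetry-power-monitoring
      become: true

    - name: Distribute the shared CA bundle
      ansible.builtin.copy:
        src: tls-ca-bundle.pem
        dest: "/var/lib/openstack/cacerts/{{ item }}/tls-ca-bundle.pem"
        owner: root
        group: root
        mode: "0644"
      loop: *cacert_services
      become: true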
Oct 11 01:27:00 compute-0 sshd-session[70518]: Connection closed by 192.168.122.30 port 52858
Oct 11 01:27:00 compute-0 sshd-session[70515]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:27:00 compute-0 systemd[1]: session-18.scope: Deactivated successfully.
Oct 11 01:27:00 compute-0 systemd[1]: session-18.scope: Consumed 36.181s CPU time.
Oct 11 01:27:00 compute-0 systemd-logind[804]: Session 18 logged out. Waiting for processes to exit.
Oct 11 01:27:00 compute-0 systemd-logind[804]: Removed session 18.
Oct 11 01:27:05 compute-0 sshd-session[77777]: Accepted publickey for zuul from 192.168.122.30 port 48494 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:27:05 compute-0 systemd-logind[804]: New session 19 of user zuul.
Oct 11 01:27:05 compute-0 systemd[1]: Started Session 19 of User zuul.
Oct 11 01:27:05 compute-0 sshd-session[77777]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:27:07 compute-0 python3.9[77930]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:27:08 compute-0 sudo[78084]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bphecwqojevjhkvlpfmwoclighamijur ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146027.6821647-34-97445795156322/AnsiballZ_file.py'
Oct 11 01:27:08 compute-0 sudo[78084]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:08 compute-0 python3.9[78086]: ansible-ansible.builtin.file Invoked with group=zuul mode=0750 owner=zuul path=/var/lib/edpm-config/firewall setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:27:08 compute-0 sudo[78084]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:09 compute-0 sudo[78236]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hojupwvjhbptmzxaqdkeddodpvazifrq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146028.68661-34-148729154298637/AnsiballZ_file.py'
Oct 11 01:27:09 compute-0 sudo[78236]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:09 compute-0 python3.9[78238]: ansible-ansible.builtin.file Invoked with group=openvswitch owner=openvswitch path=/var/lib/openvswitch/ovn setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:27:09 compute-0 sudo[78236]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:10 compute-0 python3.9[78388]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'selinux'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:27:11 compute-0 sudo[78538]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-spqsbqyybxcukhbnuktsotjadkfyrgnh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146030.461543-57-228527905655399/AnsiballZ_seboolean.py'
Oct 11 01:27:11 compute-0 sudo[78538]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:11 compute-0 python3.9[78540]: ansible-ansible.posix.seboolean Invoked with name=virt_sandbox_use_netlink persistent=True state=True ignore_selinux_state=False
Oct 11 01:27:12 compute-0 sudo[78538]: pam_unix(sudo:session): session closed for user root
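[annotation] The virt_sandbox_use_netlink SELinux boolean is switched on persistently so sandboxed virtualization processes may open netlink sockets; the dbus-broker avc load_policy line at 01:27:13 is the policy reload that follows. A sketch of the same change (ansible.posix collection):

    # Sketch of the boolean change logged above.
    - name: Allow sandboxed virt processes to use netlink
      ansible.posix.seboolean:
        name: virt_sandbox_use_netlink
        state: true
        persistent: true
      become: true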
Oct 11 01:27:13 compute-0 sudo[78694]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yboqyndefaojwqkgijxjaururlzjoffv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146032.7462158-67-29769804610575/AnsiballZ_setup.py'
Oct 11 01:27:13 compute-0 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=11 res=1
Oct 11 01:27:13 compute-0 sudo[78694]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:13 compute-0 python3.9[78696]: ansible-ansible.legacy.setup Invoked with filter=['ansible_pkg_mgr'] gather_subset=['!all'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:27:13 compute-0 sudo[78694]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:14 compute-0 sudo[78778]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qbawjqjjybqzsdlxjorcgrqwdihfraog ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146032.7462158-67-29769804610575/AnsiballZ_dnf.py'
Oct 11 01:27:14 compute-0 sudo[78778]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:14 compute-0 python3.9[78780]: ansible-ansible.legacy.dnf Invoked with name=['openvswitch'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:27:15 compute-0 sudo[78778]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:16 compute-0 sudo[78931]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ybffbmpthihrunsdbzodmnuymdloxeot ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146035.8643966-79-237601361244364/AnsiballZ_systemd.py'
Oct 11 01:27:16 compute-0 sudo[78931]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:16 compute-0 python3.9[78933]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=openvswitch.service state=started daemon_reload=False daemon_reexec=False scope=system no_block=False force=None
Oct 11 01:27:16 compute-0 sudo[78931]: pam_unix(sudo:session): session closed for user root
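[annotation] Open vSwitch is installed and its unit enabled and started before any OVN configuration touches the local ovsdb. A sketch of the package/service pair:

    # Sketch: package + service tasks matching the dnf and systemd
    # invocations logged above.
    - name: Install Open vSwitch
      ansible.builtin.dnf:
        name: openvswitch
        state: present
      become: true

    - name: Enable and start openvswitch.service
      ansible.builtin.systemd:
        name: openvswitch.service
        state: started
        enabled: true
      become: true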
Oct 11 01:27:17 compute-0 sudo[79086]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hzdzffzejznhdxpapdsxwtydweksalla ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760146037.2078464-87-180405922704802/AnsiballZ_edpm_nftables_snippet.py'
Oct 11 01:27:17 compute-0 sudo[79086]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:18 compute-0 python3[79088]: ansible-osp.edpm.edpm_nftables_snippet Invoked with content=- rule_name: 118 neutron vxlan networks
                                            rule:
                                              proto: udp
                                              dport: 4789
                                          - rule_name: 119 neutron geneve networks
                                            rule:
                                              proto: udp
                                              dport: 6081
                                              state: ["UNTRACKED"]
                                          - rule_name: 120 neutron geneve networks no conntrack
                                            rule:
                                              proto: udp
                                              dport: 6081
                                              table: raw
                                              chain: OUTPUT
                                              jump: NOTRACK
                                              action: append
                                              state: []
                                          - rule_name: 121 neutron geneve networks no conntrack
                                            rule:
                                              proto: udp
                                              dport: 6081
                                              table: raw
                                              chain: PREROUTING
                                              jump: NOTRACK
                                              action: append
                                              state: []
                                           dest=/var/lib/edpm-config/firewall/ovn.yaml state=present
Oct 11 01:27:18 compute-0 sudo[79086]: pam_unix(sudo:session): session closed for user root
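[annotation] The snippet above drops OVN tunnel rules into the EDPM firewall directory: rule 118 opens UDP 4789 (VXLAN), rule 119 accepts UDP 6081 (Geneve) in the UNTRACKED state, and rules 120/121 append NOTRACK jumps to the raw table's OUTPUT and PREROUTING chains so Geneve traffic bypasses conntrack entirely. A trimmed sketch of driving the module (osp.edpm collection, parameters as logged):

    # Sketch: how edpm_nftables_snippet is fed; content abbreviated to
    # the VXLAN rule plus one of the two NOTRACK rules shown above.
    - name: Drop OVN tunnel rules into the EDPM firewall directory
      osp.edpm.edpm_nftables_snippet:
        dest: /var/lib/edpm-config/firewall/ovn.yaml
        state: present
        content: |
          - rule_name: 118 neutron vxlan networks
            rule:
              proto: udp
              dport: 4789
          - rule_name: 120 neutron geneve networks no conntrack
            rule:
              proto: udp
              dport: 6081
              table: raw
              chain: OUTPUT
              jump: NOTRACK
              action: append
              state: []
      become: true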
Oct 11 01:27:18 compute-0 sudo[79238]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cgokvnskoojriwvpbfinaeafhjnjtouh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146038.4380927-96-19779033560402/AnsiballZ_file.py'
Oct 11 01:27:18 compute-0 sudo[79238]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:19 compute-0 python3.9[79240]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:27:19 compute-0 sudo[79238]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:19 compute-0 sudo[79390]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pwweriusrdcxmbohnnfrysymjlmxcess ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146039.2785792-104-228320228147763/AnsiballZ_stat.py'
Oct 11 01:27:19 compute-0 sudo[79390]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:20 compute-0 python3.9[79392]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:27:20 compute-0 sudo[79390]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:20 compute-0 sudo[79468]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jmocolgawnaguvcwlvquioyxehmulcfq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146039.2785792-104-228320228147763/AnsiballZ_file.py'
Oct 11 01:27:20 compute-0 sudo[79468]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:20 compute-0 python3.9[79470]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml _original_basename=base-rules.yaml.j2 recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:27:20 compute-0 sudo[79468]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:21 compute-0 sudo[79620]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pqnejaxzchcwdhvpmfhzjkjbbhyoggvp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146040.8040435-116-201114505039509/AnsiballZ_stat.py'
Oct 11 01:27:21 compute-0 sudo[79620]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:21 compute-0 python3.9[79622]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:27:21 compute-0 sudo[79620]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:21 compute-0 sudo[79698]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mzyjohrxntdnkkcdnosfuzucggocbshk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146040.8040435-116-201114505039509/AnsiballZ_file.py'
Oct 11 01:27:21 compute-0 sudo[79698]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:21 compute-0 python3.9[79700]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml _original_basename=.yob1_2_s recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:27:21 compute-0 sudo[79698]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:22 compute-0 sudo[79850]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nnjzugijilzhztwzygswexptsrtrfdvp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146042.1932554-128-62190563136196/AnsiballZ_stat.py'
Oct 11 01:27:22 compute-0 sudo[79850]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:22 compute-0 python3.9[79852]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/iptables.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:27:22 compute-0 sudo[79850]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:23 compute-0 sudo[79928]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ayjsbtzlhyprnbiagiuildyvabyykbbs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146042.1932554-128-62190563136196/AnsiballZ_file.py'
Oct 11 01:27:23 compute-0 sudo[79928]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:23 compute-0 python3.9[79930]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/iptables.nft _original_basename=iptables.nft recurse=False state=file path=/etc/nftables/iptables.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:27:23 compute-0 sudo[79928]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:24 compute-0 sudo[80080]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gsblzpgdoiuvwklkktomyfnwhuuqlfiy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146043.5382602-141-116164198608136/AnsiballZ_command.py'
Oct 11 01:27:24 compute-0 sudo[80080]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:24 compute-0 python3.9[80082]: ansible-ansible.legacy.command Invoked with _raw_params=nft -j list ruleset _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:27:24 compute-0 sudo[80080]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:25 compute-0 sudo[80233]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zlyngfddvpwtbzjukcwnizfxrezaqzuo ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760146044.5260932-149-31620310743745/AnsiballZ_edpm_nftables_from_files.py'
Oct 11 01:27:25 compute-0 sudo[80233]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:25 compute-0 python3[80235]: ansible-edpm_nftables_from_files Invoked with src=/var/lib/edpm-config/firewall
Oct 11 01:27:25 compute-0 sudo[80233]: pam_unix(sudo:session): session closed for user root
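[annotation] Before rendering anything, the role snapshots the live ruleset as JSON and then aggregates every snippet in /var/lib/edpm-config/firewall via edpm_nftables_from_files. A sketch of the discovery pair; the register names are illustrative:

    # Sketch of the two read-only discovery tasks logged above.
    - name: Snapshot the live ruleset as JSON
      ansible.builtin.command: nft -j list ruleset
      register: nft_ruleset
      changed_when: false
      become: true

    - name: Aggregate rule snippets from the firewall directory
      osp.edpm.edpm_nftables_from_files:
        src: /var/lib/edpm-config/firewall
      register: nftables_from_files
      become: true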
Oct 11 01:27:25 compute-0 sudo[80385]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mtxmgnpwhboqsudqasgdgcqddlkjrtvb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146045.4883451-157-55120613176464/AnsiballZ_stat.py'
Oct 11 01:27:25 compute-0 sudo[80385]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:26 compute-0 python3.9[80387]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:27:26 compute-0 sudo[80385]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:26 compute-0 sudo[80510]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-leiorucysbmeashfxaoibounutzgfzpe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146045.4883451-157-55120613176464/AnsiballZ_copy.py'
Oct 11 01:27:26 compute-0 sudo[80510]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:27 compute-0 python3.9[80512]: ansible-ansible.legacy.copy Invoked with dest=/etc/nftables/edpm-jumps.nft group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146045.4883451-157-55120613176464/.source.nft follow=False _original_basename=jump-chain.j2 checksum=81c2fc96c23335ffe374f9b064e885d5d971ddf9 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:27:27 compute-0 sudo[80510]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:27 compute-0 sudo[80662]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cwmwxprzbxkgadufbtmxxdzlzdynvogn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146047.259627-172-108514000826915/AnsiballZ_stat.py'
Oct 11 01:27:27 compute-0 sudo[80662]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:27 compute-0 python3.9[80664]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-update-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:27:28 compute-0 sudo[80662]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:28 compute-0 sudo[80787]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fpvwuyraruwmruhhzihewqrfvqsfvdih ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146047.259627-172-108514000826915/AnsiballZ_copy.py'
Oct 11 01:27:28 compute-0 sudo[80787]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:28 compute-0 python3.9[80789]: ansible-ansible.legacy.copy Invoked with dest=/etc/nftables/edpm-update-jumps.nft group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146047.259627-172-108514000826915/.source.nft follow=False _original_basename=jump-chain.j2 checksum=81c2fc96c23335ffe374f9b064e885d5d971ddf9 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:27:28 compute-0 sudo[80787]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:29 compute-0 sudo[80939]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ujucjhyhxiuiypdzilctxeaonoxdosni ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146048.868835-187-29945453565038/AnsiballZ_stat.py'
Oct 11 01:27:29 compute-0 sudo[80939]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:29 compute-0 python3.9[80941]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-flushes.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:27:29 compute-0 sudo[80939]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:29 compute-0 sudo[81064]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yiekmzhblyrqnmuiwlvndgfbqygwvzxa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146048.868835-187-29945453565038/AnsiballZ_copy.py'
Oct 11 01:27:29 compute-0 sudo[81064]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:30 compute-0 python3.9[81066]: ansible-ansible.legacy.copy Invoked with dest=/etc/nftables/edpm-flushes.nft group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146048.868835-187-29945453565038/.source.nft follow=False _original_basename=flush-chain.j2 checksum=4d3ffec49c8eb1a9b80d2f1e8cd64070063a87b4 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:27:30 compute-0 sudo[81064]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:30 compute-0 sudo[81216]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tvrsjrppmgqyaqrnjsefqfhavsvplcou ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146050.3731499-202-75483029066180/AnsiballZ_stat.py'
Oct 11 01:27:30 compute-0 sudo[81216]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:30 compute-0 python3.9[81218]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-chains.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:27:31 compute-0 sudo[81216]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:31 compute-0 sudo[81341]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-echxaozectnceanpddxumarzzigaydha ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146050.3731499-202-75483029066180/AnsiballZ_copy.py'
Oct 11 01:27:31 compute-0 sudo[81341]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:31 compute-0 python3.9[81343]: ansible-ansible.legacy.copy Invoked with dest=/etc/nftables/edpm-chains.nft group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146050.3731499-202-75483029066180/.source.nft follow=False _original_basename=chains.j2 checksum=298ada419730ec15df17ded0cc50c97a4014a591 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:27:31 compute-0 sudo[81341]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:32 compute-0 sudo[81493]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ftewhokqqgvyiktbddqfzmkddsbkmyrv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146051.9953682-217-100501523261924/AnsiballZ_stat.py'
Oct 11 01:27:32 compute-0 sudo[81493]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:32 compute-0 python3.9[81495]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-rules.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:27:32 compute-0 sudo[81493]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:33 compute-0 sudo[81618]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xdqwkbjmlskwfrjeqbbnvzzikiibjybn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146051.9953682-217-100501523261924/AnsiballZ_copy.py'
Oct 11 01:27:33 compute-0 sudo[81618]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:33 compute-0 python3.9[81620]: ansible-ansible.legacy.copy Invoked with dest=/etc/nftables/edpm-rules.nft group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146051.9953682-217-100501523261924/.source.nft follow=False _original_basename=ruleset.j2 checksum=bdba38546f86123f1927359d89789bd211aba99d backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:27:33 compute-0 sudo[81618]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:34 compute-0 sudo[81770]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ntbnmfqwfmdolgttdmiiysssaxosdzsn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146053.6915128-232-52421082345555/AnsiballZ_file.py'
Oct 11 01:27:34 compute-0 sudo[81770]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:34 compute-0 python3.9[81772]: ansible-ansible.builtin.file Invoked with group=root mode=0600 owner=root path=/etc/nftables/edpm-rules.nft.changed state=touch recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:27:34 compute-0 sudo[81770]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:34 compute-0 sudo[81922]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wldfowmapdlhkocldhapmjfirdtadufs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146054.5239677-240-238760548244430/AnsiballZ_command.py'
Oct 11 01:27:34 compute-0 sudo[81922]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:35 compute-0 python3.9[81924]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail; cat /etc/nftables/edpm-chains.nft /etc/nftables/edpm-flushes.nft /etc/nftables/edpm-rules.nft /etc/nftables/edpm-update-jumps.nft /etc/nftables/edpm-jumps.nft | nft -c -f - _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:27:35 compute-0 sudo[81922]: pam_unix(sudo:session): session closed for user root
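[annotation] The five rendered fragments (chains, flushes, rules, update-jumps, jumps) are concatenated in dependency order and syntax-checked with `nft -c -f -` before anything is loaded, so a bad rule aborts the play while the kernel ruleset stays untouched. A sketch of that offline check:

    # Sketch of the validation step logged above; -c parses without
    # committing anything to the kernel.
    - name: Validate the assembled ruleset without loading it
      ansible.builtin.shell: >
        set -o pipefail;
        cat /etc/nftables/edpm-chains.nft
        /etc/nftables/edpm-flushes.nft
        /etc/nftables/edpm-rules.nft
        /etc/nftables/edpm-update-jumps.nft
        /etc/nftables/edpm-jumps.nft
        | nft -c -f -
      changed_when: false
      become: true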
Oct 11 01:27:35 compute-0 sudo[82077]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-omwrmcvynbgvikrhbbsvhyjgsnxkbsgm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146055.4159985-248-247614849217470/AnsiballZ_blockinfile.py'
Oct 11 01:27:36 compute-0 sudo[82077]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:36 compute-0 python3.9[82079]: ansible-ansible.builtin.blockinfile Invoked with backup=False block=include "/etc/nftables/iptables.nft"
                                            include "/etc/nftables/edpm-chains.nft"
                                            include "/etc/nftables/edpm-rules.nft"
                                            include "/etc/nftables/edpm-jumps.nft"
                                             path=/etc/sysconfig/nftables.conf validate=nft -c -f %s state=present marker=# {mark} ANSIBLE MANAGED BLOCK create=False marker_begin=BEGIN marker_end=END append_newline=False prepend_newline=False unsafe_writes=False insertafter=None insertbefore=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:27:36 compute-0 sudo[82077]: pam_unix(sudo:session): session closed for user root
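[annotation] Persistence across reboots comes from a managed block in /etc/sysconfig/nftables.conf, validated with `nft -c -f %s` before the write is committed. A sketch of the blockinfile task as logged:

    # Sketch: includes are only persisted if the resulting file parses.
    - name: Persist EDPM includes in the boot-time nftables config
      ansible.builtin.blockinfile:
        path: /etc/sysconfig/nftables.conf
        validate: nft -c -f %s
        block: |
          include "/etc/nftables/iptables.nft"
          include "/etc/nftables/edpm-chains.nft"
          include "/etc/nftables/edpm-rules.nft"
          include "/etc/nftables/edpm-jumps.nft"
      become: true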
Oct 11 01:27:36 compute-0 sudo[82229]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-phfshadksduttefqxnlwnpsolqgchpkq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146056.5037124-257-124318979610544/AnsiballZ_command.py'
Oct 11 01:27:36 compute-0 sudo[82229]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:37 compute-0 python3.9[82231]: ansible-ansible.legacy.command Invoked with _raw_params=nft -f /etc/nftables/edpm-chains.nft _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:27:37 compute-0 sudo[82229]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:37 compute-0 sudo[82382]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dxkxbygowvtfwnifhgznddhyonlcldbi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146057.4092572-265-7471195870730/AnsiballZ_stat.py'
Oct 11 01:27:37 compute-0 sudo[82382]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:38 compute-0 python3.9[82384]: ansible-ansible.builtin.stat Invoked with path=/etc/nftables/edpm-rules.nft.changed follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:27:38 compute-0 sudo[82382]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:38 compute-0 sudo[82536]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yyelsqbhkgwlilkuhlmdeiprefmzycnv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146058.4089382-273-247811846940378/AnsiballZ_command.py'
Oct 11 01:27:38 compute-0 sudo[82536]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:39 compute-0 python3.9[82538]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail; cat /etc/nftables/edpm-flushes.nft /etc/nftables/edpm-rules.nft /etc/nftables/edpm-update-jumps.nft | nft -f - _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:27:39 compute-0 sudo[82536]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:39 compute-0 sudo[82691]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tisqhbxwhersqhfhxbzaqvjpcupruxva ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146059.319924-281-125007501236837/AnsiballZ_file.py'
Oct 11 01:27:39 compute-0 sudo[82691]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:39 compute-0 python3.9[82693]: ansible-ansible.builtin.file Invoked with path=/etc/nftables/edpm-rules.nft.changed state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:27:39 compute-0 sudo[82691]: pam_unix(sudo:session): session closed for user root
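[annotation] The live apply is two-phase: the chains file is loaded unconditionally (creating chains is idempotent), then flushes, rules, and update-jumps are piped through a single `nft -f -` so the reload is atomic. The edpm-rules.nft.changed marker touched earlier gates that reload and is removed once it succeeds. A guarded sketch:

    # Sketch of the apply sequence logged above; the marker file makes
    # the flush-and-reload conditional on a rules change.
    - name: Ensure EDPM chains exist
      ansible.builtin.command: nft -f /etc/nftables/edpm-chains.nft
      become: true

    - name: Check whether the rendered rules changed
      ansible.builtin.stat:
        path: /etc/nftables/edpm-rules.nft.changed
      register: rules_changed
      become: true

    - name: Atomically flush and reload the EDPM rules
      ansible.builtin.shell: >
        set -o pipefail;
        cat /etc/nftables/edpm-flushes.nft
        /etc/nftables/edpm-rules.nft
        /etc/nftables/edpm-update-jumps.nft
        | nft -f -
      when: rules_changed.stat.exists
      become: true

    - name: Clear the change marker
      ansible.builtin.file:
        path: /etc/nftables/edpm-rules.nft.changed
        state: absent
      when: rules_changed.stat.exists
      become: true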
Oct 11 01:27:41 compute-0 python3.9[82843]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'machine'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:27:42 compute-0 sudo[82994]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bjqvzopgbmkwxedhkjaqazovnliaxlfg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146061.8461154-321-864044810295/AnsiballZ_command.py'
Oct 11 01:27:42 compute-0 sudo[82994]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:42 compute-0 python3.9[82996]: ansible-ansible.legacy.command Invoked with _raw_params=ovs-vsctl set open . external_ids:hostname=compute-0.ctlplane.example.com external_ids:ovn-bridge=br-int external_ids:ovn-bridge-mappings=datacentre:br-ex external_ids:ovn-chassis-mac-mappings="datacentre:0e:0a:c0:16:5a:16" external_ids:ovn-encap-ip=172.19.0.100 external_ids:ovn-encap-type=geneve external_ids:ovn-encap-tos=0 external_ids:ovn-match-northd-version=False external_ids:ovn-monitor-all=True external_ids:ovn-remote=ssl:ovsdbserver-sb.openstack.svc:6642 external_ids:ovn-remote-probe-interval=60000 external_ids:ovn-ofctrl-wait-before-clear=8000 external_ids:rundir=/var/run/openvswitch 
                                             _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:27:42 compute-0 ovs-vsctl[82997]: ovs|00001|vsctl|INFO|Called as ovs-vsctl set open . external_ids:hostname=compute-0.ctlplane.example.com external_ids:ovn-bridge=br-int external_ids:ovn-bridge-mappings=datacentre:br-ex external_ids:ovn-chassis-mac-mappings=datacentre:0e:0a:c0:16:5a:16 external_ids:ovn-encap-ip=172.19.0.100 external_ids:ovn-encap-type=geneve external_ids:ovn-encap-tos=0 external_ids:ovn-match-northd-version=False external_ids:ovn-monitor-all=True external_ids:ovn-remote=ssl:ovsdbserver-sb.openstack.svc:6642 external_ids:ovn-remote-probe-interval=60000 external_ids:ovn-ofctrl-wait-before-clear=8000 external_ids:rundir=/var/run/openvswitch
Oct 11 01:27:42 compute-0 sudo[82994]: pam_unix(sudo:session): session closed for user root
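[annotation] These external_ids register the node as an OVN chassis with its local ovsdb: Geneve encapsulation over 172.19.0.100, the datacentre:br-ex bridge mapping, the SSL southbound endpoint, and tuning knobs (remote probe interval, ofctrl wait-before-clear). A trimmed sketch with the values from this run:

    # Sketch: subset of the chassis registration logged above;
    # values are node-specific.
    - name: Register OVN chassis settings in the local ovsdb
      ansible.builtin.command: >
        ovs-vsctl set open .
        external_ids:hostname=compute-0.ctlplane.example.com
        external_ids:ovn-bridge=br-int
        external_ids:ovn-bridge-mappings=datacentre:br-ex
        external_ids:ovn-encap-type=geneve
        external_ids:ovn-encap-ip=172.19.0.100
        external_ids:ovn-remote=ssl:ovsdbserver-sb.openstack.svc:6642
        external_ids:ovn-monitor-all=true
      become: true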
Oct 11 01:27:43 compute-0 sudo[83147]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-brnkcirixeamspvkbtmhkhlqgldqqqaz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146062.7544842-330-214798034285315/AnsiballZ_command.py'
Oct 11 01:27:43 compute-0 sudo[83147]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:43 compute-0 python3.9[83149]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail
                                            ovs-vsctl show | grep -q "Manager"
                                             _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:27:43 compute-0 sudo[83147]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:44 compute-0 sudo[83302]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ykllulyqunmpwaoweuqlvyumuocwojmu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146063.8047092-338-90731304942602/AnsiballZ_command.py'
Oct 11 01:27:44 compute-0 sudo[83302]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:44 compute-0 python3.9[83304]: ansible-ansible.legacy.command Invoked with _raw_params=ovs-vsctl --timeout=5 --id=@manager -- create Manager target=\"ptcp:6640:127.0.0.1\" -- add Open_vSwitch . manager_options @manager
                                             _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:27:44 compute-0 ovs-vsctl[83305]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --timeout=5 --id=@manager -- create Manager "target=\"ptcp:6640:127.0.0.1\"" -- add Open_vSwitch . manager_options @manager
Oct 11 01:27:44 compute-0 sudo[83302]: pam_unix(sudo:session): session closed for user root
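[annotation] The `ovs-vsctl show | grep -q Manager` probe makes the Manager creation idempotent: only when no manager exists is a passive ptcp listener registered on 127.0.0.1:6640, giving local tooling TCP access to the ovsdb. A sketch of that check-then-create pair:

    # Sketch: idempotent creation of the local ovsdb manager listener.
    - name: Check for an existing ovsdb Manager
      ansible.builtin.shell: ovs-vsctl show | grep -q Manager
      register: ovs_manager
      failed_when: ovs_manager.rc not in [0, 1]
      changed_when: false
      become: true

    - name: Listen on ptcp:6640 for local ovsdb clients
      ansible.builtin.shell: >
        ovs-vsctl --timeout=5 --id=@manager
        -- create Manager target=\"ptcp:6640:127.0.0.1\"
        -- add Open_vSwitch . manager_options @manager
      when: ovs_manager.rc == 1
      become: true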
Oct 11 01:27:45 compute-0 python3.9[83455]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:27:46 compute-0 sudo[83607]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ctvxpvdrcaokcnlogpkvmlizwzskfaxc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146065.6516445-355-224618709983659/AnsiballZ_file.py'
Oct 11 01:27:46 compute-0 sudo[83607]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:46 compute-0 python3.9[83609]: ansible-ansible.builtin.file Invoked with path=/var/local/libexec recurse=True setype=container_file_t state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:27:46 compute-0 sudo[83607]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:46 compute-0 sudo[83759]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lwgbiaickwhbdvqhzwxtekyoyywcxovh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146066.536932-363-221878579828782/AnsiballZ_stat.py'
Oct 11 01:27:46 compute-0 sudo[83759]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:47 compute-0 python3.9[83761]: ansible-ansible.legacy.stat Invoked with path=/var/local/libexec/edpm-container-shutdown follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:27:47 compute-0 sudo[83759]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:47 compute-0 sudo[83837]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jxytzdtmspqmgwljjbvddekxkpdmqgvn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146066.536932-363-221878579828782/AnsiballZ_file.py'
Oct 11 01:27:47 compute-0 sudo[83837]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:47 compute-0 python3.9[83839]: ansible-ansible.legacy.file Invoked with group=root mode=0700 owner=root setype=container_file_t dest=/var/local/libexec/edpm-container-shutdown _original_basename=edpm-container-shutdown recurse=False state=file path=/var/local/libexec/edpm-container-shutdown force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:27:47 compute-0 sudo[83837]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:48 compute-0 sudo[83989]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bigvcfjdshytdzpavvvqpotzvmipcvsd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146068.0045018-363-144387999470832/AnsiballZ_stat.py'
Oct 11 01:27:48 compute-0 sudo[83989]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:48 compute-0 python3.9[83991]: ansible-ansible.legacy.stat Invoked with path=/var/local/libexec/edpm-start-podman-container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:27:48 compute-0 sudo[83989]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:49 compute-0 sudo[84067]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ecrxtmkpklslkztrdcnwddofdxvyrumn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146068.0045018-363-144387999470832/AnsiballZ_file.py'
Oct 11 01:27:49 compute-0 sudo[84067]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:49 compute-0 python3.9[84069]: ansible-ansible.legacy.file Invoked with group=root mode=0700 owner=root setype=container_file_t dest=/var/local/libexec/edpm-start-podman-container _original_basename=edpm-start-podman-container recurse=False state=file path=/var/local/libexec/edpm-start-podman-container force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:27:49 compute-0 sudo[84067]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:49 compute-0 sudo[84219]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dwqagcdpvaibhlicpamxhhhfntgrjgfy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146069.4768043-386-257864709867613/AnsiballZ_file.py'
Oct 11 01:27:49 compute-0 sudo[84219]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:50 compute-0 python3.9[84221]: ansible-ansible.builtin.file Invoked with mode=420 path=/etc/systemd/system-preset state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:27:50 compute-0 sudo[84219]: pam_unix(sudo:session): session closed for user root
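The mode=420 in the directory task above is not corruption: Ansible logs the integer it received in decimal, and 420 decimal is exactly octal 0644 (presumably an unquoted 0644 in the playbook's YAML). A one-liner confirming the equivalence:

```python
# 420 (decimal) == 0o644, so /etc/systemd/system-preset still lands
# with the conventional rw-r--r-- permission bits.
assert 420 == 0o644
print(oct(420))  # -> 0o644
```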
Oct 11 01:27:50 compute-0 sudo[84371]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iydddjzwvjlriqmddegkffibrkajirvp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146070.3103366-394-77992262450961/AnsiballZ_stat.py'
Oct 11 01:27:50 compute-0 sudo[84371]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:50 compute-0 python3.9[84373]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/edpm-container-shutdown.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:27:50 compute-0 sudo[84371]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:51 compute-0 sudo[84449]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gjljaenxgcbrvffkljpyuldjrrxtvmit ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146070.3103366-394-77992262450961/AnsiballZ_file.py'
Oct 11 01:27:51 compute-0 sudo[84449]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:51 compute-0 python3.9[84451]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/edpm-container-shutdown.service _original_basename=edpm-container-shutdown-service recurse=False state=file path=/etc/systemd/system/edpm-container-shutdown.service force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:27:51 compute-0 sudo[84449]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:52 compute-0 sudo[84601]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tzqkrprjqcuaycrvumopjsfaslppufpr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146071.6866837-406-129426080845625/AnsiballZ_stat.py'
Oct 11 01:27:52 compute-0 sudo[84601]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:52 compute-0 python3.9[84603]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system-preset/91-edpm-container-shutdown.preset follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:27:52 compute-0 sudo[84601]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:52 compute-0 sudo[84679]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eurjyddadurgqcmhcqrbvxxuxeycxkli ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146071.6866837-406-129426080845625/AnsiballZ_file.py'
Oct 11 01:27:52 compute-0 sudo[84679]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:52 compute-0 python3.9[84681]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system-preset/91-edpm-container-shutdown.preset _original_basename=91-edpm-container-shutdown-preset recurse=False state=file path=/etc/systemd/system-preset/91-edpm-container-shutdown.preset force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:27:52 compute-0 sudo[84679]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:53 compute-0 sudo[84831]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kkhlwaoigevhjtcshrydapizmugpjige ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146073.1001868-418-173857837768873/AnsiballZ_systemd.py'
Oct 11 01:27:53 compute-0 sudo[84831]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:53 compute-0 python3.9[84833]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=edpm-container-shutdown state=started daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:27:53 compute-0 systemd[1]: Reloading.
Oct 11 01:27:53 compute-0 systemd-sysv-generator[84863]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:27:53 compute-0 systemd-rc-local-generator[84858]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:27:54 compute-0 sudo[84831]: pam_unix(sudo:session): session closed for user root
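The ansible.builtin.systemd call above (daemon_reload=True, enabled=True, state=started) collapses to two systemctl invocations; the Reloading and generator lines that follow it are the daemon-reload taking effect. A sketch, assuming root:

```python
# Equivalent of the systemd module invocation logged above.
import subprocess

subprocess.run(["systemctl", "daemon-reload"], check=True)
subprocess.run(["systemctl", "enable", "--now", "edpm-container-shutdown"],
               check=True)  # enabled=True + state=started
```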
Oct 11 01:27:54 compute-0 sudo[85020]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dgbbgrjopqbrbaqvvbjrigsagpkzitwb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146074.3446546-426-84429855781238/AnsiballZ_stat.py'
Oct 11 01:27:54 compute-0 sudo[85020]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:54 compute-0 python3.9[85022]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/netns-placeholder.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:27:55 compute-0 sudo[85020]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:55 compute-0 sudo[85098]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cfrpdgdtixnygbuvvcylskjrsoivnzqb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146074.3446546-426-84429855781238/AnsiballZ_file.py'
Oct 11 01:27:55 compute-0 sudo[85098]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:55 compute-0 python3.9[85100]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/netns-placeholder.service _original_basename=netns-placeholder-service recurse=False state=file path=/etc/systemd/system/netns-placeholder.service force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:27:55 compute-0 sudo[85098]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:56 compute-0 sudo[85250]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ctcnzwiilvewkcodujqlhpizxhqiwzpk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146075.825488-438-128694654345389/AnsiballZ_stat.py'
Oct 11 01:27:56 compute-0 sudo[85250]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:56 compute-0 python3.9[85252]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system-preset/91-netns-placeholder.preset follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:27:56 compute-0 sudo[85250]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:56 compute-0 sudo[85328]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ogpydpxcyhfwqhxsubssmsmhhntzaisp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146075.825488-438-128694654345389/AnsiballZ_file.py'
Oct 11 01:27:56 compute-0 sudo[85328]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:56 compute-0 python3.9[85330]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system-preset/91-netns-placeholder.preset _original_basename=91-netns-placeholder-preset recurse=False state=file path=/etc/systemd/system-preset/91-netns-placeholder.preset force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:27:57 compute-0 sudo[85328]: pam_unix(sudo:session): session closed for user root
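The contents of 91-netns-placeholder.preset are not logged. Assuming the conventional systemd.preset syntax (one "enable"/"disable" directive per line), the file most likely carries a single enable line; a hypothetical reconstruction of what the copy task landed:

```python
# HYPOTHETICAL preset body; the real file's contents are not in the log.
# systemd.preset files list "enable <unit>" / "disable <unit>" directives.
from pathlib import Path

Path("/etc/systemd/system-preset/91-netns-placeholder.preset").write_text(
    "enable netns-placeholder.service\n"
)  # requires root, like the task above
```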
Oct 11 01:27:57 compute-0 sudo[85480]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lzwwadmufmjjkynwicagbwmlcycimbws ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146077.237223-450-266518076477583/AnsiballZ_systemd.py'
Oct 11 01:27:57 compute-0 sudo[85480]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:57 compute-0 python3.9[85482]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=netns-placeholder state=started daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:27:58 compute-0 systemd[1]: Reloading.
Oct 11 01:27:58 compute-0 systemd-sysv-generator[85512]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:27:58 compute-0 systemd-rc-local-generator[85509]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:27:58 compute-0 systemd[1]: Starting Create netns directory...
Oct 11 01:27:58 compute-0 systemd[1]: run-netns-placeholder.mount: Deactivated successfully.
Oct 11 01:27:58 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Oct 11 01:27:58 compute-0 systemd[1]: Finished Create netns directory.
Oct 11 01:27:58 compute-0 sudo[85480]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:59 compute-0 sudo[85674]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zjhvmjbkssqgqkipxomqeyeeltfaoqyd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146078.6568887-460-238805454758868/AnsiballZ_file.py'
Oct 11 01:27:59 compute-0 sudo[85674]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:27:59 compute-0 python3.9[85676]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/openstack/healthchecks setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:27:59 compute-0 sudo[85674]: pam_unix(sudo:session): session closed for user root
Oct 11 01:27:59 compute-0 sudo[85826]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pwfhokycofftlfstrzdayejdgiiouadv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146079.5067103-468-181941543821875/AnsiballZ_stat.py'
Oct 11 01:27:59 compute-0 sudo[85826]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:00 compute-0 python3.9[85828]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/ovn_controller/healthcheck follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:28:00 compute-0 sudo[85826]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:00 compute-0 sudo[85949]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gahaqpamdolmtvvyibzcgffvyieabiij ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146079.5067103-468-181941543821875/AnsiballZ_copy.py'
Oct 11 01:28:00 compute-0 sudo[85949]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:00 compute-0 python3.9[85951]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/healthchecks/ovn_controller/ group=zuul mode=0700 owner=zuul setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760146079.5067103-468-181941543821875/.source _original_basename=healthcheck follow=False checksum=4098dd010265fabdf5c26b97d169fc4e575ff457 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:28:00 compute-0 sudo[85949]: pam_unix(sudo:session): session closed for user root
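The legacy.stat/legacy.copy pair above is Ansible's idempotence dance: hash the destination first, copy only on mismatch (the copy logs checksum=4098dd01... for the incoming source). A minimal sketch of that SHA-1 comparison:

```python
# Compute the same sha1 the stat task requested (checksum_algorithm=sha1)
# for the healthcheck script that was just installed.
import hashlib
from pathlib import Path

def sha1_of(path: str) -> str:
    return hashlib.sha1(Path(path).read_bytes()).hexdigest()

print(sha1_of("/var/lib/openstack/healthchecks/ovn_controller/healthcheck"))
```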
Oct 11 01:28:01 compute-0 sudo[86101]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sfvweheozytsckemvrpcfmcmbpbuzeed ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146081.2986462-485-78093094225741/AnsiballZ_file.py'
Oct 11 01:28:01 compute-0 sudo[86101]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:01 compute-0 python3.9[86103]: ansible-ansible.builtin.file Invoked with path=/var/lib/kolla/config_files recurse=True setype=container_file_t state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:28:01 compute-0 sudo[86101]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:02 compute-0 sudo[86253]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ozlokloeufgsolwnazisnmjuxogamyvs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146082.23061-493-174492859994292/AnsiballZ_stat.py'
Oct 11 01:28:02 compute-0 sudo[86253]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:02 compute-0 python3.9[86255]: ansible-ansible.legacy.stat Invoked with path=/var/lib/kolla/config_files/ovn_controller.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:28:02 compute-0 sudo[86253]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:03 compute-0 sudo[86376]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ddzovpjxmewwjdglwnarryuzydjsdtmk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146082.23061-493-174492859994292/AnsiballZ_copy.py'
Oct 11 01:28:03 compute-0 sudo[86376]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:03 compute-0 python3.9[86378]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/kolla/config_files/ovn_controller.json mode=0600 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146082.23061-493-174492859994292/.source.json _original_basename=.8md5hn0l follow=False checksum=2328fc98619beeb08ee32b01f15bb43094c10b61 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:28:03 compute-0 sudo[86376]: pam_unix(sudo:session): session closed for user root
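The body of ovn_controller.json is not logged here, but the command it carries is echoed later when the container starts (the "cat /run_command" lines below). A plausible skeleton, assuming the standard kolla config.json schema; the logged checksum (2328fc98...) belongs to the real file, which also typically lists config_files/permissions entries omitted here:

```python
# HYPOTHETICAL reconstruction of the kolla config.json for ovn_controller,
# grounded only in the command string the container later echoes.
import json

config = {
    "command": (
        "/usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock "
        "-p /etc/pki/tls/private/ovndb.key "
        "-c /etc/pki/tls/certs/ovndb.crt "
        "-C /etc/pki/tls/certs/ovndbca.crt"
    ),
}
print(json.dumps(config, indent=2))
```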
Oct 11 01:28:04 compute-0 sudo[86528]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wbhhwapgfddfzugxmbidmnaagcmyuruz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146083.8231246-508-37782504694829/AnsiballZ_file.py'
Oct 11 01:28:04 compute-0 sudo[86528]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:04 compute-0 python3.9[86530]: ansible-ansible.builtin.file Invoked with mode=0755 path=/var/lib/edpm-config/container-startup-config/ovn_controller state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:28:04 compute-0 sudo[86528]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:05 compute-0 sudo[86680]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vcuvjjvzuwtwfgreryhszsuvvhoagioz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146084.6659079-516-218315469481706/AnsiballZ_stat.py'
Oct 11 01:28:05 compute-0 sudo[86680]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:05 compute-0 sudo[86680]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:05 compute-0 sudo[86803]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-poyetmqcsnlojpfxejfzeabggnfqkxzg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146084.6659079-516-218315469481706/AnsiballZ_copy.py'
Oct 11 01:28:05 compute-0 sudo[86803]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:06 compute-0 sudo[86803]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:07 compute-0 sudo[86955]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pknbuvjpmiwpuyxzwpcyrkmqqwjizkke ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146086.486396-533-107774276916480/AnsiballZ_container_config_data.py'
Oct 11 01:28:07 compute-0 sudo[86955]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:07 compute-0 python3.9[86957]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/edpm-config/container-startup-config/ovn_controller config_pattern=*.json debug=False
Oct 11 01:28:07 compute-0 sudo[86955]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:08 compute-0 sudo[87107]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bcuffwuekaggtrilwbaqgzzjwkarwkzy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146087.7334495-542-112021395331527/AnsiballZ_container_config_hash.py'
Oct 11 01:28:08 compute-0 sudo[87107]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:08 compute-0 python3.9[87109]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
Oct 11 01:28:08 compute-0 sudo[87107]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:09 compute-0 sudo[87259]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jrbrwveyvxhzgwhzndrbxclltuizwpfk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146088.8705122-551-96026870336556/AnsiballZ_podman_container_info.py'
Oct 11 01:28:09 compute-0 sudo[87259]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:09 compute-0 python3.9[87261]: ansible-containers.podman.podman_container_info Invoked with executable=podman name=None
Oct 11 01:28:09 compute-0 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.
Oct 11 01:28:09 compute-0 sudo[87259]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:10 compute-0 sudo[87422]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kvagrngqzpthztrdveeybztdbqlphuov ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760146090.1381278-564-256426022196818/AnsiballZ_edpm_container_manage.py'
Oct 11 01:28:10 compute-0 sudo[87422]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:11 compute-0 python3[87424]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/edpm-config/container-startup-config/ovn_controller config_id=ovn_controller config_overrides={} config_patterns=*.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 01:28:11 compute-0 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.
Oct 11 01:28:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-compat2126021138-lower\x2dmapped.mount: Deactivated successfully.
Oct 11 01:28:17 compute-0 podman[87437]: 2025-10-11 01:28:17.553009563 +0000 UTC m=+6.422024972 image pull 3b86aea1acd0e80af91d8a3efa79cc99f54489e3c22377193c4282a256797350 quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified
Oct 11 01:28:17 compute-0 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.
Oct 11 01:28:17 compute-0 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.
Oct 11 01:28:17 compute-0 podman[87555]: 2025-10-11 01:28:17.773407808 +0000 UTC m=+0.076624964 container create 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, managed_by=edpm_ansible, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.schema-version=1.0, container_name=ovn_controller, org.label-schema.license=GPLv2, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team)
Oct 11 01:28:17 compute-0 podman[87555]: 2025-10-11 01:28:17.736012517 +0000 UTC m=+0.039229713 image pull 3b86aea1acd0e80af91d8a3efa79cc99f54489e3c22377193c4282a256797350 quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified
Oct 11 01:28:17 compute-0 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.
Oct 11 01:28:17 compute-0 python3[87424]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman create --name ovn_controller --conmon-pidfile /run/ovn_controller.pid --env KOLLA_CONFIG_STRATEGY=COPY_ALWAYS --healthcheck-command /openstack/healthcheck --label config_id=ovn_controller --label container_name=ovn_controller --label managed_by=edpm_ansible --label config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']} --log-driver journald --log-level info --network host --privileged=True --user root --volume /lib/modules:/lib/modules:ro --volume /run:/run --volume /var/lib/openvswitch/ovn:/run/ovn:shared,z --volume /var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro --volume /var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z --volume /var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z --volume /var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z --volume /var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z --volume /var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified
Oct 11 01:28:18 compute-0 sudo[87422]: pam_unix(sudo:session): session closed for user root
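The PODMAN-CONTAINER-DEBUG line above is the full create command edpm_container_manage assembled. Distilled to the flags that define the container's runtime shape (host network, privileged, root user, journald logging, the mounted healthcheck as the probe); the volume and label arguments are trimmed here for brevity, so this sketch alone would not produce a working ovn_controller:

```python
# Abbreviated form of the podman create logged above; see that line for
# the complete volume/label set.
import subprocess

subprocess.run(
    ["podman", "create", "--name", "ovn_controller",
     "--conmon-pidfile", "/run/ovn_controller.pid",
     "--env", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS",
     "--healthcheck-command", "/openstack/healthcheck",
     "--log-driver", "journald", "--network", "host",
     "--privileged=True", "--user", "root",
     "quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified"],
    check=True,
)
```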
Oct 11 01:28:18 compute-0 sudo[87744]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-amyaqvzvgiatkrakgzalnslwjceekkqt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146098.220458-572-108080357297275/AnsiballZ_stat.py'
Oct 11 01:28:18 compute-0 sudo[87744]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:18 compute-0 python3.9[87746]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:28:18 compute-0 sudo[87744]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:19 compute-0 sudo[87898]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ilknsojhtaeuftkvyybpxschbreiukqm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146099.1572595-581-168804941892637/AnsiballZ_file.py'
Oct 11 01:28:19 compute-0 sudo[87898]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:19 compute-0 python3.9[87900]: ansible-file Invoked with path=/etc/systemd/system/edpm_ovn_controller.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:28:19 compute-0 sudo[87898]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:20 compute-0 sudo[87974]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-thlbspsuilxgtbhgmchvevlnujkhskwo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146099.1572595-581-168804941892637/AnsiballZ_stat.py'
Oct 11 01:28:20 compute-0 sudo[87974]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:20 compute-0 python3.9[87976]: ansible-stat Invoked with path=/etc/systemd/system/edpm_ovn_controller_healthcheck.timer follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:28:20 compute-0 sudo[87974]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:21 compute-0 sudo[88125]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-itdffqhrpwevmbjrzpyoxheqjpsqmjlz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146100.4606278-581-60368061802424/AnsiballZ_copy.py'
Oct 11 01:28:21 compute-0 sudo[88125]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:21 compute-0 python3.9[88127]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760146100.4606278-581-60368061802424/source dest=/etc/systemd/system/edpm_ovn_controller.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:28:21 compute-0 sudo[88125]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:21 compute-0 sudo[88201]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-odwtnnfnpkunalnuhcavzqudjlexlszq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146100.4606278-581-60368061802424/AnsiballZ_systemd.py'
Oct 11 01:28:21 compute-0 sudo[88201]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:21 compute-0 python3.9[88203]: ansible-systemd Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 01:28:21 compute-0 systemd[1]: Reloading.
Oct 11 01:28:22 compute-0 systemd-rc-local-generator[88231]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:28:22 compute-0 systemd-sysv-generator[88234]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:28:22 compute-0 sudo[88201]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:22 compute-0 sudo[88312]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-unvnpzfvygcwqumbmiimdlfvcjdchnfm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146100.4606278-581-60368061802424/AnsiballZ_systemd.py'
Oct 11 01:28:22 compute-0 sudo[88312]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:22 compute-0 python3.9[88314]: ansible-systemd Invoked with state=restarted name=edpm_ovn_controller.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:28:23 compute-0 systemd[1]: Reloading.
Oct 11 01:28:23 compute-0 systemd-rc-local-generator[88344]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:28:23 compute-0 systemd-sysv-generator[88347]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:28:23 compute-0 systemd[1]: Starting ovn_controller container...
Oct 11 01:28:23 compute-0 systemd[1]: Created slice Virtual Machine and Container Slice.
Oct 11 01:28:23 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:28:23 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/72cf541d0c6d75156bd4690ce11e1f1c1e52b1d33f998cb8bdc0729e173f0051/merged/run/ovn supports timestamps until 2038 (0x7fffffff)
Oct 11 01:28:23 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112.
Oct 11 01:28:23 compute-0 podman[88354]: 2025-10-11 01:28:23.517335947 +0000 UTC m=+0.199783281 container init 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_controller, io.buildah.version=1.41.3, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']})
Oct 11 01:28:23 compute-0 ovn_controller[88370]: + sudo -E kolla_set_configs
Oct 11 01:28:23 compute-0 podman[88354]: 2025-10-11 01:28:23.565473307 +0000 UTC m=+0.247920591 container start 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_controller, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 01:28:23 compute-0 edpm-start-podman-container[88354]: ovn_controller
Oct 11 01:28:23 compute-0 systemd[1]: Created slice User Slice of UID 0.
Oct 11 01:28:23 compute-0 systemd[1]: Starting User Runtime Directory /run/user/0...
Oct 11 01:28:23 compute-0 edpm-start-podman-container[88353]: Creating additional drop-in dependency for "ovn_controller" (861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112)
Oct 11 01:28:23 compute-0 systemd[1]: Finished User Runtime Directory /run/user/0.
Oct 11 01:28:23 compute-0 systemd[1]: Starting User Manager for UID 0...
Oct 11 01:28:23 compute-0 systemd[1]: Reloading.
Oct 11 01:28:23 compute-0 podman[88377]: 2025-10-11 01:28:23.694512779 +0000 UTC m=+0.122421039 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=starting, health_failing_streak=1, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=ovn_controller, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009)
Oct 11 01:28:23 compute-0 systemd-rc-local-generator[88445]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:28:23 compute-0 systemd-sysv-generator[88448]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:28:23 compute-0 systemd[1]: 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112-37cb0896d78bc03b.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 01:28:23 compute-0 systemd[1]: 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112-37cb0896d78bc03b.service: Failed with result 'exit-code'.
Oct 11 01:28:23 compute-0 systemd[1]: Started ovn_controller container.
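The status=1/FAILURE just above is the transient healthcheck unit (861aca...-37cb0896d78bc03b.service) firing before ovn-controller is ready; the earlier health_status=starting, health_failing_streak=1 line shows the same probe from podman's side. The probe can be replayed by hand; its exit status mirrors the container's health:

```python
# Run the container's own healthcheck once, as the systemd timer does.
# rc 0 means healthy; nonzero matches the FAILURE seen in the log.
import subprocess

rc = subprocess.run(["podman", "healthcheck", "run", "ovn_controller"]).returncode
print("healthy" if rc == 0 else f"unhealthy (rc={rc})")
```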
Oct 11 01:28:23 compute-0 systemd[88413]: pam_unix(systemd-user:session): session opened for user root(uid=0) by root(uid=0)
Oct 11 01:28:24 compute-0 sudo[88312]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:24 compute-0 systemd[88413]: Queued start job for default target Main User Target.
Oct 11 01:28:24 compute-0 systemd[88413]: Created slice User Application Slice.
Oct 11 01:28:24 compute-0 systemd[88413]: Mark boot as successful after the user session has run 2 minutes was skipped because of an unmet condition check (ConditionUser=!@system).
Oct 11 01:28:24 compute-0 systemd[88413]: Started Daily Cleanup of User's Temporary Directories.
Oct 11 01:28:24 compute-0 systemd[88413]: Reached target Paths.
Oct 11 01:28:24 compute-0 systemd[88413]: Reached target Timers.
Oct 11 01:28:24 compute-0 systemd[88413]: Starting D-Bus User Message Bus Socket...
Oct 11 01:28:24 compute-0 systemd[88413]: Starting Create User's Volatile Files and Directories...
Oct 11 01:28:24 compute-0 systemd[88413]: Listening on D-Bus User Message Bus Socket.
Oct 11 01:28:24 compute-0 systemd[88413]: Finished Create User's Volatile Files and Directories.
Oct 11 01:28:24 compute-0 systemd[88413]: Reached target Sockets.
Oct 11 01:28:24 compute-0 systemd[88413]: Reached target Basic System.
Oct 11 01:28:24 compute-0 systemd[88413]: Reached target Main User Target.
Oct 11 01:28:24 compute-0 systemd[88413]: Startup finished in 173ms.
Oct 11 01:28:24 compute-0 systemd[1]: Started User Manager for UID 0.
Oct 11 01:28:24 compute-0 systemd[1]: Started Session c1 of User root.
Oct 11 01:28:24 compute-0 ovn_controller[88370]: INFO:__main__:Loading config file at /var/lib/kolla/config_files/config.json
Oct 11 01:28:24 compute-0 ovn_controller[88370]: INFO:__main__:Validating config file
Oct 11 01:28:24 compute-0 ovn_controller[88370]: INFO:__main__:Kolla config strategy set to: COPY_ALWAYS
Oct 11 01:28:24 compute-0 ovn_controller[88370]: INFO:__main__:Writing out command to execute
Oct 11 01:28:24 compute-0 systemd[1]: session-c1.scope: Deactivated successfully.
Oct 11 01:28:24 compute-0 ovn_controller[88370]: ++ cat /run_command
Oct 11 01:28:24 compute-0 ovn_controller[88370]: + CMD='/usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock  -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt '
Oct 11 01:28:24 compute-0 ovn_controller[88370]: + ARGS=
Oct 11 01:28:24 compute-0 ovn_controller[88370]: + sudo kolla_copy_cacerts
Oct 11 01:28:24 compute-0 systemd[1]: Started Session c2 of User root.
Oct 11 01:28:24 compute-0 systemd[1]: session-c2.scope: Deactivated successfully.
Oct 11 01:28:24 compute-0 ovn_controller[88370]: + [[ ! -n '' ]]
Oct 11 01:28:24 compute-0 ovn_controller[88370]: + . kolla_extend_start
Oct 11 01:28:24 compute-0 ovn_controller[88370]: Running command: '/usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock  -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt '
Oct 11 01:28:24 compute-0 ovn_controller[88370]: + echo 'Running command: '\''/usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock  -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt '\'''
Oct 11 01:28:24 compute-0 ovn_controller[88370]: + umask 0022
Oct 11 01:28:24 compute-0 ovn_controller[88370]: + exec /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00001|reconnect|INFO|unix:/run/openvswitch/db.sock: connecting...
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00002|reconnect|INFO|unix:/run/openvswitch/db.sock: connected
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00003|main|INFO|OVN internal version is : [24.03.7-20.33.0-76.8]
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00004|main|INFO|OVS IDL reconnected, force recompute.
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00005|reconnect|INFO|ssl:ovsdbserver-sb.openstack.svc:6642: connecting...
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00006|main|INFO|OVNSB IDL reconnected, force recompute.
Oct 11 01:28:24 compute-0 NetworkManager[44908]: <info>  [1760146104.3502] manager: (br-int): new Open vSwitch Interface device (/org/freedesktop/NetworkManager/Devices/16)
Oct 11 01:28:24 compute-0 NetworkManager[44908]: <info>  [1760146104.3512] device (br-int)[Open vSwitch Interface]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Oct 11 01:28:24 compute-0 NetworkManager[44908]: <info>  [1760146104.3529] manager: (br-int): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/17)
Oct 11 01:28:24 compute-0 NetworkManager[44908]: <info>  [1760146104.3538] manager: (br-int): new Open vSwitch Bridge device (/org/freedesktop/NetworkManager/Devices/18)
Oct 11 01:28:24 compute-0 NetworkManager[44908]: <info>  [1760146104.3545] device (br-int)[Open vSwitch Interface]: state change: unavailable -> disconnected (reason 'none', managed-type: 'full')
Oct 11 01:28:24 compute-0 kernel: br-int: entered promiscuous mode
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00007|reconnect|INFO|ssl:ovsdbserver-sb.openstack.svc:6642: connected
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00008|features|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting to switch
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00009|rconn|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting...
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00010|features|INFO|OVS Feature: ct_zero_snat, state: supported
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00011|features|INFO|OVS Feature: ct_flush, state: supported
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00012|features|INFO|OVS Feature: dp_hash_l4_sym_support, state: supported
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00013|reconnect|INFO|unix:/run/openvswitch/db.sock: connecting...
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00014|main|INFO|OVS feature set changed, force recompute.
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00015|ofctrl|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting to switch
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00016|rconn|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting...
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00017|rconn|INFO|unix:/var/run/openvswitch/br-int.mgmt: connected
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00018|ofctrl|INFO|ofctrl-wait-before-clear is now 8000 ms (was 0 ms)
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00019|main|INFO|OVS OpenFlow connection reconnected,force recompute.
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00020|rconn|INFO|unix:/var/run/openvswitch/br-int.mgmt: connected
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00021|reconnect|INFO|unix:/run/openvswitch/db.sock: connected
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00022|main|INFO|OVS feature set changed, force recompute.
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00023|features|INFO|OVS DB schema supports 4 flow table prefixes, our IDL supports: 4
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00024|main|INFO|Setting flow table prefixes: ip_src, ip_dst, ipv6_src, ipv6_dst.
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00001|pinctrl(ovn_pinctrl0)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting to switch
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00001|statctrl(ovn_statctrl3)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting to switch
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00002|rconn(ovn_pinctrl0)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting...
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00002|rconn(ovn_statctrl3)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting...
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00003|rconn(ovn_pinctrl0)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connected
Oct 11 01:28:24 compute-0 ovn_controller[88370]: 2025-10-11T01:28:24Z|00003|rconn(ovn_statctrl3)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connected
Oct 11 01:28:24 compute-0 NetworkManager[44908]: <info>  [1760146104.3848] manager: (ovn-65bc68-0): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/19)
Oct 11 01:28:24 compute-0 kernel: genev_sys_6081: entered promiscuous mode
Oct 11 01:28:24 compute-0 NetworkManager[44908]: <info>  [1760146104.4091] device (genev_sys_6081): carrier: link connected
Oct 11 01:28:24 compute-0 NetworkManager[44908]: <info>  [1760146104.4096] manager: (genev_sys_6081): new Generic device (/org/freedesktop/NetworkManager/Devices/20)
Oct 11 01:28:24 compute-0 systemd-udevd[88572]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 01:28:24 compute-0 systemd-udevd[88577]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 01:28:24 compute-0 sudo[88635]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ukkccnyafbvqbxzfioeyxadxwsibrffz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146104.248373-609-168294098130738/AnsiballZ_command.py'
Oct 11 01:28:24 compute-0 sudo[88635]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:24 compute-0 python3.9[88637]: ansible-ansible.legacy.command Invoked with _raw_params=ovs-vsctl remove open . other_config hw-offload _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:28:24 compute-0 ovs-vsctl[88638]: ovs|00001|vsctl|INFO|Called as ovs-vsctl remove open . other_config hw-offload
Oct 11 01:28:24 compute-0 sudo[88635]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:25 compute-0 sudo[88788]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vraqyyoisalvtkvgpwqblvossmkjvcjl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146105.0805302-617-135105480336898/AnsiballZ_command.py'
Oct 11 01:28:25 compute-0 sudo[88788]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:25 compute-0 python3.9[88790]: ansible-ansible.legacy.command Invoked with _raw_params=ovs-vsctl get Open_vSwitch . external_ids:ovn-cms-options | sed 's/\"//g' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:28:25 compute-0 ovs-vsctl[88792]: ovs|00001|db_ctl_base|ERR|no key "ovn-cms-options" in Open_vSwitch record "." column external_ids
Oct 11 01:28:25 compute-0 sudo[88788]: pam_unix(sudo:session): session closed for user root
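The db_ctl_base ERR above is ovs-vsctl's normal reply when the requested key is absent, and the play evidently tolerates the nonzero exit. With --if-exists (a standard ovs-vsctl option for get) the lookup returns empty instead of erroring; the quote-stripping below mirrors the sed 's/\"//g' in the logged command:

```python
# Tolerant form of the lookup above: no error when ovn-cms-options is unset.
import subprocess

out = subprocess.run(
    ["ovs-vsctl", "--if-exists", "get", "Open_vSwitch", ".",
     "external_ids:ovn-cms-options"],
    capture_output=True, text=True,
).stdout.strip().strip('"')
print(out or "<unset>")
```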
Oct 11 01:28:26 compute-0 sudo[88943]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iioopvyogfgllhqqstzdiekheyjxajcr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146106.1260216-631-56244502267319/AnsiballZ_command.py'
Oct 11 01:28:26 compute-0 sudo[88943]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:26 compute-0 python3.9[88945]: ansible-ansible.legacy.command Invoked with _raw_params=ovs-vsctl remove Open_vSwitch . external_ids ovn-cms-options _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:28:26 compute-0 ovs-vsctl[88946]: ovs|00001|vsctl|INFO|Called as ovs-vsctl remove Open_vSwitch . external_ids ovn-cms-options
Oct 11 01:28:26 compute-0 sudo[88943]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:27 compute-0 sshd-session[77780]: Connection closed by 192.168.122.30 port 48494
Oct 11 01:28:27 compute-0 sshd-session[77777]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:28:27 compute-0 systemd-logind[804]: Session 19 logged out. Waiting for processes to exit.
Oct 11 01:28:27 compute-0 systemd[1]: session-19.scope: Deactivated successfully.
Oct 11 01:28:27 compute-0 systemd[1]: session-19.scope: Consumed 1min 12.317s CPU time.
Oct 11 01:28:27 compute-0 systemd-logind[804]: Removed session 19.
Oct 11 01:28:33 compute-0 sshd-session[88971]: Accepted publickey for zuul from 192.168.122.30 port 39452 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:28:33 compute-0 systemd-logind[804]: New session 21 of user zuul.
Oct 11 01:28:33 compute-0 systemd[1]: Started Session 21 of User zuul.
Oct 11 01:28:33 compute-0 sshd-session[88971]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:28:34 compute-0 systemd[1]: Stopping User Manager for UID 0...
Oct 11 01:28:34 compute-0 systemd[88413]: Activating special unit Exit the Session...
Oct 11 01:28:34 compute-0 systemd[88413]: Stopped target Main User Target.
Oct 11 01:28:34 compute-0 systemd[88413]: Stopped target Basic System.
Oct 11 01:28:34 compute-0 systemd[88413]: Stopped target Paths.
Oct 11 01:28:34 compute-0 systemd[88413]: Stopped target Sockets.
Oct 11 01:28:34 compute-0 systemd[88413]: Stopped target Timers.
Oct 11 01:28:34 compute-0 systemd[88413]: Stopped Daily Cleanup of User's Temporary Directories.
Oct 11 01:28:34 compute-0 systemd[88413]: Closed D-Bus User Message Bus Socket.
Oct 11 01:28:34 compute-0 systemd[88413]: Stopped Create User's Volatile Files and Directories.
Oct 11 01:28:34 compute-0 systemd[88413]: Removed slice User Application Slice.
Oct 11 01:28:34 compute-0 systemd[88413]: Reached target Shutdown.
Oct 11 01:28:34 compute-0 systemd[88413]: Finished Exit the Session.
Oct 11 01:28:34 compute-0 systemd[88413]: Reached target Exit the Session.
Oct 11 01:28:34 compute-0 systemd[1]: user@0.service: Deactivated successfully.
Oct 11 01:28:34 compute-0 systemd[1]: Stopped User Manager for UID 0.
Oct 11 01:28:34 compute-0 systemd[1]: Stopping User Runtime Directory /run/user/0...
Oct 11 01:28:34 compute-0 systemd[1]: run-user-0.mount: Deactivated successfully.
Oct 11 01:28:34 compute-0 systemd[1]: user-runtime-dir@0.service: Deactivated successfully.
Oct 11 01:28:34 compute-0 systemd[1]: Stopped User Runtime Directory /run/user/0.
Oct 11 01:28:34 compute-0 systemd[1]: Removed slice User Slice of UID 0.
Oct 11 01:28:34 compute-0 python3.9[89124]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:28:35 compute-0 sudo[89281]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-amahkfjpfagmsagrinwpgioqdrxwxyfp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146115.1662114-34-108558649610189/AnsiballZ_command.py'
Oct 11 01:28:35 compute-0 sudo[89281]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:36 compute-0 python3.9[89283]: ansible-ansible.legacy.command Invoked with _raw_params=podman ps -a --filter name=^nova_virtlogd$ --format \{\{.Names\}\} _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:28:36 compute-0 sudo[89281]: pam_unix(sudo:session): session closed for user root
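In the podman ps task above, the backslashes in \{\{.Names\}\} are Ansible's logging escapes, not part of the real command; the Go template braces are passed through literally. The same check, unescaped:

```python
# List any container whose name is exactly nova_virtlogd, as the task does.
import subprocess

result = subprocess.run(
    ["podman", "ps", "-a", "--filter", "name=^nova_virtlogd$",
     "--format", "{{.Names}}"],
    capture_output=True, text=True, check=True,
)
print(result.stdout.strip() or "<no such container>")
```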
Oct 11 01:28:37 compute-0 sudo[89446]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hkmooskcfyhaaiampvanmmmiuprvduyr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146116.5586967-45-147682436048172/AnsiballZ_systemd_service.py'
Oct 11 01:28:37 compute-0 sudo[89446]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:37 compute-0 python3.9[89448]: ansible-ansible.builtin.systemd_service Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 01:28:37 compute-0 systemd[1]: Reloading.
Oct 11 01:28:37 compute-0 systemd-rc-local-generator[89474]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:28:37 compute-0 systemd-sysv-generator[89479]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:28:37 compute-0 sudo[89446]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:38 compute-0 python3.9[89634]: ansible-ansible.builtin.service_facts Invoked
Oct 11 01:28:38 compute-0 network[89651]: You are using 'network' service provided by 'network-scripts', which are now deprecated.
Oct 11 01:28:38 compute-0 network[89652]: 'network-scripts' will be removed from distribution in near future.
Oct 11 01:28:38 compute-0 network[89653]: It is advised to switch to 'NetworkManager' instead for network management.
Oct 11 01:28:43 compute-0 sudo[89916]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-awxbtdjaqbsmgrbiolvchmmytdtbjzkv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146123.1226022-64-256651052178490/AnsiballZ_systemd_service.py'
Oct 11 01:28:43 compute-0 sudo[89916]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:43 compute-0 python3.9[89918]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_libvirt.target state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:28:43 compute-0 sudo[89916]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:44 compute-0 sudo[90069]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aripsjobekeqsxxfbfmjyzivpdzouuwc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146124.094566-64-141509403518086/AnsiballZ_systemd_service.py'
Oct 11 01:28:44 compute-0 sudo[90069]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:44 compute-0 python3.9[90071]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_virtlogd_wrapper.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:28:44 compute-0 sudo[90069]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:45 compute-0 sudo[90222]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tfkmtmqegftfeqleqoxscswtcabqmzzv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146125.164167-64-127143090227342/AnsiballZ_systemd_service.py'
Oct 11 01:28:45 compute-0 sudo[90222]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:45 compute-0 python3.9[90224]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_virtnodedevd.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:28:45 compute-0 sudo[90222]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:46 compute-0 sudo[90375]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pfesgxvvrhlpnxyzhvpvctewqmyapszi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146126.1515603-64-125589824060928/AnsiballZ_systemd_service.py'
Oct 11 01:28:46 compute-0 sudo[90375]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:46 compute-0 python3.9[90377]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_virtproxyd.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:28:46 compute-0 sudo[90375]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:47 compute-0 sudo[90528]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oxbuueecrkylstgcqnlcjqbxfxvumkod ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146127.116418-64-26475666873015/AnsiballZ_systemd_service.py'
Oct 11 01:28:47 compute-0 sudo[90528]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:47 compute-0 python3.9[90530]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_virtqemud.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:28:47 compute-0 sudo[90528]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:48 compute-0 sudo[90681]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wrymkfmbjbjxlvwtoediwdbovovyfdxu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146128.0645149-64-115171674394867/AnsiballZ_systemd_service.py'
Oct 11 01:28:48 compute-0 sudo[90681]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:48 compute-0 python3.9[90683]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_virtsecretd.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:28:49 compute-0 sudo[90681]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:50 compute-0 sudo[90834]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nthuzyxhsuopihekuyxtobglyxyghfex ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146130.0201187-64-30880375591716/AnsiballZ_systemd_service.py'
Oct 11 01:28:50 compute-0 sudo[90834]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:50 compute-0 python3.9[90836]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_virtstoraged.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:28:50 compute-0 sudo[90834]: pam_unix(sudo:session): session closed for user root
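The seven systemd_service tasks above stop and disable each legacy TripleO libvirt unit in turn. A compact shell sketch of the same teardown (unit names copied from the log; the Ansible module issues separate stop and disable calls, but disable --now has the same effect):

    for unit in tripleo_nova_libvirt.target \
                tripleo_nova_virtlogd_wrapper.service \
                tripleo_nova_virtnodedevd.service \
                tripleo_nova_virtproxyd.service \
                tripleo_nova_virtqemud.service \
                tripleo_nova_virtsecretd.service \
                tripleo_nova_virtstoraged.service; do
        systemctl disable --now "$unit"   # stop the unit and remove its enablement links
    done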
Oct 11 01:28:51 compute-0 sudo[90987]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pkqpkvhdixtvvlgvtkvgssntwcxhgilt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146131.122904-116-85065163618928/AnsiballZ_file.py'
Oct 11 01:28:51 compute-0 sudo[90987]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:51 compute-0 python3.9[90989]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_libvirt.target state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:28:51 compute-0 sudo[90987]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:52 compute-0 sudo[91139]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nerlixxzanmvfnilfatubdmorwvcuiui ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146132.0703568-116-272863781613663/AnsiballZ_file.py'
Oct 11 01:28:52 compute-0 sudo[91139]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:52 compute-0 python3.9[91141]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_virtlogd_wrapper.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:28:52 compute-0 sudo[91139]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:53 compute-0 sudo[91291]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zurktdgdunkdhwjvyinvpxucmdrzbwxs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146132.907982-116-229076267630/AnsiballZ_file.py'
Oct 11 01:28:53 compute-0 sudo[91291]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:53 compute-0 python3.9[91293]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_virtnodedevd.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:28:53 compute-0 sudo[91291]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:54 compute-0 sudo[91454]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-izkqdlmweidhvsyotbglkdbenlntwqzi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146133.7643228-116-81502840206164/AnsiballZ_file.py'
Oct 11 01:28:54 compute-0 sudo[91454]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:54 compute-0 ovn_controller[88370]: 2025-10-11T01:28:54Z|00025|memory|INFO|16000 kB peak resident set size after 29.9 seconds
Oct 11 01:28:54 compute-0 ovn_controller[88370]: 2025-10-11T01:28:54Z|00026|memory|INFO|idl-cells-OVN_Southbound:239 idl-cells-Open_vSwitch:528 ofctrl_desired_flow_usage-KB:5 ofctrl_installed_flow_usage-KB:4 ofctrl_sb_flow_ref_usage-KB:2
Oct 11 01:28:54 compute-0 podman[91417]: 2025-10-11 01:28:54.334757878 +0000 UTC m=+0.211931066 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_id=ovn_controller, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.schema-version=1.0)
Oct 11 01:28:54 compute-0 python3.9[91462]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_virtproxyd.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:28:54 compute-0 sudo[91454]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:54 compute-0 sudo[91622]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-berejmeeuwmgoimantpsvrfogepuhuni ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146134.6177607-116-9868852130781/AnsiballZ_file.py'
Oct 11 01:28:55 compute-0 sudo[91622]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:55 compute-0 python3.9[91624]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_virtqemud.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:28:55 compute-0 sudo[91622]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:55 compute-0 sudo[91774]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bascagjefckxkfbbhnvkbccyvgdcmdsj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146135.4133308-116-189075203296844/AnsiballZ_file.py'
Oct 11 01:28:55 compute-0 sudo[91774]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:55 compute-0 python3.9[91776]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_virtsecretd.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:28:56 compute-0 sudo[91774]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:56 compute-0 sudo[91926]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gcwdqbptpxqrrmmenudyihevvzwtkllo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146136.1921682-116-49281179270697/AnsiballZ_file.py'
Oct 11 01:28:56 compute-0 sudo[91926]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:56 compute-0 python3.9[91928]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_virtstoraged.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:28:56 compute-0 sudo[91926]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:57 compute-0 sudo[92078]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dizhbcvtdnbpnxmahpspamumkvqtumhv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146137.0095956-166-248523039756531/AnsiballZ_file.py'
Oct 11 01:28:57 compute-0 sudo[92078]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:57 compute-0 python3.9[92080]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_libvirt.target state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:28:57 compute-0 sudo[92078]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:58 compute-0 sudo[92230]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-prbvftsykcxghfexrsppntlihfzgybrk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146137.8432515-166-217320395542199/AnsiballZ_file.py'
Oct 11 01:28:58 compute-0 sudo[92230]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:58 compute-0 python3.9[92232]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_virtlogd_wrapper.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:28:58 compute-0 sudo[92230]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:59 compute-0 sudo[92382]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tmbpokefsgzzbvjarnjftmltfifwrjqz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146138.642525-166-97646986485840/AnsiballZ_file.py'
Oct 11 01:28:59 compute-0 sudo[92382]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:28:59 compute-0 python3.9[92384]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_virtnodedevd.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:28:59 compute-0 sudo[92382]: pam_unix(sudo:session): session closed for user root
Oct 11 01:28:59 compute-0 sudo[92534]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-olzqmsynxyhkoxljchxewygitgdafgbg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146139.4640803-166-118656636663157/AnsiballZ_file.py'
Oct 11 01:28:59 compute-0 sudo[92534]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:00 compute-0 python3.9[92536]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_virtproxyd.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:29:00 compute-0 sudo[92534]: pam_unix(sudo:session): session closed for user root
Oct 11 01:29:00 compute-0 sudo[92686]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ygwloeqysxibabgjwfthefgfygywmwme ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146140.3018544-166-100507502343555/AnsiballZ_file.py'
Oct 11 01:29:00 compute-0 sudo[92686]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:00 compute-0 python3.9[92688]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_virtqemud.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:29:00 compute-0 sudo[92686]: pam_unix(sudo:session): session closed for user root
Oct 11 01:29:01 compute-0 sudo[92838]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vfjhapsgfsqikcfbvraembudortnvbau ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146141.1673806-166-275086189786730/AnsiballZ_file.py'
Oct 11 01:29:01 compute-0 sudo[92838]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:01 compute-0 python3.9[92840]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_virtsecretd.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:29:01 compute-0 sudo[92838]: pam_unix(sudo:session): session closed for user root
Oct 11 01:29:02 compute-0 sudo[92990]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zfpmpeoeoouyjynpuvcaglyqwsjapwch ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146141.9985864-166-234437634146865/AnsiballZ_file.py'
Oct 11 01:29:02 compute-0 sudo[92990]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:02 compute-0 python3.9[92992]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_virtstoraged.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:29:02 compute-0 sudo[92990]: pam_unix(sudo:session): session closed for user root
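The file tasks above delete the same seven unit files twice, first from /usr/lib/systemd/system and then from /etc/systemd/system, so neither the vendor copy nor a local override survives; the daemon-reload that follows below makes systemd forget them. A bash sketch of the same cleanup:

    for dir in /usr/lib/systemd/system /etc/systemd/system; do
        rm -f "$dir"/tripleo_nova_libvirt.target \
              "$dir"/tripleo_nova_virt{logd_wrapper,nodedevd,proxyd,qemud,secretd,storaged}.service
    done
    systemctl daemon-reload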
Oct 11 01:29:03 compute-0 sudo[93142]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cgsjupchcmsbuinsxhrocfbgtauglcsy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146142.9744158-217-77658738093882/AnsiballZ_command.py'
Oct 11 01:29:03 compute-0 sudo[93142]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:03 compute-0 python3.9[93144]: ansible-ansible.legacy.command Invoked with _raw_params=if systemctl is-active certmonger.service; then
                                              systemctl disable --now certmonger.service
                                              test -f /etc/systemd/system/certmonger.service || systemctl mask certmonger.service
                                            fi
                                             _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:29:03 compute-0 sudo[93142]: pam_unix(sudo:session): session closed for user root
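The inline script logged above only acts when certmonger is running, and only masks it when no local unit file exists under /etc/systemd/system. The same fragment, annotated:

    if systemctl is-active certmonger.service; then        # exits 0 only while active
        systemctl disable --now certmonger.service         # stop it and drop enablement
        # Masking creates a /dev/null symlink at /etc/systemd/system/certmonger.service,
        # so skip the mask when a real local unit file already sits at that path.
        test -f /etc/systemd/system/certmonger.service || systemctl mask certmonger.service
    fi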
Oct 11 01:29:04 compute-0 python3.9[93296]: ansible-ansible.builtin.find Invoked with file_type=any hidden=True paths=['/var/lib/certmonger/requests'] patterns=[] read_whole_file=False age_stamp=mtime recurse=False follow=False get_checksum=False checksum_algorithm=sha1 use_regex=False exact_mode=True excludes=None contains=None age=None size=None depth=None mode=None encoding=None limit=None
Oct 11 01:29:05 compute-0 sudo[93446]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pazqxafbxirnovqzlltfypmhskyjnvid ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146144.947662-235-47441170728392/AnsiballZ_systemd_service.py'
Oct 11 01:29:05 compute-0 sudo[93446]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:05 compute-0 python3.9[93448]: ansible-ansible.builtin.systemd_service Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 01:29:05 compute-0 systemd[1]: Reloading.
Oct 11 01:29:05 compute-0 systemd-sysv-generator[93479]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:29:05 compute-0 systemd-rc-local-generator[93474]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:29:06 compute-0 sudo[93446]: pam_unix(sudo:session): session closed for user root
Oct 11 01:29:06 compute-0 sudo[93633]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oufonrsjijxebzjcgxoplvtfhihsusjo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146146.2582908-243-214543356677608/AnsiballZ_command.py'
Oct 11 01:29:06 compute-0 sudo[93633]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:06 compute-0 python3.9[93635]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_libvirt.target _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:29:06 compute-0 sudo[93633]: pam_unix(sudo:session): session closed for user root
Oct 11 01:29:07 compute-0 sudo[93786]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lphbdencvdaqsadekfpozmtkyuahqixd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146147.078563-243-200716735173221/AnsiballZ_command.py'
Oct 11 01:29:07 compute-0 sudo[93786]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:07 compute-0 python3.9[93788]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_virtlogd_wrapper.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:29:07 compute-0 sudo[93786]: pam_unix(sudo:session): session closed for user root
Oct 11 01:29:08 compute-0 sudo[93939]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-otyajagrbetzrhfgtkayexnxfywdvpyd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146147.9340892-243-145397819253970/AnsiballZ_command.py'
Oct 11 01:29:08 compute-0 sudo[93939]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:08 compute-0 python3.9[93941]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_virtnodedevd.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:29:08 compute-0 sudo[93939]: pam_unix(sudo:session): session closed for user root
Oct 11 01:29:09 compute-0 sudo[94092]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fyxwsollmpdrqkbxebyxcqnuabiotxug ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146148.790014-243-33972672295168/AnsiballZ_command.py'
Oct 11 01:29:09 compute-0 sudo[94092]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:09 compute-0 python3.9[94094]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_virtproxyd.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:29:09 compute-0 sudo[94092]: pam_unix(sudo:session): session closed for user root
Oct 11 01:29:10 compute-0 sudo[94245]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dveznzxjwiyzcjijarniugmplsgtsrkm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146149.674003-243-245795414366561/AnsiballZ_command.py'
Oct 11 01:29:10 compute-0 sudo[94245]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:10 compute-0 python3.9[94247]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_virtqemud.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:29:10 compute-0 sudo[94245]: pam_unix(sudo:session): session closed for user root
Oct 11 01:29:10 compute-0 sudo[94398]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cgfqxelofaztmzelwpixzetuhbwbjfob ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146150.51622-243-13934647057293/AnsiballZ_command.py'
Oct 11 01:29:10 compute-0 sudo[94398]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:11 compute-0 python3.9[94400]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_virtsecretd.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:29:11 compute-0 sudo[94398]: pam_unix(sudo:session): session closed for user root
Oct 11 01:29:11 compute-0 sudo[94551]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vdwvwijzblmuzmnfsftwljhfauqturyl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146151.3975716-243-183719572871426/AnsiballZ_command.py'
Oct 11 01:29:11 compute-0 sudo[94551]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:12 compute-0 python3.9[94553]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_virtstoraged.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:29:12 compute-0 sudo[94551]: pam_unix(sudo:session): session closed for user root
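With the unit files deleted, each unit also gets a systemctl reset-failed, which clears any lingering failed state so the removed units stop appearing in systemctl --failed. Equivalent loop:

    for unit in tripleo_nova_libvirt.target \
                tripleo_nova_virtlogd_wrapper.service \
                tripleo_nova_virtnodedevd.service \
                tripleo_nova_virtproxyd.service \
                tripleo_nova_virtqemud.service \
                tripleo_nova_virtsecretd.service \
                tripleo_nova_virtstoraged.service; do
        /usr/bin/systemctl reset-failed "$unit"
    done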
Oct 11 01:29:13 compute-0 sudo[94704]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-osqixmyuntrergjkgtjtbbwgylgdkkhm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146152.6358378-297-131427637510820/AnsiballZ_getent.py'
Oct 11 01:29:13 compute-0 sudo[94704]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:13 compute-0 python3.9[94706]: ansible-ansible.builtin.getent Invoked with database=passwd key=libvirt fail_key=True service=None split=None
Oct 11 01:29:13 compute-0 sudo[94704]: pam_unix(sudo:session): session closed for user root
Oct 11 01:29:14 compute-0 sudo[94857]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vlhbpiyqjneluwepggusuyakbfigzzuw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146153.683915-305-131105833235102/AnsiballZ_group.py'
Oct 11 01:29:14 compute-0 sudo[94857]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:14 compute-0 python3.9[94859]: ansible-ansible.builtin.group Invoked with gid=42473 name=libvirt state=present force=False system=False local=False non_unique=False gid_min=None gid_max=None
Oct 11 01:29:14 compute-0 groupadd[94860]: group added to /etc/group: name=libvirt, GID=42473
Oct 11 01:29:14 compute-0 groupadd[94860]: group added to /etc/gshadow: name=libvirt
Oct 11 01:29:14 compute-0 groupadd[94860]: new group: name=libvirt, GID=42473
Oct 11 01:29:14 compute-0 sudo[94857]: pam_unix(sudo:session): session closed for user root
Oct 11 01:29:15 compute-0 sudo[95015]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-daqskmvucnznzwqyfbvmegkgmwliwaut ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146154.807963-313-166182491126867/AnsiballZ_user.py'
Oct 11 01:29:15 compute-0 sudo[95015]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:15 compute-0 python3.9[95017]: ansible-ansible.builtin.user Invoked with comment=libvirt user group=libvirt groups=[''] name=libvirt shell=/sbin/nologin state=present uid=42473 non_unique=False force=False remove=False create_home=True system=False move_home=False append=False ssh_key_bits=0 ssh_key_type=rsa ssh_key_comment=ansible-generated on compute-0 update_password=always home=None password=NOT_LOGGING_PARAMETER login_class=None password_expire_max=None password_expire_min=None password_expire_warn=None hidden=None seuser=None skeleton=None generate_ssh_key=None ssh_key_file=None ssh_key_passphrase=NOT_LOGGING_PARAMETER expires=None password_lock=None local=None profile=None authorization=None role=None umask=None password_expire_account_disable=None uid_min=None uid_max=None
Oct 11 01:29:15 compute-0 useradd[95019]: new user: name=libvirt, UID=42473, GID=42473, home=/home/libvirt, shell=/sbin/nologin, from=/dev/pts/0
Oct 11 01:29:15 compute-0 sudo[95015]: pam_unix(sudo:session): session closed for user root
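The getent/group/user tasks pin a dedicated libvirt account at GID/UID 42473 with no login shell. A minimal sketch of the same outcome (IDs, comment, and shell copied from the log; the guard around creation is an assumption, since the playbook's handling of a pre-existing account is not visible here):

    # Create group and user only when no libvirt passwd entry exists yet.
    getent passwd libvirt || {
        groupadd -g 42473 libvirt
        useradd -u 42473 -g libvirt -c 'libvirt user' -s /sbin/nologin libvirt
    }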
Oct 11 01:29:16 compute-0 sudo[95175]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yhmqycchherfnpvbctoifommjbyntfqw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146156.1305993-324-217116653747421/AnsiballZ_setup.py'
Oct 11 01:29:16 compute-0 sudo[95175]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:16 compute-0 python3.9[95177]: ansible-ansible.legacy.setup Invoked with filter=['ansible_pkg_mgr'] gather_subset=['!all'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:29:17 compute-0 sudo[95175]: pam_unix(sudo:session): session closed for user root
Oct 11 01:29:17 compute-0 sudo[95259]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qveqwzoiaymamxrhzlyfpqsixqoeypxq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146156.1305993-324-217116653747421/AnsiballZ_dnf.py'
Oct 11 01:29:17 compute-0 sudo[95259]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:29:17 compute-0 python3.9[95261]: ansible-ansible.legacy.dnf Invoked with name=['libvirt ', 'libvirt-admin ', 'libvirt-client ', 'libvirt-daemon ', 'qemu-kvm', 'qemu-img', 'libguestfs', 'libseccomp', 'swtpm', 'swtpm-tools', 'edk2-ovmf', 'ceph-common', 'cyrus-sasl-scram'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
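The dnf task installs the whole virtualization stack in one transaction; the SELinux policy conversions, the scriptlet-created dnsmasq/clevis/ceph accounts, and the sshd restart logged below are side effects of this install. A command-line equivalent (package set from the log, with the stray trailing spaces in the first four names dropped):

    dnf -y install libvirt libvirt-admin libvirt-client libvirt-daemon \
        qemu-kvm qemu-img libguestfs libseccomp swtpm swtpm-tools \
        edk2-ovmf ceph-common cyrus-sasl-scram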
Oct 11 01:29:25 compute-0 podman[95272]: 2025-10-11 01:29:25.230312087 +0000 UTC m=+0.118564298 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_id=ovn_controller, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']})
Oct 11 01:29:46 compute-0 kernel: SELinux:  Converting 2752 SID table entries...
Oct 11 01:29:46 compute-0 kernel: SELinux:  policy capability network_peer_controls=1
Oct 11 01:29:46 compute-0 kernel: SELinux:  policy capability open_perms=1
Oct 11 01:29:46 compute-0 kernel: SELinux:  policy capability extended_socket_class=1
Oct 11 01:29:46 compute-0 kernel: SELinux:  policy capability always_check_network=0
Oct 11 01:29:46 compute-0 kernel: SELinux:  policy capability cgroup_seclabel=1
Oct 11 01:29:46 compute-0 kernel: SELinux:  policy capability nnp_nosuid_transition=1
Oct 11 01:29:46 compute-0 kernel: SELinux:  policy capability genfs_seclabel_symlinks=1
Oct 11 01:29:55 compute-0 kernel: SELinux:  Converting 2752 SID table entries...
Oct 11 01:29:55 compute-0 kernel: SELinux:  policy capability network_peer_controls=1
Oct 11 01:29:55 compute-0 kernel: SELinux:  policy capability open_perms=1
Oct 11 01:29:55 compute-0 kernel: SELinux:  policy capability extended_socket_class=1
Oct 11 01:29:55 compute-0 kernel: SELinux:  policy capability always_check_network=0
Oct 11 01:29:55 compute-0 kernel: SELinux:  policy capability cgroup_seclabel=1
Oct 11 01:29:55 compute-0 kernel: SELinux:  policy capability nnp_nosuid_transition=1
Oct 11 01:29:55 compute-0 kernel: SELinux:  policy capability genfs_seclabel_symlinks=1
Oct 11 01:29:56 compute-0 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=13 res=1
Oct 11 01:29:56 compute-0 podman[95493]: 2025-10-11 01:29:56.271945243 +0000 UTC m=+0.150024045 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_controller, io.buildah.version=1.41.3, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:30:27 compute-0 podman[103739]: 2025-10-11 01:30:27.228290044 +0000 UTC m=+0.121556821 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, container_name=ovn_controller)
Oct 11 01:30:58 compute-0 podman[112294]: 2025-10-11 01:30:58.249957291 +0000 UTC m=+0.152173633 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_controller, io.buildah.version=1.41.3, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 01:30:59 compute-0 kernel: SELinux:  Converting 2753 SID table entries...
Oct 11 01:30:59 compute-0 kernel: SELinux:  policy capability network_peer_controls=1
Oct 11 01:30:59 compute-0 kernel: SELinux:  policy capability open_perms=1
Oct 11 01:30:59 compute-0 kernel: SELinux:  policy capability extended_socket_class=1
Oct 11 01:30:59 compute-0 kernel: SELinux:  policy capability always_check_network=0
Oct 11 01:30:59 compute-0 kernel: SELinux:  policy capability cgroup_seclabel=1
Oct 11 01:30:59 compute-0 kernel: SELinux:  policy capability nnp_nosuid_transition=1
Oct 11 01:30:59 compute-0 kernel: SELinux:  policy capability genfs_seclabel_symlinks=1
Oct 11 01:31:00 compute-0 groupadd[112333]: group added to /etc/group: name=dnsmasq, GID=992
Oct 11 01:31:00 compute-0 groupadd[112333]: group added to /etc/gshadow: name=dnsmasq
Oct 11 01:31:00 compute-0 groupadd[112333]: new group: name=dnsmasq, GID=992
Oct 11 01:31:00 compute-0 useradd[112340]: new user: name=dnsmasq, UID=992, GID=992, home=/var/lib/dnsmasq, shell=/usr/sbin/nologin, from=none
Oct 11 01:31:00 compute-0 dbus-broker-launch[764]: Noticed file-system modification, trigger reload.
Oct 11 01:31:00 compute-0 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=14 res=1
Oct 11 01:31:00 compute-0 dbus-broker-launch[764]: Noticed file-system modification, trigger reload.
Oct 11 01:31:01 compute-0 groupadd[112353]: group added to /etc/group: name=clevis, GID=991
Oct 11 01:31:01 compute-0 groupadd[112353]: group added to /etc/gshadow: name=clevis
Oct 11 01:31:01 compute-0 groupadd[112353]: new group: name=clevis, GID=991
Oct 11 01:31:01 compute-0 useradd[112360]: new user: name=clevis, UID=991, GID=991, home=/var/cache/clevis, shell=/usr/sbin/nologin, from=none
Oct 11 01:31:02 compute-0 usermod[112370]: add 'clevis' to group 'tss'
Oct 11 01:31:02 compute-0 usermod[112370]: add 'clevis' to shadow group 'tss'
Oct 11 01:31:04 compute-0 polkitd[6240]: Reloading rules
Oct 11 01:31:04 compute-0 polkitd[6240]: Collecting garbage unconditionally...
Oct 11 01:31:04 compute-0 polkitd[6240]: Loading rules from directory /etc/polkit-1/rules.d
Oct 11 01:31:04 compute-0 polkitd[6240]: Loading rules from directory /usr/share/polkit-1/rules.d
Oct 11 01:31:04 compute-0 polkitd[6240]: Finished loading, compiling and executing 4 rules
Oct 11 01:31:04 compute-0 polkitd[6240]: Reloading rules
Oct 11 01:31:04 compute-0 polkitd[6240]: Collecting garbage unconditionally...
Oct 11 01:31:04 compute-0 polkitd[6240]: Loading rules from directory /etc/polkit-1/rules.d
Oct 11 01:31:04 compute-0 polkitd[6240]: Loading rules from directory /usr/share/polkit-1/rules.d
Oct 11 01:31:04 compute-0 polkitd[6240]: Finished loading, compiling and executing 4 rules
Oct 11 01:31:06 compute-0 groupadd[112557]: group added to /etc/group: name=ceph, GID=167
Oct 11 01:31:06 compute-0 groupadd[112557]: group added to /etc/gshadow: name=ceph
Oct 11 01:31:06 compute-0 groupadd[112557]: new group: name=ceph, GID=167
Oct 11 01:31:06 compute-0 useradd[112563]: new user: name=ceph, UID=167, GID=167, home=/var/lib/ceph, shell=/sbin/nologin, from=none
Oct 11 01:31:09 compute-0 systemd[1]: Stopping OpenSSH server daemon...
Oct 11 01:31:09 compute-0 sshd[999]: Received signal 15; terminating.
Oct 11 01:31:09 compute-0 systemd[1]: sshd.service: Deactivated successfully.
Oct 11 01:31:09 compute-0 systemd[1]: Stopped OpenSSH server daemon.
Oct 11 01:31:09 compute-0 systemd[1]: sshd.service: Consumed 1.827s CPU time, read 532.0K from disk, written 4.0K to disk.
Oct 11 01:31:09 compute-0 systemd[1]: Stopped target sshd-keygen.target.
Oct 11 01:31:09 compute-0 systemd[1]: Stopping sshd-keygen.target...
Oct 11 01:31:09 compute-0 systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Oct 11 01:31:09 compute-0 systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Oct 11 01:31:09 compute-0 systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Oct 11 01:31:09 compute-0 systemd[1]: Reached target sshd-keygen.target.
Oct 11 01:31:09 compute-0 systemd[1]: Starting OpenSSH server daemon...
Oct 11 01:31:09 compute-0 sshd[113062]: Server listening on 0.0.0.0 port 22.
Oct 11 01:31:09 compute-0 sshd[113062]: Server listening on :: port 22.
Oct 11 01:31:09 compute-0 systemd[1]: Started OpenSSH server daemon.
Oct 11 01:31:12 compute-0 systemd[1]: Started /usr/bin/systemctl start man-db-cache-update.
Oct 11 01:31:12 compute-0 systemd[1]: Starting man-db-cache-update.service...
Oct 11 01:31:12 compute-0 systemd[1]: Reloading.
Oct 11 01:31:12 compute-0 systemd-rc-local-generator[113320]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:31:12 compute-0 systemd-sysv-generator[113323]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:31:12 compute-0 systemd[1]: Queuing reload/restart jobs for marked units…
Oct 11 01:31:14 compute-0 systemd[1]: Starting PackageKit Daemon...
Oct 11 01:31:14 compute-0 PackageKit[114970]: daemon start
Oct 11 01:31:14 compute-0 systemd[1]: Started PackageKit Daemon.
Oct 11 01:31:15 compute-0 sudo[95259]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:16 compute-0 sudo[116279]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qiemodvflvphteokwepkslzuxdyacrnb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146275.2953677-336-35809337512132/AnsiballZ_systemd.py'
Oct 11 01:31:16 compute-0 sudo[116279]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:16 compute-0 python3.9[116313]: ansible-ansible.builtin.systemd Invoked with enabled=False masked=True name=libvirtd state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None
Oct 11 01:31:16 compute-0 systemd[1]: Reloading.
Oct 11 01:31:16 compute-0 systemd-sysv-generator[116709]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:31:16 compute-0 systemd-rc-local-generator[116704]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:31:16 compute-0 sudo[116279]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:17 compute-0 sudo[117435]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kzefghhbpqnhhjeqzlgsxjgphmpltrzg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146277.0110683-336-50252296492367/AnsiballZ_systemd.py'
Oct 11 01:31:17 compute-0 sudo[117435]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:17 compute-0 python3.9[117452]: ansible-ansible.builtin.systemd Invoked with enabled=False masked=True name=libvirtd-tcp.socket state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None
Oct 11 01:31:17 compute-0 systemd[1]: Reloading.
Oct 11 01:31:17 compute-0 systemd-rc-local-generator[117836]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:31:17 compute-0 systemd-sysv-generator[117840]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:31:18 compute-0 sudo[117435]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:18 compute-0 sudo[118483]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vxiupfojwxpiyatxwbyajgkdlfjxxohd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146278.350924-336-80616039101611/AnsiballZ_systemd.py'
Oct 11 01:31:18 compute-0 sudo[118483]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:19 compute-0 python3.9[118502]: ansible-ansible.builtin.systemd Invoked with enabled=False masked=True name=libvirtd-tls.socket state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None
Oct 11 01:31:19 compute-0 systemd[1]: Reloading.
Oct 11 01:31:19 compute-0 systemd-rc-local-generator[118838]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:31:19 compute-0 systemd-sysv-generator[118848]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:31:19 compute-0 sudo[118483]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:20 compute-0 sudo[119522]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xfbnvvsovgbemgkyknbqrpbdswbfuaur ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146279.707281-336-88455752440043/AnsiballZ_systemd.py'
Oct 11 01:31:20 compute-0 sudo[119522]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:20 compute-0 python3.9[119541]: ansible-ansible.builtin.systemd Invoked with enabled=False masked=True name=virtproxyd-tcp.socket state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None
Oct 11 01:31:20 compute-0 systemd[1]: Reloading.
Oct 11 01:31:20 compute-0 systemd-rc-local-generator[119865]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:31:20 compute-0 systemd-sysv-generator[119868]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:31:20 compute-0 sudo[119522]: pam_unix(sudo:session): session closed for user root
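Once the packages are in place, the monolithic libvirtd service and its activation sockets are stopped, disabled, and masked so nothing can socket-activate them behind the modular daemons' backs. Roughly:

    for unit in libvirtd.service libvirtd-tcp.socket libvirtd-tls.socket virtproxyd-tcp.socket; do
        systemctl disable --now "$unit"   # stop and disable first...
        systemctl mask "$unit"            # ...then mask to block any future activation
    done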
Oct 11 01:31:21 compute-0 sudo[120607]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-naraapxjlaikyzszmatkcyjvsyavpwvo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146281.1375213-365-227643766972463/AnsiballZ_systemd.py'
Oct 11 01:31:21 compute-0 sudo[120607]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:21 compute-0 python3.9[120629]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtlogd.service daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:21 compute-0 systemd[1]: Reloading.
Oct 11 01:31:22 compute-0 systemd-rc-local-generator[121044]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:31:22 compute-0 systemd-sysv-generator[121048]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:31:22 compute-0 sudo[120607]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:22 compute-0 sudo[121732]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xbzvrwguplknyeldaswdqqkjyorniglt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146282.4759643-365-151648879595338/AnsiballZ_systemd.py'
Oct 11 01:31:22 compute-0 sudo[121732]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:23 compute-0 python3.9[121758]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtnodedevd.service daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:23 compute-0 systemd[1]: Reloading.
Oct 11 01:31:23 compute-0 systemd-rc-local-generator[122122]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:31:23 compute-0 systemd-sysv-generator[122125]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:31:23 compute-0 sudo[121732]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:24 compute-0 sudo[122761]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nuffjzcodtgrajyapdnsxccmoqinsubm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146283.734273-365-132175788005498/AnsiballZ_systemd.py'
Oct 11 01:31:24 compute-0 sudo[122761]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:24 compute-0 systemd[1]: man-db-cache-update.service: Deactivated successfully.
Oct 11 01:31:24 compute-0 systemd[1]: Finished man-db-cache-update.service.
Oct 11 01:31:24 compute-0 systemd[1]: man-db-cache-update.service: Consumed 15.250s CPU time.
Oct 11 01:31:24 compute-0 python3.9[122778]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtproxyd.service daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:24 compute-0 systemd[1]: run-r8567e8e7fbb7485d9ae01c8d241a86f9.service: Deactivated successfully.
Oct 11 01:31:24 compute-0 systemd[1]: Reloading.
Oct 11 01:31:24 compute-0 systemd-rc-local-generator[122886]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:31:24 compute-0 systemd-sysv-generator[122889]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:31:24 compute-0 sudo[122761]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:25 compute-0 sudo[123042]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lvblyuplajfmhdqqlfqsmirchtpcoruk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146285.048641-365-126990613460604/AnsiballZ_systemd.py'
Oct 11 01:31:25 compute-0 sudo[123042]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:25 compute-0 python3.9[123044]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtqemud.service daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:25 compute-0 sudo[123042]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:26 compute-0 sudo[123197]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cvfqdzoylocokoizhhrzqygonxohacrs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146286.0988379-365-157824671958305/AnsiballZ_systemd.py'
Oct 11 01:31:26 compute-0 sudo[123197]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:26 compute-0 python3.9[123199]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtsecretd.service daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:26 compute-0 systemd[1]: Reloading.
Oct 11 01:31:26 compute-0 systemd-rc-local-generator[123228]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:31:26 compute-0 systemd-sysv-generator[123233]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:31:27 compute-0 sudo[123197]: pam_unix(sudo:session): session closed for user root
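Between 01:31:21 and 01:31:27 the same module runs once per modular libvirt daemon with enabled=True, masked=False and no state argument, i.e. the services are enabled but deliberately not started. Whether the source playbook loops or repeats a literal task is not visible in the log; a looped reconstruction:

- name: Enable the modular libvirt daemons without starting them
  ansible.builtin.systemd:
    name: "{{ item }}"
    enabled: true
    masked: false            # state omitted on purpose: the daemons are socket-activated later
  loop:
    - virtlogd.service
    - virtnodedevd.service
    - virtproxyd.service
    - virtqemud.service
    - virtsecretd.service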
Oct 11 01:31:27 compute-0 sudo[123387]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dkxveipyrvbqvtgzhyqsccygdzpvigho ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146287.4437-401-60558769142834/AnsiballZ_systemd.py'
Oct 11 01:31:27 compute-0 sudo[123387]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:28 compute-0 python3.9[123389]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtproxyd-tls.socket state=started daemon_reload=False daemon_reexec=False scope=system no_block=False force=None
Oct 11 01:31:29 compute-0 podman[123391]: 2025-10-11 01:31:29.261284443 +0000 UTC m=+0.158390922 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 01:31:29 compute-0 systemd[1]: Reloading.
Oct 11 01:31:29 compute-0 systemd-rc-local-generator[123445]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:31:29 compute-0 systemd-sysv-generator[123450]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:31:29 compute-0 systemd[1]: Listening on libvirt proxy daemon socket.
Oct 11 01:31:29 compute-0 systemd[1]: Listening on libvirt proxy daemon TLS IP socket.
Oct 11 01:31:29 compute-0 sudo[123387]: pam_unix(sudo:session): session closed for user root
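virtproxyd-tls.socket is the one unit that is also started (state=started), which is what produces the two "Listening on libvirt proxy daemon ... socket" entries from systemd above. A matching task sketch:

- name: Enable and start the TLS libvirt proxy socket
  ansible.builtin.systemd:
    name: virtproxyd-tls.socket
    enabled: true
    masked: false
    state: started           # socket activation: virtproxyd itself spawns on the first TLS connection

Because only the socket is started, `systemctl is-active virtproxyd-tls.socket` reports active immediately while virtproxyd.service can stay inactive until a client connects.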
Oct 11 01:31:30 compute-0 sudo[123607]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fcgoukkzgerswqxywgksbhzpwzvmosyh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146289.94051-409-179034664898756/AnsiballZ_systemd.py'
Oct 11 01:31:30 compute-0 sudo[123607]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:30 compute-0 python3.9[123609]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtlogd.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:30 compute-0 sudo[123607]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:31 compute-0 sudo[123762]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gcvmhnqnsznltbtcltndsbpfgxcsbdkk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146290.97839-409-17804459478585/AnsiballZ_systemd.py'
Oct 11 01:31:31 compute-0 sudo[123762]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:31 compute-0 python3.9[123764]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtlogd-admin.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:31 compute-0 sudo[123762]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:32 compute-0 sudo[123917]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rcvjdekxfkqpxufzeeswsbqenirgsqoj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146292.0163963-409-63484215749774/AnsiballZ_systemd.py'
Oct 11 01:31:32 compute-0 sudo[123917]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:32 compute-0 python3.9[123919]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtnodedevd.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:32 compute-0 sudo[123917]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:33 compute-0 sudo[124072]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fbenlmxabrpiuoquducgheiyzgbtbwpa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146293.2134123-409-29653310153982/AnsiballZ_systemd.py'
Oct 11 01:31:33 compute-0 sudo[124072]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:33 compute-0 python3.9[124074]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtnodedevd-ro.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:35 compute-0 sudo[124072]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:35 compute-0 sudo[124227]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vkrzhqkdwfgervfkgkvwhrxjzrjyitkn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146295.222292-409-85701011087505/AnsiballZ_systemd.py'
Oct 11 01:31:35 compute-0 sudo[124227]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:35 compute-0 python3.9[124229]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtnodedevd-admin.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:36 compute-0 sudo[124227]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:36 compute-0 sudo[124382]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-acurarxovnzrpaqxzjaufouiifltwphl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146296.2257044-409-93929162040223/AnsiballZ_systemd.py'
Oct 11 01:31:36 compute-0 sudo[124382]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:36 compute-0 python3.9[124384]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtproxyd.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:37 compute-0 sudo[124382]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:37 compute-0 sudo[124537]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fleuvqhkwzifoktikcxcobpughufmhan ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146297.237801-409-71671049947304/AnsiballZ_systemd.py'
Oct 11 01:31:37 compute-0 sudo[124537]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:37 compute-0 python3.9[124539]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtproxyd-ro.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:38 compute-0 sudo[124537]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:38 compute-0 sudo[124692]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kngsixhqfyfqwcrtngrptjahtmqqdeen ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146298.2369413-409-140989857810614/AnsiballZ_systemd.py'
Oct 11 01:31:38 compute-0 sudo[124692]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:38 compute-0 python3.9[124694]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtproxyd-admin.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:39 compute-0 sudo[124692]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:39 compute-0 sudo[124847]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ndlakmmitwmolqdeldiearqwsyatdcka ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146299.287107-409-224242296575622/AnsiballZ_systemd.py'
Oct 11 01:31:39 compute-0 sudo[124847]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:40 compute-0 python3.9[124849]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtqemud.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:40 compute-0 sudo[124847]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:40 compute-0 sudo[125002]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ydkcnmsysloauhitxmrxjqxjeqtfvntk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146300.2895005-409-10523774595670/AnsiballZ_systemd.py'
Oct 11 01:31:40 compute-0 sudo[125002]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:40 compute-0 python3.9[125004]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtqemud-ro.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:41 compute-0 sudo[125002]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:41 compute-0 sudo[125157]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pgklnxkohyorquduoeblomsufnsrdkbe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146301.433276-409-18762732871889/AnsiballZ_systemd.py'
Oct 11 01:31:41 compute-0 sudo[125157]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:42 compute-0 python3.9[125159]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtqemud-admin.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:42 compute-0 sudo[125157]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:42 compute-0 sudo[125312]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wwjkmcruwshtetekybxtqxuxvqbvzlac ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146302.449057-409-278216333381412/AnsiballZ_systemd.py'
Oct 11 01:31:42 compute-0 sudo[125312]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:43 compute-0 python3.9[125314]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtsecretd.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:43 compute-0 sudo[125312]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:43 compute-0 sudo[125467]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-drxyjuxoxmtfsfuxwujyviufuinachnd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146303.5146868-409-250467460948269/AnsiballZ_systemd.py'
Oct 11 01:31:43 compute-0 sudo[125467]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:44 compute-0 python3.9[125469]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtsecretd-ro.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:45 compute-0 sudo[125467]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:46 compute-0 sudo[125622]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sfpnxfuebfatskiuvpgbgjjtxzejsrwe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146305.7209883-409-181364509200517/AnsiballZ_systemd.py'
Oct 11 01:31:46 compute-0 sudo[125622]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:46 compute-0 python3.9[125624]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtsecretd-admin.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:31:46 compute-0 sudo[125622]: pam_unix(sudo:session): session closed for user root
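The run from 01:31:30 to 01:31:46 enables the activation sockets themselves: a main, a read-only (-ro) and an -admin socket per daemon, except virtlogd, which has no read-only socket. Collapsed into one looped task:

- name: Enable the libvirt activation sockets
  ansible.builtin.systemd:
    name: "{{ item }}"
    enabled: true
    masked: false
  loop:
    - virtlogd.socket
    - virtlogd-admin.socket          # virtlogd ships no -ro socket
    - virtnodedevd.socket
    - virtnodedevd-ro.socket
    - virtnodedevd-admin.socket
    - virtproxyd.socket
    - virtproxyd-ro.socket
    - virtproxyd-admin.socket
    - virtqemud.socket
    - virtqemud-ro.socket
    - virtqemud-admin.socket
    - virtsecretd.socket
    - virtsecretd-ro.socket
    - virtsecretd-admin.socket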
Oct 11 01:31:47 compute-0 sudo[125777]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zqktghikhcwknvymysvagvhjfbaleozt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146307.042635-511-144744616502595/AnsiballZ_file.py'
Oct 11 01:31:47 compute-0 sudo[125777]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:47 compute-0 python3.9[125779]: ansible-ansible.builtin.file Invoked with group=root owner=root path=/etc/tmpfiles.d/ setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:31:47 compute-0 sudo[125777]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:48 compute-0 sudo[125929]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lxarzdbuybhccdzubmsmoigyzhoydgkt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146307.9136176-511-223814832972413/AnsiballZ_file.py'
Oct 11 01:31:48 compute-0 sudo[125929]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:48 compute-0 python3.9[125931]: ansible-ansible.builtin.file Invoked with group=root owner=root path=/var/lib/edpm-config/firewall setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:31:48 compute-0 sudo[125929]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:49 compute-0 sudo[126081]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wixowmchasqetozmexgkjlngkwczctbt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146308.7760127-511-45943359546785/AnsiballZ_file.py'
Oct 11 01:31:49 compute-0 sudo[126081]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:49 compute-0 python3.9[126083]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/pki/libvirt setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:31:49 compute-0 sudo[126081]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:50 compute-0 sudo[126233]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wnzqaemdppprkszbanlaemzlfdsslgzz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146309.6831918-511-245164841059751/AnsiballZ_file.py'
Oct 11 01:31:50 compute-0 sudo[126233]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:50 compute-0 python3.9[126235]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/pki/libvirt/private setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:31:50 compute-0 sudo[126233]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:50 compute-0 sudo[126385]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fvgcrgilmeuvbbmeduumvfghncptnpai ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146310.5657358-511-101824354265041/AnsiballZ_file.py'
Oct 11 01:31:50 compute-0 sudo[126385]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:51 compute-0 python3.9[126387]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/pki/CA setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:31:51 compute-0 sudo[126385]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:51 compute-0 sudo[126537]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oxzplsjzigrkuukhcmswvsuysgwjpzlj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146311.36414-511-197892967760973/AnsiballZ_file.py'
Oct 11 01:31:51 compute-0 sudo[126537]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:51 compute-0 python3.9[126539]: ansible-ansible.builtin.file Invoked with group=qemu owner=root path=/etc/pki/qemu setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:31:51 compute-0 sudo[126537]: pam_unix(sudo:session): session closed for user root
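The playbook then switches to ansible.builtin.file and creates the configuration and PKI directories, all labelled with the SELinux type container_file_t so the containerised services can read them. The logged arguments collapse into:

- name: Create libvirt configuration and PKI directories
  ansible.builtin.file:
    path: "{{ item.path }}"
    state: directory
    owner: root
    group: "{{ item.group | default('root') }}"
    mode: "{{ item.mode | default(omit) }}"    # only the PKI directories pass an explicit mode
    setype: container_file_t
  loop:
    - { path: /etc/tmpfiles.d }
    - { path: /var/lib/edpm-config/firewall }
    - { path: /etc/pki/libvirt, mode: '0755' }
    - { path: /etc/pki/libvirt/private, mode: '0755' }
    - { path: /etc/pki/CA, mode: '0755' }
    - { path: /etc/pki/qemu, group: qemu }     # qemu needs group access to its own certificates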
Oct 11 01:31:52 compute-0 sudo[126689]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nlqxuklyhwfvwazusksfqlyxsbazeoas ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146312.2436416-554-198802435042631/AnsiballZ_stat.py'
Oct 11 01:31:52 compute-0 sudo[126689]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:53 compute-0 python3.9[126691]: ansible-ansible.legacy.stat Invoked with path=/etc/libvirt/virtlogd.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:31:53 compute-0 sudo[126689]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:53 compute-0 sudo[126814]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pwtouzxkebwxzchmbpteakjhudwfcdyl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146312.2436416-554-198802435042631/AnsiballZ_copy.py'
Oct 11 01:31:53 compute-0 sudo[126814]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:53 compute-0 python3.9[126816]: ansible-ansible.legacy.copy Invoked with dest=/etc/libvirt/virtlogd.conf group=libvirt mode=0640 owner=libvirt src=/home/zuul/.ansible/tmp/ansible-tmp-1760146312.2436416-554-198802435042631/.source.conf follow=False _original_basename=virtlogd.conf checksum=d7a72ae92c2c205983b029473e05a6aa4c58ec24 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:31:54 compute-0 sudo[126814]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:54 compute-0 sudo[126966]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nwbsakgzuhfhqjzrdbqofuwkgrkzmeoo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146314.2297719-554-37271985214792/AnsiballZ_stat.py'
Oct 11 01:31:54 compute-0 sudo[126966]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:54 compute-0 python3.9[126968]: ansible-ansible.legacy.stat Invoked with path=/etc/libvirt/virtnodedevd.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:31:54 compute-0 sudo[126966]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:55 compute-0 sudo[127091]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xlilgykpbcdzmpcllemndwpxyllyudil ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146314.2297719-554-37271985214792/AnsiballZ_copy.py'
Oct 11 01:31:55 compute-0 sudo[127091]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:55 compute-0 python3.9[127093]: ansible-ansible.legacy.copy Invoked with dest=/etc/libvirt/virtnodedevd.conf group=libvirt mode=0640 owner=libvirt src=/home/zuul/.ansible/tmp/ansible-tmp-1760146314.2297719-554-37271985214792/.source.conf follow=False _original_basename=virtnodedevd.conf checksum=7a604468adb2868f1ab6ebd0fd4622286e6373e2 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:31:55 compute-0 sudo[127091]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:56 compute-0 sudo[127243]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ljbvghdvxrufuchvlfanonczvuwvlctq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146315.946141-554-269532888772996/AnsiballZ_stat.py'
Oct 11 01:31:56 compute-0 sudo[127243]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:56 compute-0 python3.9[127245]: ansible-ansible.legacy.stat Invoked with path=/etc/libvirt/virtproxyd.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:31:56 compute-0 sudo[127243]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:57 compute-0 sudo[127368]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vvloovvvonpktfmidbkzaqzbgjuqmirs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146315.946141-554-269532888772996/AnsiballZ_copy.py'
Oct 11 01:31:57 compute-0 sudo[127368]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:57 compute-0 python3.9[127370]: ansible-ansible.legacy.copy Invoked with dest=/etc/libvirt/virtproxyd.conf group=libvirt mode=0640 owner=libvirt src=/home/zuul/.ansible/tmp/ansible-tmp-1760146315.946141-554-269532888772996/.source.conf follow=False _original_basename=virtproxyd.conf checksum=28bc484b7c9988e03de49d4fcc0a088ea975f716 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:31:57 compute-0 sudo[127368]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:57 compute-0 sudo[127520]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lurqtyyysggnqqlkjxfxgsoicolbzcgx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146317.5742166-554-142386321663223/AnsiballZ_stat.py'
Oct 11 01:31:57 compute-0 sudo[127520]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:58 compute-0 python3.9[127522]: ansible-ansible.legacy.stat Invoked with path=/etc/libvirt/virtqemud.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:31:58 compute-0 sudo[127520]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:58 compute-0 sudo[127645]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uunkskoxzyxtndgynjdebmobctzlktjy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146317.5742166-554-142386321663223/AnsiballZ_copy.py'
Oct 11 01:31:58 compute-0 sudo[127645]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:58 compute-0 python3.9[127647]: ansible-ansible.legacy.copy Invoked with dest=/etc/libvirt/virtqemud.conf group=libvirt mode=0640 owner=libvirt src=/home/zuul/.ansible/tmp/ansible-tmp-1760146317.5742166-554-142386321663223/.source.conf follow=False _original_basename=virtqemud.conf checksum=7a604468adb2868f1ab6ebd0fd4622286e6373e2 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:31:58 compute-0 sudo[127645]: pam_unix(sudo:session): session closed for user root
Oct 11 01:31:59 compute-0 sudo[127797]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-twvuldlpdhdbiuzzmabvxqgdozdeaicn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146319.0815635-554-88426386303844/AnsiballZ_stat.py'
Oct 11 01:31:59 compute-0 sudo[127797]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:31:59 compute-0 python3.9[127799]: ansible-ansible.legacy.stat Invoked with path=/etc/libvirt/qemu.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:31:59 compute-0 sudo[127797]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:00 compute-0 sudo[127937]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bdxysnlwvcfoquteuvsywgpleafglzro ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146319.0815635-554-88426386303844/AnsiballZ_copy.py'
Oct 11 01:32:00 compute-0 sudo[127937]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:00 compute-0 podman[127896]: 2025-10-11 01:32:00.226017137 +0000 UTC m=+0.126106838 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3)
Oct 11 01:32:00 compute-0 python3.9[127943]: ansible-ansible.legacy.copy Invoked with dest=/etc/libvirt/qemu.conf group=libvirt mode=0640 owner=libvirt src=/home/zuul/.ansible/tmp/ansible-tmp-1760146319.0815635-554-88426386303844/.source.conf follow=False _original_basename=qemu.conf.j2 checksum=c44de21af13c90603565570f09ff60c6a41ed8df backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:00 compute-0 sudo[127937]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:01 compute-0 sudo[128099]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hewxohpiyatkkjfsvuijvwiiamcjbeml ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146320.623077-554-49856457274214/AnsiballZ_stat.py'
Oct 11 01:32:01 compute-0 sudo[128099]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:01 compute-0 python3.9[128101]: ansible-ansible.legacy.stat Invoked with path=/etc/libvirt/virtsecretd.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:32:01 compute-0 sudo[128099]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:01 compute-0 sudo[128224]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ubocthfuzlwizvnjamjfnbxwqsewczwr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146320.623077-554-49856457274214/AnsiballZ_copy.py'
Oct 11 01:32:01 compute-0 sudo[128224]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:02 compute-0 python3.9[128226]: ansible-ansible.legacy.copy Invoked with dest=/etc/libvirt/virtsecretd.conf group=libvirt mode=0640 owner=libvirt src=/home/zuul/.ansible/tmp/ansible-tmp-1760146320.623077-554-49856457274214/.source.conf follow=False _original_basename=virtsecretd.conf checksum=7a604468adb2868f1ab6ebd0fd4622286e6373e2 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:02 compute-0 sudo[128224]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:02 compute-0 sudo[128376]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tsdlwtroxnbewabbbapmdzudfvbtxmgc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146322.3535478-554-20864665895515/AnsiballZ_stat.py'
Oct 11 01:32:02 compute-0 sudo[128376]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:02 compute-0 python3.9[128378]: ansible-ansible.legacy.stat Invoked with path=/etc/libvirt/auth.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:32:02 compute-0 sudo[128376]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:03 compute-0 sudo[128499]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xmaiccbucojhxqmzcdlikvvpdhpgqtmw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146322.3535478-554-20864665895515/AnsiballZ_copy.py'
Oct 11 01:32:03 compute-0 sudo[128499]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:03 compute-0 python3.9[128501]: ansible-ansible.legacy.copy Invoked with dest=/etc/libvirt/auth.conf group=libvirt mode=0600 owner=libvirt src=/home/zuul/.ansible/tmp/ansible-tmp-1760146322.3535478-554-20864665895515/.source.conf follow=False _original_basename=auth.conf checksum=a94cd818c374cec2c8425b70d2e0e2f41b743ae4 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:03 compute-0 sudo[128499]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:04 compute-0 sudo[128651]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bvhbhyxwzceuohzzkikokfyafwsbsshk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146323.8090894-554-188379720811673/AnsiballZ_stat.py'
Oct 11 01:32:04 compute-0 sudo[128651]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:04 compute-0 python3.9[128653]: ansible-ansible.legacy.stat Invoked with path=/etc/sasl2/libvirt.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:32:04 compute-0 sudo[128651]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:04 compute-0 sudo[128776]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hvcetyrvigfofoqwgyfhqutvuskkmqpe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146323.8090894-554-188379720811673/AnsiballZ_copy.py'
Oct 11 01:32:04 compute-0 sudo[128776]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:05 compute-0 python3.9[128778]: ansible-ansible.legacy.copy Invoked with dest=/etc/sasl2/libvirt.conf group=libvirt mode=0640 owner=libvirt src=/home/zuul/.ansible/tmp/ansible-tmp-1760146323.8090894-554-188379720811673/.source.conf follow=False _original_basename=sasl_libvirt.conf checksum=652e4d404bf79253d06956b8e9847c9364979d4a backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:05 compute-0 sudo[128776]: pam_unix(sudo:session): session closed for user root
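Each configuration file appears as an ansible.legacy.stat immediately followed by an ansible.legacy.copy: that is the on-wire pattern of a single copy or template task per file (the controller stats the destination, then ships the rendered source). The _original_basename of qemu.conf is qemu.conf.j2, so at least that file is templated, and virtnodedevd.conf, virtqemud.conf and virtsecretd.conf carry identical checksums, i.e. byte-identical content. A sketch that uses template throughout, which is an assumption for the non-.j2 files:

- name: Install the libvirt daemon configuration
  ansible.builtin.template:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    owner: libvirt
    group: libvirt
    mode: "{{ item.mode | default('0640') }}"
  loop:
    - { src: virtlogd.conf, dest: /etc/libvirt/virtlogd.conf }
    - { src: virtnodedevd.conf, dest: /etc/libvirt/virtnodedevd.conf }
    - { src: virtproxyd.conf, dest: /etc/libvirt/virtproxyd.conf }
    - { src: virtqemud.conf, dest: /etc/libvirt/virtqemud.conf }
    - { src: qemu.conf.j2, dest: /etc/libvirt/qemu.conf }
    - { src: virtsecretd.conf, dest: /etc/libvirt/virtsecretd.conf }
    - { src: auth.conf, dest: /etc/libvirt/auth.conf, mode: '0600' }   # credentials file, hence the stricter mode
    - { src: sasl_libvirt.conf, dest: /etc/sasl2/libvirt.conf }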
Oct 11 01:32:05 compute-0 sudo[128928]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ctyiquybxxjaxlgqawulkrockfelhnvg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146325.4161623-667-221398909045322/AnsiballZ_command.py'
Oct 11 01:32:05 compute-0 sudo[128928]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:06 compute-0 python3.9[128930]: ansible-ansible.legacy.command Invoked with cmd=saslpasswd2 -f /etc/libvirt/passwd.db -p -a libvirt -u openstack migration stdin=12345678 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None
Oct 11 01:32:06 compute-0 sudo[128928]: pam_unix(sudo:session): session closed for user root
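The saslpasswd2 step creates the migration@openstack SASL credential used for live migration. Note that the module logged its stdin verbatim (stdin=12345678), so the password landed in the journal; a reconstruction that keeps it out would add no_log (the variable name below is hypothetical):

- name: Register the libvirt SASL credential for live migration
  ansible.builtin.command:
    cmd: saslpasswd2 -f /etc/libvirt/passwd.db -p -a libvirt -u openstack migration
    stdin: "{{ libvirt_migration_password }}"  # hypothetical variable; the run above piped the literal value
  no_log: true                                 # not set in the logged run, which is why the secret is visible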
Oct 11 01:32:06 compute-0 sudo[129081]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sbzbybxeksgkpvdfqwfcgdesxnxpahxj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146326.360262-676-214809144622667/AnsiballZ_file.py'
Oct 11 01:32:06 compute-0 sudo[129081]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:07 compute-0 python3.9[129083]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtlogd.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:07 compute-0 sudo[129081]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:07 compute-0 sudo[129233]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-phyvbyoyyehxfhywulkdcfhbsvzwyjkm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146327.244121-676-234901580731924/AnsiballZ_file.py'
Oct 11 01:32:07 compute-0 sudo[129233]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:07 compute-0 python3.9[129235]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtlogd-admin.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:07 compute-0 sudo[129233]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:08 compute-0 sudo[129385]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-anxmqljdvlavydebmisyychkfjxwgzbb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146328.094374-676-152109932235879/AnsiballZ_file.py'
Oct 11 01:32:08 compute-0 sudo[129385]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:08 compute-0 python3.9[129387]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtnodedevd.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:08 compute-0 sudo[129385]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:09 compute-0 sudo[129537]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fmkflwsjqwwpxhwxcdeigyccljrffjqg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146328.861771-676-200257344185557/AnsiballZ_file.py'
Oct 11 01:32:09 compute-0 sudo[129537]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:09 compute-0 python3.9[129539]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtnodedevd-ro.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:09 compute-0 sudo[129537]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:10 compute-0 sudo[129689]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yvarxdzwzqlpoyvutnjobokxuabyrukp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146329.6617067-676-191222544815202/AnsiballZ_file.py'
Oct 11 01:32:10 compute-0 sudo[129689]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:10 compute-0 python3.9[129691]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtnodedevd-admin.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:10 compute-0 sudo[129689]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:10 compute-0 sudo[129841]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wvudapyxcouprfafzrdbbfuiixrnpfor ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146330.4431605-676-94447466544887/AnsiballZ_file.py'
Oct 11 01:32:10 compute-0 sudo[129841]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:10 compute-0 python3.9[129843]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtproxyd.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:11 compute-0 sudo[129841]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:11 compute-0 sudo[129993]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iqxmhyvdcpekzccgkibqqqtgqhbdsdbp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146331.1828363-676-28731593265280/AnsiballZ_file.py'
Oct 11 01:32:11 compute-0 sudo[129993]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:11 compute-0 python3.9[129995]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtproxyd-ro.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:11 compute-0 sudo[129993]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:12 compute-0 sudo[130145]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dsxerqntjxjxmjtiwyttfepzeedxdonv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146332.0080304-676-261558968542823/AnsiballZ_file.py'
Oct 11 01:32:12 compute-0 sudo[130145]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:12 compute-0 python3.9[130147]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtproxyd-admin.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:12 compute-0 sudo[130145]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:13 compute-0 sudo[130297]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kxltaezaytabrabmsomgyvgyhnpymfps ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146332.8594792-676-274997545097758/AnsiballZ_file.py'
Oct 11 01:32:13 compute-0 sudo[130297]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:13 compute-0 python3.9[130299]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtqemud.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:13 compute-0 sudo[130297]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:14 compute-0 sudo[130449]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-erzlknmddfirwrnscvhffsnwrtyrjtgu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146333.7177215-676-248004151492415/AnsiballZ_file.py'
Oct 11 01:32:14 compute-0 sudo[130449]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:14 compute-0 python3.9[130451]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtqemud-ro.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:14 compute-0 sudo[130449]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:14 compute-0 sudo[130601]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mrllvpolxeholdjldxutmyvxlzyvsdlc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146334.5569544-676-124337602935607/AnsiballZ_file.py'
Oct 11 01:32:14 compute-0 sudo[130601]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:15 compute-0 python3.9[130603]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtqemud-admin.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:15 compute-0 sudo[130601]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:15 compute-0 sudo[130753]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zgrbjrpudwiizfckdqmnvrvumphciyhr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146335.363141-676-245086702585168/AnsiballZ_file.py'
Oct 11 01:32:15 compute-0 sudo[130753]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:15 compute-0 python3.9[130755]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtsecretd.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:15 compute-0 sudo[130753]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:16 compute-0 sudo[130905]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yvoqxnsnnhreemwdwlvtimwcmolqawzw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146336.202817-676-175838778391120/AnsiballZ_file.py'
Oct 11 01:32:16 compute-0 sudo[130905]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:16 compute-0 python3.9[130907]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtsecretd-ro.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:16 compute-0 sudo[130905]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:17 compute-0 sudo[131057]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gcobnliuanrizysguyhposxxddavkzpt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146337.0211473-676-281368471902937/AnsiballZ_file.py'
Oct 11 01:32:17 compute-0 sudo[131057]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:17 compute-0 python3.9[131059]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtsecretd-admin.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:17 compute-0 sudo[131057]: pam_unix(sudo:session): session closed for user root
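From 01:32:06 to 01:32:17 a drop-in directory /etc/systemd/system/<unit>.d is created for each of the fourteen socket units enabled earlier, again one ansible.builtin.file call per unit. Looped:

- name: Create systemd drop-in directories for the libvirt sockets
  vars:
    libvirt_sockets: [virtlogd.socket, virtlogd-admin.socket,
                      virtnodedevd.socket, virtnodedevd-ro.socket, virtnodedevd-admin.socket,
                      virtproxyd.socket, virtproxyd-ro.socket, virtproxyd-admin.socket,
                      virtqemud.socket, virtqemud-ro.socket, virtqemud-admin.socket,
                      virtsecretd.socket, virtsecretd-ro.socket, virtsecretd-admin.socket]
  ansible.builtin.file:
    path: "/etc/systemd/system/{{ item }}.d"
    state: directory
    owner: root
    group: root
    mode: '0755'
  loop: "{{ libvirt_sockets }}"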
Oct 11 01:32:18 compute-0 sudo[131209]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kfxdtcyszyngpcjwmrzbpqkysclfshwd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146337.9763885-775-114782028804319/AnsiballZ_stat.py'
Oct 11 01:32:18 compute-0 sudo[131209]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:18 compute-0 python3.9[131211]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtlogd.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:32:18 compute-0 sudo[131209]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:19 compute-0 sudo[131332]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qrrnnjxowsyytgjjhxtixognvegiloyl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146337.9763885-775-114782028804319/AnsiballZ_copy.py'
Oct 11 01:32:19 compute-0 sudo[131332]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:19 compute-0 python3.9[131334]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/virtlogd.socket.d/override.conf group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146337.9763885-775-114782028804319/.source.conf follow=False _original_basename=libvirt-socket.unit.j2 checksum=0bad41f409b4ee7e780a2a59dc18f5c84ed99826 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:19 compute-0 sudo[131332]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:19 compute-0 sudo[131484]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eweuhqsebnzatxhytfifdfkdanyzpsas ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146339.541697-775-90104975856324/AnsiballZ_stat.py'
Oct 11 01:32:19 compute-0 sudo[131484]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:20 compute-0 python3.9[131486]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtlogd-admin.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:32:20 compute-0 sudo[131484]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:20 compute-0 sudo[131607]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gqwodmobawnmxtphgarniojobcbzudty ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146339.541697-775-90104975856324/AnsiballZ_copy.py'
Oct 11 01:32:20 compute-0 sudo[131607]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:20 compute-0 python3.9[131609]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/virtlogd-admin.socket.d/override.conf group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146339.541697-775-90104975856324/.source.conf follow=False _original_basename=libvirt-socket.unit.j2 checksum=0bad41f409b4ee7e780a2a59dc18f5c84ed99826 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:20 compute-0 sudo[131607]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:21 compute-0 sudo[131759]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jbjojsejnnfwhpslhcgwcpghargnjxiv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146341.208064-775-249939680764606/AnsiballZ_stat.py'
Oct 11 01:32:21 compute-0 sudo[131759]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:21 compute-0 python3.9[131761]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtnodedevd.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:32:21 compute-0 sudo[131759]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:22 compute-0 sudo[131882]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-knfpalsyrclayrrlekcjxhtnclrizekx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146341.208064-775-249939680764606/AnsiballZ_copy.py'
Oct 11 01:32:22 compute-0 sudo[131882]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:22 compute-0 python3.9[131884]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/virtnodedevd.socket.d/override.conf group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146341.208064-775-249939680764606/.source.conf follow=False _original_basename=libvirt-socket.unit.j2 checksum=0bad41f409b4ee7e780a2a59dc18f5c84ed99826 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:22 compute-0 sudo[131882]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:23 compute-0 sudo[132034]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uhzrckcxmoilqnokkydqtjywqbgunwpb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146342.8192496-775-225839540425706/AnsiballZ_stat.py'
Oct 11 01:32:23 compute-0 sudo[132034]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:23 compute-0 python3.9[132036]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtnodedevd-ro.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:32:23 compute-0 sudo[132034]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:23 compute-0 sudo[132157]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xjpjqxjhptobbdrwcfggavyqhjtrithh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146342.8192496-775-225839540425706/AnsiballZ_copy.py'
Oct 11 01:32:23 compute-0 sudo[132157]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:24 compute-0 python3.9[132159]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/virtnodedevd-ro.socket.d/override.conf group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146342.8192496-775-225839540425706/.source.conf follow=False _original_basename=libvirt-socket.unit.j2 checksum=0bad41f409b4ee7e780a2a59dc18f5c84ed99826 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:24 compute-0 sudo[132157]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:24 compute-0 sudo[132309]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oikkfnecsizzojkvksypdcjwxjwmngwa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146344.4331276-775-164248647461474/AnsiballZ_stat.py'
Oct 11 01:32:24 compute-0 sudo[132309]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:25 compute-0 python3.9[132311]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtnodedevd-admin.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:32:25 compute-0 sudo[132309]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:25 compute-0 sudo[132432]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nufeuuutnctjhvotugzgzlclgmyigfye ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146344.4331276-775-164248647461474/AnsiballZ_copy.py'
Oct 11 01:32:25 compute-0 sudo[132432]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:25 compute-0 python3.9[132434]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/virtnodedevd-admin.socket.d/override.conf group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146344.4331276-775-164248647461474/.source.conf follow=False _original_basename=libvirt-socket.unit.j2 checksum=0bad41f409b4ee7e780a2a59dc18f5c84ed99826 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:25 compute-0 sudo[132432]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:26 compute-0 sudo[132584]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uplorewyamxrbhmwigzsutjidfcvmtev ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146345.9819639-775-249723897596399/AnsiballZ_stat.py'
Oct 11 01:32:26 compute-0 sudo[132584]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:26 compute-0 python3.9[132586]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtproxyd.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:32:26 compute-0 sudo[132584]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:27 compute-0 sudo[132707]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ygeudgbifsnkbjsxdupqhtgoxejptsyk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146345.9819639-775-249723897596399/AnsiballZ_copy.py'
Oct 11 01:32:27 compute-0 sudo[132707]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:27 compute-0 python3.9[132709]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/virtproxyd.socket.d/override.conf group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146345.9819639-775-249723897596399/.source.conf follow=False _original_basename=libvirt-socket.unit.j2 checksum=0bad41f409b4ee7e780a2a59dc18f5c84ed99826 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:27 compute-0 sudo[132707]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:27 compute-0 sudo[132859]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zrvzbbjimrsmxivuqhtosvzbohhipxqa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146347.4472942-775-207285250591342/AnsiballZ_stat.py'
Oct 11 01:32:27 compute-0 sudo[132859]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:28 compute-0 python3.9[132861]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtproxyd-ro.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:32:28 compute-0 sudo[132859]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:28 compute-0 sudo[132982]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wtkhqqmgtgqbywpblapsfnurzmaikwxq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146347.4472942-775-207285250591342/AnsiballZ_copy.py'
Oct 11 01:32:28 compute-0 sudo[132982]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:28 compute-0 python3.9[132984]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/virtproxyd-ro.socket.d/override.conf group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146347.4472942-775-207285250591342/.source.conf follow=False _original_basename=libvirt-socket.unit.j2 checksum=0bad41f409b4ee7e780a2a59dc18f5c84ed99826 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:28 compute-0 sudo[132982]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:29 compute-0 sudo[133134]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wftncyxraswxducjhmgmigaotwddyuds ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146348.8594525-775-27473359939460/AnsiballZ_stat.py'
Oct 11 01:32:29 compute-0 sudo[133134]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:29 compute-0 python3.9[133136]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtproxyd-admin.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:32:29 compute-0 sudo[133134]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:29 compute-0 sudo[133257]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wcvitnccwmhbuoorwgqltcqgqewvntlc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146348.8594525-775-27473359939460/AnsiballZ_copy.py'
Oct 11 01:32:29 compute-0 sudo[133257]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:30 compute-0 python3.9[133259]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/virtproxyd-admin.socket.d/override.conf group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146348.8594525-775-27473359939460/.source.conf follow=False _original_basename=libvirt-socket.unit.j2 checksum=0bad41f409b4ee7e780a2a59dc18f5c84ed99826 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:30 compute-0 sudo[133257]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:30 compute-0 sudo[133427]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sqjdujoanmhkupvkzdexuxcmecxjdcpx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146350.419158-775-235006532031270/AnsiballZ_stat.py'
Oct 11 01:32:30 compute-0 sudo[133427]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:30 compute-0 podman[133383]: 2025-10-11 01:32:30.8677203 +0000 UTC m=+0.133005997 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_id=ovn_controller, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 01:32:31 compute-0 python3.9[133435]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtqemud.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:32:31 compute-0 sudo[133427]: pam_unix(sudo:session): session closed for user root
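[annotation] The interleaved podman line above is the periodic health check of the ovn_controller container reporting healthy. The same check can be invoked by hand with podman's healthcheck subcommand:

    # Run the container's configured health check once; exit 0 means healthy:
    sudo podman healthcheck run ovn_controller && echo healthy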
Oct 11 01:32:31 compute-0 sudo[133559]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jufwzjpgmitibsfnrpkuerhtgpnoghqo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146350.419158-775-235006532031270/AnsiballZ_copy.py'
Oct 11 01:32:31 compute-0 sudo[133559]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:31 compute-0 python3.9[133561]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/virtqemud.socket.d/override.conf group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146350.419158-775-235006532031270/.source.conf follow=False _original_basename=libvirt-socket.unit.j2 checksum=0bad41f409b4ee7e780a2a59dc18f5c84ed99826 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:31 compute-0 sudo[133559]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:32 compute-0 sudo[133711]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zuxjctljknfwkxouffkfgqaxlmcqiaxt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146351.9795692-775-84350747649292/AnsiballZ_stat.py'
Oct 11 01:32:32 compute-0 sudo[133711]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:32 compute-0 python3.9[133713]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtqemud-ro.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:32:32 compute-0 sudo[133711]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:33 compute-0 sudo[133834]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wlrgcpqhsdgnljovxqztwlhbuvexyahd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146351.9795692-775-84350747649292/AnsiballZ_copy.py'
Oct 11 01:32:33 compute-0 sudo[133834]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:33 compute-0 python3.9[133836]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/virtqemud-ro.socket.d/override.conf group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146351.9795692-775-84350747649292/.source.conf follow=False _original_basename=libvirt-socket.unit.j2 checksum=0bad41f409b4ee7e780a2a59dc18f5c84ed99826 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:33 compute-0 sudo[133834]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:33 compute-0 sudo[133986]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ucdjvljrswqxmzpijanuwzjdofhbjagt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146353.557018-775-8360105220654/AnsiballZ_stat.py'
Oct 11 01:32:33 compute-0 sudo[133986]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:34 compute-0 python3.9[133988]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtqemud-admin.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:32:34 compute-0 sudo[133986]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:34 compute-0 sudo[134109]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vqgwooevstdqocaaogbtvuiocfsdamhd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146353.557018-775-8360105220654/AnsiballZ_copy.py'
Oct 11 01:32:34 compute-0 sudo[134109]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:34 compute-0 python3.9[134111]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/virtqemud-admin.socket.d/override.conf group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146353.557018-775-8360105220654/.source.conf follow=False _original_basename=libvirt-socket.unit.j2 checksum=0bad41f409b4ee7e780a2a59dc18f5c84ed99826 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:34 compute-0 sudo[134109]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:35 compute-0 sudo[134261]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ucciwouherwvgzyqlopvpulptcpgjwjh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146354.9990273-775-220861005239986/AnsiballZ_stat.py'
Oct 11 01:32:35 compute-0 sudo[134261]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:35 compute-0 python3.9[134263]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtsecretd.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:32:35 compute-0 sudo[134261]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:36 compute-0 sudo[134384]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pdkmzyrpfqxokwfkatqfagokjkfsblhu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146354.9990273-775-220861005239986/AnsiballZ_copy.py'
Oct 11 01:32:36 compute-0 sudo[134384]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:36 compute-0 python3.9[134386]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/virtsecretd.socket.d/override.conf group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146354.9990273-775-220861005239986/.source.conf follow=False _original_basename=libvirt-socket.unit.j2 checksum=0bad41f409b4ee7e780a2a59dc18f5c84ed99826 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:36 compute-0 sudo[134384]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:36 compute-0 sudo[134536]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-glhopjzqqfimrxpjdvhltakvwnnommdk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146356.5611079-775-47235193451938/AnsiballZ_stat.py'
Oct 11 01:32:36 compute-0 sudo[134536]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:37 compute-0 python3.9[134538]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtsecretd-ro.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:32:37 compute-0 sudo[134536]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:37 compute-0 sudo[134659]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uvwzxxfqnplslxnpqxuwqtwbbvxpmtcc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146356.5611079-775-47235193451938/AnsiballZ_copy.py'
Oct 11 01:32:37 compute-0 sudo[134659]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:37 compute-0 python3.9[134661]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/virtsecretd-ro.socket.d/override.conf group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146356.5611079-775-47235193451938/.source.conf follow=False _original_basename=libvirt-socket.unit.j2 checksum=0bad41f409b4ee7e780a2a59dc18f5c84ed99826 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:37 compute-0 sudo[134659]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:38 compute-0 sudo[134811]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qsxixiaobowhawixaumxhjrgryezotyt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146358.1525068-775-121470830946890/AnsiballZ_stat.py'
Oct 11 01:32:38 compute-0 sudo[134811]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:38 compute-0 python3.9[134813]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtsecretd-admin.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:32:38 compute-0 sudo[134811]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:39 compute-0 sudo[134934]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kwlniedaobjvnkwjoknoeokstyppokdu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146358.1525068-775-121470830946890/AnsiballZ_copy.py'
Oct 11 01:32:39 compute-0 sudo[134934]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:39 compute-0 python3.9[134936]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/virtsecretd-admin.socket.d/override.conf group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146358.1525068-775-121470830946890/.source.conf follow=False _original_basename=libvirt-socket.unit.j2 checksum=0bad41f409b4ee7e780a2a59dc18f5c84ed99826 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:39 compute-0 sudo[134934]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:40 compute-0 python3.9[135086]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail
                                             ls -lRZ /run/libvirt | grep -E ':container_\S+_t'
                                              _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
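[annotation] This shell task scans the libvirt runtime directory for files still carrying container-scoped SELinux types, presumably to confirm that nothing under /run/libvirt remains labeled from a previously containerized libvirt; grep's exit status carries the result. A sketch of the same check run interactively:

    # List anything under /run/libvirt labeled with a container_*_t
    # SELinux type; grep exits non-zero when nothing matches:
    sudo ls -lRZ /run/libvirt | grep -E ':container_\S+_t' \
        || echo 'no container-labeled files under /run/libvirt'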
Oct 11 01:32:41 compute-0 sudo[135239]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yzuzlqqlcaisxsvhihnacdgcvuudcoce ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146360.6836867-981-102638497920342/AnsiballZ_seboolean.py'
Oct 11 01:32:41 compute-0 sudo[135239]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:41 compute-0 python3.9[135241]: ansible-ansible.posix.seboolean Invoked with name=os_enable_vtpm persistent=True state=True ignore_selinux_state=False
Oct 11 01:32:42 compute-0 sudo[135239]: pam_unix(sudo:session): session closed for user root
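[annotation] The seboolean task enables the os_enable_vtpm SELinux boolean persistently, which is likely what triggers the policy reload (seqno=15) logged by dbus-broker-launch just below. The CLI equivalent:

    # ansible.posix.seboolean with persistent=True maps to setsebool -P:
    sudo setsebool -P os_enable_vtpm on
    getsebool os_enable_vtpm    # expect: os_enable_vtpm --> on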
Oct 11 01:32:43 compute-0 sudo[135395]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fngxnsahfuosqcktqthayrucighfmqcx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146362.9774683-989-32845099156837/AnsiballZ_copy.py'
Oct 11 01:32:43 compute-0 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=15 res=1
Oct 11 01:32:43 compute-0 sudo[135395]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:43 compute-0 python3.9[135397]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/libvirt/servercert.pem group=root mode=0644 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/tls.crt backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:43 compute-0 sudo[135395]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:44 compute-0 sudo[135547]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ywubyovxykskspggbwarrgqgyevptxjy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146363.8237839-989-199665141396144/AnsiballZ_copy.py'
Oct 11 01:32:44 compute-0 sudo[135547]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:44 compute-0 python3.9[135549]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/libvirt/private/serverkey.pem group=root mode=0600 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/tls.key backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:44 compute-0 sudo[135547]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:45 compute-0 sudo[135699]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rkjmvecxhhsysheegoxyulhdaotggsjy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146364.6894603-989-272490176945011/AnsiballZ_copy.py'
Oct 11 01:32:45 compute-0 sudo[135699]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:45 compute-0 python3.9[135701]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/libvirt/clientcert.pem group=root mode=0644 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/tls.crt backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:45 compute-0 sudo[135699]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:45 compute-0 sudo[135851]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sogcaeakzjtqzcsgeovuczicyjfhmfmk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146365.503166-989-113342718081235/AnsiballZ_copy.py'
Oct 11 01:32:45 compute-0 sudo[135851]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:46 compute-0 python3.9[135853]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/libvirt/private/clientkey.pem group=root mode=0644 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/tls.key backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:46 compute-0 sudo[135851]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:46 compute-0 sudo[136003]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cyhsrwlsvqgalivcpmzvvbmflnrlfgsb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146366.2422595-989-159960732118548/AnsiballZ_copy.py'
Oct 11 01:32:46 compute-0 sudo[136003]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:46 compute-0 python3.9[136005]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/CA/cacert.pem group=root mode=0644 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/ca.crt backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:46 compute-0 sudo[136003]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:47 compute-0 sudo[136155]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jfdrihirfcqdqyaacaplhlqdqqxlpskh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146367.1010118-1025-35886712666189/AnsiballZ_copy.py'
Oct 11 01:32:47 compute-0 sudo[136155]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:47 compute-0 python3.9[136157]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/qemu/server-cert.pem group=qemu mode=0640 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/tls.crt backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:47 compute-0 sudo[136155]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:48 compute-0 sudo[136307]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hpjdfsiljyoxlkuurkuqwunjrusbtqhp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146367.9189324-1025-45860583286079/AnsiballZ_copy.py'
Oct 11 01:32:48 compute-0 sudo[136307]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:48 compute-0 python3.9[136309]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/qemu/server-key.pem group=qemu mode=0640 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/tls.key backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:48 compute-0 sudo[136307]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:49 compute-0 sudo[136460]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-svcnlpyiuqxdbpequcouyezhobaijukz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146368.780973-1025-141804152506505/AnsiballZ_copy.py'
Oct 11 01:32:49 compute-0 sudo[136460]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:49 compute-0 python3.9[136462]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/qemu/client-cert.pem group=qemu mode=0640 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/tls.crt backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:49 compute-0 sudo[136460]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:50 compute-0 sudo[136612]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-duotqnlklplxvaaswgoirvzqkrdmnshl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146369.63742-1025-172013613573998/AnsiballZ_copy.py'
Oct 11 01:32:50 compute-0 sudo[136612]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:50 compute-0 python3.9[136614]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/qemu/client-key.pem group=qemu mode=0640 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/tls.key backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:50 compute-0 sudo[136612]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:50 compute-0 sudo[136764]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-myvumahogpqzzagwllowqskkmluywgmj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146370.4577718-1025-64195732579977/AnsiballZ_copy.py'
Oct 11 01:32:50 compute-0 sudo[136764]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:51 compute-0 python3.9[136766]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/qemu/ca-cert.pem group=qemu mode=0640 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/ca.crt backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:51 compute-0 sudo[136764]: pam_unix(sudo:session): session closed for user root
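[annotation] The copy tasks above fan one TLS key pair out to both the libvirt and QEMU PKI locations: servercert.pem and clientcert.pem are copies of the same tls.crt, and the CA certificate lands at both /etc/pki/CA/cacert.pem and /etc/pki/qemu/ca-cert.pem. A quick post-copy sanity check (a sketch; requires openssl):

    # Confirm the deployed server certificate chains to the deployed CA:
    sudo openssl verify -CAfile /etc/pki/CA/cacert.pem /etc/pki/libvirt/servercert.pem
    # Inspect subject and expiry of the client certificate:
    sudo openssl x509 -in /etc/pki/libvirt/clientcert.pem -noout -subject -enddate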
Oct 11 01:32:51 compute-0 sudo[136916]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vvactpkmiplzatoxftbeefplycvsmuop ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146371.3582544-1061-192484193839773/AnsiballZ_systemd.py'
Oct 11 01:32:51 compute-0 sudo[136916]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:52 compute-0 python3.9[136918]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True name=virtlogd.service state=restarted daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:32:52 compute-0 systemd[1]: Reloading.
Oct 11 01:32:52 compute-0 systemd-rc-local-generator[136947]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:32:52 compute-0 systemd-sysv-generator[136950]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:32:52 compute-0 systemd[1]: Starting libvirt logging daemon socket...
Oct 11 01:32:52 compute-0 systemd[1]: Listening on libvirt logging daemon socket.
Oct 11 01:32:52 compute-0 systemd[1]: Starting libvirt logging daemon admin socket...
Oct 11 01:32:52 compute-0 systemd[1]: Listening on libvirt logging daemon admin socket.
Oct 11 01:32:52 compute-0 systemd[1]: Starting libvirt logging daemon...
Oct 11 01:32:52 compute-0 systemd[1]: Started libvirt logging daemon.
Oct 11 01:32:52 compute-0 sudo[136916]: pam_unix(sudo:session): session closed for user root
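[annotation] Each ansible.builtin.systemd task in this stretch does a daemon reload followed by a restart; the socket units come up before the daemon, and the reload is what makes the freshly installed override.conf drop-ins take effect. For virtlogd the sequence above is equivalent to:

    # daemon_reload=True, state=restarted in one pass:
    sudo systemctl daemon-reload
    sudo systemctl restart virtlogd.service
    systemctl is-active virtlogd.service   # expect: active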
Oct 11 01:32:53 compute-0 sudo[137110]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kaadydtsmbjxkbkqobvhjyqpbdgmzpqp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146372.8560476-1061-112725351980705/AnsiballZ_systemd.py'
Oct 11 01:32:53 compute-0 sudo[137110]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:53 compute-0 python3.9[137112]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True name=virtnodedevd.service state=restarted daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:32:53 compute-0 systemd[1]: Reloading.
Oct 11 01:32:53 compute-0 systemd-rc-local-generator[137140]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:32:53 compute-0 systemd-sysv-generator[137144]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:32:53 compute-0 systemd[1]: Starting libvirt nodedev daemon socket...
Oct 11 01:32:53 compute-0 systemd[1]: Listening on libvirt nodedev daemon socket.
Oct 11 01:32:53 compute-0 systemd[1]: Starting libvirt nodedev daemon admin socket...
Oct 11 01:32:53 compute-0 systemd[1]: Starting libvirt nodedev daemon read-only socket...
Oct 11 01:32:53 compute-0 systemd[1]: Listening on libvirt nodedev daemon admin socket.
Oct 11 01:32:53 compute-0 systemd[1]: Listening on libvirt nodedev daemon read-only socket.
Oct 11 01:32:53 compute-0 systemd[1]: Starting libvirt nodedev daemon...
Oct 11 01:32:54 compute-0 systemd[1]: Started libvirt nodedev daemon.
Oct 11 01:32:54 compute-0 systemd[1]: Starting SETroubleshoot daemon for processing new SELinux denial logs...
Oct 11 01:32:54 compute-0 sudo[137110]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:54 compute-0 systemd[1]: Started SETroubleshoot daemon for processing new SELinux denial logs.
Oct 11 01:32:54 compute-0 sudo[137329]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kstlxhrpjtdwdcwukmviijdhegcntxeh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146374.2703958-1061-150899641117926/AnsiballZ_systemd.py'
Oct 11 01:32:54 compute-0 sudo[137329]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:54 compute-0 systemd[1]: Created slice Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged.
Oct 11 01:32:54 compute-0 systemd[1]: Started dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged@0.service.
Oct 11 01:32:54 compute-0 python3.9[137334]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True name=virtproxyd.service state=restarted daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:32:54 compute-0 systemd[1]: Reloading.
Oct 11 01:32:55 compute-0 systemd-rc-local-generator[137362]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:32:55 compute-0 systemd-sysv-generator[137369]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:32:55 compute-0 systemd[1]: Starting libvirt proxy daemon admin socket...
Oct 11 01:32:55 compute-0 systemd[1]: Starting libvirt proxy daemon read-only socket...
Oct 11 01:32:55 compute-0 systemd[1]: Listening on libvirt proxy daemon admin socket.
Oct 11 01:32:55 compute-0 systemd[1]: Listening on libvirt proxy daemon read-only socket.
Oct 11 01:32:55 compute-0 systemd[1]: Starting libvirt proxy daemon...
Oct 11 01:32:55 compute-0 systemd[1]: Started libvirt proxy daemon.
Oct 11 01:32:55 compute-0 sudo[137329]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:55 compute-0 setroubleshoot[137174]: SELinux is preventing /usr/sbin/virtlogd from using the dac_read_search capability. For complete SELinux messages run: sealert -l 1c44ed65-f2eb-4f37-9f7f-7b14e9aa2d3b
Oct 11 01:32:55 compute-0 setroubleshoot[137174]: SELinux is preventing /usr/sbin/virtlogd from using the dac_read_search capability.
                                                  
                                                  *****  Plugin dac_override (91.4 confidence) suggests   **********************
                                                  
                                                  If you want to help identify if domain needs this access or you have a file with the wrong permissions on your system
                                                  Then turn on full auditing to get path information about the offending file and generate the error again.
                                                  Do
                                                  
                                                  Turn on full auditing
                                                  # auditctl -w /etc/shadow -p w
                                                  Try to recreate AVC. Then execute
                                                  # ausearch -m avc -ts recent
                                                  If you see PATH record check ownership/permissions on file, and fix it,
                                                  otherwise report as a bugzilla.
                                                  
                                                  *****  Plugin catchall (9.59 confidence) suggests   **************************
                                                  
                                                  If you believe that virtlogd should have the dac_read_search capability by default.
                                                  Then you should report this as a bug.
                                                  You can generate a local policy module to allow this access.
                                                  Do
                                                  allow this access for now by executing:
                                                  # ausearch -c 'virtlogd' --raw | audit2allow -M my-virtlogd
                                                  # semodule -X 300 -i my-virtlogd.pp
                                                  
Oct 11 01:32:56 compute-0 sudo[137545]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-irlyibyibzgtokrdmszndycfvefrrwsi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146375.6194746-1061-122541641099429/AnsiballZ_systemd.py'
Oct 11 01:32:56 compute-0 sudo[137545]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:56 compute-0 python3.9[137547]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True name=virtqemud.service state=restarted daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:32:56 compute-0 systemd[1]: Reloading.
Oct 11 01:32:56 compute-0 systemd-sysv-generator[137578]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:32:56 compute-0 systemd-rc-local-generator[137575]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:32:56 compute-0 systemd[1]: Listening on libvirt locking daemon socket.
Oct 11 01:32:56 compute-0 systemd[1]: Starting libvirt QEMU daemon socket...
Oct 11 01:32:56 compute-0 systemd[1]: Virtual Machine and Container Storage (Compatibility) was skipped because of an unmet condition check (ConditionPathExists=/var/lib/machines.raw).
Oct 11 01:32:56 compute-0 systemd[1]: Starting Virtual Machine and Container Registration Service...
Oct 11 01:32:56 compute-0 systemd[1]: Listening on libvirt QEMU daemon socket.
Oct 11 01:32:56 compute-0 systemd[1]: Starting libvirt QEMU daemon admin socket...
Oct 11 01:32:56 compute-0 systemd[1]: Starting libvirt QEMU daemon read-only socket...
Oct 11 01:32:56 compute-0 systemd[1]: Started Virtual Machine and Container Registration Service.
Oct 11 01:32:56 compute-0 systemd[1]: Listening on libvirt QEMU daemon admin socket.
Oct 11 01:32:56 compute-0 systemd[1]: Listening on libvirt QEMU daemon read-only socket.
Oct 11 01:32:56 compute-0 systemd[1]: Starting libvirt QEMU daemon...
Oct 11 01:32:56 compute-0 systemd[1]: Started libvirt QEMU daemon.
Oct 11 01:32:56 compute-0 sudo[137545]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:57 compute-0 sudo[137757]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-shdgcnamqulacplhiyvfcnycwtwpciid ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146376.9974856-1061-124021006178138/AnsiballZ_systemd.py'
Oct 11 01:32:57 compute-0 sudo[137757]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:57 compute-0 python3.9[137759]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True name=virtsecretd.service state=restarted daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:32:57 compute-0 systemd[1]: Reloading.
Oct 11 01:32:57 compute-0 systemd-rc-local-generator[137785]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:32:57 compute-0 systemd-sysv-generator[137791]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:32:58 compute-0 systemd[1]: Starting libvirt secret daemon socket...
Oct 11 01:32:58 compute-0 systemd[1]: Listening on libvirt secret daemon socket.
Oct 11 01:32:58 compute-0 systemd[1]: Starting libvirt secret daemon admin socket...
Oct 11 01:32:58 compute-0 systemd[1]: Starting libvirt secret daemon read-only socket...
Oct 11 01:32:58 compute-0 systemd[1]: Listening on libvirt secret daemon admin socket.
Oct 11 01:32:58 compute-0 systemd[1]: Listening on libvirt secret daemon read-only socket.
Oct 11 01:32:58 compute-0 systemd[1]: Starting libvirt secret daemon...
Oct 11 01:32:58 compute-0 systemd[1]: Started libvirt secret daemon.
Oct 11 01:32:58 compute-0 sudo[137757]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:58 compute-0 sudo[137967]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gmnqfqboczudticszpvzdtwqdmharykp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146378.5205235-1098-137468063525569/AnsiballZ_file.py'
Oct 11 01:32:58 compute-0 sudo[137967]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:59 compute-0 python3.9[137969]: ansible-ansible.builtin.file Invoked with mode=0755 path=/var/lib/openstack/config/ceph state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:32:59 compute-0 sudo[137967]: pam_unix(sudo:session): session closed for user root
Oct 11 01:32:59 compute-0 sudo[138119]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-apsbdpdyvweiksvyfjzswfsjelhfkofw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146379.3396366-1106-17196992074855/AnsiballZ_find.py'
Oct 11 01:32:59 compute-0 sudo[138119]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:32:59 compute-0 python3.9[138121]: ansible-ansible.builtin.find Invoked with paths=['/var/lib/openstack/config/ceph'] patterns=['*.conf'] read_whole_file=False file_type=file age_stamp=mtime recurse=False hidden=False follow=False get_checksum=False checksum_algorithm=sha1 use_regex=False exact_mode=True excludes=None contains=None age=None size=None depth=None mode=None encoding=None limit=None
Oct 11 01:32:59 compute-0 sudo[138119]: pam_unix(sudo:session): session closed for user root
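[annotation] The find task enumerates any ceph configuration files staged into the directory created by the preceding task, non-recursively and matching only *.conf. A rough interactive equivalent:

    # recurse=False with file_type=file corresponds to -maxdepth 1 -type f:
    find /var/lib/openstack/config/ceph -maxdepth 1 -type f -name '*.conf'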
Oct 11 01:33:00 compute-0 sudo[138271]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vacczdywmnjwfveqcpretkwtlvrktxmo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146380.3869503-1120-44282238017130/AnsiballZ_stat.py'
Oct 11 01:33:00 compute-0 sudo[138271]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:00 compute-0 python3.9[138273]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/libvirt.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:33:01 compute-0 sudo[138271]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:01 compute-0 podman[138288]: 2025-10-11 01:33:01.289671087 +0000 UTC m=+0.182778467 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_controller, io.buildah.version=1.41.3, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 01:33:01 compute-0 sudo[138420]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-klhxsxvzikpnvwvewztdblazclgsvqui ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146380.3869503-1120-44282238017130/AnsiballZ_copy.py'
Oct 11 01:33:01 compute-0 sudo[138420]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:01 compute-0 python3.9[138422]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/edpm-config/firewall/libvirt.yaml mode=0640 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146380.3869503-1120-44282238017130/.source.yaml follow=False _original_basename=firewall.yaml.j2 checksum=5ca83b1310a74c5e48c4c3d4640e1cb8fdac1061 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:01 compute-0 sudo[138420]: pam_unix(sudo:session): session closed for user root
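[annotation] The rendered firewall.yaml.j2 content is not logged, so the exact rules deployed here are unknown; EDPM-style firewall files under /var/lib/edpm-config/firewall are YAML lists of named rules, roughly of this shape (a hypothetical example only):

    # Hypothetical example -- rule name and port are illustrative
    # (16514 is libvirt's conventional TLS port), not read from the log:
    cat <<'EOF'
    - rule_name: '200 libvirt tls'
      rule:
        proto: tcp
        dport: 16514
    EOF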
Oct 11 01:33:02 compute-0 sudo[138572]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vhvyjxluwrtyfijzycvqpqimyztmipan ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146382.1207972-1136-121006272853086/AnsiballZ_file.py'
Oct 11 01:33:02 compute-0 sudo[138572]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:02 compute-0 python3.9[138574]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:02 compute-0 sudo[138572]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:03 compute-0 sudo[138724]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kmhcotoifqedhlgojfqmzlcnvaxtrwjk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146382.9923682-1144-122780920673838/AnsiballZ_stat.py'
Oct 11 01:33:03 compute-0 sudo[138724]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:03 compute-0 python3.9[138726]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:33:03 compute-0 sudo[138724]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:03 compute-0 sudo[138802]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tmtjgohmanlofdzcuekewzgvytlvzhch ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146382.9923682-1144-122780920673838/AnsiballZ_file.py'
Oct 11 01:33:03 compute-0 sudo[138802]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:04 compute-0 python3.9[138804]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml _original_basename=base-rules.yaml.j2 recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:04 compute-0 sudo[138802]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:04 compute-0 sudo[138954]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yhlhggssoovzogptlfqiioqtvvjvtupu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146384.4477818-1156-58463213417352/AnsiballZ_stat.py'
Oct 11 01:33:04 compute-0 sudo[138954]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:05 compute-0 python3.9[138956]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:33:05 compute-0 sudo[138954]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:05 compute-0 sudo[139032]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-izrxujzehxikxpvabqkayfyohfwwnzfw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146384.4477818-1156-58463213417352/AnsiballZ_file.py'
Oct 11 01:33:05 compute-0 sudo[139032]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:05 compute-0 python3.9[139034]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml _original_basename=.5c3oyjcz recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:05 compute-0 sudo[139032]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:05 compute-0 systemd[1]: dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged@0.service: Deactivated successfully.
Oct 11 01:33:05 compute-0 systemd[1]: setroubleshootd.service: Deactivated successfully.
Oct 11 01:33:06 compute-0 sudo[139184]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ktcgsdpgueabwcxjdrgvoqqztxxdxxgh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146385.8977823-1168-144947852893204/AnsiballZ_stat.py'
Oct 11 01:33:06 compute-0 sudo[139184]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:06 compute-0 python3.9[139186]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/iptables.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:33:06 compute-0 sudo[139184]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:06 compute-0 sudo[139262]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ukohbotptheiokshtssscprbmweuqeik ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146385.8977823-1168-144947852893204/AnsiballZ_file.py'
Oct 11 01:33:06 compute-0 sudo[139262]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:07 compute-0 python3.9[139264]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/iptables.nft _original_basename=iptables.nft recurse=False state=file path=/etc/nftables/iptables.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:07 compute-0 sudo[139262]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:07 compute-0 sudo[139414]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gcdtdouwbjpsuubfyghlioqvmjjiuyxt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146387.397932-1181-234598863987386/AnsiballZ_command.py'
Oct 11 01:33:07 compute-0 sudo[139414]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:07 compute-0 python3.9[139416]: ansible-ansible.legacy.command Invoked with _raw_params=nft -j list ruleset _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:33:08 compute-0 sudo[139414]: pam_unix(sudo:session): session closed for user root
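
[annotation] `nft -j list ruleset` dumps the live ruleset as JSON so the role can compare it against the desired state. The same output can be inspected by hand with standard tooling (not taken from the log):

    nft -j list ruleset | python3 -m json.tool | head
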
Oct 11 01:33:08 compute-0 sudo[139567]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sjuwzyvbptvmucvahsveqrfvxerskgyr ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760146388.2674634-1189-266785857416947/AnsiballZ_edpm_nftables_from_files.py'
Oct 11 01:33:08 compute-0 sudo[139567]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:08 compute-0 python3[139569]: ansible-edpm_nftables_from_files Invoked with src=/var/lib/edpm-config/firewall
Oct 11 01:33:09 compute-0 sudo[139567]: pam_unix(sudo:session): session closed for user root
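
[annotation] edpm_nftables_from_files is pointed at /var/lib/edpm-config/firewall, the directory the earlier tasks populated with edpm-nftables-base.yaml and edpm-nftables-user-rules.yaml. A rough sketch of the input it consumes, assuming, as the name suggests, that the module aggregates every YAML file under src; its actual merge logic is not visible in this log:

    # list the rule sources the module would aggregate
    for f in /var/lib/edpm-config/firewall/*.yaml; do
        echo "== $f"
        cat "$f"
    done
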
Oct 11 01:33:09 compute-0 sudo[139719]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cuopqscrdmkgahekwoucizhcntpauzys ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146389.2577846-1197-19885870197197/AnsiballZ_stat.py'
Oct 11 01:33:09 compute-0 sudo[139719]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:09 compute-0 python3.9[139721]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:33:09 compute-0 sudo[139719]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:10 compute-0 sudo[139797]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pzreuttinycmhoatdsrptoosztsypgss ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146389.2577846-1197-19885870197197/AnsiballZ_file.py'
Oct 11 01:33:10 compute-0 sudo[139797]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:10 compute-0 python3.9[139799]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-jumps.nft _original_basename=jump-chain.j2 recurse=False state=file path=/etc/nftables/edpm-jumps.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:10 compute-0 sudo[139797]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:11 compute-0 sudo[139949]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-skkzcvmknwozplnondubwtyqcecxvoqo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146390.7536986-1209-76907293576789/AnsiballZ_stat.py'
Oct 11 01:33:11 compute-0 sudo[139949]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:11 compute-0 python3.9[139951]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-update-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:33:11 compute-0 sudo[139949]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:11 compute-0 sudo[140027]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-firsofnchesoutcvbkgzozzzovgnnxcg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146390.7536986-1209-76907293576789/AnsiballZ_file.py'
Oct 11 01:33:11 compute-0 sudo[140027]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:12 compute-0 python3.9[140029]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-update-jumps.nft _original_basename=jump-chain.j2 recurse=False state=file path=/etc/nftables/edpm-update-jumps.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:12 compute-0 sudo[140027]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:12 compute-0 sudo[140179]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-naylztnegnhqjapwawqmhxngnipfmtvx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146392.3112566-1221-53693291317625/AnsiballZ_stat.py'
Oct 11 01:33:12 compute-0 sudo[140179]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:13 compute-0 python3.9[140181]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-flushes.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:33:13 compute-0 sudo[140179]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:13 compute-0 sudo[140257]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xrdfnqjtomfcpylyaqmwvvahoichepdi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146392.3112566-1221-53693291317625/AnsiballZ_file.py'
Oct 11 01:33:13 compute-0 sudo[140257]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:13 compute-0 python3.9[140259]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-flushes.nft _original_basename=flush-chain.j2 recurse=False state=file path=/etc/nftables/edpm-flushes.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:13 compute-0 sudo[140257]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:14 compute-0 sudo[140409]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bfveajatatjksujwexfnjwbsjojrpkoi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146393.8523178-1233-215590371606170/AnsiballZ_stat.py'
Oct 11 01:33:14 compute-0 sudo[140409]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:14 compute-0 python3.9[140411]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-chains.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:33:14 compute-0 sudo[140409]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:14 compute-0 sudo[140487]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xxymvwrsbkkertluxnwieoyfwajhgfkv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146393.8523178-1233-215590371606170/AnsiballZ_file.py'
Oct 11 01:33:14 compute-0 sudo[140487]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:15 compute-0 python3.9[140489]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-chains.nft _original_basename=chains.j2 recurse=False state=file path=/etc/nftables/edpm-chains.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:15 compute-0 sudo[140487]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:15 compute-0 sudo[140639]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uwciawshnsarkhvdeojrdgahkeehaiym ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146395.3546615-1245-154023790272966/AnsiballZ_stat.py'
Oct 11 01:33:15 compute-0 sudo[140639]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:16 compute-0 python3.9[140641]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-rules.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:33:16 compute-0 sudo[140639]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:16 compute-0 sudo[140764]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rtmcsjtpzxvyvcdcxeyclqxunmizakhk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146395.3546615-1245-154023790272966/AnsiballZ_copy.py'
Oct 11 01:33:16 compute-0 sudo[140764]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:16 compute-0 python3.9[140766]: ansible-ansible.legacy.copy Invoked with dest=/etc/nftables/edpm-rules.nft group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146395.3546615-1245-154023790272966/.source.nft follow=False _original_basename=ruleset.j2 checksum=ac3ce8ce2d33fa5fe0a79b0c811c97734ce43fa5 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:16 compute-0 sudo[140764]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:17 compute-0 sudo[140916]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-szdehybxflqcmjmfdtcboaxzbblqxssu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146397.1231434-1260-114597151824242/AnsiballZ_file.py'
Oct 11 01:33:17 compute-0 sudo[140916]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:17 compute-0 python3.9[140918]: ansible-ansible.builtin.file Invoked with group=root mode=0600 owner=root path=/etc/nftables/edpm-rules.nft.changed state=touch recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:17 compute-0 sudo[140916]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:18 compute-0 sudo[141068]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vruofuxwlxvakzefkhyhdeqovbguqgdh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146397.9759846-1268-38605979131073/AnsiballZ_command.py'
Oct 11 01:33:18 compute-0 sudo[141068]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:18 compute-0 python3.9[141070]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail; cat /etc/nftables/edpm-chains.nft /etc/nftables/edpm-flushes.nft /etc/nftables/edpm-rules.nft /etc/nftables/edpm-update-jumps.nft /etc/nftables/edpm-jumps.nft | nft -c -f - _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:33:18 compute-0 sudo[141068]: pam_unix(sudo:session): session closed for user root
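
[annotation] The pipeline at 01:33:18 concatenates the five generated fragments in load order (chains, flushes, rules, update-jumps, jumps) and feeds them to `nft -c -f -`, which parses and checks the combined ruleset without committing it. The dry run can be reproduced verbatim from the logged command:

    set -o pipefail
    cat /etc/nftables/edpm-chains.nft \
        /etc/nftables/edpm-flushes.nft \
        /etc/nftables/edpm-rules.nft \
        /etc/nftables/edpm-update-jumps.nft \
        /etc/nftables/edpm-jumps.nft | nft -c -f -   # -c: check only, do not apply
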
Oct 11 01:33:19 compute-0 sudo[141223]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-efqfyhezlhjndedetjtqqixhktmesapp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146398.8741848-1276-281301080849057/AnsiballZ_blockinfile.py'
Oct 11 01:33:19 compute-0 sudo[141223]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:19 compute-0 python3.9[141225]: ansible-ansible.builtin.blockinfile Invoked with backup=False block=include "/etc/nftables/iptables.nft"
                                             include "/etc/nftables/edpm-chains.nft"
                                             include "/etc/nftables/edpm-rules.nft"
                                             include "/etc/nftables/edpm-jumps.nft"
                                              path=/etc/sysconfig/nftables.conf validate=nft -c -f %s state=present marker=# {mark} ANSIBLE MANAGED BLOCK create=False marker_begin=BEGIN marker_end=END append_newline=False prepend_newline=False unsafe_writes=False insertafter=None insertbefore=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:19 compute-0 sudo[141223]: pam_unix(sudo:session): session closed for user root
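
[annotation] Given the blockinfile parameters logged above (marker "# {mark} ANSIBLE MANAGED BLOCK", marker_begin=BEGIN, marker_end=END, validated with `nft -c -f %s`), /etc/sysconfig/nftables.conf should end up containing a block of this shape; this is reconstructed from the invocation, not copied from the host:

    # BEGIN ANSIBLE MANAGED BLOCK
    include "/etc/nftables/iptables.nft"
    include "/etc/nftables/edpm-chains.nft"
    include "/etc/nftables/edpm-rules.nft"
    include "/etc/nftables/edpm-jumps.nft"
    # END ANSIBLE MANAGED BLOCK
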
Oct 11 01:33:20 compute-0 sudo[141375]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gfmpgqfedxsvrpheohxbmqbcaonwdtbf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146399.9655159-1285-48824038923301/AnsiballZ_command.py'
Oct 11 01:33:20 compute-0 sudo[141375]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:20 compute-0 python3.9[141377]: ansible-ansible.legacy.command Invoked with _raw_params=nft -f /etc/nftables/edpm-chains.nft _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:33:20 compute-0 sudo[141375]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:21 compute-0 sudo[141528]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pxmwcntaqkuirtnhvodedyuxpyuwpmby ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146400.8839116-1293-75111751369196/AnsiballZ_stat.py'
Oct 11 01:33:21 compute-0 sudo[141528]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:21 compute-0 python3.9[141530]: ansible-ansible.builtin.stat Invoked with path=/etc/nftables/edpm-rules.nft.changed follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:33:21 compute-0 sudo[141528]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:22 compute-0 sudo[141682]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ckvyvgfwnzkubexceamltajwskzglvxs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146401.7498856-1301-226189728581750/AnsiballZ_command.py'
Oct 11 01:33:22 compute-0 sudo[141682]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:22 compute-0 python3.9[141684]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail; cat /etc/nftables/edpm-flushes.nft /etc/nftables/edpm-rules.nft /etc/nftables/edpm-update-jumps.nft | nft -f - _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:33:22 compute-0 sudo[141682]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:22 compute-0 sudo[141837]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-frjybfmpcdautgwyzfjntpqewufjftts ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146402.6312082-1309-266960482777982/AnsiballZ_file.py'
Oct 11 01:33:23 compute-0 sudo[141837]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:23 compute-0 python3.9[141839]: ansible-ansible.builtin.file Invoked with path=/etc/nftables/edpm-rules.nft.changed state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:23 compute-0 sudo[141837]: pam_unix(sudo:session): session closed for user root
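
[annotation] Taken together, 01:33:17 through 01:33:23 implement a change flag: touch /etc/nftables/edpm-rules.nft.changed after the rules are copied, stat it, run the flush-and-reload pipe only because the flag exists, then remove the flag. The pattern compressed into shell; the conditional lives in the playbook's when: clause, which the log does not show:

    if [ -e /etc/nftables/edpm-rules.nft.changed ]; then
        cat /etc/nftables/edpm-flushes.nft \
            /etc/nftables/edpm-rules.nft \
            /etc/nftables/edpm-update-jumps.nft | nft -f -   # apply, not just check
        rm -f /etc/nftables/edpm-rules.nft.changed
    fi
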
Oct 11 01:33:23 compute-0 sudo[141989]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ccxrguifxrnuiosnxzcclwsqgzbizugw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146403.5105295-1317-42063762154086/AnsiballZ_stat.py'
Oct 11 01:33:23 compute-0 sudo[141989]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:24 compute-0 python3.9[141991]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/edpm_libvirt.target follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:33:24 compute-0 sudo[141989]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:24 compute-0 sudo[142112]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ifqzeygifiguxsrvxksprponkvpphazd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146403.5105295-1317-42063762154086/AnsiballZ_copy.py'
Oct 11 01:33:24 compute-0 sudo[142112]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:24 compute-0 python3.9[142114]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/edpm_libvirt.target mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146403.5105295-1317-42063762154086/.source.target follow=False _original_basename=edpm_libvirt.target checksum=13035a1aa0f414c677b14be9a5a363b6623d393c backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:24 compute-0 sudo[142112]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:25 compute-0 sudo[142264]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ziuseoczwxyansqnhswrgjdqzkxyswrr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146405.0536726-1332-244790050172785/AnsiballZ_stat.py'
Oct 11 01:33:25 compute-0 sudo[142264]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:25 compute-0 python3.9[142266]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/edpm_libvirt_guests.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:33:25 compute-0 sudo[142264]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:26 compute-0 sudo[142387]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vcemxuptxqyioxbylpgyqfcmtzyuhcdp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146405.0536726-1332-244790050172785/AnsiballZ_copy.py'
Oct 11 01:33:26 compute-0 sudo[142387]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:26 compute-0 python3.9[142389]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/edpm_libvirt_guests.service mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146405.0536726-1332-244790050172785/.source.service follow=False _original_basename=edpm_libvirt_guests.service checksum=db83430a42fc2ccfd6ed8b56ebf04f3dff9cd0cf backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:26 compute-0 sudo[142387]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:26 compute-0 sudo[142539]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-luoqnhjndrazbbyyovxtacagqazzbvap ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146406.4991739-1347-115609558459949/AnsiballZ_stat.py'
Oct 11 01:33:26 compute-0 sudo[142539]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:27 compute-0 python3.9[142541]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virt-guest-shutdown.target follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:33:27 compute-0 sudo[142539]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:27 compute-0 sudo[142662]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-okbjifiawwnisuwedbgnqyqtvxxbrqls ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146406.4991739-1347-115609558459949/AnsiballZ_copy.py'
Oct 11 01:33:27 compute-0 sudo[142662]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:27 compute-0 python3.9[142664]: ansible-ansible.legacy.copy Invoked with dest=/etc/systemd/system/virt-guest-shutdown.target mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146406.4991739-1347-115609558459949/.source.target follow=False _original_basename=virt-guest-shutdown.target checksum=49ca149619c596cbba877418629d2cf8f7b0f5cf backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:27 compute-0 sudo[142662]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:28 compute-0 sudo[142814]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zeocyqlgeuozjnlotgxhkfezvctpbicg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146408.0803328-1362-82219174986851/AnsiballZ_systemd.py'
Oct 11 01:33:28 compute-0 sudo[142814]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:28 compute-0 python3.9[142816]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=edpm_libvirt.target state=restarted daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:33:28 compute-0 systemd[1]: Reloading.
Oct 11 01:33:28 compute-0 systemd-rc-local-generator[142841]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:33:28 compute-0 systemd-sysv-generator[142845]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:33:29 compute-0 systemd[1]: Reached target edpm_libvirt.target.
Oct 11 01:33:29 compute-0 sudo[142814]: pam_unix(sudo:session): session closed for user root
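
[annotation] The ansible.builtin.systemd call (daemon_reload=True, enabled=True, state=restarted) is equivalent to the following systemctl sequence, which matches the Reloading and "Reached target" lines around it:

    systemctl daemon-reload
    systemctl enable edpm_libvirt.target
    systemctl restart edpm_libvirt.target
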
Oct 11 01:33:29 compute-0 sudo[143005]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vgmqlafljrlopewscgxuoddrkgzukqwg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146409.4284146-1370-143915455094998/AnsiballZ_systemd.py'
Oct 11 01:33:29 compute-0 sudo[143005]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:30 compute-0 python3.9[143007]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=edpm_libvirt_guests daemon_reexec=False scope=system no_block=False state=None force=None masked=None
Oct 11 01:33:30 compute-0 systemd[1]: Reloading.
Oct 11 01:33:30 compute-0 systemd-rc-local-generator[143034]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:33:30 compute-0 systemd-sysv-generator[143038]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:33:30 compute-0 systemd[1]: Reloading.
Oct 11 01:33:30 compute-0 systemd-sysv-generator[143075]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:33:30 compute-0 systemd-rc-local-generator[143072]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:33:30 compute-0 sudo[143005]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:31 compute-0 sshd-session[88974]: Connection closed by 192.168.122.30 port 39452
Oct 11 01:33:31 compute-0 sshd-session[88971]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:33:31 compute-0 systemd[1]: session-21.scope: Deactivated successfully.
Oct 11 01:33:31 compute-0 systemd[1]: session-21.scope: Consumed 4min 12.169s CPU time.
Oct 11 01:33:31 compute-0 systemd-logind[804]: Session 21 logged out. Waiting for processes to exit.
Oct 11 01:33:31 compute-0 systemd-logind[804]: Removed session 21.
Oct 11 01:33:31 compute-0 podman[143104]: 2025-10-11 01:33:31.593588311 +0000 UTC m=+0.178599078 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
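
[annotation] The periodic podman health_status events interleaved with the Ansible run come from the container's configured healthcheck ({'test': '/openstack/healthcheck'} in config_data). The same check can be triggered and read back manually; the inspect path varies by podman version, so both forms are shown:

    podman healthcheck run ovn_controller && echo healthy
    podman inspect --format '{{.State.Health.Status}}' ovn_controller      # podman 4.x
    # older podman releases: '{{.State.Healthcheck.Status}}'
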
Oct 11 01:33:37 compute-0 sshd-session[143127]: Accepted publickey for zuul from 192.168.122.30 port 37804 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:33:37 compute-0 systemd-logind[804]: New session 22 of user zuul.
Oct 11 01:33:37 compute-0 systemd[1]: Started Session 22 of User zuul.
Oct 11 01:33:37 compute-0 sshd-session[143127]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:33:38 compute-0 python3.9[143280]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:33:40 compute-0 sudo[143434]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jusehniblmhynriyllvaqcducshkdxdd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146419.3585427-36-220658074721956/AnsiballZ_systemd_service.py'
Oct 11 01:33:40 compute-0 sudo[143434]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:40 compute-0 python3.9[143436]: ansible-ansible.builtin.systemd_service Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 01:33:40 compute-0 systemd[1]: Reloading.
Oct 11 01:33:40 compute-0 systemd-rc-local-generator[143463]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:33:40 compute-0 systemd-sysv-generator[143468]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:33:40 compute-0 sudo[143434]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:42 compute-0 python3.9[143620]: ansible-ansible.builtin.service_facts Invoked
Oct 11 01:33:42 compute-0 network[143637]: You are using 'network' service provided by 'network-scripts', which are now deprecated.
Oct 11 01:33:42 compute-0 network[143638]: 'network-scripts' will be removed from distribution in near future.
Oct 11 01:33:42 compute-0 network[143639]: It is advised to switch to 'NetworkManager' instead for network management.
Oct 11 01:33:47 compute-0 sudo[143910]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rtewqbzllsiltwnyylhcrtbqvzyslodq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146427.0911047-55-67540684196561/AnsiballZ_systemd_service.py'
Oct 11 01:33:47 compute-0 sudo[143910]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:47 compute-0 python3.9[143912]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_ceilometer_agent_compute.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:33:47 compute-0 sudo[143910]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:48 compute-0 sudo[144063]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ldosasggxxijonfguxrqyasppfkrcnhm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146428.2064645-65-199794716945282/AnsiballZ_file.py'
Oct 11 01:33:48 compute-0 sudo[144063]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:49 compute-0 python3.9[144065]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_ceilometer_agent_compute.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:49 compute-0 sudo[144063]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:49 compute-0 sudo[144215]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xjnhoxofnwpjsbwemhnnmlksyoqysbzl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146429.262879-73-30920958169912/AnsiballZ_file.py'
Oct 11 01:33:49 compute-0 sudo[144215]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:49 compute-0 python3.9[144217]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_ceilometer_agent_compute.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:33:49 compute-0 sudo[144215]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:50 compute-0 sudo[144367]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vhvrollsqwiizkzbxjmkcgaylguxtifv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146430.1267412-82-124082044329739/AnsiballZ_command.py'
Oct 11 01:33:50 compute-0 sudo[144367]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:50 compute-0 python3.9[144369]: ansible-ansible.legacy.command Invoked with _raw_params=if systemctl is-active certmonger.service; then
                                               systemctl disable --now certmonger.service
                                               test -f /etc/systemd/system/certmonger.service || systemctl mask certmonger.service
                                             fi
                                              _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:33:50 compute-0 sudo[144367]: pam_unix(sudo:session): session closed for user root
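
[annotation] The certmonger snippet masks the service only when /etc/systemd/system/certmonger.service does not already exist, because `systemctl mask` claims exactly that path and would otherwise clobber a local unit override:

    # what 'systemctl mask certmonger.service' does under the hood
    ln -s /dev/null /etc/systemd/system/certmonger.service
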
Oct 11 01:33:51 compute-0 python3.9[144521]: ansible-ansible.builtin.find Invoked with file_type=any hidden=True paths=['/var/lib/certmonger/requests'] patterns=[] read_whole_file=False age_stamp=mtime recurse=False follow=False get_checksum=False checksum_algorithm=sha1 use_regex=False exact_mode=True excludes=None contains=None age=None size=None depth=None mode=None encoding=None limit=None
Oct 11 01:33:52 compute-0 sudo[144671]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wymaktrvwdisbkmgkiekwcgunjgvamta ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146432.2918968-100-12406206888930/AnsiballZ_systemd_service.py'
Oct 11 01:33:52 compute-0 sudo[144671]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:53 compute-0 python3.9[144673]: ansible-ansible.builtin.systemd_service Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 01:33:53 compute-0 systemd[1]: Reloading.
Oct 11 01:33:53 compute-0 systemd-rc-local-generator[144700]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:33:53 compute-0 systemd-sysv-generator[144705]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:33:53 compute-0 sudo[144671]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:54 compute-0 sudo[144858]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rvayfpnvldjepvspalgjdwtrlnbjcmki ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146433.7179213-108-10701702142310/AnsiballZ_command.py'
Oct 11 01:33:54 compute-0 sudo[144858]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:54 compute-0 python3.9[144860]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_ceilometer_agent_compute.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:33:54 compute-0 sudo[144858]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:55 compute-0 sudo[145011]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jwbyerybzxcvsvxshzhkvmbwxroowlhl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146434.7507892-117-162338955458970/AnsiballZ_file.py'
Oct 11 01:33:55 compute-0 sudo[145011]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:55 compute-0 python3.9[145013]: ansible-ansible.builtin.file Invoked with group=zuul mode=0750 owner=zuul path=/var/lib/openstack/config/telemetry recurse=True setype=container_file_t state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:33:55 compute-0 sudo[145011]: pam_unix(sudo:session): session closed for user root
Oct 11 01:33:56 compute-0 python3.9[145163]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:33:57 compute-0 python3.9[145315]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/ceilometer-host-specific.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:33:58 compute-0 python3.9[145436]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry/ceilometer-host-specific.conf mode=0644 setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760146436.614255-133-136695273134388/.source.conf follow=False _original_basename=ceilometer-host-specific.conf.j2 checksum=e86e0e43000ce9ccfe5aefbf8e8f2e3d15d05584 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:33:58 compute-0 sudo[145586]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-crubutqkvewjyrgrrulgejspuipsbioz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146438.4673984-148-139785352443235/AnsiballZ_group.py'
Oct 11 01:33:58 compute-0 sudo[145586]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:33:59 compute-0 python3.9[145588]: ansible-ansible.builtin.group Invoked with name=libvirt state=present force=False system=False local=False non_unique=False gid=None gid_min=None gid_max=None
Oct 11 01:33:59 compute-0 sudo[145586]: pam_unix(sudo:session): session closed for user root
Oct 11 01:34:00 compute-0 sudo[145738]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mmtlgiubxhnbxvmkvmrirnuvsufffyku ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146439.588298-159-129572402846902/AnsiballZ_getent.py'
Oct 11 01:34:00 compute-0 sudo[145738]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:34:00 compute-0 python3.9[145740]: ansible-ansible.builtin.getent Invoked with database=passwd key=ceilometer fail_key=True service=None split=None
Oct 11 01:34:00 compute-0 sudo[145738]: pam_unix(sudo:session): session closed for user root
Oct 11 01:34:01 compute-0 sudo[145891]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cyrmzuqomgjcppgvydabnrgqgtnnoipv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146440.6964226-167-100867521199723/AnsiballZ_group.py'
Oct 11 01:34:01 compute-0 sudo[145891]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:34:01 compute-0 python3.9[145893]: ansible-ansible.builtin.group Invoked with gid=42405 name=ceilometer state=present force=False system=False local=False non_unique=False gid_min=None gid_max=None
Oct 11 01:34:01 compute-0 groupadd[145894]: group added to /etc/group: name=ceilometer, GID=42405
Oct 11 01:34:01 compute-0 groupadd[145894]: group added to /etc/gshadow: name=ceilometer
Oct 11 01:34:01 compute-0 groupadd[145894]: new group: name=ceilometer, GID=42405
Oct 11 01:34:01 compute-0 sudo[145891]: pam_unix(sudo:session): session closed for user root
Oct 11 01:34:02 compute-0 podman[145976]: 2025-10-11 01:34:02.251104233 +0000 UTC m=+0.137010119 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_id=ovn_controller, container_name=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Oct 11 01:34:02 compute-0 sudo[146075]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ktanjhlochzpigzhpeyskidpszzptrfb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146441.7170627-175-240600125693095/AnsiballZ_user.py'
Oct 11 01:34:02 compute-0 sudo[146075]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:34:02 compute-0 python3.9[146077]: ansible-ansible.builtin.user Invoked with comment=ceilometer user group=ceilometer groups=['libvirt'] name=ceilometer shell=/sbin/nologin state=present uid=42405 non_unique=False force=False remove=False create_home=True system=False move_home=False append=False ssh_key_bits=0 ssh_key_type=rsa ssh_key_comment=ansible-generated on compute-0 update_password=always home=None password=NOT_LOGGING_PARAMETER login_class=None password_expire_max=None password_expire_min=None password_expire_warn=None hidden=None seuser=None skeleton=None generate_ssh_key=None ssh_key_file=None ssh_key_passphrase=NOT_LOGGING_PARAMETER expires=None password_lock=None local=None profile=None authorization=None role=None umask=None password_expire_account_disable=None uid_min=None uid_max=None
Oct 11 01:34:02 compute-0 useradd[146079]: new user: name=ceilometer, UID=42405, GID=42405, home=/home/ceilometer, shell=/sbin/nologin, from=/dev/pts/0
Oct 11 01:34:02 compute-0 useradd[146079]: add 'ceilometer' to group 'libvirt'
Oct 11 01:34:02 compute-0 useradd[146079]: add 'ceilometer' to shadow group 'libvirt'
Oct 11 01:34:02 compute-0 sudo[146075]: pam_unix(sudo:session): session closed for user root
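
[annotation] The group/getent/group/user sequence first checks for an existing passwd entry, then pins ceilometer to UID/GID 42405 (the Kolla convention) and adds it to libvirt so the polling agent can reach the hypervisor. The groupadd/useradd audit lines above correspond to:

    groupadd -g 42405 ceilometer
    useradd -u 42405 -g ceilometer -G libvirt -c 'ceilometer user' \
            -s /sbin/nologin ceilometer
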
Oct 11 01:34:04 compute-0 python3.9[146235]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/ceilometer.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:04 compute-0 python3.9[146356]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry/ceilometer.conf mode=0640 remote_src=False src=/home/zuul/.ansible/tmp/ansible-tmp-1760146443.5012286-201-199540036404992/.source.conf _original_basename=ceilometer.conf follow=False checksum=f74f01c63e6cdeca5458ef9aff2a1db5d6a4e4b9 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:05 compute-0 python3.9[146506]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/polling.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:06 compute-0 python3.9[146627]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry/polling.yaml mode=0640 remote_src=False src=/home/zuul/.ansible/tmp/ansible-tmp-1760146444.9390833-201-114218190950229/.source.yaml _original_basename=polling.yaml follow=False checksum=6c8680a286285f2e0ef9fa528ca754765e5ed0e5 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:06 compute-0 python3.9[146777]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/custom.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:07 compute-0 python3.9[146898]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry/custom.conf mode=0640 remote_src=False src=/home/zuul/.ansible/tmp/ansible-tmp-1760146446.3901038-201-146607597812394/.source.conf _original_basename=custom.conf follow=False checksum=838b8b0a7d7f72e55ab67d39f32e3cb3eca2139b backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:08 compute-0 python3.9[147048]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/certs/telemetry/default/tls.crt follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:34:09 compute-0 python3.9[147200]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/certs/telemetry/default/tls.key follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:34:10 compute-0 python3.9[147352]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:10 compute-0 python3.9[147473]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json mode=420 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146449.615528-260-12139150488514/.source.json follow=False _original_basename=ceilometer-agent-compute.json.j2 checksum=264d11e8d3809e7ef745878dce7edd46098e25b2 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
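
[annotation] mode=420 in this and the following copy invocations is not a typo: an unquoted mode: 0644 in YAML is parsed as the octal number 0644, i.e. decimal 420, and the module logs the raw integer. The file still ends up 0644:

    printf '%o\n' 420   # -> 644
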
Oct 11 01:34:11 compute-0 python3.9[147623]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/ceilometer-host-specific.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:12 compute-0 python3.9[147699]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry/ceilometer-host-specific.conf _original_basename=ceilometer-host-specific.conf.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/ceilometer-host-specific.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:13 compute-0 python3.9[147849]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/ceilometer_agent_compute.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:13 compute-0 python3.9[147970]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry/ceilometer_agent_compute.json mode=420 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146452.517354-260-216949715238125/.source.json follow=False _original_basename=ceilometer_agent_compute.json.j2 checksum=4096a0f5410f47dcaf8ab19e56a9d8e211effecd backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:14 compute-0 python3.9[148120]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:15 compute-0 python3.9[148241]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml mode=420 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146454.0115285-260-79246680733103/.source.yaml follow=False _original_basename=ceilometer_prom_exporter.yaml.j2 checksum=10157c879411ee6023e506dc85a343cedc52700f backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:16 compute-0 python3.9[148391]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/firewall.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:16 compute-0 python3.9[148512]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry/firewall.yaml mode=420 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146455.4468358-260-66578007744123/.source.yaml follow=False _original_basename=firewall.yaml.j2 checksum=d942d984493b214bda2913f753ff68cdcedff00e backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:17 compute-0 python3.9[148662]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/node_exporter.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:18 compute-0 python3.9[148783]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry/node_exporter.json mode=420 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146456.9457452-260-127700455983388/.source.json follow=False _original_basename=node_exporter.json.j2 checksum=6e4982940d2bfae88404914dfaf72552f6356d81 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:18 compute-0 python3.9[148933]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/node_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:19 compute-0 python3.9[149054]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry/node_exporter.yaml mode=420 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146458.352931-260-194775191121987/.source.yaml follow=False _original_basename=node_exporter.yaml.j2 checksum=81d906d3e1e8c4f8367276f5d3a67b80ca7e989e backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:20 compute-0 python3.9[149204]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/openstack_network_exporter.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:21 compute-0 python3.9[149325]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry/openstack_network_exporter.json mode=420 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146459.7777867-260-270637353051718/.source.json follow=False _original_basename=openstack_network_exporter.json.j2 checksum=d474f1e4c3dbd24762592c51cbe5311f0a037273 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:21 compute-0 python3.9[149475]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:22 compute-0 python3.9[149596]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml mode=420 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146461.327073-260-192079387903148/.source.yaml follow=False _original_basename=openstack_network_exporter.yaml.j2 checksum=2b6bd0891e609bf38a73282f42888052b750bed6 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:23 compute-0 python3.9[149746]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/podman_exporter.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:23 compute-0 python3.9[149867]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry/podman_exporter.json mode=420 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146462.7403681-260-218393615471682/.source.json follow=False _original_basename=podman_exporter.json.j2 checksum=e342121a88f67e2bae7ebc05d1e6d350470198a5 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:24 compute-0 python3.9[150017]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/podman_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:25 compute-0 python3.9[150140]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry/podman_exporter.yaml mode=420 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146464.1413004-260-131625187664791/.source.yaml follow=False _original_basename=podman_exporter.yaml.j2 checksum=7ccb5eca2ff1dc337c3f3ecbbff5245af7149c47 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
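[editor's note] The copy tasks above log mode=420 while the file tasks a few entries later log mode=0644; these are the same permission bits, printed in decimal versus octal depending on how the mode reached the module. A quick check in plain Python, using only the values visible in the log:

    # 420 (decimal) and 0644 (octal) are the same permission bits:
    assert 420 == 0o644
    print(oct(420))                       # -> 0o644
    import stat
    print(stat.filemode(0o100000 | 420))  # -> -rw-r--r--  (regular file)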
Oct 11 01:34:25 compute-0 unix_chkpwd[150165]: password check failed for user (root)
Oct 11 01:34:25 compute-0 sshd-session[150018]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.99  user=root
Oct 11 01:34:26 compute-0 python3.9[150291]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/node_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:26 compute-0 python3.9[150367]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/openstack/config/telemetry/node_exporter.yaml _original_basename=node_exporter.yaml.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/node_exporter.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:27 compute-0 sshd-session[150018]: Failed password for root from 193.46.255.99 port 12648 ssh2
Oct 11 01:34:27 compute-0 python3.9[150517]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/podman_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:28 compute-0 python3.9[150593]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/openstack/config/telemetry/podman_exporter.yaml _original_basename=podman_exporter.yaml.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/podman_exporter.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:28 compute-0 unix_chkpwd[150717]: password check failed for user (root)
Oct 11 01:34:29 compute-0 python3.9[150744]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:29 compute-0 python3.9[150820]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml _original_basename=ceilometer_prom_exporter.yaml.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:30 compute-0 sudo[150970]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xwtrocbbzjguhkkwgrbveakrjpvjyavn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146469.988057-449-145450535922055/AnsiballZ_file.py'
Oct 11 01:34:30 compute-0 sudo[150970]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:34:30 compute-0 python3.9[150972]: ansible-ansible.builtin.file Invoked with group=ceilometer mode=0644 owner=ceilometer path=/var/lib/openstack/certs/telemetry/default/tls.crt recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False state=None _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:30 compute-0 sudo[150970]: pam_unix(sudo:session): session closed for user root
Oct 11 01:34:31 compute-0 sudo[151122]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rjwexpfgdfxnivypoajckgshkyztwijq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146470.9117608-457-241741338457751/AnsiballZ_file.py'
Oct 11 01:34:31 compute-0 sudo[151122]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:34:31 compute-0 sshd-session[150018]: Failed password for root from 193.46.255.99 port 12648 ssh2
Oct 11 01:34:31 compute-0 python3.9[151124]: ansible-ansible.builtin.file Invoked with group=ceilometer mode=0644 owner=ceilometer path=/var/lib/openstack/certs/telemetry/default/tls.key recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False state=None _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:31 compute-0 sudo[151122]: pam_unix(sudo:session): session closed for user root
Oct 11 01:34:32 compute-0 unix_chkpwd[151222]: password check failed for user (root)
Oct 11 01:34:32 compute-0 sudo[151288]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vewrcotjrvxitwdhnzfellgmujrhtsxr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146471.9163713-465-189014941430125/AnsiballZ_file.py'
Oct 11 01:34:32 compute-0 sudo[151288]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:34:32 compute-0 podman[151249]: 2025-10-11 01:34:32.468930105 +0000 UTC m=+0.152211994 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:34:32 compute-0 python3.9[151296]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/openstack/healthchecks setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:34:32 compute-0 sudo[151288]: pam_unix(sudo:session): session closed for user root
Oct 11 01:34:33 compute-0 sudo[151453]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bofzyfslemkpdxmozltisoisifknyzec ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146472.9327445-473-32748892364022/AnsiballZ_systemd_service.py'
Oct 11 01:34:33 compute-0 sudo[151453]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:34:33 compute-0 python3.9[151455]: ansible-ansible.builtin.systemd_service Invoked with enabled=True name=podman.socket state=started daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:34:33 compute-0 systemd[1]: Reloading.
Oct 11 01:34:33 compute-0 systemd-rc-local-generator[151479]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:34:33 compute-0 systemd-sysv-generator[151484]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:34:33 compute-0 systemd[1]: Listening on Podman API Socket.
Oct 11 01:34:34 compute-0 sudo[151453]: pam_unix(sudo:session): session closed for user root
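[editor's note] With podman.socket active ("Listening on Podman API Socket" above), the Podman REST API is reachable over a unix socket. A minimal stdlib sketch for pinging it; the rootful socket path /run/podman/podman.sock and the Docker-compatible /_ping endpoint are the usual defaults, but verify both on your host:

    import http.client
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        """HTTPConnection that dials a unix socket instead of TCP."""
        def __init__(self, path):
            super().__init__("localhost")  # host is unused, required by the API
            self._path = path
        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self._path)

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/_ping")             # liveness check, body is "OK"
    resp = conn.getresponse()
    print(resp.status, resp.read().decode())  # expect: 200 OK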
Oct 11 01:34:34 compute-0 sshd-session[150018]: Failed password for root from 193.46.255.99 port 12648 ssh2
Oct 11 01:34:34 compute-0 sudo[151644]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-djrmkvqqulphwbpfrsetjzalckdshccd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146474.3584676-482-226780723418665/AnsiballZ_stat.py'
Oct 11 01:34:34 compute-0 sudo[151644]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:34:34 compute-0 python3.9[151646]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/ceilometer_agent_compute/healthcheck follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:34 compute-0 sudo[151644]: pam_unix(sudo:session): session closed for user root
Oct 11 01:34:35 compute-0 sshd-session[150018]: Received disconnect from 193.46.255.99 port 12648:11:  [preauth]
Oct 11 01:34:35 compute-0 sshd-session[150018]: Disconnected from authenticating user root 193.46.255.99 port 12648 [preauth]
Oct 11 01:34:35 compute-0 sshd-session[150018]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.99  user=root
Oct 11 01:34:35 compute-0 sudo[151769]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-unhoyjmkzpjjlvvueioacyschzjxrvsq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146474.3584676-482-226780723418665/AnsiballZ_copy.py'
Oct 11 01:34:35 compute-0 sudo[151769]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:34:35 compute-0 python3.9[151771]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/healthchecks/ceilometer_agent_compute/ group=zuul mode=0700 owner=zuul setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760146474.3584676-482-226780723418665/.source _original_basename=healthcheck follow=False checksum=ebb343c21fce35a02591a9351660cb7035a47d42 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:34:35 compute-0 sudo[151769]: pam_unix(sudo:session): session closed for user root
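[editor's note] The stat/copy pairs throughout this run are Ansible's idempotency check: the stat computes a sha1 of the destination, and the copy only rewrites the file when it differs from the source checksum (e.g. ebb343c2... above). The equivalent comparison, sketched in plain Python with hypothetical paths standing in for the task's src/dest:

    import hashlib

    def sha1sum(path):
        """Hex sha1 of a file, streamed in chunks like Ansible's checksum."""
        h = hashlib.sha1()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(65536), b""):
                h.update(chunk)
        return h.hexdigest()

    src = "/home/zuul/.ansible/tmp/.source"   # stand-in path
    dest = "/var/lib/openstack/healthchecks/ceilometer_agent_compute/healthcheck"
    if sha1sum(src) != sha1sum(dest):
        print("changed: copy required")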
Oct 11 01:34:35 compute-0 sudo[151845]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fdrstwwodiqhbtvtaddokhwbuuxmtrnr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146474.3584676-482-226780723418665/AnsiballZ_stat.py'
Oct 11 01:34:35 compute-0 sudo[151845]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:34:36 compute-0 unix_chkpwd[151848]: password check failed for user (root)
Oct 11 01:34:36 compute-0 sshd-session[151754]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.99  user=root
Oct 11 01:34:36 compute-0 python3.9[151847]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/ceilometer_agent_compute/healthcheck.future follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:34:36 compute-0 sudo[151845]: pam_unix(sudo:session): session closed for user root
Oct 11 01:34:36 compute-0 sudo[151969]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oipthfgvovgtlnceoruwrpzjzxlcyedy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146474.3584676-482-226780723418665/AnsiballZ_copy.py'
Oct 11 01:34:36 compute-0 sudo[151969]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:34:36 compute-0 python3.9[151971]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/healthchecks/ceilometer_agent_compute/ group=zuul mode=0700 owner=zuul setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760146474.3584676-482-226780723418665/.source.future _original_basename=healthcheck.future follow=False checksum=d500a98192f4ddd70b4dfdc059e2d81aed36a294 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:34:36 compute-0 sudo[151969]: pam_unix(sudo:session): session closed for user root
Oct 11 01:34:37 compute-0 sshd-session[151754]: Failed password for root from 193.46.255.99 port 40078 ssh2
Oct 11 01:34:37 compute-0 sudo[152121]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ejtsuhpzwwpomnozcmatlvmjozutzsjg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146477.3165133-510-207303781818313/AnsiballZ_container_config_data.py'
Oct 11 01:34:37 compute-0 sudo[152121]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:34:38 compute-0 python3.9[152123]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/openstack/config/telemetry config_pattern=ceilometer_agent_compute.json debug=False
Oct 11 01:34:38 compute-0 sudo[152121]: pam_unix(sudo:session): session closed for user root
Oct 11 01:34:38 compute-0 sudo[152273]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yvvfwkjshcuonzvbaplhwnzwsylxtmdq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146478.3632464-519-275036492558518/AnsiballZ_container_config_hash.py'
Oct 11 01:34:38 compute-0 sudo[152273]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:34:39 compute-0 python3.9[152275]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
Oct 11 01:34:39 compute-0 sudo[152273]: pam_unix(sudo:session): session closed for user root
Oct 11 01:34:39 compute-0 unix_chkpwd[152300]: password check failed for user (root)
Oct 11 01:34:40 compute-0 sudo[152426]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-djgghzmpkvrmbwxsroimllurzwjwayhb ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760146479.5649345-529-14381751320218/AnsiballZ_edpm_container_manage.py'
Oct 11 01:34:40 compute-0 sudo[152426]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:34:40 compute-0 python3[152428]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/openstack/config/telemetry config_id=edpm config_overrides={} config_patterns=ceilometer_agent_compute.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 01:34:41 compute-0 sshd-session[151754]: Failed password for root from 193.46.255.99 port 40078 ssh2
Oct 11 01:34:42 compute-0 unix_chkpwd[152479]: password check failed for user (root)
Oct 11 01:34:44 compute-0 sshd-session[151754]: Failed password for root from 193.46.255.99 port 40078 ssh2
Oct 11 01:34:45 compute-0 sshd-session[151754]: Received disconnect from 193.46.255.99 port 40078:11:  [preauth]
Oct 11 01:34:45 compute-0 sshd-session[151754]: Disconnected from authenticating user root 193.46.255.99 port 40078 [preauth]
Oct 11 01:34:45 compute-0 sshd-session[151754]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.99  user=root
Oct 11 01:34:46 compute-0 unix_chkpwd[152482]: password check failed for user (root)
Oct 11 01:34:46 compute-0 sshd-session[152480]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.99  user=root
Oct 11 01:34:49 compute-0 sshd-session[152480]: Failed password for root from 193.46.255.99 port 62556 ssh2
Oct 11 01:34:49 compute-0 unix_chkpwd[152499]: password check failed for user (root)
Oct 11 01:34:52 compute-0 sshd-session[152480]: Failed password for root from 193.46.255.99 port 62556 ssh2
Oct 11 01:34:53 compute-0 unix_chkpwd[152532]: password check failed for user (root)
Oct 11 01:34:54 compute-0 systemd[1]: virtnodedevd.service: Deactivated successfully.
Oct 11 01:34:55 compute-0 systemd[1]: virtproxyd.service: Deactivated successfully.
Oct 11 01:34:55 compute-0 sshd-session[152480]: Failed password for root from 193.46.255.99 port 62556 ssh2
Oct 11 01:34:55 compute-0 podman[152441]: 2025-10-11 01:34:55.978926514 +0000 UTC m=+15.400096654 image pull 38f935dbe54986887092542d996d7929ffbcdc27e83d1ca11ffb47197c7c2f87 quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested
Oct 11 01:34:56 compute-0 podman[152591]: 2025-10-11 01:34:56.184783159 +0000 UTC m=+0.059327281 container create c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.4, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 10 Base Image)
Oct 11 01:34:56 compute-0 podman[152591]: 2025-10-11 01:34:56.15576114 +0000 UTC m=+0.030305262 image pull 38f935dbe54986887092542d996d7929ffbcdc27e83d1ca11ffb47197c7c2f87 quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested
Oct 11 01:34:56 compute-0 python3[152428]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman create --name ceilometer_agent_compute --conmon-pidfile /run/ceilometer_agent_compute.pid --env KOLLA_CONFIG_STRATEGY=COPY_ALWAYS --env OS_ENDPOINT_TYPE=internal --healthcheck-command /openstack/healthcheck compute --label config_id=edpm --label container_name=ceilometer_agent_compute --label managed_by=edpm_ansible --label config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']} --log-driver journald --log-level info --network host --security-opt label:type:ceilometer_polling_t --user ceilometer --volume /var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z --volume /var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z --volume /run/libvirt:/run/libvirt:shared,ro --volume /etc/hosts:/etc/hosts:ro --volume /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro --volume /etc/localtime:/etc/localtime:ro --volume /etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro --volume /var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z --volume /var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z --volume /var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z --volume /dev/log:/dev/log --volume /var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested kolla_start
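[editor's note] The PODMAN-CONTAINER-DEBUG line above shows how edpm_container_manage flattens the config_data dict into a podman create command (environment -> --env, volumes -> --volume, net -> --network, and so on). A simplified reconstruction of that mapping, using only the keys visible in this log; the real module handles many more options:

    def podman_create_argv(name, cfg):
        """Rough sketch: map an edpm config_data dict to podman create args."""
        argv = ["podman", "create", "--name", name]
        for key, val in cfg.get("environment", {}).items():
            argv += ["--env", f"{key}={val}"]
        if "healthcheck" in cfg:
            argv += ["--healthcheck-command", cfg["healthcheck"]["test"]]
        if cfg.get("net"):
            argv += ["--network", cfg["net"]]
        if cfg.get("security_opt"):
            argv += ["--security-opt", cfg["security_opt"]]
        if cfg.get("user"):
            argv += ["--user", cfg["user"]]
        for vol in cfg.get("volumes", []):
            argv += ["--volume", vol]
        argv.append(cfg["image"])
        if cfg.get("command"):
            argv.append(cfg["command"])
        return argv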
Oct 11 01:34:56 compute-0 sshd-session[152480]: Received disconnect from 193.46.255.99 port 62556:11:  [preauth]
Oct 11 01:34:56 compute-0 sshd-session[152480]: Disconnected from authenticating user root 193.46.255.99 port 62556 [preauth]
Oct 11 01:34:56 compute-0 sshd-session[152480]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.99  user=root
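[editor's note] Interleaved with the deployment, sshd-session and unix_chkpwd record a password-guessing run against root from 193.46.255.99: three attempts per connection, then a disconnect, across ports 12648, 40078 and 62556. A small tally sketch for such failures per source address; the regex matches the exact line format above, and /var/log/secure is the usual RHEL location:

    import re
    from collections import Counter

    FAILED = re.compile(r"Failed password for .* from (\S+) port \d+ ssh2")

    def tally(lines):
        """Count 'Failed password' events per remote host."""
        hits = Counter()
        for line in lines:
            m = FAILED.search(line)
            if m:
                hits[m.group(1)] += 1
        return hits

    # For this section: tally(...) -> Counter({'193.46.255.99': 9})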
Oct 11 01:34:56 compute-0 sudo[152426]: pam_unix(sudo:session): session closed for user root
Oct 11 01:34:56 compute-0 systemd[1]: virtqemud.service: Deactivated successfully.
Oct 11 01:34:57 compute-0 sudo[152778]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aowjotloterzpfylqqanpggmglwtkmxu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146496.663413-537-280128274883269/AnsiballZ_stat.py'
Oct 11 01:34:57 compute-0 sudo[152778]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:34:57 compute-0 python3.9[152780]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:34:57 compute-0 sudo[152778]: pam_unix(sudo:session): session closed for user root
Oct 11 01:34:57 compute-0 sudo[152932]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bavqtwdljypcxajsurjwyusqljeywfvm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146497.6008987-546-169473599948456/AnsiballZ_file.py'
Oct 11 01:34:57 compute-0 sudo[152932]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:34:58 compute-0 python3.9[152934]: ansible-file Invoked with path=/etc/systemd/system/edpm_ceilometer_agent_compute.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:58 compute-0 sudo[152932]: pam_unix(sudo:session): session closed for user root
Oct 11 01:34:58 compute-0 systemd[1]: virtsecretd.service: Deactivated successfully.
Oct 11 01:34:59 compute-0 sudo[153084]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-paizieijwtbxyprgsggjohgylwmlgxqe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146498.2426589-546-145839932110682/AnsiballZ_copy.py'
Oct 11 01:34:59 compute-0 sudo[153084]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:34:59 compute-0 python3.9[153086]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760146498.2426589-546-145839932110682/source dest=/etc/systemd/system/edpm_ceilometer_agent_compute.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:34:59 compute-0 sudo[153084]: pam_unix(sudo:session): session closed for user root
Oct 11 01:34:59 compute-0 sudo[153160]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dlnhmkjexlxyqiymyfvxogqyaycljems ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146498.2426589-546-145839932110682/AnsiballZ_systemd.py'
Oct 11 01:34:59 compute-0 sudo[153160]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:00 compute-0 python3.9[153162]: ansible-systemd Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 01:35:00 compute-0 systemd[1]: Reloading.
Oct 11 01:35:00 compute-0 systemd-rc-local-generator[153187]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:35:00 compute-0 systemd-sysv-generator[153190]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:35:00 compute-0 sudo[153160]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:01 compute-0 sudo[153271]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qnpainrkzqzqpmbjzpficstnukfuyhdk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146498.2426589-546-145839932110682/AnsiballZ_systemd.py'
Oct 11 01:35:01 compute-0 sudo[153271]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:01 compute-0 python3.9[153273]: ansible-systemd Invoked with state=restarted name=edpm_ceilometer_agent_compute.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:35:01 compute-0 systemd[1]: Reloading.
Oct 11 01:35:01 compute-0 systemd-sysv-generator[153305]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:35:01 compute-0 systemd-rc-local-generator[153302]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:35:01 compute-0 systemd[1]: Starting ceilometer_agent_compute container...
Oct 11 01:35:01 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:35:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b3d18363ce40705c64403cf6057216716d6bebcedd4dc52be32b80ac0420f1aa/merged/etc/ceilometer/ceilometer_prom_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b3d18363ce40705c64403cf6057216716d6bebcedd4dc52be32b80ac0420f1aa/merged/etc/ceilometer/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b3d18363ce40705c64403cf6057216716d6bebcedd4dc52be32b80ac0420f1aa/merged/var/lib/openstack/config supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b3d18363ce40705c64403cf6057216716d6bebcedd4dc52be32b80ac0420f1aa/merged/var/lib/kolla/config_files/config.json supports timestamps until 2038 (0x7fffffff)
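[editor's note] The xfs warnings above flag bind-mount targets on a filesystem whose inode timestamps are 32-bit: 0x7fffffff seconds after the epoch is the classic Y2038 limit. The cutoff, computed directly:

    from datetime import datetime, timezone

    limit = 0x7fffffff  # 2147483647 seconds
    print(datetime.fromtimestamp(limit, tz=timezone.utc))
    # -> 2038-01-19 03:14:07+00:00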
Oct 11 01:35:01 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.
Oct 11 01:35:02 compute-0 podman[153312]: 2025-10-11 01:35:02.004917959 +0000 UTC m=+0.206921977 container init c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true)
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: + sudo -E kolla_set_configs
Oct 11 01:35:02 compute-0 podman[153312]: 2025-10-11 01:35:02.035073445 +0000 UTC m=+0.237077413 container start c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, io.buildah.version=1.41.4, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.build-date=20251007, org.label-schema.vendor=CentOS)
Oct 11 01:35:02 compute-0 podman[153312]: ceilometer_agent_compute
Oct 11 01:35:02 compute-0 systemd[1]: Started ceilometer_agent_compute container.
Oct 11 01:35:02 compute-0 sudo[153331]: ceilometer : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_set_configs
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: sudo: unable to send audit message: Operation not permitted
Oct 11 01:35:02 compute-0 sudo[153331]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=42405)
Oct 11 01:35:02 compute-0 sudo[153271]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: INFO:__main__:Loading config file at /var/lib/kolla/config_files/config.json
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: INFO:__main__:Validating config file
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: INFO:__main__:Kolla config strategy set to: COPY_ALWAYS
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: INFO:__main__:Copying service configuration files
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: INFO:__main__:Deleting /etc/ceilometer/ceilometer.conf
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: INFO:__main__:Copying /var/lib/openstack/config/ceilometer.conf to /etc/ceilometer/ceilometer.conf
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: INFO:__main__:Deleting /etc/ceilometer/polling.yaml
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: INFO:__main__:Copying /var/lib/openstack/config/polling.yaml to /etc/ceilometer/polling.yaml
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: INFO:__main__:Setting permission for /etc/ceilometer/polling.yaml
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: INFO:__main__:Copying /var/lib/openstack/config/custom.conf to /etc/ceilometer/ceilometer.conf.d/01-ceilometer-custom.conf
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf.d/01-ceilometer-custom.conf
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: INFO:__main__:Copying /var/lib/openstack/config/ceilometer-host-specific.conf to /etc/ceilometer/ceilometer.conf.d/02-ceilometer-host-specific.conf
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf.d/02-ceilometer-host-specific.conf
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: INFO:__main__:Writing out command to execute
Oct 11 01:35:02 compute-0 sudo[153331]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: ++ cat /run_command
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: + CMD='/usr/bin/ceilometer-polling --polling-namespaces compute --logfile /dev/stdout'
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: + ARGS=
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: + sudo kolla_copy_cacerts
Oct 11 01:35:02 compute-0 podman[153332]: 2025-10-11 01:35:02.162560874 +0000 UTC m=+0.107155353 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=starting, health_failing_streak=1, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute)
Oct 11 01:35:02 compute-0 sudo[153360]: ceilometer : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_copy_cacerts
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: sudo: unable to send audit message: Operation not permitted
Oct 11 01:35:02 compute-0 sudo[153360]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=42405)
Oct 11 01:35:02 compute-0 systemd[1]: c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6-2f465ccaa4da2a95.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 01:35:02 compute-0 systemd[1]: c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6-2f465ccaa4da2a95.service: Failed with result 'exit-code'.
Oct 11 01:35:02 compute-0 sudo[153360]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: + [[ ! -n '' ]]
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: + . kolla_extend_start
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: Running command: '/usr/bin/ceilometer-polling --polling-namespaces compute --logfile /dev/stdout'
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: + echo 'Running command: '\''/usr/bin/ceilometer-polling --polling-namespaces compute --logfile /dev/stdout'\'''
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: + umask 0022
Oct 11 01:35:02 compute-0 ceilometer_agent_compute[153325]: + exec /usr/bin/ceilometer-polling --polling-namespaces compute --logfile /dev/stdout
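[editor's note] The kolla_set_configs trace above is driven by the config.json bind-mounted at /var/lib/kolla/config_files/config.json: it names the command that gets written to /run_command and the config files to copy with their ownership and permissions. A plausible shape for this container's file, reconstructed from the copy operations in the log and expressed as a Python dict; the owner/perm values are assumptions, not taken from the log:

    config_json = {
        "command": "/usr/bin/ceilometer-polling --polling-namespaces compute "
                   "--logfile /dev/stdout",
        "config_files": [
            {"source": "/var/lib/openstack/config/ceilometer.conf",
             "dest": "/etc/ceilometer/ceilometer.conf",
             "owner": "ceilometer", "perm": "0600"},   # assumed owner/perm
            {"source": "/var/lib/openstack/config/polling.yaml",
             "dest": "/etc/ceilometer/polling.yaml",
             "owner": "ceilometer", "perm": "0600"},   # assumed owner/perm
        ],
    }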
Oct 11 01:35:02 compute-0 sudo[153522]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tlewqicsnrebqeskdurfrqohmdktmtcv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146502.398102-570-226429729247865/AnsiballZ_systemd.py'
Oct 11 01:35:02 compute-0 sudo[153522]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:02 compute-0 podman[153479]: 2025-10-11 01:35:02.883633983 +0000 UTC m=+0.132801682 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller)
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.074 2 DEBUG cotyledon.oslo_config_glue [-] Full set of CONF: _load_service_manager_options /usr/lib/python3.12/site-packages/cotyledon/oslo_config_glue.py:45
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.074 2 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2804
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.074 2 DEBUG cotyledon.oslo_config_glue [-] Configuration options gathered from: log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2805
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.074 2 DEBUG cotyledon.oslo_config_glue [-] command line args: ['--polling-namespaces', 'compute', '--logfile', '/dev/stdout'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2806
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.075 2 DEBUG cotyledon.oslo_config_glue [-] config files: ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2807
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.076 2 DEBUG cotyledon.oslo_config_glue [-] ================================================================================ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2809
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.076 2 DEBUG cotyledon.oslo_config_glue [-] batch_size                     = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.076 2 DEBUG cotyledon.oslo_config_glue [-] cfg_file                       = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.076 2 DEBUG cotyledon.oslo_config_glue [-] config_dir                     = ['/etc/ceilometer/ceilometer.conf.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.076 2 DEBUG cotyledon.oslo_config_glue [-] config_file                    = ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.076 2 DEBUG cotyledon.oslo_config_glue [-] config_source                  = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.077 2 DEBUG cotyledon.oslo_config_glue [-] debug                          = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.077 2 DEBUG cotyledon.oslo_config_glue [-] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'futurist=INFO', 'neutronclient=INFO', 'keystoneclient=INFO'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.077 2 DEBUG cotyledon.oslo_config_glue [-] enable_notifications           = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.077 2 DEBUG cotyledon.oslo_config_glue [-] enable_prometheus_exporter     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.077 2 DEBUG cotyledon.oslo_config_glue [-] event_pipeline_cfg_file        = event_pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.078 2 DEBUG cotyledon.oslo_config_glue [-] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.078 2 DEBUG cotyledon.oslo_config_glue [-] heartbeat_socket_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.078 2 DEBUG cotyledon.oslo_config_glue [-] host                           = compute-0.ctlplane.example.com log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.078 2 DEBUG cotyledon.oslo_config_glue [-] http_timeout                   = 600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.078 2 DEBUG cotyledon.oslo_config_glue [-] hypervisor_inspector           = libvirt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.079 2 WARNING oslo_config.cfg [-] Deprecated: Option "tenant_name_discovery" from group "DEFAULT" is deprecated. Use option "identity_name_discovery" from group "DEFAULT".
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.079 2 DEBUG cotyledon.oslo_config_glue [-] identity_name_discovery        = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.079 2 DEBUG cotyledon.oslo_config_glue [-] ignore_disabled_projects       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.079 2 DEBUG cotyledon.oslo_config_glue [-] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.079 2 DEBUG cotyledon.oslo_config_glue [-] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.080 2 DEBUG cotyledon.oslo_config_glue [-] libvirt_type                   = kvm log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.080 2 DEBUG cotyledon.oslo_config_glue [-] libvirt_uri                    =  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.080 2 DEBUG cotyledon.oslo_config_glue [-] log_color                      = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.080 2 DEBUG cotyledon.oslo_config_glue [-] log_config_append              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.080 2 DEBUG cotyledon.oslo_config_glue [-] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.080 2 DEBUG cotyledon.oslo_config_glue [-] log_dir                        = /var/log/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.081 2 DEBUG cotyledon.oslo_config_glue [-] log_file                       = /dev/stdout log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.081 2 DEBUG cotyledon.oslo_config_glue [-] log_options                    = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.081 2 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.081 2 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.081 2 DEBUG cotyledon.oslo_config_glue [-] log_rotation_type              = none log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.081 2 DEBUG cotyledon.oslo_config_glue [-] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.081 2 DEBUG cotyledon.oslo_config_glue [-] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.082 2 DEBUG cotyledon.oslo_config_glue [-] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.082 2 DEBUG cotyledon.oslo_config_glue [-] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.082 2 DEBUG cotyledon.oslo_config_glue [-] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.082 2 DEBUG cotyledon.oslo_config_glue [-] max_logfile_count              = 30 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.082 2 DEBUG cotyledon.oslo_config_glue [-] max_logfile_size_mb            = 200 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.082 2 DEBUG cotyledon.oslo_config_glue [-] max_parallel_requests          = 64 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.083 2 DEBUG cotyledon.oslo_config_glue [-] partitioning_group_prefix      = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.083 2 DEBUG cotyledon.oslo_config_glue [-] pipeline_cfg_file              = pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.083 2 DEBUG cotyledon.oslo_config_glue [-] polling_namespaces             = ['compute'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.083 2 DEBUG cotyledon.oslo_config_glue [-] pollsters_definitions_dirs     = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.083 2 DEBUG cotyledon.oslo_config_glue [-] prometheus_listen_addresses    = ['127.0.0.1:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.083 2 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_certfile        = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.084 2 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_enable          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.084 2 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_keyfile         = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.084 2 DEBUG cotyledon.oslo_config_glue [-] publish_errors                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.084 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.084 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.084 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.085 2 DEBUG cotyledon.oslo_config_glue [-] reseller_prefix                = AUTH_ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.085 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_keys         = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.085 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_length       = 256 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.085 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_namespace    = ['metering.'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.085 2 DEBUG cotyledon.oslo_config_glue [-] rootwrap_config                = /etc/ceilometer/rootwrap.conf log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.085 2 DEBUG cotyledon.oslo_config_glue [-] sample_source                  = openstack log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.086 2 DEBUG cotyledon.oslo_config_glue [-] shell_completion               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.086 2 DEBUG cotyledon.oslo_config_glue [-] syslog_log_facility            = LOG_USER log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.086 2 DEBUG cotyledon.oslo_config_glue [-] threads_to_process_pollsters   = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.086 2 DEBUG cotyledon.oslo_config_glue [-] use_journal                    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.086 2 DEBUG cotyledon.oslo_config_glue [-] use_json                       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.086 2 DEBUG cotyledon.oslo_config_glue [-] use_stderr                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.087 2 DEBUG cotyledon.oslo_config_glue [-] use_syslog                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.087 2 DEBUG cotyledon.oslo_config_glue [-] watch_log_file                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.087 2 DEBUG cotyledon.oslo_config_glue [-] compute.fetch_extra_metadata   = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.087 2 DEBUG cotyledon.oslo_config_glue [-] compute.instance_discovery_method = libvirt_metadata log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.088 2 DEBUG cotyledon.oslo_config_glue [-] compute.resource_cache_expiry  = 3600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.088 2 DEBUG cotyledon.oslo_config_glue [-] compute.resource_update_interval = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.088 2 DEBUG cotyledon.oslo_config_glue [-] coordination.backend_url       = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.088 2 DEBUG cotyledon.oslo_config_glue [-] event.definitions_cfg_file     = event_definitions.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.088 2 DEBUG cotyledon.oslo_config_glue [-] event.drop_unmatched_notifications = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.088 2 DEBUG cotyledon.oslo_config_glue [-] event.store_raw                = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.089 2 DEBUG cotyledon.oslo_config_glue [-] ipmi.polling_retry             = 3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.089 2 DEBUG cotyledon.oslo_config_glue [-] meter.meter_definitions_dirs   = ['/etc/ceilometer/meters.d', '/usr/lib/python3.12/site-packages/ceilometer/data/meters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.089 2 DEBUG cotyledon.oslo_config_glue [-] notification.ack_on_event_error = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.089 2 DEBUG cotyledon.oslo_config_glue [-] notification.batch_size        = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.089 2 DEBUG cotyledon.oslo_config_glue [-] notification.batch_timeout     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.090 2 DEBUG cotyledon.oslo_config_glue [-] notification.messaging_urls    = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.090 2 DEBUG cotyledon.oslo_config_glue [-] notification.notification_control_exchanges = ['nova', 'glance', 'neutron', 'cinder', 'heat', 'keystone', 'trove', 'zaqar', 'swift', 'ceilometer', 'magnum', 'dns', 'ironic', 'aodh'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.090 2 DEBUG cotyledon.oslo_config_glue [-] notification.pipelines         = ['meter', 'event'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.090 2 DEBUG cotyledon.oslo_config_glue [-] notification.workers           = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.090 2 DEBUG cotyledon.oslo_config_glue [-] polling.batch_size             = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.090 2 DEBUG cotyledon.oslo_config_glue [-] polling.cfg_file               = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.091 2 DEBUG cotyledon.oslo_config_glue [-] polling.enable_notifications   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.091 2 DEBUG cotyledon.oslo_config_glue [-] polling.enable_prometheus_exporter = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.091 2 DEBUG cotyledon.oslo_config_glue [-] polling.heartbeat_socket_dir   = /var/lib/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.091 2 DEBUG cotyledon.oslo_config_glue [-] polling.identity_name_discovery = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.091 2 DEBUG cotyledon.oslo_config_glue [-] polling.ignore_disabled_projects = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.092 2 DEBUG cotyledon.oslo_config_glue [-] polling.partitioning_group_prefix = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.092 2 DEBUG cotyledon.oslo_config_glue [-] polling.pollsters_definitions_dirs = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.092 2 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_listen_addresses = ['[::]:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.092 2 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_certfile = /etc/ceilometer/tls/tls.crt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.092 2 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_enable  = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.092 2 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_keyfile = /etc/ceilometer/tls/tls.key log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.092 2 DEBUG cotyledon.oslo_config_glue [-] polling.threads_to_process_pollsters = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.092 2 DEBUG cotyledon.oslo_config_glue [-] publisher.telemetry_secret     = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.092 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.event_topic = event log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.092 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.metering_topic = metering log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.092 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.telemetry_driver = messagingv2 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.092 2 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.access_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.092 2 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.secret_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.093 2 DEBUG cotyledon.oslo_config_glue [-] rgw_client.implicit_tenants    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.093 2 DEBUG cotyledon.oslo_config_glue [-] service_types.aodh             = alarming log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.093 2 DEBUG cotyledon.oslo_config_glue [-] service_types.cinder           = volumev3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.093 2 DEBUG cotyledon.oslo_config_glue [-] service_types.glance           = image log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.093 2 DEBUG cotyledon.oslo_config_glue [-] service_types.neutron          = network log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.093 2 DEBUG cotyledon.oslo_config_glue [-] service_types.nova             = compute log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.093 2 DEBUG cotyledon.oslo_config_glue [-] service_types.radosgw          = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.093 2 DEBUG cotyledon.oslo_config_glue [-] service_types.swift            = object-store log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.093 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_section = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.093 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_type  = password log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.093 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.cafile     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.093 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.certfile   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.093 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.collect_timing = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.093 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.insecure   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.094 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.interface  = internalURL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.094 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.keyfile    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.094 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.region_name = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.094 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.split_loggers = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.094 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.timeout    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.094 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_section           = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.094 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_type              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.094 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.cafile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.094 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.certfile               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.094 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.collect_timing         = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.094 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.insecure               = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.094 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.interface              = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.094 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.keyfile                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.095 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.region_name            = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.095 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.split_loggers          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.095 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.timeout                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.095 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_section             = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.095 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_type                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.095 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.cafile                   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.095 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.certfile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.095 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.collect_timing           = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.095 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.insecure                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.095 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.interface                = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.095 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.keyfile                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.096 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.region_name              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.096 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.split_loggers            = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.096 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.timeout                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.096 2 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.096 2 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler_interval = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.096 2 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.log_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.096 2 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2828
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.119 12 INFO ceilometer.polling.manager [-] Starting heartbeat child service. Listening on /var/lib/ceilometer/ceilometer-compute.socket
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.120 12 DEBUG cotyledon.oslo_config_glue [-] Full set of CONF: _load_service_options /usr/lib/python3.12/site-packages/cotyledon/oslo_config_glue.py:53
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.120 12 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2804
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.120 12 DEBUG cotyledon.oslo_config_glue [-] Configuration options gathered from: log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2805
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.120 12 DEBUG cotyledon.oslo_config_glue [-] command line args: ['--polling-namespaces', 'compute', '--logfile', '/dev/stdout'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2806
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.121 12 DEBUG cotyledon.oslo_config_glue [-] config files: ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2807
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.121 12 DEBUG cotyledon.oslo_config_glue [-] ================================================================================ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2809
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.121 12 DEBUG cotyledon.oslo_config_glue [-] batch_size                     = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.121 12 DEBUG cotyledon.oslo_config_glue [-] cfg_file                       = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.121 12 DEBUG cotyledon.oslo_config_glue [-] config_dir                     = ['/etc/ceilometer/ceilometer.conf.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.121 12 DEBUG cotyledon.oslo_config_glue [-] config_file                    = ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.121 12 DEBUG cotyledon.oslo_config_glue [-] config_source                  = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.121 12 DEBUG cotyledon.oslo_config_glue [-] debug                          = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.121 12 DEBUG cotyledon.oslo_config_glue [-] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'futurist=INFO', 'neutronclient=INFO', 'keystoneclient=INFO'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.122 12 DEBUG cotyledon.oslo_config_glue [-] enable_notifications           = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.122 12 DEBUG cotyledon.oslo_config_glue [-] enable_prometheus_exporter     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.122 12 DEBUG cotyledon.oslo_config_glue [-] event_pipeline_cfg_file        = event_pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.122 12 DEBUG cotyledon.oslo_config_glue [-] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.122 12 DEBUG cotyledon.oslo_config_glue [-] heartbeat_socket_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.122 12 DEBUG cotyledon.oslo_config_glue [-] host                           = compute-0.ctlplane.example.com log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.122 12 DEBUG cotyledon.oslo_config_glue [-] http_timeout                   = 600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.122 12 DEBUG cotyledon.oslo_config_glue [-] hypervisor_inspector           = libvirt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.122 12 DEBUG cotyledon.oslo_config_glue [-] identity_name_discovery        = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.122 12 DEBUG cotyledon.oslo_config_glue [-] ignore_disabled_projects       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.122 12 DEBUG cotyledon.oslo_config_glue [-] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.123 12 DEBUG cotyledon.oslo_config_glue [-] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.123 12 DEBUG cotyledon.oslo_config_glue [-] libvirt_type                   = kvm log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.123 12 DEBUG cotyledon.oslo_config_glue [-] libvirt_uri                    =  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.123 12 DEBUG cotyledon.oslo_config_glue [-] log_color                      = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.123 12 DEBUG cotyledon.oslo_config_glue [-] log_config_append              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.123 12 DEBUG cotyledon.oslo_config_glue [-] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.123 12 DEBUG cotyledon.oslo_config_glue [-] log_dir                        = /var/log/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.123 12 DEBUG cotyledon.oslo_config_glue [-] log_file                       = /dev/stdout log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.123 12 DEBUG cotyledon.oslo_config_glue [-] log_options                    = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.123 12 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.123 12 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.124 12 DEBUG cotyledon.oslo_config_glue [-] log_rotation_type              = none log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.124 12 DEBUG cotyledon.oslo_config_glue [-] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.124 12 DEBUG cotyledon.oslo_config_glue [-] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.124 12 DEBUG cotyledon.oslo_config_glue [-] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.124 12 DEBUG cotyledon.oslo_config_glue [-] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.124 12 DEBUG cotyledon.oslo_config_glue [-] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.124 12 DEBUG cotyledon.oslo_config_glue [-] max_logfile_count              = 30 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.124 12 DEBUG cotyledon.oslo_config_glue [-] max_logfile_size_mb            = 200 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.124 12 DEBUG cotyledon.oslo_config_glue [-] max_parallel_requests          = 64 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.124 12 DEBUG cotyledon.oslo_config_glue [-] partitioning_group_prefix      = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.124 12 DEBUG cotyledon.oslo_config_glue [-] pipeline_cfg_file              = pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.124 12 DEBUG cotyledon.oslo_config_glue [-] polling_namespaces             = ['compute'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.125 12 DEBUG cotyledon.oslo_config_glue [-] pollsters_definitions_dirs     = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.125 12 DEBUG cotyledon.oslo_config_glue [-] prometheus_listen_addresses    = ['127.0.0.1:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.125 12 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_certfile        = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.125 12 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_enable          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.125 12 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_keyfile         = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.125 12 DEBUG cotyledon.oslo_config_glue [-] publish_errors                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.125 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.125 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.125 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.125 12 DEBUG cotyledon.oslo_config_glue [-] reseller_prefix                = AUTH_ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.125 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_keys         = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.126 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_length       = 256 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.126 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_namespace    = ['metering.'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.126 12 DEBUG cotyledon.oslo_config_glue [-] rootwrap_config                = /etc/ceilometer/rootwrap.conf log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.126 12 DEBUG cotyledon.oslo_config_glue [-] sample_source                  = openstack log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.126 12 DEBUG cotyledon.oslo_config_glue [-] shell_completion               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.126 12 DEBUG cotyledon.oslo_config_glue [-] syslog_log_facility            = LOG_USER log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.126 12 DEBUG cotyledon.oslo_config_glue [-] threads_to_process_pollsters   = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.126 12 DEBUG cotyledon.oslo_config_glue [-] use_journal                    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.126 12 DEBUG cotyledon.oslo_config_glue [-] use_json                       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.126 12 DEBUG cotyledon.oslo_config_glue [-] use_stderr                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.126 12 DEBUG cotyledon.oslo_config_glue [-] use_syslog                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.126 12 DEBUG cotyledon.oslo_config_glue [-] watch_log_file                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.127 12 DEBUG cotyledon.oslo_config_glue [-] compute.fetch_extra_metadata   = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.127 12 DEBUG cotyledon.oslo_config_glue [-] compute.instance_discovery_method = libvirt_metadata log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.127 12 DEBUG cotyledon.oslo_config_glue [-] compute.resource_cache_expiry  = 3600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.127 12 DEBUG cotyledon.oslo_config_glue [-] compute.resource_update_interval = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.127 12 DEBUG cotyledon.oslo_config_glue [-] coordination.backend_url       = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.127 12 DEBUG cotyledon.oslo_config_glue [-] event.definitions_cfg_file     = event_definitions.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.127 12 DEBUG cotyledon.oslo_config_glue [-] event.drop_unmatched_notifications = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.127 12 DEBUG cotyledon.oslo_config_glue [-] event.store_raw                = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.127 12 DEBUG cotyledon.oslo_config_glue [-] ipmi.polling_retry             = 3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.128 12 DEBUG cotyledon.oslo_config_glue [-] meter.meter_definitions_dirs   = ['/etc/ceilometer/meters.d', '/usr/lib/python3.12/site-packages/ceilometer/data/meters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.128 12 DEBUG cotyledon.oslo_config_glue [-] notification.ack_on_event_error = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.128 12 DEBUG cotyledon.oslo_config_glue [-] notification.batch_size        = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.128 12 DEBUG cotyledon.oslo_config_glue [-] notification.batch_timeout     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.128 12 DEBUG cotyledon.oslo_config_glue [-] notification.messaging_urls    = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.128 12 DEBUG cotyledon.oslo_config_glue [-] notification.notification_control_exchanges = ['nova', 'glance', 'neutron', 'cinder', 'heat', 'keystone', 'trove', 'zaqar', 'swift', 'ceilometer', 'magnum', 'dns', 'ironic', 'aodh'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.128 12 DEBUG cotyledon.oslo_config_glue [-] notification.pipelines         = ['meter', 'event'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.128 12 DEBUG cotyledon.oslo_config_glue [-] notification.workers           = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.128 12 DEBUG cotyledon.oslo_config_glue [-] polling.batch_size             = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.128 12 DEBUG cotyledon.oslo_config_glue [-] polling.cfg_file               = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.128 12 DEBUG cotyledon.oslo_config_glue [-] polling.enable_notifications   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.128 12 DEBUG cotyledon.oslo_config_glue [-] polling.enable_prometheus_exporter = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.129 12 DEBUG cotyledon.oslo_config_glue [-] polling.heartbeat_socket_dir   = /var/lib/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.129 12 DEBUG cotyledon.oslo_config_glue [-] polling.identity_name_discovery = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.129 12 DEBUG cotyledon.oslo_config_glue [-] polling.ignore_disabled_projects = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.129 12 DEBUG cotyledon.oslo_config_glue [-] polling.partitioning_group_prefix = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.129 12 DEBUG cotyledon.oslo_config_glue [-] polling.pollsters_definitions_dirs = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.129 12 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_listen_addresses = ['[::]:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.129 12 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_certfile = /etc/ceilometer/tls/tls.crt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.129 12 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_enable  = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.129 12 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_keyfile = /etc/ceilometer/tls/tls.key log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.129 12 DEBUG cotyledon.oslo_config_glue [-] polling.threads_to_process_pollsters = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.129 12 DEBUG cotyledon.oslo_config_glue [-] publisher.telemetry_secret     = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.129 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.event_topic = event log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.130 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.metering_topic = metering log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.130 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.telemetry_driver = messagingv2 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.130 12 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.access_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.130 12 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.secret_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.130 12 DEBUG cotyledon.oslo_config_glue [-] rgw_client.implicit_tenants    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.130 12 DEBUG cotyledon.oslo_config_glue [-] service_types.aodh             = alarming log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.130 12 DEBUG cotyledon.oslo_config_glue [-] service_types.cinder           = volumev3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.130 12 DEBUG cotyledon.oslo_config_glue [-] service_types.glance           = image log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.130 12 DEBUG cotyledon.oslo_config_glue [-] service_types.neutron          = network log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.130 12 DEBUG cotyledon.oslo_config_glue [-] service_types.nova             = compute log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.130 12 DEBUG cotyledon.oslo_config_glue [-] service_types.radosgw          = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.130 12 DEBUG cotyledon.oslo_config_glue [-] service_types.swift            = object-store log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.131 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_section = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.131 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_type  = password log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.131 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.cafile     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.131 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.certfile   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.131 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.collect_timing = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.131 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.insecure   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.131 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.interface  = internalURL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.131 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.keyfile    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.131 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.region_name = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.131 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.split_loggers = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.131 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.timeout    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.132 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_section           = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.132 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_type              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.132 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.cafile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.132 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.certfile               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.132 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.collect_timing         = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.132 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.insecure               = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.132 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.interface              = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.132 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.keyfile                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.132 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.region_name            = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.132 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.split_loggers          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.132 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.timeout                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.133 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_section             = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.133 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_type                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.133 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.cafile                   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.133 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.certfile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.133 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.collect_timing           = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.133 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.insecure                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.133 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.interface                = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.133 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.keyfile                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.133 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.region_name              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.133 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.split_loggers            = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.133 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.timeout                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.133 12 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.134 12 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler_interval = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.134 12 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.log_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.134 12 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2828
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.134 12 DEBUG cotyledon._service [-] Run service AgentHeartBeatManager(0) [12] wait_forever /usr/lib/python3.12/site-packages/cotyledon/_service.py:263
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.136 12 DEBUG ceilometer.polling.manager [-] Started heartbeat child process. run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:519
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.139 12 DEBUG ceilometer.polling.manager [-] Started heartbeat update thread _read_queue /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:522
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.140 12 DEBUG ceilometer.polling.manager [-] Started heartbeat reporting thread _report_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:527
Oct 11 01:35:03 compute-0 python3.9[153530]: ansible-ansible.builtin.systemd Invoked with name=edpm_ceilometer_agent_compute.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:35:03 compute-0 systemd[1]: Stopping ceilometer_agent_compute container...
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.307 2 INFO cotyledon._service_manager [-] Caught SIGTERM signal, graceful exiting of master process
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.343 14 DEBUG ceilometer.compute.virt.libvirt.utils [-] Connecting to libvirt: qemu:///system new_libvirt_connection /usr/lib/python3.12/site-packages/ceilometer/compute/virt/libvirt/utils.py:96
Oct 11 01:35:03 compute-0 systemd[1]: Starting libvirt QEMU daemon...
Oct 11 01:35:03 compute-0 systemd[1]: Started libvirt QEMU daemon.
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.409 2 DEBUG cotyledon._service_manager [-] Killing services with signal SIGTERM _shutdown /usr/lib/python3.12/site-packages/cotyledon/_service_manager.py:319
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.409 2 DEBUG cotyledon._service_manager [-] Waiting services to terminate _shutdown /usr/lib/python3.12/site-packages/cotyledon/_service_manager.py:323
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.409 12 INFO cotyledon._service [-] Caught SIGTERM signal, graceful exiting of service AgentHeartBeatManager(0) [12]
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.445 14 INFO ceilometer.polling.manager [-] Looking for dynamic pollsters configurations at [['/etc/ceilometer/pollsters.d']].
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.446 14 INFO ceilometer.polling.manager [-] No dynamic pollsters found in folder [/etc/ceilometer/pollsters.d].
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.446 14 INFO ceilometer.polling.manager [-] No dynamic pollsters file found in dirs [['/etc/ceilometer/pollsters.d']].
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.584 14 DEBUG cotyledon.oslo_config_glue [-] Full set of CONF: _load_service_options /usr/lib/python3.12/site-packages/cotyledon/oslo_config_glue.py:53
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.585 14 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2804
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.585 14 DEBUG cotyledon.oslo_config_glue [-] Configuration options gathered from: log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2805
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.585 14 DEBUG cotyledon.oslo_config_glue [-] command line args: ['--polling-namespaces', 'compute', '--logfile', '/dev/stdout'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2806
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.585 14 DEBUG cotyledon.oslo_config_glue [-] config files: ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2807
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.585 14 DEBUG cotyledon.oslo_config_glue [-] ================================================================================ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2809
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.585 14 DEBUG cotyledon.oslo_config_glue [-] batch_size                     = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.585 14 DEBUG cotyledon.oslo_config_glue [-] cfg_file                       = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.585 14 DEBUG cotyledon.oslo_config_glue [-] config_dir                     = ['/etc/ceilometer/ceilometer.conf.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.585 14 DEBUG cotyledon.oslo_config_glue [-] config_file                    = ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.585 14 DEBUG cotyledon.oslo_config_glue [-] config_source                  = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.585 14 DEBUG cotyledon.oslo_config_glue [-] debug                          = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.585 14 DEBUG cotyledon.oslo_config_glue [-] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'futurist=INFO', 'neutronclient=INFO', 'keystoneclient=INFO'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.586 14 DEBUG cotyledon.oslo_config_glue [-] enable_notifications           = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.586 14 DEBUG cotyledon.oslo_config_glue [-] enable_prometheus_exporter     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.586 14 DEBUG cotyledon.oslo_config_glue [-] event_pipeline_cfg_file        = event_pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.586 14 DEBUG cotyledon.oslo_config_glue [-] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.586 14 DEBUG cotyledon.oslo_config_glue [-] heartbeat_socket_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.586 14 DEBUG cotyledon.oslo_config_glue [-] host                           = compute-0.ctlplane.example.com log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.586 14 DEBUG cotyledon.oslo_config_glue [-] http_timeout                   = 600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.586 14 DEBUG cotyledon.oslo_config_glue [-] hypervisor_inspector           = libvirt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.586 14 DEBUG cotyledon.oslo_config_glue [-] identity_name_discovery        = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.586 14 DEBUG cotyledon.oslo_config_glue [-] ignore_disabled_projects       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.586 14 DEBUG cotyledon.oslo_config_glue [-] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.587 14 DEBUG cotyledon.oslo_config_glue [-] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.587 14 DEBUG cotyledon.oslo_config_glue [-] libvirt_type                   = kvm log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.587 14 DEBUG cotyledon.oslo_config_glue [-] libvirt_uri                    =  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.587 14 DEBUG cotyledon.oslo_config_glue [-] log_color                      = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.587 14 DEBUG cotyledon.oslo_config_glue [-] log_config_append              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.587 14 DEBUG cotyledon.oslo_config_glue [-] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.587 14 DEBUG cotyledon.oslo_config_glue [-] log_dir                        = /var/log/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.587 14 DEBUG cotyledon.oslo_config_glue [-] log_file                       = /dev/stdout log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.587 14 DEBUG cotyledon.oslo_config_glue [-] log_options                    = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.587 14 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.587 14 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.587 14 DEBUG cotyledon.oslo_config_glue [-] log_rotation_type              = none log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.587 14 DEBUG cotyledon.oslo_config_glue [-] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.587 14 DEBUG cotyledon.oslo_config_glue [-] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.588 14 DEBUG cotyledon.oslo_config_glue [-] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.588 14 DEBUG cotyledon.oslo_config_glue [-] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.588 14 DEBUG cotyledon.oslo_config_glue [-] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.588 14 DEBUG cotyledon.oslo_config_glue [-] max_logfile_count              = 30 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.588 14 DEBUG cotyledon.oslo_config_glue [-] max_logfile_size_mb            = 200 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.588 14 DEBUG cotyledon.oslo_config_glue [-] max_parallel_requests          = 64 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.588 14 DEBUG cotyledon.oslo_config_glue [-] partitioning_group_prefix      = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.588 14 DEBUG cotyledon.oslo_config_glue [-] pipeline_cfg_file              = pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.588 14 DEBUG cotyledon.oslo_config_glue [-] polling_namespaces             = ['compute'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.588 14 DEBUG cotyledon.oslo_config_glue [-] pollsters_definitions_dirs     = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.588 14 DEBUG cotyledon.oslo_config_glue [-] prometheus_listen_addresses    = ['127.0.0.1:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.588 14 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_certfile        = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.588 14 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_enable          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.589 14 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_keyfile         = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.589 14 DEBUG cotyledon.oslo_config_glue [-] publish_errors                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.589 14 DEBUG cotyledon.oslo_config_glue [-] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.589 14 DEBUG cotyledon.oslo_config_glue [-] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.589 14 DEBUG cotyledon.oslo_config_glue [-] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.589 14 DEBUG cotyledon.oslo_config_glue [-] reseller_prefix                = AUTH_ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.589 14 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_keys         = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.589 14 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_length       = 256 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.589 14 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_namespace    = ['metering.'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.589 14 DEBUG cotyledon.oslo_config_glue [-] rootwrap_config                = /etc/ceilometer/rootwrap.conf log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.589 14 DEBUG cotyledon.oslo_config_glue [-] sample_source                  = openstack log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.589 14 DEBUG cotyledon.oslo_config_glue [-] shell_completion               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.589 14 DEBUG cotyledon.oslo_config_glue [-] syslog_log_facility            = LOG_USER log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.590 14 DEBUG cotyledon.oslo_config_glue [-] threads_to_process_pollsters   = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.590 14 DEBUG cotyledon.oslo_config_glue [-] use_journal                    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.590 14 DEBUG cotyledon.oslo_config_glue [-] use_json                       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.590 14 DEBUG cotyledon.oslo_config_glue [-] use_stderr                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.590 14 DEBUG cotyledon.oslo_config_glue [-] use_syslog                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.590 14 DEBUG cotyledon.oslo_config_glue [-] watch_log_file                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.590 14 DEBUG cotyledon.oslo_config_glue [-] compute.fetch_extra_metadata   = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.590 14 DEBUG cotyledon.oslo_config_glue [-] compute.instance_discovery_method = libvirt_metadata log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.590 14 DEBUG cotyledon.oslo_config_glue [-] compute.resource_cache_expiry  = 3600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.590 14 DEBUG cotyledon.oslo_config_glue [-] compute.resource_update_interval = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.590 14 DEBUG cotyledon.oslo_config_glue [-] coordination.backend_url       = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.590 14 DEBUG cotyledon.oslo_config_glue [-] event.definitions_cfg_file     = event_definitions.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.590 14 DEBUG cotyledon.oslo_config_glue [-] event.drop_unmatched_notifications = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.590 14 DEBUG cotyledon.oslo_config_glue [-] event.store_raw                = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.590 14 DEBUG cotyledon.oslo_config_glue [-] ipmi.polling_retry             = 3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.591 14 DEBUG cotyledon.oslo_config_glue [-] meter.meter_definitions_dirs   = ['/etc/ceilometer/meters.d', '/usr/lib/python3.12/site-packages/ceilometer/data/meters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.591 14 DEBUG cotyledon.oslo_config_glue [-] notification.ack_on_event_error = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.591 14 DEBUG cotyledon.oslo_config_glue [-] notification.batch_size        = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.591 14 DEBUG cotyledon.oslo_config_glue [-] notification.batch_timeout     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.591 14 DEBUG cotyledon.oslo_config_glue [-] notification.messaging_urls    = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.591 14 DEBUG cotyledon.oslo_config_glue [-] notification.notification_control_exchanges = ['nova', 'glance', 'neutron', 'cinder', 'heat', 'keystone', 'trove', 'zaqar', 'swift', 'ceilometer', 'magnum', 'dns', 'ironic', 'aodh'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.591 14 DEBUG cotyledon.oslo_config_glue [-] notification.pipelines         = ['meter', 'event'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.591 14 DEBUG cotyledon.oslo_config_glue [-] notification.workers           = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.591 14 DEBUG cotyledon.oslo_config_glue [-] polling.batch_size             = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.591 14 DEBUG cotyledon.oslo_config_glue [-] polling.cfg_file               = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.591 14 DEBUG cotyledon.oslo_config_glue [-] polling.enable_notifications   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.591 14 DEBUG cotyledon.oslo_config_glue [-] polling.enable_prometheus_exporter = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.591 14 DEBUG cotyledon.oslo_config_glue [-] polling.heartbeat_socket_dir   = /var/lib/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.592 14 DEBUG cotyledon.oslo_config_glue [-] polling.identity_name_discovery = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.592 14 DEBUG cotyledon.oslo_config_glue [-] polling.ignore_disabled_projects = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.592 14 DEBUG cotyledon.oslo_config_glue [-] polling.partitioning_group_prefix = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.592 14 DEBUG cotyledon.oslo_config_glue [-] polling.pollsters_definitions_dirs = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.592 14 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_listen_addresses = ['[::]:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.592 14 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_certfile = /etc/ceilometer/tls/tls.crt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.592 14 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_enable  = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.592 14 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_keyfile = /etc/ceilometer/tls/tls.key log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.592 14 DEBUG cotyledon.oslo_config_glue [-] polling.threads_to_process_pollsters = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.592 14 DEBUG cotyledon.oslo_config_glue [-] publisher.telemetry_secret     = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.592 14 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.event_topic = event log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.592 14 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.metering_topic = metering log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.592 14 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.telemetry_driver = messagingv2 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.592 14 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.access_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.593 14 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.secret_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.593 14 DEBUG cotyledon.oslo_config_glue [-] rgw_client.implicit_tenants    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.593 14 DEBUG cotyledon.oslo_config_glue [-] service_types.aodh             = alarming log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.593 14 DEBUG cotyledon.oslo_config_glue [-] service_types.cinder           = volumev3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.593 14 DEBUG cotyledon.oslo_config_glue [-] service_types.glance           = image log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.593 14 DEBUG cotyledon.oslo_config_glue [-] service_types.neutron          = network log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.593 14 DEBUG cotyledon.oslo_config_glue [-] service_types.nova             = compute log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.593 14 DEBUG cotyledon.oslo_config_glue [-] service_types.radosgw          = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.593 14 DEBUG cotyledon.oslo_config_glue [-] service_types.swift            = object-store log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.593 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_section = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.593 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_type  = password log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.593 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_url   = https://keystone-internal.openstack.svc:5000 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.593 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.cafile     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.593 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.certfile   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.593 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.collect_timing = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.default_domain_id = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.default_domain_name = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.domain_id  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.domain_name = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.insecure   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.interface  = internalURL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.keyfile    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.password   = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.project_domain_id = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.project_domain_name = Default log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.project_id = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.project_name = service log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.region_name = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.split_loggers = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.system_scope = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.timeout    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.trust_id   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.user_domain_id = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.user_domain_name = Default log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.user_id    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.594 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.username   = ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.595 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_section           = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.595 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_type              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.595 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.cafile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.595 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.certfile               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.595 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.collect_timing         = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.595 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.insecure               = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.595 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.interface              = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.595 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.keyfile                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.595 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.region_name            = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.595 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.split_loggers          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.595 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.timeout                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.595 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_section             = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.595 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_type                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.595 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.cafile                   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.595 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.certfile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.596 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.collect_timing           = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.596 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.insecure                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.596 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.interface                = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.596 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.keyfile                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.596 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.region_name              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.596 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.split_loggers            = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.596 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.timeout                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.596 14 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.596 14 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler_interval = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.596 14 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.log_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.596 14 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2828
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.596 14 DEBUG cotyledon._service [-] Run service AgentManager(0) [14] wait_forever /usr/lib/python3.12/site-packages/cotyledon/_service.py:263
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.597 14 INFO cotyledon._service [-] Caught SIGTERM signal, graceful exiting of service AgentManager(0) [14]
Oct 11 01:35:03 compute-0 virtqemud[153560]: libvirt version: 10.10.0, package: 15.el9 (builder@centos.org, 2025-08-18-13:22:20, )
Oct 11 01:35:03 compute-0 virtqemud[153560]: hostname: compute-0
Oct 11 01:35:03 compute-0 virtqemud[153560]: End of file while reading data: Input/output error
Oct 11 01:35:03 compute-0 ceilometer_agent_compute[153325]: 2025-10-11 01:35:03.611 2 DEBUG cotyledon._service_manager [-] Shutdown finish _shutdown /usr/lib/python3.12/site-packages/cotyledon/_service_manager.py:335
Oct 11 01:35:03 compute-0 systemd[1]: libpod-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope: Deactivated successfully.
Oct 11 01:35:03 compute-0 podman[153545]: 2025-10-11 01:35:03.878152003 +0000 UTC m=+0.629873938 container died c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_compute, config_id=edpm, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, io.buildah.version=1.41.4, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2)
Oct 11 01:35:03 compute-0 systemd[1]: libpod-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope: Consumed 1.778s CPU time.
Oct 11 01:35:03 compute-0 systemd[1]: c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6-2f465ccaa4da2a95.timer: Deactivated successfully.
Oct 11 01:35:03 compute-0 systemd[1]: Stopped /usr/bin/podman healthcheck run c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.
Oct 11 01:35:03 compute-0 systemd[1]: var-lib-containers-storage-overlay-b3d18363ce40705c64403cf6057216716d6bebcedd4dc52be32b80ac0420f1aa-merged.mount: Deactivated successfully.
Oct 11 01:35:03 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6-userdata-shm.mount: Deactivated successfully.
Oct 11 01:35:06 compute-0 podman[153545]: 2025-10-11 01:35:06.359630256 +0000 UTC m=+3.111352191 container cleanup c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, config_id=edpm, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_compute, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.vendor=CentOS, io.buildah.version=1.41.4, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 01:35:06 compute-0 podman[153545]: ceilometer_agent_compute
Oct 11 01:35:06 compute-0 podman[153598]: ceilometer_agent_compute
Oct 11 01:35:06 compute-0 systemd[1]: edpm_ceilometer_agent_compute.service: Deactivated successfully.
Oct 11 01:35:06 compute-0 systemd[1]: Stopped ceilometer_agent_compute container.
Oct 11 01:35:06 compute-0 systemd[1]: Starting ceilometer_agent_compute container...
Oct 11 01:35:06 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:35:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b3d18363ce40705c64403cf6057216716d6bebcedd4dc52be32b80ac0420f1aa/merged/etc/ceilometer/ceilometer_prom_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b3d18363ce40705c64403cf6057216716d6bebcedd4dc52be32b80ac0420f1aa/merged/etc/ceilometer/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b3d18363ce40705c64403cf6057216716d6bebcedd4dc52be32b80ac0420f1aa/merged/var/lib/openstack/config supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b3d18363ce40705c64403cf6057216716d6bebcedd4dc52be32b80ac0420f1aa/merged/var/lib/kolla/config_files/config.json supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:06 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.
Oct 11 01:35:06 compute-0 podman[153611]: 2025-10-11 01:35:06.697508214 +0000 UTC m=+0.188304700 container init c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251007, config_id=edpm, io.buildah.version=1.41.4)
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: + sudo -E kolla_set_configs
Oct 11 01:35:06 compute-0 podman[153611]: 2025-10-11 01:35:06.735845969 +0000 UTC m=+0.226642415 container start c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, managed_by=edpm_ansible)
Oct 11 01:35:06 compute-0 sudo[153633]: ceilometer : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_set_configs
Oct 11 01:35:06 compute-0 podman[153611]: ceilometer_agent_compute
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: sudo: unable to send audit message: Operation not permitted
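Annotation: the "unable to send audit message" warning is expected when sudo runs inside a container that lacks CAP_AUDIT_WRITE; the command itself still succeeds (the pam_unix session lines around it confirm that), sudo just cannot write to the kernel audit log. One way to confirm the capability is absent from inside the container (bit number taken from linux/capability.h):

    # Decode CapEff from /proc/self/status and test CAP_AUDIT_WRITE (bit 29).
    CAP_AUDIT_WRITE = 29

    with open("/proc/self/status") as f:
        cap_eff = next(line for line in f if line.startswith("CapEff:"))

    mask = int(cap_eff.split()[1], 16)
    print("CAP_AUDIT_WRITE present:", bool(mask & (1 << CAP_AUDIT_WRITE)))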
Oct 11 01:35:06 compute-0 sudo[153633]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=42405)
Oct 11 01:35:06 compute-0 systemd[1]: Started ceilometer_agent_compute container.
Oct 11 01:35:06 compute-0 sudo[153522]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: INFO:__main__:Loading config file at /var/lib/kolla/config_files/config.json
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: INFO:__main__:Validating config file
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: INFO:__main__:Kolla config strategy set to: COPY_ALWAYS
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: INFO:__main__:Copying service configuration files
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: INFO:__main__:Deleting /etc/ceilometer/ceilometer.conf
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: INFO:__main__:Copying /var/lib/openstack/config/ceilometer.conf to /etc/ceilometer/ceilometer.conf
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: INFO:__main__:Deleting /etc/ceilometer/polling.yaml
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: INFO:__main__:Copying /var/lib/openstack/config/polling.yaml to /etc/ceilometer/polling.yaml
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: INFO:__main__:Setting permission for /etc/ceilometer/polling.yaml
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: INFO:__main__:Deleting /etc/ceilometer/ceilometer.conf.d/01-ceilometer-custom.conf
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: INFO:__main__:Copying /var/lib/openstack/config/custom.conf to /etc/ceilometer/ceilometer.conf.d/01-ceilometer-custom.conf
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf.d/01-ceilometer-custom.conf
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: INFO:__main__:Deleting /etc/ceilometer/ceilometer.conf.d/02-ceilometer-host-specific.conf
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: INFO:__main__:Copying /var/lib/openstack/config/ceilometer-host-specific.conf to /etc/ceilometer/ceilometer.conf.d/02-ceilometer-host-specific.conf
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf.d/02-ceilometer-host-specific.conf
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: INFO:__main__:Writing out command to execute
Oct 11 01:35:06 compute-0 sudo[153633]: pam_unix(sudo:session): session closed for user root
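Annotation: the kolla_set_configs run above implements the COPY_ALWAYS strategy logged at 01:35:06: on every container start it re-reads /var/lib/kolla/config_files/config.json, deletes each destination file, copies the source in from the bind-mounted /var/lib/openstack/config, and resets permissions, then writes the container command out for the next step. A minimal sketch of that loop (a deliberate simplification; the real tool also validates the config and handles directories, globs, ownership, and optional entries):

    import json
    import os
    import shutil

    with open("/var/lib/kolla/config_files/config.json") as f:
        cfg = json.load(f)

    for item in cfg.get("config_files", []):
        src, dest = item["source"], item["dest"]
        if os.path.exists(dest):
            os.remove(dest)                               # "Deleting ..."
        shutil.copy(src, dest)                            # "Copying ... to ..."
        os.chmod(dest, int(item.get("perm", "0600"), 8))  # "Setting permission ..."

    # "Writing out command to execute": kolla_start reads this back later.
    with open("/run_command", "w") as f:
        f.write(cfg["command"])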
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: ++ cat /run_command
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: + CMD='/usr/bin/ceilometer-polling --polling-namespaces compute --logfile /dev/stdout'
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: + ARGS=
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: + sudo kolla_copy_cacerts
Oct 11 01:35:06 compute-0 sudo[153657]: ceilometer : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_copy_cacerts
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: sudo: unable to send audit message: Operation not permitted
Oct 11 01:35:06 compute-0 sudo[153657]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=42405)
Oct 11 01:35:06 compute-0 sudo[153657]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: Running command: '/usr/bin/ceilometer-polling --polling-namespaces compute --logfile /dev/stdout'
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: + [[ ! -n '' ]]
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: + . kolla_extend_start
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: + echo 'Running command: '\''/usr/bin/ceilometer-polling --polling-namespaces compute --logfile /dev/stdout'\'''
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: + umask 0022
Oct 11 01:35:06 compute-0 ceilometer_agent_compute[153627]: + exec /usr/bin/ceilometer-polling --polling-namespaces compute --logfile /dev/stdout
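Annotation: kolla_start finishes with exec, so the wrapper shell whose trace lines appear above is replaced in place by ceilometer-polling rather than left around as a parent; signals from systemd and podman therefore reach the agent directly. The same hand-off expressed in Python (illustrative only):

    import os

    # Replace the current process image, exactly like the shell's `exec`.
    os.execv("/usr/bin/ceilometer-polling",
             ["ceilometer-polling",
              "--polling-namespaces", "compute",
              "--logfile", "/dev/stdout"])
    # Nothing past execv runs unless the call itself fails.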
Oct 11 01:35:06 compute-0 podman[153634]: 2025-10-11 01:35:06.87258502 +0000 UTC m=+0.115820063 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=starting, health_failing_streak=1, health_log=, io.buildah.version=1.41.4, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 01:35:06 compute-0 systemd[1]: c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6-2870149d5fcb13d2.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 01:35:06 compute-0 systemd[1]: c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6-2870149d5fcb13d2.service: Failed with result 'exit-code'.
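Annotation: the failed transient unit c2b5...-2870149d5fcb13d2.service is the `podman healthcheck run` started at 01:35:06; it exited 1 because the check fired while the agent was still initializing, matching the health_status=starting / health_failing_streak=1 event above. A single failure is harmless; the streak only matters once it exceeds the configured retries. A sketch for watching the state settle (assumes podman is on PATH; .State.Health is the standard inspect layout for containers with a healthcheck):

    import json
    import subprocess
    import time

    for _ in range(5):
        out = subprocess.run(
            ["podman", "inspect", "--format", "{{json .State.Health}}",
             "ceilometer_agent_compute"],
            capture_output=True, text=True, check=True)
        health = json.loads(out.stdout)
        print(health["Status"], health["FailingStreak"])
        if health["Status"] == "healthy":
            break
        time.sleep(5)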
Oct 11 01:35:07 compute-0 sudo[153809]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cvwpcyrnbxcrleypewisevlvhvnbgvak ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146507.074418-578-206113146675835/AnsiballZ_stat.py'
Oct 11 01:35:07 compute-0 sudo[153809]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.641 2 DEBUG cotyledon.oslo_config_glue [-] Full set of CONF: _load_service_manager_options /usr/lib/python3.12/site-packages/cotyledon/oslo_config_glue.py:45
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.642 2 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2804
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.643 2 DEBUG cotyledon.oslo_config_glue [-] Configuration options gathered from: log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2805
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.643 2 DEBUG cotyledon.oslo_config_glue [-] command line args: ['--polling-namespaces', 'compute', '--logfile', '/dev/stdout'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2806
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.643 2 DEBUG cotyledon.oslo_config_glue [-] config files: ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2807
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.643 2 DEBUG cotyledon.oslo_config_glue [-] ================================================================================ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2809
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.644 2 DEBUG cotyledon.oslo_config_glue [-] batch_size                     = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.644 2 DEBUG cotyledon.oslo_config_glue [-] cfg_file                       = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.644 2 DEBUG cotyledon.oslo_config_glue [-] config_dir                     = ['/etc/ceilometer/ceilometer.conf.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.644 2 DEBUG cotyledon.oslo_config_glue [-] config_file                    = ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.644 2 DEBUG cotyledon.oslo_config_glue [-] config_source                  = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.645 2 DEBUG cotyledon.oslo_config_glue [-] debug                          = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.645 2 DEBUG cotyledon.oslo_config_glue [-] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'futurist=INFO', 'neutronclient=INFO', 'keystoneclient=INFO'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.645 2 DEBUG cotyledon.oslo_config_glue [-] enable_notifications           = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.645 2 DEBUG cotyledon.oslo_config_glue [-] enable_prometheus_exporter     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.645 2 DEBUG cotyledon.oslo_config_glue [-] event_pipeline_cfg_file        = event_pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.646 2 DEBUG cotyledon.oslo_config_glue [-] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.646 2 DEBUG cotyledon.oslo_config_glue [-] heartbeat_socket_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.646 2 DEBUG cotyledon.oslo_config_glue [-] host                           = compute-0.ctlplane.example.com log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.646 2 DEBUG cotyledon.oslo_config_glue [-] http_timeout                   = 600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.646 2 DEBUG cotyledon.oslo_config_glue [-] hypervisor_inspector           = libvirt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.647 2 WARNING oslo_config.cfg [-] Deprecated: Option "tenant_name_discovery" from group "DEFAULT" is deprecated. Use option "identity_name_discovery" from group "DEFAULT".
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.647 2 DEBUG cotyledon.oslo_config_glue [-] identity_name_discovery        = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.647 2 DEBUG cotyledon.oslo_config_glue [-] ignore_disabled_projects       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.647 2 DEBUG cotyledon.oslo_config_glue [-] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.648 2 DEBUG cotyledon.oslo_config_glue [-] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.648 2 DEBUG cotyledon.oslo_config_glue [-] libvirt_type                   = kvm log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.648 2 DEBUG cotyledon.oslo_config_glue [-] libvirt_uri                    =  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.648 2 DEBUG cotyledon.oslo_config_glue [-] log_color                      = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.648 2 DEBUG cotyledon.oslo_config_glue [-] log_config_append              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.648 2 DEBUG cotyledon.oslo_config_glue [-] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.649 2 DEBUG cotyledon.oslo_config_glue [-] log_dir                        = /var/log/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.649 2 DEBUG cotyledon.oslo_config_glue [-] log_file                       = /dev/stdout log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.649 2 DEBUG cotyledon.oslo_config_glue [-] log_options                    = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.649 2 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.649 2 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.649 2 DEBUG cotyledon.oslo_config_glue [-] log_rotation_type              = none log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.650 2 DEBUG cotyledon.oslo_config_glue [-] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.650 2 DEBUG cotyledon.oslo_config_glue [-] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.650 2 DEBUG cotyledon.oslo_config_glue [-] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.650 2 DEBUG cotyledon.oslo_config_glue [-] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.650 2 DEBUG cotyledon.oslo_config_glue [-] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.650 2 DEBUG cotyledon.oslo_config_glue [-] max_logfile_count              = 30 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.650 2 DEBUG cotyledon.oslo_config_glue [-] max_logfile_size_mb            = 200 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.651 2 DEBUG cotyledon.oslo_config_glue [-] max_parallel_requests          = 64 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.651 2 DEBUG cotyledon.oslo_config_glue [-] partitioning_group_prefix      = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.651 2 DEBUG cotyledon.oslo_config_glue [-] pipeline_cfg_file              = pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.651 2 DEBUG cotyledon.oslo_config_glue [-] polling_namespaces             = ['compute'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.651 2 DEBUG cotyledon.oslo_config_glue [-] pollsters_definitions_dirs     = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.652 2 DEBUG cotyledon.oslo_config_glue [-] prometheus_listen_addresses    = ['127.0.0.1:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.652 2 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_certfile        = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.652 2 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_enable          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.652 2 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_keyfile         = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.652 2 DEBUG cotyledon.oslo_config_glue [-] publish_errors                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.652 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.652 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.653 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.653 2 DEBUG cotyledon.oslo_config_glue [-] reseller_prefix                = AUTH_ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.653 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_keys         = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.653 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_length       = 256 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.653 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_namespace    = ['metering.'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.653 2 DEBUG cotyledon.oslo_config_glue [-] rootwrap_config                = /etc/ceilometer/rootwrap.conf log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.654 2 DEBUG cotyledon.oslo_config_glue [-] sample_source                  = openstack log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.654 2 DEBUG cotyledon.oslo_config_glue [-] shell_completion               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.654 2 DEBUG cotyledon.oslo_config_glue [-] syslog_log_facility            = LOG_USER log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.654 2 DEBUG cotyledon.oslo_config_glue [-] threads_to_process_pollsters   = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.654 2 DEBUG cotyledon.oslo_config_glue [-] use_journal                    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.654 2 DEBUG cotyledon.oslo_config_glue [-] use_json                       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.655 2 DEBUG cotyledon.oslo_config_glue [-] use_stderr                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.655 2 DEBUG cotyledon.oslo_config_glue [-] use_syslog                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.655 2 DEBUG cotyledon.oslo_config_glue [-] watch_log_file                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.655 2 DEBUG cotyledon.oslo_config_glue [-] compute.fetch_extra_metadata   = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.655 2 DEBUG cotyledon.oslo_config_glue [-] compute.instance_discovery_method = libvirt_metadata log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.656 2 DEBUG cotyledon.oslo_config_glue [-] compute.resource_cache_expiry  = 3600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.656 2 DEBUG cotyledon.oslo_config_glue [-] compute.resource_update_interval = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.656 2 DEBUG cotyledon.oslo_config_glue [-] coordination.backend_url       = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.656 2 DEBUG cotyledon.oslo_config_glue [-] event.definitions_cfg_file     = event_definitions.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.656 2 DEBUG cotyledon.oslo_config_glue [-] event.drop_unmatched_notifications = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.656 2 DEBUG cotyledon.oslo_config_glue [-] event.store_raw                = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.657 2 DEBUG cotyledon.oslo_config_glue [-] ipmi.polling_retry             = 3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.657 2 DEBUG cotyledon.oslo_config_glue [-] meter.meter_definitions_dirs   = ['/etc/ceilometer/meters.d', '/usr/lib/python3.12/site-packages/ceilometer/data/meters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.657 2 DEBUG cotyledon.oslo_config_glue [-] notification.ack_on_event_error = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.657 2 DEBUG cotyledon.oslo_config_glue [-] notification.batch_size        = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.657 2 DEBUG cotyledon.oslo_config_glue [-] notification.batch_timeout     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.658 2 DEBUG cotyledon.oslo_config_glue [-] notification.messaging_urls    = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.658 2 DEBUG cotyledon.oslo_config_glue [-] notification.notification_control_exchanges = ['nova', 'glance', 'neutron', 'cinder', 'heat', 'keystone', 'trove', 'zaqar', 'swift', 'ceilometer', 'magnum', 'dns', 'ironic', 'aodh'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.658 2 DEBUG cotyledon.oslo_config_glue [-] notification.pipelines         = ['meter', 'event'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.658 2 DEBUG cotyledon.oslo_config_glue [-] notification.workers           = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.658 2 DEBUG cotyledon.oslo_config_glue [-] polling.batch_size             = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.659 2 DEBUG cotyledon.oslo_config_glue [-] polling.cfg_file               = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.659 2 DEBUG cotyledon.oslo_config_glue [-] polling.enable_notifications   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.659 2 DEBUG cotyledon.oslo_config_glue [-] polling.enable_prometheus_exporter = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.659 2 DEBUG cotyledon.oslo_config_glue [-] polling.heartbeat_socket_dir   = /var/lib/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.659 2 DEBUG cotyledon.oslo_config_glue [-] polling.identity_name_discovery = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.659 2 DEBUG cotyledon.oslo_config_glue [-] polling.ignore_disabled_projects = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.660 2 DEBUG cotyledon.oslo_config_glue [-] polling.partitioning_group_prefix = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.660 2 DEBUG cotyledon.oslo_config_glue [-] polling.pollsters_definitions_dirs = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.660 2 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_listen_addresses = ['[::]:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.660 2 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_certfile = /etc/ceilometer/tls/tls.crt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.660 2 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_enable  = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.660 2 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_keyfile = /etc/ceilometer/tls/tls.key log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.661 2 DEBUG cotyledon.oslo_config_glue [-] polling.threads_to_process_pollsters = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.661 2 DEBUG cotyledon.oslo_config_glue [-] publisher.telemetry_secret     = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.661 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.event_topic = event log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.661 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.metering_topic = metering log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.661 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.telemetry_driver = messagingv2 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.661 2 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.access_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.662 2 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.secret_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.662 2 DEBUG cotyledon.oslo_config_glue [-] rgw_client.implicit_tenants    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.662 2 DEBUG cotyledon.oslo_config_glue [-] service_types.aodh             = alarming log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.662 2 DEBUG cotyledon.oslo_config_glue [-] service_types.cinder           = volumev3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.662 2 DEBUG cotyledon.oslo_config_glue [-] service_types.glance           = image log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.662 2 DEBUG cotyledon.oslo_config_glue [-] service_types.neutron          = network log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.663 2 DEBUG cotyledon.oslo_config_glue [-] service_types.nova             = compute log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.663 2 DEBUG cotyledon.oslo_config_glue [-] service_types.radosgw          = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.663 2 DEBUG cotyledon.oslo_config_glue [-] service_types.swift            = object-store log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.663 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_section = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.663 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_type  = password log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.663 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.cafile     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.664 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.certfile   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.664 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.collect_timing = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.664 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.insecure   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.664 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.interface  = internalURL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.664 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.keyfile    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.665 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.region_name = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.665 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.split_loggers = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.665 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.timeout    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.665 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_section           = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.665 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_type              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.665 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.cafile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.666 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.certfile               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.666 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.collect_timing         = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.666 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.insecure               = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.666 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.interface              = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.666 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.keyfile                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.666 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.region_name            = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.666 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.split_loggers          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.667 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.timeout                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.667 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_section             = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.667 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_type                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.667 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.cafile                   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.667 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.certfile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.667 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.collect_timing           = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.668 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.insecure                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.668 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.interface                = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.668 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.keyfile                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.668 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.region_name              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.668 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.split_loggers            = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.668 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.timeout                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.669 2 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.669 2 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler_interval = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.669 2 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.log_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.669 2 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2828
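Annotation: the block above (pid 2, the service master) is oslo.config's log_opt_values dump: every registered option with its effective value, secret options masked as ****. The lone WARNING at 01:35:07.647 shows that the config still sets the old tenant_name_discovery name, which oslo.config transparently maps onto identity_name_discovery. That mapping is declared on the option itself; a minimal sketch of how such a rename is expressed (illustrative, not Ceilometer's actual registration code):

    from oslo_config import cfg

    # A config file that sets "tenant_name_discovery" satisfies this option,
    # but doing so emits the deprecation warning seen in the log.
    opt = cfg.BoolOpt("identity_name_discovery",
                      default=False,
                      deprecated_name="tenant_name_discovery")

    conf = cfg.ConfigOpts()
    conf.register_opts([opt])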
Oct 11 01:35:07 compute-0 python3.9[153811]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/node_exporter/healthcheck follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:35:07 compute-0 sudo[153809]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.697 12 INFO ceilometer.polling.manager [-] Starting heartbeat child service. Listening on /var/lib/ceilometer/ceilometer-compute.socket
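Annotation: from here the in-container pid switches from 2 to 12. ceilometer-polling runs under cotyledon: the master process (pid 2, which logged _load_service_manager_options above) forks a worker for the compute polling namespace, and the worker (pid 12, _load_service_options below) logs the full option set again, which is why the dump repeats essentially verbatim. The process layout, reduced to a sketch (the service body is hypothetical; the manager/worker split is cotyledon's real API):

    import cotyledon

    class PollingService(cotyledon.Service):
        def run(self):
            # Worker side: poll meters, answer the heartbeat socket,
            # publish samples, etc.
            pass

    sm = cotyledon.ServiceManager()    # master process (pid 2 in the log)
    sm.add(PollingService, workers=1)  # forks the worker (pid 12)
    sm.run()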
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.698 12 DEBUG cotyledon.oslo_config_glue [-] Full set of CONF: _load_service_options /usr/lib/python3.12/site-packages/cotyledon/oslo_config_glue.py:53
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.698 12 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2804
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.698 12 DEBUG cotyledon.oslo_config_glue [-] Configuration options gathered from: log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2805
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.698 12 DEBUG cotyledon.oslo_config_glue [-] command line args: ['--polling-namespaces', 'compute', '--logfile', '/dev/stdout'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2806
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.699 12 DEBUG cotyledon.oslo_config_glue [-] config files: ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2807
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.699 12 DEBUG cotyledon.oslo_config_glue [-] ================================================================================ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2809
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.699 12 DEBUG cotyledon.oslo_config_glue [-] batch_size                     = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.699 12 DEBUG cotyledon.oslo_config_glue [-] cfg_file                       = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.699 12 DEBUG cotyledon.oslo_config_glue [-] config_dir                     = ['/etc/ceilometer/ceilometer.conf.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.700 12 DEBUG cotyledon.oslo_config_glue [-] config_file                    = ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.700 12 DEBUG cotyledon.oslo_config_glue [-] config_source                  = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.700 12 DEBUG cotyledon.oslo_config_glue [-] debug                          = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.700 12 DEBUG cotyledon.oslo_config_glue [-] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'futurist=INFO', 'neutronclient=INFO', 'keystoneclient=INFO'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.700 12 DEBUG cotyledon.oslo_config_glue [-] enable_notifications           = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.700 12 DEBUG cotyledon.oslo_config_glue [-] enable_prometheus_exporter     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.701 12 DEBUG cotyledon.oslo_config_glue [-] event_pipeline_cfg_file        = event_pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.701 12 DEBUG cotyledon.oslo_config_glue [-] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.701 12 DEBUG cotyledon.oslo_config_glue [-] heartbeat_socket_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.701 12 DEBUG cotyledon.oslo_config_glue [-] host                           = compute-0.ctlplane.example.com log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.701 12 DEBUG cotyledon.oslo_config_glue [-] http_timeout                   = 600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.701 12 DEBUG cotyledon.oslo_config_glue [-] hypervisor_inspector           = libvirt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.702 12 DEBUG cotyledon.oslo_config_glue [-] identity_name_discovery        = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.702 12 DEBUG cotyledon.oslo_config_glue [-] ignore_disabled_projects       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.702 12 DEBUG cotyledon.oslo_config_glue [-] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.702 12 DEBUG cotyledon.oslo_config_glue [-] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.702 12 DEBUG cotyledon.oslo_config_glue [-] libvirt_type                   = kvm log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.702 12 DEBUG cotyledon.oslo_config_glue [-] libvirt_uri                    =  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.702 12 DEBUG cotyledon.oslo_config_glue [-] log_color                      = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.702 12 DEBUG cotyledon.oslo_config_glue [-] log_config_append              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.703 12 DEBUG cotyledon.oslo_config_glue [-] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.703 12 DEBUG cotyledon.oslo_config_glue [-] log_dir                        = /var/log/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.703 12 DEBUG cotyledon.oslo_config_glue [-] log_file                       = /dev/stdout log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.703 12 DEBUG cotyledon.oslo_config_glue [-] log_options                    = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.704 12 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.704 12 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.704 12 DEBUG cotyledon.oslo_config_glue [-] log_rotation_type              = none log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.704 12 DEBUG cotyledon.oslo_config_glue [-] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.704 12 DEBUG cotyledon.oslo_config_glue [-] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.704 12 DEBUG cotyledon.oslo_config_glue [-] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.705 12 DEBUG cotyledon.oslo_config_glue [-] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.705 12 DEBUG cotyledon.oslo_config_glue [-] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.705 12 DEBUG cotyledon.oslo_config_glue [-] max_logfile_count              = 30 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.705 12 DEBUG cotyledon.oslo_config_glue [-] max_logfile_size_mb            = 200 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.705 12 DEBUG cotyledon.oslo_config_glue [-] max_parallel_requests          = 64 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.705 12 DEBUG cotyledon.oslo_config_glue [-] partitioning_group_prefix      = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.705 12 DEBUG cotyledon.oslo_config_glue [-] pipeline_cfg_file              = pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.706 12 DEBUG cotyledon.oslo_config_glue [-] polling_namespaces             = ['compute'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.706 12 DEBUG cotyledon.oslo_config_glue [-] pollsters_definitions_dirs     = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.706 12 DEBUG cotyledon.oslo_config_glue [-] prometheus_listen_addresses    = ['127.0.0.1:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.706 12 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_certfile        = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.706 12 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_enable          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.706 12 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_keyfile         = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.707 12 DEBUG cotyledon.oslo_config_glue [-] publish_errors                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.707 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.707 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.707 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.707 12 DEBUG cotyledon.oslo_config_glue [-] reseller_prefix                = AUTH_ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.707 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_keys         = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.707 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_length       = 256 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.708 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_namespace    = ['metering.'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.708 12 DEBUG cotyledon.oslo_config_glue [-] rootwrap_config                = /etc/ceilometer/rootwrap.conf log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.708 12 DEBUG cotyledon.oslo_config_glue [-] sample_source                  = openstack log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.708 12 DEBUG cotyledon.oslo_config_glue [-] shell_completion               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.708 12 DEBUG cotyledon.oslo_config_glue [-] syslog_log_facility            = LOG_USER log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.708 12 DEBUG cotyledon.oslo_config_glue [-] threads_to_process_pollsters   = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.708 12 DEBUG cotyledon.oslo_config_glue [-] use_journal                    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.709 12 DEBUG cotyledon.oslo_config_glue [-] use_json                       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.709 12 DEBUG cotyledon.oslo_config_glue [-] use_stderr                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.709 12 DEBUG cotyledon.oslo_config_glue [-] use_syslog                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.709 12 DEBUG cotyledon.oslo_config_glue [-] watch_log_file                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.709 12 DEBUG cotyledon.oslo_config_glue [-] compute.fetch_extra_metadata   = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.709 12 DEBUG cotyledon.oslo_config_glue [-] compute.instance_discovery_method = libvirt_metadata log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.710 12 DEBUG cotyledon.oslo_config_glue [-] compute.resource_cache_expiry  = 3600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.710 12 DEBUG cotyledon.oslo_config_glue [-] compute.resource_update_interval = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.710 12 DEBUG cotyledon.oslo_config_glue [-] coordination.backend_url       = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.710 12 DEBUG cotyledon.oslo_config_glue [-] event.definitions_cfg_file     = event_definitions.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.710 12 DEBUG cotyledon.oslo_config_glue [-] event.drop_unmatched_notifications = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.710 12 DEBUG cotyledon.oslo_config_glue [-] event.store_raw                = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.710 12 DEBUG cotyledon.oslo_config_glue [-] ipmi.polling_retry             = 3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.711 12 DEBUG cotyledon.oslo_config_glue [-] meter.meter_definitions_dirs   = ['/etc/ceilometer/meters.d', '/usr/lib/python3.12/site-packages/ceilometer/data/meters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.711 12 DEBUG cotyledon.oslo_config_glue [-] notification.ack_on_event_error = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.711 12 DEBUG cotyledon.oslo_config_glue [-] notification.batch_size        = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.711 12 DEBUG cotyledon.oslo_config_glue [-] notification.batch_timeout     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.711 12 DEBUG cotyledon.oslo_config_glue [-] notification.messaging_urls    = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.711 12 DEBUG cotyledon.oslo_config_glue [-] notification.notification_control_exchanges = ['nova', 'glance', 'neutron', 'cinder', 'heat', 'keystone', 'trove', 'zaqar', 'swift', 'ceilometer', 'magnum', 'dns', 'ironic', 'aodh'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.712 12 DEBUG cotyledon.oslo_config_glue [-] notification.pipelines         = ['meter', 'event'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.712 12 DEBUG cotyledon.oslo_config_glue [-] notification.workers           = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.712 12 DEBUG cotyledon.oslo_config_glue [-] polling.batch_size             = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.712 12 DEBUG cotyledon.oslo_config_glue [-] polling.cfg_file               = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.712 12 DEBUG cotyledon.oslo_config_glue [-] polling.enable_notifications   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.712 12 DEBUG cotyledon.oslo_config_glue [-] polling.enable_prometheus_exporter = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.712 12 DEBUG cotyledon.oslo_config_glue [-] polling.heartbeat_socket_dir   = /var/lib/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.712 12 DEBUG cotyledon.oslo_config_glue [-] polling.identity_name_discovery = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.713 12 DEBUG cotyledon.oslo_config_glue [-] polling.ignore_disabled_projects = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.713 12 DEBUG cotyledon.oslo_config_glue [-] polling.partitioning_group_prefix = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.713 12 DEBUG cotyledon.oslo_config_glue [-] polling.pollsters_definitions_dirs = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.713 12 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_listen_addresses = ['[::]:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.713 12 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_certfile = /etc/ceilometer/tls/tls.crt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.713 12 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_enable  = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.713 12 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_keyfile = /etc/ceilometer/tls/tls.key log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.714 12 DEBUG cotyledon.oslo_config_glue [-] polling.threads_to_process_pollsters = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.714 12 DEBUG cotyledon.oslo_config_glue [-] publisher.telemetry_secret     = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.714 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.event_topic = event log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.714 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.metering_topic = metering log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.714 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.telemetry_driver = messagingv2 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.714 12 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.access_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.715 12 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.secret_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.715 12 DEBUG cotyledon.oslo_config_glue [-] rgw_client.implicit_tenants    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.715 12 DEBUG cotyledon.oslo_config_glue [-] service_types.aodh             = alarming log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.715 12 DEBUG cotyledon.oslo_config_glue [-] service_types.cinder           = volumev3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.715 12 DEBUG cotyledon.oslo_config_glue [-] service_types.glance           = image log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.715 12 DEBUG cotyledon.oslo_config_glue [-] service_types.neutron          = network log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.716 12 DEBUG cotyledon.oslo_config_glue [-] service_types.nova             = compute log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.716 12 DEBUG cotyledon.oslo_config_glue [-] service_types.radosgw          = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.716 12 DEBUG cotyledon.oslo_config_glue [-] service_types.swift            = object-store log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.716 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_section = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.716 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_type  = password log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.717 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.cafile     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.717 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.certfile   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.717 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.collect_timing = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.717 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.insecure   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.717 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.interface  = internalURL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.718 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.keyfile    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.718 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.region_name = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.718 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.split_loggers = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.718 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.timeout    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.718 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_section           = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.718 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_type              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.718 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.cafile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.719 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.certfile               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.719 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.collect_timing         = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.719 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.insecure               = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.719 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.interface              = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.719 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.keyfile                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.719 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.region_name            = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.719 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.split_loggers          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.720 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.timeout                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.720 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_section             = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.720 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_type                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.720 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.cafile                   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.720 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.certfile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.720 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.collect_timing           = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.720 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.insecure                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.721 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.interface                = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.721 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.keyfile                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.721 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.region_name              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.721 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.split_loggers            = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.721 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.timeout                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.721 12 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.722 12 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler_interval = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.722 12 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.log_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.722 12 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2828
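The block ending above is oslo.config's standard startup dump: cotyledon's oslo_config_glue hooks each worker's start and calls ConfigOpts.log_opt_values(), which logs every registered option at DEBUG level (cfg.py:2817 for top-level options, cfg.py:2824 for grouped ones in this build) and renders options registered with secret=True, such as publisher.telemetry_secret and coordination.backend_url, as ****. A minimal sketch of how such a dump is produced; the option set here is hypothetical, not ceilometer's real one:

    import logging

    from oslo_config import cfg

    logging.basicConfig(level=logging.DEBUG)
    LOG = logging.getLogger(__name__)

    CONF = cfg.ConfigOpts()
    # Hypothetical options for illustration; ceilometer registers its own set.
    CONF.register_opts([
        cfg.IntOpt('http_timeout', default=600),
        cfg.StrOpt('hypervisor_inspector', default='libvirt'),
    ])
    # secret=True is what makes log_opt_values() print a value as ****.
    CONF.register_opts([cfg.StrOpt('telemetry_secret', secret=True)],
                       group='publisher')

    CONF([], project='ceilometer')
    # Emits one DEBUG line per option, in the same "name = value" format as above.
    CONF.log_opt_values(LOG, logging.DEBUG)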
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.721 14 DEBUG ceilometer.compute.virt.libvirt.utils [-] Connecting to libvirt: qemu:///system new_libvirt_connection /usr/lib/python3.12/site-packages/ceilometer/compute/virt/libvirt/utils.py:96
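The URI chosen above follows from the values in the dump: libvirt_uri is empty and libvirt_type = kvm, so the inspector falls back to the default system URI qemu:///system. A minimal sketch of the equivalent read-only connection with the libvirt Python bindings; the fallback mapping is an assumption for illustration, not ceilometer's exact code:

    import libvirt  # provided by the libvirt-python package

    LIBVIRT_TYPE = 'kvm'  # values taken from the config dump above
    LIBVIRT_URI = ''      # empty, so a default URI must be derived

    # Assumed fallback mapping for illustration; ceilometer derives this internally.
    uri = LIBVIRT_URI or {'kvm': 'qemu:///system',
                          'qemu': 'qemu:///system'}.get(LIBVIRT_TYPE, 'qemu:///system')

    conn = libvirt.openReadOnly(uri)  # read-only access is enough for inspection
    try:
        print([dom.name() for dom in conn.listAllDomains()])
    finally:
        conn.close()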
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.722 12 DEBUG cotyledon._service [-] Run service AgentHeartBeatManager(0) [12] wait_forever /usr/lib/python3.12/site-packages/cotyledon/_service.py:263
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.725 12 DEBUG ceilometer.polling.manager [-] Started heartbeat child process. run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:519
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.727 12 DEBUG ceilometer.polling.manager [-] Started heartbeat update thread _read_queue /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:522
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.728 12 DEBUG ceilometer.polling.manager [-] Started heartbeat reporting thread _report_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:527
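The "Run service AgentHeartBeatManager(0) [12]" line and the heartbeat messages come from cotyledon, the process supervisor these agents run under: a master process forks one worker per registered service, and cotyledon's oslo_config_glue.setup() is what attaches the configuration dumps above to each worker's startup. A minimal sketch of that pattern with a stand-in service; the service body is hypothetical:

    import time

    import cotyledon
    from cotyledon import oslo_config_glue
    from oslo_config import cfg

    CONF = cfg.ConfigOpts()
    CONF([], project='example')


    class HeartBeatService(cotyledon.Service):
        """Stand-in for ceilometer's AgentHeartBeatManager."""

        def run(self):
            # A real agent would poll and report here; sleeping keeps the
            # worker alive until the supervisor terminates it.
            while True:
                time.sleep(60)


    sm = cotyledon.ServiceManager()
    oslo_config_glue.setup(sm, CONF)     # logs option values when workers start
    sm.add(HeartBeatService, workers=1)  # "(0)" above is the worker id
    sm.run()                             # blocks; forks one worker process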
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.735 14 INFO ceilometer.polling.manager [-] Looking for dynamic pollsters configurations at [['/etc/ceilometer/pollsters.d']].
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.736 14 INFO ceilometer.polling.manager [-] No dynamic pollsters found in folder [/etc/ceilometer/pollsters.d].
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.736 14 INFO ceilometer.polling.manager [-] No dynamic pollsters file found in dirs [['/etc/ceilometer/pollsters.d']].
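The three INFO lines above mean the directory listed in pollsters_definitions_dirs exists but holds no definition files: dynamic pollsters are declared in YAML files under /etc/ceilometer/pollsters.d, letting operators add meters without code changes. A minimal sketch of the discovery step; this loader is hypothetical, not ceilometer's implementation:

    import glob
    import os

    import yaml  # PyYAML

    POLLSTER_DIRS = ['/etc/ceilometer/pollsters.d']  # from the dump above


    def load_dynamic_pollsters(dirs):
        definitions = []
        for d in dirs:
            for path in sorted(glob.glob(os.path.join(d, '*.yaml'))):
                with open(path) as f:
                    definitions.extend(yaml.safe_load(f) or [])
        return definitions


    # An empty result corresponds to "No dynamic pollsters found" above.
    print(load_dynamic_pollsters(POLLSTER_DIRS))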
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.892 14 DEBUG cotyledon.oslo_config_glue [-] Full set of CONF: _load_service_options /usr/lib/python3.12/site-packages/cotyledon/oslo_config_glue.py:53
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.893 14 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2804
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.893 14 DEBUG cotyledon.oslo_config_glue [-] Configuration options gathered from: log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2805
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.893 14 DEBUG cotyledon.oslo_config_glue [-] command line args: ['--polling-namespaces', 'compute', '--logfile', '/dev/stdout'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2806
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.893 14 DEBUG cotyledon.oslo_config_glue [-] config files: ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2807
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.893 14 DEBUG cotyledon.oslo_config_glue [-] ================================================================================ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2809
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.894 14 DEBUG cotyledon.oslo_config_glue [-] batch_size                     = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.894 14 DEBUG cotyledon.oslo_config_glue [-] cfg_file                       = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.894 14 DEBUG cotyledon.oslo_config_glue [-] config_dir                     = ['/etc/ceilometer/ceilometer.conf.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.894 14 DEBUG cotyledon.oslo_config_glue [-] config_file                    = ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.894 14 DEBUG cotyledon.oslo_config_glue [-] config_source                  = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.894 14 DEBUG cotyledon.oslo_config_glue [-] debug                          = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.895 14 DEBUG cotyledon.oslo_config_glue [-] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'futurist=INFO', 'neutronclient=INFO', 'keystoneclient=INFO'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.895 14 DEBUG cotyledon.oslo_config_glue [-] enable_notifications           = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.895 14 DEBUG cotyledon.oslo_config_glue [-] enable_prometheus_exporter     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.895 14 DEBUG cotyledon.oslo_config_glue [-] event_pipeline_cfg_file        = event_pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.895 14 DEBUG cotyledon.oslo_config_glue [-] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.895 14 DEBUG cotyledon.oslo_config_glue [-] heartbeat_socket_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.895 14 DEBUG cotyledon.oslo_config_glue [-] host                           = compute-0.ctlplane.example.com log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.895 14 DEBUG cotyledon.oslo_config_glue [-] http_timeout                   = 600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.896 14 DEBUG cotyledon.oslo_config_glue [-] hypervisor_inspector           = libvirt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.896 14 DEBUG cotyledon.oslo_config_glue [-] identity_name_discovery        = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.896 14 DEBUG cotyledon.oslo_config_glue [-] ignore_disabled_projects       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.896 14 DEBUG cotyledon.oslo_config_glue [-] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.896 14 DEBUG cotyledon.oslo_config_glue [-] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.896 14 DEBUG cotyledon.oslo_config_glue [-] libvirt_type                   = kvm log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.896 14 DEBUG cotyledon.oslo_config_glue [-] libvirt_uri                    =  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.896 14 DEBUG cotyledon.oslo_config_glue [-] log_color                      = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.897 14 DEBUG cotyledon.oslo_config_glue [-] log_config_append              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.897 14 DEBUG cotyledon.oslo_config_glue [-] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.897 14 DEBUG cotyledon.oslo_config_glue [-] log_dir                        = /var/log/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.897 14 DEBUG cotyledon.oslo_config_glue [-] log_file                       = /dev/stdout log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.897 14 DEBUG cotyledon.oslo_config_glue [-] log_options                    = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.897 14 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.897 14 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.897 14 DEBUG cotyledon.oslo_config_glue [-] log_rotation_type              = none log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.897 14 DEBUG cotyledon.oslo_config_glue [-] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.898 14 DEBUG cotyledon.oslo_config_glue [-] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.898 14 DEBUG cotyledon.oslo_config_glue [-] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.898 14 DEBUG cotyledon.oslo_config_glue [-] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.898 14 DEBUG cotyledon.oslo_config_glue [-] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.898 14 DEBUG cotyledon.oslo_config_glue [-] max_logfile_count              = 30 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.898 14 DEBUG cotyledon.oslo_config_glue [-] max_logfile_size_mb            = 200 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.898 14 DEBUG cotyledon.oslo_config_glue [-] max_parallel_requests          = 64 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.898 14 DEBUG cotyledon.oslo_config_glue [-] partitioning_group_prefix      = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.898 14 DEBUG cotyledon.oslo_config_glue [-] pipeline_cfg_file              = pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.899 14 DEBUG cotyledon.oslo_config_glue [-] polling_namespaces             = ['compute'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.899 14 DEBUG cotyledon.oslo_config_glue [-] pollsters_definitions_dirs     = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.899 14 DEBUG cotyledon.oslo_config_glue [-] prometheus_listen_addresses    = ['127.0.0.1:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.899 14 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_certfile        = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.899 14 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_enable          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.899 14 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_keyfile         = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.899 14 DEBUG cotyledon.oslo_config_glue [-] publish_errors                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.899 14 DEBUG cotyledon.oslo_config_glue [-] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.899 14 DEBUG cotyledon.oslo_config_glue [-] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.900 14 DEBUG cotyledon.oslo_config_glue [-] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.900 14 DEBUG cotyledon.oslo_config_glue [-] reseller_prefix                = AUTH_ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.900 14 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_keys         = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.900 14 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_length       = 256 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.900 14 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_namespace    = ['metering.'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.900 14 DEBUG cotyledon.oslo_config_glue [-] rootwrap_config                = /etc/ceilometer/rootwrap.conf log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.900 14 DEBUG cotyledon.oslo_config_glue [-] sample_source                  = openstack log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.900 14 DEBUG cotyledon.oslo_config_glue [-] shell_completion               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.900 14 DEBUG cotyledon.oslo_config_glue [-] syslog_log_facility            = LOG_USER log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.901 14 DEBUG cotyledon.oslo_config_glue [-] threads_to_process_pollsters   = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.901 14 DEBUG cotyledon.oslo_config_glue [-] use_journal                    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.901 14 DEBUG cotyledon.oslo_config_glue [-] use_json                       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.901 14 DEBUG cotyledon.oslo_config_glue [-] use_stderr                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.901 14 DEBUG cotyledon.oslo_config_glue [-] use_syslog                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.901 14 DEBUG cotyledon.oslo_config_glue [-] watch_log_file                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.901 14 DEBUG cotyledon.oslo_config_glue [-] compute.fetch_extra_metadata   = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.901 14 DEBUG cotyledon.oslo_config_glue [-] compute.instance_discovery_method = libvirt_metadata log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.901 14 DEBUG cotyledon.oslo_config_glue [-] compute.resource_cache_expiry  = 3600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.902 14 DEBUG cotyledon.oslo_config_glue [-] compute.resource_update_interval = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.902 14 DEBUG cotyledon.oslo_config_glue [-] coordination.backend_url       = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.902 14 DEBUG cotyledon.oslo_config_glue [-] event.definitions_cfg_file     = event_definitions.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.902 14 DEBUG cotyledon.oslo_config_glue [-] event.drop_unmatched_notifications = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.902 14 DEBUG cotyledon.oslo_config_glue [-] event.store_raw                = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.902 14 DEBUG cotyledon.oslo_config_glue [-] ipmi.polling_retry             = 3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.902 14 DEBUG cotyledon.oslo_config_glue [-] meter.meter_definitions_dirs   = ['/etc/ceilometer/meters.d', '/usr/lib/python3.12/site-packages/ceilometer/data/meters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.902 14 DEBUG cotyledon.oslo_config_glue [-] notification.ack_on_event_error = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.903 14 DEBUG cotyledon.oslo_config_glue [-] notification.batch_size        = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.903 14 DEBUG cotyledon.oslo_config_glue [-] notification.batch_timeout     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.903 14 DEBUG cotyledon.oslo_config_glue [-] notification.messaging_urls    = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.903 14 DEBUG cotyledon.oslo_config_glue [-] notification.notification_control_exchanges = ['nova', 'glance', 'neutron', 'cinder', 'heat', 'keystone', 'trove', 'zaqar', 'swift', 'ceilometer', 'magnum', 'dns', 'ironic', 'aodh'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.903 14 DEBUG cotyledon.oslo_config_glue [-] notification.pipelines         = ['meter', 'event'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.903 14 DEBUG cotyledon.oslo_config_glue [-] notification.workers           = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.903 14 DEBUG cotyledon.oslo_config_glue [-] polling.batch_size             = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.904 14 DEBUG cotyledon.oslo_config_glue [-] polling.cfg_file               = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.904 14 DEBUG cotyledon.oslo_config_glue [-] polling.enable_notifications   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.904 14 DEBUG cotyledon.oslo_config_glue [-] polling.enable_prometheus_exporter = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.904 14 DEBUG cotyledon.oslo_config_glue [-] polling.heartbeat_socket_dir   = /var/lib/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.904 14 DEBUG cotyledon.oslo_config_glue [-] polling.identity_name_discovery = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.904 14 DEBUG cotyledon.oslo_config_glue [-] polling.ignore_disabled_projects = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.904 14 DEBUG cotyledon.oslo_config_glue [-] polling.partitioning_group_prefix = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.904 14 DEBUG cotyledon.oslo_config_glue [-] polling.pollsters_definitions_dirs = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.904 14 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_listen_addresses = ['[::]:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.904 14 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_certfile = /etc/ceilometer/tls/tls.crt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.904 14 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_enable  = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.905 14 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_keyfile = /etc/ceilometer/tls/tls.key log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.905 14 DEBUG cotyledon.oslo_config_glue [-] polling.threads_to_process_pollsters = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.905 14 DEBUG cotyledon.oslo_config_glue [-] publisher.telemetry_secret     = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.905 14 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.event_topic = event log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.905 14 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.metering_topic = metering log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.905 14 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.telemetry_driver = messagingv2 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.905 14 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.access_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.905 14 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.secret_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.906 14 DEBUG cotyledon.oslo_config_glue [-] rgw_client.implicit_tenants    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.906 14 DEBUG cotyledon.oslo_config_glue [-] service_types.aodh             = alarming log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.906 14 DEBUG cotyledon.oslo_config_glue [-] service_types.cinder           = volumev3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.906 14 DEBUG cotyledon.oslo_config_glue [-] service_types.glance           = image log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.906 14 DEBUG cotyledon.oslo_config_glue [-] service_types.neutron          = network log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.906 14 DEBUG cotyledon.oslo_config_glue [-] service_types.nova             = compute log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.906 14 DEBUG cotyledon.oslo_config_glue [-] service_types.radosgw          = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.906 14 DEBUG cotyledon.oslo_config_glue [-] service_types.swift            = object-store log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.906 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_section = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.907 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_type  = password log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.907 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_url   = https://keystone-internal.openstack.svc:5000 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.907 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.cafile     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.907 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.certfile   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.907 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.collect_timing = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.907 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.default_domain_id = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.907 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.default_domain_name = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.907 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.domain_id  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.907 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.domain_name = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.907 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.insecure   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.908 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.interface  = internalURL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.908 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.keyfile    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.908 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.password   = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.908 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.project_domain_id = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.908 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.project_domain_name = Default log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.908 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.project_id = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.908 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.project_name = service log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.908 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.region_name = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.908 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.split_loggers = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.908 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.system_scope = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.908 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.timeout    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.908 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.trust_id   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.909 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.user_domain_id = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.909 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.user_domain_name = Default log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.909 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.user_id    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.909 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.username   = ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.909 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_section           = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.909 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_type              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.909 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.cafile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.909 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.certfile               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.910 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.collect_timing         = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.910 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.insecure               = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.910 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.interface              = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.910 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.keyfile                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.910 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.region_name            = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.910 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.split_loggers          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.910 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.timeout                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.911 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_section             = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.911 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_type                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.911 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.cafile                   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.911 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.certfile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.911 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.collect_timing           = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.911 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.insecure                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.911 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.interface                = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.911 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.keyfile                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.912 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.region_name              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.912 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.split_loggers            = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.912 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.timeout                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.912 14 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.912 14 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler_interval = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.912 14 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.log_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.912 14 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2828
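The block above is oslo.config's standard startup option dump: cotyledon's config glue calls log_opt_values(), which walks every registered option group and logs one "group.option = value" line per option, masking anything declared secret (passwords, keys, messaging URLs) as ****. A minimal sketch of that mechanism, assuming a toy service that registers two options mirroring entries in the dump (the registrations are illustrative; log_opt_values and the option classes are real oslo.config API):

    import logging
    from oslo_config import cfg

    logging.basicConfig(level=logging.DEBUG)
    LOG = logging.getLogger(__name__)

    CONF = cfg.ConfigOpts()
    # Two illustrative options echoing entries in the dump above.
    CONF.register_opts([cfg.IntOpt('workers', default=1)],
                       group='notification')
    CONF.register_opts([cfg.StrOpt('password', secret=True)],
                       group='service_credentials')

    CONF([])  # parse an (empty) command line
    # Emits "group.option = value" lines like the dump above;
    # options registered with secret=True are rendered as '****'.
    CONF.log_opt_values(LOG, logging.DEBUG)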
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.912 14 DEBUG cotyledon._service [-] Run service AgentManager(0) [14] wait_forever /usr/lib/python3.12/site-packages/cotyledon/_service.py:263
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.915 14 DEBUG ceilometer.agent [-] Config file: {'sources': [{'name': 'pollsters', 'interval': 120, 'meters': ['power.state', 'cpu', 'memory.usage', 'disk.*', 'network.*']}]} load_config /usr/lib/python3.12/site-packages/ceilometer/agent.py:64
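load_config has just deserialized polling.cfg_file (polling.yaml, per the dump above) into the dict shown. A hedged sketch of an on-disk YAML layout that would produce exactly that dict; the layout is an assumption inferred from the logged dict, and the round trip is checked with an assert:

    import yaml

    POLLING_YAML = """
    sources:
      - name: pollsters
        interval: 120
        meters:
          - power.state
          - cpu
          - memory.usage
          - 'disk.*'
          - 'network.*'
    """

    # Parsing this YAML yields the same structure the agent logged.
    assert yaml.safe_load(POLLING_YAML) == {
        'sources': [{'name': 'pollsters',
                     'interval': 120,
                     'meters': ['power.state', 'cpu', 'memory.usage',
                                'disk.*', 'network.*']}]}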
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.930 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is greater than the number of worker threads available to execute them; therefore, the polling process can be expected to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.931 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
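The two lines above describe the task fan-out: the source's pollsters are submitted to a thread pool sized by polling.threads_to_process_pollsters (1 in the dump above), so with more pollsters than threads the cycle serializes. An illustrative sketch of that pattern using concurrent.futures; run_pollster and the meter list are hypothetical stand-ins, not ceilometer API:

    from concurrent.futures import ThreadPoolExecutor, wait

    def run_pollster(name):
        # Stand-in for discovery plus sample collection for one meter.
        return f'Finished processing pollster [{name}].'

    meters = ['disk.device.capacity', 'network.incoming.bytes.rate', 'cpu']

    # max_workers=1 mirrors polling.threads_to_process_pollsters above:
    # submitted tasks queue up and run one at a time.
    with ThreadPoolExecutor(max_workers=1) as executor:
        futures = [executor.submit(run_pollster, m) for m in meters]
        wait(futures)
        for f in futures:
            print(f.result())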
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.931 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.932 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f8ed27f97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.932 14 DEBUG ceilometer.compute.virt.libvirt.utils [-] Connecting to libvirt: qemu:///system new_libvirt_connection /usr/lib/python3.12/site-packages/ceilometer/compute/virt/libvirt/utils.py:96
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.932 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb8c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.933 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.933 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.933 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb1a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.933 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb200>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.933 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.933 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed2874260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.934 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.934 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.934 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed3ab42f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb350>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb90>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fa390>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb3b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbbf0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbc80>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27f9610>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.937 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb620>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.937 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbe30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.937 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbec0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.938 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbf50>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.941 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.941 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f8ed27fbad0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.941 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.941 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f8ed27faff0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.941 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.941 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f8ed27fb110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.941 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f8ed27fb170>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.942 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f8ed27fb1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.942 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f8ed27fb230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.942 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f8ed2874230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.942 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f8ed27fb290>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.943 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.943 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f8ed5778d70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.943 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.943 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f8ed27fb650>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.943 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.943 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f8ed27fbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.943 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.943 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f8ed27fb320>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.943 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.943 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f8ed27fbb60>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.944 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.944 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f8ed27fa3f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.944 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.944 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f8ed27fb380>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.944 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.944 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f8ed27fbbc0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.944 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.944 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f8ed27fbc50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.944 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.944 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f8ed27fbce0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.945 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.945 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f8ed27fbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.945 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.945 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f8ed27fb590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.945 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.945 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f8ed27f95e0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.945 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.945 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f8ed27fb5f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.945 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f8ed27fbe00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.946 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f8ed27fbe90>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.946 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f8ed27fbf20>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.946 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.946 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.946 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.947 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.947 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.947 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.947 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.947 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.947 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.948 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.948 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.948 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.948 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.948 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.948 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.948 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.949 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.949 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.949 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.949 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.949 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.949 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.949 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.949 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.950 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.950 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:35:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:35:07.950 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
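The block above is one complete ceilometer polling cycle finishing on this compute node; every pollster name in brackets is a meter defined in the agent's polling.yaml. A minimal sketch of such a file, assuming the default /etc/ceilometer/polling.yaml path and an assumed source name and interval, using a subset of the meter names actually logged above:

    # /etc/ceilometer/polling.yaml (sketch; path, source name and interval are assumptions)
    sources:
      - name: edpm_pollsters
        interval: 120            # seconds between cycles like the one above (assumed)
        meters:
          - cpu
          - memory.usage
          - power.state
          - disk.device.write.bytes
          - disk.device.write.requests
          - network.incoming.bytes
          - network.outgoing.packets.error

Each cycle ends with one "Finished processing pollster" debug line per meter, which is exactly the pattern logged at 01:35:07.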
Oct 11 01:35:08 compute-0 sudo[153945]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-itydooozrlazxmcqvwdobufxwkwuyzsh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146507.074418-578-206113146675835/AnsiballZ_copy.py'
Oct 11 01:35:08 compute-0 sudo[153945]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:08 compute-0 python3.9[153947]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/healthchecks/node_exporter/ group=zuul mode=0700 owner=zuul setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760146507.074418-578-206113146675835/.source _original_basename=healthcheck follow=False checksum=e380c11c36804bfc65a818f2960cfa663daacfe5 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:35:08 compute-0 sudo[153945]: pam_unix(sudo:session): session closed for user root
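The AnsiballZ_copy run above maps one-to-one onto a copy task; everything below is reconstructed from the logged module arguments, with only the task name and the become flag (implied by the sudo session) added as assumptions:

    - name: Install node_exporter healthcheck script   # task name is an assumption
      ansible.builtin.copy:
        src: healthcheck
        dest: /var/lib/openstack/healthchecks/node_exporter/
        owner: zuul
        group: zuul
        mode: "0700"
        setype: container_file_t
      become: true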
Oct 11 01:35:09 compute-0 sudo[154097]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-itqpzvnbongigjmebtucgeztookfysph ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146508.815122-595-249890155161080/AnsiballZ_container_config_data.py'
Oct 11 01:35:09 compute-0 sudo[154097]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:09 compute-0 python3.9[154099]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/openstack/config/telemetry config_pattern=node_exporter.json debug=False
Oct 11 01:35:09 compute-0 sudo[154097]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:10 compute-0 sudo[154249]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-awmnvmsfzobneiehlrncjarcmyfhjzfc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146509.8204463-604-198473862789353/AnsiballZ_container_config_hash.py'
Oct 11 01:35:10 compute-0 sudo[154249]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:10 compute-0 python3.9[154251]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
Oct 11 01:35:10 compute-0 sudo[154249]: pam_unix(sudo:session): session closed for user root
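container_config_data and container_config_hash are edpm-ansible modules; the log shows only their arguments, not their output. The same rendered configuration can be inspected directly on the host using the logged paths (the exact hash layout under the config_vol_prefix is not shown in the log):

    ls /var/lib/openstack/config/telemetry/
    cat /var/lib/openstack/config/telemetry/node_exporter.json
    ls /var/lib/config-data/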
Oct 11 01:35:11 compute-0 sudo[154401]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jhzarlsuwsbflpbcprervxcsjhvnsigh ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760146510.942549-614-186226116392323/AnsiballZ_edpm_container_manage.py'
Oct 11 01:35:11 compute-0 sudo[154401]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:11 compute-0 python3[154403]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/openstack/config/telemetry config_id=edpm config_overrides={} config_patterns=node_exporter.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 01:35:13 compute-0 podman[154416]: 2025-10-11 01:35:13.042907462 +0000 UTC m=+1.295784807 image pull 0da6a335fe1356545476b749c68f022c897de3a2139e8f0054f6937349ee2b83 quay.io/prometheus/node-exporter:v1.5.0
Oct 11 01:35:13 compute-0 podman[154513]: 2025-10-11 01:35:13.287759114 +0000 UTC m=+0.085185527 container create adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, config_id=edpm, container_name=node_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 01:35:13 compute-0 podman[154513]: 2025-10-11 01:35:13.244390329 +0000 UTC m=+0.041816782 image pull 0da6a335fe1356545476b749c68f022c897de3a2139e8f0054f6937349ee2b83 quay.io/prometheus/node-exporter:v1.5.0
Oct 11 01:35:13 compute-0 python3[154403]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman create --name node_exporter --conmon-pidfile /run/node_exporter.pid --env OS_ENDPOINT_TYPE=internal --healthcheck-command /openstack/healthcheck node_exporter --label config_id=edpm --label container_name=node_exporter --label managed_by=edpm_ansible --label config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']} --log-driver journald --log-level info --network host --privileged=True --publish 9100:9100 --user root --volume /var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z --volume /var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z --volume /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw --volume /var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z quay.io/prometheus/node-exporter:v1.5.0 --web.config.file=/etc/node_exporter/node_exporter.yaml --web.disable-exporter-metrics --collector.systemd --collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\.service --no-collector.dmi --no-collector.entropy --no-collector.thermal_zone --no-collector.time --no-collector.timex --no-collector.uname --no-collector.stat --no-collector.hwmon --no-collector.os --no-collector.selinux --no-collector.textfile --no-collector.powersupplyclass --no-collector.pressure --no-collector.rapl
Oct 11 01:35:13 compute-0 sudo[154401]: pam_unix(sudo:session): session closed for user root
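The config_data label in the podman create command above is the parsed content of node_exporter.json, the file edpm_container_manage was pointed at via config_dir and config_patterns. Rewritten as the JSON file it came from (Python True becomes JSON true; the file path follows from the logged arguments):

    # /var/lib/openstack/config/telemetry/node_exporter.json (reconstructed from the config_data label above)
    {
      "image": "quay.io/prometheus/node-exporter:v1.5.0",
      "restart": "always",
      "recreate": true,
      "user": "root",
      "privileged": true,
      "ports": ["9100:9100"],
      "net": "host",
      "environment": {"OS_ENDPOINT_TYPE": "internal"},
      "healthcheck": {
        "test": "/openstack/healthcheck node_exporter",
        "mount": "/var/lib/openstack/healthchecks/node_exporter"
      },
      "command": [
        "--web.config.file=/etc/node_exporter/node_exporter.yaml",
        "--web.disable-exporter-metrics",
        "--collector.systemd",
        "--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service",
        "--no-collector.dmi",
        "--no-collector.entropy",
        "--no-collector.thermal_zone",
        "--no-collector.time",
        "--no-collector.timex",
        "--no-collector.uname",
        "--no-collector.stat",
        "--no-collector.hwmon",
        "--no-collector.os",
        "--no-collector.selinux",
        "--no-collector.textfile",
        "--no-collector.powersupplyclass",
        "--no-collector.pressure",
        "--no-collector.rapl"
      ],
      "volumes": [
        "/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z",
        "/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z",
        "/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw",
        "/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z"
      ]
    }

Note how each key maps to a flag in the logged command: ports to --publish, net to --network, environment to --env, healthcheck.test to --healthcheck-command, and command to the container arguments after the image.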
Oct 11 01:35:14 compute-0 sudo[154701]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zxptartjqrzlsmojrmsqowsefnkmnmcz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146513.7806652-622-28041245320826/AnsiballZ_stat.py'
Oct 11 01:35:14 compute-0 sudo[154701]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:14 compute-0 python3.9[154703]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:35:14 compute-0 sudo[154701]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:15 compute-0 sudo[154855]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lowhxuteqnykfqafahtfynwnaqdcznrm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146514.7970035-631-96955236659671/AnsiballZ_file.py'
Oct 11 01:35:15 compute-0 sudo[154855]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:15 compute-0 python3.9[154857]: ansible-file Invoked with path=/etc/systemd/system/edpm_node_exporter.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:35:15 compute-0 sudo[154855]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:16 compute-0 sudo[155006]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gcyufazgmhiqxqxnvjpqfxffvbbrnliy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146515.5670047-631-188316470603361/AnsiballZ_copy.py'
Oct 11 01:35:16 compute-0 sudo[155006]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:16 compute-0 python3.9[155008]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760146515.5670047-631-188316470603361/source dest=/etc/systemd/system/edpm_node_exporter.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:35:16 compute-0 sudo[155006]: pam_unix(sudo:session): session closed for user root
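The contents of the copied edpm_node_exporter.service are not logged. A representative sketch of the wrapper unit edpm-ansible generates for podman-managed containers, with only the PIDFile grounded in the --conmon-pidfile /run/node_exporter.pid flag from the create command above; everything else is an assumption:

    # /etc/systemd/system/edpm_node_exporter.service (sketch; actual content not in the log)
    [Unit]
    Description=node_exporter container
    Wants=network-online.target
    After=network-online.target

    [Service]
    Type=forking
    Restart=always
    PIDFile=/run/node_exporter.pid
    ExecStart=/usr/bin/podman start node_exporter
    ExecStop=/usr/bin/podman stop -t 10 node_exporter

    [Install]
    WantedBy=multi-user.target

The daemon_reload and restarted systemd tasks that follow are what turn this file into the "Starting node_exporter container..." sequence below.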
Oct 11 01:35:16 compute-0 sudo[155082]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iuyxpcjizzpmwrzmqykwrtactqoxtzlb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146515.5670047-631-188316470603361/AnsiballZ_systemd.py'
Oct 11 01:35:16 compute-0 sudo[155082]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:17 compute-0 python3.9[155084]: ansible-systemd Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 01:35:17 compute-0 systemd[1]: Reloading.
Oct 11 01:35:17 compute-0 systemd-rc-local-generator[155109]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:35:17 compute-0 systemd-sysv-generator[155115]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:35:17 compute-0 sudo[155082]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:17 compute-0 sudo[155193]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-swcwlicwjwudnrlgaofaorvidtjpnooz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146515.5670047-631-188316470603361/AnsiballZ_systemd.py'
Oct 11 01:35:17 compute-0 sudo[155193]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:18 compute-0 python3.9[155195]: ansible-systemd Invoked with state=restarted name=edpm_node_exporter.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:35:18 compute-0 systemd[1]: Reloading.
Oct 11 01:35:18 compute-0 systemd-rc-local-generator[155224]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:35:18 compute-0 systemd-sysv-generator[155229]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:35:18 compute-0 systemd[1]: Starting node_exporter container...
Oct 11 01:35:18 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:35:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/231aaa5d674eb54e8581f7727e06857007d94f590816ef0295f4b42fe3deb791/merged/etc/node_exporter/node_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/231aaa5d674eb54e8581f7727e06857007d94f590816ef0295f4b42fe3deb791/merged/etc/node_exporter/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:18 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae.
Oct 11 01:35:18 compute-0 podman[155234]: 2025-10-11 01:35:18.780546062 +0000 UTC m=+0.231678857 container init adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.807Z caller=node_exporter.go:180 level=info msg="Starting node_exporter" version="(version=1.5.0, branch=HEAD, revision=1b48970ffcf5630534fb00bb0687d73c66d1c959)"
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.807Z caller=node_exporter.go:181 level=info msg="Build context" build_context="(go=go1.19.3, user=root@6e7732a7b81b, date=20221129-18:59:09)"
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.807Z caller=node_exporter.go:183 level=warn msg="Node Exporter is running as root user. This exporter is designed to run as unprivileged user, root is not required."
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.808Z caller=diskstats_common.go:111 level=info collector=diskstats msg="Parsed flag --collector.diskstats.device-exclude" flag=^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\d+n\d+p)\d+$
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.808Z caller=diskstats_linux.go:264 level=error collector=diskstats msg="Failed to open directory, disabling udev device properties" path=/run/udev/data
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.808Z caller=systemd_linux.go:152 level=info collector=systemd msg="Parsed flag --collector.systemd.unit-include" flag=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\.service
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.808Z caller=systemd_linux.go:154 level=info collector=systemd msg="Parsed flag --collector.systemd.unit-exclude" flag=.+\.(automount|device|mount|scope|slice)
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.808Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/)
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:110 level=info msg="Enabled collectors"
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=arp
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=bcache
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=bonding
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=btrfs
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=conntrack
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=cpu
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=cpufreq
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=diskstats
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=edac
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=fibrechannel
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=filefd
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=filesystem
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=infiniband
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=ipvs
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=loadavg
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=mdadm
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=meminfo
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=netclass
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=netdev
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=netstat
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=nfs
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=nfsd
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=nvme
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=schedstat
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=sockstat
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=softnet
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=systemd
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=tapestats
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=udp_queues
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=vmstat
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=xfs
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.809Z caller=node_exporter.go:117 level=info collector=zfs
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.811Z caller=tls_config.go:232 level=info msg="Listening on" address=[::]:9100
Oct 11 01:35:18 compute-0 node_exporter[155249]: ts=2025-10-11T01:35:18.812Z caller=tls_config.go:268 level=info msg="TLS is enabled." http2=true address=[::]:9100
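"TLS is enabled" is node_exporter's exporter-toolkit acting on the --web.config.file flag. The mounted /etc/node_exporter/node_exporter.yaml is therefore a web config of roughly this shape; the tls.crt and tls.key filenames inside the mounted cert directory are assumptions, while the directory itself comes from the volume list above:

    # /etc/node_exporter/node_exporter.yaml (sketch; certificate filenames are assumptions)
    tls_server_config:
      cert_file: /etc/node_exporter/tls/tls.crt
      key_file: /etc/node_exporter/tls/tls.key

With host networking and port 9100 published, a scrape can be checked in place (assuming the web config does not demand client certificates):

    curl -sk https://localhost:9100/metrics | head -n 5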
Oct 11 01:35:18 compute-0 podman[155234]: 2025-10-11 01:35:18.822119292 +0000 UTC m=+0.273252037 container start adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 01:35:18 compute-0 podman[155234]: node_exporter
Oct 11 01:35:18 compute-0 systemd[1]: Started node_exporter container.
Oct 11 01:35:18 compute-0 sudo[155193]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:18 compute-0 podman[155258]: 2025-10-11 01:35:18.916631431 +0000 UTC m=+0.083349050 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
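This health_status=healthy event comes from the transient healthcheck timer systemd started a few lines up, which runs the mounted /openstack/healthcheck script inside the container. The same check can be driven by hand with podman:

    podman healthcheck run node_exporter && echo healthy
    podman ps --filter name=node_exporter    # STATUS column shows (healthy)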
Oct 11 01:35:19 compute-0 sudo[155431]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hdbkltlbtqvsazmxvddakgjgluqzhiht ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146519.1183395-655-3191197976987/AnsiballZ_systemd.py'
Oct 11 01:35:19 compute-0 sudo[155431]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:19 compute-0 python3.9[155433]: ansible-ansible.builtin.systemd Invoked with name=edpm_node_exporter.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:35:19 compute-0 systemd[1]: Stopping node_exporter container...
Oct 11 01:35:20 compute-0 systemd[1]: libpod-adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae.scope: Deactivated successfully.
Oct 11 01:35:20 compute-0 podman[155437]: 2025-10-11 01:35:20.027742748 +0000 UTC m=+0.071774991 container died adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 01:35:20 compute-0 systemd[1]: adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae-33aafe5bdaa423d9.timer: Deactivated successfully.
Oct 11 01:35:20 compute-0 systemd[1]: Stopped /usr/bin/podman healthcheck run adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae.
Oct 11 01:35:20 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae-userdata-shm.mount: Deactivated successfully.
Oct 11 01:35:20 compute-0 systemd[1]: var-lib-containers-storage-overlay-231aaa5d674eb54e8581f7727e06857007d94f590816ef0295f4b42fe3deb791-merged.mount: Deactivated successfully.
Oct 11 01:35:20 compute-0 podman[155437]: 2025-10-11 01:35:20.194881586 +0000 UTC m=+0.238913829 container cleanup adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:35:20 compute-0 podman[155437]: node_exporter
Oct 11 01:35:20 compute-0 systemd[1]: edpm_node_exporter.service: Main process exited, code=exited, status=2/INVALIDARGUMENT
Oct 11 01:35:20 compute-0 podman[155466]: node_exporter
Oct 11 01:35:20 compute-0 systemd[1]: edpm_node_exporter.service: Failed with result 'exit-code'.
Oct 11 01:35:20 compute-0 systemd[1]: Stopped node_exporter container.
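The restart requested at 01:35:19 is worth reading closely: systemd stops the unit, the container's main process exits with status 2 rather than 0, and systemd therefore records "Failed with result 'exit-code'" before immediately starting the unit again. The non-zero exit propagates from the container through conmon; the log does not show why node_exporter exits 2 on stop. The net effect is still the requested restart, which can be confirmed after the fact:

    systemctl show edpm_node_exporter.service -p ActiveState -p ExecMainStatus
    podman inspect --format '{{.State.ExitCode}}' node_exporter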
Oct 11 01:35:20 compute-0 systemd[1]: Starting node_exporter container...
Oct 11 01:35:20 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:35:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/231aaa5d674eb54e8581f7727e06857007d94f590816ef0295f4b42fe3deb791/merged/etc/node_exporter/node_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/231aaa5d674eb54e8581f7727e06857007d94f590816ef0295f4b42fe3deb791/merged/etc/node_exporter/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:20 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae.
Oct 11 01:35:20 compute-0 podman[155480]: 2025-10-11 01:35:20.492453553 +0000 UTC m=+0.168232888 container init adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.513Z caller=node_exporter.go:180 level=info msg="Starting node_exporter" version="(version=1.5.0, branch=HEAD, revision=1b48970ffcf5630534fb00bb0687d73c66d1c959)"
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.513Z caller=node_exporter.go:181 level=info msg="Build context" build_context="(go=go1.19.3, user=root@6e7732a7b81b, date=20221129-18:59:09)"
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.513Z caller=node_exporter.go:183 level=warn msg="Node Exporter is running as root user. This exporter is designed to run as unprivileged user, root is not required."
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.514Z caller=diskstats_common.go:111 level=info collector=diskstats msg="Parsed flag --collector.diskstats.device-exclude" flag=^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\d+n\d+p)\d+$
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.514Z caller=diskstats_linux.go:264 level=error collector=diskstats msg="Failed to open directory, disabling udev device properties" path=/run/udev/data
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.514Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/)
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.514Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=systemd_linux.go:152 level=info collector=systemd msg="Parsed flag --collector.systemd.unit-include" flag=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\.service
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=systemd_linux.go:154 level=info collector=systemd msg="Parsed flag --collector.systemd.unit-exclude" flag=.+\.(automount|device|mount|scope|slice)
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:110 level=info msg="Enabled collectors"
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=arp
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=bcache
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=bonding
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=btrfs
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=conntrack
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=cpu
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=cpufreq
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=diskstats
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=edac
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=fibrechannel
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=filefd
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=filesystem
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=infiniband
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=ipvs
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=loadavg
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=mdadm
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=meminfo
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=netclass
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=netdev
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=netstat
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=nfs
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=nfsd
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=nvme
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=schedstat
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=sockstat
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=softnet
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=systemd
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=tapestats
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=udp_queues
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=vmstat
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=xfs
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.515Z caller=node_exporter.go:117 level=info collector=zfs
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.516Z caller=tls_config.go:232 level=info msg="Listening on" address=[::]:9100
Oct 11 01:35:20 compute-0 node_exporter[155496]: ts=2025-10-11T01:35:20.517Z caller=tls_config.go:268 level=info msg="TLS is enabled." http2=true address=[::]:9100
Oct 11 01:35:20 compute-0 podman[155480]: 2025-10-11 01:35:20.530335216 +0000 UTC m=+0.206114541 container start adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 01:35:20 compute-0 podman[155480]: node_exporter
Oct 11 01:35:20 compute-0 systemd[1]: Started node_exporter container.
Oct 11 01:35:20 compute-0 sudo[155431]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:20 compute-0 podman[155505]: 2025-10-11 01:35:20.656933645 +0000 UTC m=+0.107472215 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 01:35:21 compute-0 sudo[155678]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kcjunndlnvzuypxiuvqrmilviybkefzl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146520.8111327-663-78630646188708/AnsiballZ_stat.py'
Oct 11 01:35:21 compute-0 sudo[155678]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:21 compute-0 python3.9[155680]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/podman_exporter/healthcheck follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:35:21 compute-0 sudo[155678]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:21 compute-0 sudo[155801]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-trphgybisydcwrubucefuahonugdvmgb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146520.8111327-663-78630646188708/AnsiballZ_copy.py'
Oct 11 01:35:21 compute-0 sudo[155801]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:22 compute-0 python3.9[155803]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/healthchecks/podman_exporter/ group=zuul mode=0700 owner=zuul setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760146520.8111327-663-78630646188708/.source _original_basename=healthcheck follow=False checksum=e380c11c36804bfc65a818f2960cfa663daacfe5 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:35:22 compute-0 sudo[155801]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:22 compute-0 sudo[155953]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-njvlaewefawauukfyhtckgwdszljgdyv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146522.5831962-680-162288356927351/AnsiballZ_container_config_data.py'
Oct 11 01:35:22 compute-0 sudo[155953]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:23 compute-0 python3.9[155955]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/openstack/config/telemetry config_pattern=podman_exporter.json debug=False
Oct 11 01:35:23 compute-0 sudo[155953]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:23 compute-0 sudo[156105]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kliyawcjokawonmrrpgaapjmkebpngxu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146523.5277302-689-190508765071452/AnsiballZ_container_config_hash.py'
Oct 11 01:35:23 compute-0 sudo[156105]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:24 compute-0 python3.9[156107]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
Oct 11 01:35:24 compute-0 sudo[156105]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:24 compute-0 sudo[156257]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-axzvlfxqjspqabsnynohemmmhlxuzzji ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760146524.5264535-699-230104070336781/AnsiballZ_edpm_container_manage.py'
Oct 11 01:35:24 compute-0 sudo[156257]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:25 compute-0 python3[156259]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/openstack/config/telemetry config_id=edpm config_overrides={} config_patterns=podman_exporter.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 01:35:26 compute-0 podman[156271]: 2025-10-11 01:35:26.730324391 +0000 UTC m=+1.434880965 image pull e56d40e393eb5ea8704d9af8cf0d74665df83747106713fda91530f201837815 quay.io/navidys/prometheus-podman-exporter:v1.10.1
Oct 11 01:35:26 compute-0 podman[156371]: 2025-10-11 01:35:26.971353897 +0000 UTC m=+0.072429832 container create 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, container_name=podman_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, config_id=edpm)
Oct 11 01:35:26 compute-0 podman[156371]: 2025-10-11 01:35:26.932001175 +0000 UTC m=+0.033077170 image pull e56d40e393eb5ea8704d9af8cf0d74665df83747106713fda91530f201837815 quay.io/navidys/prometheus-podman-exporter:v1.10.1
Oct 11 01:35:26 compute-0 python3[156259]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman create --name podman_exporter --conmon-pidfile /run/podman_exporter.pid --env OS_ENDPOINT_TYPE=internal --env CONTAINER_HOST=unix:///run/podman/podman.sock --healthcheck-command /openstack/healthcheck podman_exporter --label config_id=edpm --label container_name=podman_exporter --label managed_by=edpm_ansible --label config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']} --log-driver journald --log-level info --network host --privileged=True --publish 9882:9882 --user root --volume /var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z --volume /var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z --volume /run/podman/podman.sock:/run/podman/podman.sock:rw,z --volume /var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z quay.io/navidys/prometheus-podman-exporter:v1.10.1 --web.config.file=/etc/podman_exporter/podman_exporter.yaml
Oct 11 01:35:27 compute-0 sudo[156257]: pam_unix(sudo:session): session closed for user root
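Unlike node_exporter, podman_exporter needs the host's podman API: CONTAINER_HOST=unix:///run/podman/podman.sock in the environment plus the rw,z bind mount of the same socket in the create command above. On the host that socket is normally provided by the podman.socket unit, and its liveness can be checked against the REST API's _ping endpoint (endpoint form as documented for podman's libpod API):

    systemctl status podman.socket
    curl -s --unix-socket /run/podman/podman.sock http://d/libpod/_ping    # prints OK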
Oct 11 01:35:27 compute-0 sudo[156559]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ieiidkzosfcoiylcwqsimhzywiejjdqn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146527.4456067-707-123647339097766/AnsiballZ_stat.py'
Oct 11 01:35:27 compute-0 sudo[156559]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:28 compute-0 python3.9[156561]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:35:28 compute-0 sudo[156559]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:28 compute-0 sudo[156713]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vdmlaokcodrnfvalwnznqvfewiyqvhir ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146528.373158-716-137317716460013/AnsiballZ_file.py'
Oct 11 01:35:28 compute-0 sudo[156713]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:28 compute-0 python3.9[156715]: ansible-file Invoked with path=/etc/systemd/system/edpm_podman_exporter.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:35:28 compute-0 sudo[156713]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:29 compute-0 sudo[156864]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iccbwzeieeaxehqdbontiyglnzuvyxum ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146529.0669265-716-173092825251721/AnsiballZ_copy.py'
Oct 11 01:35:29 compute-0 sudo[156864]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:29 compute-0 python3.9[156866]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760146529.0669265-716-173092825251721/source dest=/etc/systemd/system/edpm_podman_exporter.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:35:29 compute-0 sudo[156864]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:30 compute-0 sudo[156940]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aqeesnuqpembtfbfjbfethzhnxawpudt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146529.0669265-716-173092825251721/AnsiballZ_systemd.py'
Oct 11 01:35:30 compute-0 sudo[156940]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:30 compute-0 python3.9[156942]: ansible-systemd Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 01:35:30 compute-0 systemd[1]: Reloading.
Oct 11 01:35:30 compute-0 systemd-rc-local-generator[156970]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:35:30 compute-0 systemd-sysv-generator[156974]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:35:30 compute-0 sudo[156940]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:31 compute-0 sudo[157051]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lnqvluopyrhvvubrkivenmoimloqqeqf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146529.0669265-716-173092825251721/AnsiballZ_systemd.py'
Oct 11 01:35:31 compute-0 sudo[157051]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:31 compute-0 python3.9[157053]: ansible-systemd Invoked with state=restarted name=edpm_podman_exporter.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:35:31 compute-0 systemd[1]: Reloading.
Oct 11 01:35:31 compute-0 systemd-sysv-generator[157087]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:35:31 compute-0 systemd-rc-local-generator[157083]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:35:31 compute-0 systemd[1]: Starting podman_exporter container...
Oct 11 01:35:32 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:35:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8890de7455df195df0bcfea2886507750b2c2505c8ec69ea3d86cad543bcbee1/merged/etc/podman_exporter/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8890de7455df195df0bcfea2886507750b2c2505c8ec69ea3d86cad543bcbee1/merged/etc/podman_exporter/podman_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:32 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899.
Oct 11 01:35:32 compute-0 podman[157093]: 2025-10-11 01:35:32.189462264 +0000 UTC m=+0.198146704 container init 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 01:35:32 compute-0 podman_exporter[157108]: ts=2025-10-11T01:35:32.220Z caller=exporter.go:68 level=info msg="Starting podman-prometheus-exporter" version="(version=1.10.1, branch=HEAD, revision=1)"
Oct 11 01:35:32 compute-0 podman_exporter[157108]: ts=2025-10-11T01:35:32.220Z caller=exporter.go:69 level=info msg=metrics enhanced=false
Oct 11 01:35:32 compute-0 podman_exporter[157108]: ts=2025-10-11T01:35:32.220Z caller=handler.go:94 level=info msg="enabled collectors"
Oct 11 01:35:32 compute-0 podman_exporter[157108]: ts=2025-10-11T01:35:32.220Z caller=handler.go:105 level=info collector=container
Oct 11 01:35:32 compute-0 systemd[1]: Starting Podman API Service...
Oct 11 01:35:32 compute-0 podman[157093]: 2025-10-11 01:35:32.23865716 +0000 UTC m=+0.247341580 container start 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 01:35:32 compute-0 systemd[1]: Started Podman API Service.
Oct 11 01:35:32 compute-0 podman[157093]: podman_exporter
Oct 11 01:35:32 compute-0 systemd[1]: Started podman_exporter container.
Oct 11 01:35:32 compute-0 podman[157119]: time="2025-10-11T01:35:32Z" level=info msg="/usr/bin/podman filtering at log level info"
Oct 11 01:35:32 compute-0 podman[157119]: time="2025-10-11T01:35:32Z" level=info msg="Setting parallel job count to 25"
Oct 11 01:35:32 compute-0 podman[157119]: time="2025-10-11T01:35:32Z" level=info msg="Using sqlite as database backend"
Oct 11 01:35:32 compute-0 podman[157119]: time="2025-10-11T01:35:32Z" level=info msg="Not using native diff for overlay, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled"
Oct 11 01:35:32 compute-0 podman[157119]: time="2025-10-11T01:35:32Z" level=info msg="Using systemd socket activation to determine API endpoint"
Oct 11 01:35:32 compute-0 podman[157119]: time="2025-10-11T01:35:32Z" level=info msg="API service listening on \"/run/podman/podman.sock\". URI: \"unix:///run/podman/podman.sock\""
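[annotation] Once the socket-activated Podman API service is listening on /run/podman/podman.sock, the libpod REST endpoints that podman_exporter calls (the _ping and containers/json requests logged just below) can also be exercised by hand with curl's unix-socket support; the hostname "d" is a dummy placeholder required by curl's URL syntax:

    # Ping the libpod API over the socket announced above (returns "OK").
    sudo curl --unix-socket /run/podman/podman.sock \
        http://d/v4.9.3/libpod/_ping

    # List containers the same way the exporter does.
    sudo curl --unix-socket /run/podman/podman.sock \
        'http://d/v4.9.3/libpod/containers/json?all=true' | head -c 300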
Oct 11 01:35:32 compute-0 sudo[157051]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:32 compute-0 podman[157119]: @ - - [11/Oct/2025:01:35:32 +0000] "GET /v4.9.3/libpod/_ping HTTP/1.1" 200 2 "" "Go-http-client/1.1"
Oct 11 01:35:32 compute-0 podman[157119]: time="2025-10-11T01:35:32Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:35:32 compute-0 podman[157117]: 2025-10-11 01:35:32.354082346 +0000 UTC m=+0.102451189 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=starting, health_failing_streak=1, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 01:35:32 compute-0 podman[157119]: @ - - [11/Oct/2025:01:35:32 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=true&sync=false HTTP/1.1" 200 9686 "" "Go-http-client/1.1"
Oct 11 01:35:32 compute-0 podman_exporter[157108]: ts=2025-10-11T01:35:32.358Z caller=exporter.go:96 level=info msg="Listening on" address=:9882
Oct 11 01:35:32 compute-0 podman_exporter[157108]: ts=2025-10-11T01:35:32.359Z caller=tls_config.go:313 level=info msg="Listening on" address=[::]:9882
Oct 11 01:35:32 compute-0 podman_exporter[157108]: ts=2025-10-11T01:35:32.360Z caller=tls_config.go:349 level=info msg="TLS is enabled." http2=true address=[::]:9882
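[annotation] The exporter was started with --web.config.file=/etc/podman_exporter/podman_exporter.yaml, and the tls_config.go messages above come from the Prometheus exporter-toolkit, so the file presumably follows that toolkit's web configuration schema. A minimal sketch, assuming that schema and the tls.crt/tls.key file layout used elsewhere in this log for mounted certificate directories (the real file contents are not shown in the log):

    # Hypothetical /var/lib/openstack/config/telemetry/podman_exporter.yaml,
    # mounted into the container at /etc/podman_exporter/podman_exporter.yaml.
    cat <<'EOF' > podman_exporter.yaml
    tls_server_config:
      cert_file: /etc/podman_exporter/tls/tls.crt
      key_file: /etc/podman_exporter/tls/tls.key
    EOF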
Oct 11 01:35:32 compute-0 systemd[1]: 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899-386ed991d4e01075.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 01:35:32 compute-0 systemd[1]: 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899-386ed991d4e01075.service: Failed with result 'exit-code'.
Oct 11 01:35:33 compute-0 sudo[157317]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kyqorjwrbgxqyhwyfwsrxcvpeqhtrupy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146532.5845568-740-41346147906670/AnsiballZ_systemd.py'
Oct 11 01:35:33 compute-0 sudo[157317]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:33 compute-0 podman[157278]: 2025-10-11 01:35:33.133720157 +0000 UTC m=+0.144532988 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, io.buildah.version=1.41.3, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team)
Oct 11 01:35:33 compute-0 python3.9[157324]: ansible-ansible.builtin.systemd Invoked with name=edpm_podman_exporter.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:35:33 compute-0 systemd[1]: Stopping podman_exporter container...
Oct 11 01:35:33 compute-0 podman[157119]: @ - - [11/Oct/2025:01:35:32 +0000] "GET /v4.9.3/libpod/events?filters=%7B%7D&since=&stream=true&until= HTTP/1.1" 200 3437 "" "Go-http-client/1.1"
Oct 11 01:35:33 compute-0 systemd[1]: libpod-2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899.scope: Deactivated successfully.
Oct 11 01:35:33 compute-0 podman[157334]: 2025-10-11 01:35:33.556506319 +0000 UTC m=+0.052613055 container died 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 01:35:33 compute-0 systemd[1]: 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899-386ed991d4e01075.timer: Deactivated successfully.
Oct 11 01:35:33 compute-0 systemd[1]: Stopped /usr/bin/podman healthcheck run 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899.
Oct 11 01:35:33 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899-userdata-shm.mount: Deactivated successfully.
Oct 11 01:35:33 compute-0 systemd[1]: var-lib-containers-storage-overlay-8890de7455df195df0bcfea2886507750b2c2505c8ec69ea3d86cad543bcbee1-merged.mount: Deactivated successfully.
Oct 11 01:35:33 compute-0 podman[157334]: 2025-10-11 01:35:33.772888198 +0000 UTC m=+0.268994954 container cleanup 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 01:35:33 compute-0 podman[157334]: podman_exporter
Oct 11 01:35:33 compute-0 systemd[1]: edpm_podman_exporter.service: Main process exited, code=exited, status=2/INVALIDARGUMENT
Oct 11 01:35:33 compute-0 podman[157362]: podman_exporter
Oct 11 01:35:33 compute-0 systemd[1]: edpm_podman_exporter.service: Failed with result 'exit-code'.
Oct 11 01:35:33 compute-0 systemd[1]: Stopped podman_exporter container.
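[annotation] The status=2/INVALIDARGUMENT above is the service's main-process exit status as systemd records it while the Ansible-requested restart tears the container down; the service is started again immediately below rather than crash-looping. When triaging such exits, the container and service views can be read back directly:

    # Container-side view: last exit code and current state.
    sudo podman inspect --format '{{.State.ExitCode}} {{.State.Status}}' podman_exporter

    # Service-side view of the same stop/start cycle.
    sudo systemctl status edpm_podman_exporter.service --no-pager
    sudo journalctl -u edpm_podman_exporter.service -n 50 --no-pager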
Oct 11 01:35:33 compute-0 systemd[1]: Starting podman_exporter container...
Oct 11 01:35:34 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:35:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8890de7455df195df0bcfea2886507750b2c2505c8ec69ea3d86cad543bcbee1/merged/etc/podman_exporter/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8890de7455df195df0bcfea2886507750b2c2505c8ec69ea3d86cad543bcbee1/merged/etc/podman_exporter/podman_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:34 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899.
Oct 11 01:35:34 compute-0 podman[157376]: 2025-10-11 01:35:34.079415469 +0000 UTC m=+0.166494891 container init 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 01:35:34 compute-0 podman_exporter[157391]: ts=2025-10-11T01:35:34.099Z caller=exporter.go:68 level=info msg="Starting podman-prometheus-exporter" version="(version=1.10.1, branch=HEAD, revision=1)"
Oct 11 01:35:34 compute-0 podman_exporter[157391]: ts=2025-10-11T01:35:34.099Z caller=exporter.go:69 level=info msg=metrics enhanced=false
Oct 11 01:35:34 compute-0 podman_exporter[157391]: ts=2025-10-11T01:35:34.099Z caller=handler.go:94 level=info msg="enabled collectors"
Oct 11 01:35:34 compute-0 podman_exporter[157391]: ts=2025-10-11T01:35:34.099Z caller=handler.go:105 level=info collector=container
Oct 11 01:35:34 compute-0 podman[157119]: @ - - [11/Oct/2025:01:35:34 +0000] "GET /v4.9.3/libpod/_ping HTTP/1.1" 200 2 "" "Go-http-client/1.1"
Oct 11 01:35:34 compute-0 podman[157119]: time="2025-10-11T01:35:34Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:35:34 compute-0 podman[157376]: 2025-10-11 01:35:34.117715354 +0000 UTC m=+0.204794716 container start 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 01:35:34 compute-0 podman[157376]: podman_exporter
Oct 11 01:35:34 compute-0 podman[157119]: @ - - [11/Oct/2025:01:35:34 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=true&sync=false HTTP/1.1" 200 9688 "" "Go-http-client/1.1"
Oct 11 01:35:34 compute-0 podman_exporter[157391]: ts=2025-10-11T01:35:34.126Z caller=exporter.go:96 level=info msg="Listening on" address=:9882
Oct 11 01:35:34 compute-0 podman_exporter[157391]: ts=2025-10-11T01:35:34.127Z caller=tls_config.go:313 level=info msg="Listening on" address=[::]:9882
Oct 11 01:35:34 compute-0 podman_exporter[157391]: ts=2025-10-11T01:35:34.128Z caller=tls_config.go:349 level=info msg="TLS is enabled." http2=true address=[::]:9882
Oct 11 01:35:34 compute-0 systemd[1]: Started podman_exporter container.
Oct 11 01:35:34 compute-0 sudo[157317]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:34 compute-0 podman[157401]: 2025-10-11 01:35:34.222708224 +0000 UTC m=+0.089113704 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
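[annotation] The health_status=healthy event above is emitted by the transient "podman healthcheck run" units that systemd schedules for the container (the timer/service pair named after the container ID earlier in the log). The same check can be invoked manually by container name:

    # Run the container's configured healthcheck once;
    # prints nothing and exits 0 when healthy.
    sudo podman healthcheck run podman_exporter
    echo $?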
Oct 11 01:35:35 compute-0 sudo[157574]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mczgpteissmdwkizsewncvvwgvdhovwf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146534.498449-748-113461408166612/AnsiballZ_stat.py'
Oct 11 01:35:35 compute-0 sudo[157574]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:35 compute-0 python3.9[157576]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/openstack_network_exporter/healthcheck follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:35:35 compute-0 sudo[157574]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:35 compute-0 sudo[157697]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mzzmshqdfktulrxgkfvzwoltaluyhmrs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146534.498449-748-113461408166612/AnsiballZ_copy.py'
Oct 11 01:35:35 compute-0 sudo[157697]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:35 compute-0 python3.9[157699]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/healthchecks/openstack_network_exporter/ group=zuul mode=0700 owner=zuul setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760146534.498449-748-113461408166612/.source _original_basename=healthcheck follow=False checksum=e380c11c36804bfc65a818f2960cfa663daacfe5 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:35:35 compute-0 sudo[157697]: pam_unix(sudo:session): session closed for user root
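[annotation] The ansible.legacy.copy call above installs the healthcheck script with setype=container_file_t, the SELinux type that makes the later :ro,z volume mount readable from inside the container. Roughly the same effect by hand, using the mode, owner, and destination from the logged task (a sketch, not the module's exact implementation):

    # Install the healthcheck script with the SELinux type the copy task sets.
    sudo install -m 0700 -o zuul -g zuul healthcheck \
        /var/lib/openstack/healthchecks/openstack_network_exporter/healthcheck
    sudo chcon -t container_file_t \
        /var/lib/openstack/healthchecks/openstack_network_exporter/healthcheck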
Oct 11 01:35:36 compute-0 sudo[157849]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eqfckheyyiwbqzfjsufidvmwoxashsmy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146536.4186506-765-5422705065885/AnsiballZ_container_config_data.py'
Oct 11 01:35:36 compute-0 sudo[157849]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:37 compute-0 python3.9[157851]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/openstack/config/telemetry config_pattern=openstack_network_exporter.json debug=False
Oct 11 01:35:37 compute-0 sudo[157849]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:37 compute-0 podman[157852]: 2025-10-11 01:35:37.176437708 +0000 UTC m=+0.079349189 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=starting, health_failing_streak=2, health_log=, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, org.label-schema.build-date=20251007, org.label-schema.vendor=CentOS, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 01:35:37 compute-0 systemd[1]: c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6-2870149d5fcb13d2.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 01:35:37 compute-0 systemd[1]: c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6-2870149d5fcb13d2.service: Failed with result 'exit-code'.
Oct 11 01:35:37 compute-0 sudo[158019]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zgxagidqobodhkkguznczntgaxdypckb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146537.3489885-774-203658595680463/AnsiballZ_container_config_hash.py'
Oct 11 01:35:37 compute-0 sudo[158019]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:37 compute-0 python3.9[158021]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
Oct 11 01:35:37 compute-0 sudo[158019]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:38 compute-0 sudo[158171]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-btpldbupduchhvukgqycwenjofmrmezb ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760146538.370243-784-190619999229453/AnsiballZ_edpm_container_manage.py'
Oct 11 01:35:38 compute-0 sudo[158171]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:39 compute-0 python3[158173]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/openstack/config/telemetry config_id=edpm config_overrides={} config_patterns=openstack_network_exporter.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 01:35:41 compute-0 podman[158185]: 2025-10-11 01:35:41.73218022 +0000 UTC m=+2.567321872 image pull 186c5e97c6f6912533851a0044ea6da23938910e7bddfb4a6c0be9b48ab2a1d1 quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified
Oct 11 01:35:41 compute-0 podman[158283]: 2025-10-11 01:35:41.971691316 +0000 UTC m=+0.075831668 container create ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, distribution-scope=public, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, architecture=x86_64, io.openshift.tags=minimal rhel9, build-date=2025-08-20T13:12:41, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=openstack_network_exporter, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.buildah.version=1.33.7, vendor=Red Hat, Inc., managed_by=edpm_ansible, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.6, io.openshift.expose-services=, release=1755695350, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, maintainer=Red Hat, Inc., config_id=edpm, name=ubi9-minimal, com.redhat.component=ubi9-minimal-container)
Oct 11 01:35:41 compute-0 podman[158283]: 2025-10-11 01:35:41.936096313 +0000 UTC m=+0.040236745 image pull 186c5e97c6f6912533851a0044ea6da23938910e7bddfb4a6c0be9b48ab2a1d1 quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified
Oct 11 01:35:41 compute-0 python3[158173]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman create --name openstack_network_exporter --conmon-pidfile /run/openstack_network_exporter.pid --env OS_ENDPOINT_TYPE=internal --env OPENSTACK_NETWORK_EXPORTER_YAML=/etc/openstack_network_exporter/openstack_network_exporter.yaml --healthcheck-command /openstack/healthcheck openstack-netwo --label config_id=edpm --label container_name=openstack_network_exporter --label managed_by=edpm_ansible --label config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']} --log-driver journald --log-level info --network host --privileged=True --publish 9105:9105 --volume /var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z --volume /var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z --volume /var/run/openvswitch:/run/openvswitch:rw,z --volume /var/lib/openvswitch/ovn:/run/ovn:rw,z --volume /proc:/host/proc:ro --volume /var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified
Oct 11 01:35:42 compute-0 sudo[158171]: pam_unix(sudo:session): session closed for user root
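[annotation] The PODMAN-CONTAINER-DEBUG line above shows how edpm_container_manage expands the JSON it matched under /var/lib/openstack/config/telemetry (config_patterns=openstack_network_exporter.json) into a full "podman create" invocation: each config_data key maps onto a flag (net → --network, ports → --publish, healthcheck.test → --healthcheck-command, volumes → repeated --volume). A sketch of what that source JSON presumably contains, transcribed from the logged config_data — key order and exact on-disk formatting are assumptions:

    # Hypothetical /var/lib/openstack/config/telemetry/openstack_network_exporter.json,
    # reconstructed from the config_data label logged above.
    cat <<'EOF' > openstack_network_exporter.json
    {
      "image": "quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified",
      "restart": "always",
      "recreate": true,
      "privileged": true,
      "ports": ["9105:9105"],
      "command": [],
      "net": "host",
      "environment": {
        "OS_ENDPOINT_TYPE": "internal",
        "OPENSTACK_NETWORK_EXPORTER_YAML": "/etc/openstack_network_exporter/openstack_network_exporter.yaml"
      },
      "healthcheck": {
        "test": "/openstack/healthcheck openstack-netwo",
        "mount": "/var/lib/openstack/healthchecks/openstack_network_exporter"
      },
      "volumes": [
        "/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z",
        "/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z",
        "/var/run/openvswitch:/run/openvswitch:rw,z",
        "/var/lib/openvswitch/ovn:/run/ovn:rw,z",
        "/proc:/host/proc:ro",
        "/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z"
      ]
    }
    EOF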
Oct 11 01:35:42 compute-0 sudo[158471]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jofzuhmfmiyufdkppentldnnfztkikvt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146542.4342117-792-180878041854606/AnsiballZ_stat.py'
Oct 11 01:35:42 compute-0 sudo[158471]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:43 compute-0 python3.9[158473]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:35:43 compute-0 sudo[158471]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:43 compute-0 sudo[158625]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fgdhpsqkovqekvlyomzysrfiooeuonoe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146543.4132335-801-259628965751798/AnsiballZ_file.py'
Oct 11 01:35:43 compute-0 sudo[158625]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:44 compute-0 python3.9[158627]: ansible-file Invoked with path=/etc/systemd/system/edpm_openstack_network_exporter.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:35:44 compute-0 sudo[158625]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:44 compute-0 sudo[158776]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gipocqeqirerqacquccvozeumbbqekai ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146544.1433833-801-225490355977598/AnsiballZ_copy.py'
Oct 11 01:35:44 compute-0 sudo[158776]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:45 compute-0 python3.9[158778]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760146544.1433833-801-225490355977598/source dest=/etc/systemd/system/edpm_openstack_network_exporter.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:35:45 compute-0 sudo[158776]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:45 compute-0 sudo[158852]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nksammdouopocucpccndnqtfezvrrqzx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146544.1433833-801-225490355977598/AnsiballZ_systemd.py'
Oct 11 01:35:45 compute-0 sudo[158852]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:45 compute-0 python3.9[158854]: ansible-systemd Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 01:35:45 compute-0 systemd[1]: Reloading.
Oct 11 01:35:45 compute-0 systemd-sysv-generator[158885]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:35:45 compute-0 systemd-rc-local-generator[158882]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:35:46 compute-0 sudo[158852]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:46 compute-0 sudo[158963]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oyunqogkdcoxqtuydakyewnlahofamwo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146544.1433833-801-225490355977598/AnsiballZ_systemd.py'
Oct 11 01:35:46 compute-0 sudo[158963]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:46 compute-0 python3.9[158965]: ansible-systemd Invoked with state=restarted name=edpm_openstack_network_exporter.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:35:46 compute-0 systemd[1]: Reloading.
Oct 11 01:35:46 compute-0 systemd-sysv-generator[158997]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:35:46 compute-0 systemd-rc-local-generator[158993]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:35:47 compute-0 systemd[1]: Starting openstack_network_exporter container...
Oct 11 01:35:47 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:35:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/71da1f264f6c84273ff7279842042e12ee9d363bb98b441e04efeeff07ab2585/merged/run/ovn supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/71da1f264f6c84273ff7279842042e12ee9d363bb98b441e04efeeff07ab2585/merged/etc/openstack_network_exporter/openstack_network_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/71da1f264f6c84273ff7279842042e12ee9d363bb98b441e04efeeff07ab2585/merged/etc/openstack_network_exporter/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:47 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46.
Oct 11 01:35:47 compute-0 podman[159004]: 2025-10-11 01:35:47.359355024 +0000 UTC m=+0.170004696 container init ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, release=1755695350, io.buildah.version=1.33.7, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, managed_by=edpm_ansible, name=ubi9-minimal, vcs-type=git, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, container_name=openstack_network_exporter, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., io.openshift.expose-services=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., version=9.6, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, com.redhat.component=ubi9-minimal-container, build-date=2025-08-20T13:12:41, config_id=edpm)
Oct 11 01:35:47 compute-0 openstack_network_exporter[159020]: INFO    01:35:47 main.go:48: registering *bridge.Collector
Oct 11 01:35:47 compute-0 openstack_network_exporter[159020]: INFO    01:35:47 main.go:48: registering *coverage.Collector
Oct 11 01:35:47 compute-0 openstack_network_exporter[159020]: INFO    01:35:47 main.go:48: registering *datapath.Collector
Oct 11 01:35:47 compute-0 openstack_network_exporter[159020]: INFO    01:35:47 main.go:48: registering *iface.Collector
Oct 11 01:35:47 compute-0 openstack_network_exporter[159020]: INFO    01:35:47 main.go:48: registering *memory.Collector
Oct 11 01:35:47 compute-0 openstack_network_exporter[159020]: INFO    01:35:47 main.go:48: registering *ovnnorthd.Collector
Oct 11 01:35:47 compute-0 openstack_network_exporter[159020]: INFO    01:35:47 main.go:48: registering *ovn.Collector
Oct 11 01:35:47 compute-0 openstack_network_exporter[159020]: INFO    01:35:47 main.go:48: registering *ovsdbserver.Collector
Oct 11 01:35:47 compute-0 openstack_network_exporter[159020]: INFO    01:35:47 main.go:48: registering *pmd_perf.Collector
Oct 11 01:35:47 compute-0 openstack_network_exporter[159020]: INFO    01:35:47 main.go:48: registering *pmd_rxq.Collector
Oct 11 01:35:47 compute-0 openstack_network_exporter[159020]: INFO    01:35:47 main.go:48: registering *vswitch.Collector
Oct 11 01:35:47 compute-0 openstack_network_exporter[159020]: NOTICE  01:35:47 main.go:76: listening on https://:9105/metrics
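[annotation] With all collectors registered, the exporter serves TLS-protected metrics on :9105 as announced above. A quick scrape to confirm from the host; the ca.crt path is an assumption modelled on the cert layout mounted for ovn_controller earlier in this log:

    # Scrape the HTTPS metrics endpoint, verifying against the deployed CA.
    curl --cacert /var/lib/openstack/certs/telemetry/default/ca.crt \
        https://localhost:9105/metrics | head -n 5

    # Or skip verification for a quick smoke test.
    curl -ks https://localhost:9105/metrics | head -n 5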
Oct 11 01:35:47 compute-0 podman[159004]: 2025-10-11 01:35:47.385686876 +0000 UTC m=+0.196336478 container start ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, name=ubi9-minimal, release=1755695350, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, io.openshift.tags=minimal rhel9, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, url=https://catalog.redhat.com/en/search?searchType=containers, vcs-type=git, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, build-date=2025-08-20T13:12:41, container_name=openstack_network_exporter, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, maintainer=Red Hat, Inc., io.openshift.expose-services=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., managed_by=edpm_ansible, com.redhat.component=ubi9-minimal-container, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, version=9.6)
Oct 11 01:35:47 compute-0 podman[159004]: openstack_network_exporter
Oct 11 01:35:47 compute-0 systemd[1]: Started openstack_network_exporter container.
Oct 11 01:35:47 compute-0 sudo[158963]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:47 compute-0 podman[159030]: 2025-10-11 01:35:47.51353427 +0000 UTC m=+0.108841178 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-type=git, container_name=openstack_network_exporter, distribution-scope=public, release=1755695350, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, name=ubi9-minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, build-date=2025-08-20T13:12:41, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., managed_by=edpm_ansible, io.buildah.version=1.33.7, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.component=ubi9-minimal-container, url=https://catalog.redhat.com/en/search?searchType=containers, version=9.6, architecture=x86_64, maintainer=Red Hat, Inc., io.openshift.expose-services=, io.openshift.tags=minimal rhel9)
Oct 11 01:35:48 compute-0 sudo[159202]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nebcewzwfxsejlyvypfwczygkkgfweca ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146547.6949844-825-9449526385172/AnsiballZ_systemd.py'
Oct 11 01:35:48 compute-0 sudo[159202]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:48 compute-0 python3.9[159204]: ansible-ansible.builtin.systemd Invoked with name=edpm_openstack_network_exporter.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:35:48 compute-0 systemd[1]: Stopping openstack_network_exporter container...
Oct 11 01:35:48 compute-0 systemd[1]: libpod-ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46.scope: Deactivated successfully.
Oct 11 01:35:48 compute-0 podman[159208]: 2025-10-11 01:35:48.553589063 +0000 UTC m=+0.069818065 container died ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, version=9.6, vcs-type=git, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, distribution-scope=public, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., io.openshift.expose-services=, name=ubi9-minimal, build-date=2025-08-20T13:12:41, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, container_name=openstack_network_exporter, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, managed_by=edpm_ansible, architecture=x86_64, com.redhat.component=ubi9-minimal-container, config_id=edpm, io.openshift.tags=minimal rhel9, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.33.7)
Oct 11 01:35:48 compute-0 systemd[1]: ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46-52850eabfa7e0932.timer: Deactivated successfully.
Oct 11 01:35:48 compute-0 systemd[1]: Stopped /usr/bin/podman healthcheck run ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46.
Oct 11 01:35:48 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46-userdata-shm.mount: Deactivated successfully.
Oct 11 01:35:48 compute-0 systemd[1]: var-lib-containers-storage-overlay-71da1f264f6c84273ff7279842042e12ee9d363bb98b441e04efeeff07ab2585-merged.mount: Deactivated successfully.
Oct 11 01:35:49 compute-0 podman[159208]: 2025-10-11 01:35:49.21861203 +0000 UTC m=+0.734841022 container cleanup ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vendor=Red Hat, Inc., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.expose-services=, managed_by=edpm_ansible, build-date=2025-08-20T13:12:41, distribution-scope=public, vcs-type=git, name=ubi9-minimal, architecture=x86_64, com.redhat.component=ubi9-minimal-container, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350, io.openshift.tags=minimal rhel9, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, config_id=edpm, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.33.7, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., version=9.6, container_name=openstack_network_exporter, maintainer=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers)
Oct 11 01:35:49 compute-0 podman[159208]: openstack_network_exporter
Oct 11 01:35:49 compute-0 systemd[1]: edpm_openstack_network_exporter.service: Main process exited, code=exited, status=2/INVALIDARGUMENT
Oct 11 01:35:49 compute-0 podman[159237]: openstack_network_exporter
Oct 11 01:35:49 compute-0 systemd[1]: edpm_openstack_network_exporter.service: Failed with result 'exit-code'.
Oct 11 01:35:49 compute-0 systemd[1]: Stopped openstack_network_exporter container.
Oct 11 01:35:49 compute-0 systemd[1]: Starting openstack_network_exporter container...
Oct 11 01:35:49 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:35:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/71da1f264f6c84273ff7279842042e12ee9d363bb98b441e04efeeff07ab2585/merged/run/ovn supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/71da1f264f6c84273ff7279842042e12ee9d363bb98b441e04efeeff07ab2585/merged/etc/openstack_network_exporter/openstack_network_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/71da1f264f6c84273ff7279842042e12ee9d363bb98b441e04efeeff07ab2585/merged/etc/openstack_network_exporter/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 01:35:49 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46.
Oct 11 01:35:49 compute-0 podman[159250]: 2025-10-11 01:35:49.512959218 +0000 UTC m=+0.158274903 container init ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, managed_by=edpm_ansible, container_name=openstack_network_exporter, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, distribution-scope=public, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., config_id=edpm, name=ubi9-minimal, release=1755695350, vendor=Red Hat, Inc., version=9.6, com.redhat.component=ubi9-minimal-container, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, io.openshift.expose-services=, build-date=2025-08-20T13:12:41, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, architecture=x86_64)
Oct 11 01:35:49 compute-0 openstack_network_exporter[159265]: INFO    01:35:49 main.go:48: registering *bridge.Collector
Oct 11 01:35:49 compute-0 openstack_network_exporter[159265]: INFO    01:35:49 main.go:48: registering *coverage.Collector
Oct 11 01:35:49 compute-0 openstack_network_exporter[159265]: INFO    01:35:49 main.go:48: registering *datapath.Collector
Oct 11 01:35:49 compute-0 openstack_network_exporter[159265]: INFO    01:35:49 main.go:48: registering *iface.Collector
Oct 11 01:35:49 compute-0 openstack_network_exporter[159265]: INFO    01:35:49 main.go:48: registering *memory.Collector
Oct 11 01:35:49 compute-0 openstack_network_exporter[159265]: INFO    01:35:49 main.go:48: registering *ovnnorthd.Collector
Oct 11 01:35:49 compute-0 openstack_network_exporter[159265]: INFO    01:35:49 main.go:48: registering *ovn.Collector
Oct 11 01:35:49 compute-0 openstack_network_exporter[159265]: INFO    01:35:49 main.go:48: registering *ovsdbserver.Collector
Oct 11 01:35:49 compute-0 openstack_network_exporter[159265]: INFO    01:35:49 main.go:48: registering *pmd_perf.Collector
Oct 11 01:35:49 compute-0 openstack_network_exporter[159265]: INFO    01:35:49 main.go:48: registering *pmd_rxq.Collector
Oct 11 01:35:49 compute-0 openstack_network_exporter[159265]: INFO    01:35:49 main.go:48: registering *vswitch.Collector
Oct 11 01:35:49 compute-0 openstack_network_exporter[159265]: NOTICE  01:35:49 main.go:76: listening on https://:9105/metrics
Oct 11 01:35:49 compute-0 podman[159250]: 2025-10-11 01:35:49.565671371 +0000 UTC m=+0.210987066 container start ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, name=ubi9-minimal, build-date=2025-08-20T13:12:41, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, config_id=edpm, managed_by=edpm_ansible, release=1755695350, container_name=openstack_network_exporter, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.33.7, io.openshift.expose-services=, version=9.6, vcs-type=git, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, distribution-scope=public, vendor=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, url=https://catalog.redhat.com/en/search?searchType=containers)
Oct 11 01:35:49 compute-0 podman[159250]: openstack_network_exporter
Oct 11 01:35:49 compute-0 systemd[1]: Started openstack_network_exporter container.
Oct 11 01:35:49 compute-0 sudo[159202]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:49 compute-0 podman[159275]: 2025-10-11 01:35:49.687446002 +0000 UTC m=+0.103323290 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, name=ubi9-minimal, release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, version=9.6, maintainer=Red Hat, Inc., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-type=git, com.redhat.component=ubi9-minimal-container, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, build-date=2025-08-20T13:12:41, config_id=edpm, vendor=Red Hat, Inc., container_name=openstack_network_exporter, io.openshift.tags=minimal rhel9, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.buildah.version=1.33.7, managed_by=edpm_ansible)
Oct 11 01:35:50 compute-0 sudo[159446]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-atbczmsofmqgeijhlhlvrovbffgngrjr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146549.8843572-833-132239508949908/AnsiballZ_find.py'
Oct 11 01:35:50 compute-0 sudo[159446]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:50 compute-0 python3.9[159448]: ansible-ansible.builtin.find Invoked with file_type=directory paths=['/var/lib/openstack/healthchecks/'] patterns=[] read_whole_file=False age_stamp=mtime recurse=False hidden=False follow=False get_checksum=False checksum_algorithm=sha1 use_regex=False exact_mode=True excludes=None contains=None age=None size=None depth=None mode=None encoding=None limit=None
Oct 11 01:35:50 compute-0 sudo[159446]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:51 compute-0 podman[159525]: 2025-10-11 01:35:51.175430364 +0000 UTC m=+0.080459868 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 01:35:51 compute-0 sudo[159622]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xauixreeclmlthgqzdgejctbhpxgkiku ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146550.9481692-843-230757577000938/AnsiballZ_podman_container_info.py'
Oct 11 01:35:51 compute-0 sudo[159622]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:51 compute-0 python3.9[159624]: ansible-containers.podman.podman_container_info Invoked with name=['ovn_controller'] executable=podman
Oct 11 01:35:51 compute-0 sudo[159622]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:52 compute-0 sudo[159787]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iawaqfgosmgjqyzszxuvxftrszizxkly ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146552.2864206-851-123373315687585/AnsiballZ_podman_container_exec.py'
Oct 11 01:35:52 compute-0 sudo[159787]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:53 compute-0 python3.9[159789]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=ovn_controller detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:35:53 compute-0 systemd[1]: Started libpod-conmon-861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112.scope.
Oct 11 01:35:53 compute-0 rsyslogd[998]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 01:35:53 compute-0 podman[159790]: 2025-10-11 01:35:53.280097138 +0000 UTC m=+0.113060375 container exec 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_id=ovn_controller, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']})
Oct 11 01:35:53 compute-0 podman[159790]: 2025-10-11 01:35:53.31444333 +0000 UTC m=+0.147406577 container exec_died 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, managed_by=edpm_ansible)
Oct 11 01:35:53 compute-0 systemd[1]: libpod-conmon-861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112.scope: Deactivated successfully.
Oct 11 01:35:53 compute-0 sudo[159787]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:54 compute-0 sudo[159971]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kbdibmoclbfrczjwobvzeatbatkdqign ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146553.6395953-859-221080566393565/AnsiballZ_podman_container_exec.py'
Oct 11 01:35:54 compute-0 sudo[159971]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:54 compute-0 python3.9[159973]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=ovn_controller detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:35:54 compute-0 systemd[1]: Started libpod-conmon-861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112.scope.
Oct 11 01:35:54 compute-0 podman[159974]: 2025-10-11 01:35:54.434397007 +0000 UTC m=+0.106778120 container exec 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, managed_by=edpm_ansible, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 01:35:54 compute-0 podman[159974]: 2025-10-11 01:35:54.470905198 +0000 UTC m=+0.143286271 container exec_died 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, org.label-schema.build-date=20251009)
Oct 11 01:35:54 compute-0 systemd[1]: libpod-conmon-861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112.scope: Deactivated successfully.
Oct 11 01:35:54 compute-0 sudo[159971]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:55 compute-0 sudo[160155]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mciectqqlfqrgwaubmuoisdtqqcmmcrn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146554.7672095-867-85146017781421/AnsiballZ_file.py'
Oct 11 01:35:55 compute-0 sudo[160155]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:55 compute-0 python3.9[160157]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/ovn_controller recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:35:55 compute-0 sudo[160155]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:56 compute-0 sudo[160307]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gqtlwgareeucugfifipebmghxwzvyhnx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146555.679907-876-231522818830422/AnsiballZ_podman_container_info.py'
Oct 11 01:35:56 compute-0 sudo[160307]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:56 compute-0 python3.9[160309]: ansible-containers.podman.podman_container_info Invoked with name=['ceilometer_agent_compute'] executable=podman
Oct 11 01:35:56 compute-0 sudo[160307]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:57 compute-0 sudo[160473]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bklzodfrkjdpbdpmzjyusyvxsmfsoewz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146556.7153747-884-227648996599098/AnsiballZ_podman_container_exec.py'
Oct 11 01:35:57 compute-0 sudo[160473]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:57 compute-0 python3.9[160475]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=ceilometer_agent_compute detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:35:57 compute-0 systemd[1]: Started libpod-conmon-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope.
Oct 11 01:35:57 compute-0 podman[160476]: 2025-10-11 01:35:57.423005422 +0000 UTC m=+0.105641460 container exec c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251007, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.license=GPLv2, container_name=ceilometer_agent_compute)
Oct 11 01:35:57 compute-0 podman[160476]: 2025-10-11 01:35:57.455304186 +0000 UTC m=+0.137940254 container exec_died c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS)
Oct 11 01:35:57 compute-0 systemd[1]: libpod-conmon-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope: Deactivated successfully.
Oct 11 01:35:57 compute-0 sudo[160473]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:58 compute-0 sudo[160657]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wgkyymtqinurzzhjxpxdgnfxshqtcmiu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146557.7443707-892-82120569642282/AnsiballZ_podman_container_exec.py'
Oct 11 01:35:58 compute-0 sudo[160657]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:58 compute-0 python3.9[160659]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=ceilometer_agent_compute detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:35:58 compute-0 systemd[1]: Started libpod-conmon-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope.
Oct 11 01:35:58 compute-0 podman[160660]: 2025-10-11 01:35:58.458823599 +0000 UTC m=+0.110112900 container exec c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007)
Oct 11 01:35:58 compute-0 podman[160660]: 2025-10-11 01:35:58.490184664 +0000 UTC m=+0.141473975 container exec_died c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007)
Oct 11 01:35:58 compute-0 systemd[1]: libpod-conmon-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope: Deactivated successfully.
Oct 11 01:35:58 compute-0 sudo[160657]: pam_unix(sudo:session): session closed for user root
Oct 11 01:35:59 compute-0 sudo[160842]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rqaqzyynpciepphjcqajtojvjsiyqhfl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146558.7989223-900-184775653411980/AnsiballZ_file.py'
Oct 11 01:35:59 compute-0 sudo[160842]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:35:59 compute-0 python3.9[160844]: ansible-ansible.builtin.file Invoked with group=42405 mode=0700 owner=42405 path=/var/lib/openstack/healthchecks/ceilometer_agent_compute recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:35:59 compute-0 sudo[160842]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:00 compute-0 sudo[160994]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iicztamfhvsjsnkuuutwzwdzhhlyduiw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146559.7683198-909-273849011679565/AnsiballZ_podman_container_info.py'
Oct 11 01:36:00 compute-0 sudo[160994]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:00 compute-0 python3.9[160996]: ansible-containers.podman.podman_container_info Invoked with name=['node_exporter'] executable=podman
Oct 11 01:36:00 compute-0 sudo[160994]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:01 compute-0 sudo[161159]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fkmkycemlqvjexqzpgrimdtogzlguyzq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146560.7592936-917-198670922027242/AnsiballZ_podman_container_exec.py'
Oct 11 01:36:01 compute-0 sudo[161159]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:01 compute-0 python3.9[161161]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=node_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:36:01 compute-0 systemd[1]: Started libpod-conmon-adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae.scope.
Oct 11 01:36:01 compute-0 podman[161162]: 2025-10-11 01:36:01.467647322 +0000 UTC m=+0.096932423 container exec adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 01:36:01 compute-0 podman[161162]: 2025-10-11 01:36:01.501988804 +0000 UTC m=+0.131273865 container exec_died adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:36:01 compute-0 systemd[1]: libpod-conmon-adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae.scope: Deactivated successfully.
Oct 11 01:36:01 compute-0 sudo[161159]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:02 compute-0 sudo[161344]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ubbaiaohwlgaowoamibuhrrncqmragik ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146561.8097806-925-91988758283512/AnsiballZ_podman_container_exec.py'
Oct 11 01:36:02 compute-0 sudo[161344]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:02 compute-0 python3.9[161346]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=node_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:36:02 compute-0 systemd[1]: Started libpod-conmon-adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae.scope.
Oct 11 01:36:02 compute-0 podman[161347]: 2025-10-11 01:36:02.569118955 +0000 UTC m=+0.093128570 container exec adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:36:02 compute-0 podman[161347]: 2025-10-11 01:36:02.604740938 +0000 UTC m=+0.128750523 container exec_died adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 01:36:02 compute-0 systemd[1]: libpod-conmon-adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae.scope: Deactivated successfully.
Oct 11 01:36:02 compute-0 sudo[161344]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:03 compute-0 sudo[161538]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tcywtzlonljdgidyhfdbnpfhoceajmmn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146562.8654153-933-194130559242475/AnsiballZ_file.py'
Oct 11 01:36:03 compute-0 sudo[161538]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:03 compute-0 podman[161500]: 2025-10-11 01:36:03.344587433 +0000 UTC m=+0.132064512 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, managed_by=edpm_ansible, tcib_managed=true, config_id=ovn_controller, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 01:36:03 compute-0 python3.9[161545]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/node_exporter recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:03 compute-0 sudo[161538]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:04 compute-0 sudo[161704]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nxbexjiycpapbsuguidzoeiqmbgqsuox ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146563.8029132-942-50574333555668/AnsiballZ_podman_container_info.py'
Oct 11 01:36:04 compute-0 sudo[161704]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:04 compute-0 python3.9[161706]: ansible-containers.podman.podman_container_info Invoked with name=['podman_exporter'] executable=podman
Oct 11 01:36:04 compute-0 sudo[161704]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:05 compute-0 sudo[161883]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bcnmpsgrqvandyulfjfqzijhnmolbsuv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146564.7665672-950-67768879698412/AnsiballZ_podman_container_exec.py'
Oct 11 01:36:05 compute-0 sudo[161883]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:05 compute-0 podman[161844]: 2025-10-11 01:36:05.172361796 +0000 UTC m=+0.089293199 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 01:36:05 compute-0 python3.9[161891]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=podman_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:36:05 compute-0 systemd[1]: Started libpod-conmon-2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899.scope.
Oct 11 01:36:05 compute-0 podman[161897]: 2025-10-11 01:36:05.521872816 +0000 UTC m=+0.117243883 container exec 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:36:05 compute-0 podman[161897]: 2025-10-11 01:36:05.556906131 +0000 UTC m=+0.152277148 container exec_died 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:36:05 compute-0 systemd[1]: libpod-conmon-2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899.scope: Deactivated successfully.
Oct 11 01:36:05 compute-0 sudo[161883]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:06 compute-0 sudo[162077]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iudmulxfpqdadmjjgbqsrwqtkgttkmby ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146565.8954298-958-105425422192069/AnsiballZ_podman_container_exec.py'
Oct 11 01:36:06 compute-0 sudo[162077]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:06 compute-0 python3.9[162079]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=podman_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:36:06 compute-0 systemd[1]: Started libpod-conmon-2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899.scope.
Oct 11 01:36:06 compute-0 podman[162080]: 2025-10-11 01:36:06.609089242 +0000 UTC m=+0.097737486 container exec 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 01:36:06 compute-0 podman[162080]: 2025-10-11 01:36:06.643129599 +0000 UTC m=+0.131777823 container exec_died 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 01:36:06 compute-0 systemd[1]: libpod-conmon-2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899.scope: Deactivated successfully.
Oct 11 01:36:06 compute-0 sudo[162077]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:07 compute-0 sudo[162274]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pzcveomwpdpumqzveydadsfyghzhdjys ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146566.9634752-966-87824997482649/AnsiballZ_file.py'
Oct 11 01:36:07 compute-0 sudo[162274]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:07 compute-0 podman[162236]: 2025-10-11 01:36:07.408480449 +0000 UTC m=+0.103844521 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 01:36:07 compute-0 python3.9[162282]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/podman_exporter recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:07 compute-0 sudo[162274]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:08 compute-0 sudo[162434]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gyepslyvlaioichbofkdfihffhejmcln ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146567.901654-975-61315729806963/AnsiballZ_podman_container_info.py'
Oct 11 01:36:08 compute-0 sudo[162434]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:08 compute-0 python3.9[162436]: ansible-containers.podman.podman_container_info Invoked with name=['openstack_network_exporter'] executable=podman
Oct 11 01:36:08 compute-0 sudo[162434]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:09 compute-0 sudo[162600]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-afobwwufwvgkabaorsypqqqcyhrxxvig ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146568.9037275-983-52278224634206/AnsiballZ_podman_container_exec.py'
Oct 11 01:36:09 compute-0 sudo[162600]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:09 compute-0 python3.9[162602]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=openstack_network_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:36:09 compute-0 systemd[1]: Started libpod-conmon-ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46.scope.
Oct 11 01:36:09 compute-0 podman[162603]: 2025-10-11 01:36:09.660876723 +0000 UTC m=+0.113313348 container exec ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, release=1755695350, distribution-scope=public, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, url=https://catalog.redhat.com/en/search?searchType=containers, io.openshift.tags=minimal rhel9, architecture=x86_64, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, config_id=edpm, name=ubi9-minimal, container_name=openstack_network_exporter, vcs-type=git, build-date=2025-08-20T13:12:41, com.redhat.component=ubi9-minimal-container, maintainer=Red Hat, Inc., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, version=9.6, io.buildah.version=1.33.7)
Oct 11 01:36:09 compute-0 podman[162603]: 2025-10-11 01:36:09.694525072 +0000 UTC m=+0.146961707 container exec_died ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, io.openshift.expose-services=, version=9.6, io.openshift.tags=minimal rhel9, vcs-type=git, name=ubi9-minimal, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-minimal-container, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., architecture=x86_64, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.buildah.version=1.33.7, url=https://catalog.redhat.com/en/search?searchType=containers, distribution-scope=public, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, container_name=openstack_network_exporter, managed_by=edpm_ansible, build-date=2025-08-20T13:12:41, release=1755695350, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 01:36:09 compute-0 systemd[1]: libpod-conmon-ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46.scope: Deactivated successfully.
Oct 11 01:36:09 compute-0 sudo[162600]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:10 compute-0 sudo[162785]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qmjvcbtwdpwauhmcjchvzlrlgtrvkvss ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146569.996587-991-255265460832757/AnsiballZ_podman_container_exec.py'
Oct 11 01:36:10 compute-0 sudo[162785]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:10 compute-0 python3.9[162787]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=openstack_network_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:36:10 compute-0 systemd[1]: Started libpod-conmon-ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46.scope.
Oct 11 01:36:10 compute-0 podman[162788]: 2025-10-11 01:36:10.774784426 +0000 UTC m=+0.114956443 container exec ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, architecture=x86_64, maintainer=Red Hat, Inc., config_id=edpm, release=1755695350, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, build-date=2025-08-20T13:12:41, url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, com.redhat.component=ubi9-minimal-container, managed_by=edpm_ansible, vcs-type=git, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=openstack_network_exporter, name=ubi9-minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.buildah.version=1.33.7, io.openshift.expose-services=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., version=9.6)
Oct 11 01:36:10 compute-0 podman[162788]: 2025-10-11 01:36:10.81007556 +0000 UTC m=+0.150247537 container exec_died ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, version=9.6, io.buildah.version=1.33.7, managed_by=edpm_ansible, vendor=Red Hat, Inc., build-date=2025-08-20T13:12:41, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-type=git, architecture=x86_64, io.openshift.expose-services=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=openstack_network_exporter, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, io.openshift.tags=minimal rhel9, name=ubi9-minimal, distribution-scope=public, com.redhat.component=ubi9-minimal-container, maintainer=Red Hat, Inc., config_id=edpm, release=1755695350, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b)
Oct 11 01:36:10 compute-0 systemd[1]: libpod-conmon-ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46.scope: Deactivated successfully.
Oct 11 01:36:10 compute-0 sudo[162785]: pam_unix(sudo:session): session closed for user root
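The two podman_container_exec tasks above run `id -u` and then `id -g` inside the openstack_network_exporter container, presumably to learn which UID/GID should own the healthcheck directory created just after. A minimal sketch of the same probe outside Ansible, assuming podman is on PATH and the container is running:

    import subprocess

    def exec_id(container: str, flag: str) -> int:
        # Equivalent of the logged tasks: podman exec <container> id -u / id -g
        out = subprocess.run(
            ["podman", "exec", container, "id", flag],
            check=True, capture_output=True, text=True,
        ).stdout.strip()
        return int(out)

    uid = exec_id("openstack_network_exporter", "-u")
    gid = exec_id("openstack_network_exporter", "-g")
    print(f"container runs as {uid}:{gid}")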
Oct 11 01:36:11 compute-0 sudo[162970]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ofldtdyovgltzenezedyybzqgoovxyqo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146571.0858963-999-61885844333703/AnsiballZ_file.py'
Oct 11 01:36:11 compute-0 sudo[162970]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:11 compute-0 python3.9[162972]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/openstack_network_exporter recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:11 compute-0 sudo[162970]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:12 compute-0 sudo[163122]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xiqwtykdiavtgytorvmzxsalkthlbauw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146571.9685302-1008-177842738030181/AnsiballZ_file.py'
Oct 11 01:36:12 compute-0 sudo[163122]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:12 compute-0 python3.9[163124]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall/ state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:12 compute-0 sudo[163122]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:13 compute-0 sudo[163274]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bpevuxjsfkoqukppbtgbtwwwpcsylljg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146572.8332741-1016-109133196841796/AnsiballZ_stat.py'
Oct 11 01:36:13 compute-0 sudo[163274]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:13 compute-0 python3.9[163276]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/telemetry.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:36:13 compute-0 sudo[163274]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:14 compute-0 sudo[163397]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hebwhhbvuvpbcliahfwxhjpdqcyikapx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146572.8332741-1016-109133196841796/AnsiballZ_copy.py'
Oct 11 01:36:14 compute-0 sudo[163397]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:14 compute-0 python3.9[163399]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/edpm-config/firewall/telemetry.yaml mode=0640 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146572.8332741-1016-109133196841796/.source.yaml follow=False _original_basename=firewall.yaml.j2 checksum=d942d984493b214bda2913f753ff68cdcedff00e backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:14 compute-0 sudo[163397]: pam_unix(sudo:session): session closed for user root
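The ansible.legacy.copy task logs the SHA-1 of the rendered source (checksum=d942d9...) and skips the write when the destination already matches, which is what makes the task idempotent on reruns. The same comparison, sketched with the path and digest taken from the log line above:

    import hashlib

    def sha1_of(path: str) -> str:
        # Ansible's copy module compares SHA-1 digests to decide whether to write.
        h = hashlib.sha1()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(65536), b""):
                h.update(chunk)
        return h.hexdigest()

    expected = "d942d984493b214bda2913f753ff68cdcedff00e"
    print(sha1_of("/var/lib/edpm-config/firewall/telemetry.yaml") == expected)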
Oct 11 01:36:15 compute-0 sudo[163549]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-phbfovtzugjkulquqccvllkdtlsporfj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146574.6046321-1032-225763597748041/AnsiballZ_file.py'
Oct 11 01:36:15 compute-0 sudo[163549]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:15 compute-0 python3.9[163551]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:15 compute-0 sudo[163549]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:15 compute-0 sudo[163701]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oqfvrqvxkmszvpeeyaokanrcemsmsbci ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146575.515791-1040-275499938383768/AnsiballZ_stat.py'
Oct 11 01:36:15 compute-0 sudo[163701]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:16 compute-0 python3.9[163703]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:36:16 compute-0 sudo[163701]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:16 compute-0 sudo[163779]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sbqsonwmdukhtzteyugqbooazjmsrcss ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146575.515791-1040-275499938383768/AnsiballZ_file.py'
Oct 11 01:36:16 compute-0 sudo[163779]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:16 compute-0 python3.9[163781]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml _original_basename=base-rules.yaml.j2 recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:16 compute-0 sudo[163779]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:17 compute-0 sudo[163931]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-soobulosawsugllclxtbbkyymdorpzzr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146576.9994938-1052-197572597441956/AnsiballZ_stat.py'
Oct 11 01:36:17 compute-0 sudo[163931]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:17 compute-0 python3.9[163933]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:36:17 compute-0 sudo[163931]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:17 compute-0 sudo[164009]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jaovqunqpklqvnavfxbhijktdpwjmckt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146576.9994938-1052-197572597441956/AnsiballZ_file.py'
Oct 11 01:36:17 compute-0 sudo[164009]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:18 compute-0 python3.9[164011]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml _original_basename=.qsjmby06 recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:18 compute-0 sudo[164009]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:18 compute-0 sudo[164161]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-laysaffvyvbiigtsqgxjrfogghbdgjiq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146578.4168293-1064-37463613618381/AnsiballZ_stat.py'
Oct 11 01:36:18 compute-0 sudo[164161]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:19 compute-0 python3.9[164163]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/iptables.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:36:19 compute-0 sudo[164161]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:19 compute-0 sudo[164239]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lgnckmrqwmmlnuwgxylqwkifzawlzrwr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146578.4168293-1064-37463613618381/AnsiballZ_file.py'
Oct 11 01:36:19 compute-0 sudo[164239]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:19 compute-0 python3.9[164241]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/iptables.nft _original_basename=iptables.nft recurse=False state=file path=/etc/nftables/iptables.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:19 compute-0 sudo[164239]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:20 compute-0 podman[164318]: 2025-10-11 01:36:20.214756905 +0000 UTC m=+0.105108598 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, vcs-type=git, io.openshift.expose-services=, architecture=x86_64, io.buildah.version=1.33.7, vendor=Red Hat, Inc., config_id=edpm, io.openshift.tags=minimal rhel9, com.redhat.component=ubi9-minimal-container, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, managed_by=edpm_ansible, build-date=2025-08-20T13:12:41, container_name=openstack_network_exporter, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, url=https://catalog.redhat.com/en/search?searchType=containers, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., distribution-scope=public, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, version=9.6, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9.)
Oct 11 01:36:20 compute-0 PackageKit[114970]: daemon quit
Oct 11 01:36:20 compute-0 systemd[1]: packagekit.service: Deactivated successfully.
Oct 11 01:36:20 compute-0 sudo[164413]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-chsnpaavwatbynkdcheqheehtegjrsfr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146579.979774-1077-253370121325562/AnsiballZ_command.py'
Oct 11 01:36:20 compute-0 sudo[164413]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:20 compute-0 python3.9[164415]: ansible-ansible.legacy.command Invoked with _raw_params=nft -j list ruleset _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:36:20 compute-0 sudo[164413]: pam_unix(sudo:session): session closed for user root
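`nft -j list ruleset` dumps the live ruleset as JSON; the play snapshots it here before rendering new fragments. A minimal reader, assuming root and the nft binary (the top-level "nftables" key is part of libnftables' JSON schema):

    import json
    import subprocess

    raw = subprocess.run(
        ["nft", "-j", "list", "ruleset"],
        check=True, capture_output=True, text=True,
    ).stdout
    entries = json.loads(raw)["nftables"]
    tables = [e["table"]["name"] for e in entries if "table" in e]
    print("existing tables:", tables)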
Oct 11 01:36:21 compute-0 sudo[164576]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fzuogldwpjvdlbrcxlwlvxfnuvthrsqm ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760146580.8621197-1085-78197755548357/AnsiballZ_edpm_nftables_from_files.py'
Oct 11 01:36:21 compute-0 sudo[164576]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:21 compute-0 podman[164540]: 2025-10-11 01:36:21.419433438 +0000 UTC m=+0.078840442 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 01:36:21 compute-0 python3[164582]: ansible-edpm_nftables_from_files Invoked with src=/var/lib/edpm-config/firewall
Oct 11 01:36:21 compute-0 sudo[164576]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:22 compute-0 sudo[164739]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ydtksthsfuacpprwbnoojkzzilntsjlk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146581.8670862-1093-25778484184232/AnsiballZ_stat.py'
Oct 11 01:36:22 compute-0 sudo[164739]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:22 compute-0 python3.9[164741]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:36:22 compute-0 sudo[164739]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:22 compute-0 sudo[164817]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mbrizhcjrwmtktpkgxcsslpuhjpxbftn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146581.8670862-1093-25778484184232/AnsiballZ_file.py'
Oct 11 01:36:22 compute-0 sudo[164817]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:22 compute-0 python3.9[164819]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-jumps.nft _original_basename=jump-chain.j2 recurse=False state=file path=/etc/nftables/edpm-jumps.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:22 compute-0 sudo[164817]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:23 compute-0 sudo[164969]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-efvvebajibjddpsoydydtwvcnquwnruq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146583.1784737-1105-237992433615787/AnsiballZ_stat.py'
Oct 11 01:36:23 compute-0 sudo[164969]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:23 compute-0 python3.9[164971]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-update-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:36:23 compute-0 sudo[164969]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:24 compute-0 sudo[165047]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ilhmzjlkeemxlhpxdvwcikhkusfhusam ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146583.1784737-1105-237992433615787/AnsiballZ_file.py'
Oct 11 01:36:24 compute-0 sudo[165047]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:24 compute-0 python3.9[165049]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-update-jumps.nft _original_basename=jump-chain.j2 recurse=False state=file path=/etc/nftables/edpm-update-jumps.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:24 compute-0 sudo[165047]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:25 compute-0 sudo[165199]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qhmjijkicuagaadrpphwpzqanmgsgogb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146584.6613424-1117-47796410360864/AnsiballZ_stat.py'
Oct 11 01:36:25 compute-0 sudo[165199]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:25 compute-0 python3.9[165201]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-flushes.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:36:25 compute-0 sudo[165199]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:25 compute-0 sudo[165277]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ydurbghoqajwfswcxjvemfezlmspiktp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146584.6613424-1117-47796410360864/AnsiballZ_file.py'
Oct 11 01:36:25 compute-0 sudo[165277]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:25 compute-0 python3.9[165279]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-flushes.nft _original_basename=flush-chain.j2 recurse=False state=file path=/etc/nftables/edpm-flushes.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:25 compute-0 sudo[165277]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:26 compute-0 sudo[165429]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ehxfsvznrnkjdfcefgmjwksvnmaxybri ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146586.0941896-1129-276546208141450/AnsiballZ_stat.py'
Oct 11 01:36:26 compute-0 sudo[165429]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:26 compute-0 python3.9[165431]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-chains.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:36:26 compute-0 sudo[165429]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:27 compute-0 sudo[165507]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jimbtvzonakwynuygnqpcsxzckwtfwpx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146586.0941896-1129-276546208141450/AnsiballZ_file.py'
Oct 11 01:36:27 compute-0 sudo[165507]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:27 compute-0 python3.9[165509]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-chains.nft _original_basename=chains.j2 recurse=False state=file path=/etc/nftables/edpm-chains.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:27 compute-0 sudo[165507]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:28 compute-0 sudo[165659]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-apchxucxsmtbwyhwjtrbyinyqpvfbuvj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146587.5455341-1141-265750981739195/AnsiballZ_stat.py'
Oct 11 01:36:28 compute-0 sudo[165659]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:28 compute-0 python3.9[165661]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-rules.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:36:28 compute-0 sudo[165659]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:28 compute-0 sudo[165784]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hwbymfaewvdvnrtitzuqkbreulqasrlf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146587.5455341-1141-265750981739195/AnsiballZ_copy.py'
Oct 11 01:36:28 compute-0 sudo[165784]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:29 compute-0 python3.9[165786]: ansible-ansible.legacy.copy Invoked with dest=/etc/nftables/edpm-rules.nft group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146587.5455341-1141-265750981739195/.source.nft follow=False _original_basename=ruleset.j2 checksum=bc835bd485c96b4ac7465e87d3a790a8d097f2aa backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:29 compute-0 sudo[165784]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:29 compute-0 sudo[165936]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dwtqztanpfjhvmnepzxetsiesosgrtqs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146589.2839084-1156-130917457441034/AnsiballZ_file.py'
Oct 11 01:36:29 compute-0 sudo[165936]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:29 compute-0 python3.9[165938]: ansible-ansible.builtin.file Invoked with group=root mode=0600 owner=root path=/etc/nftables/edpm-rules.nft.changed state=touch recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:29 compute-0 sudo[165936]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:30 compute-0 sudo[166088]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rwkwtkxxdvyrlefrlavinhdjrdjsksal ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146590.1307998-1164-214601581305827/AnsiballZ_command.py'
Oct 11 01:36:30 compute-0 sudo[166088]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:30 compute-0 python3.9[166090]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail; cat /etc/nftables/edpm-chains.nft /etc/nftables/edpm-flushes.nft /etc/nftables/edpm-rules.nft /etc/nftables/edpm-update-jumps.nft /etc/nftables/edpm-jumps.nft | nft -c -f - _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:36:30 compute-0 sudo[166088]: pam_unix(sudo:session): session closed for user root
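The check step above rebuilds the full load order (chains, flushes, rules, update-jumps, jumps) on stdin and lets nft parse it with -c, so nothing is applied yet. A sketch of the same dry run (the fragments are mode 0600, so this needs root):

    import subprocess

    FRAGMENTS = [
        "/etc/nftables/edpm-chains.nft",
        "/etc/nftables/edpm-flushes.nft",
        "/etc/nftables/edpm-rules.nft",
        "/etc/nftables/edpm-update-jumps.nft",
        "/etc/nftables/edpm-jumps.nft",
    ]
    # Concatenate in load order and validate without committing (-c = check only).
    blob = "".join(open(p, encoding="utf-8").read() for p in FRAGMENTS)
    subprocess.run(["nft", "-c", "-f", "-"], input=blob, text=True, check=True)
    print("ruleset parses cleanly")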
Oct 11 01:36:31 compute-0 sudo[166243]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dadbjbmbntxipfnzskuhgrppbqdkkekt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146591.0231574-1172-134888067838862/AnsiballZ_blockinfile.py'
Oct 11 01:36:31 compute-0 sudo[166243]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:31 compute-0 python3.9[166245]: ansible-ansible.builtin.blockinfile Invoked with backup=False block=include "/etc/nftables/iptables.nft"
                                             include "/etc/nftables/edpm-chains.nft"
                                             include "/etc/nftables/edpm-rules.nft"
                                             include "/etc/nftables/edpm-jumps.nft"
                                              path=/etc/sysconfig/nftables.conf validate=nft -c -f %s state=present marker=# {mark} ANSIBLE MANAGED BLOCK create=False marker_begin=BEGIN marker_end=END append_newline=False prepend_newline=False unsafe_writes=False insertafter=None insertbefore=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:31 compute-0 sudo[166243]: pam_unix(sudo:session): session closed for user root
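Given the marker settings logged above (marker=# {mark} ANSIBLE MANAGED BLOCK with BEGIN/END), the block that blockinfile maintains in /etc/sysconfig/nftables.conf should come out as below; the validate=nft -c -f %s option makes blockinfile dry-run the edited copy before swapping it into place, so a bad include can never persist:

    # BEGIN ANSIBLE MANAGED BLOCK
    include "/etc/nftables/iptables.nft"
    include "/etc/nftables/edpm-chains.nft"
    include "/etc/nftables/edpm-rules.nft"
    include "/etc/nftables/edpm-jumps.nft"
    # END ANSIBLE MANAGED BLOCK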
Oct 11 01:36:32 compute-0 sudo[166395]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kdxwdkoocgnqathdskfqblvhhqcahqsg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146592.2627926-1181-10153533462545/AnsiballZ_command.py'
Oct 11 01:36:32 compute-0 sudo[166395]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:32 compute-0 python3.9[166397]: ansible-ansible.legacy.command Invoked with _raw_params=nft -f /etc/nftables/edpm-chains.nft _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:36:32 compute-0 sudo[166395]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:33 compute-0 sudo[166559]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nismmcsyjyxcqnkwpbcxgbjnzgbjrfcd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146593.1794834-1189-225239786085370/AnsiballZ_stat.py'
Oct 11 01:36:33 compute-0 sudo[166559]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:33 compute-0 podman[166522]: 2025-10-11 01:36:33.621637058 +0000 UTC m=+0.114263065 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, tcib_managed=true, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller)
Oct 11 01:36:33 compute-0 python3.9[166568]: ansible-ansible.builtin.stat Invoked with path=/etc/nftables/edpm-rules.nft.changed follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:36:33 compute-0 sudo[166559]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:34 compute-0 sudo[166727]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zcedhtoriftpqxuslknqwviodwnyikua ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146593.9900873-1197-45031037772005/AnsiballZ_command.py'
Oct 11 01:36:34 compute-0 sudo[166727]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:34 compute-0 python3.9[166729]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail; cat /etc/nftables/edpm-flushes.nft /etc/nftables/edpm-rules.nft /etc/nftables/edpm-update-jumps.nft | nft -f - _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:36:34 compute-0 sudo[166727]: pam_unix(sudo:session): session closed for user root
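Note the split apply: edpm-chains.nft was loaded on its own at 01:36:32 (creating tables and chains is idempotent), and the live refresh here pipes only flushes + rules + update-jumps. Because nft commits a -f input as a single transaction, the flush and the re-added rules land atomically, with no window where the chains sit empty; edpm-update-jumps.nft presumably reconciles the jump rules in place of the boot-time edpm-jumps.nft include.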
Oct 11 01:36:35 compute-0 sudo[166882]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jgxkvtjtstkhhuawagdccbizheyjqwjd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146594.8104131-1205-198308658267103/AnsiballZ_file.py'
Oct 11 01:36:35 compute-0 sudo[166882]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:35 compute-0 python3.9[166884]: ansible-ansible.builtin.file Invoked with path=/etc/nftables/edpm-rules.nft.changed state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:35 compute-0 sudo[166882]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:35 compute-0 sshd-session[143130]: Connection closed by 192.168.122.30 port 37804
Oct 11 01:36:35 compute-0 sshd-session[143127]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:36:35 compute-0 systemd[1]: session-22.scope: Deactivated successfully.
Oct 11 01:36:35 compute-0 systemd[1]: session-22.scope: Consumed 2min 41.345s CPU time.
Oct 11 01:36:35 compute-0 systemd-logind[804]: Session 22 logged out. Waiting for processes to exit.
Oct 11 01:36:35 compute-0 systemd-logind[804]: Removed session 22.
Oct 11 01:36:35 compute-0 podman[166909]: 2025-10-11 01:36:35.979629413 +0000 UTC m=+0.061521728 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 01:36:36 compute-0 podman[157119]: time="2025-10-11T01:36:36Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:36:36 compute-0 podman[157119]: @ - - [11/Oct/2025:01:36:36 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 12784 "" "Go-http-client/1.1"
Oct 11 01:36:36 compute-0 podman[157119]: @ - - [11/Oct/2025:01:36:36 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 2140 "" "Go-http-client/1.1"
Oct 11 01:36:38 compute-0 podman[166936]: 2025-10-11 01:36:38.208504582 +0000 UTC m=+0.097729346 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_id=edpm, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 01:36:38 compute-0 openstack_network_exporter[159265]: ERROR   01:36:38 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:36:38 compute-0 openstack_network_exporter[159265]: ERROR   01:36:38 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:36:38 compute-0 openstack_network_exporter[159265]: ERROR   01:36:38 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:36:38 compute-0 openstack_network_exporter[159265]: ERROR   01:36:38 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:36:38 compute-0 openstack_network_exporter[159265]: ERROR   01:36:38 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
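These exporter errors look expected on a compute node: ovn-northd is a control-plane daemon, so no local control socket exists here, and the dpif-netdev appctl calls only answer when a userspace (DPDK) datapath is configured, which this node's kernel datapath is not.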
Oct 11 01:36:41 compute-0 sshd-session[166962]: Accepted publickey for zuul from 192.168.122.30 port 50914 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:36:41 compute-0 systemd-logind[804]: New session 23 of user zuul.
Oct 11 01:36:41 compute-0 systemd[1]: Started Session 23 of User zuul.
Oct 11 01:36:41 compute-0 sshd-session[166962]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:36:42 compute-0 sudo[167115]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eptvvtgmdvvpiaoervwlmqfjlqcshtjd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146601.9741035-24-98537872990678/AnsiballZ_systemd_service.py'
Oct 11 01:36:42 compute-0 sudo[167115]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:43 compute-0 python3.9[167117]: ansible-ansible.builtin.systemd_service Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 01:36:43 compute-0 systemd[1]: Reloading.
Oct 11 01:36:43 compute-0 systemd-sysv-generator[167147]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:36:43 compute-0 systemd-rc-local-generator[167142]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:36:43 compute-0 sudo[167115]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:44 compute-0 python3.9[167301]: ansible-ansible.builtin.service_facts Invoked
Oct 11 01:36:44 compute-0 network[167318]: You are using 'network' service provided by 'network-scripts', which are now deprecated.
Oct 11 01:36:44 compute-0 network[167319]: 'network-scripts' will be removed from distribution in near future.
Oct 11 01:36:44 compute-0 network[167320]: It is advised to switch to 'NetworkManager' instead for network management.
Oct 11 01:36:49 compute-0 sudo[167593]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-twkjlaallmqirrqkxlqswrlornqenjyh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146609.2732475-47-154345469623974/AnsiballZ_systemd_service.py'
Oct 11 01:36:49 compute-0 sudo[167593]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:50 compute-0 python3.9[167595]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_ceilometer_agent_ipmi.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:36:50 compute-0 sudo[167593]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:51 compute-0 sudo[167759]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qyvakdjcujjgctxcllnoggvjyihcufkn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146610.4187346-57-257774430365164/AnsiballZ_file.py'
Oct 11 01:36:51 compute-0 sudo[167759]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:51 compute-0 podman[167720]: 2025-10-11 01:36:51.066467501 +0000 UTC m=+0.097278854 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, io.openshift.tags=minimal rhel9, distribution-scope=public, maintainer=Red Hat, Inc., io.buildah.version=1.33.7, vcs-type=git, com.redhat.component=ubi9-minimal-container, url=https://catalog.redhat.com/en/search?searchType=containers, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vendor=Red Hat, Inc., version=9.6, container_name=openstack_network_exporter, name=ubi9-minimal, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, managed_by=edpm_ansible, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.openshift.expose-services=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64)
Oct 11 01:36:51 compute-0 python3.9[167769]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_ceilometer_agent_ipmi.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:51 compute-0 sudo[167759]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:52 compute-0 sudo[167935]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bmbwyvjnwvehdexksxjudqxxolrdxefg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146611.5383818-65-106390142503465/AnsiballZ_file.py'
Oct 11 01:36:52 compute-0 podman[167893]: 2025-10-11 01:36:52.030524026 +0000 UTC m=+0.088334373 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 01:36:52 compute-0 sudo[167935]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:52 compute-0 python3.9[167946]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_ceilometer_agent_ipmi.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:36:52 compute-0 sudo[167935]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:53 compute-0 sudo[168096]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eqramtxirndugfuprpbosewqtxmwrtwi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146612.5663826-74-72201869321811/AnsiballZ_command.py'
Oct 11 01:36:53 compute-0 sudo[168096]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:53 compute-0 python3.9[168098]: ansible-ansible.legacy.command Invoked with _raw_params=if systemctl is-active certmonger.service; then
                                               systemctl disable --now certmonger.service
                                               test -f /etc/systemd/system/certmonger.service || systemctl mask certmonger.service
                                             fi
                                              _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
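The logged shell guards each step: certmonger is disabled only if it is currently active, and masked only when no unit file exists under /etc/systemd/system, so a local unit override would never be shadowed by the mask symlink. Unwrapped, the same logic reads:

    if systemctl is-active certmonger.service; then
        systemctl disable --now certmonger.service
        # mask only when there is no local unit override to clobber
        test -f /etc/systemd/system/certmonger.service || systemctl mask certmonger.service
    fi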
Oct 11 01:36:53 compute-0 sudo[168096]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:54 compute-0 python3.9[168250]: ansible-ansible.builtin.find Invoked with file_type=any hidden=True paths=['/var/lib/certmonger/requests'] patterns=[] read_whole_file=False age_stamp=mtime recurse=False follow=False get_checksum=False checksum_algorithm=sha1 use_regex=False exact_mode=True excludes=None contains=None age=None size=None depth=None mode=None encoding=None limit=None
Oct 11 01:36:55 compute-0 sudo[168400]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mhkqjomhssmryyxwousezjxkrfijqbzz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146614.700946-92-176409610206181/AnsiballZ_systemd_service.py'
Oct 11 01:36:55 compute-0 sudo[168400]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:55 compute-0 python3.9[168402]: ansible-ansible.builtin.systemd_service Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 01:36:55 compute-0 systemd[1]: Reloading.
Oct 11 01:36:55 compute-0 systemd-sysv-generator[168432]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:36:55 compute-0 systemd-rc-local-generator[168427]: /etc/rc.d/rc.local is not marked executable, skipping.
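Both generator messages recur on every daemon-reload on EL9 and are informational: the legacy network initscript ships no native unit, and /etc/rc.d/rc.local is only picked up when executable. If rc.local content were actually wanted, marking the file executable (as root) silences the second message:

    chmod +x /etc/rc.d/rc.local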
Oct 11 01:36:55 compute-0 sudo[168400]: pam_unix(sudo:session): session closed for user root
Oct 11 01:36:56 compute-0 sudo[168587]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nkmlnyoigckeqrxbxxtvgqgkspttsejp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146616.0209866-100-172369304669517/AnsiballZ_command.py'
Oct 11 01:36:56 compute-0 sudo[168587]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:56 compute-0 python3.9[168589]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_ceilometer_agent_ipmi.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:36:56 compute-0 sudo[168587]: pam_unix(sudo:session): session closed for user root
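reset-failed clears the failed state of the just-removed tripleo unit so it no longer lingers in systemd's listings; afterwards it should be absent from the failed set:

    systemctl reset-failed tripleo_ceilometer_agent_ipmi.service
    systemctl --failed   # the tripleo unit should no longer appear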
Oct 11 01:36:57 compute-0 sudo[168740]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lythvsthufqsgzbwttojkiioeqdglxqi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146616.9704442-109-255899725563475/AnsiballZ_file.py'
Oct 11 01:36:57 compute-0 sudo[168740]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:36:57 compute-0 python3.9[168742]: ansible-ansible.builtin.file Invoked with group=zuul mode=0750 owner=zuul path=/var/lib/openstack/config/telemetry-power-monitoring recurse=True setype=container_file_t state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:36:57 compute-0 sudo[168740]: pam_unix(sudo:session): session closed for user root
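That file-module call creates the telemetry-power-monitoring config directory with recursive ownership and SELinux labeling. A simplified manual equivalent (a sketch only: chcon labels do not survive a filesystem relabel, and recurse=True also applies the settings to any existing contents):

    mkdir -p /var/lib/openstack/config/telemetry-power-monitoring
    chown -R zuul:zuul /var/lib/openstack/config/telemetry-power-monitoring
    chmod 0750 /var/lib/openstack/config/telemetry-power-monitoring
    chcon -R -t container_file_t /var/lib/openstack/config/telemetry-power-monitoring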
Oct 11 01:36:58 compute-0 python3.9[168892]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:36:59 compute-0 python3.9[169044]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-host-specific.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:36:59 compute-0 podman[157119]: time="2025-10-11T01:36:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:36:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:36:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 12784 "" "Go-http-client/1.1"
Oct 11 01:36:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:36:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 2149 "" "Go-http-client/1.1"
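These two GETs are the libpod REST API being scraped over the podman socket (the podman_exporter below runs with CONTAINER_HOST=unix:///run/podman/podman.sock). The same query can be reproduced by hand against the socket:

    curl --unix-socket /run/podman/podman.sock \
        'http://d/v4.9.3/libpod/containers/json?all=true'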
Oct 11 01:37:00 compute-0 python3.9[169168]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-host-specific.conf mode=0644 setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760146618.9749913-125-231717615337586/.source.conf follow=False _original_basename=ceilometer-host-specific.conf.j2 checksum=e86e0e43000ce9ccfe5aefbf8e8f2e3d15d05584 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:37:01 compute-0 openstack_network_exporter[159265]: ERROR   01:37:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:37:01 compute-0 openstack_network_exporter[159265]: ERROR   01:37:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:37:01 compute-0 openstack_network_exporter[159265]: ERROR   01:37:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:37:01 compute-0 openstack_network_exporter[159265]: ERROR   01:37:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
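The exporter cannot find ovsdb-server or ovn-northd control sockets under the paths mounted into it (/var/run/openvswitch and /var/lib/openvswitch/ovn on the host, per the config_data above); ovn-northd in particular normally runs on controller nodes, so its absence on compute-0 is expected. The host-side sockets can be checked directly:

    ls /var/run/openvswitch/*.ctl 2>/dev/null || echo 'no OVS control sockets'
    ls /var/lib/openvswitch/ovn/*.ctl 2>/dev/null || echo 'no OVN control sockets'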
Oct 11 01:37:01 compute-0 sudo[169318]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oropgeyxrrlgzhdaakpvathpjmvnkcht ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146620.867529-143-59510745242511/AnsiballZ_getent.py'
Oct 11 01:37:01 compute-0 sudo[169318]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:01 compute-0 python3.9[169320]: ansible-ansible.builtin.getent Invoked with database=passwd key=ceilometer fail_key=True service=None split=None
Oct 11 01:37:01 compute-0 sudo[169318]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:03 compute-0 python3.9[169471]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:37:03 compute-0 podman[169566]: 2025-10-11 01:37:03.984950217 +0000 UTC m=+0.144543231 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, org.label-schema.vendor=CentOS, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_controller, io.buildah.version=1.41.3)
Oct 11 01:37:04 compute-0 python3.9[169604]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer.conf mode=0640 remote_src=False src=/home/zuul/.ansible/tmp/ansible-tmp-1760146622.8274302-171-229394543937242/.source.conf _original_basename=ceilometer.conf follow=False checksum=e93ef84feaa07737af66c0c1da2fd4bdcae81d37 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:37:04 compute-0 python3.9[169770]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/polling.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:37:05 compute-0 python3.9[169891]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry-power-monitoring/polling.yaml mode=0640 remote_src=False src=/home/zuul/.ansible/tmp/ansible-tmp-1760146624.3169801-171-108841610337940/.source.yaml _original_basename=polling.yaml follow=False checksum=5ef7021082c6431099dde63e021011029cd65119 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:37:06 compute-0 podman[169999]: 2025-10-11 01:37:06.186157793 +0000 UTC m=+0.080868933 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 01:37:06 compute-0 python3.9[170065]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/custom.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:37:07 compute-0 python3.9[170186]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry-power-monitoring/custom.conf mode=0640 remote_src=False src=/home/zuul/.ansible/tmp/ansible-tmp-1760146625.8166423-171-189888296807134/.source.conf _original_basename=custom.conf follow=False checksum=838b8b0a7d7f72e55ab67d39f32e3cb3eca2139b backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
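Each ansible.legacy.copy entry records the SHA-1 of the deployed file, so the on-disk result can be verified against the log after the fact:

    sha1sum /var/lib/openstack/config/telemetry-power-monitoring/custom.conf
    # expected: 838b8b0a7d7f72e55ab67d39f32e3cb3eca2139b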
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.931 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to be longer than the expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.932 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.932 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.933 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f8ed27f97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.933 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb8c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.934 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.934 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.934 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb1a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.934 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb200>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.934 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.934 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed2874260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.934 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.934 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.934 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed3ab42f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.934 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.934 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb350>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.934 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb90>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.934 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fa390>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb3b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbbf0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbc80>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27f9610>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb620>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbe30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbec0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbf50>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238bd70>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.936 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.936 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f8ed27fbad0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.936 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.936 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f8ed27faff0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.937 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.937 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f8ed27fb110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.937 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.937 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f8ed27fb170>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.937 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.937 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f8ed27fb1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.938 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.938 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f8ed27fb230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.938 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.938 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f8ed2874230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.938 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.938 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f8ed27fb290>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.939 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.939 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f8ed5778d70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.939 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.939 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f8ed27fb650>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.939 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.939 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f8ed27fbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.939 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.940 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f8ed27fb320>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.940 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.940 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f8ed27fbb60>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.940 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.940 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f8ed27fa3f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.940 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.941 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f8ed27fb380>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.941 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.941 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f8ed27fbbc0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.941 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.941 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f8ed27fbc50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.941 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.941 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f8ed27fbce0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.942 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f8ed27fbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.942 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f8ed27fb590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.942 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f8ed27f95e0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.943 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.943 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f8ed27fb5f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.943 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.943 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f8ed27fbe00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.943 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.943 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f8ed27fbe90>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.944 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.944 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f8ed27fbf20>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.944 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.944 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.944 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.944 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.945 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.945 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.945 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.945 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.945 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.945 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.945 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.945 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.945 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.945 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.945 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.945 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.945 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.946 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.946 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.946 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.946 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.946 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.946 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.946 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.946 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.946 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:37:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:37:07.946 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
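Every pollster in this cycle ends in "Skip pollster ..., no resources found" because local_instances discovery returned nothing: no guests are running on compute-0 yet, so the agent registers each pollster, skips it, and finishes the cycle without emitting samples. A quick count from the journal confirms the pattern (the edpm_-prefixed unit name is an assumption, inferred from the 'edpm_.*' unit-include pattern in the node_exporter config above):

    journalctl -u edpm_ceilometer_agent_compute --since '01:37:07' | grep -c 'Skip pollster'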
Oct 11 01:37:08 compute-0 python3.9[170336]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.crt follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:37:08 compute-0 podman[170463]: 2025-10-11 01:37:08.699064762 +0000 UTC m=+0.100157097 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=edpm, io.buildah.version=1.41.4, managed_by=edpm_ansible, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2)
Oct 11 01:37:08 compute-0 python3.9[170504]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.key follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:37:09 compute-0 python3.9[170661]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:37:10 compute-0 python3.9[170782]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json mode=420 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146629.0929465-230-214116994647729/.source.json follow=False _original_basename=ceilometer-agent-ipmi.json.j2 checksum=21255e7f7db3155b4a491729298d9407fe6f8335 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:37:11 compute-0 python3.9[170932]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-host-specific.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:37:11 compute-0 python3.9[171008]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-host-specific.conf _original_basename=ceilometer-host-specific.conf.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-host-specific.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:37:12 compute-0 python3.9[171158]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_agent_ipmi.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:37:13 compute-0 python3.9[171279]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_agent_ipmi.json mode=420 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146632.0349276-230-51707194556528/.source.json follow=False _original_basename=ceilometer_agent_ipmi.json.j2 checksum=cf81874b7544c057599ec397442879f74d42b3ec backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:37:14 compute-0 python3.9[171429]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:37:14 compute-0 python3.9[171550]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml mode=420 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146633.6494932-230-28005049707946/.source.yaml follow=False _original_basename=ceilometer_prom_exporter.yaml.j2 checksum=10157c879411ee6023e506dc85a343cedc52700f backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:37:15 compute-0 python3.9[171700]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/firewall.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:37:16 compute-0 python3.9[171821]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry-power-monitoring/firewall.yaml mode=420 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146635.1615245-230-134269864706701/.source.yaml follow=False _original_basename=firewall.yaml.j2 checksum=40b8960d32c81de936cddbeb137a8240ecc54e7b backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:37:17 compute-0 python3.9[171971]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/kepler.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:37:17 compute-0 python3.9[172092]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/telemetry-power-monitoring/kepler.json mode=420 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146636.6514888-230-77865344914955/.source.json follow=False _original_basename=kepler.json.j2 checksum=89451093c8765edd3915016a9e87770fe489178d backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
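[annotation] The copy tasks above all pass mode=420, which is simply the decimal rendering of the octal permission 0644 (rw-r--r--); Ansible accepts either form. A quick check:

    # 420 (decimal) and 0o644 (octal) are the same permission bits
    assert 420 == 0o644
    print(oct(420))   # -> 0o644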
Oct 11 01:37:18 compute-0 python3.9[172242]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:37:19 compute-0 python3.9[172318]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml _original_basename=ceilometer_prom_exporter.yaml.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:37:19 compute-0 sudo[172469]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ajgzpyfxdvoftawvxeoildeimbotvucb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146639.524331-325-261426443578458/AnsiballZ_file.py'
Oct 11 01:37:19 compute-0 sudo[172469]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:20 compute-0 python3.9[172471]: ansible-ansible.builtin.file Invoked with group=ceilometer mode=0644 owner=ceilometer path=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.crt recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False state=None _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:37:20 compute-0 sudo[172469]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:20 compute-0 sudo[172621]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ofhmgbgjumhntwqsxpkncguuawxbqhuz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146640.425017-333-126201537863076/AnsiballZ_file.py'
Oct 11 01:37:20 compute-0 sudo[172621]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:20 compute-0 python3.9[172623]: ansible-ansible.builtin.file Invoked with group=ceilometer mode=0644 owner=ceilometer path=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.key recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False state=None _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:37:21 compute-0 sudo[172621]: pam_unix(sudo:session): session closed for user root
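[annotation] Each privileged task above follows the same become handshake: sudo runs a shell that first echoes a random BECOME-SUCCESS marker and then executes the AnsiballZ module, and the controller treats everything after the marker as module output. A hedged sketch of that pattern (marker generation and command shape inferred from the log):

    import secrets, string, subprocess

    # random marker, mimicking the BECOME-SUCCESS-<32 letters> seen above
    marker = "BECOME-SUCCESS-" + "".join(
        secrets.choice(string.ascii_lowercase) for _ in range(32))

    # sudo may prompt for a password outside a passwordless-sudo CI host
    proc = subprocess.run(
        ["sudo", "/bin/sh", "-c", f"echo {marker} ; id -u"],
        capture_output=True, text=True)

    # privilege escalation succeeded iff the marker shows up on stdout
    escalated = any(line == marker for line in proc.stdout.splitlines())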
Oct 11 01:37:21 compute-0 sudo[172784]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ydglyixcgsqljclhjhdbyxphkhspwfka ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146641.2428143-341-141612077626176/AnsiballZ_file.py'
Oct 11 01:37:21 compute-0 sudo[172784]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:21 compute-0 podman[172747]: 2025-10-11 01:37:21.704949075 +0000 UTC m=+0.122622816 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=openstack_network_exporter, config_id=edpm, io.openshift.tags=minimal rhel9, url=https://catalog.redhat.com/en/search?searchType=containers, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, com.redhat.component=ubi9-minimal-container, io.openshift.expose-services=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., version=9.6, distribution-scope=public, maintainer=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.33.7, managed_by=edpm_ansible, build-date=2025-08-20T13:12:41, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, name=ubi9-minimal, vcs-type=git)
Oct 11 01:37:21 compute-0 python3.9[172792]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/openstack/healthchecks setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:37:21 compute-0 sudo[172784]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:22 compute-0 podman[172823]: 2025-10-11 01:37:22.17742938 +0000 UTC m=+0.071004306 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:37:22 compute-0 sudo[172969]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nofyfwxgvtpyshtkhykgxvcoupisthmh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146642.1358037-349-147879739286080/AnsiballZ_stat.py'
Oct 11 01:37:22 compute-0 sudo[172969]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:22 compute-0 python3.9[172971]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/ceilometer_agent_ipmi/healthcheck follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:37:22 compute-0 sudo[172969]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:23 compute-0 sudo[173092]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rnygnofntvrpylkhcwgqbwqwnagldhxf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146642.1358037-349-147879739286080/AnsiballZ_copy.py'
Oct 11 01:37:23 compute-0 sudo[173092]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:23 compute-0 python3.9[173094]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/healthchecks/ceilometer_agent_ipmi/ group=zuul mode=0700 owner=zuul setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760146642.1358037-349-147879739286080/.source _original_basename=healthcheck follow=False checksum=ebb343c21fce35a02591a9351660cb7035a47d42 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:37:23 compute-0 sudo[173092]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:23 compute-0 sudo[173168]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eeurnciuaemguemgvcsoqkipmmtsyviy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146642.1358037-349-147879739286080/AnsiballZ_stat.py'
Oct 11 01:37:23 compute-0 sudo[173168]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:23 compute-0 python3.9[173170]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/ceilometer_agent_ipmi/healthcheck.future follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:37:23 compute-0 sudo[173168]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:24 compute-0 sudo[173291]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gyzpqirezfhaiquqvcgqzepuzdcswprj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146642.1358037-349-147879739286080/AnsiballZ_copy.py'
Oct 11 01:37:24 compute-0 sudo[173291]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:24 compute-0 python3.9[173293]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/healthchecks/ceilometer_agent_ipmi/ group=zuul mode=0700 owner=zuul setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760146642.1358037-349-147879739286080/.source.future _original_basename=healthcheck.future follow=False checksum=d500a98192f4ddd70b4dfdc059e2d81aed36a294 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:37:24 compute-0 sudo[173291]: pam_unix(sudo:session): session closed for user root
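[annotation] The stat/copy pairs above are Ansible's idempotence check: the healthcheck scripts are only rewritten when the SHA-1 of the file on disk differs from the checksum of the rendered source (e.g. ebb343c2... vs d500a981...). A minimal checksum helper in the same spirit:

    import hashlib

    def sha1sum(path, bufsize=65536):
        """Stream a file through SHA-1, as the stat module's checksum does."""
        h = hashlib.sha1()
        with open(path, "rb") as f:
            while chunk := f.read(bufsize):
                h.update(chunk)
        return h.hexdigest()

    # the copy step only fires when this differs from the source checksum
    # sha1sum("/var/lib/openstack/healthchecks/ceilometer_agent_ipmi/healthcheck")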
Oct 11 01:37:25 compute-0 sudo[173443]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ipfhsqslakfaqkqqfxjstatmvdhpluky ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146644.9291885-349-275081057573205/AnsiballZ_stat.py'
Oct 11 01:37:25 compute-0 sudo[173443]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:25 compute-0 python3.9[173445]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/kepler/healthcheck follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:37:25 compute-0 sudo[173443]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:26 compute-0 sudo[173566]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-crtdqmokqejphvrcjkzwvvlhcztxkhfr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146644.9291885-349-275081057573205/AnsiballZ_copy.py'
Oct 11 01:37:26 compute-0 sudo[173566]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:26 compute-0 python3.9[173568]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/healthchecks/kepler/ group=zuul mode=0700 owner=zuul setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760146644.9291885-349-275081057573205/.source _original_basename=healthcheck follow=False checksum=57ed53cc150174efd98819129660d5b9ea9ea61a backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:37:26 compute-0 sudo[173566]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:27 compute-0 sudo[173718]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-woridtesmobuaboavohwxigdvkriwqzi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146646.7057703-391-17033899547741/AnsiballZ_container_config_data.py'
Oct 11 01:37:27 compute-0 sudo[173718]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:27 compute-0 python3.9[173720]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/openstack/config/telemetry-power-monitoring config_pattern=ceilometer_agent_ipmi.json debug=False
Oct 11 01:37:27 compute-0 sudo[173718]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:28 compute-0 sudo[173870]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yhnpuiitecqvpdkjjdkvxieuwbnweeov ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146647.8619437-400-233029743260247/AnsiballZ_container_config_hash.py'
Oct 11 01:37:28 compute-0 sudo[173870]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:28 compute-0 python3.9[173872]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
Oct 11 01:37:28 compute-0 sudo[173870]: pam_unix(sudo:session): session closed for user root
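[annotation] container_config_data collects the JSON files matching config_pattern under config_path, and container_config_hash then derives a content hash so containers are only recreated when their configuration actually changed. A condensed sketch of that idea (paths taken from the log; the exact hashing scheme is assumed for illustration):

    import glob, hashlib, json, os

    config_path = "/var/lib/openstack/config/telemetry-power-monitoring"
    pattern = "ceilometer_agent_ipmi.json"

    # gather matching config files, as container_config_data does
    configs = {p: json.load(open(p))
               for p in glob.glob(os.path.join(config_path, pattern))}

    # one stable digest over the serialized config; a changed digest
    # means the container must be recreated
    digest = hashlib.sha256(
        json.dumps(configs, sort_keys=True).encode()).hexdigest()
    print(digest[:12])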
Oct 11 01:37:29 compute-0 sudo[174022]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uvimeitpfxkambwwafjysftlvkgxymvw ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760146649.0103507-410-256232040804742/AnsiballZ_edpm_container_manage.py'
Oct 11 01:37:29 compute-0 sudo[174022]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:29 compute-0 podman[157119]: time="2025-10-11T01:37:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:37:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:37:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 12784 "" "Go-http-client/1.1"
Oct 11 01:37:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:37:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 2146 "" "Go-http-client/1.1"
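[annotation] The GET /v4.9.3/libpod/... lines are the podman system service answering REST calls over its unix socket (the Go-http-client here is the podman exporter). The same endpoint can be queried with nothing but the Python standard library; the socket path below is assumed from the /run/podman/podman.sock mount seen elsewhere in this log:

    import http.client, json, socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        """HTTP over an AF_UNIX socket, enough to talk to the podman service."""
        def __init__(self, socket_path):
            super().__init__("localhost")
            self.socket_path = socket_path
        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self.socket_path)

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    for c in json.loads(conn.getresponse().read()):
        print(c["Names"], c["State"])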
Oct 11 01:37:29 compute-0 python3[174024]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/openstack/config/telemetry-power-monitoring config_id=edpm config_overrides={} config_patterns=ceilometer_agent_ipmi.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 01:37:31 compute-0 openstack_network_exporter[159265]: ERROR   01:37:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:37:31 compute-0 openstack_network_exporter[159265]: ERROR   01:37:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:37:31 compute-0 openstack_network_exporter[159265]: ERROR   01:37:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:37:31 compute-0 openstack_network_exporter[159265]: ERROR   01:37:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:37:31 compute-0 openstack_network_exporter[159265]: ERROR   01:37:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:37:35 compute-0 podman[174098]: 2025-10-11 01:37:35.270704997 +0000 UTC m=+1.154517933 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, config_id=ovn_controller, container_name=ovn_controller, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team)
Oct 11 01:37:35 compute-0 podman[174038]: 2025-10-11 01:37:35.485095153 +0000 UTC m=+5.375066060 image pull 3fd38304666e26ceda31e631b76b1276c0e32061a70084c62e30140f9f182bd7 quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified
Oct 11 01:37:35 compute-0 podman[174163]: 2025-10-11 01:37:35.727480617 +0000 UTC m=+0.081516068 container create 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.3, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.build-date=20251009, config_id=edpm, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team)
Oct 11 01:37:35 compute-0 podman[174163]: 2025-10-11 01:37:35.680706065 +0000 UTC m=+0.034741566 image pull 3fd38304666e26ceda31e631b76b1276c0e32061a70084c62e30140f9f182bd7 quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified
Oct 11 01:37:35 compute-0 python3[174024]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman create --name ceilometer_agent_ipmi --conmon-pidfile /run/ceilometer_agent_ipmi.pid --env KOLLA_CONFIG_STRATEGY=COPY_ALWAYS --env OS_ENDPOINT_TYPE=internal --healthcheck-command /openstack/healthcheck ipmi --label config_id=edpm --label container_name=ceilometer_agent_ipmi --label managed_by=edpm_ansible --label config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']} --log-driver journald --log-level info --network host --privileged=True --security-opt label:type:ceilometer_polling_t --user ceilometer --volume /var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z --volume /var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z --volume /etc/hosts:/etc/hosts:ro --volume /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro --volume /etc/localtime:/etc/localtime:ro --volume /etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro --volume /var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z --volume /var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z --volume /var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z --volume /dev/log:/dev/log --volume /var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified kolla_start
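[annotation] The PODMAN-CONTAINER-DEBUG line spells out how edpm_container_manage flattens the config_data dict into podman create flags: each environment entry becomes --env, each volume becomes --volume, and the kolla_start command lands last. A trimmed, hypothetical re-creation of that translation:

    # config_data trimmed to a few keys from the log for illustration
    config = {
        "image": "quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified",
        "user": "ceilometer",
        "net": "host",
        "environment": {"KOLLA_CONFIG_STRATEGY": "COPY_ALWAYS"},
        "volumes": ["/etc/hosts:/etc/hosts:ro"],
        "command": "kolla_start",
    }

    cmd = ["podman", "create", "--name", "ceilometer_agent_ipmi",
           "--network", config["net"]]
    for k, v in config["environment"].items():
        cmd += ["--env", f"{k}={v}"]
    for vol in config["volumes"]:
        cmd += ["--volume", vol]
    cmd += ["--user", config["user"], config["image"], config["command"]]
    print(" ".join(cmd))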
Oct 11 01:37:35 compute-0 sudo[174022]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:36 compute-0 sudo[174365]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dhynxofdvtkdkvccazuudmbhpjqcvkyj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146656.2327163-418-161521736787010/AnsiballZ_stat.py'
Oct 11 01:37:36 compute-0 sudo[174365]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:36 compute-0 podman[174325]: 2025-10-11 01:37:36.682276197 +0000 UTC m=+0.101698301 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:37:36 compute-0 python3.9[174378]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:37:36 compute-0 sudo[174365]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:37 compute-0 sudo[174530]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hwchcnlxtjkleiylwpbcouqjwxnzdptu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146657.229592-427-104174728266739/AnsiballZ_file.py'
Oct 11 01:37:37 compute-0 sudo[174530]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:37 compute-0 python3.9[174532]: ansible-file Invoked with path=/etc/systemd/system/edpm_ceilometer_agent_ipmi.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:37:37 compute-0 sudo[174530]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:38 compute-0 sudo[174681]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cdouadoetolnpqexrqbcijdnutgbirdy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146657.9410393-427-197716803127529/AnsiballZ_copy.py'
Oct 11 01:37:38 compute-0 sudo[174681]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:38 compute-0 python3.9[174683]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760146657.9410393-427-197716803127529/source dest=/etc/systemd/system/edpm_ceilometer_agent_ipmi.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:37:38 compute-0 sudo[174681]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:39 compute-0 podman[174684]: 2025-10-11 01:37:39.225646298 +0000 UTC m=+0.111422508 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.license=GPLv2, config_id=edpm)
Oct 11 01:37:39 compute-0 sudo[174777]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wfjupexackyktthpznniwtvrsvrfzsmy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146657.9410393-427-197716803127529/AnsiballZ_systemd.py'
Oct 11 01:37:39 compute-0 sudo[174777]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:39 compute-0 python3.9[174779]: ansible-systemd Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 01:37:39 compute-0 systemd[1]: Reloading.
Oct 11 01:37:39 compute-0 systemd-sysv-generator[174810]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:37:39 compute-0 systemd-rc-local-generator[174807]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:37:40 compute-0 sudo[174777]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:40 compute-0 sudo[174888]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nncjoaerekytdommmnlkbnrvqymigtpx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146657.9410393-427-197716803127529/AnsiballZ_systemd.py'
Oct 11 01:37:40 compute-0 sudo[174888]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:40 compute-0 python3.9[174890]: ansible-systemd Invoked with state=restarted name=edpm_ceilometer_agent_ipmi.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
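[annotation] The ansible-systemd call with daemon_reload=True maps to systemctl daemon-reload (the "Reloading." lines), and the state=restarted/enabled=True invocation to enable plus restart of edpm_ceilometer_agent_ipmi.service. Sketched as plain subprocess calls (requires root, like the sudo sessions above):

    import subprocess

    def systemctl(*args):
        subprocess.run(["systemctl", *args], check=True)

    systemctl("daemon-reload")                                  # "Reloading."
    systemctl("enable", "edpm_ceilometer_agent_ipmi.service")   # enabled=True
    systemctl("restart", "edpm_ceilometer_agent_ipmi.service")  # state=restarted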
Oct 11 01:37:41 compute-0 systemd[1]: Reloading.
Oct 11 01:37:42 compute-0 systemd-rc-local-generator[174920]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:37:42 compute-0 systemd-sysv-generator[174924]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:37:42 compute-0 systemd[1]: Starting ceilometer_agent_ipmi container...
Oct 11 01:37:42 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:37:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/88244608e55640e82c5cb6a8a67a46420230ae4798b24d993c337ff432ee2d45/merged/etc/ceilometer/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 01:37:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/88244608e55640e82c5cb6a8a67a46420230ae4798b24d993c337ff432ee2d45/merged/etc/ceilometer/ceilometer_prom_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:37:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/88244608e55640e82c5cb6a8a67a46420230ae4798b24d993c337ff432ee2d45/merged/var/lib/openstack/config supports timestamps until 2038 (0x7fffffff)
Oct 11 01:37:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/88244608e55640e82c5cb6a8a67a46420230ae4798b24d993c337ff432ee2d45/merged/var/lib/kolla/config_files/config.json supports timestamps until 2038 (0x7fffffff)
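[annotation] The xfs warnings above are benign: 0x7fffffff is the 32-bit time_t ceiling, i.e. the last timestamp an xfs inode without the bigtime feature can represent. Decoding it:

    from datetime import datetime, timezone

    # 0x7fffffff == 2**31 - 1, the classic year-2038 limit
    print(datetime.fromtimestamp(0x7FFFFFFF, tz=timezone.utc))
    # -> 2038-01-19 03:14:07+00:00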
Oct 11 01:37:42 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.
Oct 11 01:37:42 compute-0 podman[174930]: 2025-10-11 01:37:42.475044103 +0000 UTC m=+0.204578075 container init 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: + sudo -E kolla_set_configs
Oct 11 01:37:42 compute-0 podman[174930]: 2025-10-11 01:37:42.514960372 +0000 UTC m=+0.244494304 container start 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, container_name=ceilometer_agent_ipmi, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 01:37:42 compute-0 podman[174930]: ceilometer_agent_ipmi
Oct 11 01:37:42 compute-0 sudo[174953]: ceilometer : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_set_configs
Oct 11 01:37:42 compute-0 sudo[174953]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Oct 11 01:37:42 compute-0 sudo[174953]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=42405)
Oct 11 01:37:42 compute-0 systemd[1]: Started ceilometer_agent_ipmi container.
Oct 11 01:37:42 compute-0 sudo[174888]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: INFO:__main__:Loading config file at /var/lib/kolla/config_files/config.json
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: INFO:__main__:Validating config file
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: INFO:__main__:Kolla config strategy set to: COPY_ALWAYS
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: INFO:__main__:Copying service configuration files
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: INFO:__main__:Deleting /etc/ceilometer/ceilometer.conf
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: INFO:__main__:Copying /var/lib/openstack/config/ceilometer.conf to /etc/ceilometer/ceilometer.conf
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: INFO:__main__:Deleting /etc/ceilometer/polling.yaml
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: INFO:__main__:Copying /var/lib/openstack/config/polling.yaml to /etc/ceilometer/polling.yaml
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: INFO:__main__:Setting permission for /etc/ceilometer/polling.yaml
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: INFO:__main__:Copying /var/lib/openstack/config/custom.conf to /etc/ceilometer/ceilometer.conf.d/01-ceilometer-custom.conf
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf.d/01-ceilometer-custom.conf
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: INFO:__main__:Copying /var/lib/openstack/config/ceilometer-host-specific.conf to /etc/ceilometer/ceilometer.conf.d/02-ceilometer-host-specific.conf
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf.d/02-ceilometer-host-specific.conf
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: INFO:__main__:Writing out command to execute
Oct 11 01:37:42 compute-0 sudo[174953]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:42 compute-0 podman[174954]: 2025-10-11 01:37:42.626337659 +0000 UTC m=+0.089820543 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=starting, health_failing_streak=1, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, io.buildah.version=1.41.3)
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: ++ cat /run_command
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: + CMD='/usr/bin/ceilometer-polling --polling-namespaces ipmi --logfile /dev/stdout'
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: + ARGS=
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: + sudo kolla_copy_cacerts
Oct 11 01:37:42 compute-0 systemd[1]: 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c-34479a48e9e141d1.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 01:37:42 compute-0 systemd[1]: 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c-34479a48e9e141d1.service: Failed with result 'exit-code'.
Oct 11 01:37:42 compute-0 sudo[174982]: ceilometer : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_copy_cacerts
Oct 11 01:37:42 compute-0 sudo[174982]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Oct 11 01:37:42 compute-0 sudo[174982]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=42405)
Oct 11 01:37:42 compute-0 sudo[174982]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: + [[ ! -n '' ]]
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: + . kolla_extend_start
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: + echo 'Running command: '\''/usr/bin/ceilometer-polling --polling-namespaces ipmi --logfile /dev/stdout'\'''
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: Running command: '/usr/bin/ceilometer-polling --polling-namespaces ipmi --logfile /dev/stdout'
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: + umask 0022
Oct 11 01:37:42 compute-0 ceilometer_agent_ipmi[174947]: + exec /usr/bin/ceilometer-polling --polling-namespaces ipmi --logfile /dev/stdout
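[annotation] The container startup above is the kolla pattern: kolla_set_configs reads /var/lib/kolla/config_files/config.json, copies each listed file into place, sets permissions, and writes /run_command; kolla_start then cats that file and execs it. A condensed, hypothetical re-creation (the real scripts also handle globs, ownership, and deletion of stale files):

    import json, os, shutil

    cfg = json.load(open("/var/lib/kolla/config_files/config.json"))

    for item in cfg.get("config_files", []):
        shutil.copy(item["source"], item["dest"])                 # "Copying ... to ..."
        os.chmod(item["dest"], int(item.get("perm", "0644"), 8))  # "Setting permission"

    with open("/run_command", "w") as f:                          # "Writing out command"
        f.write(cfg["command"])

    # kolla_start: CMD=$(cat /run_command); exec $CMD  -- replaces this process
    os.execvp("/bin/sh", ["/bin/sh", "-c", cfg["command"]])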
Oct 11 01:37:43 compute-0 sudo[175129]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zdgtbkxyzlskijckdyzgqhfsllbbjnfl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146662.9672456-453-164905148327851/AnsiballZ_container_config_data.py'
Oct 11 01:37:43 compute-0 sudo[175129]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.440 2 DEBUG cotyledon.oslo_config_glue [-] Full set of CONF: _load_service_manager_options /usr/lib/python3.9/site-packages/cotyledon/oslo_config_glue.py:40
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.440 2 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2589
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.440 2 DEBUG cotyledon.oslo_config_glue [-] Configuration options gathered from: log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2590
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.440 2 DEBUG cotyledon.oslo_config_glue [-] command line args: ['--polling-namespaces', 'ipmi', '--logfile', '/dev/stdout'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2591
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.441 2 DEBUG cotyledon.oslo_config_glue [-] config files: ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2592
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.441 2 DEBUG cotyledon.oslo_config_glue [-] ================================================================================ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2594
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.441 2 DEBUG cotyledon.oslo_config_glue [-] batch_size                     = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.441 2 DEBUG cotyledon.oslo_config_glue [-] cfg_file                       = polling.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.441 2 DEBUG cotyledon.oslo_config_glue [-] config_dir                     = ['/etc/ceilometer/ceilometer.conf.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.441 2 DEBUG cotyledon.oslo_config_glue [-] config_file                    = ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.441 2 DEBUG cotyledon.oslo_config_glue [-] config_source                  = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.441 2 DEBUG cotyledon.oslo_config_glue [-] debug                          = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.441 2 DEBUG cotyledon.oslo_config_glue [-] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'futurist=INFO', 'neutronclient=INFO', 'keystoneclient=INFO'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.441 2 DEBUG cotyledon.oslo_config_glue [-] event_pipeline_cfg_file        = event_pipeline.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.441 2 DEBUG cotyledon.oslo_config_glue [-] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.441 2 DEBUG cotyledon.oslo_config_glue [-] host                           = compute-0.ctlplane.example.com log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.442 2 DEBUG cotyledon.oslo_config_glue [-] http_timeout                   = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.442 2 DEBUG cotyledon.oslo_config_glue [-] hypervisor_inspector           = libvirt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.442 2 DEBUG cotyledon.oslo_config_glue [-] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.442 2 DEBUG cotyledon.oslo_config_glue [-] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.442 2 DEBUG cotyledon.oslo_config_glue [-] libvirt_type                   = kvm log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.442 2 DEBUG cotyledon.oslo_config_glue [-] libvirt_uri                    =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.442 2 DEBUG cotyledon.oslo_config_glue [-] log_config_append              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.442 2 DEBUG cotyledon.oslo_config_glue [-] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.442 2 DEBUG cotyledon.oslo_config_glue [-] log_dir                        = /var/log/ceilometer log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.442 2 DEBUG cotyledon.oslo_config_glue [-] log_file                       = /dev/stdout log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.442 2 DEBUG cotyledon.oslo_config_glue [-] log_options                    = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.442 2 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.442 2 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.443 2 DEBUG cotyledon.oslo_config_glue [-] log_rotation_type              = none log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.443 2 DEBUG cotyledon.oslo_config_glue [-] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.443 2 DEBUG cotyledon.oslo_config_glue [-] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.443 2 DEBUG cotyledon.oslo_config_glue [-] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.443 2 DEBUG cotyledon.oslo_config_glue [-] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.443 2 DEBUG cotyledon.oslo_config_glue [-] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.443 2 DEBUG cotyledon.oslo_config_glue [-] max_logfile_count              = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.443 2 DEBUG cotyledon.oslo_config_glue [-] max_logfile_size_mb            = 200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.443 2 DEBUG cotyledon.oslo_config_glue [-] max_parallel_requests          = 64 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.443 2 DEBUG cotyledon.oslo_config_glue [-] partitioning_group_prefix      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.443 2 DEBUG cotyledon.oslo_config_glue [-] pipeline_cfg_file              = pipeline.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.443 2 DEBUG cotyledon.oslo_config_glue [-] polling_namespaces             = ['ipmi'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.444 2 DEBUG cotyledon.oslo_config_glue [-] pollsters_definitions_dirs     = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.444 2 DEBUG cotyledon.oslo_config_glue [-] publish_errors                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.444 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.444 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.444 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.444 2 DEBUG cotyledon.oslo_config_glue [-] reseller_prefix                = AUTH_ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.444 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_keys         = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.444 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_length       = 256 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.444 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_namespace    = ['metering.'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.444 2 DEBUG cotyledon.oslo_config_glue [-] rootwrap_config                = /etc/ceilometer/rootwrap.conf log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.444 2 DEBUG cotyledon.oslo_config_glue [-] sample_source                  = openstack log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.444 2 DEBUG cotyledon.oslo_config_glue [-] syslog_log_facility            = LOG_USER log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.445 2 DEBUG cotyledon.oslo_config_glue [-] tenant_name_discovery          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.445 2 DEBUG cotyledon.oslo_config_glue [-] use_eventlog                   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.445 2 DEBUG cotyledon.oslo_config_glue [-] use_journal                    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.445 2 DEBUG cotyledon.oslo_config_glue [-] use_json                       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.445 2 DEBUG cotyledon.oslo_config_glue [-] use_stderr                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.445 2 DEBUG cotyledon.oslo_config_glue [-] use_syslog                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.445 2 DEBUG cotyledon.oslo_config_glue [-] watch_log_file                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.445 2 DEBUG cotyledon.oslo_config_glue [-] compute.instance_discovery_method = libvirt_metadata log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.445 2 DEBUG cotyledon.oslo_config_glue [-] compute.resource_cache_expiry  = 3600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.445 2 DEBUG cotyledon.oslo_config_glue [-] compute.resource_update_interval = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.445 2 DEBUG cotyledon.oslo_config_glue [-] coordination.backend_url       = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.445 2 DEBUG cotyledon.oslo_config_glue [-] event.definitions_cfg_file     = event_definitions.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.446 2 DEBUG cotyledon.oslo_config_glue [-] event.drop_unmatched_notifications = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.446 2 DEBUG cotyledon.oslo_config_glue [-] event.store_raw                = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.446 2 DEBUG cotyledon.oslo_config_glue [-] ipmi.node_manager_init_retry   = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.446 2 DEBUG cotyledon.oslo_config_glue [-] ipmi.polling_retry             = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.446 2 DEBUG cotyledon.oslo_config_glue [-] meter.meter_definitions_dirs   = ['/etc/ceilometer/meters.d', '/usr/lib/python3.9/site-packages/ceilometer/data/meters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.446 2 DEBUG cotyledon.oslo_config_glue [-] monasca.archive_on_failure     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.446 2 DEBUG cotyledon.oslo_config_glue [-] monasca.archive_path           = mon_pub_failures.txt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.446 2 DEBUG cotyledon.oslo_config_glue [-] monasca.auth_section           = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.446 2 DEBUG cotyledon.oslo_config_glue [-] monasca.auth_type              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.446 2 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_count            = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.446 2 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_max_retries      = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.446 2 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_mode             = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.447 2 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_polling_interval = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.447 2 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_timeout          = 15 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.447 2 DEBUG cotyledon.oslo_config_glue [-] monasca.cafile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.447 2 DEBUG cotyledon.oslo_config_glue [-] monasca.certfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.447 2 DEBUG cotyledon.oslo_config_glue [-] monasca.client_max_retries     = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.447 2 DEBUG cotyledon.oslo_config_glue [-] monasca.client_retry_interval  = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.447 2 DEBUG cotyledon.oslo_config_glue [-] monasca.clientapi_version      = 2_0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.447 2 DEBUG cotyledon.oslo_config_glue [-] monasca.cloud_name             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.447 2 DEBUG cotyledon.oslo_config_glue [-] monasca.cluster                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.447 2 DEBUG cotyledon.oslo_config_glue [-] monasca.collect_timing         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.447 2 DEBUG cotyledon.oslo_config_glue [-] monasca.control_plane          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.447 2 DEBUG cotyledon.oslo_config_glue [-] monasca.enable_api_pagination  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.447 2 DEBUG cotyledon.oslo_config_glue [-] monasca.insecure               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.448 2 DEBUG cotyledon.oslo_config_glue [-] monasca.interface              = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.448 2 DEBUG cotyledon.oslo_config_glue [-] monasca.keyfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.448 2 DEBUG cotyledon.oslo_config_glue [-] monasca.monasca_mappings       = /etc/ceilometer/monasca_field_definitions.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.448 2 DEBUG cotyledon.oslo_config_glue [-] monasca.region_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.448 2 DEBUG cotyledon.oslo_config_glue [-] monasca.retry_on_failure       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.448 2 DEBUG cotyledon.oslo_config_glue [-] monasca.split_loggers          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.448 2 DEBUG cotyledon.oslo_config_glue [-] monasca.timeout                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.448 2 DEBUG cotyledon.oslo_config_glue [-] notification.ack_on_event_error = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.448 2 DEBUG cotyledon.oslo_config_glue [-] notification.batch_size        = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.448 2 DEBUG cotyledon.oslo_config_glue [-] notification.batch_timeout     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.448 2 DEBUG cotyledon.oslo_config_glue [-] notification.messaging_urls    = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.449 2 DEBUG cotyledon.oslo_config_glue [-] notification.notification_control_exchanges = ['nova', 'glance', 'neutron', 'cinder', 'heat', 'keystone', 'sahara', 'trove', 'zaqar', 'swift', 'ceilometer', 'magnum', 'dns', 'ironic', 'aodh'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.449 2 DEBUG cotyledon.oslo_config_glue [-] notification.pipelines         = ['meter', 'event'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.449 2 DEBUG cotyledon.oslo_config_glue [-] notification.workers           = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.449 2 DEBUG cotyledon.oslo_config_glue [-] polling.batch_size             = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.449 2 DEBUG cotyledon.oslo_config_glue [-] polling.cfg_file               = polling.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.449 2 DEBUG cotyledon.oslo_config_glue [-] polling.partitioning_group_prefix = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.449 2 DEBUG cotyledon.oslo_config_glue [-] polling.pollsters_definitions_dirs = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.449 2 DEBUG cotyledon.oslo_config_glue [-] polling.tenant_name_discovery  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.449 2 DEBUG cotyledon.oslo_config_glue [-] publisher.telemetry_secret     = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.449 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.event_topic = event log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.449 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.metering_topic = metering log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.449 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.telemetry_driver = messagingv2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.450 2 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.access_key = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.450 2 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.secret_key = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.450 2 DEBUG cotyledon.oslo_config_glue [-] rgw_client.implicit_tenants    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.450 2 DEBUG cotyledon.oslo_config_glue [-] service_types.cinder           = volumev3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.450 2 DEBUG cotyledon.oslo_config_glue [-] service_types.glance           = image log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.450 2 DEBUG cotyledon.oslo_config_glue [-] service_types.neutron          = network log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.450 2 DEBUG cotyledon.oslo_config_glue [-] service_types.nova             = compute log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.450 2 DEBUG cotyledon.oslo_config_glue [-] service_types.radosgw          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.450 2 DEBUG cotyledon.oslo_config_glue [-] service_types.swift            = object-store log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.450 2 DEBUG cotyledon.oslo_config_glue [-] vmware.api_retry_count         = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.450 2 DEBUG cotyledon.oslo_config_glue [-] vmware.ca_file                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.450 2 DEBUG cotyledon.oslo_config_glue [-] vmware.host_ip                 = 127.0.0.1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.451 2 DEBUG cotyledon.oslo_config_glue [-] vmware.host_password           = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.451 2 DEBUG cotyledon.oslo_config_glue [-] vmware.host_port               = 443 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.451 2 DEBUG cotyledon.oslo_config_glue [-] vmware.host_username           =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.451 2 DEBUG cotyledon.oslo_config_glue [-] vmware.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.451 2 DEBUG cotyledon.oslo_config_glue [-] vmware.task_poll_interval      = 0.5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.451 2 DEBUG cotyledon.oslo_config_glue [-] vmware.wsdl_location           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.451 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_section = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.451 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_type  = password log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.451 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.cafile     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.451 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.certfile   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.451 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.collect_timing = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.451 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.insecure   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.452 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.interface  = internalURL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.452 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.keyfile    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.452 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.region_name = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.452 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.split_loggers = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.452 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.timeout    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.452 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_section           = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.452 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_type              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.452 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.cafile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.452 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.certfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.452 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.collect_timing         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.452 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.insecure               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.452 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.interface              = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.452 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.keyfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.453 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.region_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.453 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.split_loggers          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.453 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.timeout                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.453 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_section             = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.453 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_type                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.453 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.cafile                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.453 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.certfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.453 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.collect_timing           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.453 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.insecure                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.453 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.interface                = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.453 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.keyfile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.453 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.region_name              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.454 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.split_loggers            = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.454 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.timeout                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.454 2 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2613
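[annotation] The block ending here is cotyledon's startup dump of every registered oslo.config option for the service-manager process (pid marker 2); values registered with secret=True (coordination.backend_url, publisher.telemetry_secret, the rgw keys) are masked as ****. A minimal sketch of how such a dump is produced using only the public oslo.config API — the option names registered below are illustrative, not ceilometer's full set:

    import logging

    from oslo_config import cfg

    logging.basicConfig(level=logging.DEBUG)
    LOG = logging.getLogger(__name__)

    CONF = cfg.CONF
    # Real services register hundreds of options; two stand in here.
    CONF.register_opts([
        cfg.IntOpt("batch_size", default=50),
        # secret=True is what makes log_opt_values() print '****'.
        cfg.StrOpt("transport_url", secret=True),
    ])

    # The agent is launched with --config-file /etc/ceilometer/ceilometer.conf;
    # an empty argv keeps this sketch runnable anywhere.
    CONF([], project="ceilometer")
    CONF.log_opt_values(LOG, logging.DEBUG)  # emits the "Full set of CONF" listing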
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.476 12 INFO ceilometer.polling.manager [-] Looking for dynamic pollsters configurations at [['/etc/ceilometer/pollsters.d']].
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.479 12 INFO ceilometer.polling.manager [-] No dynamic pollsters found in folder [/etc/ceilometer/pollsters.d].
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.482 12 INFO ceilometer.polling.manager [-] No dynamic pollsters file found in dirs [['/etc/ceilometer/pollsters.d']].
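[annotation] Dynamic pollsters are plain YAML files dropped into the configured pollsters_definitions_dirs; the three INFO lines above show the scan coming up empty. A rough sketch of that discovery step, assuming only the behavior implied by the messages rather than ceilometer's exact code:

    import glob
    import os

    def find_dynamic_pollsters(dirs):
        # Mirrors the log flow: announce the search, report each empty
        # directory, then summarize when nothing was found at all.
        print(f"Looking for dynamic pollsters configurations at [{dirs}].")
        found = []
        for d in dirs:
            files = sorted(glob.glob(os.path.join(d, "*.yaml")))
            if not files:
                print(f"No dynamic pollsters found in folder [{d}].")
            found.extend(files)
        if not found:
            print(f"No dynamic pollsters file found in dirs [{dirs}].")
        return found

    find_dynamic_pollsters(["/etc/ceilometer/pollsters.d"])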
Oct 11 01:37:43 compute-0 python3.9[175131]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/openstack/config/telemetry-power-monitoring config_pattern=kepler.json debug=False
Oct 11 01:37:43 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:43.608 12 INFO oslo.privsep.daemon [-] Running privsep helper: ['sudo', 'ceilometer-rootwrap', '/etc/ceilometer/rootwrap.conf', 'privsep-helper', '--privsep_context', 'ceilometer.privsep.sys_admin_pctxt', '--privsep_sock_path', '/tmp/tmptz0cyimf/privsep.sock']
Oct 11 01:37:43 compute-0 sudo[175129]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:43 compute-0 sudo[175136]: ceilometer : PWD=/ ; USER=root ; COMMAND=/usr/bin/ceilometer-rootwrap /etc/ceilometer/rootwrap.conf privsep-helper --privsep_context ceilometer.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmptz0cyimf/privsep.sock
Oct 11 01:37:43 compute-0 sudo[175136]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Oct 11 01:37:43 compute-0 sudo[175136]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=42405)
Oct 11 01:37:44 compute-0 kernel: capability: warning: `privsep-helper' uses deprecated v2 capabilities in a way that may be insecure
Oct 11 01:37:44 compute-0 sudo[175136]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.233 12 INFO oslo.privsep.daemon [-] Spawned new privsep daemon via rootwrap
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.234 12 DEBUG oslo.privsep.daemon [-] Accepted privsep connection to /tmp/tmptz0cyimf/privsep.sock __init__ /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:362
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.119 19 INFO oslo.privsep.daemon [-] privsep daemon starting
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.127 19 INFO oslo.privsep.daemon [-] privsep process running with uid/gid: 0/0
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.130 19 INFO oslo.privsep.daemon [-] privsep process running with capabilities (eff/prm/inh): CAP_CHOWN|CAP_DAC_OVERRIDE|CAP_DAC_READ_SEARCH|CAP_FOWNER|CAP_NET_ADMIN|CAP_SYS_ADMIN/CAP_CHOWN|CAP_DAC_OVERRIDE|CAP_DAC_READ_SEARCH|CAP_FOWNER|CAP_NET_ADMIN|CAP_SYS_ADMIN/none
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.131 19 INFO oslo.privsep.daemon [-] privsep daemon running as pid 19
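[annotation] The sequence above is oslo.privsep bootstrapping: the unprivileged agent (uid 42405) sudo-executes ceilometer-rootwrap, which spawns privsep-helper; the helper drops to the capability set logged at 01:37:44.130 and then serves the agent's privileged calls over the Unix socket in /tmp. A sketch of a privsep context consistent with those lines — the context name and capability list match the log, while cfg_section and the example entrypoint are assumptions:

    from oslo_privsep import capabilities, priv_context

    # Name matches the --privsep_context argument; capabilities match the
    # eff/prm set logged by the daemon.
    sys_admin_pctxt = priv_context.PrivContext(
        'ceilometer',
        cfg_section='privsep',
        pypath=__name__ + '.sys_admin_pctxt',
        capabilities=[capabilities.CAP_CHOWN,
                      capabilities.CAP_DAC_OVERRIDE,
                      capabilities.CAP_DAC_READ_SEARCH,
                      capabilities.CAP_FOWNER,
                      capabilities.CAP_NET_ADMIN,
                      capabilities.CAP_SYS_ADMIN])

    @sys_admin_pctxt.entrypoint
    def read_ipmi_device(path):
        # Decorated calls are forwarded to the root privsep daemon over the
        # socket; the calling process never gains the capabilities itself.
        with open(path, 'rb') as dev:
            return dev.read(16)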
Oct 11 01:37:44 compute-0 sudo[175292]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ycwouhkogprkutjlyyzyaalmtxnwcnka ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146663.8864295-462-76038783661884/AnsiballZ_container_config_hash.py'
Oct 11 01:37:44 compute-0 sudo[175292]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.333 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.current: IPMITool not supported on host _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.334 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.fan: IPMITool not supported on host _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.336 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.airflow: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.336 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.cpu_util: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.336 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.cups: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.336 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.io_util: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.337 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.mem_util: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.337 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.outlet_temperature: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.337 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.power: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.337 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.temperature: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.338 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.temperature: IPMITool not supported on host _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.338 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.voltage: IPMITool not supported on host _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.338 12 WARNING ceilometer.polling.manager [-] No valid pollsters can be loaded from ['ipmi'] namespaces
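[annotation] Two distinct load failures appear above: the IPMITool-backed pollsters bail out cleanly because this virtualized host exposes no IPMI interface, while the hardware.ipmi.node.* pollsters die in the constructor with a TypeError from object.__new__(). Since every extension in the 'ipmi' namespace fails, the manager logs the final WARNING and this agent polls nothing. The exact TypeError text is what CPython raises whenever extra constructor arguments are forwarded to object.__new__(); a minimal standalone reproduction of that failure mode (an assumption for illustration, not ceilometer's actual class):

    class NodePollster:
        def __new__(cls, conf):
            # BUG: the extra argument reaches object.__new__(), which only
            # accepts the class to instantiate.
            return super().__new__(cls, conf)

        def __init__(self, conf):
            self.conf = conf

    try:
        NodePollster({})
    except TypeError as exc:
        # object.__new__() takes exactly one argument (the type to instantiate)
        print(exc)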
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.344 12 DEBUG cotyledon.oslo_config_glue [-] Full set of CONF: _load_service_options /usr/lib/python3.9/site-packages/cotyledon/oslo_config_glue.py:48
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.345 12 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2589
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.345 12 DEBUG cotyledon.oslo_config_glue [-] Configuration options gathered from: log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2590
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.345 12 DEBUG cotyledon.oslo_config_glue [-] command line args: ['--polling-namespaces', 'ipmi', '--logfile', '/dev/stdout'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2591
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.345 12 DEBUG cotyledon.oslo_config_glue [-] config files: ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2592
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.345 12 DEBUG cotyledon.oslo_config_glue [-] ================================================================================ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2594
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.346 12 DEBUG cotyledon.oslo_config_glue [-] batch_size                     = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.346 12 DEBUG cotyledon.oslo_config_glue [-] cfg_file                       = polling.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.346 12 DEBUG cotyledon.oslo_config_glue [-] config_dir                     = ['/etc/ceilometer/ceilometer.conf.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.346 12 DEBUG cotyledon.oslo_config_glue [-] config_file                    = ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.346 12 DEBUG cotyledon.oslo_config_glue [-] config_source                  = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.347 12 DEBUG cotyledon.oslo_config_glue [-] control_exchange               = ceilometer log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.347 12 DEBUG cotyledon.oslo_config_glue [-] debug                          = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.347 12 DEBUG cotyledon.oslo_config_glue [-] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'futurist=INFO', 'neutronclient=INFO', 'keystoneclient=INFO'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.347 12 DEBUG cotyledon.oslo_config_glue [-] event_pipeline_cfg_file        = event_pipeline.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.348 12 DEBUG cotyledon.oslo_config_glue [-] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.348 12 DEBUG cotyledon.oslo_config_glue [-] host                           = compute-0.ctlplane.example.com log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.348 12 DEBUG cotyledon.oslo_config_glue [-] http_timeout                   = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.348 12 DEBUG cotyledon.oslo_config_glue [-] hypervisor_inspector           = libvirt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.349 12 DEBUG cotyledon.oslo_config_glue [-] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.349 12 DEBUG cotyledon.oslo_config_glue [-] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.349 12 DEBUG cotyledon.oslo_config_glue [-] libvirt_type                   = kvm log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.349 12 DEBUG cotyledon.oslo_config_glue [-] libvirt_uri                    =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.349 12 DEBUG cotyledon.oslo_config_glue [-] log_config_append              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.350 12 DEBUG cotyledon.oslo_config_glue [-] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.350 12 DEBUG cotyledon.oslo_config_glue [-] log_dir                        = /var/log/ceilometer log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.350 12 DEBUG cotyledon.oslo_config_glue [-] log_file                       = /dev/stdout log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.350 12 DEBUG cotyledon.oslo_config_glue [-] log_options                    = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.350 12 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.351 12 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.351 12 DEBUG cotyledon.oslo_config_glue [-] log_rotation_type              = none log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.351 12 DEBUG cotyledon.oslo_config_glue [-] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.351 12 DEBUG cotyledon.oslo_config_glue [-] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.351 12 DEBUG cotyledon.oslo_config_glue [-] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.351 12 DEBUG cotyledon.oslo_config_glue [-] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.352 12 DEBUG cotyledon.oslo_config_glue [-] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.352 12 DEBUG cotyledon.oslo_config_glue [-] max_logfile_count              = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.352 12 DEBUG cotyledon.oslo_config_glue [-] max_logfile_size_mb            = 200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.352 12 DEBUG cotyledon.oslo_config_glue [-] max_parallel_requests          = 64 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.352 12 DEBUG cotyledon.oslo_config_glue [-] partitioning_group_prefix      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.353 12 DEBUG cotyledon.oslo_config_glue [-] pipeline_cfg_file              = pipeline.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.353 12 DEBUG cotyledon.oslo_config_glue [-] polling_namespaces             = ['ipmi'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.353 12 DEBUG cotyledon.oslo_config_glue [-] pollsters_definitions_dirs     = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.353 12 DEBUG cotyledon.oslo_config_glue [-] publish_errors                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.354 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.354 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.354 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.354 12 DEBUG cotyledon.oslo_config_glue [-] reseller_prefix                = AUTH_ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.354 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_keys         = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.355 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_length       = 256 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.355 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_namespace    = ['metering.'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.355 12 DEBUG cotyledon.oslo_config_glue [-] rootwrap_config                = /etc/ceilometer/rootwrap.conf log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.355 12 DEBUG cotyledon.oslo_config_glue [-] sample_source                  = openstack log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.355 12 DEBUG cotyledon.oslo_config_glue [-] syslog_log_facility            = LOG_USER log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.356 12 DEBUG cotyledon.oslo_config_glue [-] tenant_name_discovery          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.356 12 DEBUG cotyledon.oslo_config_glue [-] transport_url                  = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.356 12 DEBUG cotyledon.oslo_config_glue [-] use_eventlog                   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.356 12 DEBUG cotyledon.oslo_config_glue [-] use_journal                    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.356 12 DEBUG cotyledon.oslo_config_glue [-] use_json                       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.357 12 DEBUG cotyledon.oslo_config_glue [-] use_stderr                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.357 12 DEBUG cotyledon.oslo_config_glue [-] use_syslog                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.357 12 DEBUG cotyledon.oslo_config_glue [-] watch_log_file                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.357 12 DEBUG cotyledon.oslo_config_glue [-] compute.instance_discovery_method = libvirt_metadata log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.357 12 DEBUG cotyledon.oslo_config_glue [-] compute.resource_cache_expiry  = 3600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.358 12 DEBUG cotyledon.oslo_config_glue [-] compute.resource_update_interval = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.358 12 DEBUG cotyledon.oslo_config_glue [-] coordination.backend_url       = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.358 12 DEBUG cotyledon.oslo_config_glue [-] event.definitions_cfg_file     = event_definitions.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.358 12 DEBUG cotyledon.oslo_config_glue [-] event.drop_unmatched_notifications = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.358 12 DEBUG cotyledon.oslo_config_glue [-] event.store_raw                = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.359 12 DEBUG cotyledon.oslo_config_glue [-] ipmi.node_manager_init_retry   = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.359 12 DEBUG cotyledon.oslo_config_glue [-] ipmi.polling_retry             = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.359 12 DEBUG cotyledon.oslo_config_glue [-] meter.meter_definitions_dirs   = ['/etc/ceilometer/meters.d', '/usr/lib/python3.9/site-packages/ceilometer/data/meters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.359 12 DEBUG cotyledon.oslo_config_glue [-] monasca.archive_on_failure     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.360 12 DEBUG cotyledon.oslo_config_glue [-] monasca.archive_path           = mon_pub_failures.txt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.360 12 DEBUG cotyledon.oslo_config_glue [-] monasca.auth_section           = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.360 12 DEBUG cotyledon.oslo_config_glue [-] monasca.auth_type              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.360 12 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_count            = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.360 12 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_max_retries      = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.360 12 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_mode             = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.361 12 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_polling_interval = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.361 12 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_timeout          = 15 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.361 12 DEBUG cotyledon.oslo_config_glue [-] monasca.cafile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.361 12 DEBUG cotyledon.oslo_config_glue [-] monasca.certfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.361 12 DEBUG cotyledon.oslo_config_glue [-] monasca.client_max_retries     = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.362 12 DEBUG cotyledon.oslo_config_glue [-] monasca.client_retry_interval  = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.362 12 DEBUG cotyledon.oslo_config_glue [-] monasca.clientapi_version      = 2_0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.362 12 DEBUG cotyledon.oslo_config_glue [-] monasca.cloud_name             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.362 12 DEBUG cotyledon.oslo_config_glue [-] monasca.cluster                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.363 12 DEBUG cotyledon.oslo_config_glue [-] monasca.collect_timing         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.363 12 DEBUG cotyledon.oslo_config_glue [-] monasca.control_plane          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.363 12 DEBUG cotyledon.oslo_config_glue [-] monasca.enable_api_pagination  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.363 12 DEBUG cotyledon.oslo_config_glue [-] monasca.insecure               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.363 12 DEBUG cotyledon.oslo_config_glue [-] monasca.interface              = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.364 12 DEBUG cotyledon.oslo_config_glue [-] monasca.keyfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.364 12 DEBUG cotyledon.oslo_config_glue [-] monasca.monasca_mappings       = /etc/ceilometer/monasca_field_definitions.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.364 12 DEBUG cotyledon.oslo_config_glue [-] monasca.region_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.364 12 DEBUG cotyledon.oslo_config_glue [-] monasca.retry_on_failure       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.364 12 DEBUG cotyledon.oslo_config_glue [-] monasca.split_loggers          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.365 12 DEBUG cotyledon.oslo_config_glue [-] monasca.timeout                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.365 12 DEBUG cotyledon.oslo_config_glue [-] notification.ack_on_event_error = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.365 12 DEBUG cotyledon.oslo_config_glue [-] notification.batch_size        = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.365 12 DEBUG cotyledon.oslo_config_glue [-] notification.batch_timeout     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.365 12 DEBUG cotyledon.oslo_config_glue [-] notification.messaging_urls    = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.366 12 DEBUG cotyledon.oslo_config_glue [-] notification.notification_control_exchanges = ['nova', 'glance', 'neutron', 'cinder', 'heat', 'keystone', 'sahara', 'trove', 'zaqar', 'swift', 'ceilometer', 'magnum', 'dns', 'ironic', 'aodh'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.366 12 DEBUG cotyledon.oslo_config_glue [-] notification.pipelines         = ['meter', 'event'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.366 12 DEBUG cotyledon.oslo_config_glue [-] notification.workers           = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.366 12 DEBUG cotyledon.oslo_config_glue [-] polling.batch_size             = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.367 12 DEBUG cotyledon.oslo_config_glue [-] polling.cfg_file               = polling.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.367 12 DEBUG cotyledon.oslo_config_glue [-] polling.partitioning_group_prefix = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.367 12 DEBUG cotyledon.oslo_config_glue [-] polling.pollsters_definitions_dirs = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.367 12 DEBUG cotyledon.oslo_config_glue [-] polling.tenant_name_discovery  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.367 12 DEBUG cotyledon.oslo_config_glue [-] publisher.telemetry_secret     = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.368 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.event_topic = event log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.368 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.metering_topic = metering log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.368 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.telemetry_driver = messagingv2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.368 12 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.access_key = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.369 12 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.secret_key = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.369 12 DEBUG cotyledon.oslo_config_glue [-] rgw_client.implicit_tenants    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.369 12 DEBUG cotyledon.oslo_config_glue [-] service_types.cinder           = volumev3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.369 12 DEBUG cotyledon.oslo_config_glue [-] service_types.glance           = image log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.370 12 DEBUG cotyledon.oslo_config_glue [-] service_types.neutron          = network log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.370 12 DEBUG cotyledon.oslo_config_glue [-] service_types.nova             = compute log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.370 12 DEBUG cotyledon.oslo_config_glue [-] service_types.radosgw          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.370 12 DEBUG cotyledon.oslo_config_glue [-] service_types.swift            = object-store log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.370 12 DEBUG cotyledon.oslo_config_glue [-] vmware.api_retry_count         = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.371 12 DEBUG cotyledon.oslo_config_glue [-] vmware.ca_file                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.371 12 DEBUG cotyledon.oslo_config_glue [-] vmware.host_ip                 = 127.0.0.1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.371 12 DEBUG cotyledon.oslo_config_glue [-] vmware.host_password           = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.371 12 DEBUG cotyledon.oslo_config_glue [-] vmware.host_port               = 443 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.372 12 DEBUG cotyledon.oslo_config_glue [-] vmware.host_username           =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.372 12 DEBUG cotyledon.oslo_config_glue [-] vmware.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.372 12 DEBUG cotyledon.oslo_config_glue [-] vmware.task_poll_interval      = 0.5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.372 12 DEBUG cotyledon.oslo_config_glue [-] vmware.wsdl_location           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.372 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_section = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.372 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_type  = password log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.373 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.cafile     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.373 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.certfile   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.373 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.collect_timing = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.373 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.insecure   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.373 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.interface  = internalURL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.374 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.keyfile    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.374 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.region_name = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.374 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.split_loggers = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.374 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.timeout    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.374 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_section           = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.375 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_type              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.375 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.cafile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.375 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.certfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.375 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.collect_timing         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.375 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.insecure               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.375 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.interface              = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.376 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.keyfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.376 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.region_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.376 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.split_loggers          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.376 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.timeout                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.376 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_section             = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.377 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_type                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.377 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.cafile                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.377 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.certfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.377 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.collect_timing           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.377 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.insecure                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.378 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.interface                = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.378 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.keyfile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.378 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.region_name              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.378 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.split_loggers            = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.378 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.timeout                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.379 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_notifications.driver = ['noop'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.379 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_notifications.retry = -1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.379 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_notifications.topics = ['notifications'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.379 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_notifications.transport_url = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.379 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.amqp_auto_delete = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.379 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.amqp_durable_queues = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.380 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.conn_pool_min_size = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.380 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.conn_pool_ttl = 1200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.380 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.direct_mandatory_flag = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.380 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.enable_cancel_on_failover = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.380 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.heartbeat_in_pthread = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.381 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.heartbeat_rate = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.381 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.heartbeat_timeout_threshold = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.381 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.kombu_compression = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.381 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.kombu_failover_strategy = round-robin log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.381 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.kombu_missing_consumer_retry_timeout = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.381 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.kombu_reconnect_delay = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.382 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_ha_queues = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.382 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_interval_max = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.382 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_login_method = AMQPLAIN log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.382 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_qos_prefetch_count = 100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.382 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_quorum_delivery_limit = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.382 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_quorum_max_memory_bytes = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.382 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_quorum_max_memory_length = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.382 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_quorum_queue = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.383 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_retry_backoff = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.383 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_retry_interval = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.383 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_transient_queues_ttl = 1800 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.383 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rpc_conn_pool_size = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.383 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.383 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl_ca_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.383 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl_cert_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.383 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl_enforce_fips_mode = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.384 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl_key_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.384 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl_version =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.384 12 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2613
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.384 12 DEBUG cotyledon._service [-] Run service AgentManager(0) [12] wait_forever /usr/lib/python3.9/site-packages/cotyledon/_service.py:241
Oct 11 01:37:44 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:44.388 12 DEBUG ceilometer.agent [-] Config file: {'sources': [{'name': 'pollsters', 'interval': 120, 'meters': ['hardware.*']}]} load_config /usr/lib/python3.9/site-packages/ceilometer/agent.py:64
Oct 11 01:37:44 compute-0 python3.9[175294]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
Oct 11 01:37:44 compute-0 sudo[175292]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:45 compute-0 sudo[175446]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lzxqlugtpknvntgrvdfkazeicipafupy ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760146664.8849897-472-160835404219138/AnsiballZ_edpm_container_manage.py'
Oct 11 01:37:45 compute-0 sudo[175446]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:45 compute-0 python3[175448]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/openstack/config/telemetry-power-monitoring config_id=edpm config_overrides={} config_patterns=kepler.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 01:37:51 compute-0 podman[175461]: 2025-10-11 01:37:51.657380333 +0000 UTC m=+5.944851106 image pull ed61e3ea3188391c18595d8ceada2a5a01f0ece915c62fde355798735b5208d7 quay.io/sustainable_computing_io/kepler:release-0.7.12
Oct 11 01:37:51 compute-0 podman[175661]: 2025-10-11 01:37:51.813376842 +0000 UTC m=+0.054554965 container create e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, com.redhat.component=ubi9-container, release=1214.1726694543, managed_by=edpm_ansible, config_id=edpm, version=9.4, release-0.7.12=, io.openshift.expose-services=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, container_name=kepler, summary=Provides the latest release of Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.buildah.version=1.29.0, architecture=x86_64, name=ubi9, io.k8s.display-name=Red Hat Universal Base Image 9, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, build-date=2024-09-18T21:23:30, vendor=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, maintainer=Red Hat, Inc., vcs-type=git)
Oct 11 01:37:51 compute-0 podman[175661]: 2025-10-11 01:37:51.784021887 +0000 UTC m=+0.025200000 image pull ed61e3ea3188391c18595d8ceada2a5a01f0ece915c62fde355798735b5208d7 quay.io/sustainable_computing_io/kepler:release-0.7.12
Oct 11 01:37:51 compute-0 python3[175448]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman create --name kepler --conmon-pidfile /run/kepler.pid --env ENABLE_GPU=true --env EXPOSE_CONTAINER_METRICS=true --env ENABLE_PROCESS_METRICS=true --env EXPOSE_VM_METRICS=true --env EXPOSE_ESTIMATED_IDLE_POWER_METRICS=false --env LIBVIRT_METADATA_URI=http://openstack.org/xmlns/libvirt/nova/1.1 --healthcheck-command /openstack/healthcheck kepler --label config_id=edpm --label container_name=kepler --label managed_by=edpm_ansible --label config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']} --log-driver journald --log-level info --network host --privileged=True --publish 8888:8888 --volume /lib/modules:/lib/modules:ro --volume /run/libvirt:/run/libvirt:shared,ro --volume /sys:/sys --volume /proc:/proc --volume /var/lib/openstack/healthchecks/kepler:/openstack:ro,z quay.io/sustainable_computing_io/kepler:release-0.7.12 -v=2
Oct 11 01:37:52 compute-0 sudo[175446]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:52 compute-0 podman[175705]: 2025-10-11 01:37:52.199465254 +0000 UTC m=+0.088619433 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vendor=Red Hat, Inc., io.openshift.tags=minimal rhel9, build-date=2025-08-20T13:12:41, version=9.6, io.openshift.expose-services=, com.redhat.component=ubi9-minimal-container, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, name=ubi9-minimal, release=1755695350, managed_by=edpm_ansible, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=openstack_network_exporter, distribution-scope=public, maintainer=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, config_id=edpm)
Oct 11 01:37:52 compute-0 podman[175747]: 2025-10-11 01:37:52.323312271 +0000 UTC m=+0.084310065 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 01:37:52 compute-0 sudo[175896]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mqjtvufjszeodwzjysvqnxqgscnnhblh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146672.2767973-480-89201890040142/AnsiballZ_stat.py'
Oct 11 01:37:52 compute-0 sudo[175896]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:52 compute-0 python3.9[175898]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:37:52 compute-0 sudo[175896]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:53 compute-0 sudo[176050]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mbirkkmktdegkqimccttgvjhzcosylbk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146673.1797209-489-159482247985588/AnsiballZ_file.py'
Oct 11 01:37:53 compute-0 sudo[176050]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:53 compute-0 python3.9[176052]: ansible-file Invoked with path=/etc/systemd/system/edpm_kepler.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:37:53 compute-0 sudo[176050]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:54 compute-0 sudo[176201]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-erifdhxnfzbwojzwrnktexlrisswuomy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146673.8792405-489-17554472765598/AnsiballZ_copy.py'
Oct 11 01:37:54 compute-0 sudo[176201]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:54 compute-0 python3.9[176203]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760146673.8792405-489-17554472765598/source dest=/etc/systemd/system/edpm_kepler.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:37:54 compute-0 sudo[176201]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:55 compute-0 sudo[176277]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nqtauvgtqeqvyhdcltnufslpoigfiwmn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146673.8792405-489-17554472765598/AnsiballZ_systemd.py'
Oct 11 01:37:55 compute-0 sudo[176277]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:55 compute-0 python3.9[176279]: ansible-systemd Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 01:37:55 compute-0 systemd[1]: Reloading.
Oct 11 01:37:55 compute-0 systemd-rc-local-generator[176307]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:37:55 compute-0 systemd-sysv-generator[176310]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:37:55 compute-0 sudo[176277]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:55 compute-0 sudo[176388]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gmihtexvxqsnafzihvjlwncoaykfbyrk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146673.8792405-489-17554472765598/AnsiballZ_systemd.py'
Oct 11 01:37:55 compute-0 sudo[176388]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:56 compute-0 python3.9[176390]: ansible-systemd Invoked with state=restarted name=edpm_kepler.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:37:56 compute-0 systemd[1]: Reloading.
Oct 11 01:37:56 compute-0 systemd-rc-local-generator[176415]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:37:56 compute-0 systemd-sysv-generator[176419]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:37:56 compute-0 systemd[1]: Starting kepler container...
Oct 11 01:37:56 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:37:57 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304.
Oct 11 01:37:57 compute-0 podman[176431]: 2025-10-11 01:37:57.976487122 +0000 UTC m=+1.177844260 container init e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, release=1214.1726694543, io.buildah.version=1.29.0, container_name=kepler, io.openshift.expose-services=, io.k8s.display-name=Red Hat Universal Base Image 9, maintainer=Red Hat, Inc., name=ubi9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., build-date=2024-09-18T21:23:30, config_id=edpm, architecture=x86_64, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, version=9.4, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, summary=Provides the latest release of Red Hat Universal Base Image 9., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, release-0.7.12=, vcs-type=git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, managed_by=edpm_ansible, com.redhat.component=ubi9-container)
Oct 11 01:37:58 compute-0 kepler[176446]: WARNING: failed to read int from file: open /sys/devices/system/cpu/cpu0/online: no such file or directory
Oct 11 01:37:58 compute-0 podman[176431]: 2025-10-11 01:37:58.01655275 +0000 UTC m=+1.217909828 container start e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, release-0.7.12=, distribution-scope=public, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, summary=Provides the latest release of Red Hat Universal Base Image 9., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, architecture=x86_64, io.openshift.tags=base rhel9, maintainer=Red Hat, Inc., name=ubi9, release=1214.1726694543, vendor=Red Hat, Inc., container_name=kepler, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, managed_by=edpm_ansible, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, version=9.4, io.buildah.version=1.29.0, build-date=2024-09-18T21:23:30, com.redhat.component=ubi9-container, io.k8s.display-name=Red Hat Universal Base Image 9)
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.025514       1 exporter.go:103] Kepler running on version: v0.7.12-dirty
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.025774       1 config.go:293] using gCgroup ID in the BPF program: true
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.025827       1 config.go:295] kernel version: 5.14
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.026952       1 power.go:78] Unable to obtain power, use estimate method
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.027002       1 redfish.go:169] failed to get redfish credential file path
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.027806       1 acpi.go:71] Could not find any ACPI power meter path. Is it a VM?
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.027833       1 power.go:79] using none to obtain power
Oct 11 01:37:58 compute-0 kepler[176446]: E1011 01:37:58.027862       1 accelerator.go:154] [DUMMY] doesn't contain GPU
Oct 11 01:37:58 compute-0 kepler[176446]: E1011 01:37:58.027902       1 exporter.go:154] failed to init GPU accelerators: no devices found
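[annotation] The block above traces Kepler's power-source fallback: RAPL is unavailable inside this KVM guest, no Redfish credential file is configured, no ACPI power meter exists, so it settles on "none" and model-based estimation; the GPU init failure is likewise expected with ENABLE_GPU=true on a host without accelerators. A minimal probe sketch for the same class of sources, assuming the usual sysfs/hwmon locations (the exact paths Kepler checks may differ):

    # probe_power_sources.py - hedged sketch, not Kepler's actual code
    import os, glob

    def has_rapl() -> bool:
        # Intel RAPL shows up via the powercap framework on bare metal
        return os.path.isdir("/sys/class/powercap/intel-rapl")

    def acpi_power_meter_paths() -> list:
        # the acpi_power_meter driver registers an hwmon named "power_meter"
        hits = []
        for name_file in glob.glob("/sys/class/hwmon/hwmon*/name"):
            try:
                with open(name_file) as f:
                    if f.read().strip() == "power_meter":
                        hits.append(os.path.dirname(name_file))
            except OSError:
                pass
        return hits

    if __name__ == "__main__":
        print("RAPL:", has_rapl())
        print("ACPI power meters:", acpi_power_meter_paths() or "none (VM?)")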
Oct 11 01:37:58 compute-0 kepler[176446]: WARNING: failed to read int from file: open /sys/devices/system/cpu/cpu0/online: no such file or directory
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.031137       1 exporter.go:84] Number of CPUs: 8
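[annotation] The repeated WARNING about /sys/devices/system/cpu/cpu0/online is benign: cpu0 is typically not hot-pluggable, so the kernel exposes no `online` attribute for it, and a missing file has to be read as "online". A sketch of that fallback:

    # cpu_online.py - missing attribute means the CPU cannot be offlined
    def cpu_is_online(cpu: int) -> bool:
        path = f"/sys/devices/system/cpu/cpu{cpu}/online"
        try:
            with open(path) as f:
                return f.read().strip() == "1"
        except FileNotFoundError:
            return True  # no 'online' file => not hot-pluggable => online

    online = [c for c in range(8) if cpu_is_online(c)]  # 8 CPUs per the log
    print(len(online), "CPUs online")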
Oct 11 01:37:58 compute-0 podman[176431]: kepler
Oct 11 01:37:58 compute-0 systemd[1]: Started kepler container.
Oct 11 01:37:58 compute-0 sudo[176388]: pam_unix(sudo:session): session closed for user root
Oct 11 01:37:58 compute-0 podman[176457]: 2025-10-11 01:37:58.172432701 +0000 UTC m=+0.135900151 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=starting, health_failing_streak=1, health_log=, com.redhat.component=ubi9-container, config_id=edpm, release=1214.1726694543, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.expose-services=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release-0.7.12=, vcs-type=git, vendor=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, managed_by=edpm_ansible, architecture=x86_64, io.buildah.version=1.29.0, io.openshift.tags=base rhel9, version=9.4, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=kepler, summary=Provides the latest release of Red Hat Universal Base Image 9., build-date=2024-09-18T21:23:30, name=ubi9)
Oct 11 01:37:58 compute-0 systemd[1]: e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304-66828feffb5c14b5.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 01:37:58 compute-0 systemd[1]: e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304-66828feffb5c14b5.service: Failed with result 'exit-code'.
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.546311       1 watcher.go:83] Using in cluster k8s config
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.546365       1 watcher.go:90] failed to get config: unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined
Oct 11 01:37:58 compute-0 kepler[176446]: E1011 01:37:58.546458       1 manager.go:59] could not run the watcher k8s APIserver watcher was not enabled
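[annotation] The watcher failure is expected on an EDPM node: in-cluster Kubernetes config loaders require the KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT variables that the kubelet injects into pods, and a podman container on a compute host has neither, so Kepler simply runs without the API server watcher. A minimal version of the same pre-flight check (plain environment test, not the client library's implementation):

    # incluster_check.py - sketch of the in-cluster detection
    import os

    def in_cluster() -> bool:
        return bool(os.environ.get("KUBERNETES_SERVICE_HOST")
                    and os.environ.get("KUBERNETES_SERVICE_PORT"))

    if not in_cluster():
        print("not in a Kubernetes pod; skipping API server watcher")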
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.553546       1 process_energy.go:129] Using the Ratio Power Model to estimate PROCESS_TOTAL Power
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.553588       1 process_energy.go:130] Feature names: [bpf_cpu_time_ms]
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.560326       1 process_energy.go:129] Using the Ratio Power Model to estimate PROCESS_COMPONENTS Power
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.560368       1 process_energy.go:130] Feature names: [bpf_cpu_time_ms bpf_cpu_time_ms bpf_cpu_time_ms   gpu_compute_util]
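[annotation] With no hardware power reading, the Ratio Power Model splits the estimated node power across workloads in proportion to the listed feature, here bpf_cpu_time_ms. A worked sketch of that proportional attribution (names and types are illustrative, not Kepler's):

    # ratio_model.py - proportional power attribution sketch
    def split_power(node_power_w: float, cpu_time_ms: dict) -> dict:
        total = sum(cpu_time_ms.values()) or 1.0
        return {k: node_power_w * t / total for k, t in cpu_time_ms.items()}

    # e.g. 40 W of estimated node power over one interval
    print(split_power(40.0, {"qemu-kvm": 750.0, "ceilometer": 50.0, "kepler": 200.0}))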
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.570822       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.570863       1 model.go:125] Requesting for Machine Spec: &{authenticamd amd_epyc_rome 8 8 7 2800 1}
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.570878       1 node_platform_energy.go:53] Using the Regressor/AbsPower Power Model to estimate Node Platform Power
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.581508       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.581541       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.581547       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.581552       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.581558       1 model.go:125] Requesting for Machine Spec: &{authenticamd amd_epyc_rome 8 8 7 2800 1}
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.581570       1 node_component_energy.go:57] Using the Regressor/AbsPower Power Model to estimate Node Component Power
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.581650       1 prometheus_collector.go:90] Registered Process Prometheus metrics
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.581679       1 prometheus_collector.go:95] Registered Container Prometheus metrics
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.581701       1 prometheus_collector.go:100] Registered VM Prometheus metrics
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.581719       1 prometheus_collector.go:104] Registered Node Prometheus metrics
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.581850       1 exporter.go:194] starting to listen on 0.0.0.0:8888
Oct 11 01:37:58 compute-0 kepler[176446]: I1011 01:37:58.582435       1 exporter.go:208] Started Kepler in 557.371049ms
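[annotation] Once the exporter listens on 0.0.0.0:8888 (the port published in config_data above), its Prometheus metrics can be scraped over HTTP. A quick check from the compute host, assuming the conventional /metrics path:

    # scrape_kepler.py - sanity-check the exporter endpoint
    from urllib.request import urlopen

    with urlopen("http://localhost:8888/metrics", timeout=5) as resp:
        body = resp.read().decode()
    # Kepler metric families are prefixed with "kepler_"
    print("\n".join(l for l in body.splitlines() if l.startswith("kepler_"))[:1000])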
Oct 11 01:37:58 compute-0 sudo[176641]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rjhmdyibhhscuavovrxzjkqtynyszgle ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146678.3087423-513-143257560679665/AnsiballZ_systemd.py'
Oct 11 01:37:58 compute-0 sudo[176641]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:37:59 compute-0 python3.9[176643]: ansible-ansible.builtin.systemd Invoked with name=edpm_ceilometer_agent_ipmi.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
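[annotation] The ansible.builtin.systemd invocation above (state=restarted) boils down to restarting the unit, which produces the stop/start sequence that follows. The equivalent direct call:

    # restart_unit.py - what the ansible systemd task amounts to here
    import subprocess
    subprocess.run(["systemctl", "restart", "edpm_ceilometer_agent_ipmi.service"],
                   check=True)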
Oct 11 01:37:59 compute-0 systemd[1]: Stopping ceilometer_agent_ipmi container...
Oct 11 01:37:59 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:59.324 2 INFO cotyledon._service_manager [-] Caught SIGTERM signal, graceful exiting of master process
Oct 11 01:37:59 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:59.426 2 DEBUG cotyledon._service_manager [-] Killing services with signal SIGTERM _shutdown /usr/lib/python3.9/site-packages/cotyledon/_service_manager.py:304
Oct 11 01:37:59 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:59.427 2 DEBUG cotyledon._service_manager [-] Waiting services to terminate _shutdown /usr/lib/python3.9/site-packages/cotyledon/_service_manager.py:308
Oct 11 01:37:59 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:59.428 12 INFO cotyledon._service [-] Caught SIGTERM signal, graceful exiting of service AgentManager(0) [12]
Oct 11 01:37:59 compute-0 ceilometer_agent_ipmi[174947]: 2025-10-11 01:37:59.444 2 DEBUG cotyledon._service_manager [-] Shutdown finish _shutdown /usr/lib/python3.9/site-packages/cotyledon/_service_manager.py:320
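[annotation] The cotyledon lines above trace a textbook graceful shutdown: the master catches SIGTERM, forwards it to its service workers, waits for them to exit, then logs "Shutdown finish". A compact sketch of that master/worker pattern (simplified; cotyledon's real manager does far more bookkeeping):

    # graceful_shutdown.py - master/worker SIGTERM pattern sketch
    import os, signal, sys

    children = []  # pids of forked service workers

    def on_sigterm(signum, frame):
        for pid in children:      # "Killing services with signal SIGTERM"
            os.kill(pid, signal.SIGTERM)
        for pid in children:      # "Waiting services to terminate"
            os.waitpid(pid, 0)
        sys.exit(0)               # "Shutdown finish"

    signal.signal(signal.SIGTERM, on_sigterm)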
Oct 11 01:37:59 compute-0 podman[176647]: 2025-10-11 01:37:59.624993657 +0000 UTC m=+0.400977145 container stop 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_id=edpm, org.label-schema.schema-version=1.0)
Oct 11 01:37:59 compute-0 systemd[1]: libpod-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.scope: Deactivated successfully.
Oct 11 01:37:59 compute-0 systemd[1]: libpod-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.scope: Consumed 2.196s CPU time.
Oct 11 01:37:59 compute-0 podman[176647]: 2025-10-11 01:37:59.653335384 +0000 UTC m=+0.429318902 container died 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible)
Oct 11 01:37:59 compute-0 systemd[1]: 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c-34479a48e9e141d1.timer: Deactivated successfully.
Oct 11 01:37:59 compute-0 systemd[1]: Stopped /usr/bin/podman healthcheck run 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.
Oct 11 01:37:59 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c-userdata-shm.mount: Deactivated successfully.
Oct 11 01:37:59 compute-0 systemd[1]: var-lib-containers-storage-overlay-88244608e55640e82c5cb6a8a67a46420230ae4798b24d993c337ff432ee2d45-merged.mount: Deactivated successfully.
Oct 11 01:37:59 compute-0 podman[157119]: time="2025-10-11T01:37:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:38:00 compute-0 podman[176647]: 2025-10-11 01:38:00.313134895 +0000 UTC m=+1.089118403 container cleanup 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, config_id=edpm, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 01:38:00 compute-0 podman[176647]: ceilometer_agent_ipmi
Oct 11 01:38:00 compute-0 podman[157119]: @ - - [11/Oct/2025:01:37:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 18530 "" "Go-http-client/1.1"
Oct 11 01:38:00 compute-0 podman[157119]: @ - - [11/Oct/2025:01:38:00 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 2558 "" "Go-http-client/1.1"
Oct 11 01:38:00 compute-0 podman[176676]: ceilometer_agent_ipmi
Oct 11 01:38:00 compute-0 systemd[1]: edpm_ceilometer_agent_ipmi.service: Deactivated successfully.
Oct 11 01:38:00 compute-0 systemd[1]: Stopped ceilometer_agent_ipmi container.
Oct 11 01:38:00 compute-0 systemd[1]: edpm_ceilometer_agent_ipmi.service: Consumed 1.017s CPU time, 19.5M memory peak, read 0B from disk, written 133.0K to disk.
Oct 11 01:38:00 compute-0 systemd[1]: Starting ceilometer_agent_ipmi container...
Oct 11 01:38:00 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:38:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/88244608e55640e82c5cb6a8a67a46420230ae4798b24d993c337ff432ee2d45/merged/etc/ceilometer/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 01:38:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/88244608e55640e82c5cb6a8a67a46420230ae4798b24d993c337ff432ee2d45/merged/etc/ceilometer/ceilometer_prom_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:38:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/88244608e55640e82c5cb6a8a67a46420230ae4798b24d993c337ff432ee2d45/merged/var/lib/openstack/config supports timestamps until 2038 (0x7fffffff)
Oct 11 01:38:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/88244608e55640e82c5cb6a8a67a46420230ae4798b24d993c337ff432ee2d45/merged/var/lib/kolla/config_files/config.json supports timestamps until 2038 (0x7fffffff)
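[annotation] The xfs messages above are informational: 0x7fffffff is the largest 32-bit signed Unix timestamp, and filesystems created without the bigtime feature cannot represent times past it. The cutoff in human terms:

    # y2038.py - decode the 0x7fffffff limit from the kernel message
    from datetime import datetime, timezone
    print(datetime.fromtimestamp(0x7fffffff, tz=timezone.utc))
    # -> 2038-01-19 03:14:07+00:00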
Oct 11 01:38:00 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.
Oct 11 01:38:00 compute-0 podman[176689]: 2025-10-11 01:38:00.762625238 +0000 UTC m=+0.248023237 container init 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.3)
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: + sudo -E kolla_set_configs
Oct 11 01:38:00 compute-0 podman[176689]: 2025-10-11 01:38:00.81476272 +0000 UTC m=+0.300160659 container start 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, config_id=edpm, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_managed=true)
Oct 11 01:38:00 compute-0 podman[176689]: ceilometer_agent_ipmi
Oct 11 01:38:00 compute-0 sudo[176709]: ceilometer : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_set_configs
Oct 11 01:38:00 compute-0 sudo[176709]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Oct 11 01:38:00 compute-0 sudo[176709]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=42405)
Oct 11 01:38:00 compute-0 systemd[1]: Started ceilometer_agent_ipmi container.
Oct 11 01:38:00 compute-0 sudo[176641]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: INFO:__main__:Loading config file at /var/lib/kolla/config_files/config.json
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: INFO:__main__:Validating config file
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: INFO:__main__:Kolla config strategy set to: COPY_ALWAYS
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: INFO:__main__:Copying service configuration files
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: INFO:__main__:Deleting /etc/ceilometer/ceilometer.conf
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: INFO:__main__:Copying /var/lib/openstack/config/ceilometer.conf to /etc/ceilometer/ceilometer.conf
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: INFO:__main__:Deleting /etc/ceilometer/polling.yaml
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: INFO:__main__:Copying /var/lib/openstack/config/polling.yaml to /etc/ceilometer/polling.yaml
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: INFO:__main__:Setting permission for /etc/ceilometer/polling.yaml
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: INFO:__main__:Deleting /etc/ceilometer/ceilometer.conf.d/01-ceilometer-custom.conf
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: INFO:__main__:Copying /var/lib/openstack/config/custom.conf to /etc/ceilometer/ceilometer.conf.d/01-ceilometer-custom.conf
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf.d/01-ceilometer-custom.conf
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: INFO:__main__:Deleting /etc/ceilometer/ceilometer.conf.d/02-ceilometer-host-specific.conf
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: INFO:__main__:Copying /var/lib/openstack/config/ceilometer-host-specific.conf to /etc/ceilometer/ceilometer.conf.d/02-ceilometer-host-specific.conf
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf.d/02-ceilometer-host-specific.conf
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: INFO:__main__:Writing out command to execute
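[annotation] The kolla_set_configs output above is the COPY_ALWAYS strategy at work: load /var/lib/kolla/config_files/config.json, and for each entry delete the destination, copy the source in, and reset permissions. A simplified sketch of that loop (the real tool also handles globs, ownership, optional files, and writing out the command):

    # set_configs_sketch.py - simplified COPY_ALWAYS loop
    import json, os, shutil

    with open("/var/lib/kolla/config_files/config.json") as f:
        cfg = json.load(f)

    for entry in cfg.get("config_files", []):
        src, dest = entry["source"], entry["dest"]
        if os.path.exists(dest):
            os.remove(dest)            # "Deleting <dest>"
        shutil.copy(src, dest)         # "Copying <src> to <dest>"
        os.chmod(dest, 0o600)          # "Setting permission" (mode illustrative)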
Oct 11 01:38:00 compute-0 sudo[176709]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: ++ cat /run_command
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: + CMD='/usr/bin/ceilometer-polling --polling-namespaces ipmi --logfile /dev/stdout'
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: + ARGS=
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: + sudo kolla_copy_cacerts
Oct 11 01:38:00 compute-0 sudo[176732]: ceilometer : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_copy_cacerts
Oct 11 01:38:00 compute-0 sudo[176732]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Oct 11 01:38:00 compute-0 sudo[176732]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=42405)
Oct 11 01:38:00 compute-0 podman[176710]: 2025-10-11 01:38:00.941121185 +0000 UTC m=+0.112772451 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=starting, health_failing_streak=1, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, tcib_managed=true, config_id=edpm, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 01:38:00 compute-0 sudo[176732]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: + [[ ! -n '' ]]
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: + . kolla_extend_start
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: Running command: '/usr/bin/ceilometer-polling --polling-namespaces ipmi --logfile /dev/stdout'
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: + echo 'Running command: '\''/usr/bin/ceilometer-polling --polling-namespaces ipmi --logfile /dev/stdout'\'''
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: + umask 0022
Oct 11 01:38:00 compute-0 ceilometer_agent_ipmi[176703]: + exec /usr/bin/ceilometer-polling --polling-namespaces ipmi --logfile /dev/stdout
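[annotation] kolla_start ends by reading the composed command from /run_command and exec-ing it, so the container's main process becomes ceilometer-polling itself rather than a wrapper shell, and signals from systemd/podman reach it directly. The same replace-the-process idiom in Python:

    # exec_run_command.py - sketch of kolla_start's final step
    import os, shlex

    with open("/run_command") as f:
        cmd = shlex.split(f.read().strip())
    os.execvp(cmd[0], cmd)  # replaces this process; nothing runs after exec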
Oct 11 01:38:00 compute-0 systemd[1]: 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c-39a327549edd6123.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 01:38:00 compute-0 systemd[1]: 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c-39a327549edd6123.service: Failed with result 'exit-code'.
Oct 11 01:38:01 compute-0 openstack_network_exporter[159265]: ERROR   01:38:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:38:01 compute-0 openstack_network_exporter[159265]: ERROR   01:38:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:38:01 compute-0 openstack_network_exporter[159265]: ERROR   01:38:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:38:01 compute-0 openstack_network_exporter[159265]: ERROR   01:38:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:38:01 compute-0 openstack_network_exporter[159265]: ERROR   01:38:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
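[annotation] The openstack_network_exporter errors above are expected on a compute node: ovn-northd and the OVS database server run on controller/networker hosts, so their appctl control sockets do not exist here and the related collectors fail. A hedged probe for the sockets the exporter looks for (patterns are common defaults and may differ per deployment):

    # find_ctl_sockets.py - look for ovn/ovs appctl control sockets
    import glob
    for pattern in ("/var/run/ovn/*.ctl", "/var/run/openvswitch/*.ctl"):
        hits = glob.glob(pattern)
        print(pattern, "->", hits or "none")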
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.749 2 DEBUG cotyledon.oslo_config_glue [-] Full set of CONF: _load_service_manager_options /usr/lib/python3.9/site-packages/cotyledon/oslo_config_glue.py:40
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.750 2 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2589
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.750 2 DEBUG cotyledon.oslo_config_glue [-] Configuration options gathered from: log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2590
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.750 2 DEBUG cotyledon.oslo_config_glue [-] command line args: ['--polling-namespaces', 'ipmi', '--logfile', '/dev/stdout'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2591
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.750 2 DEBUG cotyledon.oslo_config_glue [-] config files: ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2592
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.750 2 DEBUG cotyledon.oslo_config_glue [-] ================================================================================ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2594
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.750 2 DEBUG cotyledon.oslo_config_glue [-] batch_size                     = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.750 2 DEBUG cotyledon.oslo_config_glue [-] cfg_file                       = polling.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.750 2 DEBUG cotyledon.oslo_config_glue [-] config_dir                     = ['/etc/ceilometer/ceilometer.conf.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.751 2 DEBUG cotyledon.oslo_config_glue [-] config_file                    = ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.751 2 DEBUG cotyledon.oslo_config_glue [-] config_source                  = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.751 2 DEBUG cotyledon.oslo_config_glue [-] debug                          = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.751 2 DEBUG cotyledon.oslo_config_glue [-] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'futurist=INFO', 'neutronclient=INFO', 'keystoneclient=INFO'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.751 2 DEBUG cotyledon.oslo_config_glue [-] event_pipeline_cfg_file        = event_pipeline.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.751 2 DEBUG cotyledon.oslo_config_glue [-] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.751 2 DEBUG cotyledon.oslo_config_glue [-] host                           = compute-0.ctlplane.example.com log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.751 2 DEBUG cotyledon.oslo_config_glue [-] http_timeout                   = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.751 2 DEBUG cotyledon.oslo_config_glue [-] hypervisor_inspector           = libvirt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.751 2 DEBUG cotyledon.oslo_config_glue [-] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.752 2 DEBUG cotyledon.oslo_config_glue [-] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.752 2 DEBUG cotyledon.oslo_config_glue [-] libvirt_type                   = kvm log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.752 2 DEBUG cotyledon.oslo_config_glue [-] libvirt_uri                    =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.752 2 DEBUG cotyledon.oslo_config_glue [-] log_config_append              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.752 2 DEBUG cotyledon.oslo_config_glue [-] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.752 2 DEBUG cotyledon.oslo_config_glue [-] log_dir                        = /var/log/ceilometer log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.752 2 DEBUG cotyledon.oslo_config_glue [-] log_file                       = /dev/stdout log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.752 2 DEBUG cotyledon.oslo_config_glue [-] log_options                    = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.752 2 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.752 2 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.752 2 DEBUG cotyledon.oslo_config_glue [-] log_rotation_type              = none log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.752 2 DEBUG cotyledon.oslo_config_glue [-] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.753 2 DEBUG cotyledon.oslo_config_glue [-] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.753 2 DEBUG cotyledon.oslo_config_glue [-] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.753 2 DEBUG cotyledon.oslo_config_glue [-] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.753 2 DEBUG cotyledon.oslo_config_glue [-] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.753 2 DEBUG cotyledon.oslo_config_glue [-] max_logfile_count              = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.753 2 DEBUG cotyledon.oslo_config_glue [-] max_logfile_size_mb            = 200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.753 2 DEBUG cotyledon.oslo_config_glue [-] max_parallel_requests          = 64 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.753 2 DEBUG cotyledon.oslo_config_glue [-] partitioning_group_prefix      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.753 2 DEBUG cotyledon.oslo_config_glue [-] pipeline_cfg_file              = pipeline.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.753 2 DEBUG cotyledon.oslo_config_glue [-] polling_namespaces             = ['ipmi'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.753 2 DEBUG cotyledon.oslo_config_glue [-] pollsters_definitions_dirs     = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.753 2 DEBUG cotyledon.oslo_config_glue [-] publish_errors                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.754 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.754 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.754 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.754 2 DEBUG cotyledon.oslo_config_glue [-] reseller_prefix                = AUTH_ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.754 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_keys         = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.754 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_length       = 256 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.754 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_namespace    = ['metering.'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.754 2 DEBUG cotyledon.oslo_config_glue [-] rootwrap_config                = /etc/ceilometer/rootwrap.conf log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.754 2 DEBUG cotyledon.oslo_config_glue [-] sample_source                  = openstack log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.754 2 DEBUG cotyledon.oslo_config_glue [-] syslog_log_facility            = LOG_USER log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.754 2 DEBUG cotyledon.oslo_config_glue [-] tenant_name_discovery          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.754 2 DEBUG cotyledon.oslo_config_glue [-] use_eventlog                   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.755 2 DEBUG cotyledon.oslo_config_glue [-] use_journal                    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.755 2 DEBUG cotyledon.oslo_config_glue [-] use_json                       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.755 2 DEBUG cotyledon.oslo_config_glue [-] use_stderr                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.755 2 DEBUG cotyledon.oslo_config_glue [-] use_syslog                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.755 2 DEBUG cotyledon.oslo_config_glue [-] watch_log_file                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.755 2 DEBUG cotyledon.oslo_config_glue [-] compute.instance_discovery_method = libvirt_metadata log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.755 2 DEBUG cotyledon.oslo_config_glue [-] compute.resource_cache_expiry  = 3600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.755 2 DEBUG cotyledon.oslo_config_glue [-] compute.resource_update_interval = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.755 2 DEBUG cotyledon.oslo_config_glue [-] coordination.backend_url       = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.755 2 DEBUG cotyledon.oslo_config_glue [-] event.definitions_cfg_file     = event_definitions.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.755 2 DEBUG cotyledon.oslo_config_glue [-] event.drop_unmatched_notifications = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.756 2 DEBUG cotyledon.oslo_config_glue [-] event.store_raw                = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.756 2 DEBUG cotyledon.oslo_config_glue [-] ipmi.node_manager_init_retry   = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.756 2 DEBUG cotyledon.oslo_config_glue [-] ipmi.polling_retry             = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.756 2 DEBUG cotyledon.oslo_config_glue [-] meter.meter_definitions_dirs   = ['/etc/ceilometer/meters.d', '/usr/lib/python3.9/site-packages/ceilometer/data/meters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.756 2 DEBUG cotyledon.oslo_config_glue [-] monasca.archive_on_failure     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.756 2 DEBUG cotyledon.oslo_config_glue [-] monasca.archive_path           = mon_pub_failures.txt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.756 2 DEBUG cotyledon.oslo_config_glue [-] monasca.auth_section           = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.756 2 DEBUG cotyledon.oslo_config_glue [-] monasca.auth_type              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.756 2 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_count            = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.756 2 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_max_retries      = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.756 2 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_mode             = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.757 2 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_polling_interval = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.757 2 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_timeout          = 15 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.757 2 DEBUG cotyledon.oslo_config_glue [-] monasca.cafile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.757 2 DEBUG cotyledon.oslo_config_glue [-] monasca.certfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.757 2 DEBUG cotyledon.oslo_config_glue [-] monasca.client_max_retries     = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.757 2 DEBUG cotyledon.oslo_config_glue [-] monasca.client_retry_interval  = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.757 2 DEBUG cotyledon.oslo_config_glue [-] monasca.clientapi_version      = 2_0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.757 2 DEBUG cotyledon.oslo_config_glue [-] monasca.cloud_name             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.757 2 DEBUG cotyledon.oslo_config_glue [-] monasca.cluster                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.757 2 DEBUG cotyledon.oslo_config_glue [-] monasca.collect_timing         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.757 2 DEBUG cotyledon.oslo_config_glue [-] monasca.control_plane          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.757 2 DEBUG cotyledon.oslo_config_glue [-] monasca.enable_api_pagination  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.758 2 DEBUG cotyledon.oslo_config_glue [-] monasca.insecure               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.758 2 DEBUG cotyledon.oslo_config_glue [-] monasca.interface              = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.758 2 DEBUG cotyledon.oslo_config_glue [-] monasca.keyfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.758 2 DEBUG cotyledon.oslo_config_glue [-] monasca.monasca_mappings       = /etc/ceilometer/monasca_field_definitions.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.758 2 DEBUG cotyledon.oslo_config_glue [-] monasca.region_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.758 2 DEBUG cotyledon.oslo_config_glue [-] monasca.retry_on_failure       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.758 2 DEBUG cotyledon.oslo_config_glue [-] monasca.split_loggers          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.758 2 DEBUG cotyledon.oslo_config_glue [-] monasca.timeout                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.758 2 DEBUG cotyledon.oslo_config_glue [-] notification.ack_on_event_error = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.758 2 DEBUG cotyledon.oslo_config_glue [-] notification.batch_size        = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.758 2 DEBUG cotyledon.oslo_config_glue [-] notification.batch_timeout     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.759 2 DEBUG cotyledon.oslo_config_glue [-] notification.messaging_urls    = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.759 2 DEBUG cotyledon.oslo_config_glue [-] notification.notification_control_exchanges = ['nova', 'glance', 'neutron', 'cinder', 'heat', 'keystone', 'sahara', 'trove', 'zaqar', 'swift', 'ceilometer', 'magnum', 'dns', 'ironic', 'aodh'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.759 2 DEBUG cotyledon.oslo_config_glue [-] notification.pipelines         = ['meter', 'event'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.759 2 DEBUG cotyledon.oslo_config_glue [-] notification.workers           = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.759 2 DEBUG cotyledon.oslo_config_glue [-] polling.batch_size             = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.759 2 DEBUG cotyledon.oslo_config_glue [-] polling.cfg_file               = polling.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.759 2 DEBUG cotyledon.oslo_config_glue [-] polling.partitioning_group_prefix = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.759 2 DEBUG cotyledon.oslo_config_glue [-] polling.pollsters_definitions_dirs = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.759 2 DEBUG cotyledon.oslo_config_glue [-] polling.tenant_name_discovery  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.759 2 DEBUG cotyledon.oslo_config_glue [-] publisher.telemetry_secret     = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.760 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.event_topic = event log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.760 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.metering_topic = metering log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.760 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.telemetry_driver = messagingv2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.760 2 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.access_key = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.760 2 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.secret_key = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.760 2 DEBUG cotyledon.oslo_config_glue [-] rgw_client.implicit_tenants    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.760 2 DEBUG cotyledon.oslo_config_glue [-] service_types.cinder           = volumev3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.760 2 DEBUG cotyledon.oslo_config_glue [-] service_types.glance           = image log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.760 2 DEBUG cotyledon.oslo_config_glue [-] service_types.neutron          = network log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.760 2 DEBUG cotyledon.oslo_config_glue [-] service_types.nova             = compute log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.760 2 DEBUG cotyledon.oslo_config_glue [-] service_types.radosgw          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.760 2 DEBUG cotyledon.oslo_config_glue [-] service_types.swift            = object-store log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.761 2 DEBUG cotyledon.oslo_config_glue [-] vmware.api_retry_count         = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.761 2 DEBUG cotyledon.oslo_config_glue [-] vmware.ca_file                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.761 2 DEBUG cotyledon.oslo_config_glue [-] vmware.host_ip                 = 127.0.0.1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.761 2 DEBUG cotyledon.oslo_config_glue [-] vmware.host_password           = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.761 2 DEBUG cotyledon.oslo_config_glue [-] vmware.host_port               = 443 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.761 2 DEBUG cotyledon.oslo_config_glue [-] vmware.host_username           =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.761 2 DEBUG cotyledon.oslo_config_glue [-] vmware.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.761 2 DEBUG cotyledon.oslo_config_glue [-] vmware.task_poll_interval      = 0.5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.761 2 DEBUG cotyledon.oslo_config_glue [-] vmware.wsdl_location           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.761 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_section = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.761 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_type  = password log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.762 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.cafile     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.762 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.certfile   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.762 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.collect_timing = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.762 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.insecure   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.762 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.interface  = internalURL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.762 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.keyfile    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.762 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.region_name = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.762 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.split_loggers = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.762 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.timeout    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.762 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_section           = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.762 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_type              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.762 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.cafile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.763 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.certfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.763 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.collect_timing         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.763 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.insecure               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.763 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.interface              = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.763 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.keyfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.763 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.region_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.763 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.split_loggers          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.763 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.timeout                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.763 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_section             = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.763 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_type                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.763 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.cafile                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.763 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.certfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.764 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.collect_timing           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.764 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.insecure                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.764 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.interface                = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.764 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.keyfile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.764 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.region_name              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.764 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.split_loggers            = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.764 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.timeout                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.764 2 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2613
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.787 12 INFO ceilometer.polling.manager [-] Looking for dynamic pollsters configurations at [['/etc/ceilometer/pollsters.d']].
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.788 12 INFO ceilometer.polling.manager [-] No dynamic pollsters found in folder [/etc/ceilometer/pollsters.d].
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.789 12 INFO ceilometer.polling.manager [-] No dynamic pollsters file found in dirs [['/etc/ceilometer/pollsters.d']].
Oct 11 01:38:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:01.802 12 INFO oslo.privsep.daemon [-] Running privsep helper: ['sudo', 'ceilometer-rootwrap', '/etc/ceilometer/rootwrap.conf', 'privsep-helper', '--privsep_context', 'ceilometer.privsep.sys_admin_pctxt', '--privsep_sock_path', '/tmp/tmp8fv3yl3l/privsep.sock']
Oct 11 01:38:01 compute-0 sudo[176888]: ceilometer : PWD=/ ; USER=root ; COMMAND=/usr/bin/ceilometer-rootwrap /etc/ceilometer/rootwrap.conf privsep-helper --privsep_context ceilometer.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmp8fv3yl3l/privsep.sock
Oct 11 01:38:01 compute-0 sudo[176888]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Oct 11 01:38:01 compute-0 sudo[176888]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=42405)
Oct 11 01:38:01 compute-0 sudo[176889]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sbcoclevnfefyilgzaykigrzjypuomhj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146681.2051275-521-199075955550600/AnsiballZ_systemd.py'
Oct 11 01:38:01 compute-0 sudo[176889]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:02 compute-0 python3.9[176893]: ansible-ansible.builtin.systemd Invoked with name=edpm_kepler.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:38:02 compute-0 systemd[1]: Stopping kepler container...
Oct 11 01:38:02 compute-0 kepler[176446]: I1011 01:38:02.322201       1 exporter.go:218] Received shutdown signal
Oct 11 01:38:02 compute-0 kepler[176446]: I1011 01:38:02.322990       1 exporter.go:226] Exiting...
Oct 11 01:38:02 compute-0 sudo[176888]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.492 12 INFO oslo.privsep.daemon [-] Spawned new privsep daemon via rootwrap
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.493 12 DEBUG oslo.privsep.daemon [-] Accepted privsep connection to /tmp/tmp8fv3yl3l/privsep.sock __init__ /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:362
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.340 19 INFO oslo.privsep.daemon [-] privsep daemon starting
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.347 19 INFO oslo.privsep.daemon [-] privsep process running with uid/gid: 0/0
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.351 19 INFO oslo.privsep.daemon [-] privsep process running with capabilities (eff/prm/inh): CAP_CHOWN|CAP_DAC_OVERRIDE|CAP_DAC_READ_SEARCH|CAP_FOWNER|CAP_NET_ADMIN|CAP_SYS_ADMIN/CAP_CHOWN|CAP_DAC_OVERRIDE|CAP_DAC_READ_SEARCH|CAP_FOWNER|CAP_NET_ADMIN|CAP_SYS_ADMIN/none
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.352 19 INFO oslo.privsep.daemon [-] privsep daemon running as pid 19
Oct 11 01:38:02 compute-0 systemd[1]: libpod-e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304.scope: Deactivated successfully.
Oct 11 01:38:02 compute-0 podman[176898]: 2025-10-11 01:38:02.536487421 +0000 UTC m=+0.273846898 container died e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, architecture=x86_64, io.openshift.expose-services=, managed_by=edpm_ansible, release-0.7.12=, io.openshift.tags=base rhel9, container_name=kepler, build-date=2024-09-18T21:23:30, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9, io.buildah.version=1.29.0, release=1214.1726694543, version=9.4, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, distribution-scope=public, maintainer=Red Hat, Inc., summary=Provides the latest release of Red Hat Universal Base Image 9., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_id=edpm, vcs-type=git, com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f)
Oct 11 01:38:02 compute-0 systemd[1]: e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304-66828feffb5c14b5.timer: Deactivated successfully.
Oct 11 01:38:02 compute-0 systemd[1]: Stopped /usr/bin/podman healthcheck run e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304.
Oct 11 01:38:02 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304-userdata-shm.mount: Deactivated successfully.
Oct 11 01:38:02 compute-0 systemd[1]: var-lib-containers-storage-overlay-07c333d45365e5e1beffd98c126bb9e4df6c2eef205c9a1a789247673061f9f9-merged.mount: Deactivated successfully.
Oct 11 01:38:02 compute-0 podman[176898]: 2025-10-11 01:38:02.587729454 +0000 UTC m=+0.325088891 container cleanup e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, name=ubi9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=1214.1726694543, version=9.4, build-date=2024-09-18T21:23:30, io.openshift.expose-services=, io.openshift.tags=base rhel9, summary=Provides the latest release of Red Hat Universal Base Image 9., config_id=edpm, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9, vcs-type=git, com.redhat.component=ubi9-container, architecture=x86_64, release-0.7.12=, maintainer=Red Hat, Inc., container_name=kepler, io.buildah.version=1.29.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible)
Oct 11 01:38:02 compute-0 podman[176898]: kepler
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.654 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.current: IPMITool not supported on host _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.655 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.fan: IPMITool not supported on host _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.657 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.airflow: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.658 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.cpu_util: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.658 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.cups: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.659 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.io_util: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.659 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.mem_util: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.660 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.outlet_temperature: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.660 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.power: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.660 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.temperature: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.661 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.temperature: IPMITool not supported on host _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.661 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.voltage: IPMITool not supported on host _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.662 12 WARNING ceilometer.polling.manager [-] No valid pollsters can be loaded from ['ipmi'] namespaces
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.671 12 DEBUG cotyledon.oslo_config_glue [-] Full set of CONF: _load_service_options /usr/lib/python3.9/site-packages/cotyledon/oslo_config_glue.py:48
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.672 12 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2589
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.672 12 DEBUG cotyledon.oslo_config_glue [-] Configuration options gathered from: log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2590
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.672 12 DEBUG cotyledon.oslo_config_glue [-] command line args: ['--polling-namespaces', 'ipmi', '--logfile', '/dev/stdout'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2591
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.673 12 DEBUG cotyledon.oslo_config_glue [-] config files: ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2592
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.673 12 DEBUG cotyledon.oslo_config_glue [-] ================================================================================ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2594
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.674 12 DEBUG cotyledon.oslo_config_glue [-] batch_size                     = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.674 12 DEBUG cotyledon.oslo_config_glue [-] cfg_file                       = polling.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.674 12 DEBUG cotyledon.oslo_config_glue [-] config_dir                     = ['/etc/ceilometer/ceilometer.conf.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.675 12 DEBUG cotyledon.oslo_config_glue [-] config_file                    = ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.675 12 DEBUG cotyledon.oslo_config_glue [-] config_source                  = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.675 12 DEBUG cotyledon.oslo_config_glue [-] control_exchange               = ceilometer log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.676 12 DEBUG cotyledon.oslo_config_glue [-] debug                          = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.677 12 DEBUG cotyledon.oslo_config_glue [-] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'futurist=INFO', 'neutronclient=INFO', 'keystoneclient=INFO'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.677 12 DEBUG cotyledon.oslo_config_glue [-] event_pipeline_cfg_file        = event_pipeline.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.678 12 DEBUG cotyledon.oslo_config_glue [-] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.678 12 DEBUG cotyledon.oslo_config_glue [-] host                           = compute-0.ctlplane.example.com log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.679 12 DEBUG cotyledon.oslo_config_glue [-] http_timeout                   = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.679 12 DEBUG cotyledon.oslo_config_glue [-] hypervisor_inspector           = libvirt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.680 12 DEBUG cotyledon.oslo_config_glue [-] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.680 12 DEBUG cotyledon.oslo_config_glue [-] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.680 12 DEBUG cotyledon.oslo_config_glue [-] libvirt_type                   = kvm log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.680 12 DEBUG cotyledon.oslo_config_glue [-] libvirt_uri                    =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.680 12 DEBUG cotyledon.oslo_config_glue [-] log_config_append              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.680 12 DEBUG cotyledon.oslo_config_glue [-] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.680 12 DEBUG cotyledon.oslo_config_glue [-] log_dir                        = /var/log/ceilometer log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.681 12 DEBUG cotyledon.oslo_config_glue [-] log_file                       = /dev/stdout log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.681 12 DEBUG cotyledon.oslo_config_glue [-] log_options                    = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.681 12 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.681 12 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.681 12 DEBUG cotyledon.oslo_config_glue [-] log_rotation_type              = none log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.681 12 DEBUG cotyledon.oslo_config_glue [-] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.681 12 DEBUG cotyledon.oslo_config_glue [-] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.681 12 DEBUG cotyledon.oslo_config_glue [-] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.681 12 DEBUG cotyledon.oslo_config_glue [-] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.682 12 DEBUG cotyledon.oslo_config_glue [-] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.682 12 DEBUG cotyledon.oslo_config_glue [-] max_logfile_count              = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.682 12 DEBUG cotyledon.oslo_config_glue [-] max_logfile_size_mb            = 200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.682 12 DEBUG cotyledon.oslo_config_glue [-] max_parallel_requests          = 64 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.682 12 DEBUG cotyledon.oslo_config_glue [-] partitioning_group_prefix      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.682 12 DEBUG cotyledon.oslo_config_glue [-] pipeline_cfg_file              = pipeline.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.682 12 DEBUG cotyledon.oslo_config_glue [-] polling_namespaces             = ['ipmi'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.682 12 DEBUG cotyledon.oslo_config_glue [-] pollsters_definitions_dirs     = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.682 12 DEBUG cotyledon.oslo_config_glue [-] publish_errors                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.683 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.683 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.683 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.683 12 DEBUG cotyledon.oslo_config_glue [-] reseller_prefix                = AUTH_ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.683 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_keys         = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.683 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_length       = 256 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.683 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_namespace    = ['metering.'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.683 12 DEBUG cotyledon.oslo_config_glue [-] rootwrap_config                = /etc/ceilometer/rootwrap.conf log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.684 12 DEBUG cotyledon.oslo_config_glue [-] sample_source                  = openstack log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.684 12 DEBUG cotyledon.oslo_config_glue [-] syslog_log_facility            = LOG_USER log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.684 12 DEBUG cotyledon.oslo_config_glue [-] tenant_name_discovery          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.684 12 DEBUG cotyledon.oslo_config_glue [-] transport_url                  = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.684 12 DEBUG cotyledon.oslo_config_glue [-] use_eventlog                   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.684 12 DEBUG cotyledon.oslo_config_glue [-] use_journal                    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.684 12 DEBUG cotyledon.oslo_config_glue [-] use_json                       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.684 12 DEBUG cotyledon.oslo_config_glue [-] use_stderr                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.685 12 DEBUG cotyledon.oslo_config_glue [-] use_syslog                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.685 12 DEBUG cotyledon.oslo_config_glue [-] watch_log_file                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.685 12 DEBUG cotyledon.oslo_config_glue [-] compute.instance_discovery_method = libvirt_metadata log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.685 12 DEBUG cotyledon.oslo_config_glue [-] compute.resource_cache_expiry  = 3600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.685 12 DEBUG cotyledon.oslo_config_glue [-] compute.resource_update_interval = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.685 12 DEBUG cotyledon.oslo_config_glue [-] coordination.backend_url       = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.685 12 DEBUG cotyledon.oslo_config_glue [-] event.definitions_cfg_file     = event_definitions.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.685 12 DEBUG cotyledon.oslo_config_glue [-] event.drop_unmatched_notifications = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.686 12 DEBUG cotyledon.oslo_config_glue [-] event.store_raw                = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.686 12 DEBUG cotyledon.oslo_config_glue [-] ipmi.node_manager_init_retry   = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.686 12 DEBUG cotyledon.oslo_config_glue [-] ipmi.polling_retry             = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.686 12 DEBUG cotyledon.oslo_config_glue [-] meter.meter_definitions_dirs   = ['/etc/ceilometer/meters.d', '/usr/lib/python3.9/site-packages/ceilometer/data/meters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.686 12 DEBUG cotyledon.oslo_config_glue [-] monasca.archive_on_failure     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.686 12 DEBUG cotyledon.oslo_config_glue [-] monasca.archive_path           = mon_pub_failures.txt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.686 12 DEBUG cotyledon.oslo_config_glue [-] monasca.auth_section           = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.686 12 DEBUG cotyledon.oslo_config_glue [-] monasca.auth_type              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.687 12 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_count            = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.687 12 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_max_retries      = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.687 12 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_mode             = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.687 12 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_polling_interval = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.687 12 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_timeout          = 15 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.687 12 DEBUG cotyledon.oslo_config_glue [-] monasca.cafile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.687 12 DEBUG cotyledon.oslo_config_glue [-] monasca.certfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.687 12 DEBUG cotyledon.oslo_config_glue [-] monasca.client_max_retries     = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.687 12 DEBUG cotyledon.oslo_config_glue [-] monasca.client_retry_interval  = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.688 12 DEBUG cotyledon.oslo_config_glue [-] monasca.clientapi_version      = 2_0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.688 12 DEBUG cotyledon.oslo_config_glue [-] monasca.cloud_name             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.688 12 DEBUG cotyledon.oslo_config_glue [-] monasca.cluster                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.688 12 DEBUG cotyledon.oslo_config_glue [-] monasca.collect_timing         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.688 12 DEBUG cotyledon.oslo_config_glue [-] monasca.control_plane          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.688 12 DEBUG cotyledon.oslo_config_glue [-] monasca.enable_api_pagination  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.688 12 DEBUG cotyledon.oslo_config_glue [-] monasca.insecure               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.688 12 DEBUG cotyledon.oslo_config_glue [-] monasca.interface              = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.689 12 DEBUG cotyledon.oslo_config_glue [-] monasca.keyfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.689 12 DEBUG cotyledon.oslo_config_glue [-] monasca.monasca_mappings       = /etc/ceilometer/monasca_field_definitions.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.689 12 DEBUG cotyledon.oslo_config_glue [-] monasca.region_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.689 12 DEBUG cotyledon.oslo_config_glue [-] monasca.retry_on_failure       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.689 12 DEBUG cotyledon.oslo_config_glue [-] monasca.split_loggers          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.689 12 DEBUG cotyledon.oslo_config_glue [-] monasca.timeout                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.689 12 DEBUG cotyledon.oslo_config_glue [-] notification.ack_on_event_error = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 podman[176929]: kepler
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.689 12 DEBUG cotyledon.oslo_config_glue [-] notification.batch_size        = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.689 12 DEBUG cotyledon.oslo_config_glue [-] notification.batch_timeout     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.690 12 DEBUG cotyledon.oslo_config_glue [-] notification.messaging_urls    = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.690 12 DEBUG cotyledon.oslo_config_glue [-] notification.notification_control_exchanges = ['nova', 'glance', 'neutron', 'cinder', 'heat', 'keystone', 'sahara', 'trove', 'zaqar', 'swift', 'ceilometer', 'magnum', 'dns', 'ironic', 'aodh'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.690 12 DEBUG cotyledon.oslo_config_glue [-] notification.pipelines         = ['meter', 'event'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.690 12 DEBUG cotyledon.oslo_config_glue [-] notification.workers           = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.690 12 DEBUG cotyledon.oslo_config_glue [-] polling.batch_size             = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.690 12 DEBUG cotyledon.oslo_config_glue [-] polling.cfg_file               = polling.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.690 12 DEBUG cotyledon.oslo_config_glue [-] polling.partitioning_group_prefix = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.691 12 DEBUG cotyledon.oslo_config_glue [-] polling.pollsters_definitions_dirs = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.691 12 DEBUG cotyledon.oslo_config_glue [-] polling.tenant_name_discovery  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.691 12 DEBUG cotyledon.oslo_config_glue [-] publisher.telemetry_secret     = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.691 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.event_topic = event log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.691 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.metering_topic = metering log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.691 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.telemetry_driver = messagingv2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.691 12 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.access_key = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.691 12 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.secret_key = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.692 12 DEBUG cotyledon.oslo_config_glue [-] rgw_client.implicit_tenants    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.692 12 DEBUG cotyledon.oslo_config_glue [-] service_types.cinder           = volumev3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.692 12 DEBUG cotyledon.oslo_config_glue [-] service_types.glance           = image log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.692 12 DEBUG cotyledon.oslo_config_glue [-] service_types.neutron          = network log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.692 12 DEBUG cotyledon.oslo_config_glue [-] service_types.nova             = compute log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.692 12 DEBUG cotyledon.oslo_config_glue [-] service_types.radosgw          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.692 12 DEBUG cotyledon.oslo_config_glue [-] service_types.swift            = object-store log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.692 12 DEBUG cotyledon.oslo_config_glue [-] vmware.api_retry_count         = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.693 12 DEBUG cotyledon.oslo_config_glue [-] vmware.ca_file                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.693 12 DEBUG cotyledon.oslo_config_glue [-] vmware.host_ip                 = 127.0.0.1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.693 12 DEBUG cotyledon.oslo_config_glue [-] vmware.host_password           = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.693 12 DEBUG cotyledon.oslo_config_glue [-] vmware.host_port               = 443 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.693 12 DEBUG cotyledon.oslo_config_glue [-] vmware.host_username           =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.693 12 DEBUG cotyledon.oslo_config_glue [-] vmware.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.693 12 DEBUG cotyledon.oslo_config_glue [-] vmware.task_poll_interval      = 0.5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.693 12 DEBUG cotyledon.oslo_config_glue [-] vmware.wsdl_location           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.694 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_section = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.694 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_type  = password log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.694 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.cafile     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.694 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.certfile   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.694 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.collect_timing = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.694 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.insecure   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.694 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.interface  = internalURL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.694 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.keyfile    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.694 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.region_name = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.695 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.split_loggers = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.695 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.timeout    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.695 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_section           = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.695 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_type              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.695 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.cafile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.695 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.certfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.695 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.collect_timing         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.695 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.insecure               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.695 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.interface              = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.696 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.keyfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.696 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.region_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.696 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.split_loggers          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.696 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.timeout                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.696 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_section             = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.696 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_type                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.696 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.cafile                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.696 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.certfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.696 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.collect_timing           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.697 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.insecure                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.697 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.interface                = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.697 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.keyfile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.697 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.region_name              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.697 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.split_loggers            = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.697 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.timeout                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.697 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_notifications.driver = ['noop'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.697 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_notifications.retry = -1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.698 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_notifications.topics = ['notifications'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.698 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_notifications.transport_url = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.698 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.amqp_auto_delete = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.698 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.amqp_durable_queues = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.698 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.conn_pool_min_size = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.698 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.conn_pool_ttl = 1200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.698 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.direct_mandatory_flag = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.698 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.enable_cancel_on_failover = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.698 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.heartbeat_in_pthread = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.699 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.heartbeat_rate = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.699 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.heartbeat_timeout_threshold = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.699 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.kombu_compression = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.699 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.kombu_failover_strategy = round-robin log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.699 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.kombu_missing_consumer_retry_timeout = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.699 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.kombu_reconnect_delay = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.699 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_ha_queues = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.699 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_interval_max = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.700 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_login_method = AMQPLAIN log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.700 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_qos_prefetch_count = 100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.700 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_quorum_delivery_limit = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.700 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_quorum_max_memory_bytes = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.700 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_quorum_max_memory_length = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.700 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_quorum_queue = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.700 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_retry_backoff = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.700 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_retry_interval = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.701 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_transient_queues_ttl = 1800 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.701 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rpc_conn_pool_size = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.701 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.701 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl_ca_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.701 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl_cert_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.701 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl_enforce_fips_mode = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.701 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl_key_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.701 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl_version =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.701 12 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2613
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.702 12 DEBUG cotyledon._service [-] Run service AgentManager(0) [12] wait_forever /usr/lib/python3.9/site-packages/cotyledon/_service.py:241
Oct 11 01:38:02 compute-0 systemd[1]: edpm_kepler.service: Deactivated successfully.
Oct 11 01:38:02 compute-0 systemd[1]: Stopped kepler container.
Oct 11 01:38:02 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 01:38:02.707 12 DEBUG ceilometer.agent [-] Config file: {'sources': [{'name': 'pollsters', 'interval': 120, 'meters': ['hardware.*']}]} load_config /usr/lib/python3.9/site-packages/ceilometer/agent.py:64
Oct 11 01:38:02 compute-0 systemd[1]: Starting kepler container...
Oct 11 01:38:02 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:38:02 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304.
Oct 11 01:38:02 compute-0 podman[176944]: 2025-10-11 01:38:02.93288672 +0000 UTC m=+0.176185714 container init e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, version=9.4, io.openshift.expose-services=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.buildah.version=1.29.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, name=ubi9, summary=Provides the latest release of Red Hat Universal Base Image 9., release=1214.1726694543, release-0.7.12=, build-date=2024-09-18T21:23:30, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, container_name=kepler, distribution-scope=public, managed_by=edpm_ansible, vendor=Red Hat, Inc., maintainer=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, io.openshift.tags=base rhel9, architecture=x86_64, com.redhat.component=ubi9-container, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543)
Oct 11 01:38:02 compute-0 kepler[176959]: WARNING: failed to read int from file: open /sys/devices/system/cpu/cpu0/online: no such file or directory
Oct 11 01:38:02 compute-0 kepler[176959]: I1011 01:38:02.977051       1 exporter.go:103] Kepler running on version: v0.7.12-dirty
Oct 11 01:38:02 compute-0 kepler[176959]: I1011 01:38:02.977302       1 config.go:293] using gCgroup ID in the BPF program: true
Oct 11 01:38:02 compute-0 kepler[176959]: I1011 01:38:02.977340       1 config.go:295] kernel version: 5.14
Oct 11 01:38:02 compute-0 kepler[176959]: I1011 01:38:02.978104       1 power.go:78] Unable to obtain power, use estimate method
Oct 11 01:38:02 compute-0 kepler[176959]: I1011 01:38:02.978141       1 redfish.go:169] failed to get redfish credential file path
Oct 11 01:38:02 compute-0 kepler[176959]: I1011 01:38:02.978793       1 acpi.go:71] Could not find any ACPI power meter path. Is it a VM?
Oct 11 01:38:02 compute-0 kepler[176959]: I1011 01:38:02.978813       1 power.go:79] using none to obtain power
Oct 11 01:38:02 compute-0 kepler[176959]: E1011 01:38:02.978838       1 accelerator.go:154] [DUMMY] doesn't contain GPU
Oct 11 01:38:02 compute-0 kepler[176959]: E1011 01:38:02.978872       1 exporter.go:154] failed to init GPU accelerators: no devices found
Oct 11 01:38:02 compute-0 kepler[176959]: WARNING: failed to read int from file: open /sys/devices/system/cpu/cpu0/online: no such file or directory
Oct 11 01:38:02 compute-0 kepler[176959]: I1011 01:38:02.982314       1 exporter.go:84] Number of CPUs: 8
Oct 11 01:38:02 compute-0 podman[176944]: 2025-10-11 01:38:02.989581956 +0000 UTC m=+0.232880930 container start e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.29.0, maintainer=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, distribution-scope=public, com.redhat.component=ubi9-container, io.openshift.expose-services=, io.openshift.tags=base rhel9, architecture=x86_64, build-date=2024-09-18T21:23:30, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, summary=Provides the latest release of Red Hat Universal Base Image 9., vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, name=ubi9, release=1214.1726694543, release-0.7.12=, version=9.4, managed_by=edpm_ansible)
Oct 11 01:38:02 compute-0 podman[176944]: kepler
Oct 11 01:38:03 compute-0 systemd[1]: Started kepler container.
Oct 11 01:38:03 compute-0 sudo[176889]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:03 compute-0 podman[176969]: 2025-10-11 01:38:03.132642973 +0000 UTC m=+0.131687511 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=starting, health_failing_streak=1, health_log=, io.buildah.version=1.29.0, managed_by=edpm_ansible, name=ubi9, distribution-scope=public, maintainer=Red Hat, Inc., version=9.4, build-date=2024-09-18T21:23:30, config_id=edpm, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9., io.openshift.expose-services=, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9, com.redhat.component=ubi9-container, release=1214.1726694543, vendor=Red Hat, Inc., io.openshift.tags=base rhel9, release-0.7.12=, container_name=kepler)
Oct 11 01:38:03 compute-0 systemd[1]: e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304-5ec27e20c4a956d.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 01:38:03 compute-0 systemd[1]: e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304-5ec27e20c4a956d.service: Failed with result 'exit-code'.
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.649690       1 watcher.go:83] Using in cluster k8s config
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.649788       1 watcher.go:90] failed to get config: unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined
Oct 11 01:38:03 compute-0 kepler[176959]: E1011 01:38:03.649882       1 manager.go:59] could not run the watcher k8s APIserver watcher was not enabled
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.657013       1 process_energy.go:129] Using the Ratio Power Model to estimate PROCESS_TOTAL Power
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.657154       1 process_energy.go:130] Feature names: [bpf_cpu_time_ms]
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.665289       1 process_energy.go:129] Using the Ratio Power Model to estimate PROCESS_COMPONENTS Power
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.665324       1 process_energy.go:130] Feature names: [bpf_cpu_time_ms bpf_cpu_time_ms bpf_cpu_time_ms   gpu_compute_util]
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.677154       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.677200       1 model.go:125] Requesting for Machine Spec: &{authenticamd amd_epyc_rome 8 8 7 2800 1}
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.677221       1 node_platform_energy.go:53] Using the Regressor/AbsPower Power Model to estimate Node Platform Power
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.688858       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.688891       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.688897       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.688903       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.688909       1 model.go:125] Requesting for Machine Spec: &{authenticamd amd_epyc_rome 8 8 7 2800 1}
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.688920       1 node_component_energy.go:57] Using the Regressor/AbsPower Power Model to estimate Node Component Power
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.689000       1 prometheus_collector.go:90] Registered Process Prometheus metrics
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.689027       1 prometheus_collector.go:95] Registered Container Prometheus metrics
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.689051       1 prometheus_collector.go:100] Registered VM Prometheus metrics
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.689068       1 prometheus_collector.go:104] Registered Node Prometheus metrics
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.689143       1 exporter.go:194] starting to listen on 0.0.0.0:8888
Oct 11 01:38:03 compute-0 kepler[176959]: I1011 01:38:03.689712       1 exporter.go:208] Started Kepler in 713.010254ms
Oct 11 01:38:03 compute-0 sudo[177153]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jqbplbtjhdiwvmmcxppyewetqbaxabfp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146683.247986-529-69889863970543/AnsiballZ_find.py'
Oct 11 01:38:03 compute-0 sudo[177153]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:03 compute-0 python3.9[177155]: ansible-ansible.builtin.find Invoked with file_type=directory paths=['/var/lib/openstack/healthchecks/'] patterns=[] read_whole_file=False age_stamp=mtime recurse=False hidden=False follow=False get_checksum=False checksum_algorithm=sha1 use_regex=False exact_mode=True excludes=None contains=None age=None size=None depth=None mode=None encoding=None limit=None
Oct 11 01:38:04 compute-0 sudo[177153]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:05 compute-0 sudo[177305]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ovzukmmbvpbucgdvdmlmuihrrrqrfgom ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146684.503096-539-92375392647038/AnsiballZ_podman_container_info.py'
Oct 11 01:38:05 compute-0 sudo[177305]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:05 compute-0 python3.9[177307]: ansible-containers.podman.podman_container_info Invoked with name=['ovn_controller'] executable=podman
Oct 11 01:38:05 compute-0 sudo[177305]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:06 compute-0 podman[177373]: 2025-10-11 01:38:06.328046625 +0000 UTC m=+0.214703809 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.build-date=20251009)
Oct 11 01:38:06 compute-0 sudo[177496]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gebtgcghromrqkqohyytbqhxstnehxpu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146686.033246-547-130077243164233/AnsiballZ_podman_container_exec.py'
Oct 11 01:38:06 compute-0 sudo[177496]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:06 compute-0 podman[177498]: 2025-10-11 01:38:06.954800025 +0000 UTC m=+0.110632939 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 01:38:07 compute-0 python3.9[177499]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=ovn_controller detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:38:07 compute-0 systemd[1]: Started libpod-conmon-861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112.scope.
Oct 11 01:38:07 compute-0 podman[177523]: 2025-10-11 01:38:07.268382858 +0000 UTC m=+0.173045195 container exec 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=ovn_controller, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 01:38:07 compute-0 podman[177523]: 2025-10-11 01:38:07.305305207 +0000 UTC m=+0.209967474 container exec_died 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.license=GPLv2, io.buildah.version=1.41.3)
Oct 11 01:38:07 compute-0 sudo[177496]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:07 compute-0 systemd[1]: libpod-conmon-861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112.scope: Deactivated successfully.
Oct 11 01:38:08 compute-0 sudo[177701]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iutqzxygmzdyxczsitbdgcmpzaxjnzpv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146687.7102067-555-81382279972274/AnsiballZ_podman_container_exec.py'
Oct 11 01:38:08 compute-0 sudo[177701]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:08 compute-0 python3.9[177703]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=ovn_controller detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:38:08 compute-0 systemd[1]: Started libpod-conmon-861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112.scope.
Oct 11 01:38:08 compute-0 podman[177704]: 2025-10-11 01:38:08.697790191 +0000 UTC m=+0.147371805 container exec 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, container_name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.license=GPLv2, config_id=ovn_controller, managed_by=edpm_ansible, org.label-schema.vendor=CentOS)
Oct 11 01:38:08 compute-0 podman[177704]: 2025-10-11 01:38:08.734813267 +0000 UTC m=+0.184394891 container exec_died 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, org.label-schema.license=GPLv2, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:38:08 compute-0 systemd[1]: libpod-conmon-861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112.scope: Deactivated successfully.
Oct 11 01:38:08 compute-0 sudo[177701]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:09 compute-0 sudo[177897]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ytnzgmzpyxyashijdrwucvmbauiuszhv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146689.1204538-563-130026242232555/AnsiballZ_file.py'
Oct 11 01:38:09 compute-0 sudo[177897]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:09 compute-0 podman[177858]: 2025-10-11 01:38:09.758142837 +0000 UTC m=+0.140710932 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.build-date=20251007, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm)
Oct 11 01:38:09 compute-0 python3.9[177907]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/ovn_controller recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:38:09 compute-0 sudo[177897]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:10 compute-0 sudo[178059]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-itfbapqvdcwbotuuwspqwxyiitmflmdf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146690.3002977-572-216933951399885/AnsiballZ_podman_container_info.py'
Oct 11 01:38:10 compute-0 sudo[178059]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:11 compute-0 python3.9[178061]: ansible-containers.podman.podman_container_info Invoked with name=['ceilometer_agent_compute'] executable=podman
Oct 11 01:38:11 compute-0 sudo[178059]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:12 compute-0 sudo[178224]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fpmltqhoboeplmkzaqmenaxipvapjyrz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146691.4865475-580-192423107355545/AnsiballZ_podman_container_exec.py'
Oct 11 01:38:12 compute-0 sudo[178224]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:12 compute-0 python3.9[178226]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=ceilometer_agent_compute detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:38:12 compute-0 systemd[1]: Started libpod-conmon-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope.
Oct 11 01:38:12 compute-0 podman[178227]: 2025-10-11 01:38:12.423949239 +0000 UTC m=+0.154971751 container exec c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, org.label-schema.build-date=20251007, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.4, container_name=ceilometer_agent_compute, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, maintainer=OpenStack Kubernetes Operator team)
Oct 11 01:38:12 compute-0 podman[178227]: 2025-10-11 01:38:12.460217625 +0000 UTC m=+0.191240127 container exec_died c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, org.label-schema.build-date=20251007, container_name=ceilometer_agent_compute, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_id=edpm, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team)
Oct 11 01:38:12 compute-0 sudo[178224]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:12 compute-0 systemd[1]: libpod-conmon-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope: Deactivated successfully.
Oct 11 01:38:13 compute-0 sudo[178408]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ypnelaiayayenasuewfxitrikciqoayl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146692.8259392-588-180132164172925/AnsiballZ_podman_container_exec.py'
Oct 11 01:38:13 compute-0 sudo[178408]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:13 compute-0 python3.9[178410]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=ceilometer_agent_compute detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:38:13 compute-0 systemd[1]: Started libpod-conmon-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope.
Oct 11 01:38:13 compute-0 podman[178411]: 2025-10-11 01:38:13.825508319 +0000 UTC m=+0.150001859 container exec c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, container_name=ceilometer_agent_compute, org.label-schema.vendor=CentOS)
Oct 11 01:38:13 compute-0 podman[178411]: 2025-10-11 01:38:13.864853472 +0000 UTC m=+0.189346992 container exec_died c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, tcib_managed=true, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image)
Oct 11 01:38:13 compute-0 systemd[1]: libpod-conmon-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope: Deactivated successfully.
Oct 11 01:38:13 compute-0 sudo[178408]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:14 compute-0 sudo[178590]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xiggpanjotzqnjoecxinmxlsynbnmkwe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146694.2085774-596-21028229891486/AnsiballZ_file.py'
Oct 11 01:38:14 compute-0 sudo[178590]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:15 compute-0 python3.9[178592]: ansible-ansible.builtin.file Invoked with group=42405 mode=0700 owner=42405 path=/var/lib/openstack/healthchecks/ceilometer_agent_compute recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:38:15 compute-0 sudo[178590]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:15 compute-0 sudo[178742]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xjbplqxreyjvbydxhdaerfzxpqfvvmgo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146695.381924-605-31717956664210/AnsiballZ_podman_container_info.py'
Oct 11 01:38:15 compute-0 sudo[178742]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:16 compute-0 python3.9[178744]: ansible-containers.podman.podman_container_info Invoked with name=['node_exporter'] executable=podman
Oct 11 01:38:16 compute-0 sudo[178742]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:17 compute-0 sudo[178906]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-epkcjxrreqwiqjgzwkpvewtimopgzqcr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146696.7227912-613-140969913296087/AnsiballZ_podman_container_exec.py'
Oct 11 01:38:17 compute-0 sudo[178906]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:17 compute-0 python3.9[178908]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=node_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:38:17 compute-0 systemd[1]: Started libpod-conmon-adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae.scope.
Oct 11 01:38:17 compute-0 podman[178909]: 2025-10-11 01:38:17.705060684 +0000 UTC m=+0.169651449 container exec adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 01:38:17 compute-0 podman[178909]: 2025-10-11 01:38:17.741641264 +0000 UTC m=+0.206231969 container exec_died adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 01:38:17 compute-0 sudo[178906]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:17 compute-0 systemd[1]: libpod-conmon-adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae.scope: Deactivated successfully.
Oct 11 01:38:18 compute-0 sudo[179087]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-klhucejqjcaimebrzwnzyulegirhpcfc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146698.1289215-621-61202066101513/AnsiballZ_podman_container_exec.py'
Oct 11 01:38:18 compute-0 sudo[179087]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:18 compute-0 python3.9[179089]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=node_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:38:19 compute-0 systemd[1]: Started libpod-conmon-adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae.scope.
Oct 11 01:38:19 compute-0 podman[179090]: 2025-10-11 01:38:19.173460017 +0000 UTC m=+0.157135346 container exec adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 01:38:19 compute-0 podman[179090]: 2025-10-11 01:38:19.209308306 +0000 UTC m=+0.192983595 container exec_died adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 01:38:19 compute-0 systemd[1]: libpod-conmon-adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae.scope: Deactivated successfully.
Oct 11 01:38:19 compute-0 sudo[179087]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:20 compute-0 sudo[179271]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dzockxrublxjkjfyxkrgmottgwommqkh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146699.5832453-629-270686274109707/AnsiballZ_file.py'
Oct 11 01:38:20 compute-0 sudo[179271]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:20 compute-0 python3.9[179273]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/node_exporter recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:38:20 compute-0 sudo[179271]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:21 compute-0 sudo[179423]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ewfcsnnwrrpgnydqspeptynpwveobfuj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146700.6994286-638-255535853942415/AnsiballZ_podman_container_info.py'
Oct 11 01:38:21 compute-0 sudo[179423]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:21 compute-0 python3.9[179425]: ansible-containers.podman.podman_container_info Invoked with name=['podman_exporter'] executable=podman
Oct 11 01:38:21 compute-0 sudo[179423]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:22 compute-0 sudo[179619]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yajeczcqnuqjwwyifklfickqbuuphlvk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146701.96271-646-113404900354809/AnsiballZ_podman_container_exec.py'
Oct 11 01:38:22 compute-0 podman[179563]: 2025-10-11 01:38:22.513081569 +0000 UTC m=+0.124548294 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, version=9.6, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, managed_by=edpm_ansible, url=https://catalog.redhat.com/en/search?searchType=containers, architecture=x86_64, io.buildah.version=1.33.7, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.openshift.tags=minimal rhel9, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=openstack_network_exporter, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., build-date=2025-08-20T13:12:41, com.redhat.component=ubi9-minimal-container, name=ubi9-minimal, release=1755695350, vendor=Red Hat, Inc., io.openshift.expose-services=)
Oct 11 01:38:22 compute-0 sudo[179619]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:22 compute-0 podman[179562]: 2025-10-11 01:38:22.53331845 +0000 UTC m=+0.146480664 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:38:22 compute-0 python3.9[179632]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=podman_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:38:22 compute-0 systemd[1]: Started libpod-conmon-2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899.scope.
Oct 11 01:38:22 compute-0 podman[179634]: 2025-10-11 01:38:22.931609668 +0000 UTC m=+0.169838332 container exec 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 01:38:22 compute-0 podman[179634]: 2025-10-11 01:38:22.965269955 +0000 UTC m=+0.203498569 container exec_died 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 01:38:23 compute-0 systemd[1]: libpod-conmon-2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899.scope: Deactivated successfully.
Oct 11 01:38:23 compute-0 sudo[179619]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:23 compute-0 sudo[179810]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tsjownyoqznapktonmvtesekwtxniivk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146703.3164432-654-40381126548378/AnsiballZ_podman_container_exec.py'
Oct 11 01:38:23 compute-0 sudo[179810]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:24 compute-0 python3.9[179812]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=podman_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:38:24 compute-0 systemd[1]: Started libpod-conmon-2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899.scope.
Oct 11 01:38:24 compute-0 podman[179813]: 2025-10-11 01:38:24.369658276 +0000 UTC m=+0.174558971 container exec 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 01:38:24 compute-0 podman[179813]: 2025-10-11 01:38:24.405379957 +0000 UTC m=+0.210280622 container exec_died 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 01:38:24 compute-0 systemd[1]: libpod-conmon-2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899.scope: Deactivated successfully.
Oct 11 01:38:24 compute-0 sudo[179810]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:25 compute-0 sudo[179989]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nfpmqseiqocbyszyxpwxocpvkypyrjtc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146704.7374146-662-226000055617626/AnsiballZ_file.py'
Oct 11 01:38:25 compute-0 sudo[179989]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:25 compute-0 python3.9[179991]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/podman_exporter recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:38:25 compute-0 sudo[179989]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:26 compute-0 sudo[180141]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tzqkofhaftsfryugteetyknkanibegbq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146705.9109123-671-271607981694802/AnsiballZ_podman_container_info.py'
Oct 11 01:38:26 compute-0 sudo[180141]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:26 compute-0 python3.9[180143]: ansible-containers.podman.podman_container_info Invoked with name=['openstack_network_exporter'] executable=podman
Oct 11 01:38:26 compute-0 sudo[180141]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:27 compute-0 sudo[180306]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aatntyrhbwdbhdaiogrdrznfsqoybtfa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146707.1939943-679-96594334468544/AnsiballZ_podman_container_exec.py'
Oct 11 01:38:27 compute-0 sudo[180306]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:28 compute-0 python3.9[180308]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=openstack_network_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:38:28 compute-0 systemd[1]: Started libpod-conmon-ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46.scope.
Oct 11 01:38:28 compute-0 podman[180309]: 2025-10-11 01:38:28.251031278 +0000 UTC m=+0.146927495 container exec ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, io.openshift.expose-services=, maintainer=Red Hat, Inc., name=ubi9-minimal, vcs-type=git, version=9.6, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, vendor=Red Hat, Inc., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, release=1755695350, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, build-date=2025-08-20T13:12:41, io.buildah.version=1.33.7, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, url=https://catalog.redhat.com/en/search?searchType=containers, config_id=edpm, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., architecture=x86_64, com.redhat.component=ubi9-minimal-container, managed_by=edpm_ansible, container_name=openstack_network_exporter)
Oct 11 01:38:28 compute-0 podman[180309]: 2025-10-11 01:38:28.285272385 +0000 UTC m=+0.181168522 container exec_died ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, distribution-scope=public, name=ubi9-minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, architecture=x86_64, managed_by=edpm_ansible, release=1755695350, version=9.6, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.buildah.version=1.33.7, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=openstack_network_exporter, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.component=ubi9-minimal-container, vcs-type=git, io.openshift.expose-services=, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=minimal rhel9, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_id=edpm)
Oct 11 01:38:28 compute-0 sudo[180306]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:28 compute-0 systemd[1]: libpod-conmon-ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46.scope: Deactivated successfully.
Oct 11 01:38:29 compute-0 sudo[180487]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sxjtlifisrdprzubxdnbqaxmgbkbtixk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146708.675846-687-236728991177788/AnsiballZ_podman_container_exec.py'
Oct 11 01:38:29 compute-0 sudo[180487]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:29 compute-0 python3.9[180489]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=openstack_network_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:38:29 compute-0 systemd[1]: Started libpod-conmon-ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46.scope.
Oct 11 01:38:29 compute-0 podman[180490]: 2025-10-11 01:38:29.673452605 +0000 UTC m=+0.155410465 container exec ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, io.openshift.tags=minimal rhel9, architecture=x86_64, managed_by=edpm_ansible, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., vcs-type=git, build-date=2025-08-20T13:12:41, version=9.6, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, distribution-scope=public, url=https://catalog.redhat.com/en/search?searchType=containers, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.openshift.expose-services=, config_id=edpm, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, name=ubi9-minimal, container_name=openstack_network_exporter, com.redhat.component=ubi9-minimal-container, release=1755695350, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 01:38:29 compute-0 podman[180490]: 2025-10-11 01:38:29.709327517 +0000 UTC m=+0.191285357 container exec_died ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, managed_by=edpm_ansible, name=ubi9-minimal, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.tags=minimal rhel9, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., maintainer=Red Hat, Inc., io.buildah.version=1.33.7, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, architecture=x86_64, release=1755695350, vcs-type=git, vendor=Red Hat, Inc., build-date=2025-08-20T13:12:41, container_name=openstack_network_exporter, com.redhat.component=ubi9-minimal-container, io.openshift.expose-services=, distribution-scope=public, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, version=9.6, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm)
Oct 11 01:38:29 compute-0 podman[157119]: time="2025-10-11T01:38:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:38:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:38:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 18535 "" "Go-http-client/1.1"
Oct 11 01:38:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:38:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 2982 "" "Go-http-client/1.1"
Oct 11 01:38:29 compute-0 systemd[1]: libpod-conmon-ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46.scope: Deactivated successfully.
Oct 11 01:38:29 compute-0 sudo[180487]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:30 compute-0 sudo[180672]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oyukcawsjdyidvbjvrmbtwhhzlptqhhb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146710.1391587-695-241601739571131/AnsiballZ_file.py'
Oct 11 01:38:30 compute-0 sudo[180672]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:30 compute-0 python3.9[180674]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/openstack_network_exporter recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:38:30 compute-0 sudo[180672]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:31 compute-0 podman[180698]: 2025-10-11 01:38:31.263029418 +0000 UTC m=+0.152067583 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=starting, health_failing_streak=2, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 01:38:31 compute-0 systemd[1]: 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c-39a327549edd6123.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 01:38:31 compute-0 systemd[1]: 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c-39a327549edd6123.service: Failed with result 'exit-code'.
Oct 11 01:38:31 compute-0 openstack_network_exporter[159265]: ERROR   01:38:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:38:31 compute-0 openstack_network_exporter[159265]: ERROR   01:38:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:38:31 compute-0 openstack_network_exporter[159265]: ERROR   01:38:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:38:31 compute-0 openstack_network_exporter[159265]: ERROR   01:38:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:38:31 compute-0 openstack_network_exporter[159265]: ERROR   01:38:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:38:31 compute-0 sudo[180843]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uqrsdyxqxslwclaxuvsmbcaiklfqseht ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146711.3307066-704-207612146907222/AnsiballZ_podman_container_info.py'
Oct 11 01:38:31 compute-0 sudo[180843]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:32 compute-0 python3.9[180845]: ansible-containers.podman.podman_container_info Invoked with name=['ceilometer_agent_ipmi'] executable=podman
Oct 11 01:38:32 compute-0 sudo[180843]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:33 compute-0 sudo[181007]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zrvwdxfknloiguqrnfwwsrairpnfacow ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146712.5992494-712-253833177934081/AnsiballZ_podman_container_exec.py'
Oct 11 01:38:33 compute-0 sudo[181007]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:33 compute-0 python3.9[181009]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=ceilometer_agent_ipmi detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:38:33 compute-0 systemd[1]: Started libpod-conmon-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.scope.
Oct 11 01:38:33 compute-0 podman[181010]: 2025-10-11 01:38:33.513687589 +0000 UTC m=+0.164623408 container exec 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, org.label-schema.vendor=CentOS, config_id=edpm, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, container_name=ceilometer_agent_ipmi, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.schema-version=1.0)
Oct 11 01:38:33 compute-0 podman[181010]: 2025-10-11 01:38:33.549647286 +0000 UTC m=+0.200583105 container exec_died 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_ipmi, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.license=GPLv2, config_id=edpm, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Oct 11 01:38:33 compute-0 sudo[181007]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:33 compute-0 systemd[1]: libpod-conmon-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.scope: Deactivated successfully.
Oct 11 01:38:33 compute-0 podman[181025]: 2025-10-11 01:38:33.655895864 +0000 UTC m=+0.155195081 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., name=ubi9, release-0.7.12=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, summary=Provides the latest release of Red Hat Universal Base Image 9., com.redhat.component=ubi9-container, io.k8s.display-name=Red Hat Universal Base Image 9, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, managed_by=edpm_ansible, version=9.4, io.openshift.tags=base rhel9, release=1214.1726694543, container_name=kepler, vcs-type=git, io.buildah.version=1.29.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, build-date=2024-09-18T21:23:30, architecture=x86_64, vendor=Red Hat, Inc.)
Oct 11 01:38:34 compute-0 sudo[181209]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vvjdvdzpwxcuiouebwxoargvofqtvjat ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146713.8634958-720-19779723054746/AnsiballZ_podman_container_exec.py'
Oct 11 01:38:34 compute-0 sudo[181209]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:34 compute-0 python3.9[181211]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=ceilometer_agent_ipmi detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:38:34 compute-0 systemd[1]: Started libpod-conmon-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.scope.
Oct 11 01:38:34 compute-0 podman[181212]: 2025-10-11 01:38:34.805713487 +0000 UTC m=+0.165391672 container exec 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=edpm, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009)
Oct 11 01:38:34 compute-0 podman[181212]: 2025-10-11 01:38:34.839924592 +0000 UTC m=+0.199602777 container exec_died 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_managed=true, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_id=edpm)
Oct 11 01:38:34 compute-0 sudo[181209]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:34 compute-0 systemd[1]: libpod-conmon-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.scope: Deactivated successfully.
Oct 11 01:38:35 compute-0 sudo[181391]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cywckdolzmbfqirsjzpikbnxucfqqfqn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146715.2076495-728-85979374469847/AnsiballZ_file.py'
Oct 11 01:38:35 compute-0 sudo[181391]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:36 compute-0 python3.9[181393]: ansible-ansible.builtin.file Invoked with group=42405 mode=0700 owner=42405 path=/var/lib/openstack/healthchecks/ceilometer_agent_ipmi recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:38:36 compute-0 sudo[181391]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:36 compute-0 sudo[181557]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pqrwerrvrdrhpevfvgloalbhkwaxvtvn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146716.3840923-737-250065873125453/AnsiballZ_podman_container_info.py'
Oct 11 01:38:36 compute-0 sudo[181557]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:37 compute-0 podman[181517]: 2025-10-11 01:38:37.058431192 +0000 UTC m=+0.205428783 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=ovn_controller)
Oct 11 01:38:37 compute-0 python3.9[181564]: ansible-containers.podman.podman_container_info Invoked with name=['kepler'] executable=podman
Oct 11 01:38:37 compute-0 podman[181571]: 2025-10-11 01:38:37.274925155 +0000 UTC m=+0.158364501 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 01:38:37 compute-0 sudo[181557]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:38 compute-0 sudo[181757]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mxklxmbqnjxavswnjscsuxjubcgthhnc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146717.7270317-745-238823916329477/AnsiballZ_podman_container_exec.py'
Oct 11 01:38:38 compute-0 sudo[181757]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:38 compute-0 python3.9[181759]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=kepler detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:38:38 compute-0 systemd[1]: Started libpod-conmon-e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304.scope.
Oct 11 01:38:38 compute-0 podman[181760]: 2025-10-11 01:38:38.705136677 +0000 UTC m=+0.139243099 container exec e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=kepler, distribution-scope=public, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, com.redhat.component=ubi9-container, build-date=2024-09-18T21:23:30, io.openshift.tags=base rhel9, managed_by=edpm_ansible, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.4, release=1214.1726694543, vendor=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, summary=Provides the latest release of Red Hat Universal Base Image 9., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9, vcs-type=git, maintainer=Red Hat, Inc., io.buildah.version=1.29.0, name=ubi9, release-0.7.12=, architecture=x86_64)
Oct 11 01:38:38 compute-0 podman[181760]: 2025-10-11 01:38:38.742930452 +0000 UTC m=+0.177036814 container exec_died e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=kepler, io.openshift.expose-services=, vcs-type=git, com.redhat.component=ubi9-container, io.buildah.version=1.29.0, name=ubi9, release=1214.1726694543, managed_by=edpm_ansible, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, version=9.4, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., io.openshift.tags=base rhel9, release-0.7.12=, architecture=x86_64, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9.)
Oct 11 01:38:38 compute-0 sudo[181757]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:38 compute-0 systemd[1]: libpod-conmon-e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304.scope: Deactivated successfully.
Oct 11 01:38:39 compute-0 sudo[181939]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kfpodjxmoiwomkegkdfiqphlgwfohvts ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146719.0976613-753-12624840500939/AnsiballZ_podman_container_exec.py'
Oct 11 01:38:39 compute-0 sudo[181939]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:39 compute-0 python3.9[181941]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=kepler detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 01:38:40 compute-0 systemd[1]: Started libpod-conmon-e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304.scope.
Oct 11 01:38:40 compute-0 podman[181942]: 2025-10-11 01:38:40.094967603 +0000 UTC m=+0.170220878 container exec e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, summary=Provides the latest release of Red Hat Universal Base Image 9., managed_by=edpm_ansible, name=ubi9, container_name=kepler, release-0.7.12=, vcs-type=git, build-date=2024-09-18T21:23:30, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-container, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.openshift.tags=base rhel9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, version=9.4, config_id=edpm, io.buildah.version=1.29.0, release=1214.1726694543, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, maintainer=Red Hat, Inc.)
Oct 11 01:38:40 compute-0 podman[181942]: 2025-10-11 01:38:40.130507241 +0000 UTC m=+0.205760466 container exec_died e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, container_name=kepler, io.openshift.tags=base rhel9, vcs-type=git, version=9.4, maintainer=Red Hat, Inc., build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, name=ubi9, io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, com.redhat.component=ubi9-container, io.buildah.version=1.29.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1214.1726694543, architecture=x86_64, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, summary=Provides the latest release of Red Hat Universal Base Image 9., managed_by=edpm_ansible, vendor=Red Hat, Inc.)
Oct 11 01:38:40 compute-0 sudo[181939]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:40 compute-0 systemd[1]: libpod-conmon-e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304.scope: Deactivated successfully.
Oct 11 01:38:40 compute-0 podman[181958]: 2025-10-11 01:38:40.246981941 +0000 UTC m=+0.148349874 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 01:38:41 compute-0 sudo[182138]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ioswtigycrbuspogzyqjvneakewynzrv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146720.480773-761-271133254717193/AnsiballZ_file.py'
Oct 11 01:38:41 compute-0 sudo[182138]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:41 compute-0 python3.9[182140]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/kepler recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:38:41 compute-0 sudo[182138]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:42 compute-0 sudo[182290]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-brnejyqpvkpespurnnlcarendkhtyrdt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146721.7087684-770-109796999500792/AnsiballZ_file.py'
Oct 11 01:38:42 compute-0 sudo[182290]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:42 compute-0 python3.9[182292]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall/ state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:38:42 compute-0 sudo[182290]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:43 compute-0 sudo[182442]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tqkhbhyufhxhoyczgdfsumgqjdijqthc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146722.8423467-778-207596084713574/AnsiballZ_stat.py'
Oct 11 01:38:43 compute-0 sudo[182442]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:43 compute-0 python3.9[182444]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/kepler.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:38:43 compute-0 sudo[182442]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:44 compute-0 sudo[182565]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mjpieggnbyugmgiwonnefbbvinvxqswz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146722.8423467-778-207596084713574/AnsiballZ_copy.py'
Oct 11 01:38:44 compute-0 sudo[182565]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:44 compute-0 python3.9[182567]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/edpm-config/firewall/kepler.yaml mode=0640 src=/home/zuul/.ansible/tmp/ansible-tmp-1760146722.8423467-778-207596084713574/.source.yaml follow=False _original_basename=firewall.yaml.j2 checksum=40b8960d32c81de936cddbeb137a8240ecc54e7b backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:38:44 compute-0 sudo[182565]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:45 compute-0 sudo[182717]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mhlvizkhworabrdxwkofznofzgbowyvu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146725.19004-794-274063658148562/AnsiballZ_file.py'
Oct 11 01:38:45 compute-0 sudo[182717]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:45 compute-0 python3.9[182719]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:38:45 compute-0 sudo[182717]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:46 compute-0 sudo[182869]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gpcurnzmhkeouwyvnnehcpnklzvrmkhq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146726.244728-802-5213344079304/AnsiballZ_stat.py'
Oct 11 01:38:46 compute-0 sudo[182869]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:46 compute-0 python3.9[182871]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:38:46 compute-0 sudo[182869]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:47 compute-0 sudo[182947]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hrkggirxonjnncxyymqidfkgkukdktja ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146726.244728-802-5213344079304/AnsiballZ_file.py'
Oct 11 01:38:47 compute-0 sudo[182947]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:47 compute-0 python3.9[182949]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml _original_basename=base-rules.yaml.j2 recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:38:47 compute-0 sudo[182947]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:48 compute-0 sudo[183099]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fippgkkwkkxvnvbklshvktexmtdvmitn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146727.8380377-814-68960341931496/AnsiballZ_stat.py'
Oct 11 01:38:48 compute-0 sudo[183099]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:48 compute-0 python3.9[183101]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:38:48 compute-0 sudo[183099]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:49 compute-0 sudo[183177]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kxsaavuzkjtvpxbaaokzphdkxmnohxqq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146727.8380377-814-68960341931496/AnsiballZ_file.py'
Oct 11 01:38:49 compute-0 sudo[183177]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:49 compute-0 python3.9[183179]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml _original_basename=.qyo4yw4a recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:38:49 compute-0 sudo[183177]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:50 compute-0 sudo[183330]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fqakdesyqbhupdywbaqjimbgrchhmhyy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146729.7382534-826-257540907479760/AnsiballZ_stat.py'
Oct 11 01:38:50 compute-0 sudo[183330]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:50 compute-0 python3.9[183332]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/iptables.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:38:50 compute-0 sudo[183330]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:51 compute-0 sudo[183408]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xhwduvghsifsqihhmwzrnxuywbiuunho ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146729.7382534-826-257540907479760/AnsiballZ_file.py'
Oct 11 01:38:51 compute-0 sudo[183408]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:51 compute-0 python3.9[183410]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/iptables.nft _original_basename=iptables.nft recurse=False state=file path=/etc/nftables/iptables.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:38:51 compute-0 sudo[183408]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:52 compute-0 sudo[183560]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kichjjwcnkmqhlbxdbajbmufihvikxpy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146731.766027-839-220106512505522/AnsiballZ_command.py'
Oct 11 01:38:52 compute-0 sudo[183560]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:52 compute-0 python3.9[183562]: ansible-ansible.legacy.command Invoked with _raw_params=nft -j list ruleset _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:38:52 compute-0 sudo[183560]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:53 compute-0 podman[183619]: 2025-10-11 01:38:53.242990437 +0000 UTC m=+0.128591926 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:38:53 compute-0 podman[183625]: 2025-10-11 01:38:53.262458284 +0000 UTC m=+0.147629843 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.expose-services=, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, architecture=x86_64, distribution-scope=public, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, maintainer=Red Hat, Inc., io.buildah.version=1.33.7, io.openshift.tags=minimal rhel9, container_name=openstack_network_exporter, name=ubi9-minimal, vcs-type=git, vendor=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., build-date=2025-08-20T13:12:41, config_id=edpm, com.redhat.component=ubi9-minimal-container, version=9.6)
Oct 11 01:38:53 compute-0 sudo[183754]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lzimjhvbohmpbqqvdvteomkghtxdootn ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760146733.0143197-847-29709464097873/AnsiballZ_edpm_nftables_from_files.py'
Oct 11 01:38:53 compute-0 sudo[183754]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:54 compute-0 python3[183756]: ansible-edpm_nftables_from_files Invoked with src=/var/lib/edpm-config/firewall
Oct 11 01:38:54 compute-0 sudo[183754]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:55 compute-0 sudo[183906]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qhilrxxghlltlrkaagpokmbdlcdgmsrq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146734.4243069-855-118024180037673/AnsiballZ_stat.py'
Oct 11 01:38:55 compute-0 sudo[183906]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:55 compute-0 python3.9[183908]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:38:55 compute-0 sudo[183906]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:55 compute-0 sudo[183984]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jrxtnxmyzvqsfsmqxoipekasrrceafpv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146734.4243069-855-118024180037673/AnsiballZ_file.py'
Oct 11 01:38:55 compute-0 sudo[183984]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:56 compute-0 python3.9[183986]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-jumps.nft _original_basename=jump-chain.j2 recurse=False state=file path=/etc/nftables/edpm-jumps.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:38:56 compute-0 sudo[183984]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:57 compute-0 sudo[184136]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-quvmshzoyjsttplsxsyntysindxsuobm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146736.3741672-867-51373592487144/AnsiballZ_stat.py'
Oct 11 01:38:57 compute-0 sudo[184136]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:57 compute-0 python3.9[184138]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-update-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:38:57 compute-0 sudo[184136]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:57 compute-0 sudo[184214]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-erjfgnixoigsyublpdaeuwdploqqcpbg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146736.3741672-867-51373592487144/AnsiballZ_file.py'
Oct 11 01:38:57 compute-0 sudo[184214]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:57 compute-0 python3.9[184216]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-update-jumps.nft _original_basename=jump-chain.j2 recurse=False state=file path=/etc/nftables/edpm-update-jumps.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:38:58 compute-0 sudo[184214]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:58 compute-0 sudo[184366]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uidqsemzrokmxyppqvoudheofdbltubg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146738.3440022-879-91470323865194/AnsiballZ_stat.py'
Oct 11 01:38:58 compute-0 sudo[184366]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:59 compute-0 python3.9[184368]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-flushes.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:38:59 compute-0 sudo[184366]: pam_unix(sudo:session): session closed for user root
Oct 11 01:38:59 compute-0 sudo[184444]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xofgwossovnxbguifawhwumdbilslczw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146738.3440022-879-91470323865194/AnsiballZ_file.py'
Oct 11 01:38:59 compute-0 sudo[184444]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:38:59 compute-0 podman[157119]: time="2025-10-11T01:38:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:38:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:38:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 18534 "" "Go-http-client/1.1"
Oct 11 01:38:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:38:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 2987 "" "Go-http-client/1.1"
Oct 11 01:38:59 compute-0 python3.9[184446]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-flushes.nft _original_basename=flush-chain.j2 recurse=False state=file path=/etc/nftables/edpm-flushes.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:38:59 compute-0 sudo[184444]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:00 compute-0 sudo[184596]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rmdvzfhoauuihqvrqvzsmbkkvzudyvfs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146740.2342968-891-40369317105252/AnsiballZ_stat.py'
Oct 11 01:39:00 compute-0 sudo[184596]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:01 compute-0 python3.9[184598]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-chains.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:39:01 compute-0 sudo[184596]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:01 compute-0 openstack_network_exporter[159265]: ERROR   01:39:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:39:01 compute-0 openstack_network_exporter[159265]: ERROR   01:39:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:39:01 compute-0 openstack_network_exporter[159265]: ERROR   01:39:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:39:01 compute-0 openstack_network_exporter[159265]: ERROR   01:39:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:39:01 compute-0 sudo[184686]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nexuxjttnrvuinugsjtnbenflyqxipmw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146740.2342968-891-40369317105252/AnsiballZ_file.py'
Oct 11 01:39:01 compute-0 sudo[184686]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:01 compute-0 podman[184648]: 2025-10-11 01:39:01.609833305 +0000 UTC m=+0.133435844 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_id=edpm, org.label-schema.build-date=20251009, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_ipmi)
Oct 11 01:39:01 compute-0 python3.9[184694]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-chains.nft _original_basename=chains.j2 recurse=False state=file path=/etc/nftables/edpm-chains.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:39:01 compute-0 sudo[184686]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:02 compute-0 sudo[184845]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iyzuuodcvywlpshpcsvhddhwpvuhzqfj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146742.1278217-903-201143964336675/AnsiballZ_stat.py'
Oct 11 01:39:02 compute-0 sudo[184845]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:03 compute-0 python3.9[184847]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-rules.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:39:03 compute-0 sudo[184845]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:03 compute-0 sudo[184970]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vhpgtekgoqvdnjysmiigaqcrtfjgxtlm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146742.1278217-903-201143964336675/AnsiballZ_copy.py'
Oct 11 01:39:03 compute-0 sudo[184970]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:04 compute-0 podman[184972]: 2025-10-11 01:39:04.013748962 +0000 UTC m=+0.160446507 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.29.0, release=1214.1726694543, vendor=Red Hat, Inc., managed_by=edpm_ansible, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, com.redhat.component=ubi9-container, build-date=2024-09-18T21:23:30, config_id=edpm, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, version=9.4, maintainer=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, container_name=kepler, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.expose-services=, architecture=x86_64, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9., release-0.7.12=, distribution-scope=public, io.openshift.tags=base rhel9, vcs-type=git)
Oct 11 01:39:04 compute-0 python3.9[184973]: ansible-ansible.legacy.copy Invoked with dest=/etc/nftables/edpm-rules.nft group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760146742.1278217-903-201143964336675/.source.nft follow=False _original_basename=ruleset.j2 checksum=195cfcdc3ed4fc7d98b13eed88ef5cb7956fa1b3 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:39:04 compute-0 sudo[184970]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:04 compute-0 sudo[185142]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rmyfuqzqgmyygnvrivyppsvhmoanwiym ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146744.410828-918-173995321804721/AnsiballZ_file.py'
Oct 11 01:39:05 compute-0 sudo[185142]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:05 compute-0 python3.9[185144]: ansible-ansible.builtin.file Invoked with group=root mode=0600 owner=root path=/etc/nftables/edpm-rules.nft.changed state=touch recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:39:05 compute-0 sudo[185142]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:06 compute-0 sudo[185294]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yznrahopofdfdyfzuvucimyjnbzytxeg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146745.538759-926-62612838298801/AnsiballZ_command.py'
Oct 11 01:39:06 compute-0 sudo[185294]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:06 compute-0 python3.9[185296]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail; cat /etc/nftables/edpm-chains.nft /etc/nftables/edpm-flushes.nft /etc/nftables/edpm-rules.nft /etc/nftables/edpm-update-jumps.nft /etc/nftables/edpm-jumps.nft | nft -c -f - _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:39:06 compute-0 sudo[185294]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:07 compute-0 podman[185423]: 2025-10-11 01:39:07.457477042 +0000 UTC m=+0.121272246 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 01:39:07 compute-0 sudo[185482]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xnznossivsyzksxhccmviquwurwmayev ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146746.6591384-934-145655718905833/AnsiballZ_blockinfile.py'
Oct 11 01:39:07 compute-0 sudo[185482]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:07 compute-0 podman[185424]: 2025-10-11 01:39:07.542190898 +0000 UTC m=+0.201512400 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 01:39:07 compute-0 python3.9[185495]: ansible-ansible.builtin.blockinfile Invoked with backup=False block=include "/etc/nftables/iptables.nft"
                                             include "/etc/nftables/edpm-chains.nft"
                                             include "/etc/nftables/edpm-rules.nft"
                                             include "/etc/nftables/edpm-jumps.nft"
                                              path=/etc/sysconfig/nftables.conf validate=nft -c -f %s state=present marker=# {mark} ANSIBLE MANAGED BLOCK create=False marker_begin=BEGIN marker_end=END append_newline=False prepend_newline=False unsafe_writes=False insertafter=None insertbefore=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:39:07 compute-0 sudo[185482]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.932 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to be longer than the expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.933 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.934 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.935 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f8ed27f97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.936 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb8c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.936 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.936 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.936 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb1a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.937 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb200>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.937 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.937 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed2874260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.937 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.937 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.937 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed3ab42f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.938 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.938 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.939 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f8ed27fbad0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.939 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.939 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f8ed27faff0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.939 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.940 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f8ed27fb110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.938 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb350>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb90>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.940 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.941 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f8ed27fb170>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.941 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f8ed27fb1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.942 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fa390>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb3b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbbf0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbc80>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27f9610>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb620>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbe30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f8ed27fb230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.945 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbec0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbf50>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.945 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f8ed2874230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f8ed27fb290>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f8ed5778d70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f8ed27fb650>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f8ed27fbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f8ed27fb320>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f8ed27fbb60>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f8ed27fa3f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f8ed27fb380>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f8ed27fbbc0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f8ed27fbc50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f8ed27fbce0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f8ed27fbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f8ed27fb590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f8ed27f95e0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f8ed27fb5f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f8ed27fbe00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f8ed27fbe90>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f8ed27fbf20>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.955 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.955 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.955 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.955 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.955 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.955 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.956 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.956 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.956 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.956 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.959 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:39:07.959 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:39:08 compute-0 sudo[185651]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-flgibivkvsbmubjrluadeweszsrlzghn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146748.072767-943-189643818944499/AnsiballZ_command.py'
Oct 11 01:39:08 compute-0 sudo[185651]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:08 compute-0 python3.9[185653]: ansible-ansible.legacy.command Invoked with _raw_params=nft -f /etc/nftables/edpm-chains.nft _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
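Note that /etc/nftables/edpm-chains.nft is loaded on its own here, before any rules. Its contents are not captured in this log; as a purely hypothetical illustration, a chains file in this layout would normally do no more than declare the table and the empty named chains that the later rules and jumps files attach to, along the lines of:

    # hypothetical sketch only -- the real edpm-chains.nft is not shown in this log
    table inet edpm {
        chain EDPM_INPUT { }
        chain EDPM_FORWARD { }
    }

Loading it repeatedly is safe because re-declaring an existing table or chain with nft -f is additive rather than an error, which is what lets the play re-run idempotently.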
Oct 11 01:39:08 compute-0 sudo[185651]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:09 compute-0 sudo[185804]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gwmufxultsrdusamenryhidaanvufyrp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146749.2261693-951-39757237049398/AnsiballZ_stat.py'
Oct 11 01:39:09 compute-0 sudo[185804]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:09 compute-0 python3.9[185806]: ansible-ansible.builtin.stat Invoked with path=/etc/nftables/edpm-rules.nft.changed follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:39:10 compute-0 sudo[185804]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:10 compute-0 podman[185932]: 2025-10-11 01:39:10.899975146 +0000 UTC m=+0.099882184 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 10 Base Image, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, io.buildah.version=1.41.4, org.label-schema.schema-version=1.0, config_id=edpm, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2)
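The periodic health_status entries in this log come from podman's healthcheck timers running the test command recorded in each container's config_data (here /openstack/healthcheck, mounted read-only into the container). The same check can be triggered by hand against any of the container names shown above:

    # run the configured healthcheck once; exit status 0 means healthy
    podman healthcheck run ceilometer_agent_compute && echo healthy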
Oct 11 01:39:10 compute-0 sudo[185975]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xwpyczllffxfasteyzndebjihtkuwjyj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146750.3268418-959-128574926792937/AnsiballZ_command.py'
Oct 11 01:39:10 compute-0 sudo[185975]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:11 compute-0 python3.9[185979]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail; cat /etc/nftables/edpm-flushes.nft /etc/nftables/edpm-rules.nft /etc/nftables/edpm-update-jumps.nft | nft -f - _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:39:11 compute-0 sudo[185975]: pam_unix(sudo:session): session closed for user root
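The command above stitches three files into a single nft invocation on purpose: nft -f applies its whole input as one transaction, so the flush of the old EDPM rules, the new rules, and the refreshed jump rules land atomically and the host is never left between a flushed and a repopulated ruleset. Reproduced from the logged task:

    # apply flushes, new rules, and jump updates in one atomic nft transaction
    set -o pipefail
    cat /etc/nftables/edpm-flushes.nft \
        /etc/nftables/edpm-rules.nft \
        /etc/nftables/edpm-update-jumps.nft | nft -f -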
Oct 11 01:39:12 compute-0 sudo[186132]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-trvaaxvwsyjxirgoelpdpujifycuaohx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146751.4935298-967-106111916502203/AnsiballZ_file.py'
Oct 11 01:39:12 compute-0 sudo[186132]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:12 compute-0 python3.9[186134]: ansible-ansible.builtin.file Invoked with path=/etc/nftables/edpm-rules.nft.changed state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:39:12 compute-0 sudo[186132]: pam_unix(sudo:session): session closed for user root
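Taken together, the stat at 01:39:09, the pipeline at 01:39:11, and the file removal at 01:39:12 form a change-sentinel pattern: an earlier task drops /etc/nftables/edpm-rules.nft.changed when it rewrites the rule files, the reload runs only while that sentinel exists, and deleting it afterwards lets unchanged re-runs skip the reload. A minimal shell rendering of the logic inferred from these tasks:

    # sentinel pattern inferred from the logged task sequence
    if [ -e /etc/nftables/edpm-rules.nft.changed ]; then
        cat /etc/nftables/edpm-flushes.nft /etc/nftables/edpm-rules.nft \
            /etc/nftables/edpm-update-jumps.nft | nft -f -
        rm -f /etc/nftables/edpm-rules.nft.changed
    fi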
Oct 11 01:39:12 compute-0 sshd-session[166965]: Connection closed by 192.168.122.30 port 50914
Oct 11 01:39:12 compute-0 sshd-session[166962]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:39:12 compute-0 systemd-logind[804]: Session 23 logged out. Waiting for processes to exit.
Oct 11 01:39:12 compute-0 systemd[1]: session-23.scope: Deactivated successfully.
Oct 11 01:39:12 compute-0 systemd[1]: session-23.scope: Consumed 2min 19.666s CPU time.
Oct 11 01:39:12 compute-0 systemd-logind[804]: Removed session 23.
Oct 11 01:39:18 compute-0 sshd-session[186159]: Accepted publickey for zuul from 192.168.122.30 port 39846 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:39:18 compute-0 systemd-logind[804]: New session 24 of user zuul.
Oct 11 01:39:18 compute-0 systemd[1]: Started Session 24 of User zuul.
Oct 11 01:39:18 compute-0 sshd-session[186159]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:39:19 compute-0 python3.9[186313]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:39:21 compute-0 sudo[186467]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gqsjusbaydhipacmklfgjrujahyoesyr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146760.6243072-34-71916603875657/AnsiballZ_systemd.py'
Oct 11 01:39:21 compute-0 sudo[186467]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:21 compute-0 python3.9[186469]: ansible-ansible.builtin.systemd Invoked with name=rsyslog daemon_reload=False daemon_reexec=False scope=system no_block=False state=None enabled=None force=None masked=None
Oct 11 01:39:21 compute-0 sudo[186467]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:22 compute-0 sudo[186620]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fmrhneyaelwcegxzmkqkgxpfmdkjgnqy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146762.236265-42-53841151830244/AnsiballZ_setup.py'
Oct 11 01:39:22 compute-0 sudo[186620]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:23 compute-0 python3.9[186622]: ansible-ansible.legacy.setup Invoked with filter=['ansible_pkg_mgr'] gather_subset=['!all'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:39:23 compute-0 sudo[186620]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:24 compute-0 sudo[186735]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-stuqnxpivvedltrbiuwqfsaxxztqqvka ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146762.236265-42-53841151830244/AnsiballZ_dnf.py'
Oct 11 01:39:24 compute-0 sudo[186735]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:24 compute-0 podman[186678]: 2025-10-11 01:39:24.249937299 +0000 UTC m=+0.129922801 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 01:39:24 compute-0 podman[186679]: 2025-10-11 01:39:24.262350902 +0000 UTC m=+0.137209357 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.expose-services=, distribution-scope=public, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, version=9.6, architecture=x86_64, maintainer=Red Hat, Inc., managed_by=edpm_ansible, com.redhat.component=ubi9-minimal-container, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, name=ubi9-minimal, release=1755695350, io.openshift.tags=minimal rhel9, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, url=https://catalog.redhat.com/en/search?searchType=containers, io.buildah.version=1.33.7, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_id=edpm, build-date=2025-08-20T13:12:41, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=openstack_network_exporter, vendor=Red Hat, Inc.)
Oct 11 01:39:24 compute-0 python3.9[186743]: ansible-ansible.legacy.dnf Invoked with name=['rsyslog-openssl'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
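With the logged arguments (name=['rsyslog-openssl'], state=present), the ansible dnf task amounts to an ordinary package install; rsyslog-openssl supplies the TLS network stream driver that the rsyslog forwarding being configured around it depends on.

    # equivalent package transaction to the logged ansible-ansible.legacy.dnf call
    dnf install -y rsyslog-openssl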
Oct 11 01:39:29 compute-0 podman[157119]: time="2025-10-11T01:39:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:39:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:39:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 18533 "" "Go-http-client/1.1"
Oct 11 01:39:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:39:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 2991 "" "Go-http-client/1.1"
Oct 11 01:39:30 compute-0 systemd[1]: Starting PackageKit Daemon...
Oct 11 01:39:30 compute-0 PackageKit[186757]: daemon start
Oct 11 01:39:30 compute-0 systemd[1]: Started PackageKit Daemon.
Oct 11 01:39:31 compute-0 sudo[186735]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:31 compute-0 openstack_network_exporter[159265]: ERROR   01:39:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:39:31 compute-0 openstack_network_exporter[159265]: ERROR   01:39:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:39:31 compute-0 openstack_network_exporter[159265]: ERROR   01:39:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:39:31 compute-0 openstack_network_exporter[159265]: ERROR   01:39:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:39:31 compute-0 openstack_network_exporter[159265]: ERROR   01:39:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
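These recurring exporter errors indicate the expected OVS/OVN control sockets are absent: ovn-northd is a control-plane daemon and does not run on a compute node, and the dpif-netdev/* commands only answer when a userspace (netdev/PMD) datapath exists. The calls the exporter is attempting can be reproduced from the host for debugging, e.g.:

  ovs-appctl -t ovsdb-server ovsdb-server/list-dbs
  ovs-appctl -t ovs-vswitchd dpif-netdev/pmd-perf-show   # needs a netdev datapath
  ovn-appctl -t ovn-northd status                        # only meaningful where northd runs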
Oct 11 01:39:32 compute-0 podman[186878]: 2025-10-11 01:39:32.255912842 +0000 UTC m=+0.140071287 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_id=edpm)
Oct 11 01:39:32 compute-0 sudo[186930]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zpavpivbjixmlffbgegtbmrxloscgenz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146771.5018327-54-47032263518495/AnsiballZ_stat.py'
Oct 11 01:39:32 compute-0 sudo[186930]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:32 compute-0 python3.9[186932]: ansible-ansible.legacy.stat Invoked with path=/etc/pki/rsyslog/ca-openshift.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:39:32 compute-0 sudo[186930]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:33 compute-0 sudo[187053]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bifmjttvfaaxzmjaapohbaunkkdvdjts ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146771.5018327-54-47032263518495/AnsiballZ_copy.py'
Oct 11 01:39:33 compute-0 sudo[187053]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:33 compute-0 python3.9[187055]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/rsyslog/ca-openshift.crt mode=0644 remote_src=False src=/home/zuul/.ansible/tmp/ansible-tmp-1760146771.5018327-54-47032263518495/.source.crt _original_basename=ca-openshift.crt follow=False checksum=1d88bab26da5c85710a770c705f3555781bf2a38 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:39:33 compute-0 sudo[187053]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:34 compute-0 podman[187132]: 2025-10-11 01:39:34.258451945 +0000 UTC m=+0.149981986 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.display-name=Red Hat Universal Base Image 9, release=1214.1726694543, maintainer=Red Hat, Inc., managed_by=edpm_ansible, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.buildah.version=1.29.0, summary=Provides the latest release of Red Hat Universal Base Image 9., version=9.4, architecture=x86_64, io.openshift.expose-services=, vcs-type=git, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, release-0.7.12=, vendor=Red Hat, Inc., name=ubi9, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, com.redhat.component=ubi9-container, container_name=kepler, io.openshift.tags=base rhel9, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 01:39:34 compute-0 sudo[187225]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-phsltpuyqhmnjnvlwisdrbgyjllnuuqg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146773.8425345-69-89540976475977/AnsiballZ_file.py'
Oct 11 01:39:34 compute-0 sudo[187225]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:34 compute-0 python3.9[187227]: ansible-ansible.builtin.file Invoked with mode=0755 path=/etc/rsyslog.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:39:34 compute-0 sudo[187225]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:35 compute-0 sudo[187377]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xtpbeiryfhdyfigsblakdxviyyifpfap ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146775.3287842-77-79430054822825/AnsiballZ_stat.py'
Oct 11 01:39:35 compute-0 sudo[187377]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:36 compute-0 python3.9[187379]: ansible-ansible.legacy.stat Invoked with path=/etc/rsyslog.d/10-telemetry.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:39:36 compute-0 sudo[187377]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:36 compute-0 sudo[187500]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wrvijzrltsoctnygqfhcjyhnhpgpchbn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146775.3287842-77-79430054822825/AnsiballZ_copy.py'
Oct 11 01:39:36 compute-0 sudo[187500]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:37 compute-0 python3.9[187502]: ansible-ansible.legacy.copy Invoked with dest=/etc/rsyslog.d/10-telemetry.conf mode=0644 remote_src=False src=/home/zuul/.ansible/tmp/ansible-tmp-1760146775.3287842-77-79430054822825/.source.conf _original_basename=10-telemetry.conf follow=False checksum=76865d9dd4bf9cd322a47065c046bcac194645ab backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:39:37 compute-0 sudo[187500]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:37 compute-0 podman[187626]: 2025-10-11 01:39:37.913620441 +0000 UTC m=+0.116240060 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:39:37 compute-0 sudo[187684]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hzxizrfmmkmgrmrcfejuhlvmzehokrww ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760146777.3527043-92-71355764159973/AnsiballZ_systemd.py'
Oct 11 01:39:37 compute-0 sudo[187684]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:37 compute-0 podman[187627]: 2025-10-11 01:39:37.978320308 +0000 UTC m=+0.180608794 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, container_name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible)
Oct 11 01:39:38 compute-0 python3.9[187696]: ansible-ansible.builtin.systemd Invoked with name=rsyslog.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:39:38 compute-0 systemd[1]: Stopping System Logging Service...
Oct 11 01:39:38 compute-0 rsyslogd[998]: [origin software="rsyslogd" swVersion="8.2506.0-2.el9" x-pid="998" x-info="https://www.rsyslog.com"] exiting on signal 15.
Oct 11 01:39:38 compute-0 systemd[1]: rsyslog.service: Deactivated successfully.
Oct 11 01:39:38 compute-0 systemd[1]: Stopped System Logging Service.
Oct 11 01:39:38 compute-0 systemd[1]: rsyslog.service: Consumed 2.480s CPU time, 5.4M memory peak, read 0B from disk, written 4.1M to disk.
Oct 11 01:39:38 compute-0 systemd[1]: Starting System Logging Service...
Oct 11 01:39:38 compute-0 rsyslogd[187706]: [origin software="rsyslogd" swVersion="8.2506.0-2.el9" x-pid="187706" x-info="https://www.rsyslog.com"] start
Oct 11 01:39:38 compute-0 systemd[1]: Started System Logging Service.
Oct 11 01:39:38 compute-0 rsyslogd[187706]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 01:39:38 compute-0 rsyslogd[187706]: Warning: Certificate file is not set [v8.2506.0-2.el9 try https://www.rsyslog.com/e/2330 ]
Oct 11 01:39:38 compute-0 rsyslogd[187706]: Warning: Key file is not set [v8.2506.0-2.el9 try https://www.rsyslog.com/e/2331 ]
Oct 11 01:39:38 compute-0 rsyslogd[187706]: nsd_ossl: TLS Connection initiated with remote syslog server '172.17.0.80'. [v8.2506.0-2.el9]
Oct 11 01:39:38 compute-0 sudo[187684]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:38 compute-0 rsyslogd[187706]: nsd_ossl: Information, no shared curve between syslog client '172.17.0.80' and server [v8.2506.0-2.el9]
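The restart above loads the freshly copied /etc/rsyslog.d/10-telemetry.conf. Its contents are not captured in the log, but the unset certificate/key warnings together with the outbound TLS session to 172.17.0.80 fit an omfwd action using the ossl netstream driver with CA-only validation via the ca-openshift.crt installed earlier; a hypothetical reconstruction (the target port is a guess):

  cat > /etc/rsyslog.d/10-telemetry.conf <<'EOF'
  # hypothetical sketch; the deployed file is not shown in this log
  global(DefaultNetstreamDriver="ossl"
         DefaultNetstreamDriverCAFile="/etc/pki/rsyslog/ca-openshift.crt")
  *.* action(type="omfwd" target="172.17.0.80" port="6514" protocol="tcp"
             StreamDriver="ossl" StreamDriverMode="1"
             StreamDriverAuthMode="x509/certvalid")
  EOF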
Oct 11 01:39:39 compute-0 sshd-session[186162]: Connection closed by 192.168.122.30 port 39846
Oct 11 01:39:39 compute-0 sshd-session[186159]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:39:39 compute-0 systemd[1]: session-24.scope: Deactivated successfully.
Oct 11 01:39:39 compute-0 systemd-logind[804]: Session 24 logged out. Waiting for processes to exit.
Oct 11 01:39:39 compute-0 systemd[1]: session-24.scope: Consumed 16.751s CPU time.
Oct 11 01:39:39 compute-0 systemd-logind[804]: Removed session 24.
Oct 11 01:39:41 compute-0 podman[187735]: 2025-10-11 01:39:41.256865607 +0000 UTC m=+0.137396269 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_compute, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, managed_by=edpm_ansible, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 01:39:47 compute-0 sshd-session[187754]: Accepted publickey for zuul from 38.102.83.70 port 42574 ssh2: RSA SHA256:sxgyqRujXfGvMV2Eq7ZlGcFGCGFr/dtz6dk2ZJwy3W4
Oct 11 01:39:47 compute-0 systemd-logind[804]: New session 25 of user zuul.
Oct 11 01:39:47 compute-0 systemd[1]: Started Session 25 of User zuul.
Oct 11 01:39:47 compute-0 sshd-session[187754]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:39:47 compute-0 sudo[187830]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-whsjtahdamicxgmospxtpdtuspoaedjr ; /usr/bin/python3'
Oct 11 01:39:47 compute-0 sudo[187830]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:47 compute-0 useradd[187834]: new group: name=ceph-admin, GID=42478
Oct 11 01:39:47 compute-0 useradd[187834]: new user: name=ceph-admin, UID=42477, GID=42478, home=/home/ceph-admin, shell=/bin/bash, from=none
Oct 11 01:39:48 compute-0 sudo[187830]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:48 compute-0 sudo[187916]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pbxjobhtpbvckfzanxilrzezdiniduys ; /usr/bin/python3'
Oct 11 01:39:48 compute-0 sudo[187916]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:48 compute-0 sudo[187916]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:49 compute-0 sudo[187989]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hdpoogxdefobzrmtkjjicwtbhkocfjhi ; /usr/bin/python3'
Oct 11 01:39:49 compute-0 sudo[187989]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:49 compute-0 sudo[187989]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:49 compute-0 sudo[188040]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ljxwvvxuvpwpchiivaavmnuesrldwjbc ; /usr/bin/python3'
Oct 11 01:39:49 compute-0 sudo[188040]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:50 compute-0 sudo[188040]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:50 compute-0 sudo[188066]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iglxslhbltvhkcstxbndxyatqiaipvek ; /usr/bin/python3'
Oct 11 01:39:50 compute-0 sudo[188066]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:50 compute-0 sudo[188066]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:50 compute-0 sudo[188092]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eobhkowimrqyxqxljjxhwmcungwpicwr ; /usr/bin/python3'
Oct 11 01:39:50 compute-0 sudo[188092]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:50 compute-0 sudo[188092]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:51 compute-0 sudo[188118]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gxzewprglkbgskevaignbltjwooehdfz ; /usr/bin/python3'
Oct 11 01:39:51 compute-0 sudo[188118]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:51 compute-0 sudo[188118]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:52 compute-0 sudo[188196]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-flxxrbstxztvhuwrtmxugsgyzwvifygd ; /usr/bin/python3'
Oct 11 01:39:52 compute-0 sudo[188196]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:52 compute-0 sudo[188196]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:52 compute-0 sudo[188269]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-outhegvnhokdzpydoihbpjrsbiluhzin ; /usr/bin/python3'
Oct 11 01:39:52 compute-0 sudo[188269]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:52 compute-0 sudo[188269]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:53 compute-0 sudo[188371]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bqyoyxudymzizbqjkabsftzplzwpgfrg ; /usr/bin/python3'
Oct 11 01:39:53 compute-0 sudo[188371]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:53 compute-0 sudo[188371]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:54 compute-0 sudo[188444]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gvpijojzueujcttewnkjkheabeaowfna ; /usr/bin/python3'
Oct 11 01:39:54 compute-0 sudo[188444]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:54 compute-0 sudo[188444]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:54 compute-0 podman[188447]: 2025-10-11 01:39:54.455339096 +0000 UTC m=+0.127522404 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 01:39:54 compute-0 podman[188448]: 2025-10-11 01:39:54.455596474 +0000 UTC m=+0.126986367 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vcs-type=git, build-date=2025-08-20T13:12:41, com.redhat.component=ubi9-minimal-container, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, managed_by=edpm_ansible, version=9.6, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, config_id=edpm, architecture=x86_64, io.openshift.tags=minimal rhel9, name=ubi9-minimal, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, io.openshift.expose-services=, container_name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 01:39:54 compute-0 sudo[188537]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bjeonxprrvoemfkikbyogjofxycjojae ; /usr/bin/python3'
Oct 11 01:39:54 compute-0 sudo[188537]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:55 compute-0 python3[188539]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:39:56 compute-0 sudo[188537]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:57 compute-0 sudo[188641]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mlnvtlvynyyciygtvgqexhkzwzgysmrm ; /usr/bin/python3'
Oct 11 01:39:57 compute-0 sudo[188641]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:57 compute-0 python3[188643]: ansible-ansible.legacy.dnf Invoked with name=['util-linux', 'lvm2', 'jq', 'podman'] state=present allow_downgrade=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 allowerasing=False nobest=False use_backend=auto conf_file=None disable_excludes=None download_dir=None list=None releasever=None
Oct 11 01:39:58 compute-0 sudo[188641]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:59 compute-0 sudo[188668]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iwjgoxttxpkkqydpjnxthtwzxukgxldf ; /usr/bin/python3'
Oct 11 01:39:59 compute-0 sudo[188668]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:59 compute-0 python3[188670]: ansible-ansible.builtin.stat Invoked with path=/dev/loop3 follow=False get_md5=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:39:59 compute-0 sudo[188668]: pam_unix(sudo:session): session closed for user root
Oct 11 01:39:59 compute-0 sudo[188694]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lmkiocjbqmrnzfqobnfgjtrsdcreqcds ; /usr/bin/python3'
Oct 11 01:39:59 compute-0 sudo[188694]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:39:59 compute-0 python3[188696]: ansible-ansible.legacy.command Invoked with _raw_params=dd if=/dev/zero of=/var/lib/ceph-osd-0.img bs=1 count=0 seek=20G
                                           losetup /dev/loop3 /var/lib/ceph-osd-0.img
                                           lsblk _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:39:59 compute-0 podman[157119]: time="2025-10-11T01:39:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:39:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:39:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 18533 "" "Go-http-client/1.1"
Oct 11 01:39:59 compute-0 kernel: loop: module loaded
Oct 11 01:39:59 compute-0 kernel: loop3: detected capacity change from 0 to 41943040
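The loop module loads on first use, and the kernel reports capacity in 512-byte sectors: 41943040 * 512 B = 21474836480 B = 20 GiB, matching the sparse file just created (dd with bs=1 count=0 seek=20G writes no data and only sets the file length). A quick check in bash:

  echo $((41943040 * 512 / 1024**3))   # -> 20 (GiB)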
Oct 11 01:39:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:39:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 2993 "" "Go-http-client/1.1"
Oct 11 01:39:59 compute-0 sudo[188694]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:00 compute-0 sudo[188729]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qiacrgmuwcfjrhdtxvhwbivrheubzzbk ; /usr/bin/python3'
Oct 11 01:40:00 compute-0 sudo[188729]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:00 compute-0 python3[188731]: ansible-ansible.legacy.command Invoked with _raw_params=pvcreate /dev/loop3
                                           vgcreate ceph_vg0 /dev/loop3
                                           lvcreate -n ceph_lv0 -l +100%FREE ceph_vg0
                                           lvs _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:40:00 compute-0 lvm[188734]: PV /dev/loop3 not used.
Oct 11 01:40:00 compute-0 lvm[188736]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Oct 11 01:40:00 compute-0 systemd[1]: Started /usr/sbin/lvm vgchange -aay --autoactivation event ceph_vg0.
Oct 11 01:40:00 compute-0 lvm[188743]:   1 logical volume(s) in volume group "ceph_vg0" now active
Oct 11 01:40:00 compute-0 lvm[188746]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Oct 11 01:40:00 compute-0 lvm[188746]: VG ceph_vg0 finished
Oct 11 01:40:00 compute-0 systemd[1]: lvm-activate-ceph_vg0.service: Deactivated successfully.
Oct 11 01:40:00 compute-0 sudo[188729]: pam_unix(sudo:session): session closed for user root
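The two command batches above assemble a loop-backed LVM stack for the first OSD; the lvm-activate-ceph_vg0.service lines are systemd's event-driven autoactivation reacting to the new PV. A minimal reproduction with the names as logged (run as root):

  dd if=/dev/zero of=/var/lib/ceph-osd-0.img bs=1 count=0 seek=20G
  losetup /dev/loop3 /var/lib/ceph-osd-0.img
  pvcreate /dev/loop3
  vgcreate ceph_vg0 /dev/loop3
  lvcreate -n ceph_lv0 -l +100%FREE ceph_vg0
  lvs ceph_vg0

The same pattern repeats below for ceph_vg1 on /dev/loop4 and ceph_vg2 on /dev/loop5.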
Oct 11 01:40:01 compute-0 sudo[188822]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jvurhdcaqggfyefjvkrsuawggirjsrlx ; /usr/bin/python3'
Oct 11 01:40:01 compute-0 sudo[188822]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:01 compute-0 python3[188824]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/ceph-osd-losetup-0.service follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 01:40:01 compute-0 sudo[188822]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:01 compute-0 openstack_network_exporter[159265]: ERROR   01:40:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:40:01 compute-0 openstack_network_exporter[159265]: ERROR   01:40:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:40:01 compute-0 openstack_network_exporter[159265]: ERROR   01:40:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:40:01 compute-0 openstack_network_exporter[159265]: ERROR   01:40:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:40:01 compute-0 openstack_network_exporter[159265]: ERROR   01:40:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:40:01 compute-0 sudo[188895]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tzrwonfpsppcoujhhovrhmrqxxorweah ; /usr/bin/python3'
Oct 11 01:40:01 compute-0 sudo[188895]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:01 compute-0 python3[188897]: ansible-ansible.legacy.copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760146800.8469667-33524-39022645591484/source dest=/etc/systemd/system/ceph-osd-losetup-0.service mode=0644 force=True follow=False _original_basename=ceph-osd-losetup.service.j2 checksum=427b1db064a970126b729b07acf99fa7d0eecb9c backup=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:40:02 compute-0 sudo[188895]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:02 compute-0 sudo[188945]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ttnmwzitcffbskanmzsnwwqdhtucczxi ; /usr/bin/python3'
Oct 11 01:40:02 compute-0 sudo[188945]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:02 compute-0 podman[188947]: 2025-10-11 01:40:02.8361648 +0000 UTC m=+0.155277883 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true)
Oct 11 01:40:02 compute-0 python3[188948]: ansible-ansible.builtin.systemd Invoked with state=started enabled=True name=ceph-osd-losetup-0.service daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:40:03 compute-0 systemd[1]: Reloading.
Oct 11 01:40:03 compute-0 systemd-rc-local-generator[188995]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:40:03 compute-0 systemd-sysv-generator[188999]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:40:03 compute-0 systemd[1]: Starting Ceph OSD losetup...
Oct 11 01:40:03 compute-0 bash[189006]: /dev/loop3: [64513]:4496220 (/var/lib/ceph-osd-0.img)
Oct 11 01:40:03 compute-0 systemd[1]: Finished Ceph OSD losetup.
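The unit installed above persists the loop attachment across reboots. Its body is not logged, but the bash[189006] line is losetup's status output for an already-attached device, which points at a guard of the form "attach unless already attached". A hypothetical reconstruction under that assumption:

  cat > /etc/systemd/system/ceph-osd-losetup-0.service <<'EOF'
  # hypothetical sketch; the deployed unit's content is not shown in this log
  [Unit]
  Description=Ceph OSD losetup
  After=local-fs.target

  [Service]
  Type=oneshot
  RemainAfterExit=yes
  ExecStart=/bin/bash -c 'losetup /dev/loop3 || losetup /dev/loop3 /var/lib/ceph-osd-0.img'

  [Install]
  WantedBy=multi-user.target
  EOF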
Oct 11 01:40:03 compute-0 sudo[188945]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:03 compute-0 lvm[189008]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Oct 11 01:40:03 compute-0 lvm[189008]: VG ceph_vg0 finished
Oct 11 01:40:03 compute-0 sudo[189032]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jrjxdyqmorslokvqrrotkwslbvjpdvdv ; /usr/bin/python3'
Oct 11 01:40:03 compute-0 sudo[189032]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:04 compute-0 python3[189034]: ansible-ansible.legacy.dnf Invoked with name=['util-linux', 'lvm2', 'jq', 'podman'] state=present allow_downgrade=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 allowerasing=False nobest=False use_backend=auto conf_file=None disable_excludes=None download_dir=None list=None releasever=None
Oct 11 01:40:05 compute-0 podman[189036]: 2025-10-11 01:40:05.24813222 +0000 UTC m=+0.137834311 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vcs-type=git, architecture=x86_64, io.openshift.expose-services=, release=1214.1726694543, io.buildah.version=1.29.0, config_id=edpm, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release-0.7.12=, vendor=Red Hat, Inc., build-date=2024-09-18T21:23:30, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, managed_by=edpm_ansible, version=9.4, container_name=kepler, io.openshift.tags=base rhel9, summary=Provides the latest release of Red Hat Universal Base Image 9., io.k8s.display-name=Red Hat Universal Base Image 9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, name=ubi9, maintainer=Red Hat, Inc., com.redhat.component=ubi9-container, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543)
Oct 11 01:40:05 compute-0 sudo[189032]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:05 compute-0 sudo[189079]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-icealmkmhfrjfzsstthpdbfzdwjrjqtd ; /usr/bin/python3'
Oct 11 01:40:05 compute-0 sudo[189079]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:05 compute-0 python3[189081]: ansible-ansible.builtin.stat Invoked with path=/dev/loop4 follow=False get_md5=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:40:05 compute-0 sudo[189079]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:06 compute-0 sudo[189105]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mzzglfbbalfwfwlrvdtrlhdkekmgdoyo ; /usr/bin/python3'
Oct 11 01:40:06 compute-0 sudo[189105]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:06 compute-0 python3[189107]: ansible-ansible.legacy.command Invoked with _raw_params=dd if=/dev/zero of=/var/lib/ceph-osd-1.img bs=1 count=0 seek=20G
                                           losetup /dev/loop4 /var/lib/ceph-osd-1.img
                                           lsblk _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:40:06 compute-0 kernel: loop4: detected capacity change from 0 to 41943040
Oct 11 01:40:06 compute-0 sudo[189105]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:06 compute-0 sudo[189136]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ipioswomvejynktvjqznoqqnfmqezhwf ; /usr/bin/python3'
Oct 11 01:40:06 compute-0 sudo[189136]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:06 compute-0 python3[189138]: ansible-ansible.legacy.command Invoked with _raw_params=pvcreate /dev/loop4
                                           vgcreate ceph_vg1 /dev/loop4
                                           lvcreate -n ceph_lv1 -l +100%FREE ceph_vg1
                                           lvs _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:40:06 compute-0 lvm[189143]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Oct 11 01:40:06 compute-0 systemd[1]: Started /usr/sbin/lvm vgchange -aay --autoactivation event ceph_vg1.
Oct 11 01:40:07 compute-0 lvm[189150]:   1 logical volume(s) in volume group "ceph_vg1" now active
Oct 11 01:40:07 compute-0 lvm[189155]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Oct 11 01:40:07 compute-0 lvm[189155]: VG ceph_vg1 finished
Oct 11 01:40:07 compute-0 systemd[1]: lvm-activate-ceph_vg1.service: Deactivated successfully.
Oct 11 01:40:07 compute-0 sudo[189136]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:07 compute-0 sudo[189231]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oyjhuvizumozkcxczchwhvmvonqjtvsy ; /usr/bin/python3'
Oct 11 01:40:07 compute-0 sudo[189231]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:07 compute-0 python3[189233]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/ceph-osd-losetup-1.service follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 01:40:07 compute-0 sudo[189231]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:08 compute-0 podman[189257]: 2025-10-11 01:40:08.280122094 +0000 UTC m=+0.160677729 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 01:40:08 compute-0 podman[189261]: 2025-10-11 01:40:08.305166465 +0000 UTC m=+0.189481121 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller)
Oct 11 01:40:08 compute-0 sudo[189350]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tqklsatcrlswepueajaesbijqihqtfyg ; /usr/bin/python3'
Oct 11 01:40:08 compute-0 sudo[189350]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:08 compute-0 python3[189352]: ansible-ansible.legacy.copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760146807.353523-33551-90356164748176/source dest=/etc/systemd/system/ceph-osd-losetup-1.service mode=0644 force=True follow=False _original_basename=ceph-osd-losetup.service.j2 checksum=19612168ea279db4171b94ee1f8625de1ec44b58 backup=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:40:08 compute-0 sudo[189350]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:08 compute-0 sudo[189400]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rpeaoeypodlexbccrwrhbjjlryhgonsx ; /usr/bin/python3'
Oct 11 01:40:08 compute-0 sudo[189400]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:09 compute-0 python3[189402]: ansible-ansible.builtin.systemd Invoked with state=started enabled=True name=ceph-osd-losetup-1.service daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:40:09 compute-0 systemd[1]: Reloading.
Oct 11 01:40:09 compute-0 systemd-rc-local-generator[189430]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:40:09 compute-0 systemd-sysv-generator[189433]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:40:09 compute-0 systemd[1]: Starting Ceph OSD losetup...
Oct 11 01:40:09 compute-0 bash[189444]: /dev/loop4: [64513]:4496222 (/var/lib/ceph-osd-1.img)
Oct 11 01:40:09 compute-0 systemd[1]: Finished Ceph OSD losetup.
Oct 11 01:40:09 compute-0 sudo[189400]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:09 compute-0 lvm[189446]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Oct 11 01:40:09 compute-0 lvm[189446]: VG ceph_vg1 finished
Oct 11 01:40:10 compute-0 sudo[189470]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kotwcwaqgiszrtsuhjvnaswfclnccuye ; /usr/bin/python3'
Oct 11 01:40:10 compute-0 sudo[189470]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:10 compute-0 python3[189472]: ansible-ansible.legacy.dnf Invoked with name=['util-linux', 'lvm2', 'jq', 'podman'] state=present allow_downgrade=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 allowerasing=False nobest=False use_backend=auto conf_file=None disable_excludes=None download_dir=None list=None releasever=None
Oct 11 01:40:11 compute-0 sudo[189470]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:11 compute-0 sudo[189497]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xktpculotcshtzigsrfqdvpqilbiindu ; /usr/bin/python3'
Oct 11 01:40:11 compute-0 sudo[189497]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:12 compute-0 python3[189500]: ansible-ansible.builtin.stat Invoked with path=/dev/loop5 follow=False get_md5=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:40:12 compute-0 sudo[189497]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:12 compute-0 podman[189499]: 2025-10-11 01:40:12.076159151 +0000 UTC m=+0.161246198 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 01:40:12 compute-0 sudo[189543]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-utcoibbyadqogwyizjcbwjyxqkpyfefr ; /usr/bin/python3'
Oct 11 01:40:12 compute-0 sudo[189543]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:12 compute-0 python3[189545]: ansible-ansible.legacy.command Invoked with _raw_params=dd if=/dev/zero of=/var/lib/ceph-osd-2.img bs=1 count=0 seek=20G
                                           losetup /dev/loop5 /var/lib/ceph-osd-2.img
                                           lsblk _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:40:12 compute-0 kernel: loop5: detected capacity change from 0 to 41943040
Oct 11 01:40:12 compute-0 sudo[189543]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:12 compute-0 sudo[189574]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ihhcjrtdgjeawyfktymtrzvlaxydflpp ; /usr/bin/python3'
Oct 11 01:40:12 compute-0 sudo[189574]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:13 compute-0 python3[189576]: ansible-ansible.legacy.command Invoked with _raw_params=pvcreate /dev/loop5
                                           vgcreate ceph_vg2 /dev/loop5
                                           lvcreate -n ceph_lv2 -l +100%FREE ceph_vg2
                                           lvs _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:40:13 compute-0 lvm[189581]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Oct 11 01:40:13 compute-0 systemd[1]: Started /usr/sbin/lvm vgchange -aay --autoactivation event ceph_vg2.
Oct 11 01:40:13 compute-0 lvm[189584]:   1 logical volume(s) in volume group "ceph_vg2" now active
Oct 11 01:40:13 compute-0 systemd[1]: lvm-activate-ceph_vg2.service: Deactivated successfully.
Oct 11 01:40:13 compute-0 lvm[189592]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Oct 11 01:40:13 compute-0 lvm[189592]: VG ceph_vg2 finished
Oct 11 01:40:13 compute-0 sudo[189574]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:13 compute-0 sudo[189668]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wwmialzlzctdfvtgbibnlalcvvszvzwq ; /usr/bin/python3'
Oct 11 01:40:13 compute-0 sudo[189668]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:14 compute-0 python3[189670]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/ceph-osd-losetup-2.service follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 01:40:14 compute-0 sudo[189668]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:14 compute-0 sudo[189741]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oxyhwnreosiwkrbarsurxrqteokdutsi ; /usr/bin/python3'
Oct 11 01:40:14 compute-0 sudo[189741]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:14 compute-0 python3[189743]: ansible-ansible.legacy.copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760146813.638872-33578-225962025470367/source dest=/etc/systemd/system/ceph-osd-losetup-2.service mode=0644 force=True follow=False _original_basename=ceph-osd-losetup.service.j2 checksum=4c5b1bc5693c499ffe2edaa97d63f5df7075d845 backup=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:40:14 compute-0 sudo[189741]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:15 compute-0 sudo[189791]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mkujfdkgsjnmqhlnyhmtukuvnhubjyuy ; /usr/bin/python3'
Oct 11 01:40:15 compute-0 sudo[189791]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:15 compute-0 python3[189793]: ansible-ansible.builtin.systemd Invoked with state=started enabled=True name=ceph-osd-losetup-2.service daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:40:15 compute-0 systemd[1]: Reloading.
Oct 11 01:40:15 compute-0 systemd-rc-local-generator[189822]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:40:15 compute-0 systemd-sysv-generator[189826]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:40:15 compute-0 systemd[1]: Starting Ceph OSD losetup...
Oct 11 01:40:15 compute-0 bash[189833]: /dev/loop5: [64513]:4496223 (/var/lib/ceph-osd-2.img)
Oct 11 01:40:15 compute-0 systemd[1]: Finished Ceph OSD losetup.
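The ceph-osd-losetup-2.service unit deployed above re-attaches the image file at boot, since loop mappings do not survive reboots. Its content is not captured in this log; a hypothetical sketch consistent with the unit name and the `losetup` output above:

    # /etc/systemd/system/ceph-osd-losetup-2.service (reconstructed, hypothetical)
    [Unit]
    Description=Ceph OSD losetup

    [Service]
    Type=oneshot
    RemainAfterExit=yes
    # Attach the backing file if not already attached, then print the mapping,
    # producing output like "/dev/loop5: [64513]:4496223 (...)" seen above.
    ExecStart=/bin/bash -c '/usr/sbin/losetup /dev/loop5 /var/lib/ceph-osd-2.img || true; /usr/sbin/losetup -j /var/lib/ceph-osd-2.img'

    [Install]
    WantedBy=multi-user.target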
Oct 11 01:40:16 compute-0 lvm[189834]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Oct 11 01:40:16 compute-0 lvm[189834]: VG ceph_vg2 finished
Oct 11 01:40:16 compute-0 sudo[189791]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:18 compute-0 python3[189858]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'network'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:40:20 compute-0 sudo[189958]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qvzxzxxdbpimkctagebahktxjijlwkca ; /usr/bin/python3'
Oct 11 01:40:20 compute-0 sudo[189958]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:21 compute-0 python3[189960]: ansible-ansible.legacy.dnf Invoked with name=['cephadm'] state=present allow_downgrade=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 allowerasing=False nobest=False use_backend=auto conf_file=None disable_excludes=None download_dir=None list=None releasever=None
Oct 11 01:40:22 compute-0 groupadd[189966]: group added to /etc/group: name=cephadm, GID=990
Oct 11 01:40:22 compute-0 groupadd[189966]: group added to /etc/gshadow: name=cephadm
Oct 11 01:40:22 compute-0 groupadd[189966]: new group: name=cephadm, GID=990
Oct 11 01:40:22 compute-0 useradd[189973]: new user: name=cephadm, UID=990, GID=990, home=/var/lib/cephadm, shell=/bin/bash, from=none
Oct 11 01:40:22 compute-0 systemd[1]: Started /usr/bin/systemctl start man-db-cache-update.
Oct 11 01:40:22 compute-0 systemd[1]: Starting man-db-cache-update.service...
Oct 11 01:40:23 compute-0 sudo[189958]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:23 compute-0 systemd[1]: man-db-cache-update.service: Deactivated successfully.
Oct 11 01:40:23 compute-0 systemd[1]: Finished man-db-cache-update.service.
Oct 11 01:40:23 compute-0 systemd[1]: run-rdfc85878d0c74072b1f224bd330f18c0.service: Deactivated successfully.
Oct 11 01:40:23 compute-0 sudo[190090]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wdkjyjhvorqcmusncjcibfiqbzvymsyt ; /usr/bin/python3'
Oct 11 01:40:23 compute-0 sudo[190090]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:23 compute-0 python3[190092]: ansible-ansible.builtin.stat Invoked with path=/usr/sbin/cephadm follow=False get_md5=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:40:23 compute-0 sudo[190090]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:24 compute-0 sudo[190118]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bfoohemcsutvvlpxiiddyzgwzoeozeuz ; /usr/bin/python3'
Oct 11 01:40:24 compute-0 sudo[190118]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:24 compute-0 python3[190120]: ansible-ansible.legacy.command Invoked with _raw_params=/usr/sbin/cephadm ls --no-detail _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:40:24 compute-0 sudo[190118]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:25 compute-0 podman[190159]: 2025-10-11 01:40:25.248520981 +0000 UTC m=+0.131198019 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, version=9.6, com.redhat.component=ubi9-minimal-container, io.openshift.expose-services=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., managed_by=edpm_ansible, config_id=edpm, vendor=Red Hat, Inc., distribution-scope=public, io.openshift.tags=minimal rhel9, name=ubi9-minimal, architecture=x86_64, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, maintainer=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, io.buildah.version=1.33.7, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, build-date=2025-08-20T13:12:41, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, container_name=openstack_network_exporter, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 01:40:25 compute-0 podman[190158]: 2025-10-11 01:40:25.26296553 +0000 UTC m=+0.146231559 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 01:40:25 compute-0 sudo[190224]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cmtxldrdenohtmersgbhpupxvzgapxcs ; /usr/bin/python3'
Oct 11 01:40:25 compute-0 sudo[190224]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:25 compute-0 python3[190226]: ansible-ansible.builtin.file Invoked with path=/etc/ceph state=directory mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:40:25 compute-0 sudo[190224]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:25 compute-0 sudo[190250]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mvtfkbdvotmzpoukzagrwznvzsdjndpv ; /usr/bin/python3'
Oct 11 01:40:25 compute-0 sudo[190250]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:26 compute-0 python3[190252]: ansible-ansible.builtin.file Invoked with path=/home/ceph-admin/specs owner=ceph-admin group=ceph-admin mode=0755 state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:40:26 compute-0 sudo[190250]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:26 compute-0 sudo[190328]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ybwhepzaobvkrvttdtyhuilrvppuymvy ; /usr/bin/python3'
Oct 11 01:40:26 compute-0 sudo[190328]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:27 compute-0 python3[190330]: ansible-ansible.legacy.stat Invoked with path=/home/ceph-admin/specs/ceph_spec.yaml follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 01:40:27 compute-0 sudo[190328]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:27 compute-0 sudo[190401]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yyjqycxugpcrmxuxppgvkzhwfjdqivlg ; /usr/bin/python3'
Oct 11 01:40:27 compute-0 sudo[190401]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:27 compute-0 python3[190403]: ansible-ansible.legacy.copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760146826.5567255-33725-143732534701411/source dest=/home/ceph-admin/specs/ceph_spec.yaml owner=ceph-admin group=ceph-admin mode=0644 _original_basename=ceph_spec.yml follow=False checksum=bb83c53af4ffd926a3f1eafe26a8be437df6401f backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:40:27 compute-0 sudo[190401]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:28 compute-0 sudo[190503]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bzdmhyhojbxsldazbmasxpddyefqkbrm ; /usr/bin/python3'
Oct 11 01:40:28 compute-0 sudo[190503]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:28 compute-0 python3[190505]: ansible-ansible.legacy.stat Invoked with path=/home/ceph-admin/assimilate_ceph.conf follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 01:40:28 compute-0 sudo[190503]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:29 compute-0 sudo[190576]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vkfgvjdxtyohxbubcubkwomzmdwyowzk ; /usr/bin/python3'
Oct 11 01:40:29 compute-0 sudo[190576]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:29 compute-0 python3[190578]: ansible-ansible.legacy.copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760146828.3324134-33743-220167248608519/source dest=/home/ceph-admin/assimilate_ceph.conf owner=ceph-admin group=ceph-admin mode=0644 _original_basename=initial_ceph.conf follow=False checksum=41828f7c2442fdf376911255e33c12863fc3b1b3 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:40:29 compute-0 sudo[190576]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:29 compute-0 podman[157119]: time="2025-10-11T01:40:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:40:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:40:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 18533 "" "Go-http-client/1.1"
Oct 11 01:40:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:40:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 2992 "" "Go-http-client/1.1"
Oct 11 01:40:29 compute-0 sudo[190626]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dygxawaxgcqqaqkujehcvpgwizfrgtkw ; /usr/bin/python3'
Oct 11 01:40:29 compute-0 sudo[190626]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:30 compute-0 python3[190628]: ansible-ansible.builtin.stat Invoked with path=/home/ceph-admin/.ssh/id_rsa follow=False get_md5=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:40:30 compute-0 sudo[190626]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:30 compute-0 sudo[190654]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-smjyampzybnhjomquqilkpfuxlffaclm ; /usr/bin/python3'
Oct 11 01:40:30 compute-0 sudo[190654]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:30 compute-0 python3[190656]: ansible-ansible.builtin.stat Invoked with path=/home/ceph-admin/.ssh/id_rsa.pub follow=False get_md5=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:40:30 compute-0 sudo[190654]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:30 compute-0 sudo[190682]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ftxeugtxevjlbalseqltlxvnkovvyckn ; /usr/bin/python3'
Oct 11 01:40:30 compute-0 sudo[190682]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:31 compute-0 python3[190684]: ansible-ansible.builtin.stat Invoked with path=/home/ceph-admin/assimilate_ceph.conf follow=False get_md5=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:40:31 compute-0 sudo[190682]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:31 compute-0 openstack_network_exporter[159265]: ERROR   01:40:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:40:31 compute-0 openstack_network_exporter[159265]: ERROR   01:40:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:40:31 compute-0 openstack_network_exporter[159265]: ERROR   01:40:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:40:31 compute-0 openstack_network_exporter[159265]: ERROR   01:40:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:40:31 compute-0 openstack_network_exporter[159265]: ERROR   01:40:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:40:31 compute-0 sudo[190710]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wnqbdxytegnelnekbedbzqvdxndugomp ; /usr/bin/python3'
Oct 11 01:40:31 compute-0 sudo[190710]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:40:31 compute-0 python3[190712]: ansible-ansible.legacy.command Invoked with _raw_params=/usr/sbin/cephadm bootstrap --skip-firewalld --skip-prepare-host --ssh-private-key /home/ceph-admin/.ssh/id_rsa --ssh-public-key /home/ceph-admin/.ssh/id_rsa.pub --ssh-user ceph-admin --allow-fqdn-hostname --output-keyring /etc/ceph/ceph.client.admin.keyring --output-config /etc/ceph/ceph.conf --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config /home/ceph-admin/assimilate_ceph.conf \--single-host-defaults \--skip-monitoring-stack --skip-dashboard --mon-ip 192.168.122.100
                                            _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
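The bootstrap invocation above is collapsed onto a single journal line, and the stray "\--" fragments before --single-host-defaults and --skip-monitoring-stack are most likely the playbook's shell line-continuations carried into the logged command string. Re-wrapped for readability, with the same flags and values taken verbatim from the log:

    /usr/sbin/cephadm bootstrap \
        --skip-firewalld --skip-prepare-host \
        --ssh-private-key /home/ceph-admin/.ssh/id_rsa \
        --ssh-public-key /home/ceph-admin/.ssh/id_rsa.pub \
        --ssh-user ceph-admin \
        --allow-fqdn-hostname \
        --output-keyring /etc/ceph/ceph.client.admin.keyring \
        --output-config /etc/ceph/ceph.conf \
        --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da \
        --config /home/ceph-admin/assimilate_ceph.conf \
        --single-host-defaults \
        --skip-monitoring-stack --skip-dashboard \
        --mon-ip 192.168.122.100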
Oct 11 01:40:32 compute-0 sshd-session[190726]: Accepted publickey for ceph-admin from 192.168.122.100 port 59902 ssh2: RSA SHA256:s3wA1uQmx1ezgfjQez1lP/VjOHwzWTFNHeyPt4wAHzs
Oct 11 01:40:32 compute-0 systemd-logind[804]: New session 26 of user ceph-admin.
Oct 11 01:40:32 compute-0 systemd[1]: Created slice User Slice of UID 42477.
Oct 11 01:40:32 compute-0 systemd[1]: Starting User Runtime Directory /run/user/42477...
Oct 11 01:40:32 compute-0 systemd[1]: Finished User Runtime Directory /run/user/42477.
Oct 11 01:40:32 compute-0 systemd[1]: Starting User Manager for UID 42477...
Oct 11 01:40:32 compute-0 systemd[190730]: pam_unix(systemd-user:session): session opened for user ceph-admin(uid=42477) by ceph-admin(uid=0)
Oct 11 01:40:32 compute-0 systemd[190730]: Queued start job for default target Main User Target.
Oct 11 01:40:32 compute-0 systemd[190730]: Created slice User Application Slice.
Oct 11 01:40:32 compute-0 systemd[190730]: Started Mark boot as successful after the user session has run 2 minutes.
Oct 11 01:40:32 compute-0 systemd[190730]: Started Daily Cleanup of User's Temporary Directories.
Oct 11 01:40:32 compute-0 systemd[190730]: Reached target Paths.
Oct 11 01:40:32 compute-0 systemd[190730]: Reached target Timers.
Oct 11 01:40:32 compute-0 systemd[190730]: Starting D-Bus User Message Bus Socket...
Oct 11 01:40:32 compute-0 systemd[190730]: Starting Create User's Volatile Files and Directories...
Oct 11 01:40:32 compute-0 systemd[190730]: Listening on D-Bus User Message Bus Socket.
Oct 11 01:40:32 compute-0 systemd[190730]: Reached target Sockets.
Oct 11 01:40:32 compute-0 systemd[190730]: Finished Create User's Volatile Files and Directories.
Oct 11 01:40:32 compute-0 systemd[190730]: Reached target Basic System.
Oct 11 01:40:32 compute-0 systemd[190730]: Reached target Main User Target.
Oct 11 01:40:32 compute-0 systemd[190730]: Startup finished in 168ms.
Oct 11 01:40:32 compute-0 systemd[1]: Started User Manager for UID 42477.
Oct 11 01:40:32 compute-0 systemd[1]: Started Session 26 of User ceph-admin.
Oct 11 01:40:32 compute-0 sshd-session[190726]: pam_unix(sshd:session): session opened for user ceph-admin(uid=42477) by ceph-admin(uid=0)
Oct 11 01:40:32 compute-0 sudo[190746]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/echo
Oct 11 01:40:32 compute-0 sudo[190746]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:40:32 compute-0 sudo[190746]: pam_unix(sudo:session): session closed for user root
Oct 11 01:40:32 compute-0 sshd-session[190745]: Received disconnect from 192.168.122.100 port 59902:11: disconnected by user
Oct 11 01:40:32 compute-0 sshd-session[190745]: Disconnected from user ceph-admin 192.168.122.100 port 59902
Oct 11 01:40:32 compute-0 sshd-session[190726]: pam_unix(sshd:session): session closed for user ceph-admin
Oct 11 01:40:32 compute-0 systemd[1]: session-26.scope: Deactivated successfully.
Oct 11 01:40:32 compute-0 systemd-logind[804]: Session 26 logged out. Waiting for processes to exit.
Oct 11 01:40:32 compute-0 systemd-logind[804]: Removed session 26.
Oct 11 01:40:33 compute-0 podman[190797]: 2025-10-11 01:40:33.264906184 +0000 UTC m=+0.147804252 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, tcib_managed=true, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3)
Oct 11 01:40:38 compute-0 podman[190842]: 2025-10-11 01:40:38.625649522 +0000 UTC m=+3.034928027 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.display-name=Red Hat Universal Base Image 9, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, config_id=edpm, release=1214.1726694543, version=9.4, maintainer=Red Hat, Inc., io.openshift.expose-services=, architecture=x86_64, distribution-scope=public, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.buildah.version=1.29.0, name=ubi9, com.redhat.component=ubi9-container, managed_by=edpm_ansible, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, build-date=2024-09-18T21:23:30, release-0.7.12=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc., io.openshift.tags=base rhel9, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git)
Oct 11 01:40:38 compute-0 podman[190862]: 2025-10-11 01:40:38.744760084 +0000 UTC m=+0.078894529 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 01:40:38 compute-0 podman[190863]: 2025-10-11 01:40:38.79621708 +0000 UTC m=+0.118707883 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_controller, io.buildah.version=1.41.3)
Oct 11 01:40:42 compute-0 systemd[1]: Stopping User Manager for UID 42477...
Oct 11 01:40:42 compute-0 systemd[190730]: Activating special unit Exit the Session...
Oct 11 01:40:42 compute-0 systemd[190730]: Stopped target Main User Target.
Oct 11 01:40:42 compute-0 systemd[190730]: Stopped target Basic System.
Oct 11 01:40:42 compute-0 systemd[190730]: Stopped target Paths.
Oct 11 01:40:42 compute-0 systemd[190730]: Stopped target Sockets.
Oct 11 01:40:42 compute-0 systemd[190730]: Stopped target Timers.
Oct 11 01:40:42 compute-0 systemd[190730]: Stopped Mark boot as successful after the user session has run 2 minutes.
Oct 11 01:40:42 compute-0 systemd[190730]: Stopped Daily Cleanup of User's Temporary Directories.
Oct 11 01:40:42 compute-0 systemd[190730]: Closed D-Bus User Message Bus Socket.
Oct 11 01:40:42 compute-0 systemd[190730]: Stopped Create User's Volatile Files and Directories.
Oct 11 01:40:42 compute-0 systemd[190730]: Removed slice User Application Slice.
Oct 11 01:40:42 compute-0 systemd[190730]: Reached target Shutdown.
Oct 11 01:40:42 compute-0 systemd[190730]: Finished Exit the Session.
Oct 11 01:40:42 compute-0 systemd[190730]: Reached target Exit the Session.
Oct 11 01:40:42 compute-0 systemd[1]: user@42477.service: Deactivated successfully.
Oct 11 01:40:42 compute-0 systemd[1]: Stopped User Manager for UID 42477.
Oct 11 01:40:42 compute-0 systemd[1]: Stopping User Runtime Directory /run/user/42477...
Oct 11 01:40:42 compute-0 systemd[1]: run-user-42477.mount: Deactivated successfully.
Oct 11 01:40:42 compute-0 systemd[1]: user-runtime-dir@42477.service: Deactivated successfully.
Oct 11 01:40:42 compute-0 systemd[1]: Stopped User Runtime Directory /run/user/42477.
Oct 11 01:40:42 compute-0 systemd[1]: Removed slice User Slice of UID 42477.
Oct 11 01:40:42 compute-0 podman[190912]: 2025-10-11 01:40:42.710578313 +0000 UTC m=+0.107622489 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251007, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.41.4)
Oct 11 01:40:58 compute-0 podman[190949]: 2025-10-11 01:40:58.239976333 +0000 UTC m=+2.140972656 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:40:58 compute-0 podman[190950]: 2025-10-11 01:40:58.241897054 +0000 UTC m=+2.133845821 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, version=9.6, vendor=Red Hat, Inc., io.buildah.version=1.33.7, io.openshift.tags=minimal rhel9, name=ubi9-minimal, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, url=https://catalog.redhat.com/en/search?searchType=containers, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, container_name=openstack_network_exporter, distribution-scope=public, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, build-date=2025-08-20T13:12:41, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, maintainer=Red Hat, Inc., managed_by=edpm_ansible, release=1755695350, config_id=edpm, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.component=ubi9-minimal-container, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']})
Oct 11 01:40:58 compute-0 podman[190783]: 2025-10-11 01:40:58.316575241 +0000 UTC m=+25.722300158 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:40:58 compute-0 podman[190987]: 2025-10-11 01:40:58.459743478 +0000 UTC m=+0.090050256 container create afa22fdec669a6cb2f1a18667d5f562a9cb593a3496d6d6d7f9336e33bf378d8 (image=quay.io/ceph/ceph:v18, name=compassionate_cori, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:40:58 compute-0 podman[190987]: 2025-10-11 01:40:58.418348551 +0000 UTC m=+0.048655389 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:40:58 compute-0 systemd[1]: Started libpod-conmon-afa22fdec669a6cb2f1a18667d5f562a9cb593a3496d6d6d7f9336e33bf378d8.scope.
Oct 11 01:40:58 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:40:58 compute-0 podman[190987]: 2025-10-11 01:40:58.621344684 +0000 UTC m=+0.251651522 container init afa22fdec669a6cb2f1a18667d5f562a9cb593a3496d6d6d7f9336e33bf378d8 (image=quay.io/ceph/ceph:v18, name=compassionate_cori, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 01:40:58 compute-0 podman[190987]: 2025-10-11 01:40:58.641392038 +0000 UTC m=+0.271698776 container start afa22fdec669a6cb2f1a18667d5f562a9cb593a3496d6d6d7f9336e33bf378d8 (image=quay.io/ceph/ceph:v18, name=compassionate_cori, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0)
Oct 11 01:40:58 compute-0 podman[190987]: 2025-10-11 01:40:58.64712929 +0000 UTC m=+0.277436078 container attach afa22fdec669a6cb2f1a18667d5f562a9cb593a3496d6d6d7f9336e33bf378d8 (image=quay.io/ceph/ceph:v18, name=compassionate_cori, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:40:58 compute-0 compassionate_cori[191003]: ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable)
Oct 11 01:40:58 compute-0 systemd[1]: libpod-afa22fdec669a6cb2f1a18667d5f562a9cb593a3496d6d6d7f9336e33bf378d8.scope: Deactivated successfully.
Oct 11 01:40:58 compute-0 podman[190987]: 2025-10-11 01:40:58.972825634 +0000 UTC m=+0.603132412 container died afa22fdec669a6cb2f1a18667d5f562a9cb593a3496d6d6d7f9336e33bf378d8 (image=quay.io/ceph/ceph:v18, name=compassionate_cori, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef)
Oct 11 01:40:59 compute-0 systemd[1]: var-lib-containers-storage-overlay-9bb387783ba5df2ef110ef6d0cf3bbe86838b695d279bfc30a7fb49500569ae5-merged.mount: Deactivated successfully.
Oct 11 01:40:59 compute-0 podman[190987]: 2025-10-11 01:40:59.066448676 +0000 UTC m=+0.696755454 container remove afa22fdec669a6cb2f1a18667d5f562a9cb593a3496d6d6d7f9336e33bf378d8 (image=quay.io/ceph/ceph:v18, name=compassionate_cori, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 01:40:59 compute-0 systemd[1]: libpod-conmon-afa22fdec669a6cb2f1a18667d5f562a9cb593a3496d6d6d7f9336e33bf378d8.scope: Deactivated successfully.
Oct 11 01:40:59 compute-0 podman[191020]: 2025-10-11 01:40:59.189533328 +0000 UTC m=+0.086645437 container create 427b7ab7c9b57de08801dc4e8341ed70741a9f7ec1d12de86746174ea63b4f70 (image=quay.io/ceph/ceph:v18, name=happy_einstein, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:40:59 compute-0 podman[191020]: 2025-10-11 01:40:59.152387225 +0000 UTC m=+0.049499384 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:40:59 compute-0 systemd[1]: Started libpod-conmon-427b7ab7c9b57de08801dc4e8341ed70741a9f7ec1d12de86746174ea63b4f70.scope.
Oct 11 01:40:59 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:40:59 compute-0 podman[191020]: 2025-10-11 01:40:59.334304209 +0000 UTC m=+0.231416378 container init 427b7ab7c9b57de08801dc4e8341ed70741a9f7ec1d12de86746174ea63b4f70 (image=quay.io/ceph/ceph:v18, name=happy_einstein, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 01:40:59 compute-0 podman[191020]: 2025-10-11 01:40:59.351397278 +0000 UTC m=+0.248509417 container start 427b7ab7c9b57de08801dc4e8341ed70741a9f7ec1d12de86746174ea63b4f70 (image=quay.io/ceph/ceph:v18, name=happy_einstein, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True)
Oct 11 01:40:59 compute-0 podman[191020]: 2025-10-11 01:40:59.360972492 +0000 UTC m=+0.258084681 container attach 427b7ab7c9b57de08801dc4e8341ed70741a9f7ec1d12de86746174ea63b4f70 (image=quay.io/ceph/ceph:v18, name=happy_einstein, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:40:59 compute-0 happy_einstein[191036]: 167 167
Oct 11 01:40:59 compute-0 systemd[1]: libpod-427b7ab7c9b57de08801dc4e8341ed70741a9f7ec1d12de86746174ea63b4f70.scope: Deactivated successfully.
Oct 11 01:40:59 compute-0 podman[191020]: 2025-10-11 01:40:59.365760553 +0000 UTC m=+0.262872672 container died 427b7ab7c9b57de08801dc4e8341ed70741a9f7ec1d12de86746174ea63b4f70 (image=quay.io/ceph/ceph:v18, name=happy_einstein, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 01:40:59 compute-0 systemd[1]: var-lib-containers-storage-overlay-dfb04a19be1267fc385ce79413b114788bdc73b4513563227658a7775fe20685-merged.mount: Deactivated successfully.
Oct 11 01:40:59 compute-0 podman[191020]: 2025-10-11 01:40:59.426842445 +0000 UTC m=+0.323954524 container remove 427b7ab7c9b57de08801dc4e8341ed70741a9f7ec1d12de86746174ea63b4f70 (image=quay.io/ceph/ceph:v18, name=happy_einstein, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 01:40:59 compute-0 systemd[1]: libpod-conmon-427b7ab7c9b57de08801dc4e8341ed70741a9f7ec1d12de86746174ea63b4f70.scope: Deactivated successfully.
Oct 11 01:40:59 compute-0 podman[191054]: 2025-10-11 01:40:59.53003832 +0000 UTC m=+0.065371958 container create 923285f52c9c925a3ef5ed59c9e189d13a04de836ddcf049b068930182b970ce (image=quay.io/ceph/ceph:v18, name=adoring_kare, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507)
Oct 11 01:40:59 compute-0 systemd[1]: Started libpod-conmon-923285f52c9c925a3ef5ed59c9e189d13a04de836ddcf049b068930182b970ce.scope.
Oct 11 01:40:59 compute-0 podman[191054]: 2025-10-11 01:40:59.505622707 +0000 UTC m=+0.040956425 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:40:59 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:40:59 compute-0 podman[191054]: 2025-10-11 01:40:59.642595317 +0000 UTC m=+0.177928995 container init 923285f52c9c925a3ef5ed59c9e189d13a04de836ddcf049b068930182b970ce (image=quay.io/ceph/ceph:v18, name=adoring_kare, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:40:59 compute-0 podman[191054]: 2025-10-11 01:40:59.650578957 +0000 UTC m=+0.185912595 container start 923285f52c9c925a3ef5ed59c9e189d13a04de836ddcf049b068930182b970ce (image=quay.io/ceph/ceph:v18, name=adoring_kare, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:40:59 compute-0 podman[191054]: 2025-10-11 01:40:59.655455193 +0000 UTC m=+0.190788911 container attach 923285f52c9c925a3ef5ed59c9e189d13a04de836ddcf049b068930182b970ce (image=quay.io/ceph/ceph:v18, name=adoring_kare, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=reef)
Oct 11 01:40:59 compute-0 adoring_kare[191070]: AQCrteloEngbKBAA2BkqzPXzARCPIcIeG5ITSA==
Oct 11 01:40:59 compute-0 systemd[1]: libpod-923285f52c9c925a3ef5ed59c9e189d13a04de836ddcf049b068930182b970ce.scope: Deactivated successfully.
Oct 11 01:40:59 compute-0 podman[191054]: 2025-10-11 01:40:59.680001204 +0000 UTC m=+0.215334852 container died 923285f52c9c925a3ef5ed59c9e189d13a04de836ddcf049b068930182b970ce (image=quay.io/ceph/ceph:v18, name=adoring_kare, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default)
Oct 11 01:40:59 compute-0 systemd[1]: var-lib-containers-storage-overlay-a5da60aaa5b1169db0288f6bbec085d4c916505c2987c106c49d7ee52f6921c4-merged.mount: Deactivated successfully.
Oct 11 01:40:59 compute-0 podman[191054]: 2025-10-11 01:40:59.732875104 +0000 UTC m=+0.268208742 container remove 923285f52c9c925a3ef5ed59c9e189d13a04de836ddcf049b068930182b970ce (image=quay.io/ceph/ceph:v18, name=adoring_kare, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.schema-version=1.0)
Oct 11 01:40:59 compute-0 podman[157119]: time="2025-10-11T01:40:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:40:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:40:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 18533 "" "Go-http-client/1.1"
Oct 11 01:40:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:40:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 3003 "" "Go-http-client/1.1"
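[annotation] The two GET lines above are the podman system service (pid 157119) answering libpod REST calls from some local collector. A minimal sketch of issuing the same queries by hand, assuming the default rootful socket at /run/podman/podman.sock and jq for readability:

    # list all containers, as in the first request
    curl -s --unix-socket /run/podman/podman.sock \
        'http://d/v4.9.3/libpod/containers/json?all=true' | jq -r '.[].Names[]'
    # one-shot stats for running containers, as in the second request
    curl -s --unix-socket /run/podman/podman.sock \
        'http://d/v4.9.3/libpod/containers/stats?stream=false' | jq .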
Oct 11 01:40:59 compute-0 systemd[1]: libpod-conmon-923285f52c9c925a3ef5ed59c9e189d13a04de836ddcf049b068930182b970ce.scope: Deactivated successfully.
Oct 11 01:40:59 compute-0 podman[191089]: 2025-10-11 01:40:59.854948202 +0000 UTC m=+0.074216343 container create b8bb7ac7e71d0493fc74786a62814bdd46f63bccb5fce2e6db2c4cd1fa45b62b (image=quay.io/ceph/ceph:v18, name=nostalgic_mcnulty, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:40:59 compute-0 systemd[1]: Started libpod-conmon-b8bb7ac7e71d0493fc74786a62814bdd46f63bccb5fce2e6db2c4cd1fa45b62b.scope.
Oct 11 01:40:59 compute-0 podman[191089]: 2025-10-11 01:40:59.833204009 +0000 UTC m=+0.052472150 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:40:59 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:00 compute-0 podman[191089]: 2025-10-11 01:41:00.053744434 +0000 UTC m=+0.273012665 container init b8bb7ac7e71d0493fc74786a62814bdd46f63bccb5fce2e6db2c4cd1fa45b62b (image=quay.io/ceph/ceph:v18, name=nostalgic_mcnulty, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:41:00 compute-0 podman[191089]: 2025-10-11 01:41:00.06659908 +0000 UTC m=+0.285867251 container start b8bb7ac7e71d0493fc74786a62814bdd46f63bccb5fce2e6db2c4cd1fa45b62b (image=quay.io/ceph/ceph:v18, name=nostalgic_mcnulty, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:41:00 compute-0 podman[191089]: 2025-10-11 01:41:00.073792589 +0000 UTC m=+0.293060830 container attach b8bb7ac7e71d0493fc74786a62814bdd46f63bccb5fce2e6db2c4cd1fa45b62b (image=quay.io/ceph/ceph:v18, name=nostalgic_mcnulty, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Oct 11 01:41:00 compute-0 nostalgic_mcnulty[191104]: AQCsteloZ/6pBhAAHGdYxCeG9D4jzTn85CpPHg==
Oct 11 01:41:00 compute-0 systemd[1]: libpod-b8bb7ac7e71d0493fc74786a62814bdd46f63bccb5fce2e6db2c4cd1fa45b62b.scope: Deactivated successfully.
Oct 11 01:41:00 compute-0 podman[191089]: 2025-10-11 01:41:00.125034563 +0000 UTC m=+0.344302734 container died b8bb7ac7e71d0493fc74786a62814bdd46f63bccb5fce2e6db2c4cd1fa45b62b (image=quay.io/ceph/ceph:v18, name=nostalgic_mcnulty, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:41:00 compute-0 systemd[1]: var-lib-containers-storage-overlay-8cd70482523a9070a9f7ec8a59161713e5b4e1c35a7e8f430b6e24926aa1eddb-merged.mount: Deactivated successfully.
Oct 11 01:41:00 compute-0 podman[191089]: 2025-10-11 01:41:00.196189354 +0000 UTC m=+0.415457525 container remove b8bb7ac7e71d0493fc74786a62814bdd46f63bccb5fce2e6db2c4cd1fa45b62b (image=quay.io/ceph/ceph:v18, name=nostalgic_mcnulty, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3, ceph=True)
Oct 11 01:41:00 compute-0 systemd[1]: libpod-conmon-b8bb7ac7e71d0493fc74786a62814bdd46f63bccb5fce2e6db2c4cd1fa45b62b.scope: Deactivated successfully.
Oct 11 01:41:00 compute-0 podman[191123]: 2025-10-11 01:41:00.301891781 +0000 UTC m=+0.072856681 container create 1eb986aed90dcbce7ac7768cb1141d245e85d4d9ccb16214d91782d737f28f63 (image=quay.io/ceph/ceph:v18, name=cranky_ritchie, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 01:41:00 compute-0 podman[191123]: 2025-10-11 01:41:00.276354859 +0000 UTC m=+0.047319779 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:00 compute-0 systemd[1]: Started libpod-conmon-1eb986aed90dcbce7ac7768cb1141d245e85d4d9ccb16214d91782d737f28f63.scope.
Oct 11 01:41:00 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:00 compute-0 podman[191123]: 2025-10-11 01:41:00.70736145 +0000 UTC m=+0.478326410 container init 1eb986aed90dcbce7ac7768cb1141d245e85d4d9ccb16214d91782d737f28f63 (image=quay.io/ceph/ceph:v18, name=cranky_ritchie, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 01:41:00 compute-0 podman[191123]: 2025-10-11 01:41:00.717597148 +0000 UTC m=+0.488562028 container start 1eb986aed90dcbce7ac7768cb1141d245e85d4d9ccb16214d91782d737f28f63 (image=quay.io/ceph/ceph:v18, name=cranky_ritchie, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:41:00 compute-0 cranky_ritchie[191138]: AQCsteloRWGMLBAAHg8izrr+g6vqcQHLuzA0Zw==
Oct 11 01:41:00 compute-0 systemd[1]: libpod-1eb986aed90dcbce7ac7768cb1141d245e85d4d9ccb16214d91782d737f28f63.scope: Deactivated successfully.
Oct 11 01:41:01 compute-0 podman[191123]: 2025-10-11 01:41:01.238312796 +0000 UTC m=+1.009277766 container attach 1eb986aed90dcbce7ac7768cb1141d245e85d4d9ccb16214d91782d737f28f63 (image=quay.io/ceph/ceph:v18, name=cranky_ritchie, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:41:01 compute-0 podman[191123]: 2025-10-11 01:41:01.239378932 +0000 UTC m=+1.010343892 container died 1eb986aed90dcbce7ac7768cb1141d245e85d4d9ccb16214d91782d737f28f63 (image=quay.io/ceph/ceph:v18, name=cranky_ritchie, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3)
Oct 11 01:41:01 compute-0 systemd[1]: var-lib-containers-storage-overlay-8d9fb99b0498f540015e1643bff34141f53d31905f1510916f518af0cc4636ee-merged.mount: Deactivated successfully.
Oct 11 01:41:01 compute-0 podman[191123]: 2025-10-11 01:41:01.320447384 +0000 UTC m=+1.091412314 container remove 1eb986aed90dcbce7ac7768cb1141d245e85d4d9ccb16214d91782d737f28f63 (image=quay.io/ceph/ceph:v18, name=cranky_ritchie, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 01:41:01 compute-0 systemd[1]: libpod-conmon-1eb986aed90dcbce7ac7768cb1141d245e85d4d9ccb16214d91782d737f28f63.scope: Deactivated successfully.
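[annotation] adoring_kare, nostalgic_mcnulty and cranky_ritchie above are the randomly named, short-lived helper containers cephadm spawns during deployment: each prints one CephX secret (the AQ...== lines) to stdout, exits, and is removed by podman. The exact helper command is not in the log; a hedged sketch of the same pattern using ceph-authtool from the same image:

    # generate and print a CephX key, removing the container afterwards
    podman run --rm quay.io/ceph/ceph:v18 ceph-authtool --gen-print-key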
Oct 11 01:41:01 compute-0 openstack_network_exporter[159265]: ERROR   01:41:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:41:01 compute-0 openstack_network_exporter[159265]: ERROR   01:41:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:41:01 compute-0 openstack_network_exporter[159265]: ERROR   01:41:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:41:01 compute-0 openstack_network_exporter[159265]: ERROR   01:41:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:41:01 compute-0 openstack_network_exporter[159265]: ERROR   01:41:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
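[annotation] The ERROR lines above mean openstack_network_exporter found no OVN/OVS control sockets to query: ovn-northd does not run on a compute node, and the dpif-netdev/pmd-* calls only work with a userspace (netdev/DPDK) datapath. A quick hedged check, assuming the default runtime directories:

    # which control sockets actually exist on this host?
    ls /var/run/openvswitch/ /var/run/ovn/ 2>/dev/null
    # succeeds only with a netdev datapath; otherwise errors like the ones above
    ovs-appctl dpif-netdev/pmd-rxq-show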
Oct 11 01:41:01 compute-0 podman[191156]: 2025-10-11 01:41:01.473808357 +0000 UTC m=+0.104615051 container create 4b370d71371f16fa3564a10edb0aaf33d98e40766e82101bf7d25597df6bc6a0 (image=quay.io/ceph/ceph:v18, name=amazing_williamson, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 01:41:01 compute-0 podman[191156]: 2025-10-11 01:41:01.434611456 +0000 UTC m=+0.065418190 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:01 compute-0 systemd[1]: Started libpod-conmon-4b370d71371f16fa3564a10edb0aaf33d98e40766e82101bf7d25597df6bc6a0.scope.
Oct 11 01:41:01 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/da7060712c4b828d68fe004f516aa3fc9c6fe39effff05121d36e59c94d71b86/merged/tmp/monmap supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:01 compute-0 podman[191156]: 2025-10-11 01:41:01.621631089 +0000 UTC m=+0.252437833 container init 4b370d71371f16fa3564a10edb0aaf33d98e40766e82101bf7d25597df6bc6a0 (image=quay.io/ceph/ceph:v18, name=amazing_williamson, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 01:41:01 compute-0 podman[191156]: 2025-10-11 01:41:01.636606496 +0000 UTC m=+0.267413190 container start 4b370d71371f16fa3564a10edb0aaf33d98e40766e82101bf7d25597df6bc6a0 (image=quay.io/ceph/ceph:v18, name=amazing_williamson, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Oct 11 01:41:01 compute-0 podman[191156]: 2025-10-11 01:41:01.644187975 +0000 UTC m=+0.274994709 container attach 4b370d71371f16fa3564a10edb0aaf33d98e40766e82101bf7d25597df6bc6a0 (image=quay.io/ceph/ceph:v18, name=amazing_williamson, ceph=True, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507)
Oct 11 01:41:01 compute-0 amazing_williamson[191172]: /usr/bin/monmaptool: monmap file /tmp/monmap
Oct 11 01:41:01 compute-0 amazing_williamson[191172]: setting min_mon_release = pacific
Oct 11 01:41:01 compute-0 amazing_williamson[191172]: /usr/bin/monmaptool: set fsid to 3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:41:01 compute-0 amazing_williamson[191172]: /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
Oct 11 01:41:01 compute-0 systemd[1]: libpod-4b370d71371f16fa3564a10edb0aaf33d98e40766e82101bf7d25597df6bc6a0.scope: Deactivated successfully.
Oct 11 01:41:01 compute-0 podman[191156]: 2025-10-11 01:41:01.703315054 +0000 UTC m=+0.334121738 container died 4b370d71371f16fa3564a10edb0aaf33d98e40766e82101bf7d25597df6bc6a0 (image=quay.io/ceph/ceph:v18, name=amazing_williamson, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:41:01 compute-0 systemd[1]: var-lib-containers-storage-overlay-da7060712c4b828d68fe004f516aa3fc9c6fe39effff05121d36e59c94d71b86-merged.mount: Deactivated successfully.
Oct 11 01:41:01 compute-0 podman[191156]: 2025-10-11 01:41:01.789000129 +0000 UTC m=+0.419806813 container remove 4b370d71371f16fa3564a10edb0aaf33d98e40766e82101bf7d25597df6bc6a0 (image=quay.io/ceph/ceph:v18, name=amazing_williamson, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default)
Oct 11 01:41:01 compute-0 systemd[1]: libpod-conmon-4b370d71371f16fa3564a10edb0aaf33d98e40766e82101bf7d25597df6bc6a0.scope: Deactivated successfully.
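[annotation] The amazing_williamson run above is the step that builds the initial monitor map with monmaptool. The printed lines match a create-with-one-monitor invocation; a plausible reconstruction follows (the exact arguments are not in the log, and <mon-ip> is a placeholder):

    monmaptool --create --clobber \
        --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da \
        --set-min-mon-release pacific \
        --addv compute-0 '[v2:<mon-ip>:3300,v1:<mon-ip>:6789]' \
        /tmp/monmap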
Oct 11 01:41:01 compute-0 podman[191189]: 2025-10-11 01:41:01.951358245 +0000 UTC m=+0.103590677 container create e4aaf9e720b34d297a03e9857abe06e012c7a85108ec1ca34d2490b482a87627 (image=quay.io/ceph/ceph:v18, name=condescending_lamport, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 01:41:01 compute-0 podman[191189]: 2025-10-11 01:41:01.908676691 +0000 UTC m=+0.060909113 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:02 compute-0 systemd[1]: Started libpod-conmon-e4aaf9e720b34d297a03e9857abe06e012c7a85108ec1ca34d2490b482a87627.scope.
Oct 11 01:41:02 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4ff635a1e1574da81b16e758551695d588cff11e820fe4a3ea2e8588a2d88273/merged/tmp/keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4ff635a1e1574da81b16e758551695d588cff11e820fe4a3ea2e8588a2d88273/merged/tmp/monmap supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4ff635a1e1574da81b16e758551695d588cff11e820fe4a3ea2e8588a2d88273/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4ff635a1e1574da81b16e758551695d588cff11e820fe4a3ea2e8588a2d88273/merged/var/lib/ceph/mon/ceph-compute-0 supports timestamps until 2038 (0x7fffffff)
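[annotation] The kernel's "supports timestamps until 2038" notices appear whenever podman bind-mounts paths from the XFS-backed overlay store: the filesystem lacks the bigtime feature, so inode timestamps cap out in 2038. Harmless for now; a hedged way to confirm, assuming /var is the XFS mount backing /var/lib/containers and a recent xfsprogs:

    xfs_info /var | grep -o 'bigtime=[01]'
    # bigtime=0 means the 2038 limit applies; current mkfs.xfs defaults to bigtime=1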
Oct 11 01:41:02 compute-0 podman[191189]: 2025-10-11 01:41:02.111922757 +0000 UTC m=+0.264155239 container init e4aaf9e720b34d297a03e9857abe06e012c7a85108ec1ca34d2490b482a87627 (image=quay.io/ceph/ceph:v18, name=condescending_lamport, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 01:41:02 compute-0 podman[191189]: 2025-10-11 01:41:02.140164032 +0000 UTC m=+0.292396474 container start e4aaf9e720b34d297a03e9857abe06e012c7a85108ec1ca34d2490b482a87627 (image=quay.io/ceph/ceph:v18, name=condescending_lamport, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:41:02 compute-0 podman[191189]: 2025-10-11 01:41:02.148646618 +0000 UTC m=+0.300879070 container attach e4aaf9e720b34d297a03e9857abe06e012c7a85108ec1ca34d2490b482a87627 (image=quay.io/ceph/ceph:v18, name=condescending_lamport, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 01:41:02 compute-0 systemd[1]: libpod-e4aaf9e720b34d297a03e9857abe06e012c7a85108ec1ca34d2490b482a87627.scope: Deactivated successfully.
Oct 11 01:41:02 compute-0 podman[191189]: 2025-10-11 01:41:02.282178999 +0000 UTC m=+0.434411431 container died e4aaf9e720b34d297a03e9857abe06e012c7a85108ec1ca34d2490b482a87627 (image=quay.io/ceph/ceph:v18, name=condescending_lamport, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:41:02 compute-0 systemd[1]: var-lib-containers-storage-overlay-4ff635a1e1574da81b16e758551695d588cff11e820fe4a3ea2e8588a2d88273-merged.mount: Deactivated successfully.
Oct 11 01:41:02 compute-0 podman[191189]: 2025-10-11 01:41:02.36037251 +0000 UTC m=+0.512604942 container remove e4aaf9e720b34d297a03e9857abe06e012c7a85108ec1ca34d2490b482a87627 (image=quay.io/ceph/ceph:v18, name=condescending_lamport, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:41:02 compute-0 systemd[1]: libpod-conmon-e4aaf9e720b34d297a03e9857abe06e012c7a85108ec1ca34d2490b482a87627.scope: Deactivated successfully.
Oct 11 01:41:02 compute-0 systemd[1]: Reloading.
Oct 11 01:41:02 compute-0 systemd-sysv-generator[191276]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:41:02 compute-0 systemd-rc-local-generator[191273]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:41:02 compute-0 systemd[1]: Reloading.
Oct 11 01:41:03 compute-0 systemd-rc-local-generator[191306]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:41:03 compute-0 systemd-sysv-generator[191310]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:41:03 compute-0 systemd[1]: Reached target All Ceph clusters and services.
Oct 11 01:41:03 compute-0 systemd[1]: Reloading.
Oct 11 01:41:03 compute-0 systemd-sysv-generator[191373]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:41:03 compute-0 systemd-rc-local-generator[191369]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:41:03 compute-0 podman[191319]: 2025-10-11 01:41:03.496099441 +0000 UTC m=+0.168399234 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.build-date=20251009)
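[annotation] The health_status event above is podman executing the container's configured check ('/openstack/healthcheck ipmi') and recording health_status=healthy with a failing streak of 0. The same check can be driven by hand; the Go template field is an assumption that holds on podman 4.x:

    podman healthcheck run ceilometer_agent_ipmi && echo healthy
    # .State.Healthcheck.Status on older podman releases
    podman inspect --format '{{.State.Health.Status}}' ceilometer_agent_ipmi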
Oct 11 01:41:03 compute-0 systemd[1]: Reached target Ceph cluster 3c7617c3-7a20-523e-a9de-20c0d6ba41da.
Oct 11 01:41:03 compute-0 systemd[1]: Reloading.
Oct 11 01:41:03 compute-0 systemd-rc-local-generator[191404]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:41:03 compute-0 systemd-sysv-generator[191408]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:41:04 compute-0 systemd[1]: Reloading.
Oct 11 01:41:04 compute-0 systemd-rc-local-generator[191446]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:41:04 compute-0 systemd-sysv-generator[191450]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
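[annotation] Every systemd "Reloading." reruns all unit generators, which is why the sysv-generator and rc-local warnings repeat with each reload: the legacy /etc/rc.d/init.d/network script gets its wrapper unit regenerated every time. To see the generated unit (the path shown is the standard generator output location):

    systemctl cat network.service | head -n 3
    # generated wrappers live under /run/systemd/generator.late/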
Oct 11 01:41:04 compute-0 systemd[1]: Created slice Slice /system/ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da.
Oct 11 01:41:04 compute-0 systemd[1]: Reached target System Time Set.
Oct 11 01:41:04 compute-0 systemd[1]: Reached target System Time Synchronized.
Oct 11 01:41:04 compute-0 systemd[1]: Starting Ceph mon.compute-0 for 3c7617c3-7a20-523e-a9de-20c0d6ba41da...
Oct 11 01:41:05 compute-0 podman[191502]: 2025-10-11 01:41:05.165368436 +0000 UTC m=+0.061621031 container create 30c4480e08e356d11ccada0a4a93adca906bb488cf48c66ab1d12e0fc0dc80b9 (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:41:05 compute-0 podman[191502]: 2025-10-11 01:41:05.139495206 +0000 UTC m=+0.035747841 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e3b3c9919d0c2def63c03b4d926e7cc287ec31084d8d968c1bd5824a59ba04f8/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e3b3c9919d0c2def63c03b4d926e7cc287ec31084d8d968c1bd5824a59ba04f8/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e3b3c9919d0c2def63c03b4d926e7cc287ec31084d8d968c1bd5824a59ba04f8/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e3b3c9919d0c2def63c03b4d926e7cc287ec31084d8d968c1bd5824a59ba04f8/merged/var/lib/ceph/mon/ceph-compute-0 supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:05 compute-0 podman[191502]: 2025-10-11 01:41:05.315459837 +0000 UTC m=+0.211712452 container init 30c4480e08e356d11ccada0a4a93adca906bb488cf48c66ab1d12e0fc0dc80b9 (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2)
Oct 11 01:41:05 compute-0 podman[191502]: 2025-10-11 01:41:05.338973064 +0000 UTC m=+0.235225659 container start 30c4480e08e356d11ccada0a4a93adca906bb488cf48c66ab1d12e0fc0dc80b9 (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, ceph=True, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:41:05 compute-0 bash[191502]: 30c4480e08e356d11ccada0a4a93adca906bb488cf48c66ab1d12e0fc0dc80b9
Oct 11 01:41:05 compute-0 systemd[1]: Started Ceph mon.compute-0 for 3c7617c3-7a20-523e-a9de-20c0d6ba41da.
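[annotation] From this point the monitor runs as a cephadm-managed service; the unit follows the usual ceph-<fsid>@<daemon>.service template, so it can be managed like any other unit. A hedged pair of commands (cephadm and jq assumed present on the host):

    systemctl status 'ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da@mon.compute-0.service'
    cephadm ls --no-detail | jq -r '.[].name'   # lists cephadm-deployed daemons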
Oct 11 01:41:05 compute-0 ceph-mon[191519]: set uid:gid to 167:167 (ceph:ceph)
Oct 11 01:41:05 compute-0 ceph-mon[191519]: ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable), process ceph-mon, pid 2
Oct 11 01:41:05 compute-0 ceph-mon[191519]: pidfile_write: ignore empty --pid-file
Oct 11 01:41:05 compute-0 ceph-mon[191519]: load: jerasure load: lrc 
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: RocksDB version: 7.9.2
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Git sha 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Compile date 2025-05-06 23:30:25
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: DB SUMMARY
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: DB Session ID:  XOB2YHNWICP08MYIBD38
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: CURRENT file:  CURRENT
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: IDENTITY file:  IDENTITY
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: MANIFEST file:  MANIFEST-000005 size: 59 Bytes
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: SST files in /var/lib/ceph/mon/ceph-compute-0/store.db dir, Total Num: 0, files: 
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-compute-0/store.db: 000004.log size: 807 ; 
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                         Options.error_if_exists: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                       Options.create_if_missing: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                         Options.paranoid_checks: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.flush_verify_memtable_count: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                               Options.track_and_verify_wals_in_manifest: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:        Options.verify_sst_unique_id_in_manifest: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                                     Options.env: 0x561f4be04c40
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                                      Options.fs: PosixFileSystem
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                                Options.info_log: 0x561f4dacee80
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                Options.max_file_opening_threads: 16
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                              Options.statistics: (nil)
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                               Options.use_fsync: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                       Options.max_log_file_size: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                  Options.max_manifest_file_size: 1073741824
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                   Options.log_file_time_to_roll: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                       Options.keep_log_file_num: 1000
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                    Options.recycle_log_file_num: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                         Options.allow_fallocate: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                        Options.allow_mmap_reads: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                       Options.allow_mmap_writes: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                        Options.use_direct_reads: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                        Options.use_direct_io_for_flush_and_compaction: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:          Options.create_missing_column_families: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                              Options.db_log_dir: 
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                                 Options.wal_dir: 
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                Options.table_cache_numshardbits: 6
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                         Options.WAL_ttl_seconds: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                       Options.WAL_size_limit_MB: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                        Options.max_write_batch_group_size_bytes: 1048576
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.manifest_preallocation_size: 4194304
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                     Options.is_fd_close_on_exec: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                   Options.advise_random_on_open: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                    Options.db_write_buffer_size: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                    Options.write_buffer_manager: 0x561f4dadeb40
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         Options.access_hint_on_compaction_start: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:           Options.random_access_max_buffer_size: 1048576
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                      Options.use_adaptive_mutex: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                            Options.rate_limiter: (nil)
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:     Options.sst_file_manager.rate_bytes_per_sec: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                       Options.wal_recovery_mode: 2
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                  Options.enable_thread_tracking: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                  Options.enable_pipelined_write: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                  Options.unordered_write: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         Options.allow_concurrent_memtable_write: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:      Options.enable_write_thread_adaptive_yield: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.write_thread_max_yield_usec: 100
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:            Options.write_thread_slow_yield_usec: 3
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                               Options.row_cache: None
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                              Options.wal_filter: None
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.avoid_flush_during_recovery: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.allow_ingest_behind: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.two_write_queues: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.manual_wal_flush: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.wal_compression: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.atomic_flush: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.avoid_unnecessary_blocking_io: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                 Options.persist_stats_to_disk: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                 Options.write_dbid_to_manifest: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                 Options.log_readahead_size: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                 Options.file_checksum_gen_factory: Unknown
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                 Options.best_efforts_recovery: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                Options.max_bgerror_resume_count: 2147483647
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:            Options.bgerror_resume_retry_interval: 1000000
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.allow_data_in_errors: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.db_host_id: __hostname__
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.enforce_single_del_contracts: true
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.max_background_jobs: 2
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.max_background_compactions: -1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.max_subcompactions: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.avoid_flush_during_shutdown: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:           Options.writable_file_max_buffer_size: 1048576
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.delayed_write_rate : 16777216
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.max_total_wal_size: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.delete_obsolete_files_period_micros: 21600000000
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                   Options.stats_dump_period_sec: 600
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                 Options.stats_persist_period_sec: 600
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                 Options.stats_history_buffer_size: 1048576
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                          Options.max_open_files: -1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                          Options.bytes_per_sync: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                      Options.wal_bytes_per_sync: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                   Options.strict_bytes_per_sync: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:       Options.compaction_readahead_size: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                  Options.max_background_flushes: -1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Compression algorithms supported:
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         kZSTD supported: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         kXpressCompression supported: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         kBZip2Compression supported: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         kZSTDNotFinalCompression supported: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         kLZ4Compression supported: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         kZlibCompression supported: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         kLZ4HCCompression supported: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         kSnappyCompression supported: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Fast CRC32 supported: Supported on x86
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: DMutex implementation: pthread_mutex_t
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-compute-0/store.db/MANIFEST-000005
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:           Options.merge_operator: 
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:        Options.compaction_filter: None
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x561f4dacea80)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x561f4dac71f0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:        Options.write_buffer_size: 33554432
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:  Options.max_write_buffer_number: 2
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:          Options.compression: NoCompression
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.num_levels: 7
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:        Options.min_write_buffer_number_to_merge: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:      Options.level0_file_num_compaction_trigger: 4
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                Options.max_bytes_for_level_base: 268435456
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:          Options.max_bytes_for_level_multiplier: 10.000000
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
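The block above is RocksDB echoing the effective column-family options for the mon's store.db at open time; nothing in it is Ceph-specific output, so when comparing settings across boots it can help to fold the dump into key/value pairs. A minimal sketch in Python (the helper name is hypothetical; it only assumes lines shaped like the ones above):

    import re

    # Matches the "Options.<name>: <value>" pairs RocksDB prints at DB open.
    OPT_RE = re.compile(r"Options\.([A-Za-z0-9_.\[\]]+):\s*(.+?)\s*$")

    def parse_rocksdb_options(lines):
        """Collect a RocksDB options dump into a dict (hypothetical helper)."""
        opts = {}
        for line in lines:
            m = OPT_RE.search(line)
            if m:
                opts[m.group(1)] = m.group(2)
        return opts

    sample = [
        "rocksdb:      Options.level0_file_num_compaction_trigger: 4",
        "rocksdb:                Options.max_bytes_for_level_base: 268435456",
    ]
    print(parse_rocksdb_options(sample))
    # {'level0_file_num_compaction_trigger': '4', 'max_bytes_for_level_base': '268435456'}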
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file: /var/lib/ceph/mon/ceph-compute-0/store.db/MANIFEST-000005 succeeded, manifest_file_number is 5, next_file_number is 7, last_sequence is 0, log_number is 0, prev_log_number is 0, max_column_family is 0, min_log_number_to_keep is 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: dcdf6145-9d9a-452f-b56e-35ebdfe48072
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146865393731, "job": 1, "event": "recovery_started", "wal_files": [4]}
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #4 mode 2
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146865396177, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 8, "file_size": 1944, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 1, "largest_seqno": 5, "table_properties": {"data_size": 819, "index_size": 31, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 115, "raw_average_key_size": 23, "raw_value_size": 696, "raw_average_value_size": 139, "num_data_blocks": 1, "num_entries": 5, "num_filter_entries": 5, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "XOB2YHNWICP08MYIBD38", "orig_file_number": 8, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146865396297, "job": 1, "event": "recovery_finished"}
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: [db/version_set.cc:5047] Creating manifest 10
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000004.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x561f4daf0e00
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: DB pointer 0x561f4db7a000
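The recovery_started / table_file_creation / recovery_finished lines are RocksDB's structured EVENT_LOG_v1 stream: everything after the "EVENT_LOG_v1 " marker is a JSON object, so the WAL replay above (log #4 flushed into the 1944-byte SST file 8) can be traced mechanically rather than by eye. A minimal sketch, assuming one JSON payload per line as in this journal:

    import json

    def iter_event_log(lines):
        """Yield the JSON payload of each RocksDB EVENT_LOG_v1 line (sketch)."""
        marker = "EVENT_LOG_v1 "
        for line in lines:
            idx = line.find(marker)
            if idx != -1:
                yield json.loads(line[idx + len(marker):])

    # For the three events above this yields, in order:
    #   "recovery_started", "table_file_creation", "recovery_finished"
    # e.g.: [e["event"] for e in iter_event_log(open("mon-journal.txt"))]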
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 01:41:05 compute-0 ceph-mon[191519]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 0.0 total, 0.0 interval
                                            Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
                                            Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
                                            Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.90 KB   0.2      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.8      0.00              0.00         1    0.002       0      0       0.0       0.0
                                             Sum      1/0    1.90 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.8      0.00              0.00         1    0.002       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.8      0.00              0.00         1    0.002       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.8      0.00              0.00         1    0.002       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.0 total, 0.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.12 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.12 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x561f4dac71f0#2 capacity: 512.00 MB usage: 1.17 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 4.3e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.95 KB,0.000181794%) FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.11 KB,2.08616e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
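This first DUMPING STATS block is expected to be almost entirely zeros: the store was created moments ago and holds a single 1.90 KB L0 file from the WAL flush. Two details worth knowing when reading later dumps: W-Amp is bytes physically written per byte ingested (a bare flush rewrites data exactly once, hence the 1.0 above), and the block cache "occupancy: 18446744073709551615" is 2^64-1, almost certainly an unpopulated uint64 counter for Ceph's BinnedLRUCache rather than a real entry count. The W-Amp arithmetic, as a toy sketch:

    def write_amp(ingest_bytes, written_bytes):
        """Rough write amplification: bytes written per byte ingested (sketch).
        Returns NaN for an empty interval, as in the all-zero rows above."""
        return written_bytes / ingest_bytes if ingest_bytes else float("nan")

    print(write_amp(1944, 1944))  # 1.0 -- the single flushed SST from recovery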
Oct 11 01:41:05 compute-0 ceph-mon[191519]: starting mon.compute-0 rank 0 at public addrs [v2:192.168.122.100:3300/0,v1:192.168.122.100:6789/0] at bind addrs [v2:192.168.122.100:3300/0,v1:192.168.122.100:6789/0] mon_data /var/lib/ceph/mon/ceph-compute-0 fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@-1(???) e0 preinit fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@-1(probing) e0  my rank is now 0 (was -1)
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(probing) e0 win_standalone_election
Oct 11 01:41:05 compute-0 ceph-mon[191519]: paxos.0).electionLogic(0) init, first boot, initializing epoch at 1 
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(electing) e0 collect_metadata vda: no unique device id for vda: fallback method has no model nor serial
Oct 11 01:41:05 compute-0 ceph-mon[191519]: log_channel(cluster) log [INF] : mon.compute-0 is new leader, mons compute-0 in quorum (ranks 0)
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).osd e0 create_pending setting backfillfull_ratio = 0.9
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).osd e0 create_pending setting full_ratio = 0.95
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).osd e0 create_pending setting nearfull_ratio = 0.85
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).osd e0 do_prune osdmap full prune enabled
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).osd e0 encode_pending skipping prime_pg_temp; mapping job did not start
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader) e0 _apply_compatset_features enabling new quorum features: compat={},rocompat={},incompat={4=support erasure code pools,5=new-style osdmap encoding,6=support isa/lrc erasure code,7=support shec erasure code}
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).paxosservice(auth 0..0) refresh upgraded, format 3 -> 0
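With a single mon there is no peer to canvass, so the probe short-circuits into win_standalone_election: rank 0, election epoch initialized at 1, and the new leader's OSDMonitor seeds its pending osdmap with the default capacity thresholds (nearfull 0.85 < backfillfull 0.9 < full 0.95). Ceph expects that ordering and raises the OSD_OUT_OF_ORDER_FULL health warning when it is violated; as a pure-arithmetic sketch of the invariant (function name hypothetical):

    def ratios_ordered(nearfull=0.85, backfillfull=0.90, full=0.95):
        """The warning thresholds must stay below the hard-stop full ratio,
        mirroring the create_pending defaults logged above (sketch)."""
        return 0.0 < nearfull < backfillfull < full <= 1.0

    assert ratios_ordered()                 # the defaults above
    assert not ratios_ordered(full=0.80)    # would trip OSD_OUT_OF_ORDER_FULL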
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(probing) e1 win_standalone_election
Oct 11 01:41:05 compute-0 ceph-mon[191519]: paxos.0).electionLogic(2) init, last seen epoch 2
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(electing) e1 collect_metadata vda: no unique device id for vda: fallback method has no model nor serial
Oct 11 01:41:05 compute-0 ceph-mon[191519]: log_channel(cluster) log [INF] : mon.compute-0 is new leader, mons compute-0 in quorum (ranks 0)
Oct 11 01:41:05 compute-0 ceph-mon[191519]: log_channel(cluster) log [DBG] : monmap e1: 1 mons at {compute-0=[v2:192.168.122.100:3300/0,v1:192.168.122.100:6789/0]} removed_ranks: {} disallowed_leaders: {}
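Monitor addresses are logged as an addrvec: msgr2 on port 3300 and legacy msgr1 on 6789, each with a /nonce suffix. Pulling these apart is a one-line regex; a sketch assuming the bracketed form shown above (helper name hypothetical):

    import re

    ADDR_RE = re.compile(r"(v[12]):([0-9.]+):(\d+)/\d+")

    def parse_addrvec(s):
        """Split '[v2:ip:port/nonce,v1:ip:port/nonce]' into tuples (sketch)."""
        return [(proto, ip, int(port)) for proto, ip, port in ADDR_RE.findall(s)]

    print(parse_addrvec("[v2:192.168.122.100:3300/0,v1:192.168.122.100:6789/0]"))
    # [('v2', '192.168.122.100', 3300), ('v1', '192.168.122.100', 6789)]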
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader) e1 collect_metadata vda: no unique device id for vda: fallback method has no model nor serial
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mgrc update_daemon_metadata mon.compute-0 metadata {addrs=[v2:192.168.122.100:3300/0,v1:192.168.122.100:6789/0],arch=x86_64,ceph_release=reef,ceph_version=ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable),ceph_version_short=18.2.7,ceph_version_when_created=ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable),compression_algorithms=none, snappy, zlib, zstd, lz4,container_hostname=compute-0,container_image=quay.io/ceph/ceph:v18,cpu=AMD EPYC-Rome Processor,created_at=2025-10-11T01:41:02.203565Z,device_ids=,device_paths=vda=/dev/disk/by-path/pci-0000:00:04.0,devices=vda,distro=centos,distro_description=CentOS Stream 9,distro_version=9,hostname=compute-0,kernel_description=#1 SMP PREEMPT_DYNAMIC Tue Sep 30 07:37:35 UTC 2025,kernel_version=5.14.0-621.el9.x86_64,mem_swap_kb=1048572,mem_total_kb=7864348,os=Linux}
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).osd e0 create_pending setting backfillfull_ratio = 0.9
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).osd e0 create_pending setting full_ratio = 0.95
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).osd e0 create_pending setting nearfull_ratio = 0.85
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).osd e0 do_prune osdmap full prune enabled
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).osd e0 encode_pending skipping prime_pg_temp; mapping job did not start
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader) e1 _apply_compatset_features enabling new quorum features: compat={},rocompat={},incompat={8=support monmap features,9=luminous ondisk layout,10=mimic ondisk layout,11=nautilus ondisk layout,12=octopus ondisk layout,13=pacific ondisk layout,14=quincy ondisk layout,15=reef ondisk layout}
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).mds e1 new map
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).mds e1 print_map
                                            e1
                                            enable_multiple, ever_enabled_multiple: 1,1
                                            default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2}
                                            legacy client fscid: -1
                                             
                                            No filesystems configured
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).paxosservice(auth 0..0) refresh upgraded, format 3 -> 0
Oct 11 01:41:05 compute-0 ceph-mon[191519]: log_channel(cluster) log [DBG] : fsmap 
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).osd e0 _set_cache_ratios kv ratio 0.25 inc ratio 0.375 full ratio 0.375
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).osd e0 register_cache_with_pcm pcm target: 2147483648 pcm max: 1020054732 pcm min: 134217728 inc_osd_cache size: 1
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).osd e1 e1: 0 total, 0 up, 0 in
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).osd e1 crush map has features 3314932999778484224, adjusting msgr requires
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).osd e1 crush map has features 288514050185494528, adjusting msgr requires
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).osd e1 crush map has features 288514050185494528, adjusting msgr requires
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).osd e1 crush map has features 288514050185494528, adjusting msgr requires
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mkfs 3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:41:05 compute-0 podman[191520]: 2025-10-11 01:41:05.469968201 +0000 UTC m=+0.070250765 container create d366ce6f3a1b9735b84431b484b30cb5f6bb3228c6b3055b2e5c2f538bf068f6 (image=quay.io/ceph/ceph:v18, name=infallible_shaw, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0@0(leader).paxosservice(auth 1..1) refresh upgraded, format 0 -> 3
Oct 11 01:41:05 compute-0 ceph-mon[191519]: log_channel(cluster) log [DBG] : osdmap e1: 0 total, 0 up, 0 in
Oct 11 01:41:05 compute-0 ceph-mon[191519]: log_channel(cluster) log [DBG] : mgrmap e1: no daemons active
Oct 11 01:41:05 compute-0 ceph-mon[191519]: mon.compute-0 is new leader, mons compute-0 in quorum (ranks 0)
Oct 11 01:41:05 compute-0 systemd[1]: Started libpod-conmon-d366ce6f3a1b9735b84431b484b30cb5f6bb3228c6b3055b2e5c2f538bf068f6.scope.
Oct 11 01:41:05 compute-0 podman[191520]: 2025-10-11 01:41:05.449505045 +0000 UTC m=+0.049787629 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:05 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5790a12fbfe93da3d0e31658194e2b753f500651fce6e73e9ba3a4af59cafd5e/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5790a12fbfe93da3d0e31658194e2b753f500651fce6e73e9ba3a4af59cafd5e/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5790a12fbfe93da3d0e31658194e2b753f500651fce6e73e9ba3a4af59cafd5e/merged/var/lib/ceph/mon/ceph-compute-0 supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:05 compute-0 podman[191520]: 2025-10-11 01:41:05.602552712 +0000 UTC m=+0.202835306 container init d366ce6f3a1b9735b84431b484b30cb5f6bb3228c6b3055b2e5c2f538bf068f6 (image=quay.io/ceph/ceph:v18, name=infallible_shaw, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_REF=reef, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 01:41:05 compute-0 podman[191520]: 2025-10-11 01:41:05.622829618 +0000 UTC m=+0.223112212 container start d366ce6f3a1b9735b84431b484b30cb5f6bb3228c6b3055b2e5c2f538bf068f6 (image=quay.io/ceph/ceph:v18, name=infallible_shaw, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:41:05 compute-0 podman[191520]: 2025-10-11 01:41:05.629034134 +0000 UTC m=+0.229316828 container attach d366ce6f3a1b9735b84431b484b30cb5f6bb3228c6b3055b2e5c2f538bf068f6 (image=quay.io/ceph/ceph:v18, name=infallible_shaw, CEPH_REF=reef, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0)
Oct 11 01:41:06 compute-0 ceph-mon[191519]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status"} v 0) v1
Oct 11 01:41:06 compute-0 ceph-mon[191519]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3505406778' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
Oct 11 01:41:06 compute-0 infallible_shaw[191574]:   cluster:
Oct 11 01:41:06 compute-0 infallible_shaw[191574]:     id:     3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:41:06 compute-0 infallible_shaw[191574]:     health: HEALTH_OK
Oct 11 01:41:06 compute-0 infallible_shaw[191574]:  
Oct 11 01:41:06 compute-0 infallible_shaw[191574]:   services:
Oct 11 01:41:06 compute-0 infallible_shaw[191574]:     mon: 1 daemons, quorum compute-0 (age 0.638009s)
Oct 11 01:41:06 compute-0 infallible_shaw[191574]:     mgr: no daemons active
Oct 11 01:41:06 compute-0 infallible_shaw[191574]:     osd: 0 osds: 0 up, 0 in
Oct 11 01:41:06 compute-0 infallible_shaw[191574]:  
Oct 11 01:41:06 compute-0 infallible_shaw[191574]:   data:
Oct 11 01:41:06 compute-0 infallible_shaw[191574]:     pools:   0 pools, 0 pgs
Oct 11 01:41:06 compute-0 infallible_shaw[191574]:     objects: 0 objects, 0 B
Oct 11 01:41:06 compute-0 infallible_shaw[191574]:     usage:   0 B used, 0 B / 0 B avail
Oct 11 01:41:06 compute-0 infallible_shaw[191574]:     pgs:     
Oct 11 01:41:06 compute-0 infallible_shaw[191574]:  
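The one-shot container infallible_shaw is cephadm running `ceph status` inside the quay.io/ceph/ceph:v18 image; the mon-side handle_command and audit-dispatch lines above are the same request seen from the server. For scripting, the command also takes --format json; a minimal sketch, assuming an admin keyring on the host (the exact JSON field names are an assumption based on what Reef commonly emits):

    import json
    import subprocess

    def cluster_health():
        """Run `ceph status --format json` and pull out a summary (sketch)."""
        out = subprocess.run(
            ["ceph", "status", "--format", "json"],
            check=True, capture_output=True, text=True,
        ).stdout
        status = json.loads(out)
        return status["health"]["status"], status["monmap"]["num_mons"]

    # Expected at this point in the log: ('HEALTH_OK', 1) -- one mon, no mgr/osd.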
Oct 11 01:41:06 compute-0 systemd[1]: libpod-d366ce6f3a1b9735b84431b484b30cb5f6bb3228c6b3055b2e5c2f538bf068f6.scope: Deactivated successfully.
Oct 11 01:41:06 compute-0 podman[191520]: 2025-10-11 01:41:06.120009128 +0000 UTC m=+0.720291732 container died d366ce6f3a1b9735b84431b484b30cb5f6bb3228c6b3055b2e5c2f538bf068f6 (image=quay.io/ceph/ceph:v18, name=infallible_shaw, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:41:06 compute-0 systemd[1]: var-lib-containers-storage-overlay-5790a12fbfe93da3d0e31658194e2b753f500651fce6e73e9ba3a4af59cafd5e-merged.mount: Deactivated successfully.
Oct 11 01:41:06 compute-0 podman[191520]: 2025-10-11 01:41:06.205883053 +0000 UTC m=+0.806165627 container remove d366ce6f3a1b9735b84431b484b30cb5f6bb3228c6b3055b2e5c2f538bf068f6 (image=quay.io/ceph/ceph:v18, name=infallible_shaw, ceph=True, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2)
Oct 11 01:41:06 compute-0 systemd[1]: libpod-conmon-d366ce6f3a1b9735b84431b484b30cb5f6bb3228c6b3055b2e5c2f538bf068f6.scope: Deactivated successfully.
Oct 11 01:41:06 compute-0 podman[191611]: 2025-10-11 01:41:06.305633237 +0000 UTC m=+0.064003506 container create 04910b057feb76fd8130971c43fa298035cd2680f0945d62c722bd0570ac8d01 (image=quay.io/ceph/ceph:v18, name=relaxed_carver, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:41:06 compute-0 systemd[1]: Started libpod-conmon-04910b057feb76fd8130971c43fa298035cd2680f0945d62c722bd0570ac8d01.scope.
Oct 11 01:41:06 compute-0 podman[191611]: 2025-10-11 01:41:06.283518055 +0000 UTC m=+0.041888424 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:06 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4cfd0b76dc1370947bc118bcfafcbf94a3f90d0f5b1a952bc05a29d857ecbb51/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4cfd0b76dc1370947bc118bcfafcbf94a3f90d0f5b1a952bc05a29d857ecbb51/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4cfd0b76dc1370947bc118bcfafcbf94a3f90d0f5b1a952bc05a29d857ecbb51/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4cfd0b76dc1370947bc118bcfafcbf94a3f90d0f5b1a952bc05a29d857ecbb51/merged/var/lib/ceph/mon/ceph-compute-0 supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:06 compute-0 podman[191611]: 2025-10-11 01:41:06.472944504 +0000 UTC m=+0.231314853 container init 04910b057feb76fd8130971c43fa298035cd2680f0945d62c722bd0570ac8d01 (image=quay.io/ceph/ceph:v18, name=relaxed_carver, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2)
Oct 11 01:41:06 compute-0 ceph-mon[191519]: mon.compute-0 is new leader, mons compute-0 in quorum (ranks 0)
Oct 11 01:41:06 compute-0 ceph-mon[191519]: monmap e1: 1 mons at {compute-0=[v2:192.168.122.100:3300/0,v1:192.168.122.100:6789/0]} removed_ranks: {} disallowed_leaders: {}
Oct 11 01:41:06 compute-0 ceph-mon[191519]: fsmap 
Oct 11 01:41:06 compute-0 ceph-mon[191519]: osdmap e1: 0 total, 0 up, 0 in
Oct 11 01:41:06 compute-0 ceph-mon[191519]: mgrmap e1: no daemons active
Oct 11 01:41:06 compute-0 ceph-mon[191519]: from='client.? 192.168.122.100:0/3505406778' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
Oct 11 01:41:06 compute-0 podman[191611]: 2025-10-11 01:41:06.496681642 +0000 UTC m=+0.255051951 container start 04910b057feb76fd8130971c43fa298035cd2680f0945d62c722bd0570ac8d01 (image=quay.io/ceph/ceph:v18, name=relaxed_carver, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:41:06 compute-0 podman[191611]: 2025-10-11 01:41:06.505156848 +0000 UTC m=+0.263527167 container attach 04910b057feb76fd8130971c43fa298035cd2680f0945d62c722bd0570ac8d01 (image=quay.io/ceph/ceph:v18, name=relaxed_carver, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0)
Oct 11 01:41:06 compute-0 ceph-mon[191519]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config assimilate-conf"} v 0) v1
Oct 11 01:41:06 compute-0 ceph-mon[191519]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1179648382' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch
Oct 11 01:41:06 compute-0 ceph-mon[191519]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1179648382' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished
Oct 11 01:41:06 compute-0 relaxed_carver[191626]: 
Oct 11 01:41:06 compute-0 relaxed_carver[191626]: [global]
Oct 11 01:41:06 compute-0 relaxed_carver[191626]:         fsid = 3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:41:06 compute-0 relaxed_carver[191626]:         mon_host = [v2:192.168.122.100:3300,v1:192.168.122.100:6789]
Oct 11 01:41:06 compute-0 relaxed_carver[191626]:         osd_crush_chooseleaf_type = 0
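relaxed_carver is the assimilate-conf step: cephadm feeds the bootstrap ceph.conf into the mon's config store, and the [global] stanza echoed above is what remains file-side afterwards -- fsid, mon_host, and osd_crush_chooseleaf_type = 0, the override that lets CRUSH place replicas across OSDs instead of hosts on a single-node cluster. The stanza is plain INI; a sketch reproducing it with Python's configparser (values copied from the log):

    import configparser
    import io

    conf = configparser.ConfigParser()
    conf["global"] = {
        "fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
        "mon_host": "[v2:192.168.122.100:3300,v1:192.168.122.100:6789]",
        # chooseleaf over OSDs, not hosts: needed for single-host clusters
        "osd_crush_chooseleaf_type": "0",
    }
    buf = io.StringIO()
    conf.write(buf)
    print(buf.getvalue())   # mirrors the [global] block printed above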
Oct 11 01:41:06 compute-0 systemd[1]: libpod-04910b057feb76fd8130971c43fa298035cd2680f0945d62c722bd0570ac8d01.scope: Deactivated successfully.
Oct 11 01:41:06 compute-0 podman[191611]: 2025-10-11 01:41:06.978719215 +0000 UTC m=+0.737089524 container died 04910b057feb76fd8130971c43fa298035cd2680f0945d62c722bd0570ac8d01 (image=quay.io/ceph/ceph:v18, name=relaxed_carver, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 01:41:07 compute-0 systemd[1]: var-lib-containers-storage-overlay-4cfd0b76dc1370947bc118bcfafcbf94a3f90d0f5b1a952bc05a29d857ecbb51-merged.mount: Deactivated successfully.
Oct 11 01:41:07 compute-0 podman[191611]: 2025-10-11 01:41:07.073099457 +0000 UTC m=+0.831469756 container remove 04910b057feb76fd8130971c43fa298035cd2680f0945d62c722bd0570ac8d01 (image=quay.io/ceph/ceph:v18, name=relaxed_carver, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS)
Oct 11 01:41:07 compute-0 systemd[1]: libpod-conmon-04910b057feb76fd8130971c43fa298035cd2680f0945d62c722bd0570ac8d01.scope: Deactivated successfully.
Oct 11 01:41:07 compute-0 podman[191665]: 2025-10-11 01:41:07.2089632 +0000 UTC m=+0.091170954 container create 14da9313686d6c92041a747e4cf745fab808e15a4a37f2f5071af49b7b3ef9ef (image=quay.io/ceph/ceph:v18, name=frosty_shaw, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 01:41:07 compute-0 podman[191665]: 2025-10-11 01:41:07.175042957 +0000 UTC m=+0.057250761 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:07 compute-0 systemd[1]: Started libpod-conmon-14da9313686d6c92041a747e4cf745fab808e15a4a37f2f5071af49b7b3ef9ef.scope.
Oct 11 01:41:07 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/463ff1ec698989ed3c9067281f19530c92869c6a4e0bdd0d1dcfcfdfa819f65f/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/463ff1ec698989ed3c9067281f19530c92869c6a4e0bdd0d1dcfcfdfa819f65f/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/463ff1ec698989ed3c9067281f19530c92869c6a4e0bdd0d1dcfcfdfa819f65f/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/463ff1ec698989ed3c9067281f19530c92869c6a4e0bdd0d1dcfcfdfa819f65f/merged/var/lib/ceph/mon/ceph-compute-0 supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:07 compute-0 podman[191665]: 2025-10-11 01:41:07.377868371 +0000 UTC m=+0.260076115 container init 14da9313686d6c92041a747e4cf745fab808e15a4a37f2f5071af49b7b3ef9ef (image=quay.io/ceph/ceph:v18, name=frosty_shaw, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 01:41:07 compute-0 podman[191665]: 2025-10-11 01:41:07.398495175 +0000 UTC m=+0.280702929 container start 14da9313686d6c92041a747e4cf745fab808e15a4a37f2f5071af49b7b3ef9ef (image=quay.io/ceph/ceph:v18, name=frosty_shaw, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2)
Oct 11 01:41:07 compute-0 podman[191665]: 2025-10-11 01:41:07.404758865 +0000 UTC m=+0.286966609 container attach 14da9313686d6c92041a747e4cf745fab808e15a4a37f2f5071af49b7b3ef9ef (image=quay.io/ceph/ceph:v18, name=frosty_shaw, ceph=True, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:41:07 compute-0 ceph-mon[191519]: from='client.? 192.168.122.100:0/1179648382' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch
Oct 11 01:41:07 compute-0 ceph-mon[191519]: from='client.? 192.168.122.100:0/1179648382' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished
Oct 11 01:41:07 compute-0 ceph-mon[191519]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:41:07 compute-0 ceph-mon[191519]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2052165626' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:41:07 compute-0 systemd[1]: libpod-14da9313686d6c92041a747e4cf745fab808e15a4a37f2f5071af49b7b3ef9ef.scope: Deactivated successfully.
Oct 11 01:41:07 compute-0 podman[191707]: 2025-10-11 01:41:07.926847454 +0000 UTC m=+0.056122301 container died 14da9313686d6c92041a747e4cf745fab808e15a4a37f2f5071af49b7b3ef9ef (image=quay.io/ceph/ceph:v18, name=frosty_shaw, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.933 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the processing to take longer than expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.935 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.936 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f8ed27f97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.936 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb8c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.937 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.938 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.938 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb1a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.938 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb200>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.938 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.938 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed2874260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.939 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.939 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.939 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed3ab42f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.939 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb350>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb90>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fa390>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb3b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbbf0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.940 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f8ed27fbad0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.942 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f8ed27faff0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.942 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f8ed27fb110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.943 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbc80>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27f9610>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb620>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbe30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbec0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbf50>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
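
The burst of "Registering pollster" entries above shows the polling manager handing each stevedore extension to one shared concurrent.futures.ThreadPoolExecutor, threading the same per-cycle cache, pollster history, and discovery cache through every submission. A minimal sketch of that fan-out pattern, using simplified stand-ins rather than ceilometer's real Extension and AgentManager classes:

    import concurrent.futures

    # Per-cycle state mirroring the fields quoted in the log lines:
    # an empty cache, the history of pollsters queued so far, and the
    # discovery cache keyed by discovery method name.
    cache = {}  # stays empty in this sketch, as in the log
    history = {"disk.device.capacity": []}
    discovery_cache = {"local_instances": []}

    def run_pollster(name):
        # Stand-in for the pollster body: consult the discovery cache
        # and bail out when the cycle found no resources.
        resources = discovery_cache.get("local_instances", [])
        if not resources:
            return "Skip pollster %s, no resources found this cycle" % name
        return "Polled %s on %d resource(s)" % (name, len(resources))

    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = []
        for name in ("disk.device.read.bytes", "memory.usage"):
            futures.append(executor.submit(run_pollster, name))
            history.setdefault(name, [])  # record registration, as in the log
        for fut in concurrent.futures.as_completed(futures):
            print(fut.result())
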
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.943 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f8ed27fb170>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.946 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f8ed27fb1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.946 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f8ed27fb230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.946 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f8ed2874230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f8ed27fb290>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f8ed5778d70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f8ed27fb650>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f8ed27fbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f8ed27fb320>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f8ed27fbb60>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f8ed27fa3f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f8ed27fb380>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f8ed27fbbc0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f8ed27fbc50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f8ed27fbce0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f8ed27fbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f8ed27fb590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f8ed27f95e0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f8ed27fb5f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f8ed27fbe00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f8ed27fbe90>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f8ed27fbf20>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.953 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.953 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.953 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.955 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.955 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.955 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.955 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.955 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.955 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.956 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.956 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.956 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.956 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.956 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.956 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.956 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:41:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:41:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
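
The cycle above runs discovery once, caches the empty local_instances result, and every pollster that depends on it is then skipped and immediately marked finished, since no Nova instances are running on this compute node yet. A sketch of that discover-once, skip-many flow (hypothetical helper names, not ceilometer's actual API):

    def discover(method, discovery_cache):
        # Compute the resource list once per cycle, then reuse the
        # cached value for every subsequent pollster.
        if method not in discovery_cache:
            discovery_cache[method] = []  # no local instances found
        return discovery_cache[method]

    def run_cycle(pollsters):
        discovery_cache = {}
        for name in pollsters:
            if not discover("local_instances", discovery_cache):
                print("Skip pollster %s, no resources found this cycle" % name)
            print("Finished processing pollster [%s]." % name)

    run_cycle(["cpu", "memory.usage", "network.incoming.bytes"])
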
Oct 11 01:41:07 compute-0 systemd[1]: var-lib-containers-storage-overlay-463ff1ec698989ed3c9067281f19530c92869c6a4e0bdd0d1dcfcfdfa819f65f-merged.mount: Deactivated successfully.
Oct 11 01:41:08 compute-0 podman[191707]: 2025-10-11 01:41:08.022519455 +0000 UTC m=+0.151794222 container remove 14da9313686d6c92041a747e4cf745fab808e15a4a37f2f5071af49b7b3ef9ef (image=quay.io/ceph/ceph:v18, name=frosty_shaw, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:41:08 compute-0 systemd[1]: libpod-conmon-14da9313686d6c92041a747e4cf745fab808e15a4a37f2f5071af49b7b3ef9ef.scope: Deactivated successfully.
Oct 11 01:41:08 compute-0 systemd[1]: Stopping Ceph mon.compute-0 for 3c7617c3-7a20-523e-a9de-20c0d6ba41da...
Oct 11 01:41:08 compute-0 ceph-mon[191519]: received  signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false  (PID: 1) UID: 0
Oct 11 01:41:08 compute-0 ceph-mon[191519]: mon.compute-0@0(leader) e1 *** Got Signal Terminated ***
Oct 11 01:41:08 compute-0 ceph-mon[191519]: mon.compute-0@0(leader) e1 shutdown
Oct 11 01:41:08 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0[191515]: 2025-10-11T01:41:08.432+0000 7f13fd761640 -1 received  signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false  (PID: 1) UID: 0
Oct 11 01:41:08 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0[191515]: 2025-10-11T01:41:08.432+0000 7f13fd761640 -1 mon.compute-0@0(leader) e1 *** Got Signal Terminated ***
Oct 11 01:41:08 compute-0 ceph-mon[191519]: rocksdb: [db/db_impl/db_impl.cc:496] Shutdown: canceling all background work
Oct 11 01:41:08 compute-0 ceph-mon[191519]: rocksdb: [db/db_impl/db_impl.cc:704] Shutdown complete
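
The monitor's stop is an orderly SIGTERM shutdown: /run/podman-init, running as PID 1 inside the container, forwards the Terminated signal; ceph-mon logs it and RocksDB cancels its background work before the process exits. A minimal sketch of the same pattern for any long-running service (illustrative only, not ceph code):

    import signal
    import threading

    stop = threading.Event()

    def on_terminate(signum, frame):
        # Log the signal the way ceph-mon does above, then trigger an
        # orderly shutdown instead of dying mid-write.
        print("received signal: %s" % signal.Signals(signum).name)
        stop.set()

    signal.signal(signal.SIGTERM, on_terminate)

    # Main service loop: wake once a second until asked to stop.
    while not stop.wait(timeout=1.0):
        pass  # periodic work goes here
    print("shutdown complete")  # analogous to rocksdb's final message
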
Oct 11 01:41:08 compute-0 podman[191751]: 2025-10-11 01:41:08.486913821 +0000 UTC m=+0.141552824 container died 30c4480e08e356d11ccada0a4a93adca906bb488cf48c66ab1d12e0fc0dc80b9 (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3)
Oct 11 01:41:08 compute-0 systemd[1]: var-lib-containers-storage-overlay-e3b3c9919d0c2def63c03b4d926e7cc287ec31084d8d968c1bd5824a59ba04f8-merged.mount: Deactivated successfully.
Oct 11 01:41:08 compute-0 podman[191751]: 2025-10-11 01:41:08.565521494 +0000 UTC m=+0.220160477 container remove 30c4480e08e356d11ccada0a4a93adca906bb488cf48c66ab1d12e0fc0dc80b9 (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0)
Oct 11 01:41:08 compute-0 bash[191751]: ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0
Oct 11 01:41:08 compute-0 systemd[1]: ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da@mon.compute-0.service: Deactivated successfully.
Oct 11 01:41:08 compute-0 systemd[1]: Stopped Ceph mon.compute-0 for 3c7617c3-7a20-523e-a9de-20c0d6ba41da.
Oct 11 01:41:08 compute-0 systemd[1]: ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da@mon.compute-0.service: Consumed 2.205s CPU time.
Oct 11 01:41:08 compute-0 systemd[1]: Starting Ceph mon.compute-0 for 3c7617c3-7a20-523e-a9de-20c0d6ba41da...
Oct 11 01:41:08 compute-0 podman[191803]: 2025-10-11 01:41:08.998294528 +0000 UTC m=+0.116881927 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 01:41:09 compute-0 podman[191805]: 2025-10-11 01:41:09.056302947 +0000 UTC m=+0.160401094 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.component=ubi9-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, managed_by=edpm_ansible, maintainer=Red Hat, Inc., io.openshift.tags=base rhel9, io.openshift.expose-services=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, name=ubi9, release=1214.1726694543, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release-0.7.12=, vcs-type=git, build-date=2024-09-18T21:23:30, vendor=Red Hat, Inc., config_id=edpm, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9., version=9.4, container_name=kepler, io.k8s.display-name=Red Hat Universal Base Image 9, distribution-scope=public, io.buildah.version=1.29.0, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543)
Oct 11 01:41:09 compute-0 podman[191804]: 2025-10-11 01:41:09.087079696 +0000 UTC m=+0.199754664 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller)
Oct 11 01:41:09 compute-0 podman[191911]: 2025-10-11 01:41:09.297118079 +0000 UTC m=+0.084985160 container create ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0)
Oct 11 01:41:09 compute-0 podman[191911]: 2025-10-11 01:41:09.25910933 +0000 UTC m=+0.046976491 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9256116990ce5d8ffe20dc510c504658adb4ca59ff21de492e090438ec633628/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9256116990ce5d8ffe20dc510c504658adb4ca59ff21de492e090438ec633628/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9256116990ce5d8ffe20dc510c504658adb4ca59ff21de492e090438ec633628/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9256116990ce5d8ffe20dc510c504658adb4ca59ff21de492e090438ec633628/merged/var/lib/ceph/mon/ceph-compute-0 supports timestamps until 2038 (0x7fffffff)
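
The kernel's "supports timestamps until 2038 (0x7fffffff)" warnings mean these bind-mounted paths sit on XFS filesystems formatted without the newer big-timestamp inode format, so their on-disk times are capped at the largest 32-bit signed epoch second. What that cap works out to:

    from datetime import datetime, timezone

    # 0x7fffffff seconds after the Unix epoch, the limit quoted by
    # the kernel message above.
    limit = 0x7FFFFFFF
    print(datetime.fromtimestamp(limit, tz=timezone.utc))
    # -> 2038-01-19 03:14:07+00:00
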
Oct 11 01:41:09 compute-0 podman[191911]: 2025-10-11 01:41:09.428733729 +0000 UTC m=+0.216600840 container init ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=reef, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True)
Oct 11 01:41:09 compute-0 podman[191911]: 2025-10-11 01:41:09.452409453 +0000 UTC m=+0.240276564 container start ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:41:09 compute-0 bash[191911]: ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e
Oct 11 01:41:09 compute-0 systemd[1]: Started Ceph mon.compute-0 for 3c7617c3-7a20-523e-a9de-20c0d6ba41da.
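
Between "Stopping" and "Started", the unit replaces the container outright: the old one is removed, then a fresh one is pulled, created, initialized, and started from quay.io/ceph/ceph:v18. A stripped-down sketch of that replace-and-restart sequence driven from Python; the real cephadm-generated unit passes many more mounts and flags than shown here:

    import subprocess

    NAME = "ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0"
    IMAGE = "quay.io/ceph/ceph:v18"

    # Remove any leftover container with this name, ignoring the error
    # if it is already gone, then create and start a new one.
    subprocess.run(["podman", "rm", "-f", NAME], check=False)
    subprocess.run(["podman", "run", "-d", "--name", NAME, IMAGE],
                   check=True)
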
Oct 11 01:41:09 compute-0 ceph-mon[191930]: set uid:gid to 167:167 (ceph:ceph)
Oct 11 01:41:09 compute-0 ceph-mon[191930]: ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable), process ceph-mon, pid 2
Oct 11 01:41:09 compute-0 ceph-mon[191930]: pidfile_write: ignore empty --pid-file
Oct 11 01:41:09 compute-0 ceph-mon[191930]: load: jerasure load: lrc 
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: RocksDB version: 7.9.2
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Git sha 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Compile date 2025-05-06 23:30:25
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: DB SUMMARY
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: DB Session ID:  7YDO48KWFK8QJTXVXJNU
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: CURRENT file:  CURRENT
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: IDENTITY file:  IDENTITY
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: MANIFEST file:  MANIFEST-000010 size: 179 Bytes
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: SST files in /var/lib/ceph/mon/ceph-compute-0/store.db dir, Total Num: 1, files: 000008.sst 
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-compute-0/store.db: 000009.log size: 54564 ; 
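
The DB SUMMARY block above inventories the monitor's store.db: the CURRENT pointer, the IDENTITY file, a 179-byte MANIFEST, a single SST file, and a ~54 KB write-ahead log. A small sketch that groups a RocksDB directory's contents the same way; the path is the one from the log, and the function is purely illustrative:

    import os

    def summarize_store(path):
        # Bucket directory entries the way DB SUMMARY does: SST data
        # files, .log write-ahead logs, MANIFEST files, and the rest.
        groups = {"sst": [], "wal": [], "manifest": [], "other": []}
        for name in sorted(os.listdir(path)):
            size = os.path.getsize(os.path.join(path, name))
            if name.endswith(".sst"):
                groups["sst"].append((name, size))
            elif name.endswith(".log"):
                groups["wal"].append((name, size))
            elif name.startswith("MANIFEST"):
                groups["manifest"].append((name, size))
            else:
                groups["other"].append((name, size))
        return groups

    print(summarize_store("/var/lib/ceph/mon/ceph-compute-0/store.db"))
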
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                         Options.error_if_exists: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                       Options.create_if_missing: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                         Options.paranoid_checks: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.flush_verify_memtable_count: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                               Options.track_and_verify_wals_in_manifest: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:        Options.verify_sst_unique_id_in_manifest: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                                     Options.env: 0x55816d9e5c40
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                                      Options.fs: PosixFileSystem
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                                Options.info_log: 0x55816e487040
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                Options.max_file_opening_threads: 16
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                              Options.statistics: (nil)
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                               Options.use_fsync: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                       Options.max_log_file_size: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                  Options.max_manifest_file_size: 1073741824
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                   Options.log_file_time_to_roll: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                       Options.keep_log_file_num: 1000
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                    Options.recycle_log_file_num: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                         Options.allow_fallocate: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                        Options.allow_mmap_reads: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                       Options.allow_mmap_writes: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                        Options.use_direct_reads: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                        Options.use_direct_io_for_flush_and_compaction: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:          Options.create_missing_column_families: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                              Options.db_log_dir: 
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                                 Options.wal_dir: 
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                Options.table_cache_numshardbits: 6
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                         Options.WAL_ttl_seconds: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                       Options.WAL_size_limit_MB: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                        Options.max_write_batch_group_size_bytes: 1048576
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.manifest_preallocation_size: 4194304
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                     Options.is_fd_close_on_exec: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                   Options.advise_random_on_open: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                    Options.db_write_buffer_size: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                    Options.write_buffer_manager: 0x55816e496b40
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         Options.access_hint_on_compaction_start: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:           Options.random_access_max_buffer_size: 1048576
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                      Options.use_adaptive_mutex: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                            Options.rate_limiter: (nil)
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:     Options.sst_file_manager.rate_bytes_per_sec: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                       Options.wal_recovery_mode: 2
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                  Options.enable_thread_tracking: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                  Options.enable_pipelined_write: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                  Options.unordered_write: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         Options.allow_concurrent_memtable_write: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:      Options.enable_write_thread_adaptive_yield: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.write_thread_max_yield_usec: 100
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:            Options.write_thread_slow_yield_usec: 3
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                               Options.row_cache: None
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                              Options.wal_filter: None
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.avoid_flush_during_recovery: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.allow_ingest_behind: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.two_write_queues: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.manual_wal_flush: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.wal_compression: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.atomic_flush: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.avoid_unnecessary_blocking_io: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                 Options.persist_stats_to_disk: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                 Options.write_dbid_to_manifest: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                 Options.log_readahead_size: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                 Options.file_checksum_gen_factory: Unknown
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                 Options.best_efforts_recovery: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                Options.max_bgerror_resume_count: 2147483647
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:            Options.bgerror_resume_retry_interval: 1000000
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.allow_data_in_errors: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.db_host_id: __hostname__
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.enforce_single_del_contracts: true
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.max_background_jobs: 2
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.max_background_compactions: -1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.max_subcompactions: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.avoid_flush_during_shutdown: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:           Options.writable_file_max_buffer_size: 1048576
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.delayed_write_rate : 16777216
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.max_total_wal_size: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.delete_obsolete_files_period_micros: 21600000000
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                   Options.stats_dump_period_sec: 600
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                 Options.stats_persist_period_sec: 600
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                 Options.stats_history_buffer_size: 1048576
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                          Options.max_open_files: -1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                          Options.bytes_per_sync: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                      Options.wal_bytes_per_sync: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                   Options.strict_bytes_per_sync: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:       Options.compaction_readahead_size: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                  Options.max_background_flushes: -1
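
The DBOptions dump that ends here is a flat "Options.key: value" listing, which is easy to turn into structured data when comparing configurations across restarts. A small parser sketch; values are kept as strings since the dump mixes integers, hex pointers, and words like "(nil)":

    def parse_rocksdb_options(lines):
        # Extract "Options.name: value" pairs from rocksdb dump lines,
        # tolerating the syslog prefix and stray spaces before colons.
        opts = {}
        for line in lines:
            text = line.split("rocksdb:", 1)[-1].strip()
            if text.startswith("Options.") and ":" in text:
                key, _, value = text.partition(":")
                opts[key.replace("Options.", "", 1).strip()] = value.strip()
        return opts

    sample = [
        "rocksdb:   Options.max_open_files: -1",
        "rocksdb:   Options.max_background_jobs: 2",
    ]
    print(parse_rocksdb_options(sample))
    # -> {'max_open_files': '-1', 'max_background_jobs': '2'}
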
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Compression algorithms supported:
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         kZSTD supported: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         kXpressCompression supported: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         kBZip2Compression supported: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         kZSTDNotFinalCompression supported: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         kLZ4Compression supported: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         kZlibCompression supported: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         kLZ4HCCompression supported: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         kSnappyCompression supported: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Fast CRC32 supported: Supported on x86
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: DMutex implementation: pthread_mutex_t
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-compute-0/store.db/MANIFEST-000010
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:           Options.merge_operator: 
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:        Options.compaction_filter: None
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55816e486c40)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x55816e47f1f0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:        Options.write_buffer_size: 33554432
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:  Options.max_write_buffer_number: 2
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:          Options.compression: NoCompression
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.num_levels: 7
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:        Options.min_write_buffer_number_to_merge: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:      Options.level0_file_num_compaction_trigger: 4
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                Options.max_bytes_for_level_base: 268435456
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:          Options.max_bytes_for_level_multiplier: 10.000000
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-compute-0/store.db/MANIFEST-000010 succeeded,manifest_file_number is 10, next_file_number is 12, last_sequence is 5, log_number is 5,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 5
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 5
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: dcdf6145-9d9a-452f-b56e-35ebdfe48072
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146869506154, "job": 1, "event": "recovery_started", "wal_files": [9]}
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #9 mode 2
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146869510835, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 13, "file_size": 54153, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 8, "largest_seqno": 137, "table_properties": {"data_size": 52695, "index_size": 164, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 261, "raw_key_size": 3023, "raw_average_key_size": 30, "raw_value_size": 50297, "raw_average_value_size": 502, "num_data_blocks": 8, "num_entries": 100, "num_filter_entries": 100, "num_deletions": 3, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146869, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 13, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146869510935, "job": 1, "event": "recovery_finished"}
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:5047] Creating manifest 15
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000009.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x55816e4a8e00
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: DB pointer 0x55816e532000
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 01:41:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 0.0 total, 0.0 interval
                                            Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
                                            Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
                                            Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      2/0   54.78 KB   0.5      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0     12.0      0.00              0.00         1    0.004       0      0       0.0       0.0
                                             Sum      2/0   54.78 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0     12.0      0.00              0.00         1    0.004       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0     12.0      0.00              0.00         1    0.004       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0     12.0      0.00              0.00         1    0.004       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.0 total, 0.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 2.77 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 2.77 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x55816e47f1f0#2 capacity: 512.00 MB usage: 25.89 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 4.2e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,25.11 KB,0.00478923%) FilterBlock(2,0.42 KB,8.04663e-05%) IndexBlock(2,0.36 KB,6.85453e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
Oct 11 01:41:09 compute-0 ceph-mon[191930]: starting mon.compute-0 rank 0 at public addrs [v2:192.168.122.100:3300/0,v1:192.168.122.100:6789/0] at bind addrs [v2:192.168.122.100:3300/0,v1:192.168.122.100:6789/0] mon_data /var/lib/ceph/mon/ceph-compute-0 fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:41:09 compute-0 ceph-mon[191930]: mon.compute-0@-1(???) e1 preinit fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:41:09 compute-0 ceph-mon[191930]: mon.compute-0@-1(???).mds e1 new map
Oct 11 01:41:09 compute-0 ceph-mon[191930]: mon.compute-0@-1(???).mds e1 print_map
                                            e1
                                            enable_multiple, ever_enabled_multiple: 1,1
                                            default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2}
                                            legacy client fscid: -1
                                             
                                            No filesystems configured
Oct 11 01:41:09 compute-0 ceph-mon[191930]: mon.compute-0@-1(???).osd e1 crush map has features 3314932999778484224, adjusting msgr requires
Oct 11 01:41:09 compute-0 ceph-mon[191930]: mon.compute-0@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires
Oct 11 01:41:09 compute-0 ceph-mon[191930]: mon.compute-0@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires
Oct 11 01:41:09 compute-0 ceph-mon[191930]: mon.compute-0@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires
Oct 11 01:41:09 compute-0 ceph-mon[191930]: mon.compute-0@-1(???).paxosservice(auth 1..2) refresh upgraded, format 0 -> 3
Oct 11 01:41:09 compute-0 ceph-mon[191930]: mon.compute-0@-1(probing) e1  my rank is now 0 (was -1)
Oct 11 01:41:09 compute-0 ceph-mon[191930]: mon.compute-0@0(probing) e1 win_standalone_election
Oct 11 01:41:09 compute-0 ceph-mon[191930]: paxos.0).electionLogic(3) init, last seen epoch 3, mid-election, bumping
Oct 11 01:41:09 compute-0 ceph-mon[191930]: mon.compute-0@0(electing) e1 collect_metadata vda:  no unique device id for vda: fallback method has no model nor serial
Oct 11 01:41:09 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : mon.compute-0 is new leader, mons compute-0 in quorum (ranks 0)
Oct 11 01:41:09 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : monmap e1: 1 mons at {compute-0=[v2:192.168.122.100:3300/0,v1:192.168.122.100:6789/0]} removed_ranks: {} disallowed_leaders: {}
Oct 11 01:41:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 collect_metadata vda:  no unique device id for vda: fallback method has no model nor serial
Oct 11 01:41:09 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : fsmap 
Oct 11 01:41:09 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e1: 0 total, 0 up, 0 in
Oct 11 01:41:09 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : mgrmap e1: no daemons active
Oct 11 01:41:09 compute-0 podman[191931]: 2025-10-11 01:41:09.593615487 +0000 UTC m=+0.090695319 container create 9e678f90cb197cca3b58bdff69ca4f976afe6e5c34a91dfda90c91d59b143f1c (image=quay.io/ceph/ceph:v18, name=beautiful_dewdney, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:41:09 compute-0 ceph-mon[191930]: mon.compute-0 is new leader, mons compute-0 in quorum (ranks 0)
Oct 11 01:41:09 compute-0 ceph-mon[191930]: monmap e1: 1 mons at {compute-0=[v2:192.168.122.100:3300/0,v1:192.168.122.100:6789/0]} removed_ranks: {} disallowed_leaders: {}
Oct 11 01:41:09 compute-0 ceph-mon[191930]: fsmap 
Oct 11 01:41:09 compute-0 ceph-mon[191930]: osdmap e1: 0 total, 0 up, 0 in
Oct 11 01:41:09 compute-0 ceph-mon[191930]: mgrmap e1: no daemons active
Oct 11 01:41:09 compute-0 podman[191931]: 2025-10-11 01:41:09.556725808 +0000 UTC m=+0.053805700 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:09 compute-0 systemd[1]: Started libpod-conmon-9e678f90cb197cca3b58bdff69ca4f976afe6e5c34a91dfda90c91d59b143f1c.scope.
Oct 11 01:41:09 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d52f65f032477e88e7d74d42d6421ef5bbfe6994ba653547d5ed29515ae9c131/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d52f65f032477e88e7d74d42d6421ef5bbfe6994ba653547d5ed29515ae9c131/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d52f65f032477e88e7d74d42d6421ef5bbfe6994ba653547d5ed29515ae9c131/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:09 compute-0 podman[191931]: 2025-10-11 01:41:09.745309403 +0000 UTC m=+0.242389275 container init 9e678f90cb197cca3b58bdff69ca4f976afe6e5c34a91dfda90c91d59b143f1c (image=quay.io/ceph/ceph:v18, name=beautiful_dewdney, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:41:09 compute-0 podman[191931]: 2025-10-11 01:41:09.767941093 +0000 UTC m=+0.265020945 container start 9e678f90cb197cca3b58bdff69ca4f976afe6e5c34a91dfda90c91d59b143f1c (image=quay.io/ceph/ceph:v18, name=beautiful_dewdney, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Oct 11 01:41:09 compute-0 podman[191931]: 2025-10-11 01:41:09.774277076 +0000 UTC m=+0.271356928 container attach 9e678f90cb197cca3b58bdff69ca4f976afe6e5c34a91dfda90c91d59b143f1c (image=quay.io/ceph/ceph:v18, name=beautiful_dewdney, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:41:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config set, name=public_network}] v 0) v1
Oct 11 01:41:10 compute-0 systemd[1]: libpod-9e678f90cb197cca3b58bdff69ca4f976afe6e5c34a91dfda90c91d59b143f1c.scope: Deactivated successfully.
Oct 11 01:41:10 compute-0 podman[192011]: 2025-10-11 01:41:10.294136057 +0000 UTC m=+0.054311545 container died 9e678f90cb197cca3b58bdff69ca4f976afe6e5c34a91dfda90c91d59b143f1c (image=quay.io/ceph/ceph:v18, name=beautiful_dewdney, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:41:10 compute-0 systemd[1]: var-lib-containers-storage-overlay-d52f65f032477e88e7d74d42d6421ef5bbfe6994ba653547d5ed29515ae9c131-merged.mount: Deactivated successfully.
Oct 11 01:41:10 compute-0 podman[192011]: 2025-10-11 01:41:10.394871014 +0000 UTC m=+0.155046442 container remove 9e678f90cb197cca3b58bdff69ca4f976afe6e5c34a91dfda90c91d59b143f1c (image=quay.io/ceph/ceph:v18, name=beautiful_dewdney, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:41:10 compute-0 systemd[1]: libpod-conmon-9e678f90cb197cca3b58bdff69ca4f976afe6e5c34a91dfda90c91d59b143f1c.scope: Deactivated successfully.
Oct 11 01:41:10 compute-0 podman[192025]: 2025-10-11 01:41:10.550966581 +0000 UTC m=+0.092568488 container create 11fe74d3a1fc414ccba24804901af3cbf804b07cd299fd60eb979eb4f43efd7d (image=quay.io/ceph/ceph:v18, name=stupefied_black, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:41:10 compute-0 podman[192025]: 2025-10-11 01:41:10.515827813 +0000 UTC m=+0.057429760 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:10 compute-0 systemd[1]: Started libpod-conmon-11fe74d3a1fc414ccba24804901af3cbf804b07cd299fd60eb979eb4f43efd7d.scope.
Oct 11 01:41:10 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:10 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0efe5ed3d479e508d85a111d5908e208198f611589673bc0ad125ccdced5d1cc/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:10 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0efe5ed3d479e508d85a111d5908e208198f611589673bc0ad125ccdced5d1cc/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:10 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0efe5ed3d479e508d85a111d5908e208198f611589673bc0ad125ccdced5d1cc/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:10 compute-0 podman[192025]: 2025-10-11 01:41:10.707348813 +0000 UTC m=+0.248950770 container init 11fe74d3a1fc414ccba24804901af3cbf804b07cd299fd60eb979eb4f43efd7d (image=quay.io/ceph/ceph:v18, name=stupefied_black, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 01:41:10 compute-0 podman[192025]: 2025-10-11 01:41:10.736074703 +0000 UTC m=+0.277676620 container start 11fe74d3a1fc414ccba24804901af3cbf804b07cd299fd60eb979eb4f43efd7d (image=quay.io/ceph/ceph:v18, name=stupefied_black, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:41:10 compute-0 podman[192025]: 2025-10-11 01:41:10.743215439 +0000 UTC m=+0.284817396 container attach 11fe74d3a1fc414ccba24804901af3cbf804b07cd299fd60eb979eb4f43efd7d (image=quay.io/ceph/ceph:v18, name=stupefied_black, CEPH_REF=reef, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:41:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config set, name=cluster_network}] v 0) v1
Oct 11 01:41:11 compute-0 systemd[1]: libpod-11fe74d3a1fc414ccba24804901af3cbf804b07cd299fd60eb979eb4f43efd7d.scope: Deactivated successfully.
Oct 11 01:41:11 compute-0 podman[192025]: 2025-10-11 01:41:11.30337277 +0000 UTC m=+0.844974687 container died 11fe74d3a1fc414ccba24804901af3cbf804b07cd299fd60eb979eb4f43efd7d (image=quay.io/ceph/ceph:v18, name=stupefied_black, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:41:11 compute-0 systemd[1]: var-lib-containers-storage-overlay-0efe5ed3d479e508d85a111d5908e208198f611589673bc0ad125ccdced5d1cc-merged.mount: Deactivated successfully.
Oct 11 01:41:11 compute-0 podman[192025]: 2025-10-11 01:41:11.396497996 +0000 UTC m=+0.938099903 container remove 11fe74d3a1fc414ccba24804901af3cbf804b07cd299fd60eb979eb4f43efd7d (image=quay.io/ceph/ceph:v18, name=stupefied_black, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:41:11 compute-0 systemd[1]: libpod-conmon-11fe74d3a1fc414ccba24804901af3cbf804b07cd299fd60eb979eb4f43efd7d.scope: Deactivated successfully.
Oct 11 01:41:11 compute-0 systemd[1]: Reloading.
Oct 11 01:41:11 compute-0 systemd-rc-local-generator[192107]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:41:11 compute-0 systemd-sysv-generator[192111]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:41:11 compute-0 systemd[1]: Reloading.
Oct 11 01:41:12 compute-0 systemd-sysv-generator[192151]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:41:12 compute-0 systemd-rc-local-generator[192147]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:41:12 compute-0 systemd[1]: Starting Ceph mgr.compute-0.bzgmgr for 3c7617c3-7a20-523e-a9de-20c0d6ba41da...
Oct 11 01:41:12 compute-0 podman[192200]: 2025-10-11 01:41:12.930458306 +0000 UTC m=+0.083055287 container create c1da5b49478dd1fed70faedfca56c27c45a1e1e66421f55ef104038ad3774386 (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:41:12 compute-0 podman[192200]: 2025-10-11 01:41:12.896205156 +0000 UTC m=+0.048802197 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cf7ad5f525ed2232ce5c03f1f16d7f40a53a583a6251d7c1af482f0c29f8e224/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cf7ad5f525ed2232ce5c03f1f16d7f40a53a583a6251d7c1af482f0c29f8e224/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cf7ad5f525ed2232ce5c03f1f16d7f40a53a583a6251d7c1af482f0c29f8e224/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cf7ad5f525ed2232ce5c03f1f16d7f40a53a583a6251d7c1af482f0c29f8e224/merged/var/lib/ceph/mgr/ceph-compute-0.bzgmgr supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:13 compute-0 podman[192200]: 2025-10-11 01:41:13.0682223 +0000 UTC m=+0.220819321 container init c1da5b49478dd1fed70faedfca56c27c45a1e1e66421f55ef104038ad3774386 (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:41:13 compute-0 podman[192200]: 2025-10-11 01:41:13.087971558 +0000 UTC m=+0.240568519 container start c1da5b49478dd1fed70faedfca56c27c45a1e1e66421f55ef104038ad3774386 (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:41:13 compute-0 bash[192200]: c1da5b49478dd1fed70faedfca56c27c45a1e1e66421f55ef104038ad3774386
Oct 11 01:41:13 compute-0 systemd[1]: Started Ceph mgr.compute-0.bzgmgr for 3c7617c3-7a20-523e-a9de-20c0d6ba41da.
Oct 11 01:41:13 compute-0 podman[192213]: 2025-10-11 01:41:13.158874116 +0000 UTC m=+0.165143984 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4)
Oct 11 01:41:13 compute-0 ceph-mgr[192233]: set uid:gid to 167:167 (ceph:ceph)
Oct 11 01:41:13 compute-0 ceph-mgr[192233]: ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable), process ceph-mgr, pid 2
Oct 11 01:41:13 compute-0 ceph-mgr[192233]: pidfile_write: ignore empty --pid-file
Oct 11 01:41:13 compute-0 podman[192237]: 2025-10-11 01:41:13.237813706 +0000 UTC m=+0.078527779 container create 7387d8476b602996b7feafc1741e280843a4f5467938b48be0f3bebf122584da (image=quay.io/ceph/ceph:v18, name=frosty_curie, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:41:13 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'alerts'
Oct 11 01:41:13 compute-0 podman[192237]: 2025-10-11 01:41:13.213114328 +0000 UTC m=+0.053828431 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:13 compute-0 systemd[1]: Started libpod-conmon-7387d8476b602996b7feafc1741e280843a4f5467938b48be0f3bebf122584da.scope.
Oct 11 01:41:13 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7afeaea3b6afd365c6230955f50c4278842c1534be9fff9ae76e50961bde6f2d/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7afeaea3b6afd365c6230955f50c4278842c1534be9fff9ae76e50961bde6f2d/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7afeaea3b6afd365c6230955f50c4278842c1534be9fff9ae76e50961bde6f2d/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:13 compute-0 podman[192237]: 2025-10-11 01:41:13.392181563 +0000 UTC m=+0.232895676 container init 7387d8476b602996b7feafc1741e280843a4f5467938b48be0f3bebf122584da (image=quay.io/ceph/ceph:v18, name=frosty_curie, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:41:13 compute-0 podman[192237]: 2025-10-11 01:41:13.40638744 +0000 UTC m=+0.247101513 container start 7387d8476b602996b7feafc1741e280843a4f5467938b48be0f3bebf122584da (image=quay.io/ceph/ceph:v18, name=frosty_curie, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:41:13 compute-0 podman[192237]: 2025-10-11 01:41:13.411841786 +0000 UTC m=+0.252555889 container attach 7387d8476b602996b7feafc1741e280843a4f5467938b48be0f3bebf122584da (image=quay.io/ceph/ceph:v18, name=frosty_curie, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_REF=reef)
Oct 11 01:41:13 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:13.655+0000 7f1dc5d73140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
Oct 11 01:41:13 compute-0 ceph-mgr[192233]: mgr[py] Module alerts has missing NOTIFY_TYPES member
Oct 11 01:41:13 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'balancer'
Oct 11 01:41:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json-pretty"} v 0) v1
Oct 11 01:41:13 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/126368274' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 01:41:13 compute-0 frosty_curie[192277]: 
Oct 11 01:41:13 compute-0 frosty_curie[192277]: {
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     "fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     "health": {
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "status": "HEALTH_OK",
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "checks": {},
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "mutes": []
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     },
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     "election_epoch": 5,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     "quorum": [
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         0
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     ],
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     "quorum_names": [
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "compute-0"
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     ],
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     "quorum_age": 4,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     "monmap": {
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "epoch": 1,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "min_mon_release_name": "reef",
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "num_mons": 1
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     },
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     "osdmap": {
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "epoch": 1,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "num_osds": 0,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "num_up_osds": 0,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "osd_up_since": 0,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "num_in_osds": 0,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "osd_in_since": 0,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "num_remapped_pgs": 0
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     },
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     "pgmap": {
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "pgs_by_state": [],
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "num_pgs": 0,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "num_pools": 0,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "num_objects": 0,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "data_bytes": 0,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "bytes_used": 0,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "bytes_avail": 0,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "bytes_total": 0
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     },
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     "fsmap": {
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "epoch": 1,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "by_rank": [],
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "up:standby": 0
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     },
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     "mgrmap": {
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "available": false,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "num_standbys": 0,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "modules": [
Oct 11 01:41:13 compute-0 frosty_curie[192277]:             "iostat",
Oct 11 01:41:13 compute-0 frosty_curie[192277]:             "nfs",
Oct 11 01:41:13 compute-0 frosty_curie[192277]:             "restful"
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         ],
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "services": {}
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     },
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     "servicemap": {
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "epoch": 1,
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "modified": "2025-10-11T01:41:05.449924+0000",
Oct 11 01:41:13 compute-0 frosty_curie[192277]:         "services": {}
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     },
Oct 11 01:41:13 compute-0 frosty_curie[192277]:     "progress_events": {}
Oct 11 01:41:13 compute-0 frosty_curie[192277]: }
Oct 11 01:41:13 compute-0 systemd[1]: libpod-7387d8476b602996b7feafc1741e280843a4f5467938b48be0f3bebf122584da.scope: Deactivated successfully.
Oct 11 01:41:13 compute-0 podman[192237]: 2025-10-11 01:41:13.88134885 +0000 UTC m=+0.722062913 container died 7387d8476b602996b7feafc1741e280843a4f5467938b48be0f3bebf122584da (image=quay.io/ceph/ceph:v18, name=frosty_curie, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:41:13 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/126368274' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 01:41:13 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:13.912+0000 7f1dc5d73140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member
Oct 11 01:41:13 compute-0 ceph-mgr[192233]: mgr[py] Module balancer has missing NOTIFY_TYPES member
Oct 11 01:41:13 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'cephadm'
Oct 11 01:41:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-7afeaea3b6afd365c6230955f50c4278842c1534be9fff9ae76e50961bde6f2d-merged.mount: Deactivated successfully.
Oct 11 01:41:13 compute-0 podman[192237]: 2025-10-11 01:41:13.950594041 +0000 UTC m=+0.791308114 container remove 7387d8476b602996b7feafc1741e280843a4f5467938b48be0f3bebf122584da (image=quay.io/ceph/ceph:v18, name=frosty_curie, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3)
Oct 11 01:41:13 compute-0 systemd[1]: libpod-conmon-7387d8476b602996b7feafc1741e280843a4f5467938b48be0f3bebf122584da.scope: Deactivated successfully.
Oct 11 01:41:15 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'crash'
Oct 11 01:41:16 compute-0 podman[192327]: 2025-10-11 01:41:16.108162249 +0000 UTC m=+0.104645693 container create e2a956bbda1a18e050b6e13c738f32bd43e8fb5015cfee6ace07d34c2a539ce5 (image=quay.io/ceph/ceph:v18, name=brave_kare, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
Oct 11 01:41:16 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:16.113+0000 7f1dc5d73140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
Oct 11 01:41:16 compute-0 ceph-mgr[192233]: mgr[py] Module crash has missing NOTIFY_TYPES member
Oct 11 01:41:16 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'dashboard'
Oct 11 01:41:16 compute-0 podman[192327]: 2025-10-11 01:41:16.06995612 +0000 UTC m=+0.066439614 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:16 compute-0 systemd[1]: Started libpod-conmon-e2a956bbda1a18e050b6e13c738f32bd43e8fb5015cfee6ace07d34c2a539ce5.scope.
Oct 11 01:41:16 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/64e13e57a7ad8cccc56bbf464c1f9ea1ecf015d9556695b5dfd677e15790fa46/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/64e13e57a7ad8cccc56bbf464c1f9ea1ecf015d9556695b5dfd677e15790fa46/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/64e13e57a7ad8cccc56bbf464c1f9ea1ecf015d9556695b5dfd677e15790fa46/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:16 compute-0 podman[192327]: 2025-10-11 01:41:16.250577017 +0000 UTC m=+0.247060501 container init e2a956bbda1a18e050b6e13c738f32bd43e8fb5015cfee6ace07d34c2a539ce5 (image=quay.io/ceph/ceph:v18, name=brave_kare, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:41:16 compute-0 podman[192327]: 2025-10-11 01:41:16.266960198 +0000 UTC m=+0.263443642 container start e2a956bbda1a18e050b6e13c738f32bd43e8fb5015cfee6ace07d34c2a539ce5 (image=quay.io/ceph/ceph:v18, name=brave_kare, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:41:16 compute-0 podman[192327]: 2025-10-11 01:41:16.273443289 +0000 UTC m=+0.269926783 container attach e2a956bbda1a18e050b6e13c738f32bd43e8fb5015cfee6ace07d34c2a539ce5 (image=quay.io/ceph/ceph:v18, name=brave_kare, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 01:41:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json-pretty"} v 0) v1
Oct 11 01:41:16 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/474515728' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 01:41:16 compute-0 brave_kare[192343]: 
Oct 11 01:41:16 compute-0 brave_kare[192343]: {
Oct 11 01:41:16 compute-0 brave_kare[192343]:     "fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:41:16 compute-0 brave_kare[192343]:     "health": {
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "status": "HEALTH_OK",
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "checks": {},
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "mutes": []
Oct 11 01:41:16 compute-0 brave_kare[192343]:     },
Oct 11 01:41:16 compute-0 brave_kare[192343]:     "election_epoch": 5,
Oct 11 01:41:16 compute-0 brave_kare[192343]:     "quorum": [
Oct 11 01:41:16 compute-0 brave_kare[192343]:         0
Oct 11 01:41:16 compute-0 brave_kare[192343]:     ],
Oct 11 01:41:16 compute-0 brave_kare[192343]:     "quorum_names": [
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "compute-0"
Oct 11 01:41:16 compute-0 brave_kare[192343]:     ],
Oct 11 01:41:16 compute-0 brave_kare[192343]:     "quorum_age": 7,
Oct 11 01:41:16 compute-0 brave_kare[192343]:     "monmap": {
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "epoch": 1,
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "min_mon_release_name": "reef",
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "num_mons": 1
Oct 11 01:41:16 compute-0 brave_kare[192343]:     },
Oct 11 01:41:16 compute-0 brave_kare[192343]:     "osdmap": {
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "epoch": 1,
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "num_osds": 0,
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "num_up_osds": 0,
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "osd_up_since": 0,
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "num_in_osds": 0,
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "osd_in_since": 0,
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "num_remapped_pgs": 0
Oct 11 01:41:16 compute-0 brave_kare[192343]:     },
Oct 11 01:41:16 compute-0 brave_kare[192343]:     "pgmap": {
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "pgs_by_state": [],
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "num_pgs": 0,
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "num_pools": 0,
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "num_objects": 0,
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "data_bytes": 0,
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "bytes_used": 0,
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "bytes_avail": 0,
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "bytes_total": 0
Oct 11 01:41:16 compute-0 brave_kare[192343]:     },
Oct 11 01:41:16 compute-0 brave_kare[192343]:     "fsmap": {
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "epoch": 1,
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "by_rank": [],
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "up:standby": 0
Oct 11 01:41:16 compute-0 brave_kare[192343]:     },
Oct 11 01:41:16 compute-0 brave_kare[192343]:     "mgrmap": {
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "available": false,
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "num_standbys": 0,
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "modules": [
Oct 11 01:41:16 compute-0 brave_kare[192343]:             "iostat",
Oct 11 01:41:16 compute-0 brave_kare[192343]:             "nfs",
Oct 11 01:41:16 compute-0 brave_kare[192343]:             "restful"
Oct 11 01:41:16 compute-0 brave_kare[192343]:         ],
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "services": {}
Oct 11 01:41:16 compute-0 brave_kare[192343]:     },
Oct 11 01:41:16 compute-0 brave_kare[192343]:     "servicemap": {
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "epoch": 1,
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "modified": "2025-10-11T01:41:05.449924+0000",
Oct 11 01:41:16 compute-0 brave_kare[192343]:         "services": {}
Oct 11 01:41:16 compute-0 brave_kare[192343]:     },
Oct 11 01:41:16 compute-0 brave_kare[192343]:     "progress_events": {}
Oct 11 01:41:16 compute-0 brave_kare[192343]: }
Oct 11 01:41:16 compute-0 systemd[1]: libpod-e2a956bbda1a18e050b6e13c738f32bd43e8fb5015cfee6ace07d34c2a539ce5.scope: Deactivated successfully.
Oct 11 01:41:16 compute-0 podman[192327]: 2025-10-11 01:41:16.770347394 +0000 UTC m=+0.766830838 container died e2a956bbda1a18e050b6e13c738f32bd43e8fb5015cfee6ace07d34c2a539ce5 (image=quay.io/ceph/ceph:v18, name=brave_kare, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 01:41:16 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/474515728' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 01:41:16 compute-0 systemd[1]: var-lib-containers-storage-overlay-64e13e57a7ad8cccc56bbf464c1f9ea1ecf015d9556695b5dfd677e15790fa46-merged.mount: Deactivated successfully.
Oct 11 01:41:16 compute-0 podman[192327]: 2025-10-11 01:41:16.840675032 +0000 UTC m=+0.837158466 container remove e2a956bbda1a18e050b6e13c738f32bd43e8fb5015cfee6ace07d34c2a539ce5 (image=quay.io/ceph/ceph:v18, name=brave_kare, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:41:16 compute-0 systemd[1]: libpod-conmon-e2a956bbda1a18e050b6e13c738f32bd43e8fb5015cfee6ace07d34c2a539ce5.scope: Deactivated successfully.
Oct 11 01:41:17 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'devicehealth'
Oct 11 01:41:17 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:17.916+0000 7f1dc5d73140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
Oct 11 01:41:17 compute-0 ceph-mgr[192233]: mgr[py] Module devicehealth has missing NOTIFY_TYPES member
Oct 11 01:41:17 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'diskprediction_local'
Oct 11 01:41:18 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode.
Oct 11 01:41:18 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve.
Oct 11 01:41:18 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]:   from numpy import show_config as show_numpy_config
Oct 11 01:41:18 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:18.461+0000 7f1dc5d73140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member
Oct 11 01:41:18 compute-0 ceph-mgr[192233]: mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member
Oct 11 01:41:18 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'influx'
Oct 11 01:41:18 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:18.690+0000 7f1dc5d73140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
Oct 11 01:41:18 compute-0 ceph-mgr[192233]: mgr[py] Module influx has missing NOTIFY_TYPES member
Oct 11 01:41:18 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'insights'
Oct 11 01:41:18 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'iostat'
Oct 11 01:41:18 compute-0 podman[192382]: 2025-10-11 01:41:18.986208726 +0000 UTC m=+0.100775219 container create 46fa54773362c0303231adc002b944607190dea259ac3b07c8c8e2425ec0b32f (image=quay.io/ceph/ceph:v18, name=musing_brown, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:41:19 compute-0 podman[192382]: 2025-10-11 01:41:18.942332979 +0000 UTC m=+0.056899482 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:19 compute-0 systemd[1]: Started libpod-conmon-46fa54773362c0303231adc002b944607190dea259ac3b07c8c8e2425ec0b32f.scope.
Oct 11 01:41:19 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9c2265e355d97f922778417d9677fc591157f8c11961726e63dc23f58bec88d9/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9c2265e355d97f922778417d9677fc591157f8c11961726e63dc23f58bec88d9/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9c2265e355d97f922778417d9677fc591157f8c11961726e63dc23f58bec88d9/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:19 compute-0 podman[192382]: 2025-10-11 01:41:19.138107853 +0000 UTC m=+0.252674356 container init 46fa54773362c0303231adc002b944607190dea259ac3b07c8c8e2425ec0b32f (image=quay.io/ceph/ceph:v18, name=musing_brown, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
Oct 11 01:41:19 compute-0 podman[192382]: 2025-10-11 01:41:19.155141008 +0000 UTC m=+0.269707491 container start 46fa54773362c0303231adc002b944607190dea259ac3b07c8c8e2425ec0b32f (image=quay.io/ceph/ceph:v18, name=musing_brown, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507)
Oct 11 01:41:19 compute-0 podman[192382]: 2025-10-11 01:41:19.162195229 +0000 UTC m=+0.276761712 container attach 46fa54773362c0303231adc002b944607190dea259ac3b07c8c8e2425ec0b32f (image=quay.io/ceph/ceph:v18, name=musing_brown, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:41:19 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:19.262+0000 7f1dc5d73140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
Oct 11 01:41:19 compute-0 ceph-mgr[192233]: mgr[py] Module iostat has missing NOTIFY_TYPES member
Oct 11 01:41:19 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'k8sevents'
Oct 11 01:41:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json-pretty"} v 0) v1
Oct 11 01:41:19 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3358486290' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 01:41:19 compute-0 musing_brown[192397]: 
Oct 11 01:41:19 compute-0 musing_brown[192397]: {
Oct 11 01:41:19 compute-0 musing_brown[192397]:     "fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:41:19 compute-0 musing_brown[192397]:     "health": {
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "status": "HEALTH_OK",
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "checks": {},
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "mutes": []
Oct 11 01:41:19 compute-0 musing_brown[192397]:     },
Oct 11 01:41:19 compute-0 musing_brown[192397]:     "election_epoch": 5,
Oct 11 01:41:19 compute-0 musing_brown[192397]:     "quorum": [
Oct 11 01:41:19 compute-0 musing_brown[192397]:         0
Oct 11 01:41:19 compute-0 musing_brown[192397]:     ],
Oct 11 01:41:19 compute-0 musing_brown[192397]:     "quorum_names": [
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "compute-0"
Oct 11 01:41:19 compute-0 musing_brown[192397]:     ],
Oct 11 01:41:19 compute-0 musing_brown[192397]:     "quorum_age": 10,
Oct 11 01:41:19 compute-0 musing_brown[192397]:     "monmap": {
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "epoch": 1,
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "min_mon_release_name": "reef",
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "num_mons": 1
Oct 11 01:41:19 compute-0 musing_brown[192397]:     },
Oct 11 01:41:19 compute-0 musing_brown[192397]:     "osdmap": {
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "epoch": 1,
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "num_osds": 0,
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "num_up_osds": 0,
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "osd_up_since": 0,
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "num_in_osds": 0,
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "osd_in_since": 0,
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "num_remapped_pgs": 0
Oct 11 01:41:19 compute-0 musing_brown[192397]:     },
Oct 11 01:41:19 compute-0 musing_brown[192397]:     "pgmap": {
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "pgs_by_state": [],
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "num_pgs": 0,
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "num_pools": 0,
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "num_objects": 0,
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "data_bytes": 0,
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "bytes_used": 0,
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "bytes_avail": 0,
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "bytes_total": 0
Oct 11 01:41:19 compute-0 musing_brown[192397]:     },
Oct 11 01:41:19 compute-0 musing_brown[192397]:     "fsmap": {
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "epoch": 1,
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "by_rank": [],
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "up:standby": 0
Oct 11 01:41:19 compute-0 musing_brown[192397]:     },
Oct 11 01:41:19 compute-0 musing_brown[192397]:     "mgrmap": {
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "available": false,
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "num_standbys": 0,
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "modules": [
Oct 11 01:41:19 compute-0 musing_brown[192397]:             "iostat",
Oct 11 01:41:19 compute-0 musing_brown[192397]:             "nfs",
Oct 11 01:41:19 compute-0 musing_brown[192397]:             "restful"
Oct 11 01:41:19 compute-0 musing_brown[192397]:         ],
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "services": {}
Oct 11 01:41:19 compute-0 musing_brown[192397]:     },
Oct 11 01:41:19 compute-0 musing_brown[192397]:     "servicemap": {
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "epoch": 1,
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "modified": "2025-10-11T01:41:05.449924+0000",
Oct 11 01:41:19 compute-0 musing_brown[192397]:         "services": {}
Oct 11 01:41:19 compute-0 musing_brown[192397]:     },
Oct 11 01:41:19 compute-0 musing_brown[192397]:     "progress_events": {}
Oct 11 01:41:19 compute-0 musing_brown[192397]: }
Oct 11 01:41:19 compute-0 systemd[1]: libpod-46fa54773362c0303231adc002b944607190dea259ac3b07c8c8e2425ec0b32f.scope: Deactivated successfully.
Oct 11 01:41:19 compute-0 podman[192382]: 2025-10-11 01:41:19.679136068 +0000 UTC m=+0.793702581 container died 46fa54773362c0303231adc002b944607190dea259ac3b07c8c8e2425ec0b32f (image=quay.io/ceph/ceph:v18, name=musing_brown, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:41:19 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3358486290' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 01:41:19 compute-0 systemd[1]: var-lib-containers-storage-overlay-9c2265e355d97f922778417d9677fc591157f8c11961726e63dc23f58bec88d9-merged.mount: Deactivated successfully.
Oct 11 01:41:19 compute-0 podman[192382]: 2025-10-11 01:41:19.767061961 +0000 UTC m=+0.881628424 container remove 46fa54773362c0303231adc002b944607190dea259ac3b07c8c8e2425ec0b32f (image=quay.io/ceph/ceph:v18, name=musing_brown, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 01:41:19 compute-0 systemd[1]: libpod-conmon-46fa54773362c0303231adc002b944607190dea259ac3b07c8c8e2425ec0b32f.scope: Deactivated successfully.
Oct 11 01:41:20 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'localpool'
Oct 11 01:41:21 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'mds_autoscaler'
Oct 11 01:41:21 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'mirroring'
Oct 11 01:41:21 compute-0 podman[192436]: 2025-10-11 01:41:21.909179205 +0000 UTC m=+0.100190719 container create ef524649561acd166b147286aaa0297d5bba18af58c696991ffaace44a4f0884 (image=quay.io/ceph/ceph:v18, name=goofy_snyder, CEPH_REF=reef, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 01:41:21 compute-0 podman[192436]: 2025-10-11 01:41:21.871597639 +0000 UTC m=+0.062609213 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:21 compute-0 systemd[1]: Started libpod-conmon-ef524649561acd166b147286aaa0297d5bba18af58c696991ffaace44a4f0884.scope.
Oct 11 01:41:22 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/aeebd2bf8d27a385ca2cea3cb803d5765a1bb57c9ffe555215b5dd23f5c97d9b/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/aeebd2bf8d27a385ca2cea3cb803d5765a1bb57c9ffe555215b5dd23f5c97d9b/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/aeebd2bf8d27a385ca2cea3cb803d5765a1bb57c9ffe555215b5dd23f5c97d9b/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:22 compute-0 podman[192436]: 2025-10-11 01:41:22.106457847 +0000 UTC m=+0.297469341 container init ef524649561acd166b147286aaa0297d5bba18af58c696991ffaace44a4f0884 (image=quay.io/ceph/ceph:v18, name=goofy_snyder, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Oct 11 01:41:22 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'nfs'
Oct 11 01:41:22 compute-0 podman[192436]: 2025-10-11 01:41:22.124685396 +0000 UTC m=+0.315696900 container start ef524649561acd166b147286aaa0297d5bba18af58c696991ffaace44a4f0884 (image=quay.io/ceph/ceph:v18, name=goofy_snyder, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 01:41:22 compute-0 podman[192436]: 2025-10-11 01:41:22.130825578 +0000 UTC m=+0.321837092 container attach ef524649561acd166b147286aaa0297d5bba18af58c696991ffaace44a4f0884 (image=quay.io/ceph/ceph:v18, name=goofy_snyder, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 01:41:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json-pretty"} v 0) v1
Oct 11 01:41:22 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3852748532' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 01:41:22 compute-0 goofy_snyder[192452]: 
Oct 11 01:41:22 compute-0 goofy_snyder[192452]: {
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     "fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     "health": {
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "status": "HEALTH_OK",
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "checks": {},
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "mutes": []
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     },
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     "election_epoch": 5,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     "quorum": [
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         0
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     ],
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     "quorum_names": [
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "compute-0"
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     ],
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     "quorum_age": 13,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     "monmap": {
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "epoch": 1,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "min_mon_release_name": "reef",
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "num_mons": 1
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     },
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     "osdmap": {
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "epoch": 1,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "num_osds": 0,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "num_up_osds": 0,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "osd_up_since": 0,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "num_in_osds": 0,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "osd_in_since": 0,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "num_remapped_pgs": 0
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     },
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     "pgmap": {
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "pgs_by_state": [],
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "num_pgs": 0,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "num_pools": 0,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "num_objects": 0,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "data_bytes": 0,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "bytes_used": 0,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "bytes_avail": 0,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "bytes_total": 0
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     },
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     "fsmap": {
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "epoch": 1,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "by_rank": [],
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "up:standby": 0
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     },
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     "mgrmap": {
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "available": false,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "num_standbys": 0,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "modules": [
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:             "iostat",
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:             "nfs",
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:             "restful"
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         ],
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "services": {}
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     },
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     "servicemap": {
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "epoch": 1,
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "modified": "2025-10-11T01:41:05.449924+0000",
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:         "services": {}
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     },
Oct 11 01:41:22 compute-0 goofy_snyder[192452]:     "progress_events": {}
Oct 11 01:41:22 compute-0 goofy_snyder[192452]: }
Oct 11 01:41:22 compute-0 systemd[1]: libpod-ef524649561acd166b147286aaa0297d5bba18af58c696991ffaace44a4f0884.scope: Deactivated successfully.
Oct 11 01:41:22 compute-0 podman[192436]: 2025-10-11 01:41:22.590885607 +0000 UTC m=+0.781897091 container died ef524649561acd166b147286aaa0297d5bba18af58c696991ffaace44a4f0884 (image=quay.io/ceph/ceph:v18, name=goofy_snyder, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 01:41:22 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3852748532' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 01:41:22 compute-0 systemd[1]: var-lib-containers-storage-overlay-aeebd2bf8d27a385ca2cea3cb803d5765a1bb57c9ffe555215b5dd23f5c97d9b-merged.mount: Deactivated successfully.
Oct 11 01:41:22 compute-0 podman[192436]: 2025-10-11 01:41:22.660633024 +0000 UTC m=+0.851644508 container remove ef524649561acd166b147286aaa0297d5bba18af58c696991ffaace44a4f0884 (image=quay.io/ceph/ceph:v18, name=goofy_snyder, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:41:22 compute-0 systemd[1]: libpod-conmon-ef524649561acd166b147286aaa0297d5bba18af58c696991ffaace44a4f0884.scope: Deactivated successfully.
Oct 11 01:41:22 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:22.867+0000 7f1dc5d73140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member
Oct 11 01:41:22 compute-0 ceph-mgr[192233]: mgr[py] Module nfs has missing NOTIFY_TYPES member
Oct 11 01:41:22 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'orchestrator'
Oct 11 01:41:23 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:23.503+0000 7f1dc5d73140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member
Oct 11 01:41:23 compute-0 ceph-mgr[192233]: mgr[py] Module orchestrator has missing NOTIFY_TYPES member
Oct 11 01:41:23 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'osd_perf_query'
Oct 11 01:41:23 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:23.762+0000 7f1dc5d73140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
Oct 11 01:41:23 compute-0 ceph-mgr[192233]: mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
Oct 11 01:41:23 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'osd_support'
Oct 11 01:41:23 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:23.988+0000 7f1dc5d73140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member
Oct 11 01:41:23 compute-0 ceph-mgr[192233]: mgr[py] Module osd_support has missing NOTIFY_TYPES member
Oct 11 01:41:23 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'pg_autoscaler'
Oct 11 01:41:24 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:24.247+0000 7f1dc5d73140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
Oct 11 01:41:24 compute-0 ceph-mgr[192233]: mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
Oct 11 01:41:24 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'progress'
Oct 11 01:41:24 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:24.482+0000 7f1dc5d73140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
Oct 11 01:41:24 compute-0 ceph-mgr[192233]: mgr[py] Module progress has missing NOTIFY_TYPES member
Oct 11 01:41:24 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'prometheus'
Oct 11 01:41:24 compute-0 podman[192489]: 2025-10-11 01:41:24.80049739 +0000 UTC m=+0.093801583 container create 00b29f474b1d28406686d31840c24e9a66b000f7e12fa44a0dceaa748e2a5253 (image=quay.io/ceph/ceph:v18, name=magical_chatterjee, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True, OSD_FLAVOR=default)
Oct 11 01:41:24 compute-0 podman[192489]: 2025-10-11 01:41:24.767540557 +0000 UTC m=+0.060844800 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:24 compute-0 systemd[1]: Started libpod-conmon-00b29f474b1d28406686d31840c24e9a66b000f7e12fa44a0dceaa748e2a5253.scope.
Oct 11 01:41:24 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c760fd365e084b76b4025fc32364fc0904c4d3be332c523010c14bf7e374643f/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c760fd365e084b76b4025fc32364fc0904c4d3be332c523010c14bf7e374643f/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c760fd365e084b76b4025fc32364fc0904c4d3be332c523010c14bf7e374643f/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:24 compute-0 podman[192489]: 2025-10-11 01:41:24.974006252 +0000 UTC m=+0.267310505 container init 00b29f474b1d28406686d31840c24e9a66b000f7e12fa44a0dceaa748e2a5253 (image=quay.io/ceph/ceph:v18, name=magical_chatterjee, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2)
Oct 11 01:41:24 compute-0 podman[192489]: 2025-10-11 01:41:24.989852316 +0000 UTC m=+0.283156509 container start 00b29f474b1d28406686d31840c24e9a66b000f7e12fa44a0dceaa748e2a5253 (image=quay.io/ceph/ceph:v18, name=magical_chatterjee, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:41:24 compute-0 podman[192489]: 2025-10-11 01:41:24.996661554 +0000 UTC m=+0.289965757 container attach 00b29f474b1d28406686d31840c24e9a66b000f7e12fa44a0dceaa748e2a5253 (image=quay.io/ceph/ceph:v18, name=magical_chatterjee, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True)
Oct 11 01:41:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json-pretty"} v 0) v1
Oct 11 01:41:25 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/359894087' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]: 
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]: {
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     "fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     "health": {
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "status": "HEALTH_OK",
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "checks": {},
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "mutes": []
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     },
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     "election_epoch": 5,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     "quorum": [
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         0
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     ],
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     "quorum_names": [
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "compute-0"
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     ],
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     "quorum_age": 15,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     "monmap": {
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "epoch": 1,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "min_mon_release_name": "reef",
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "num_mons": 1
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     },
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     "osdmap": {
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "epoch": 1,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "num_osds": 0,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "num_up_osds": 0,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "osd_up_since": 0,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "num_in_osds": 0,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "osd_in_since": 0,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "num_remapped_pgs": 0
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     },
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     "pgmap": {
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "pgs_by_state": [],
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "num_pgs": 0,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "num_pools": 0,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "num_objects": 0,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "data_bytes": 0,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "bytes_used": 0,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "bytes_avail": 0,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "bytes_total": 0
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     },
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     "fsmap": {
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "epoch": 1,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "by_rank": [],
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "up:standby": 0
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     },
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     "mgrmap": {
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "available": false,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "num_standbys": 0,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "modules": [
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:             "iostat",
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:             "nfs",
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:             "restful"
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         ],
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "services": {}
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     },
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     "servicemap": {
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "epoch": 1,
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "modified": "2025-10-11T01:41:05.449924+0000",
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:         "services": {}
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     },
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]:     "progress_events": {}
Oct 11 01:41:25 compute-0 magical_chatterjee[192505]: }
Oct 11 01:41:25 compute-0 systemd[1]: libpod-00b29f474b1d28406686d31840c24e9a66b000f7e12fa44a0dceaa748e2a5253.scope: Deactivated successfully.
Oct 11 01:41:25 compute-0 podman[192489]: 2025-10-11 01:41:25.444608965 +0000 UTC m=+0.737913148 container died 00b29f474b1d28406686d31840c24e9a66b000f7e12fa44a0dceaa748e2a5253 (image=quay.io/ceph/ceph:v18, name=magical_chatterjee, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507)
Oct 11 01:41:25 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/359894087' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 01:41:25 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:25.473+0000 7f1dc5d73140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
Oct 11 01:41:25 compute-0 ceph-mgr[192233]: mgr[py] Module prometheus has missing NOTIFY_TYPES member
Oct 11 01:41:25 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'rbd_support'
Oct 11 01:41:25 compute-0 systemd[1]: var-lib-containers-storage-overlay-c760fd365e084b76b4025fc32364fc0904c4d3be332c523010c14bf7e374643f-merged.mount: Deactivated successfully.
Oct 11 01:41:25 compute-0 podman[192489]: 2025-10-11 01:41:25.515820459 +0000 UTC m=+0.809124632 container remove 00b29f474b1d28406686d31840c24e9a66b000f7e12fa44a0dceaa748e2a5253 (image=quay.io/ceph/ceph:v18, name=magical_chatterjee, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default)
Oct 11 01:41:25 compute-0 systemd[1]: libpod-conmon-00b29f474b1d28406686d31840c24e9a66b000f7e12fa44a0dceaa748e2a5253.scope: Deactivated successfully.
Oct 11 01:41:25 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:25.771+0000 7f1dc5d73140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
Oct 11 01:41:25 compute-0 ceph-mgr[192233]: mgr[py] Module rbd_support has missing NOTIFY_TYPES member
Oct 11 01:41:25 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'restful'
Oct 11 01:41:26 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'rgw'
Oct 11 01:41:27 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:27.150+0000 7f1dc5d73140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member
Oct 11 01:41:27 compute-0 ceph-mgr[192233]: mgr[py] Module rgw has missing NOTIFY_TYPES member
Oct 11 01:41:27 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'rook'
Oct 11 01:41:27 compute-0 podman[192544]: 2025-10-11 01:41:27.679918296 +0000 UTC m=+0.118933950 container create 77b8e5a2f58c40039e554158ffe2641cda518f408a9db71e73009a706de11f02 (image=quay.io/ceph/ceph:v18, name=elegant_bell, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0)
Oct 11 01:41:27 compute-0 podman[192544]: 2025-10-11 01:41:27.621659822 +0000 UTC m=+0.060675496 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:27 compute-0 systemd[1]: Started libpod-conmon-77b8e5a2f58c40039e554158ffe2641cda518f408a9db71e73009a706de11f02.scope.
Oct 11 01:41:27 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/661907cf34a06726ef9e399dc6c1857002dfba89179d30f45934ca7b71354fa7/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/661907cf34a06726ef9e399dc6c1857002dfba89179d30f45934ca7b71354fa7/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/661907cf34a06726ef9e399dc6c1857002dfba89179d30f45934ca7b71354fa7/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:27 compute-0 podman[192544]: 2025-10-11 01:41:27.854493671 +0000 UTC m=+0.293509315 container init 77b8e5a2f58c40039e554158ffe2641cda518f408a9db71e73009a706de11f02 (image=quay.io/ceph/ceph:v18, name=elegant_bell, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:41:27 compute-0 podman[192544]: 2025-10-11 01:41:27.869348745 +0000 UTC m=+0.308364379 container start 77b8e5a2f58c40039e554158ffe2641cda518f408a9db71e73009a706de11f02 (image=quay.io/ceph/ceph:v18, name=elegant_bell, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507)
Oct 11 01:41:27 compute-0 podman[192544]: 2025-10-11 01:41:27.874535695 +0000 UTC m=+0.313551329 container attach 77b8e5a2f58c40039e554158ffe2641cda518f408a9db71e73009a706de11f02 (image=quay.io/ceph/ceph:v18, name=elegant_bell, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:41:28 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json-pretty"} v 0) v1
Oct 11 01:41:28 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3551506535' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 01:41:28 compute-0 elegant_bell[192559]: 
Oct 11 01:41:28 compute-0 elegant_bell[192559]: {
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     "fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     "health": {
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "status": "HEALTH_OK",
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "checks": {},
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "mutes": []
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     },
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     "election_epoch": 5,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     "quorum": [
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         0
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     ],
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     "quorum_names": [
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "compute-0"
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     ],
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     "quorum_age": 18,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     "monmap": {
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "epoch": 1,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "min_mon_release_name": "reef",
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "num_mons": 1
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     },
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     "osdmap": {
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "epoch": 1,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "num_osds": 0,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "num_up_osds": 0,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "osd_up_since": 0,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "num_in_osds": 0,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "osd_in_since": 0,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "num_remapped_pgs": 0
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     },
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     "pgmap": {
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "pgs_by_state": [],
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "num_pgs": 0,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "num_pools": 0,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "num_objects": 0,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "data_bytes": 0,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "bytes_used": 0,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "bytes_avail": 0,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "bytes_total": 0
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     },
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     "fsmap": {
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "epoch": 1,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "by_rank": [],
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "up:standby": 0
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     },
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     "mgrmap": {
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "available": false,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "num_standbys": 0,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "modules": [
Oct 11 01:41:28 compute-0 elegant_bell[192559]:             "iostat",
Oct 11 01:41:28 compute-0 elegant_bell[192559]:             "nfs",
Oct 11 01:41:28 compute-0 elegant_bell[192559]:             "restful"
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         ],
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "services": {}
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     },
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     "servicemap": {
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "epoch": 1,
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "modified": "2025-10-11T01:41:05.449924+0000",
Oct 11 01:41:28 compute-0 elegant_bell[192559]:         "services": {}
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     },
Oct 11 01:41:28 compute-0 elegant_bell[192559]:     "progress_events": {}
Oct 11 01:41:28 compute-0 elegant_bell[192559]: }
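The block above is the output of `ceph status --format json-pretty`, run by cephadm inside a short-lived quay.io/ceph/ceph:v18 container (elegant_bell) while it polls for the cluster to come up. A minimal sketch of consuming the same status programmatically, assuming a `ceph` CLI and an admin keyring are reachable from wherever this runs (in this deployment the CLI actually lives inside the container):

    import json
    import subprocess

    # Hypothetical polling helper; the field names mirror the JSON logged above.
    out = subprocess.run(
        ["ceph", "status", "--format", "json"],
        check=True, capture_output=True, text=True,
    ).stdout
    status = json.loads(out)
    print(status["health"]["status"])      # "HEALTH_OK"
    print(status["mgrmap"]["available"])   # False until the mgr activates
    print(status["osdmap"]["num_osds"])    # 0 this early in the bootstrap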
Oct 11 01:41:28 compute-0 systemd[1]: libpod-77b8e5a2f58c40039e554158ffe2641cda518f408a9db71e73009a706de11f02.scope: Deactivated successfully.
Oct 11 01:41:28 compute-0 podman[192544]: 2025-10-11 01:41:28.35233023 +0000 UTC m=+0.791345854 container died 77b8e5a2f58c40039e554158ffe2641cda518f408a9db71e73009a706de11f02 (image=quay.io/ceph/ceph:v18, name=elegant_bell, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:41:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3551506535' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 01:41:28 compute-0 systemd[1]: var-lib-containers-storage-overlay-661907cf34a06726ef9e399dc6c1857002dfba89179d30f45934ca7b71354fa7-merged.mount: Deactivated successfully.
Oct 11 01:41:28 compute-0 podman[192544]: 2025-10-11 01:41:28.424032326 +0000 UTC m=+0.863047950 container remove 77b8e5a2f58c40039e554158ffe2641cda518f408a9db71e73009a706de11f02 (image=quay.io/ceph/ceph:v18, name=elegant_bell, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:41:28 compute-0 systemd[1]: libpod-conmon-77b8e5a2f58c40039e554158ffe2641cda518f408a9db71e73009a706de11f02.scope: Deactivated successfully.
Oct 11 01:41:28 compute-0 podman[192586]: 2025-10-11 01:41:28.505615933 +0000 UTC m=+0.096891257 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 01:41:28 compute-0 podman[192593]: 2025-10-11 01:41:28.521510929 +0000 UTC m=+0.113195616 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., version=9.6, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.expose-services=, com.redhat.component=ubi9-minimal-container, name=ubi9-minimal, io.openshift.tags=minimal rhel9, architecture=x86_64, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=openstack_network_exporter, release=1755695350, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.buildah.version=1.33.7, build-date=2025-08-20T13:12:41, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, distribution-scope=public, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vendor=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, vcs-type=git, managed_by=edpm_ansible)
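The two health_status events above come from podman healthchecks on the EDPM telemetry containers; the `config_data` label embeds the whole edpm_ansible container definition as a Python-style dict literal (note the single quotes and bare True), so it parses with ast.literal_eval rather than json.loads. A minimal sketch, assuming `podman` is on PATH and the node_exporter container from the log exists:

    import ast
    import json
    import subprocess

    raw = subprocess.run(
        ["podman", "inspect", "node_exporter"],
        check=True, capture_output=True, text=True,
    ).stdout
    labels = json.loads(raw)[0]["Config"]["Labels"]
    config = ast.literal_eval(labels["config_data"])  # dict literal, not JSON
    print(config["healthcheck"]["test"])  # /openstack/healthcheck node_exporter
    print(config["ports"])                # ['9100:9100']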
Oct 11 01:41:29 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:29.275+0000 7f1dc5d73140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
Oct 11 01:41:29 compute-0 ceph-mgr[192233]: mgr[py] Module rook has missing NOTIFY_TYPES member
Oct 11 01:41:29 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'selftest'
Oct 11 01:41:29 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:29.519+0000 7f1dc5d73140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
Oct 11 01:41:29 compute-0 ceph-mgr[192233]: mgr[py] Module selftest has missing NOTIFY_TYPES member
Oct 11 01:41:29 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'snap_schedule'
Oct 11 01:41:29 compute-0 podman[157119]: time="2025-10-11T01:41:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:41:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:41:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 22105 "" "Go-http-client/1.1"
Oct 11 01:41:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:41:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 3940 "" "Go-http-client/1.1"
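These three lines are the podman system service answering libpod REST calls over its Unix socket (the `@` is the placeholder the access log uses for a Unix-socket client). A sketch of issuing the same containers/json query from Python over the socket; the /run/podman/podman.sock path is the usual rootful default and is an assumption here:

    import http.client
    import json
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        # HTTP over a Unix domain socket, enough for the local podman API.
        def __init__(self, socket_path):
            super().__init__("localhost")
            self._socket_path = socket_path

        def connect(self):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(self._socket_path)
            self.sock = sock

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    for c in json.loads(conn.getresponse().read()):
        print(c["Names"], c["State"])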
Oct 11 01:41:29 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:29.783+0000 7f1dc5d73140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
Oct 11 01:41:29 compute-0 ceph-mgr[192233]: mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
Oct 11 01:41:29 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'stats'
Oct 11 01:41:30 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'status'
Oct 11 01:41:30 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:30.277+0000 7f1dc5d73140 -1 mgr[py] Module status has missing NOTIFY_TYPES member
Oct 11 01:41:30 compute-0 ceph-mgr[192233]: mgr[py] Module status has missing NOTIFY_TYPES member
Oct 11 01:41:30 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'telegraf'
Oct 11 01:41:30 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:30.511+0000 7f1dc5d73140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
Oct 11 01:41:30 compute-0 ceph-mgr[192233]: mgr[py] Module telegraf has missing NOTIFY_TYPES member
Oct 11 01:41:30 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'telemetry'
Oct 11 01:41:30 compute-0 podman[192637]: 2025-10-11 01:41:30.567623508 +0000 UTC m=+0.095319613 container create 439690d6705d549ace3feecc7810e4c8d824c4d6ca2245cee721efd20dadbe9a (image=quay.io/ceph/ceph:v18, name=thirsty_elgamal, org.label-schema.build-date=20250507, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:41:30 compute-0 podman[192637]: 2025-10-11 01:41:30.532374133 +0000 UTC m=+0.060070278 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:30 compute-0 systemd[1]: Started libpod-conmon-439690d6705d549ace3feecc7810e4c8d824c4d6ca2245cee721efd20dadbe9a.scope.
Oct 11 01:41:30 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7bfd29336789d1b1b6ec3a6a59dc0df891155b9358b4c0f32160881eeb5d56a5/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7bfd29336789d1b1b6ec3a6a59dc0df891155b9358b4c0f32160881eeb5d56a5/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7bfd29336789d1b1b6ec3a6a59dc0df891155b9358b4c0f32160881eeb5d56a5/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:30 compute-0 podman[192637]: 2025-10-11 01:41:30.72468929 +0000 UTC m=+0.252385405 container init 439690d6705d549ace3feecc7810e4c8d824c4d6ca2245cee721efd20dadbe9a (image=quay.io/ceph/ceph:v18, name=thirsty_elgamal, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 01:41:30 compute-0 podman[192637]: 2025-10-11 01:41:30.748617401 +0000 UTC m=+0.276313466 container start 439690d6705d549ace3feecc7810e4c8d824c4d6ca2245cee721efd20dadbe9a (image=quay.io/ceph/ceph:v18, name=thirsty_elgamal, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3)
Oct 11 01:41:30 compute-0 podman[192637]: 2025-10-11 01:41:30.753617076 +0000 UTC m=+0.281313241 container attach 439690d6705d549ace3feecc7810e4c8d824c4d6ca2245cee721efd20dadbe9a (image=quay.io/ceph/ceph:v18, name=thirsty_elgamal, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS)
Oct 11 01:41:31 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:31.122+0000 7f1dc5d73140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
Oct 11 01:41:31 compute-0 ceph-mgr[192233]: mgr[py] Module telemetry has missing NOTIFY_TYPES member
Oct 11 01:41:31 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'test_orchestrator'
Oct 11 01:41:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json-pretty"} v 0) v1
Oct 11 01:41:31 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2410608061' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]: 
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]: {
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     "fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     "health": {
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "status": "HEALTH_OK",
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "checks": {},
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "mutes": []
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     },
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     "election_epoch": 5,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     "quorum": [
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         0
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     ],
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     "quorum_names": [
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "compute-0"
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     ],
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     "quorum_age": 21,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     "monmap": {
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "epoch": 1,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "min_mon_release_name": "reef",
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "num_mons": 1
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     },
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     "osdmap": {
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "epoch": 1,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "num_osds": 0,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "num_up_osds": 0,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "osd_up_since": 0,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "num_in_osds": 0,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "osd_in_since": 0,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "num_remapped_pgs": 0
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     },
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     "pgmap": {
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "pgs_by_state": [],
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "num_pgs": 0,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "num_pools": 0,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "num_objects": 0,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "data_bytes": 0,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "bytes_used": 0,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "bytes_avail": 0,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "bytes_total": 0
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     },
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     "fsmap": {
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "epoch": 1,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "by_rank": [],
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "up:standby": 0
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     },
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     "mgrmap": {
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "available": false,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "num_standbys": 0,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "modules": [
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:             "iostat",
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:             "nfs",
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:             "restful"
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         ],
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "services": {}
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     },
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     "servicemap": {
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "epoch": 1,
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "modified": "2025-10-11T01:41:05.449924+0000",
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:         "services": {}
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     },
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]:     "progress_events": {}
Oct 11 01:41:31 compute-0 thirsty_elgamal[192652]: }
Oct 11 01:41:31 compute-0 systemd[1]: libpod-439690d6705d549ace3feecc7810e4c8d824c4d6ca2245cee721efd20dadbe9a.scope: Deactivated successfully.
Oct 11 01:41:31 compute-0 podman[192637]: 2025-10-11 01:41:31.281011662 +0000 UTC m=+0.808707767 container died 439690d6705d549ace3feecc7810e4c8d824c4d6ca2245cee721efd20dadbe9a (image=quay.io/ceph/ceph:v18, name=thirsty_elgamal, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:41:31 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2410608061' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 01:41:31 compute-0 systemd[1]: var-lib-containers-storage-overlay-7bfd29336789d1b1b6ec3a6a59dc0df891155b9358b4c0f32160881eeb5d56a5-merged.mount: Deactivated successfully.
Oct 11 01:41:31 compute-0 podman[192637]: 2025-10-11 01:41:31.37703567 +0000 UTC m=+0.904731745 container remove 439690d6705d549ace3feecc7810e4c8d824c4d6ca2245cee721efd20dadbe9a (image=quay.io/ceph/ceph:v18, name=thirsty_elgamal, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:41:31 compute-0 systemd[1]: libpod-conmon-439690d6705d549ace3feecc7810e4c8d824c4d6ca2245cee721efd20dadbe9a.scope: Deactivated successfully.
Oct 11 01:41:31 compute-0 openstack_network_exporter[159265]: ERROR   01:41:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:41:31 compute-0 openstack_network_exporter[159265]: ERROR   01:41:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:41:31 compute-0 openstack_network_exporter[159265]: ERROR   01:41:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:41:31 compute-0 openstack_network_exporter[159265]: ERROR   01:41:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:41:31 compute-0 openstack_network_exporter[159265]: 
Oct 11 01:41:31 compute-0 openstack_network_exporter[159265]: ERROR   01:41:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:41:31 compute-0 openstack_network_exporter[159265]: 
Oct 11 01:41:31 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:31.821+0000 7f1dc5d73140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
Oct 11 01:41:31 compute-0 ceph-mgr[192233]: mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
Oct 11 01:41:31 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'volumes'
Oct 11 01:41:32 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:32.526+0000 7f1dc5d73140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr[py] Module volumes has missing NOTIFY_TYPES member
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'zabbix'
Oct 11 01:41:32 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:32.762+0000 7f1dc5d73140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr[py] Module zabbix has missing NOTIFY_TYPES member
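Each "missing NOTIFY_TYPES member" line above is the Reef mgr noting that a bundled Python module never declared which cluster notifications it consumes; the modules still load, the mgr just cannot prune notification delivery for them. A hedged sketch of the attribute the warning refers to, using the in-tree mgr_module API (the module body itself is illustrative only):

    from mgr_module import MgrModule, NotifyType

    class Module(MgrModule):
        # Declaring NOTIFY_TYPES silences the warning and limits notify()
        # callbacks to the listed notification types.
        NOTIFY_TYPES = [NotifyType.mon_map, NotifyType.osd_map]

        def notify(self, notify_type, notify_id):
            self.log.info("got %s notification", notify_type)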
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: ms_deliver_dispatch: unhandled message 0x55f9d13df1e0 mon_map magic: 0 v1 from mon.0 v2:192.168.122.100:3300/0
Oct 11 01:41:32 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : Activating manager daemon compute-0.bzgmgr
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr handle_mgr_map Activating!
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr handle_mgr_map I am now activating
Oct 11 01:41:32 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : mgrmap e2: compute-0.bzgmgr(active, starting, since 0.0350446s)
Oct 11 01:41:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mds metadata"} v 0) v1
Oct 11 01:41:32 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mds metadata"}]: dispatch
Oct 11 01:41:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).mds e1 all = 1
Oct 11 01:41:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata"} v 0) v1
Oct 11 01:41:32 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata"}]: dispatch
Oct 11 01:41:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon metadata"} v 0) v1
Oct 11 01:41:32 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mon metadata"}]: dispatch
Oct 11 01:41:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon metadata", "id": "compute-0"} v 0) v1
Oct 11 01:41:32 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mon metadata", "id": "compute-0"}]: dispatch
Oct 11 01:41:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr metadata", "who": "compute-0.bzgmgr", "id": "compute-0.bzgmgr"} v 0) v1
Oct 11 01:41:32 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mgr metadata", "who": "compute-0.bzgmgr", "id": "compute-0.bzgmgr"}]: dispatch
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [balancer DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:32 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : Manager daemon compute-0.bzgmgr is now available
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: balancer
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [crash DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: crash
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [balancer INFO root] Starting
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [devicehealth DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: devicehealth
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:41:32
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [balancer INFO root] No pools available
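The balancer lines above show one automatic upmap pass (plan auto_2025-10-11_01:41:32) stopping early because the cluster has no pools yet. A small sketch for checking the same state from the CLI, assuming `ceph balancer status` honors --format json the way most mon/mgr commands do:

    import json
    import subprocess

    out = subprocess.run(
        ["ceph", "balancer", "status", "--format", "json"],
        check=True, capture_output=True, text=True,
    ).stdout
    print(json.loads(out)["mode"])  # expected "upmap" per the log above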
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [devicehealth INFO root] Starting
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [iostat DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: iostat
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [nfs DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: nfs
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [orchestrator DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: orchestrator
Oct 11 01:41:32 compute-0 ceph-mon[191930]: Activating manager daemon compute-0.bzgmgr
Oct 11 01:41:32 compute-0 ceph-mon[191930]: mgrmap e2: compute-0.bzgmgr(active, starting, since 0.0350446s)
Oct 11 01:41:32 compute-0 ceph-mon[191930]: from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mds metadata"}]: dispatch
Oct 11 01:41:32 compute-0 ceph-mon[191930]: from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata"}]: dispatch
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [pg_autoscaler DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: pg_autoscaler
Oct 11 01:41:32 compute-0 ceph-mon[191930]: from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mon metadata"}]: dispatch
Oct 11 01:41:32 compute-0 ceph-mon[191930]: from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mon metadata", "id": "compute-0"}]: dispatch
Oct 11 01:41:32 compute-0 ceph-mon[191930]: from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mgr metadata", "who": "compute-0.bzgmgr", "id": "compute-0.bzgmgr"}]: dispatch
Oct 11 01:41:32 compute-0 ceph-mon[191930]: Manager daemon compute-0.bzgmgr is now available
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [progress DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: progress
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [rbd_support DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [progress INFO root] Loading...
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [progress INFO root] No stored events to load
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [progress INFO root] Loaded [] historic events
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [progress INFO root] Loaded OSDMap, ready.
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [rbd_support INFO root] recovery thread starting
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [rbd_support INFO root] starting setup
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: rbd_support
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [restful DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: restful
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [status DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: status
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [restful INFO root] server_addr: :: server_port: 8003
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [restful WARNING root] server not running: no certificate configured
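The restful module binds to :: port 8003 but then declines to serve because no TLS certificate is configured, which is expected on a fresh bootstrap. A hedged sketch of the usual remedy from the module's upstream docs (availability of the command in this containerized build is an assumption):

    import subprocess

    # Generate a self-signed certificate for the restful module ...
    subprocess.run(["ceph", "restful", "create-self-signed-cert"], check=True)
    # ... and optionally pin the port it already tried to use.
    subprocess.run(
        ["ceph", "config", "set", "mgr", "mgr/restful/server_port", "8003"],
        check=True,
    )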
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [telemetry DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: telemetry
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [volumes DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/compute-0.bzgmgr/mirror_snapshot_schedule"} v 0) v1
Oct 11 01:41:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/compute-0.bzgmgr/mirror_snapshot_schedule"}]: dispatch
Oct 11 01:41:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/telemetry/report_id}] v 0) v1
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: starting
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [rbd_support INFO root] PerfHandler: starting
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TaskHandler: starting
Oct 11 01:41:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:41:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/compute-0.bzgmgr/trash_purge_schedule"} v 0) v1
Oct 11 01:41:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/compute-0.bzgmgr/trash_purge_schedule"}]: dispatch
Oct 11 01:41:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/telemetry/salt}] v 0) v1
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: starting
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: [rbd_support INFO root] setup complete
Oct 11 01:41:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:41:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/telemetry/collection}] v 0) v1
Oct 11 01:41:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:41:32 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: volumes
Oct 11 01:41:33 compute-0 podman[192768]: 2025-10-11 01:41:33.529149544 +0000 UTC m=+0.096748546 container create fd4c3c47355a0d412c0fa4602c1f6bc7744d43ad6b3d99e503f3cca8489c8111 (image=quay.io/ceph/ceph:v18, name=clever_rosalind, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:41:33 compute-0 podman[192768]: 2025-10-11 01:41:33.491878909 +0000 UTC m=+0.059477941 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:33 compute-0 systemd[1]: Started libpod-conmon-fd4c3c47355a0d412c0fa4602c1f6bc7744d43ad6b3d99e503f3cca8489c8111.scope.
Oct 11 01:41:33 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c1ca4974d2ac5951cb77c4c4edbd2b39ca5dce0cbf3bf003ca3101d79c89983e/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c1ca4974d2ac5951cb77c4c4edbd2b39ca5dce0cbf3bf003ca3101d79c89983e/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c1ca4974d2ac5951cb77c4c4edbd2b39ca5dce0cbf3bf003ca3101d79c89983e/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:33 compute-0 podman[192768]: 2025-10-11 01:41:33.696000059 +0000 UTC m=+0.263599091 container init fd4c3c47355a0d412c0fa4602c1f6bc7744d43ad6b3d99e503f3cca8489c8111 (image=quay.io/ceph/ceph:v18, name=clever_rosalind, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True)
Oct 11 01:41:33 compute-0 podman[192768]: 2025-10-11 01:41:33.724721199 +0000 UTC m=+0.292320161 container start fd4c3c47355a0d412c0fa4602c1f6bc7744d43ad6b3d99e503f3cca8489c8111 (image=quay.io/ceph/ceph:v18, name=clever_rosalind, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS)
Oct 11 01:41:33 compute-0 podman[192768]: 2025-10-11 01:41:33.731627725 +0000 UTC m=+0.299226757 container attach fd4c3c47355a0d412c0fa4602c1f6bc7744d43ad6b3d99e503f3cca8489c8111 (image=quay.io/ceph/ceph:v18, name=clever_rosalind, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 01:41:33 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : mgrmap e3: compute-0.bzgmgr(active, since 1.05487s)
Oct 11 01:41:33 compute-0 ceph-mon[191930]: from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/compute-0.bzgmgr/mirror_snapshot_schedule"}]: dispatch
Oct 11 01:41:33 compute-0 ceph-mon[191930]: from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:41:33 compute-0 ceph-mon[191930]: from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/compute-0.bzgmgr/trash_purge_schedule"}]: dispatch
Oct 11 01:41:33 compute-0 ceph-mon[191930]: from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:41:33 compute-0 ceph-mon[191930]: from='mgr.14102 192.168.122.100:0/1104441913' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:41:33 compute-0 ceph-mon[191930]: mgrmap e3: compute-0.bzgmgr(active, since 1.05487s)
Oct 11 01:41:34 compute-0 podman[192789]: 2025-10-11 01:41:34.248084667 +0000 UTC m=+0.140515075 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, config_id=edpm)
Oct 11 01:41:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json-pretty"} v 0) v1
Oct 11 01:41:34 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3130688524' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 01:41:34 compute-0 clever_rosalind[192783]: 
Oct 11 01:41:34 compute-0 clever_rosalind[192783]: {
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     "fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     "health": {
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "status": "HEALTH_OK",
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "checks": {},
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "mutes": []
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     },
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     "election_epoch": 5,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     "quorum": [
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         0
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     ],
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     "quorum_names": [
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "compute-0"
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     ],
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     "quorum_age": 24,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     "monmap": {
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "epoch": 1,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "min_mon_release_name": "reef",
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "num_mons": 1
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     },
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     "osdmap": {
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "epoch": 1,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "num_osds": 0,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "num_up_osds": 0,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "osd_up_since": 0,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "num_in_osds": 0,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "osd_in_since": 0,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "num_remapped_pgs": 0
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     },
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     "pgmap": {
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "pgs_by_state": [],
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "num_pgs": 0,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "num_pools": 0,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "num_objects": 0,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "data_bytes": 0,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "bytes_used": 0,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "bytes_avail": 0,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "bytes_total": 0
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     },
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     "fsmap": {
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "epoch": 1,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "by_rank": [],
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "up:standby": 0
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     },
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     "mgrmap": {
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "available": true,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "num_standbys": 0,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "modules": [
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:             "iostat",
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:             "nfs",
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:             "restful"
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         ],
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "services": {}
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     },
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     "servicemap": {
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "epoch": 1,
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "modified": "2025-10-11T01:41:05.449924+0000",
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:         "services": {}
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     },
Oct 11 01:41:34 compute-0 clever_rosalind[192783]:     "progress_events": {}
Oct 11 01:41:34 compute-0 clever_rosalind[192783]: }
Oct 11 01:41:34 compute-0 systemd[1]: libpod-fd4c3c47355a0d412c0fa4602c1f6bc7744d43ad6b3d99e503f3cca8489c8111.scope: Deactivated successfully.
Oct 11 01:41:34 compute-0 podman[192768]: 2025-10-11 01:41:34.410132822 +0000 UTC m=+0.977731814 container died fd4c3c47355a0d412c0fa4602c1f6bc7744d43ad6b3d99e503f3cca8489c8111 (image=quay.io/ceph/ceph:v18, name=clever_rosalind, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:41:34 compute-0 systemd[1]: var-lib-containers-storage-overlay-c1ca4974d2ac5951cb77c4c4edbd2b39ca5dce0cbf3bf003ca3101d79c89983e-merged.mount: Deactivated successfully.
Oct 11 01:41:34 compute-0 podman[192768]: 2025-10-11 01:41:34.498149737 +0000 UTC m=+1.065748729 container remove fd4c3c47355a0d412c0fa4602c1f6bc7744d43ad6b3d99e503f3cca8489c8111 (image=quay.io/ceph/ceph:v18, name=clever_rosalind, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:41:34 compute-0 systemd[1]: libpod-conmon-fd4c3c47355a0d412c0fa4602c1f6bc7744d43ad6b3d99e503f3cca8489c8111.scope: Deactivated successfully.
Oct 11 01:41:34 compute-0 podman[192839]: 2025-10-11 01:41:34.613706639 +0000 UTC m=+0.070298326 container create 93dff963866ff876fbbd402c9612711e37a2b00256b8ce00326ea16a316b608a (image=quay.io/ceph/ceph:v18, name=boring_liskov, org.label-schema.build-date=20250507, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 01:41:34 compute-0 systemd[1]: Started libpod-conmon-93dff963866ff876fbbd402c9612711e37a2b00256b8ce00326ea16a316b608a.scope.
Oct 11 01:41:34 compute-0 podman[192839]: 2025-10-11 01:41:34.593897173 +0000 UTC m=+0.050488880 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:34 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1d28fce4753aa86b3e69634081f4262bb864dcf6096684e590dcae902c7d1c57/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1d28fce4753aa86b3e69634081f4262bb864dcf6096684e590dcae902c7d1c57/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1d28fce4753aa86b3e69634081f4262bb864dcf6096684e590dcae902c7d1c57/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1d28fce4753aa86b3e69634081f4262bb864dcf6096684e590dcae902c7d1c57/merged/var/lib/ceph/user.conf supports timestamps until 2038 (0x7fffffff)
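Note: the kernel prints one "supports timestamps until 2038" line per bind-mounted path because the backing XFS filesystem lacks the bigtime feature; the message is informational, not a fault. Recent xfsprogs report the flag directly, assuming xfs_info is installed and /var/lib/containers sits on the XFS mount in question:

    # probe; bigtime=1 would mean 64-bit (y2038-safe) timestamps
    xfs_info /var/lib/containers | grep -o 'bigtime=[01]'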
Oct 11 01:41:34 compute-0 podman[192839]: 2025-10-11 01:41:34.770456855 +0000 UTC m=+0.227048572 container init 93dff963866ff876fbbd402c9612711e37a2b00256b8ce00326ea16a316b608a (image=quay.io/ceph/ceph:v18, name=boring_liskov, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:41:34 compute-0 podman[192839]: 2025-10-11 01:41:34.794338753 +0000 UTC m=+0.250930460 container start 93dff963866ff876fbbd402c9612711e37a2b00256b8ce00326ea16a316b608a (image=quay.io/ceph/ceph:v18, name=boring_liskov, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:41:34 compute-0 podman[192839]: 2025-10-11 01:41:34.800483318 +0000 UTC m=+0.257075095 container attach 93dff963866ff876fbbd402c9612711e37a2b00256b8ce00326ea16a316b608a (image=quay.io/ceph/ceph:v18, name=boring_liskov, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2)
Oct 11 01:41:34 compute-0 ceph-mgr[192233]: mgr.server send_report Not sending PG status to monitor yet, waiting for OSDs
Oct 11 01:41:34 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3130688524' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 01:41:34 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : mgrmap e4: compute-0.bzgmgr(active, since 2s)
Oct 11 01:41:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config assimilate-conf"} v 0) v1
Oct 11 01:41:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/810983984' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch
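Note: "config assimilate-conf" reads an ini-style ceph.conf and absorbs every option it can into the monitors' central config database, echoing back whatever could not be assimilated; bootstrap uses it to seed the cluster configuration. Run manually it looks like this, assuming a local conf file:

    ceph config assimilate-conf -i /etc/ceph/ceph.conf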
Oct 11 01:41:35 compute-0 systemd[1]: libpod-93dff963866ff876fbbd402c9612711e37a2b00256b8ce00326ea16a316b608a.scope: Deactivated successfully.
Oct 11 01:41:35 compute-0 conmon[192855]: conmon 93dff963866ff876fbbd <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-93dff963866ff876fbbd402c9612711e37a2b00256b8ce00326ea16a316b608a.scope/container/memory.events
Oct 11 01:41:35 compute-0 podman[192839]: 2025-10-11 01:41:35.394844695 +0000 UTC m=+0.851436422 container died 93dff963866ff876fbbd402c9612711e37a2b00256b8ce00326ea16a316b608a (image=quay.io/ceph/ceph:v18, name=boring_liskov, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:41:35 compute-0 systemd[1]: var-lib-containers-storage-overlay-1d28fce4753aa86b3e69634081f4262bb864dcf6096684e590dcae902c7d1c57-merged.mount: Deactivated successfully.
Oct 11 01:41:35 compute-0 podman[192839]: 2025-10-11 01:41:35.463271462 +0000 UTC m=+0.919863149 container remove 93dff963866ff876fbbd402c9612711e37a2b00256b8ce00326ea16a316b608a (image=quay.io/ceph/ceph:v18, name=boring_liskov, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:41:35 compute-0 systemd[1]: libpod-conmon-93dff963866ff876fbbd402c9612711e37a2b00256b8ce00326ea16a316b608a.scope: Deactivated successfully.
Oct 11 01:41:35 compute-0 podman[192891]: 2025-10-11 01:41:35.556392261 +0000 UTC m=+0.061395243 container create 1a9d429e14a7a6f73f97cd63c7292f54903aaa47c0197a172764d29cb746740f (image=quay.io/ceph/ceph:v18, name=priceless_carver, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, io.buildah.version=1.39.3)
Oct 11 01:41:35 compute-0 podman[192891]: 2025-10-11 01:41:35.528994286 +0000 UTC m=+0.033997348 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:35 compute-0 systemd[1]: Started libpod-conmon-1a9d429e14a7a6f73f97cd63c7292f54903aaa47c0197a172764d29cb746740f.scope.
Oct 11 01:41:35 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d6d92e94da8f7d671119ebdc2427883e17c4358d388b88551b2c2cbef13f4884/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d6d92e94da8f7d671119ebdc2427883e17c4358d388b88551b2c2cbef13f4884/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d6d92e94da8f7d671119ebdc2427883e17c4358d388b88551b2c2cbef13f4884/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:35 compute-0 podman[192891]: 2025-10-11 01:41:35.717393924 +0000 UTC m=+0.222396936 container init 1a9d429e14a7a6f73f97cd63c7292f54903aaa47c0197a172764d29cb746740f (image=quay.io/ceph/ceph:v18, name=priceless_carver, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 01:41:35 compute-0 podman[192891]: 2025-10-11 01:41:35.735794938 +0000 UTC m=+0.240797910 container start 1a9d429e14a7a6f73f97cd63c7292f54903aaa47c0197a172764d29cb746740f (image=quay.io/ceph/ceph:v18, name=priceless_carver, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:41:35 compute-0 podman[192891]: 2025-10-11 01:41:35.740662532 +0000 UTC m=+0.245665554 container attach 1a9d429e14a7a6f73f97cd63c7292f54903aaa47c0197a172764d29cb746740f (image=quay.io/ceph/ceph:v18, name=priceless_carver, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:41:35 compute-0 ceph-mon[191930]: mgrmap e4: compute-0.bzgmgr(active, since 2s)
Oct 11 01:41:35 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/810983984' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch
Oct 11 01:41:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr module enable", "module": "cephadm"} v 0) v1
Oct 11 01:41:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/273638870' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch
Oct 11 01:41:36 compute-0 ceph-mgr[192233]: mgr.server send_report Not sending PG status to monitor yet, waiting for OSDs
Oct 11 01:41:36 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/273638870' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch
Oct 11 01:41:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/273638870' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished
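Note: enabling the cephadm mgr module is the step that gives the new cluster an orchestrator; the audit channel shows the command dispatched and then finished. The customary CLI pair is sketched below; the second command is the usual follow-up and is not visible at this point in the log:

    ceph mgr module enable cephadm
    ceph orch set backend cephadm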
Oct 11 01:41:36 compute-0 ceph-mgr[192233]: mgr handle_mgr_map respawning because set of enabled modules changed!
Oct 11 01:41:36 compute-0 ceph-mgr[192233]: mgr respawn  e: '/usr/bin/ceph-mgr'
Oct 11 01:41:36 compute-0 ceph-mgr[192233]: mgr respawn  0: '/usr/bin/ceph-mgr'
Oct 11 01:41:36 compute-0 ceph-mgr[192233]: mgr respawn  1: '-n'
Oct 11 01:41:36 compute-0 ceph-mgr[192233]: mgr respawn  2: 'mgr.compute-0.bzgmgr'
Oct 11 01:41:36 compute-0 ceph-mgr[192233]: mgr respawn  3: '-f'
Oct 11 01:41:36 compute-0 ceph-mgr[192233]: mgr respawn  4: '--setuser'
Oct 11 01:41:36 compute-0 ceph-mgr[192233]: mgr respawn  5: 'ceph'
Oct 11 01:41:36 compute-0 ceph-mgr[192233]: mgr respawn  6: '--setgroup'
Oct 11 01:41:36 compute-0 ceph-mgr[192233]: mgr respawn  7: 'ceph'
Oct 11 01:41:36 compute-0 ceph-mgr[192233]: mgr respawn  8: '--default-log-to-file=false'
Oct 11 01:41:36 compute-0 ceph-mgr[192233]: mgr respawn  9: '--default-log-to-journald=true'
Oct 11 01:41:36 compute-0 ceph-mgr[192233]: mgr respawn  10: '--default-log-to-stderr=false'
Oct 11 01:41:36 compute-0 ceph-mgr[192233]: mgr respawn respawning with exe /usr/bin/ceph-mgr
Oct 11 01:41:36 compute-0 ceph-mgr[192233]: mgr respawn  exe_path /proc/self/exe
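Note: the respawn lines list argv one element per line. Reassembled, the active mgr re-executes itself through /proc/self/exe (so it reuses the same binary even if the on-disk path changed) as:

    /usr/bin/ceph-mgr -n mgr.compute-0.bzgmgr -f \
      --setuser ceph --setgroup ceph \
      --default-log-to-file=false \
      --default-log-to-journald=true \
      --default-log-to-stderr=false

The later "ignoring --setuser/--setgroup" lines follow from this same argv: inside the container the daemon is already running as a non-root user, so both switches are no-ops.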
Oct 11 01:41:36 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : mgrmap e5: compute-0.bzgmgr(active, since 4s)
Oct 11 01:41:36 compute-0 systemd[1]: libpod-1a9d429e14a7a6f73f97cd63c7292f54903aaa47c0197a172764d29cb746740f.scope: Deactivated successfully.
Oct 11 01:41:36 compute-0 podman[192891]: 2025-10-11 01:41:36.967956066 +0000 UTC m=+1.472959088 container died 1a9d429e14a7a6f73f97cd63c7292f54903aaa47c0197a172764d29cb746740f (image=quay.io/ceph/ceph:v18, name=priceless_carver, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef)
Oct 11 01:41:37 compute-0 systemd[1]: var-lib-containers-storage-overlay-d6d92e94da8f7d671119ebdc2427883e17c4358d388b88551b2c2cbef13f4884-merged.mount: Deactivated successfully.
Oct 11 01:41:37 compute-0 podman[192891]: 2025-10-11 01:41:37.059581196 +0000 UTC m=+1.564584178 container remove 1a9d429e14a7a6f73f97cd63c7292f54903aaa47c0197a172764d29cb746740f (image=quay.io/ceph/ceph:v18, name=priceless_carver, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:41:37 compute-0 systemd[1]: libpod-conmon-1a9d429e14a7a6f73f97cd63c7292f54903aaa47c0197a172764d29cb746740f.scope: Deactivated successfully.
Oct 11 01:41:37 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: ignoring --setuser ceph since I am not root
Oct 11 01:41:37 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: ignoring --setgroup ceph since I am not root
Oct 11 01:41:37 compute-0 ceph-mgr[192233]: ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable), process ceph-mgr, pid 2
Oct 11 01:41:37 compute-0 ceph-mgr[192233]: pidfile_write: ignore empty --pid-file
Oct 11 01:41:37 compute-0 podman[192944]: 2025-10-11 01:41:37.16794989 +0000 UTC m=+0.071906913 container create 6fbeb6fbcb95e9efc785dd04dd27d2763325db02c448b545df346372abf997ca (image=quay.io/ceph/ceph:v18, name=modest_engelbart, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef)
Oct 11 01:41:37 compute-0 systemd[1]: Started libpod-conmon-6fbeb6fbcb95e9efc785dd04dd27d2763325db02c448b545df346372abf997ca.scope.
Oct 11 01:41:37 compute-0 podman[192944]: 2025-10-11 01:41:37.143983766 +0000 UTC m=+0.047940819 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:37 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'alerts'
Oct 11 01:41:37 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9008e1462fb9b345200df0e86a84e02cfa31cb25b61ed11009623989368fb3b3/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9008e1462fb9b345200df0e86a84e02cfa31cb25b61ed11009623989368fb3b3/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9008e1462fb9b345200df0e86a84e02cfa31cb25b61ed11009623989368fb3b3/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:37 compute-0 podman[192944]: 2025-10-11 01:41:37.334089199 +0000 UTC m=+0.238046282 container init 6fbeb6fbcb95e9efc785dd04dd27d2763325db02c448b545df346372abf997ca (image=quay.io/ceph/ceph:v18, name=modest_engelbart, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:41:37 compute-0 podman[192944]: 2025-10-11 01:41:37.355735479 +0000 UTC m=+0.259692502 container start 6fbeb6fbcb95e9efc785dd04dd27d2763325db02c448b545df346372abf997ca (image=quay.io/ceph/ceph:v18, name=modest_engelbart, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 01:41:37 compute-0 podman[192944]: 2025-10-11 01:41:37.362190149 +0000 UTC m=+0.266147202 container attach 6fbeb6fbcb95e9efc785dd04dd27d2763325db02c448b545df346372abf997ca (image=quay.io/ceph/ceph:v18, name=modest_engelbart, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 01:41:37 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:37.577+0000 7fe8f41b9140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
Oct 11 01:41:37 compute-0 ceph-mgr[192233]: mgr[py] Module alerts has missing NOTIFY_TYPES member
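Note: every "Module X has missing NOTIFY_TYPES member" line from here on is the same benign load-time complaint; current mgr versions expect each Python module to declare a NOTIFY_TYPES list of the notification kinds it handles, and in-tree modules that predate the field log this -1 message with no functional impact. Module state after loading can be checked with the probe below, assuming jq is available:

    ceph mgr module ls --format json | jq '.enabled_modules'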
Oct 11 01:41:37 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'balancer'
Oct 11 01:41:37 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:37.839+0000 7fe8f41b9140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member
Oct 11 01:41:37 compute-0 ceph-mgr[192233]: mgr[py] Module balancer has missing NOTIFY_TYPES member
Oct 11 01:41:37 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'cephadm'
Oct 11 01:41:37 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/273638870' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished
Oct 11 01:41:37 compute-0 ceph-mon[191930]: mgrmap e5: compute-0.bzgmgr(active, since 4s)
Oct 11 01:41:37 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr stat"} v 0) v1
Oct 11 01:41:37 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3957849042' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
Oct 11 01:41:37 compute-0 modest_engelbart[192984]: {
Oct 11 01:41:37 compute-0 modest_engelbart[192984]:     "epoch": 5,
Oct 11 01:41:37 compute-0 modest_engelbart[192984]:     "available": true,
Oct 11 01:41:37 compute-0 modest_engelbart[192984]:     "active_name": "compute-0.bzgmgr",
Oct 11 01:41:37 compute-0 modest_engelbart[192984]:     "num_standby": 0
Oct 11 01:41:37 compute-0 modest_engelbart[192984]: }
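Note: this block is the output of the audited "mgr stat" call: "epoch" is the mgrmap epoch, "available" turns true once the active mgr finishes loading its modules, "active_name" names the active daemon, and "num_standby" counts standbys (zero on a single-host bootstrap). Directly reproducible with:

    ceph mgr stat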
Oct 11 01:41:38 compute-0 systemd[1]: libpod-6fbeb6fbcb95e9efc785dd04dd27d2763325db02c448b545df346372abf997ca.scope: Deactivated successfully.
Oct 11 01:41:38 compute-0 podman[192944]: 2025-10-11 01:41:38.025334093 +0000 UTC m=+0.929291146 container died 6fbeb6fbcb95e9efc785dd04dd27d2763325db02c448b545df346372abf997ca (image=quay.io/ceph/ceph:v18, name=modest_engelbart, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:41:38 compute-0 systemd[1]: var-lib-containers-storage-overlay-9008e1462fb9b345200df0e86a84e02cfa31cb25b61ed11009623989368fb3b3-merged.mount: Deactivated successfully.
Oct 11 01:41:38 compute-0 podman[192944]: 2025-10-11 01:41:38.095957903 +0000 UTC m=+0.999914916 container remove 6fbeb6fbcb95e9efc785dd04dd27d2763325db02c448b545df346372abf997ca (image=quay.io/ceph/ceph:v18, name=modest_engelbart, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:41:38 compute-0 systemd[1]: libpod-conmon-6fbeb6fbcb95e9efc785dd04dd27d2763325db02c448b545df346372abf997ca.scope: Deactivated successfully.
Oct 11 01:41:38 compute-0 podman[193020]: 2025-10-11 01:41:38.213062227 +0000 UTC m=+0.075952343 container create da8941dc24124359958b3d112eb580655427a9bbcd5878508effa95cfb585287 (image=quay.io/ceph/ceph:v18, name=thirsty_liskov, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:41:38 compute-0 podman[193020]: 2025-10-11 01:41:38.182746442 +0000 UTC m=+0.045636598 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:38 compute-0 systemd[1]: Started libpod-conmon-da8941dc24124359958b3d112eb580655427a9bbcd5878508effa95cfb585287.scope.
Oct 11 01:41:38 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ef1b002ea24da71880a37cc1a173f0e5dc44f40f7329e62d389815759224d92e/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ef1b002ea24da71880a37cc1a173f0e5dc44f40f7329e62d389815759224d92e/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ef1b002ea24da71880a37cc1a173f0e5dc44f40f7329e62d389815759224d92e/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:38 compute-0 podman[193020]: 2025-10-11 01:41:38.381462135 +0000 UTC m=+0.244352241 container init da8941dc24124359958b3d112eb580655427a9bbcd5878508effa95cfb585287 (image=quay.io/ceph/ceph:v18, name=thirsty_liskov, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:41:38 compute-0 podman[193020]: 2025-10-11 01:41:38.397570347 +0000 UTC m=+0.260460433 container start da8941dc24124359958b3d112eb580655427a9bbcd5878508effa95cfb585287 (image=quay.io/ceph/ceph:v18, name=thirsty_liskov, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 01:41:38 compute-0 podman[193020]: 2025-10-11 01:41:38.41101842 +0000 UTC m=+0.273908506 container attach da8941dc24124359958b3d112eb580655427a9bbcd5878508effa95cfb585287 (image=quay.io/ceph/ceph:v18, name=thirsty_liskov, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507)
Oct 11 01:41:38 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3957849042' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
Oct 11 01:41:39 compute-0 podman[193059]: 2025-10-11 01:41:39.241842873 +0000 UTC m=+0.127677731 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:41:39 compute-0 podman[193060]: 2025-10-11 01:41:39.282184841 +0000 UTC m=+0.165285422 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, build-date=2024-09-18T21:23:30, release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., name=ubi9, vcs-type=git, version=9.4, config_id=edpm, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.29.0, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.expose-services=, maintainer=Red Hat, Inc., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, managed_by=edpm_ansible, com.redhat.component=ubi9-container, release-0.7.12=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, distribution-scope=public, container_name=kepler, vendor=Red Hat, Inc.)
Oct 11 01:41:39 compute-0 podman[193099]: 2025-10-11 01:41:39.452875969 +0000 UTC m=+0.162031015 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, tcib_managed=true, container_name=ovn_controller, io.buildah.version=1.41.3, managed_by=edpm_ansible)
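Note: the health_status events are podman running each container's configured healthcheck (the '/openstack/healthcheck ...' test embedded in config_data) on its timer and journaling the verdict; health_status=healthy with health_failing_streak=0 means the probe passes. The same probe can be forced by hand, using a container name from the log:

    # exit status 0 means healthy
    podman healthcheck run ovn_controller; echo $?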
Oct 11 01:41:39 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'crash'
Oct 11 01:41:40 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:40.230+0000 7fe8f41b9140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
Oct 11 01:41:40 compute-0 ceph-mgr[192233]: mgr[py] Module crash has missing NOTIFY_TYPES member
Oct 11 01:41:40 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'dashboard'
Oct 11 01:41:41 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'devicehealth'
Oct 11 01:41:41 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:41.846+0000 7fe8f41b9140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
Oct 11 01:41:41 compute-0 ceph-mgr[192233]: mgr[py] Module devicehealth has missing NOTIFY_TYPES member
Oct 11 01:41:41 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'diskprediction_local'
Oct 11 01:41:42 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode.
Oct 11 01:41:42 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve.
Oct 11 01:41:42 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]:   from numpy import show_config as show_numpy_config
Oct 11 01:41:42 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:42.376+0000 7fe8f41b9140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member
Oct 11 01:41:42 compute-0 ceph-mgr[192233]: mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member
Oct 11 01:41:42 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'influx'
Oct 11 01:41:42 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:42.619+0000 7fe8f41b9140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
Oct 11 01:41:42 compute-0 ceph-mgr[192233]: mgr[py] Module influx has missing NOTIFY_TYPES member
Oct 11 01:41:42 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'insights'
Oct 11 01:41:42 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'iostat'
Oct 11 01:41:43 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:43.097+0000 7fe8f41b9140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
Oct 11 01:41:43 compute-0 ceph-mgr[192233]: mgr[py] Module iostat has missing NOTIFY_TYPES member
Oct 11 01:41:43 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'k8sevents'
Oct 11 01:41:44 compute-0 podman[193134]: 2025-10-11 01:41:44.241074643 +0000 UTC m=+0.130936848 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, org.label-schema.vendor=CentOS, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm)
Oct 11 01:41:44 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'localpool'
Oct 11 01:41:45 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'mds_autoscaler'
Oct 11 01:41:45 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'mirroring'
Oct 11 01:41:45 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'nfs'
Oct 11 01:41:46 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:46.596+0000 7fe8f41b9140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member
Oct 11 01:41:46 compute-0 ceph-mgr[192233]: mgr[py] Module nfs has missing NOTIFY_TYPES member
Oct 11 01:41:46 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'orchestrator'
Oct 11 01:41:47 compute-0 unix_chkpwd[193156]: password check failed for user (root)
Oct 11 01:41:47 compute-0 sshd-session[193154]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=78.128.112.74  user=root
Oct 11 01:41:47 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:47.276+0000 7fe8f41b9140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member
Oct 11 01:41:47 compute-0 ceph-mgr[192233]: mgr[py] Module orchestrator has missing NOTIFY_TYPES member
Oct 11 01:41:47 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'osd_perf_query'
Oct 11 01:41:47 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:47.556+0000 7fe8f41b9140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
Oct 11 01:41:47 compute-0 ceph-mgr[192233]: mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
Oct 11 01:41:47 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'osd_support'
Oct 11 01:41:47 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:47.786+0000 7fe8f41b9140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member
Oct 11 01:41:47 compute-0 ceph-mgr[192233]: mgr[py] Module osd_support has missing NOTIFY_TYPES member
Oct 11 01:41:47 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'pg_autoscaler'
Oct 11 01:41:48 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:48.059+0000 7fe8f41b9140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
Oct 11 01:41:48 compute-0 ceph-mgr[192233]: mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
Oct 11 01:41:48 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'progress'
Oct 11 01:41:48 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:48.296+0000 7fe8f41b9140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
Oct 11 01:41:48 compute-0 ceph-mgr[192233]: mgr[py] Module progress has missing NOTIFY_TYPES member
Oct 11 01:41:48 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'prometheus'
Oct 11 01:41:49 compute-0 sshd-session[193154]: Failed password for root from 78.128.112.74 port 36620 ssh2
Oct 11 01:41:49 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:49.313+0000 7fe8f41b9140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
Oct 11 01:41:49 compute-0 ceph-mgr[192233]: mgr[py] Module prometheus has missing NOTIFY_TYPES member
Oct 11 01:41:49 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'rbd_support'
Oct 11 01:41:49 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:49.648+0000 7fe8f41b9140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
Oct 11 01:41:49 compute-0 ceph-mgr[192233]: mgr[py] Module rbd_support has missing NOTIFY_TYPES member
Oct 11 01:41:49 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'restful'
Oct 11 01:41:50 compute-0 sshd-session[193154]: Connection closed by authenticating user root 78.128.112.74 port 36620 [preauth]
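Note: interleaved with the bootstrap, sshd records a routine brute-force probe: a root password attempt from 78.128.112.74 fails PAM (unix_chkpwd), fails again at the protocol level, and the peer disconnects preauth. A standard mitigation sketch for an exposed node, assuming the RHEL 9 sshd_config.d include is active:

    printf 'PermitRootLogin no\nPasswordAuthentication no\n' \
      > /etc/ssh/sshd_config.d/99-no-root-password.conf
    systemctl reload sshd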
Oct 11 01:41:50 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'rgw'
Oct 11 01:41:51 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:51.057+0000 7fe8f41b9140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member
Oct 11 01:41:51 compute-0 ceph-mgr[192233]: mgr[py] Module rgw has missing NOTIFY_TYPES member
Oct 11 01:41:51 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'rook'
Oct 11 01:41:53 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:53.064+0000 7fe8f41b9140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
Oct 11 01:41:53 compute-0 ceph-mgr[192233]: mgr[py] Module rook has missing NOTIFY_TYPES member
Oct 11 01:41:53 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'selftest'
Oct 11 01:41:53 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:53.293+0000 7fe8f41b9140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
Oct 11 01:41:53 compute-0 ceph-mgr[192233]: mgr[py] Module selftest has missing NOTIFY_TYPES member
Oct 11 01:41:53 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'snap_schedule'
Oct 11 01:41:53 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:53.533+0000 7fe8f41b9140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
Oct 11 01:41:53 compute-0 ceph-mgr[192233]: mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
Oct 11 01:41:53 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'stats'
Oct 11 01:41:53 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'status'
Oct 11 01:41:54 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:54.013+0000 7fe8f41b9140 -1 mgr[py] Module status has missing NOTIFY_TYPES member
Oct 11 01:41:54 compute-0 ceph-mgr[192233]: mgr[py] Module status has missing NOTIFY_TYPES member
Oct 11 01:41:54 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'telegraf'
Oct 11 01:41:54 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:54.235+0000 7fe8f41b9140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
Oct 11 01:41:54 compute-0 ceph-mgr[192233]: mgr[py] Module telegraf has missing NOTIFY_TYPES member
Oct 11 01:41:54 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'telemetry'
Oct 11 01:41:54 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:54.798+0000 7fe8f41b9140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
Oct 11 01:41:54 compute-0 ceph-mgr[192233]: mgr[py] Module telemetry has missing NOTIFY_TYPES member
Oct 11 01:41:54 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'test_orchestrator'
Oct 11 01:41:55 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:55.420+0000 7fe8f41b9140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
Oct 11 01:41:55 compute-0 ceph-mgr[192233]: mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
Oct 11 01:41:55 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'volumes'
Oct 11 01:41:56 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:56.080+0000 7fe8f41b9140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr[py] Module volumes has missing NOTIFY_TYPES member
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr[py] Loading python module 'zabbix'
Oct 11 01:41:56 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T01:41:56.306+0000 7fe8f41b9140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr[py] Module zabbix has missing NOTIFY_TYPES member
Oct 11 01:41:56 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : Active manager daemon compute-0.bzgmgr restarted
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e1 do_prune osdmap full prune enabled
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e1 encode_pending skipping prime_pg_temp; mapping job did not start
Oct 11 01:41:56 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : Activating manager daemon compute-0.bzgmgr
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: ms_deliver_dispatch: unhandled message 0x556660b9d1e0 mon_map magic: 0 v1 from mon.0 v2:192.168.122.100:3300/0
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e1 _set_cache_ratios kv ratio 0.25 inc ratio 0.375 full ratio 0.375
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e1 register_cache_with_pcm pcm target: 2147483648 pcm max: 1020054732 pcm min: 134217728 inc_osd_cache size: 1
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e2 e2: 0 total, 0 up, 0 in
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr handle_mgr_map Activating!
Oct 11 01:41:56 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e2: 0 total, 0 up, 0 in
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr handle_mgr_map I am now activating
Oct 11 01:41:56 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : mgrmap e6: compute-0.bzgmgr(active, starting, since 0.0231789s)
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon metadata", "id": "compute-0"} v 0) v1
Oct 11 01:41:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mon metadata", "id": "compute-0"}]: dispatch
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr metadata", "who": "compute-0.bzgmgr", "id": "compute-0.bzgmgr"} v 0) v1
Oct 11 01:41:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mgr metadata", "who": "compute-0.bzgmgr", "id": "compute-0.bzgmgr"}]: dispatch
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mds metadata"} v 0) v1
Oct 11 01:41:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mds metadata"}]: dispatch
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).mds e1 all = 1
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata"} v 0) v1
Oct 11 01:41:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata"}]: dispatch
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon metadata"} v 0) v1
Oct 11 01:41:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mon metadata"}]: dispatch
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [balancer DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: balancer
Oct 11 01:41:56 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : Manager daemon compute-0.bzgmgr is now available
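Note: the activation handshake is visible end to end here: the mon logs the restart, "Activating manager daemon", publishes mgrmap e6 in the "active, starting" state, and declares the daemon available once it reports in, while the mgr side logs the matching handle_mgr_map "Activating!" transition. The resulting map can be inspected with:

    ceph mgr dump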
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [cephadm DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Starting
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:41:56
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [balancer INFO root] No pools available
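Note: on activation the balancer immediately builds an optimize plan in upmap mode with the 5% max-misplaced guard, and correctly bails out with "No pools available" on an empty cluster. Once pools exist, its state is queryable with:

    ceph balancer status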
Oct 11 01:41:56 compute-0 ceph-mon[191930]: Active manager daemon compute-0.bzgmgr restarted
Oct 11 01:41:56 compute-0 ceph-mon[191930]: Activating manager daemon compute-0.bzgmgr
Oct 11 01:41:56 compute-0 ceph-mon[191930]: osdmap e2: 0 total, 0 up, 0 in
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mgrmap e6: compute-0.bzgmgr(active, starting, since 0.0231789s)
Oct 11 01:41:56 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mon metadata", "id": "compute-0"}]: dispatch
Oct 11 01:41:56 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mgr metadata", "who": "compute-0.bzgmgr", "id": "compute-0.bzgmgr"}]: dispatch
Oct 11 01:41:56 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mds metadata"}]: dispatch
Oct 11 01:41:56 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata"}]: dispatch
Oct 11 01:41:56 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mon metadata"}]: dispatch
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.migrations] Found migration_current of "None". Setting to last migration.
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Found migration_current of "None". Setting to last migration.
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config set, name=mgr/cephadm/migration_current}] v 0) v1
Oct 11 01:41:56 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/config_checks}] v 0) v1
Oct 11 01:41:56 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
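Note: cephadm tracks its internal schema version in the config key mgr/cephadm/migration_current; on a fresh cluster the key is unset ("None"), so the module stamps the latest migration number instead of replaying migrations. The stored value can be read back with:

    ceph config get mgr mgr/cephadm/migration_current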
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: cephadm
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [crash DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: crash
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [devicehealth DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: devicehealth
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config dump", "format": "json"} v 0) v1
Oct 11 01:41:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [iostat DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: iostat
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [devicehealth INFO root] Starting
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [nfs DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: nfs
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config dump", "format": "json"} v 0) v1
Oct 11 01:41:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [orchestrator DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: orchestrator
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [pg_autoscaler DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: pg_autoscaler
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [progress DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: progress
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [progress INFO root] Loading...
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [progress INFO root] No stored events to load
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [progress INFO root] Loaded [] historic events
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [progress INFO root] Loaded OSDMap, ready.
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [rbd_support DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] recovery thread starting
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] starting setup
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: rbd_support
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [restful DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: restful
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [status DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: status
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [restful INFO root] server_addr: :: server_port: 8003
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [restful WARNING root] server not running: no certificate configured
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [telemetry DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/compute-0.bzgmgr/mirror_snapshot_schedule"} v 0) v1
Oct 11 01:41:56 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/compute-0.bzgmgr/mirror_snapshot_schedule"}]: dispatch
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: telemetry
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [volumes DEBUG root] setting log level based on debug_mgr: INFO (2/5)
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: starting
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] PerfHandler: starting
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TaskHandler: starting
Oct 11 01:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/compute-0.bzgmgr/trash_purge_schedule"} v 0) v1
Oct 11 01:41:56 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/compute-0.bzgmgr/trash_purge_schedule"}]: dispatch
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: starting
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] setup complete
Oct 11 01:41:56 compute-0 ceph-mgr[192233]: mgr load Constructed class from module: volumes
Oct 11 01:41:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/cephadm_agent/root/cert}] v 0) v1
Oct 11 01:41:57 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:41:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/cephadm_agent/root/key}] v 0) v1
Oct 11 01:41:57 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:41:57 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : mgrmap e7: compute-0.bzgmgr(active, since 1.04536s)
Oct 11 01:41:57 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14134 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
Oct 11 01:41:57 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14134 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
Oct 11 01:41:57 compute-0 thirsty_liskov[193035]: {
Oct 11 01:41:57 compute-0 thirsty_liskov[193035]:     "mgrmap_epoch": 7,
Oct 11 01:41:57 compute-0 thirsty_liskov[193035]:     "initialized": true
Oct 11 01:41:57 compute-0 thirsty_liskov[193035]: }
Oct 11 01:41:57 compute-0 ceph-mon[191930]: Manager daemon compute-0.bzgmgr is now available
Oct 11 01:41:57 compute-0 ceph-mon[191930]: Found migration_current of "None". Setting to last migration.
Oct 11 01:41:57 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:41:57 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:41:57 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
Oct 11 01:41:57 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
Oct 11 01:41:57 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/compute-0.bzgmgr/mirror_snapshot_schedule"}]: dispatch
Oct 11 01:41:57 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/compute-0.bzgmgr/trash_purge_schedule"}]: dispatch
Oct 11 01:41:57 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:41:57 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:41:57 compute-0 ceph-mon[191930]: mgrmap e7: compute-0.bzgmgr(active, since 1.04536s)
Oct 11 01:41:57 compute-0 systemd[1]: libpod-da8941dc24124359958b3d112eb580655427a9bbcd5878508effa95cfb585287.scope: Deactivated successfully.
Oct 11 01:41:57 compute-0 podman[193020]: 2025-10-11 01:41:57.421640638 +0000 UTC m=+19.284530754 container died da8941dc24124359958b3d112eb580655427a9bbcd5878508effa95cfb585287 (image=quay.io/ceph/ceph:v18, name=thirsty_liskov, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:41:57 compute-0 systemd[1]: var-lib-containers-storage-overlay-ef1b002ea24da71880a37cc1a173f0e5dc44f40f7329e62d389815759224d92e-merged.mount: Deactivated successfully.
Oct 11 01:41:57 compute-0 podman[193020]: 2025-10-11 01:41:57.533469025 +0000 UTC m=+19.396359111 container remove da8941dc24124359958b3d112eb580655427a9bbcd5878508effa95cfb585287 (image=quay.io/ceph/ceph:v18, name=thirsty_liskov, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:41:57 compute-0 systemd[1]: libpod-conmon-da8941dc24124359958b3d112eb580655427a9bbcd5878508effa95cfb585287.scope: Deactivated successfully.
Oct 11 01:41:57 compute-0 podman[193285]: 2025-10-11 01:41:57.667891498 +0000 UTC m=+0.091195618 container create 31bfc63938031c47373b2208afb8caac2a7c728578d6741e2e6e05f18f35d4a9 (image=quay.io/ceph/ceph:v18, name=charming_wu, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507)
Oct 11 01:41:57 compute-0 podman[193285]: 2025-10-11 01:41:57.632068927 +0000 UTC m=+0.055373127 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:57 compute-0 systemd[1]: Started libpod-conmon-31bfc63938031c47373b2208afb8caac2a7c728578d6741e2e6e05f18f35d4a9.scope.
Oct 11 01:41:57 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:57 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fbb6f157f05b5ceb09eda7a8641bc06e5faacc702a7f1b9cbe8b5ef03cba4f4a/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:57 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fbb6f157f05b5ceb09eda7a8641bc06e5faacc702a7f1b9cbe8b5ef03cba4f4a/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:57 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fbb6f157f05b5ceb09eda7a8641bc06e5faacc702a7f1b9cbe8b5ef03cba4f4a/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:57 compute-0 podman[193285]: 2025-10-11 01:41:57.865422757 +0000 UTC m=+0.288726957 container init 31bfc63938031c47373b2208afb8caac2a7c728578d6741e2e6e05f18f35d4a9 (image=quay.io/ceph/ceph:v18, name=charming_wu, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:41:57 compute-0 podman[193285]: 2025-10-11 01:41:57.888971768 +0000 UTC m=+0.312275888 container start 31bfc63938031c47373b2208afb8caac2a7c728578d6741e2e6e05f18f35d4a9 (image=quay.io/ceph/ceph:v18, name=charming_wu, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default)
Oct 11 01:41:57 compute-0 podman[193285]: 2025-10-11 01:41:57.894569741 +0000 UTC m=+0.317873891 container attach 31bfc63938031c47373b2208afb8caac2a7c728578d6741e2e6e05f18f35d4a9 (image=quay.io/ceph/ceph:v18, name=charming_wu, ceph=True, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 01:41:58 compute-0 ceph-mgr[192233]: mgr.server send_report Not sending PG status to monitor yet, waiting for OSDs
Oct 11 01:41:58 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : mgrmap e8: compute-0.bzgmgr(active, since 2s)
Oct 11 01:41:58 compute-0 ceph-mon[191930]: from='client.14134 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
Oct 11 01:41:58 compute-0 ceph-mon[191930]: from='client.14134 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
Oct 11 01:41:58 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:41:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config set, name=mgr/orchestrator/orchestrator}] v 0) v1
Oct 11 01:41:58 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:41:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config dump", "format": "json"} v 0) v1
Oct 11 01:41:58 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
Oct 11 01:41:58 compute-0 systemd[1]: libpod-31bfc63938031c47373b2208afb8caac2a7c728578d6741e2e6e05f18f35d4a9.scope: Deactivated successfully.
Oct 11 01:41:58 compute-0 podman[193326]: 2025-10-11 01:41:58.559030248 +0000 UTC m=+0.045861345 container died 31bfc63938031c47373b2208afb8caac2a7c728578d6741e2e6e05f18f35d4a9 (image=quay.io/ceph/ceph:v18, name=charming_wu, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 01:41:58 compute-0 systemd[1]: var-lib-containers-storage-overlay-fbb6f157f05b5ceb09eda7a8641bc06e5faacc702a7f1b9cbe8b5ef03cba4f4a-merged.mount: Deactivated successfully.
Oct 11 01:41:58 compute-0 ceph-mgr[192233]: [cephadm INFO cherrypy.error] [11/Oct/2025:01:41:58] ENGINE Bus STARTING
Oct 11 01:41:58 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : [11/Oct/2025:01:41:58] ENGINE Bus STARTING
Oct 11 01:41:58 compute-0 podman[193326]: 2025-10-11 01:41:58.664405165 +0000 UTC m=+0.151236192 container remove 31bfc63938031c47373b2208afb8caac2a7c728578d6741e2e6e05f18f35d4a9 (image=quay.io/ceph/ceph:v18, name=charming_wu, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 01:41:58 compute-0 systemd[1]: libpod-conmon-31bfc63938031c47373b2208afb8caac2a7c728578d6741e2e6e05f18f35d4a9.scope: Deactivated successfully.
Oct 11 01:41:58 compute-0 podman[193341]: 2025-10-11 01:41:58.743388796 +0000 UTC m=+0.112113100 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 01:41:58 compute-0 podman[193342]: 2025-10-11 01:41:58.775145006 +0000 UTC m=+0.125444524 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-type=git, architecture=x86_64, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, vendor=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.buildah.version=1.33.7, url=https://catalog.redhat.com/en/search?searchType=containers, version=9.6, maintainer=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, config_id=edpm, container_name=openstack_network_exporter, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, managed_by=edpm_ansible, io.openshift.tags=minimal rhel9, name=ubi9-minimal, release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., distribution-scope=public, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 01:41:58 compute-0 ceph-mgr[192233]: [cephadm INFO cherrypy.error] [11/Oct/2025:01:41:58] ENGINE Serving on https://192.168.122.100:7150
Oct 11 01:41:58 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : [11/Oct/2025:01:41:58] ENGINE Serving on https://192.168.122.100:7150
Oct 11 01:41:58 compute-0 podman[193374]: 2025-10-11 01:41:58.788584878 +0000 UTC m=+0.070259083 container create c68240e99009272b162aed9b3f1bb569cfebd1ca05a3c9662023bc5c727d58b8 (image=quay.io/ceph/ceph:v18, name=gracious_mclean, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef)
Oct 11 01:41:58 compute-0 ceph-mgr[192233]: [cephadm INFO cherrypy.error] [11/Oct/2025:01:41:58] ENGINE Client ('192.168.122.100', 53848) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
Oct 11 01:41:58 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : [11/Oct/2025:01:41:58] ENGINE Client ('192.168.122.100', 53848) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
Oct 11 01:41:58 compute-0 podman[193374]: 2025-10-11 01:41:58.761713824 +0000 UTC m=+0.043388059 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:58 compute-0 systemd[1]: Started libpod-conmon-c68240e99009272b162aed9b3f1bb569cfebd1ca05a3c9662023bc5c727d58b8.scope.
Oct 11 01:41:58 compute-0 ceph-mgr[192233]: [cephadm INFO cherrypy.error] [11/Oct/2025:01:41:58] ENGINE Serving on http://192.168.122.100:8765
Oct 11 01:41:58 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : [11/Oct/2025:01:41:58] ENGINE Serving on http://192.168.122.100:8765
Oct 11 01:41:58 compute-0 ceph-mgr[192233]: [cephadm INFO cherrypy.error] [11/Oct/2025:01:41:58] ENGINE Bus STARTED
Oct 11 01:41:58 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : [11/Oct/2025:01:41:58] ENGINE Bus STARTED
Oct 11 01:41:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config dump", "format": "json"} v 0) v1
Oct 11 01:41:58 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
Oct 11 01:41:58 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e7a4e231f14d2a054871e37ea9219eb4d33df8c2909b025851bd053755a9861a/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e7a4e231f14d2a054871e37ea9219eb4d33df8c2909b025851bd053755a9861a/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e7a4e231f14d2a054871e37ea9219eb4d33df8c2909b025851bd053755a9861a/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:58 compute-0 podman[193374]: 2025-10-11 01:41:58.94236438 +0000 UTC m=+0.224038595 container init c68240e99009272b162aed9b3f1bb569cfebd1ca05a3c9662023bc5c727d58b8 (image=quay.io/ceph/ceph:v18, name=gracious_mclean, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True)
Oct 11 01:41:58 compute-0 podman[193374]: 2025-10-11 01:41:58.960555497 +0000 UTC m=+0.242229732 container start c68240e99009272b162aed9b3f1bb569cfebd1ca05a3c9662023bc5c727d58b8 (image=quay.io/ceph/ceph:v18, name=gracious_mclean, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:41:58 compute-0 podman[193374]: 2025-10-11 01:41:58.965966025 +0000 UTC m=+0.247640220 container attach c68240e99009272b162aed9b3f1bb569cfebd1ca05a3c9662023bc5c727d58b8 (image=quay.io/ceph/ceph:v18, name=gracious_mclean, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 01:41:59 compute-0 ceph-mon[191930]: mgrmap e8: compute-0.bzgmgr(active, since 2s)
Oct 11 01:41:59 compute-0 ceph-mon[191930]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:41:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:41:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
Oct 11 01:41:59 compute-0 ceph-mon[191930]: [11/Oct/2025:01:41:58] ENGINE Bus STARTING
Oct 11 01:41:59 compute-0 ceph-mon[191930]: [11/Oct/2025:01:41:58] ENGINE Serving on https://192.168.122.100:7150
Oct 11 01:41:59 compute-0 ceph-mon[191930]: [11/Oct/2025:01:41:58] ENGINE Client ('192.168.122.100', 53848) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
Oct 11 01:41:59 compute-0 ceph-mon[191930]: [11/Oct/2025:01:41:58] ENGINE Serving on http://192.168.122.100:8765
Oct 11 01:41:59 compute-0 ceph-mon[191930]: [11/Oct/2025:01:41:58] ENGINE Bus STARTED
Oct 11 01:41:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
Oct 11 01:41:59 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14144 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "ceph-admin", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:41:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/ssh_user}] v 0) v1
Oct 11 01:41:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e2 _set_new_cache_sizes cache_size:1019919166 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:41:59 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:41:59 compute-0 ceph-mgr[192233]: [cephadm INFO root] Set ssh ssh_user
Oct 11 01:41:59 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Set ssh ssh_user
Oct 11 01:41:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/ssh_config}] v 0) v1
Oct 11 01:41:59 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:41:59 compute-0 ceph-mgr[192233]: [cephadm INFO root] Set ssh ssh_config
Oct 11 01:41:59 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Set ssh ssh_config
Oct 11 01:41:59 compute-0 ceph-mgr[192233]: [cephadm INFO root] ssh user set to ceph-admin. sudo will be used
Oct 11 01:41:59 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : ssh user set to ceph-admin. sudo will be used
Oct 11 01:41:59 compute-0 gracious_mclean[193420]: ssh user set to ceph-admin. sudo will be used
Oct 11 01:41:59 compute-0 systemd[1]: libpod-c68240e99009272b162aed9b3f1bb569cfebd1ca05a3c9662023bc5c727d58b8.scope: Deactivated successfully.
Oct 11 01:41:59 compute-0 podman[193374]: 2025-10-11 01:41:59.596927585 +0000 UTC m=+0.878601820 container died c68240e99009272b162aed9b3f1bb569cfebd1ca05a3c9662023bc5c727d58b8 (image=quay.io/ceph/ceph:v18, name=gracious_mclean, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 01:41:59 compute-0 systemd[1]: var-lib-containers-storage-overlay-e7a4e231f14d2a054871e37ea9219eb4d33df8c2909b025851bd053755a9861a-merged.mount: Deactivated successfully.
Oct 11 01:41:59 compute-0 podman[193374]: 2025-10-11 01:41:59.670963015 +0000 UTC m=+0.952637200 container remove c68240e99009272b162aed9b3f1bb569cfebd1ca05a3c9662023bc5c727d58b8 (image=quay.io/ceph/ceph:v18, name=gracious_mclean, ceph=True, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Oct 11 01:41:59 compute-0 systemd[1]: libpod-conmon-c68240e99009272b162aed9b3f1bb569cfebd1ca05a3c9662023bc5c727d58b8.scope: Deactivated successfully.
Oct 11 01:41:59 compute-0 podman[157119]: time="2025-10-11T01:41:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:41:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:41:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 22105 "" "Go-http-client/1.1"
Oct 11 01:41:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:41:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 3935 "" "Go-http-client/1.1"
Oct 11 01:41:59 compute-0 podman[193460]: 2025-10-11 01:41:59.80135675 +0000 UTC m=+0.087569021 container create 290a2e8b0662e0fce94e6626ce3ff66a1d0f1c73d0c6e7750a8baaf077b2857b (image=quay.io/ceph/ceph:v18, name=angry_hugle, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:41:59 compute-0 podman[193460]: 2025-10-11 01:41:59.766994044 +0000 UTC m=+0.053206375 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:41:59 compute-0 systemd[1]: Started libpod-conmon-290a2e8b0662e0fce94e6626ce3ff66a1d0f1c73d0c6e7750a8baaf077b2857b.scope.
Oct 11 01:41:59 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:41:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c15bc8dce3c9af8d167a94321e12ad709bbb571a6b2cf630b6a3c0403941dc56/merged/tmp/cephadm-ssh-key supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c15bc8dce3c9af8d167a94321e12ad709bbb571a6b2cf630b6a3c0403941dc56/merged/tmp/cephadm-ssh-key.pub supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c15bc8dce3c9af8d167a94321e12ad709bbb571a6b2cf630b6a3c0403941dc56/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c15bc8dce3c9af8d167a94321e12ad709bbb571a6b2cf630b6a3c0403941dc56/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c15bc8dce3c9af8d167a94321e12ad709bbb571a6b2cf630b6a3c0403941dc56/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:41:59 compute-0 podman[193460]: 2025-10-11 01:41:59.956539592 +0000 UTC m=+0.242751923 container init 290a2e8b0662e0fce94e6626ce3ff66a1d0f1c73d0c6e7750a8baaf077b2857b (image=quay.io/ceph/ceph:v18, name=angry_hugle, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 01:41:59 compute-0 podman[193460]: 2025-10-11 01:41:59.977454085 +0000 UTC m=+0.263666416 container start 290a2e8b0662e0fce94e6626ce3ff66a1d0f1c73d0c6e7750a8baaf077b2857b (image=quay.io/ceph/ceph:v18, name=angry_hugle, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:41:59 compute-0 podman[193460]: 2025-10-11 01:41:59.985440286 +0000 UTC m=+0.271652667 container attach 290a2e8b0662e0fce94e6626ce3ff66a1d0f1c73d0c6e7750a8baaf077b2857b (image=quay.io/ceph/ceph:v18, name=angry_hugle, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 01:42:00 compute-0 ceph-mgr[192233]: mgr.server send_report Not sending PG status to monitor yet, waiting for OSDs
Oct 11 01:42:00 compute-0 ceph-mon[191930]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "ceph-admin", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:00 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:00 compute-0 ceph-mon[191930]: Set ssh ssh_user
Oct 11 01:42:00 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:00 compute-0 ceph-mon[191930]: Set ssh ssh_config
Oct 11 01:42:00 compute-0 ceph-mon[191930]: ssh user set to ceph-admin. sudo will be used
Oct 11 01:42:00 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14146 -' entity='client.admin' cmd=[{"prefix": "cephadm set-priv-key", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/ssh_identity_key}] v 0) v1
Oct 11 01:42:00 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:00 compute-0 ceph-mgr[192233]: [cephadm INFO root] Set ssh ssh_identity_key
Oct 11 01:42:00 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Set ssh ssh_identity_key
Oct 11 01:42:00 compute-0 ceph-mgr[192233]: [cephadm INFO root] Set ssh private key
Oct 11 01:42:00 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Set ssh private key
Oct 11 01:42:00 compute-0 systemd[1]: libpod-290a2e8b0662e0fce94e6626ce3ff66a1d0f1c73d0c6e7750a8baaf077b2857b.scope: Deactivated successfully.
Oct 11 01:42:00 compute-0 conmon[193475]: conmon 290a2e8b0662e0fce94e <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-290a2e8b0662e0fce94e6626ce3ff66a1d0f1c73d0c6e7750a8baaf077b2857b.scope/container/memory.events
Oct 11 01:42:00 compute-0 podman[193460]: 2025-10-11 01:42:00.614585512 +0000 UTC m=+0.900797793 container died 290a2e8b0662e0fce94e6626ce3ff66a1d0f1c73d0c6e7750a8baaf077b2857b (image=quay.io/ceph/ceph:v18, name=angry_hugle, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 01:42:00 compute-0 systemd[1]: var-lib-containers-storage-overlay-c15bc8dce3c9af8d167a94321e12ad709bbb571a6b2cf630b6a3c0403941dc56-merged.mount: Deactivated successfully.
Oct 11 01:42:00 compute-0 podman[193460]: 2025-10-11 01:42:00.678676256 +0000 UTC m=+0.964888577 container remove 290a2e8b0662e0fce94e6626ce3ff66a1d0f1c73d0c6e7750a8baaf077b2857b (image=quay.io/ceph/ceph:v18, name=angry_hugle, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:42:00 compute-0 systemd[1]: libpod-conmon-290a2e8b0662e0fce94e6626ce3ff66a1d0f1c73d0c6e7750a8baaf077b2857b.scope: Deactivated successfully.
Oct 11 01:42:00 compute-0 podman[193511]: 2025-10-11 01:42:00.813024733 +0000 UTC m=+0.096429961 container create 3498377458f03413888b706a9253e9d24efd2c61f4882212469da44a7fdd7f6e (image=quay.io/ceph/ceph:v18, name=gifted_lehmann, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True)
Oct 11 01:42:00 compute-0 podman[193511]: 2025-10-11 01:42:00.775297772 +0000 UTC m=+0.058703060 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:00 compute-0 systemd[1]: Started libpod-conmon-3498377458f03413888b706a9253e9d24efd2c61f4882212469da44a7fdd7f6e.scope.
Oct 11 01:42:00 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cb189bd4d9a62bf6ba07e03b57f57b7e01df886666474daee596227bd91bb070/merged/tmp/cephadm-ssh-key supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cb189bd4d9a62bf6ba07e03b57f57b7e01df886666474daee596227bd91bb070/merged/tmp/cephadm-ssh-key.pub supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cb189bd4d9a62bf6ba07e03b57f57b7e01df886666474daee596227bd91bb070/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cb189bd4d9a62bf6ba07e03b57f57b7e01df886666474daee596227bd91bb070/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cb189bd4d9a62bf6ba07e03b57f57b7e01df886666474daee596227bd91bb070/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:00 compute-0 podman[193511]: 2025-10-11 01:42:00.964886803 +0000 UTC m=+0.248292101 container init 3498377458f03413888b706a9253e9d24efd2c61f4882212469da44a7fdd7f6e (image=quay.io/ceph/ceph:v18, name=gifted_lehmann, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0)
Oct 11 01:42:00 compute-0 podman[193511]: 2025-10-11 01:42:00.979861967 +0000 UTC m=+0.263267195 container start 3498377458f03413888b706a9253e9d24efd2c61f4882212469da44a7fdd7f6e (image=quay.io/ceph/ceph:v18, name=gifted_lehmann, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 01:42:00 compute-0 podman[193511]: 2025-10-11 01:42:00.986568657 +0000 UTC m=+0.269973945 container attach 3498377458f03413888b706a9253e9d24efd2c61f4882212469da44a7fdd7f6e (image=quay.io/ceph/ceph:v18, name=gifted_lehmann, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:42:01 compute-0 openstack_network_exporter[159265]: ERROR   01:42:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:42:01 compute-0 openstack_network_exporter[159265]: ERROR   01:42:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:42:01 compute-0 openstack_network_exporter[159265]: ERROR   01:42:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:42:01 compute-0 openstack_network_exporter[159265]: ERROR   01:42:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:42:01 compute-0 openstack_network_exporter[159265]: ERROR   01:42:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:42:01 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14148 -' entity='client.admin' cmd=[{"prefix": "cephadm set-pub-key", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/ssh_identity_pub}] v 0) v1
Oct 11 01:42:01 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:01 compute-0 ceph-mgr[192233]: [cephadm INFO root] Set ssh ssh_identity_pub
Oct 11 01:42:01 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Set ssh ssh_identity_pub
Oct 11 01:42:01 compute-0 ceph-mon[191930]: from='client.14146 -' entity='client.admin' cmd=[{"prefix": "cephadm set-priv-key", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:01 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:01 compute-0 ceph-mon[191930]: Set ssh ssh_identity_key
Oct 11 01:42:01 compute-0 ceph-mon[191930]: Set ssh private key
Oct 11 01:42:01 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:01 compute-0 systemd[1]: libpod-3498377458f03413888b706a9253e9d24efd2c61f4882212469da44a7fdd7f6e.scope: Deactivated successfully.
Oct 11 01:42:01 compute-0 podman[193511]: 2025-10-11 01:42:01.584096315 +0000 UTC m=+0.867501543 container died 3498377458f03413888b706a9253e9d24efd2c61f4882212469da44a7fdd7f6e (image=quay.io/ceph/ceph:v18, name=gifted_lehmann, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True)
Oct 11 01:42:01 compute-0 systemd[1]: var-lib-containers-storage-overlay-cb189bd4d9a62bf6ba07e03b57f57b7e01df886666474daee596227bd91bb070-merged.mount: Deactivated successfully.
Oct 11 01:42:01 compute-0 podman[193511]: 2025-10-11 01:42:01.668082102 +0000 UTC m=+0.951487340 container remove 3498377458f03413888b706a9253e9d24efd2c61f4882212469da44a7fdd7f6e (image=quay.io/ceph/ceph:v18, name=gifted_lehmann, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:42:01 compute-0 systemd[1]: libpod-conmon-3498377458f03413888b706a9253e9d24efd2c61f4882212469da44a7fdd7f6e.scope: Deactivated successfully.
Oct 11 01:42:01 compute-0 podman[193566]: 2025-10-11 01:42:01.786554793 +0000 UTC m=+0.087930659 container create 854c4a2e11bd2878e0d9161f5861d9ebadf56bdd48b8c2e16c2bf98cdd211d98 (image=quay.io/ceph/ceph:v18, name=dreamy_benz, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3)
Oct 11 01:42:01 compute-0 podman[193566]: 2025-10-11 01:42:01.744504571 +0000 UTC m=+0.045880487 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:01 compute-0 systemd[1]: Started libpod-conmon-854c4a2e11bd2878e0d9161f5861d9ebadf56bdd48b8c2e16c2bf98cdd211d98.scope.
Oct 11 01:42:01 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b4127ac8afc4b0d1cec0a4bc58356ee40082e5f4eeb2c6e449c311fd552a367b/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b4127ac8afc4b0d1cec0a4bc58356ee40082e5f4eeb2c6e449c311fd552a367b/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b4127ac8afc4b0d1cec0a4bc58356ee40082e5f4eeb2c6e449c311fd552a367b/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:01 compute-0 podman[193566]: 2025-10-11 01:42:01.947951557 +0000 UTC m=+0.249327453 container init 854c4a2e11bd2878e0d9161f5861d9ebadf56bdd48b8c2e16c2bf98cdd211d98 (image=quay.io/ceph/ceph:v18, name=dreamy_benz, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 01:42:01 compute-0 podman[193566]: 2025-10-11 01:42:01.965765765 +0000 UTC m=+0.267141621 container start 854c4a2e11bd2878e0d9161f5861d9ebadf56bdd48b8c2e16c2bf98cdd211d98 (image=quay.io/ceph/ceph:v18, name=dreamy_benz, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2)
Oct 11 01:42:01 compute-0 podman[193566]: 2025-10-11 01:42:01.972885528 +0000 UTC m=+0.274261394 container attach 854c4a2e11bd2878e0d9161f5861d9ebadf56bdd48b8c2e16c2bf98cdd211d98 (image=quay.io/ceph/ceph:v18, name=dreamy_benz, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
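
The create/init/start/attach burst above, with the random name dreamy_benz, is cephadm running a one-shot ceph CLI call inside quay.io/ceph/ceph:v18 rather than on the host; the container is removed as soon as the command exits (see the died/remove events that follow). A rough manual equivalent, assuming the same read-only mounts the kernel messages show:

    # One-shot ceph CLI in a throwaway container: host networking,
    # cluster config and admin keyring mounted read-only.
    podman run --rm --net=host \
      -v /etc/ceph/ceph.conf:/etc/ceph/ceph.conf:ro \
      -v /etc/ceph/ceph.client.admin.keyring:/etc/ceph/ceph.client.admin.keyring:ro \
      quay.io/ceph/ceph:v18 ceph -s
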
Oct 11 01:42:02 compute-0 ceph-mgr[192233]: mgr.server send_report Not sending PG status to monitor yet, waiting for OSDs
Oct 11 01:42:02 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14150 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:02 compute-0 dreamy_benz[193582]: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDP+lCEBPDwlnkWxQweJKIoYavt3R3WPgi9Bpg73ziV6Ljvr6sHOSp/NVDz6gcgr/GpqaE1q89pT7sgXB4UQ9jfGpSN/RO7c65aM1sRO3NXxtzx8q7eA64m14wyQ+hcuGeXpicbGtbKqs5+DyRA7AMqZ8z4MwUq0Eeezd3vG6JWfMuwTe7zvn3ZKvcobOK6Q/zyZFnxnkgVsywONGcxxDPd2Fke9nsF0PxZlhRkd2SN2Xg5WPyJkmsHMl0UUPpgU4hP1TQqAP6gDRXGTr2/osEPU8qZwC2bn5984lbCOgoNnf7bLHSutO897bKqzUxBoiJkSzUBQOmbDXVAxS0jsLyVjqQm6mrFsaqd8P2pJWqWR9bB9AKenG/gLy/4nxx1aqPa/3uxn8MHhYMOjdp0r7R6vGAfP3mT6paorndWHfUOptj5g3R4XzQtgdx/gnWk48a/Ct5+EKmLSzV26TgNceh4+PH714oOqMgfpqcPd1HKpIEowN5fVmypDBFA2jfKPCE= zuul@controller
Oct 11 01:42:02 compute-0 ceph-mon[191930]: from='client.14148 -' entity='client.admin' cmd=[{"prefix": "cephadm set-pub-key", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:02 compute-0 ceph-mon[191930]: Set ssh ssh_identity_pub
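
This is the SSH trust handshake that lets the cephadm orchestrator manage hosts: get-pub-key asks the mgr for its public key (the ssh-rsa line printed by dreamy_benz above), and set-pub-key stores one back ("Set ssh ssh_identity_pub"); the key's zuul@controller comment shows a pre-generated key being loaded. A sketch of the same exchange by hand, with the host and user taken from this log:

    # Export the orchestrator's SSH public key and authorize it for
    # the ceph-admin user on the managed host.
    ceph cephadm get-pub-key > /tmp/ceph.pub
    ssh-copy-id -f -i /tmp/ceph.pub ceph-admin@compute-0
    # Load a replacement key into the orchestrator when rotating:
    ceph cephadm set-pub-key -i /tmp/ceph.pub
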
Oct 11 01:42:02 compute-0 systemd[1]: libpod-854c4a2e11bd2878e0d9161f5861d9ebadf56bdd48b8c2e16c2bf98cdd211d98.scope: Deactivated successfully.
Oct 11 01:42:02 compute-0 podman[193566]: 2025-10-11 01:42:02.595027741 +0000 UTC m=+0.896403597 container died 854c4a2e11bd2878e0d9161f5861d9ebadf56bdd48b8c2e16c2bf98cdd211d98 (image=quay.io/ceph/ceph:v18, name=dreamy_benz, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, ceph=True)
Oct 11 01:42:02 compute-0 systemd[1]: var-lib-containers-storage-overlay-b4127ac8afc4b0d1cec0a4bc58356ee40082e5f4eeb2c6e449c311fd552a367b-merged.mount: Deactivated successfully.
Oct 11 01:42:02 compute-0 podman[193566]: 2025-10-11 01:42:02.678137988 +0000 UTC m=+0.979513854 container remove 854c4a2e11bd2878e0d9161f5861d9ebadf56bdd48b8c2e16c2bf98cdd211d98 (image=quay.io/ceph/ceph:v18, name=dreamy_benz, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:42:02 compute-0 systemd[1]: libpod-conmon-854c4a2e11bd2878e0d9161f5861d9ebadf56bdd48b8c2e16c2bf98cdd211d98.scope: Deactivated successfully.
Oct 11 01:42:02 compute-0 podman[193619]: 2025-10-11 01:42:02.806379342 +0000 UTC m=+0.080796515 container create 1df649e7ee32dc0a4fbca69af889e712fa4db34bf9ea2374658f81cc4ed1f7c7 (image=quay.io/ceph/ceph:v18, name=distracted_easley, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 01:42:02 compute-0 systemd[1]: Started libpod-conmon-1df649e7ee32dc0a4fbca69af889e712fa4db34bf9ea2374658f81cc4ed1f7c7.scope.
Oct 11 01:42:02 compute-0 podman[193619]: 2025-10-11 01:42:02.776888222 +0000 UTC m=+0.051305435 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:02 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/efc7d72d05c5c77d037ec331ce1e2872f00405bb5d8d3965870c9ddec4e31a0b/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/efc7d72d05c5c77d037ec331ce1e2872f00405bb5d8d3965870c9ddec4e31a0b/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/efc7d72d05c5c77d037ec331ce1e2872f00405bb5d8d3965870c9ddec4e31a0b/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:02 compute-0 podman[193619]: 2025-10-11 01:42:02.966504356 +0000 UTC m=+0.240921529 container init 1df649e7ee32dc0a4fbca69af889e712fa4db34bf9ea2374658f81cc4ed1f7c7 (image=quay.io/ceph/ceph:v18, name=distracted_easley, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:42:02 compute-0 podman[193619]: 2025-10-11 01:42:02.984761849 +0000 UTC m=+0.259179022 container start 1df649e7ee32dc0a4fbca69af889e712fa4db34bf9ea2374658f81cc4ed1f7c7 (image=quay.io/ceph/ceph:v18, name=distracted_easley, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default)
Oct 11 01:42:02 compute-0 podman[193619]: 2025-10-11 01:42:02.99098442 +0000 UTC m=+0.265401603 container attach 1df649e7ee32dc0a4fbca69af889e712fa4db34bf9ea2374658f81cc4ed1f7c7 (image=quay.io/ceph/ceph:v18, name=distracted_easley, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:42:03 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14152 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "compute-0", "addr": "192.168.122.100", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:03 compute-0 ceph-mon[191930]: from='client.14150 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:03 compute-0 sshd-session[193661]: Accepted publickey for ceph-admin from 192.168.122.100 port 48540 ssh2: RSA SHA256:s3wA1uQmx1ezgfjQez1lP/VjOHwzWTFNHeyPt4wAHzs
Oct 11 01:42:03 compute-0 systemd[1]: Created slice User Slice of UID 42477.
Oct 11 01:42:03 compute-0 systemd[1]: Starting User Runtime Directory /run/user/42477...
Oct 11 01:42:03 compute-0 systemd-logind[804]: New session 28 of user ceph-admin.
Oct 11 01:42:03 compute-0 systemd[1]: Finished User Runtime Directory /run/user/42477.
Oct 11 01:42:03 compute-0 systemd[1]: Starting User Manager for UID 42477...
Oct 11 01:42:03 compute-0 systemd[193665]: pam_unix(systemd-user:session): session opened for user ceph-admin(uid=42477) by ceph-admin(uid=0)
Oct 11 01:42:04 compute-0 sshd-session[193668]: Accepted publickey for ceph-admin from 192.168.122.100 port 48546 ssh2: RSA SHA256:s3wA1uQmx1ezgfjQez1lP/VjOHwzWTFNHeyPt4wAHzs
Oct 11 01:42:04 compute-0 systemd-logind[804]: New session 30 of user ceph-admin.
Oct 11 01:42:04 compute-0 systemd[193665]: Queued start job for default target Main User Target.
Oct 11 01:42:04 compute-0 systemd[193665]: Created slice User Application Slice.
Oct 11 01:42:04 compute-0 systemd[193665]: Started Mark boot as successful after the user session has run 2 minutes.
Oct 11 01:42:04 compute-0 systemd[193665]: Started Daily Cleanup of User's Temporary Directories.
Oct 11 01:42:04 compute-0 systemd[193665]: Reached target Paths.
Oct 11 01:42:04 compute-0 systemd[193665]: Reached target Timers.
Oct 11 01:42:04 compute-0 systemd[193665]: Starting D-Bus User Message Bus Socket...
Oct 11 01:42:04 compute-0 systemd[193665]: Starting Create User's Volatile Files and Directories...
Oct 11 01:42:04 compute-0 systemd[193665]: Listening on D-Bus User Message Bus Socket.
Oct 11 01:42:04 compute-0 systemd[193665]: Finished Create User's Volatile Files and Directories.
Oct 11 01:42:04 compute-0 systemd[193665]: Reached target Sockets.
Oct 11 01:42:04 compute-0 systemd[193665]: Reached target Basic System.
Oct 11 01:42:04 compute-0 systemd[193665]: Reached target Main User Target.
Oct 11 01:42:04 compute-0 systemd[193665]: Startup finished in 193ms.
Oct 11 01:42:04 compute-0 systemd[1]: Started User Manager for UID 42477.
Oct 11 01:42:04 compute-0 systemd[1]: Started Session 28 of User ceph-admin.
Oct 11 01:42:04 compute-0 systemd[1]: Started Session 30 of User ceph-admin.
Oct 11 01:42:04 compute-0 sshd-session[193661]: pam_unix(sshd:session): session opened for user ceph-admin(uid=42477) by ceph-admin(uid=0)
Oct 11 01:42:04 compute-0 sshd-session[193668]: pam_unix(sshd:session): session opened for user ceph-admin(uid=42477) by ceph-admin(uid=0)
Oct 11 01:42:04 compute-0 ceph-mgr[192233]: mgr.server send_report Not sending PG status to monitor yet, waiting for OSDs
Oct 11 01:42:04 compute-0 sudo[193685]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:04 compute-0 sudo[193685]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:04 compute-0 sudo[193685]: pam_unix(sudo:session): session closed for user root
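
From here on, nearly every SSH session the orchestrator opens begins with sudo /bin/true: a no-op probe that passwordless sudo works for ceph-admin before any real command is attempted. The non-interactive form makes the failure mode explicit:

    # Exits 0 only if sudo needs no password; -n forbids prompting.
    sudo -n /bin/true && echo "passwordless sudo OK"
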
Oct 11 01:42:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e2 _set_new_cache_sizes cache_size:1020052996 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:42:04 compute-0 sudo[193716]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:42:04 compute-0 sudo[193716]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:04 compute-0 ceph-mon[191930]: from='client.14152 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "compute-0", "addr": "192.168.122.100", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:04 compute-0 sudo[193716]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:04 compute-0 podman[193709]: 2025-10-11 01:42:04.605458649 +0000 UTC m=+0.148561601 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ceilometer_agent_ipmi, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_managed=true)
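
Interleaved with the Ceph bootstrap, podman keeps running the EDPM container healthchecks on their timers; ceilometer_agent_ipmi here, and podman_exporter, kepler and ovn_controller a few seconds later, all report health_status=healthy. The same check can be triggered on demand:

    # Run the container's configured healthcheck once;
    # exit status 0 = healthy, 1 = unhealthy.
    podman healthcheck run ceilometer_agent_ipmi; echo "exit=$?"
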
Oct 11 01:42:04 compute-0 sshd-session[193753]: Accepted publickey for ceph-admin from 192.168.122.100 port 48548 ssh2: RSA SHA256:s3wA1uQmx1ezgfjQez1lP/VjOHwzWTFNHeyPt4wAHzs
Oct 11 01:42:04 compute-0 systemd-logind[804]: New session 31 of user ceph-admin.
Oct 11 01:42:04 compute-0 systemd[1]: Started Session 31 of User ceph-admin.
Oct 11 01:42:04 compute-0 sshd-session[193753]: pam_unix(sshd:session): session opened for user ceph-admin(uid=42477) by ceph-admin(uid=0)
Oct 11 01:42:05 compute-0 sudo[193757]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:05 compute-0 sudo[193757]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:05 compute-0 sudo[193757]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:05 compute-0 sudo[193782]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host --expect-hostname compute-0
Oct 11 01:42:05 compute-0 sudo[193782]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:05 compute-0 sudo[193782]: pam_unix(sudo:session): session closed for user root
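
The first real payload over SSH is cephadm check-host, a pre-flight that validates the node before any daemon is placed (hostname matches --expect-hostname, required tooling is present). The first command below is copied from the log; the short second form assumes a packaged cephadm binary is installed on the host:

    # Pre-flight validation of the managed host.
    sudo python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d \
      --timeout 895 check-host --expect-hostname compute-0
    # Equivalent with the packaged binary:
    cephadm check-host --expect-hostname compute-0
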
Oct 11 01:42:05 compute-0 sshd-session[193807]: Accepted publickey for ceph-admin from 192.168.122.100 port 37740 ssh2: RSA SHA256:s3wA1uQmx1ezgfjQez1lP/VjOHwzWTFNHeyPt4wAHzs
Oct 11 01:42:05 compute-0 systemd-logind[804]: New session 32 of user ceph-admin.
Oct 11 01:42:05 compute-0 systemd[1]: Started Session 32 of User ceph-admin.
Oct 11 01:42:05 compute-0 sshd-session[193807]: pam_unix(sshd:session): session opened for user ceph-admin(uid=42477) by ceph-admin(uid=0)
Oct 11 01:42:05 compute-0 sudo[193811]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:05 compute-0 sudo[193811]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:05 compute-0 sudo[193811]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:05 compute-0 sudo[193836]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Oct 11 01:42:05 compute-0 sudo[193836]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:05 compute-0 sudo[193836]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:05 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.serve] Deploying cephadm binary to compute-0
Oct 11 01:42:05 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Deploying cephadm binary to compute-0
Oct 11 01:42:06 compute-0 sshd-session[193861]: Accepted publickey for ceph-admin from 192.168.122.100 port 37750 ssh2: RSA SHA256:s3wA1uQmx1ezgfjQez1lP/VjOHwzWTFNHeyPt4wAHzs
Oct 11 01:42:06 compute-0 systemd-logind[804]: New session 33 of user ceph-admin.
Oct 11 01:42:06 compute-0 systemd[1]: Started Session 33 of User ceph-admin.
Oct 11 01:42:06 compute-0 sshd-session[193861]: pam_unix(sshd:session): session opened for user ceph-admin(uid=42477) by ceph-admin(uid=0)
Oct 11 01:42:06 compute-0 ceph-mon[191930]: Deploying cephadm binary to compute-0
Oct 11 01:42:06 compute-0 sudo[193865]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:06 compute-0 sudo[193865]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:06 compute-0 sudo[193865]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:06 compute-0 ceph-mgr[192233]: mgr.server send_report Not sending PG status to monitor yet, waiting for OSDs
Oct 11 01:42:06 compute-0 sudo[193890]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:42:06 compute-0 sudo[193890]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:06 compute-0 sudo[193890]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:06 compute-0 sshd-session[193915]: Accepted publickey for ceph-admin from 192.168.122.100 port 37752 ssh2: RSA SHA256:s3wA1uQmx1ezgfjQez1lP/VjOHwzWTFNHeyPt4wAHzs
Oct 11 01:42:06 compute-0 systemd-logind[804]: New session 34 of user ceph-admin.
Oct 11 01:42:06 compute-0 systemd[1]: Started Session 34 of User ceph-admin.
Oct 11 01:42:06 compute-0 sshd-session[193915]: pam_unix(sshd:session): session opened for user ceph-admin(uid=42477) by ceph-admin(uid=0)
Oct 11 01:42:06 compute-0 sudo[193919]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:06 compute-0 sudo[193919]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:06 compute-0 sudo[193919]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:07 compute-0 sudo[193944]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:42:07 compute-0 sudo[193944]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:07 compute-0 sudo[193944]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:07 compute-0 sshd-session[193969]: Accepted publickey for ceph-admin from 192.168.122.100 port 37762 ssh2: RSA SHA256:s3wA1uQmx1ezgfjQez1lP/VjOHwzWTFNHeyPt4wAHzs
Oct 11 01:42:07 compute-0 systemd-logind[804]: New session 35 of user ceph-admin.
Oct 11 01:42:07 compute-0 systemd[1]: Started Session 35 of User ceph-admin.
Oct 11 01:42:07 compute-0 sshd-session[193969]: pam_unix(sshd:session): session opened for user ceph-admin(uid=42477) by ceph-admin(uid=0)
Oct 11 01:42:07 compute-0 sudo[193973]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:07 compute-0 sudo[193973]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:07 compute-0 sudo[193973]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:07 compute-0 sudo[193998]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/touch /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Oct 11 01:42:07 compute-0 sudo[193998]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:07 compute-0 sudo[193998]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:07 compute-0 sshd-session[194023]: Accepted publickey for ceph-admin from 192.168.122.100 port 37766 ssh2: RSA SHA256:s3wA1uQmx1ezgfjQez1lP/VjOHwzWTFNHeyPt4wAHzs
Oct 11 01:42:07 compute-0 systemd-logind[804]: New session 36 of user ceph-admin.
Oct 11 01:42:07 compute-0 systemd[1]: Started Session 36 of User ceph-admin.
Oct 11 01:42:08 compute-0 sshd-session[194023]: pam_unix(sshd:session): session opened for user ceph-admin(uid=42477) by ceph-admin(uid=0)
Oct 11 01:42:08 compute-0 sudo[194027]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:08 compute-0 sudo[194027]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:08 compute-0 sudo[194027]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:08 compute-0 sudo[194052]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R ceph-admin /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:42:08 compute-0 sudo[194052]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:08 compute-0 sudo[194052]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:08 compute-0 ceph-mgr[192233]: mgr.server send_report Not sending PG status to monitor yet, waiting for OSDs
Oct 11 01:42:08 compute-0 sshd-session[194077]: Accepted publickey for ceph-admin from 192.168.122.100 port 37772 ssh2: RSA SHA256:s3wA1uQmx1ezgfjQez1lP/VjOHwzWTFNHeyPt4wAHzs
Oct 11 01:42:08 compute-0 systemd-logind[804]: New session 37 of user ceph-admin.
Oct 11 01:42:08 compute-0 systemd[1]: Started Session 37 of User ceph-admin.
Oct 11 01:42:08 compute-0 sshd-session[194077]: pam_unix(sshd:session): session opened for user ceph-admin(uid=42477) by ceph-admin(uid=0)
Oct 11 01:42:08 compute-0 sudo[194081]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:08 compute-0 sudo[194081]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:08 compute-0 sudo[194081]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:08 compute-0 sudo[194106]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Oct 11 01:42:08 compute-0 sudo[194106]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:08 compute-0 sudo[194106]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:09 compute-0 sshd-session[194131]: Accepted publickey for ceph-admin from 192.168.122.100 port 37784 ssh2: RSA SHA256:s3wA1uQmx1ezgfjQez1lP/VjOHwzWTFNHeyPt4wAHzs
Oct 11 01:42:09 compute-0 systemd-logind[804]: New session 38 of user ceph-admin.
Oct 11 01:42:09 compute-0 systemd[1]: Started Session 38 of User ceph-admin.
Oct 11 01:42:09 compute-0 sshd-session[194131]: pam_unix(sshd:session): session opened for user ceph-admin(uid=42477) by ceph-admin(uid=0)
Oct 11 01:42:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e2 _set_new_cache_sizes cache_size:1020054709 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:42:09 compute-0 sshd-session[194158]: Accepted publickey for ceph-admin from 192.168.122.100 port 37798 ssh2: RSA SHA256:s3wA1uQmx1ezgfjQez1lP/VjOHwzWTFNHeyPt4wAHzs
Oct 11 01:42:09 compute-0 systemd-logind[804]: New session 39 of user ceph-admin.
Oct 11 01:42:09 compute-0 systemd[1]: Started Session 39 of User ceph-admin.
Oct 11 01:42:09 compute-0 sshd-session[194158]: pam_unix(sshd:session): session opened for user ceph-admin(uid=42477) by ceph-admin(uid=0)
Oct 11 01:42:10 compute-0 podman[194160]: 2025-10-11 01:42:10.04078063 +0000 UTC m=+0.132417425 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:42:10 compute-0 podman[194163]: 2025-10-11 01:42:10.081011479 +0000 UTC m=+0.164865949 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.display-name=Red Hat Universal Base Image 9, architecture=x86_64, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, container_name=kepler, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=1214.1726694543, com.redhat.component=ubi9-container, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, version=9.4, maintainer=Red Hat, Inc., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., io.openshift.expose-services=, name=ubi9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vendor=Red Hat, Inc., vcs-type=git, io.buildah.version=1.29.0, io.openshift.tags=base rhel9, config_id=edpm, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2024-09-18T21:23:30, release-0.7.12=)
Oct 11 01:42:10 compute-0 podman[194161]: 2025-10-11 01:42:10.09937664 +0000 UTC m=+0.190779787 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=ovn_controller, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 01:42:10 compute-0 sudo[194210]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:10 compute-0 sudo[194210]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:10 compute-0 sudo[194210]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:10 compute-0 sudo[194255]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mv /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Oct 11 01:42:10 compute-0 sudo[194255]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:10 compute-0 sudo[194255]: pam_unix(sudo:session): session closed for user root
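
The mkdir/touch/chown/chmod/mv sequence that just completed is how the mgr ships the cephadm binary to the host without readers ever seeing a partial file: stage under a private /tmp tree, fix ownership and mode, then mv into /var/lib/ceph/<fsid>/. The pattern in isolation, with illustrative paths (rename is only atomic when source and destination sit on the same filesystem):

    # Stage-then-rename publish of a file.
    tmp=/tmp/staging/cephadm.new
    dst=/var/lib/ceph/cephadm
    mkdir -p "$(dirname "$tmp")" "$(dirname "$dst")"
    cp cephadm "$tmp"      # write the payload somewhere private
    chmod 644 "$tmp"       # final permissions before publishing
    mv "$tmp" "$dst"       # publish; atomic within one filesystem
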
Oct 11 01:42:10 compute-0 ceph-mgr[192233]: mgr.server send_report Not sending PG status to monitor yet, waiting for OSDs
Oct 11 01:42:10 compute-0 sshd-session[194280]: Accepted publickey for ceph-admin from 192.168.122.100 port 37806 ssh2: RSA SHA256:s3wA1uQmx1ezgfjQez1lP/VjOHwzWTFNHeyPt4wAHzs
Oct 11 01:42:10 compute-0 systemd-logind[804]: New session 40 of user ceph-admin.
Oct 11 01:42:10 compute-0 systemd[1]: Started Session 40 of User ceph-admin.
Oct 11 01:42:10 compute-0 sshd-session[194280]: pam_unix(sshd:session): session opened for user ceph-admin(uid=42477) by ceph-admin(uid=0)
Oct 11 01:42:10 compute-0 sudo[194284]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:10 compute-0 sudo[194284]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:10 compute-0 sudo[194284]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:10 compute-0 sudo[194309]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host --expect-hostname compute-0
Oct 11 01:42:10 compute-0 sudo[194309]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:11 compute-0 sudo[194309]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/inventory}] v 0) v1
Oct 11 01:42:11 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
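
The config-key set of mgr/cephadm/inventory shows where cephadm keeps its state: the host inventory (and, shortly below, the mon and mgr service specs) is persisted in the monitors' config-key store rather than in files on the host. It can be inspected read-only:

    # List cephadm's persisted keys, then dump the host inventory (JSON).
    ceph config-key ls | grep mgr/cephadm
    ceph config-key get mgr/cephadm/inventory
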
Oct 11 01:42:11 compute-0 ceph-mgr[192233]: [cephadm INFO root] Added host compute-0
Oct 11 01:42:11 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Added host compute-0
Oct 11 01:42:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config dump", "format": "json"} v 0) v1
Oct 11 01:42:11 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
Oct 11 01:42:11 compute-0 distracted_easley[193635]: Added host 'compute-0' with addr '192.168.122.100'
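
The orch host add dispatched some eight seconds earlier completes here, acknowledged by the one-shot container distracted_easley. The equivalent CLI, with the hostname and address taken from this log:

    # Register the node with the orchestrator, then verify.
    ceph orch host add compute-0 192.168.122.100
    ceph orch host ls
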
Oct 11 01:42:11 compute-0 systemd[1]: libpod-1df649e7ee32dc0a4fbca69af889e712fa4db34bf9ea2374658f81cc4ed1f7c7.scope: Deactivated successfully.
Oct 11 01:42:11 compute-0 conmon[193635]: conmon 1df649e7ee32dc0a4fbc <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-1df649e7ee32dc0a4fbca69af889e712fa4db34bf9ea2374658f81cc4ed1f7c7.scope/container/memory.events
Oct 11 01:42:11 compute-0 podman[193619]: 2025-10-11 01:42:11.416114581 +0000 UTC m=+8.690531764 container died 1df649e7ee32dc0a4fbca69af889e712fa4db34bf9ea2374658f81cc4ed1f7c7 (image=quay.io/ceph/ceph:v18, name=distracted_easley, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507)
Oct 11 01:42:11 compute-0 systemd[1]: var-lib-containers-storage-overlay-efc7d72d05c5c77d037ec331ce1e2872f00405bb5d8d3965870c9ddec4e31a0b-merged.mount: Deactivated successfully.
Oct 11 01:42:11 compute-0 podman[193619]: 2025-10-11 01:42:11.517200799 +0000 UTC m=+8.791617962 container remove 1df649e7ee32dc0a4fbca69af889e712fa4db34bf9ea2374658f81cc4ed1f7c7 (image=quay.io/ceph/ceph:v18, name=distracted_easley, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:42:11 compute-0 systemd[1]: libpod-conmon-1df649e7ee32dc0a4fbca69af889e712fa4db34bf9ea2374658f81cc4ed1f7c7.scope: Deactivated successfully.
Oct 11 01:42:11 compute-0 sudo[194354]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:11 compute-0 sudo[194354]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:11 compute-0 sudo[194354]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:11 compute-0 podman[194388]: 2025-10-11 01:42:11.640534365 +0000 UTC m=+0.080021954 container create 3197c05d16ba68af4aee0de2256013f1003b3bb09b6b44e3a919feaec5c3c210 (image=quay.io/ceph/ceph:v18, name=quizzical_joliot, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2)
Oct 11 01:42:11 compute-0 podman[194388]: 2025-10-11 01:42:11.613628549 +0000 UTC m=+0.053116108 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:11 compute-0 systemd[1]: Started libpod-conmon-3197c05d16ba68af4aee0de2256013f1003b3bb09b6b44e3a919feaec5c3c210.scope.
Oct 11 01:42:11 compute-0 sudo[194396]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:42:11 compute-0 sudo[194396]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:11 compute-0 sudo[194396]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:11 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d56f8424e30f713dfd5ebb3cda8b64080baeade3755c11a114ad18510110085c/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d56f8424e30f713dfd5ebb3cda8b64080baeade3755c11a114ad18510110085c/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d56f8424e30f713dfd5ebb3cda8b64080baeade3755c11a114ad18510110085c/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:11 compute-0 podman[194388]: 2025-10-11 01:42:11.805125782 +0000 UTC m=+0.244613401 container init 3197c05d16ba68af4aee0de2256013f1003b3bb09b6b44e3a919feaec5c3c210 (image=quay.io/ceph/ceph:v18, name=quizzical_joliot, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 01:42:11 compute-0 podman[194388]: 2025-10-11 01:42:11.826883661 +0000 UTC m=+0.266371250 container start 3197c05d16ba68af4aee0de2256013f1003b3bb09b6b44e3a919feaec5c3c210 (image=quay.io/ceph/ceph:v18, name=quizzical_joliot, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:42:11 compute-0 podman[194388]: 2025-10-11 01:42:11.837655592 +0000 UTC m=+0.277143231 container attach 3197c05d16ba68af4aee0de2256013f1003b3bb09b6b44e3a919feaec5c3c210 (image=quay.io/ceph/ceph:v18, name=quizzical_joliot, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:42:11 compute-0 sudo[194431]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:11 compute-0 sudo[194431]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:11 compute-0 sudo[194431]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:12 compute-0 sudo[194458]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph:v18 --timeout 895 inspect-image
Oct 11 01:42:12 compute-0 sudo[194458]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:12 compute-0 ceph-mon[191930]: Added host compute-0
Oct 11 01:42:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
Oct 11 01:42:12 compute-0 ceph-mgr[192233]: mgr.server send_report Not sending PG status to monitor yet, waiting for OSDs
Oct 11 01:42:12 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14154 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:12 compute-0 ceph-mgr[192233]: [cephadm INFO root] Saving service mon spec with placement count:5
Oct 11 01:42:12 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Saving service mon spec with placement count:5
Oct 11 01:42:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.mon}] v 0) v1
Oct 11 01:42:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:12 compute-0 quizzical_joliot[194427]: Scheduled mon update...
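
orch apply mon stores a declarative service spec with placement count:5; "Scheduled mon update..." means the orchestrator will converge toward five monitors as hosts become available, not that five are deployed now (only compute-0 exists at this point). The flag syntax below follows the cephadm CLI and is an assumption, since the log only shows the JSON dispatch:

    # Ask for 5 monitors cluster-wide and inspect the saved spec.
    ceph orch apply mon --placement=5
    ceph orch ls mon
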
Oct 11 01:42:12 compute-0 systemd[1]: libpod-3197c05d16ba68af4aee0de2256013f1003b3bb09b6b44e3a919feaec5c3c210.scope: Deactivated successfully.
Oct 11 01:42:12 compute-0 podman[194388]: 2025-10-11 01:42:12.470600709 +0000 UTC m=+0.910088288 container died 3197c05d16ba68af4aee0de2256013f1003b3bb09b6b44e3a919feaec5c3c210 (image=quay.io/ceph/ceph:v18, name=quizzical_joliot, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:42:12 compute-0 systemd[1]: var-lib-containers-storage-overlay-d56f8424e30f713dfd5ebb3cda8b64080baeade3755c11a114ad18510110085c-merged.mount: Deactivated successfully.
Oct 11 01:42:12 compute-0 podman[194388]: 2025-10-11 01:42:12.561942737 +0000 UTC m=+1.001430326 container remove 3197c05d16ba68af4aee0de2256013f1003b3bb09b6b44e3a919feaec5c3c210 (image=quay.io/ceph/ceph:v18, name=quizzical_joliot, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:42:12 compute-0 podman[194527]: 2025-10-11 01:42:12.567572772 +0000 UTC m=+0.107829922 container create b642fabe9078e3e357ad0442b5bcfb4fc1431c2bfde44b91822250651c146324 (image=quay.io/ceph/ceph:v18, name=quirky_bartik, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=reef)
Oct 11 01:42:12 compute-0 systemd[1]: libpod-conmon-3197c05d16ba68af4aee0de2256013f1003b3bb09b6b44e3a919feaec5c3c210.scope: Deactivated successfully.
Oct 11 01:42:12 compute-0 podman[194527]: 2025-10-11 01:42:12.531536654 +0000 UTC m=+0.071793794 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:12 compute-0 systemd[1]: Started libpod-conmon-b642fabe9078e3e357ad0442b5bcfb4fc1431c2bfde44b91822250651c146324.scope.
Oct 11 01:42:12 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:12 compute-0 podman[194555]: 2025-10-11 01:42:12.684360951 +0000 UTC m=+0.086605145 container create 811e52c493a147a41bb74f2919ded95adecaaaa9cc10b539998429838ff95df8 (image=quay.io/ceph/ceph:v18, name=admiring_ardinghelli, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507)
Oct 11 01:42:12 compute-0 podman[194527]: 2025-10-11 01:42:12.695911264 +0000 UTC m=+0.236168444 container init b642fabe9078e3e357ad0442b5bcfb4fc1431c2bfde44b91822250651c146324 (image=quay.io/ceph/ceph:v18, name=quirky_bartik, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:42:12 compute-0 podman[194527]: 2025-10-11 01:42:12.714988261 +0000 UTC m=+0.255245411 container start b642fabe9078e3e357ad0442b5bcfb4fc1431c2bfde44b91822250651c146324 (image=quay.io/ceph/ceph:v18, name=quirky_bartik, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 01:42:12 compute-0 podman[194527]: 2025-10-11 01:42:12.722493044 +0000 UTC m=+0.262750274 container attach b642fabe9078e3e357ad0442b5bcfb4fc1431c2bfde44b91822250651c146324 (image=quay.io/ceph/ceph:v18, name=quirky_bartik, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:42:12 compute-0 podman[194555]: 2025-10-11 01:42:12.640786268 +0000 UTC m=+0.043030442 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:12 compute-0 systemd[1]: Started libpod-conmon-811e52c493a147a41bb74f2919ded95adecaaaa9cc10b539998429838ff95df8.scope.
Oct 11 01:42:12 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7496cab017374826f7aac410232d2503e3d5b1b7844ddb9458b64d5765e8b661/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7496cab017374826f7aac410232d2503e3d5b1b7844ddb9458b64d5765e8b661/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7496cab017374826f7aac410232d2503e3d5b1b7844ddb9458b64d5765e8b661/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:12 compute-0 podman[194555]: 2025-10-11 01:42:12.832494097 +0000 UTC m=+0.234738271 container init 811e52c493a147a41bb74f2919ded95adecaaaa9cc10b539998429838ff95df8 (image=quay.io/ceph/ceph:v18, name=admiring_ardinghelli, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 01:42:12 compute-0 podman[194555]: 2025-10-11 01:42:12.850336827 +0000 UTC m=+0.252580971 container start 811e52c493a147a41bb74f2919ded95adecaaaa9cc10b539998429838ff95df8 (image=quay.io/ceph/ceph:v18, name=admiring_ardinghelli, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 01:42:12 compute-0 podman[194555]: 2025-10-11 01:42:12.857021015 +0000 UTC m=+0.259265169 container attach 811e52c493a147a41bb74f2919ded95adecaaaa9cc10b539998429838ff95df8 (image=quay.io/ceph/ceph:v18, name=admiring_ardinghelli, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:42:13 compute-0 quirky_bartik[194567]: ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable)
Oct 11 01:42:13 compute-0 systemd[1]: libpod-b642fabe9078e3e357ad0442b5bcfb4fc1431c2bfde44b91822250651c146324.scope: Deactivated successfully.
Oct 11 01:42:13 compute-0 podman[194527]: 2025-10-11 01:42:13.023209258 +0000 UTC m=+0.563466428 container died b642fabe9078e3e357ad0442b5bcfb4fc1431c2bfde44b91822250651c146324 (image=quay.io/ceph/ceph:v18, name=quirky_bartik, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:42:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-96bd3f5469c017333810c2dc381ece5e40385bae85914880a274c927fb9263c6-merged.mount: Deactivated successfully.
Oct 11 01:42:13 compute-0 podman[194527]: 2025-10-11 01:42:13.107542012 +0000 UTC m=+0.647799172 container remove b642fabe9078e3e357ad0442b5bcfb4fc1431c2bfde44b91822250651c146324 (image=quay.io/ceph/ceph:v18, name=quirky_bartik, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:42:13 compute-0 systemd[1]: libpod-conmon-b642fabe9078e3e357ad0442b5bcfb4fc1431c2bfde44b91822250651c146324.scope: Deactivated successfully.
Oct 11 01:42:13 compute-0 sudo[194458]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config set, name=container_image}] v 0) v1
Oct 11 01:42:13 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:13 compute-0 sudo[194614]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:13 compute-0 sudo[194614]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:13 compute-0 sudo[194614]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:13 compute-0 ceph-mon[191930]: from='client.14154 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:13 compute-0 ceph-mon[191930]: Saving service mon spec with placement count:5
Oct 11 01:42:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:13 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14156 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:13 compute-0 ceph-mgr[192233]: [cephadm INFO root] Saving service mgr spec with placement count:2
Oct 11 01:42:13 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Saving service mgr spec with placement count:2
Oct 11 01:42:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.mgr}] v 0) v1
Oct 11 01:42:13 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:13 compute-0 admiring_ardinghelli[194577]: Scheduled mgr update...
Oct 11 01:42:13 compute-0 sudo[194640]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:42:13 compute-0 sudo[194640]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:13 compute-0 sudo[194640]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:13 compute-0 systemd[1]: libpod-811e52c493a147a41bb74f2919ded95adecaaaa9cc10b539998429838ff95df8.scope: Deactivated successfully.
Oct 11 01:42:13 compute-0 podman[194555]: 2025-10-11 01:42:13.51942415 +0000 UTC m=+0.921668344 container died 811e52c493a147a41bb74f2919ded95adecaaaa9cc10b539998429838ff95df8 (image=quay.io/ceph/ceph:v18, name=admiring_ardinghelli, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default)
Oct 11 01:42:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-7496cab017374826f7aac410232d2503e3d5b1b7844ddb9458b64d5765e8b661-merged.mount: Deactivated successfully.
Oct 11 01:42:13 compute-0 podman[194555]: 2025-10-11 01:42:13.602828331 +0000 UTC m=+1.005072485 container remove 811e52c493a147a41bb74f2919ded95adecaaaa9cc10b539998429838ff95df8 (image=quay.io/ceph/ceph:v18, name=admiring_ardinghelli, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:42:13 compute-0 systemd[1]: libpod-conmon-811e52c493a147a41bb74f2919ded95adecaaaa9cc10b539998429838ff95df8.scope: Deactivated successfully.
Oct 11 01:42:13 compute-0 sudo[194667]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:13 compute-0 sudo[194667]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:13 compute-0 sudo[194667]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:13 compute-0 podman[194704]: 2025-10-11 01:42:13.725909337 +0000 UTC m=+0.085104626 container create 0f76d3fedc28708105d2941923e29783e05ef020c54a173d0a91841a41ff2592 (image=quay.io/ceph/ceph:v18, name=amazing_zhukovsky, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 01:42:13 compute-0 podman[194704]: 2025-10-11 01:42:13.688519942 +0000 UTC m=+0.047715291 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:13 compute-0 systemd[1]: Started libpod-conmon-0f76d3fedc28708105d2941923e29783e05ef020c54a173d0a91841a41ff2592.scope.
Oct 11 01:42:13 compute-0 sudo[194714]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host
Oct 11 01:42:13 compute-0 sudo[194714]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:13 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d49573936286db65fc05466a3829441a946ef0b13d1015633dc0e3f786821376/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d49573936286db65fc05466a3829441a946ef0b13d1015633dc0e3f786821376/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d49573936286db65fc05466a3829441a946ef0b13d1015633dc0e3f786821376/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:13 compute-0 podman[194704]: 2025-10-11 01:42:13.878168419 +0000 UTC m=+0.237363718 container init 0f76d3fedc28708105d2941923e29783e05ef020c54a173d0a91841a41ff2592 (image=quay.io/ceph/ceph:v18, name=amazing_zhukovsky, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:42:13 compute-0 podman[194704]: 2025-10-11 01:42:13.909637936 +0000 UTC m=+0.268833205 container start 0f76d3fedc28708105d2941923e29783e05ef020c54a173d0a91841a41ff2592 (image=quay.io/ceph/ceph:v18, name=amazing_zhukovsky, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS)
Oct 11 01:42:13 compute-0 podman[194704]: 2025-10-11 01:42:13.915151341 +0000 UTC m=+0.274346640 container attach 0f76d3fedc28708105d2941923e29783e05ef020c54a173d0a91841a41ff2592 (image=quay.io/ceph/ceph:v18, name=amazing_zhukovsky, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 01:42:14 compute-0 sudo[194714]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:42:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:14 compute-0 sudo[194774]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:14 compute-0 sudo[194774]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:14 compute-0 ceph-mgr[192233]: mgr.server send_report Not sending PG status to monitor yet, waiting for OSDs
Oct 11 01:42:14 compute-0 sudo[194774]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:14 compute-0 ceph-mon[191930]: from='client.14156 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:14 compute-0 ceph-mon[191930]: Saving service mgr spec with placement count:2
Oct 11 01:42:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:14 compute-0 podman[194817]: 2025-10-11 01:42:14.492074461 +0000 UTC m=+0.127443552 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_id=edpm, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, org.label-schema.build-date=20251007, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible)
Oct 11 01:42:14 compute-0 sudo[194824]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:42:14 compute-0 sudo[194824]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:14 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14158 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "crash", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:14 compute-0 ceph-mgr[192233]: [cephadm INFO root] Saving service crash spec with placement *
Oct 11 01:42:14 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Saving service crash spec with placement *
Oct 11 01:42:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.crash}] v 0) v1
Oct 11 01:42:14 compute-0 sudo[194824]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:14 compute-0 amazing_zhukovsky[194746]: Scheduled crash update...
Oct 11 01:42:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e2 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:42:14 compute-0 systemd[1]: libpod-0f76d3fedc28708105d2941923e29783e05ef020c54a173d0a91841a41ff2592.scope: Deactivated successfully.
Oct 11 01:42:14 compute-0 podman[194704]: 2025-10-11 01:42:14.58052062 +0000 UTC m=+0.939715919 container died 0f76d3fedc28708105d2941923e29783e05ef020c54a173d0a91841a41ff2592 (image=quay.io/ceph/ceph:v18, name=amazing_zhukovsky, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 01:42:14 compute-0 systemd[1]: var-lib-containers-storage-overlay-d49573936286db65fc05466a3829441a946ef0b13d1015633dc0e3f786821376-merged.mount: Deactivated successfully.
Oct 11 01:42:14 compute-0 podman[194704]: 2025-10-11 01:42:14.666861523 +0000 UTC m=+1.026056792 container remove 0f76d3fedc28708105d2941923e29783e05ef020c54a173d0a91841a41ff2592 (image=quay.io/ceph/ceph:v18, name=amazing_zhukovsky, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:42:14 compute-0 sudo[194862]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:14 compute-0 sudo[194862]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:14 compute-0 systemd[1]: libpod-conmon-0f76d3fedc28708105d2941923e29783e05ef020c54a173d0a91841a41ff2592.scope: Deactivated successfully.
Oct 11 01:42:14 compute-0 sudo[194862]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:14 compute-0 podman[194896]: 2025-10-11 01:42:14.789864182 +0000 UTC m=+0.077588932 container create 16bc02ebeca5122c37f1f4826ff876b2a15e16ae854eb12438874d2abc7bfb13 (image=quay.io/ceph/ceph:v18, name=dreamy_matsumoto, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507)
Oct 11 01:42:14 compute-0 sudo[194903]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ls
Oct 11 01:42:14 compute-0 sudo[194903]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:14 compute-0 podman[194896]: 2025-10-11 01:42:14.765499067 +0000 UTC m=+0.053223847 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:14 compute-0 systemd[1]: Started libpod-conmon-16bc02ebeca5122c37f1f4826ff876b2a15e16ae854eb12438874d2abc7bfb13.scope.
Oct 11 01:42:14 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:14 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4a52c5edd7dc35b994e462c50d00598a49aea7ce0807a581a942207ff1435346/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:14 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4a52c5edd7dc35b994e462c50d00598a49aea7ce0807a581a942207ff1435346/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:14 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4a52c5edd7dc35b994e462c50d00598a49aea7ce0807a581a942207ff1435346/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:14 compute-0 podman[194896]: 2025-10-11 01:42:14.948762259 +0000 UTC m=+0.236487039 container init 16bc02ebeca5122c37f1f4826ff876b2a15e16ae854eb12438874d2abc7bfb13 (image=quay.io/ceph/ceph:v18, name=dreamy_matsumoto, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 01:42:14 compute-0 podman[194896]: 2025-10-11 01:42:14.96611946 +0000 UTC m=+0.253844240 container start 16bc02ebeca5122c37f1f4826ff876b2a15e16ae854eb12438874d2abc7bfb13 (image=quay.io/ceph/ceph:v18, name=dreamy_matsumoto, org.label-schema.build-date=20250507, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:42:14 compute-0 podman[194896]: 2025-10-11 01:42:14.97358317 +0000 UTC m=+0.261307960 container attach 16bc02ebeca5122c37f1f4826ff876b2a15e16ae854eb12438874d2abc7bfb13 (image=quay.io/ceph/ceph:v18, name=dreamy_matsumoto, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 01:42:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config set, name=mgr/cephadm/container_init}] v 0) v1
Oct 11 01:42:15 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1140065290' entity='client.admin' 
Oct 11 01:42:15 compute-0 ceph-mon[191930]: from='client.14158 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "crash", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:15 compute-0 ceph-mon[191930]: Saving service crash spec with placement *
Oct 11 01:42:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:15 compute-0 systemd[1]: libpod-16bc02ebeca5122c37f1f4826ff876b2a15e16ae854eb12438874d2abc7bfb13.scope: Deactivated successfully.
Oct 11 01:42:15 compute-0 podman[194896]: 2025-10-11 01:42:15.594453573 +0000 UTC m=+0.882178343 container died 16bc02ebeca5122c37f1f4826ff876b2a15e16ae854eb12438874d2abc7bfb13 (image=quay.io/ceph/ceph:v18, name=dreamy_matsumoto, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:42:15 compute-0 systemd[1]: var-lib-containers-storage-overlay-4a52c5edd7dc35b994e462c50d00598a49aea7ce0807a581a942207ff1435346-merged.mount: Deactivated successfully.
Oct 11 01:42:15 compute-0 podman[195031]: 2025-10-11 01:42:15.675338785 +0000 UTC m=+0.130131755 container exec ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 01:42:15 compute-0 podman[194896]: 2025-10-11 01:42:15.677209512 +0000 UTC m=+0.964934252 container remove 16bc02ebeca5122c37f1f4826ff876b2a15e16ae854eb12438874d2abc7bfb13 (image=quay.io/ceph/ceph:v18, name=dreamy_matsumoto, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Oct 11 01:42:15 compute-0 systemd[1]: libpod-conmon-16bc02ebeca5122c37f1f4826ff876b2a15e16ae854eb12438874d2abc7bfb13.scope: Deactivated successfully.
Oct 11 01:42:15 compute-0 podman[195064]: 2025-10-11 01:42:15.785904172 +0000 UTC m=+0.071480530 container create fc8664313c2ae714a1c3e6d8542d8f2f7cb2e206359142160145e192567db15b (image=quay.io/ceph/ceph:v18, name=competent_faraday, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:42:15 compute-0 podman[195064]: 2025-10-11 01:42:15.753195767 +0000 UTC m=+0.038772195 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:15 compute-0 systemd[1]: Started libpod-conmon-fc8664313c2ae714a1c3e6d8542d8f2f7cb2e206359142160145e192567db15b.scope.
Oct 11 01:42:15 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e73b19aa519a9bd3891ee1590ad0da9e3d49e88a3b85344fa9c8db678388f51d/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e73b19aa519a9bd3891ee1590ad0da9e3d49e88a3b85344fa9c8db678388f51d/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e73b19aa519a9bd3891ee1590ad0da9e3d49e88a3b85344fa9c8db678388f51d/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:15 compute-0 podman[195064]: 2025-10-11 01:42:15.946635303 +0000 UTC m=+0.232211681 container init fc8664313c2ae714a1c3e6d8542d8f2f7cb2e206359142160145e192567db15b (image=quay.io/ceph/ceph:v18, name=competent_faraday, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:42:15 compute-0 podman[195064]: 2025-10-11 01:42:15.957819067 +0000 UTC m=+0.243395415 container start fc8664313c2ae714a1c3e6d8542d8f2f7cb2e206359142160145e192567db15b (image=quay.io/ceph/ceph:v18, name=competent_faraday, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:42:15 compute-0 podman[195064]: 2025-10-11 01:42:15.962370347 +0000 UTC m=+0.247946725 container attach fc8664313c2ae714a1c3e6d8542d8f2f7cb2e206359142160145e192567db15b (image=quay.io/ceph/ceph:v18, name=competent_faraday, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:42:16 compute-0 podman[195031]: 2025-10-11 01:42:16.01151912 +0000 UTC m=+0.466312060 container exec_died ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True)
Oct 11 01:42:16 compute-0 sudo[194903]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:42:16 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:16 compute-0 ceph-mgr[192233]: mgr.server send_report Giving up on OSDs that haven't reported yet, sending potentially incomplete PG state to mon
Oct 11 01:42:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:16 compute-0 ceph-mon[191930]: log_channel(cluster) log [WRN] : Health check failed: OSD count 0 < osd_pool_default_size 1 (TOO_FEW_OSDS)
Oct 11 01:42:16 compute-0 sudo[195114]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:16 compute-0 sudo[195114]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:16 compute-0 sudo[195114]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:16 compute-0 sudo[195158]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:42:16 compute-0 sudo[195158]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:16 compute-0 sudo[195158]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:16 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1140065290' entity='client.admin' 
Oct 11 01:42:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:16 compute-0 ceph-mon[191930]: Health check failed: OSD count 0 < osd_pool_default_size 1 (TOO_FEW_OSDS)
Oct 11 01:42:16 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14162 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "label:_admin", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/client_keyrings}] v 0) v1
Oct 11 01:42:16 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:16 compute-0 systemd[1]: libpod-fc8664313c2ae714a1c3e6d8542d8f2f7cb2e206359142160145e192567db15b.scope: Deactivated successfully.
Oct 11 01:42:16 compute-0 podman[195064]: 2025-10-11 01:42:16.605298812 +0000 UTC m=+0.890875180 container died fc8664313c2ae714a1c3e6d8542d8f2f7cb2e206359142160145e192567db15b (image=quay.io/ceph/ceph:v18, name=competent_faraday, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 01:42:16 compute-0 systemd[1]: var-lib-containers-storage-overlay-e73b19aa519a9bd3891ee1590ad0da9e3d49e88a3b85344fa9c8db678388f51d-merged.mount: Deactivated successfully.
Oct 11 01:42:16 compute-0 sudo[195183]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:16 compute-0 sudo[195183]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:16 compute-0 sudo[195183]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:16 compute-0 podman[195064]: 2025-10-11 01:42:16.709026519 +0000 UTC m=+0.994602857 container remove fc8664313c2ae714a1c3e6d8542d8f2f7cb2e206359142160145e192567db15b (image=quay.io/ceph/ceph:v18, name=competent_faraday, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:42:16 compute-0 systemd[1]: libpod-conmon-fc8664313c2ae714a1c3e6d8542d8f2f7cb2e206359142160145e192567db15b.scope: Deactivated successfully.
Oct 11 01:42:16 compute-0 sudo[195223]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 01:42:16 compute-0 podman[195229]: 2025-10-11 01:42:16.812454222 +0000 UTC m=+0.062910102 container create b2f34a92eff4cee9f502a2bd0a1ebfd88d58ea51d533a1cd6f0e49be87268bd3 (image=quay.io/ceph/ceph:v18, name=youthful_newton, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2)
Oct 11 01:42:16 compute-0 sudo[195223]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:16 compute-0 systemd[1]: Started libpod-conmon-b2f34a92eff4cee9f502a2bd0a1ebfd88d58ea51d533a1cd6f0e49be87268bd3.scope.
Oct 11 01:42:16 compute-0 podman[195229]: 2025-10-11 01:42:16.791127777 +0000 UTC m=+0.041583677 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:16 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fc240e8f88c4253d25e005860a732c61fc798b12ae5c15c6ade95e07d5209705/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fc240e8f88c4253d25e005860a732c61fc798b12ae5c15c6ade95e07d5209705/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fc240e8f88c4253d25e005860a732c61fc798b12ae5c15c6ade95e07d5209705/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:16 compute-0 podman[195229]: 2025-10-11 01:42:16.96960052 +0000 UTC m=+0.220056490 container init b2f34a92eff4cee9f502a2bd0a1ebfd88d58ea51d533a1cd6f0e49be87268bd3 (image=quay.io/ceph/ceph:v18, name=youthful_newton, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS)
Oct 11 01:42:16 compute-0 podman[195229]: 2025-10-11 01:42:16.979828638 +0000 UTC m=+0.230284528 container start b2f34a92eff4cee9f502a2bd0a1ebfd88d58ea51d533a1cd6f0e49be87268bd3 (image=quay.io/ceph/ceph:v18, name=youthful_newton, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0)
Oct 11 01:42:16 compute-0 podman[195229]: 2025-10-11 01:42:16.984630578 +0000 UTC m=+0.235086498 container attach b2f34a92eff4cee9f502a2bd0a1ebfd88d58ea51d533a1cd6f0e49be87268bd3 (image=quay.io/ceph/ceph:v18, name=youthful_newton, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 01:42:17 compute-0 systemd[1]: proc-sys-fs-binfmt_misc.automount: Got automount request for /proc/sys/fs/binfmt_misc, triggered by 195280 (sysctl)
Oct 11 01:42:17 compute-0 systemd[1]: Mounting Arbitrary Executable File Formats File System...
Oct 11 01:42:17 compute-0 systemd[1]: Mounted Arbitrary Executable File Formats File System.
Oct 11 01:42:17 compute-0 sudo[195223]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:17 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14164 -' entity='client.admin' cmd=[{"prefix": "orch host label add", "hostname": "compute-0", "label": "_admin", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:17 compute-0 ceph-mon[191930]: pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:17 compute-0 ceph-mon[191930]: from='client.14162 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "label:_admin", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:17 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/inventory}] v 0) v1
Oct 11 01:42:17 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:17 compute-0 ceph-mgr[192233]: [cephadm INFO root] Added label _admin to host compute-0
Oct 11 01:42:17 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Added label _admin to host compute-0
Oct 11 01:42:17 compute-0 youthful_newton[195263]: Added label _admin to host compute-0
Oct 11 01:42:17 compute-0 systemd[1]: libpod-b2f34a92eff4cee9f502a2bd0a1ebfd88d58ea51d533a1cd6f0e49be87268bd3.scope: Deactivated successfully.
Oct 11 01:42:17 compute-0 podman[195229]: 2025-10-11 01:42:17.63446854 +0000 UTC m=+0.884924460 container died b2f34a92eff4cee9f502a2bd0a1ebfd88d58ea51d533a1cd6f0e49be87268bd3 (image=quay.io/ceph/ceph:v18, name=youthful_newton, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:42:17 compute-0 systemd[1]: var-lib-containers-storage-overlay-fc240e8f88c4253d25e005860a732c61fc798b12ae5c15c6ade95e07d5209705-merged.mount: Deactivated successfully.
Oct 11 01:42:17 compute-0 sudo[195321]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:17 compute-0 sudo[195321]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:17 compute-0 sudo[195321]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:17 compute-0 podman[195229]: 2025-10-11 01:42:17.731066833 +0000 UTC m=+0.981522713 container remove b2f34a92eff4cee9f502a2bd0a1ebfd88d58ea51d533a1cd6f0e49be87268bd3 (image=quay.io/ceph/ceph:v18, name=youthful_newton, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef)
Oct 11 01:42:17 compute-0 systemd[1]: libpod-conmon-b2f34a92eff4cee9f502a2bd0a1ebfd88d58ea51d533a1cd6f0e49be87268bd3.scope: Deactivated successfully.
Oct 11 01:42:17 compute-0 sudo[195361]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:42:17 compute-0 sudo[195361]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:17 compute-0 podman[195364]: 2025-10-11 01:42:17.850297005 +0000 UTC m=+0.073391681 container create ee6575f5f4353bf2e2bcdae7513eb85ce2136ec12b30283b47e0d324311d5b7e (image=quay.io/ceph/ceph:v18, name=zealous_dirac, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 01:42:17 compute-0 sudo[195361]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:17 compute-0 systemd[1]: Started libpod-conmon-ee6575f5f4353bf2e2bcdae7513eb85ce2136ec12b30283b47e0d324311d5b7e.scope.
Oct 11 01:42:17 compute-0 podman[195364]: 2025-10-11 01:42:17.821789612 +0000 UTC m=+0.044884328 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:17 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2cda687707c541faf171c6b43dc06f845a6128d45de995a4cea4303cbaf3b870/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2cda687707c541faf171c6b43dc06f845a6128d45de995a4cea4303cbaf3b870/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2cda687707c541faf171c6b43dc06f845a6128d45de995a4cea4303cbaf3b870/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:17 compute-0 sudo[195400]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:17 compute-0 sudo[195400]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:17 compute-0 sudo[195400]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:18 compute-0 podman[195364]: 2025-10-11 01:42:18.017793271 +0000 UTC m=+0.240887987 container init ee6575f5f4353bf2e2bcdae7513eb85ce2136ec12b30283b47e0d324311d5b7e (image=quay.io/ceph/ceph:v18, name=zealous_dirac, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:42:18 compute-0 podman[195364]: 2025-10-11 01:42:18.037084815 +0000 UTC m=+0.260179501 container start ee6575f5f4353bf2e2bcdae7513eb85ce2136ec12b30283b47e0d324311d5b7e (image=quay.io/ceph/ceph:v18, name=zealous_dirac, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:42:18 compute-0 podman[195364]: 2025-10-11 01:42:18.04258177 +0000 UTC m=+0.265676456 container attach ee6575f5f4353bf2e2bcdae7513eb85ce2136ec12b30283b47e0d324311d5b7e (image=quay.io/ceph/ceph:v18, name=zealous_dirac, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 01:42:18 compute-0 sudo[195430]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 list-networks
Oct 11 01:42:18 compute-0 sudo[195430]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:18 compute-0 sudo[195430]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:42:18 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:18 compute-0 sudo[195495]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:18 compute-0 sudo[195495]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:18 compute-0 sudo[195495]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:18 compute-0 ceph-mon[191930]: from='client.14164 -' entity='client.admin' cmd=[{"prefix": "orch host label add", "hostname": "compute-0", "label": "_admin", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:18 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:18 compute-0 ceph-mon[191930]: Added label _admin to host compute-0
Oct 11 01:42:18 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config set, name=osd_memory_target_autotune}] v 0) v1
Oct 11 01:42:18 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1820136290' entity='client.admin' 
Oct 11 01:42:18 compute-0 podman[195364]: 2025-10-11 01:42:18.699470018 +0000 UTC m=+0.922564734 container died ee6575f5f4353bf2e2bcdae7513eb85ce2136ec12b30283b47e0d324311d5b7e (image=quay.io/ceph/ceph:v18, name=zealous_dirac, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=reef)
Oct 11 01:42:18 compute-0 systemd[1]: libpod-ee6575f5f4353bf2e2bcdae7513eb85ce2136ec12b30283b47e0d324311d5b7e.scope: Deactivated successfully.
Oct 11 01:42:18 compute-0 sudo[195520]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:42:18 compute-0 sudo[195520]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:18 compute-0 sudo[195520]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:18 compute-0 systemd[1]: var-lib-containers-storage-overlay-2cda687707c541faf171c6b43dc06f845a6128d45de995a4cea4303cbaf3b870-merged.mount: Deactivated successfully.
Oct 11 01:42:18 compute-0 podman[195364]: 2025-10-11 01:42:18.773304522 +0000 UTC m=+0.996399208 container remove ee6575f5f4353bf2e2bcdae7513eb85ce2136ec12b30283b47e0d324311d5b7e (image=quay.io/ceph/ceph:v18, name=zealous_dirac, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3)
Oct 11 01:42:18 compute-0 systemd[1]: libpod-conmon-ee6575f5f4353bf2e2bcdae7513eb85ce2136ec12b30283b47e0d324311d5b7e.scope: Deactivated successfully.
Oct 11 01:42:18 compute-0 sudo[195555]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:18 compute-0 sudo[195555]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:18 compute-0 sudo[195555]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:18 compute-0 podman[195570]: 2025-10-11 01:42:18.874984047 +0000 UTC m=+0.063925362 container create 4ce055674d2a545345b7c7f38fea0cf1121c9cde0dabcc746a59519d58293f65 (image=quay.io/ceph/ceph:v18, name=sleepy_colden, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2)
Oct 11 01:42:18 compute-0 systemd[1]: Started libpod-conmon-4ce055674d2a545345b7c7f38fea0cf1121c9cde0dabcc746a59519d58293f65.scope.
Oct 11 01:42:18 compute-0 podman[195570]: 2025-10-11 01:42:18.850871492 +0000 UTC m=+0.039812837 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:18 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:18 compute-0 sudo[195597]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- inventory --format=json-pretty --filter-for-batch
Oct 11 01:42:18 compute-0 sudo[195597]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fedf4c786c168d027493c5750f5c8475af1b4f039c0200a7c7c72664eeb9f6da/merged/etc/ceph/ceph.client.admin.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fedf4c786c168d027493c5750f5c8475af1b4f039c0200a7c7c72664eeb9f6da/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fedf4c786c168d027493c5750f5c8475af1b4f039c0200a7c7c72664eeb9f6da/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:19 compute-0 podman[195570]: 2025-10-11 01:42:19.004920595 +0000 UTC m=+0.193861990 container init 4ce055674d2a545345b7c7f38fea0cf1121c9cde0dabcc746a59519d58293f65 (image=quay.io/ceph/ceph:v18, name=sleepy_colden, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 01:42:19 compute-0 podman[195570]: 2025-10-11 01:42:19.020938821 +0000 UTC m=+0.209880156 container start 4ce055674d2a545345b7c7f38fea0cf1121c9cde0dabcc746a59519d58293f65 (image=quay.io/ceph/ceph:v18, name=sleepy_colden, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:42:19 compute-0 podman[195570]: 2025-10-11 01:42:19.028912021 +0000 UTC m=+0.217853426 container attach 4ce055674d2a545345b7c7f38fea0cf1121c9cde0dabcc746a59519d58293f65 (image=quay.io/ceph/ceph:v18, name=sleepy_colden, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 01:42:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e2 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:42:19 compute-0 podman[195685]: 2025-10-11 01:42:19.551601255 +0000 UTC m=+0.084161782 container create 36a3efd803d8d9ba701598e9950ee4e52be896739b76d2546928be913a42ebe1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eloquent_pike, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:42:19 compute-0 podman[195685]: 2025-10-11 01:42:19.511462833 +0000 UTC m=+0.044023400 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:42:19 compute-0 systemd[1]: Started libpod-conmon-36a3efd803d8d9ba701598e9950ee4e52be896739b76d2546928be913a42ebe1.scope.
Oct 11 01:42:19 compute-0 ceph-mon[191930]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:19 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1820136290' entity='client.admin' 
Oct 11 01:42:19 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:19 compute-0 podman[195685]: 2025-10-11 01:42:19.686876835 +0000 UTC m=+0.219437362 container init 36a3efd803d8d9ba701598e9950ee4e52be896739b76d2546928be913a42ebe1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eloquent_pike, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:42:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/dashboard/cluster/status}] v 0) v1
Oct 11 01:42:19 compute-0 podman[195685]: 2025-10-11 01:42:19.702744199 +0000 UTC m=+0.235304726 container start 36a3efd803d8d9ba701598e9950ee4e52be896739b76d2546928be913a42ebe1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eloquent_pike, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:42:19 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/4193506895' entity='client.admin' 
Oct 11 01:42:19 compute-0 podman[195685]: 2025-10-11 01:42:19.712065345 +0000 UTC m=+0.244625882 container attach 36a3efd803d8d9ba701598e9950ee4e52be896739b76d2546928be913a42ebe1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eloquent_pike, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:42:19 compute-0 eloquent_pike[195703]: 167 167
Oct 11 01:42:19 compute-0 sleepy_colden[195622]: set mgr/dashboard/cluster/status
Oct 11 01:42:19 compute-0 systemd[1]: libpod-36a3efd803d8d9ba701598e9950ee4e52be896739b76d2546928be913a42ebe1.scope: Deactivated successfully.
Oct 11 01:42:19 compute-0 podman[195685]: 2025-10-11 01:42:19.71604899 +0000 UTC m=+0.248609477 container died 36a3efd803d8d9ba701598e9950ee4e52be896739b76d2546928be913a42ebe1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eloquent_pike, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3)
Oct 11 01:42:19 compute-0 systemd[1]: var-lib-containers-storage-overlay-322ad221196240b5123b55c167101687b4fcb24349fb483f1b94ac2c0de834c0-merged.mount: Deactivated successfully.
Oct 11 01:42:19 compute-0 systemd[1]: libpod-4ce055674d2a545345b7c7f38fea0cf1121c9cde0dabcc746a59519d58293f65.scope: Deactivated successfully.
Oct 11 01:42:19 compute-0 podman[195570]: 2025-10-11 01:42:19.776628487 +0000 UTC m=+0.965569832 container died 4ce055674d2a545345b7c7f38fea0cf1121c9cde0dabcc746a59519d58293f65 (image=quay.io/ceph/ceph:v18, name=sleepy_colden, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:42:19 compute-0 podman[195685]: 2025-10-11 01:42:19.808569281 +0000 UTC m=+0.341129798 container remove 36a3efd803d8d9ba701598e9950ee4e52be896739b76d2546928be913a42ebe1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eloquent_pike, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:42:19 compute-0 systemd[1]: var-lib-containers-storage-overlay-fedf4c786c168d027493c5750f5c8475af1b4f039c0200a7c7c72664eeb9f6da-merged.mount: Deactivated successfully.
Oct 11 01:42:19 compute-0 podman[195570]: 2025-10-11 01:42:19.864095669 +0000 UTC m=+1.053037014 container remove 4ce055674d2a545345b7c7f38fea0cf1121c9cde0dabcc746a59519d58293f65 (image=quay.io/ceph/ceph:v18, name=sleepy_colden, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
Oct 11 01:42:19 compute-0 systemd[1]: libpod-conmon-36a3efd803d8d9ba701598e9950ee4e52be896739b76d2546928be913a42ebe1.scope: Deactivated successfully.
Oct 11 01:42:19 compute-0 systemd[1]: libpod-conmon-4ce055674d2a545345b7c7f38fea0cf1121c9cde0dabcc746a59519d58293f65.scope: Deactivated successfully.
Oct 11 01:42:19 compute-0 sudo[190710]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:20 compute-0 podman[195740]: 2025-10-11 01:42:20.182259801 +0000 UTC m=+0.076729484 container create 516ce036c1e01895ff7adc27c856ae9c274bec273ab183c8ca70cf80adbf3d5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_curran, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 01:42:20 compute-0 podman[195740]: 2025-10-11 01:42:20.157520506 +0000 UTC m=+0.051990189 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:42:20 compute-0 systemd[1]: Started libpod-conmon-516ce036c1e01895ff7adc27c856ae9c274bec273ab183c8ca70cf80adbf3d5c.scope.
Oct 11 01:42:20 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ff35568b412c19231e2ed596ff3e7501452309313ac9a67435c6ae757ac89874/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ff35568b412c19231e2ed596ff3e7501452309313ac9a67435c6ae757ac89874/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ff35568b412c19231e2ed596ff3e7501452309313ac9a67435c6ae757ac89874/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ff35568b412c19231e2ed596ff3e7501452309313ac9a67435c6ae757ac89874/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:20 compute-0 podman[195740]: 2025-10-11 01:42:20.349041491 +0000 UTC m=+0.243511184 container init 516ce036c1e01895ff7adc27c856ae9c274bec273ab183c8ca70cf80adbf3d5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_curran, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:42:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:20 compute-0 podman[195740]: 2025-10-11 01:42:20.371171009 +0000 UTC m=+0.265640682 container start 516ce036c1e01895ff7adc27c856ae9c274bec273ab183c8ca70cf80adbf3d5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_curran, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 01:42:20 compute-0 podman[195740]: 2025-10-11 01:42:20.375114421 +0000 UTC m=+0.269584094 container attach 516ce036c1e01895ff7adc27c856ae9c274bec273ab183c8ca70cf80adbf3d5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_curran, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0)
Oct 11 01:42:20 compute-0 sudo[195783]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iwdirnqhrjddijigxkorkklfvxwnblcy ; /usr/bin/python3'
Oct 11 01:42:20 compute-0 sudo[195783]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:42:20 compute-0 python3[195786]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   config set mgr mgr/cephadm/use_repo_digest false _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:42:20 compute-0 podman[195787]: 2025-10-11 01:42:20.655816183 +0000 UTC m=+0.072359889 container create 2209b69601a983c243dc465fc3ce7f860211ba11f4995cb8073ce1a09153a2e0 (image=quay.io/ceph/ceph:v18, name=youthful_goldberg, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:42:20 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/4193506895' entity='client.admin' 
Oct 11 01:42:20 compute-0 podman[195787]: 2025-10-11 01:42:20.626601374 +0000 UTC m=+0.043145080 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:20 compute-0 systemd[1]: Started libpod-conmon-2209b69601a983c243dc465fc3ce7f860211ba11f4995cb8073ce1a09153a2e0.scope.
Oct 11 01:42:20 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c22257f827fa38ac2fea7e33f93b9fac6258d559446286147215fd7fc76606b4/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c22257f827fa38ac2fea7e33f93b9fac6258d559446286147215fd7fc76606b4/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:20 compute-0 podman[195787]: 2025-10-11 01:42:20.815006132 +0000 UTC m=+0.231549868 container init 2209b69601a983c243dc465fc3ce7f860211ba11f4995cb8073ce1a09153a2e0 (image=quay.io/ceph/ceph:v18, name=youthful_goldberg, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 01:42:20 compute-0 podman[195787]: 2025-10-11 01:42:20.832848252 +0000 UTC m=+0.249391918 container start 2209b69601a983c243dc465fc3ce7f860211ba11f4995cb8073ce1a09153a2e0 (image=quay.io/ceph/ceph:v18, name=youthful_goldberg, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 01:42:20 compute-0 podman[195787]: 2025-10-11 01:42:20.838525271 +0000 UTC m=+0.255068967 container attach 2209b69601a983c243dc465fc3ce7f860211ba11f4995cb8073ce1a09153a2e0 (image=quay.io/ceph/ceph:v18, name=youthful_goldberg, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 01:42:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config set, name=mgr/cephadm/use_repo_digest}] v 0) v1
Oct 11 01:42:21 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/2394001959' entity='client.admin' 
Oct 11 01:42:21 compute-0 systemd[1]: libpod-2209b69601a983c243dc465fc3ce7f860211ba11f4995cb8073ce1a09153a2e0.scope: Deactivated successfully.
Oct 11 01:42:21 compute-0 conmon[195802]: conmon 2209b69601a983c243dc <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-2209b69601a983c243dc465fc3ce7f860211ba11f4995cb8073ce1a09153a2e0.scope/container/memory.events
Oct 11 01:42:21 compute-0 podman[195787]: 2025-10-11 01:42:21.490051346 +0000 UTC m=+0.906595072 container died 2209b69601a983c243dc465fc3ce7f860211ba11f4995cb8073ce1a09153a2e0 (image=quay.io/ceph/ceph:v18, name=youthful_goldberg, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default)
Oct 11 01:42:21 compute-0 systemd[1]: var-lib-containers-storage-overlay-c22257f827fa38ac2fea7e33f93b9fac6258d559446286147215fd7fc76606b4-merged.mount: Deactivated successfully.
Oct 11 01:42:21 compute-0 podman[195787]: 2025-10-11 01:42:21.593842678 +0000 UTC m=+1.010386384 container remove 2209b69601a983c243dc465fc3ce7f860211ba11f4995cb8073ce1a09153a2e0 (image=quay.io/ceph/ceph:v18, name=youthful_goldberg, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 01:42:21 compute-0 systemd[1]: libpod-conmon-2209b69601a983c243dc465fc3ce7f860211ba11f4995cb8073ce1a09153a2e0.scope: Deactivated successfully.
Oct 11 01:42:21 compute-0 sudo[195783]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:21 compute-0 ceph-mon[191930]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:21 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2394001959' entity='client.admin' 
Oct 11 01:42:22 compute-0 auditd[699]: Audit daemon rotating log files
Oct 11 01:42:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:22 compute-0 unruffled_curran[195757]: [
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:     {
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:         "available": false,
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:         "ceph_device": false,
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:         "device_id": "QEMU_DVD-ROM_QM00001",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:         "lsm_data": {},
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:         "lvs": [],
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:         "path": "/dev/sr0",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:         "rejected_reasons": [
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "Has a FileSystem",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "Insufficient space (<5GB)"
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:         ],
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:         "sys_api": {
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "actuators": null,
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "device_nodes": "sr0",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "devname": "sr0",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "human_readable_size": "482.00 KB",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "id_bus": "ata",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "model": "QEMU DVD-ROM",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "nr_requests": "2",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "parent": "/dev/sr0",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "partitions": {},
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "path": "/dev/sr0",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "removable": "1",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "rev": "2.5+",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "ro": "0",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "rotational": "0",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "sas_address": "",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "sas_device_handle": "",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "scheduler_mode": "mq-deadline",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "sectors": 0,
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "sectorsize": "2048",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "size": 493568.0,
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "support_discard": "2048",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "type": "disk",
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:             "vendor": "QEMU"
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:         }
Oct 11 01:42:22 compute-0 unruffled_curran[195757]:     }
Oct 11 01:42:22 compute-0 unruffled_curran[195757]: ]
Oct 11 01:42:22 compute-0 systemd[1]: libpod-516ce036c1e01895ff7adc27c856ae9c274bec273ab183c8ca70cf80adbf3d5c.scope: Deactivated successfully.
Oct 11 01:42:22 compute-0 systemd[1]: libpod-516ce036c1e01895ff7adc27c856ae9c274bec273ab183c8ca70cf80adbf3d5c.scope: Consumed 2.400s CPU time.
Oct 11 01:42:22 compute-0 podman[195740]: 2025-10-11 01:42:22.715813888 +0000 UTC m=+2.610283651 container died 516ce036c1e01895ff7adc27c856ae9c274bec273ab183c8ca70cf80adbf3d5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_curran, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:42:22 compute-0 systemd[1]: var-lib-containers-storage-overlay-ff35568b412c19231e2ed596ff3e7501452309313ac9a67435c6ae757ac89874-merged.mount: Deactivated successfully.
Oct 11 01:42:22 compute-0 podman[195740]: 2025-10-11 01:42:22.803485336 +0000 UTC m=+2.697955009 container remove 516ce036c1e01895ff7adc27c856ae9c274bec273ab183c8ca70cf80adbf3d5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_curran, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:42:22 compute-0 systemd[1]: libpod-conmon-516ce036c1e01895ff7adc27c856ae9c274bec273ab183c8ca70cf80adbf3d5c.scope: Deactivated successfully.
Oct 11 01:42:22 compute-0 sudo[195597]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:42:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:22 compute-0 sudo[197822]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-diwxwjhwcaohlwfcoxrtajdorfxpfmus ; ANSIBLE_ASYNC_DIR=\'~/.ansible_async\' /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760146942.1201315-33788-209178414560172/async_wrapper.py j848529716497 30 /home/zuul/.ansible/tmp/ansible-tmp-1760146942.1201315-33788-209178414560172/AnsiballZ_command.py _'
Oct 11 01:42:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:42:22 compute-0 sudo[197822]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:42:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:42:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:42:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"} v 0) v1
Oct 11 01:42:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"}]: dispatch
Oct 11 01:42:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:42:22 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:42:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:42:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:42:22 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.serve] Updating compute-0:/etc/ceph/ceph.conf
Oct 11 01:42:22 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Updating compute-0:/etc/ceph/ceph.conf
Oct 11 01:42:23 compute-0 sudo[197825]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:23 compute-0 sudo[197825]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:23 compute-0 sudo[197825]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:23 compute-0 ansible-async_wrapper.py[197824]: Invoked with j848529716497 30 /home/zuul/.ansible/tmp/ansible-tmp-1760146942.1201315-33788-209178414560172/AnsiballZ_command.py _
Oct 11 01:42:23 compute-0 ansible-async_wrapper.py[197859]: Starting module and watcher
Oct 11 01:42:23 compute-0 ansible-async_wrapper.py[197859]: Start watching 197861 (30)
Oct 11 01:42:23 compute-0 ansible-async_wrapper.py[197861]: Start module (197861)
Oct 11 01:42:23 compute-0 ansible-async_wrapper.py[197824]: Return async_wrapper task started.
Oct 11 01:42:23 compute-0 sudo[197822]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:23 compute-0 sudo[197850]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /etc/ceph
Oct 11 01:42:23 compute-0 sudo[197850]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:23 compute-0 sudo[197850]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:23 compute-0 python3[197867]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   orch status --format json _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:42:23 compute-0 sudo[197880]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:23 compute-0 sudo[197880]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:23 compute-0 sudo[197880]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:23 compute-0 podman[197903]: 2025-10-11 01:42:23.369659036 +0000 UTC m=+0.084309833 container create e65a8bc36a348cc8b8505b8f494d11feb8dd0a15a7d46c383766789045eaf4ae (image=quay.io/ceph/ceph:v18, name=adoring_proskuriakova, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:42:23 compute-0 podman[197903]: 2025-10-11 01:42:23.337197571 +0000 UTC m=+0.051848448 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:23 compute-0 systemd[1]: Started libpod-conmon-e65a8bc36a348cc8b8505b8f494d11feb8dd0a15a7d46c383766789045eaf4ae.scope.
Oct 11 01:42:23 compute-0 sudo[197915]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/etc/ceph
Oct 11 01:42:23 compute-0 sudo[197915]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:23 compute-0 sudo[197915]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:23 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:23 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dce0b1749e5eba53091a4f91656b6461ec50f9259b2223b5dc2168ba1d2cd24e/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:23 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dce0b1749e5eba53091a4f91656b6461ec50f9259b2223b5dc2168ba1d2cd24e/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:23 compute-0 podman[197903]: 2025-10-11 01:42:23.51929096 +0000 UTC m=+0.233941837 container init e65a8bc36a348cc8b8505b8f494d11feb8dd0a15a7d46c383766789045eaf4ae (image=quay.io/ceph/ceph:v18, name=adoring_proskuriakova, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 01:42:23 compute-0 podman[197903]: 2025-10-11 01:42:23.537908992 +0000 UTC m=+0.252559809 container start e65a8bc36a348cc8b8505b8f494d11feb8dd0a15a7d46c383766789045eaf4ae (image=quay.io/ceph/ceph:v18, name=adoring_proskuriakova, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:42:23 compute-0 podman[197903]: 2025-10-11 01:42:23.54471992 +0000 UTC m=+0.259370757 container attach e65a8bc36a348cc8b8505b8f494d11feb8dd0a15a7d46c383766789045eaf4ae (image=quay.io/ceph/ceph:v18, name=adoring_proskuriakova, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:42:23 compute-0 sudo[197949]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:23 compute-0 sudo[197949]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:23 compute-0 sudo[197949]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:23 compute-0 sudo[197975]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/touch /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/etc/ceph/ceph.conf.new
Oct 11 01:42:23 compute-0 sudo[197975]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:23 compute-0 sudo[197975]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:23 compute-0 sudo[198000]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:23 compute-0 sudo[198000]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:23 compute-0 sudo[198000]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:23 compute-0 ceph-mon[191930]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"}]: dispatch
Oct 11 01:42:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:42:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:42:23 compute-0 ceph-mon[191930]: Updating compute-0:/etc/ceph/ceph.conf
Oct 11 01:42:23 compute-0 sudo[198028]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R ceph-admin /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:42:23 compute-0 sudo[198028]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:23 compute-0 sudo[198028]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:24 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14172 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
Oct 11 01:42:24 compute-0 adoring_proskuriakova[197945]: 
Oct 11 01:42:24 compute-0 adoring_proskuriakova[197945]: {"available": true, "backend": "cephadm", "paused": false, "workers": 10}
Oct 11 01:42:24 compute-0 systemd[1]: libpod-e65a8bc36a348cc8b8505b8f494d11feb8dd0a15a7d46c383766789045eaf4ae.scope: Deactivated successfully.
Oct 11 01:42:24 compute-0 podman[197903]: 2025-10-11 01:42:24.139394742 +0000 UTC m=+0.854045599 container died e65a8bc36a348cc8b8505b8f494d11feb8dd0a15a7d46c383766789045eaf4ae (image=quay.io/ceph/ceph:v18, name=adoring_proskuriakova, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:42:24 compute-0 sudo[198069]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:24 compute-0 sudo[198069]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:24 compute-0 sudo[198069]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:24 compute-0 systemd[1]: var-lib-containers-storage-overlay-dce0b1749e5eba53091a4f91656b6461ec50f9259b2223b5dc2168ba1d2cd24e-merged.mount: Deactivated successfully.
Oct 11 01:42:24 compute-0 podman[197903]: 2025-10-11 01:42:24.218678298 +0000 UTC m=+0.933329095 container remove e65a8bc36a348cc8b8505b8f494d11feb8dd0a15a7d46c383766789045eaf4ae (image=quay.io/ceph/ceph:v18, name=adoring_proskuriakova, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, ceph=True)
Oct 11 01:42:24 compute-0 systemd[1]: libpod-conmon-e65a8bc36a348cc8b8505b8f494d11feb8dd0a15a7d46c383766789045eaf4ae.scope: Deactivated successfully.
Oct 11 01:42:24 compute-0 ansible-async_wrapper.py[197861]: Module complete (197861)
Oct 11 01:42:24 compute-0 sudo[198099]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/etc/ceph/ceph.conf.new
Oct 11 01:42:24 compute-0 sudo[198099]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:24 compute-0 sudo[198099]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e2 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:42:24 compute-0 sudo[198179]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:24 compute-0 sudo[198179]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:24 compute-0 sudo[198179]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:24 compute-0 sudo[198226]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ntgmwcmsugukwegsjfpdiipojokoqnef ; /usr/bin/python3'
Oct 11 01:42:24 compute-0 sudo[198226]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:42:24 compute-0 sudo[198229]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R 0:0 /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/etc/ceph/ceph.conf.new
Oct 11 01:42:24 compute-0 sudo[198229]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:24 compute-0 sudo[198229]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:24 compute-0 python3[198230]: ansible-ansible.legacy.async_status Invoked with jid=j848529716497.197824 mode=status _async_dir=/root/.ansible_async
Oct 11 01:42:24 compute-0 sudo[198226]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:24 compute-0 sudo[198255]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:24 compute-0 sudo[198255]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:24 compute-0 sudo[198255]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:24 compute-0 ceph-mon[191930]: from='client.14172 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
Oct 11 01:42:25 compute-0 sudo[198292]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/etc/ceph/ceph.conf.new
Oct 11 01:42:25 compute-0 sudo[198292]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:25 compute-0 sudo[198292]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:25 compute-0 sudo[198356]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qumhzqfaqtdzetcjkwbyszgbshcgbvle ; /usr/bin/python3'
Oct 11 01:42:25 compute-0 sudo[198356]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:42:25 compute-0 sudo[198349]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:25 compute-0 sudo[198349]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:25 compute-0 sudo[198349]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:25 compute-0 sudo[198379]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mv /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/etc/ceph/ceph.conf.new /etc/ceph/ceph.conf
Oct 11 01:42:25 compute-0 sudo[198379]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:25 compute-0 sudo[198379]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:25 compute-0 python3[198371]: ansible-ansible.legacy.async_status Invoked with jid=j848529716497.197824 mode=cleanup _async_dir=/root/.ansible_async
Oct 11 01:42:25 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.serve] Updating compute-0:/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.conf
Oct 11 01:42:25 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Updating compute-0:/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.conf
Oct 11 01:42:25 compute-0 sudo[198356]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:25 compute-0 sudo[198404]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:25 compute-0 sudo[198404]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:25 compute-0 sudo[198404]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:25 compute-0 sudo[198429]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config
Oct 11 01:42:25 compute-0 sudo[198429]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:25 compute-0 sudo[198429]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:25 compute-0 sudo[198454]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:25 compute-0 sudo[198454]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:25 compute-0 sudo[198454]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:25 compute-0 sudo[198519]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qmyrzjhypvxtjvbnfpufbdgwxitvluew ; /usr/bin/python3'
Oct 11 01:42:25 compute-0 sudo[198519]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:42:25 compute-0 sudo[198489]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config
Oct 11 01:42:25 compute-0 sudo[198489]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:25 compute-0 sudo[198489]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:25 compute-0 ceph-mon[191930]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:26 compute-0 sudo[198530]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:26 compute-0 python3[198527]: ansible-ansible.builtin.stat Invoked with path=/home/ceph-admin/specs/ceph_spec.yaml follow=False get_md5=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:42:26 compute-0 sudo[198530]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:26 compute-0 sudo[198530]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:26 compute-0 sudo[198519]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:26 compute-0 sudo[198556]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/touch /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.conf.new
Oct 11 01:42:26 compute-0 sudo[198556]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:26 compute-0 sudo[198556]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:26 compute-0 sudo[198582]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:26 compute-0 sudo[198582]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:26 compute-0 sudo[198582]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:26 compute-0 sudo[198607]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R ceph-admin /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:42:26 compute-0 sudo[198607]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:26 compute-0 sudo[198607]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:42:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:42:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:42:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:42:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:42:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:42:26 compute-0 sudo[198632]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:26 compute-0 sudo[198632]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:26 compute-0 sudo[198632]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:26 compute-0 sudo[198679]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-coiqpmgmwlguiyyjhybdxzerqsspmwls ; /usr/bin/python3'
Oct 11 01:42:26 compute-0 sudo[198679]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:42:26 compute-0 sudo[198682]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.conf.new
Oct 11 01:42:26 compute-0 sudo[198682]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:26 compute-0 sudo[198682]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:26 compute-0 python3[198684]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z   --volume /home/ceph-admin/specs/ceph_spec.yaml:/home/ceph_spec.yaml:z   --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   orch status --format json _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:42:26 compute-0 ceph-mon[191930]: Updating compute-0:/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.conf
Oct 11 01:42:26 compute-0 podman[198731]: 2025-10-11 01:42:26.926377105 +0000 UTC m=+0.074906050 container create c175879d12d48f593ca3a9c5d8b58ab5a8365978a00674ffd2f3334e5a132ef7 (image=quay.io/ceph/ceph:v18, name=gifted_euler, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:42:26 compute-0 sudo[198732]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:26 compute-0 sudo[198732]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:26 compute-0 sudo[198732]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:26 compute-0 systemd[1]: Started libpod-conmon-c175879d12d48f593ca3a9c5d8b58ab5a8365978a00674ffd2f3334e5a132ef7.scope.
Oct 11 01:42:26 compute-0 podman[198731]: 2025-10-11 01:42:26.893799911 +0000 UTC m=+0.042328896 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:27 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2591ba436ae30fc34320442a9d37738f0c219bfcff5aa8bc24fa1f45ed7b5b12/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2591ba436ae30fc34320442a9d37738f0c219bfcff5aa8bc24fa1f45ed7b5b12/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2591ba436ae30fc34320442a9d37738f0c219bfcff5aa8bc24fa1f45ed7b5b12/merged/home/ceph_spec.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:27 compute-0 podman[198731]: 2025-10-11 01:42:27.056894459 +0000 UTC m=+0.205423434 container init c175879d12d48f593ca3a9c5d8b58ab5a8365978a00674ffd2f3334e5a132ef7 (image=quay.io/ceph/ceph:v18, name=gifted_euler, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:42:27 compute-0 podman[198731]: 2025-10-11 01:42:27.07348107 +0000 UTC m=+0.222010035 container start c175879d12d48f593ca3a9c5d8b58ab5a8365978a00674ffd2f3334e5a132ef7 (image=quay.io/ceph/ceph:v18, name=gifted_euler, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 01:42:27 compute-0 podman[198731]: 2025-10-11 01:42:27.084178075 +0000 UTC m=+0.232707060 container attach c175879d12d48f593ca3a9c5d8b58ab5a8365978a00674ffd2f3334e5a132ef7 (image=quay.io/ceph/ceph:v18, name=gifted_euler, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 01:42:27 compute-0 sudo[198771]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R 0:0 /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.conf.new
Oct 11 01:42:27 compute-0 sudo[198771]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:27 compute-0 sudo[198771]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:27 compute-0 sudo[198800]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:27 compute-0 sudo[198800]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:27 compute-0 sudo[198800]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:27 compute-0 sudo[198825]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.conf.new
Oct 11 01:42:27 compute-0 sudo[198825]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:27 compute-0 sudo[198825]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:27 compute-0 sudo[198851]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:27 compute-0 sudo[198851]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:27 compute-0 sudo[198851]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:27 compute-0 sudo[198894]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mv /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.conf.new /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.conf
Oct 11 01:42:27 compute-0 sudo[198894]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:27 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14174 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
Oct 11 01:42:27 compute-0 gifted_euler[198772]: 
Oct 11 01:42:27 compute-0 gifted_euler[198772]: {"available": true, "backend": "cephadm", "paused": false, "workers": 10}
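The one-shot container above ("gifted_euler") runs `ceph orch status --format json` against the cluster and prints {"available": true, "backend": "cephadm", "paused": false, "workers": 10}: the orchestrator backend is cephadm, it is not paused, and ten serve workers are running. A hedged Python sketch of the same containerized invocation follows, parsing that JSON; the wrapper function is hypothetical, while the image, fsid, and mounts are copied from the logged command.

    import json
    import subprocess

    def orch_status(fsid: str, image: str = "quay.io/ceph/ceph:v18") -> dict:
        # Throwaway container on the host network with /etc/ceph bind-mounted,
        # so the admin keyring and ceph.conf written above are visible inside.
        cmd = [
            "podman", "run", "--rm", "--net=host", "--ipc=host",
            "--volume", "/etc/ceph:/etc/ceph:z",
            "--entrypoint", "ceph", image,
            "--fsid", fsid,
            "-c", "/etc/ceph/ceph.conf",
            "-k", "/etc/ceph/ceph.client.admin.keyring",
            "orch", "status", "--format", "json",
        ]
        out = subprocess.run(cmd, check=True, capture_output=True, text=True).stdout
        return json.loads(out)

    # orch_status("3c7617c3-7a20-523e-a9de-20c0d6ba41da")["available"]  # -> True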
Oct 11 01:42:27 compute-0 sudo[198894]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:27 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.serve] Updating compute-0:/etc/ceph/ceph.client.admin.keyring
Oct 11 01:42:27 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Updating compute-0:/etc/ceph/ceph.client.admin.keyring
Oct 11 01:42:27 compute-0 systemd[1]: libpod-c175879d12d48f593ca3a9c5d8b58ab5a8365978a00674ffd2f3334e5a132ef7.scope: Deactivated successfully.
Oct 11 01:42:27 compute-0 podman[198731]: 2025-10-11 01:42:27.646300405 +0000 UTC m=+0.794829370 container died c175879d12d48f593ca3a9c5d8b58ab5a8365978a00674ffd2f3334e5a132ef7 (image=quay.io/ceph/ceph:v18, name=gifted_euler, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:42:27 compute-0 systemd[1]: var-lib-containers-storage-overlay-2591ba436ae30fc34320442a9d37738f0c219bfcff5aa8bc24fa1f45ed7b5b12-merged.mount: Deactivated successfully.
Oct 11 01:42:27 compute-0 podman[198731]: 2025-10-11 01:42:27.729528692 +0000 UTC m=+0.878057637 container remove c175879d12d48f593ca3a9c5d8b58ab5a8365978a00674ffd2f3334e5a132ef7 (image=quay.io/ceph/ceph:v18, name=gifted_euler, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 01:42:27 compute-0 systemd[1]: libpod-conmon-c175879d12d48f593ca3a9c5d8b58ab5a8365978a00674ffd2f3334e5a132ef7.scope: Deactivated successfully.
Oct 11 01:42:27 compute-0 sudo[198921]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:27 compute-0 sudo[198921]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:27 compute-0 sudo[198679]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:27 compute-0 sudo[198921]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:27 compute-0 sudo[198960]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /etc/ceph
Oct 11 01:42:27 compute-0 sudo[198960]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:27 compute-0 sudo[198960]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:27 compute-0 ceph-mon[191930]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:28 compute-0 sudo[198985]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:28 compute-0 sudo[198985]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:28 compute-0 sudo[198985]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:28 compute-0 ansible-async_wrapper.py[197859]: Done in kid B.
Oct 11 01:42:28 compute-0 sudo[199010]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/etc/ceph
Oct 11 01:42:28 compute-0 sudo[199010]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:28 compute-0 sudo[199010]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:28 compute-0 sudo[199065]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pfgprlnffvayddgneunwekgiqqwtetuc ; /usr/bin/python3'
Oct 11 01:42:28 compute-0 sudo[199065]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:42:28 compute-0 sudo[199053]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:28 compute-0 sudo[199053]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:28 compute-0 sudo[199053]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:28 compute-0 sudo[199086]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/touch /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/etc/ceph/ceph.client.admin.keyring.new
Oct 11 01:42:28 compute-0 sudo[199086]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:28 compute-0 sudo[199086]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:28 compute-0 python3[199083]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z   --volume /home/ceph-admin/specs/ceph_spec.yaml:/home/ceph_spec.yaml:z   --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   config set global log_to_file true _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:42:28 compute-0 sudo[199111]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:28 compute-0 sudo[199111]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:28 compute-0 sudo[199111]: pam_unix(sudo:session): session closed for user root
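The command logged at 01:42:28 sets `log_to_file true` in the global section; the matching task at 01:42:29 does the same for `mon_cluster_log_to_file`. `ceph config set` writes into the monitors' centralized configuration database rather than editing ceph.conf on disk, and running daemons observe the change without a restart. A minimal sketch, assuming a reachable `ceph` CLI (on this node the CLI is wrapped in the podman invocation shown above); the helper name is hypothetical.

    import subprocess

    def ceph_config_set(section: str, name: str, value: str) -> None:
        # One option written into the cluster's central config store (MON KV);
        # equivalent to the containerized "ceph ... config set" tasks above.
        subprocess.run(["ceph", "config", "set", section, name, value], check=True)

    # ceph_config_set("global", "log_to_file", "true")
    # ceph_config_set("global", "mon_cluster_log_to_file", "true")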
Oct 11 01:42:28 compute-0 podman[199114]: 2025-10-11 01:42:28.481533557 +0000 UTC m=+0.086353774 container create dcd0af9f9dd963243c46bd206fb38210b74b1d73d3c0899d7ef6fcfcc182057a (image=quay.io/ceph/ceph:v18, name=affectionate_wiles, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 01:42:28 compute-0 podman[199114]: 2025-10-11 01:42:28.447180843 +0000 UTC m=+0.052001140 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:28 compute-0 systemd[1]: Started libpod-conmon-dcd0af9f9dd963243c46bd206fb38210b74b1d73d3c0899d7ef6fcfcc182057a.scope.
Oct 11 01:42:28 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:28 compute-0 sudo[199148]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R ceph-admin /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:42:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/958548505d30eb60948997c116e397b0a9b777301d86e454052bc1dced0f1b7a/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/958548505d30eb60948997c116e397b0a9b777301d86e454052bc1dced0f1b7a/merged/home/ceph_spec.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/958548505d30eb60948997c116e397b0a9b777301d86e454052bc1dced0f1b7a/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:28 compute-0 sudo[199148]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:28 compute-0 sudo[199148]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:28 compute-0 podman[199114]: 2025-10-11 01:42:28.637876382 +0000 UTC m=+0.242696629 container init dcd0af9f9dd963243c46bd206fb38210b74b1d73d3c0899d7ef6fcfcc182057a (image=quay.io/ceph/ceph:v18, name=affectionate_wiles, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 01:42:28 compute-0 podman[199114]: 2025-10-11 01:42:28.651623478 +0000 UTC m=+0.256443705 container start dcd0af9f9dd963243c46bd206fb38210b74b1d73d3c0899d7ef6fcfcc182057a (image=quay.io/ceph/ceph:v18, name=affectionate_wiles, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:42:28 compute-0 podman[199114]: 2025-10-11 01:42:28.657321119 +0000 UTC m=+0.262141356 container attach dcd0af9f9dd963243c46bd206fb38210b74b1d73d3c0899d7ef6fcfcc182057a (image=quay.io/ceph/ceph:v18, name=affectionate_wiles, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507)
Oct 11 01:42:28 compute-0 sudo[199178]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:28 compute-0 sudo[199178]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:28 compute-0 sudo[199178]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:28 compute-0 sudo[199204]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/etc/ceph/ceph.client.admin.keyring.new
Oct 11 01:42:28 compute-0 sudo[199204]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:28 compute-0 sudo[199204]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:28 compute-0 ceph-mon[191930]: from='client.14174 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
Oct 11 01:42:28 compute-0 ceph-mon[191930]: Updating compute-0:/etc/ceph/ceph.client.admin.keyring
Oct 11 01:42:29 compute-0 podman[199229]: 2025-10-11 01:42:29.015628833 +0000 UTC m=+0.110619113 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, build-date=2025-08-20T13:12:41, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-type=git, config_id=edpm, io.openshift.expose-services=, container_name=openstack_network_exporter, maintainer=Red Hat, Inc., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.buildah.version=1.33.7, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, architecture=x86_64, com.redhat.component=ubi9-minimal-container, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350, distribution-scope=public, name=ubi9-minimal, vendor=Red Hat, Inc., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, managed_by=edpm_ansible, io.openshift.tags=minimal rhel9, version=9.6, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://catalog.redhat.com/en/search?searchType=containers)
Oct 11 01:42:29 compute-0 podman[199228]: 2025-10-11 01:42:29.029582875 +0000 UTC m=+0.126104576 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
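The two health_status entries show podman's periodic healthchecks for the telemetry containers: each runs the 'test' command from its config_data ('/openstack/healthcheck ...') inside the container and reports health_status=healthy with a failing streak of 0. The same check can be triggered on demand; a small sketch, assuming the container names from the log and that `podman healthcheck run` is available on this host.

    import subprocess

    def container_healthy(name: str) -> bool:
        # "podman healthcheck run" executes the container's configured
        # healthcheck command once and exits 0 when it reports healthy.
        return subprocess.run(["podman", "healthcheck", "run", name]).returncode == 0

    # container_healthy("node_exporter")
    # container_healthy("openstack_network_exporter")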
Oct 11 01:42:29 compute-0 sudo[199312]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:29 compute-0 sudo[199312]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:29 compute-0 sudo[199312]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config set, name=log_to_file}] v 0) v1
Oct 11 01:42:29 compute-0 sudo[199338]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R 0:0 /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/etc/ceph/ceph.client.admin.keyring.new
Oct 11 01:42:29 compute-0 sudo[199338]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:29 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/2533501138' entity='client.admin' 
Oct 11 01:42:29 compute-0 sudo[199338]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:29 compute-0 systemd[1]: libpod-dcd0af9f9dd963243c46bd206fb38210b74b1d73d3c0899d7ef6fcfcc182057a.scope: Deactivated successfully.
Oct 11 01:42:29 compute-0 conmon[199170]: conmon dcd0af9f9dd963243c46 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-dcd0af9f9dd963243c46bd206fb38210b74b1d73d3c0899d7ef6fcfcc182057a.scope/container/memory.events
Oct 11 01:42:29 compute-0 podman[199114]: 2025-10-11 01:42:29.283016882 +0000 UTC m=+0.887837119 container died dcd0af9f9dd963243c46bd206fb38210b74b1d73d3c0899d7ef6fcfcc182057a (image=quay.io/ceph/ceph:v18, name=affectionate_wiles, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507)
Oct 11 01:42:29 compute-0 systemd[1]: var-lib-containers-storage-overlay-958548505d30eb60948997c116e397b0a9b777301d86e454052bc1dced0f1b7a-merged.mount: Deactivated successfully.
Oct 11 01:42:29 compute-0 podman[199114]: 2025-10-11 01:42:29.378188272 +0000 UTC m=+0.983008499 container remove dcd0af9f9dd963243c46bd206fb38210b74b1d73d3c0899d7ef6fcfcc182057a (image=quay.io/ceph/ceph:v18, name=affectionate_wiles, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Oct 11 01:42:29 compute-0 systemd[1]: libpod-conmon-dcd0af9f9dd963243c46bd206fb38210b74b1d73d3c0899d7ef6fcfcc182057a.scope: Deactivated successfully.
Oct 11 01:42:29 compute-0 sudo[199365]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:29 compute-0 sudo[199365]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:29 compute-0 sudo[199365]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:29 compute-0 sudo[199065]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:29 compute-0 sudo[199400]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 600 /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/etc/ceph/ceph.client.admin.keyring.new
Oct 11 01:42:29 compute-0 sudo[199400]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:29 compute-0 sudo[199400]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e2 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:42:29 compute-0 sudo[199425]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:29 compute-0 sudo[199425]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:29 compute-0 sudo[199425]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:29 compute-0 sudo[199472]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-guqhamsnzwfsmavqkzszkegqpxzwmfnr ; /usr/bin/python3'
Oct 11 01:42:29 compute-0 sudo[199472]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:42:29 compute-0 podman[157119]: time="2025-10-11T01:42:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:42:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:42:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 22105 "" "Go-http-client/1.1"
Oct 11 01:42:29 compute-0 sudo[199475]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mv /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/etc/ceph/ceph.client.admin.keyring.new /etc/ceph/ceph.client.admin.keyring
Oct 11 01:42:29 compute-0 sudo[199475]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:42:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 3942 "" "Go-http-client/1.1"
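The podman[157119] entries are the libpod REST service answering HTTP requests from a Go client (a monitoring agent, per the Go-http-client user agent): one call lists all containers, the other fetches a single stats sample. The API is versioned under /v4.9.3/libpod and is normally served on a unix socket. A minimal sketch of the same query from Python; the socket path below is the conventional rootful default and is an assumption, since the log does not show it.

    import http.client
    import json
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        # HTTP over a unix socket, enough to talk to the libpod REST API.
        def __init__(self, path: str):
            super().__init__("localhost")
            self._path = path

        def connect(self):
            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            s.connect(self._path)
            self.sock = s

    conn = UnixHTTPConnection("/run/podman/podman.sock")  # assumed socket path
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    containers = json.loads(conn.getresponse().read())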
Oct 11 01:42:29 compute-0 sudo[199475]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:29 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.serve] Updating compute-0:/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.client.admin.keyring
Oct 11 01:42:29 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Updating compute-0:/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.client.admin.keyring
Oct 11 01:42:29 compute-0 python3[199476]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z   --volume /home/ceph-admin/specs/ceph_spec.yaml:/home/ceph_spec.yaml:z   --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   config set global mon_cluster_log_to_file true _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:42:29 compute-0 sudo[199501]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:29 compute-0 sudo[199501]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:29 compute-0 sudo[199501]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:29 compute-0 ceph-mon[191930]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:29 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2533501138' entity='client.admin' 
Oct 11 01:42:29 compute-0 podman[199523]: 2025-10-11 01:42:29.998267862 +0000 UTC m=+0.091485450 container create a1724799250a7d07edde6cb5745af3ac3ff3ab8fca8077499fde44f57f224969 (image=quay.io/ceph/ceph:v18, name=peaceful_benz, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 01:42:30 compute-0 podman[199523]: 2025-10-11 01:42:29.964303108 +0000 UTC m=+0.057520786 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:30 compute-0 systemd[1]: Started libpod-conmon-a1724799250a7d07edde6cb5745af3ac3ff3ab8fca8077499fde44f57f224969.scope.
Oct 11 01:42:30 compute-0 sudo[199536]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config
Oct 11 01:42:30 compute-0 sudo[199536]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:30 compute-0 sudo[199536]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:30 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a30f3bc5773d9c3fba86b8b3009bfc2e10bf28cd45d9ffcbd189b7d82c77c209/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a30f3bc5773d9c3fba86b8b3009bfc2e10bf28cd45d9ffcbd189b7d82c77c209/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a30f3bc5773d9c3fba86b8b3009bfc2e10bf28cd45d9ffcbd189b7d82c77c209/merged/home/ceph_spec.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:30 compute-0 podman[199523]: 2025-10-11 01:42:30.165777019 +0000 UTC m=+0.258994657 container init a1724799250a7d07edde6cb5745af3ac3ff3ab8fca8077499fde44f57f224969 (image=quay.io/ceph/ceph:v18, name=peaceful_benz, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:42:30 compute-0 podman[199523]: 2025-10-11 01:42:30.185844915 +0000 UTC m=+0.279062533 container start a1724799250a7d07edde6cb5745af3ac3ff3ab8fca8077499fde44f57f224969 (image=quay.io/ceph/ceph:v18, name=peaceful_benz, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 01:42:30 compute-0 podman[199523]: 2025-10-11 01:42:30.193459637 +0000 UTC m=+0.286677255 container attach a1724799250a7d07edde6cb5745af3ac3ff3ab8fca8077499fde44f57f224969 (image=quay.io/ceph/ceph:v18, name=peaceful_benz, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:42:30 compute-0 sudo[199569]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:30 compute-0 sudo[199569]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:30 compute-0 sudo[199569]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:30 compute-0 sudo[199595]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config
Oct 11 01:42:30 compute-0 sudo[199595]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:30 compute-0 sudo[199595]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:30 compute-0 sudo[199620]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:30 compute-0 sudo[199620]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:30 compute-0 sudo[199620]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:30 compute-0 sudo[199656]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/touch /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.client.admin.keyring.new
Oct 11 01:42:30 compute-0 sudo[199656]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:30 compute-0 sudo[199656]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:30 compute-0 sudo[199689]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:30 compute-0 sudo[199689]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:30 compute-0 sudo[199689]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config set, name=mon_cluster_log_to_file}] v 0) v1
Oct 11 01:42:30 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/854619407' entity='client.admin' 
Oct 11 01:42:30 compute-0 systemd[1]: libpod-a1724799250a7d07edde6cb5745af3ac3ff3ab8fca8077499fde44f57f224969.scope: Deactivated successfully.
Oct 11 01:42:30 compute-0 podman[199523]: 2025-10-11 01:42:30.875048408 +0000 UTC m=+0.968266046 container died a1724799250a7d07edde6cb5745af3ac3ff3ab8fca8077499fde44f57f224969 (image=quay.io/ceph/ceph:v18, name=peaceful_benz, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:42:30 compute-0 systemd[1]: var-lib-containers-storage-overlay-a30f3bc5773d9c3fba86b8b3009bfc2e10bf28cd45d9ffcbd189b7d82c77c209-merged.mount: Deactivated successfully.
Oct 11 01:42:30 compute-0 sudo[199714]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R ceph-admin /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:42:30 compute-0 sudo[199714]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:30 compute-0 sudo[199714]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:30 compute-0 ceph-mon[191930]: Updating compute-0:/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.client.admin.keyring
Oct 11 01:42:30 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/854619407' entity='client.admin' 
Oct 11 01:42:30 compute-0 podman[199523]: 2025-10-11 01:42:30.969870321 +0000 UTC m=+1.063087909 container remove a1724799250a7d07edde6cb5745af3ac3ff3ab8fca8077499fde44f57f224969 (image=quay.io/ceph/ceph:v18, name=peaceful_benz, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507)
Oct 11 01:42:30 compute-0 systemd[1]: libpod-conmon-a1724799250a7d07edde6cb5745af3ac3ff3ab8fca8077499fde44f57f224969.scope: Deactivated successfully.
Oct 11 01:42:31 compute-0 sudo[199472]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:31 compute-0 sudo[199752]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:31 compute-0 sudo[199752]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:31 compute-0 sudo[199752]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:31 compute-0 sudo[199777]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.client.admin.keyring.new
Oct 11 01:42:31 compute-0 sudo[199777]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:31 compute-0 sudo[199777]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:31 compute-0 sudo[199852]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-isfbfhkcfasyutxbtrqccceptinqbepz ; /usr/bin/python3'
Oct 11 01:42:31 compute-0 sudo[199852]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:42:31 compute-0 openstack_network_exporter[159265]: ERROR   01:42:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:42:31 compute-0 openstack_network_exporter[159265]: ERROR   01:42:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:42:31 compute-0 openstack_network_exporter[159265]: ERROR   01:42:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:42:31 compute-0 openstack_network_exporter[159265]: ERROR   01:42:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:42:31 compute-0 openstack_network_exporter[159265]: 
Oct 11 01:42:31 compute-0 openstack_network_exporter[159265]: ERROR   01:42:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:42:31 compute-0 openstack_network_exporter[159265]: 
Oct 11 01:42:31 compute-0 sudo[199845]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:31 compute-0 sudo[199845]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:31 compute-0 sudo[199845]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:31 compute-0 python3[199867]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z   --volume /home/ceph-admin/specs/ceph_spec.yaml:/home/ceph_spec.yaml:z   --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   osd set-require-min-compat-client mimic _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
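`osd set-require-min-compat-client mimic` pins the oldest client release the cluster will accept; features that depend on newer client support (for example the upmap balancer, which requires Luminous or later clients) can only be enabled once older clients are locked out. The setting can be verified afterwards from the OSD map; a short sketch, again assuming a reachable `ceph` CLI, with a hypothetical helper name.

    import json
    import subprocess

    def require_min_compat() -> str:
        # "ceph osd dump -f json" includes the cluster-wide
        # require_min_compat_client field set by the command above.
        dump = json.loads(subprocess.run(
            ["ceph", "osd", "dump", "-f", "json"],
            check=True, capture_output=True, text=True,
        ).stdout)
        return dump.get("require_min_compat_client", "")

    # expected after this run: "mimic"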
Oct 11 01:42:31 compute-0 sudo[199876]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R 0:0 /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.client.admin.keyring.new
Oct 11 01:42:31 compute-0 sudo[199876]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:31 compute-0 sudo[199876]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:31 compute-0 podman[199900]: 2025-10-11 01:42:31.655309546 +0000 UTC m=+0.073553154 container create 2d551c7ae4ce5c0a36e1ed942167b12a57e4dafcd893d5dc43de45883dedbc57 (image=quay.io/ceph/ceph:v18, name=quizzical_kalam, io.buildah.version=1.39.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0)
Oct 11 01:42:31 compute-0 systemd[1]: Started libpod-conmon-2d551c7ae4ce5c0a36e1ed942167b12a57e4dafcd893d5dc43de45883dedbc57.scope.
Oct 11 01:42:31 compute-0 podman[199900]: 2025-10-11 01:42:31.629074622 +0000 UTC m=+0.047318240 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:31 compute-0 sudo[199908]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:31 compute-0 sudo[199908]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:31 compute-0 sudo[199908]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:31 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:31 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6994b4835d5743d4f1dba2db9ca78258a41a606e81bc5ca84a88759be3c5b2e0/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:31 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6994b4835d5743d4f1dba2db9ca78258a41a606e81bc5ca84a88759be3c5b2e0/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:31 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6994b4835d5743d4f1dba2db9ca78258a41a606e81bc5ca84a88759be3c5b2e0/merged/home/ceph_spec.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:31 compute-0 podman[199900]: 2025-10-11 01:42:31.788658043 +0000 UTC m=+0.206901711 container init 2d551c7ae4ce5c0a36e1ed942167b12a57e4dafcd893d5dc43de45883dedbc57 (image=quay.io/ceph/ceph:v18, name=quizzical_kalam, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Oct 11 01:42:31 compute-0 podman[199900]: 2025-10-11 01:42:31.806865452 +0000 UTC m=+0.225109060 container start 2d551c7ae4ce5c0a36e1ed942167b12a57e4dafcd893d5dc43de45883dedbc57 (image=quay.io/ceph/ceph:v18, name=quizzical_kalam, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 01:42:31 compute-0 podman[199900]: 2025-10-11 01:42:31.813603614 +0000 UTC m=+0.231847312 container attach 2d551c7ae4ce5c0a36e1ed942167b12a57e4dafcd893d5dc43de45883dedbc57 (image=quay.io/ceph/ceph:v18, name=quizzical_kalam, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:42:31 compute-0 sudo[199944]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 600 /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.client.admin.keyring.new
Oct 11 01:42:31 compute-0 sudo[199944]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:31 compute-0 sudo[199944]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:31 compute-0 ceph-mon[191930]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:32 compute-0 sudo[199970]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:32 compute-0 sudo[199970]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:32 compute-0 sudo[199970]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:32 compute-0 sudo[199995]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mv /tmp/cephadm-3c7617c3-7a20-523e-a9de-20c0d6ba41da/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.client.admin.keyring.new /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/config/ceph.client.admin.keyring
Oct 11 01:42:32 compute-0 sudo[199995]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:32 compute-0 sudo[199995]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:42:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:42:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:42:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:32 compute-0 ceph-mgr[192233]: [progress INFO root] update: starting ev a940cf42-53d9-4533-8593-e84a8bef41cc (Updating crash deployment (+1 -> 1))
Oct 11 01:42:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get-or-create", "entity": "client.crash.compute-0", "caps": ["mon", "profile crash", "mgr", "profile crash"]} v 0) v1
Oct 11 01:42:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.compute-0", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
Oct 11 01:42:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.compute-0", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished
Oct 11 01:42:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:42:32 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
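Each 'handle_command mon_command({...})' line is the monitor receiving a JSON command whose 'prefix' field selects the handler (config-key set, auth get-or-create, config generate-minimal-conf), and the audit channel then logs dispatch and completion separately. The same JSON interface is reachable from the python3-rados binding; a hedged sketch, assuming a reachable cluster, /etc/ceph/ceph.conf, and an admin keyring:

    import json
    import rados  # python3-rados

    cluster = rados.Rados(conffile="/etc/ceph/ceph.conf", name="client.admin")
    cluster.connect()
    # Same command shape the monitor logs above:
    cmd = json.dumps({"prefix": "config generate-minimal-conf"})
    ret, outbuf, outs = cluster.mon_command(cmd, b"")
    print(ret, outbuf.decode())
    cluster.shutdown()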
Oct 11 01:42:32 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.serve] Deploying daemon crash.compute-0 on compute-0
Oct 11 01:42:32 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Deploying daemon crash.compute-0 on compute-0
Oct 11 01:42:32 compute-0 sudo[200039]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:32 compute-0 sudo[200039]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:32 compute-0 sudo[200039]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd set-require-min-compat-client", "version": "mimic"} v 0) v1
Oct 11 01:42:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/3860803967' entity='client.admin' cmd=[{"prefix": "osd set-require-min-compat-client", "version": "mimic"}]: dispatch
Oct 11 01:42:32 compute-0 sudo[200064]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:42:32 compute-0 sudo[200064]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:32 compute-0 sudo[200064]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:32 compute-0 sudo[200090]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:32 compute-0 sudo[200090]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:32 compute-0 sudo[200090]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:32 compute-0 sudo[200115]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 _orch deploy --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da
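The orchestrator never runs whatever cephadm happens to be on PATH; it ships its own copy under /var/lib/ceph/<fsid>/ with a 64-hex suffix appended to the filename, which looks like a SHA-256 digest of the file itself (an assumption; the log only shows the name). Content-addressed naming of that sort is a few lines:

    import hashlib

    def content_addressed_name(path: str) -> str:
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).hexdigest()
        return "cephadm." + digest  # e.g. cephadm.31206ab2...452ed8d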
Oct 11 01:42:32 compute-0 sudo[200115]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.compute-0", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
Oct 11 01:42:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.compute-0", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished
Oct 11 01:42:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:42:33 compute-0 ceph-mon[191930]: Deploying daemon crash.compute-0 on compute-0
Oct 11 01:42:33 compute-0 ceph-mon[191930]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:33 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3860803967' entity='client.admin' cmd=[{"prefix": "osd set-require-min-compat-client", "version": "mimic"}]: dispatch
Oct 11 01:42:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e2 do_prune osdmap full prune enabled
Oct 11 01:42:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e2 encode_pending skipping prime_pg_temp; mapping job did not start
Oct 11 01:42:33 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/3860803967' entity='client.admin' cmd='[{"prefix": "osd set-require-min-compat-client", "version": "mimic"}]': finished
Oct 11 01:42:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e3 e3: 0 total, 0 up, 0 in
Oct 11 01:42:33 compute-0 quizzical_kalam[199939]: set require_min_compat_client to mimic
Oct 11 01:42:33 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e3: 0 total, 0 up, 0 in
Oct 11 01:42:33 compute-0 systemd[1]: libpod-2d551c7ae4ce5c0a36e1ed942167b12a57e4dafcd893d5dc43de45883dedbc57.scope: Deactivated successfully.
Oct 11 01:42:33 compute-0 podman[200181]: 2025-10-11 01:42:33.382876398 +0000 UTC m=+0.072760630 container died 2d551c7ae4ce5c0a36e1ed942167b12a57e4dafcd893d5dc43de45883dedbc57 (image=quay.io/ceph/ceph:v18, name=quizzical_kalam, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:42:33 compute-0 podman[200180]: 2025-10-11 01:42:33.398002695 +0000 UTC m=+0.082620151 container create d44c89cfe0cf9294edb212681a1a0607f2db67669c251070330f5e33af9dfe1a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_grothendieck, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:42:33 compute-0 systemd[1]: var-lib-containers-storage-overlay-6994b4835d5743d4f1dba2db9ca78258a41a606e81bc5ca84a88759be3c5b2e0-merged.mount: Deactivated successfully.
Oct 11 01:42:33 compute-0 podman[200180]: 2025-10-11 01:42:33.361978016 +0000 UTC m=+0.046595512 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:42:33 compute-0 systemd[1]: Started libpod-conmon-d44c89cfe0cf9294edb212681a1a0607f2db67669c251070330f5e33af9dfe1a.scope.
Oct 11 01:42:33 compute-0 podman[200181]: 2025-10-11 01:42:33.506218417 +0000 UTC m=+0.196102609 container remove 2d551c7ae4ce5c0a36e1ed942167b12a57e4dafcd893d5dc43de45883dedbc57 (image=quay.io/ceph/ceph:v18, name=quizzical_kalam, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 01:42:33 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:33 compute-0 systemd[1]: libpod-conmon-2d551c7ae4ce5c0a36e1ed942167b12a57e4dafcd893d5dc43de45883dedbc57.scope: Deactivated successfully.
Oct 11 01:42:33 compute-0 podman[200180]: 2025-10-11 01:42:33.543509455 +0000 UTC m=+0.228126951 container init d44c89cfe0cf9294edb212681a1a0607f2db67669c251070330f5e33af9dfe1a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_grothendieck, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.license=GPLv2)
Oct 11 01:42:33 compute-0 podman[200180]: 2025-10-11 01:42:33.562784343 +0000 UTC m=+0.247401799 container start d44c89cfe0cf9294edb212681a1a0607f2db67669c251070330f5e33af9dfe1a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_grothendieck, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:42:33 compute-0 upbeat_grothendieck[200208]: 167 167
Oct 11 01:42:33 compute-0 podman[200180]: 2025-10-11 01:42:33.570031729 +0000 UTC m=+0.254649185 container attach d44c89cfe0cf9294edb212681a1a0607f2db67669c251070330f5e33af9dfe1a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_grothendieck, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:42:33 compute-0 systemd[1]: libpod-d44c89cfe0cf9294edb212681a1a0607f2db67669c251070330f5e33af9dfe1a.scope: Deactivated successfully.
Oct 11 01:42:33 compute-0 podman[200180]: 2025-10-11 01:42:33.571427193 +0000 UTC m=+0.256044639 container died d44c89cfe0cf9294edb212681a1a0607f2db67669c251070330f5e33af9dfe1a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_grothendieck, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:42:33 compute-0 sudo[199852]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:33 compute-0 systemd[1]: var-lib-containers-storage-overlay-0f8b01163d6f21f9f8bc1a6e984db62a5c0eef290b28532973becd4052bd822d-merged.mount: Deactivated successfully.
Oct 11 01:42:33 compute-0 podman[200180]: 2025-10-11 01:42:33.641870916 +0000 UTC m=+0.326488342 container remove d44c89cfe0cf9294edb212681a1a0607f2db67669c251070330f5e33af9dfe1a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_grothendieck, ceph=True, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
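quizzical_kalam, upbeat_grothendieck and sad_kalam all leave the same trace: image pull, container create, init, start, attach, then died and remove within a second or two, with systemd tearing down the matching libpod scopes. That is the signature of one-shot 'podman run --rm' invocations; the container lives only as long as the single ceph command inside it. A minimal reproduction (image and command chosen for illustration):

    import subprocess

    # A one-shot container produces exactly the create/init/start/attach
    # events above, and --rm accounts for the died/remove pair.
    result = subprocess.run(
        ["podman", "run", "--rm", "--net=host",
         "quay.io/ceph/ceph:v18", "ceph", "--version"],
        capture_output=True, text=True, check=True,
    )
    print(result.stdout.strip())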
Oct 11 01:42:33 compute-0 systemd[1]: libpod-conmon-d44c89cfe0cf9294edb212681a1a0607f2db67669c251070330f5e33af9dfe1a.scope: Deactivated successfully.
Oct 11 01:42:33 compute-0 systemd[1]: Reloading.
Oct 11 01:42:33 compute-0 systemd-rc-local-generator[200254]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:42:33 compute-0 systemd-sysv-generator[200257]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:42:34 compute-0 systemd[1]: Reloading.
Oct 11 01:42:34 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3860803967' entity='client.admin' cmd='[{"prefix": "osd set-require-min-compat-client", "version": "mimic"}]': finished
Oct 11 01:42:34 compute-0 ceph-mon[191930]: osdmap e3: 0 total, 0 up, 0 in
Oct 11 01:42:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:34 compute-0 systemd-rc-local-generator[200327]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:42:34 compute-0 systemd-sysv-generator[200331]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:42:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e3 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:42:34 compute-0 sudo[200296]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xovapafuigcyeiuxboomqjmubhkxlxph ; /usr/bin/python3'
Oct 11 01:42:34 compute-0 systemd[1]: Starting Ceph crash.compute-0 for 3c7617c3-7a20-523e-a9de-20c0d6ba41da...
Oct 11 01:42:34 compute-0 sudo[200296]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:42:34 compute-0 python3[200344]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z   --volume /home/ceph-admin/specs/ceph_spec.yaml:/home/ceph_spec.yaml:z   --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   orch apply --in-file /home/ceph_spec.yaml _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
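This Ansible command task is the same one-shot pattern driven from the deploy playbook: bind-mount /etc/ceph and the spec file into the v18 image and run 'orch apply --in-file /home/ceph_spec.yaml'. The spec file itself is not captured in the log; a hypothetical minimal spec consistent with what gets scheduled a moment later (mon, mgr and osd.default_drive_group on compute-0) could look like this, parsed here only to show its shape:

    import textwrap
    import yaml  # PyYAML

    # Hypothetical ceph_spec.yaml; the real file is not captured in this log.
    SPEC = textwrap.dedent("""\
        service_type: mon
        placement:
          hosts:
            - compute-0
        ---
        service_type: mgr
        placement:
          hosts:
            - compute-0
        ---
        service_type: osd
        service_id: default_drive_group
        placement:
          hosts:
            - compute-0
        data_devices:
          all: true
        """)

    for doc in yaml.safe_load_all(SPEC):
        print(doc["service_type"], doc.get("service_id") or "")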
Oct 11 01:42:34 compute-0 podman[200335]: 2025-10-11 01:42:34.95875114 +0000 UTC m=+0.142252983 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=edpm, io.buildah.version=1.41.3, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi)
Oct 11 01:42:35 compute-0 podman[200377]: 2025-10-11 01:42:35.072523677 +0000 UTC m=+0.077849215 container create b67f15a1b80713b1a09a65eafe7d3e49bd08ad0f2ff2135fdcc436ef7e746e8e (image=quay.io/ceph/ceph:v18, name=cool_galois, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:42:35 compute-0 podman[200377]: 2025-10-11 01:42:35.041799502 +0000 UTC m=+0.047125120 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:35 compute-0 systemd[1]: Started libpod-conmon-b67f15a1b80713b1a09a65eafe7d3e49bd08ad0f2ff2135fdcc436ef7e746e8e.scope.
Oct 11 01:42:35 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/60e029d50cd65d8d59708cc66cedeebf52e29e78e8e06a74b97744267b580791/merged/home/ceph_spec.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/60e029d50cd65d8d59708cc66cedeebf52e29e78e8e06a74b97744267b580791/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/60e029d50cd65d8d59708cc66cedeebf52e29e78e8e06a74b97744267b580791/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:35 compute-0 podman[200377]: 2025-10-11 01:42:35.214747499 +0000 UTC m=+0.220073047 container init b67f15a1b80713b1a09a65eafe7d3e49bd08ad0f2ff2135fdcc436ef7e746e8e (image=quay.io/ceph/ceph:v18, name=cool_galois, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:42:35 compute-0 podman[200377]: 2025-10-11 01:42:35.237088494 +0000 UTC m=+0.242414042 container start b67f15a1b80713b1a09a65eafe7d3e49bd08ad0f2ff2135fdcc436ef7e746e8e (image=quay.io/ceph/ceph:v18, name=cool_galois, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507)
Oct 11 01:42:35 compute-0 podman[200408]: 2025-10-11 01:42:35.244939445 +0000 UTC m=+0.092969861 container create cba4b470035c7c7daebf34beb62acba51cef0b69081fa66559c36ef9e7f53cdc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-crash-compute-0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:42:35 compute-0 podman[200377]: 2025-10-11 01:42:35.260267304 +0000 UTC m=+0.265592852 container attach b67f15a1b80713b1a09a65eafe7d3e49bd08ad0f2ff2135fdcc436ef7e746e8e (image=quay.io/ceph/ceph:v18, name=cool_galois, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True)
Oct 11 01:42:35 compute-0 ceph-mon[191930]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:35 compute-0 podman[200408]: 2025-10-11 01:42:35.204768414 +0000 UTC m=+0.052798890 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:42:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/32073b83eacb6fef9c31d13ef62afe49a68b4505fd6668ecc66e23fa6fadb81f/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/32073b83eacb6fef9c31d13ef62afe49a68b4505fd6668ecc66e23fa6fadb81f/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/32073b83eacb6fef9c31d13ef62afe49a68b4505fd6668ecc66e23fa6fadb81f/merged/etc/ceph/ceph.client.crash.compute-0.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/32073b83eacb6fef9c31d13ef62afe49a68b4505fd6668ecc66e23fa6fadb81f/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
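The recurring kernel note about timestamps 'until 2038 (0x7fffffff)' on these overlay bind mounts is the classic 32-bit time_t ceiling: 0x7fffffff seconds past the Unix epoch. The boundary is quick to verify:

    from datetime import datetime, timezone

    limit = 0x7fffffff  # 2147483647 seconds since the epoch
    print(datetime.fromtimestamp(limit, tz=timezone.utc))
    # 2038-01-19 03:14:07+00:00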
Oct 11 01:42:35 compute-0 podman[200408]: 2025-10-11 01:42:35.349780014 +0000 UTC m=+0.197810440 container init cba4b470035c7c7daebf34beb62acba51cef0b69081fa66559c36ef9e7f53cdc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-crash-compute-0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:42:35 compute-0 podman[200408]: 2025-10-11 01:42:35.360859215 +0000 UTC m=+0.208889591 container start cba4b470035c7c7daebf34beb62acba51cef0b69081fa66559c36ef9e7f53cdc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-crash-compute-0, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:42:35 compute-0 bash[200408]: cba4b470035c7c7daebf34beb62acba51cef0b69081fa66559c36ef9e7f53cdc
Oct 11 01:42:35 compute-0 systemd[1]: Started Ceph crash.compute-0 for 3c7617c3-7a20-523e-a9de-20c0d6ba41da.
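'Ceph crash.compute-0 for 3c7617c3-...' is only the unit description; cephadm-managed daemons run as instances of a per-fsid systemd template, which is also why each deploy is bracketed by 'systemd[1]: Reloading.' after new unit files land. The instance name implied by that description (an assumption, since the unit name itself is not printed here) would be assembled like:

    fsid = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"
    daemon = "crash.compute-0"
    # hypothetical cephadm-style unit name: ceph-<fsid>@<daemon>.service
    print("ceph-" + fsid + "@" + daemon + ".service")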
Oct 11 01:42:35 compute-0 sudo[200115]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:42:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:42:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.crash}] v 0) v1
Oct 11 01:42:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:35 compute-0 ceph-mgr[192233]: [progress INFO root] complete: finished ev a940cf42-53d9-4533-8593-e84a8bef41cc (Updating crash deployment (+1 -> 1))
Oct 11 01:42:35 compute-0 ceph-mgr[192233]: [progress INFO root] Completed event a940cf42-53d9-4533-8593-e84a8bef41cc (Updating crash deployment (+1 -> 1)) in 3 seconds
Oct 11 01:42:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.crash}] v 0) v1
Oct 11 01:42:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:35 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev bac08e36-8a12-4edf-8b23-b9b2e64a5c39 does not exist
Oct 11 01:42:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.mon}] v 0) v1
Oct 11 01:42:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:35 compute-0 ceph-mgr[192233]: [progress INFO root] update: starting ev 890e5a84-e165-4eaa-82c5-f20eb2652119 (Updating mgr deployment (+1 -> 2))
Oct 11 01:42:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get-or-create", "entity": "mgr.compute-0.jwossk", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} v 0) v1
Oct 11 01:42:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.compute-0.jwossk", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
Oct 11 01:42:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.compute-0.jwossk", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished
Oct 11 01:42:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr services"} v 0) v1
Oct 11 01:42:35 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mgr services"}]: dispatch
Oct 11 01:42:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:42:35 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:42:35 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.serve] Deploying daemon mgr.compute-0.jwossk on compute-0
Oct 11 01:42:35 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Deploying daemon mgr.compute-0.jwossk on compute-0
Oct 11 01:42:35 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-crash-compute-0[200427]: INFO:ceph-crash:pinging cluster to exercise our key
Oct 11 01:42:35 compute-0 sudo[200433]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:35 compute-0 sudo[200433]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:35 compute-0 sudo[200433]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:35 compute-0 sudo[200479]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:42:35 compute-0 sudo[200479]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:35 compute-0 sudo[200479]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:35 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-crash-compute-0[200427]: 2025-10-11T01:42:35.788+0000 7f9d82d77640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Oct 11 01:42:35 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-crash-compute-0[200427]: 2025-10-11T01:42:35.788+0000 7f9d82d77640 -1 AuthRegistry(0x7f9d7c067440) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Oct 11 01:42:35 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-crash-compute-0[200427]: 2025-10-11T01:42:35.791+0000 7f9d82d77640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Oct 11 01:42:35 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-crash-compute-0[200427]: 2025-10-11T01:42:35.791+0000 7f9d82d77640 -1 AuthRegistry(0x7f9d82d76000) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Oct 11 01:42:35 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-crash-compute-0[200427]: 2025-10-11T01:42:35.794+0000 7f9d80aec640 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [1]
Oct 11 01:42:35 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-crash-compute-0[200427]: 2025-10-11T01:42:35.794+0000 7f9d82d77640 -1 monclient: authenticate NOTE: no keyring found; disabled cephx authentication
Oct 11 01:42:35 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-crash-compute-0[200427]: [errno 13] RADOS permission denied (error connecting to the cluster)
Oct 11 01:42:35 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14182 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:35 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-crash-compute-0[200427]: INFO:ceph-crash:monitoring path /var/lib/ceph/crash, delay 600s
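The burst of keyring and 'RADOS permission denied' errors above comes from ceph-crash exercising its key at startup: the admin keyring paths it probes first are deliberately absent in this container, only the crash-specific keyring is mounted, so the failed attempts appear harmless here and the daemon proceeds to its watch loop. Its steady state is simple: rescan /var/lib/ceph/crash on the logged 600 s delay and post anything new. A hedged sketch of that loop ('ceph crash post -i' is a real subcommand; the directory handling is simplified):

    import subprocess
    import time
    from pathlib import Path

    CRASH_DIR = Path("/var/lib/ceph/crash")

    def post_new_crashes() -> None:
        for meta in CRASH_DIR.glob("*/meta"):   # one subdirectory per crash report
            subprocess.run(["ceph", "crash", "post", "-i", str(meta)], check=False)

    while True:
        post_new_crashes()
        time.sleep(600)                         # the 'delay 600s' from the log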
Oct 11 01:42:35 compute-0 sudo[200504]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:35 compute-0 sudo[200504]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:35 compute-0 sudo[200504]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:35 compute-0 sudo[200538]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:35 compute-0 sudo[200538]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:35 compute-0 sudo[200538]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:35 compute-0 sudo[200548]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 _orch deploy --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:42:35 compute-0 sudo[200548]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:36 compute-0 sudo[200589]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:42:36 compute-0 sudo[200589]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:36 compute-0 sudo[200589]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:36 compute-0 sudo[200622]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:36 compute-0 sudo[200622]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:36 compute-0 sudo[200622]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:36 compute-0 sudo[200663]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host --expect-hostname compute-0
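Before every deploy the mgr reruns 'check-host --expect-hostname compute-0' through the shipped cephadm copy, guarding against an inventory entry pointing at the wrong machine. At its core this is a hostname comparison plus a few probes for required binaries; a minimal sketch (the exact probe list is an assumption):

    import shutil
    import socket

    def check_host(expected: str) -> None:
        actual = socket.gethostname().split(".")[0]
        if actual != expected:
            raise SystemExit(f"hostname {actual!r} != expected {expected!r}")
        for tool in ("podman", "systemctl", "lvm"):  # assumed probe list
            if shutil.which(tool) is None:
                raise SystemExit(f"missing required binary: {tool}")

    check_host("compute-0")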
Oct 11 01:42:36 compute-0 sudo[200663]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.compute-0.jwossk", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
Oct 11 01:42:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.compute-0.jwossk", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished
Oct 11 01:42:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mgr services"}]: dispatch
Oct 11 01:42:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:42:36 compute-0 ceph-mon[191930]: Deploying daemon mgr.compute-0.jwossk on compute-0
Oct 11 01:42:36 compute-0 ceph-mon[191930]: from='client.14182 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:42:36 compute-0 ceph-mgr[192233]: [progress INFO root] Writing back 1 completed events
Oct 11 01:42:36 compute-0 podman[200703]: 2025-10-11 01:42:36.486517851 +0000 UTC m=+0.089216933 container create 9d834943bff5793fccd126711befeabc02ff8b9d320215eee98b0bb9b88acd0d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sad_kalam, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=reef)
Oct 11 01:42:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/progress/completed}] v 0) v1
Oct 11 01:42:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:36 compute-0 podman[200703]: 2025-10-11 01:42:36.452800658 +0000 UTC m=+0.055499760 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:42:36 compute-0 systemd[1]: Started libpod-conmon-9d834943bff5793fccd126711befeabc02ff8b9d320215eee98b0bb9b88acd0d.scope.
Oct 11 01:42:36 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:36 compute-0 podman[200703]: 2025-10-11 01:42:36.640047776 +0000 UTC m=+0.242746868 container init 9d834943bff5793fccd126711befeabc02ff8b9d320215eee98b0bb9b88acd0d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sad_kalam, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:42:36 compute-0 podman[200703]: 2025-10-11 01:42:36.654101726 +0000 UTC m=+0.256800788 container start 9d834943bff5793fccd126711befeabc02ff8b9d320215eee98b0bb9b88acd0d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sad_kalam, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 01:42:36 compute-0 podman[200703]: 2025-10-11 01:42:36.659204629 +0000 UTC m=+0.261903701 container attach 9d834943bff5793fccd126711befeabc02ff8b9d320215eee98b0bb9b88acd0d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sad_kalam, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:42:36 compute-0 sad_kalam[200727]: 167 167
Oct 11 01:42:36 compute-0 systemd[1]: libpod-9d834943bff5793fccd126711befeabc02ff8b9d320215eee98b0bb9b88acd0d.scope: Deactivated successfully.
Oct 11 01:42:36 compute-0 podman[200703]: 2025-10-11 01:42:36.663850541 +0000 UTC m=+0.266549632 container died 9d834943bff5793fccd126711befeabc02ff8b9d320215eee98b0bb9b88acd0d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sad_kalam, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:42:36 compute-0 systemd[1]: var-lib-containers-storage-overlay-7225f41f05f91caa924e2b8f1604c862fd85ae528d575ca1b6ec9f25f072f6ac-merged.mount: Deactivated successfully.
Oct 11 01:42:36 compute-0 sudo[200663]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:36 compute-0 podman[200703]: 2025-10-11 01:42:36.743856956 +0000 UTC m=+0.346556018 container remove 9d834943bff5793fccd126711befeabc02ff8b9d320215eee98b0bb9b88acd0d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sad_kalam, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:42:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/inventory}] v 0) v1
Oct 11 01:42:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/inventory}] v 0) v1
Oct 11 01:42:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/inventory}] v 0) v1
Oct 11 01:42:36 compute-0 systemd[1]: libpod-conmon-9d834943bff5793fccd126711befeabc02ff8b9d320215eee98b0bb9b88acd0d.scope: Deactivated successfully.
Oct 11 01:42:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/inventory}] v 0) v1
Oct 11 01:42:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:36 compute-0 ceph-mgr[192233]: [cephadm INFO root] Added host compute-0
Oct 11 01:42:36 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Added host compute-0
Oct 11 01:42:36 compute-0 ceph-mgr[192233]: [cephadm INFO root] Saving service mon spec with placement compute-0
Oct 11 01:42:36 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Saving service mon spec with placement compute-0
Oct 11 01:42:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.mon}] v 0) v1
Oct 11 01:42:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:36 compute-0 ceph-mgr[192233]: [cephadm INFO root] Saving service mgr spec with placement compute-0
Oct 11 01:42:36 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Saving service mgr spec with placement compute-0
Oct 11 01:42:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.mgr}] v 0) v1
Oct 11 01:42:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:36 compute-0 ceph-mgr[192233]: [cephadm INFO root] Marking host: compute-0 for OSDSpec preview refresh.
Oct 11 01:42:36 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Marking host: compute-0 for OSDSpec preview refresh.
Oct 11 01:42:36 compute-0 ceph-mgr[192233]: [cephadm INFO root] Saving service osd.default_drive_group spec with placement compute-0
Oct 11 01:42:36 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Saving service osd.default_drive_group spec with placement compute-0
Oct 11 01:42:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.osd.default_drive_group}] v 0) v1
Oct 11 01:42:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:36 compute-0 cool_galois[200409]: Added host 'compute-0' with addr '192.168.122.100'
Oct 11 01:42:36 compute-0 cool_galois[200409]: Scheduled mon update...
Oct 11 01:42:36 compute-0 cool_galois[200409]: Scheduled mgr update...
Oct 11 01:42:36 compute-0 cool_galois[200409]: Scheduled osd.default_drive_group update...
Oct 11 01:42:36 compute-0 systemd[1]: Reloading.
Oct 11 01:42:36 compute-0 podman[200760]: 2025-10-11 01:42:36.913304516 +0000 UTC m=+0.038019940 container died b67f15a1b80713b1a09a65eafe7d3e49bd08ad0f2ff2135fdcc436ef7e746e8e (image=quay.io/ceph/ceph:v18, name=cool_galois, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 01:42:37 compute-0 systemd-sysv-generator[200799]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:42:37 compute-0 systemd-rc-local-generator[200796]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:42:37 compute-0 systemd[1]: libpod-b67f15a1b80713b1a09a65eafe7d3e49bd08ad0f2ff2135fdcc436ef7e746e8e.scope: Deactivated successfully.
Oct 11 01:42:37 compute-0 systemd[1]: var-lib-containers-storage-overlay-60e029d50cd65d8d59708cc66cedeebf52e29e78e8e06a74b97744267b580791-merged.mount: Deactivated successfully.
Oct 11 01:42:37 compute-0 podman[200760]: 2025-10-11 01:42:37.310918363 +0000 UTC m=+0.435633767 container remove b67f15a1b80713b1a09a65eafe7d3e49bd08ad0f2ff2135fdcc436ef7e746e8e (image=quay.io/ceph/ceph:v18, name=cool_galois, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS)
Oct 11 01:42:37 compute-0 systemd[1]: libpod-conmon-b67f15a1b80713b1a09a65eafe7d3e49bd08ad0f2ff2135fdcc436ef7e746e8e.scope: Deactivated successfully.
Oct 11 01:42:37 compute-0 systemd[1]: Reloading.
Oct 11 01:42:37 compute-0 sudo[200296]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:37 compute-0 ceph-mon[191930]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:37 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:37 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:37 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:37 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:37 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:37 compute-0 ceph-mon[191930]: Added host compute-0
Oct 11 01:42:37 compute-0 ceph-mon[191930]: Saving service mon spec with placement compute-0
Oct 11 01:42:37 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:37 compute-0 ceph-mon[191930]: Saving service mgr spec with placement compute-0
Oct 11 01:42:37 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:37 compute-0 ceph-mon[191930]: Marking host: compute-0 for OSDSpec preview refresh.
Oct 11 01:42:37 compute-0 ceph-mon[191930]: Saving service osd.default_drive_group spec with placement compute-0
Oct 11 01:42:37 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:37 compute-0 systemd-sysv-generator[200843]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:42:37 compute-0 systemd-rc-local-generator[200837]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:42:37 compute-0 sudo[200872]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zlxzkofqybghiprdqgndzasyqdgxzymc ; /usr/bin/python3'
Oct 11 01:42:37 compute-0 sudo[200872]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:42:37 compute-0 systemd[1]: Starting Ceph mgr.compute-0.jwossk for 3c7617c3-7a20-523e-a9de-20c0d6ba41da...
Oct 11 01:42:38 compute-0 python3[200877]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z   --volume /home/ceph-admin/specs/ceph_spec.yaml:/home/ceph_spec.yaml:z   --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   status --format json | jq .osdmap.num_up_osds _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
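The job is now polling for OSDs: run 'ceph status --format json' in a throwaway container and extract .osdmap.num_up_osds with jq (still 0 at this point, since no OSD has been deployed yet). The same extraction without jq, assuming the flattened osdmap summary that the jq path above already implies:

    import json
    import subprocess

    raw = subprocess.run(
        ["ceph", "status", "--format", "json"],
        capture_output=True, text=True, check=True,
    ).stdout
    status = json.loads(raw)
    print(status["osdmap"]["num_up_osds"])  # 0 until the first OSDs come up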
Oct 11 01:42:38 compute-0 podman[200900]: 2025-10-11 01:42:38.151535648 +0000 UTC m=+0.089653196 container create eb21a3bece83836a7af7b95bf33afd69e1cf2dfdcc250de73f1ef1ff39f4c4de (image=quay.io/ceph/ceph:v18, name=sleepy_mestorf, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:42:38 compute-0 podman[200900]: 2025-10-11 01:42:38.113520729 +0000 UTC m=+0.051638297 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:42:38 compute-0 systemd[1]: Started libpod-conmon-eb21a3bece83836a7af7b95bf33afd69e1cf2dfdcc250de73f1ef1ff39f4c4de.scope.
Oct 11 01:42:38 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7f936d430d2313c536a4c3986e98723a131e021defe099e481b49eb982279fb6/merged/home/ceph_spec.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7f936d430d2313c536a4c3986e98723a131e021defe099e481b49eb982279fb6/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7f936d430d2313c536a4c3986e98723a131e021defe099e481b49eb982279fb6/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:38 compute-0 podman[200900]: 2025-10-11 01:42:38.30661313 +0000 UTC m=+0.244730688 container init eb21a3bece83836a7af7b95bf33afd69e1cf2dfdcc250de73f1ef1ff39f4c4de (image=quay.io/ceph/ceph:v18, name=sleepy_mestorf, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:42:38 compute-0 podman[200900]: 2025-10-11 01:42:38.32462258 +0000 UTC m=+0.262740128 container start eb21a3bece83836a7af7b95bf33afd69e1cf2dfdcc250de73f1ef1ff39f4c4de (image=quay.io/ceph/ceph:v18, name=sleepy_mestorf, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:42:38 compute-0 podman[200900]: 2025-10-11 01:42:38.33249277 +0000 UTC m=+0.270610368 container attach eb21a3bece83836a7af7b95bf33afd69e1cf2dfdcc250de73f1ef1ff39f4c4de (image=quay.io/ceph/ceph:v18, name=sleepy_mestorf, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:42:38 compute-0 podman[200938]: 2025-10-11 01:42:38.346013552 +0000 UTC m=+0.100325433 container create 10851b4f39ea40f06725d27f40874161245f96d424a9eb2c50a7904d43b723c6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-jwossk, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:42:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:38 compute-0 podman[200938]: 2025-10-11 01:42:38.297409803 +0000 UTC m=+0.051721704 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:42:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd75ad0348e8f7e4b9c978afa82850aeeb16a31974371e07e10720f9cfcdf3cc/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd75ad0348e8f7e4b9c978afa82850aeeb16a31974371e07e10720f9cfcdf3cc/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd75ad0348e8f7e4b9c978afa82850aeeb16a31974371e07e10720f9cfcdf3cc/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd75ad0348e8f7e4b9c978afa82850aeeb16a31974371e07e10720f9cfcdf3cc/merged/var/lib/ceph/mgr/ceph-compute-0.jwossk supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:38 compute-0 podman[200938]: 2025-10-11 01:42:38.468836972 +0000 UTC m=+0.223148863 container init 10851b4f39ea40f06725d27f40874161245f96d424a9eb2c50a7904d43b723c6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-jwossk, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507)
Oct 11 01:42:38 compute-0 podman[200938]: 2025-10-11 01:42:38.497058838 +0000 UTC m=+0.251370719 container start 10851b4f39ea40f06725d27f40874161245f96d424a9eb2c50a7904d43b723c6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-jwossk, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:42:38 compute-0 bash[200938]: 10851b4f39ea40f06725d27f40874161245f96d424a9eb2c50a7904d43b723c6
Oct 11 01:42:38 compute-0 systemd[1]: Started Ceph mgr.compute-0.jwossk for 3c7617c3-7a20-523e-a9de-20c0d6ba41da.
Oct 11 01:42:38 compute-0 ceph-mgr[200960]: set uid:gid to 167:167 (ceph:ceph)
Oct 11 01:42:38 compute-0 ceph-mgr[200960]: ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable), process ceph-mgr, pid 2
Oct 11 01:42:38 compute-0 ceph-mgr[200960]: pidfile_write: ignore empty --pid-file
Oct 11 01:42:38 compute-0 sudo[200548]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:42:38 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:42:38 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.mgr}] v 0) v1
Oct 11 01:42:38 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:38 compute-0 ceph-mgr[192233]: [progress INFO root] complete: finished ev 890e5a84-e165-4eaa-82c5-f20eb2652119 (Updating mgr deployment (+1 -> 2))
Oct 11 01:42:38 compute-0 ceph-mgr[192233]: [progress INFO root] Completed event 890e5a84-e165-4eaa-82c5-f20eb2652119 (Updating mgr deployment (+1 -> 2)) in 3 seconds
Oct 11 01:42:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.mgr}] v 0) v1
Oct 11 01:42:38 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:38 compute-0 ceph-mgr[200960]: mgr[py] Loading python module 'alerts'
Oct 11 01:42:38 compute-0 sudo[200985]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:38 compute-0 sudo[200985]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:38 compute-0 sudo[200985]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:38 compute-0 sudo[201029]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:42:38 compute-0 sudo[201029]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:38 compute-0 sudo[201029]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json"} v 0) v1
Oct 11 01:42:38 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1054900619' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
Oct 11 01:42:38 compute-0 sleepy_mestorf[200939]: {"fsid":"3c7617c3-7a20-523e-a9de-20c0d6ba41da","health":{"status":"HEALTH_WARN","checks":{"TOO_FEW_OSDS":{"severity":"HEALTH_WARN","summary":{"message":"OSD count 0 < osd_pool_default_size 1","count":1},"muted":false}},"mutes":[]},"election_epoch":5,"quorum":[0],"quorum_names":["compute-0"],"quorum_age":89,"monmap":{"epoch":1,"min_mon_release_name":"reef","num_mons":1},"osdmap":{"epoch":3,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0},"pgmap":{"pgs_by_state":[],"num_pgs":0,"num_pools":0,"num_objects":0,"data_bytes":0,"bytes_used":0,"bytes_avail":0,"bytes_total":0},"fsmap":{"epoch":1,"by_rank":[],"up:standby":0},"mgrmap":{"available":true,"num_standbys":0,"modules":["cephadm","iostat","nfs","restful"],"services":{}},"servicemap":{"epoch":1,"modified":"2025-10-11T01:41:05.449924+0000","services":{}},"progress_events":{"890e5a84-e165-4eaa-82c5-f20eb2652119":{"message":"Updating mgr deployment (+1 -> 2) (0s)\n      [............................] ","progress":0,"add_to_ceph_s":true}}}
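[editor's note] The HEALTH_WARN in this status blob is TOO_FEW_OSDS: zero OSDs exist while osd_pool_default_size is 1, which clears once the ceph-volume batch later in the log creates the three OSDs. A small sketch that walks the health checks of a captured status document; "status.json" is a hypothetical capture of the blob logged above:

    import json

    # Summarize the health block of a saved `ceph status --format json` blob.
    with open("status.json") as f:
        status = json.load(f)

    print(status["health"]["status"])  # HEALTH_WARN
    for name, check in status["health"]["checks"].items():
        print(f'{name} [{check["severity"]}]: {check["summary"]["message"]}')
    # Here: TOO_FEW_OSDS [HEALTH_WARN]: OSD count 0 < osd_pool_default_size 1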
Oct 11 01:42:39 compute-0 systemd[1]: libpod-eb21a3bece83836a7af7b95bf33afd69e1cf2dfdcc250de73f1ef1ff39f4c4de.scope: Deactivated successfully.
Oct 11 01:42:39 compute-0 podman[200900]: 2025-10-11 01:42:39.036501724 +0000 UTC m=+0.974619272 container died eb21a3bece83836a7af7b95bf33afd69e1cf2dfdcc250de73f1ef1ff39f4c4de (image=quay.io/ceph/ceph:v18, name=sleepy_mestorf, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:42:39 compute-0 sudo[201054]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:39 compute-0 sudo[201054]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:39 compute-0 sudo[201054]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:39 compute-0 ceph-mgr[200960]: mgr[py] Module alerts has missing NOTIFY_TYPES member
Oct 11 01:42:39 compute-0 ceph-mgr[200960]: mgr[py] Loading python module 'balancer'
Oct 11 01:42:39 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-jwossk[200956]: 2025-10-11T01:42:39.081+0000 7f4fc08ac140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
Oct 11 01:42:39 compute-0 systemd[1]: var-lib-containers-storage-overlay-7f936d430d2313c536a4c3986e98723a131e021defe099e481b49eb982279fb6-merged.mount: Deactivated successfully.
Oct 11 01:42:39 compute-0 podman[200900]: 2025-10-11 01:42:39.140576574 +0000 UTC m=+1.078694082 container remove eb21a3bece83836a7af7b95bf33afd69e1cf2dfdcc250de73f1ef1ff39f4c4de (image=quay.io/ceph/ceph:v18, name=sleepy_mestorf, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 01:42:39 compute-0 systemd[1]: libpod-conmon-eb21a3bece83836a7af7b95bf33afd69e1cf2dfdcc250de73f1ef1ff39f4c4de.scope: Deactivated successfully.
Oct 11 01:42:39 compute-0 sudo[200872]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:39 compute-0 sudo[201093]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:42:39 compute-0 sudo[201093]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:39 compute-0 sudo[201093]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:39 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-jwossk[200956]: 2025-10-11T01:42:39.328+0000 7f4fc08ac140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member
Oct 11 01:42:39 compute-0 ceph-mgr[200960]: mgr[py] Module balancer has missing NOTIFY_TYPES member
Oct 11 01:42:39 compute-0 ceph-mgr[200960]: mgr[py] Loading python module 'cephadm'
Oct 11 01:42:39 compute-0 sudo[201118]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:39 compute-0 sudo[201118]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:39 compute-0 sudo[201118]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:39 compute-0 sudo[201143]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ls
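[editor's note] This is the cephadm module's serve loop refreshing its daemon inventory: it runs the copied cephadm binary with `ls`, which prints a JSON array with one entry per daemon deployed on the host. A sketch of consuming that output, assuming the usual `<type>.<id>` naming convention for the "name" field (the exact schema may vary between releases):

    import json
    import subprocess
    from collections import Counter

    # Count deployed daemons per type from `cephadm ls` (run as root, as the
    # sudo rule above does). Names look like "mgr.compute-0.jwossk".
    out = subprocess.run(["sudo", "cephadm", "ls"],
                         check=True, capture_output=True, text=True).stdout
    daemons = json.loads(out)
    print(Counter(d["name"].split(".", 1)[0] for d in daemons))
    # e.g. Counter({'mgr': 2, 'mon': 1})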
Oct 11 01:42:39 compute-0 sudo[201143]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e3 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:42:39 compute-0 ceph-mon[191930]: pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:39 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:39 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:39 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:39 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:39 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1054900619' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
Oct 11 01:42:40 compute-0 podman[201221]: 2025-10-11 01:42:40.255069313 +0000 UTC m=+0.136220858 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 01:42:40 compute-0 podman[201259]: 2025-10-11 01:42:40.343873742 +0000 UTC m=+0.087311201 container exec ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 01:42:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:40 compute-0 podman[201258]: 2025-10-11 01:42:40.36858423 +0000 UTC m=+0.114700570 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, version=9.4, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.expose-services=, vcs-type=git, config_id=edpm, io.buildah.version=1.29.0, architecture=x86_64, name=ubi9, container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, release=1214.1726694543, com.redhat.component=ubi9-container, build-date=2024-09-18T21:23:30, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, distribution-scope=public, maintainer=Red Hat, Inc., release-0.7.12=, summary=Provides the latest release of Red Hat Universal Base Image 9.)
Oct 11 01:42:40 compute-0 podman[201257]: 2025-10-11 01:42:40.39818414 +0000 UTC m=+0.150608764 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team)
Oct 11 01:42:40 compute-0 podman[201259]: 2025-10-11 01:42:40.444719703 +0000 UTC m=+0.188157162 container exec_died ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:42:41 compute-0 sudo[201143]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:42:41 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:42:41 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:42:41 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:42:41 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:42:41 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:42:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:42:41 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:42:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:42:41 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:41 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev b44ff243-acfa-4347-a27b-7aef52c63908 does not exist
Oct 11 01:42:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.mon}] v 0) v1
Oct 11 01:42:41 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:41 compute-0 ceph-mgr[192233]: [progress INFO root] update: starting ev fa4a756f-c66f-452f-9ce3-33725e1f132b (Updating mgr deployment (-1 -> 1))
Oct 11 01:42:41 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.serve] Removing daemon mgr.compute-0.jwossk from compute-0 -- ports [8765]
Oct 11 01:42:41 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Removing daemon mgr.compute-0.jwossk from compute-0 -- ports [8765]
Oct 11 01:42:41 compute-0 sudo[201392]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:41 compute-0 sudo[201392]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:41 compute-0 sudo[201392]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:41 compute-0 sudo[201417]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:42:41 compute-0 sudo[201417]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:41 compute-0 sudo[201417]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:41 compute-0 ceph-mgr[192233]: [progress INFO root] Writing back 2 completed events
Oct 11 01:42:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/progress/completed}] v 0) v1
Oct 11 01:42:41 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:41 compute-0 ceph-mgr[200960]: mgr[py] Loading python module 'crash'
Oct 11 01:42:41 compute-0 sudo[201442]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:41 compute-0 sudo[201442]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:41 compute-0 sudo[201442]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:41 compute-0 sudo[201467]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 rm-daemon --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --name mgr.compute-0.jwossk --force --tcp-ports 8765
Oct 11 01:42:41 compute-0 sudo[201467]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:41 compute-0 ceph-mgr[200960]: mgr[py] Module crash has missing NOTIFY_TYPES member
Oct 11 01:42:41 compute-0 ceph-mgr[200960]: mgr[py] Loading python module 'dashboard'
Oct 11 01:42:41 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-jwossk[200956]: 2025-10-11T01:42:41.794+0000 7f4fc08ac140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
Oct 11 01:42:42 compute-0 ceph-mon[191930]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:42:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:42:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:42 compute-0 ceph-mon[191930]: Removing daemon mgr.compute-0.jwossk from compute-0 -- ports [8765]
Oct 11 01:42:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:42 compute-0 systemd[1]: Stopping Ceph mgr.compute-0.jwossk for 3c7617c3-7a20-523e-a9de-20c0d6ba41da...
Oct 11 01:42:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:42 compute-0 podman[201557]: 2025-10-11 01:42:42.629720016 +0000 UTC m=+0.111800428 container died 10851b4f39ea40f06725d27f40874161245f96d424a9eb2c50a7904d43b723c6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-jwossk, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:42:42 compute-0 systemd[1]: var-lib-containers-storage-overlay-fd75ad0348e8f7e4b9c978afa82850aeeb16a31974371e07e10720f9cfcdf3cc-merged.mount: Deactivated successfully.
Oct 11 01:42:42 compute-0 podman[201557]: 2025-10-11 01:42:42.710471491 +0000 UTC m=+0.192551873 container remove 10851b4f39ea40f06725d27f40874161245f96d424a9eb2c50a7904d43b723c6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-jwossk, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS)
Oct 11 01:42:42 compute-0 bash[201557]: ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-jwossk
Oct 11 01:42:42 compute-0 systemd[1]: ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da@mgr.compute-0.jwossk.service: Main process exited, code=exited, status=143/n/a
Oct 11 01:42:43 compute-0 systemd[1]: ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da@mgr.compute-0.jwossk.service: Failed with result 'exit-code'.
Oct 11 01:42:43 compute-0 systemd[1]: Stopped Ceph mgr.compute-0.jwossk for 3c7617c3-7a20-523e-a9de-20c0d6ba41da.
Oct 11 01:42:43 compute-0 systemd[1]: ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da@mgr.compute-0.jwossk.service: Consumed 6.090s CPU time.
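[editor's note] Exit status 143 is the shell convention for termination by SIGTERM (128 + 15), which is what a normal stop delivers; the unit's "Failed with result 'exit-code'" above is systemd reporting that nonzero status during the deliberate removal of the temporary mgr, not a daemon crash. A quick arithmetic check:

    import signal

    # 143 = 128 + SIGTERM(15): "killed by SIGTERM" as encoded in an exit code.
    assert 128 + int(signal.SIGTERM) == 143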
Oct 11 01:42:43 compute-0 systemd[1]: Reloading.
Oct 11 01:42:43 compute-0 systemd-rc-local-generator[201636]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:42:43 compute-0 systemd-sysv-generator[201643]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:42:43 compute-0 sudo[201467]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:43 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.services.cephadmservice] Removing key for mgr.compute-0.jwossk
Oct 11 01:42:43 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Removing key for mgr.compute-0.jwossk
Oct 11 01:42:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth rm", "entity": "mgr.compute-0.jwossk"} v 0) v1
Oct 11 01:42:43 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth rm", "entity": "mgr.compute-0.jwossk"}]: dispatch
Oct 11 01:42:43 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "auth rm", "entity": "mgr.compute-0.jwossk"}]': finished
Oct 11 01:42:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.mgr}] v 0) v1
Oct 11 01:42:43 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:43 compute-0 ceph-mgr[192233]: [progress INFO root] complete: finished ev fa4a756f-c66f-452f-9ce3-33725e1f132b (Updating mgr deployment (-1 -> 1))
Oct 11 01:42:43 compute-0 ceph-mgr[192233]: [progress INFO root] Completed event fa4a756f-c66f-452f-9ce3-33725e1f132b (Updating mgr deployment (-1 -> 1)) in 3 seconds
Oct 11 01:42:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.mgr}] v 0) v1
Oct 11 01:42:43 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:43 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 06a24ed8-982d-46f1-9f4e-81ced66bf2a0 does not exist
Oct 11 01:42:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:42:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:42:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:42:43 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:42:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:42:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:42:43 compute-0 sudo[201649]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:43 compute-0 sudo[201649]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:43 compute-0 sudo[201649]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:43 compute-0 sudo[201674]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:42:43 compute-0 sudo[201674]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:43 compute-0 sudo[201674]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:44 compute-0 ceph-mon[191930]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:44 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth rm", "entity": "mgr.compute-0.jwossk"}]: dispatch
Oct 11 01:42:44 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "auth rm", "entity": "mgr.compute-0.jwossk"}]': finished
Oct 11 01:42:44 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:44 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:44 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:42:44 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:42:44 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:42:44 compute-0 sudo[201699]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:42:44 compute-0 sudo[201699]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:44 compute-0 sudo[201699]: pam_unix(sudo:session): session closed for user root
Oct 11 01:42:44 compute-0 sudo[201724]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
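[editor's note] The osd.default_drive_group spec resolves to a single `ceph-volume lvm batch` run over three pre-created logical volumes: `--no-auto` disables device auto-sorting, `--yes` skips the interactive confirmation, and `--no-systemd` leaves unit management to cephadm. A sketch of how that argument vector is assembled, with the fsid and LV paths copied from the logged command (the cephadm wrapper path and flags are abbreviated):

    # Mirror the cephadm ceph-volume wrapper invocation logged at 01:42:44.
    FSID = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"
    DATA_LVS = [f"/dev/ceph_vg{i}/ceph_lv{i}" for i in range(3)]

    cmd = ["cephadm",
           "--env", "CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group",
           "ceph-volume", "--fsid", FSID, "--config-json", "-",
           "--", "lvm", "batch", "--no-auto", *DATA_LVS,
           "--yes", "--no-systemd"]
    print(" ".join(cmd))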
Oct 11 01:42:44 compute-0 sudo[201724]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:42:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v18: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e3 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:42:44 compute-0 podman[201774]: 2025-10-11 01:42:44.876641347 +0000 UTC m=+0.150508938 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute)
Oct 11 01:42:44 compute-0 podman[201795]: 2025-10-11 01:42:44.904994477 +0000 UTC m=+0.079914400 container create 4e9fd4d63364642d7f0f4a5424196b4725673a65c48cd351e76b30b68e6a6bba (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=exciting_aryabhata, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 01:42:44 compute-0 podman[201795]: 2025-10-11 01:42:44.872724836 +0000 UTC m=+0.047644829 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:42:44 compute-0 systemd[1]: Started libpod-conmon-4e9fd4d63364642d7f0f4a5424196b4725673a65c48cd351e76b30b68e6a6bba.scope.
Oct 11 01:42:45 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:45 compute-0 podman[201795]: 2025-10-11 01:42:45.049334674 +0000 UTC m=+0.224254667 container init 4e9fd4d63364642d7f0f4a5424196b4725673a65c48cd351e76b30b68e6a6bba (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=exciting_aryabhata, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS)
Oct 11 01:42:45 compute-0 podman[201795]: 2025-10-11 01:42:45.069601803 +0000 UTC m=+0.244521746 container start 4e9fd4d63364642d7f0f4a5424196b4725673a65c48cd351e76b30b68e6a6bba (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=exciting_aryabhata, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef)
Oct 11 01:42:45 compute-0 podman[201795]: 2025-10-11 01:42:45.077631407 +0000 UTC m=+0.252551570 container attach 4e9fd4d63364642d7f0f4a5424196b4725673a65c48cd351e76b30b68e6a6bba (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=exciting_aryabhata, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:42:45 compute-0 exciting_aryabhata[201821]: 167 167
Oct 11 01:42:45 compute-0 systemd[1]: libpod-4e9fd4d63364642d7f0f4a5424196b4725673a65c48cd351e76b30b68e6a6bba.scope: Deactivated successfully.
Oct 11 01:42:45 compute-0 ceph-mon[191930]: Removing key for mgr.compute-0.jwossk
Oct 11 01:42:45 compute-0 podman[201826]: 2025-10-11 01:42:45.168748932 +0000 UTC m=+0.057124264 container died 4e9fd4d63364642d7f0f4a5424196b4725673a65c48cd351e76b30b68e6a6bba (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=exciting_aryabhata, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:42:45 compute-0 systemd[1]: var-lib-containers-storage-overlay-4d5a50d8083fa48432bcdc7b728908b01417920c9a197973677ada9193582582-merged.mount: Deactivated successfully.
Oct 11 01:42:45 compute-0 podman[201826]: 2025-10-11 01:42:45.248618474 +0000 UTC m=+0.136993766 container remove 4e9fd4d63364642d7f0f4a5424196b4725673a65c48cd351e76b30b68e6a6bba (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=exciting_aryabhata, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507)
Oct 11 01:42:45 compute-0 systemd[1]: libpod-conmon-4e9fd4d63364642d7f0f4a5424196b4725673a65c48cd351e76b30b68e6a6bba.scope: Deactivated successfully.
Oct 11 01:42:45 compute-0 podman[201847]: 2025-10-11 01:42:45.558552056 +0000 UTC m=+0.082733146 container create 31d846995bb0fca82ab9c4d4bb89fdb615646d214d724ace90837247769d278b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_chandrasekhar, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:42:45 compute-0 podman[201847]: 2025-10-11 01:42:45.525772816 +0000 UTC m=+0.049953906 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:42:45 compute-0 systemd[1]: Started libpod-conmon-31d846995bb0fca82ab9c4d4bb89fdb615646d214d724ace90837247769d278b.scope.
Oct 11 01:42:45 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:42:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/83b7ee39fca7eed00869c6cda2bd9066f97403b1d4b5f3e6f5dda1da1487e945/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/83b7ee39fca7eed00869c6cda2bd9066f97403b1d4b5f3e6f5dda1da1487e945/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/83b7ee39fca7eed00869c6cda2bd9066f97403b1d4b5f3e6f5dda1da1487e945/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/83b7ee39fca7eed00869c6cda2bd9066f97403b1d4b5f3e6f5dda1da1487e945/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/83b7ee39fca7eed00869c6cda2bd9066f97403b1d4b5f3e6f5dda1da1487e945/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:42:45 compute-0 podman[201847]: 2025-10-11 01:42:45.725377162 +0000 UTC m=+0.249558332 container init 31d846995bb0fca82ab9c4d4bb89fdb615646d214d724ace90837247769d278b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_chandrasekhar, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 01:42:45 compute-0 podman[201847]: 2025-10-11 01:42:45.746689338 +0000 UTC m=+0.270870428 container start 31d846995bb0fca82ab9c4d4bb89fdb615646d214d724ace90837247769d278b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_chandrasekhar, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3)
Oct 11 01:42:45 compute-0 podman[201847]: 2025-10-11 01:42:45.753413255 +0000 UTC m=+0.277594415 container attach 31d846995bb0fca82ab9c4d4bb89fdb615646d214d724ace90837247769d278b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_chandrasekhar, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:42:46 compute-0 ceph-mon[191930]: pgmap v18: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v19: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:46 compute-0 ceph-mgr[192233]: [progress INFO root] Writing back 3 completed events
Oct 11 01:42:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/progress/completed}] v 0) v1
Oct 11 01:42:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:47 compute-0 goofy_chandrasekhar[201865]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:42:47 compute-0 goofy_chandrasekhar[201865]: --> relative data size: 1.0
Oct 11 01:42:47 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph-authtool --gen-print-key
Oct 11 01:42:47 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new a9c7940d-c154-46ef-9c18-8ba55dddd3d6
Oct 11 01:42:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd new", "uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6"} v 0) v1
Oct 11 01:42:47 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/4150954949' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6"}]: dispatch
Oct 11 01:42:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e3 do_prune osdmap full prune enabled
Oct 11 01:42:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e3 encode_pending skipping prime_pg_temp; mapping job did not start
Oct 11 01:42:47 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/4150954949' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6"}]': finished
Oct 11 01:42:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e4 e4: 1 total, 0 up, 1 in
Oct 11 01:42:47 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e4: 1 total, 0 up, 1 in
Oct 11 01:42:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 0} v 0) v1
Oct 11 01:42:47 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:42:47 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.0: (2) No such file or directory
Oct 11 01:42:47 compute-0 ceph-mon[191930]: pgmap v19: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:42:47 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/4150954949' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6"}]: dispatch
Oct 11 01:42:47 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/4150954949' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6"}]': finished
Oct 11 01:42:47 compute-0 ceph-mon[191930]: osdmap e4: 1 total, 0 up, 1 in
Oct 11 01:42:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:42:47 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph-authtool --gen-print-key
Oct 11 01:42:47 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0
Oct 11 01:42:47 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -h ceph:ceph /dev/ceph_vg0/ceph_lv0
Oct 11 01:42:47 compute-0 lvm[201927]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Oct 11 01:42:47 compute-0 lvm[201927]: VG ceph_vg0 finished
Oct 11 01:42:47 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0
Oct 11 01:42:47 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ln -s /dev/ceph_vg0/ceph_lv0 /var/lib/ceph/osd/ceph-0/block
Oct 11 01:42:47 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
Oct 11 01:42:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon getmap"} v 0) v1
Oct 11 01:42:48 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3109436244' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
Oct 11 01:42:48 compute-0 goofy_chandrasekhar[201865]:  stderr: got monmap epoch 1
Oct 11 01:42:48 compute-0 goofy_chandrasekhar[201865]: --> Creating keyring file for osd.0
Oct 11 01:42:48 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring
Oct 11 01:42:48 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/
Oct 11 01:42:48 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osdspec-affinity default_drive_group --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid a9c7940d-c154-46ef-9c18-8ba55dddd3d6 --setuser ceph --setgroup ceph
Oct 11 01:42:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v21: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:48 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : Health check cleared: TOO_FEW_OSDS (was: OSD count 0 < osd_pool_default_size 1)
Oct 11 01:42:48 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : Cluster is now healthy
Oct 11 01:42:48 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3109436244' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
Oct 11 01:42:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e4 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:42:49 compute-0 ceph-mon[191930]: pgmap v21: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:49 compute-0 ceph-mon[191930]: Health check cleared: TOO_FEW_OSDS (was: OSD count 0 < osd_pool_default_size 1)
Oct 11 01:42:49 compute-0 ceph-mon[191930]: Cluster is now healthy
Oct 11 01:42:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:50 compute-0 goofy_chandrasekhar[201865]:  stderr: 2025-10-11T01:42:48.393+0000 7fb6df5f7740 -1 bluestore(/var/lib/ceph/osd/ceph-0//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
Oct 11 01:42:50 compute-0 goofy_chandrasekhar[201865]:  stderr: 2025-10-11T01:42:48.394+0000 7fb6df5f7740 -1 bluestore(/var/lib/ceph/osd/ceph-0//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
Oct 11 01:42:50 compute-0 goofy_chandrasekhar[201865]:  stderr: 2025-10-11T01:42:48.394+0000 7fb6df5f7740 -1 bluestore(/var/lib/ceph/osd/ceph-0//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
Oct 11 01:42:50 compute-0 goofy_chandrasekhar[201865]:  stderr: 2025-10-11T01:42:48.394+0000 7fb6df5f7740 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid
Oct 11 01:42:50 compute-0 goofy_chandrasekhar[201865]: --> ceph-volume lvm prepare successful for: ceph_vg0/ceph_lv0
Oct 11 01:42:51 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Oct 11 01:42:51 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph_vg0/ceph_lv0 --path /var/lib/ceph/osd/ceph-0 --no-mon-config
Oct 11 01:42:51 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ln -snf /dev/ceph_vg0/ceph_lv0 /var/lib/ceph/osd/ceph-0/block
Oct 11 01:42:51 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block
Oct 11 01:42:51 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0
Oct 11 01:42:51 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Oct 11 01:42:51 compute-0 goofy_chandrasekhar[201865]: --> ceph-volume lvm activate successful for osd ID: 0
Oct 11 01:42:51 compute-0 goofy_chandrasekhar[201865]: --> ceph-volume lvm create successful for: ceph_vg0/ceph_lv0
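
The run of "Running command:" lines above is ceph-volume's prepare-and-activate sequence for osd.0; the repeated "_read_bdev_label ... Malformed input" stderr at 01:42:50 is mkfs probing the still-unlabeled LV, and the sequence nonetheless ends with "prepare successful". A condensed sketch of the equivalent manual flow, assuming the same ceph_vg0/ceph_lv0 LV and the bootstrap-osd keyring shown in the log (an illustration of the recorded steps, not the cephadm implementation; the keyfile piping on stdin is omitted):

    #!/bin/bash
    # Sketch of the prepare+activate flow recorded above for osd.0.
    set -euo pipefail
    UUID=$(uuidgen)    # the log used a9c7940d-c154-46ef-9c18-8ba55dddd3d6

    # Register the OSD with the monitors; "osd new" prints the allocated id.
    OSD_ID=$(ceph --cluster ceph --name client.bootstrap-osd \
        --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd new "$UUID")

    # BlueStore keeps only small metadata files in the data dir, so tmpfs suffices.
    mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-"$OSD_ID"
    ln -s /dev/ceph_vg0/ceph_lv0 /var/lib/ceph/osd/ceph-"$OSD_ID"/block

    # Fetch the current monmap, then format the block device as BlueStore.
    ceph --cluster ceph --name client.bootstrap-osd \
        --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring \
        mon getmap -o /var/lib/ceph/osd/ceph-"$OSD_ID"/activate.monmap
    ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i "$OSD_ID" \
        --monmap /var/lib/ceph/osd/ceph-"$OSD_ID"/activate.monmap \
        --osd-data /var/lib/ceph/osd/ceph-"$OSD_ID"/ --osd-uuid "$UUID" \
        --setuser ceph --setgroup ceph

    # Activate: rebuild the data dir from the device label, fix ownership.
    ceph-bluestore-tool --cluster=ceph prime-osd-dir \
        --dev /dev/ceph_vg0/ceph_lv0 \
        --path /var/lib/ceph/osd/ceph-"$OSD_ID" --no-mon-config
    chown -R ceph:ceph /var/lib/ceph/osd/ceph-"$OSD_ID"

osd.1 and osd.2 below follow the identical sequence against ceph_vg1/ceph_lv1 and ceph_vg2/ceph_lv2.
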
Oct 11 01:42:51 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph-authtool --gen-print-key
Oct 11 01:42:51 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 6af45214-b1a1-4565-9175-30c80d9ec207
Oct 11 01:42:51 compute-0 ceph-mon[191930]: pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd new", "uuid": "6af45214-b1a1-4565-9175-30c80d9ec207"} v 0) v1
Oct 11 01:42:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/462155972' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "6af45214-b1a1-4565-9175-30c80d9ec207"}]: dispatch
Oct 11 01:42:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e4 do_prune osdmap full prune enabled
Oct 11 01:42:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e4 encode_pending skipping prime_pg_temp; mapping job did not start
Oct 11 01:42:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/462155972' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "6af45214-b1a1-4565-9175-30c80d9ec207"}]': finished
Oct 11 01:42:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e5 e5: 2 total, 0 up, 2 in
Oct 11 01:42:51 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e5: 2 total, 0 up, 2 in
Oct 11 01:42:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 0} v 0) v1
Oct 11 01:42:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:42:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 1} v 0) v1
Oct 11 01:42:51 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.0: (2) No such file or directory
Oct 11 01:42:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:42:51 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.1: (2) No such file or directory
Oct 11 01:42:51 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph-authtool --gen-print-key
Oct 11 01:42:51 compute-0 lvm[202875]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Oct 11 01:42:51 compute-0 lvm[202875]: VG ceph_vg1 finished
Oct 11 01:42:51 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-1
Oct 11 01:42:51 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -h ceph:ceph /dev/ceph_vg1/ceph_lv1
Oct 11 01:42:51 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1
Oct 11 01:42:52 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ln -s /dev/ceph_vg1/ceph_lv1 /var/lib/ceph/osd/ceph-1/block
Oct 11 01:42:52 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-1/activate.monmap
Oct 11 01:42:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v24: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon getmap"} v 0) v1
Oct 11 01:42:52 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3497425688' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
Oct 11 01:42:52 compute-0 goofy_chandrasekhar[201865]:  stderr: got monmap epoch 1
Oct 11 01:42:52 compute-0 goofy_chandrasekhar[201865]: --> Creating keyring file for osd.1
Oct 11 01:42:52 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/keyring
Oct 11 01:42:52 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/
Oct 11 01:42:52 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 1 --monmap /var/lib/ceph/osd/ceph-1/activate.monmap --keyfile - --osdspec-affinity default_drive_group --osd-data /var/lib/ceph/osd/ceph-1/ --osd-uuid 6af45214-b1a1-4565-9175-30c80d9ec207 --setuser ceph --setgroup ceph
Oct 11 01:42:52 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/462155972' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "6af45214-b1a1-4565-9175-30c80d9ec207"}]: dispatch
Oct 11 01:42:52 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/462155972' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "6af45214-b1a1-4565-9175-30c80d9ec207"}]': finished
Oct 11 01:42:52 compute-0 ceph-mon[191930]: osdmap e5: 2 total, 0 up, 2 in
Oct 11 01:42:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:42:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:42:52 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3497425688' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
Oct 11 01:42:53 compute-0 ceph-mon[191930]: pgmap v24: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v25: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e5 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:42:55 compute-0 goofy_chandrasekhar[201865]:  stderr: 2025-10-11T01:42:52.666+0000 7f70d275c740 -1 bluestore(/var/lib/ceph/osd/ceph-1//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
Oct 11 01:42:55 compute-0 goofy_chandrasekhar[201865]:  stderr: 2025-10-11T01:42:52.667+0000 7f70d275c740 -1 bluestore(/var/lib/ceph/osd/ceph-1//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
Oct 11 01:42:55 compute-0 goofy_chandrasekhar[201865]:  stderr: 2025-10-11T01:42:52.667+0000 7f70d275c740 -1 bluestore(/var/lib/ceph/osd/ceph-1//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
Oct 11 01:42:55 compute-0 goofy_chandrasekhar[201865]:  stderr: 2025-10-11T01:42:52.668+0000 7f70d275c740 -1 bluestore(/var/lib/ceph/osd/ceph-1/) _read_fsid unparsable uuid
Oct 11 01:42:55 compute-0 goofy_chandrasekhar[201865]: --> ceph-volume lvm prepare successful for: ceph_vg1/ceph_lv1
Oct 11 01:42:55 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
Oct 11 01:42:55 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph_vg1/ceph_lv1 --path /var/lib/ceph/osd/ceph-1 --no-mon-config
Oct 11 01:42:55 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ln -snf /dev/ceph_vg1/ceph_lv1 /var/lib/ceph/osd/ceph-1/block
Oct 11 01:42:55 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block
Oct 11 01:42:55 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1
Oct 11 01:42:55 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
Oct 11 01:42:55 compute-0 goofy_chandrasekhar[201865]: --> ceph-volume lvm activate successful for osd ID: 1
Oct 11 01:42:55 compute-0 goofy_chandrasekhar[201865]: --> ceph-volume lvm create successful for: ceph_vg1/ceph_lv1
Oct 11 01:42:55 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph-authtool --gen-print-key
Oct 11 01:42:55 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 8fabd243-1f3b-4c55-a0cd-bf4f8313cb83
Oct 11 01:42:55 compute-0 ceph-mon[191930]: pgmap v25: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd new", "uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83"} v 0) v1
Oct 11 01:42:55 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1294033669' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83"}]: dispatch
Oct 11 01:42:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e5 do_prune osdmap full prune enabled
Oct 11 01:42:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e5 encode_pending skipping prime_pg_temp; mapping job did not start
Oct 11 01:42:55 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1294033669' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83"}]': finished
Oct 11 01:42:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e6 e6: 3 total, 0 up, 3 in
Oct 11 01:42:56 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e6: 3 total, 0 up, 3 in
Oct 11 01:42:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 0} v 0) v1
Oct 11 01:42:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:42:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 1} v 0) v1
Oct 11 01:42:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:42:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 2} v 0) v1
Oct 11 01:42:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:42:56 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.0: (2) No such file or directory
Oct 11 01:42:56 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.1: (2) No such file or directory
Oct 11 01:42:56 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.2: (2) No such file or directory
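
The "failed to return metadata" errors above are the mgr polling "osd metadata" for OSDs that already exist in the osdmap (e6: 3 total, 0 up) but whose daemons have not yet booted and registered metadata, hence the (2) ENOENT. A quick check once the daemons start, assuming admin credentials on the node:

    ceph osd tree        # the three OSDs should move from down to up
    ceph osd metadata 0  # returns a JSON blob once osd.0 has booted at least once
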
Oct 11 01:42:56 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph-authtool --gen-print-key
Oct 11 01:42:56 compute-0 lvm[203824]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Oct 11 01:42:56 compute-0 lvm[203824]: VG ceph_vg2 finished
Oct 11 01:42:56 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-2
Oct 11 01:42:56 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -h ceph:ceph /dev/ceph_vg2/ceph_lv2
Oct 11 01:42:56 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Oct 11 01:42:56 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ln -s /dev/ceph_vg2/ceph_lv2 /var/lib/ceph/osd/ceph-2/block
Oct 11 01:42:56 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-2/activate.monmap
Oct 11 01:42:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v27: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:42:56
Oct 11 01:42:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:42:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:42:56 compute-0 ceph-mgr[192233]: [balancer INFO root] No pools available
Oct 11 01:42:56 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:42:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:42:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:42:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:42:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:42:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:42:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:42:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:42:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:42:56 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1294033669' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83"}]: dispatch
Oct 11 01:42:56 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1294033669' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83"}]': finished
Oct 11 01:42:56 compute-0 ceph-mon[191930]: osdmap e6: 3 total, 0 up, 3 in
Oct 11 01:42:56 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:42:56 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:42:56 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:42:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon getmap"} v 0) v1
Oct 11 01:42:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2429763630' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
Oct 11 01:42:56 compute-0 goofy_chandrasekhar[201865]:  stderr: got monmap epoch 1
Oct 11 01:42:56 compute-0 goofy_chandrasekhar[201865]: --> Creating keyring file for osd.2
Oct 11 01:42:56 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/keyring
Oct 11 01:42:56 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/
Oct 11 01:42:56 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 2 --monmap /var/lib/ceph/osd/ceph-2/activate.monmap --keyfile - --osdspec-affinity default_drive_group --osd-data /var/lib/ceph/osd/ceph-2/ --osd-uuid 8fabd243-1f3b-4c55-a0cd-bf4f8313cb83 --setuser ceph --setgroup ceph
Oct 11 01:42:57 compute-0 ceph-mon[191930]: pgmap v27: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:57 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2429763630' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
Oct 11 01:42:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v28: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:59 compute-0 podman[204643]: 2025-10-11 01:42:59.2396941 +0000 UTC m=+0.124224574 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 01:42:59 compute-0 podman[204644]: 2025-10-11 01:42:59.24956147 +0000 UTC m=+0.131154203 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-type=git, io.openshift.tags=minimal rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, name=ubi9-minimal, vendor=Red Hat, Inc., io.buildah.version=1.33.7, release=1755695350, architecture=x86_64, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, version=9.6, build-date=2025-08-20T13:12:41, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-minimal-container, maintainer=Red Hat, Inc., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, container_name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.expose-services=, managed_by=edpm_ansible)
Oct 11 01:42:59 compute-0 goofy_chandrasekhar[201865]:  stderr: 2025-10-11T01:42:56.963+0000 7f7b1cd60740 -1 bluestore(/var/lib/ceph/osd/ceph-2//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
Oct 11 01:42:59 compute-0 goofy_chandrasekhar[201865]:  stderr: 2025-10-11T01:42:56.964+0000 7f7b1cd60740 -1 bluestore(/var/lib/ceph/osd/ceph-2//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
Oct 11 01:42:59 compute-0 goofy_chandrasekhar[201865]:  stderr: 2025-10-11T01:42:56.964+0000 7f7b1cd60740 -1 bluestore(/var/lib/ceph/osd/ceph-2//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
Oct 11 01:42:59 compute-0 goofy_chandrasekhar[201865]:  stderr: 2025-10-11T01:42:56.964+0000 7f7b1cd60740 -1 bluestore(/var/lib/ceph/osd/ceph-2/) _read_fsid unparsable uuid
Oct 11 01:42:59 compute-0 goofy_chandrasekhar[201865]: --> ceph-volume lvm prepare successful for: ceph_vg2/ceph_lv2
Oct 11 01:42:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e6 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:42:59 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2
Oct 11 01:42:59 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph_vg2/ceph_lv2 --path /var/lib/ceph/osd/ceph-2 --no-mon-config
Oct 11 01:42:59 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/ln -snf /dev/ceph_vg2/ceph_lv2 /var/lib/ceph/osd/ceph-2/block
Oct 11 01:42:59 compute-0 podman[157119]: time="2025-10-11T01:42:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:42:59 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block
Oct 11 01:42:59 compute-0 ceph-mon[191930]: pgmap v28: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:42:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:42:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 25444 "" "Go-http-client/1.1"
Oct 11 01:42:59 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Oct 11 01:42:59 compute-0 goofy_chandrasekhar[201865]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2
Oct 11 01:42:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:42:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 4849 "" "Go-http-client/1.1"
Oct 11 01:42:59 compute-0 goofy_chandrasekhar[201865]: --> ceph-volume lvm activate successful for osd ID: 2
Oct 11 01:42:59 compute-0 goofy_chandrasekhar[201865]: --> ceph-volume lvm create successful for: ceph_vg2/ceph_lv2
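
The PV lines in this section (/dev/loop3, /dev/loop4, /dev/loop5) show all three OSDs are backed by loop devices rather than physical disks, matching the earlier "passed data devices: 0 physical, 3 LVM" and typical of a lab or CI deployment. The setup step itself is not in this log; a hypothetical sketch of how one such ~20 GiB loop-backed VG could have been prepared, reusing the log's naming:

    # Hypothetical prep for one loop-backed OSD disk (not recorded in this log):
    truncate -s 20G /var/lib/ceph-osd2.img
    LOOP=$(losetup --find --show /var/lib/ceph-osd2.img)   # e.g. /dev/loop5
    pvcreate "$LOOP"
    vgcreate ceph_vg2 "$LOOP"
    lvcreate -l 100%FREE -n ceph_lv2 ceph_vg2
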
Oct 11 01:42:59 compute-0 systemd[1]: libpod-31d846995bb0fca82ab9c4d4bb89fdb615646d214d724ace90837247769d278b.scope: Deactivated successfully.
Oct 11 01:42:59 compute-0 systemd[1]: libpod-31d846995bb0fca82ab9c4d4bb89fdb615646d214d724ace90837247769d278b.scope: Consumed 8.737s CPU time.
Oct 11 01:42:59 compute-0 podman[201847]: 2025-10-11 01:42:59.870596208 +0000 UTC m=+14.394777358 container died 31d846995bb0fca82ab9c4d4bb89fdb615646d214d724ace90837247769d278b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_chandrasekhar, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:42:59 compute-0 systemd[1]: var-lib-containers-storage-overlay-83b7ee39fca7eed00869c6cda2bd9066f97403b1d4b5f3e6f5dda1da1487e945-merged.mount: Deactivated successfully.
Oct 11 01:42:59 compute-0 podman[201847]: 2025-10-11 01:42:59.99656336 +0000 UTC m=+14.520744450 container remove 31d846995bb0fca82ab9c4d4bb89fdb615646d214d724ace90837247769d278b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_chandrasekhar, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS)
Oct 11 01:43:00 compute-0 systemd[1]: libpod-conmon-31d846995bb0fca82ab9c4d4bb89fdb615646d214d724ace90837247769d278b.scope: Deactivated successfully.
Oct 11 01:43:00 compute-0 sudo[201724]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:00 compute-0 sudo[204803]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:00 compute-0 sudo[204803]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:00 compute-0 sudo[204803]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:00 compute-0 sudo[204828]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:43:00 compute-0 sudo[204828]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v29: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:00 compute-0 sudo[204828]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:00 compute-0 sudo[204853]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:00 compute-0 sudo[204853]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:00 compute-0 sudo[204853]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:00 compute-0 sudo[204878]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:43:00 compute-0 sudo[204878]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:01 compute-0 podman[204942]: 2025-10-11 01:43:01.200042382 +0000 UTC m=+0.069501393 container create e9ec410e23a6fcab5dd62bf13ea760ad3770db8c18fc77a4de11dfe452ece545 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_solomon, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0)
Oct 11 01:43:01 compute-0 podman[204942]: 2025-10-11 01:43:01.175548065 +0000 UTC m=+0.045007156 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:01 compute-0 systemd[1]: Started libpod-conmon-e9ec410e23a6fcab5dd62bf13ea760ad3770db8c18fc77a4de11dfe452ece545.scope.
Oct 11 01:43:01 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:01 compute-0 podman[204942]: 2025-10-11 01:43:01.370180352 +0000 UTC m=+0.239639383 container init e9ec410e23a6fcab5dd62bf13ea760ad3770db8c18fc77a4de11dfe452ece545 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_solomon, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0)
Oct 11 01:43:01 compute-0 podman[204942]: 2025-10-11 01:43:01.387421263 +0000 UTC m=+0.256880304 container start e9ec410e23a6fcab5dd62bf13ea760ad3770db8c18fc77a4de11dfe452ece545 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_solomon, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 01:43:01 compute-0 podman[204942]: 2025-10-11 01:43:01.39439714 +0000 UTC m=+0.263856221 container attach e9ec410e23a6fcab5dd62bf13ea760ad3770db8c18fc77a4de11dfe452ece545 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_solomon, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:01 compute-0 elegant_solomon[204959]: 167 167
Oct 11 01:43:01 compute-0 systemd[1]: libpod-e9ec410e23a6fcab5dd62bf13ea760ad3770db8c18fc77a4de11dfe452ece545.scope: Deactivated successfully.
Oct 11 01:43:01 compute-0 podman[204942]: 2025-10-11 01:43:01.397776023 +0000 UTC m=+0.267235034 container died e9ec410e23a6fcab5dd62bf13ea760ad3770db8c18fc77a4de11dfe452ece545 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_solomon, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True)
Oct 11 01:43:01 compute-0 openstack_network_exporter[159265]: ERROR   01:43:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:43:01 compute-0 openstack_network_exporter[159265]: ERROR   01:43:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:43:01 compute-0 openstack_network_exporter[159265]: ERROR   01:43:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:43:01 compute-0 openstack_network_exporter[159265]: ERROR   01:43:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:43:01 compute-0 openstack_network_exporter[159265]: ERROR   01:43:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:43:01 compute-0 systemd[1]: var-lib-containers-storage-overlay-82034839fa6149de9c02cd6016bbde4d3ef122bbc0fef7415ecb80f9c6480735-merged.mount: Deactivated successfully.
Oct 11 01:43:01 compute-0 podman[204942]: 2025-10-11 01:43:01.477285479 +0000 UTC m=+0.346744490 container remove e9ec410e23a6fcab5dd62bf13ea760ad3770db8c18fc77a4de11dfe452ece545 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_solomon, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:43:01 compute-0 systemd[1]: libpod-conmon-e9ec410e23a6fcab5dd62bf13ea760ad3770db8c18fc77a4de11dfe452ece545.scope: Deactivated successfully.
Oct 11 01:43:01 compute-0 podman[204982]: 2025-10-11 01:43:01.76062656 +0000 UTC m=+0.093721951 container create 9abbd4b4215c5ec8f9d3935e0d10848b50f196a50717118b8c65ae5af4aa190a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_sammet, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3)
Oct 11 01:43:01 compute-0 ceph-mon[191930]: pgmap v29: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:01 compute-0 podman[204982]: 2025-10-11 01:43:01.725424557 +0000 UTC m=+0.058520008 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:01 compute-0 systemd[1]: Started libpod-conmon-9abbd4b4215c5ec8f9d3935e0d10848b50f196a50717118b8c65ae5af4aa190a.scope.
Oct 11 01:43:01 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4c4af2af81edca7b55078e0dc9f8a0cf3379633aae9a0548a3ac7e333167bb6e/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4c4af2af81edca7b55078e0dc9f8a0cf3379633aae9a0548a3ac7e333167bb6e/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4c4af2af81edca7b55078e0dc9f8a0cf3379633aae9a0548a3ac7e333167bb6e/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4c4af2af81edca7b55078e0dc9f8a0cf3379633aae9a0548a3ac7e333167bb6e/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:01 compute-0 podman[204982]: 2025-10-11 01:43:01.965927606 +0000 UTC m=+0.299023007 container init 9abbd4b4215c5ec8f9d3935e0d10848b50f196a50717118b8c65ae5af4aa190a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_sammet, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:43:01 compute-0 podman[204982]: 2025-10-11 01:43:01.996799124 +0000 UTC m=+0.329894515 container start 9abbd4b4215c5ec8f9d3935e0d10848b50f196a50717118b8c65ae5af4aa190a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_sammet, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 01:43:02 compute-0 podman[204982]: 2025-10-11 01:43:02.00404326 +0000 UTC m=+0.337138731 container attach 9abbd4b4215c5ec8f9d3935e0d10848b50f196a50717118b8c65ae5af4aa190a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_sammet, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 01:43:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v30: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]: {
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:     "0": [
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:         {
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "devices": [
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "/dev/loop3"
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             ],
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "lv_name": "ceph_lv0",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "lv_size": "21470642176",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "name": "ceph_lv0",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "tags": {
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.cluster_name": "ceph",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.crush_device_class": "",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.encrypted": "0",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.osd_id": "0",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.type": "block",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.vdo": "0"
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             },
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "type": "block",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "vg_name": "ceph_vg0"
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:         }
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:     ],
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:     "1": [
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:         {
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "devices": [
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "/dev/loop4"
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             ],
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "lv_name": "ceph_lv1",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "lv_size": "21470642176",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "name": "ceph_lv1",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "tags": {
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.cluster_name": "ceph",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.crush_device_class": "",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.encrypted": "0",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.osd_id": "1",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.type": "block",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.vdo": "0"
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             },
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "type": "block",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "vg_name": "ceph_vg1"
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:         }
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:     ],
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:     "2": [
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:         {
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "devices": [
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "/dev/loop5"
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             ],
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "lv_name": "ceph_lv2",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "lv_size": "21470642176",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "name": "ceph_lv2",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "tags": {
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.cluster_name": "ceph",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.crush_device_class": "",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.encrypted": "0",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.osd_id": "2",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.type": "block",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:                 "ceph.vdo": "0"
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             },
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "type": "block",
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:             "vg_name": "ceph_vg2"
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:         }
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]:     ]
Oct 11 01:43:02 compute-0 dazzling_sammet[204999]: }
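The JSON block the dazzling_sammet container just printed is ceph-volume's LVM listing keyed by OSD id; the field names suggest `ceph-volume lvm list --format json` output. A minimal parsing sketch follows (not cephadm's own code; `raw` is an abbreviated copy of the "1" entry from the block above):

```python
# A minimal parsing sketch, not cephadm's code. `raw` is an abbreviated copy
# of the osd "1" entry from the JSON block logged above.
import json

raw = """
{
  "1": [
    {
      "devices": ["/dev/loop4"],
      "lv_path": "/dev/ceph_vg1/ceph_lv1",
      "lv_size": "21470642176",
      "tags": {"ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207"}
    }
  ]
}
"""

def summarize(listing_json: str) -> dict:
    listing = json.loads(listing_json)
    summary = {}
    for osd_id, lvs in listing.items():
        for lv in lvs:
            summary[int(osd_id)] = {
                "lv_path": lv["lv_path"],
                "devices": lv["devices"],
                # lv_size is a byte count serialized as a string:
                # 21470642176 B is just under 20 GiB.
                "size_gib": round(int(lv["lv_size"]) / 2**30, 2),
                "osd_fsid": lv["tags"]["ceph.osd_fsid"],
            }
    return summary

print(summarize(raw))  # {1: {'lv_path': '/dev/ceph_vg1/ceph_lv1', ..., 'size_gib': 20.0}}
```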
Oct 11 01:43:02 compute-0 systemd[1]: libpod-9abbd4b4215c5ec8f9d3935e0d10848b50f196a50717118b8c65ae5af4aa190a.scope: Deactivated successfully.
Oct 11 01:43:02 compute-0 podman[204982]: 2025-10-11 01:43:02.851509897 +0000 UTC m=+1.184605368 container died 9abbd4b4215c5ec8f9d3935e0d10848b50f196a50717118b8c65ae5af4aa190a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_sammet, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:02 compute-0 systemd[1]: var-lib-containers-storage-overlay-4c4af2af81edca7b55078e0dc9f8a0cf3379633aae9a0548a3ac7e333167bb6e-merged.mount: Deactivated successfully.
Oct 11 01:43:02 compute-0 podman[204982]: 2025-10-11 01:43:02.956957821 +0000 UTC m=+1.290053212 container remove 9abbd4b4215c5ec8f9d3935e0d10848b50f196a50717118b8c65ae5af4aa190a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_sammet, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:43:02 compute-0 systemd[1]: libpod-conmon-9abbd4b4215c5ec8f9d3935e0d10848b50f196a50717118b8c65ae5af4aa190a.scope: Deactivated successfully.
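The create/start/died/remove events around container 9abbd4b4... show the short-lived podman run that produced that listing. A hedged sketch of such an invocation (the exact bind mounts cephadm wires in are not visible in the log, so the `-v` options here are assumptions):

```python
# A sketch, not cephadm itself: run ceph-volume in a throwaway container the
# way the create -> start -> died -> remove sequence above suggests.
import subprocess

IMAGE = ("quay.io/ceph/ceph@sha256:"
         "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")

def lvm_list_json() -> str:
    cmd = [
        "podman", "run", "--rm", "--privileged",  # --rm mirrors the prompt container remove above
        "-v", "/dev:/dev",                        # assumed mounts so ceph-volume can see LVM state
        "-v", "/run/lvm:/run/lvm",
        IMAGE,
        "ceph-volume", "lvm", "list", "--format", "json",
    ]
    return subprocess.run(cmd, check=True, capture_output=True, text=True).stdout
```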
Oct 11 01:43:03 compute-0 sudo[204878]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "osd.0"} v 0) v1
Oct 11 01:43:03 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
Oct 11 01:43:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:43:03 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:03 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.serve] Deploying daemon osd.0 on compute-0
Oct 11 01:43:03 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Deploying daemon osd.0 on compute-0
Oct 11 01:43:03 compute-0 sudo[205021]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:03 compute-0 sudo[205021]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:03 compute-0 sudo[205021]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:03 compute-0 sudo[205046]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:43:03 compute-0 sudo[205046]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:03 compute-0 sudo[205046]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:03 compute-0 sudo[205071]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:03 compute-0 sudo[205071]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:03 compute-0 sudo[205071]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:03 compute-0 sudo[205096]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 _orch deploy --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:43:03 compute-0 sudo[205096]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:03 compute-0 ceph-mon[191930]: pgmap v30: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:03 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
Oct 11 01:43:03 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:03 compute-0 ceph-mon[191930]: Deploying daemon osd.0 on compute-0
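Before deploying osd.0, the mgr dispatches two mon commands, visible in the audit lines above: `auth get` for the OSD's keyring and `config generate-minimal-conf` for a bootstrap ceph.conf. Both map onto plain `ceph` CLI calls; a sketch (assumes a working admin keyring on this host):

```python
# The two mon_command dispatches above, reproduced via the ceph CLI.
import subprocess

def ceph(*args: str) -> str:
    return subprocess.run(["ceph", *args], check=True,
                          capture_output=True, text=True).stdout

osd_keyring  = ceph("auth", "get", "osd.0")                 # {"prefix": "auth get", "entity": "osd.0"}
minimal_conf = ceph("config", "generate-minimal-conf")      # {"prefix": "config generate-minimal-conf"}
```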
Oct 11 01:43:04 compute-0 podman[205159]: 2025-10-11 01:43:04.270909523 +0000 UTC m=+0.089868427 container create 8f81099250732ee3a22e08da09417862e107a8bfb8c5889f017a26860abf9dba (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_blackwell, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:43:04 compute-0 podman[205159]: 2025-10-11 01:43:04.234096886 +0000 UTC m=+0.053055850 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:04 compute-0 systemd[1]: Started libpod-conmon-8f81099250732ee3a22e08da09417862e107a8bfb8c5889f017a26860abf9dba.scope.
Oct 11 01:43:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v31: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:04 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:04 compute-0 podman[205159]: 2025-10-11 01:43:04.408456326 +0000 UTC m=+0.227415210 container init 8f81099250732ee3a22e08da09417862e107a8bfb8c5889f017a26860abf9dba (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_blackwell, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=reef)
Oct 11 01:43:04 compute-0 podman[205159]: 2025-10-11 01:43:04.42512507 +0000 UTC m=+0.244083934 container start 8f81099250732ee3a22e08da09417862e107a8bfb8c5889f017a26860abf9dba (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_blackwell, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef)
Oct 11 01:43:04 compute-0 podman[205159]: 2025-10-11 01:43:04.429000063 +0000 UTC m=+0.247958927 container attach 8f81099250732ee3a22e08da09417862e107a8bfb8c5889f017a26860abf9dba (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_blackwell, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 01:43:04 compute-0 mystifying_blackwell[205175]: 167 167
Oct 11 01:43:04 compute-0 systemd[1]: libpod-8f81099250732ee3a22e08da09417862e107a8bfb8c5889f017a26860abf9dba.scope: Deactivated successfully.
Oct 11 01:43:04 compute-0 podman[205159]: 2025-10-11 01:43:04.439210029 +0000 UTC m=+0.258168943 container died 8f81099250732ee3a22e08da09417862e107a8bfb8c5889f017a26860abf9dba (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_blackwell, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:43:04 compute-0 systemd[1]: var-lib-containers-storage-overlay-ed032cf17107f7863980452ace12b005d2f661e909694808c85b03a5238ade44-merged.mount: Deactivated successfully.
Oct 11 01:43:04 compute-0 podman[205159]: 2025-10-11 01:43:04.516768734 +0000 UTC m=+0.335727628 container remove 8f81099250732ee3a22e08da09417862e107a8bfb8c5889f017a26860abf9dba (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_blackwell, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, ceph=True)
Oct 11 01:43:04 compute-0 systemd[1]: libpod-conmon-8f81099250732ee3a22e08da09417862e107a8bfb8c5889f017a26860abf9dba.scope: Deactivated successfully.
Oct 11 01:43:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e6 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
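As a quick unit check on the `_set_new_cache_sizes` figures above: the allocator values are exact MiB multiples and the cache target is just under 1 GiB.

```python
# Unit conversion only; the byte counts are copied from the mon line above.
MiB = 2**20
for name, nbytes in {"cache_size": 1020054731, "inc_alloc": 348127232,
                     "full_alloc": 348127232, "kv_alloc": 322961408}.items():
    print(f"{name}: {nbytes / MiB:.1f} MiB")
# cache_size: 972.8 MiB, inc_alloc/full_alloc: 332.0 MiB, kv_alloc: 308.0 MiB
```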
Oct 11 01:43:04 compute-0 podman[205206]: 2025-10-11 01:43:04.989756825 +0000 UTC m=+0.092918184 container create 8aa02961a5d552b5614804672ea5051befa386c2c330e2fbc7102fc7a39356c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate-test, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Oct 11 01:43:05 compute-0 podman[205206]: 2025-10-11 01:43:04.952126781 +0000 UTC m=+0.055288150 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:05 compute-0 systemd[1]: Started libpod-conmon-8aa02961a5d552b5614804672ea5051befa386c2c330e2fbc7102fc7a39356c1.scope.
Oct 11 01:43:05 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dfef3c17e7b8a2e2fdaecaabe366411aaa2d022bc35656ef5d46a41c4ae40fe4/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dfef3c17e7b8a2e2fdaecaabe366411aaa2d022bc35656ef5d46a41c4ae40fe4/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dfef3c17e7b8a2e2fdaecaabe366411aaa2d022bc35656ef5d46a41c4ae40fe4/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dfef3c17e7b8a2e2fdaecaabe366411aaa2d022bc35656ef5d46a41c4ae40fe4/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dfef3c17e7b8a2e2fdaecaabe366411aaa2d022bc35656ef5d46a41c4ae40fe4/merged/var/lib/ceph/osd/ceph-0 supports timestamps until 2038 (0x7fffffff)
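The xfs warnings above all cite the same ceiling, 0x7fffffff seconds: the classic 32-bit time_t limit. One line confirms the date:

```python
# 0x7fffffff seconds after the epoch is the year-2038 boundary the kernel warns about.
from datetime import datetime, timezone
print(datetime.fromtimestamp(0x7FFFFFFF, tz=timezone.utc))  # 2038-01-19 03:14:07+00:00
```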
Oct 11 01:43:05 compute-0 podman[205206]: 2025-10-11 01:43:05.184879073 +0000 UTC m=+0.288040442 container init 8aa02961a5d552b5614804672ea5051befa386c2c330e2fbc7102fc7a39356c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate-test, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:43:05 compute-0 podman[205206]: 2025-10-11 01:43:05.214830068 +0000 UTC m=+0.317991437 container start 8aa02961a5d552b5614804672ea5051befa386c2c330e2fbc7102fc7a39356c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate-test, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:43:05 compute-0 podman[205206]: 2025-10-11 01:43:05.221890632 +0000 UTC m=+0.325052031 container attach 8aa02961a5d552b5614804672ea5051befa386c2c330e2fbc7102fc7a39356c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate-test, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default)
Oct 11 01:43:05 compute-0 podman[205220]: 2025-10-11 01:43:05.235563118 +0000 UTC m=+0.165244281 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_id=edpm, managed_by=edpm_ansible)
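Note that the `config_data=` payload in the health_status event above is a Python dict literal (single quotes), not JSON, so `json.loads` would reject it while `ast.literal_eval` handles it. A sketch using an abbreviated copy of that payload:

```python
# Parsing the Python-literal config_data embedded in the health event above.
import ast

config_data = ("{'image': 'quay.io/podified-antelope-centos9/"
               "openstack-ceilometer-ipmi:current-podified', "
               "'healthcheck': {'test': '/openstack/healthcheck ipmi'}}")
cfg = ast.literal_eval(config_data)
print(cfg["healthcheck"]["test"])  # /openstack/healthcheck ipmi
```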
Oct 11 01:43:05 compute-0 ceph-mon[191930]: pgmap v31: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:05 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate-test[205229]: usage: ceph-volume activate [-h] [--osd-id OSD_ID] [--osd-uuid OSD_UUID]
Oct 11 01:43:05 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate-test[205229]:                             [--no-systemd] [--no-tmpfs]
Oct 11 01:43:05 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate-test[205229]: ceph-volume activate: error: unrecognized arguments: --bad-option
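The `--bad-option` failure above is not a malfunction: the container name (`...-osd-0-activate-test`) suggests a deliberate probe, where the caller passes a known-bad flag and inspects the usage text to learn which `ceph-volume activate` options the image supports. The argparse behaviour is easy to reproduce (option set copied from the logged usage line):

```python
# Reproducing the usage/error output above with argparse.
import argparse

parser = argparse.ArgumentParser(prog="ceph-volume activate")
parser.add_argument("--osd-id")
parser.add_argument("--osd-uuid")
parser.add_argument("--no-systemd", action="store_true")
parser.add_argument("--no-tmpfs", action="store_true")

try:
    parser.parse_args(["--bad-option"])
except SystemExit as exc:
    # argparse printed: "error: unrecognized arguments: --bad-option"
    print("exit status:", exc.code)  # 2
```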
Oct 11 01:43:05 compute-0 systemd[1]: libpod-8aa02961a5d552b5614804672ea5051befa386c2c330e2fbc7102fc7a39356c1.scope: Deactivated successfully.
Oct 11 01:43:05 compute-0 podman[205206]: 2025-10-11 01:43:05.855410184 +0000 UTC m=+0.958571543 container died 8aa02961a5d552b5614804672ea5051befa386c2c330e2fbc7102fc7a39356c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate-test, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3)
Oct 11 01:43:05 compute-0 systemd[1]: var-lib-containers-storage-overlay-dfef3c17e7b8a2e2fdaecaabe366411aaa2d022bc35656ef5d46a41c4ae40fe4-merged.mount: Deactivated successfully.
Oct 11 01:43:05 compute-0 podman[205206]: 2025-10-11 01:43:05.943038421 +0000 UTC m=+1.046199750 container remove 8aa02961a5d552b5614804672ea5051befa386c2c330e2fbc7102fc7a39356c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate-test, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.build-date=20250507, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:43:05 compute-0 systemd[1]: libpod-conmon-8aa02961a5d552b5614804672ea5051befa386c2c330e2fbc7102fc7a39356c1.scope: Deactivated successfully.
Oct 11 01:43:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v32: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:06 compute-0 systemd[1]: Reloading.
Oct 11 01:43:06 compute-0 systemd-rc-local-generator[205298]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:43:06 compute-0 systemd-sysv-generator[205306]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:43:06 compute-0 systemd[1]: Reloading.
Oct 11 01:43:07 compute-0 systemd-rc-local-generator[205344]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:43:07 compute-0 systemd-sysv-generator[205347]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:43:07 compute-0 systemd[1]: Starting Ceph osd.0 for 3c7617c3-7a20-523e-a9de-20c0d6ba41da...
Oct 11 01:43:07 compute-0 ceph-mon[191930]: pgmap v32: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:07 compute-0 podman[205395]: 2025-10-11 01:43:07.838058742 +0000 UTC m=+0.094877574 container create e5db2beb3450cec70309d0740238cd49cb5161588ae10d3307fa6a351d5a67b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:07 compute-0 podman[205395]: 2025-10-11 01:43:07.802629739 +0000 UTC m=+0.059448621 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.934 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to be longer than the expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.935 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.935 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.936 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f8ed27f97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.937 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb8c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.938 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.939 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.939 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb1a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb200>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed2874260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed3ab42f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.939 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f8ed27fbad0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.942 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb350>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb90>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fa390>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb3b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbbf0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbc80>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27f9610>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb620>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbe30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbec0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbf50>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2626ba0>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f8ed27faff0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f8ed27fb110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f8ed27fb170>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f8ed27fb1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f8ed27fb230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f8ed2874230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f8ed27fb290>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f8ed5778d70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f8ed27fb650>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f8ed27fbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f8ed27fb320>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f8ed27fbb60>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f8ed27fa3f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f8ed27fb380>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f8ed27fbbc0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f8ed27fbc50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f8ed27fbce0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f8ed27fbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f8ed27fb590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f8ed27f95e0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f8ed27fb5f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f8ed27fbe00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.955 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f8ed27fbe90>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.955 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f8ed27fbf20>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.955 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
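The block above is ceilometer's per-cycle pattern: for each compute pollster the AgentManager runs the local_instances discovery, and when discovery returns nothing the pollster is skipped for the cycle (the manager.py:321 message). A minimal Python sketch of that discover-then-skip loop, using illustrative names rather than ceilometer's real API:

    def run_polling_cycle(pollsters, discover_local_instances):
        # Illustrative stand-in for ceilometer's AgentManager loop, not its real API.
        for pollster in pollsters:
            resources = discover_local_instances()  # e.g. libvirt domains on this host
            if not resources:
                # Matches the "Skip pollster <name>, no resources found this cycle" lines.
                print(f"Skip pollster {pollster['name']}, no resources found this cycle")
                continue
            for sample in pollster["get_samples"](resources):
                print(sample)

    # No instances exist on compute-0 yet, so every pollster is skipped:
    run_polling_cycle([{"name": "cpu"}, {"name": "memory.usage"}], lambda: [])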
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.956 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.956 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.956 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.956 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.956 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9498dc6d76c1c98c4e11dbef943cc88c22f2dacd4c2674d5f88ef184004e7fc4/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9498dc6d76c1c98c4e11dbef943cc88c22f2dacd4c2674d5f88ef184004e7fc4/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
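The xfs "supports timestamps until 2038" messages indicate these filesystems were created without the XFS bigtime feature, so inode timestamps are 32-bit signed seconds; the 0x7fffffff in the message is the largest such value. A quick check of where that limit lands:

    from datetime import datetime, timezone

    # 0x7fffffff is the largest 32-bit signed time_t, the limit named in the
    # kernel message for XFS filesystems without the bigtime feature.
    print(datetime.fromtimestamp(0x7FFFFFFF, tz=timezone.utc).isoformat())
    # -> 2038-01-19T03:14:07+00:00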
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.959 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.959 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9498dc6d76c1c98c4e11dbef943cc88c22f2dacd4c2674d5f88ef184004e7fc4/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9498dc6d76c1c98c4e11dbef943cc88c22f2dacd4c2674d5f88ef184004e7fc4/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.961 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.961 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.961 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9498dc6d76c1c98c4e11dbef943cc88c22f2dacd4c2674d5f88ef184004e7fc4/merged/var/lib/ceph/osd/ceph-0 supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:43:07.961 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:43:07 compute-0 podman[205395]: 2025-10-11 01:43:07.988813889 +0000 UTC m=+0.245632751 container init e5db2beb3450cec70309d0740238cd49cb5161588ae10d3307fa6a351d5a67b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507)
Oct 11 01:43:08 compute-0 podman[205395]: 2025-10-11 01:43:08.019451947 +0000 UTC m=+0.276270769 container start e5db2beb3450cec70309d0740238cd49cb5161588ae10d3307fa6a351d5a67b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 01:43:08 compute-0 podman[205395]: 2025-10-11 01:43:08.025448054 +0000 UTC m=+0.282266946 container attach e5db2beb3450cec70309d0740238cd49cb5161588ae10d3307fa6a351d5a67b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True)
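The three podman lines above (init, start, attach) are the front half of the lifecycle of the short-lived osd-0-activate container; its "died" and "remove" events follow about 1.5 s later. A sketch of watching that lifecycle through podman's event stream; the exact JSON field names may vary between podman versions:

    import json
    import subprocess

    # Stream podman lifecycle events (create/init/start/attach/died/remove)
    # like the ones logged here. Assumes a local podman CLI.
    proc = subprocess.Popen(
        ["podman", "events", "--format", "json"],
        stdout=subprocess.PIPE, text=True,
    )
    for line in proc.stdout:
        event = json.loads(line)
        print(event.get("Status"), event.get("Name"))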
Oct 11 01:43:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v33: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:09 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate[205410]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Oct 11 01:43:09 compute-0 bash[205395]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Oct 11 01:43:09 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate[205410]: Running command: /usr/bin/ceph-bluestore-tool prime-osd-dir --path /var/lib/ceph/osd/ceph-0 --no-mon-config --dev /dev/mapper/ceph_vg0-ceph_lv0
Oct 11 01:43:09 compute-0 bash[205395]: Running command: /usr/bin/ceph-bluestore-tool prime-osd-dir --path /var/lib/ceph/osd/ceph-0 --no-mon-config --dev /dev/mapper/ceph_vg0-ceph_lv0
Oct 11 01:43:09 compute-0 sudo[205566]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ffknmntmswibpcfdvddgqowxxybgkbxm ; /usr/bin/python3'
Oct 11 01:43:09 compute-0 sudo[205566]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:43:09 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate[205410]: Running command: /usr/bin/chown -h ceph:ceph /dev/mapper/ceph_vg0-ceph_lv0
Oct 11 01:43:09 compute-0 bash[205395]: Running command: /usr/bin/chown -h ceph:ceph /dev/mapper/ceph_vg0-ceph_lv0
Oct 11 01:43:09 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate[205410]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0
Oct 11 01:43:09 compute-0 bash[205395]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0
Oct 11 01:43:09 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate[205410]: Running command: /usr/bin/ln -s /dev/mapper/ceph_vg0-ceph_lv0 /var/lib/ceph/osd/ceph-0/block
Oct 11 01:43:09 compute-0 bash[205395]: Running command: /usr/bin/ln -s /dev/mapper/ceph_vg0-ceph_lv0 /var/lib/ceph/osd/ceph-0/block
Oct 11 01:43:09 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate[205410]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Oct 11 01:43:09 compute-0 bash[205395]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Oct 11 01:43:09 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate[205410]: --> ceph-volume raw activate successful for osd ID: 0
Oct 11 01:43:09 compute-0 bash[205395]: --> ceph-volume raw activate successful for osd ID: 0
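The preceding commands are everything "ceph-volume raw activate" did for osd.0: chown the OSD directory, populate it from the BlueStore device with ceph-bluestore-tool prime-osd-dir, symlink the logical volume to block, and chown again. Replayed as a Python sketch purely to document the sequence (the paths come from this log; running it elsewhere would fail):

    import subprocess

    osd_dir = "/var/lib/ceph/osd/ceph-0"
    dev = "/dev/mapper/ceph_vg0-ceph_lv0"
    for cmd in [
        ["chown", "-R", "ceph:ceph", osd_dir],
        ["ceph-bluestore-tool", "prime-osd-dir", "--path", osd_dir,
         "--no-mon-config", "--dev", dev],
        ["chown", "-h", "ceph:ceph", dev],
        ["chown", "-R", "ceph:ceph", "/dev/dm-0"],
        ["ln", "-s", dev, f"{osd_dir}/block"],
        ["chown", "-R", "ceph:ceph", osd_dir],
    ]:
        subprocess.run(cmd, check=True)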
Oct 11 01:43:09 compute-0 systemd[1]: libpod-e5db2beb3450cec70309d0740238cd49cb5161588ae10d3307fa6a351d5a67b7.scope: Deactivated successfully.
Oct 11 01:43:09 compute-0 systemd[1]: libpod-e5db2beb3450cec70309d0740238cd49cb5161588ae10d3307fa6a351d5a67b7.scope: Consumed 1.513s CPU time.
Oct 11 01:43:09 compute-0 podman[205395]: 2025-10-11 01:43:09.511843552 +0000 UTC m=+1.768662384 container died e5db2beb3450cec70309d0740238cd49cb5161588ae10d3307fa6a351d5a67b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:09 compute-0 python3[205569]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z   --volume /home/ceph-admin/specs/ceph_spec.yaml:/home/ceph_spec.yaml:z   --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   status --format json | jq .osdmap.num_up_osds _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
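The Ansible task above shells out to podman and extracts osdmap.num_up_osds with jq. The same extraction in Python, with the podman invocation abbreviated (the logged command also mounts the assimilate/spec files and passes --fsid, -c and -k):

    import json
    import subprocess

    out = subprocess.run(
        ["podman", "run", "--rm", "--net=host",
         "--volume", "/etc/ceph:/etc/ceph:z",
         "--entrypoint", "ceph", "quay.io/ceph/ceph:v18",
         "status", "--format", "json"],
        check=True, capture_output=True, text=True,
    ).stdout
    print(json.loads(out)["osdmap"]["num_up_osds"])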
Oct 11 01:43:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e6 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:43:09 compute-0 systemd[1]: var-lib-containers-storage-overlay-9498dc6d76c1c98c4e11dbef943cc88c22f2dacd4c2674d5f88ef184004e7fc4-merged.mount: Deactivated successfully.
Oct 11 01:43:09 compute-0 podman[205395]: 2025-10-11 01:43:09.627026582 +0000 UTC m=+1.883845384 container remove e5db2beb3450cec70309d0740238cd49cb5161588ae10d3307fa6a351d5a67b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0-activate, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20250507)
Oct 11 01:43:09 compute-0 podman[205584]: 2025-10-11 01:43:09.674478898 +0000 UTC m=+0.092738361 container create c3f9d4144bc996532742374b5dac050caa8fab7d746bbb4f593bbd7c3b54ec28 (image=quay.io/ceph/ceph:v18, name=vigilant_brahmagupta, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:43:09 compute-0 podman[205584]: 2025-10-11 01:43:09.648650655 +0000 UTC m=+0.066910118 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:43:09 compute-0 systemd[1]: Started libpod-conmon-c3f9d4144bc996532742374b5dac050caa8fab7d746bbb4f593bbd7c3b54ec28.scope.
Oct 11 01:43:09 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3d80daf04af05deebe8f4705c38874c9e5100e6d9e2654054d1dbe18583ffb29/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3d80daf04af05deebe8f4705c38874c9e5100e6d9e2654054d1dbe18583ffb29/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3d80daf04af05deebe8f4705c38874c9e5100e6d9e2654054d1dbe18583ffb29/merged/home/ceph_spec.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:09 compute-0 ceph-mon[191930]: pgmap v33: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:09 compute-0 podman[205584]: 2025-10-11 01:43:09.864364428 +0000 UTC m=+0.282623891 container init c3f9d4144bc996532742374b5dac050caa8fab7d746bbb4f593bbd7c3b54ec28 (image=quay.io/ceph/ceph:v18, name=vigilant_brahmagupta, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, ceph=True)
Oct 11 01:43:09 compute-0 podman[205584]: 2025-10-11 01:43:09.885994801 +0000 UTC m=+0.304254264 container start c3f9d4144bc996532742374b5dac050caa8fab7d746bbb4f593bbd7c3b54ec28 (image=quay.io/ceph/ceph:v18, name=vigilant_brahmagupta, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0)
Oct 11 01:43:09 compute-0 podman[205584]: 2025-10-11 01:43:09.893344733 +0000 UTC m=+0.311604176 container attach c3f9d4144bc996532742374b5dac050caa8fab7d746bbb4f593bbd7c3b54ec28 (image=quay.io/ceph/ceph:v18, name=vigilant_brahmagupta, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 01:43:10 compute-0 podman[205647]: 2025-10-11 01:43:10.013330448 +0000 UTC m=+0.068255093 container create a0bc7452156ba2a7d1c6b9d4458870fd47b5f76cfe923cd3a8e426aaf6ebe58d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 01:43:10 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2f60ba0b8168073a591a5c2c76f68cfdf2c88771a70fd968f0cb376ea4059061/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:10 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2f60ba0b8168073a591a5c2c76f68cfdf2c88771a70fd968f0cb376ea4059061/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:10 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2f60ba0b8168073a591a5c2c76f68cfdf2c88771a70fd968f0cb376ea4059061/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:10 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2f60ba0b8168073a591a5c2c76f68cfdf2c88771a70fd968f0cb376ea4059061/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:10 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2f60ba0b8168073a591a5c2c76f68cfdf2c88771a70fd968f0cb376ea4059061/merged/var/lib/ceph/osd/ceph-0 supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:10 compute-0 podman[205647]: 2025-10-11 01:43:09.98590933 +0000 UTC m=+0.040833975 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:10 compute-0 podman[205647]: 2025-10-11 01:43:10.129181681 +0000 UTC m=+0.184106316 container init a0bc7452156ba2a7d1c6b9d4458870fd47b5f76cfe923cd3a8e426aaf6ebe58d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:43:10 compute-0 podman[205647]: 2025-10-11 01:43:10.14670854 +0000 UTC m=+0.201633175 container start a0bc7452156ba2a7d1c6b9d4458870fd47b5f76cfe923cd3a8e426aaf6ebe58d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef)
Oct 11 01:43:10 compute-0 bash[205647]: a0bc7452156ba2a7d1c6b9d4458870fd47b5f76cfe923cd3a8e426aaf6ebe58d
Oct 11 01:43:10 compute-0 systemd[1]: Started Ceph osd.0 for 3c7617c3-7a20-523e-a9de-20c0d6ba41da.
Oct 11 01:43:10 compute-0 ceph-osd[205667]: set uid:gid to 167:167 (ceph:ceph)
Oct 11 01:43:10 compute-0 ceph-osd[205667]: ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable), process ceph-osd, pid 2
Oct 11 01:43:10 compute-0 ceph-osd[205667]: pidfile_write: ignore empty --pid-file
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bdev(0x560258051800 /var/lib/ceph/osd/ceph-0/block) open path /var/lib/ceph/osd/ceph-0/block
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bdev(0x560258051800 /var/lib/ceph/osd/ceph-0/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-0/block failed: (22) Invalid argument
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bdev(0x560258051800 /var/lib/ceph/osd/ceph-0/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _set_cache_sizes cache_size 1073741824 meta 0.45 kv 0.45 kv_onode 0.04 data 0.06
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bdev(0x560258e93800 /var/lib/ceph/osd/ceph-0/block) open path /var/lib/ceph/osd/ceph-0/block
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bdev(0x560258e93800 /var/lib/ceph/osd/ceph-0/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-0/block failed: (22) Invalid argument
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bdev(0x560258e93800 /var/lib/ceph/osd/ceph-0/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bluefs add_block_device bdev 1 path /var/lib/ceph/osd/ceph-0/block size 20 GiB
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bdev(0x560258e93800 /var/lib/ceph/osd/ceph-0/block) close
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bdev(0x560258051800 /var/lib/ceph/osd/ceph-0/block) close
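The _set_cache_sizes lines describe how BlueStore splits its 1 GiB cache on this rotational device: 45% metadata, 45% RocksDB key/value data, 4% kv_onode, 6% object data, summing to 1.0. The split, reproduced:

    cache_size = 1_073_741_824  # 1 GiB, as logged
    ratios = {"meta": 0.45, "kv": 0.45, "kv_onode": 0.04, "data": 0.06}
    assert abs(sum(ratios.values()) - 1.0) < 1e-9
    for name, r in ratios.items():
        print(f"{name}: {int(cache_size * r):,} bytes")
    # The bdev "open size" of 21470642176 bytes is ~20 GiB (21470642176 / 2**30 = 19.996...).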
Oct 11 01:43:10 compute-0 sudo[205096]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:43:10 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:43:10 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "osd.1"} v 0) v1
Oct 11 01:43:10 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
Oct 11 01:43:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:43:10 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:10 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.serve] Deploying daemon osd.1 on compute-0
Oct 11 01:43:10 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Deploying daemon osd.1 on compute-0
Oct 11 01:43:10 compute-0 sudo[205701]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v34: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:10 compute-0 sudo[205701]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:10 compute-0 sudo[205701]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:10 compute-0 podman[205704]: 2025-10-11 01:43:10.403221829 +0000 UTC m=+0.086258514 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 01:43:10 compute-0 sudo[205749]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:43:10 compute-0 sudo[205749]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:10 compute-0 ceph-osd[205667]: starting osd.0 osd_data /var/lib/ceph/osd/ceph-0 /var/lib/ceph/osd/ceph-0/journal
Oct 11 01:43:10 compute-0 sudo[205749]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:10 compute-0 ceph-osd[205667]: load: jerasure load: lrc 
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bdev(0x560258f26c00 /var/lib/ceph/osd/ceph-0/block) open path /var/lib/ceph/osd/ceph-0/block
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bdev(0x560258f26c00 /var/lib/ceph/osd/ceph-0/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-0/block failed: (22) Invalid argument
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bdev(0x560258f26c00 /var/lib/ceph/osd/ceph-0/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _set_cache_sizes cache_size 1073741824 meta 0.45 kv 0.45 kv_onode 0.04 data 0.06
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bdev(0x560258f26c00 /var/lib/ceph/osd/ceph-0/block) close
Oct 11 01:43:10 compute-0 podman[205753]: 2025-10-11 01:43:10.562461692 +0000 UTC m=+0.120793313 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, release=1214.1726694543, managed_by=edpm_ansible, build-date=2024-09-18T21:23:30, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.expose-services=, vcs-type=git, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-container, io.buildah.version=1.29.0, architecture=x86_64, maintainer=Red Hat, Inc., distribution-scope=public, vendor=Red Hat, Inc., io.openshift.tags=base rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, container_name=kepler, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, summary=Provides the latest release of Red Hat Universal Base Image 9., release-0.7.12=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, version=9.4, name=ubi9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543)
Oct 11 01:43:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json"} v 0) v1
Oct 11 01:43:10 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3441155300' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
Oct 11 01:43:10 compute-0 vigilant_brahmagupta[205620]: 
Oct 11 01:43:10 compute-0 vigilant_brahmagupta[205620]: {"fsid":"3c7617c3-7a20-523e-a9de-20c0d6ba41da","health":{"status":"HEALTH_OK","checks":{},"mutes":[]},"election_epoch":5,"quorum":[0],"quorum_names":["compute-0"],"quorum_age":121,"monmap":{"epoch":1,"min_mon_release_name":"reef","num_mons":1},"osdmap":{"epoch":6,"num_osds":3,"num_up_osds":0,"osd_up_since":0,"num_in_osds":3,"osd_in_since":1760146975,"num_remapped_pgs":0},"pgmap":{"pgs_by_state":[],"num_pgs":0,"num_pools":0,"num_objects":0,"data_bytes":0,"bytes_used":0,"bytes_avail":0,"bytes_total":0},"fsmap":{"epoch":1,"by_rank":[],"up:standby":0},"mgrmap":{"available":true,"num_standbys":0,"modules":["cephadm","iostat","nfs","restful"],"services":{}},"servicemap":{"epoch":2,"modified":"2025-10-11T01:42:58.369813+0000","services":{}},"progress_events":{}}
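In this status JSON the cluster reports HEALTH_OK with num_osds 3 and num_in_osds 3 but num_up_osds 0: all three OSDs exist in the map and are "in", but none has booted yet, which is why the deployment loop keeps polling num_up_osds while osd.0 starts in the surrounding lines. Reading those fields:

    osdmap = {"num_osds": 3, "num_up_osds": 0, "num_in_osds": 3}  # from the JSON above
    print(f'{osdmap["num_up_osds"]}/{osdmap["num_osds"]} OSDs up')  # -> "0/3 OSDs up"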
Oct 11 01:43:10 compute-0 podman[205758]: 2025-10-11 01:43:10.576717714 +0000 UTC m=+0.132496949 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.build-date=20251009, container_name=ovn_controller)
Oct 11 01:43:10 compute-0 sudo[205800]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:10 compute-0 sudo[205800]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:10 compute-0 sudo[205800]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:10 compute-0 systemd[1]: libpod-c3f9d4144bc996532742374b5dac050caa8fab7d746bbb4f593bbd7c3b54ec28.scope: Deactivated successfully.
Oct 11 01:43:10 compute-0 podman[205584]: 2025-10-11 01:43:10.604094554 +0000 UTC m=+1.022353977 container died c3f9d4144bc996532742374b5dac050caa8fab7d746bbb4f593bbd7c3b54ec28 (image=quay.io/ceph/ceph:v18, name=vigilant_brahmagupta, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 01:43:10 compute-0 systemd[1]: var-lib-containers-storage-overlay-3d80daf04af05deebe8f4705c38874c9e5100e6d9e2654054d1dbe18583ffb29-merged.mount: Deactivated successfully.
Oct 11 01:43:10 compute-0 podman[205584]: 2025-10-11 01:43:10.667189326 +0000 UTC m=+1.085448749 container remove c3f9d4144bc996532742374b5dac050caa8fab7d746bbb4f593bbd7c3b54ec28 (image=quay.io/ceph/ceph:v18, name=vigilant_brahmagupta, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:10 compute-0 systemd[1]: libpod-conmon-c3f9d4144bc996532742374b5dac050caa8fab7d746bbb4f593bbd7c3b54ec28.scope: Deactivated successfully.
Oct 11 01:43:10 compute-0 sudo[205850]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 _orch deploy --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:43:10 compute-0 sudo[205850]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:10 compute-0 sudo[205566]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bdev(0x560258f26c00 /var/lib/ceph/osd/ceph-0/block) open path /var/lib/ceph/osd/ceph-0/block
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bdev(0x560258f26c00 /var/lib/ceph/osd/ceph-0/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-0/block failed: (22) Invalid argument
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bdev(0x560258f26c00 /var/lib/ceph/osd/ceph-0/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _set_cache_sizes cache_size 1073741824 meta 0.45 kv 0.45 kv_onode 0.04 data 0.06
Oct 11 01:43:10 compute-0 ceph-osd[205667]: bdev(0x560258f26c00 /var/lib/ceph/osd/ceph-0/block) close
Oct 11 01:43:11 compute-0 ceph-osd[205667]: mClockScheduler: set_osd_capacity_params_from_config: osd_bandwidth_cost_per_io: 499321.90 bytes/io, osd_bandwidth_capacity_per_shard 157286400.00 bytes/second
Oct 11 01:43:11 compute-0 ceph-osd[205667]: osd.0:0.OSDShard using op scheduler mclock_scheduler, cutoff=196
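The mClockScheduler numbers follow from Ceph's rotational-device defaults: with osd_mclock_max_sequential_bandwidth_hdd = 150 MiB/s and osd_mclock_max_capacity_iops_hdd = 315, the per-shard bandwidth is 157286400 bytes/s and the per-IO cost is bandwidth divided by IOPS:

    bandwidth = 150 * 1024 * 1024   # 157286400 bytes/second per shard, as logged
    iops = 315                      # Ceph's default HDD IOPS capacity
    print(f"{bandwidth / iops:.2f} bytes/io")  # 499321.90, matching the log line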
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bdev(0x560258f26c00 /var/lib/ceph/osd/ceph-0/block) open path /var/lib/ceph/osd/ceph-0/block
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bdev(0x560258f26c00 /var/lib/ceph/osd/ceph-0/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-0/block failed: (22) Invalid argument
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bdev(0x560258f26c00 /var/lib/ceph/osd/ceph-0/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _set_cache_sizes cache_size 1073741824 meta 0.45 kv 0.45 kv_onode 0.04 data 0.06
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bdev(0x560258f27400 /var/lib/ceph/osd/ceph-0/block) open path /var/lib/ceph/osd/ceph-0/block
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bdev(0x560258f27400 /var/lib/ceph/osd/ceph-0/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-0/block failed: (22) Invalid argument
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bdev(0x560258f27400 /var/lib/ceph/osd/ceph-0/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluefs add_block_device bdev 1 path /var/lib/ceph/osd/ceph-0/block size 20 GiB
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluefs mount
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluefs _init_alloc shared, id 1, capacity 0x4ffc00000, block size 0x10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluefs mount shared_bdev_used = 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _prepare_db_environment set db_paths to db,20397110067 db.slow,20397110067
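The db path size 20397110067 is 95% of the 21470642176-byte shared block device, i.e. BlueStore advertises 95% of the device to RocksDB for both db and db.slow; the 0.95 factor here is inferred from the two logged numbers rather than taken from a named option:

    device = 21470642176                # "open size" of /var/lib/ceph/osd/ceph-0/block
    print(int(device * 0.95))           # 20397110067, matching "db,20397110067 db.slow,20397110067"
    print(f"{device / 2**30:.2f} GiB")  # 20.00, matching "20 GiB"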
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: RocksDB version: 7.9.2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Git sha 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Compile date 2025-05-06 23:30:25
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: DB SUMMARY
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: DB Session ID:  CUASSC5MKCX3EGTCJBY0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: CURRENT file:  CURRENT
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: IDENTITY file:  IDENTITY
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: MANIFEST file:  MANIFEST-000032 size: 1007 Bytes
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: SST files in db dir, Total Num: 1, files: 000030.sst 
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: SST files in db.slow dir, Total Num: 0, files: 
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Write Ahead Log file in db.wal: 000031.log size: 5093 ; 
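The DB SUMMARY lists the standard RocksDB layout: CURRENT names the live MANIFEST, IDENTITY holds the DB id, .sst files carry the table data (one here, 000030.sst), and the write-ahead log (000031.log) sits in db.wal. A small sketch that groups a RocksDB directory the same way, based only on RocksDB's filename conventions:

    import os

    def summarize(db_dir):
        groups = {"sst": [], "manifest": [], "wal": [], "other": []}
        for name in sorted(os.listdir(db_dir)):
            if name.endswith(".sst"):
                groups["sst"].append(name)
            elif name.startswith("MANIFEST-"):
                groups["manifest"].append(name)
            elif name.endswith(".log"):
                groups["wal"].append(name)
            else:
                groups["other"].append(name)  # CURRENT, IDENTITY, OPTIONS-*, ...
        return groups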
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                         Options.error_if_exists: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.create_if_missing: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                         Options.paranoid_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.flush_verify_memtable_count: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.track_and_verify_wals_in_manifest: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.verify_sst_unique_id_in_manifest: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                                     Options.env: 0x560258ee5d50
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                                      Options.fs: LegacyFileSystem
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                                Options.info_log: 0x5602580dcb60
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_file_opening_threads: 16
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                              Options.statistics: (nil)
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.use_fsync: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.max_log_file_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.max_manifest_file_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.log_file_time_to_roll: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.keep_log_file_num: 1000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.recycle_log_file_num: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                         Options.allow_fallocate: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.allow_mmap_reads: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.allow_mmap_writes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.use_direct_reads: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.use_direct_io_for_flush_and_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.create_missing_column_families: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                              Options.db_log_dir: 
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                                 Options.wal_dir: db.wal
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.table_cache_numshardbits: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                         Options.WAL_ttl_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.WAL_size_limit_MB: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.max_write_batch_group_size_bytes: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.manifest_preallocation_size: 4194304
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                     Options.is_fd_close_on_exec: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.advise_random_on_open: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.db_write_buffer_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.write_buffer_manager: 0x560258ff2460
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.access_hint_on_compaction_start: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.random_access_max_buffer_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                      Options.use_adaptive_mutex: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                            Options.rate_limiter: (nil)
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.sst_file_manager.rate_bytes_per_sec: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.wal_recovery_mode: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.enable_thread_tracking: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.enable_pipelined_write: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.unordered_write: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.allow_concurrent_memtable_write: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.enable_write_thread_adaptive_yield: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.write_thread_max_yield_usec: 100
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.write_thread_slow_yield_usec: 3
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.row_cache: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                              Options.wal_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.avoid_flush_during_recovery: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.allow_ingest_behind: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.two_write_queues: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.manual_wal_flush: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.wal_compression: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.atomic_flush: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.avoid_unnecessary_blocking_io: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.persist_stats_to_disk: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.write_dbid_to_manifest: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.log_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.file_checksum_gen_factory: Unknown
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.best_efforts_recovery: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bgerror_resume_count: 2147483647
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bgerror_resume_retry_interval: 1000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.allow_data_in_errors: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.db_host_id: __hostname__
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.enforce_single_del_contracts: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.max_background_jobs: 4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.max_background_compactions: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.max_subcompactions: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.avoid_flush_during_shutdown: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.writable_file_max_buffer_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.delayed_write_rate : 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.max_total_wal_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.delete_obsolete_files_period_micros: 21600000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.stats_dump_period_sec: 600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.stats_persist_period_sec: 600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.stats_history_buffer_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.max_open_files: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.bytes_per_sync: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                      Options.wal_bytes_per_sync: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.strict_bytes_per_sync: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.compaction_readahead_size: 2097152
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.max_background_flushes: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Compression algorithms supported:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         kZSTD supported: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         kXpressCompression supported: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         kBZip2Compression supported: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         kZSTDNotFinalCompression supported: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         kLZ4Compression supported: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         kZlibCompression supported: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         kLZ4HCCompression supported: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         kSnappyCompression supported: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Fast CRC32 supported: Supported on x86
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: DMutex implementation: pthread_mutex_t
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl_readonly.cc:25] Opening the db in read only mode
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: db/MANIFEST-000032
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: .T:int64_array.b:bitwise_xor
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dd1c0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-0]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dd1c0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-1]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dd1c0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-2]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dd1c0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
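[editor's note] The dump that follows repeats the same shape for every column family: a header line naming the family, a run of "Options.<name>: <value>" records, and an indented multi-line table_factory block. A minimal parsing sketch, assuming the journal has been exported to a plain-text file (the path below is hypothetical, and everything here is an illustration rather than a Ceph-provided tool):

    import re
    from collections import defaultdict

    # Collect both the "Options.<name>: <value>" records and the indented
    # "key: value" continuation lines of the table_factory blocks into one
    # dictionary per column family.
    CF_HEADER = re.compile(r"Options for column family \[(?P<cf>[^\]]+)\]")
    OPTION = re.compile(r"Options\.(?P<key>[\w.\[\]]+)\s*:\s*(?P<val>\S.*?)\s*$")
    CONT = re.compile(r"^\s+(?P<key>[A-Za-z_]\w*)\s*:\s*(?P<val>\S.*?)\s*$")

    def parse_cf_options(path):
        options = defaultdict(dict)
        cf = None
        with open(path) as fh:
            for line in fh:
                if (m := CF_HEADER.search(line)):
                    cf = m.group("cf")
                elif cf and (m := OPTION.search(line) or CONT.match(line)):
                    options[cf][m.group("key")] = m.group("val")
        return options

    if __name__ == "__main__":
        opts = parse_cf_options("ceph-osd-journal.txt")  # hypothetical export
        print(opts["p-0"].get("compression"), opts["p-0"].get("capacity"))

Keys with no value on the same line (e.g. "block_cache_options:") are deliberately skipped; the sketch keeps the last value seen per family, which matches how the dump is emitted.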
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-0]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dd1c0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
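[editor's note] Taken together, the p-0 values above pin down the LSM shape: 16 MiB memtables (write_buffer_size) merged six at a time (min_write_buffer_number_to_merge) feed L0, which compacts after 8 files (level0_file_num_compaction_trigger), and because level_compaction_dynamic_level_bytes is 0 and every addtl multiplier is 1, the static target for level n is simply max_bytes_for_level_base * max_bytes_for_level_multiplier ** (n - 1). A back-of-the-envelope sketch of those targets, with values copied from the log (this ignores compression):

    GiB = 1024 ** 3

    # Values copied from the p-0 dump above; dynamic level sizing is off.
    max_bytes_for_level_base = 1073741824        # 1 GiB
    max_bytes_for_level_multiplier = 8.0
    num_levels = 7

    for level in range(1, num_levels):
        target = max_bytes_for_level_base * max_bytes_for_level_multiplier ** (level - 1)
        print(f"L{level}: {target / GiB:g} GiB")
    # -> L1: 1, L2: 8, L3: 64, L4: 512, L5: 4096, L6: 32768 (GiB)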
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-1]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dd1c0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-2]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dd1c0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
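[editor's note] One detail that differs between the p-* families above and the O-* families that follow: they point at different BinnedLRUCache instances (0x5602580c4dd0 with capacity 483183820 bytes, roughly 461 MiB, versus 0x5602580c4430 with capacity 536870912 bytes, exactly 512 MiB). A small helper on top of parse_cf_options() from the earlier sketch makes such per-family differences easy to spot:

    def diff_cf_options(opts, cf_a, cf_b):
        """Print every key whose value differs between two column families.

        `opts` is the mapping built by parse_cf_options() in the sketch
        further up; family names such as "p-0" and "O-0" come from the
        dump headers.
        """
        a, b = opts[cf_a], opts[cf_b]
        for key in sorted(set(a) | set(b)):
            if a.get(key) != b.get(key):
                print(f"{key:24} {cf_a}={a.get(key)} {cf_b}={b.get(key)}")

    # e.g. diff_cf_options(parse_cf_options("ceph-osd-journal.txt"), "p-0", "O-0")
    # should report the block_cache pointer and capacity as the only differences,
    # since every other captured value is identical in this dump.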
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-0]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dd1a0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4430
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-1]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dd1a0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4430
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-2]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dd1a0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4430
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:635]         (skipping printing options)
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:635]         (skipping printing options)
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:db/MANIFEST-000032 succeeded,manifest_file_number is 32, next_file_number is 34, last_sequence is 12, log_number is 5,prev_log_number is 0,max_column_family is 11,min_log_number_to_keep is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [m-0] (ID 1), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [m-1] (ID 2), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [m-2] (ID 3), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [p-0] (ID 4), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [p-1] (ID 5), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [p-2] (ID 6), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [O-0] (ID 7), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [O-1] (ID 8), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [O-2] (ID 9), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [L] (ID 10), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [P] (ID 11), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: ce008203-df68-4ddb-b66d-20dc977714e1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146991130910, "job": 1, "event": "recovery_started", "wal_files": [31]}
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #31 mode 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146991131571, "job": 1, "event": "recovery_finished"}
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _open_db opened rocksdb path db options compression=kLZ4Compression,max_write_buffer_number=64,min_write_buffer_number_to_merge=6,compaction_style=kCompactionStyleLevel,write_buffer_size=16777216,max_background_jobs=4,level0_file_num_compaction_trigger=8,max_bytes_for_level_base=1073741824,max_bytes_for_level_multiplier=8,compaction_readahead_size=2MB,max_total_wal_size=1073741824,writable_file_max_buffer_size=0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _open_super_meta old nid_max 1025
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _open_super_meta old blobid_max 10240
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _open_super_meta ondisk_format 4 compat_ondisk_format 3
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _open_super_meta min_alloc_size 0x1000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: freelist init
Oct 11 01:43:11 compute-0 ceph-osd[205667]: freelist _read_cfg
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _init_alloc loaded 20 GiB in 2 extents, allocator type hybrid, capacity 0x4ffc00000, block size 0x1000, free 0x4ffbfd000, fragmentation 1.9e-07
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:496] Shutdown: canceling all background work
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:704] Shutdown complete
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluefs umount
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bdev(0x560258f27400 /var/lib/ceph/osd/ceph-0/block) close
Oct 11 01:43:11 compute-0 podman[206120]: 2025-10-11 01:43:11.245950228 +0000 UTC m=+0.074243901 container create 73997a7fc3855e514bd3785fbe065d1a456c0d1fbc64ff209c5d8f7cd3a5ac56 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=tender_ganguly, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=reef)
Oct 11 01:43:11 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:11 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:11 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
Oct 11 01:43:11 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:11 compute-0 ceph-mon[191930]: Deploying daemon osd.1 on compute-0
Oct 11 01:43:11 compute-0 ceph-mon[191930]: pgmap v34: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:11 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3441155300' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
Oct 11 01:43:11 compute-0 podman[206120]: 2025-10-11 01:43:11.219851506 +0000 UTC m=+0.048145179 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:11 compute-0 systemd[1]: Started libpod-conmon-73997a7fc3855e514bd3785fbe065d1a456c0d1fbc64ff209c5d8f7cd3a5ac56.scope.
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bdev(0x560258f27400 /var/lib/ceph/osd/ceph-0/block) open path /var/lib/ceph/osd/ceph-0/block
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bdev(0x560258f27400 /var/lib/ceph/osd/ceph-0/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-0/block failed: (22) Invalid argument
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bdev(0x560258f27400 /var/lib/ceph/osd/ceph-0/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluefs add_block_device bdev 1 path /var/lib/ceph/osd/ceph-0/block size 20 GiB
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluefs mount
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluefs _init_alloc shared, id 1, capacity 0x4ffc00000, block size 0x10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluefs mount shared_bdev_used = 4718592
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _prepare_db_environment set db_paths to db,20397110067 db.slow,20397110067
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: RocksDB version: 7.9.2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Git sha 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Compile date 2025-05-06 23:30:25
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: DB SUMMARY
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: DB Session ID:  CUASSC5MKCX3EGTCJBY1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: CURRENT file:  CURRENT
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: IDENTITY file:  IDENTITY
Oct 11 01:43:11 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: MANIFEST file:  MANIFEST-000032 size: 1007 Bytes
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: SST files in db dir, Total Num: 1, files: 000030.sst 
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: SST files in db.slow dir, Total Num: 0, files: 
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Write Ahead Log file in db.wal: 000031.log size: 5093 ; 
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                         Options.error_if_exists: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.create_if_missing: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                         Options.paranoid_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.flush_verify_memtable_count: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.track_and_verify_wals_in_manifest: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.verify_sst_unique_id_in_manifest: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                                     Options.env: 0x5602590a63f0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                                      Options.fs: LegacyFileSystem
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                                Options.info_log: 0x5602580dc8a0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_file_opening_threads: 16
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                              Options.statistics: (nil)
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.use_fsync: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.max_log_file_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.max_manifest_file_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.log_file_time_to_roll: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.keep_log_file_num: 1000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.recycle_log_file_num: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                         Options.allow_fallocate: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.allow_mmap_reads: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.allow_mmap_writes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.use_direct_reads: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.use_direct_io_for_flush_and_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.create_missing_column_families: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                              Options.db_log_dir: 
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                                 Options.wal_dir: db.wal
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.table_cache_numshardbits: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                         Options.WAL_ttl_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.WAL_size_limit_MB: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.max_write_batch_group_size_bytes: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.manifest_preallocation_size: 4194304
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                     Options.is_fd_close_on_exec: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.advise_random_on_open: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.db_write_buffer_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.write_buffer_manager: 0x560258ff2460
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.access_hint_on_compaction_start: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.random_access_max_buffer_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                      Options.use_adaptive_mutex: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                            Options.rate_limiter: (nil)
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.sst_file_manager.rate_bytes_per_sec: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.wal_recovery_mode: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.enable_thread_tracking: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.enable_pipelined_write: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.unordered_write: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.allow_concurrent_memtable_write: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.enable_write_thread_adaptive_yield: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.write_thread_max_yield_usec: 100
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.write_thread_slow_yield_usec: 3
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.row_cache: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                              Options.wal_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.avoid_flush_during_recovery: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.allow_ingest_behind: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.two_write_queues: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.manual_wal_flush: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.wal_compression: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.atomic_flush: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.avoid_unnecessary_blocking_io: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.persist_stats_to_disk: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.write_dbid_to_manifest: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.log_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.file_checksum_gen_factory: Unknown
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.best_efforts_recovery: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bgerror_resume_count: 2147483647
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bgerror_resume_retry_interval: 1000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.allow_data_in_errors: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.db_host_id: __hostname__
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.enforce_single_del_contracts: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.max_background_jobs: 4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.max_background_compactions: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.max_subcompactions: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.avoid_flush_during_shutdown: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.writable_file_max_buffer_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.delayed_write_rate : 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.max_total_wal_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.delete_obsolete_files_period_micros: 21600000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.stats_dump_period_sec: 600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.stats_persist_period_sec: 600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.stats_history_buffer_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.max_open_files: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.bytes_per_sync: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                      Options.wal_bytes_per_sync: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.strict_bytes_per_sync: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.compaction_readahead_size: 2097152
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.max_background_flushes: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Compression algorithms supported:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         kZSTD supported: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         kXpressCompression supported: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         kBZip2Compression supported: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         kZSTDNotFinalCompression supported: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         kLZ4Compression supported: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         kZlibCompression supported: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         kLZ4HCCompression supported: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         kSnappyCompression supported: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Fast CRC32 supported: Supported on x86
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: DMutex implementation: pthread_mutex_t
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: db/MANIFEST-000032
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: .T:int64_array.b:bitwise_xor
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dccc0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-0]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dccc0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-1]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dccc0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-2]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dccc0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-0]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dccc0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 podman[206120]: 2025-10-11 01:43:11.386873584 +0000 UTC m=+0.215167307 container init 73997a7fc3855e514bd3785fbe065d1a456c0d1fbc64ff209c5d8f7cd3a5ac56 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=tender_ganguly, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-1]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dccc0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-2]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dccc0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-0]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dd2e0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4430
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-1]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dd2e0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4430
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-2]:
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5602580dd2e0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5602580c4430
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:635]         (skipping printing options)
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/column_family.cc:635]         (skipping printing options)
Oct 11 01:43:11 compute-0 podman[206120]: 2025-10-11 01:43:11.400981042 +0000 UTC m=+0.229274685 container start 73997a7fc3855e514bd3785fbe065d1a456c0d1fbc64ff209c5d8f7cd3a5ac56 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=tender_ganguly, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 01:43:11 compute-0 podman[206120]: 2025-10-11 01:43:11.405843474 +0000 UTC m=+0.234137197 container attach 73997a7fc3855e514bd3785fbe065d1a456c0d1fbc64ff209c5d8f7cd3a5ac56 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=tender_ganguly, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:db/MANIFEST-000032 succeeded,manifest_file_number is 32, next_file_number is 34, last_sequence is 12, log_number is 5,prev_log_number is 0,max_column_family is 11,min_log_number_to_keep is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [m-0] (ID 1), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [m-1] (ID 2), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [m-2] (ID 3), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [p-0] (ID 4), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [p-1] (ID 5), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [p-2] (ID 6), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [O-0] (ID 7), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [O-1] (ID 8), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [O-2] (ID 9), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [L] (ID 10), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5581] Column family [P] (ID 11), log number is 5
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: ce008203-df68-4ddb-b66d-20dc977714e1
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146991407487, "job": 1, "event": "recovery_started", "wal_files": [31]}
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #31 mode 2
Oct 11 01:43:11 compute-0 tender_ganguly[206136]: 167 167
Oct 11 01:43:11 compute-0 systemd[1]: libpod-73997a7fc3855e514bd3785fbe065d1a456c0d1fbc64ff209c5d8f7cd3a5ac56.scope: Deactivated successfully.
Oct 11 01:43:11 compute-0 conmon[206136]: conmon 73997a7fc3855e514bd3 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-73997a7fc3855e514bd3785fbe065d1a456c0d1fbc64ff209c5d8f7cd3a5ac56.scope/container/memory.events
Oct 11 01:43:11 compute-0 podman[206120]: 2025-10-11 01:43:11.413782263 +0000 UTC m=+0.242075906 container died 73997a7fc3855e514bd3785fbe065d1a456c0d1fbc64ff209c5d8f7cd3a5ac56 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=tender_ganguly, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146991418888, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 35, "file_size": 1272, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 13, "largest_seqno": 21, "table_properties": {"data_size": 128, "index_size": 27, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 87, "raw_average_key_size": 17, "raw_value_size": 82, "raw_average_value_size": 16, "num_data_blocks": 1, "num_entries": 5, "num_filter_entries": 5, "num_deletions": 0, "num_merge_operands": 2, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": ".T:int64_array.b:bitwise_xor", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "LZ4", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146991, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "ce008203-df68-4ddb-b66d-20dc977714e1", "db_session_id": "CUASSC5MKCX3EGTCJBY1", "orig_file_number": 35, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146991424919, "cf_name": "p-0", "job": 1, "event": "table_file_creation", "file_number": 36, "file_size": 1594, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 14, "largest_seqno": 15, "table_properties": {"data_size": 468, "index_size": 39, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 72, "raw_average_key_size": 36, "raw_value_size": 567, "raw_average_value_size": 283, "num_data_blocks": 1, "num_entries": 2, "num_filter_entries": 2, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "p-0", "column_family_id": 4, "comparator": "leveldb.BytewiseComparator", "merge_operator": "nullptr", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "LZ4", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146991, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "ce008203-df68-4ddb-b66d-20dc977714e1", "db_session_id": "CUASSC5MKCX3EGTCJBY1", "orig_file_number": 36, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146991429949, "cf_name": "O-2", "job": 1, "event": "table_file_creation", "file_number": 37, "file_size": 1275, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 16, "largest_seqno": 16, "table_properties": {"data_size": 121, "index_size": 64, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 55, "raw_average_key_size": 55, "raw_value_size": 50, "raw_average_value_size": 50, "num_data_blocks": 1, "num_entries": 1, "num_filter_entries": 1, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "O-2", "column_family_id": 9, "comparator": "leveldb.BytewiseComparator", "merge_operator": "nullptr", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "LZ4", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146991, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "ce008203-df68-4ddb-b66d-20dc977714e1", "db_session_id": "CUASSC5MKCX3EGTCJBY1", "orig_file_number": 37, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146991432577, "job": 1, "event": "recovery_finished"}
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/version_set.cc:5047] Creating manifest 40
Oct 11 01:43:11 compute-0 systemd[1]: var-lib-containers-storage-overlay-fe21f962efff7d26f28a7655d2f0e00cfc0e733b7c423dc774b8e0e9c7bbd69d-merged.mount: Deactivated successfully.
Oct 11 01:43:11 compute-0 podman[206120]: 2025-10-11 01:43:11.465436458 +0000 UTC m=+0.293730101 container remove 73997a7fc3855e514bd3785fbe065d1a456c0d1fbc64ff209c5d8f7cd3a5ac56 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=tender_ganguly, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x5602590b2000
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: DB pointer 0x5602580f9a00
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _open_db opened rocksdb path db options compression=kLZ4Compression,max_write_buffer_number=64,min_write_buffer_number_to_merge=6,compaction_style=kCompactionStyleLevel,write_buffer_size=16777216,max_background_jobs=4,level0_file_num_compaction_trigger=8,max_bytes_for_level_base=1073741824,max_bytes_for_level_multiplier=8,compaction_readahead_size=2MB,max_total_wal_size=1073741824,writable_file_max_buffer_size=0
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _upgrade_super from 4, latest 4
Oct 11 01:43:11 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _upgrade_super done
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 01:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
                                            Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
                                            Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      2/0    2.63 KB   0.2      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                             Sum      2/0    2.63 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.6e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
                                            
                                            ** Compaction Stats [m-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.6e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-0] **
                                            
                                            ** Compaction Stats [m-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.6e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-1] **
                                            
                                            ** Compaction Stats [m-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.6e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-2] **
                                            
                                            ** Compaction Stats [p-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.56 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.01              0.00         1    0.006       0      0       0.0       0.0
                                             Sum      1/0    1.56 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.01              0.00         1    0.006       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.01              0.00         1    0.006       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.3      0.01              0.00         1    0.006       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.6e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-0] **
                                            
                                            ** Compaction Stats [p-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.6e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-1] **
                                            
                                            ** Compaction Stats [p-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.6e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-2] **
                                            
                                            ** Compaction Stats [O-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4430#2 capacity: 512.00 MB usage: 0.25 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 2 last_secs: 1.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.14 KB,2.68221e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-0] **
                                            
                                            ** Compaction Stats [O-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4430#2 capacity: 512.00 MB usage: 0.25 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 2 last_secs: 1.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.14 KB,2.68221e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-1] **
                                            
                                            ** Compaction Stats [O-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.25 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Sum      1/0    1.25 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.00              0.00         1    0.005       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.2      0.00              0.00         1    0.005       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4430#2 capacity: 512.00 MB usage: 0.25 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 2 last_secs: 1.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.14 KB,2.68221e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-2] **
                                            
                                            ** Compaction Stats [L] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [L] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.6e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [L] **
                                            
                                            ** Compaction Stats [P] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [P] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.6e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [P] **
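
The block cache entry lines in these RocksDB dumps follow a regular Name(count,size,portion%) pattern, which makes them easy to post-process when triaging cache behaviour. A minimal parsing sketch (plain Python; illustrative only, not Ceph code):

    import re

    # One entry per cache category: Name(count,size,portion%), as in the dump above.
    line = ("Block cache entry stats(count,size,portion): "
            "DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) "
            "IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)")
    pat = re.compile(r"(\w+)\((\d+),([\d.]+ [KMG]B),([\d.eE+-]+)%\)")
    stats = {name: (int(count), size, float(pct))
             for name, count, size, pct in pat.findall(line)}
    print(stats["DataBlock"])   # (2, '0.72 KB', 0.000152323)
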
Oct 11 01:43:11 compute-0 ceph-osd[205667]: <cls> /home/jenkins-build/build/workspace/ceph-build/ARCH/x86_64/AVAILABLE_ARCH/x86_64/AVAILABLE_DIST/centos9/DIST/centos9/MACHINE_SIZE/gigantic/release/18.2.7/rpm/el9/BUILD/ceph-18.2.7/src/cls/cephfs/cls_cephfs.cc:201: loading cephfs
Oct 11 01:43:11 compute-0 ceph-osd[205667]: <cls> /home/jenkins-build/build/workspace/ceph-build/ARCH/x86_64/AVAILABLE_ARCH/x86_64/AVAILABLE_DIST/centos9/DIST/centos9/MACHINE_SIZE/gigantic/release/18.2.7/rpm/el9/BUILD/ceph-18.2.7/src/cls/hello/cls_hello.cc:316: loading cls_hello
Oct 11 01:43:11 compute-0 ceph-osd[205667]: _get_class not permitted to load lua
Oct 11 01:43:11 compute-0 ceph-osd[205667]: _get_class not permitted to load sdk
Oct 11 01:43:11 compute-0 ceph-osd[205667]: _get_class not permitted to load test_remote_reads
Oct 11 01:43:11 compute-0 ceph-osd[205667]: osd.0 0 crush map has features 288232575208783872, adjusting msgr requires for clients
Oct 11 01:43:11 compute-0 ceph-osd[205667]: osd.0 0 crush map has features 288232575208783872 was 8705, adjusting msgr requires for mons
Oct 11 01:43:11 compute-0 ceph-osd[205667]: osd.0 0 crush map has features 288232575208783872, adjusting msgr requires for osds
Oct 11 01:43:11 compute-0 ceph-osd[205667]: osd.0 0 check_osdmap_features enabling on-disk ERASURE CODES compat feature
Oct 11 01:43:11 compute-0 ceph-osd[205667]: osd.0 0 load_pgs
Oct 11 01:43:11 compute-0 ceph-osd[205667]: osd.0 0 load_pgs opened 0 pgs
Oct 11 01:43:11 compute-0 ceph-osd[205667]: osd.0 0 log_to_monitors true
Oct 11 01:43:11 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0[205663]: 2025-10-11T01:43:11.478+0000 7f0e4fb89740 -1 osd.0 0 log_to_monitors true
Oct 11 01:43:11 compute-0 systemd[1]: libpod-conmon-73997a7fc3855e514bd3785fbe065d1a456c0d1fbc64ff209c5d8f7cd3a5ac56.scope: Deactivated successfully.
Oct 11 01:43:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]} v 0) v1
Oct 11 01:43:11 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='osd.0 [v2:192.168.122.100:6802/3838022694,v1:192.168.122.100:6803/3838022694]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
Oct 11 01:43:11 compute-0 podman[206381]: 2025-10-11 01:43:11.854967623 +0000 UTC m=+0.095275968 container create 4f318223e54a7af75cda154acc07d7aa14b2c0039e95140fb5e89a27b1507f8d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate-test, org.label-schema.build-date=20250507, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:43:11 compute-0 podman[206381]: 2025-10-11 01:43:11.818439264 +0000 UTC m=+0.058747659 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:11 compute-0 systemd[1]: Started libpod-conmon-4f318223e54a7af75cda154acc07d7aa14b2c0039e95140fb5e89a27b1507f8d.scope.
Oct 11 01:43:11 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/592b6d0b53dcc7247877aabe17b7a9b448b8e132b56366210788ab05132e4c16/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/592b6d0b53dcc7247877aabe17b7a9b448b8e132b56366210788ab05132e4c16/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/592b6d0b53dcc7247877aabe17b7a9b448b8e132b56366210788ab05132e4c16/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/592b6d0b53dcc7247877aabe17b7a9b448b8e132b56366210788ab05132e4c16/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/592b6d0b53dcc7247877aabe17b7a9b448b8e132b56366210788ab05132e4c16/merged/var/lib/ceph/osd/ceph-1 supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:12 compute-0 podman[206381]: 2025-10-11 01:43:12.035193504 +0000 UTC m=+0.275501849 container init 4f318223e54a7af75cda154acc07d7aa14b2c0039e95140fb5e89a27b1507f8d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate-test, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 01:43:12 compute-0 podman[206381]: 2025-10-11 01:43:12.064456587 +0000 UTC m=+0.304764912 container start 4f318223e54a7af75cda154acc07d7aa14b2c0039e95140fb5e89a27b1507f8d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate-test, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True)
Oct 11 01:43:12 compute-0 podman[206381]: 2025-10-11 01:43:12.070505932 +0000 UTC m=+0.310814347 container attach 4f318223e54a7af75cda154acc07d7aa14b2c0039e95140fb5e89a27b1507f8d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate-test, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e6 do_prune osdmap full prune enabled
Oct 11 01:43:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e6 encode_pending skipping prime_pg_temp; mapping job did not start
Oct 11 01:43:12 compute-0 ceph-mon[191930]: from='osd.0 [v2:192.168.122.100:6802/3838022694,v1:192.168.122.100:6803/3838022694]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
Oct 11 01:43:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='osd.0 [v2:192.168.122.100:6802/3838022694,v1:192.168.122.100:6803/3838022694]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
Oct 11 01:43:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e7 e7: 3 total, 0 up, 3 in
Oct 11 01:43:12 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e7: 3 total, 0 up, 3 in
Oct 11 01:43:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=compute-0", "root=default"]} v 0) v1
Oct 11 01:43:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='osd.0 [v2:192.168.122.100:6802/3838022694,v1:192.168.122.100:6803/3838022694]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=compute-0", "root=default"]}]: dispatch
Oct 11 01:43:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e7 create-or-move crush item name 'osd.0' initial_weight 0.0195 at location {host=compute-0,root=default}
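
The 0.0195 initial weight is simply the device capacity expressed in TiB, the unit CRUSH weights use: the OSD block devices in this deployment are 21470642176 bytes (20 GiB, per the bluestore open lines further down), and 21470642176 / 2^40 ≈ 0.0195. A one-line check (a sketch; it assumes all three OSDs sit on equally sized LVs, as the logs suggest):

    size_bytes = 21470642176             # "open size 21470642176 (0x4ffc00000, 20 GiB)"
    print(round(size_bytes / 2**40, 4))  # 0.0195, the CRUSH initial_weight above
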
Oct 11 01:43:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 0} v 0) v1
Oct 11 01:43:12 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:43:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 1} v 0) v1
Oct 11 01:43:12 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:12 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.0: (2) No such file or directory
Oct 11 01:43:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 2} v 0) v1
Oct 11 01:43:12 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:12 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.1: (2) No such file or directory
Oct 11 01:43:12 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.2: (2) No such file or directory
Oct 11 01:43:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v36: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:12 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : purged_snaps scrub starts
Oct 11 01:43:12 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : purged_snaps scrub ok
Oct 11 01:43:12 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate-test[206396]: usage: ceph-volume activate [-h] [--osd-id OSD_ID] [--osd-uuid OSD_UUID]
Oct 11 01:43:12 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate-test[206396]:                             [--no-systemd] [--no-tmpfs]
Oct 11 01:43:12 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate-test[206396]: ceph-volume activate: error: unrecognized arguments: --bad-option
Oct 11 01:43:12 compute-0 systemd[1]: libpod-4f318223e54a7af75cda154acc07d7aa14b2c0039e95140fb5e89a27b1507f8d.scope: Deactivated successfully.
Oct 11 01:43:12 compute-0 podman[206381]: 2025-10-11 01:43:12.739375181 +0000 UTC m=+0.979683556 container died 4f318223e54a7af75cda154acc07d7aa14b2c0039e95140fb5e89a27b1507f8d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate-test, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:43:12 compute-0 systemd[1]: var-lib-containers-storage-overlay-592b6d0b53dcc7247877aabe17b7a9b448b8e132b56366210788ab05132e4c16-merged.mount: Deactivated successfully.
Oct 11 01:43:12 compute-0 podman[206381]: 2025-10-11 01:43:12.849431539 +0000 UTC m=+1.089739864 container remove 4f318223e54a7af75cda154acc07d7aa14b2c0039e95140fb5e89a27b1507f8d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate-test, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:43:12 compute-0 systemd[1]: libpod-conmon-4f318223e54a7af75cda154acc07d7aa14b2c0039e95140fb5e89a27b1507f8d.scope: Deactivated successfully.
Oct 11 01:43:13 compute-0 systemd[1]: Reloading.
Oct 11 01:43:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e7 do_prune osdmap full prune enabled
Oct 11 01:43:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e7 encode_pending skipping prime_pg_temp; mapping job did not start
Oct 11 01:43:13 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='osd.0 [v2:192.168.122.100:6802/3838022694,v1:192.168.122.100:6803/3838022694]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=compute-0", "root=default"]}]': finished
Oct 11 01:43:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e8 e8: 3 total, 0 up, 3 in
Oct 11 01:43:13 compute-0 ceph-osd[205667]: osd.0 0 done with init, starting boot process
Oct 11 01:43:13 compute-0 ceph-osd[205667]: osd.0 0 start_boot
Oct 11 01:43:13 compute-0 ceph-osd[205667]: osd.0 0 maybe_override_options_for_qos osd_max_backfills set to 1
Oct 11 01:43:13 compute-0 ceph-osd[205667]: osd.0 0 maybe_override_options_for_qos osd_recovery_max_active set to 0
Oct 11 01:43:13 compute-0 ceph-osd[205667]: osd.0 0 maybe_override_options_for_qos osd_recovery_max_active_hdd set to 3
Oct 11 01:43:13 compute-0 ceph-osd[205667]: osd.0 0 maybe_override_options_for_qos osd_recovery_max_active_ssd set to 10
Oct 11 01:43:13 compute-0 ceph-osd[205667]: osd.0 0  bench count 12288000 bsize 4 KiB
Oct 11 01:43:13 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e8: 3 total, 0 up, 3 in
Oct 11 01:43:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 0} v 0) v1
Oct 11 01:43:13 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:43:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 1} v 0) v1
Oct 11 01:43:13 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 2} v 0) v1
Oct 11 01:43:13 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:13 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.0: (2) No such file or directory
Oct 11 01:43:13 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.1: (2) No such file or directory
Oct 11 01:43:13 compute-0 ceph-mon[191930]: from='osd.0 [v2:192.168.122.100:6802/3838022694,v1:192.168.122.100:6803/3838022694]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
Oct 11 01:43:13 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.2: (2) No such file or directory
Oct 11 01:43:13 compute-0 ceph-mon[191930]: osdmap e7: 3 total, 0 up, 3 in
Oct 11 01:43:13 compute-0 ceph-mon[191930]: from='osd.0 [v2:192.168.122.100:6802/3838022694,v1:192.168.122.100:6803/3838022694]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=compute-0", "root=default"]}]: dispatch
Oct 11 01:43:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:43:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:13 compute-0 ceph-mon[191930]: pgmap v36: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:13 compute-0 ceph-mgr[192233]: mgr.server handle_open ignoring open from osd.0 v2:192.168.122.100:6802/3838022694; not ready for session (expect reconnect)
Oct 11 01:43:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 0} v 0) v1
Oct 11 01:43:13 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:43:13 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.0: (2) No such file or directory
Oct 11 01:43:13 compute-0 systemd-rc-local-generator[206459]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:43:13 compute-0 systemd-sysv-generator[206463]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:43:13 compute-0 systemd[1]: Reloading.
Oct 11 01:43:14 compute-0 systemd-rc-local-generator[206499]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:43:14 compute-0 systemd-sysv-generator[206504]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:43:14 compute-0 ceph-mgr[192233]: mgr.server handle_open ignoring open from osd.0 v2:192.168.122.100:6802/3838022694; not ready for session (expect reconnect)
Oct 11 01:43:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 0} v 0) v1
Oct 11 01:43:14 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:43:14 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.0: (2) No such file or directory
Oct 11 01:43:14 compute-0 ceph-mon[191930]: from='osd.0 [v2:192.168.122.100:6802/3838022694,v1:192.168.122.100:6803/3838022694]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=compute-0", "root=default"]}]': finished
Oct 11 01:43:14 compute-0 ceph-mon[191930]: osdmap e8: 3 total, 0 up, 3 in
Oct 11 01:43:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:43:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:43:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v38: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:14 compute-0 systemd[1]: Starting Ceph osd.1 for 3c7617c3-7a20-523e-a9de-20c0d6ba41da...
Oct 11 01:43:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e8 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:43:14 compute-0 podman[206554]: 2025-10-11 01:43:14.967290654 +0000 UTC m=+0.076126304 container create 2379fd66376e59f719ecbd4c84e8947e2a6edd06d5671fbd8f8ef8d20bd2f42d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 01:43:15 compute-0 podman[206554]: 2025-10-11 01:43:14.934022493 +0000 UTC m=+0.042858163 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:15 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d927cb02917f3f960f0ef09d280465144ee375abdee3e7bc6d49e70d5d0de818/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d927cb02917f3f960f0ef09d280465144ee375abdee3e7bc6d49e70d5d0de818/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d927cb02917f3f960f0ef09d280465144ee375abdee3e7bc6d49e70d5d0de818/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d927cb02917f3f960f0ef09d280465144ee375abdee3e7bc6d49e70d5d0de818/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d927cb02917f3f960f0ef09d280465144ee375abdee3e7bc6d49e70d5d0de818/merged/var/lib/ceph/osd/ceph-1 supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:15 compute-0 podman[206554]: 2025-10-11 01:43:15.153896747 +0000 UTC m=+0.262732457 container init 2379fd66376e59f719ecbd4c84e8947e2a6edd06d5671fbd8f8ef8d20bd2f42d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:43:15 compute-0 podman[206554]: 2025-10-11 01:43:15.181318005 +0000 UTC m=+0.290153655 container start 2379fd66376e59f719ecbd4c84e8947e2a6edd06d5671fbd8f8ef8d20bd2f42d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:43:15 compute-0 podman[206554]: 2025-10-11 01:43:15.203496616 +0000 UTC m=+0.312332266 container attach 2379fd66376e59f719ecbd4c84e8947e2a6edd06d5671fbd8f8ef8d20bd2f42d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True)
Oct 11 01:43:15 compute-0 podman[206566]: 2025-10-11 01:43:15.241575932 +0000 UTC m=+0.197910986 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, io.buildah.version=1.41.4, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=edpm, org.label-schema.license=GPLv2)
Oct 11 01:43:15 compute-0 ceph-mgr[192233]: mgr.server handle_open ignoring open from osd.0 v2:192.168.122.100:6802/3838022694; not ready for session (expect reconnect)
Oct 11 01:43:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 0} v 0) v1
Oct 11 01:43:15 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:43:15 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.0: (2) No such file or directory
Oct 11 01:43:15 compute-0 ceph-mon[191930]: purged_snaps scrub starts
Oct 11 01:43:15 compute-0 ceph-mon[191930]: purged_snaps scrub ok
Oct 11 01:43:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:43:15 compute-0 ceph-mon[191930]: pgmap v38: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:43:16 compute-0 ceph-mgr[192233]: mgr.server handle_open ignoring open from osd.0 v2:192.168.122.100:6802/3838022694; not ready for session (expect reconnect)
Oct 11 01:43:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 0} v 0) v1
Oct 11 01:43:16 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:43:16 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.0: (2) No such file or directory
Oct 11 01:43:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:43:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v39: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:16 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate[206574]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
Oct 11 01:43:16 compute-0 bash[206554]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
Oct 11 01:43:16 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate[206574]: Running command: /usr/bin/ceph-bluestore-tool prime-osd-dir --path /var/lib/ceph/osd/ceph-1 --no-mon-config --dev /dev/mapper/ceph_vg1-ceph_lv1
Oct 11 01:43:16 compute-0 bash[206554]: Running command: /usr/bin/ceph-bluestore-tool prime-osd-dir --path /var/lib/ceph/osd/ceph-1 --no-mon-config --dev /dev/mapper/ceph_vg1-ceph_lv1
Oct 11 01:43:16 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate[206574]: Running command: /usr/bin/chown -h ceph:ceph /dev/mapper/ceph_vg1-ceph_lv1
Oct 11 01:43:16 compute-0 bash[206554]: Running command: /usr/bin/chown -h ceph:ceph /dev/mapper/ceph_vg1-ceph_lv1
Oct 11 01:43:16 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate[206574]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1
Oct 11 01:43:16 compute-0 bash[206554]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1
Oct 11 01:43:16 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate[206574]: Running command: /usr/bin/ln -s /dev/mapper/ceph_vg1-ceph_lv1 /var/lib/ceph/osd/ceph-1/block
Oct 11 01:43:16 compute-0 bash[206554]: Running command: /usr/bin/ln -s /dev/mapper/ceph_vg1-ceph_lv1 /var/lib/ceph/osd/ceph-1/block
Oct 11 01:43:16 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate[206574]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
Oct 11 01:43:16 compute-0 bash[206554]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
Oct 11 01:43:16 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate[206574]: --> ceph-volume raw activate successful for osd ID: 1
Oct 11 01:43:16 compute-0 bash[206554]: --> ceph-volume raw activate successful for osd ID: 1
Oct 11 01:43:16 compute-0 systemd[1]: libpod-2379fd66376e59f719ecbd4c84e8947e2a6edd06d5671fbd8f8ef8d20bd2f42d.scope: Deactivated successfully.
Oct 11 01:43:16 compute-0 podman[206554]: 2025-10-11 01:43:16.517191997 +0000 UTC m=+1.626027627 container died 2379fd66376e59f719ecbd4c84e8947e2a6edd06d5671fbd8f8ef8d20bd2f42d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 01:43:16 compute-0 systemd[1]: libpod-2379fd66376e59f719ecbd4c84e8947e2a6edd06d5671fbd8f8ef8d20bd2f42d.scope: Consumed 1.335s CPU time.
Oct 11 01:43:16 compute-0 systemd[1]: var-lib-containers-storage-overlay-d927cb02917f3f960f0ef09d280465144ee375abdee3e7bc6d49e70d5d0de818-merged.mount: Deactivated successfully.
Oct 11 01:43:16 compute-0 podman[206554]: 2025-10-11 01:43:16.629316031 +0000 UTC m=+1.738151651 container remove 2379fd66376e59f719ecbd4c84e8947e2a6edd06d5671fbd8f8ef8d20bd2f42d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1-activate, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507)
Oct 11 01:43:16 compute-0 ceph-osd[205667]: osd.0 0 maybe_override_max_osd_capacity_for_qos osd bench result - bandwidth (MiB/sec): 25.650 iops: 6566.425 elapsed_sec: 0.457
Oct 11 01:43:16 compute-0 ceph-osd[205667]: log_channel(cluster) log [WRN] : OSD bench result of 6566.425152 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.0. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd].
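
The bench figures are internally consistent: "bench count 12288000 bsize 4 KiB" (logged a few seconds earlier) is 3000 IOs, which at the logged 6566.425 IOPS takes the reported 0.457 s and moves about 25.65 MiB/s; and because 6566 falls outside the [50, 500] IOPS plausibility window for an hdd-class OSD, mClock keeps the default 315 IOPS capacity, exactly as the warning states. A quick recomputation (a sketch; reading "count" as total bytes is an assumption the numbers bear out):

    count_bytes = 12288000            # "bench count 12288000 bsize 4 KiB"
    bsize = 4096
    iops = 6566.425                   # logged result
    ios = count_bytes / bsize         # 3000 IOs
    elapsed = ios / iops              # ~0.4569 s -> logged elapsed_sec 0.457
    bw_mib = iops * bsize / 2**20     # ~25.65   -> logged bandwidth (MiB/sec) 25.650
    in_range = 50.0 <= iops <= 500.0  # False    -> capacity stays at 315 IOPS
    print(round(elapsed, 3), round(bw_mib, 3), in_range)
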
Oct 11 01:43:16 compute-0 ceph-osd[205667]: osd.0 0 waiting for initial osdmap
Oct 11 01:43:16 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0[205663]: 2025-10-11T01:43:16.666+0000 7f0e4bb09640 -1 osd.0 0 waiting for initial osdmap
Oct 11 01:43:16 compute-0 ceph-osd[205667]: osd.0 8 crush map has features 288514050185494528, adjusting msgr requires for clients
Oct 11 01:43:16 compute-0 ceph-osd[205667]: osd.0 8 crush map has features 288514050185494528 was 288232575208792577, adjusting msgr requires for mons
Oct 11 01:43:16 compute-0 ceph-osd[205667]: osd.0 8 crush map has features 3314932999778484224, adjusting msgr requires for osds
Oct 11 01:43:16 compute-0 ceph-osd[205667]: osd.0 8 check_osdmap_features require_osd_release unknown -> reef
Oct 11 01:43:16 compute-0 ceph-osd[205667]: osd.0 8 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
Oct 11 01:43:16 compute-0 ceph-osd[205667]: osd.0 8 set_numa_affinity not setting numa affinity
Oct 11 01:43:16 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-0[205663]: 2025-10-11T01:43:16.698+0000 7f0e47131640 -1 osd.0 8 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
Oct 11 01:43:16 compute-0 ceph-osd[205667]: osd.0 8 _collect_metadata loop3:  no unique device id for loop3: fallback method has no model nor serial
Oct 11 01:43:17 compute-0 podman[206781]: 2025-10-11 01:43:17.043318024 +0000 UTC m=+0.087365968 container create 19dc149c8af7a96e8735429c581ad09d670406154110871e41ac68b52d95103f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 01:43:17 compute-0 podman[206781]: 2025-10-11 01:43:17.00568336 +0000 UTC m=+0.049731354 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b2ea19fdd80100d7d1c4a1dd3093f75778469a5a7c7c4700c5e5fb2cc0753aac/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b2ea19fdd80100d7d1c4a1dd3093f75778469a5a7c7c4700c5e5fb2cc0753aac/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b2ea19fdd80100d7d1c4a1dd3093f75778469a5a7c7c4700c5e5fb2cc0753aac/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b2ea19fdd80100d7d1c4a1dd3093f75778469a5a7c7c4700c5e5fb2cc0753aac/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b2ea19fdd80100d7d1c4a1dd3093f75778469a5a7c7c4700c5e5fb2cc0753aac/merged/var/lib/ceph/osd/ceph-1 supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:17 compute-0 podman[206781]: 2025-10-11 01:43:17.185671712 +0000 UTC m=+0.229719686 container init 19dc149c8af7a96e8735429c581ad09d670406154110871e41ac68b52d95103f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2)
Oct 11 01:43:17 compute-0 podman[206781]: 2025-10-11 01:43:17.212384499 +0000 UTC m=+0.256432443 container start 19dc149c8af7a96e8735429c581ad09d670406154110871e41ac68b52d95103f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:43:17 compute-0 bash[206781]: 19dc149c8af7a96e8735429c581ad09d670406154110871e41ac68b52d95103f
Oct 11 01:43:17 compute-0 systemd[1]: Started Ceph osd.1 for 3c7617c3-7a20-523e-a9de-20c0d6ba41da.
Oct 11 01:43:17 compute-0 ceph-osd[206800]: set uid:gid to 167:167 (ceph:ceph)
Oct 11 01:43:17 compute-0 ceph-osd[206800]: ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable), process ceph-osd, pid 2
Oct 11 01:43:17 compute-0 ceph-osd[206800]: pidfile_write: ignore empty --pid-file
Oct 11 01:43:17 compute-0 ceph-osd[206800]: bdev(0x559c9dda3800 /var/lib/ceph/osd/ceph-1/block) open path /var/lib/ceph/osd/ceph-1/block
Oct 11 01:43:17 compute-0 ceph-osd[206800]: bdev(0x559c9dda3800 /var/lib/ceph/osd/ceph-1/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-1/block failed: (22) Invalid argument
Oct 11 01:43:17 compute-0 ceph-osd[206800]: bdev(0x559c9dda3800 /var/lib/ceph/osd/ceph-1/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:17 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _set_cache_sizes cache_size 1073741824 meta 0.45 kv 0.45 kv_onode 0.04 data 0.06
Oct 11 01:43:17 compute-0 ceph-osd[206800]: bdev(0x559c9ebe5800 /var/lib/ceph/osd/ceph-1/block) open path /var/lib/ceph/osd/ceph-1/block
Oct 11 01:43:17 compute-0 ceph-osd[206800]: bdev(0x559c9ebe5800 /var/lib/ceph/osd/ceph-1/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-1/block failed: (22) Invalid argument
Oct 11 01:43:17 compute-0 ceph-osd[206800]: bdev(0x559c9ebe5800 /var/lib/ceph/osd/ceph-1/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:17 compute-0 ceph-osd[206800]: bluefs add_block_device bdev 1 path /var/lib/ceph/osd/ceph-1/block size 20 GiB
Oct 11 01:43:17 compute-0 ceph-osd[206800]: bdev(0x559c9ebe5800 /var/lib/ceph/osd/ceph-1/block) close
Oct 11 01:43:17 compute-0 sudo[205850]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:43:17 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:17 compute-0 ceph-mgr[192233]: mgr.server handle_open ignoring open from osd.0 v2:192.168.122.100:6802/3838022694; not ready for session (expect reconnect)
Oct 11 01:43:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:43:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 0} v 0) v1
Oct 11 01:43:17 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:43:17 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.0: (2) No such file or directory
Oct 11 01:43:17 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "osd.2"} v 0) v1
Oct 11 01:43:17 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
Oct 11 01:43:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:43:17 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:17 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.serve] Deploying daemon osd.2 on compute-0
Oct 11 01:43:17 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Deploying daemon osd.2 on compute-0
Oct 11 01:43:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e8 do_prune osdmap full prune enabled
Oct 11 01:43:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e8 encode_pending skipping prime_pg_temp; mapping job did not start
Oct 11 01:43:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e9 e9: 3 total, 1 up, 3 in
Oct 11 01:43:17 compute-0 ceph-mon[191930]: pgmap v39: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
Oct 11 01:43:17 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:17 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:43:17 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:17 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
Oct 11 01:43:17 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:17 compute-0 ceph-osd[205667]: osd.0 9 state: booting -> active
Oct 11 01:43:17 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : osd.0 [v2:192.168.122.100:6802/3838022694,v1:192.168.122.100:6803/3838022694] boot
Oct 11 01:43:17 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e9: 3 total, 1 up, 3 in
Oct 11 01:43:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 0} v 0) v1
Oct 11 01:43:17 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:43:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 1} v 0) v1
Oct 11 01:43:17 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 2} v 0) v1
Oct 11 01:43:17 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:17 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.1: (2) No such file or directory
Oct 11 01:43:17 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.2: (2) No such file or directory
Oct 11 01:43:17 compute-0 sudo[206813]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:17 compute-0 sudo[206813]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:17 compute-0 sudo[206813]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:17 compute-0 ceph-osd[206800]: bdev(0x559c9dda3800 /var/lib/ceph/osd/ceph-1/block) close
Oct 11 01:43:17 compute-0 sudo[206838]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:43:17 compute-0 sudo[206838]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:17 compute-0 sudo[206838]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:17 compute-0 sudo[206864]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:17 compute-0 sudo[206864]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:17 compute-0 sudo[206864]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:17 compute-0 ceph-osd[206800]: starting osd.1 osd_data /var/lib/ceph/osd/ceph-1 /var/lib/ceph/osd/ceph-1/journal
Oct 11 01:43:17 compute-0 ceph-osd[206800]: load: jerasure load: lrc 
Oct 11 01:43:17 compute-0 ceph-osd[206800]: bdev(0x559c9ec78c00 /var/lib/ceph/osd/ceph-1/block) open path /var/lib/ceph/osd/ceph-1/block
Oct 11 01:43:17 compute-0 ceph-osd[206800]: bdev(0x559c9ec78c00 /var/lib/ceph/osd/ceph-1/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-1/block failed: (22) Invalid argument
Oct 11 01:43:17 compute-0 ceph-osd[206800]: bdev(0x559c9ec78c00 /var/lib/ceph/osd/ceph-1/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:17 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _set_cache_sizes cache_size 1073741824 meta 0.45 kv 0.45 kv_onode 0.04 data 0.06
Oct 11 01:43:17 compute-0 ceph-osd[206800]: bdev(0x559c9ec78c00 /var/lib/ceph/osd/ceph-1/block) close
Oct 11 01:43:17 compute-0 sudo[206889]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 _orch deploy --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:43:17 compute-0 sudo[206889]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bdev(0x559c9ec78c00 /var/lib/ceph/osd/ceph-1/block) open path /var/lib/ceph/osd/ceph-1/block
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bdev(0x559c9ec78c00 /var/lib/ceph/osd/ceph-1/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-1/block failed: (22) Invalid argument
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bdev(0x559c9ec78c00 /var/lib/ceph/osd/ceph-1/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _set_cache_sizes cache_size 1073741824 meta 0.45 kv 0.45 kv_onode 0.04 data 0.06
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bdev(0x559c9ec78c00 /var/lib/ceph/osd/ceph-1/block) close
Oct 11 01:43:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v41: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
Oct 11 01:43:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e9 do_prune osdmap full prune enabled
Oct 11 01:43:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e9 encode_pending skipping prime_pg_temp; mapping job did not start
Oct 11 01:43:18 compute-0 ceph-osd[206800]: mClockScheduler: set_osd_capacity_params_from_config: osd_bandwidth_cost_per_io: 499321.90 bytes/io, osd_bandwidth_capacity_per_shard 157286400.00 bytes/second
Oct 11 01:43:18 compute-0 ceph-osd[206800]: osd.1:0.OSDShard using op scheduler mclock_scheduler, cutoff=196
Oct 11 01:43:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e10 e10: 3 total, 1 up, 3 in
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bdev(0x559c9ec78c00 /var/lib/ceph/osd/ceph-1/block) open path /var/lib/ceph/osd/ceph-1/block
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bdev(0x559c9ec78c00 /var/lib/ceph/osd/ceph-1/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-1/block failed: (22) Invalid argument
Oct 11 01:43:18 compute-0 ceph-mon[191930]: OSD bench result of 6566.425152 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.0. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd].
Oct 11 01:43:18 compute-0 ceph-mon[191930]: Deploying daemon osd.2 on compute-0
Oct 11 01:43:18 compute-0 ceph-mon[191930]: osd.0 [v2:192.168.122.100:6802/3838022694,v1:192.168.122.100:6803/3838022694] boot
Oct 11 01:43:18 compute-0 ceph-mon[191930]: osdmap e9: 3 total, 1 up, 3 in
Oct 11 01:43:18 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
Oct 11 01:43:18 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:18 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bdev(0x559c9ec78c00 /var/lib/ceph/osd/ceph-1/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _set_cache_sizes cache_size 1073741824 meta 0.45 kv 0.45 kv_onode 0.04 data 0.06
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bdev(0x559c9ec79400 /var/lib/ceph/osd/ceph-1/block) open path /var/lib/ceph/osd/ceph-1/block
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bdev(0x559c9ec79400 /var/lib/ceph/osd/ceph-1/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-1/block failed: (22) Invalid argument
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bdev(0x559c9ec79400 /var/lib/ceph/osd/ceph-1/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluefs add_block_device bdev 1 path /var/lib/ceph/osd/ceph-1/block size 20 GiB
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluefs mount
Oct 11 01:43:18 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e10: 3 total, 1 up, 3 in
Oct 11 01:43:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 1} v 0) v1
Oct 11 01:43:18 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 2} v 0) v1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluefs _init_alloc shared, id 1, capacity 0x4ffc00000, block size 0x10000
Oct 11 01:43:18 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:18 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.1: (2) No such file or directory
Oct 11 01:43:18 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.2: (2) No such file or directory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluefs mount shared_bdev_used = 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _prepare_db_environment set db_paths to db,20397110067 db.slow,20397110067
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: RocksDB version: 7.9.2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Git sha 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Compile date 2025-05-06 23:30:25
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: DB SUMMARY
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: DB Session ID:  36KUXF3S98H0WIMUURP2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: CURRENT file:  CURRENT
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: IDENTITY file:  IDENTITY
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: MANIFEST file:  MANIFEST-000032 size: 1007 Bytes
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: SST files in db dir, Total Num: 1, files: 000030.sst 
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: SST files in db.slow dir, Total Num: 0, files: 
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Write Ahead Log file in db.wal: 000031.log size: 5093 ; 
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                         Options.error_if_exists: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.create_if_missing: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                         Options.paranoid_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.flush_verify_memtable_count: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.track_and_verify_wals_in_manifest: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.verify_sst_unique_id_in_manifest: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                                     Options.env: 0x559c9ec37d50
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                                      Options.fs: LegacyFileSystem
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                                Options.info_log: 0x559c9de2eb60
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_file_opening_threads: 16
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                              Options.statistics: (nil)
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.use_fsync: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.max_log_file_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.max_manifest_file_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.log_file_time_to_roll: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.keep_log_file_num: 1000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.recycle_log_file_num: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                         Options.allow_fallocate: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.allow_mmap_reads: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.allow_mmap_writes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.use_direct_reads: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.use_direct_io_for_flush_and_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.create_missing_column_families: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                              Options.db_log_dir: 
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                                 Options.wal_dir: db.wal
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.table_cache_numshardbits: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                         Options.WAL_ttl_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.WAL_size_limit_MB: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.max_write_batch_group_size_bytes: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.manifest_preallocation_size: 4194304
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                     Options.is_fd_close_on_exec: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.advise_random_on_open: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.db_write_buffer_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.write_buffer_manager: 0x559c9de62460
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.access_hint_on_compaction_start: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.random_access_max_buffer_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                      Options.use_adaptive_mutex: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                            Options.rate_limiter: (nil)
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.sst_file_manager.rate_bytes_per_sec: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.wal_recovery_mode: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.enable_thread_tracking: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.enable_pipelined_write: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.unordered_write: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.allow_concurrent_memtable_write: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.enable_write_thread_adaptive_yield: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.write_thread_max_yield_usec: 100
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.write_thread_slow_yield_usec: 3
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.row_cache: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                              Options.wal_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.avoid_flush_during_recovery: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.allow_ingest_behind: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.two_write_queues: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.manual_wal_flush: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.wal_compression: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.atomic_flush: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.avoid_unnecessary_blocking_io: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.persist_stats_to_disk: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.write_dbid_to_manifest: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.log_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.file_checksum_gen_factory: Unknown
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.best_efforts_recovery: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bgerror_resume_count: 2147483647
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bgerror_resume_retry_interval: 1000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.allow_data_in_errors: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.db_host_id: __hostname__
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.enforce_single_del_contracts: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.max_background_jobs: 4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.max_background_compactions: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.max_subcompactions: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.avoid_flush_during_shutdown: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.writable_file_max_buffer_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.delayed_write_rate : 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.max_total_wal_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.delete_obsolete_files_period_micros: 21600000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.stats_dump_period_sec: 600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.stats_persist_period_sec: 600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.stats_history_buffer_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.max_open_files: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.bytes_per_sync: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                      Options.wal_bytes_per_sync: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.strict_bytes_per_sync: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.compaction_readahead_size: 2097152
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.max_background_flushes: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Compression algorithms supported:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         kZSTD supported: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         kXpressCompression supported: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         kBZip2Compression supported: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         kZSTDNotFinalCompression supported: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         kLZ4Compression supported: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         kZlibCompression supported: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         kLZ4HCCompression supported: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         kSnappyCompression supported: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Fast CRC32 supported: Supported on x86
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: DMutex implementation: pthread_mutex_t
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl_readonly.cc:25] Opening the db in read only mode
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: db/MANIFEST-000032
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: .T:int64_array.b:bitwise_xor
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2f1c0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-0]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2f1c0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-1]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2f1c0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-2]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2f1c0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
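[editor's note] The per-shard values above map one-to-one onto rocksdb::ColumnFamilyOptions fields. A minimal sketch reconstructing the headline settings from the dump with stock RocksDB (a readability aid, not Ceph's actual configuration code, which derives these from its own option strings):

```cpp
// Reconstruction of the dumped per-shard settings using plain RocksDB.
// Every value below is copied from the log lines above; nothing is tuned.
#include <rocksdb/options.h>

rocksdb::ColumnFamilyOptions make_shard_options() {
  rocksdb::ColumnFamilyOptions o;
  o.write_buffer_size = 16 * 1024 * 1024;   // 16777216: 16 MiB memtables
  o.max_write_buffer_number = 64;
  o.min_write_buffer_number_to_merge = 6;   // merge 6 memtables per flush
  o.compression = rocksdb::kLZ4Compression; // Options.compression: LZ4
  o.num_levels = 7;
  o.level0_file_num_compaction_trigger = 8;
  o.level0_slowdown_writes_trigger = 20;
  o.level0_stop_writes_trigger = 36;
  o.target_file_size_base = 64ull << 20;    // 67108864: 64 MiB SSTs
  o.max_bytes_for_level_base = 1ull << 30;  // 1073741824: 1 GiB L1
  o.max_bytes_for_level_multiplier = 8.0;
  o.compaction_style = rocksdb::kCompactionStyleLevel;
  o.compaction_pri = rocksdb::kMinOverlappingRatio;
  o.soft_pending_compaction_bytes_limit = 64ull << 30;   // 68719476736
  o.hard_pending_compaction_bytes_limit = 256ull << 30;  // 274877906944
  o.ttl = 30 * 24 * 60 * 60;                // 2592000 s = 30 days
  o.force_consistency_checks = true;
  return o;
}
```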
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-0]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2f1c0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
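[editor's note] The indented "table_factory options" sub-blocks correspond to rocksdb::BlockBasedTableOptions. The m-* and p-* shards share one cache instance (pointer 0x559c9de16dd0, capacity 483183820 bytes, i.e. ~460 MiB, 45% of 1 GiB; num_shard_bits 4 = 16 shards). The log names Ceph's own BinnedLRUCache; the sketch below substitutes stock rocksdb::NewLRUCache with the same capacity and shard count, and the bloom bits-per-key value is an assumption since the dump only prints "bloomfilter":

```cpp
// Hedged reconstruction of the table_factory dump with stock RocksDB types.
// BinnedLRUCache is Ceph-internal; NewLRUCache stands in for it here.
#include <rocksdb/cache.h>
#include <rocksdb/filter_policy.h>
#include <rocksdb/table.h>

rocksdb::BlockBasedTableOptions make_table_options() {
  rocksdb::BlockBasedTableOptions t;
  t.block_size = 4096;
  t.block_restart_interval = 16;
  t.cache_index_and_filter_blocks = true;   // cache_index_and_filter_blocks: 1
  t.pin_top_level_index_and_filter = true;
  t.format_version = 5;
  t.checksum = rocksdb::kXXH3;              // "checksum: 4" in the dump
  t.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));  // bits/key assumed
  t.block_cache = rocksdb::NewLRUCache(483183820, /*num_shard_bits=*/4);
  return t;
}
// Attach with: cf_opts.table_factory.reset(
//     rocksdb::NewBlockBasedTableFactory(make_table_options()));
```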
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-1]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2f1c0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 podman[206963]: 2025-10-11 01:43:18.46101309 +0000 UTC m=+0.103000705 container create e22adf46bb8783fad970459c7eefc2da28873996891d4f5fd7992b88c9d7afae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_jackson, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
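[editor's note] The "table_properties_collectors: CompactOnDeletionCollector (...)" line in each dump is RocksDB's tombstone-density heuristic: an SST file is marked for compaction when, within a sliding window of 32768 entries, at least 16384 are deletions (deletion ratio 0 disables the ratio-based trigger). A short sketch of wiring it up with the exact values from the dump:

```cpp
// CompactOnDeletionCollector with the parameters printed in this log:
// window 32768, trigger 16384, ratio 0.
#include <rocksdb/options.h>
#include <rocksdb/utilities/table_properties_collectors.h>

void add_deletion_trigger(rocksdb::ColumnFamilyOptions& o) {
  o.table_properties_collector_factories.push_back(
      rocksdb::NewCompactOnDeletionCollectorFactory(
          /*sliding_window_size=*/32768,
          /*deletion_trigger=*/16384,
          /*deletion_ratio=*/0.0));
}
```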
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-2]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2f1c0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
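[editor's note] Two things worth noting before the [O-0] block that follows. First, its table_factory dump shows a different block_cache pointer (0x559c9de16430) with capacity 536870912 bytes (512 MiB), so the O-* shards use their own cache rather than the ~460 MiB one shared by m-*/p-*. Second, with level_compaction_dynamic_level_bytes = 0, the target capacity of level n (n >= 1) is max_bytes_for_level_base * multiplier^(n-1); a quick check of that arithmetic for the values in these dumps:

```cpp
// Level-capacity arithmetic for base = 1 GiB, multiplier = 8 (values from the
// dumps above): L1 = 1 GiB, L2 = 8 GiB, L3 = 64 GiB, L4 = 512 GiB.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t base = 1ull << 30;  // max_bytes_for_level_base = 1073741824
  const double mult = 8.0;           // max_bytes_for_level_multiplier
  uint64_t target = base;
  for (int level = 1; level <= 4; ++level) {
    std::printf("L%d target: %llu bytes (%.0f GiB)\n", level,
                (unsigned long long)target, target / double(1ull << 30));
    target = (uint64_t)(target * mult);
  }
}
```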
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-0]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2f1a0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16430
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
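Taken together, the flush and compaction settings in the dump above pin down the write-path sizing for this column family. The sketch below only restates that arithmetic from the logged values (write_buffer_size 16777216, min_write_buffer_number_to_merge 6, level0_file_num_compaction_trigger 8, max_bytes_for_level_base 1073741824, max_bytes_for_level_multiplier 8, num_levels 7); the static per-level targets are an assumption that holds here only because level_compaction_dynamic_level_bytes is 0, and compression is ignored.

```python
# Sketch: derive the LSM sizing implied by the options dumped above.
# All constants are copied from the log; the model (static level targets,
# uncompressed data) is an assumption, not something the log states.

GiB = 1024 ** 3
MiB = 1024 ** 2

write_buffer_size = 16 * MiB   # Options.write_buffer_size
min_merge = 6                  # Options.min_write_buffer_number_to_merge
l0_trigger = 8                 # Options.level0_file_num_compaction_trigger
level_base = 1 * GiB           # Options.max_bytes_for_level_base
multiplier = 8                 # Options.max_bytes_for_level_multiplier
num_levels = 7                 # Options.num_levels

# Six 16 MiB memtables are merged per flush, so each L0 file is ~96 MiB.
flush_size = write_buffer_size * min_merge
print(f"L0 file size     ~{flush_size / MiB:.0f} MiB")

# L0 compacts into L1 once 8 such files accumulate (~768 MiB).
print(f"L0 compaction at ~{l0_trigger * flush_size / MiB:.0f} MiB")

# Static level targets: L1 = 1 GiB, each deeper level 8x larger.
for level in range(1, num_levels):
    target = level_base * multiplier ** (level - 1)
    print(f"L{level} target       ~{target / GiB:.0f} GiB")
```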
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-1]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2f1a0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16430
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-2]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2f1a0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16430
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:635]         (skipping printing options)
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:635]         (skipping printing options)
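The O-* column families above share one 536870912-byte (512 MiB) BinnedLRUCache at 0x559c9de16430, while the [default] family further down in this dump gets a separate cache of 483183820 bytes, which is exactly 0.9 × 512 MiB truncated; reading that as a 90/10 split of a cache budget is an inference from the numbers, not something the log states. With num_shard_bits 4, each cache is divided into 16 independently locked shards. A quick check of both numbers:

```python
# Sketch: per-shard capacity of the BinnedLRUCache instances in this dump.
# Capacities are copied from the log; the 0.9 ratio below is an inference.

capacity_kv = 536870912        # block_cache capacity for the O-* families shown here
capacity_default = 483183820   # block_cache capacity for [default], further down
num_shard_bits = 4

shards = 2 ** num_shard_bits
print(shards, capacity_kv // shards)   # 16 shards of 33554432 bytes (32 MiB) each
print(capacity_default / capacity_kv)  # ~0.9, suggesting a fractional cache split
```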
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:db/MANIFEST-000032 succeeded,manifest_file_number is 32, next_file_number is 34, last_sequence is 12, log_number is 5,prev_log_number is 0,max_column_family is 11,min_log_number_to_keep is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [m-0] (ID 1), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [m-1] (ID 2), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [m-2] (ID 3), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [p-0] (ID 4), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [p-1] (ID 5), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [p-2] (ID 6), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [O-0] (ID 7), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [O-1] (ID 8), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [O-2] (ID 9), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [L] (ID 10), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [P] (ID 11), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 6df79aca-8154-4f0a-9cf5-03a59d6b714f
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146998490597, "job": 1, "event": "recovery_started", "wal_files": [31]}
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #31 mode 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146998491085, "job": 1, "event": "recovery_finished"}
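The two EVENT_LOG_v1 records above bracket WAL replay: recovery of log #31 started at time_micros 1760146998490597 and finished at 1760146998491085, i.e. in 488 microseconds. Because the payload after the EVENT_LOG_v1 tag is plain JSON, such durations can be pulled out of a journal capture mechanically; a minimal sketch, assuming exactly the line format shown above:

```python
import json

# Sketch: extract EVENT_LOG_v1 payloads from journal lines like the two above
# and compute the WAL recovery duration. The input list simply copies this
# log's lines; any other source would need the same "EVENT_LOG_v1 " tag.
lines = [
    'Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: EVENT_LOG_v1 '
    '{"time_micros": 1760146998490597, "job": 1, "event": "recovery_started", "wal_files": [31]}',
    'Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: EVENT_LOG_v1 '
    '{"time_micros": 1760146998491085, "job": 1, "event": "recovery_finished"}',
]

events = {}
for line in lines:
    payload = json.loads(line.split("EVENT_LOG_v1 ", 1)[1])
    events[payload["event"]] = payload["time_micros"]

delta = events["recovery_finished"] - events["recovery_started"]
print(f"WAL recovery took {delta} us")  # 488 us
```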
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _open_db opened rocksdb path db options compression=kLZ4Compression,max_write_buffer_number=64,min_write_buffer_number_to_merge=6,compaction_style=kCompactionStyleLevel,write_buffer_size=16777216,max_background_jobs=4,level0_file_num_compaction_trigger=8,max_bytes_for_level_base=1073741824,max_bytes_for_level_multiplier=8,compaction_readahead_size=2MB,max_total_wal_size=1073741824,writable_file_max_buffer_size=0
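The _open_db line above records the effective RocksDB option string as a flat comma-separated key=value list. Most values are plain integers, but compaction_readahead_size carries a 2MB unit suffix (it reappears as 2097152 in the option dump below), so a parser should keep values as strings rather than casting them. A minimal sketch, assuming exactly this format:

```python
# Sketch: split the option string from the _open_db line above into a dict.
# The string is copied verbatim from the log; values stay as strings because
# entries like compaction_readahead_size=2MB mix numbers and unit suffixes.
opts_str = (
    "compression=kLZ4Compression,max_write_buffer_number=64,"
    "min_write_buffer_number_to_merge=6,compaction_style=kCompactionStyleLevel,"
    "write_buffer_size=16777216,max_background_jobs=4,"
    "level0_file_num_compaction_trigger=8,max_bytes_for_level_base=1073741824,"
    "max_bytes_for_level_multiplier=8,compaction_readahead_size=2MB,"
    "max_total_wal_size=1073741824,writable_file_max_buffer_size=0"
)

opts = dict(kv.split("=", 1) for kv in opts_str.split(","))
print(opts["compression"])                # kLZ4Compression
print(opts["compaction_readahead_size"])  # 2MB, i.e. the 2097152 in the dump below
```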
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _open_super_meta old nid_max 1025
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _open_super_meta old blobid_max 10240
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _open_super_meta ondisk_format 4 compat_ondisk_format 3
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _open_super_meta min_alloc_size 0x1000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: freelist init
Oct 11 01:43:18 compute-0 ceph-osd[206800]: freelist _read_cfg
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _init_alloc loaded 20 GiB in 2 extents, allocator type hybrid, capacity 0x4ffc00000, block size 0x1000, free 0x4ffbfd000, fragmentation 1.9e-07
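_init_alloc reports the allocator state in hex: capacity 0x4ffc00000 against free 0x4ffbfd000 leaves 0x3000 bytes, i.e. three 0x1000-byte blocks, actually allocated, which is why the reported fragmentation is effectively zero. A quick check of the conversion, with all values copied from the line above:

```python
# Sketch: sanity-check the _init_alloc line above.
capacity = 0x4ffc00000  # 21470642176 bytes, matching the bdev open size later on
free     = 0x4ffbfd000
block    = 0x1000       # allocator block size, 4 KiB

used = capacity - free
print(used, used // block)                    # 12288 bytes -> 3 blocks in use
print(f"{capacity / 2**30:.2f} GiB capacity") # ~20.00 GiB, the "20 GiB" in the log
```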
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:496] Shutdown: canceling all background work
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:704] Shutdown complete
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluefs umount
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bdev(0x559c9ec79400 /var/lib/ceph/osd/ceph-1/block) close
Oct 11 01:43:18 compute-0 podman[206963]: 2025-10-11 01:43:18.426906243 +0000 UTC m=+0.068893928 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:18 compute-0 ceph-mgr[192233]: [devicehealth INFO root] creating mgr pool
Oct 11 01:43:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true} v 0) v1
Oct 11 01:43:18 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]: dispatch
Oct 11 01:43:18 compute-0 systemd[1]: Started libpod-conmon-e22adf46bb8783fad970459c7eefc2da28873996891d4f5fd7992b88c9d7afae.scope.
Oct 11 01:43:18 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:18 compute-0 podman[206963]: 2025-10-11 01:43:18.617876649 +0000 UTC m=+0.259864294 container init e22adf46bb8783fad970459c7eefc2da28873996891d4f5fd7992b88c9d7afae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_jackson, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 01:43:18 compute-0 podman[206963]: 2025-10-11 01:43:18.637167317 +0000 UTC m=+0.279154962 container start e22adf46bb8783fad970459c7eefc2da28873996891d4f5fd7992b88c9d7afae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_jackson, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:18 compute-0 podman[206963]: 2025-10-11 01:43:18.644408663 +0000 UTC m=+0.286396298 container attach e22adf46bb8783fad970459c7eefc2da28873996891d4f5fd7992b88c9d7afae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_jackson, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:43:18 compute-0 amazing_jackson[207174]: 167 167
Oct 11 01:43:18 compute-0 systemd[1]: libpod-e22adf46bb8783fad970459c7eefc2da28873996891d4f5fd7992b88c9d7afae.scope: Deactivated successfully.
Oct 11 01:43:18 compute-0 podman[206963]: 2025-10-11 01:43:18.655452795 +0000 UTC m=+0.297440410 container died e22adf46bb8783fad970459c7eefc2da28873996891d4f5fd7992b88c9d7afae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_jackson, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default)
Oct 11 01:43:18 compute-0 systemd[1]: var-lib-containers-storage-overlay-e20921449845ba079e7182c99af9606ae53319a49c72ee8f3ae968895bbba8c0-merged.mount: Deactivated successfully.
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bdev(0x559c9ec79400 /var/lib/ceph/osd/ceph-1/block) open path /var/lib/ceph/osd/ceph-1/block
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bdev(0x559c9ec79400 /var/lib/ceph/osd/ceph-1/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-1/block failed: (22) Invalid argument
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bdev(0x559c9ec79400 /var/lib/ceph/osd/ceph-1/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluefs add_block_device bdev 1 path /var/lib/ceph/osd/ceph-1/block size 20 GiB
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluefs mount
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluefs _init_alloc shared, id 1, capacity 0x4ffc00000, block size 0x10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluefs mount shared_bdev_used = 4718592
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _prepare_db_environment set db_paths to db,20397110067 db.slow,20397110067
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: RocksDB version: 7.9.2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Git sha 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Compile date 2025-05-06 23:30:25
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: DB SUMMARY
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: DB Session ID:  36KUXF3S98H0WIMUURP3
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: CURRENT file:  CURRENT
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: IDENTITY file:  IDENTITY
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: MANIFEST file:  MANIFEST-000032 size: 1007 Bytes
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: SST files in db dir, Total Num: 1, files: 000030.sst 
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: SST files in db.slow dir, Total Num: 0, files: 
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Write Ahead Log file in db.wal: 000031.log size: 5093 ; 
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                         Options.error_if_exists: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.create_if_missing: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                         Options.paranoid_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.flush_verify_memtable_count: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.track_and_verify_wals_in_manifest: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.verify_sst_unique_id_in_manifest: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                                     Options.env: 0x559c9edd4460
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                                      Options.fs: LegacyFileSystem
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                                Options.info_log: 0x559c9de2e8a0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_file_opening_threads: 16
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                              Options.statistics: (nil)
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.use_fsync: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.max_log_file_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.max_manifest_file_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.log_file_time_to_roll: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.keep_log_file_num: 1000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.recycle_log_file_num: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                         Options.allow_fallocate: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.allow_mmap_reads: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.allow_mmap_writes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.use_direct_reads: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.use_direct_io_for_flush_and_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.create_missing_column_families: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                              Options.db_log_dir: 
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                                 Options.wal_dir: db.wal
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.table_cache_numshardbits: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                         Options.WAL_ttl_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.WAL_size_limit_MB: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.max_write_batch_group_size_bytes: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.manifest_preallocation_size: 4194304
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                     Options.is_fd_close_on_exec: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.advise_random_on_open: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.db_write_buffer_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.write_buffer_manager: 0x559c9de62460
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.access_hint_on_compaction_start: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.random_access_max_buffer_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                      Options.use_adaptive_mutex: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                            Options.rate_limiter: (nil)
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.sst_file_manager.rate_bytes_per_sec: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.wal_recovery_mode: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.enable_thread_tracking: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.enable_pipelined_write: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.unordered_write: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.allow_concurrent_memtable_write: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.enable_write_thread_adaptive_yield: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.write_thread_max_yield_usec: 100
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.write_thread_slow_yield_usec: 3
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.row_cache: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                              Options.wal_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.avoid_flush_during_recovery: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.allow_ingest_behind: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.two_write_queues: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.manual_wal_flush: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.wal_compression: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.atomic_flush: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.avoid_unnecessary_blocking_io: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.persist_stats_to_disk: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.write_dbid_to_manifest: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.log_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.file_checksum_gen_factory: Unknown
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.best_efforts_recovery: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bgerror_resume_count: 2147483647
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bgerror_resume_retry_interval: 1000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.allow_data_in_errors: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.db_host_id: __hostname__
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.enforce_single_del_contracts: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.max_background_jobs: 4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.max_background_compactions: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.max_subcompactions: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.avoid_flush_during_shutdown: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.writable_file_max_buffer_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.delayed_write_rate : 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.max_total_wal_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.delete_obsolete_files_period_micros: 21600000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.stats_dump_period_sec: 600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.stats_persist_period_sec: 600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.stats_history_buffer_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.max_open_files: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.bytes_per_sync: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                      Options.wal_bytes_per_sync: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.strict_bytes_per_sync: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.compaction_readahead_size: 2097152
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.max_background_flushes: -1
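Two of the DBOptions above interact with the per-column-family settings dumped earlier: each family may accumulate up to 64 write buffers of 16 MiB (a 1 GiB ceiling that is rarely approached in practice), and max_total_wal_size caps the shared write-ahead log at the same 1 GiB, beyond which RocksDB's documented behavior is to flush the families whose memtables pin the oldest WAL file. A restatement of the numbers, with the column-family count of 12 taken from the manifest recovery earlier in the log:

```python
# Sketch: memtable vs. WAL budget implied by the options above. The constants
# are copied from the log; the 12-family count comes from the manifest
# recovery lines (default, m-0..2, p-0..2, O-0..2, L, P).
MiB, GiB = 1024 ** 2, 1024 ** 3

write_buffer_size = 16 * MiB   # per-CF Options.write_buffer_size
max_buffers = 64               # per-CF Options.max_write_buffer_number
column_families = 12
max_total_wal_size = 1 * GiB   # Options.max_total_wal_size

per_cf_budget = write_buffer_size * max_buffers
print(per_cf_budget == max_total_wal_size)    # True: one CF's ceiling equals the WAL cap
print(column_families * per_cf_budget / GiB)  # 12.0 GiB theoretical worst-case memtable RAM
```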
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Compression algorithms supported:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         kZSTD supported: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         kXpressCompression supported: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         kBZip2Compression supported: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         kZSTDNotFinalCompression supported: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         kLZ4Compression supported: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         kZlibCompression supported: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         kLZ4HCCompression supported: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         kSnappyCompression supported: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Fast CRC32 supported: Supported on x86
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: DMutex implementation: pthread_mutex_t
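The support matrix above also explains the compression choice seen throughout this dump: the build has no ZSTD, BZip2, or Xpress support compiled in, and the configured algorithm is LZ4, which is available. A small sketch that folds such "supported" lines into a dict and checks the configured codec; the input list is copied from the log:

```python
import re

# Sketch: parse the 'supported' lines above and confirm the configured
# algorithm (kLZ4Compression, per the option string earlier) is usable.
lines = [
    "kZSTD supported: 0",
    "kXpressCompression supported: 0",
    "kBZip2Compression supported: 0",
    "kZSTDNotFinalCompression supported: 0",
    "kLZ4Compression supported: 1",
    "kZlibCompression supported: 1",
    "kLZ4HCCompression supported: 1",
    "kSnappyCompression supported: 1",
]

supported = {}
for line in lines:
    m = re.match(r"(\w+) supported: ([01])", line)
    supported[m.group(1)] = m.group(2) == "1"

assert supported["kLZ4Compression"]  # matches Options.compression: LZ4
print(sorted(name for name, ok in supported.items() if ok))
```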
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: db/MANIFEST-000032
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: .T:int64_array.b:bitwise_xor
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2ecc0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
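That closes the block-based-table section for the [default] column family: 4 KiB blocks, a bloom filter, index and filter blocks held in the cache, checksum 4 (kXXH3), format_version 5, and one BinnedLRUCache, Ceph's own shard-binned cache registered in place of RocksDB's stock LRU, shared at 0x559c9de16dd0 by every family. The 483183820-byte capacity happens to be 90% of 512 MiB, which suggests BlueStore's cache autotuner, though that derivation is a guess. A rough equivalent in plain RocksDB API terms, with the stock LRU cache standing in for BinnedLRUCache:

    #include <rocksdb/cache.h>
    #include <rocksdb/filter_policy.h>
    #include <rocksdb/options.h>
    #include <rocksdb/table.h>

    rocksdb::Options TableOptionsLikeTheLog() {
      rocksdb::BlockBasedTableOptions t;
      t.block_size = 4096;                      // block_size: 4096
      t.cache_index_and_filter_blocks = true;   // cache_index_and_filter_blocks: 1
      t.pin_top_level_index_and_filter = true;  // pin_top_level_index_and_filter: 1
      t.format_version = 5;                     // format_version: 5
      t.checksum = rocksdb::kXXH3;              // checksum: 4
      t.filter_policy.reset(
          rocksdb::NewBloomFilterPolicy(10));   // "bloomfilter"; bits/key not logged, 10 assumed
      t.block_cache =
          rocksdb::NewLRUCache(483183820, 4);   // capacity and num_shard_bits from the dump
      rocksdb::Options opts;
      opts.table_factory.reset(rocksdb::NewBlockBasedTableFactory(t));
      return opts;
    }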
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
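Worked out, those write-buffer settings bound the memtable footprint per column family: each memtable is write_buffer_size = 16777216 B (16 MiB), up to max_write_buffer_number = 64 of them may accumulate (64 x 16 MiB = 1 GiB worst case before writes stall), and a flush merges min_write_buffer_number_to_merge = 6 of them, so L0 files arrive in roughly 6 x 16 MiB = 96 MiB batches. BlueStore most likely also caps aggregate memtable memory through its own cache sizing, so the 1 GiB figure is a per-family worst case rather than an expected steady state.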
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
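In both compression_opts blocks, level 32767 is RocksDB's kDefaultCompressionLevel sentinel ("use the codec's own default"), and window_bits -14 is a zlib convention (negative window bits = raw deflate with a 2^14 window) that the LZ4 codec simply ignores; the bottommost block's enabled: false means those bottommost overrides are inert, consistent with bottommost_compression reading Disabled above.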
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
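With level_compaction_dynamic_level_bytes off and every multiplier_addtl entry at 1, level capacities follow the plain geometric rule L1 = max_bytes_for_level_base = 1 GiB and Ln = 8 x L(n-1), giving L2 = 8 GiB, L3 = 64 GiB, L4 = 512 GiB, L5 = 4 TiB and L6 = 32 TiB across num_levels = 7. L0 is governed by file count instead: compaction starts at 8 files, writes are throttled at 20 and stopped at 36, and a single compaction is capped at max_compaction_bytes = 1677721600 B, exactly 25 x the 64 MiB target_file_size_base.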
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
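The table_properties_collectors line shows Ceph registering RocksDB's stock tombstone-triggered compaction hook: any 32768-entry sliding window containing 16384 deletes marks the SST file for compaction, with the ratio trigger disabled at 0. A minimal sketch of wiring up the same collector through the public API; the function name below is an illustrative wrapper, the factory call is real RocksDB:

    #include <rocksdb/options.h>
    #include <rocksdb/utilities/table_properties_collectors.h>

    // Mirror the logged collector: window 32768 entries, trigger 16384 deletions,
    // deletion-ratio trigger disabled (0).
    void AddDeletionCollector(rocksdb::ColumnFamilyOptions& cf) {
      cf.table_properties_collector_factories.emplace_back(
          rocksdb::NewCompactOnDeletionCollectorFactory(
              /*sliding_window_size=*/32768,
              /*deletion_trigger=*/16384,
              /*deletion_ratio=*/0.0));
    }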
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
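Options.ttl = 2592000 s works out to 2592000 / 86400 = 30 days: SST files untouched by compaction for that long become compaction candidates, while periodic_compaction_seconds = 0 leaves the unconditional periodic rewrite disabled.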
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
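Everything from here repeats per column family. BlueStore shards its keyspace across RocksDB column families; the m-N and p-N names below come from Ceph's bluestore_rocksdb_cfs sharding spec, where an entry of the form m(3) yields m-0 through m-2. Only [default] carries the custom .T:int64_array.b:bitwise_xor merge operator, which is why merge_operator reads None in every sharded family. A sketch of the generic open-with-column-families pattern, with a hypothetical path and deliberately plain options (BlueStore builds its own internally):

    #include <cassert>
    #include <vector>
    #include <rocksdb/db.h>

    int main() {
      rocksdb::DBOptions db_opts;
      db_opts.create_if_missing = true;              // toy DB for illustration, not an OSD
      db_opts.create_missing_column_families = true;
      rocksdb::ColumnFamilyOptions cf_opts;          // BlueStore tunes these per shard
      std::vector<rocksdb::ColumnFamilyDescriptor> cfs = {
          {rocksdb::kDefaultColumnFamilyName, cf_opts},
          {"m-0", cf_opts}, {"m-1", cf_opts}, {"m-2", cf_opts}, {"p-0", cf_opts}};
      std::vector<rocksdb::ColumnFamilyHandle*> handles;
      rocksdb::DB* db = nullptr;
      rocksdb::Status s =
          rocksdb::DB::Open(db_opts, "/tmp/cf-demo", cfs, &handles, &db);
      assert(s.ok());
      for (auto* h : handles) db->DestroyColumnFamilyHandle(h);  // release before the DB
      delete db;
      return 0;
    }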
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-0]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2ecc0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
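The [m-1], [m-2] and (further below) [p-0] dumps that follow are line-for-line identical to [m-0]: every sharded family inherits the same ColumnFamilyOptions, so only the family name in the header changes, and the differences from [default] stay confined to the merge operator noted above.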
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-1]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2ecc0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-2]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2ecc0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 podman[206963]: 2025-10-11 01:43:18.72636738 +0000 UTC m=+0.368355015 container remove e22adf46bb8783fad970459c7eefc2da28873996891d4f5fd7992b88c9d7afae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_jackson, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-0]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2ecc0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-1]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2ecc0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-2]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2ecc0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-0]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2f2e0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16430
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-1]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2f2e0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16430
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 systemd[1]: libpod-conmon-e22adf46bb8783fad970459c7eefc2da28873996891d4f5fd7992b88c9d7afae.scope: Deactivated successfully.
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-2]:
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559c9de2f2e0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x559c9de16430
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:635]         (skipping printing options)
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/column_family.cc:635]         (skipping printing options)
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:db/MANIFEST-000032 succeeded,manifest_file_number is 32, next_file_number is 34, last_sequence is 12, log_number is 5,prev_log_number is 0,max_column_family is 11,min_log_number_to_keep is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [m-0] (ID 1), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [m-1] (ID 2), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [m-2] (ID 3), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [p-0] (ID 4), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [p-1] (ID 5), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [p-2] (ID 6), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [O-0] (ID 7), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [O-1] (ID 8), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [O-2] (ID 9), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [L] (ID 10), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5581] Column family [P] (ID 11), log number is 5
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 6df79aca-8154-4f0a-9cf5-03a59d6b714f
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146998780181, "job": 1, "event": "recovery_started", "wal_files": [31]}
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #31 mode 2
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146998785505, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 35, "file_size": 1272, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 13, "largest_seqno": 21, "table_properties": {"data_size": 128, "index_size": 27, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 87, "raw_average_key_size": 17, "raw_value_size": 82, "raw_average_value_size": 16, "num_data_blocks": 1, "num_entries": 5, "num_filter_entries": 5, "num_deletions": 0, "num_merge_operands": 2, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": ".T:int64_array.b:bitwise_xor", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "LZ4", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146998, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "6df79aca-8154-4f0a-9cf5-03a59d6b714f", "db_session_id": "36KUXF3S98H0WIMUURP3", "orig_file_number": 35, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146998790216, "cf_name": "p-0", "job": 1, "event": "table_file_creation", "file_number": 36, "file_size": 1593, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 14, "largest_seqno": 15, "table_properties": {"data_size": 467, "index_size": 39, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 72, "raw_average_key_size": 36, "raw_value_size": 567, "raw_average_value_size": 283, "num_data_blocks": 1, "num_entries": 2, "num_filter_entries": 2, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "p-0", "column_family_id": 4, "comparator": "leveldb.BytewiseComparator", "merge_operator": "nullptr", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "LZ4", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146998, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "6df79aca-8154-4f0a-9cf5-03a59d6b714f", "db_session_id": "36KUXF3S98H0WIMUURP3", "orig_file_number": 36, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146998794748, "cf_name": "O-2", "job": 1, "event": "table_file_creation", "file_number": 37, "file_size": 1275, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 16, "largest_seqno": 16, "table_properties": {"data_size": 121, "index_size": 64, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 55, "raw_average_key_size": 55, "raw_value_size": 50, "raw_average_value_size": 50, "num_data_blocks": 1, "num_entries": 1, "num_filter_entries": 1, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "O-2", "column_family_id": 9, "comparator": "leveldb.BytewiseComparator", "merge_operator": "nullptr", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "LZ4", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146998, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "6df79aca-8154-4f0a-9cf5-03a59d6b714f", "db_session_id": "36KUXF3S98H0WIMUURP3", "orig_file_number": 37, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760146998797067, "job": 1, "event": "recovery_finished"}
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/version_set.cc:5047] Creating manifest 40
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x559c9ee04000
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: DB pointer 0x559c9de4ba00
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _open_db opened rocksdb path db options compression=kLZ4Compression,max_write_buffer_number=64,min_write_buffer_number_to_merge=6,compaction_style=kCompactionStyleLevel,write_buffer_size=16777216,max_background_jobs=4,level0_file_num_compaction_trigger=8,max_bytes_for_level_base=1073741824,max_bytes_for_level_multiplier=8,compaction_readahead_size=2MB,max_total_wal_size=1073741824,writable_file_max_buffer_size=0
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _upgrade_super from 4, latest 4
Oct 11 01:43:18 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _upgrade_super done
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 01:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
                                            Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
                                            Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      2/0    2.63 KB   0.2      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.01              0.00         1    0.005       0      0       0.0       0.0
                                             Sum      2/0    2.63 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.01              0.00         1    0.005       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.01              0.00         1    0.005       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.2      0.01              0.00         1    0.005       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 460.80 MB usage: 0.94 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.2e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
                                            
                                            ** Compaction Stats [m-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 460.80 MB usage: 0.94 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.2e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-0] **
                                            
                                            ** Compaction Stats [m-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 460.80 MB usage: 0.94 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.2e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-1] **
                                            
                                            ** Compaction Stats [m-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 460.80 MB usage: 0.94 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.2e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-2] **
                                            
                                            ** Compaction Stats [p-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.56 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Sum      1/0    1.56 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.00              0.00         1    0.005       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.3      0.00              0.00         1    0.005       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 460.80 MB usage: 0.94 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.2e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-0] **
                                            
                                            ** Compaction Stats [p-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 460.80 MB usage: 0.94 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.2e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-1] **
                                            
                                            ** Compaction Stats [p-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 460.80 MB usage: 0.94 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.2e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-2] **
                                            
                                            ** Compaction Stats [O-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16430#2 capacity: 512.00 MB usage: 0.25 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 2 last_secs: 1.2e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.14 KB,2.68221e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-0] **
                                            
                                            ** Compaction Stats [O-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16430#2 capacity: 512.00 MB usage: 0.25 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 2 last_secs: 1.2e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.14 KB,2.68221e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-1] **
                                            
                                            ** Compaction Stats [O-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.25 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.00              0.00         1    0.004       0      0       0.0       0.0
                                             Sum      1/0    1.25 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.00              0.00         1    0.004       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.00              0.00         1    0.004       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.3      0.00              0.00         1    0.004       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16430#2 capacity: 512.00 MB usage: 0.25 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 2 last_secs: 1.2e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.14 KB,2.68221e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-2] **
                                            
                                            ** Compaction Stats [L] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [L] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 460.80 MB usage: 0.94 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.2e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [L] **
                                            
                                            ** Compaction Stats [P] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [P] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.1 total, 0.1 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 460.80 MB usage: 0.94 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 5.2e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [P] **
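
The block above is BlueStore's periodic RocksDB statistics dump, printed once per column family ([O-0]..[O-2], [L] and [P] are the sharded column families BlueStore creates; O appears to hold object-metadata shards, with L and P likely covering deferred-write and pg-meta keys). The same counters are available on demand from the OSD admin socket instead of waiting for the next periodic dump. A minimal sketch, assuming the admin socket for osd.1 is reachable (e.g. from inside the daemon's container):

    # Live perf counters, including the bluestore and rocksdb sections
    ceph daemon osd.1 perf dump
    # Trigger a manual RocksDB compaction if L0 file counts climb
    ceph tell osd.1 compact
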
Oct 11 01:43:18 compute-0 ceph-osd[206800]: <cls> /home/jenkins-build/build/workspace/ceph-build/ARCH/x86_64/AVAILABLE_ARCH/x86_64/AVAILABLE_DIST/centos9/DIST/centos9/MACHINE_SIZE/gigantic/release/18.2.7/rpm/el9/BUILD/ceph-18.2.7/src/cls/cephfs/cls_cephfs.cc:201: loading cephfs
Oct 11 01:43:18 compute-0 ceph-osd[206800]: <cls> /home/jenkins-build/build/workspace/ceph-build/ARCH/x86_64/AVAILABLE_ARCH/x86_64/AVAILABLE_DIST/centos9/DIST/centos9/MACHINE_SIZE/gigantic/release/18.2.7/rpm/el9/BUILD/ceph-18.2.7/src/cls/hello/cls_hello.cc:316: loading cls_hello
Oct 11 01:43:18 compute-0 ceph-osd[206800]: _get_class not permitted to load lua
Oct 11 01:43:18 compute-0 ceph-osd[206800]: _get_class not permitted to load sdk
Oct 11 01:43:18 compute-0 ceph-osd[206800]: _get_class not permitted to load test_remote_reads
Oct 11 01:43:18 compute-0 ceph-osd[206800]: osd.1 0 crush map has features 288232575208783872, adjusting msgr requires for clients
Oct 11 01:43:18 compute-0 ceph-osd[206800]: osd.1 0 crush map has features 288232575208783872 was 8705, adjusting msgr requires for mons
Oct 11 01:43:18 compute-0 ceph-osd[206800]: osd.1 0 crush map has features 288232575208783872, adjusting msgr requires for osds
Oct 11 01:43:18 compute-0 ceph-osd[206800]: osd.1 0 check_osdmap_features enabling on-disk ERASURE CODES compat feature
Oct 11 01:43:18 compute-0 ceph-osd[206800]: osd.1 0 load_pgs
Oct 11 01:43:18 compute-0 ceph-osd[206800]: osd.1 0 load_pgs opened 0 pgs
Oct 11 01:43:18 compute-0 ceph-osd[206800]: osd.1 0 log_to_monitors true
Oct 11 01:43:18 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1[206796]: 2025-10-11T01:43:18.861+0000 7f22834f0740 -1 osd.1 0 log_to_monitors true
Oct 11 01:43:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]} v 0) v1
Oct 11 01:43:18 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='osd.1 [v2:192.168.122.100:6806/280669855,v1:192.168.122.100:6807/280669855]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
Oct 11 01:43:19 compute-0 podman[207421]: 2025-10-11 01:43:19.088303014 +0000 UTC m=+0.067335231 container create 5a93657aac01d21b84b6f8d1e5692d76a174e73afcd64cb588f935937707c28d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate-test, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.build-date=20250507, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:43:19 compute-0 podman[207421]: 2025-10-11 01:43:19.058105999 +0000 UTC m=+0.037138216 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:19 compute-0 systemd[1]: Started libpod-conmon-5a93657aac01d21b84b6f8d1e5692d76a174e73afcd64cb588f935937707c28d.scope.
Oct 11 01:43:19 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/57cd3877208d8f8e57593be58125e5f1e34b0fb98265ea19c4c5e7e9e7266558/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/57cd3877208d8f8e57593be58125e5f1e34b0fb98265ea19c4c5e7e9e7266558/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/57cd3877208d8f8e57593be58125e5f1e34b0fb98265ea19c4c5e7e9e7266558/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/57cd3877208d8f8e57593be58125e5f1e34b0fb98265ea19c4c5e7e9e7266558/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/57cd3877208d8f8e57593be58125e5f1e34b0fb98265ea19c4c5e7e9e7266558/merged/var/lib/ceph/osd/ceph-2 supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:19 compute-0 podman[207421]: 2025-10-11 01:43:19.256906317 +0000 UTC m=+0.235938564 container init 5a93657aac01d21b84b6f8d1e5692d76a174e73afcd64cb588f935937707c28d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate-test, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507)
Oct 11 01:43:19 compute-0 podman[207421]: 2025-10-11 01:43:19.288718206 +0000 UTC m=+0.267750403 container start 5a93657aac01d21b84b6f8d1e5692d76a174e73afcd64cb588f935937707c28d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate-test, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=reef)
Oct 11 01:43:19 compute-0 podman[207421]: 2025-10-11 01:43:19.293451985 +0000 UTC m=+0.272484252 container attach 5a93657aac01d21b84b6f8d1e5692d76a174e73afcd64cb588f935937707c28d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate-test, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:43:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e10 do_prune osdmap full prune enabled
Oct 11 01:43:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e10 encode_pending skipping prime_pg_temp; mapping job did not start
Oct 11 01:43:19 compute-0 ceph-mon[191930]: pgmap v41: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
Oct 11 01:43:19 compute-0 ceph-mon[191930]: osdmap e10: 3 total, 1 up, 3 in
Oct 11 01:43:19 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:19 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:19 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]: dispatch
Oct 11 01:43:19 compute-0 ceph-mon[191930]: from='osd.1 [v2:192.168.122.100:6806/280669855,v1:192.168.122.100:6807/280669855]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
Oct 11 01:43:19 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished
Oct 11 01:43:19 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='osd.1 [v2:192.168.122.100:6806/280669855,v1:192.168.122.100:6807/280669855]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
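
At boot each OSD registers its device class with the monitors, which is what the audited set-device-class round trip above shows. The same mon command is exposed as ordinary CLI; a sketch of the manual equivalent, with class and id taken from the audit lines:

    # Equivalent of the audited mon command; fails if a class is already set
    ceph osd crush set-device-class hdd osd.1
    # List the classes now known to the CRUSH map
    ceph osd crush class ls
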
Oct 11 01:43:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e11 e11: 3 total, 1 up, 3 in
Oct 11 01:43:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e11 crush map has features 3314933000852226048, adjusting msgr requires
Oct 11 01:43:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e11 crush map has features 288514051259236352, adjusting msgr requires
Oct 11 01:43:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e11 crush map has features 288514051259236352, adjusting msgr requires
Oct 11 01:43:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e11 crush map has features 288514051259236352, adjusting msgr requires
Oct 11 01:43:19 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e11: 3 total, 1 up, 3 in
Oct 11 01:43:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=compute-0", "root=default"]} v 0) v1
Oct 11 01:43:19 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='osd.1 [v2:192.168.122.100:6806/280669855,v1:192.168.122.100:6807/280669855]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=compute-0", "root=default"]}]: dispatch
Oct 11 01:43:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e11 create-or-move crush item name 'osd.1' initial_weight 0.0195 at location {host=compute-0,root=default}
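
The 0.0195 initial weight is the OSD's capacity expressed in TiB (20 GiB / 1024 ≈ 0.0195), which the OSD computes for itself and submits via create-or-move. Done manually, the same placement would be (values copied from the line above):

    ceph osd crush create-or-move osd.1 0.0195 host=compute-0 root=default
    # Confirm the bucket hierarchy and weight
    ceph osd tree
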
Oct 11 01:43:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 1} v 0) v1
Oct 11 01:43:19 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 2} v 0) v1
Oct 11 01:43:19 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true} v 0) v1
Oct 11 01:43:19 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch
Oct 11 01:43:19 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.1: (2) No such file or directory
Oct 11 01:43:19 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.2: (2) No such file or directory
Oct 11 01:43:19 compute-0 ceph-osd[205667]: osd.0 11 crush map has features 288514051259236352, adjusting msgr requires for clients
Oct 11 01:43:19 compute-0 ceph-osd[205667]: osd.0 11 crush map has features 288514051259236352 was 288514050185503233, adjusting msgr requires for mons
Oct 11 01:43:19 compute-0 ceph-osd[205667]: osd.0 11 crush map has features 3314933000852226048, adjusting msgr requires for osds
Oct 11 01:43:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 11 pg[1.0( empty local-lis/les=0/0 n=0 ec=11/11 lis/c=0/0 les/c/f=0/0/0 sis=11) [0] r=0 lpr=11 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e11 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:43:19 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : purged_snaps scrub starts
Oct 11 01:43:19 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : purged_snaps scrub ok
Oct 11 01:43:19 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate-test[207436]: usage: ceph-volume activate [-h] [--osd-id OSD_ID] [--osd-uuid OSD_UUID]
Oct 11 01:43:19 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate-test[207436]:                             [--no-systemd] [--no-tmpfs]
Oct 11 01:43:19 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate-test[207436]: ceph-volume activate: error: unrecognized arguments: --bad-option
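
The --bad-option failure is deliberate: the throwaway osd-2-activate-test container appears to probe whether the image's ceph-volume supports the plain activate subcommand, and the usage text printed above is the answer it is after. Sketched from that same usage text, a working call would look like this (the UUID is not shown in the log, so the placeholder is illustrative):

    ceph-volume activate --osd-id 2 --osd-uuid <OSD_UUID> --no-systemd --no-tmpfs
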
Oct 11 01:43:19 compute-0 systemd[1]: libpod-5a93657aac01d21b84b6f8d1e5692d76a174e73afcd64cb588f935937707c28d.scope: Deactivated successfully.
Oct 11 01:43:19 compute-0 podman[207421]: 2025-10-11 01:43:19.956341426 +0000 UTC m=+0.935373663 container died 5a93657aac01d21b84b6f8d1e5692d76a174e73afcd64cb588f935937707c28d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate-test, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:20 compute-0 systemd[1]: var-lib-containers-storage-overlay-57cd3877208d8f8e57593be58125e5f1e34b0fb98265ea19c4c5e7e9e7266558-merged.mount: Deactivated successfully.
Oct 11 01:43:20 compute-0 podman[207421]: 2025-10-11 01:43:20.071039135 +0000 UTC m=+1.050071362 container remove 5a93657aac01d21b84b6f8d1e5692d76a174e73afcd64cb588f935937707c28d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate-test, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
Oct 11 01:43:20 compute-0 systemd[1]: libpod-conmon-5a93657aac01d21b84b6f8d1e5692d76a174e73afcd64cb588f935937707c28d.scope: Deactivated successfully.
Oct 11 01:43:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v44: 1 pgs: 1 unknown; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
Oct 11 01:43:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e11 do_prune osdmap full prune enabled
Oct 11 01:43:20 compute-0 ceph-mon[191930]: log_channel(cluster) log [WRN] : Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)
Oct 11 01:43:20 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='osd.1 [v2:192.168.122.100:6806/280669855,v1:192.168.122.100:6807/280669855]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=compute-0", "root=default"]}]': finished
Oct 11 01:43:20 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
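
The POOL_APP_NOT_ENABLED warning fired in the window between the .mgr pool being created and the mgr tagging it, and clears a moment later. Done by hand, the tagging step is the CLI form of the audited command (the yes_i_really_mean_it flag mirrors the JSON payload above):

    ceph osd pool application enable .mgr mgr --yes-i-really-mean-it
    # POOL_APP_NOT_ENABLED should no longer be listed
    ceph health detail
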
Oct 11 01:43:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e12 e12: 3 total, 1 up, 3 in
Oct 11 01:43:20 compute-0 ceph-osd[206800]: osd.1 0 done with init, starting boot process
Oct 11 01:43:20 compute-0 ceph-osd[206800]: osd.1 0 start_boot
Oct 11 01:43:20 compute-0 ceph-osd[206800]: osd.1 0 maybe_override_options_for_qos osd_max_backfills set to 1
Oct 11 01:43:20 compute-0 ceph-osd[206800]: osd.1 0 maybe_override_options_for_qos osd_recovery_max_active set to 0
Oct 11 01:43:20 compute-0 ceph-osd[206800]: osd.1 0 maybe_override_options_for_qos osd_recovery_max_active_hdd set to 3
Oct 11 01:43:20 compute-0 ceph-osd[206800]: osd.1 0 maybe_override_options_for_qos osd_recovery_max_active_ssd set to 10
Oct 11 01:43:20 compute-0 ceph-osd[206800]: osd.1 0  bench count 12288000 bsize 4 KiB
Oct 11 01:43:20 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e12: 3 total, 1 up, 3 in
Oct 11 01:43:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 1} v 0) v1
Oct 11 01:43:20 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 2} v 0) v1
Oct 11 01:43:20 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:20 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.1: (2) No such file or directory
Oct 11 01:43:20 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 12 pg[1.0( empty local-lis/les=0/0 n=0 ec=11/11 lis/c=0/0 les/c/f=0/0/0 sis=12) [] r=-1 lpr=12 pi=[11,12)/0 crt=0'0 mlcod 0'0 unknown mbc={}] start_peering_interval up [0] -> [], acting [0] -> [], acting_primary 0 -> -1, up_primary 0 -> -1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:43:20 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 12 pg[1.0( empty local-lis/les=0/0 n=0 ec=11/11 lis/c=0/0 les/c/f=0/0/0 sis=12) [] r=-1 lpr=12 pi=[11,12)/0 crt=0'0 mlcod 0'0 unknown NOTIFY mbc={}] state<Start>: transitioning to Stray
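
These two lines are pg 1.0 (the .mgr pool's single PG) re-peering: osdmap e12 moves it off osd.0 (up [0] -> []), so osd.0 drops from Primary to Stray until the map settles. Peering state for a PG can be inspected directly while this is happening:

    # Full peering state and past intervals for one PG
    ceph pg 1.0 query
    # One-line state summary per PG
    ceph pg dump pgs_brief
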
Oct 11 01:43:20 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.2: (2) No such file or directory
Oct 11 01:43:20 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished
Oct 11 01:43:20 compute-0 ceph-mon[191930]: from='osd.1 [v2:192.168.122.100:6806/280669855,v1:192.168.122.100:6807/280669855]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
Oct 11 01:43:20 compute-0 ceph-mon[191930]: osdmap e11: 3 total, 1 up, 3 in
Oct 11 01:43:20 compute-0 ceph-mon[191930]: from='osd.1 [v2:192.168.122.100:6806/280669855,v1:192.168.122.100:6807/280669855]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=compute-0", "root=default"]}]: dispatch
Oct 11 01:43:20 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:20 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:20 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch
Oct 11 01:43:20 compute-0 ceph-mgr[192233]: mgr.server handle_open ignoring open from osd.1 v2:192.168.122.100:6806/280669855; not ready for session (expect reconnect)
Oct 11 01:43:20 compute-0 systemd[1]: Reloading.
Oct 11 01:43:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 1} v 0) v1
Oct 11 01:43:20 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:20 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.1: (2) No such file or directory
Oct 11 01:43:20 compute-0 systemd-sysv-generator[207501]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:43:20 compute-0 systemd-rc-local-generator[207498]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:43:21 compute-0 systemd[1]: Reloading.
Oct 11 01:43:21 compute-0 systemd-rc-local-generator[207538]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:43:21 compute-0 systemd-sysv-generator[207544]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:43:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 1} v 0) v1
Oct 11 01:43:21 compute-0 ceph-mgr[192233]: mgr.server handle_open ignoring open from osd.1 v2:192.168.122.100:6806/280669855; not ready for session (expect reconnect)
Oct 11 01:43:21 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:21 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.1: (2) No such file or directory
Oct 11 01:43:21 compute-0 ceph-mon[191930]: pgmap v44: 1 pgs: 1 unknown; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
Oct 11 01:43:21 compute-0 ceph-mon[191930]: Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)
Oct 11 01:43:21 compute-0 ceph-mon[191930]: from='osd.1 [v2:192.168.122.100:6806/280669855,v1:192.168.122.100:6807/280669855]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=compute-0", "root=default"]}]': finished
Oct 11 01:43:21 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
Oct 11 01:43:21 compute-0 ceph-mon[191930]: osdmap e12: 3 total, 1 up, 3 in
Oct 11 01:43:21 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:21 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:21 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:21 compute-0 systemd[1]: Starting Ceph osd.2 for 3c7617c3-7a20-523e-a9de-20c0d6ba41da...
Oct 11 01:43:22 compute-0 podman[207593]: 2025-10-11 01:43:22.053550748 +0000 UTC m=+0.101940867 container create 1135484bc373ebf475f3277d81fd1d904935f633754fff16c0ddff6967c06f96 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 01:43:22 compute-0 podman[207593]: 2025-10-11 01:43:22.012662466 +0000 UTC m=+0.061052635 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:22 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/607071d20509ed3bbb7c57de862148ca483d6f2d8ba942a44a2d5d09bdffb375/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/607071d20509ed3bbb7c57de862148ca483d6f2d8ba942a44a2d5d09bdffb375/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/607071d20509ed3bbb7c57de862148ca483d6f2d8ba942a44a2d5d09bdffb375/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/607071d20509ed3bbb7c57de862148ca483d6f2d8ba942a44a2d5d09bdffb375/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/607071d20509ed3bbb7c57de862148ca483d6f2d8ba942a44a2d5d09bdffb375/merged/var/lib/ceph/osd/ceph-2 supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:22 compute-0 podman[207593]: 2025-10-11 01:43:22.278070815 +0000 UTC m=+0.326460924 container init 1135484bc373ebf475f3277d81fd1d904935f633754fff16c0ddff6967c06f96 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3)
Oct 11 01:43:22 compute-0 podman[207593]: 2025-10-11 01:43:22.288722403 +0000 UTC m=+0.337112502 container start 1135484bc373ebf475f3277d81fd1d904935f633754fff16c0ddff6967c06f96 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:22 compute-0 podman[207593]: 2025-10-11 01:43:22.302488844 +0000 UTC m=+0.350878953 container attach 1135484bc373ebf475f3277d81fd1d904935f633754fff16c0ddff6967c06f96 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 01:43:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v46: 1 pgs: 1 unknown; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
Oct 11 01:43:22 compute-0 ceph-mgr[192233]: mgr.server handle_open ignoring open from osd.1 v2:192.168.122.100:6806/280669855; not ready for session (expect reconnect)
Oct 11 01:43:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 1} v 0) v1
Oct 11 01:43:22 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:22 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.1: (2) No such file or directory
Oct 11 01:43:22 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : Health check cleared: POOL_APP_NOT_ENABLED (was: 1 pool(s) do not have an application enabled)
Oct 11 01:43:22 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : Cluster is now healthy
Oct 11 01:43:22 compute-0 ceph-mon[191930]: purged_snaps scrub starts
Oct 11 01:43:22 compute-0 ceph-mon[191930]: purged_snaps scrub ok
Oct 11 01:43:22 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:22 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:23 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate[207607]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2
Oct 11 01:43:23 compute-0 bash[207593]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2
Oct 11 01:43:23 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate[207607]: Running command: /usr/bin/ceph-bluestore-tool prime-osd-dir --path /var/lib/ceph/osd/ceph-2 --no-mon-config --dev /dev/mapper/ceph_vg2-ceph_lv2
Oct 11 01:43:23 compute-0 bash[207593]: Running command: /usr/bin/ceph-bluestore-tool prime-osd-dir --path /var/lib/ceph/osd/ceph-2 --no-mon-config --dev /dev/mapper/ceph_vg2-ceph_lv2
Oct 11 01:43:23 compute-0 ceph-mgr[192233]: mgr.server handle_open ignoring open from osd.1 v2:192.168.122.100:6806/280669855; not ready for session (expect reconnect)
Oct 11 01:43:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 1} v 0) v1
Oct 11 01:43:23 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:23 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.1: (2) No such file or directory
Oct 11 01:43:23 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate[207607]: Running command: /usr/bin/chown -h ceph:ceph /dev/mapper/ceph_vg2-ceph_lv2
Oct 11 01:43:23 compute-0 bash[207593]: Running command: /usr/bin/chown -h ceph:ceph /dev/mapper/ceph_vg2-ceph_lv2
Oct 11 01:43:23 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate[207607]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Oct 11 01:43:23 compute-0 bash[207593]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Oct 11 01:43:23 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate[207607]: Running command: /usr/bin/ln -s /dev/mapper/ceph_vg2-ceph_lv2 /var/lib/ceph/osd/ceph-2/block
Oct 11 01:43:23 compute-0 bash[207593]: Running command: /usr/bin/ln -s /dev/mapper/ceph_vg2-ceph_lv2 /var/lib/ceph/osd/ceph-2/block
Oct 11 01:43:23 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate[207607]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2
Oct 11 01:43:23 compute-0 bash[207593]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2
Oct 11 01:43:23 compute-0 ceph-mon[191930]: pgmap v46: 1 pgs: 1 unknown; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
Oct 11 01:43:23 compute-0 ceph-mon[191930]: Health check cleared: POOL_APP_NOT_ENABLED (was: 1 pool(s) do not have an application enabled)
Oct 11 01:43:23 compute-0 ceph-mon[191930]: Cluster is now healthy
Oct 11 01:43:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:23 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate[207607]: --> ceph-volume raw activate successful for osd ID: 2
Oct 11 01:43:23 compute-0 bash[207593]: --> ceph-volume raw activate successful for osd ID: 2
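
The activate sequence above is mechanical: chown the OSD directory, prime it from the block device's BlueStore label, symlink block, chown again. The label that prime-osd-dir reads can be inspected on its own, which is useful when activation fails; using the same device as above:

    # Dump the BlueStore label (osd id, osd fsid, cluster fsid, ...)
    ceph-bluestore-tool show-label --dev /dev/mapper/ceph_vg2-ceph_lv2
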
Oct 11 01:43:23 compute-0 systemd[1]: libpod-1135484bc373ebf475f3277d81fd1d904935f633754fff16c0ddff6967c06f96.scope: Deactivated successfully.
Oct 11 01:43:23 compute-0 systemd[1]: libpod-1135484bc373ebf475f3277d81fd1d904935f633754fff16c0ddff6967c06f96.scope: Consumed 1.286s CPU time.
Oct 11 01:43:23 compute-0 podman[207755]: 2025-10-11 01:43:23.6515499 +0000 UTC m=+0.061352603 container died 1135484bc373ebf475f3277d81fd1d904935f633754fff16c0ddff6967c06f96 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:43:23 compute-0 systemd[1]: var-lib-containers-storage-overlay-607071d20509ed3bbb7c57de862148ca483d6f2d8ba942a44a2d5d09bdffb375-merged.mount: Deactivated successfully.
Oct 11 01:43:23 compute-0 podman[207755]: 2025-10-11 01:43:23.784539743 +0000 UTC m=+0.194342436 container remove 1135484bc373ebf475f3277d81fd1d904935f633754fff16c0ddff6967c06f96 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2-activate, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:43:24 compute-0 podman[207812]: 2025-10-11 01:43:24.273308245 +0000 UTC m=+0.088136459 container create 828dce3fe0dd715278c91c0082e2e415d321a1549bb273e9ec9f40663f06b3c0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 01:43:24 compute-0 podman[207812]: 2025-10-11 01:43:24.236658877 +0000 UTC m=+0.051487161 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:24 compute-0 ceph-osd[206800]: osd.1 0 maybe_override_max_osd_capacity_for_qos osd bench result - bandwidth (MiB/sec): 18.684 iops: 4783.214 elapsed_sec: 0.627
Oct 11 01:43:24 compute-0 ceph-osd[206800]: log_channel(cluster) log [WRN] : OSD bench result of 4783.214026 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.1. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd].
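
The numbers are consistent: the startup bench wrote 12288000 bytes in 4 KiB blocks (3000 IOs) over 0.627 s, about 4783 IOPS, which falls outside mclock's 50-500 IOPS plausibility window for an hdd-class device (a VM-backed disk, so a cache-inflated result is unsurprising), and the default capacity of 315 IOPS is retained. Following the log's own recommendation, a value measured with an external tool can be pinned per OSD; the value below is only an example:

    # Pin the mclock IOPS capacity for osd.1 (hdd device class)
    ceph config set osd.1 osd_mclock_max_capacity_iops_hdd 315
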
Oct 11 01:43:24 compute-0 ceph-osd[206800]: osd.1 0 waiting for initial osdmap
Oct 11 01:43:24 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1[206796]: 2025-10-11T01:43:24.343+0000 7f227fc87640 -1 osd.1 0 waiting for initial osdmap
Oct 11 01:43:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/84ebed78306e7dbbde9a7fea63e87bf49c11cb978ece5f5048002f661c9ad248/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/84ebed78306e7dbbde9a7fea63e87bf49c11cb978ece5f5048002f661c9ad248/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/84ebed78306e7dbbde9a7fea63e87bf49c11cb978ece5f5048002f661c9ad248/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/84ebed78306e7dbbde9a7fea63e87bf49c11cb978ece5f5048002f661c9ad248/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/84ebed78306e7dbbde9a7fea63e87bf49c11cb978ece5f5048002f661c9ad248/merged/var/lib/ceph/osd/ceph-2 supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:24 compute-0 ceph-osd[206800]: osd.1 12 crush map has features 288514051259236352, adjusting msgr requires for clients
Oct 11 01:43:24 compute-0 ceph-osd[206800]: osd.1 12 crush map has features 288514051259236352 was 288232575208792577, adjusting msgr requires for mons
Oct 11 01:43:24 compute-0 ceph-osd[206800]: osd.1 12 crush map has features 3314933000852226048, adjusting msgr requires for osds
Oct 11 01:43:24 compute-0 ceph-osd[206800]: osd.1 12 check_osdmap_features require_osd_release unknown -> reef
Oct 11 01:43:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v47: 1 pgs: 1 unknown; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
Oct 11 01:43:24 compute-0 podman[207812]: 2025-10-11 01:43:24.380837276 +0000 UTC m=+0.195665520 container init 828dce3fe0dd715278c91c0082e2e415d321a1549bb273e9ec9f40663f06b3c0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:43:24 compute-0 podman[207812]: 2025-10-11 01:43:24.400127912 +0000 UTC m=+0.214956116 container start 828dce3fe0dd715278c91c0082e2e415d321a1549bb273e9ec9f40663f06b3c0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 01:43:24 compute-0 bash[207812]: 828dce3fe0dd715278c91c0082e2e415d321a1549bb273e9ec9f40663f06b3c0
Oct 11 01:43:24 compute-0 ceph-osd[206800]: osd.1 12 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
Oct 11 01:43:24 compute-0 ceph-osd[206800]: osd.1 12 set_numa_affinity not setting numa affinity
Oct 11 01:43:24 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-1[206796]: 2025-10-11T01:43:24.424+0000 7f227aa98640 -1 osd.1 12 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
Oct 11 01:43:24 compute-0 systemd[1]: Started Ceph osd.2 for 3c7617c3-7a20-523e-a9de-20c0d6ba41da.
Oct 11 01:43:24 compute-0 ceph-osd[206800]: osd.1 12 _collect_metadata loop4:  no unique device id for loop4: fallback method has no model nor serial
Oct 11 01:43:24 compute-0 ceph-osd[207831]: set uid:gid to 167:167 (ceph:ceph)
Oct 11 01:43:24 compute-0 ceph-osd[207831]: ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable), process ceph-osd, pid 2
Oct 11 01:43:24 compute-0 ceph-osd[207831]: pidfile_write: ignore empty --pid-file
Oct 11 01:43:24 compute-0 ceph-mgr[192233]: mgr.server handle_open ignoring open from osd.1 v2:192.168.122.100:6806/280669855; not ready for session (expect reconnect)
Oct 11 01:43:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 1} v 0) v1
Oct 11 01:43:24 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:24 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.1: (2) No such file or directory
Oct 11 01:43:24 compute-0 ceph-osd[207831]: bdev(0x5626b17e3800 /var/lib/ceph/osd/ceph-2/block) open path /var/lib/ceph/osd/ceph-2/block
Oct 11 01:43:24 compute-0 ceph-osd[207831]: bdev(0x5626b17e3800 /var/lib/ceph/osd/ceph-2/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-2/block failed: (22) Invalid argument
Oct 11 01:43:24 compute-0 ceph-osd[207831]: bdev(0x5626b17e3800 /var/lib/ceph/osd/ceph-2/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:24 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _set_cache_sizes cache_size 1073741824 meta 0.45 kv 0.45 kv_onode 0.04 data 0.06
Oct 11 01:43:24 compute-0 ceph-osd[207831]: bdev(0x5626b2625800 /var/lib/ceph/osd/ceph-2/block) open path /var/lib/ceph/osd/ceph-2/block
Oct 11 01:43:24 compute-0 ceph-osd[207831]: bdev(0x5626b2625800 /var/lib/ceph/osd/ceph-2/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-2/block failed: (22) Invalid argument
Oct 11 01:43:24 compute-0 ceph-osd[207831]: bdev(0x5626b2625800 /var/lib/ceph/osd/ceph-2/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:24 compute-0 ceph-osd[207831]: bluefs add_block_device bdev 1 path /var/lib/ceph/osd/ceph-2/block size 20 GiB
Oct 11 01:43:24 compute-0 ceph-osd[207831]: bdev(0x5626b2625800 /var/lib/ceph/osd/ceph-2/block) close
Oct 11 01:43:24 compute-0 sudo[206889]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:43:24 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:43:24 compute-0 unix_chkpwd[207846]: password check failed for user (root)
Oct 11 01:43:24 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:24 compute-0 sshd-session[207768]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 01:43:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e12 do_prune osdmap full prune enabled
Oct 11 01:43:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e13 e13: 3 total, 2 up, 3 in
Oct 11 01:43:24 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : osd.1 [v2:192.168.122.100:6806/280669855,v1:192.168.122.100:6807/280669855] boot
Oct 11 01:43:24 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e13: 3 total, 2 up, 3 in
Oct 11 01:43:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 1} v 0) v1
Oct 11 01:43:24 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 2} v 0) v1
Oct 11 01:43:24 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:24 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.2: (2) No such file or directory
Oct 11 01:43:24 compute-0 ceph-osd[206800]: osd.1 13 state: booting -> active
Oct 11 01:43:24 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 13 pg[1.0( empty local-lis/les=0/0 n=0 ec=11/11 lis/c=0/0 les/c/f=0/0/0 sis=13) [1] r=0 lpr=13 pi=[11,13)/0 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e13 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:43:24 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 13 pg[1.0( empty local-lis/les=0/0 n=0 ec=11/11 lis/c=0/0 les/c/f=0/0/0 sis=13) [1] r=-1 lpr=13 pi=[11,13)/0 crt=0'0 mlcod 0'0 unknown NOTIFY mbc={}] start_peering_interval up [] -> [1], acting [] -> [1], acting_primary ? -> 1, up_primary ? -> 1, role -1 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:43:24 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 13 pg[1.0( empty local-lis/les=0/0 n=0 ec=11/11 lis/c=0/0 les/c/f=0/0/0 sis=13) [1] r=-1 lpr=13 pi=[11,13)/0 crt=0'0 mlcod 0'0 unknown NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:43:24 compute-0 sudo[207847]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:24 compute-0 sudo[207847]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:24 compute-0 sudo[207847]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:24 compute-0 ceph-osd[207831]: bdev(0x5626b17e3800 /var/lib/ceph/osd/ceph-2/block) close
Oct 11 01:43:24 compute-0 sudo[207872]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:43:24 compute-0 sudo[207872]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:24 compute-0 sudo[207872]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:24 compute-0 sudo[207898]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:24 compute-0 sudo[207898]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:24 compute-0 sudo[207898]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:24 compute-0 ceph-osd[207831]: starting osd.2 osd_data /var/lib/ceph/osd/ceph-2 /var/lib/ceph/osd/ceph-2/journal
Oct 11 01:43:25 compute-0 sudo[207923]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:43:25 compute-0 ceph-osd[207831]: load: jerasure load: lrc 
Oct 11 01:43:25 compute-0 sudo[207923]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19acc00 /var/lib/ceph/osd/ceph-2/block) open path /var/lib/ceph/osd/ceph-2/block
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19acc00 /var/lib/ceph/osd/ceph-2/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-2/block failed: (22) Invalid argument
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19acc00 /var/lib/ceph/osd/ceph-2/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _set_cache_sizes cache_size 1073741824 meta 0.45 kv 0.45 kv_onode 0.04 data 0.06
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19acc00 /var/lib/ceph/osd/ceph-2/block) close
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19acc00 /var/lib/ceph/osd/ceph-2/block) open path /var/lib/ceph/osd/ceph-2/block
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19acc00 /var/lib/ceph/osd/ceph-2/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-2/block failed: (22) Invalid argument
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19acc00 /var/lib/ceph/osd/ceph-2/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _set_cache_sizes cache_size 1073741824 meta 0.45 kv 0.45 kv_onode 0.04 data 0.06
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19acc00 /var/lib/ceph/osd/ceph-2/block) close
Oct 11 01:43:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e13 do_prune osdmap full prune enabled
Oct 11 01:43:25 compute-0 ceph-mon[191930]: OSD bench result of 4783.214026 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.1. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd].
Oct 11 01:43:25 compute-0 ceph-mon[191930]: pgmap v47: 1 pgs: 1 unknown; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
Oct 11 01:43:25 compute-0 ceph-mon[191930]: osd.1 [v2:192.168.122.100:6806/280669855,v1:192.168.122.100:6807/280669855] boot
Oct 11 01:43:25 compute-0 ceph-mon[191930]: osdmap e13: 3 total, 2 up, 3 in
Oct 11 01:43:25 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
Oct 11 01:43:25 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e14 e14: 3 total, 2 up, 3 in
Oct 11 01:43:25 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e14: 3 total, 2 up, 3 in
Oct 11 01:43:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 2} v 0) v1
Oct 11 01:43:25 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:25 compute-0 ceph-osd[207831]: mClockScheduler: set_osd_capacity_params_from_config: osd_bandwidth_cost_per_io: 499321.90 bytes/io, osd_bandwidth_capacity_per_shard 157286400.00 bytes/second
Oct 11 01:43:25 compute-0 ceph-osd[207831]: osd.2:0.OSDShard using op scheduler mclock_scheduler, cutoff=196
Oct 11 01:43:25 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.2: (2) No such file or directory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19acc00 /var/lib/ceph/osd/ceph-2/block) open path /var/lib/ceph/osd/ceph-2/block
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19acc00 /var/lib/ceph/osd/ceph-2/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-2/block failed: (22) Invalid argument
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19acc00 /var/lib/ceph/osd/ceph-2/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _set_cache_sizes cache_size 1073741824 meta 0.45 kv 0.45 kv_onode 0.04 data 0.06
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19ad400 /var/lib/ceph/osd/ceph-2/block) open path /var/lib/ceph/osd/ceph-2/block
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19ad400 /var/lib/ceph/osd/ceph-2/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-2/block failed: (22) Invalid argument
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19ad400 /var/lib/ceph/osd/ceph-2/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluefs add_block_device bdev 1 path /var/lib/ceph/osd/ceph-2/block size 20 GiB
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluefs mount
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluefs _init_alloc shared, id 1, capacity 0x4ffc00000, block size 0x10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluefs mount shared_bdev_used = 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _prepare_db_environment set db_paths to db,20397110067 db.slow,20397110067
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: RocksDB version: 7.9.2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Git sha 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Compile date 2025-05-06 23:30:25
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: DB SUMMARY
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: DB Session ID:  28W0HLYBQUTA5EX2FZLR
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: CURRENT file:  CURRENT
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: IDENTITY file:  IDENTITY
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: MANIFEST file:  MANIFEST-000032 size: 1007 Bytes
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: SST files in db dir, Total Num: 1, files: 000030.sst 
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: SST files in db.slow dir, Total Num: 0, files: 
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Write Ahead Log file in db.wal: 000031.log size: 5093 ; 
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                         Options.error_if_exists: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.create_if_missing: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                         Options.paranoid_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.flush_verify_memtable_count: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.track_and_verify_wals_in_manifest: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.verify_sst_unique_id_in_manifest: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                                     Options.env: 0x5626b2677d50
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                                      Options.fs: LegacyFileSystem
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                                Options.info_log: 0x5626b186e980
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_file_opening_threads: 16
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                              Options.statistics: (nil)
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.use_fsync: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.max_log_file_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.max_manifest_file_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.log_file_time_to_roll: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.keep_log_file_num: 1000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.recycle_log_file_num: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                         Options.allow_fallocate: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.allow_mmap_reads: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.allow_mmap_writes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.use_direct_reads: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.use_direct_io_for_flush_and_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.create_missing_column_families: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                              Options.db_log_dir: 
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                                 Options.wal_dir: db.wal
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.table_cache_numshardbits: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                         Options.WAL_ttl_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.WAL_size_limit_MB: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.max_write_batch_group_size_bytes: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.manifest_preallocation_size: 4194304
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                     Options.is_fd_close_on_exec: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.advise_random_on_open: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.db_write_buffer_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.write_buffer_manager: 0x5626b2784460
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.access_hint_on_compaction_start: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.random_access_max_buffer_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                      Options.use_adaptive_mutex: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                            Options.rate_limiter: (nil)
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.sst_file_manager.rate_bytes_per_sec: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.wal_recovery_mode: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.enable_thread_tracking: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.enable_pipelined_write: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.unordered_write: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.allow_concurrent_memtable_write: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.enable_write_thread_adaptive_yield: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.write_thread_max_yield_usec: 100
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.write_thread_slow_yield_usec: 3
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.row_cache: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                              Options.wal_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.avoid_flush_during_recovery: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.allow_ingest_behind: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.two_write_queues: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.manual_wal_flush: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.wal_compression: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.atomic_flush: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.avoid_unnecessary_blocking_io: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.persist_stats_to_disk: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.write_dbid_to_manifest: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.log_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.file_checksum_gen_factory: Unknown
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.best_efforts_recovery: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bgerror_resume_count: 2147483647
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bgerror_resume_retry_interval: 1000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.allow_data_in_errors: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.db_host_id: __hostname__
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.enforce_single_del_contracts: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.max_background_jobs: 4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.max_background_compactions: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.max_subcompactions: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.avoid_flush_during_shutdown: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.writable_file_max_buffer_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.delayed_write_rate : 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.max_total_wal_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.delete_obsolete_files_period_micros: 21600000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.stats_dump_period_sec: 600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.stats_persist_period_sec: 600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.stats_history_buffer_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.max_open_files: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.bytes_per_sync: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                      Options.wal_bytes_per_sync: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.strict_bytes_per_sync: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.compaction_readahead_size: 2097152
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.max_background_flushes: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Compression algorithms supported:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         kZSTD supported: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         kXpressCompression supported: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         kBZip2Compression supported: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         kZSTDNotFinalCompression supported: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         kLZ4Compression supported: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         kZlibCompression supported: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         kLZ4HCCompression supported: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         kSnappyCompression supported: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Fast CRC32 supported: Supported on x86
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: DMutex implementation: pthread_mutex_t
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl_readonly.cc:25] Opening the db in read only mode
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: db/MANIFEST-000032
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: .T:int64_array.b:bitwise_xor
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186efe0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-0]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186efe0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-1]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186efe0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
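The per-column-family dumps in this span all repeat the same tuning profile. For reference, a minimal sketch of that profile against the public RocksDB C++ API follows, covering only the headline values from the dump. BlueStore assembles these options internally (including its own BinnedLRUCache), so this is illustrative rather than Ceph's implementation: the stock rocksdb::NewLRUCache stands in for BinnedLRUCache, and the bloom bits-per-key of 10 is an assumption, since the log records only "filter_policy: bloomfilter".

    #include <rocksdb/cache.h>
    #include <rocksdb/filter_policy.h>
    #include <rocksdb/options.h>
    #include <rocksdb/table.h>

    rocksdb::Options MakeOsdLikeOptions() {
      rocksdb::Options opts;
      // Memtable settings from the dump: 16 MiB buffers, at most 64 held
      // in memory, flushed once 6 have accumulated (~96 MiB per flush).
      opts.write_buffer_size = 16777216;
      opts.max_write_buffer_number = 64;
      opts.min_write_buffer_number_to_merge = 6;
      opts.compression = rocksdb::kLZ4Compression;
      opts.num_levels = 7;
      opts.level0_file_num_compaction_trigger = 8;
      opts.level0_slowdown_writes_trigger = 20;
      opts.level0_stop_writes_trigger = 36;
      opts.target_file_size_base = 67108864;       // 64 MiB SSTs
      opts.max_bytes_for_level_base = 1073741824;  // 1 GiB at L1
      opts.max_bytes_for_level_multiplier = 8.0;
      opts.compaction_pri = rocksdb::kMinOverlappingRatio;
      opts.ttl = 2592000;                          // 30 days

      rocksdb::BlockBasedTableOptions table;
      table.block_size = 4096;
      table.cache_index_and_filter_blocks = true;
      table.pin_top_level_index_and_filter = true;
      // Plain sharded LRU cache standing in for Ceph's BinnedLRUCache;
      // capacity (~461 MiB) and shard bits copied from the dump.
      table.block_cache = rocksdb::NewLRUCache(483183820, 4);
      // Bits-per-key is assumed; the log only says "bloomfilter".
      table.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));
      opts.table_factory.reset(rocksdb::NewBlockBasedTableFactory(table));
      return opts;
    }

Each of the m-*, p-* and O-* families logged below would receive a copy of the same ColumnFamilyOptions, which is why the dump repeats nearly verbatim per family.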
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-2]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186efe0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 14 pg[1.0( empty local-lis/les=13/14 n=0 ec=11/11 lis/c=0/0 les/c/f=0/0/0 sis=13) [1] r=0 lpr=13 pi=[11,13)/0 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-0]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186efe0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
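A few of the repeated values are easier to read once converted. write_buffer_size 16777216 is 16 MiB, so with max_write_buffer_number 64 a single column family can hold up to 1 GiB of memtables, flushed in roughly 96 MiB batches (min_write_buffer_number_to_merge: 6). With level_compaction_dynamic_level_bytes off, level targets grow as max_bytes_for_level_base x multiplier^(n-1): about 1 GiB at L1, 8 GiB at L2, 64 GiB at L3. max_compaction_bytes 1677721600 is exactly 25 x the 64 MiB target_file_size_base (the stock RocksDB ratio), and ttl 2592000 seconds is 30 days.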
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-1]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186efe0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-2]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186efe0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
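With Options.level_compaction_dynamic_level_bytes at 0 and every max_bytes_for_level_multiplier_addtl entry at 1, the per-level byte budgets follow directly from max_bytes_for_level_base and the 8x multiplier printed above. A minimal Python sketch of that arithmetic, using only values that appear in these dumps (num_levels: 7 is printed in each of them):

    # Static level sizing implied by the options logged above.
    max_bytes_for_level_base = 1073741824      # 1 GiB budget for L1
    max_bytes_for_level_multiplier = 8.0
    num_levels = 7                             # L0 is triggered by file count (8 files), not bytes

    size = float(max_bytes_for_level_base)
    for level in range(1, num_levels):
        print(f"L{level}: {size / 2**30:g} GiB")
        size *= max_bytes_for_level_multiplier
    # -> L1: 1, L2: 8, L3: 64, L4: 512, L5: 4096, L6: 32768 GiB

Individual SST files within each level still target target_file_size_base (64 MiB) at every depth, since target_file_size_multiplier is 1.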
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-0]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186efc0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856430
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
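Two derived figures are worth pulling out of the [O-0] dump above: the worst-case memtable footprint per column family, and how the BinnedLRUCache splits its capacity across shards. A small sketch of the arithmetic, assuming the usual RocksDB semantics for these options (all 64 write buffers only accumulate if flushes fall behind):

    # Memtable ceiling per column family, from the logged options.
    write_buffer_size = 16777216                    # 16 MiB per memtable
    max_write_buffer_number = 64
    min_write_buffer_number_to_merge = 6
    print(write_buffer_size * max_write_buffer_number // 2**30)           # 1 GiB ceiling
    print(write_buffer_size * min_write_buffer_number_to_merge // 2**20)  # 96 MiB merged per flush

    # Block cache sharding: capacity is divided over 2**num_shard_bits shards.
    capacity, num_shard_bits = 536870912, 4
    print(2**num_shard_bits, capacity // 2**num_shard_bits // 2**20)      # 16 shards, 32 MiB each

Note that the 512 MiB block_cache appears with the same pointer (0x5626b1856430) in each column family's dump, i.e. one cache shared across families rather than one per family.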
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-1]:
                    (options identical, line for line, to column family [O-0] above; duplicate dump omitted)
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-2]:
                    (options identical, line for line, to column family [O-0] above; duplicate dump omitted)
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:635]         (skipping printing options)
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:635]         (skipping printing options)
Oct 11 01:43:25 compute-0 podman[207994]: 2025-10-11 01:43:25.657095745 +0000 UTC m=+0.113191365 container create 86d5cd856b6c5e630a321c050a2ce51da524dd80ea9f12c22a9e38b7727140de (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_banzai, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file: db/MANIFEST-000032 succeeded, manifest_file_number is 32, next_file_number is 34, last_sequence is 12, log_number is 5, prev_log_number is 0, max_column_family is 11, min_log_number_to_keep is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [m-0] (ID 1), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [m-1] (ID 2), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [m-2] (ID 3), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [p-0] (ID 4), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [p-1] (ID 5), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [p-2] (ID 6), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [O-0] (ID 7), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [O-1] (ID 8), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [O-2] (ID 9), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [L] (ID 10), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [P] (ID 11), log number is 5
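Twelve column families (IDs 0 through 11, consistent with max_column_family is 11 in the manifest summary above) all recover against log number 5. If the (name, ID) pairs are needed programmatically, a throwaway parser is enough; the regex below assumes exactly the journal line shape shown here:

    import re

    # Matches the version_set recovery lines printed above.
    CF_RE = re.compile(r"Column family \[(?P<name>[^\]]+)\] \(ID (?P<cf_id>\d+)\)")

    line = "rocksdb: [db/version_set.cc:5581] Column family [O-0] (ID 7), log number is 5"
    m = CF_RE.search(line)
    print(m.group("name"), int(m.group("cf_id")))   # O-0 7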
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 4af6075d-3616-4571-b9dd-20743cdcc04d
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147005665052, "job": 1, "event": "recovery_started", "wal_files": [31]}
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #31 mode 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147005665644, "job": 1, "event": "recovery_finished"}
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _open_db opened rocksdb path db options compression=kLZ4Compression,max_write_buffer_number=64,min_write_buffer_number_to_merge=6,compaction_style=kCompactionStyleLevel,write_buffer_size=16777216,max_background_jobs=4,level0_file_num_compaction_trigger=8,max_bytes_for_level_base=1073741824,max_bytes_for_level_multiplier=8,compaction_readahead_size=2MB,max_total_wal_size=1073741824,writable_file_max_buffer_size=0
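The _open_db line is the flat key=value string BlueStore hands to RocksDB; every entry in it reappears expanded in the Options dumps above (LZ4 compression, write_buffer_size 16777216, and so on). Splitting it back into pairs is one line, assuming the comma/equals format shown:

    # The option string copied verbatim from the _open_db line above.
    opts = ("compression=kLZ4Compression,max_write_buffer_number=64,"
            "min_write_buffer_number_to_merge=6,compaction_style=kCompactionStyleLevel,"
            "write_buffer_size=16777216,max_background_jobs=4,"
            "level0_file_num_compaction_trigger=8,max_bytes_for_level_base=1073741824,"
            "max_bytes_for_level_multiplier=8,compaction_readahead_size=2MB,"
            "max_total_wal_size=1073741824,writable_file_max_buffer_size=0")
    parsed = dict(kv.split("=", 1) for kv in opts.split(","))
    print(parsed["write_buffer_size"])   # 16777216, matching Options.write_buffer_size above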
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _open_super_meta old nid_max 1025
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _open_super_meta old blobid_max 10240
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _open_super_meta ondisk_format 4 compat_ondisk_format 3
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _open_super_meta min_alloc_size 0x1000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: freelist init
Oct 11 01:43:25 compute-0 ceph-osd[207831]: freelist _read_cfg
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _init_alloc loaded 20 GiB in 2 extents, allocator type hybrid, capacity 0x4ffc00000, block size 0x1000, free 0x4ffbfd000, fragmentation 1.9e-07
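Converting the hex figures in the _init_alloc line confirms the reported sizes: the device is just under 20 GiB and only three 4 KiB blocks are allocated at this point, consistent with the near-zero fragmentation figure. Pure arithmetic on the logged values:

    capacity = 0x4FFC00000      # 21470642176 bytes, the "20 GiB" in the log
    free     = 0x4FFBFD000
    block    = 0x1000           # 4 KiB, matching min_alloc_size logged above

    used = capacity - free
    print(capacity, capacity / 2**30)   # 21470642176 bytes, ~19.996 GiB
    print(used, used // block)          # 12288 bytes -> 3 blocks in use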
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:496] Shutdown: canceling all background work
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:704] Shutdown complete
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluefs umount
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19ad400 /var/lib/ceph/osd/ceph-2/block) close
Oct 11 01:43:25 compute-0 podman[207994]: 2025-10-11 01:43:25.614066917 +0000 UTC m=+0.070162577 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:25 compute-0 ceph-mgr[192233]: [devicehealth INFO root] creating main.db for devicehealth
Oct 11 01:43:25 compute-0 systemd[1]: Started libpod-conmon-86d5cd856b6c5e630a321c050a2ce51da524dd80ea9f12c22a9e38b7727140de.scope.
Oct 11 01:43:25 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:25 compute-0 podman[207994]: 2025-10-11 01:43:25.82242403 +0000 UTC m=+0.278519630 container init 86d5cd856b6c5e630a321c050a2ce51da524dd80ea9f12c22a9e38b7727140de (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_banzai, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:43:25 compute-0 podman[207994]: 2025-10-11 01:43:25.839847417 +0000 UTC m=+0.295943007 container start 86d5cd856b6c5e630a321c050a2ce51da524dd80ea9f12c22a9e38b7727140de (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_banzai, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:43:25 compute-0 ceph-mgr[192233]: [devicehealth INFO root] Check health
Oct 11 01:43:25 compute-0 podman[207994]: 2025-10-11 01:43:25.844157203 +0000 UTC m=+0.300252803 container attach 86d5cd856b6c5e630a321c050a2ce51da524dd80ea9f12c22a9e38b7727140de (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_banzai, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:25 compute-0 agitated_banzai[208204]: 167 167
Oct 11 01:43:25 compute-0 systemd[1]: libpod-86d5cd856b6c5e630a321c050a2ce51da524dd80ea9f12c22a9e38b7727140de.scope: Deactivated successfully.
Oct 11 01:43:25 compute-0 podman[207994]: 2025-10-11 01:43:25.853385654 +0000 UTC m=+0.309481274 container died 86d5cd856b6c5e630a321c050a2ce51da524dd80ea9f12c22a9e38b7727140de (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_banzai, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS)
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19ad400 /var/lib/ceph/osd/ceph-2/block) open path /var/lib/ceph/osd/ceph-2/block
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19ad400 /var/lib/ceph/osd/ceph-2/block) ioctl(F_SET_FILE_RW_HINT) on /var/lib/ceph/osd/ceph-2/block failed: (22) Invalid argument
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bdev(0x5626b19ad400 /var/lib/ceph/osd/ceph-2/block) open size 21470642176 (0x4ffc00000, 20 GiB) block_size 4096 (4 KiB) rotational device, discard supported
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluefs add_block_device bdev 1 path /var/lib/ceph/osd/ceph-2/block size 20 GiB
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluefs mount
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluefs _init_alloc shared, id 1, capacity 0x4ffc00000, block size 0x10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluefs mount shared_bdev_used = 4718592
Oct 11 01:43:25 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _prepare_db_environment set db_paths to db,20397110067 db.slow,20397110067
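Two sanity checks on the sizes in this remount block: the db_paths budget of 20397110067 bytes works out to exactly 95% of the 21470642176-byte device opened above (the 95% ratio is an observation from these two numbers, not a quoted constant), and shared_bdev_used is a whole number of the 64 KiB BlueFS allocation units from the bluefs _init_alloc line:

    capacity  = 21470642176     # bdev open size logged above
    db_budget = 20397110067     # db_paths value from _prepare_db_environment
    print(capacity * 95 // 100 == db_budget)    # True: a 95% budget, exact to the byte

    bluefs_block = 0x10000      # 64 KiB, from "bluefs _init_alloc ... block size 0x10000"
    print(4718592 // bluefs_block)              # shared_bdev_used = 72 units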
Oct 11 01:43:25 compute-0 ceph-mgr[192233]: [devicehealth ERROR root] Fail to parse JSON result from daemon osd.2 ()
Oct 11 01:43:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: RocksDB version: 7.9.2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Git sha 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Compile date 2025-05-06 23:30:25
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: DB SUMMARY
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: DB Session ID:  28W0HLYBQUTA5EX2FZLQ
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: CURRENT file:  CURRENT
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: IDENTITY file:  IDENTITY
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: MANIFEST file:  MANIFEST-000032 size: 1007 Bytes
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: SST files in db dir, Total Num: 1, files: 000030.sst 
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: SST files in db.slow dir, Total Num: 0, files: 
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Write Ahead Log file in db.wal: 000031.log size: 5093 ; 
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                         Options.error_if_exists: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.create_if_missing: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                         Options.paranoid_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.flush_verify_memtable_count: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.track_and_verify_wals_in_manifest: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.verify_sst_unique_id_in_manifest: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                                     Options.env: 0x5626b28383f0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                                      Options.fs: LegacyFileSystem
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                                Options.info_log: 0x5626b2673b60
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_file_opening_threads: 16
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                              Options.statistics: (nil)
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.use_fsync: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.max_log_file_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.max_manifest_file_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.log_file_time_to_roll: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.keep_log_file_num: 1000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.recycle_log_file_num: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                         Options.allow_fallocate: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.allow_mmap_reads: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.allow_mmap_writes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.use_direct_reads: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.use_direct_io_for_flush_and_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.create_missing_column_families: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                              Options.db_log_dir: 
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                                 Options.wal_dir: db.wal
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.table_cache_numshardbits: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                         Options.WAL_ttl_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.WAL_size_limit_MB: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.max_write_batch_group_size_bytes: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.manifest_preallocation_size: 4194304
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                     Options.is_fd_close_on_exec: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.advise_random_on_open: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.db_write_buffer_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.write_buffer_manager: 0x5626b2784460
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.access_hint_on_compaction_start: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.random_access_max_buffer_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                      Options.use_adaptive_mutex: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                            Options.rate_limiter: (nil)
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.sst_file_manager.rate_bytes_per_sec: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.wal_recovery_mode: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.enable_thread_tracking: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.enable_pipelined_write: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.unordered_write: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.allow_concurrent_memtable_write: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.enable_write_thread_adaptive_yield: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.write_thread_max_yield_usec: 100
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.write_thread_slow_yield_usec: 3
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.row_cache: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                              Options.wal_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.avoid_flush_during_recovery: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.allow_ingest_behind: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.two_write_queues: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.manual_wal_flush: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.wal_compression: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.atomic_flush: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.avoid_unnecessary_blocking_io: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.persist_stats_to_disk: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.write_dbid_to_manifest: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.log_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.file_checksum_gen_factory: Unknown
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.best_efforts_recovery: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bgerror_resume_count: 2147483647
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bgerror_resume_retry_interval: 1000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.allow_data_in_errors: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.db_host_id: __hostname__
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.enforce_single_del_contracts: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.max_background_jobs: 4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.max_background_compactions: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.max_subcompactions: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.avoid_flush_during_shutdown: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.writable_file_max_buffer_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.delayed_write_rate : 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.max_total_wal_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.delete_obsolete_files_period_micros: 21600000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.stats_dump_period_sec: 600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.stats_persist_period_sec: 600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.stats_history_buffer_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.max_open_files: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.bytes_per_sync: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                      Options.wal_bytes_per_sync: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.strict_bytes_per_sync: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.compaction_readahead_size: 2097152
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.max_background_flushes: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Compression algorithms supported:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         kZSTD supported: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         kXpressCompression supported: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         kBZip2Compression supported: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         kZSTDNotFinalCompression supported: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         kLZ4Compression supported: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         kZlibCompression supported: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         kLZ4HCCompression supported: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         kSnappyCompression supported: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Fast CRC32 supported: Supported on x86
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: DMutex implementation: pthread_mutex_t
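The lines above are RocksDB's standard startup dump of the DB-wide options the OSD opened its store with. For reference, a minimal C++ sketch that sets the same DB-level knobs through the stock RocksDB API; the option values are copied from the dump, while the helper name and anything not printed above are illustrative assumptions, not Ceph code:

```cpp
#include <rocksdb/db.h>
#include <rocksdb/options.h>

// Minimal sketch: the DB-wide options printed in the log above,
// restated via the stock RocksDB C++ API. Values come from the dump;
// the function itself is illustrative, not Ceph's actual code.
rocksdb::DBOptions MakeDbOptions() {
  rocksdb::DBOptions db_opts;
  db_opts.create_if_missing = false;              // Options.create_if_missing: 0
  db_opts.paranoid_checks = true;                 // Options.paranoid_checks: 1
  db_opts.wal_dir = "db.wal";                     // Options.wal_dir: db.wal
  db_opts.max_total_wal_size = 1ULL << 30;        // 1073741824
  db_opts.max_background_jobs = 4;                // Options.max_background_jobs: 4
  db_opts.max_open_files = -1;                    // keep every table file open
  db_opts.delayed_write_rate = 16ULL << 20;       // 16777216 B/s when throttling
  db_opts.compaction_readahead_size = 2ULL << 20; // 2097152
  // Options.wal_recovery_mode: 2 corresponds to kPointInTimeRecovery.
  db_opts.wal_recovery_mode =
      rocksdb::WALRecoveryMode::kPointInTimeRecovery;
  return db_opts;
}
```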
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: db/MANIFEST-000032
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: .T:int64_array.b:bitwise_xor
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186eb00)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
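The [default] column-family dump above pairs small 16 MiB write buffers with a large max_write_buffer_number and a min_write_buffer_number_to_merge of 6, LZ4 on every level (bottommost override disabled), and a BlockBasedTable with 4 KiB blocks served from a shared cache. A sketch of the same shape via the stock C++ API follows; note the log names Ceph's BinnedLRUCache and says only "bloomfilter" without a bits-per-key value, so the stock LRU cache and the 10 bits/key below are stand-in assumptions:

```cpp
#include <rocksdb/cache.h>
#include <rocksdb/filter_policy.h>
#include <rocksdb/options.h>
#include <rocksdb/table.h>

// Sketch of the [default] column-family options printed above.
// The [default] CF additionally registers Ceph's
// .T:int64_array.b:bitwise_xor merge operator (not reproduced here).
rocksdb::ColumnFamilyOptions MakeCfOptions() {
  rocksdb::ColumnFamilyOptions cf;
  cf.write_buffer_size = 16ULL << 20;          // 16777216
  cf.max_write_buffer_number = 64;
  cf.min_write_buffer_number_to_merge = 6;
  cf.compression = rocksdb::kLZ4Compression;   // bottommost override disabled
  cf.num_levels = 7;
  cf.level0_file_num_compaction_trigger = 8;
  cf.level0_slowdown_writes_trigger = 20;
  cf.level0_stop_writes_trigger = 36;
  cf.target_file_size_base = 64ULL << 20;      // 67108864
  cf.max_bytes_for_level_base = 1ULL << 30;    // 1073741824
  cf.max_bytes_for_level_multiplier = 8.0;
  cf.ttl = 2592000;                            // 30 days

  rocksdb::BlockBasedTableOptions t;
  t.block_size = 4096;
  t.cache_index_and_filter_blocks = true;
  t.pin_top_level_index_and_filter = true;
  t.format_version = 5;
  t.whole_key_filtering = true;
  // The OSD uses Ceph's BinnedLRUCache (capacity 483183820 above);
  // the stock LRU cache stands in for it here.
  t.block_cache = rocksdb::NewLRUCache(483183820, /*num_shard_bits=*/4);
  // The dump says only "bloomfilter"; 10 bits/key is an assumed value.
  t.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));
  cf.table_factory.reset(rocksdb::NewBlockBasedTableFactory(t));
  return cf;
}
```

With max_bytes_for_level_base at 1 GiB, a multiplier of 8, and level_compaction_dynamic_level_bytes off, level capacities grow 1 GiB, 8 GiB, 64 GiB, and so on, which is RocksDB's standard static level sizing for these two knobs.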
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-0]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186eb00)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-1]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186eb00)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [m-2]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186eb00)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 systemd[1]: var-lib-containers-storage-overlay-2b39f9f3647ad66708963bd62fb3709b9e0c25ff20a905c32ecf2ac1a5f52142-merged.mount: Deactivated successfully.
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-0]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186eb00)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 sudo[208230]:     ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-1]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186eb00)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 sudo[208230]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 sudo[208230]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167)
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [p-2]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186eb00)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856dd0
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 483183820
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-0]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186f0e0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856430
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-1]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186f0e0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856430
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 podman[207994]: 2025-10-11 01:43:25.927889039 +0000 UTC m=+0.383984629 container remove 86d5cd856b6c5e630a321c050a2ce51da524dd80ea9f12c22a9e38b7727140de (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_banzai, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507)
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [O-2]:
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.comparator: leveldb.BytewiseComparator
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:           Options.merge_operator: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.compaction_filter_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.sst_partitioner_factory: None
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.memtable_factory: SkipListFactory
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.table_factory: BlockBasedTable
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            table_factory options:   flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5626b186f0e0)
                                              cache_index_and_filter_blocks: 1
                                              cache_index_and_filter_blocks_with_high_priority: 0
                                              pin_l0_filter_and_index_blocks_in_cache: 0
                                              pin_top_level_index_and_filter: 1
                                              index_type: 0
                                              data_block_index_type: 0
                                              index_shortening: 1
                                              data_block_hash_table_util_ratio: 0.750000
                                              checksum: 4
                                              no_block_cache: 0
                                              block_cache: 0x5626b1856430
                                              block_cache_name: BinnedLRUCache
                                              block_cache_options:
                                                capacity : 536870912
                                                num_shard_bits : 4
                                                strict_capacity_limit : 0
                                                high_pri_pool_ratio: 0.000
                                              block_cache_compressed: (nil)
                                              persistent_cache: (nil)
                                              block_size: 4096
                                              block_size_deviation: 10
                                              block_restart_interval: 16
                                              index_block_restart_interval: 1
                                              metadata_block_size: 4096
                                              partition_filters: 0
                                              use_delta_encoding: 1
                                              filter_policy: bloomfilter
                                              whole_key_filtering: 1
                                              verify_compression: 0
                                              read_amp_bytes_per_bit: 0
                                              format_version: 5
                                              enable_index_compression: 1
                                              block_align: 0
                                              max_auto_readahead_size: 262144
                                              prepopulate_block_cache: 0
                                              initial_auto_readahead_size: 8192
                                              num_file_reads_for_auto_readahead: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.write_buffer_size: 16777216
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.max_write_buffer_number: 64
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.compression: LZ4
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression: Disabled
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_insert_with_hint_prefix_extractor: nullptr
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.num_levels: 7
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:        Options.min_write_buffer_number_to_merge: 6
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_number_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:     Options.max_write_buffer_size_to_maintain: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.bottommost_compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.bottommost_compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.bottommost_compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.bottommost_compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:            Options.compression_opts.window_bits: -14
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.level: 32767
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.compression_opts.strategy: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.zstd_max_train_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.use_zstd_dict_trainer: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.parallel_threads: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                  Options.compression_opts.enabled: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:         Options.compression_opts.max_dict_buffer_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.level0_file_num_compaction_trigger: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.level0_slowdown_writes_trigger: 20
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:              Options.level0_stop_writes_trigger: 36
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.target_file_size_base: 67108864
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:             Options.target_file_size_multiplier: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.max_bytes_for_level_base: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.level_compaction_dynamic_level_bytes: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.max_bytes_for_level_multiplier: 8.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:       Options.max_sequential_skip_in_iterations: 8
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_compaction_bytes: 1677721600
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.ignore_max_compaction_bytes_for_input: true
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.arena_block_size: 1048576
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.soft_pending_compaction_bytes_limit: 68719476736
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.hard_pending_compaction_bytes_limit: 274877906944
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.disable_auto_compactions: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                        Options.compaction_style: kCompactionStyleLevel
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.compaction_pri: kMinOverlappingRatio
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.size_ratio: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.inplace_update_support: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                 Options.inplace_update_num_locks: 10000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_prefix_bloom_size_ratio: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:               Options.memtable_whole_key_filtering: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:   Options.memtable_huge_page_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.bloom_locality: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                    Options.max_successive_merges: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.optimize_filters_for_hits: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.paranoid_file_checks: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.force_consistency_checks: 1
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.report_bg_io_stats: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                               Options.ttl: 2592000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.periodic_compaction_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:  Options.preclude_last_level_data_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:    Options.preserve_internal_time_seconds: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                       Options.enable_blob_files: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                           Options.min_blob_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                          Options.blob_file_size: 268435456
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                   Options.blob_compression_type: NoCompression
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.enable_blob_garbage_collection: false
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:      Options.blob_garbage_collection_age_cutoff: 0.250000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:          Options.blob_compaction_readahead_size: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb:                Options.blob_file_starting_level: 0
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:635]         (skipping printing options)
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/column_family.cc:635]         (skipping printing options)
Oct 11 01:43:25 compute-0 systemd[1]: libpod-conmon-86d5cd856b6c5e630a321c050a2ce51da524dd80ea9f12c22a9e38b7727140de.scope: Deactivated successfully.
Oct 11 01:43:25 compute-0 sudo[208230]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:db/MANIFEST-000032 succeeded,manifest_file_number is 32, next_file_number is 34, last_sequence is 12, log_number is 5,prev_log_number is 0,max_column_family is 11,min_log_number_to_keep is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [m-0] (ID 1), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [m-1] (ID 2), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [m-2] (ID 3), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [p-0] (ID 4), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [p-1] (ID 5), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [p-2] (ID 6), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [O-0] (ID 7), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [O-1] (ID 8), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [O-2] (ID 9), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [L] (ID 10), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5581] Column family [P] (ID 11), log number is 5
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 4af6075d-3616-4571-b9dd-20743cdcc04d
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147005953266, "job": 1, "event": "recovery_started", "wal_files": [31]}
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #31 mode 2
Oct 11 01:43:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
Oct 11 01:43:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon metadata", "id": "compute-0"} v 0) v1
Oct 11 01:43:25 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mon metadata", "id": "compute-0"}]: dispatch
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147005967600, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 35, "file_size": 1272, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 13, "largest_seqno": 21, "table_properties": {"data_size": 128, "index_size": 27, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 87, "raw_average_key_size": 17, "raw_value_size": 82, "raw_average_value_size": 16, "num_data_blocks": 1, "num_entries": 5, "num_filter_entries": 5, "num_deletions": 0, "num_merge_operands": 2, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": ".T:int64_array.b:bitwise_xor", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "LZ4", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760147005, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "4af6075d-3616-4571-b9dd-20743cdcc04d", "db_session_id": "28W0HLYBQUTA5EX2FZLQ", "orig_file_number": 35, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147005976565, "cf_name": "p-0", "job": 1, "event": "table_file_creation", "file_number": 36, "file_size": 1594, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 14, "largest_seqno": 15, "table_properties": {"data_size": 468, "index_size": 39, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 72, "raw_average_key_size": 36, "raw_value_size": 567, "raw_average_value_size": 283, "num_data_blocks": 1, "num_entries": 2, "num_filter_entries": 2, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "p-0", "column_family_id": 4, "comparator": "leveldb.BytewiseComparator", "merge_operator": "nullptr", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "LZ4", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760147005, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "4af6075d-3616-4571-b9dd-20743cdcc04d", "db_session_id": "28W0HLYBQUTA5EX2FZLQ", "orig_file_number": 36, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147005987804, "cf_name": "O-2", "job": 1, "event": "table_file_creation", "file_number": 37, "file_size": 1275, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 16, "largest_seqno": 16, "table_properties": {"data_size": 121, "index_size": 64, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 55, "raw_average_key_size": 55, "raw_value_size": 50, "raw_average_value_size": 50, "num_data_blocks": 1, "num_entries": 1, "num_filter_entries": 1, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "O-2", "column_family_id": 9, "comparator": "leveldb.BytewiseComparator", "merge_operator": "nullptr", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "LZ4", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760147005, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "4af6075d-3616-4571-b9dd-20743cdcc04d", "db_session_id": "28W0HLYBQUTA5EX2FZLQ", "orig_file_number": 37, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147005992582, "job": 1, "event": "recovery_finished"}
Oct 11 01:43:25 compute-0 ceph-osd[207831]: rocksdb: [db/version_set.cc:5047] Creating manifest 40
Oct 11 01:43:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x5626b2844000
Oct 11 01:43:26 compute-0 ceph-osd[207831]: rocksdb: DB pointer 0x5626b1895a00
Oct 11 01:43:26 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _open_db opened rocksdb path db options compression=kLZ4Compression,max_write_buffer_number=64,min_write_buffer_number_to_merge=6,compaction_style=kCompactionStyleLevel,write_buffer_size=16777216,max_background_jobs=4,level0_file_num_compaction_trigger=8,max_bytes_for_level_base=1073741824,max_bytes_for_level_multiplier=8,compaction_readahead_size=2MB,max_total_wal_size=1073741824,writable_file_max_buffer_size=0
Oct 11 01:43:26 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _upgrade_super from 4, latest 4
Oct 11 01:43:26 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _upgrade_super done
Oct 11 01:43:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 01:43:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 0.2 total, 0.2 interval
                                            Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
                                            Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
                                            Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      2/0    2.63 KB   0.2      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.014       0      0       0.0       0.0
                                             Sum      2/0    2.63 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.014       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.014       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.1      0.01              0.00         1    0.014       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.2 total, 0.2 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 2.3e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
                                            
                                            ** Compaction Stats [m-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.2 total, 0.2 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 2.3e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-0] **
                                            
                                            ** Compaction Stats [m-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.2 total, 0.2 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 2.3e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-1] **
                                            
                                            ** Compaction Stats [m-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.2 total, 0.2 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 2.3e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-2] **
                                            
                                            ** Compaction Stats [p-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.56 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.01              0.00         1    0.009       0      0       0.0       0.0
                                             Sum      1/0    1.56 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.01              0.00         1    0.009       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.01              0.00         1    0.009       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.2      0.01              0.00         1    0.009       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.2 total, 0.2 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 2.3e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-0] **
                                            
                                            ** Compaction Stats [p-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.2 total, 0.2 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 2.3e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-1] **
                                            
                                            ** Compaction Stats [p-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.2 total, 0.2 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 2.3e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-2] **
                                            
                                            ** Compaction Stats [O-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.2 total, 0.2 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856430#2 capacity: 512.00 MB usage: 0.25 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 2 last_secs: 9e-06 secs_since: 0
                                            Block cache entry stats(count,size,portion): FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.14 KB,2.68221e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-0] **
                                            
                                            ** Compaction Stats [O-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.2 total, 0.2 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856430#2 capacity: 512.00 MB usage: 0.25 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 2 last_secs: 9e-06 secs_since: 0
                                            Block cache entry stats(count,size,portion): FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.14 KB,2.68221e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-1] **
                                            
                                            ** Compaction Stats [O-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.25 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                             Sum      1/0    1.25 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.2 total, 0.2 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856430#2 capacity: 512.00 MB usage: 0.25 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 2 last_secs: 9e-06 secs_since: 0
                                            Block cache entry stats(count,size,portion): FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.14 KB,2.68221e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-2] **
                                            
                                            ** Compaction Stats [L] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.005       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [L] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.005       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.2 total, 0.2 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 2.3e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [L] **
                                            
                                            ** Compaction Stats [P] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [P] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 0.2 total, 0.2 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 460.80 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 8 last_secs: 2.3e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2,0.72 KB,0.000152323%) FilterBlock(3,0.33 KB,6.95388e-05%) IndexBlock(3,0.34 KB,7.28501e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [P] **
Oct 11 01:43:26 compute-0 ceph-osd[207831]: <cls> /home/jenkins-build/build/workspace/ceph-build/ARCH/x86_64/AVAILABLE_ARCH/x86_64/AVAILABLE_DIST/centos9/DIST/centos9/MACHINE_SIZE/gigantic/release/18.2.7/rpm/el9/BUILD/ceph-18.2.7/src/cls/cephfs/cls_cephfs.cc:201: loading cephfs
Oct 11 01:43:26 compute-0 ceph-osd[207831]: <cls> /home/jenkins-build/build/workspace/ceph-build/ARCH/x86_64/AVAILABLE_ARCH/x86_64/AVAILABLE_DIST/centos9/DIST/centos9/MACHINE_SIZE/gigantic/release/18.2.7/rpm/el9/BUILD/ceph-18.2.7/src/cls/hello/cls_hello.cc:316: loading cls_hello
Oct 11 01:43:26 compute-0 ceph-osd[207831]: _get_class not permitted to load lua
Oct 11 01:43:26 compute-0 ceph-osd[207831]: _get_class not permitted to load sdk
Oct 11 01:43:26 compute-0 ceph-osd[207831]: _get_class not permitted to load test_remote_reads
Oct 11 01:43:26 compute-0 ceph-osd[207831]: osd.2 0 crush map has features 288232575208783872, adjusting msgr requires for clients
Oct 11 01:43:26 compute-0 ceph-osd[207831]: osd.2 0 crush map has features 288232575208783872 was 8705, adjusting msgr requires for mons
Oct 11 01:43:26 compute-0 ceph-osd[207831]: osd.2 0 crush map has features 288232575208783872, adjusting msgr requires for osds
Oct 11 01:43:26 compute-0 ceph-osd[207831]: osd.2 0 check_osdmap_features enabling on-disk ERASURE CODES compat feature
Oct 11 01:43:26 compute-0 ceph-osd[207831]: osd.2 0 load_pgs
Oct 11 01:43:26 compute-0 ceph-osd[207831]: osd.2 0 load_pgs opened 0 pgs
Oct 11 01:43:26 compute-0 ceph-osd[207831]: osd.2 0 log_to_monitors true
Oct 11 01:43:26 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2[207827]: 2025-10-11T01:43:26.049+0000 7f23cea26740 -1 osd.2 0 log_to_monitors true
Oct 11 01:43:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]} v 0) v1
Oct 11 01:43:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='osd.2 [v2:192.168.122.100:6810/2304766820,v1:192.168.122.100:6811/2304766820]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
Oct 11 01:43:26 compute-0 podman[208457]: 2025-10-11 01:43:26.131639756 +0000 UTC m=+0.060142025 container create 607109a557e891d5ed3ed79e800c47e8c404e49d6780663dc065cc1ebd8bf5a5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=beautiful_jemison, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, ceph=True, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:26 compute-0 podman[208457]: 2025-10-11 01:43:26.107019168 +0000 UTC m=+0.035521477 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:26 compute-0 systemd[1]: Started libpod-conmon-607109a557e891d5ed3ed79e800c47e8c404e49d6780663dc065cc1ebd8bf5a5.scope.
Oct 11 01:43:26 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/10d7373606c94cc176301aba39d02dea5867cdbc0bedf9083b31316437ff3d5d/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/10d7373606c94cc176301aba39d02dea5867cdbc0bedf9083b31316437ff3d5d/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/10d7373606c94cc176301aba39d02dea5867cdbc0bedf9083b31316437ff3d5d/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/10d7373606c94cc176301aba39d02dea5867cdbc0bedf9083b31316437ff3d5d/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:26 compute-0 podman[208457]: 2025-10-11 01:43:26.304551136 +0000 UTC m=+0.233053475 container init 607109a557e891d5ed3ed79e800c47e8c404e49d6780663dc065cc1ebd8bf5a5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=beautiful_jemison, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3)
Oct 11 01:43:26 compute-0 podman[208457]: 2025-10-11 01:43:26.335252013 +0000 UTC m=+0.263754272 container start 607109a557e891d5ed3ed79e800c47e8c404e49d6780663dc065cc1ebd8bf5a5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=beautiful_jemison, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 01:43:26 compute-0 podman[208457]: 2025-10-11 01:43:26.340076583 +0000 UTC m=+0.268578882 container attach 607109a557e891d5ed3ed79e800c47e8c404e49d6780663dc065cc1ebd8bf5a5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=beautiful_jemison, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:43:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v50: 1 pgs: 1 creating+peering; 0 B data, 453 MiB used, 40 GiB / 40 GiB avail
Oct 11 01:43:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:43:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:43:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:43:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:43:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:43:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:43:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e14 do_prune osdmap full prune enabled
Oct 11 01:43:26 compute-0 ceph-mon[191930]: osdmap e14: 3 total, 2 up, 3 in
Oct 11 01:43:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:26 compute-0 ceph-mon[191930]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
Oct 11 01:43:26 compute-0 ceph-mon[191930]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
Oct 11 01:43:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mon metadata", "id": "compute-0"}]: dispatch
Oct 11 01:43:26 compute-0 ceph-mon[191930]: from='osd.2 [v2:192.168.122.100:6810/2304766820,v1:192.168.122.100:6811/2304766820]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
Oct 11 01:43:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='osd.2 [v2:192.168.122.100:6810/2304766820,v1:192.168.122.100:6811/2304766820]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished
Oct 11 01:43:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e15 e15: 3 total, 2 up, 3 in
Oct 11 01:43:26 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : mgrmap e9: compute-0.bzgmgr(active, since 90s)
Oct 11 01:43:26 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e15: 3 total, 2 up, 3 in
Oct 11 01:43:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=compute-0", "root=default"]} v 0) v1
Oct 11 01:43:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='osd.2 [v2:192.168.122.100:6810/2304766820,v1:192.168.122.100:6811/2304766820]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=compute-0", "root=default"]}]: dispatch
Oct 11 01:43:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e15 create-or-move crush item name 'osd.2' initial_weight 0.0195 at location {host=compute-0,root=default}
Oct 11 01:43:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 2} v 0) v1
Oct 11 01:43:26 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:26 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.2: (2) No such file or directory
Oct 11 01:43:26 compute-0 sshd-session[207768]: Failed password for root from 193.46.255.217 port 47562 ssh2
Oct 11 01:43:27 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : purged_snaps scrub starts
Oct 11 01:43:27 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : purged_snaps scrub ok
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]: {
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:         "osd_id": 1,
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:         "type": "bluestore"
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:     },
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:         "osd_id": 2,
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:         "type": "bluestore"
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:     },
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:         "osd_id": 0,
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:         "type": "bluestore"
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]:     }
Oct 11 01:43:27 compute-0 beautiful_jemison[208473]: }
Oct 11 01:43:27 compute-0 systemd[1]: libpod-607109a557e891d5ed3ed79e800c47e8c404e49d6780663dc065cc1ebd8bf5a5.scope: Deactivated successfully.
Oct 11 01:43:27 compute-0 systemd[1]: libpod-607109a557e891d5ed3ed79e800c47e8c404e49d6780663dc065cc1ebd8bf5a5.scope: Consumed 1.200s CPU time.
Oct 11 01:43:27 compute-0 podman[208457]: 2025-10-11 01:43:27.53032775 +0000 UTC m=+1.458830099 container died 607109a557e891d5ed3ed79e800c47e8c404e49d6780663dc065cc1ebd8bf5a5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=beautiful_jemison, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 01:43:27 compute-0 systemd[1]: var-lib-containers-storage-overlay-10d7373606c94cc176301aba39d02dea5867cdbc0bedf9083b31316437ff3d5d-merged.mount: Deactivated successfully.
Oct 11 01:43:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e15 do_prune osdmap full prune enabled
Oct 11 01:43:27 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='osd.2 [v2:192.168.122.100:6810/2304766820,v1:192.168.122.100:6811/2304766820]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=compute-0", "root=default"]}]': finished
Oct 11 01:43:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e16 e16: 3 total, 2 up, 3 in
Oct 11 01:43:27 compute-0 ceph-osd[207831]: osd.2 0 done with init, starting boot process
Oct 11 01:43:27 compute-0 ceph-osd[207831]: osd.2 0 start_boot
Oct 11 01:43:27 compute-0 ceph-osd[207831]: osd.2 0 maybe_override_options_for_qos osd_max_backfills set to 1
Oct 11 01:43:27 compute-0 ceph-osd[207831]: osd.2 0 maybe_override_options_for_qos osd_recovery_max_active set to 0
Oct 11 01:43:27 compute-0 ceph-osd[207831]: osd.2 0 maybe_override_options_for_qos osd_recovery_max_active_hdd set to 3
Oct 11 01:43:27 compute-0 ceph-osd[207831]: osd.2 0 maybe_override_options_for_qos osd_recovery_max_active_ssd set to 10
Oct 11 01:43:27 compute-0 ceph-osd[207831]: osd.2 0  bench count 12288000 bsize 4 KiB
Oct 11 01:43:27 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e16: 3 total, 2 up, 3 in
Oct 11 01:43:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 2} v 0) v1
Oct 11 01:43:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:27 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.2: (2) No such file or directory
Oct 11 01:43:27 compute-0 ceph-mon[191930]: pgmap v50: 1 pgs: 1 creating+peering; 0 B data, 453 MiB used, 40 GiB / 40 GiB avail
Oct 11 01:43:27 compute-0 ceph-mon[191930]: from='osd.2 [v2:192.168.122.100:6810/2304766820,v1:192.168.122.100:6811/2304766820]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished
Oct 11 01:43:27 compute-0 ceph-mon[191930]: mgrmap e9: compute-0.bzgmgr(active, since 90s)
Oct 11 01:43:27 compute-0 ceph-mon[191930]: osdmap e15: 3 total, 2 up, 3 in
Oct 11 01:43:27 compute-0 ceph-mon[191930]: from='osd.2 [v2:192.168.122.100:6810/2304766820,v1:192.168.122.100:6811/2304766820]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=compute-0", "root=default"]}]: dispatch
Oct 11 01:43:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:27 compute-0 ceph-mgr[192233]: mgr.server handle_open ignoring open from osd.2 v2:192.168.122.100:6810/2304766820; not ready for session (expect reconnect)
Oct 11 01:43:27 compute-0 podman[208457]: 2025-10-11 01:43:27.662691602 +0000 UTC m=+1.591193901 container remove 607109a557e891d5ed3ed79e800c47e8c404e49d6780663dc065cc1ebd8bf5a5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=beautiful_jemison, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 2} v 0) v1
Oct 11 01:43:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:27 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.2: (2) No such file or directory
Oct 11 01:43:27 compute-0 systemd[1]: libpod-conmon-607109a557e891d5ed3ed79e800c47e8c404e49d6780663dc065cc1ebd8bf5a5.scope: Deactivated successfully.
Oct 11 01:43:27 compute-0 sudo[207923]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:27 compute-0 unix_chkpwd[208519]: password check failed for user (root)
Oct 11 01:43:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:43:27 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:43:27 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:27 compute-0 sudo[208520]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:27 compute-0 sudo[208520]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:27 compute-0 sudo[208520]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:28 compute-0 sudo[208545]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:43:28 compute-0 sudo[208545]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:28 compute-0 sudo[208545]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:28 compute-0 sudo[208570]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:28 compute-0 sudo[208570]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:28 compute-0 sudo[208570]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:28 compute-0 sudo[208595]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:43:28 compute-0 sudo[208595]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:28 compute-0 sudo[208595]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v53: 1 pgs: 1 creating+peering; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail
Oct 11 01:43:28 compute-0 sudo[208620]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:28 compute-0 sudo[208620]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:28 compute-0 sudo[208620]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:28 compute-0 sudo[208645]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ls
Oct 11 01:43:28 compute-0 sudo[208645]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:28 compute-0 ceph-mon[191930]: from='osd.2 [v2:192.168.122.100:6810/2304766820,v1:192.168.122.100:6811/2304766820]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=compute-0", "root=default"]}]': finished
Oct 11 01:43:28 compute-0 ceph-mon[191930]: osdmap e16: 3 total, 2 up, 3 in
Oct 11 01:43:28 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:28 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:28 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:28 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:28 compute-0 ceph-mgr[192233]: mgr.server handle_open ignoring open from osd.2 v2:192.168.122.100:6810/2304766820; not ready for session (expect reconnect)
Oct 11 01:43:28 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 2} v 0) v1
Oct 11 01:43:28 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:28 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.2: (2) No such file or directory
Oct 11 01:43:29 compute-0 podman[208736]: 2025-10-11 01:43:29.240567421 +0000 UTC m=+0.110606840 container exec ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:43:29 compute-0 podman[208736]: 2025-10-11 01:43:29.363587054 +0000 UTC m=+0.233626453 container exec_died ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, OSD_FLAVOR=default)
Oct 11 01:43:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e16 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:43:29 compute-0 podman[208771]: 2025-10-11 01:43:29.588202899 +0000 UTC m=+0.109456306 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, build-date=2025-08-20T13:12:41, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, com.redhat.component=ubi9-minimal-container, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., architecture=x86_64, version=9.6, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-type=git, url=https://catalog.redhat.com/en/search?searchType=containers, container_name=openstack_network_exporter, distribution-scope=public, config_id=edpm, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, release=1755695350, vendor=Red Hat, Inc., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.tags=minimal rhel9, io.openshift.expose-services=, io.buildah.version=1.33.7)
Oct 11 01:43:29 compute-0 podman[208770]: 2025-10-11 01:43:29.614450831 +0000 UTC m=+0.127696725 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 01:43:29 compute-0 ceph-mgr[192233]: mgr.server handle_open ignoring open from osd.2 v2:192.168.122.100:6810/2304766820; not ready for session (expect reconnect)
Oct 11 01:43:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 2} v 0) v1
Oct 11 01:43:29 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:29 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.2: (2) No such file or directory
Oct 11 01:43:29 compute-0 ceph-mon[191930]: purged_snaps scrub starts
Oct 11 01:43:29 compute-0 ceph-mon[191930]: purged_snaps scrub ok
Oct 11 01:43:29 compute-0 ceph-mon[191930]: pgmap v53: 1 pgs: 1 creating+peering; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail
Oct 11 01:43:29 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:29 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:29 compute-0 podman[157119]: time="2025-10-11T01:43:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:43:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:43:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 29176 "" "Go-http-client/1.1"
Oct 11 01:43:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:43:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 5820 "" "Go-http-client/1.1"
Oct 11 01:43:29 compute-0 sshd-session[207768]: Failed password for root from 193.46.255.217 port 47562 ssh2
Oct 11 01:43:30 compute-0 sudo[208645]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:43:30 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:43:30 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v54: 1 pgs: 1 creating+peering; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail
Oct 11 01:43:30 compute-0 sudo[208895]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:30 compute-0 sudo[208895]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:30 compute-0 sudo[208895]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:30 compute-0 sudo[208920]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:43:30 compute-0 sudo[208920]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:30 compute-0 sudo[208920]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:30 compute-0 ceph-mgr[192233]: mgr.server handle_open ignoring open from osd.2 v2:192.168.122.100:6810/2304766820; not ready for session (expect reconnect)
Oct 11 01:43:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 2} v 0) v1
Oct 11 01:43:30 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:30 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.2: (2) No such file or directory
Oct 11 01:43:30 compute-0 sudo[208945]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:30 compute-0 sudo[208945]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:30 compute-0 sudo[208945]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:30 compute-0 sudo[208970]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 01:43:30 compute-0 sudo[208970]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:30 compute-0 unix_chkpwd[208995]: password check failed for user (root)
Oct 11 01:43:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:31 compute-0 ceph-mon[191930]: pgmap v54: 1 pgs: 1 creating+peering; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail
Oct 11 01:43:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:31 compute-0 ceph-osd[207831]: osd.2 0 maybe_override_max_osd_capacity_for_qos osd bench result - bandwidth (MiB/sec): 20.738 iops: 5308.885 elapsed_sec: 0.565
Oct 11 01:43:31 compute-0 ceph-osd[207831]: log_channel(cluster) log [WRN] : OSD bench result of 5308.885242 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.2. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd].
Oct 11 01:43:31 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2[207827]: 2025-10-11T01:43:31.288+0000 7f23ca9a6640 -1 osd.2 0 waiting for initial osdmap
Oct 11 01:43:31 compute-0 ceph-osd[207831]: osd.2 0 waiting for initial osdmap
Oct 11 01:43:31 compute-0 ceph-osd[207831]: osd.2 16 crush map has features 288514051259236352, adjusting msgr requires for clients
Oct 11 01:43:31 compute-0 ceph-osd[207831]: osd.2 16 crush map has features 288514051259236352 was 288232575208792577, adjusting msgr requires for mons
Oct 11 01:43:31 compute-0 ceph-osd[207831]: osd.2 16 crush map has features 3314933000852226048, adjusting msgr requires for osds
Oct 11 01:43:31 compute-0 ceph-osd[207831]: osd.2 16 check_osdmap_features require_osd_release unknown -> reef
Oct 11 01:43:31 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-osd-2[207827]: 2025-10-11T01:43:31.324+0000 7f23c5fce640 -1 osd.2 16 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
Oct 11 01:43:31 compute-0 ceph-osd[207831]: osd.2 16 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
Oct 11 01:43:31 compute-0 ceph-osd[207831]: osd.2 16 set_numa_affinity not setting numa affinity
Oct 11 01:43:31 compute-0 ceph-osd[207831]: osd.2 16 _collect_metadata loop5:  no unique device id for loop5: fallback method has no model nor serial
Oct 11 01:43:31 compute-0 sudo[208970]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:31 compute-0 openstack_network_exporter[159265]: ERROR   01:43:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:43:31 compute-0 openstack_network_exporter[159265]: ERROR   01:43:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:43:31 compute-0 openstack_network_exporter[159265]: ERROR   01:43:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:43:31 compute-0 openstack_network_exporter[159265]: ERROR   01:43:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:43:31 compute-0 openstack_network_exporter[159265]: ERROR   01:43:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:43:31 compute-0 sudo[209026]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:31 compute-0 sudo[209026]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:31 compute-0 sudo[209026]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:31 compute-0 sudo[209053]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:43:31 compute-0 sudo[209053]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:31 compute-0 ceph-mgr[192233]: mgr.server handle_open ignoring open from osd.2 v2:192.168.122.100:6810/2304766820; not ready for session (expect reconnect)
Oct 11 01:43:31 compute-0 sudo[209053]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 2} v 0) v1
Oct 11 01:43:31 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:31 compute-0 ceph-mgr[192233]: mgr finish mon failed to return metadata for osd.2: (2) No such file or directory
Oct 11 01:43:31 compute-0 sudo[209078]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:31 compute-0 sudo[209078]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:31 compute-0 sudo[209078]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:31 compute-0 sudo[209103]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- inventory --format=json-pretty --filter-for-batch
Oct 11 01:43:31 compute-0 sudo[209103]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:32 compute-0 ceph-osd[207831]: osd.2 16 tick checking mon for new map
Oct 11 01:43:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e16 do_prune osdmap full prune enabled
Oct 11 01:43:32 compute-0 ceph-mon[191930]: OSD bench result of 5308.885242 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.2. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd].
Oct 11 01:43:32 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e17 e17: 3 total, 3 up, 3 in
Oct 11 01:43:32 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : osd.2 [v2:192.168.122.100:6810/2304766820,v1:192.168.122.100:6811/2304766820] boot
Oct 11 01:43:32 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e17: 3 total, 3 up, 3 in
Oct 11 01:43:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata", "id": 2} v 0) v1
Oct 11 01:43:32 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:32 compute-0 ceph-osd[207831]: osd.2 17 state: booting -> active
Oct 11 01:43:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 53 MiB used, 40 GiB / 40 GiB avail
Oct 11 01:43:32 compute-0 podman[209161]: 2025-10-11 01:43:32.501044525 +0000 UTC m=+0.103842575 container create 6cb50eb755b7a775e3f2ca88a10ba109fe73371a49df12012158f308804517fd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_goodall, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, ceph=True)
Oct 11 01:43:32 compute-0 podman[209161]: 2025-10-11 01:43:32.457812795 +0000 UTC m=+0.060610875 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:32 compute-0 systemd[1]: Started libpod-conmon-6cb50eb755b7a775e3f2ca88a10ba109fe73371a49df12012158f308804517fd.scope.
Oct 11 01:43:32 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:32 compute-0 podman[209161]: 2025-10-11 01:43:32.671932597 +0000 UTC m=+0.274730677 container init 6cb50eb755b7a775e3f2ca88a10ba109fe73371a49df12012158f308804517fd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_goodall, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 01:43:32 compute-0 podman[209161]: 2025-10-11 01:43:32.685076139 +0000 UTC m=+0.287874179 container start 6cb50eb755b7a775e3f2ca88a10ba109fe73371a49df12012158f308804517fd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_goodall, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, ceph=True, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:43:32 compute-0 podman[209161]: 2025-10-11 01:43:32.691321239 +0000 UTC m=+0.294119289 container attach 6cb50eb755b7a775e3f2ca88a10ba109fe73371a49df12012158f308804517fd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_goodall, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:43:32 compute-0 angry_goodall[209177]: 167 167
Oct 11 01:43:32 compute-0 systemd[1]: libpod-6cb50eb755b7a775e3f2ca88a10ba109fe73371a49df12012158f308804517fd.scope: Deactivated successfully.
Oct 11 01:43:32 compute-0 podman[209161]: 2025-10-11 01:43:32.699856456 +0000 UTC m=+0.302654486 container died 6cb50eb755b7a775e3f2ca88a10ba109fe73371a49df12012158f308804517fd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_goodall, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:32 compute-0 systemd[1]: var-lib-containers-storage-overlay-cc0b662c317bca4586f754ab1b6726adb8f96d8991aa6b5fcb35b8d503c6c0d5-merged.mount: Deactivated successfully.
Oct 11 01:43:32 compute-0 podman[209161]: 2025-10-11 01:43:32.786692151 +0000 UTC m=+0.389490191 container remove 6cb50eb755b7a775e3f2ca88a10ba109fe73371a49df12012158f308804517fd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_goodall, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, ceph=True)
Oct 11 01:43:32 compute-0 systemd[1]: libpod-conmon-6cb50eb755b7a775e3f2ca88a10ba109fe73371a49df12012158f308804517fd.scope: Deactivated successfully.
Oct 11 01:43:32 compute-0 sshd-session[207768]: Failed password for root from 193.46.255.217 port 47562 ssh2
Oct 11 01:43:33 compute-0 podman[209199]: 2025-10-11 01:43:33.104737673 +0000 UTC m=+0.111256351 container create 24303d1cda74f3c2ad0df2752078203edf5be9eba05315d3dc24eb2acbd3ce99 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_poincare, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507)
Oct 11 01:43:33 compute-0 podman[209199]: 2025-10-11 01:43:33.062645005 +0000 UTC m=+0.069163733 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:33 compute-0 systemd[1]: Started libpod-conmon-24303d1cda74f3c2ad0df2752078203edf5be9eba05315d3dc24eb2acbd3ce99.scope.
Oct 11 01:43:33 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1422bb42f53e0bc2429f310d56dcf76957132f1752bd5a633be266ee91e0d834/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1422bb42f53e0bc2429f310d56dcf76957132f1752bd5a633be266ee91e0d834/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1422bb42f53e0bc2429f310d56dcf76957132f1752bd5a633be266ee91e0d834/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1422bb42f53e0bc2429f310d56dcf76957132f1752bd5a633be266ee91e0d834/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:33 compute-0 podman[209199]: 2025-10-11 01:43:33.272104739 +0000 UTC m=+0.278623417 container init 24303d1cda74f3c2ad0df2752078203edf5be9eba05315d3dc24eb2acbd3ce99 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_poincare, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef)
Oct 11 01:43:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e17 do_prune osdmap full prune enabled
Oct 11 01:43:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e18 e18: 3 total, 3 up, 3 in
Oct 11 01:43:33 compute-0 ceph-mon[191930]: osd.2 [v2:192.168.122.100:6810/2304766820,v1:192.168.122.100:6811/2304766820] boot
Oct 11 01:43:33 compute-0 ceph-mon[191930]: osdmap e17: 3 total, 3 up, 3 in
Oct 11 01:43:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
Oct 11 01:43:33 compute-0 ceph-mon[191930]: pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 53 MiB used, 40 GiB / 40 GiB avail
Oct 11 01:43:33 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e18: 3 total, 3 up, 3 in
Oct 11 01:43:33 compute-0 podman[209199]: 2025-10-11 01:43:33.31129692 +0000 UTC m=+0.317815588 container start 24303d1cda74f3c2ad0df2752078203edf5be9eba05315d3dc24eb2acbd3ce99 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_poincare, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, ceph=True, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:43:33 compute-0 podman[209199]: 2025-10-11 01:43:33.320082053 +0000 UTC m=+0.326600941 container attach 24303d1cda74f3c2ad0df2752078203edf5be9eba05315d3dc24eb2acbd3ce99 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_poincare, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0)
Oct 11 01:43:34 compute-0 sshd-session[207768]: Received disconnect from 193.46.255.217 port 47562:11:  [preauth]
Oct 11 01:43:34 compute-0 sshd-session[207768]: Disconnected from authenticating user root 193.46.255.217 port 47562 [preauth]
Oct 11 01:43:34 compute-0 sshd-session[207768]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 01:43:34 compute-0 ceph-mon[191930]: osdmap e18: 3 total, 3 up, 3 in
Oct 11 01:43:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v58: 1 pgs: 1 active+clean; 449 KiB data, 480 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e18 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:43:35 compute-0 unix_chkpwd[210006]: password check failed for user (root)
Oct 11 01:43:35 compute-0 sshd-session[209236]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 01:43:35 compute-0 ceph-mon[191930]: pgmap v58: 1 pgs: 1 active+clean; 449 KiB data, 480 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:35 compute-0 eager_poincare[209216]: [
Oct 11 01:43:35 compute-0 eager_poincare[209216]:     {
Oct 11 01:43:35 compute-0 eager_poincare[209216]:         "available": false,
Oct 11 01:43:35 compute-0 eager_poincare[209216]:         "ceph_device": false,
Oct 11 01:43:35 compute-0 eager_poincare[209216]:         "device_id": "QEMU_DVD-ROM_QM00001",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:         "lsm_data": {},
Oct 11 01:43:35 compute-0 eager_poincare[209216]:         "lvs": [],
Oct 11 01:43:35 compute-0 eager_poincare[209216]:         "path": "/dev/sr0",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:         "rejected_reasons": [
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "Insufficient space (<5GB)",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "Has a FileSystem"
Oct 11 01:43:35 compute-0 eager_poincare[209216]:         ],
Oct 11 01:43:35 compute-0 eager_poincare[209216]:         "sys_api": {
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "actuators": null,
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "device_nodes": "sr0",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "devname": "sr0",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "human_readable_size": "482.00 KB",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "id_bus": "ata",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "model": "QEMU DVD-ROM",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "nr_requests": "2",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "parent": "/dev/sr0",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "partitions": {},
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "path": "/dev/sr0",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "removable": "1",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "rev": "2.5+",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "ro": "0",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "rotational": "0",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "sas_address": "",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "sas_device_handle": "",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "scheduler_mode": "mq-deadline",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "sectors": 0,
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "sectorsize": "2048",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "size": 493568.0,
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "support_discard": "2048",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "type": "disk",
Oct 11 01:43:35 compute-0 eager_poincare[209216]:             "vendor": "QEMU"
Oct 11 01:43:35 compute-0 eager_poincare[209216]:         }
Oct 11 01:43:35 compute-0 eager_poincare[209216]:     }
Oct 11 01:43:35 compute-0 eager_poincare[209216]: ]
Oct 11 01:43:35 compute-0 systemd[1]: libpod-24303d1cda74f3c2ad0df2752078203edf5be9eba05315d3dc24eb2acbd3ce99.scope: Deactivated successfully.
Oct 11 01:43:35 compute-0 systemd[1]: libpod-24303d1cda74f3c2ad0df2752078203edf5be9eba05315d3dc24eb2acbd3ce99.scope: Consumed 2.516s CPU time.
Oct 11 01:43:35 compute-0 podman[209199]: 2025-10-11 01:43:35.685387402 +0000 UTC m=+2.691906040 container died 24303d1cda74f3c2ad0df2752078203edf5be9eba05315d3dc24eb2acbd3ce99 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_poincare, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3)
Oct 11 01:43:35 compute-0 systemd[1]: var-lib-containers-storage-overlay-1422bb42f53e0bc2429f310d56dcf76957132f1752bd5a633be266ee91e0d834-merged.mount: Deactivated successfully.
Oct 11 01:43:35 compute-0 podman[209199]: 2025-10-11 01:43:35.787070458 +0000 UTC m=+2.793589106 container remove 24303d1cda74f3c2ad0df2752078203edf5be9eba05315d3dc24eb2acbd3ce99 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_poincare, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:43:35 compute-0 systemd[1]: libpod-conmon-24303d1cda74f3c2ad0df2752078203edf5be9eba05315d3dc24eb2acbd3ce99.scope: Deactivated successfully.
Oct 11 01:43:35 compute-0 sudo[209103]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:43:35 compute-0 podman[211267]: 2025-10-11 01:43:35.859861553 +0000 UTC m=+0.145134642 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, config_id=edpm, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 01:43:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:43:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config rm", "who": "osd.0", "name": "osd_memory_target"} v 0) v1
Oct 11 01:43:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd.0", "name": "osd_memory_target"}]: dispatch
Oct 11 01:43:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config rm", "who": "osd.1", "name": "osd_memory_target"} v 0) v1
Oct 11 01:43:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd.1", "name": "osd_memory_target"}]: dispatch
Oct 11 01:43:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config rm", "who": "osd.2", "name": "osd_memory_target"} v 0) v1
Oct 11 01:43:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd.2", "name": "osd_memory_target"}]: dispatch
Oct 11 01:43:35 compute-0 ceph-mgr[192233]: [cephadm INFO root] Adjusting osd_memory_target on compute-0 to 43697k
Oct 11 01:43:35 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Adjusting osd_memory_target on compute-0 to 43697k
Oct 11 01:43:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config set, name=osd_memory_target}] v 0) v1
Oct 11 01:43:35 compute-0 ceph-mgr[192233]: [cephadm WARNING cephadm.serve] Unable to set osd_memory_target on compute-0 to 44745932: error parsing value: Value '44745932' is below minimum 939524096
Oct 11 01:43:35 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [WRN] : Unable to set osd_memory_target on compute-0 to 44745932: error parsing value: Value '44745932' is below minimum 939524096
Oct 11 01:43:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:43:35 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:43:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:43:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:43:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:35 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 6396df1b-5934-4973-9e0e-c4c0b83bf085 does not exist
Oct 11 01:43:35 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev e89c656e-00ef-487d-b53e-83a48e7b6b0d does not exist
Oct 11 01:43:35 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev e725484b-6a06-44c8-9baf-fa041474db20 does not exist
Oct 11 01:43:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:43:35 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:43:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:43:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:43:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:43:35 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:36 compute-0 sudo[211298]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:36 compute-0 sudo[211298]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:36 compute-0 sudo[211298]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:36 compute-0 sudo[211323]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:43:36 compute-0 sudo[211323]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:36 compute-0 rsyslogd[187706]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 01:43:36 compute-0 sudo[211323]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:36 compute-0 sudo[211349]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:36 compute-0 sudo[211349]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:36 compute-0 sudo[211349]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v59: 1 pgs: 1 active+clean; 449 KiB data, 479 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:36 compute-0 sudo[211374]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 01:43:36 compute-0 sudo[211374]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd.0", "name": "osd_memory_target"}]: dispatch
Oct 11 01:43:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd.1", "name": "osd_memory_target"}]: dispatch
Oct 11 01:43:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd.2", "name": "osd_memory_target"}]: dispatch
Oct 11 01:43:36 compute-0 ceph-mon[191930]: Adjusting osd_memory_target on compute-0 to 43697k
Oct 11 01:43:36 compute-0 ceph-mon[191930]: Unable to set osd_memory_target on compute-0 to 44745932: error parsing value: Value '44745932' is below minimum 939524096
Oct 11 01:43:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:43:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:43:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:43:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:37 compute-0 podman[211438]: 2025-10-11 01:43:37.02518488 +0000 UTC m=+0.071418395 container create eeb182a7add8973719f21d487c6f18d428b51445bd0678913cc4aed2cd47cf64 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_hypatia, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:37 compute-0 podman[211438]: 2025-10-11 01:43:37.001897548 +0000 UTC m=+0.048131093 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:37 compute-0 systemd[1]: Started libpod-conmon-eeb182a7add8973719f21d487c6f18d428b51445bd0678913cc4aed2cd47cf64.scope.
Oct 11 01:43:37 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:37 compute-0 podman[211438]: 2025-10-11 01:43:37.177089237 +0000 UTC m=+0.223322952 container init eeb182a7add8973719f21d487c6f18d428b51445bd0678913cc4aed2cd47cf64 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_hypatia, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Oct 11 01:43:37 compute-0 podman[211438]: 2025-10-11 01:43:37.193016354 +0000 UTC m=+0.239249859 container start eeb182a7add8973719f21d487c6f18d428b51445bd0678913cc4aed2cd47cf64 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_hypatia, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:37 compute-0 podman[211438]: 2025-10-11 01:43:37.198751922 +0000 UTC m=+0.244985447 container attach eeb182a7add8973719f21d487c6f18d428b51445bd0678913cc4aed2cd47cf64 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_hypatia, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:43:37 compute-0 eager_hypatia[211453]: 167 167
Oct 11 01:43:37 compute-0 systemd[1]: libpod-eeb182a7add8973719f21d487c6f18d428b51445bd0678913cc4aed2cd47cf64.scope: Deactivated successfully.
Oct 11 01:43:37 compute-0 podman[211458]: 2025-10-11 01:43:37.283011955 +0000 UTC m=+0.051353955 container died eeb182a7add8973719f21d487c6f18d428b51445bd0678913cc4aed2cd47cf64 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_hypatia, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.license=GPLv2)
Oct 11 01:43:37 compute-0 systemd[1]: var-lib-containers-storage-overlay-e971cbc9941e85b2613ed48a845d3df7d11931269b2f34fd1b1a143d24de9844-merged.mount: Deactivated successfully.
Oct 11 01:43:37 compute-0 podman[211458]: 2025-10-11 01:43:37.339579937 +0000 UTC m=+0.107921897 container remove eeb182a7add8973719f21d487c6f18d428b51445bd0678913cc4aed2cd47cf64 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_hypatia, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:43:37 compute-0 systemd[1]: libpod-conmon-eeb182a7add8973719f21d487c6f18d428b51445bd0678913cc4aed2cd47cf64.scope: Deactivated successfully.
Oct 11 01:43:37 compute-0 sshd-session[209236]: Failed password for root from 193.46.255.217 port 62014 ssh2
Oct 11 01:43:37 compute-0 podman[211482]: 2025-10-11 01:43:37.653724146 +0000 UTC m=+0.100802142 container create b62b6f93f2124e31100f71bb55bad7cd67450f7cd0eabb4df382166d370a8a2c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_bhabha, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:43:37 compute-0 podman[211482]: 2025-10-11 01:43:37.616212025 +0000 UTC m=+0.063290071 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:37 compute-0 systemd[1]: Started libpod-conmon-b62b6f93f2124e31100f71bb55bad7cd67450f7cd0eabb4df382166d370a8a2c.scope.
Oct 11 01:43:37 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d5dd7ba76eceea26c457318480325700fa78379a0f71e245c3ea89bbfe0a4917/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d5dd7ba76eceea26c457318480325700fa78379a0f71e245c3ea89bbfe0a4917/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d5dd7ba76eceea26c457318480325700fa78379a0f71e245c3ea89bbfe0a4917/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d5dd7ba76eceea26c457318480325700fa78379a0f71e245c3ea89bbfe0a4917/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d5dd7ba76eceea26c457318480325700fa78379a0f71e245c3ea89bbfe0a4917/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:37 compute-0 podman[211482]: 2025-10-11 01:43:37.853043517 +0000 UTC m=+0.300121573 container init b62b6f93f2124e31100f71bb55bad7cd67450f7cd0eabb4df382166d370a8a2c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_bhabha, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 01:43:37 compute-0 podman[211482]: 2025-10-11 01:43:37.880158703 +0000 UTC m=+0.327236709 container start b62b6f93f2124e31100f71bb55bad7cd67450f7cd0eabb4df382166d370a8a2c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_bhabha, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3)
Oct 11 01:43:37 compute-0 podman[211482]: 2025-10-11 01:43:37.886907919 +0000 UTC m=+0.333985965 container attach b62b6f93f2124e31100f71bb55bad7cd67450f7cd0eabb4df382166d370a8a2c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_bhabha, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:37 compute-0 ceph-mon[191930]: pgmap v59: 1 pgs: 1 active+clean; 449 KiB data, 479 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:38 compute-0 unix_chkpwd[211503]: password check failed for user (root)
Oct 11 01:43:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v60: 1 pgs: 1 active+clean; 449 KiB data, 479 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:39 compute-0 practical_bhabha[211498]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:43:39 compute-0 practical_bhabha[211498]: --> relative data size: 1.0
Oct 11 01:43:39 compute-0 practical_bhabha[211498]: --> All data devices are unavailable
Oct 11 01:43:39 compute-0 systemd[1]: libpod-b62b6f93f2124e31100f71bb55bad7cd67450f7cd0eabb4df382166d370a8a2c.scope: Deactivated successfully.
Oct 11 01:43:39 compute-0 systemd[1]: libpod-b62b6f93f2124e31100f71bb55bad7cd67450f7cd0eabb4df382166d370a8a2c.scope: Consumed 1.275s CPU time.
Oct 11 01:43:39 compute-0 podman[211482]: 2025-10-11 01:43:39.206915205 +0000 UTC m=+1.653993181 container died b62b6f93f2124e31100f71bb55bad7cd67450f7cd0eabb4df382166d370a8a2c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_bhabha, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:43:39 compute-0 systemd[1]: var-lib-containers-storage-overlay-d5dd7ba76eceea26c457318480325700fa78379a0f71e245c3ea89bbfe0a4917-merged.mount: Deactivated successfully.
Oct 11 01:43:39 compute-0 podman[211482]: 2025-10-11 01:43:39.299415835 +0000 UTC m=+1.746493811 container remove b62b6f93f2124e31100f71bb55bad7cd67450f7cd0eabb4df382166d370a8a2c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_bhabha, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2)
Oct 11 01:43:39 compute-0 systemd[1]: libpod-conmon-b62b6f93f2124e31100f71bb55bad7cd67450f7cd0eabb4df382166d370a8a2c.scope: Deactivated successfully.
Oct 11 01:43:39 compute-0 sudo[211374]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:39 compute-0 sudo[211540]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:39 compute-0 sudo[211540]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:39 compute-0 sudo[211540]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e18 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:43:39 compute-0 sudo[211565]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:43:39 compute-0 sudo[211565]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:39 compute-0 sudo[211565]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:39 compute-0 sudo[211590]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:39 compute-0 sudo[211590]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:39 compute-0 sudo[211590]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:39 compute-0 sshd-session[209236]: Failed password for root from 193.46.255.217 port 62014 ssh2
Oct 11 01:43:39 compute-0 sudo[211615]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:43:39 compute-0 sudo[211615]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:39 compute-0 ceph-mon[191930]: pgmap v60: 1 pgs: 1 active+clean; 449 KiB data, 479 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v61: 1 pgs: 1 active+clean; 449 KiB data, 479 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:40 compute-0 podman[211675]: 2025-10-11 01:43:40.502987281 +0000 UTC m=+0.085677207 container create e8b97ca0340be68e7250c7e40ad7c150efc2c66bd75bb3f12b2c1e153ee6a024 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_pare, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:43:40 compute-0 podman[211675]: 2025-10-11 01:43:40.475835994 +0000 UTC m=+0.058525911 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:40 compute-0 systemd[1]: Started libpod-conmon-e8b97ca0340be68e7250c7e40ad7c150efc2c66bd75bb3f12b2c1e153ee6a024.scope.
Oct 11 01:43:40 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:40 compute-0 podman[211675]: 2025-10-11 01:43:40.651113388 +0000 UTC m=+0.233803365 container init e8b97ca0340be68e7250c7e40ad7c150efc2c66bd75bb3f12b2c1e153ee6a024 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_pare, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 01:43:40 compute-0 podman[211675]: 2025-10-11 01:43:40.669488234 +0000 UTC m=+0.252178141 container start e8b97ca0340be68e7250c7e40ad7c150efc2c66bd75bb3f12b2c1e153ee6a024 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_pare, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:43:40 compute-0 podman[211675]: 2025-10-11 01:43:40.679482701 +0000 UTC m=+0.262172638 container attach e8b97ca0340be68e7250c7e40ad7c150efc2c66bd75bb3f12b2c1e153ee6a024 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_pare, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:40 compute-0 romantic_pare[211697]: 167 167
Oct 11 01:43:40 compute-0 systemd[1]: libpod-e8b97ca0340be68e7250c7e40ad7c150efc2c66bd75bb3f12b2c1e153ee6a024.scope: Deactivated successfully.
Oct 11 01:43:40 compute-0 podman[211675]: 2025-10-11 01:43:40.68175791 +0000 UTC m=+0.264447817 container died e8b97ca0340be68e7250c7e40ad7c150efc2c66bd75bb3f12b2c1e153ee6a024 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_pare, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 01:43:40 compute-0 systemd[1]: var-lib-containers-storage-overlay-5322220b1a9f5e386a9d92ad4f3e7674d8c9b4079801dd23d71892a6d629b1b4-merged.mount: Deactivated successfully.
Oct 11 01:43:40 compute-0 podman[211689]: 2025-10-11 01:43:40.742634396 +0000 UTC m=+0.160492630 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 01:43:40 compute-0 podman[211675]: 2025-10-11 01:43:40.752662677 +0000 UTC m=+0.335352584 container remove e8b97ca0340be68e7250c7e40ad7c150efc2c66bd75bb3f12b2c1e153ee6a024 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_pare, CEPH_REF=reef, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:40 compute-0 systemd[1]: libpod-conmon-e8b97ca0340be68e7250c7e40ad7c150efc2c66bd75bb3f12b2c1e153ee6a024.scope: Deactivated successfully.
Oct 11 01:43:40 compute-0 podman[211698]: 2025-10-11 01:43:40.79466707 +0000 UTC m=+0.174323374 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, version=9.4, io.buildah.version=1.29.0, maintainer=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, com.redhat.component=ubi9-container, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1214.1726694543, architecture=x86_64, build-date=2024-09-18T21:23:30, summary=Provides the latest release of Red Hat Universal Base Image 9., io.openshift.expose-services=, io.openshift.tags=base rhel9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., managed_by=edpm_ansible, release-0.7.12=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, vcs-type=git, container_name=kepler, name=ubi9, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9)
Oct 11 01:43:40 compute-0 podman[211700]: 2025-10-11 01:43:40.815179757 +0000 UTC m=+0.186307367 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
Oct 11 01:43:40 compute-0 sudo[211796]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cqpdesimhssluwratckglifikqaqaiao ; /usr/bin/python3'
Oct 11 01:43:40 compute-0 sudo[211796]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:43:40 compute-0 python3[211798]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z   --volume /home/ceph-admin/specs/ceph_spec.yaml:/home/ceph_spec.yaml:z   --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   status --format json | jq .osdmap.num_up_osds _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:43:41 compute-0 podman[211804]: 2025-10-11 01:43:41.002718538 +0000 UTC m=+0.081181874 container create d3231ce2e822efb06554da37b939ad4dcc92da187e923b753294ccc8b294b3db (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_edison, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
Oct 11 01:43:41 compute-0 podman[211804]: 2025-10-11 01:43:40.971022419 +0000 UTC m=+0.049485835 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:41 compute-0 systemd[1]: Started libpod-conmon-d3231ce2e822efb06554da37b939ad4dcc92da187e923b753294ccc8b294b3db.scope.
Oct 11 01:43:41 compute-0 podman[211819]: 2025-10-11 01:43:41.118938188 +0000 UTC m=+0.087030037 container create 088993e1af55a31d035d0aba95c39df59aa7b6c1ab3be990f6a2368569b81518 (image=quay.io/ceph/ceph:v18, name=nervous_chebyshev, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 01:43:41 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f580754829787d3e1d9a9c0f41fb8cda0cf4c9b01256e772ab7a0c545882cc0a/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f580754829787d3e1d9a9c0f41fb8cda0cf4c9b01256e772ab7a0c545882cc0a/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f580754829787d3e1d9a9c0f41fb8cda0cf4c9b01256e772ab7a0c545882cc0a/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f580754829787d3e1d9a9c0f41fb8cda0cf4c9b01256e772ab7a0c545882cc0a/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:41 compute-0 podman[211804]: 2025-10-11 01:43:41.178947754 +0000 UTC m=+0.257411100 container init d3231ce2e822efb06554da37b939ad4dcc92da187e923b753294ccc8b294b3db (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_edison, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:43:41 compute-0 podman[211819]: 2025-10-11 01:43:41.089007562 +0000 UTC m=+0.057099451 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:43:41 compute-0 systemd[1]: Started libpod-conmon-088993e1af55a31d035d0aba95c39df59aa7b6c1ab3be990f6a2368569b81518.scope.
Oct 11 01:43:41 compute-0 podman[211804]: 2025-10-11 01:43:41.203273644 +0000 UTC m=+0.281736980 container start d3231ce2e822efb06554da37b939ad4dcc92da187e923b753294ccc8b294b3db (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_edison, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:43:41 compute-0 podman[211804]: 2025-10-11 01:43:41.207794458 +0000 UTC m=+0.286257794 container attach d3231ce2e822efb06554da37b939ad4dcc92da187e923b753294ccc8b294b3db (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_edison, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:43:41 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fb9d561abce4207edf29327943f95a508a263f3dd455dc40cba2e198346fb6a0/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fb9d561abce4207edf29327943f95a508a263f3dd455dc40cba2e198346fb6a0/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fb9d561abce4207edf29327943f95a508a263f3dd455dc40cba2e198346fb6a0/merged/home/ceph_spec.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:41 compute-0 podman[211819]: 2025-10-11 01:43:41.264021772 +0000 UTC m=+0.232113681 container init 088993e1af55a31d035d0aba95c39df59aa7b6c1ab3be990f6a2368569b81518 (image=quay.io/ceph/ceph:v18, name=nervous_chebyshev, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 01:43:41 compute-0 podman[211819]: 2025-10-11 01:43:41.282029665 +0000 UTC m=+0.250121504 container start 088993e1af55a31d035d0aba95c39df59aa7b6c1ab3be990f6a2368569b81518 (image=quay.io/ceph/ceph:v18, name=nervous_chebyshev, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:41 compute-0 podman[211819]: 2025-10-11 01:43:41.286670612 +0000 UTC m=+0.254762461 container attach 088993e1af55a31d035d0aba95c39df59aa7b6c1ab3be990f6a2368569b81518 (image=quay.io/ceph/ceph:v18, name=nervous_chebyshev, io.buildah.version=1.39.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:43:41 compute-0 unix_chkpwd[211846]: password check failed for user (root)
Oct 11 01:43:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json"} v 0) v1
Oct 11 01:43:41 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/306694762' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
Oct 11 01:43:41 compute-0 ceph-mon[191930]: pgmap v61: 1 pgs: 1 active+clean; 449 KiB data, 479 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:41 compute-0 nervous_chebyshev[211841]: 
Oct 11 01:43:41 compute-0 nervous_chebyshev[211841]: {"fsid":"3c7617c3-7a20-523e-a9de-20c0d6ba41da","health":{"status":"HEALTH_OK","checks":{},"mutes":[]},"election_epoch":5,"quorum":[0],"quorum_names":["compute-0"],"quorum_age":152,"monmap":{"epoch":1,"min_mon_release_name":"reef","num_mons":1},"osdmap":{"epoch":18,"num_osds":3,"num_up_osds":3,"osd_up_since":1760147012,"num_in_osds":3,"osd_in_since":1760146975,"num_remapped_pgs":0},"pgmap":{"pgs_by_state":[{"state_name":"active+clean","count":1}],"num_pgs":1,"num_pools":1,"num_objects":2,"data_bytes":459280,"bytes_used":502788096,"bytes_avail":63909138432,"bytes_total":64411926528},"fsmap":{"epoch":1,"by_rank":[],"up:standby":0},"mgrmap":{"available":true,"num_standbys":0,"modules":["cephadm","iostat","nfs","restful"],"services":{}},"servicemap":{"epoch":2,"modified":"2025-10-11T01:42:58.369813+0000","services":{}},"progress_events":{}}
Oct 11 01:43:41 compute-0 systemd[1]: libpod-088993e1af55a31d035d0aba95c39df59aa7b6c1ab3be990f6a2368569b81518.scope: Deactivated successfully.
Oct 11 01:43:42 compute-0 silly_edison[211835]: {
Oct 11 01:43:42 compute-0 silly_edison[211835]:     "0": [
Oct 11 01:43:42 compute-0 silly_edison[211835]:         {
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "devices": [
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "/dev/loop3"
Oct 11 01:43:42 compute-0 silly_edison[211835]:             ],
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "lv_name": "ceph_lv0",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "lv_size": "21470642176",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "name": "ceph_lv0",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "tags": {
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.cluster_name": "ceph",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.crush_device_class": "",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.encrypted": "0",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.osd_id": "0",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.type": "block",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.vdo": "0"
Oct 11 01:43:42 compute-0 silly_edison[211835]:             },
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "type": "block",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "vg_name": "ceph_vg0"
Oct 11 01:43:42 compute-0 silly_edison[211835]:         }
Oct 11 01:43:42 compute-0 silly_edison[211835]:     ],
Oct 11 01:43:42 compute-0 silly_edison[211835]:     "1": [
Oct 11 01:43:42 compute-0 silly_edison[211835]:         {
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "devices": [
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "/dev/loop4"
Oct 11 01:43:42 compute-0 silly_edison[211835]:             ],
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "lv_name": "ceph_lv1",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "lv_size": "21470642176",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "name": "ceph_lv1",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "tags": {
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.cluster_name": "ceph",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.crush_device_class": "",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.encrypted": "0",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.osd_id": "1",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.type": "block",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.vdo": "0"
Oct 11 01:43:42 compute-0 silly_edison[211835]:             },
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "type": "block",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "vg_name": "ceph_vg1"
Oct 11 01:43:42 compute-0 silly_edison[211835]:         }
Oct 11 01:43:42 compute-0 silly_edison[211835]:     ],
Oct 11 01:43:42 compute-0 silly_edison[211835]:     "2": [
Oct 11 01:43:42 compute-0 silly_edison[211835]:         {
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "devices": [
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "/dev/loop5"
Oct 11 01:43:42 compute-0 silly_edison[211835]:             ],
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "lv_name": "ceph_lv2",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "lv_size": "21470642176",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "name": "ceph_lv2",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "tags": {
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.cluster_name": "ceph",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.crush_device_class": "",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.encrypted": "0",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.osd_id": "2",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.type": "block",
Oct 11 01:43:42 compute-0 silly_edison[211835]:                 "ceph.vdo": "0"
Oct 11 01:43:42 compute-0 silly_edison[211835]:             },
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "type": "block",
Oct 11 01:43:42 compute-0 silly_edison[211835]:             "vg_name": "ceph_vg2"
Oct 11 01:43:42 compute-0 silly_edison[211835]:         }
Oct 11 01:43:42 compute-0 silly_edison[211835]:     ]
Oct 11 01:43:42 compute-0 silly_edison[211835]: }
Oct 11 01:43:42 compute-0 systemd[1]: libpod-d3231ce2e822efb06554da37b939ad4dcc92da187e923b753294ccc8b294b3db.scope: Deactivated successfully.
Oct 11 01:43:42 compute-0 podman[211872]: 2025-10-11 01:43:42.086798485 +0000 UTC m=+0.062316756 container died 088993e1af55a31d035d0aba95c39df59aa7b6c1ab3be990f6a2368569b81518 (image=quay.io/ceph/ceph:v18, name=nervous_chebyshev, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:43:42 compute-0 systemd[1]: var-lib-containers-storage-overlay-fb9d561abce4207edf29327943f95a508a263f3dd455dc40cba2e198346fb6a0-merged.mount: Deactivated successfully.
Oct 11 01:43:42 compute-0 podman[211881]: 2025-10-11 01:43:42.143469252 +0000 UTC m=+0.055471913 container died d3231ce2e822efb06554da37b939ad4dcc92da187e923b753294ccc8b294b3db (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_edison, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:43:42 compute-0 podman[211872]: 2025-10-11 01:43:42.167562405 +0000 UTC m=+0.143080686 container remove 088993e1af55a31d035d0aba95c39df59aa7b6c1ab3be990f6a2368569b81518 (image=quay.io/ceph/ceph:v18, name=nervous_chebyshev, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 01:43:42 compute-0 systemd[1]: libpod-conmon-088993e1af55a31d035d0aba95c39df59aa7b6c1ab3be990f6a2368569b81518.scope: Deactivated successfully.
Oct 11 01:43:42 compute-0 systemd[1]: var-lib-containers-storage-overlay-f580754829787d3e1d9a9c0f41fb8cda0cf4c9b01256e772ab7a0c545882cc0a-merged.mount: Deactivated successfully.
Oct 11 01:43:42 compute-0 sudo[211796]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:42 compute-0 podman[211881]: 2025-10-11 01:43:42.236755193 +0000 UTC m=+0.148757844 container remove d3231ce2e822efb06554da37b939ad4dcc92da187e923b753294ccc8b294b3db (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_edison, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2)
Oct 11 01:43:42 compute-0 systemd[1]: libpod-conmon-d3231ce2e822efb06554da37b939ad4dcc92da187e923b753294ccc8b294b3db.scope: Deactivated successfully.
Oct 11 01:43:42 compute-0 sudo[211615]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v62: 1 pgs: 1 active+clean; 449 KiB data, 479 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:42 compute-0 sudo[211898]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:42 compute-0 sudo[211898]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:42 compute-0 sudo[211898]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:42 compute-0 sudo[211923]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:43:42 compute-0 sudo[211923]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:42 compute-0 sudo[211923]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:42 compute-0 sudo[211978]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jpuuehfdkolwxzjaqjuwlqhzramyfrby ; /usr/bin/python3'
Oct 11 01:43:42 compute-0 sudo[211978]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:43:42 compute-0 sudo[211966]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:42 compute-0 sudo[211966]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:42 compute-0 sudo[211966]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:42 compute-0 python3[211996]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   osd pool create vms  replicated_rule --autoscale-mode on _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:43:42 compute-0 sudo[211999]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:43:42 compute-0 sudo[211999]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:42 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/306694762' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
Oct 11 01:43:42 compute-0 podman[212022]: 2025-10-11 01:43:42.99679835 +0000 UTC m=+0.113570713 container create 4a55804ceafc773691a1d599f07306969061feaf73c6c0302063b479e119e353 (image=quay.io/ceph/ceph:v18, name=strange_haslett, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:43 compute-0 podman[212022]: 2025-10-11 01:43:42.961427923 +0000 UTC m=+0.078200316 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:43:43 compute-0 systemd[1]: Started libpod-conmon-4a55804ceafc773691a1d599f07306969061feaf73c6c0302063b479e119e353.scope.
Oct 11 01:43:43 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1b4b6cb233d1fa39e151891859f717528826ed3c070a4448aa40b7d0bee39df8/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1b4b6cb233d1fa39e151891859f717528826ed3c070a4448aa40b7d0bee39df8/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:43 compute-0 podman[212022]: 2025-10-11 01:43:43.163098012 +0000 UTC m=+0.279870395 container init 4a55804ceafc773691a1d599f07306969061feaf73c6c0302063b479e119e353 (image=quay.io/ceph/ceph:v18, name=strange_haslett, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:43:43 compute-0 podman[212022]: 2025-10-11 01:43:43.185483445 +0000 UTC m=+0.302255808 container start 4a55804ceafc773691a1d599f07306969061feaf73c6c0302063b479e119e353 (image=quay.io/ceph/ceph:v18, name=strange_haslett, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3)
Oct 11 01:43:43 compute-0 podman[212022]: 2025-10-11 01:43:43.192928285 +0000 UTC m=+0.309700638 container attach 4a55804ceafc773691a1d599f07306969061feaf73c6c0302063b479e119e353 (image=quay.io/ceph/ceph:v18, name=strange_haslett, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3)
Oct 11 01:43:43 compute-0 sshd-session[209236]: Failed password for root from 193.46.255.217 port 62014 ssh2
Oct 11 01:43:43 compute-0 podman[212081]: 2025-10-11 01:43:43.545598134 +0000 UTC m=+0.097513118 container create 5334021f391d152d1e734e5236ebcd13d1e452a99a13ef50c4ec09d8c13cc53a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=awesome_lederberg, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:43:43 compute-0 podman[212081]: 2025-10-11 01:43:43.505523517 +0000 UTC m=+0.057438561 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:43 compute-0 systemd[1]: Started libpod-conmon-5334021f391d152d1e734e5236ebcd13d1e452a99a13ef50c4ec09d8c13cc53a.scope.
Oct 11 01:43:43 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:43 compute-0 podman[212081]: 2025-10-11 01:43:43.689698649 +0000 UTC m=+0.241613673 container init 5334021f391d152d1e734e5236ebcd13d1e452a99a13ef50c4ec09d8c13cc53a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=awesome_lederberg, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:43 compute-0 podman[212081]: 2025-10-11 01:43:43.699591962 +0000 UTC m=+0.251506936 container start 5334021f391d152d1e734e5236ebcd13d1e452a99a13ef50c4ec09d8c13cc53a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=awesome_lederberg, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, CEPH_REF=reef, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:43 compute-0 podman[212081]: 2025-10-11 01:43:43.706206727 +0000 UTC m=+0.258121711 container attach 5334021f391d152d1e734e5236ebcd13d1e452a99a13ef50c4ec09d8c13cc53a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=awesome_lederberg, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:43:43 compute-0 awesome_lederberg[212116]: 167 167
Oct 11 01:43:43 compute-0 systemd[1]: libpod-5334021f391d152d1e734e5236ebcd13d1e452a99a13ef50c4ec09d8c13cc53a.scope: Deactivated successfully.
Oct 11 01:43:43 compute-0 podman[212081]: 2025-10-11 01:43:43.710788303 +0000 UTC m=+0.262703277 container died 5334021f391d152d1e734e5236ebcd13d1e452a99a13ef50c4ec09d8c13cc53a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=awesome_lederberg, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 01:43:43 compute-0 systemd[1]: var-lib-containers-storage-overlay-2552667f57daca8ee7e217f0297006528f36f68c14ea2e98b4ba2293f3a59b30-merged.mount: Deactivated successfully.
Oct 11 01:43:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool create", "pool": "vms", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"} v 0) v1
Oct 11 01:43:43 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/417253722' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "vms", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]: dispatch
Oct 11 01:43:43 compute-0 podman[212081]: 2025-10-11 01:43:43.793159001 +0000 UTC m=+0.345073945 container remove 5334021f391d152d1e734e5236ebcd13d1e452a99a13ef50c4ec09d8c13cc53a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=awesome_lederberg, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Oct 11 01:43:43 compute-0 systemd[1]: libpod-conmon-5334021f391d152d1e734e5236ebcd13d1e452a99a13ef50c4ec09d8c13cc53a.scope: Deactivated successfully.
Oct 11 01:43:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e18 do_prune osdmap full prune enabled
Oct 11 01:43:43 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/417253722' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "vms", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]': finished
Oct 11 01:43:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e19 e19: 3 total, 3 up, 3 in
Oct 11 01:43:43 compute-0 strange_haslett[212047]: pool 'vms' created
Oct 11 01:43:44 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e19: 3 total, 3 up, 3 in
Oct 11 01:43:44 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 19 pg[2.0( empty local-lis/les=0/0 n=0 ec=19/19 lis/c=0/0 les/c/f=0/0/0 sis=19) [2] r=0 lpr=19 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:44 compute-0 ceph-mon[191930]: pgmap v62: 1 pgs: 1 active+clean; 449 KiB data, 479 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:44 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/417253722' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "vms", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]: dispatch
Oct 11 01:43:44 compute-0 systemd[1]: libpod-4a55804ceafc773691a1d599f07306969061feaf73c6c0302063b479e119e353.scope: Deactivated successfully.
Oct 11 01:43:44 compute-0 podman[212022]: 2025-10-11 01:43:44.038976997 +0000 UTC m=+1.155749330 container died 4a55804ceafc773691a1d599f07306969061feaf73c6c0302063b479e119e353 (image=quay.io/ceph/ceph:v18, name=strange_haslett, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 01:43:44 compute-0 podman[212142]: 2025-10-11 01:43:44.103173777 +0000 UTC m=+0.120592920 container create f56d6702ef5e92265c4dbb25b251d4bd3bc9b9e91ff5821354ca092d4a83e54d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_edison, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef)
Oct 11 01:43:44 compute-0 systemd[1]: var-lib-containers-storage-overlay-1b4b6cb233d1fa39e151891859f717528826ed3c070a4448aa40b7d0bee39df8-merged.mount: Deactivated successfully.
Oct 11 01:43:44 compute-0 podman[212142]: 2025-10-11 01:43:44.048840269 +0000 UTC m=+0.066259452 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:44 compute-0 podman[212022]: 2025-10-11 01:43:44.162498013 +0000 UTC m=+1.279270326 container remove 4a55804ceafc773691a1d599f07306969061feaf73c6c0302063b479e119e353 (image=quay.io/ceph/ceph:v18, name=strange_haslett, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 01:43:44 compute-0 systemd[1]: libpod-conmon-4a55804ceafc773691a1d599f07306969061feaf73c6c0302063b479e119e353.scope: Deactivated successfully.
Oct 11 01:43:44 compute-0 systemd[1]: Started libpod-conmon-f56d6702ef5e92265c4dbb25b251d4bd3bc9b9e91ff5821354ca092d4a83e54d.scope.
Oct 11 01:43:44 compute-0 sudo[211978]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:44 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bdd11e01cf4c2097f1f04ebabbf779a094ee31ac5bb58deddec6d2f8f9d5a816/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bdd11e01cf4c2097f1f04ebabbf779a094ee31ac5bb58deddec6d2f8f9d5a816/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bdd11e01cf4c2097f1f04ebabbf779a094ee31ac5bb58deddec6d2f8f9d5a816/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bdd11e01cf4c2097f1f04ebabbf779a094ee31ac5bb58deddec6d2f8f9d5a816/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:44 compute-0 podman[212142]: 2025-10-11 01:43:44.271629382 +0000 UTC m=+0.289048535 container init f56d6702ef5e92265c4dbb25b251d4bd3bc9b9e91ff5821354ca092d4a83e54d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_edison, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default)
Oct 11 01:43:44 compute-0 podman[212142]: 2025-10-11 01:43:44.295930092 +0000 UTC m=+0.313349235 container start f56d6702ef5e92265c4dbb25b251d4bd3bc9b9e91ff5821354ca092d4a83e54d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_edison, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3)
Oct 11 01:43:44 compute-0 podman[212142]: 2025-10-11 01:43:44.302191437 +0000 UTC m=+0.319610580 container attach f56d6702ef5e92265c4dbb25b251d4bd3bc9b9e91ff5821354ca092d4a83e54d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_edison, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:44 compute-0 sudo[212197]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vnsevaohvhdjdjlbifkxxloqnekazpne ; /usr/bin/python3'
Oct 11 01:43:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v64: 2 pgs: 1 unknown, 1 active+clean; 449 KiB data, 479 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:44 compute-0 sudo[212197]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:43:44 compute-0 python3[212199]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   osd pool create volumes  replicated_rule --autoscale-mode on _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:43:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e19 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:43:44 compute-0 sshd-session[209236]: Received disconnect from 193.46.255.217 port 62014:11:  [preauth]
Oct 11 01:43:44 compute-0 sshd-session[209236]: Disconnected from authenticating user root 193.46.255.217 port 62014 [preauth]
Oct 11 01:43:44 compute-0 sshd-session[209236]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 01:43:44 compute-0 podman[212200]: 2025-10-11 01:43:44.662749909 +0000 UTC m=+0.109937275 container create fff6fbfeeb66aafd5aad4d40d635c55bce847c49fb4ac23d61464b58c6cf2ed8 (image=quay.io/ceph/ceph:v18, name=blissful_diffie, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:44 compute-0 podman[212200]: 2025-10-11 01:43:44.602917128 +0000 UTC m=+0.050104494 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:43:44 compute-0 systemd[1]: Started libpod-conmon-fff6fbfeeb66aafd5aad4d40d635c55bce847c49fb4ac23d61464b58c6cf2ed8.scope.
Oct 11 01:43:44 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/04ca28a4c57eecf2b8f7d916c330fd0e9c6474d4da5f5436c6ced06961e0caad/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/04ca28a4c57eecf2b8f7d916c330fd0e9c6474d4da5f5436c6ced06961e0caad/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:44 compute-0 podman[212200]: 2025-10-11 01:43:44.855469203 +0000 UTC m=+0.302656579 container init fff6fbfeeb66aafd5aad4d40d635c55bce847c49fb4ac23d61464b58c6cf2ed8 (image=quay.io/ceph/ceph:v18, name=blissful_diffie, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:43:44 compute-0 podman[212200]: 2025-10-11 01:43:44.872645332 +0000 UTC m=+0.319832708 container start fff6fbfeeb66aafd5aad4d40d635c55bce847c49fb4ac23d61464b58c6cf2ed8 (image=quay.io/ceph/ceph:v18, name=blissful_diffie, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 01:43:44 compute-0 podman[212200]: 2025-10-11 01:43:44.879574007 +0000 UTC m=+0.326761383 container attach fff6fbfeeb66aafd5aad4d40d635c55bce847c49fb4ac23d61464b58c6cf2ed8 (image=quay.io/ceph/ceph:v18, name=blissful_diffie, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:43:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e19 do_prune osdmap full prune enabled
Oct 11 01:43:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e20 e20: 3 total, 3 up, 3 in
Oct 11 01:43:45 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e20: 3 total, 3 up, 3 in
Oct 11 01:43:45 compute-0 ceph-mon[191930]: log_channel(cluster) log [WRN] : Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)
Oct 11 01:43:45 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 20 pg[2.0( empty local-lis/les=19/20 n=0 ec=19/19 lis/c=0/0 les/c/f=0/0/0 sis=19) [2] r=0 lpr=19 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:45 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/417253722' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "vms", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]': finished
Oct 11 01:43:45 compute-0 ceph-mon[191930]: osdmap e19: 3 total, 3 up, 3 in
Oct 11 01:43:45 compute-0 ceph-mon[191930]: osdmap e20: 3 total, 3 up, 3 in
Oct 11 01:43:45 compute-0 vigilant_edison[212169]: {
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:         "osd_id": 1,
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:         "type": "bluestore"
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:     },
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:         "osd_id": 2,
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:         "type": "bluestore"
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:     },
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:         "osd_id": 0,
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:         "type": "bluestore"
Oct 11 01:43:45 compute-0 vigilant_edison[212169]:     }
Oct 11 01:43:45 compute-0 vigilant_edison[212169]: }
Oct 11 01:43:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool create", "pool": "volumes", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"} v 0) v1
Oct 11 01:43:45 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/170162154' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "volumes", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]: dispatch
Oct 11 01:43:45 compute-0 systemd[1]: libpod-f56d6702ef5e92265c4dbb25b251d4bd3bc9b9e91ff5821354ca092d4a83e54d.scope: Deactivated successfully.
Oct 11 01:43:45 compute-0 systemd[1]: libpod-f56d6702ef5e92265c4dbb25b251d4bd3bc9b9e91ff5821354ca092d4a83e54d.scope: Consumed 1.222s CPU time.
Oct 11 01:43:45 compute-0 podman[212142]: 2025-10-11 01:43:45.524520316 +0000 UTC m=+1.541939539 container died f56d6702ef5e92265c4dbb25b251d4bd3bc9b9e91ff5821354ca092d4a83e54d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_edison, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:45 compute-0 unix_chkpwd[212271]: password check failed for user (root)
Oct 11 01:43:45 compute-0 sshd-session[212215]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 01:43:45 compute-0 systemd[1]: var-lib-containers-storage-overlay-bdd11e01cf4c2097f1f04ebabbf779a094ee31ac5bb58deddec6d2f8f9d5a816-merged.mount: Deactivated successfully.
Oct 11 01:43:45 compute-0 podman[212142]: 2025-10-11 01:43:45.639110658 +0000 UTC m=+1.656529811 container remove f56d6702ef5e92265c4dbb25b251d4bd3bc9b9e91ff5821354ca092d4a83e54d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_edison, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:45 compute-0 systemd[1]: libpod-conmon-f56d6702ef5e92265c4dbb25b251d4bd3bc9b9e91ff5821354ca092d4a83e54d.scope: Deactivated successfully.
Oct 11 01:43:45 compute-0 sudo[211999]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:43:45 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:43:45 compute-0 podman[212273]: 2025-10-11 01:43:45.740353095 +0000 UTC m=+0.173971781 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']})
Oct 11 01:43:45 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:45 compute-0 sudo[212306]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:45 compute-0 sudo[212306]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:45 compute-0 sudo[212306]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e20 do_prune osdmap full prune enabled
Oct 11 01:43:46 compute-0 sudo[212331]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:43:46 compute-0 sudo[212331]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/170162154' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "volumes", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]': finished
Oct 11 01:43:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e21 e21: 3 total, 3 up, 3 in
Oct 11 01:43:46 compute-0 blissful_diffie[212216]: pool 'volumes' created
Oct 11 01:43:46 compute-0 sudo[212331]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:46 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e21: 3 total, 3 up, 3 in
Oct 11 01:43:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/alertmanager/web_user}] v 0) v1
Oct 11 01:43:46 compute-0 ceph-mon[191930]: pgmap v64: 2 pgs: 1 unknown, 1 active+clean; 449 KiB data, 479 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:46 compute-0 ceph-mon[191930]: Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)
Oct 11 01:43:46 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/170162154' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "volumes", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]: dispatch
Oct 11 01:43:46 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:46 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/alertmanager/web_password}] v 0) v1
Oct 11 01:43:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/prometheus/web_user}] v 0) v1
Oct 11 01:43:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:46 compute-0 systemd[1]: libpod-fff6fbfeeb66aafd5aad4d40d635c55bce847c49fb4ac23d61464b58c6cf2ed8.scope: Deactivated successfully.
Oct 11 01:43:46 compute-0 podman[212200]: 2025-10-11 01:43:46.109314726 +0000 UTC m=+1.556502102 container died fff6fbfeeb66aafd5aad4d40d635c55bce847c49fb4ac23d61464b58c6cf2ed8 (image=quay.io/ceph/ceph:v18, name=blissful_diffie, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:43:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/prometheus/web_password}] v 0) v1
Oct 11 01:43:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:46 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.serve] Reconfiguring mon.compute-0 (unknown last config time)...
Oct 11 01:43:46 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Reconfiguring mon.compute-0 (unknown last config time)...
Oct 11 01:43:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "mon."} v 0) v1
Oct 11 01:43:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
Oct 11 01:43:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config get", "who": "mon", "key": "public_network"} v 0) v1
Oct 11 01:43:46 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
Oct 11 01:43:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:43:46 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:46 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.serve] Reconfiguring daemon mon.compute-0 on compute-0
Oct 11 01:43:46 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Reconfiguring daemon mon.compute-0 on compute-0
Oct 11 01:43:46 compute-0 systemd[1]: var-lib-containers-storage-overlay-04ca28a4c57eecf2b8f7d916c330fd0e9c6474d4da5f5436c6ced06961e0caad-merged.mount: Deactivated successfully.
Oct 11 01:43:46 compute-0 podman[212200]: 2025-10-11 01:43:46.205886344 +0000 UTC m=+1.653073730 container remove fff6fbfeeb66aafd5aad4d40d635c55bce847c49fb4ac23d61464b58c6cf2ed8 (image=quay.io/ceph/ceph:v18, name=blissful_diffie, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 01:43:46 compute-0 systemd[1]: libpod-conmon-fff6fbfeeb66aafd5aad4d40d635c55bce847c49fb4ac23d61464b58c6cf2ed8.scope: Deactivated successfully.
Oct 11 01:43:46 compute-0 sudo[212197]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:46 compute-0 sudo[212364]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:46 compute-0 sudo[212364]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:46 compute-0 sudo[212364]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v67: 3 pgs: 1 active+clean, 2 unknown; 449 KiB data, 480 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:46 compute-0 sudo[212394]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:43:46 compute-0 sudo[212394]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:46 compute-0 sudo[212394]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:46 compute-0 sudo[212440]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-llbmcthybrmgkuhmsxmbpshgrpqbhyau ; /usr/bin/python3'
Oct 11 01:43:46 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 21 pg[3.0( empty local-lis/les=0/0 n=0 ec=21/21 lis/c=0/0 les/c/f=0/0/0 sis=21) [1] r=0 lpr=21 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:46 compute-0 sudo[212440]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:43:46 compute-0 sudo[212445]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:46 compute-0 sudo[212445]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:46 compute-0 sudo[212445]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:46 compute-0 python3[212444]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   osd pool create backups  replicated_rule --autoscale-mode on _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:43:46 compute-0 sudo[212470]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 _orch deploy --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:43:46 compute-0 sudo[212470]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:46 compute-0 podman[212490]: 2025-10-11 01:43:46.756566994 +0000 UTC m=+0.062041258 container create e6de7d18436d9eb44bcf5f22a6d026d4c6e9b8e711949b9be6fea09ed6754531 (image=quay.io/ceph/ceph:v18, name=zen_matsumoto, org.label-schema.build-date=20250507, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:43:46 compute-0 systemd[1]: Started libpod-conmon-e6de7d18436d9eb44bcf5f22a6d026d4c6e9b8e711949b9be6fea09ed6754531.scope.
Oct 11 01:43:46 compute-0 podman[212490]: 2025-10-11 01:43:46.733100989 +0000 UTC m=+0.038575263 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:43:46 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:46 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c7d1f8c27f45eb9d972fb1544be3cec9bee5f30e1896fba15c88a2fb6e19e42b/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:46 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c7d1f8c27f45eb9d972fb1544be3cec9bee5f30e1896fba15c88a2fb6e19e42b/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:46 compute-0 podman[212490]: 2025-10-11 01:43:46.913459257 +0000 UTC m=+0.218933521 container init e6de7d18436d9eb44bcf5f22a6d026d4c6e9b8e711949b9be6fea09ed6754531 (image=quay.io/ceph/ceph:v18, name=zen_matsumoto, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, OSD_FLAVOR=default, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:43:46 compute-0 podman[212490]: 2025-10-11 01:43:46.932679006 +0000 UTC m=+0.238153260 container start e6de7d18436d9eb44bcf5f22a6d026d4c6e9b8e711949b9be6fea09ed6754531 (image=quay.io/ceph/ceph:v18, name=zen_matsumoto, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:46 compute-0 podman[212490]: 2025-10-11 01:43:46.937454578 +0000 UTC m=+0.242928832 container attach e6de7d18436d9eb44bcf5f22a6d026d4c6e9b8e711949b9be6fea09ed6754531 (image=quay.io/ceph/ceph:v18, name=zen_matsumoto, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.39.3)
Oct 11 01:43:47 compute-0 podman[212527]: 2025-10-11 01:43:47.07099736 +0000 UTC m=+0.066895071 container create 3b97cdb277e894db52f32c1482cfd9a39d91118e441284b8ac6e79e51d71c55f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_goodall, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:43:47 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/170162154' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "volumes", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]': finished
Oct 11 01:43:47 compute-0 ceph-mon[191930]: osdmap e21: 3 total, 3 up, 3 in
Oct 11 01:43:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:47 compute-0 ceph-mon[191930]: Reconfiguring mon.compute-0 (unknown last config time)...
Oct 11 01:43:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
Oct 11 01:43:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
Oct 11 01:43:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:47 compute-0 ceph-mon[191930]: Reconfiguring daemon mon.compute-0 on compute-0
Oct 11 01:43:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e21 do_prune osdmap full prune enabled
Oct 11 01:43:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e22 e22: 3 total, 3 up, 3 in
Oct 11 01:43:47 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e22: 3 total, 3 up, 3 in
Oct 11 01:43:47 compute-0 podman[212527]: 2025-10-11 01:43:47.041910319 +0000 UTC m=+0.037808010 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:47 compute-0 systemd[1]: Started libpod-conmon-3b97cdb277e894db52f32c1482cfd9a39d91118e441284b8ac6e79e51d71c55f.scope.
Oct 11 01:43:47 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 22 pg[3.0( empty local-lis/les=21/22 n=0 ec=21/21 lis/c=0/0 les/c/f=0/0/0 sis=21) [1] r=0 lpr=21 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:47 compute-0 sshd-session[212215]: Failed password for root from 193.46.255.217 port 20664 ssh2
Oct 11 01:43:47 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:47 compute-0 podman[212527]: 2025-10-11 01:43:47.219105634 +0000 UTC m=+0.215003395 container init 3b97cdb277e894db52f32c1482cfd9a39d91118e441284b8ac6e79e51d71c55f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_goodall, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, OSD_FLAVOR=default)
Oct 11 01:43:47 compute-0 podman[212527]: 2025-10-11 01:43:47.23484551 +0000 UTC m=+0.230743191 container start 3b97cdb277e894db52f32c1482cfd9a39d91118e441284b8ac6e79e51d71c55f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_goodall, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 01:43:47 compute-0 podman[212527]: 2025-10-11 01:43:47.239416715 +0000 UTC m=+0.235314486 container attach 3b97cdb277e894db52f32c1482cfd9a39d91118e441284b8ac6e79e51d71c55f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_goodall, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Oct 11 01:43:47 compute-0 admiring_goodall[212542]: 167 167
Oct 11 01:43:47 compute-0 systemd[1]: libpod-3b97cdb277e894db52f32c1482cfd9a39d91118e441284b8ac6e79e51d71c55f.scope: Deactivated successfully.
Oct 11 01:43:47 compute-0 podman[212527]: 2025-10-11 01:43:47.246727982 +0000 UTC m=+0.242625673 container died 3b97cdb277e894db52f32c1482cfd9a39d91118e441284b8ac6e79e51d71c55f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_goodall, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:47 compute-0 systemd[1]: var-lib-containers-storage-overlay-4febe0bbceeea395fcee42c38d5a4c0abce05bfa9b25a5504e198b9fe01577b8-merged.mount: Deactivated successfully.
Oct 11 01:43:47 compute-0 podman[212527]: 2025-10-11 01:43:47.303715409 +0000 UTC m=+0.299613120 container remove 3b97cdb277e894db52f32c1482cfd9a39d91118e441284b8ac6e79e51d71c55f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_goodall, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 01:43:47 compute-0 systemd[1]: libpod-conmon-3b97cdb277e894db52f32c1482cfd9a39d91118e441284b8ac6e79e51d71c55f.scope: Deactivated successfully.
Oct 11 01:43:47 compute-0 sudo[212470]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:43:47 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:43:47 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:47 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.serve] Reconfiguring mgr.compute-0.bzgmgr (unknown last config time)...
Oct 11 01:43:47 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Reconfiguring mgr.compute-0.bzgmgr (unknown last config time)...
Oct 11 01:43:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get-or-create", "entity": "mgr.compute-0.bzgmgr", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} v 0) v1
Oct 11 01:43:47 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.compute-0.bzgmgr", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
Oct 11 01:43:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr services"} v 0) v1
Oct 11 01:43:47 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mgr services"}]: dispatch
Oct 11 01:43:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:43:47 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:47 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.serve] Reconfiguring daemon mgr.compute-0.bzgmgr on compute-0
Oct 11 01:43:47 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Reconfiguring daemon mgr.compute-0.bzgmgr on compute-0
Oct 11 01:43:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool create", "pool": "backups", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"} v 0) v1
Oct 11 01:43:47 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/2275630110' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "backups", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]: dispatch
Oct 11 01:43:47 compute-0 sudo[212581]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:47 compute-0 sudo[212581]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:47 compute-0 sudo[212581]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:47 compute-0 sudo[212609]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:43:47 compute-0 sudo[212609]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:47 compute-0 sudo[212609]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:47 compute-0 sudo[212634]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:47 compute-0 sudo[212634]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:47 compute-0 sudo[212634]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:47 compute-0 sudo[212659]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 _orch deploy --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:43:47 compute-0 sudo[212659]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:48 compute-0 ceph-mon[191930]: pgmap v67: 3 pgs: 1 active+clean, 2 unknown; 449 KiB data, 480 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:48 compute-0 ceph-mon[191930]: osdmap e22: 3 total, 3 up, 3 in
Oct 11 01:43:48 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:48 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:48 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.compute-0.bzgmgr", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
Oct 11 01:43:48 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mgr services"}]: dispatch
Oct 11 01:43:48 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:48 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2275630110' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "backups", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]: dispatch
Oct 11 01:43:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e22 do_prune osdmap full prune enabled
Oct 11 01:43:48 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/2275630110' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "backups", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]': finished
Oct 11 01:43:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e23 e23: 3 total, 3 up, 3 in
Oct 11 01:43:48 compute-0 zen_matsumoto[212509]: pool 'backups' created
Oct 11 01:43:48 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e23: 3 total, 3 up, 3 in
Oct 11 01:43:48 compute-0 systemd[1]: libpod-e6de7d18436d9eb44bcf5f22a6d026d4c6e9b8e711949b9be6fea09ed6754531.scope: Deactivated successfully.
Oct 11 01:43:48 compute-0 podman[212490]: 2025-10-11 01:43:48.197940366 +0000 UTC m=+1.503414620 container died e6de7d18436d9eb44bcf5f22a6d026d4c6e9b8e711949b9be6fea09ed6754531 (image=quay.io/ceph/ceph:v18, name=zen_matsumoto, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:43:48 compute-0 systemd[1]: var-lib-containers-storage-overlay-c7d1f8c27f45eb9d972fb1544be3cec9bee5f30e1896fba15c88a2fb6e19e42b-merged.mount: Deactivated successfully.
Oct 11 01:43:48 compute-0 podman[212490]: 2025-10-11 01:43:48.285806016 +0000 UTC m=+1.591280280 container remove e6de7d18436d9eb44bcf5f22a6d026d4c6e9b8e711949b9be6fea09ed6754531 (image=quay.io/ceph/ceph:v18, name=zen_matsumoto, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:43:48 compute-0 systemd[1]: libpod-conmon-e6de7d18436d9eb44bcf5f22a6d026d4c6e9b8e711949b9be6fea09ed6754531.scope: Deactivated successfully.
Oct 11 01:43:48 compute-0 sudo[212440]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:48 compute-0 podman[212710]: 2025-10-11 01:43:48.359449466 +0000 UTC m=+0.069521559 container create 250600ea7f67ec30f6d29535c0935b514c1cbc6334febc987606e091a88c0738 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=determined_wilbur, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3)
Oct 11 01:43:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v70: 4 pgs: 2 active+clean, 2 unknown; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:48 compute-0 systemd[1]: Started libpod-conmon-250600ea7f67ec30f6d29535c0935b514c1cbc6334febc987606e091a88c0738.scope.
Oct 11 01:43:48 compute-0 podman[212710]: 2025-10-11 01:43:48.333979112 +0000 UTC m=+0.044051245 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:48 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:48 compute-0 podman[212710]: 2025-10-11 01:43:48.47108848 +0000 UTC m=+0.181160583 container init 250600ea7f67ec30f6d29535c0935b514c1cbc6334febc987606e091a88c0738 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=determined_wilbur, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507)
Oct 11 01:43:48 compute-0 podman[212710]: 2025-10-11 01:43:48.484002953 +0000 UTC m=+0.194075076 container start 250600ea7f67ec30f6d29535c0935b514c1cbc6334febc987606e091a88c0738 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=determined_wilbur, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:43:48 compute-0 determined_wilbur[212731]: 167 167
Oct 11 01:43:48 compute-0 podman[212710]: 2025-10-11 01:43:48.491069662 +0000 UTC m=+0.201141835 container attach 250600ea7f67ec30f6d29535c0935b514c1cbc6334febc987606e091a88c0738 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=determined_wilbur, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:43:48 compute-0 systemd[1]: libpod-250600ea7f67ec30f6d29535c0935b514c1cbc6334febc987606e091a88c0738.scope: Deactivated successfully.
Oct 11 01:43:48 compute-0 podman[212710]: 2025-10-11 01:43:48.493089992 +0000 UTC m=+0.203162105 container died 250600ea7f67ec30f6d29535c0935b514c1cbc6334febc987606e091a88c0738 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=determined_wilbur, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:48 compute-0 sudo[212755]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bioapwfflnqqbmzqymiugbwlnzymbaxf ; /usr/bin/python3'
Oct 11 01:43:48 compute-0 sudo[212755]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:43:48 compute-0 systemd[1]: var-lib-containers-storage-overlay-9e3ce2cdcb773518fb4736eb167e4f836cab22eee94be102c8503d27d7aeb77c-merged.mount: Deactivated successfully.
Oct 11 01:43:48 compute-0 podman[212710]: 2025-10-11 01:43:48.555074146 +0000 UTC m=+0.265146229 container remove 250600ea7f67ec30f6d29535c0935b514c1cbc6334febc987606e091a88c0738 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=determined_wilbur, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:48 compute-0 systemd[1]: libpod-conmon-250600ea7f67ec30f6d29535c0935b514c1cbc6334febc987606e091a88c0738.scope: Deactivated successfully.
Oct 11 01:43:48 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 23 pg[4.0( empty local-lis/les=0/0 n=0 ec=23/23 lis/c=0/0 les/c/f=0/0/0 sis=23) [0] r=0 lpr=23 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:48 compute-0 sudo[212659]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:43:48 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:43:48 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:48 compute-0 python3[212764]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   osd pool create images  replicated_rule --autoscale-mode on _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:43:48 compute-0 unix_chkpwd[212798]: password check failed for user (root)
Oct 11 01:43:48 compute-0 podman[212772]: 2025-10-11 01:43:48.783170728 +0000 UTC m=+0.077569597 container create 8f30f34dadde3d05f4569ef476e354b07719f6d8d3823fd3b47fd04c27eed910 (image=quay.io/ceph/ceph:v18, name=recursing_ritchie, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0)
Oct 11 01:43:48 compute-0 sudo[212771]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:48 compute-0 sudo[212771]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:48 compute-0 sudo[212771]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:48 compute-0 podman[212772]: 2025-10-11 01:43:48.748579524 +0000 UTC m=+0.042978463 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:43:48 compute-0 systemd[1]: Started libpod-conmon-8f30f34dadde3d05f4569ef476e354b07719f6d8d3823fd3b47fd04c27eed910.scope.
Oct 11 01:43:48 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:48 compute-0 sudo[212811]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:43:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b0c85f2e460419dc7ba2121e42250c91c80e33e341b9ce5467a44b1a6e503199/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b0c85f2e460419dc7ba2121e42250c91c80e33e341b9ce5467a44b1a6e503199/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:48 compute-0 sudo[212811]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:48 compute-0 sudo[212811]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:48 compute-0 podman[212772]: 2025-10-11 01:43:48.970057389 +0000 UTC m=+0.264456278 container init 8f30f34dadde3d05f4569ef476e354b07719f6d8d3823fd3b47fd04c27eed910 (image=quay.io/ceph/ceph:v18, name=recursing_ritchie, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 01:43:48 compute-0 podman[212772]: 2025-10-11 01:43:48.985025652 +0000 UTC m=+0.279424521 container start 8f30f34dadde3d05f4569ef476e354b07719f6d8d3823fd3b47fd04c27eed910 (image=quay.io/ceph/ceph:v18, name=recursing_ritchie, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:43:48 compute-0 podman[212772]: 2025-10-11 01:43:48.99137199 +0000 UTC m=+0.285770929 container attach 8f30f34dadde3d05f4569ef476e354b07719f6d8d3823fd3b47fd04c27eed910 (image=quay.io/ceph/ceph:v18, name=recursing_ritchie, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:43:49 compute-0 sudo[212841]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:49 compute-0 sudo[212841]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:49 compute-0 sudo[212841]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e23 do_prune osdmap full prune enabled
Oct 11 01:43:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e24 e24: 3 total, 3 up, 3 in
Oct 11 01:43:49 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e24: 3 total, 3 up, 3 in
Oct 11 01:43:49 compute-0 ceph-mon[191930]: Reconfiguring mgr.compute-0.bzgmgr (unknown last config time)...
Oct 11 01:43:49 compute-0 ceph-mon[191930]: Reconfiguring daemon mgr.compute-0.bzgmgr on compute-0
Oct 11 01:43:49 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2275630110' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "backups", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]': finished
Oct 11 01:43:49 compute-0 ceph-mon[191930]: osdmap e23: 3 total, 3 up, 3 in
Oct 11 01:43:49 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:49 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:49 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 24 pg[4.0( empty local-lis/les=23/24 n=0 ec=23/23 lis/c=0/0 les/c/f=0/0/0 sis=23) [0] r=0 lpr=23 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:49 compute-0 sudo[212867]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ls
Oct 11 01:43:49 compute-0 sudo[212867]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e24 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:43:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool create", "pool": "images", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"} v 0) v1
Oct 11 01:43:49 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/854765594' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "images", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]: dispatch
Oct 11 01:43:50 compute-0 podman[212982]: 2025-10-11 01:43:50.164365429 +0000 UTC m=+0.138372056 container exec ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e24 do_prune osdmap full prune enabled
Oct 11 01:43:50 compute-0 ceph-mon[191930]: pgmap v70: 4 pgs: 2 active+clean, 2 unknown; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:50 compute-0 ceph-mon[191930]: osdmap e24: 3 total, 3 up, 3 in
Oct 11 01:43:50 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/854765594' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "images", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]: dispatch
Oct 11 01:43:50 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/854765594' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "images", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]': finished
Oct 11 01:43:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e25 e25: 3 total, 3 up, 3 in
Oct 11 01:43:50 compute-0 recursing_ritchie[212825]: pool 'images' created
Oct 11 01:43:50 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e25: 3 total, 3 up, 3 in
Oct 11 01:43:50 compute-0 systemd[1]: libpod-8f30f34dadde3d05f4569ef476e354b07719f6d8d3823fd3b47fd04c27eed910.scope: Deactivated successfully.
Oct 11 01:43:50 compute-0 podman[212772]: 2025-10-11 01:43:50.259108554 +0000 UTC m=+1.553507433 container died 8f30f34dadde3d05f4569ef476e354b07719f6d8d3823fd3b47fd04c27eed910 (image=quay.io/ceph/ceph:v18, name=recursing_ritchie, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef)
Oct 11 01:43:50 compute-0 podman[212982]: 2025-10-11 01:43:50.321647865 +0000 UTC m=+0.295654452 container exec_died ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 01:43:50 compute-0 systemd[1]: var-lib-containers-storage-overlay-b0c85f2e460419dc7ba2121e42250c91c80e33e341b9ce5467a44b1a6e503199-merged.mount: Deactivated successfully.
Oct 11 01:43:50 compute-0 podman[212772]: 2025-10-11 01:43:50.349028355 +0000 UTC m=+1.643427194 container remove 8f30f34dadde3d05f4569ef476e354b07719f6d8d3823fd3b47fd04c27eed910 (image=quay.io/ceph/ceph:v18, name=recursing_ritchie, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 01:43:50 compute-0 systemd[1]: libpod-conmon-8f30f34dadde3d05f4569ef476e354b07719f6d8d3823fd3b47fd04c27eed910.scope: Deactivated successfully.
Oct 11 01:43:50 compute-0 sudo[212755]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v73: 5 pgs: 2 active+clean, 3 unknown; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:50 compute-0 sudo[213068]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iekhohvjtosnujnzalykbwtpsxyenspl ; /usr/bin/python3'
Oct 11 01:43:50 compute-0 sudo[213068]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:43:50 compute-0 python3[213079]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   osd pool create cephfs.cephfs.meta  replicated_rule --autoscale-mode on _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:43:50 compute-0 podman[213099]: 2025-10-11 01:43:50.873086347 +0000 UTC m=+0.084105611 container create 1c7054fcc33a16023bfd8692001232d47673ea62b680937d4a71f76910dfba51 (image=quay.io/ceph/ceph:v18, name=sleepy_benz, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507)
Oct 11 01:43:50 compute-0 podman[213099]: 2025-10-11 01:43:50.837638717 +0000 UTC m=+0.048658041 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:43:50 compute-0 systemd[1]: Started libpod-conmon-1c7054fcc33a16023bfd8692001232d47673ea62b680937d4a71f76910dfba51.scope.
Oct 11 01:43:50 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:50 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/92d9eb59451957a3acb1e20d3ad7fb06abac38404e4fcc27e12f2c1c330ccc8f/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:51 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/92d9eb59451957a3acb1e20d3ad7fb06abac38404e4fcc27e12f2c1c330ccc8f/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:51 compute-0 podman[213099]: 2025-10-11 01:43:51.027103475 +0000 UTC m=+0.238122789 container init 1c7054fcc33a16023bfd8692001232d47673ea62b680937d4a71f76910dfba51 (image=quay.io/ceph/ceph:v18, name=sleepy_benz, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef)
Oct 11 01:43:51 compute-0 podman[213099]: 2025-10-11 01:43:51.04177436 +0000 UTC m=+0.252793624 container start 1c7054fcc33a16023bfd8692001232d47673ea62b680937d4a71f76910dfba51 (image=quay.io/ceph/ceph:v18, name=sleepy_benz, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:51 compute-0 podman[213099]: 2025-10-11 01:43:51.048034515 +0000 UTC m=+0.259053839 container attach 1c7054fcc33a16023bfd8692001232d47673ea62b680937d4a71f76910dfba51 (image=quay.io/ceph/ceph:v18, name=sleepy_benz, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:43:51 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 25 pg[5.0( empty local-lis/les=0/0 n=0 ec=25/25 lis/c=0/0 les/c/f=0/0/0 sis=25) [2] r=0 lpr=25 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e25 do_prune osdmap full prune enabled
Oct 11 01:43:51 compute-0 ceph-mon[191930]: log_channel(cluster) log [WRN] : Health check update: 4 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)
Oct 11 01:43:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e26 e26: 3 total, 3 up, 3 in
Oct 11 01:43:51 compute-0 sudo[212867]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:51 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e26: 3 total, 3 up, 3 in
Oct 11 01:43:51 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/854765594' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "images", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]': finished
Oct 11 01:43:51 compute-0 ceph-mon[191930]: osdmap e25: 3 total, 3 up, 3 in
Oct 11 01:43:51 compute-0 ceph-mon[191930]: pgmap v73: 5 pgs: 2 active+clean, 3 unknown; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:43:51 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 26 pg[5.0( empty local-lis/les=25/26 n=0 ec=25/25 lis/c=0/0 les/c/f=0/0/0 sis=25) [2] r=0 lpr=25 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:43:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:43:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:43:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:43:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:43:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:51 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 216e838c-e0c8-404c-87bf-4a0a5c15dcee does not exist
Oct 11 01:43:51 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 79451033-6b37-44eb-896b-1f2c094e9af3 does not exist
Oct 11 01:43:51 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 879891f1-5e2f-45ed-8a71-2e136a1cddec does not exist
Oct 11 01:43:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:43:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:43:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:43:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:43:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:43:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:51 compute-0 sshd-session[212215]: Failed password for root from 193.46.255.217 port 20664 ssh2
Oct 11 01:43:51 compute-0 sudo[213153]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:51 compute-0 sudo[213153]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:51 compute-0 sudo[213153]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:51 compute-0 sudo[213197]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:43:51 compute-0 sudo[213197]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:51 compute-0 sudo[213197]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool create", "pool": "cephfs.cephfs.meta", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"} v 0) v1
Oct 11 01:43:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/2663091739' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "cephfs.cephfs.meta", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]: dispatch
Oct 11 01:43:51 compute-0 sudo[213222]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:51 compute-0 sudo[213222]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:51 compute-0 sudo[213222]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:51 compute-0 sudo[213250]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 01:43:51 compute-0 sudo[213250]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:51 compute-0 unix_chkpwd[213275]: password check failed for user (root)
Oct 11 01:43:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e26 do_prune osdmap full prune enabled
Oct 11 01:43:52 compute-0 ceph-mon[191930]: Health check update: 4 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)
Oct 11 01:43:52 compute-0 ceph-mon[191930]: osdmap e26: 3 total, 3 up, 3 in
Oct 11 01:43:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:43:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:43:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:43:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:43:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:43:52 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2663091739' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "cephfs.cephfs.meta", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]: dispatch
Oct 11 01:43:52 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/2663091739' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "cephfs.cephfs.meta", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]': finished
Oct 11 01:43:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e27 e27: 3 total, 3 up, 3 in
Oct 11 01:43:52 compute-0 sleepy_benz[213131]: pool 'cephfs.cephfs.meta' created
Oct 11 01:43:52 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e27: 3 total, 3 up, 3 in
Oct 11 01:43:52 compute-0 systemd[1]: libpod-1c7054fcc33a16023bfd8692001232d47673ea62b680937d4a71f76910dfba51.scope: Deactivated successfully.
Oct 11 01:43:52 compute-0 podman[213099]: 2025-10-11 01:43:52.317635453 +0000 UTC m=+1.528654707 container died 1c7054fcc33a16023bfd8692001232d47673ea62b680937d4a71f76910dfba51 (image=quay.io/ceph/ceph:v18, name=sleepy_benz, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 01:43:52 compute-0 systemd[1]: var-lib-containers-storage-overlay-92d9eb59451957a3acb1e20d3ad7fb06abac38404e4fcc27e12f2c1c330ccc8f-merged.mount: Deactivated successfully.
Oct 11 01:43:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v76: 6 pgs: 3 active+clean, 3 unknown; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:52 compute-0 podman[213099]: 2025-10-11 01:43:52.423466065 +0000 UTC m=+1.634485299 container remove 1c7054fcc33a16023bfd8692001232d47673ea62b680937d4a71f76910dfba51 (image=quay.io/ceph/ceph:v18, name=sleepy_benz, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 01:43:52 compute-0 systemd[1]: libpod-conmon-1c7054fcc33a16023bfd8692001232d47673ea62b680937d4a71f76910dfba51.scope: Deactivated successfully.
Oct 11 01:43:52 compute-0 sudo[213068]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:52 compute-0 podman[213323]: 2025-10-11 01:43:52.46419014 +0000 UTC m=+0.070868158 container create 45c82f01ba3bdeaab53c3ac17cad2aedc3c5ce722acfd8e64dbbd2e3ea45f776 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ecstatic_perlman, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:43:52 compute-0 systemd[1]: Started libpod-conmon-45c82f01ba3bdeaab53c3ac17cad2aedc3c5ce722acfd8e64dbbd2e3ea45f776.scope.
Oct 11 01:43:52 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 27 pg[6.0( empty local-lis/les=0/0 n=0 ec=27/27 lis/c=0/0 les/c/f=0/0/0 sis=27) [0] r=0 lpr=27 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:52 compute-0 podman[213323]: 2025-10-11 01:43:52.443155378 +0000 UTC m=+0.049833416 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:52 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:52 compute-0 podman[213323]: 2025-10-11 01:43:52.578461973 +0000 UTC m=+0.185140011 container init 45c82f01ba3bdeaab53c3ac17cad2aedc3c5ce722acfd8e64dbbd2e3ea45f776 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ecstatic_perlman, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_REF=reef)
Oct 11 01:43:52 compute-0 podman[213323]: 2025-10-11 01:43:52.588572052 +0000 UTC m=+0.195250080 container start 45c82f01ba3bdeaab53c3ac17cad2aedc3c5ce722acfd8e64dbbd2e3ea45f776 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ecstatic_perlman, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_REF=reef)
Oct 11 01:43:52 compute-0 podman[213323]: 2025-10-11 01:43:52.593950261 +0000 UTC m=+0.200628299 container attach 45c82f01ba3bdeaab53c3ac17cad2aedc3c5ce722acfd8e64dbbd2e3ea45f776 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ecstatic_perlman, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, ceph=True, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:52 compute-0 ecstatic_perlman[213346]: 167 167
Oct 11 01:43:52 compute-0 systemd[1]: libpod-45c82f01ba3bdeaab53c3ac17cad2aedc3c5ce722acfd8e64dbbd2e3ea45f776.scope: Deactivated successfully.
Oct 11 01:43:52 compute-0 conmon[213346]: conmon 45c82f01ba3bdeaab53c <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-45c82f01ba3bdeaab53c3ac17cad2aedc3c5ce722acfd8e64dbbd2e3ea45f776.scope/container/memory.events
Oct 11 01:43:52 compute-0 podman[213359]: 2025-10-11 01:43:52.674956019 +0000 UTC m=+0.047443735 container died 45c82f01ba3bdeaab53c3ac17cad2aedc3c5ce722acfd8e64dbbd2e3ea45f776 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ecstatic_perlman, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 01:43:52 compute-0 sudo[213383]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ujldgafuesjnxgkwhuekkbazssowjrgm ; /usr/bin/python3'
Oct 11 01:43:52 compute-0 sudo[213383]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:43:52 compute-0 systemd[1]: var-lib-containers-storage-overlay-28ab101e83c2397256afea0b20b1c438167342d2d169e8fe945fc885cc9358cd-merged.mount: Deactivated successfully.
Oct 11 01:43:52 compute-0 podman[213359]: 2025-10-11 01:43:52.749578348 +0000 UTC m=+0.122066074 container remove 45c82f01ba3bdeaab53c3ac17cad2aedc3c5ce722acfd8e64dbbd2e3ea45f776 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ecstatic_perlman, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default)
Oct 11 01:43:52 compute-0 systemd[1]: libpod-conmon-45c82f01ba3bdeaab53c3ac17cad2aedc3c5ce722acfd8e64dbbd2e3ea45f776.scope: Deactivated successfully.
Oct 11 01:43:52 compute-0 python3[213391]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   osd pool create cephfs.cephfs.data  replicated_rule --autoscale-mode on _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:43:53 compute-0 podman[213394]: 2025-10-11 01:43:53.010657555 +0000 UTC m=+0.103300168 container create d93582fe837540cf2b34c046c7c91020d5efa4b36638d988067fba6bb33e6f04 (image=quay.io/ceph/ceph:v18, name=elegant_stonebraker, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0)
Oct 11 01:43:53 compute-0 podman[213394]: 2025-10-11 01:43:52.973029522 +0000 UTC m=+0.065672165 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:43:53 compute-0 podman[213408]: 2025-10-11 01:43:53.072207897 +0000 UTC m=+0.111356947 container create c54009eeb0555a0f924ef1244808b9761ca48c86c5009bb6b947f0b2cc4c3f2a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_saha, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Oct 11 01:43:53 compute-0 systemd[1]: Started libpod-conmon-d93582fe837540cf2b34c046c7c91020d5efa4b36638d988067fba6bb33e6f04.scope.
Oct 11 01:43:53 compute-0 podman[213408]: 2025-10-11 01:43:53.025824264 +0000 UTC m=+0.064973334 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:53 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cb392987ad9ebcaa93ef5853e9a0dbb5b1e444ec32f04f4781824c140acb33b2/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cb392987ad9ebcaa93ef5853e9a0dbb5b1e444ec32f04f4781824c140acb33b2/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:53 compute-0 systemd[1]: Started libpod-conmon-c54009eeb0555a0f924ef1244808b9761ca48c86c5009bb6b947f0b2cc4c3f2a.scope.
Oct 11 01:43:53 compute-0 podman[213394]: 2025-10-11 01:43:53.188833509 +0000 UTC m=+0.281476162 container init d93582fe837540cf2b34c046c7c91020d5efa4b36638d988067fba6bb33e6f04 (image=quay.io/ceph/ceph:v18, name=elegant_stonebraker, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:53 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:53 compute-0 podman[213394]: 2025-10-11 01:43:53.206532373 +0000 UTC m=+0.299174946 container start d93582fe837540cf2b34c046c7c91020d5efa4b36638d988067fba6bb33e6f04 (image=quay.io/ceph/ceph:v18, name=elegant_stonebraker, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 01:43:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/680528e557f31891075e0322f6d4a218073fb2c3336943a2212550b4b32197b3/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:53 compute-0 podman[213394]: 2025-10-11 01:43:53.211760688 +0000 UTC m=+0.304403341 container attach d93582fe837540cf2b34c046c7c91020d5efa4b36638d988067fba6bb33e6f04 (image=quay.io/ceph/ceph:v18, name=elegant_stonebraker, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507)
Oct 11 01:43:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/680528e557f31891075e0322f6d4a218073fb2c3336943a2212550b4b32197b3/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/680528e557f31891075e0322f6d4a218073fb2c3336943a2212550b4b32197b3/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/680528e557f31891075e0322f6d4a218073fb2c3336943a2212550b4b32197b3/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/680528e557f31891075e0322f6d4a218073fb2c3336943a2212550b4b32197b3/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:53 compute-0 podman[213408]: 2025-10-11 01:43:53.238899721 +0000 UTC m=+0.278048781 container init c54009eeb0555a0f924ef1244808b9761ca48c86c5009bb6b947f0b2cc4c3f2a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_saha, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 01:43:53 compute-0 podman[213408]: 2025-10-11 01:43:53.264523399 +0000 UTC m=+0.303672439 container start c54009eeb0555a0f924ef1244808b9761ca48c86c5009bb6b947f0b2cc4c3f2a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_saha, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 01:43:53 compute-0 podman[213408]: 2025-10-11 01:43:53.26926227 +0000 UTC m=+0.308411310 container attach c54009eeb0555a0f924ef1244808b9761ca48c86c5009bb6b947f0b2cc4c3f2a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_saha, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 01:43:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e27 do_prune osdmap full prune enabled
Oct 11 01:43:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e28 e28: 3 total, 3 up, 3 in
Oct 11 01:43:53 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e28: 3 total, 3 up, 3 in
Oct 11 01:43:53 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2663091739' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "cephfs.cephfs.meta", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]': finished
Oct 11 01:43:53 compute-0 ceph-mon[191930]: osdmap e27: 3 total, 3 up, 3 in
Oct 11 01:43:53 compute-0 ceph-mon[191930]: pgmap v76: 6 pgs: 3 active+clean, 3 unknown; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:53 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 28 pg[6.0( empty local-lis/les=27/28 n=0 ec=27/27 lis/c=0/0 les/c/f=0/0/0 sis=27) [0] r=0 lpr=27 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool create", "pool": "cephfs.cephfs.data", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"} v 0) v1
Oct 11 01:43:53 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1584358' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "cephfs.cephfs.data", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]: dispatch
Oct 11 01:43:54 compute-0 sshd-session[212215]: Failed password for root from 193.46.255.217 port 20664 ssh2
Oct 11 01:43:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e28 do_prune osdmap full prune enabled
Oct 11 01:43:54 compute-0 ceph-mon[191930]: osdmap e28: 3 total, 3 up, 3 in
Oct 11 01:43:54 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1584358' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "cephfs.cephfs.data", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]: dispatch
Oct 11 01:43:54 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1584358' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "cephfs.cephfs.data", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]': finished
Oct 11 01:43:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e29 e29: 3 total, 3 up, 3 in
Oct 11 01:43:54 compute-0 elegant_stonebraker[213426]: pool 'cephfs.cephfs.data' created
Oct 11 01:43:54 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e29: 3 total, 3 up, 3 in
Oct 11 01:43:54 compute-0 systemd[1]: libpod-d93582fe837540cf2b34c046c7c91020d5efa4b36638d988067fba6bb33e6f04.scope: Deactivated successfully.
Oct 11 01:43:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v79: 7 pgs: 1 unknown, 6 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:54 compute-0 podman[213480]: 2025-10-11 01:43:54.457203191 +0000 UTC m=+0.046175717 container died d93582fe837540cf2b34c046c7c91020d5efa4b36638d988067fba6bb33e6f04 (image=quay.io/ceph/ceph:v18, name=elegant_stonebraker, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:54 compute-0 silly_saha[213431]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:43:54 compute-0 silly_saha[213431]: --> relative data size: 1.0
Oct 11 01:43:54 compute-0 silly_saha[213431]: --> All data devices are unavailable
Oct 11 01:43:54 compute-0 systemd[1]: var-lib-containers-storage-overlay-cb392987ad9ebcaa93ef5853e9a0dbb5b1e444ec32f04f4781824c140acb33b2-merged.mount: Deactivated successfully.
Oct 11 01:43:54 compute-0 podman[213480]: 2025-10-11 01:43:54.523301108 +0000 UTC m=+0.112273594 container remove d93582fe837540cf2b34c046c7c91020d5efa4b36638d988067fba6bb33e6f04 (image=quay.io/ceph/ceph:v18, name=elegant_stonebraker, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 01:43:54 compute-0 systemd[1]: libpod-c54009eeb0555a0f924ef1244808b9761ca48c86c5009bb6b947f0b2cc4c3f2a.scope: Deactivated successfully.
Oct 11 01:43:54 compute-0 podman[213408]: 2025-10-11 01:43:54.529055068 +0000 UTC m=+1.568204108 container died c54009eeb0555a0f924ef1244808b9761ca48c86c5009bb6b947f0b2cc4c3f2a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_saha, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 01:43:54 compute-0 systemd[1]: libpod-c54009eeb0555a0f924ef1244808b9761ca48c86c5009bb6b947f0b2cc4c3f2a.scope: Consumed 1.190s CPU time.
Oct 11 01:43:54 compute-0 systemd[1]: libpod-conmon-d93582fe837540cf2b34c046c7c91020d5efa4b36638d988067fba6bb33e6f04.scope: Deactivated successfully.
Oct 11 01:43:54 compute-0 systemd[1]: var-lib-containers-storage-overlay-680528e557f31891075e0322f6d4a218073fb2c3336943a2212550b4b32197b3-merged.mount: Deactivated successfully.
Oct 11 01:43:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e29 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:43:54 compute-0 sudo[213383]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:54 compute-0 podman[213408]: 2025-10-11 01:43:54.610665174 +0000 UTC m=+1.649814214 container remove c54009eeb0555a0f924ef1244808b9761ca48c86c5009bb6b947f0b2cc4c3f2a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_saha, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:43:54 compute-0 systemd[1]: libpod-conmon-c54009eeb0555a0f924ef1244808b9761ca48c86c5009bb6b947f0b2cc4c3f2a.scope: Deactivated successfully.
Oct 11 01:43:54 compute-0 sudo[213250]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:54 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 29 pg[7.0( empty local-lis/les=0/0 n=0 ec=29/29 lis/c=0/0 les/c/f=0/0/0 sis=29) [1] r=0 lpr=29 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:54 compute-0 sudo[213513]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:54 compute-0 sudo[213513]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:54 compute-0 sudo[213513]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:54 compute-0 sudo[213561]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-psrragipupxkdbzqpnfhqjlkfjfqrhcm ; /usr/bin/python3'
Oct 11 01:43:54 compute-0 sudo[213561]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:43:54 compute-0 sudo[213562]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:43:54 compute-0 sudo[213562]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:54 compute-0 sudo[213562]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:54 compute-0 python3[213571]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   osd pool application enable vms rbd _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:43:55 compute-0 sudo[213589]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:55 compute-0 sudo[213589]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:55 compute-0 sudo[213589]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:55 compute-0 podman[213612]: 2025-10-11 01:43:55.068051731 +0000 UTC m=+0.074706083 container create 0946066451d4bcaf5e1559d0fec865918ec4d6eee663fb91c1096be027252dc3 (image=quay.io/ceph/ceph:v18, name=serene_blackwell, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 01:43:55 compute-0 podman[213612]: 2025-10-11 01:43:55.032478468 +0000 UTC m=+0.039132900 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:43:55 compute-0 systemd[1]: Started libpod-conmon-0946066451d4bcaf5e1559d0fec865918ec4d6eee663fb91c1096be027252dc3.scope.
Oct 11 01:43:55 compute-0 sudo[213624]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:43:55 compute-0 sudo[213624]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:55 compute-0 sshd-session[212215]: Received disconnect from 193.46.255.217 port 20664:11:  [preauth]
Oct 11 01:43:55 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:55 compute-0 sshd-session[212215]: Disconnected from authenticating user root 193.46.255.217 port 20664 [preauth]
Oct 11 01:43:55 compute-0 sshd-session[212215]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 01:43:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/07e905115afd54f8d8231c9895c4096bf1843422e198f0003f134ddde399aa71/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/07e905115afd54f8d8231c9895c4096bf1843422e198f0003f134ddde399aa71/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:55 compute-0 podman[213612]: 2025-10-11 01:43:55.221915695 +0000 UTC m=+0.228570097 container init 0946066451d4bcaf5e1559d0fec865918ec4d6eee663fb91c1096be027252dc3 (image=quay.io/ceph/ceph:v18, name=serene_blackwell, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:55 compute-0 podman[213612]: 2025-10-11 01:43:55.234790786 +0000 UTC m=+0.241445138 container start 0946066451d4bcaf5e1559d0fec865918ec4d6eee663fb91c1096be027252dc3 (image=quay.io/ceph/ceph:v18, name=serene_blackwell, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default)
Oct 11 01:43:55 compute-0 podman[213612]: 2025-10-11 01:43:55.258539069 +0000 UTC m=+0.265193471 container attach 0946066451d4bcaf5e1559d0fec865918ec4d6eee663fb91c1096be027252dc3 (image=quay.io/ceph/ceph:v18, name=serene_blackwell, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3)
Oct 11 01:43:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e29 do_prune osdmap full prune enabled
Oct 11 01:43:55 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1584358' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "cephfs.cephfs.data", "erasure_code_profile": "replicated_rule", "autoscale_mode": "on"}]': finished
Oct 11 01:43:55 compute-0 ceph-mon[191930]: osdmap e29: 3 total, 3 up, 3 in
Oct 11 01:43:55 compute-0 ceph-mon[191930]: pgmap v79: 7 pgs: 1 unknown, 6 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e30 e30: 3 total, 3 up, 3 in
Oct 11 01:43:55 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e30: 3 total, 3 up, 3 in
Oct 11 01:43:55 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 30 pg[7.0( empty local-lis/les=29/30 n=0 ec=29/29 lis/c=0/0 les/c/f=0/0/0 sis=29) [1] r=0 lpr=29 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:55 compute-0 podman[213715]: 2025-10-11 01:43:55.752605523 +0000 UTC m=+0.089625324 container create 6c0e328406208d73bb1b350d6080eec8eaccce65b2d7ccb0aeed512404f95687 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_bhaskara, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:43:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool application enable", "pool": "vms", "app": "rbd"} v 0) v1
Oct 11 01:43:55 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/3770001800' entity='client.admin' cmd=[{"prefix": "osd pool application enable", "pool": "vms", "app": "rbd"}]: dispatch
Oct 11 01:43:55 compute-0 podman[213715]: 2025-10-11 01:43:55.714950468 +0000 UTC m=+0.051970319 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:55 compute-0 systemd[1]: Started libpod-conmon-6c0e328406208d73bb1b350d6080eec8eaccce65b2d7ccb0aeed512404f95687.scope.
Oct 11 01:43:55 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:55 compute-0 podman[213715]: 2025-10-11 01:43:55.904757896 +0000 UTC m=+0.241777747 container init 6c0e328406208d73bb1b350d6080eec8eaccce65b2d7ccb0aeed512404f95687 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_bhaskara, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 01:43:55 compute-0 podman[213715]: 2025-10-11 01:43:55.921623795 +0000 UTC m=+0.258643596 container start 6c0e328406208d73bb1b350d6080eec8eaccce65b2d7ccb0aeed512404f95687 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_bhaskara, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:43:55 compute-0 goofy_bhaskara[213733]: 167 167
Oct 11 01:43:55 compute-0 podman[213715]: 2025-10-11 01:43:55.929197109 +0000 UTC m=+0.266216970 container attach 6c0e328406208d73bb1b350d6080eec8eaccce65b2d7ccb0aeed512404f95687 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_bhaskara, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:43:55 compute-0 systemd[1]: libpod-6c0e328406208d73bb1b350d6080eec8eaccce65b2d7ccb0aeed512404f95687.scope: Deactivated successfully.
Oct 11 01:43:55 compute-0 conmon[213733]: conmon 6c0e328406208d73bb1b <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-6c0e328406208d73bb1b350d6080eec8eaccce65b2d7ccb0aeed512404f95687.scope/container/memory.events
Oct 11 01:43:55 compute-0 podman[213715]: 2025-10-11 01:43:55.936927448 +0000 UTC m=+0.273947279 container died 6c0e328406208d73bb1b350d6080eec8eaccce65b2d7ccb0aeed512404f95687 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_bhaskara, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 01:43:55 compute-0 systemd[1]: var-lib-containers-storage-overlay-5e3b366d72440ce4b34e37bd292eb76b837e6e8a4a7f95c49aa1ab2005c403fe-merged.mount: Deactivated successfully.
Oct 11 01:43:56 compute-0 podman[213715]: 2025-10-11 01:43:56.016673099 +0000 UTC m=+0.353692900 container remove 6c0e328406208d73bb1b350d6080eec8eaccce65b2d7ccb0aeed512404f95687 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_bhaskara, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:43:56 compute-0 systemd[1]: libpod-conmon-6c0e328406208d73bb1b350d6080eec8eaccce65b2d7ccb0aeed512404f95687.scope: Deactivated successfully.
Oct 11 01:43:56 compute-0 podman[213755]: 2025-10-11 01:43:56.339900726 +0000 UTC m=+0.110585104 container create f23484be94a87c5b84e229e926ccb938681f235908c543bedc36ce6ac2557d51 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_hamilton, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 01:43:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e30 do_prune osdmap full prune enabled
Oct 11 01:43:56 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/3770001800' entity='client.admin' cmd='[{"prefix": "osd pool application enable", "pool": "vms", "app": "rbd"}]': finished
Oct 11 01:43:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e31 e31: 3 total, 3 up, 3 in
Oct 11 01:43:56 compute-0 serene_blackwell[213654]: enabled application 'rbd' on pool 'vms'
Oct 11 01:43:56 compute-0 podman[213755]: 2025-10-11 01:43:56.294202153 +0000 UTC m=+0.064886601 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:56 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e31: 3 total, 3 up, 3 in
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:43:56
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['volumes', 'cephfs.cephfs.data', 'cephfs.cephfs.meta', 'vms', '.mgr', 'images', 'backups']
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v82: 7 pgs: 7 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:56 compute-0 ceph-mon[191930]: osdmap e30: 3 total, 3 up, 3 in
Oct 11 01:43:56 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3770001800' entity='client.admin' cmd=[{"prefix": "osd pool application enable", "pool": "vms", "app": "rbd"}]: dispatch
Oct 11 01:43:56 compute-0 podman[213612]: 2025-10-11 01:43:56.425661924 +0000 UTC m=+1.432316246 container died 0946066451d4bcaf5e1559d0fec865918ec4d6eee663fb91c1096be027252dc3 (image=quay.io/ceph/ceph:v18, name=serene_blackwell, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:56 compute-0 systemd[1]: Started libpod-conmon-f23484be94a87c5b84e229e926ccb938681f235908c543bedc36ce6ac2557d51.scope.
Oct 11 01:43:56 compute-0 systemd[1]: libpod-0946066451d4bcaf5e1559d0fec865918ec4d6eee663fb91c1096be027252dc3.scope: Deactivated successfully.
Oct 11 01:43:56 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:56 compute-0 systemd[1]: var-lib-containers-storage-overlay-07e905115afd54f8d8231c9895c4096bf1843422e198f0003f134ddde399aa71-merged.mount: Deactivated successfully.
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:43:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/79bb43b68c7e59d3900019724019858590249756bd1383ef6a45529a48c8cd3e/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 1)
Oct 11 01:43:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/79bb43b68c7e59d3900019724019858590249756bd1383ef6a45529a48c8cd3e/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 1)
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 1)
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 1)
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 1)
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 1)
Oct 11 01:43:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/79bb43b68c7e59d3900019724019858590249756bd1383ef6a45529a48c8cd3e/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "vms", "var": "pg_num", "val": "32"} v 0) v1
Oct 11 01:43:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/79bb43b68c7e59d3900019724019858590249756bd1383ef6a45529a48c8cd3e/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:56 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "vms", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:43:56 compute-0 podman[213612]: 2025-10-11 01:43:56.529088736 +0000 UTC m=+1.535743078 container remove 0946066451d4bcaf5e1559d0fec865918ec4d6eee663fb91c1096be027252dc3 (image=quay.io/ceph/ceph:v18, name=serene_blackwell, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:43:56 compute-0 podman[213755]: 2025-10-11 01:43:56.549764048 +0000 UTC m=+0.320448476 container init f23484be94a87c5b84e229e926ccb938681f235908c543bedc36ce6ac2557d51 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_hamilton, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:43:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:43:56 compute-0 systemd[1]: libpod-conmon-0946066451d4bcaf5e1559d0fec865918ec4d6eee663fb91c1096be027252dc3.scope: Deactivated successfully.
Oct 11 01:43:56 compute-0 podman[213755]: 2025-10-11 01:43:56.566601776 +0000 UTC m=+0.337286154 container start f23484be94a87c5b84e229e926ccb938681f235908c543bedc36ce6ac2557d51 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_hamilton, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:56 compute-0 podman[213755]: 2025-10-11 01:43:56.574921042 +0000 UTC m=+0.345605470 container attach f23484be94a87c5b84e229e926ccb938681f235908c543bedc36ce6ac2557d51 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_hamilton, ceph=True, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Oct 11 01:43:56 compute-0 sudo[213561]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:56 compute-0 sudo[213810]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nycymgxarvhbvqighbckptwireplhtws ; /usr/bin/python3'
Oct 11 01:43:56 compute-0 sudo[213810]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:43:56 compute-0 python3[213812]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   osd pool application enable volumes rbd _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:43:57 compute-0 podman[213813]: 2025-10-11 01:43:57.097383216 +0000 UTC m=+0.099004371 container create 6cbf53b312ef28de2d184c72c0ea72be8a2e76dfc89e33a9fac6c6f538668754 (image=quay.io/ceph/ceph:v18, name=hopeful_torvalds, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:57 compute-0 podman[213813]: 2025-10-11 01:43:57.062735561 +0000 UTC m=+0.064356776 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:43:57 compute-0 systemd[1]: Started libpod-conmon-6cbf53b312ef28de2d184c72c0ea72be8a2e76dfc89e33a9fac6c6f538668754.scope.
Oct 11 01:43:57 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:57 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5be8d7a665338c6c7fa4a9f2a2f8101228dc2528777ccb75c77c2b25c2f6b3dd/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:57 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5be8d7a665338c6c7fa4a9f2a2f8101228dc2528777ccb75c77c2b25c2f6b3dd/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:57 compute-0 podman[213813]: 2025-10-11 01:43:57.24108916 +0000 UTC m=+0.242710315 container init 6cbf53b312ef28de2d184c72c0ea72be8a2e76dfc89e33a9fac6c6f538668754 (image=quay.io/ceph/ceph:v18, name=hopeful_torvalds, CEPH_REF=reef, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 01:43:57 compute-0 podman[213813]: 2025-10-11 01:43:57.259434763 +0000 UTC m=+0.261055898 container start 6cbf53b312ef28de2d184c72c0ea72be8a2e76dfc89e33a9fac6c6f538668754 (image=quay.io/ceph/ceph:v18, name=hopeful_torvalds, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 01:43:57 compute-0 podman[213813]: 2025-10-11 01:43:57.264783171 +0000 UTC m=+0.266404316 container attach 6cbf53b312ef28de2d184c72c0ea72be8a2e76dfc89e33a9fac6c6f538668754 (image=quay.io/ceph/ceph:v18, name=hopeful_torvalds, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:43:57 compute-0 ceph-mon[191930]: log_channel(cluster) log [WRN] : Health check update: 5 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)
Oct 11 01:43:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e31 do_prune osdmap full prune enabled
Oct 11 01:43:57 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "vms", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:43:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e32 e32: 3 total, 3 up, 3 in
Oct 11 01:43:57 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e32: 3 total, 3 up, 3 in
Oct 11 01:43:57 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3770001800' entity='client.admin' cmd='[{"prefix": "osd pool application enable", "pool": "vms", "app": "rbd"}]': finished
Oct 11 01:43:57 compute-0 ceph-mon[191930]: osdmap e31: 3 total, 3 up, 3 in
Oct 11 01:43:57 compute-0 ceph-mon[191930]: pgmap v82: 7 pgs: 7 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:57 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "vms", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:43:57 compute-0 ceph-mon[191930]: Health check update: 5 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)
Oct 11 01:43:57 compute-0 ceph-mgr[192233]: [progress INFO root] update: starting ev f16391ec-93b4-417e-be78-56cc7c0efe02 (PG autoscaler increasing pool 2 PGs from 1 to 32)
Oct 11 01:43:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "volumes", "var": "pg_num", "val": "32"} v 0) v1
Oct 11 01:43:57 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "volumes", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]: {
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:     "0": [
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:         {
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "devices": [
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "/dev/loop3"
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             ],
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "lv_name": "ceph_lv0",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "lv_size": "21470642176",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "name": "ceph_lv0",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "tags": {
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.cluster_name": "ceph",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.crush_device_class": "",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.encrypted": "0",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.osd_id": "0",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.type": "block",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.vdo": "0"
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             },
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "type": "block",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "vg_name": "ceph_vg0"
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:         }
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:     ],
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:     "1": [
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:         {
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "devices": [
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "/dev/loop4"
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             ],
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "lv_name": "ceph_lv1",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "lv_size": "21470642176",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "name": "ceph_lv1",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "tags": {
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.cluster_name": "ceph",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.crush_device_class": "",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.encrypted": "0",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.osd_id": "1",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.type": "block",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.vdo": "0"
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             },
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "type": "block",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "vg_name": "ceph_vg1"
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:         }
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:     ],
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:     "2": [
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:         {
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "devices": [
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "/dev/loop5"
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             ],
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "lv_name": "ceph_lv2",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "lv_size": "21470642176",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "name": "ceph_lv2",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "tags": {
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.cluster_name": "ceph",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.crush_device_class": "",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.encrypted": "0",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.osd_id": "2",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.type": "block",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:                 "ceph.vdo": "0"
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             },
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "type": "block",
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:             "vg_name": "ceph_vg2"
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:         }
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]:     ]
Oct 11 01:43:57 compute-0 gracious_hamilton[213773]: }
Oct 11 01:43:57 compute-0 systemd[1]: libpod-f23484be94a87c5b84e229e926ccb938681f235908c543bedc36ce6ac2557d51.scope: Deactivated successfully.
Oct 11 01:43:57 compute-0 podman[213755]: 2025-10-11 01:43:57.490847673 +0000 UTC m=+1.261532051 container died f23484be94a87c5b84e229e926ccb938681f235908c543bedc36ce6ac2557d51 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_hamilton, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:43:57 compute-0 systemd[1]: var-lib-containers-storage-overlay-79bb43b68c7e59d3900019724019858590249756bd1383ef6a45529a48c8cd3e-merged.mount: Deactivated successfully.
Oct 11 01:43:57 compute-0 podman[213755]: 2025-10-11 01:43:57.609468844 +0000 UTC m=+1.380153232 container remove f23484be94a87c5b84e229e926ccb938681f235908c543bedc36ce6ac2557d51 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_hamilton, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS)
Oct 11 01:43:57 compute-0 systemd[1]: libpod-conmon-f23484be94a87c5b84e229e926ccb938681f235908c543bedc36ce6ac2557d51.scope: Deactivated successfully.
Oct 11 01:43:57 compute-0 sudo[213624]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:57 compute-0 sudo[213869]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:57 compute-0 sudo[213869]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:57 compute-0 sudo[213869]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool application enable", "pool": "volumes", "app": "rbd"} v 0) v1
Oct 11 01:43:57 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/852616946' entity='client.admin' cmd=[{"prefix": "osd pool application enable", "pool": "volumes", "app": "rbd"}]: dispatch
Oct 11 01:43:57 compute-0 sudo[213894]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:43:57 compute-0 sudo[213894]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:57 compute-0 sudo[213894]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:58 compute-0 sudo[213920]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:43:58 compute-0 sudo[213920]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:58 compute-0 sudo[213920]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:58 compute-0 sudo[213945]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:43:58 compute-0 sudo[213945]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:43:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v84: 7 pgs: 7 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "vms", "var": "pg_num_actual", "val": "32"} v 0) v1
Oct 11 01:43:58 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "vms", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:43:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e32 do_prune osdmap full prune enabled
Oct 11 01:43:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "vms", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:43:58 compute-0 ceph-mon[191930]: osdmap e32: 3 total, 3 up, 3 in
Oct 11 01:43:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "volumes", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:43:58 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/852616946' entity='client.admin' cmd=[{"prefix": "osd pool application enable", "pool": "volumes", "app": "rbd"}]: dispatch
Oct 11 01:43:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "vms", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:43:58 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "volumes", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:43:58 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/852616946' entity='client.admin' cmd='[{"prefix": "osd pool application enable", "pool": "volumes", "app": "rbd"}]': finished
Oct 11 01:43:58 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "vms", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:43:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e33 e33: 3 total, 3 up, 3 in
Oct 11 01:43:58 compute-0 hopeful_torvalds[213828]: enabled application 'rbd' on pool 'volumes'
Oct 11 01:43:58 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e33: 3 total, 3 up, 3 in
Oct 11 01:43:58 compute-0 ceph-mgr[192233]: [progress INFO root] update: starting ev 644154e4-7831-465d-bf9d-c8c4c33794c3 (PG autoscaler increasing pool 3 PGs from 1 to 32)
Oct 11 01:43:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "backups", "var": "pg_num", "val": "32"} v 0) v1
Oct 11 01:43:58 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "backups", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:43:58 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 33 pg[2.0( empty local-lis/les=19/20 n=0 ec=19/19 lis/c=19/19 les/c/f=20/20/0 sis=33 pruub=10.584838867s) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active pruub 42.994712830s@ mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [2], acting_primary 2 -> 2, up_primary 2 -> 2, role 0 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:43:58 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 33 pg[2.0( empty local-lis/les=19/20 n=0 ec=19/19 lis/c=19/19 les/c/f=20/20/0 sis=33 pruub=10.584838867s) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown pruub 42.994712830s@ mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:58 compute-0 systemd[1]: libpod-6cbf53b312ef28de2d184c72c0ea72be8a2e76dfc89e33a9fac6c6f538668754.scope: Deactivated successfully.
Oct 11 01:43:58 compute-0 podman[213813]: 2025-10-11 01:43:58.496947491 +0000 UTC m=+1.498568666 container died 6cbf53b312ef28de2d184c72c0ea72be8a2e76dfc89e33a9fac6c6f538668754 (image=quay.io/ceph/ceph:v18, name=hopeful_torvalds, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3)
Oct 11 01:43:58 compute-0 systemd[1]: var-lib-containers-storage-overlay-5be8d7a665338c6c7fa4a9f2a2f8101228dc2528777ccb75c77c2b25c2f6b3dd-merged.mount: Deactivated successfully.
Oct 11 01:43:58 compute-0 podman[213813]: 2025-10-11 01:43:58.580392861 +0000 UTC m=+1.582013996 container remove 6cbf53b312ef28de2d184c72c0ea72be8a2e76dfc89e33a9fac6c6f538668754 (image=quay.io/ceph/ceph:v18, name=hopeful_torvalds, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:43:58 compute-0 systemd[1]: libpod-conmon-6cbf53b312ef28de2d184c72c0ea72be8a2e76dfc89e33a9fac6c6f538668754.scope: Deactivated successfully.
Oct 11 01:43:58 compute-0 sudo[213810]: pam_unix(sudo:session): session closed for user root
Oct 11 01:43:58 compute-0 podman[214020]: 2025-10-11 01:43:58.799008302 +0000 UTC m=+0.061499042 container create 787aafa7ba854ca8e66b3de08665e2a4c3959d90f61ede8a5bf8fb78f3a437c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_williamson, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:43:58 compute-0 sudo[214057]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ebvmyjywrojbyuhwmmzqhkhdfpaopork ; /usr/bin/python3'
Oct 11 01:43:58 compute-0 sudo[214057]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:43:58 compute-0 systemd[1]: Started libpod-conmon-787aafa7ba854ca8e66b3de08665e2a4c3959d90f61ede8a5bf8fb78f3a437c2.scope.
Oct 11 01:43:58 compute-0 podman[214020]: 2025-10-11 01:43:58.780312858 +0000 UTC m=+0.042803608 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:58 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:58 compute-0 podman[214020]: 2025-10-11 01:43:58.90604991 +0000 UTC m=+0.168540670 container init 787aafa7ba854ca8e66b3de08665e2a4c3959d90f61ede8a5bf8fb78f3a437c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_williamson, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 01:43:58 compute-0 podman[214020]: 2025-10-11 01:43:58.915657554 +0000 UTC m=+0.178148284 container start 787aafa7ba854ca8e66b3de08665e2a4c3959d90f61ede8a5bf8fb78f3a437c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_williamson, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef)
Oct 11 01:43:58 compute-0 podman[214020]: 2025-10-11 01:43:58.920166848 +0000 UTC m=+0.182657638 container attach 787aafa7ba854ca8e66b3de08665e2a4c3959d90f61ede8a5bf8fb78f3a437c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_williamson, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:43:58 compute-0 strange_williamson[214062]: 167 167
Oct 11 01:43:58 compute-0 podman[214020]: 2025-10-11 01:43:58.923302931 +0000 UTC m=+0.185793671 container died 787aafa7ba854ca8e66b3de08665e2a4c3959d90f61ede8a5bf8fb78f3a437c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_williamson, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:58 compute-0 systemd[1]: libpod-787aafa7ba854ca8e66b3de08665e2a4c3959d90f61ede8a5bf8fb78f3a437c2.scope: Deactivated successfully.
Oct 11 01:43:58 compute-0 systemd[1]: var-lib-containers-storage-overlay-52cb0ffaf6bccab851034517a17c4f1508e88f07dcd63612ba41972ca1b2338d-merged.mount: Deactivated successfully.
Oct 11 01:43:58 compute-0 podman[214020]: 2025-10-11 01:43:58.969747665 +0000 UTC m=+0.232238405 container remove 787aafa7ba854ca8e66b3de08665e2a4c3959d90f61ede8a5bf8fb78f3a437c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_williamson, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:43:58 compute-0 python3[214059]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   osd pool application enable backups rbd _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
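[editor's note] This Ansible command task is the source of the short-lived, randomly named ceph containers around it; tender_franklin below, which prints enabled application 'rbd' on pool 'backups', is this task's run. Each is a one-shot `ceph` CLI call using `--entrypoint ceph` against the v18 image. A sketch of the same invocation via subprocess, arguments copied from the _raw_params above:

    import subprocess

    # One-shot ceph CLI in the pinned image, mirroring the ansible
    # _raw_params logged above (fsid and paths copied from this log).
    cmd = [
        "podman", "run", "--rm", "--net=host", "--ipc=host",
        "--volume", "/etc/ceph:/etc/ceph:z",
        "--volume", "/home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z",
        "--entrypoint", "ceph", "quay.io/ceph/ceph:v18",
        "--fsid", "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
        "-c", "/etc/ceph/ceph.conf",
        "-k", "/etc/ceph/ceph.client.admin.keyring",
        "osd", "pool", "application", "enable", "backups", "rbd",
    ]
    print(subprocess.run(cmd, capture_output=True, text=True, check=True).stdout)

Because of `--rm`, podman logs the full create/init/start/attach/died/remove lifecycle for each such call, which is exactly the container churn visible throughout this section.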
Oct 11 01:43:59 compute-0 systemd[1]: libpod-conmon-787aafa7ba854ca8e66b3de08665e2a4c3959d90f61ede8a5bf8fb78f3a437c2.scope: Deactivated successfully.
Oct 11 01:43:59 compute-0 podman[214076]: 2025-10-11 01:43:59.0935457 +0000 UTC m=+0.080630028 container create 04925013f0f3223fb6eccbaf01951a2c507cdebc81df7684aee0bbcbc50d340d (image=quay.io/ceph/ceph:v18, name=tender_franklin, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:43:59 compute-0 podman[214076]: 2025-10-11 01:43:59.059286706 +0000 UTC m=+0.046371044 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:43:59 compute-0 systemd[1]: Started libpod-conmon-04925013f0f3223fb6eccbaf01951a2c507cdebc81df7684aee0bbcbc50d340d.scope.
Oct 11 01:43:59 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:59 compute-0 podman[214096]: 2025-10-11 01:43:59.203215516 +0000 UTC m=+0.083528284 container create ad6e228ab0edf5b9532dd712a7ded74eaa6c4bf48bc8a4dac9ab120a56e510b1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_pascal, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 01:43:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0bcf6a544a38a5a8099660aabde35486b766635b5e9cb74c01df0598c178bb11/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0bcf6a544a38a5a8099660aabde35486b766635b5e9cb74c01df0598c178bb11/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:59 compute-0 podman[214076]: 2025-10-11 01:43:59.237618854 +0000 UTC m=+0.224703182 container init 04925013f0f3223fb6eccbaf01951a2c507cdebc81df7684aee0bbcbc50d340d (image=quay.io/ceph/ceph:v18, name=tender_franklin, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default)
Oct 11 01:43:59 compute-0 podman[214076]: 2025-10-11 01:43:59.247933709 +0000 UTC m=+0.235018037 container start 04925013f0f3223fb6eccbaf01951a2c507cdebc81df7684aee0bbcbc50d340d (image=quay.io/ceph/ceph:v18, name=tender_franklin, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 01:43:59 compute-0 podman[214076]: 2025-10-11 01:43:59.254436332 +0000 UTC m=+0.241520620 container attach 04925013f0f3223fb6eccbaf01951a2c507cdebc81df7684aee0bbcbc50d340d (image=quay.io/ceph/ceph:v18, name=tender_franklin, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 01:43:59 compute-0 podman[214096]: 2025-10-11 01:43:59.173134085 +0000 UTC m=+0.053446833 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:43:59 compute-0 systemd[1]: Started libpod-conmon-ad6e228ab0edf5b9532dd712a7ded74eaa6c4bf48bc8a4dac9ab120a56e510b1.scope.
Oct 11 01:43:59 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:43:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/746d29d171a6e048c8ef37cda323c5392373ba805ed2bba5bf712503ffecdeb0/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/746d29d171a6e048c8ef37cda323c5392373ba805ed2bba5bf712503ffecdeb0/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/746d29d171a6e048c8ef37cda323c5392373ba805ed2bba5bf712503ffecdeb0/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/746d29d171a6e048c8ef37cda323c5392373ba805ed2bba5bf712503ffecdeb0/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:43:59 compute-0 podman[214096]: 2025-10-11 01:43:59.358024158 +0000 UTC m=+0.238336936 container init ad6e228ab0edf5b9532dd712a7ded74eaa6c4bf48bc8a4dac9ab120a56e510b1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_pascal, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507)
Oct 11 01:43:59 compute-0 podman[214096]: 2025-10-11 01:43:59.390150959 +0000 UTC m=+0.270463697 container start ad6e228ab0edf5b9532dd712a7ded74eaa6c4bf48bc8a4dac9ab120a56e510b1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_pascal, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:43:59 compute-0 podman[214096]: 2025-10-11 01:43:59.395771045 +0000 UTC m=+0.276083783 container attach ad6e228ab0edf5b9532dd712a7ded74eaa6c4bf48bc8a4dac9ab120a56e510b1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_pascal, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2)
Oct 11 01:43:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e33 do_prune osdmap full prune enabled
Oct 11 01:43:59 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "backups", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:43:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e34 e34: 3 total, 3 up, 3 in
Oct 11 01:43:59 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e34: 3 total, 3 up, 3 in
Oct 11 01:43:59 compute-0 ceph-mgr[192233]: [progress INFO root] update: starting ev 8cc46e1c-f6f1-4099-aae8-998aa4309eba (PG autoscaler increasing pool 4 PGs from 1 to 32)
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.1f( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.1c( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.1e( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "images", "var": "pg_num", "val": "32"} v 0) v1
Oct 11 01:43:59 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "images", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.a( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.b( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.6( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.9( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.5( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.4( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-mon[191930]: pgmap v84: 7 pgs: 7 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.1d( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.3( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "volumes", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:43:59 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/852616946' entity='client.admin' cmd='[{"prefix": "osd pool application enable", "pool": "volumes", "app": "rbd"}]': finished
Oct 11 01:43:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "vms", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:43:59 compute-0 ceph-mon[191930]: osdmap e33: 3 total, 3 up, 3 in
Oct 11 01:43:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "backups", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.8( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.7( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.2( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.1( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.c( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.d( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.e( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.10( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.11( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.14( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.12( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.16( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.15( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.17( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.18( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.13( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.19( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.f( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.1f( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.1b( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.1a( empty local-lis/les=19/20 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.a( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.1c( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.5( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.1e( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.9( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.3( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.6( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.8( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.1d( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.b( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.2( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.7( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.1( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.c( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.d( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.0( empty local-lis/les=33/34 n=0 ec=19/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.10( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.4( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.e( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.14( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.11( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.12( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.16( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.17( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.18( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.13( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.f( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.15( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.1b( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.1a( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 34 pg[2.19( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=19/19 les/c/f=20/20/0 sis=33) [2] r=0 lpr=33 pi=[19,33)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:43:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e34 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:43:59 compute-0 podman[157119]: time="2025-10-11T01:43:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:43:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:43:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32236 "" "Go-http-client/1.1"
Oct 11 01:43:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:43:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6656 "" "Go-http-client/1.1"
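[editor's note] The podman[157119] entries are the podman API service answering an inventory/metrics client over the libpod REST socket (note the Go-http-client user agent). A hedged sketch of the same containers/json query, assuming the default root socket path /run/podman/podman.sock, which the log itself does not show:

    import requests_unixsocket  # third-party: pip install requests-unixsocket

    session = requests_unixsocket.Session()
    # Same endpoint and query string as the GET logged above.
    resp = session.get(
        "http+unix://%2Frun%2Fpodman%2Fpodman.sock"
        "/v4.9.3/libpod/containers/json?all=true&external=false"
    )
    for c in resp.json():
        print(c["Id"][:12], c["Names"], c["State"])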
Oct 11 01:43:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool application enable", "pool": "backups", "app": "rbd"} v 0) v1
Oct 11 01:43:59 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1675721644' entity='client.admin' cmd=[{"prefix": "osd pool application enable", "pool": "backups", "app": "rbd"}]: dispatch
Oct 11 01:44:00 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.1 scrub starts
Oct 11 01:44:00 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.1 scrub ok
Oct 11 01:44:00 compute-0 podman[214144]: 2025-10-11 01:44:00.221801895 +0000 UTC m=+0.114179701 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, url=https://catalog.redhat.com/en/search?searchType=containers, build-date=2025-08-20T13:12:41, io.buildah.version=1.33.7, maintainer=Red Hat, Inc., managed_by=edpm_ansible, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, vendor=Red Hat, Inc., config_id=edpm, io.openshift.expose-services=, vcs-type=git, version=9.6, distribution-scope=public, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., name=ubi9-minimal, architecture=x86_64, container_name=openstack_network_exporter, com.redhat.component=ubi9-minimal-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 01:44:00 compute-0 podman[214143]: 2025-10-11 01:44:00.249414562 +0000 UTC m=+0.133997407 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 01:44:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v87: 38 pgs: 31 unknown, 7 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "backups", "var": "pg_num_actual", "val": "32"} v 0) v1
Oct 11 01:44:00 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "backups", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "volumes", "var": "pg_num_actual", "val": "32"} v 0) v1
Oct 11 01:44:00 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "volumes", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e34 do_prune osdmap full prune enabled
Oct 11 01:44:00 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "images", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:44:00 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1675721644' entity='client.admin' cmd='[{"prefix": "osd pool application enable", "pool": "backups", "app": "rbd"}]': finished
Oct 11 01:44:00 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "backups", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:44:00 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "volumes", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:44:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e35 e35: 3 total, 3 up, 3 in
Oct 11 01:44:00 compute-0 tender_franklin[214108]: enabled application 'rbd' on pool 'backups'
Oct 11 01:44:00 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e35: 3 total, 3 up, 3 in
Oct 11 01:44:00 compute-0 ceph-mgr[192233]: [progress INFO root] update: starting ev 534e5f70-7666-4211-89fa-4d7d791688eb (PG autoscaler increasing pool 5 PGs from 1 to 32)
Oct 11 01:44:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "cephfs.cephfs.meta", "var": "pg_num", "val": "32"} v 0) v1
Oct 11 01:44:00 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "cephfs.cephfs.meta", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:44:00 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "backups", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:44:00 compute-0 ceph-mon[191930]: osdmap e34: 3 total, 3 up, 3 in
Oct 11 01:44:00 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "images", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:44:00 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1675721644' entity='client.admin' cmd=[{"prefix": "osd pool application enable", "pool": "backups", "app": "rbd"}]: dispatch
Oct 11 01:44:00 compute-0 ceph-mon[191930]: 2.1 scrub starts
Oct 11 01:44:00 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "backups", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:00 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "volumes", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:00 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "images", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:44:00 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1675721644' entity='client.admin' cmd='[{"prefix": "osd pool application enable", "pool": "backups", "app": "rbd"}]': finished
Oct 11 01:44:00 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "backups", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:44:00 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "volumes", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:44:00 compute-0 ceph-mon[191930]: osdmap e35: 3 total, 3 up, 3 in
Oct 11 01:44:00 compute-0 systemd[1]: libpod-04925013f0f3223fb6eccbaf01951a2c507cdebc81df7684aee0bbcbc50d340d.scope: Deactivated successfully.
Oct 11 01:44:00 compute-0 podman[214076]: 2025-10-11 01:44:00.501495673 +0000 UTC m=+1.488580061 container died 04925013f0f3223fb6eccbaf01951a2c507cdebc81df7684aee0bbcbc50d340d (image=quay.io/ceph/ceph:v18, name=tender_franklin, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507)
Oct 11 01:44:00 compute-0 systemd[1]: var-lib-containers-storage-overlay-0bcf6a544a38a5a8099660aabde35486b766635b5e9cb74c01df0598c178bb11-merged.mount: Deactivated successfully.
Oct 11 01:44:00 compute-0 podman[214076]: 2025-10-11 01:44:00.597391052 +0000 UTC m=+1.584475350 container remove 04925013f0f3223fb6eccbaf01951a2c507cdebc81df7684aee0bbcbc50d340d (image=quay.io/ceph/ceph:v18, name=tender_franklin, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.license=GPLv2)
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]: {
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:         "osd_id": 1,
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:         "type": "bluestore"
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:     },
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:         "osd_id": 2,
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:         "type": "bluestore"
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:     },
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:         "osd_id": 0,
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:         "type": "bluestore"
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]:     }
Oct 11 01:44:00 compute-0 inspiring_pascal[214118]: }
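[editor's note] The inspiring_pascal block above is the JSON answer to the `ceph-volume raw list --format json` call dispatched at 01:43:58: a map from OSD UUID to its bluestore LV device. A short sketch reducing that output to an osd_id -> device table (field names exactly as printed above; assumes it runs where ceph-volume is available, e.g. inside the ceph container):

    import json
    import subprocess

    # Re-run the listing with the same flags and tabulate the result.
    out = subprocess.run(
        ["ceph-volume", "raw", "list", "--format", "json"],
        capture_output=True, text=True, check=True,
    ).stdout
    for osd_uuid, meta in sorted(json.loads(out).items(),
                                 key=lambda kv: kv[1]["osd_id"]):
        print(f"osd.{meta['osd_id']}  {meta['device']}  ceph_fsid={meta['ceph_fsid']}")

On this node that yields osd.0 -> ceph_vg0-ceph_lv0, osd.1 -> ceph_vg1-ceph_lv1, osd.2 -> ceph_vg2-ceph_lv2, all under the same ceph_fsid.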
Oct 11 01:44:00 compute-0 systemd[1]: libpod-conmon-04925013f0f3223fb6eccbaf01951a2c507cdebc81df7684aee0bbcbc50d340d.scope: Deactivated successfully.
Oct 11 01:44:00 compute-0 sudo[214057]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:00 compute-0 systemd[1]: libpod-ad6e228ab0edf5b9532dd712a7ded74eaa6c4bf48bc8a4dac9ab120a56e510b1.scope: Deactivated successfully.
Oct 11 01:44:00 compute-0 systemd[1]: libpod-ad6e228ab0edf5b9532dd712a7ded74eaa6c4bf48bc8a4dac9ab120a56e510b1.scope: Consumed 1.251s CPU time.
Oct 11 01:44:00 compute-0 podman[214096]: 2025-10-11 01:44:00.661947922 +0000 UTC m=+1.542260680 container died ad6e228ab0edf5b9532dd712a7ded74eaa6c4bf48bc8a4dac9ab120a56e510b1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_pascal, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=reef)
Oct 11 01:44:00 compute-0 systemd[1]: var-lib-containers-storage-overlay-746d29d171a6e048c8ef37cda323c5392373ba805ed2bba5bf712503ffecdeb0-merged.mount: Deactivated successfully.
Oct 11 01:44:00 compute-0 podman[214096]: 2025-10-11 01:44:00.775688439 +0000 UTC m=+1.656001167 container remove ad6e228ab0edf5b9532dd712a7ded74eaa6c4bf48bc8a4dac9ab120a56e510b1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_pascal, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 01:44:00 compute-0 systemd[1]: libpod-conmon-ad6e228ab0edf5b9532dd712a7ded74eaa6c4bf48bc8a4dac9ab120a56e510b1.scope: Deactivated successfully.
Oct 11 01:44:00 compute-0 sudo[213945]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:44:00 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:44:00 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:00 compute-0 sudo[214259]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oeqsksgnzujtqzomkbylvxeeornujqyt ; /usr/bin/python3'
Oct 11 01:44:00 compute-0 sudo[214259]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:00 compute-0 sudo[214261]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:00 compute-0 sudo[214261]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:00 compute-0 sudo[214261]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:00 compute-0 python3[214263]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   osd pool application enable images rbd _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:01 compute-0 sudo[214288]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:44:01 compute-0 sudo[214288]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:01 compute-0 sudo[214288]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:01 compute-0 podman[214287]: 2025-10-11 01:44:01.128599525 +0000 UTC m=+0.104518565 container create 7fc3003f8370dd613d05541f5b0bfc5ddb163d98b9a867251caa8c618af9d1ed (image=quay.io/ceph/ceph:v18, name=pedantic_chebyshev, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:44:01 compute-0 podman[214287]: 2025-10-11 01:44:01.09162477 +0000 UTC m=+0.067543860 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:01 compute-0 systemd[1]: Started libpod-conmon-7fc3003f8370dd613d05541f5b0bfc5ddb163d98b9a867251caa8c618af9d1ed.scope.
Oct 11 01:44:01 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6429de7da52f500113fbc68af11f313c9e6d17ab81dd39d558224e5a5b1e822a/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6429de7da52f500113fbc68af11f313c9e6d17ab81dd39d558224e5a5b1e822a/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:01 compute-0 podman[214287]: 2025-10-11 01:44:01.28181937 +0000 UTC m=+0.257738460 container init 7fc3003f8370dd613d05541f5b0bfc5ddb163d98b9a867251caa8c618af9d1ed (image=quay.io/ceph/ceph:v18, name=pedantic_chebyshev, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 01:44:01 compute-0 podman[214287]: 2025-10-11 01:44:01.300169833 +0000 UTC m=+0.276088873 container start 7fc3003f8370dd613d05541f5b0bfc5ddb163d98b9a867251caa8c618af9d1ed (image=quay.io/ceph/ceph:v18, name=pedantic_chebyshev, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:44:01 compute-0 podman[214287]: 2025-10-11 01:44:01.307004895 +0000 UTC m=+0.282923985 container attach 7fc3003f8370dd613d05541f5b0bfc5ddb163d98b9a867251caa8c618af9d1ed (image=quay.io/ceph/ceph:v18, name=pedantic_chebyshev, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20250507, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:44:01 compute-0 openstack_network_exporter[159265]: ERROR   01:44:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:44:01 compute-0 openstack_network_exporter[159265]: ERROR   01:44:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:44:01 compute-0 openstack_network_exporter[159265]: ERROR   01:44:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:44:01 compute-0 openstack_network_exporter[159265]: ERROR   01:44:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:44:01 compute-0 openstack_network_exporter[159265]: ERROR   01:44:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:44:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e35 do_prune osdmap full prune enabled
Oct 11 01:44:01 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "cephfs.cephfs.meta", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:44:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e36 e36: 3 total, 3 up, 3 in
Oct 11 01:44:01 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e36: 3 total, 3 up, 3 in
Oct 11 01:44:01 compute-0 ceph-mgr[192233]: [progress INFO root] update: starting ev 19ba7efb-2a8e-4ef3-8746-2700dcfc7fa3 (PG autoscaler increasing pool 6 PGs from 1 to 32)
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 35 pg[3.0( empty local-lis/les=21/22 n=0 ec=21/21 lis/c=21/21 les/c/f=22/22/0 sis=35 pruub=9.683469772s) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active pruub 52.322486877s@ mbc={}] start_peering_interval up [1] -> [1], acting [1] -> [1], acting_primary 1 -> 1, up_primary 1 -> 1, role 0 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.0( empty local-lis/les=21/22 n=0 ec=21/21 lis/c=21/21 les/c/f=22/22/0 sis=35 pruub=9.683469772s) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown pruub 52.322486877s@ mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.4( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.5( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.9( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.8( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.14( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.15( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "cephfs.cephfs.data", "var": "pg_num", "val": "32"} v 0) v1
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.2( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "cephfs.cephfs.data", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.a( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.3( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.b( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.1e( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.1f( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.1c( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.1d( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.1( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.6( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.7( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.e( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.f( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.16( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.17( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.12( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.10( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.13( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.11( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.c( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.d( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.18( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.19( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.1a( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 36 pg[3.1b( empty local-lis/les=21/22 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-mon[191930]: 2.1 scrub ok
Oct 11 01:44:01 compute-0 ceph-mon[191930]: pgmap v87: 38 pgs: 31 unknown, 7 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:01 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "cephfs.cephfs.meta", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:44:01 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:01 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:01 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "cephfs.cephfs.meta", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:44:01 compute-0 ceph-mon[191930]: osdmap e36: 3 total, 3 up, 3 in
Oct 11 01:44:01 compute-0 ceph-mgr[192233]: [progress WARNING root] Starting Global Recovery Event,93 pgs not in active + clean state
Oct 11 01:44:01 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 35 pg[4.0( empty local-lis/les=23/24 n=0 ec=23/23 lis/c=23/23 les/c/f=24/24/0 sis=35 pruub=11.518368721s) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active pruub 61.725494385s@ mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [0], acting_primary 0 -> 0, up_primary 0 -> 0, role 0 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:01 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 35 pg[4.0( empty local-lis/les=23/24 n=0 ec=23/23 lis/c=23/23 les/c/f=24/24/0 sis=35 pruub=11.518368721s) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown pruub 61.725494385s@ mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool application enable", "pool": "images", "app": "rbd"} v 0) v1
Oct 11 01:44:01 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1626752993' entity='client.admin' cmd=[{"prefix": "osd pool application enable", "pool": "images", "app": "rbd"}]: dispatch
Oct 11 01:44:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v90: 100 pgs: 93 unknown, 7 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "cephfs.cephfs.meta", "var": "pg_num_actual", "val": "32"} v 0) v1
Oct 11 01:44:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "cephfs.cephfs.meta", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "images", "var": "pg_num_actual", "val": "32"} v 0) v1
Oct 11 01:44:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "images", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e36 do_prune osdmap full prune enabled
Oct 11 01:44:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "cephfs.cephfs.data", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:44:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1626752993' entity='client.admin' cmd='[{"prefix": "osd pool application enable", "pool": "images", "app": "rbd"}]': finished
Oct 11 01:44:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "cephfs.cephfs.meta", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:44:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "images", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:44:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e37 e37: 3 total, 3 up, 3 in
Oct 11 01:44:02 compute-0 pedantic_chebyshev[214327]: enabled application 'rbd' on pool 'images'
Oct 11 01:44:02 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e37: 3 total, 3 up, 3 in
Oct 11 01:44:02 compute-0 ceph-mgr[192233]: [progress INFO root] update: starting ev c1bcca8d-cc11-44ac-b33d-08fc5f4df188 (PG autoscaler increasing pool 7 PGs from 1 to 32)
Oct 11 01:44:02 compute-0 ceph-mgr[192233]: [progress INFO root] complete: finished ev f16391ec-93b4-417e-be78-56cc7c0efe02 (PG autoscaler increasing pool 2 PGs from 1 to 32)
Oct 11 01:44:02 compute-0 ceph-mgr[192233]: [progress INFO root] Completed event f16391ec-93b4-417e-be78-56cc7c0efe02 (PG autoscaler increasing pool 2 PGs from 1 to 32) in 5 seconds
Oct 11 01:44:02 compute-0 ceph-mgr[192233]: [progress INFO root] complete: finished ev 644154e4-7831-465d-bf9d-c8c4c33794c3 (PG autoscaler increasing pool 3 PGs from 1 to 32)
Oct 11 01:44:02 compute-0 ceph-mgr[192233]: [progress INFO root] Completed event 644154e4-7831-465d-bf9d-c8c4c33794c3 (PG autoscaler increasing pool 3 PGs from 1 to 32) in 4 seconds
Oct 11 01:44:02 compute-0 ceph-mgr[192233]: [progress INFO root] complete: finished ev 8cc46e1c-f6f1-4099-aae8-998aa4309eba (PG autoscaler increasing pool 4 PGs from 1 to 32)
Oct 11 01:44:02 compute-0 ceph-mgr[192233]: [progress INFO root] Completed event 8cc46e1c-f6f1-4099-aae8-998aa4309eba (PG autoscaler increasing pool 4 PGs from 1 to 32) in 3 seconds
Oct 11 01:44:02 compute-0 ceph-mgr[192233]: [progress INFO root] complete: finished ev 534e5f70-7666-4211-89fa-4d7d791688eb (PG autoscaler increasing pool 5 PGs from 1 to 32)
Oct 11 01:44:02 compute-0 ceph-mgr[192233]: [progress INFO root] Completed event 534e5f70-7666-4211-89fa-4d7d791688eb (PG autoscaler increasing pool 5 PGs from 1 to 32) in 2 seconds
Oct 11 01:44:02 compute-0 ceph-mgr[192233]: [progress INFO root] complete: finished ev 19ba7efb-2a8e-4ef3-8746-2700dcfc7fa3 (PG autoscaler increasing pool 6 PGs from 1 to 32)
Oct 11 01:44:02 compute-0 ceph-mgr[192233]: [progress INFO root] Completed event 19ba7efb-2a8e-4ef3-8746-2700dcfc7fa3 (PG autoscaler increasing pool 6 PGs from 1 to 32) in 1 seconds
Oct 11 01:44:02 compute-0 ceph-mgr[192233]: [progress INFO root] complete: finished ev c1bcca8d-cc11-44ac-b33d-08fc5f4df188 (PG autoscaler increasing pool 7 PGs from 1 to 32)
Oct 11 01:44:02 compute-0 ceph-mgr[192233]: [progress INFO root] Completed event c1bcca8d-cc11-44ac-b33d-08fc5f4df188 (PG autoscaler increasing pool 7 PGs from 1 to 32) in 0 seconds
Oct 11 01:44:02 compute-0 ceph-mon[191930]: log_channel(cluster) log [WRN] : Health check update: 3 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.1f( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "cephfs.cephfs.data", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:44:02 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1626752993' entity='client.admin' cmd=[{"prefix": "osd pool application enable", "pool": "images", "app": "rbd"}]: dispatch
Oct 11 01:44:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "cephfs.cephfs.meta", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "images", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "cephfs.cephfs.data", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:44:02 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1626752993' entity='client.admin' cmd='[{"prefix": "osd pool application enable", "pool": "images", "app": "rbd"}]': finished
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.18( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "cephfs.cephfs.meta", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:44:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "images", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:44:02 compute-0 ceph-mon[191930]: osdmap e37: 3 total, 3 up, 3 in
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.16( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.15( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.14( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.13( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.12( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.11( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.f( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.17( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.d( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.c( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[6.0( empty local-lis/les=27/28 n=0 ec=27/27 lis/c=27/27 les/c/f=28/28/0 sis=37 pruub=14.819159508s) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active pruub 65.845046997s@ mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [0], acting_primary 0 -> 0, up_primary 0 -> 0, role 0 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.2( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.10( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.3( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.19( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.4( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.1( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.e( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.9( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.1a( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.5( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.a( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.6( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.1b( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.7( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.b( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.8( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.1c( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.1d( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.1f( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.1e( empty local-lis/les=23/24 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[6.0( empty local-lis/les=27/28 n=0 ec=27/27 lis/c=27/27 les/c/f=28/28/0 sis=37 pruub=14.819159508s) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown pruub 65.845046997s@ mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.1e( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.1d( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.a( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.9( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.8( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.1b( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.1c( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.7( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.3( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.5( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.1( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.2( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.b( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.c( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.d( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.f( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.e( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.10( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.11( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.12( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.13( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.14( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.16( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.15( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.17( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.19( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.1a( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.4( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.18( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.0( empty local-lis/les=35/37 n=0 ec=21/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 37 pg[3.6( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=21/21 les/c/f=22/22/0 sis=35) [1] r=0 lpr=35 pi=[21,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 systemd[1]: libpod-7fc3003f8370dd613d05541f5b0bfc5ddb163d98b9a867251caa8c618af9d1ed.scope: Deactivated successfully.
Oct 11 01:44:02 compute-0 podman[214287]: 2025-10-11 01:44:02.51804931 +0000 UTC m=+1.493968310 container died 7fc3003f8370dd613d05541f5b0bfc5ddb163d98b9a867251caa8c618af9d1ed (image=quay.io/ceph/ceph:v18, name=pedantic_chebyshev, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.18( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.16( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.15( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.14( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.13( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.12( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.11( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.f( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.d( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.0( empty local-lis/les=35/37 n=0 ec=23/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.c( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.2( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.17( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.10( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.3( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.4( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.19( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.1( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.5( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.9( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.a( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.1a( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.6( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.7( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.b( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.1b( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.1c( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.8( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.1d( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.e( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.1e( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 37 pg[4.1f( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=23/23 les/c/f=24/24/0 sis=35) [0] r=0 lpr=35 pi=[23,35)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:02 compute-0 systemd[1]: var-lib-containers-storage-overlay-6429de7da52f500113fbc68af11f313c9e6d17ab81dd39d558224e5a5b1e822a-merged.mount: Deactivated successfully.
Oct 11 01:44:02 compute-0 podman[214287]: 2025-10-11 01:44:02.590555796 +0000 UTC m=+1.566474836 container remove 7fc3003f8370dd613d05541f5b0bfc5ddb163d98b9a867251caa8c618af9d1ed (image=quay.io/ceph/ceph:v18, name=pedantic_chebyshev, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_REF=reef)
Oct 11 01:44:02 compute-0 systemd[1]: libpod-conmon-7fc3003f8370dd613d05541f5b0bfc5ddb163d98b9a867251caa8c618af9d1ed.scope: Deactivated successfully.
Oct 11 01:44:02 compute-0 sudo[214259]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:02 compute-0 sudo[214386]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nrpsgvdqtvljguylwtjtuirrkpomwmfr ; /usr/bin/python3'
Oct 11 01:44:02 compute-0 sudo[214386]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:03 compute-0 python3[214388]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   osd pool application enable cephfs.cephfs.meta cephfs _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:03 compute-0 podman[214389]: 2025-10-11 01:44:03.152063866 +0000 UTC m=+0.079102382 container create 12d76e4952d940fad3075d9b771636fc19d4eabb39eaea8b70cb59645f8a34d7 (image=quay.io/ceph/ceph:v18, name=musing_wilbur, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 37 pg[5.0( empty local-lis/les=25/26 n=0 ec=25/25 lis/c=25/25 les/c/f=26/26/0 sis=37 pruub=12.064429283s) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active pruub 49.210262299s@ mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [2], acting_primary 2 -> 2, up_primary 2 -> 2, role 0 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 37 pg[5.0( empty local-lis/les=25/26 n=0 ec=25/25 lis/c=25/25 les/c/f=26/26/0 sis=37 pruub=12.064429283s) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown pruub 49.210262299s@ mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 systemd[1]: Started libpod-conmon-12d76e4952d940fad3075d9b771636fc19d4eabb39eaea8b70cb59645f8a34d7.scope.
Oct 11 01:44:03 compute-0 podman[214389]: 2025-10-11 01:44:03.122087499 +0000 UTC m=+0.049125995 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:03 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:03 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fdc1133033c92727002fe9cb3932abd962a496437c499eba12ad6fd04b044dac/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:03 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fdc1133033c92727002fe9cb3932abd962a496437c499eba12ad6fd04b044dac/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:03 compute-0 podman[214389]: 2025-10-11 01:44:03.318631546 +0000 UTC m=+0.245670052 container init 12d76e4952d940fad3075d9b771636fc19d4eabb39eaea8b70cb59645f8a34d7 (image=quay.io/ceph/ceph:v18, name=musing_wilbur, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.license=GPLv2)
Oct 11 01:44:03 compute-0 podman[214389]: 2025-10-11 01:44:03.329641162 +0000 UTC m=+0.256679668 container start 12d76e4952d940fad3075d9b771636fc19d4eabb39eaea8b70cb59645f8a34d7 (image=quay.io/ceph/ceph:v18, name=musing_wilbur, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Oct 11 01:44:03 compute-0 podman[214389]: 2025-10-11 01:44:03.336199837 +0000 UTC m=+0.263238333 container attach 12d76e4952d940fad3075d9b771636fc19d4eabb39eaea8b70cb59645f8a34d7 (image=quay.io/ceph/ceph:v18, name=musing_wilbur, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 01:44:03 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.1 scrub starts
Oct 11 01:44:03 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.1 scrub ok
Oct 11 01:44:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e37 do_prune osdmap full prune enabled
Oct 11 01:44:03 compute-0 ceph-mon[191930]: pgmap v90: 100 pgs: 93 unknown, 7 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:03 compute-0 ceph-mon[191930]: Health check update: 3 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)
Oct 11 01:44:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e38 e38: 3 total, 3 up, 3 in
Oct 11 01:44:03 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e38: 3 total, 3 up, 3 in
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.1c( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.1f( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.1d( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.1e( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.10( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.12( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.13( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.14( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.15( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.16( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.8( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.17( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.9( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.a( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.b( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.7( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.6( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.5( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.4( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.3( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.2( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.1( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.f( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.e( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.d( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.c( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.1b( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.1a( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.19( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.18( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.11( empty local-lis/les=25/26 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.15( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.1a( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.17( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.16( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.11( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.10( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.13( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.12( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.d( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.c( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.f( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.e( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.2( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.3( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.1( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.1b( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.1f( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.b( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.18( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.7( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.8( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.6( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.14( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.19( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.4( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.9( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.5( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.a( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.1e( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.1f( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.1c( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.1d( empty local-lis/les=27/28 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.10( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.13( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.1d( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.14( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.15( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.16( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.1c( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.17( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.9( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.a( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.12( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.b( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.0( empty local-lis/les=37/38 n=0 ec=25/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.6( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.4( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.8( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.3( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.1( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.2( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.e( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.1e( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.d( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.c( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.1a( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.f( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.7( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.19( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.18( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.11( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.1b( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 38 pg[5.5( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=25/25 les/c/f=26/26/0 sis=37) [2] r=0 lpr=37 pi=[25,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.1 scrub starts
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.15( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.1a( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.17( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.16( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.10( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.13( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.12( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.c( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.11( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.f( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.0( empty local-lis/les=37/38 n=0 ec=27/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.3( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.1( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.1b( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.e( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.b( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.d( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.18( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.8( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.6( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.14( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.19( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.4( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.9( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.5( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.a( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.1e( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.1f( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.1c( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.1d( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.7( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 38 pg[6.2( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=27/27 les/c/f=28/28/0 sis=37) [0] r=0 lpr=37 pi=[27,37)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:03 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.1 scrub ok
Oct 11 01:44:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool application enable", "pool": "cephfs.cephfs.meta", "app": "cephfs"} v 0) v1
Oct 11 01:44:03 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/2049694541' entity='client.admin' cmd=[{"prefix": "osd pool application enable", "pool": "cephfs.cephfs.meta", "app": "cephfs"}]: dispatch
Oct 11 01:44:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v93: 162 pgs: 1 peering, 93 unknown, 68 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "cephfs.cephfs.data", "var": "pg_num_actual", "val": "32"} v 0) v1
Oct 11 01:44:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "cephfs.cephfs.data", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:04 compute-0 ceph-mon[191930]: 4.1 scrub starts
Oct 11 01:44:04 compute-0 ceph-mon[191930]: 4.1 scrub ok
Oct 11 01:44:04 compute-0 ceph-mon[191930]: osdmap e38: 3 total, 3 up, 3 in
Oct 11 01:44:04 compute-0 ceph-mon[191930]: 3.1 scrub starts
Oct 11 01:44:04 compute-0 ceph-mon[191930]: 3.1 scrub ok
Oct 11 01:44:04 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2049694541' entity='client.admin' cmd=[{"prefix": "osd pool application enable", "pool": "cephfs.cephfs.meta", "app": "cephfs"}]: dispatch
Oct 11 01:44:04 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "cephfs.cephfs.data", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e38 do_prune osdmap full prune enabled
Oct 11 01:44:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/2049694541' entity='client.admin' cmd='[{"prefix": "osd pool application enable", "pool": "cephfs.cephfs.meta", "app": "cephfs"}]': finished
Oct 11 01:44:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "cephfs.cephfs.data", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:44:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e39 e39: 3 total, 3 up, 3 in
Oct 11 01:44:04 compute-0 musing_wilbur[214404]: enabled application 'cephfs' on pool 'cephfs.cephfs.meta'
Oct 11 01:44:04 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e39: 3 total, 3 up, 3 in
Oct 11 01:44:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e39 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:44:04 compute-0 systemd[1]: libpod-12d76e4952d940fad3075d9b771636fc19d4eabb39eaea8b70cb59645f8a34d7.scope: Deactivated successfully.
Oct 11 01:44:04 compute-0 podman[214389]: 2025-10-11 01:44:04.589130972 +0000 UTC m=+1.516169498 container died 12d76e4952d940fad3075d9b771636fc19d4eabb39eaea8b70cb59645f8a34d7 (image=quay.io/ceph/ceph:v18, name=musing_wilbur, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:44:04 compute-0 systemd[1]: var-lib-containers-storage-overlay-fdc1133033c92727002fe9cb3932abd962a496437c499eba12ad6fd04b044dac-merged.mount: Deactivated successfully.
Oct 11 01:44:04 compute-0 systemd[193665]: Starting Mark boot as successful...
Oct 11 01:44:04 compute-0 systemd[193665]: Finished Mark boot as successful.
Oct 11 01:44:04 compute-0 podman[214389]: 2025-10-11 01:44:04.6857132 +0000 UTC m=+1.612751716 container remove 12d76e4952d940fad3075d9b771636fc19d4eabb39eaea8b70cb59645f8a34d7 (image=quay.io/ceph/ceph:v18, name=musing_wilbur, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507)
Oct 11 01:44:04 compute-0 systemd[1]: libpod-conmon-12d76e4952d940fad3075d9b771636fc19d4eabb39eaea8b70cb59645f8a34d7.scope: Deactivated successfully.
Oct 11 01:44:04 compute-0 sudo[214386]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:04 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.2 deep-scrub starts
Oct 11 01:44:04 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.2 deep-scrub ok
Oct 11 01:44:04 compute-0 sudo[214463]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nuslrnxekdkvwwcorivvwgqynivsoiin ; /usr/bin/python3'
Oct 11 01:44:04 compute-0 sudo[214463]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:05 compute-0 python3[214465]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   osd pool application enable cephfs.cephfs.data cephfs _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:05 compute-0 podman[214466]: 2025-10-11 01:44:05.256153535 +0000 UTC m=+0.080641788 container create 67c84d9480f5b097b6fdca4168aa72d8d66e997980956963a7a92644f410e6a8 (image=quay.io/ceph/ceph:v18, name=heuristic_hofstadter, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 01:44:05 compute-0 podman[214466]: 2025-10-11 01:44:05.23435848 +0000 UTC m=+0.058846763 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:05 compute-0 systemd[1]: Started libpod-conmon-67c84d9480f5b097b6fdca4168aa72d8d66e997980956963a7a92644f410e6a8.scope.
Oct 11 01:44:05 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b7733a7eb722c25f22ec5c1c81c1683a095b3a08e70a0505ba429fcad99e200e/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b7733a7eb722c25f22ec5c1c81c1683a095b3a08e70a0505ba429fcad99e200e/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:05 compute-0 podman[214466]: 2025-10-11 01:44:05.429143445 +0000 UTC m=+0.253631698 container init 67c84d9480f5b097b6fdca4168aa72d8d66e997980956963a7a92644f410e6a8 (image=quay.io/ceph/ceph:v18, name=heuristic_hofstadter, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:05 compute-0 podman[214466]: 2025-10-11 01:44:05.445983063 +0000 UTC m=+0.270471316 container start 67c84d9480f5b097b6fdca4168aa72d8d66e997980956963a7a92644f410e6a8 (image=quay.io/ceph/ceph:v18, name=heuristic_hofstadter, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, ceph=True, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:05 compute-0 podman[214466]: 2025-10-11 01:44:05.451430695 +0000 UTC m=+0.275918958 container attach 67c84d9480f5b097b6fdca4168aa72d8d66e997980956963a7a92644f410e6a8 (image=quay.io/ceph/ceph:v18, name=heuristic_hofstadter, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3)
Oct 11 01:44:05 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.2 scrub starts
Oct 11 01:44:05 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.2 scrub ok
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 39 pg[7.0( empty local-lis/les=29/30 n=0 ec=29/29 lis/c=29/29 les/c/f=30/30/0 sis=39 pruub=13.893761635s) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active pruub 60.549110413s@ mbc={}] start_peering_interval up [1] -> [1], acting [1] -> [1], acting_primary 1 -> 1, up_primary 1 -> 1, role 0 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 39 pg[7.0( empty local-lis/les=29/30 n=0 ec=29/29 lis/c=29/29 les/c/f=30/30/0 sis=39 pruub=13.893761635s) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown pruub 60.549110413s@ mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-mon[191930]: pgmap v93: 162 pgs: 1 peering, 93 unknown, 68 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:05 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2049694541' entity='client.admin' cmd='[{"prefix": "osd pool application enable", "pool": "cephfs.cephfs.meta", "app": "cephfs"}]': finished
Oct 11 01:44:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "cephfs.cephfs.data", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:44:05 compute-0 ceph-mon[191930]: osdmap e39: 3 total, 3 up, 3 in
Oct 11 01:44:05 compute-0 ceph-mon[191930]: 2.2 deep-scrub starts
Oct 11 01:44:05 compute-0 ceph-mon[191930]: 2.2 deep-scrub ok
Oct 11 01:44:05 compute-0 ceph-mon[191930]: 4.2 scrub starts
Oct 11 01:44:05 compute-0 ceph-mon[191930]: 4.2 scrub ok
Oct 11 01:44:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e39 do_prune osdmap full prune enabled
Oct 11 01:44:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e40 e40: 3 total, 3 up, 3 in
Oct 11 01:44:05 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e40: 3 total, 3 up, 3 in
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.1e( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.1c( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.1d( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.13( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.12( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.10( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.11( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.16( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.15( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.17( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.14( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.b( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.a( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.9( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.8( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.f( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.6( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.4( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.5( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.1( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.2( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.3( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.7( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.c( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.d( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.e( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.1f( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.18( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.19( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.1a( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.1b( empty local-lis/les=29/30 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.1e( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.1c( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.1d( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.13( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.12( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.10( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.15( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.17( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.11( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.b( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.14( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.9( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.8( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.6( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.a( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.4( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.0( empty local-lis/les=39/40 n=0 ec=29/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.f( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.5( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.16( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.1( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.3( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.2( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.1f( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.c( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.18( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.19( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.d( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.7( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.1a( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.1b( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 40 pg[7.e( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=29/29 les/c/f=30/30/0 sis=39) [1] r=0 lpr=39 pi=[29,39)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool application enable", "pool": "cephfs.cephfs.data", "app": "cephfs"} v 0) v1
Oct 11 01:44:06 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1983857752' entity='client.admin' cmd=[{"prefix": "osd pool application enable", "pool": "cephfs.cephfs.data", "app": "cephfs"}]: dispatch
Oct 11 01:44:06 compute-0 podman[214504]: 2025-10-11 01:44:06.241819028 +0000 UTC m=+0.126361961 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, org.label-schema.license=GPLv2)
Oct 11 01:44:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v96: 193 pgs: 1 peering, 93 unknown, 99 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:06 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.3 deep-scrub starts
Oct 11 01:44:06 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.3 deep-scrub ok
Oct 11 01:44:06 compute-0 ceph-mgr[192233]: [progress INFO root] Writing back 9 completed events
Oct 11 01:44:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/progress/completed}] v 0) v1
Oct 11 01:44:06 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e40 do_prune osdmap full prune enabled
Oct 11 01:44:06 compute-0 ceph-mon[191930]: osdmap e40: 3 total, 3 up, 3 in
Oct 11 01:44:06 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1983857752' entity='client.admin' cmd=[{"prefix": "osd pool application enable", "pool": "cephfs.cephfs.data", "app": "cephfs"}]: dispatch
Oct 11 01:44:06 compute-0 ceph-mon[191930]: 4.3 deep-scrub starts
Oct 11 01:44:06 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:06 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1983857752' entity='client.admin' cmd='[{"prefix": "osd pool application enable", "pool": "cephfs.cephfs.data", "app": "cephfs"}]': finished
Oct 11 01:44:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e41 e41: 3 total, 3 up, 3 in
Oct 11 01:44:06 compute-0 heuristic_hofstadter[214481]: enabled application 'cephfs' on pool 'cephfs.cephfs.data'
Oct 11 01:44:06 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e41: 3 total, 3 up, 3 in
Oct 11 01:44:06 compute-0 systemd[1]: libpod-67c84d9480f5b097b6fdca4168aa72d8d66e997980956963a7a92644f410e6a8.scope: Deactivated successfully.
Oct 11 01:44:06 compute-0 podman[214466]: 2025-10-11 01:44:06.631533393 +0000 UTC m=+1.456021666 container died 67c84d9480f5b097b6fdca4168aa72d8d66e997980956963a7a92644f410e6a8 (image=quay.io/ceph/ceph:v18, name=heuristic_hofstadter, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:44:06 compute-0 systemd[1]: var-lib-containers-storage-overlay-b7733a7eb722c25f22ec5c1c81c1683a095b3a08e70a0505ba429fcad99e200e-merged.mount: Deactivated successfully.
Oct 11 01:44:06 compute-0 podman[214466]: 2025-10-11 01:44:06.727437372 +0000 UTC m=+1.551925635 container remove 67c84d9480f5b097b6fdca4168aa72d8d66e997980956963a7a92644f410e6a8 (image=quay.io/ceph/ceph:v18, name=heuristic_hofstadter, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS)
Oct 11 01:44:06 compute-0 systemd[1]: libpod-conmon-67c84d9480f5b097b6fdca4168aa72d8d66e997980956963a7a92644f410e6a8.scope: Deactivated successfully.
Oct 11 01:44:06 compute-0 sudo[214463]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:07 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.4 scrub starts
Oct 11 01:44:07 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.4 scrub ok
Oct 11 01:44:07 compute-0 ceph-mon[191930]: pgmap v96: 193 pgs: 1 peering, 93 unknown, 99 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:07 compute-0 ceph-mon[191930]: 4.3 deep-scrub ok
Oct 11 01:44:07 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1983857752' entity='client.admin' cmd='[{"prefix": "osd pool application enable", "pool": "cephfs.cephfs.data", "app": "cephfs"}]': finished
Oct 11 01:44:07 compute-0 ceph-mon[191930]: osdmap e41: 3 total, 3 up, 3 in
Oct 11 01:44:07 compute-0 python3[214613]: ansible-ansible.legacy.stat Invoked with path=/tmp/ceph_rgw.yml follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 01:44:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v98: 193 pgs: 31 unknown, 162 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:08 compute-0 python3[214684]: ansible-ansible.legacy.copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760147047.4655669-33904-72766963813077/source dest=/tmp/ceph_rgw.yml mode=0644 force=True follow=False _original_basename=ceph_rgw.yml.j2 checksum=0a1ea65aada399f80274d3cc2047646f2797712b backup=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:44:08 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : Health check cleared: POOL_APP_NOT_ENABLED (was: 1 pool(s) do not have an application enabled)
Oct 11 01:44:08 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : Cluster is now healthy
Oct 11 01:44:08 compute-0 ceph-mon[191930]: 4.4 scrub starts
Oct 11 01:44:08 compute-0 ceph-mon[191930]: 4.4 scrub ok
Oct 11 01:44:08 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.3 scrub starts
Oct 11 01:44:09 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.3 scrub ok
Oct 11 01:44:09 compute-0 sudo[214784]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xajbtbzxaktpfvpfqctvkgkshngtzbnp ; /usr/bin/python3'
Oct 11 01:44:09 compute-0 sudo[214784]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:09 compute-0 python3[214786]: ansible-ansible.legacy.stat Invoked with path=/home/ceph-admin/assimilate_ceph.conf follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 01:44:09 compute-0 sudo[214784]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:09 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.5 scrub starts
Oct 11 01:44:09 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.5 scrub ok
Oct 11 01:44:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e41 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:44:09 compute-0 ceph-mon[191930]: pgmap v98: 193 pgs: 31 unknown, 162 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:09 compute-0 ceph-mon[191930]: Health check cleared: POOL_APP_NOT_ENABLED (was: 1 pool(s) do not have an application enabled)
Oct 11 01:44:09 compute-0 ceph-mon[191930]: Cluster is now healthy
Oct 11 01:44:09 compute-0 ceph-mon[191930]: 2.3 scrub starts
Oct 11 01:44:09 compute-0 ceph-mon[191930]: 2.3 scrub ok
Oct 11 01:44:09 compute-0 ceph-mon[191930]: 4.5 scrub starts
Oct 11 01:44:09 compute-0 ceph-mon[191930]: 4.5 scrub ok
Oct 11 01:44:09 compute-0 sudo[214859]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fnocjoqrwsfnfpfvdoxuznmdzzvlecsi ; /usr/bin/python3'
Oct 11 01:44:09 compute-0 sudo[214859]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:09 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.4 deep-scrub starts
Oct 11 01:44:09 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.4 deep-scrub ok
Oct 11 01:44:10 compute-0 python3[214861]: ansible-ansible.legacy.copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760147048.9013824-33918-78948269828708/source dest=/home/ceph-admin/assimilate_ceph.conf owner=167 group=167 mode=0644 follow=False _original_basename=ceph_rgw.conf.j2 checksum=aa2df0a9c99b4a6f9628bc78f750e092b6bddd4f backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:44:10 compute-0 sudo[214859]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v99: 193 pgs: 31 unknown, 162 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:10 compute-0 sudo[214909]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ztbzhtodcauwclyozfskgpdfhypfdvol ; /usr/bin/python3'
Oct 11 01:44:10 compute-0 sudo[214909]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:10 compute-0 ceph-mon[191930]: 2.4 deep-scrub starts
Oct 11 01:44:10 compute-0 ceph-mon[191930]: 2.4 deep-scrub ok
Oct 11 01:44:10 compute-0 python3[214911]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z   --volume /tmp/ceph_rgw.yml:/home/ceph_spec.yaml:z   --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   config assimilate-conf -i /home/assimilate_ceph.conf _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:10 compute-0 podman[214912]: 2025-10-11 01:44:10.779725474 +0000 UTC m=+0.089044687 container create 63929ca2cea720c3c835e000e85517afff8835ecaabf0c3b862ce00c7a15d2d1 (image=quay.io/ceph/ceph:v18, name=pedantic_khayyam, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 01:44:10 compute-0 podman[214912]: 2025-10-11 01:44:10.749389986 +0000 UTC m=+0.058709259 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:10 compute-0 systemd[1]: Started libpod-conmon-63929ca2cea720c3c835e000e85517afff8835ecaabf0c3b862ce00c7a15d2d1.scope.
Oct 11 01:44:10 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:10 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/82827287a5b2098a45ed4e2723132f76a37771b568fa26cbe3b6bca8eadfd9c7/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:10 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/82827287a5b2098a45ed4e2723132f76a37771b568fa26cbe3b6bca8eadfd9c7/merged/home/ceph_spec.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:10 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/82827287a5b2098a45ed4e2723132f76a37771b568fa26cbe3b6bca8eadfd9c7/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:10 compute-0 podman[214912]: 2025-10-11 01:44:10.948369176 +0000 UTC m=+0.257688399 container init 63929ca2cea720c3c835e000e85517afff8835ecaabf0c3b862ce00c7a15d2d1 (image=quay.io/ceph/ceph:v18, name=pedantic_khayyam, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 01:44:10 compute-0 podman[214912]: 2025-10-11 01:44:10.961849055 +0000 UTC m=+0.271168278 container start 63929ca2cea720c3c835e000e85517afff8835ecaabf0c3b862ce00c7a15d2d1 (image=quay.io/ceph/ceph:v18, name=pedantic_khayyam, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:44:10 compute-0 podman[214912]: 2025-10-11 01:44:10.967499372 +0000 UTC m=+0.276818625 container attach 63929ca2cea720c3c835e000e85517afff8835ecaabf0c3b862ce00c7a15d2d1 (image=quay.io/ceph/ceph:v18, name=pedantic_khayyam, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True)
Oct 11 01:44:10 compute-0 podman[214926]: 2025-10-11 01:44:10.981534057 +0000 UTC m=+0.115890751 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 01:44:10 compute-0 podman[214929]: 2025-10-11 01:44:10.990800752 +0000 UTC m=+0.127454774 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, release-0.7.12=, build-date=2024-09-18T21:23:30, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-type=git, summary=Provides the latest release of Red Hat Universal Base Image 9., version=9.4, com.redhat.component=ubi9-container, name=ubi9, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9, container_name=kepler, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, managed_by=edpm_ansible, architecture=x86_64, io.openshift.expose-services=, io.buildah.version=1.29.0, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, maintainer=Red Hat, Inc., io.openshift.tags=base rhel9, config_id=edpm, release=1214.1726694543)
Oct 11 01:44:11 compute-0 podman[214930]: 2025-10-11 01:44:11.051021584 +0000 UTC m=+0.170929830 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 01:44:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config assimilate-conf"} v 0) v1
Oct 11 01:44:11 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/2184874845' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch
Oct 11 01:44:11 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/2184874845' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished
Oct 11 01:44:11 compute-0 pedantic_khayyam[214931]: 
Oct 11 01:44:11 compute-0 pedantic_khayyam[214931]: [global]
Oct 11 01:44:11 compute-0 pedantic_khayyam[214931]:         fsid = 3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:44:11 compute-0 pedantic_khayyam[214931]:         mon_host = 192.168.122.100
Oct 11 01:44:11 compute-0 systemd[1]: libpod-63929ca2cea720c3c835e000e85517afff8835ecaabf0c3b862ce00c7a15d2d1.scope: Deactivated successfully.
Oct 11 01:44:11 compute-0 podman[214912]: 2025-10-11 01:44:11.57793125 +0000 UTC m=+0.887250493 container died 63929ca2cea720c3c835e000e85517afff8835ecaabf0c3b862ce00c7a15d2d1 (image=quay.io/ceph/ceph:v18, name=pedantic_khayyam, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:11 compute-0 systemd[1]: var-lib-containers-storage-overlay-82827287a5b2098a45ed4e2723132f76a37771b568fa26cbe3b6bca8eadfd9c7-merged.mount: Deactivated successfully.
Oct 11 01:44:11 compute-0 ceph-mon[191930]: pgmap v99: 193 pgs: 31 unknown, 162 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:11 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2184874845' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch
Oct 11 01:44:11 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2184874845' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished
Oct 11 01:44:11 compute-0 podman[214912]: 2025-10-11 01:44:11.66340156 +0000 UTC m=+0.972720773 container remove 63929ca2cea720c3c835e000e85517afff8835ecaabf0c3b862ce00c7a15d2d1 (image=quay.io/ceph/ceph:v18, name=pedantic_khayyam, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:11 compute-0 systemd[1]: libpod-conmon-63929ca2cea720c3c835e000e85517afff8835ecaabf0c3b862ce00c7a15d2d1.scope: Deactivated successfully.
Oct 11 01:44:11 compute-0 sudo[214909]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:11 compute-0 sudo[215017]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:11 compute-0 sudo[215017]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:11 compute-0 sudo[215017]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:11 compute-0 sudo[215051]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:44:11 compute-0 sudo[215051]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:11 compute-0 sudo[215051]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:11 compute-0 sudo[215107]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ivicqipgtlmmdpcmndjpipaiznxlbjqz ; /usr/bin/python3'
Oct 11 01:44:11 compute-0 sudo[215107]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:12 compute-0 sudo[215093]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:12 compute-0 sudo[215093]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:12 compute-0 sudo[215093]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:12 compute-0 python3[215124]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z   --volume /tmp/ceph_rgw.yml:/home/ceph_spec.yaml:z   --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   config-key set ssl_option no_sslv2:sslv3:no_tlsv1:no_tlsv1_1 _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:12 compute-0 sudo[215127]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ls
Oct 11 01:44:12 compute-0 sudo[215127]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:12 compute-0 podman[215150]: 2025-10-11 01:44:12.223499608 +0000 UTC m=+0.074225988 container create 44eb37fb61ae11cc945e9b2567c9cc9791ca2bae909ef88394258fd5dcea19fe (image=quay.io/ceph/ceph:v18, name=youthful_neumann, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 01:44:12 compute-0 systemd[1]: Started libpod-conmon-44eb37fb61ae11cc945e9b2567c9cc9791ca2bae909ef88394258fd5dcea19fe.scope.
Oct 11 01:44:12 compute-0 podman[215150]: 2025-10-11 01:44:12.197604762 +0000 UTC m=+0.048331182 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:12 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bf6139748a5e0a387fe63ad10a2956862885d31248c9657dc98502573140f03b/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bf6139748a5e0a387fe63ad10a2956862885d31248c9657dc98502573140f03b/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bf6139748a5e0a387fe63ad10a2956862885d31248c9657dc98502573140f03b/merged/home/ceph_spec.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:12 compute-0 podman[215150]: 2025-10-11 01:44:12.36075149 +0000 UTC m=+0.211477900 container init 44eb37fb61ae11cc945e9b2567c9cc9791ca2bae909ef88394258fd5dcea19fe (image=quay.io/ceph/ceph:v18, name=youthful_neumann, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:44:12 compute-0 podman[215150]: 2025-10-11 01:44:12.391670656 +0000 UTC m=+0.242397076 container start 44eb37fb61ae11cc945e9b2567c9cc9791ca2bae909ef88394258fd5dcea19fe (image=quay.io/ceph/ceph:v18, name=youthful_neumann, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507)
Oct 11 01:44:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v100: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "backups", "var": "pgp_num_actual", "val": "32"} v 0) v1
Oct 11 01:44:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "backups", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "cephfs.cephfs.data", "var": "pgp_num_actual", "val": "32"} v 0) v1
Oct 11 01:44:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "cephfs.cephfs.data", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "cephfs.cephfs.meta", "var": "pgp_num_actual", "val": "32"} v 0) v1
Oct 11 01:44:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "cephfs.cephfs.meta", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "images", "var": "pgp_num_actual", "val": "32"} v 0) v1
Oct 11 01:44:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "images", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "vms", "var": "pgp_num_actual", "val": "32"} v 0) v1
Oct 11 01:44:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "vms", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "volumes", "var": "pgp_num_actual", "val": "32"} v 0) v1
Oct 11 01:44:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "volumes", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:12 compute-0 podman[215150]: 2025-10-11 01:44:12.403584188 +0000 UTC m=+0.254310638 container attach 44eb37fb61ae11cc945e9b2567c9cc9791ca2bae909ef88394258fd5dcea19fe (image=quay.io/ceph/ceph:v18, name=youthful_neumann, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:44:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e41 do_prune osdmap full prune enabled
Oct 11 01:44:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "backups", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "cephfs.cephfs.data", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "cephfs.cephfs.meta", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "images", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "vms", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "volumes", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:44:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "backups", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:44:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "cephfs.cephfs.data", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:44:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "cephfs.cephfs.meta", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:44:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "images", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:44:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "vms", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:44:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "volumes", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:44:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e42 e42: 3 total, 3 up, 3 in
Oct 11 01:44:12 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e42: 3 total, 3 up, 3 in
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.18( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.811526299s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.049842834s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.14( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.851706505s) [2] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.090087891s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.18( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.811423302s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.049842834s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.14( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.851631165s) [2] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.090087891s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.17( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.849180222s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.087745667s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.17( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.849039078s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.087745667s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.13( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.811411858s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.050521851s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.13( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.811374664s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.050521851s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.15( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.848544121s) [2] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.087722778s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.14( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.811144829s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.050514221s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.15( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.848507881s) [2] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.087722778s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.11( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.848484993s) [2] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.087944031s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.11( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.848415375s) [2] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.087944031s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.12( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.810621262s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.050529480s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.14( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.810593605s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.050514221s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.12( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.810587883s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.050529480s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.11( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.810141563s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.050529480s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.13( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.847535133s) [2] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.087959290s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.10( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.810668945s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.051116943s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.11( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.810090065s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.050529480s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.10( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.810633659s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.051116943s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.13( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.847490311s) [2] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.087959290s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.f( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.809402466s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.050598145s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.f( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.809365273s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.050598145s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.d( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.848768234s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.090042114s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.d( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.848733902s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.090042114s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.e( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.819228172s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.061447144s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.e( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.818922997s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.061447144s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.d( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.807821274s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.050598145s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.c( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.845151901s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.087936401s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.d( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.807788849s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.050598145s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.c( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.845119476s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.087936401s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.e( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.846616745s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.090026855s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.e( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.846592903s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.090026855s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.2( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.846682549s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.090324402s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.2( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.846648216s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.090324402s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.2( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.807367325s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.051216125s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.2( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.807345390s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.051216125s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.1( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.807141304s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.051170349s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.1( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.807116508s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.051170349s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.1( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.845424652s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.089630127s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.f( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.845621109s) [2] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.088867188s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.1( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.844970703s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.089630127s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.4( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.806242943s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.051124573s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.4( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.806214333s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.051124573s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.6( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.845026970s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.090065002s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.6( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.845005035s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.090065002s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.9( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.816087723s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.061248779s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.9( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.816068649s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.061248779s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.b( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.844759941s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.090034485s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.b( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.844740868s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.090034485s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.1a( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.815845490s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.061256409s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.1a( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.815827370s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.061256409s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.5( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.815687180s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.061233521s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.5( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.815616608s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.061233521s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.a( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.815503120s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.061256409s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.a( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.815482140s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.061256409s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.8( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.844168663s) [2] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.090057373s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.8( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.844149590s) [2] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.090057373s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.4( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.843916893s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.090095520s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.1b( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.815260887s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.061431885s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.4( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.843894958s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.090095520s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.1b( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.815178871s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.061431885s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.7( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.814938545s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.061294556s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.7( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.814919472s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.061294556s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.8( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.814901352s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.061378479s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.1e( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.843613625s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.090126038s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.8( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.814879417s) [1] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.061378479s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.1e( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.843587875s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.090126038s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.1c( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.814772606s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 75.061508179s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[4.1c( empty local-lis/les=35/37 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.814745903s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 75.061508179s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.1f( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.843340874s) [2] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.090133667s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.1f( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.843218803s) [2] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.090133667s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.1c( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.843071938s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.090133667s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.1c( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.843047142s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.090133667s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.1d( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.843154907s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 76.090309143s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.1d( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.843128204s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.090309143s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[6.f( empty local-lis/les=37/38 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.840686798s) [2] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 76.088867188s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[4.18( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[4.1b( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[6.1e( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[4.d( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[6.c( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[4.f( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[6.d( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[6.2( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[4.1a( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[4.2( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[4.4( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[6.6( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[6.4( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[6.1( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[4.7( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[6.f( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [2] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[4.e( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[4.1( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[4.a( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[6.8( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [2] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[6.14( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [2] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[6.15( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [2] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[4.13( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[6.11( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [2] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[6.13( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [2] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[4.11( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[4.1c( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[6.1f( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [2] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.1b( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.748971939s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.440681458s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.1b( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.748948097s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.440681458s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.1d( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.814435959s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.506313324s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.1d( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.814414978s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.506313324s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.1e( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.815236092s) [0] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.507278442s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.1e( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.815217972s) [0] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.507278442s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.19( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.748605728s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.440769196s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.19( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.748587608s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.440769196s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.18( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.747461319s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.439735413s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.18( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.747446060s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.439735413s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.17( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.747338295s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.439727783s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.17( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.747323990s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.439727783s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.16( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.747990608s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.440498352s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.16( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.747973442s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.440498352s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.11( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.814681053s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.507331848s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.11( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.814664841s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.507331848s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.15( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.747736931s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.440502167s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.15( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.747721672s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.440502167s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.12( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.813430786s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.506298065s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.12( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.813416481s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.506298065s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.13( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.812945366s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.506305695s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.13( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.812925339s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.506305695s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[4.5( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[6.e( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.13( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.746483803s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.439987183s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.13( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.746466637s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.439987183s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.14( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.812634468s) [0] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.506324768s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.14( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.812595367s) [0] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.506324768s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.15( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.812297821s) [0] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.506347656s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.15( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.812273979s) [0] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.506347656s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.11( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.745368958s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.439598083s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.11( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.745347977s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.439598083s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.16( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.811959267s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.506355286s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.16( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.811936378s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.506355286s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.f( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.745885849s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.440490723s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.f( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.745862961s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.440490723s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.9( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.811610222s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.506412506s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.9( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.811590195s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.506412506s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.d( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.737392426s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.432373047s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.d( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.737365723s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.432373047s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.7( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.811221123s) [0] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.506530762s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.7( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.811195374s) [0] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.506530762s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[5.1e( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [0] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[2.19( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[2.18( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[2.16( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[2.13( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[5.14( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [0] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[5.15( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [0] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[2.11( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[2.f( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[5.7( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [0] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[2.2( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[5.5( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [0] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[5.3( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [0] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[4.9( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[6.b( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[4.8( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[6.17( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[4.14( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[4.12( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[4.10( empty local-lis/les=0/0 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[6.1d( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[6.1c( empty local-lis/les=0/0 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.1c( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.840857506s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.747592926s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.1c( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.840827942s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.747592926s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.17( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.764780045s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.673355103s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.17( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.764688492s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.673355103s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.13( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.839133263s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.747940063s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.13( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.839114189s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.747940063s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.18( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.764859200s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.673980713s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.18( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.764829636s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.673980713s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.15( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.762633324s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.671897888s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.11( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.849013329s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.758304596s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.15( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.762605667s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.671897888s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.11( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.848991394s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.758304596s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.12( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.762109756s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.671607971s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.12( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.762089729s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.671607971s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.11( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.761729240s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.671447754s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.11( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.761710167s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.671447754s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.15( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.848484993s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.758319855s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.f( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.761474609s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.671409607s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.f( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.761446953s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.671409607s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.e( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.761192322s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.671417236s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.16( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.761219978s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.671478271s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.e( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.761138916s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.671417236s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.16( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.761185646s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.671478271s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.15( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.848445892s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.758319855s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.a( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.848374367s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.758838654s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.a( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.848350525s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.758838654s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.c( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.760828018s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.671394348s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.c( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.760772705s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.671394348s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.7( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.736496925s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.432014465s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.7( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.736474991s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.432014465s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.2( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.736276627s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.432006836s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.2( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.736251831s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.432006836s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.5( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.815396309s) [0] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.511299133s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.5( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.815321922s) [0] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.511299133s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.3( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.735893250s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.431972504s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.3( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.735872269s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.431972504s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.4( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.743244171s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.439556122s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.4( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.810253143s) [0] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.506584167s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.4( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.743217468s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.439556122s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.4( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.810192108s) [0] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.506584167s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.3( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.810201645s) [0] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.506649017s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.3( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.810181618s) [0] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.506649017s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.5( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.735325813s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.431961060s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.5( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.735307693s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.431961060s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.2( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.809926033s) [0] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.506675720s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.2( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.809906006s) [0] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.506675720s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.6( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.735124588s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.431983948s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.1( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.809679031s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.506679535s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.1( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.809650421s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.506679535s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.6( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.734933853s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.431983948s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.8( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.734766006s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.431995392s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.9( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.734724998s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.431964874s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.8( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.734724045s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.431995392s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.9( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.734691620s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.431964874s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.a( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.733357430s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.430828094s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.a( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.733332634s) [1] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.430828094s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.b( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.733207703s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.430831909s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.b( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.733189583s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.430831909s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.c( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.809150696s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.507297516s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.c( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.809123039s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.507297516s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.1c( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.733073235s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.431850433s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.1c( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.733048439s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.431850433s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.f( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.807799339s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.506690979s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.1d( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.733047485s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.432003021s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.f( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.807724953s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.506690979s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.1d( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.733025551s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.432003021s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.1a( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.808223724s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.507308960s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.1a( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.805868149s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.507308960s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.19( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.805780411s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.507316589s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.19( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.805746078s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.507316589s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.18( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.805526733s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active pruub 61.507324219s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[5.18( empty local-lis/les=37/38 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42 pruub=14.805493355s) [1] r=-1 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 61.507324219s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.1f( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.724844933s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active pruub 57.426719666s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[2.1f( empty local-lis/les=33/34 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42 pruub=10.724752426s) [0] r=-1 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 57.426719666s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[3.17( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.8( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.848004341s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.758728027s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.9( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.848163605s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.758678436s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.8( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.847970963s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.758728027s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.9( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.847891808s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.758678436s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.f( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.847789764s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.758956909s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.f( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.847765923s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.758956909s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.6( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.847483635s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.758838654s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.6( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.847460747s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.758838654s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.4( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.847372055s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.758865356s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.4( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.847352982s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.758865356s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.1( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.759316444s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.671005249s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.1( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.759293556s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.671005249s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.5( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.847091675s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.758964539s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.5( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.847072601s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.758964539s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.3( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.758967400s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.670951843s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.5( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.758975029s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.670989990s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.3( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.758944511s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.670951843s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.5( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.758954048s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.670989990s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.1( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.846884727s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.759021759s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.1( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.846867561s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.759021759s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.2( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.846746445s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.759067535s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.2( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.846727371s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.759067535s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.6( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.761539459s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.673965454s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.7( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.758435249s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.670883179s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.6( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.761515617s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.673965454s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.3( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.846591949s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.759056091s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.7( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.758411407s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.670883179s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.3( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.846566200s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.759056091s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.8( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.758079529s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.670852661s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.8( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.758060455s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.670852661s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.c( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.846027374s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.759101868s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.9( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.757760048s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.670845032s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.c( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.846009254s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.759101868s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.9( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.757739067s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.670845032s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.a( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.757476807s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.670837402s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.a( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.757458687s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.670837402s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.e( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.846763611s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.760295868s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.e( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.846692085s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.760295868s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.1f( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.845366478s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.759113312s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.1f( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.845344543s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.759113312s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.1b( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.756931305s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.670875549s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.1b( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.756907463s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.670875549s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.18( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.845000267s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.759136200s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.18( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.844977379s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.759136200s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.1a( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.845375061s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.759681702s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.1e( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.754309654s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.668655396s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.1a( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.845354080s) [2] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.759681702s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.1d( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.754961967s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.669319153s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.1e( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.754284859s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.668655396s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.1d( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.754938126s) [2] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.669319153s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.1b( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.845261574s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active pruub 62.759723663s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.1f( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.742387772s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active pruub 67.656867981s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[7.1b( empty local-lis/les=39/40 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42 pruub=8.845235825s) [0] r=-1 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 62.759723663s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[3.1f( empty local-lis/les=35/37 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42 pruub=13.742371559s) [0] r=-1 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown NOTIFY pruub 67.656867981s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[7.13( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[3.15( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[3.12( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[7.1c( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[3.18( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[7.11( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[3.11( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[3.e( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[3.16( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[3.f( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[7.9( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[2.1b( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[5.1d( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[7.15( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[7.a( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[7.8( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[7.5( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[3.5( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[7.1( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[7.2( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[3.7( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[3.8( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[7.f( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[7.6( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[2.17( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[5.11( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[2.15( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[5.12( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[5.13( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[7.4( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[5.16( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[5.9( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[2.7( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[2.3( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[7.c( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[7.e( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[7.1a( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[3.1d( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 42 pg[3.1e( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[3.1( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[3.3( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[2.d( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[2.4( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[2.5( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[5.1( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[2.6( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[2.9( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[2.a( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[5.c( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[5.f( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[5.1a( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[5.19( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 42 pg[5.18( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[3.c( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[3.6( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[7.3( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[3.9( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[3.a( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[7.1f( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[3.1b( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[7.18( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[5.4( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [0] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[5.2( empty local-lis/les=0/0 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [0] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[2.8( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[3.1f( empty local-lis/les=0/0 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[7.1b( empty local-lis/les=0/0 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[2.b( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[2.1c( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[2.1d( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 42 pg[2.1f( empty local-lis/les=0/0 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:12 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.c scrub starts
Oct 11 01:44:12 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.c scrub ok
Oct 11 01:44:13 compute-0 podman[215263]: 2025-10-11 01:44:13.050244618 +0000 UTC m=+0.101666831 container exec ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
Oct 11 01:44:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=ssl_option}] v 0) v1
Oct 11 01:44:13 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/3995700804' entity='client.admin' 
Oct 11 01:44:13 compute-0 youthful_neumann[215167]: set ssl_option
Oct 11 01:44:13 compute-0 systemd[1]: libpod-44eb37fb61ae11cc945e9b2567c9cc9791ca2bae909ef88394258fd5dcea19fe.scope: Deactivated successfully.
Oct 11 01:44:13 compute-0 podman[215150]: 2025-10-11 01:44:13.123562538 +0000 UTC m=+0.974288978 container died 44eb37fb61ae11cc945e9b2567c9cc9791ca2bae909ef88394258fd5dcea19fe (image=quay.io/ceph/ceph:v18, name=youthful_neumann, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 01:44:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-bf6139748a5e0a387fe63ad10a2956862885d31248c9657dc98502573140f03b-merged.mount: Deactivated successfully.
Oct 11 01:44:13 compute-0 podman[215263]: 2025-10-11 01:44:13.189972403 +0000 UTC m=+0.241394596 container exec_died ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, ceph=True)
Oct 11 01:44:13 compute-0 podman[215150]: 2025-10-11 01:44:13.208013277 +0000 UTC m=+1.058739657 container remove 44eb37fb61ae11cc945e9b2567c9cc9791ca2bae909ef88394258fd5dcea19fe (image=quay.io/ceph/ceph:v18, name=youthful_neumann, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS)
Oct 11 01:44:13 compute-0 systemd[1]: libpod-conmon-44eb37fb61ae11cc945e9b2567c9cc9791ca2bae909ef88394258fd5dcea19fe.scope: Deactivated successfully.
Oct 11 01:44:13 compute-0 sudo[215107]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:13 compute-0 sudo[215357]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tvzoidpzldxqcnlpjpmrwsfckesrzkxs ; /usr/bin/python3'
Oct 11 01:44:13 compute-0 sudo[215357]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:13 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.2 deep-scrub starts
Oct 11 01:44:13 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.2 deep-scrub ok
Oct 11 01:44:13 compute-0 python3[215364]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z   --volume /tmp/ceph_rgw.yml:/home/ceph_spec.yaml:z   --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   orch apply --in-file /home/ceph_spec.yaml _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
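Here Ansible applies the RGW service spec by running ceph orch apply inside a one-shot ceph:v18 container, bind-mounting the conf, admin keyring, and spec file. A hedged equivalent from the host, assuming the cephadm CLI is installed and /tmp/ceph_rgw.yml still exists as mounted above (the /mnt destination path is an arbitrary choice):

    # apply the same service spec through cephadm's maintenance shell;
    # -m bind-mounts the spec file into the shell container
    sudo cephadm shell --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da \
        -m /tmp/ceph_rgw.yml:/mnt/ceph_rgw.yml \
        -- ceph orch apply -i /mnt/ceph_rgw.yml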
Oct 11 01:44:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e42 do_prune osdmap full prune enabled
Oct 11 01:44:13 compute-0 ceph-mon[191930]: pgmap v100: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "backups", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:44:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "cephfs.cephfs.data", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:44:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "cephfs.cephfs.meta", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:44:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "images", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:44:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "vms", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:44:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "volumes", "var": "pgp_num_actual", "val": "32"}]': finished
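The six "osd pool set ... pgp_num_actual 32" commands are the mgr stepping each pool's effective placement count up to its pg_num (32 here, judging by the target value); pgp_num_actual is the internal knob the mgr drives, while operators normally set pg_num/pgp_num directly. To confirm the two have converged for one of the pools named above (a sketch, under the same cephadm assumption as the previous snippet):

    # pg_num and pgp_num should both report 32 once the mgr finishes
    sudo cephadm shell -- ceph osd pool get volumes pg_num
    sudo cephadm shell -- ceph osd pool get volumes pgp_num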
Oct 11 01:44:13 compute-0 ceph-mon[191930]: osdmap e42: 3 total, 3 up, 3 in
Oct 11 01:44:13 compute-0 ceph-mon[191930]: 2.c scrub starts
Oct 11 01:44:13 compute-0 ceph-mon[191930]: 2.c scrub ok
Oct 11 01:44:13 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3995700804' entity='client.admin' 
Oct 11 01:44:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e43 e43: 3 total, 3 up, 3 in
Oct 11 01:44:13 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e43: 3 total, 3 up, 3 in
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[4.1c( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[3.18( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[3.16( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[7.1c( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[6.1f( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [2] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[6.13( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [2] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[4.11( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[7.11( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[6.15( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [2] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[7.15( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[3.11( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[6.14( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [2] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[6.11( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [2] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[6.8( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [2] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[7.8( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[4.a( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[7.5( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[4.1( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[7.a( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[3.e( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[7.1( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[7.2( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[3.7( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[4.e( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[7.c( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[4.13( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[3.5( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[6.f( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [2] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[3.8( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[3.1d( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[4.1a( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[4.1b( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[4.18( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[3.1e( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [2] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[7.1a( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 43 pg[7.e( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [2] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[5.19( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[5.18( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[2.19( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[2.18( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[5.7( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [0] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[5.1e( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [0] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[5.4( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [0] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[2.1c( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[2.f( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[5.5( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [0] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[2.1f( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[5.f( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[5.1d( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[5.1a( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[2.9( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[5.c( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[2.6( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[2.4( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[2.7( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[2.3( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[2.d( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[2.5( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[5.9( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[5.1( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[5.2( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [0] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[2.1d( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[2.b( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[5.16( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[2.15( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[2.17( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[2.8( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[5.15( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [0] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[2.a( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[2.1b( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [1] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[5.12( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[6.1d( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[4.10( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[6.1c( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[5.13( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[4.14( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[6.17( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[4.8( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[5.3( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [0] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[6.b( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[5.14( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [0] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[4.12( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[7.1b( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[2.16( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[3.17( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[3.15( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[4.9( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[6.e( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[4.5( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[4.7( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[6.1( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[6.4( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[6.6( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[4.4( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[6.2( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[4.f( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[6.c( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[4.d( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[5.11( empty local-lis/les=42/43 n=0 ec=37/25 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[6.1e( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[6.d( empty local-lis/les=42/43 n=0 ec=37/27 lis/c=37/37 les/c/f=38/38/0 sis=42) [1] r=0 lpr=42 pi=[37,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 43 pg[4.2( empty local-lis/les=42/43 n=0 ec=35/23 lis/c=35/35 les/c/f=37/37/0 sis=42) [1] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[2.2( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[3.12( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[3.f( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[3.c( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[7.9( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[7.13( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[2.11( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[7.f( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[7.6( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[2.13( empty local-lis/les=42/43 n=0 ec=33/19 lis/c=33/33 les/c/f=34/34/0 sis=42) [0] r=0 lpr=42 pi=[33,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[7.4( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[3.1( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[3.3( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[7.3( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[3.9( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[3.a( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[7.1f( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[3.6( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[7.18( empty local-lis/les=42/43 n=0 ec=39/29 lis/c=39/39 les/c/f=40/40/0 sis=42) [0] r=0 lpr=42 pi=[39,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[3.1f( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 43 pg[3.1b( empty local-lis/les=42/43 n=0 ec=35/21 lis/c=35/35 les/c/f=37/37/0 sis=42) [0] r=0 lpr=42 pi=[35,42)/1 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
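Peering closes out at epoch 43: each primary reacts to AllReplicasActivated and logs "Activating complete", and the pgmap (41 peering at v103 below) drifts back toward all active+clean as these land. A sketch to tally activations per OSD for this epoch, under the same journalctl and message-format assumptions as the earlier snippet:

    # activations per OSD at epoch 43
    journalctl -t ceph-osd | grep 'Activating complete' \
        | sed -n 's/.*\(osd\.[0-9]*\) pg_epoch: 43.*/\1/p' | sort | uniq -c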
Oct 11 01:44:13 compute-0 podman[215381]: 2025-10-11 01:44:13.8094733 +0000 UTC m=+0.098444845 container create babc1f3182f1b0e8f4ac7ff626fda5a76fbc99f0b3b334e7893b1d415d08ea78 (image=quay.io/ceph/ceph:v18, name=vigilant_davinci, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:44:13 compute-0 systemd[1]: Started libpod-conmon-babc1f3182f1b0e8f4ac7ff626fda5a76fbc99f0b3b334e7893b1d415d08ea78.scope.
Oct 11 01:44:13 compute-0 podman[215381]: 2025-10-11 01:44:13.776740301 +0000 UTC m=+0.065711896 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:13 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ca480f1694336fb5fde83680ebc26a6cd7e4441bacd47924d1f26453a145c0b2/merged/home/ceph_spec.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ca480f1694336fb5fde83680ebc26a6cd7e4441bacd47924d1f26453a145c0b2/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ca480f1694336fb5fde83680ebc26a6cd7e4441bacd47924d1f26453a145c0b2/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:13 compute-0 podman[215381]: 2025-10-11 01:44:13.945740683 +0000 UTC m=+0.234712248 container init babc1f3182f1b0e8f4ac7ff626fda5a76fbc99f0b3b334e7893b1d415d08ea78 (image=quay.io/ceph/ceph:v18, name=vigilant_davinci, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:13 compute-0 podman[215381]: 2025-10-11 01:44:13.96962167 +0000 UTC m=+0.258593225 container start babc1f3182f1b0e8f4ac7ff626fda5a76fbc99f0b3b334e7893b1d415d08ea78 (image=quay.io/ceph/ceph:v18, name=vigilant_davinci, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 01:44:13 compute-0 podman[215381]: 2025-10-11 01:44:13.976100302 +0000 UTC m=+0.265071887 container attach babc1f3182f1b0e8f4ac7ff626fda5a76fbc99f0b3b334e7893b1d415d08ea78 (image=quay.io/ceph/ceph:v18, name=vigilant_davinci, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True)
Oct 11 01:44:14 compute-0 sudo[215127]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:44:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:44:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:44:14 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:44:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:44:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:44:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:14 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 6585f1df-d573-45fe-973e-b084c41a1e1d does not exist
Oct 11 01:44:14 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 2a34b5bc-a5c8-4c95-a222-4e2a89726b52 does not exist
Oct 11 01:44:14 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev d630e9c1-1e60-4ca5-aa7c-806955211beb does not exist
Oct 11 01:44:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:44:14 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:44:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:44:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:44:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:44:14 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:14 compute-0 sudo[215432]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:14 compute-0 sudo[215432]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:14 compute-0 sudo[215432]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:14 compute-0 sudo[215458]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:44:14 compute-0 sudo[215458]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:14 compute-0 sudo[215458]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v103: 193 pgs: 41 peering, 152 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:14 compute-0 sudo[215501]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:14 compute-0 sudo[215501]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:14 compute-0 sudo[215501]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:14 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14244 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:44:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e43 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:44:14 compute-0 sudo[215526]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
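This is cephadm's OSD-creation path for the default_drive_group spec: it invokes ceph-volume "lvm batch" against three pre-created logical volumes, with --no-auto disabling batch's automatic device sorting and --no-systemd skipping unit enablement (cephadm manages the units itself). To inspect what ceph-volume produced on those LVs (a sketch; the fsid is copied from the command above):

    # list the OSDs ceph-volume set up on the pre-built LVs
    sudo cephadm ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list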
Oct 11 01:44:14 compute-0 ceph-mgr[192233]: [cephadm INFO root] Saving service rgw.rgw spec with placement compute-0
Oct 11 01:44:14 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Saving service rgw.rgw spec with placement compute-0
Oct 11 01:44:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.rgw.rgw}] v 0) v1
Oct 11 01:44:14 compute-0 sudo[215526]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:14 compute-0 vigilant_davinci[215411]: Scheduled rgw.rgw update...
Oct 11 01:44:14 compute-0 systemd[1]: libpod-babc1f3182f1b0e8f4ac7ff626fda5a76fbc99f0b3b334e7893b1d415d08ea78.scope: Deactivated successfully.
Oct 11 01:44:14 compute-0 podman[215381]: 2025-10-11 01:44:14.635982563 +0000 UTC m=+0.924954168 container died babc1f3182f1b0e8f4ac7ff626fda5a76fbc99f0b3b334e7893b1d415d08ea78 (image=quay.io/ceph/ceph:v18, name=vigilant_davinci, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507)
Oct 11 01:44:14 compute-0 systemd[1]: var-lib-containers-storage-overlay-ca480f1694336fb5fde83680ebc26a6cd7e4441bacd47924d1f26453a145c0b2-merged.mount: Deactivated successfully.
Oct 11 01:44:14 compute-0 ceph-mon[191930]: 3.2 deep-scrub starts
Oct 11 01:44:14 compute-0 ceph-mon[191930]: 3.2 deep-scrub ok
Oct 11 01:44:14 compute-0 ceph-mon[191930]: osdmap e43: 3 total, 3 up, 3 in
Oct 11 01:44:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:44:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:44:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:44:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:14 compute-0 podman[215381]: 2025-10-11 01:44:14.717442325 +0000 UTC m=+1.006413870 container remove babc1f3182f1b0e8f4ac7ff626fda5a76fbc99f0b3b334e7893b1d415d08ea78 (image=quay.io/ceph/ceph:v18, name=vigilant_davinci, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3)
Oct 11 01:44:14 compute-0 systemd[1]: libpod-conmon-babc1f3182f1b0e8f4ac7ff626fda5a76fbc99f0b3b334e7893b1d415d08ea78.scope: Deactivated successfully.
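That completes one full lifecycle for the helper container vigilant_davinci: podman logs create, init, start, attach, died, and remove, with systemd tearing down the matching libpod and conmon scopes at each end. The same sequence can be watched live with podman's event stream (a sketch, assuming rootful podman as used in this deployment):

    # stream container lifecycle events as they happen
    sudo podman events --filter type=container \
        --format '{{.Time}} {{.Status}} {{.Name}}'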
Oct 11 01:44:14 compute-0 sudo[215357]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:15 compute-0 podman[215602]: 2025-10-11 01:44:15.130119419 +0000 UTC m=+0.091665164 container create ffb6da55aea39d7801a7260a170e5cfacfb245dbe0f63bb8d5fe90e15472e274 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_zhukovsky, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:44:15 compute-0 systemd[1]: Started libpod-conmon-ffb6da55aea39d7801a7260a170e5cfacfb245dbe0f63bb8d5fe90e15472e274.scope.
Oct 11 01:44:15 compute-0 podman[215602]: 2025-10-11 01:44:15.09839135 +0000 UTC m=+0.059937105 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:15 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:15 compute-0 podman[215602]: 2025-10-11 01:44:15.244433683 +0000 UTC m=+0.205979468 container init ffb6da55aea39d7801a7260a170e5cfacfb245dbe0f63bb8d5fe90e15472e274 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_zhukovsky, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0)
Oct 11 01:44:15 compute-0 podman[215602]: 2025-10-11 01:44:15.255955654 +0000 UTC m=+0.217501369 container start ffb6da55aea39d7801a7260a170e5cfacfb245dbe0f63bb8d5fe90e15472e274 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_zhukovsky, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 01:44:15 compute-0 podman[215602]: 2025-10-11 01:44:15.260955442 +0000 UTC m=+0.222501157 container attach ffb6da55aea39d7801a7260a170e5cfacfb245dbe0f63bb8d5fe90e15472e274 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_zhukovsky, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:44:15 compute-0 amazing_zhukovsky[215618]: 167 167
Oct 11 01:44:15 compute-0 systemd[1]: libpod-ffb6da55aea39d7801a7260a170e5cfacfb245dbe0f63bb8d5fe90e15472e274.scope: Deactivated successfully.
Oct 11 01:44:15 compute-0 podman[215602]: 2025-10-11 01:44:15.271085962 +0000 UTC m=+0.232631677 container died ffb6da55aea39d7801a7260a170e5cfacfb245dbe0f63bb8d5fe90e15472e274 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_zhukovsky, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2)
Oct 11 01:44:15 compute-0 systemd[1]: var-lib-containers-storage-overlay-2bbf05c2276019c352ce088da7050622ac63338d470d4d7f5443f648730a4837-merged.mount: Deactivated successfully.
Oct 11 01:44:15 compute-0 podman[215602]: 2025-10-11 01:44:15.33756765 +0000 UTC m=+0.299113365 container remove ffb6da55aea39d7801a7260a170e5cfacfb245dbe0f63bb8d5fe90e15472e274 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_zhukovsky, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.vendor=CentOS)
Oct 11 01:44:15 compute-0 systemd[1]: libpod-conmon-ffb6da55aea39d7801a7260a170e5cfacfb245dbe0f63bb8d5fe90e15472e274.scope: Deactivated successfully.
Oct 11 01:44:15 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.4 scrub starts
Oct 11 01:44:15 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.4 scrub ok
Oct 11 01:44:15 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.6 deep-scrub starts
Oct 11 01:44:15 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.6 deep-scrub ok
Oct 11 01:44:15 compute-0 podman[215662]: 2025-10-11 01:44:15.604206492 +0000 UTC m=+0.077518196 container create 41ecc0b5d84fbec1914619aa7f8c9f4c13f227c56ac86e37a5c4082ce8f02b55 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_robinson, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:44:15 compute-0 systemd[1]: Started libpod-conmon-41ecc0b5d84fbec1914619aa7f8c9f4c13f227c56ac86e37a5c4082ce8f02b55.scope.
Oct 11 01:44:15 compute-0 podman[215662]: 2025-10-11 01:44:15.581882161 +0000 UTC m=+0.055193875 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:15 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/98ff4e9bfd39e57941468f9850fac3263c32c9d2e6507697087ce15c4b22bb68/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/98ff4e9bfd39e57941468f9850fac3263c32c9d2e6507697087ce15c4b22bb68/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/98ff4e9bfd39e57941468f9850fac3263c32c9d2e6507697087ce15c4b22bb68/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/98ff4e9bfd39e57941468f9850fac3263c32c9d2e6507697087ce15c4b22bb68/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/98ff4e9bfd39e57941468f9850fac3263c32c9d2e6507697087ce15c4b22bb68/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:15 compute-0 podman[215662]: 2025-10-11 01:44:15.702973575 +0000 UTC m=+0.176285289 container init 41ecc0b5d84fbec1914619aa7f8c9f4c13f227c56ac86e37a5c4082ce8f02b55 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_robinson, org.label-schema.build-date=20250507, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:44:15 compute-0 podman[215662]: 2025-10-11 01:44:15.723831903 +0000 UTC m=+0.197143607 container start 41ecc0b5d84fbec1914619aa7f8c9f4c13f227c56ac86e37a5c4082ce8f02b55 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_robinson, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, ceph=True, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:15 compute-0 podman[215662]: 2025-10-11 01:44:15.728437389 +0000 UTC m=+0.201749113 container attach 41ecc0b5d84fbec1914619aa7f8c9f4c13f227c56ac86e37a5c4082ce8f02b55 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_robinson, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default)
Oct 11 01:44:15 compute-0 python3[215737]: ansible-ansible.legacy.stat Invoked with path=/tmp/ceph_mds.yml follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 01:44:16 compute-0 ceph-mon[191930]: pgmap v103: 193 pgs: 41 peering, 152 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:16 compute-0 ceph-mon[191930]: from='client.14244 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:44:16 compute-0 ceph-mon[191930]: Saving service rgw.rgw spec with placement compute-0
Oct 11 01:44:16 compute-0 ceph-mon[191930]: 3.4 scrub starts
Oct 11 01:44:16 compute-0 ceph-mon[191930]: 3.4 scrub ok
Oct 11 01:44:16 compute-0 podman[215765]: 2025-10-11 01:44:16.22578455 +0000 UTC m=+0.118734606 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, io.buildah.version=1.41.4)
Oct 11 01:44:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v104: 193 pgs: 41 peering, 152 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:16 compute-0 python3[215828]: ansible-ansible.legacy.copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760147055.514862-33959-112862422210313/source dest=/tmp/ceph_mds.yml mode=0644 force=True follow=False _original_basename=ceph_mds.yml.j2 checksum=e359e26d9e42bc107a0de03375144cf8590b6f68 backup=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:44:16 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.b scrub starts
Oct 11 01:44:16 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.b scrub ok
Oct 11 01:44:16 compute-0 sudo[215898]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-twbbddxypumiuahncndgaecpfkbraqqk ; /usr/bin/python3'
Oct 11 01:44:16 compute-0 sudo[215898]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:17 compute-0 adoring_robinson[215709]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:44:17 compute-0 adoring_robinson[215709]: --> relative data size: 1.0
Oct 11 01:44:17 compute-0 adoring_robinson[215709]: --> All data devices are unavailable
Oct 11 01:44:17 compute-0 systemd[1]: libpod-41ecc0b5d84fbec1914619aa7f8c9f4c13f227c56ac86e37a5c4082ce8f02b55.scope: Deactivated successfully.
Oct 11 01:44:17 compute-0 systemd[1]: libpod-41ecc0b5d84fbec1914619aa7f8c9f4c13f227c56ac86e37a5c4082ce8f02b55.scope: Consumed 1.278s CPU time.
Oct 11 01:44:17 compute-0 podman[215662]: 2025-10-11 01:44:17.079801017 +0000 UTC m=+1.553112761 container died 41ecc0b5d84fbec1914619aa7f8c9f4c13f227c56ac86e37a5c4082ce8f02b55 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_robinson, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:17 compute-0 systemd[1]: var-lib-containers-storage-overlay-98ff4e9bfd39e57941468f9850fac3263c32c9d2e6507697087ce15c4b22bb68-merged.mount: Deactivated successfully.
Oct 11 01:44:17 compute-0 python3[215901]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z   --volume /tmp/ceph_mds.yml:/home/ceph_spec.yaml:z   --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   fs volume create cephfs '--placement=compute-0 '
                                            _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:17 compute-0 ceph-mon[191930]: 4.6 deep-scrub starts
Oct 11 01:44:17 compute-0 ceph-mon[191930]: 4.6 deep-scrub ok
Oct 11 01:44:17 compute-0 podman[215662]: 2025-10-11 01:44:17.216678438 +0000 UTC m=+1.689990182 container remove 41ecc0b5d84fbec1914619aa7f8c9f4c13f227c56ac86e37a5c4082ce8f02b55 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_robinson, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:44:17 compute-0 systemd[1]: libpod-conmon-41ecc0b5d84fbec1914619aa7f8c9f4c13f227c56ac86e37a5c4082ce8f02b55.scope: Deactivated successfully.
Oct 11 01:44:17 compute-0 podman[215914]: 2025-10-11 01:44:17.258097654 +0000 UTC m=+0.077329710 container create bbba2b0a5fff7cecca20f790c8261d148efd6e9fad8a37caa66edffef0a85feb (image=quay.io/ceph/ceph:v18, name=inspiring_nash, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef)
Oct 11 01:44:17 compute-0 sudo[215526]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:17 compute-0 podman[215914]: 2025-10-11 01:44:17.229993892 +0000 UTC m=+0.049225928 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:17 compute-0 systemd[1]: Started libpod-conmon-bbba2b0a5fff7cecca20f790c8261d148efd6e9fad8a37caa66edffef0a85feb.scope.
Oct 11 01:44:17 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/44bea757c862a3d890eab5a5fd3cf040d28125e5d2a779221f62ead99a163019/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/44bea757c862a3d890eab5a5fd3cf040d28125e5d2a779221f62ead99a163019/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/44bea757c862a3d890eab5a5fd3cf040d28125e5d2a779221f62ead99a163019/merged/home/ceph_spec.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:17 compute-0 podman[215914]: 2025-10-11 01:44:17.407748554 +0000 UTC m=+0.226980680 container init bbba2b0a5fff7cecca20f790c8261d148efd6e9fad8a37caa66edffef0a85feb (image=quay.io/ceph/ceph:v18, name=inspiring_nash, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 01:44:17 compute-0 podman[215914]: 2025-10-11 01:44:17.435591988 +0000 UTC m=+0.254824044 container start bbba2b0a5fff7cecca20f790c8261d148efd6e9fad8a37caa66edffef0a85feb (image=quay.io/ceph/ceph:v18, name=inspiring_nash, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_REF=reef)
Oct 11 01:44:17 compute-0 podman[215914]: 2025-10-11 01:44:17.443054829 +0000 UTC m=+0.262286945 container attach bbba2b0a5fff7cecca20f790c8261d148efd6e9fad8a37caa66edffef0a85feb (image=quay.io/ceph/ceph:v18, name=inspiring_nash, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:17 compute-0 sudo[215926]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:17 compute-0 sudo[215926]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:17 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.b scrub starts
Oct 11 01:44:17 compute-0 sudo[215926]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:17 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.b scrub ok
Oct 11 01:44:17 compute-0 sudo[215955]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:44:17 compute-0 sudo[215955]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:17 compute-0 sudo[215955]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:17 compute-0 sudo[215980]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:17 compute-0 sudo[215980]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:17 compute-0 sudo[215980]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:17 compute-0 sudo[216024]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:44:17 compute-0 sudo[216024]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:18 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14246 -' entity='client.admin' cmd=[{"prefix": "fs volume create", "name": "cephfs", "placement": "compute-0 ", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:44:18 compute-0 ceph-mgr[192233]: [volumes INFO volumes.module] Starting _cmd_fs_volume_create(name:cephfs, placement:compute-0 , prefix:fs volume create, target:['mon-mgr', '']) < ""
Oct 11 01:44:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool create", "pool": "cephfs.cephfs.meta"} v 0) v1
Oct 11 01:44:18 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool create", "pool": "cephfs.cephfs.meta"}]: dispatch
Oct 11 01:44:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"bulk": true, "prefix": "osd pool create", "pool": "cephfs.cephfs.data"} v 0) v1
Oct 11 01:44:18 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"bulk": true, "prefix": "osd pool create", "pool": "cephfs.cephfs.data"}]: dispatch
Oct 11 01:44:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "fs new", "fs_name": "cephfs", "metadata": "cephfs.cephfs.meta", "data": "cephfs.cephfs.data"} v 0) v1
Oct 11 01:44:18 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "fs new", "fs_name": "cephfs", "metadata": "cephfs.cephfs.meta", "data": "cephfs.cephfs.data"}]: dispatch
Oct 11 01:44:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e43 do_prune osdmap full prune enabled
Oct 11 01:44:18 compute-0 ceph-mon[191930]: log_channel(cluster) log [ERR] : Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)
Oct 11 01:44:18 compute-0 ceph-mon[191930]: log_channel(cluster) log [WRN] : Health check failed: 1 filesystem is online with fewer MDS than max_mds (MDS_UP_LESS_THAN_MAX)
Oct 11 01:44:18 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0[191926]: 2025-10-11T01:44:18.031+0000 7f20b5945640 -1 log_channel(cluster) log [ERR] : Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)
Oct 11 01:44:18 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "fs new", "fs_name": "cephfs", "metadata": "cephfs.cephfs.meta", "data": "cephfs.cephfs.data"}]': finished
Oct 11 01:44:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).mds e2 new map
Oct 11 01:44:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).mds e2 print_map
                                            e2
                                            enable_multiple, ever_enabled_multiple: 1,1
                                            default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2}
                                            legacy client fscid: 1
                                             
                                            Filesystem 'cephfs' (1)
                                            fs_name        cephfs
                                            epoch        2
                                            flags        12 joinable allow_snaps allow_multimds_snaps
                                            created        2025-10-11T01:44:18.031797+0000
                                            modified        2025-10-11T01:44:18.031855+0000
                                            tableserver        0
                                            root        0
                                            session_timeout        60
                                            session_autoclose        300
                                            max_file_size        1099511627776
                                            max_xattr_size        65536
                                            required_client_features        {}
                                            last_failure        0
                                            last_failure_osd_epoch        0
                                            compat        compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,7=mds uses inline data,8=no anchor table,9=file layout v2,10=snaprealm v2}
                                            max_mds        1
                                            in        
                                            up        {}
                                            failed        
                                            damaged        
                                            stopped        
                                            data_pools        [7]
                                            metadata_pool        6
                                            inline_data        disabled
                                            balancer        
                                            bal_rank_mask        -1
                                            standby_count_wanted        0
                                             
                                             
Oct 11 01:44:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e44 e44: 3 total, 3 up, 3 in
Oct 11 01:44:18 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e44: 3 total, 3 up, 3 in
Oct 11 01:44:18 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : fsmap cephfs:0
Oct 11 01:44:18 compute-0 ceph-mgr[192233]: [cephadm INFO root] Saving service mds.cephfs spec with placement compute-0
Oct 11 01:44:18 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Saving service mds.cephfs spec with placement compute-0
Oct 11 01:44:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.mds.cephfs}] v 0) v1
Oct 11 01:44:18 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:18 compute-0 ceph-mgr[192233]: [volumes INFO volumes.module] Finishing _cmd_fs_volume_create(name:cephfs, placement:compute-0 , prefix:fs volume create, target:['mon-mgr', '']) < ""
Oct 11 01:44:18 compute-0 systemd[1]: libpod-bbba2b0a5fff7cecca20f790c8261d148efd6e9fad8a37caa66edffef0a85feb.scope: Deactivated successfully.
Oct 11 01:44:18 compute-0 podman[215914]: 2025-10-11 01:44:18.107915788 +0000 UTC m=+0.927147854 container died bbba2b0a5fff7cecca20f790c8261d148efd6e9fad8a37caa66edffef0a85feb (image=quay.io/ceph/ceph:v18, name=inspiring_nash, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:18 compute-0 systemd[1]: var-lib-containers-storage-overlay-44bea757c862a3d890eab5a5fd3cf040d28125e5d2a779221f62ead99a163019-merged.mount: Deactivated successfully.
Oct 11 01:44:18 compute-0 ceph-mon[191930]: pgmap v104: 193 pgs: 41 peering, 152 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:18 compute-0 ceph-mon[191930]: 4.b scrub starts
Oct 11 01:44:18 compute-0 ceph-mon[191930]: 4.b scrub ok
Oct 11 01:44:18 compute-0 ceph-mon[191930]: 3.b scrub starts
Oct 11 01:44:18 compute-0 ceph-mon[191930]: 3.b scrub ok
Oct 11 01:44:18 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool create", "pool": "cephfs.cephfs.meta"}]: dispatch
Oct 11 01:44:18 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"bulk": true, "prefix": "osd pool create", "pool": "cephfs.cephfs.data"}]: dispatch
Oct 11 01:44:18 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "fs new", "fs_name": "cephfs", "metadata": "cephfs.cephfs.meta", "data": "cephfs.cephfs.data"}]: dispatch
Oct 11 01:44:18 compute-0 ceph-mon[191930]: Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)
Oct 11 01:44:18 compute-0 ceph-mon[191930]: Health check failed: 1 filesystem is online with fewer MDS than max_mds (MDS_UP_LESS_THAN_MAX)
Oct 11 01:44:18 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "fs new", "fs_name": "cephfs", "metadata": "cephfs.cephfs.meta", "data": "cephfs.cephfs.data"}]': finished
Oct 11 01:44:18 compute-0 ceph-mon[191930]: osdmap e44: 3 total, 3 up, 3 in
Oct 11 01:44:18 compute-0 ceph-mon[191930]: fsmap cephfs:0
Oct 11 01:44:18 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:18 compute-0 podman[215914]: 2025-10-11 01:44:18.219715427 +0000 UTC m=+1.038947463 container remove bbba2b0a5fff7cecca20f790c8261d148efd6e9fad8a37caa66edffef0a85feb (image=quay.io/ceph/ceph:v18, name=inspiring_nash, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:18 compute-0 systemd[1]: libpod-conmon-bbba2b0a5fff7cecca20f790c8261d148efd6e9fad8a37caa66edffef0a85feb.scope: Deactivated successfully.
Oct 11 01:44:18 compute-0 sudo[215898]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v106: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:18 compute-0 sudo[216126]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kaqtnktpfqyrjdmvexhgrdiojhawaejs ; /usr/bin/python3'
Oct 11 01:44:18 compute-0 sudo[216126]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:18 compute-0 podman[216122]: 2025-10-11 01:44:18.558851655 +0000 UTC m=+0.080114513 container create ae77406b7bba2c9cc5e2f75cd7f3603e6cd3d806f5eda9abedd2277d430a4b6e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_solomon, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:44:18 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.c scrub starts
Oct 11 01:44:18 compute-0 podman[216122]: 2025-10-11 01:44:18.531405212 +0000 UTC m=+0.052668150 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:18 compute-0 systemd[1]: Started libpod-conmon-ae77406b7bba2c9cc5e2f75cd7f3603e6cd3d806f5eda9abedd2277d430a4b6e.scope.
Oct 11 01:44:18 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.c scrub ok
Oct 11 01:44:18 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:18 compute-0 python3[216136]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z   --volume /tmp/ceph_mds.yml:/home/ceph_spec.yaml:z   --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   orch apply --in-file /home/ceph_spec.yaml _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:18 compute-0 podman[216122]: 2025-10-11 01:44:18.718795979 +0000 UTC m=+0.240058927 container init ae77406b7bba2c9cc5e2f75cd7f3603e6cd3d806f5eda9abedd2277d430a4b6e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_solomon, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 01:44:18 compute-0 podman[216122]: 2025-10-11 01:44:18.736215925 +0000 UTC m=+0.257478783 container start ae77406b7bba2c9cc5e2f75cd7f3603e6cd3d806f5eda9abedd2277d430a4b6e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_solomon, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 01:44:18 compute-0 podman[216122]: 2025-10-11 01:44:18.742656925 +0000 UTC m=+0.263919833 container attach ae77406b7bba2c9cc5e2f75cd7f3603e6cd3d806f5eda9abedd2277d430a4b6e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_solomon, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 01:44:18 compute-0 gallant_solomon[216143]: 167 167
Oct 11 01:44:18 compute-0 systemd[1]: libpod-ae77406b7bba2c9cc5e2f75cd7f3603e6cd3d806f5eda9abedd2277d430a4b6e.scope: Deactivated successfully.
Oct 11 01:44:18 compute-0 podman[216122]: 2025-10-11 01:44:18.749695224 +0000 UTC m=+0.270958112 container died ae77406b7bba2c9cc5e2f75cd7f3603e6cd3d806f5eda9abedd2277d430a4b6e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_solomon, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, ceph=True)
Oct 11 01:44:18 compute-0 systemd[1]: var-lib-containers-storage-overlay-bc3f956b71606eb9289a8aad089cf72470925f7478972f17fedb8811f5227c23-merged.mount: Deactivated successfully.
Oct 11 01:44:18 compute-0 podman[216122]: 2025-10-11 01:44:18.816325456 +0000 UTC m=+0.337588314 container remove ae77406b7bba2c9cc5e2f75cd7f3603e6cd3d806f5eda9abedd2277d430a4b6e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_solomon, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507)
Oct 11 01:44:18 compute-0 systemd[1]: libpod-conmon-ae77406b7bba2c9cc5e2f75cd7f3603e6cd3d806f5eda9abedd2277d430a4b6e.scope: Deactivated successfully.
Oct 11 01:44:18 compute-0 podman[216146]: 2025-10-11 01:44:18.838805931 +0000 UTC m=+0.097357403 container create 5a5728825320a2500efdc77b99a29884e97351ca6155fc5013d50e90d2fe4142 (image=quay.io/ceph/ceph:v18, name=busy_dijkstra, CEPH_REF=reef, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:44:18 compute-0 podman[216146]: 2025-10-11 01:44:18.791709607 +0000 UTC m=+0.050261149 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:18 compute-0 systemd[1]: Started libpod-conmon-5a5728825320a2500efdc77b99a29884e97351ca6155fc5013d50e90d2fe4142.scope.
Oct 11 01:44:18 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1de33042e5d8126da8b4028eb25b03f73b1ddf5204c995c298ffa56c4f588bc5/merged/home/ceph_spec.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1de33042e5d8126da8b4028eb25b03f73b1ddf5204c995c298ffa56c4f588bc5/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1de33042e5d8126da8b4028eb25b03f73b1ddf5204c995c298ffa56c4f588bc5/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:18 compute-0 podman[216146]: 2025-10-11 01:44:18.97085692 +0000 UTC m=+0.229408452 container init 5a5728825320a2500efdc77b99a29884e97351ca6155fc5013d50e90d2fe4142 (image=quay.io/ceph/ceph:v18, name=busy_dijkstra, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, OSD_FLAVOR=default, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507)
Oct 11 01:44:18 compute-0 podman[216146]: 2025-10-11 01:44:18.989410629 +0000 UTC m=+0.247962131 container start 5a5728825320a2500efdc77b99a29884e97351ca6155fc5013d50e90d2fe4142 (image=quay.io/ceph/ceph:v18, name=busy_dijkstra, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:18 compute-0 podman[216146]: 2025-10-11 01:44:18.997362294 +0000 UTC m=+0.255913796 container attach 5a5728825320a2500efdc77b99a29884e97351ca6155fc5013d50e90d2fe4142 (image=quay.io/ceph/ceph:v18, name=busy_dijkstra, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:19 compute-0 podman[216184]: 2025-10-11 01:44:19.117571552 +0000 UTC m=+0.093482868 container create d2d96d5004284cf1fc266bd564abfc9f75eeee46b6451c70a4953269c28d9fe6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_bartik, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:19 compute-0 podman[216184]: 2025-10-11 01:44:19.081591907 +0000 UTC m=+0.057503223 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:19 compute-0 ceph-mon[191930]: from='client.14246 -' entity='client.admin' cmd=[{"prefix": "fs volume create", "name": "cephfs", "placement": "compute-0 ", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:44:19 compute-0 ceph-mon[191930]: Saving service mds.cephfs spec with placement compute-0
Oct 11 01:44:19 compute-0 systemd[1]: Started libpod-conmon-d2d96d5004284cf1fc266bd564abfc9f75eeee46b6451c70a4953269c28d9fe6.scope.
Oct 11 01:44:19 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/279baae26888162e9face1bef1005c89224e9152e548bf7339e556343b62c306/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/279baae26888162e9face1bef1005c89224e9152e548bf7339e556343b62c306/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/279baae26888162e9face1bef1005c89224e9152e548bf7339e556343b62c306/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/279baae26888162e9face1bef1005c89224e9152e548bf7339e556343b62c306/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:19 compute-0 podman[216184]: 2025-10-11 01:44:19.30602754 +0000 UTC m=+0.281938826 container init d2d96d5004284cf1fc266bd564abfc9f75eeee46b6451c70a4953269c28d9fe6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_bartik, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:44:19 compute-0 podman[216184]: 2025-10-11 01:44:19.332530895 +0000 UTC m=+0.308442221 container start d2d96d5004284cf1fc266bd564abfc9f75eeee46b6451c70a4953269c28d9fe6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_bartik, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, io.buildah.version=1.39.3)
Oct 11 01:44:19 compute-0 podman[216184]: 2025-10-11 01:44:19.338971665 +0000 UTC m=+0.314882991 container attach d2d96d5004284cf1fc266bd564abfc9f75eeee46b6451c70a4953269c28d9fe6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_bartik, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True)
Oct 11 01:44:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e44 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:44:19 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14248 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:44:19 compute-0 ceph-mgr[192233]: [cephadm INFO root] Saving service mds.cephfs spec with placement compute-0
Oct 11 01:44:19 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Saving service mds.cephfs spec with placement compute-0
Oct 11 01:44:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.mds.cephfs}] v 0) v1
Oct 11 01:44:19 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:19 compute-0 busy_dijkstra[216175]: Scheduled mds.cephfs update...
Oct 11 01:44:19 compute-0 systemd[1]: libpod-5a5728825320a2500efdc77b99a29884e97351ca6155fc5013d50e90d2fe4142.scope: Deactivated successfully.
Oct 11 01:44:19 compute-0 conmon[216175]: conmon 5a5728825320a2500efd <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-5a5728825320a2500efdc77b99a29884e97351ca6155fc5013d50e90d2fe4142.scope/container/memory.events
Oct 11 01:44:19 compute-0 podman[216146]: 2025-10-11 01:44:19.648208399 +0000 UTC m=+0.906759861 container died 5a5728825320a2500efdc77b99a29884e97351ca6155fc5013d50e90d2fe4142 (image=quay.io/ceph/ceph:v18, name=busy_dijkstra, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:19 compute-0 systemd[1]: var-lib-containers-storage-overlay-1de33042e5d8126da8b4028eb25b03f73b1ddf5204c995c298ffa56c4f588bc5-merged.mount: Deactivated successfully.
Oct 11 01:44:19 compute-0 podman[216146]: 2025-10-11 01:44:19.73881849 +0000 UTC m=+0.997369992 container remove 5a5728825320a2500efdc77b99a29884e97351ca6155fc5013d50e90d2fe4142 (image=quay.io/ceph/ceph:v18, name=busy_dijkstra, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:44:19 compute-0 systemd[1]: libpod-conmon-5a5728825320a2500efdc77b99a29884e97351ca6155fc5013d50e90d2fe4142.scope: Deactivated successfully.
Oct 11 01:44:19 compute-0 sudo[216126]: pam_unix(sudo:session): session closed for user root
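The 01:44:19 sequence above is the mgr-side record of an "orch apply" for the MDS service: a client dispatches the command to mon-mgr, cephadm saves the mds.cephfs spec with placement compute-0 into the config-key store (mgr/cephadm/spec.mds.cephfs), and the busy_dijkstra CLI container reports "Scheduled mds.cephfs update..." before being torn down. The exact client command is not shown in this log; a minimal sketch of an invocation consistent with these entries (the service name and placement are taken from the log, the CLI form itself is an assumption):

    import subprocess

    # Assumption: "cephadm shell -- <cmd>" runs the ceph CLI inside the
    # cluster container; "ceph orch apply mds cephfs --placement compute-0"
    # would produce the "Saving service mds.cephfs spec with placement
    # compute-0" mgr log line seen above.
    subprocess.run(
        ["cephadm", "shell", "--", "ceph", "orch", "apply",
         "mds", "cephfs", "--placement", "compute-0"],
        check=True,
    )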
Oct 11 01:44:20 compute-0 elastic_bartik[216201]: {
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:     "0": [
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:         {
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "devices": [
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "/dev/loop3"
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             ],
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "lv_name": "ceph_lv0",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "lv_size": "21470642176",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "name": "ceph_lv0",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "tags": {
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.cluster_name": "ceph",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.crush_device_class": "",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.encrypted": "0",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.osd_id": "0",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.type": "block",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.vdo": "0"
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             },
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "type": "block",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "vg_name": "ceph_vg0"
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:         }
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:     ],
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:     "1": [
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:         {
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "devices": [
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "/dev/loop4"
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             ],
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "lv_name": "ceph_lv1",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "lv_size": "21470642176",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "name": "ceph_lv1",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "tags": {
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.cluster_name": "ceph",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.crush_device_class": "",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.encrypted": "0",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.osd_id": "1",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.type": "block",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.vdo": "0"
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             },
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "type": "block",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "vg_name": "ceph_vg1"
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:         }
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:     ],
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:     "2": [
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:         {
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "devices": [
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "/dev/loop5"
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             ],
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "lv_name": "ceph_lv2",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "lv_size": "21470642176",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "name": "ceph_lv2",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "tags": {
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.cluster_name": "ceph",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.crush_device_class": "",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.encrypted": "0",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.osd_id": "2",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.type": "block",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:                 "ceph.vdo": "0"
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             },
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "type": "block",
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:             "vg_name": "ceph_vg2"
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:         }
Oct 11 01:44:20 compute-0 elastic_bartik[216201]:     ]
Oct 11 01:44:20 compute-0 elastic_bartik[216201]: }
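The JSON printed by elastic_bartik (01:44:20) has the shape of `ceph-volume lvm list --format json` output: top-level keys are OSD ids ("0", "1", "2"), each mapping to the LVM logical volumes backing that OSD, with the `ceph.*` metadata carried twice, once as the raw `lv_tags` string and once parsed into the `tags` object. A minimal sketch of reading the OSD-to-device mapping out of such output (field names are copied from the log; running the command through `cephadm shell` is an assumption):

    import json
    import subprocess

    # Assumption: cephadm is installed on the host and wraps the same
    # containerized ceph-volume whose output appears in the log above.
    out = subprocess.check_output(
        ["cephadm", "shell", "--", "ceph-volume", "lvm", "list",
         "--format", "json"]
    )

    # Top-level keys are OSD ids; each value is a list of LV records.
    for osd_id, lvs in json.loads(out).items():
        for lv in lvs:
            # e.g. 0 /dev/ceph_vg0/ceph_lv0 ['/dev/loop3'] a9c7940d-...
            print(osd_id, lv["lv_path"], lv["devices"],
                  lv["tags"]["ceph.osd_fsid"])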
Oct 11 01:44:20 compute-0 systemd[1]: libpod-d2d96d5004284cf1fc266bd564abfc9f75eeee46b6451c70a4953269c28d9fe6.scope: Deactivated successfully.
Oct 11 01:44:20 compute-0 podman[216184]: 2025-10-11 01:44:20.189376035 +0000 UTC m=+1.165287381 container died d2d96d5004284cf1fc266bd564abfc9f75eeee46b6451c70a4953269c28d9fe6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_bartik, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:20 compute-0 ceph-mon[191930]: pgmap v106: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:20 compute-0 ceph-mon[191930]: 4.c scrub starts
Oct 11 01:44:20 compute-0 ceph-mon[191930]: 4.c scrub ok
Oct 11 01:44:20 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:20 compute-0 systemd[1]: var-lib-containers-storage-overlay-279baae26888162e9face1bef1005c89224e9152e548bf7339e556343b62c306-merged.mount: Deactivated successfully.
Oct 11 01:44:20 compute-0 podman[216184]: 2025-10-11 01:44:20.292819967 +0000 UTC m=+1.268731283 container remove d2d96d5004284cf1fc266bd564abfc9f75eeee46b6451c70a4953269c28d9fe6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_bartik, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:20 compute-0 systemd[1]: libpod-conmon-d2d96d5004284cf1fc266bd564abfc9f75eeee46b6451c70a4953269c28d9fe6.scope: Deactivated successfully.
Oct 11 01:44:20 compute-0 sudo[216024]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v107: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:20 compute-0 sudo[216294]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:20 compute-0 sudo[216294]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:20 compute-0 sudo[216294]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:20 compute-0 sudo[216356]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ccknsfrccpylwcgkexhmjkkzlpklsjtg ; /usr/bin/python3'
Oct 11 01:44:20 compute-0 sudo[216356]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:20 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.15 deep-scrub starts
Oct 11 01:44:20 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.15 deep-scrub ok
Oct 11 01:44:20 compute-0 sudo[216352]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:44:20 compute-0 sudo[216352]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:20 compute-0 sudo[216352]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:20 compute-0 python3[216367]: ansible-ansible.legacy.stat Invoked with path=/etc/ceph/ceph.client.openstack.keyring follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 01:44:20 compute-0 sudo[216356]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:20 compute-0 sudo[216381]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:20 compute-0 sudo[216381]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:20 compute-0 sudo[216381]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:20 compute-0 sudo[216420]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:44:20 compute-0 sudo[216420]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:21 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.e scrub starts
Oct 11 01:44:21 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.e scrub ok
Oct 11 01:44:21 compute-0 sudo[216508]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mkpbjfmddfcibjygzwegpttbiskjcblh ; /usr/bin/python3'
Oct 11 01:44:21 compute-0 sudo[216508]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:21 compute-0 ceph-mon[191930]: from='client.14248 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 01:44:21 compute-0 ceph-mon[191930]: Saving service mds.cephfs spec with placement compute-0
Oct 11 01:44:21 compute-0 ceph-mon[191930]: pgmap v107: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:21 compute-0 ceph-mon[191930]: 2.e scrub starts
Oct 11 01:44:21 compute-0 ceph-mon[191930]: 2.e scrub ok
Oct 11 01:44:21 compute-0 python3[216513]: ansible-ansible.legacy.copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760147060.2910779-33989-215250736681508/source dest=/etc/ceph/ceph.client.openstack.keyring mode=0644 force=True owner=167 group=167 follow=False _original_basename=ceph_key.j2 checksum=5a4e28cd9e72beb8739807af57732c755958e969 backup=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:44:21 compute-0 sudo[216508]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:21 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.d scrub starts
Oct 11 01:44:21 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.d scrub ok
Oct 11 01:44:21 compute-0 podman[216546]: 2025-10-11 01:44:21.474170244 +0000 UTC m=+0.082947466 container create 43264188e5536de564a28e6b677edf22fe997a89062f4f66160fd71cfdd10f5a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_kowalevski, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 01:44:21 compute-0 podman[216546]: 2025-10-11 01:44:21.44364496 +0000 UTC m=+0.052422182 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:21 compute-0 systemd[1]: Started libpod-conmon-43264188e5536de564a28e6b677edf22fe997a89062f4f66160fd71cfdd10f5a.scope.
Oct 11 01:44:21 compute-0 ceph-mgr[192233]: [progress INFO root] Completed event 667866df-5687-406c-9071-247796551e3a (Global Recovery Event) in 20 seconds
Oct 11 01:44:21 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:21 compute-0 podman[216546]: 2025-10-11 01:44:21.627842492 +0000 UTC m=+0.236619764 container init 43264188e5536de564a28e6b677edf22fe997a89062f4f66160fd71cfdd10f5a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_kowalevski, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:44:21 compute-0 podman[216546]: 2025-10-11 01:44:21.644394062 +0000 UTC m=+0.253171254 container start 43264188e5536de564a28e6b677edf22fe997a89062f4f66160fd71cfdd10f5a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_kowalevski, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:21 compute-0 podman[216546]: 2025-10-11 01:44:21.651516423 +0000 UTC m=+0.260293645 container attach 43264188e5536de564a28e6b677edf22fe997a89062f4f66160fd71cfdd10f5a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_kowalevski, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.license=GPLv2)
Oct 11 01:44:21 compute-0 adoring_kowalevski[216581]: 167 167
Oct 11 01:44:21 compute-0 systemd[1]: libpod-43264188e5536de564a28e6b677edf22fe997a89062f4f66160fd71cfdd10f5a.scope: Deactivated successfully.
Oct 11 01:44:21 compute-0 podman[216546]: 2025-10-11 01:44:21.65681773 +0000 UTC m=+0.265594952 container died 43264188e5536de564a28e6b677edf22fe997a89062f4f66160fd71cfdd10f5a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_kowalevski, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:21 compute-0 systemd[1]: var-lib-containers-storage-overlay-32f7c3484aec7d2ce59684b749e9a7cb1f9e3f459ff3761338ffb77ea564442b-merged.mount: Deactivated successfully.
Oct 11 01:44:21 compute-0 podman[216546]: 2025-10-11 01:44:21.729723928 +0000 UTC m=+0.338501150 container remove 43264188e5536de564a28e6b677edf22fe997a89062f4f66160fd71cfdd10f5a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_kowalevski, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 01:44:21 compute-0 systemd[1]: libpod-conmon-43264188e5536de564a28e6b677edf22fe997a89062f4f66160fd71cfdd10f5a.scope: Deactivated successfully.
Oct 11 01:44:21 compute-0 sudo[216622]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ezjmjindhimwincysqahnorongcudvwa ; /usr/bin/python3'
Oct 11 01:44:21 compute-0 sudo[216622]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:21 compute-0 python3[216624]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   auth import -i /etc/ceph/ceph.client.openstack.keyring _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
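The Ansible task above (01:44:21) wraps a one-shot podman container around `ceph auth import -i /etc/ceph/ceph.client.openstack.keyring`, merging the just-copied openstack client keyring into the cluster's auth database; the mon audit lines at 01:44:22 ("auth import ... dispatch" / "finished") confirm it completed. The same invocation restated as an argument list (every path, flag, and image tag below is copied from the logged command; only the Python wrapper is a sketch):

    import subprocess

    FSID = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"

    # One-shot ceph CLI container, as logged: host networking/IPC, /etc/ceph
    # bind-mounted, admin keyring for auth, entrypoint overridden to "ceph".
    subprocess.run(
        ["podman", "run", "--rm", "--net=host", "--ipc=host",
         "--volume", "/etc/ceph:/etc/ceph:z",
         "--volume",
         "/home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z",
         "--entrypoint", "ceph", "quay.io/ceph/ceph:v18",
         "--fsid", FSID,
         "-c", "/etc/ceph/ceph.conf",
         "-k", "/etc/ceph/ceph.client.admin.keyring",
         "auth", "import", "-i", "/etc/ceph/ceph.client.openstack.keyring"],
        check=True,
    )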
Oct 11 01:44:22 compute-0 podman[216630]: 2025-10-11 01:44:22.020522785 +0000 UTC m=+0.079312138 container create e84616c8b58f2a878c79220f5c037f7075cfa60dd48cb57e01555c89ebf569b0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_fermat, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, ceph=True)
Oct 11 01:44:22 compute-0 systemd[1]: Started libpod-conmon-e84616c8b58f2a878c79220f5c037f7075cfa60dd48cb57e01555c89ebf569b0.scope.
Oct 11 01:44:22 compute-0 podman[216630]: 2025-10-11 01:44:21.995858885 +0000 UTC m=+0.054648268 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:22 compute-0 podman[216643]: 2025-10-11 01:44:22.113820257 +0000 UTC m=+0.088042077 container create 7ee8d2a31c2440b808c62ea896f3a432f9610e6bd797d179c2ec25da456f64e7 (image=quay.io/ceph/ceph:v18, name=objective_hoover, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef)
Oct 11 01:44:22 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f84ad0c164bad77b5428159b248232f780fa55e4f1a448219222ec460df3bf65/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f84ad0c164bad77b5428159b248232f780fa55e4f1a448219222ec460df3bf65/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f84ad0c164bad77b5428159b248232f780fa55e4f1a448219222ec460df3bf65/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f84ad0c164bad77b5428159b248232f780fa55e4f1a448219222ec460df3bf65/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:22 compute-0 systemd[1]: Started libpod-conmon-7ee8d2a31c2440b808c62ea896f3a432f9610e6bd797d179c2ec25da456f64e7.scope.
Oct 11 01:44:22 compute-0 podman[216630]: 2025-10-11 01:44:22.169641129 +0000 UTC m=+0.228430482 container init e84616c8b58f2a878c79220f5c037f7075cfa60dd48cb57e01555c89ebf569b0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_fermat, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:44:22 compute-0 podman[216643]: 2025-10-11 01:44:22.086722025 +0000 UTC m=+0.060943835 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:22 compute-0 podman[216630]: 2025-10-11 01:44:22.186329693 +0000 UTC m=+0.245119036 container start e84616c8b58f2a878c79220f5c037f7075cfa60dd48cb57e01555c89ebf569b0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_fermat, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0)
Oct 11 01:44:22 compute-0 podman[216630]: 2025-10-11 01:44:22.190721323 +0000 UTC m=+0.249510686 container attach e84616c8b58f2a878c79220f5c037f7075cfa60dd48cb57e01555c89ebf569b0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_fermat, org.label-schema.vendor=CentOS, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:22 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/caba8f6673910f2091432cd8947bca1a8329ed1988fedfb789b10ae929d379d7/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/caba8f6673910f2091432cd8947bca1a8329ed1988fedfb789b10ae929d379d7/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:22 compute-0 ceph-mon[191930]: 4.15 deep-scrub starts
Oct 11 01:44:22 compute-0 ceph-mon[191930]: 4.15 deep-scrub ok
Oct 11 01:44:22 compute-0 ceph-mon[191930]: 3.d scrub starts
Oct 11 01:44:22 compute-0 ceph-mon[191930]: 3.d scrub ok
Oct 11 01:44:22 compute-0 podman[216643]: 2025-10-11 01:44:22.243816934 +0000 UTC m=+0.218038764 container init 7ee8d2a31c2440b808c62ea896f3a432f9610e6bd797d179c2ec25da456f64e7 (image=quay.io/ceph/ceph:v18, name=objective_hoover, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:22 compute-0 podman[216643]: 2025-10-11 01:44:22.25987193 +0000 UTC m=+0.234093720 container start 7ee8d2a31c2440b808c62ea896f3a432f9610e6bd797d179c2ec25da456f64e7 (image=quay.io/ceph/ceph:v18, name=objective_hoover, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0)
Oct 11 01:44:22 compute-0 podman[216643]: 2025-10-11 01:44:22.264445855 +0000 UTC m=+0.238667655 container attach 7ee8d2a31c2440b808c62ea896f3a432f9610e6bd797d179c2ec25da456f64e7 (image=quay.io/ceph/ceph:v18, name=objective_hoover, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 01:44:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v108: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:22 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.10 scrub starts
Oct 11 01:44:22 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.10 scrub ok
Oct 11 01:44:22 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.16 scrub starts
Oct 11 01:44:22 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.16 scrub ok
Oct 11 01:44:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth import"} v 0) v1
Oct 11 01:44:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/2214650356' entity='client.admin' cmd=[{"prefix": "auth import"}]: dispatch
Oct 11 01:44:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/2214650356' entity='client.admin' cmd='[{"prefix": "auth import"}]': finished
Oct 11 01:44:22 compute-0 systemd[1]: libpod-7ee8d2a31c2440b808c62ea896f3a432f9610e6bd797d179c2ec25da456f64e7.scope: Deactivated successfully.
Oct 11 01:44:22 compute-0 conmon[216665]: conmon 7ee8d2a31c2440b808c6 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-7ee8d2a31c2440b808c62ea896f3a432f9610e6bd797d179c2ec25da456f64e7.scope/container/memory.events
Oct 11 01:44:22 compute-0 podman[216643]: 2025-10-11 01:44:22.930303434 +0000 UTC m=+0.904525244 container died 7ee8d2a31c2440b808c62ea896f3a432f9610e6bd797d179c2ec25da456f64e7 (image=quay.io/ceph/ceph:v18, name=objective_hoover, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:44:22 compute-0 systemd[1]: var-lib-containers-storage-overlay-caba8f6673910f2091432cd8947bca1a8329ed1988fedfb789b10ae929d379d7-merged.mount: Deactivated successfully.
Oct 11 01:44:22 compute-0 podman[216643]: 2025-10-11 01:44:22.998644606 +0000 UTC m=+0.972866406 container remove 7ee8d2a31c2440b808c62ea896f3a432f9610e6bd797d179c2ec25da456f64e7 (image=quay.io/ceph/ceph:v18, name=objective_hoover, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 01:44:23 compute-0 systemd[1]: libpod-conmon-7ee8d2a31c2440b808c62ea896f3a432f9610e6bd797d179c2ec25da456f64e7.scope: Deactivated successfully.
Oct 11 01:44:23 compute-0 sudo[216622]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:23 compute-0 ceph-mon[191930]: pgmap v108: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:23 compute-0 ceph-mon[191930]: 3.10 scrub starts
Oct 11 01:44:23 compute-0 ceph-mon[191930]: 3.10 scrub ok
Oct 11 01:44:23 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2214650356' entity='client.admin' cmd=[{"prefix": "auth import"}]: dispatch
Oct 11 01:44:23 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2214650356' entity='client.admin' cmd='[{"prefix": "auth import"}]': finished
Oct 11 01:44:23 compute-0 sweet_fermat[216656]: {
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:         "osd_id": 1,
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:         "type": "bluestore"
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:     },
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:         "osd_id": 2,
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:         "type": "bluestore"
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:     },
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:         "osd_id": 0,
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:         "type": "bluestore"
Oct 11 01:44:23 compute-0 sweet_fermat[216656]:     }
Oct 11 01:44:23 compute-0 sweet_fermat[216656]: }
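This sweet_fermat payload is the result of the `ceph-volume ... raw list --format json` call that cephadm dispatched at 01:44:20 (the sudo line above ending in `-- raw list --format json`): a map of OSD fsid to its bluestore device. It agrees with the earlier lvm listing, placing OSDs 0/1/2 on the device-mapper paths for ceph_lv0/1/2. A small sketch of inverting it into an osd_id-to-device map (field names are from the log; the helper itself is illustrative):

    import json

    def osd_devices(raw_list_json: str) -> dict:
        """raw_list_json: the JSON printed by the sweet_fermat container."""
        raw = json.loads(raw_list_json)
        # Keys are OSD fsids; each record carries osd_id and the dm device.
        return {entry["osd_id"]: entry["device"] for entry in raw.values()}

    # -> {1: '/dev/mapper/ceph_vg1-ceph_lv1',
    #     2: '/dev/mapper/ceph_vg2-ceph_lv2',
    #     0: '/dev/mapper/ceph_vg0-ceph_lv0'}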
Oct 11 01:44:23 compute-0 systemd[1]: libpod-e84616c8b58f2a878c79220f5c037f7075cfa60dd48cb57e01555c89ebf569b0.scope: Deactivated successfully.
Oct 11 01:44:23 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.13 scrub starts
Oct 11 01:44:23 compute-0 systemd[1]: libpod-e84616c8b58f2a878c79220f5c037f7075cfa60dd48cb57e01555c89ebf569b0.scope: Consumed 1.271s CPU time.
Oct 11 01:44:23 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.13 scrub ok
Oct 11 01:44:23 compute-0 podman[216734]: 2025-10-11 01:44:23.547020418 +0000 UTC m=+0.042574852 container died e84616c8b58f2a878c79220f5c037f7075cfa60dd48cb57e01555c89ebf569b0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_fermat, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:23 compute-0 systemd[1]: var-lib-containers-storage-overlay-f84ad0c164bad77b5428159b248232f780fa55e4f1a448219222ec460df3bf65-merged.mount: Deactivated successfully.
Oct 11 01:44:23 compute-0 podman[216734]: 2025-10-11 01:44:23.657451195 +0000 UTC m=+0.153005579 container remove e84616c8b58f2a878c79220f5c037f7075cfa60dd48cb57e01555c89ebf569b0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_fermat, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:23 compute-0 systemd[1]: libpod-conmon-e84616c8b58f2a878c79220f5c037f7075cfa60dd48cb57e01555c89ebf569b0.scope: Deactivated successfully.
Oct 11 01:44:23 compute-0 sudo[216420]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:44:23 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:44:23 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:23 compute-0 sudo[216772]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lawabqspnevsmblukoecycrcrbsklaub ; /usr/bin/python3'
Oct 11 01:44:23 compute-0 sudo[216772]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:23 compute-0 sudo[216774]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:23 compute-0 sudo[216774]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:23 compute-0 sudo[216774]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:23 compute-0 python3[216775]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   status --format json | jq .monmap.num_mons _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:24 compute-0 sudo[216800]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:44:24 compute-0 sudo[216800]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:24 compute-0 sudo[216800]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:24 compute-0 podman[216813]: 2025-10-11 01:44:24.091532374 +0000 UTC m=+0.095607781 container create 998a57f9cc7a61ffbec9689e7b3b7b02b70fe7d318249579cafaf199c1d30c45 (image=quay.io/ceph/ceph:v18, name=objective_poitras, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 01:44:24 compute-0 podman[216813]: 2025-10-11 01:44:24.051668964 +0000 UTC m=+0.055744391 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:24 compute-0 systemd[1]: Started libpod-conmon-998a57f9cc7a61ffbec9689e7b3b7b02b70fe7d318249579cafaf199c1d30c45.scope.
Oct 11 01:44:24 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d877f3c08a6cd5f63f09676487b7f3f1b1f76ff0eb191de82debaabd42daedde/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d877f3c08a6cd5f63f09676487b7f3f1b1f76ff0eb191de82debaabd42daedde/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:24 compute-0 sudo[216840]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:24 compute-0 sudo[216840]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:24 compute-0 ceph-mon[191930]: 4.16 scrub starts
Oct 11 01:44:24 compute-0 ceph-mon[191930]: 4.16 scrub ok
Oct 11 01:44:24 compute-0 ceph-mon[191930]: 3.13 scrub starts
Oct 11 01:44:24 compute-0 ceph-mon[191930]: 3.13 scrub ok
Oct 11 01:44:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:24 compute-0 podman[216813]: 2025-10-11 01:44:24.26472063 +0000 UTC m=+0.268796077 container init 998a57f9cc7a61ffbec9689e7b3b7b02b70fe7d318249579cafaf199c1d30c45 (image=quay.io/ceph/ceph:v18, name=objective_poitras, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef)
Oct 11 01:44:24 compute-0 sudo[216840]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:24 compute-0 podman[216813]: 2025-10-11 01:44:24.2816149 +0000 UTC m=+0.285690337 container start 998a57f9cc7a61ffbec9689e7b3b7b02b70fe7d318249579cafaf199c1d30c45 (image=quay.io/ceph/ceph:v18, name=objective_poitras, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:24 compute-0 podman[216813]: 2025-10-11 01:44:24.294101889 +0000 UTC m=+0.298177326 container attach 998a57f9cc7a61ffbec9689e7b3b7b02b70fe7d318249579cafaf199c1d30c45 (image=quay.io/ceph/ceph:v18, name=objective_poitras, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 01:44:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v109: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:24 compute-0 sudo[216871]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:44:24 compute-0 sudo[216871]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:24 compute-0 sudo[216871]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:24 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.14 scrub starts
Oct 11 01:44:24 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.14 scrub ok
Oct 11 01:44:24 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.17 scrub starts
Oct 11 01:44:24 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.17 scrub ok
Oct 11 01:44:24 compute-0 sudo[216896]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e44 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:44:24 compute-0 sudo[216896]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:24 compute-0 sudo[216896]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:24 compute-0 sudo[216922]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ls
Oct 11 01:44:24 compute-0 sudo[216922]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
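The `cephadm ... ls` call dispatched above (01:44:24) inventories the daemons deployed on this host as JSON. A hedged sketch of consuming it, assuming the usual per-daemon fields (`name`, `fsid`, `state`) that cephadm emits; the fsid filter matches the cluster id seen throughout this log:

    import json
    import subprocess

    # Assumption: plain "cephadm ls" on the host; the logged call instead
    # goes through a pinned copy under /var/lib/ceph/<fsid>/.
    daemons = json.loads(subprocess.check_output(["cephadm", "ls"]))
    for d in daemons:
        if d.get("fsid") == "3c7617c3-7a20-523e-a9de-20c0d6ba41da":
            print(d["name"], d.get("state"))  # e.g. "mon.compute-0 running"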
Oct 11 01:44:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json"} v 0) v1
Oct 11 01:44:24 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2767765345' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
Oct 11 01:44:24 compute-0 objective_poitras[216861]: 
Oct 11 01:44:24 compute-0 objective_poitras[216861]: {"fsid":"3c7617c3-7a20-523e-a9de-20c0d6ba41da","health":{"status":"HEALTH_ERR","checks":{"MDS_ALL_DOWN":{"severity":"HEALTH_ERR","summary":{"message":"1 filesystem is offline","count":1},"muted":false},"MDS_UP_LESS_THAN_MAX":{"severity":"HEALTH_WARN","summary":{"message":"1 filesystem is online with fewer MDS than max_mds","count":1},"muted":false}},"mutes":[]},"election_epoch":5,"quorum":[0],"quorum_names":["compute-0"],"quorum_age":195,"monmap":{"epoch":1,"min_mon_release_name":"reef","num_mons":1},"osdmap":{"epoch":44,"num_osds":3,"num_up_osds":3,"osd_up_since":1760147012,"num_in_osds":3,"osd_in_since":1760146975,"num_remapped_pgs":0},"pgmap":{"pgs_by_state":[{"state_name":"active+clean","count":193}],"num_pgs":193,"num_pools":7,"num_objects":2,"data_bytes":459280,"bytes_used":84205568,"bytes_avail":64327720960,"bytes_total":64411926528},"fsmap":{"epoch":2,"id":1,"up":0,"in":0,"max":1,"by_rank":[],"up:standby":0},"mgrmap":{"available":true,"num_standbys":0,"modules":["cephadm","iostat","nfs","restful"],"services":{}},"servicemap":{"epoch":3,"modified":"2025-10-11T01:44:18.398741+0000","services":{"osd":{"daemons":{"summary":"","1":{"start_epoch":0,"start_stamp":"0.000000","gid":0,"addr":"(unrecognized address family 0)/0","metadata":{},"task_status":{}},"2":{"start_epoch":0,"start_stamp":"0.000000","gid":0,"addr":"(unrecognized address family 0)/0","metadata":{},"task_status":{}}}}}},"progress_events":{}}
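The objective_poitras payload is `ceph status --format json`. HEALTH_ERR is expected at this instant: the mds.cephfs spec was saved only seconds earlier (01:44:19), so the filesystem exists with no MDS daemon up yet (MDS_ALL_DOWN, plus the matching MDS_UP_LESS_THAN_MAX warning), while all 193 PGs remain active+clean. The Ansible task that launched this container (01:44:23) pipes the JSON through `jq .monmap.num_mons`, which yields 1 for this single-mon cluster; the equivalent extraction in Python, as a sketch over the payload above:

    import json

    def summarize(status_json: str) -> None:
        """status_json: the JSON document printed by objective_poitras."""
        status = json.loads(status_json)
        print(status["monmap"]["num_mons"])        # -> 1 (mon.compute-0 only)
        print(status["health"]["status"])          # -> "HEALTH_ERR"
        print(sorted(status["health"]["checks"]))  # -> ['MDS_ALL_DOWN',
                                                   #     'MDS_UP_LESS_THAN_MAX']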
Oct 11 01:44:24 compute-0 systemd[1]: libpod-998a57f9cc7a61ffbec9689e7b3b7b02b70fe7d318249579cafaf199c1d30c45.scope: Deactivated successfully.
Oct 11 01:44:24 compute-0 podman[216813]: 2025-10-11 01:44:24.993107339 +0000 UTC m=+0.997182746 container died 998a57f9cc7a61ffbec9689e7b3b7b02b70fe7d318249579cafaf199c1d30c45 (image=quay.io/ceph/ceph:v18, name=objective_poitras, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:25 compute-0 systemd[1]: var-lib-containers-storage-overlay-d877f3c08a6cd5f63f09676487b7f3f1b1f76ff0eb191de82debaabd42daedde-merged.mount: Deactivated successfully.
Oct 11 01:44:25 compute-0 podman[216813]: 2025-10-11 01:44:25.067417048 +0000 UTC m=+1.071492475 container remove 998a57f9cc7a61ffbec9689e7b3b7b02b70fe7d318249579cafaf199c1d30c45 (image=quay.io/ceph/ceph:v18, name=objective_poitras, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507)
Oct 11 01:44:25 compute-0 systemd[1]: libpod-conmon-998a57f9cc7a61ffbec9689e7b3b7b02b70fe7d318249579cafaf199c1d30c45.scope: Deactivated successfully.
Oct 11 01:44:25 compute-0 sudo[216772]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:25 compute-0 ceph-mon[191930]: pgmap v109: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:25 compute-0 ceph-mon[191930]: 3.14 scrub starts
Oct 11 01:44:25 compute-0 ceph-mon[191930]: 3.14 scrub ok
Oct 11 01:44:25 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2767765345' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
Oct 11 01:44:25 compute-0 sudo[217048]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bdgynowqiwzpjawmrkwjozcnlnimijbo ; /usr/bin/python3'
Oct 11 01:44:25 compute-0 sudo[217048]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:25 compute-0 python3[217055]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   mon dump --format json _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:25 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.19 scrub starts
Oct 11 01:44:25 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.19 scrub ok
Oct 11 01:44:25 compute-0 podman[217073]: 2025-10-11 01:44:25.61987021 +0000 UTC m=+0.080132972 container create 30192fb9abc052335a9c1b2b329a5e844a4681ea897547700be3c761502dcec9 (image=quay.io/ceph/ceph:v18, name=pensive_wescoff, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:25 compute-0 systemd[1]: Started libpod-conmon-30192fb9abc052335a9c1b2b329a5e844a4681ea897547700be3c761502dcec9.scope.
Oct 11 01:44:25 compute-0 podman[217078]: 2025-10-11 01:44:25.686131862 +0000 UTC m=+0.119505699 container exec ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 01:44:25 compute-0 podman[217073]: 2025-10-11 01:44:25.600710393 +0000 UTC m=+0.060973175 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:25 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:25 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b7899cb4dd3330dd96e771b53665efbbc806473cc9e80e794d32d9780e9e2f4d/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:25 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b7899cb4dd3330dd96e771b53665efbbc806473cc9e80e794d32d9780e9e2f4d/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:25 compute-0 podman[217073]: 2025-10-11 01:44:25.75702808 +0000 UTC m=+0.217290862 container init 30192fb9abc052335a9c1b2b329a5e844a4681ea897547700be3c761502dcec9 (image=quay.io/ceph/ceph:v18, name=pensive_wescoff, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 01:44:25 compute-0 podman[217073]: 2025-10-11 01:44:25.76885891 +0000 UTC m=+0.229121712 container start 30192fb9abc052335a9c1b2b329a5e844a4681ea897547700be3c761502dcec9 (image=quay.io/ceph/ceph:v18, name=pensive_wescoff, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:25 compute-0 podman[217073]: 2025-10-11 01:44:25.774734884 +0000 UTC m=+0.234997666 container attach 30192fb9abc052335a9c1b2b329a5e844a4681ea897547700be3c761502dcec9 (image=quay.io/ceph/ceph:v18, name=pensive_wescoff, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 01:44:25 compute-0 podman[217078]: 2025-10-11 01:44:25.797482137 +0000 UTC m=+0.230855894 container exec_died ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:44:25 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.10 scrub starts
Oct 11 01:44:26 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.10 scrub ok
Oct 11 01:44:26 compute-0 ceph-mon[191930]: 4.17 scrub starts
Oct 11 01:44:26 compute-0 ceph-mon[191930]: 4.17 scrub ok
Oct 11 01:44:26 compute-0 ceph-mon[191930]: 2.10 scrub starts
Oct 11 01:44:26 compute-0 ceph-mon[191930]: 2.10 scrub ok
Oct 11 01:44:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v110: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 01:44:26 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/594620573' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 01:44:26 compute-0 pensive_wescoff[217107]: 
Oct 11 01:44:26 compute-0 pensive_wescoff[217107]: {"epoch":1,"fsid":"3c7617c3-7a20-523e-a9de-20c0d6ba41da","modified":"2025-10-11T01:41:01.695427Z","created":"2025-10-11T01:41:01.695427Z","min_mon_release":18,"min_mon_release_name":"reef","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks: ":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef"],"optional":[]},"mons":[{"rank":0,"name":"compute-0","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.122.100:3300","nonce":0},{"type":"v1","addr":"192.168.122.100:6789","nonce":0}]},"addr":"192.168.122.100:6789/0","public_addr":"192.168.122.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
Oct 11 01:44:26 compute-0 pensive_wescoff[217107]: dumped monmap epoch 1
Oct 11 01:44:26 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.19 scrub starts
Oct 11 01:44:26 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.19 scrub ok
Oct 11 01:44:26 compute-0 systemd[1]: libpod-30192fb9abc052335a9c1b2b329a5e844a4681ea897547700be3c761502dcec9.scope: Deactivated successfully.
Oct 11 01:44:26 compute-0 podman[217073]: 2025-10-11 01:44:26.500754673 +0000 UTC m=+0.961017435 container died 30192fb9abc052335a9c1b2b329a5e844a4681ea897547700be3c761502dcec9 (image=quay.io/ceph/ceph:v18, name=pensive_wescoff, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3)
Oct 11 01:44:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:44:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:44:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:44:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:44:26 compute-0 systemd[1]: var-lib-containers-storage-overlay-b7899cb4dd3330dd96e771b53665efbbc806473cc9e80e794d32d9780e9e2f4d-merged.mount: Deactivated successfully.
Oct 11 01:44:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:44:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:44:26 compute-0 ceph-mgr[192233]: [progress INFO root] Writing back 10 completed events
Oct 11 01:44:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/progress/completed}] v 0) v1
Oct 11 01:44:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:26 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.1d scrub starts
Oct 11 01:44:26 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.1d scrub ok
Oct 11 01:44:26 compute-0 podman[217073]: 2025-10-11 01:44:26.593368075 +0000 UTC m=+1.053630877 container remove 30192fb9abc052335a9c1b2b329a5e844a4681ea897547700be3c761502dcec9 (image=quay.io/ceph/ceph:v18, name=pensive_wescoff, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 01:44:26 compute-0 systemd[1]: libpod-conmon-30192fb9abc052335a9c1b2b329a5e844a4681ea897547700be3c761502dcec9.scope: Deactivated successfully.
Oct 11 01:44:26 compute-0 sudo[217048]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:26 compute-0 sudo[216922]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:44:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:44:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:44:26 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:44:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:44:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:44:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:26 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev ca81a349-cd55-4d21-ba8e-3f293f702ce4 does not exist
Oct 11 01:44:26 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev c3ba63f0-d987-4859-a29a-82f0dbacaa70 does not exist
Oct 11 01:44:26 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 03749fb2-46e2-4026-926a-80e282df49f0 does not exist
Oct 11 01:44:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:44:26 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:44:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:44:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:44:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:44:26 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:26 compute-0 sudo[217240]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:26 compute-0 sudo[217240]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:26 compute-0 sudo[217240]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:27 compute-0 sudo[217265]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:44:27 compute-0 sudo[217265]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:27 compute-0 sudo[217265]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:27 compute-0 sudo[217334]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ebmuvmphatzegkhlckwrqywlwgolyvhh ; /usr/bin/python3'
Oct 11 01:44:27 compute-0 sudo[217334]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:27 compute-0 sudo[217295]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:27 compute-0 sudo[217295]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:27 compute-0 sudo[217295]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:27 compute-0 ceph-mon[191930]: 4.19 scrub starts
Oct 11 01:44:27 compute-0 ceph-mon[191930]: 4.19 scrub ok
Oct 11 01:44:27 compute-0 ceph-mon[191930]: pgmap v110: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/594620573' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 01:44:27 compute-0 ceph-mon[191930]: 3.19 scrub starts
Oct 11 01:44:27 compute-0 ceph-mon[191930]: 3.19 scrub ok
Oct 11 01:44:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:44:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:44:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:44:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:27 compute-0 sudo[217341]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 01:44:27 compute-0 sudo[217341]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:27 compute-0 python3[217339]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   auth get client.openstack _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:27 compute-0 podman[217366]: 2025-10-11 01:44:27.470798415 +0000 UTC m=+0.099693772 container create 6e2f15de0061fc62304c3b99c9fa602cdc6ea6fb8302c42e1c2a74a082cf5823 (image=quay.io/ceph/ceph:v18, name=agitated_engelbart, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 01:44:27 compute-0 podman[217366]: 2025-10-11 01:44:27.433505081 +0000 UTC m=+0.062400508 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:27 compute-0 systemd[1]: Started libpod-conmon-6e2f15de0061fc62304c3b99c9fa602cdc6ea6fb8302c42e1c2a74a082cf5823.scope.
Oct 11 01:44:27 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9f5e77a04196ffa56360327f0b199187861a36a145b4289402d31dee251dda39/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9f5e77a04196ffa56360327f0b199187861a36a145b4289402d31dee251dda39/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:27 compute-0 podman[217366]: 2025-10-11 01:44:27.592362743 +0000 UTC m=+0.221258120 container init 6e2f15de0061fc62304c3b99c9fa602cdc6ea6fb8302c42e1c2a74a082cf5823 (image=quay.io/ceph/ceph:v18, name=agitated_engelbart, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0)
Oct 11 01:44:27 compute-0 podman[217366]: 2025-10-11 01:44:27.604510442 +0000 UTC m=+0.233405799 container start 6e2f15de0061fc62304c3b99c9fa602cdc6ea6fb8302c42e1c2a74a082cf5823 (image=quay.io/ceph/ceph:v18, name=agitated_engelbart, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:44:27 compute-0 podman[217366]: 2025-10-11 01:44:27.609684165 +0000 UTC m=+0.238579522 container attach 6e2f15de0061fc62304c3b99c9fa602cdc6ea6fb8302c42e1c2a74a082cf5823 (image=quay.io/ceph/ceph:v18, name=agitated_engelbart, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 01:44:27 compute-0 podman[217421]: 2025-10-11 01:44:27.872052661 +0000 UTC m=+0.081589426 container create 133ce437f2dd32ca3fbf11e7a090825efca70405baaa707feaeff1c9bb677758 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_solomon, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2)
Oct 11 01:44:27 compute-0 podman[217421]: 2025-10-11 01:44:27.839093376 +0000 UTC m=+0.048630201 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:27 compute-0 systemd[1]: Started libpod-conmon-133ce437f2dd32ca3fbf11e7a090825efca70405baaa707feaeff1c9bb677758.scope.
Oct 11 01:44:27 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:28 compute-0 podman[217421]: 2025-10-11 01:44:28.004833301 +0000 UTC m=+0.214370046 container init 133ce437f2dd32ca3fbf11e7a090825efca70405baaa707feaeff1c9bb677758 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_solomon, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:44:28 compute-0 podman[217421]: 2025-10-11 01:44:28.012827258 +0000 UTC m=+0.222364003 container start 133ce437f2dd32ca3fbf11e7a090825efca70405baaa707feaeff1c9bb677758 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_solomon, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0)
Oct 11 01:44:28 compute-0 podman[217421]: 2025-10-11 01:44:28.01828623 +0000 UTC m=+0.227822985 container attach 133ce437f2dd32ca3fbf11e7a090825efca70405baaa707feaeff1c9bb677758 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_solomon, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:28 compute-0 loving_solomon[217438]: 167 167
Oct 11 01:44:28 compute-0 systemd[1]: libpod-133ce437f2dd32ca3fbf11e7a090825efca70405baaa707feaeff1c9bb677758.scope: Deactivated successfully.
Oct 11 01:44:28 compute-0 podman[217421]: 2025-10-11 01:44:28.022560336 +0000 UTC m=+0.232097081 container died 133ce437f2dd32ca3fbf11e7a090825efca70405baaa707feaeff1c9bb677758 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_solomon, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS)
Oct 11 01:44:28 compute-0 systemd[1]: var-lib-containers-storage-overlay-db659d1b1f515ab38b33e432f26e819138e4c43768e9e8ee35358894fb8180db-merged.mount: Deactivated successfully.
Oct 11 01:44:28 compute-0 podman[217421]: 2025-10-11 01:44:28.090116516 +0000 UTC m=+0.299653251 container remove 133ce437f2dd32ca3fbf11e7a090825efca70405baaa707feaeff1c9bb677758 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_solomon, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 01:44:28 compute-0 systemd[1]: libpod-conmon-133ce437f2dd32ca3fbf11e7a090825efca70405baaa707feaeff1c9bb677758.scope: Deactivated successfully.
Oct 11 01:44:28 compute-0 podman[217480]: 2025-10-11 01:44:28.306948714 +0000 UTC m=+0.062170371 container create 7c5493567fb8a5c5a33fe549cf77ab8b59a14fe72bf578d817dc65dba20d3405 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_villani, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default)
Oct 11 01:44:28 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.openstack"} v 0) v1
Oct 11 01:44:28 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1204507304' entity='client.admin' cmd=[{"prefix": "auth get", "entity": "client.openstack"}]: dispatch
Oct 11 01:44:28 compute-0 agitated_engelbart[217392]: [client.openstack]
Oct 11 01:44:28 compute-0 agitated_engelbart[217392]:         key = AQCDteloAAAAABAAqDHfKJKrnOb6idG+XXtwTw==
Oct 11 01:44:28 compute-0 agitated_engelbart[217392]:         caps mgr = "allow *"
Oct 11 01:44:28 compute-0 agitated_engelbart[217392]:         caps mon = "profile rbd"
Oct 11 01:44:28 compute-0 agitated_engelbart[217392]:         caps osd = "profile rbd pool=vms, profile rbd pool=volumes, profile rbd pool=backups, profile rbd pool=images, profile rbd pool=cephfs.cephfs.meta, profile rbd pool=cephfs.cephfs.data"
Oct 11 01:44:28 compute-0 ceph-mon[191930]: 4.1d scrub starts
Oct 11 01:44:28 compute-0 ceph-mon[191930]: 4.1d scrub ok
Oct 11 01:44:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1204507304' entity='client.admin' cmd=[{"prefix": "auth get", "entity": "client.openstack"}]: dispatch
Oct 11 01:44:28 compute-0 systemd[1]: libpod-6e2f15de0061fc62304c3b99c9fa602cdc6ea6fb8302c42e1c2a74a082cf5823.scope: Deactivated successfully.
Oct 11 01:44:28 compute-0 podman[217366]: 2025-10-11 01:44:28.360317463 +0000 UTC m=+0.989212840 container died 6e2f15de0061fc62304c3b99c9fa602cdc6ea6fb8302c42e1c2a74a082cf5823 (image=quay.io/ceph/ceph:v18, name=agitated_engelbart, OSD_FLAVOR=default, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 01:44:28 compute-0 podman[217480]: 2025-10-11 01:44:28.281889192 +0000 UTC m=+0.037110819 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:28 compute-0 systemd[1]: Started libpod-conmon-7c5493567fb8a5c5a33fe549cf77ab8b59a14fe72bf578d817dc65dba20d3405.scope.
Oct 11 01:44:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v111: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:28 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:28 compute-0 systemd[1]: var-lib-containers-storage-overlay-9f5e77a04196ffa56360327f0b199187861a36a145b4289402d31dee251dda39-merged.mount: Deactivated successfully.
Oct 11 01:44:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2a178eb401674ede956ce7f4f2c001bb854493451897186d4d1423ff77de5e22/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2a178eb401674ede956ce7f4f2c001bb854493451897186d4d1423ff77de5e22/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2a178eb401674ede956ce7f4f2c001bb854493451897186d4d1423ff77de5e22/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2a178eb401674ede956ce7f4f2c001bb854493451897186d4d1423ff77de5e22/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2a178eb401674ede956ce7f4f2c001bb854493451897186d4d1423ff77de5e22/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:28 compute-0 podman[217366]: 2025-10-11 01:44:28.445605018 +0000 UTC m=+1.074500365 container remove 6e2f15de0061fc62304c3b99c9fa602cdc6ea6fb8302c42e1c2a74a082cf5823 (image=quay.io/ceph/ceph:v18, name=agitated_engelbart, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, OSD_FLAVOR=default)
Oct 11 01:44:28 compute-0 podman[217480]: 2025-10-11 01:44:28.458018065 +0000 UTC m=+0.213239732 container init 7c5493567fb8a5c5a33fe549cf77ab8b59a14fe72bf578d817dc65dba20d3405 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_villani, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:44:28 compute-0 systemd[1]: libpod-conmon-6e2f15de0061fc62304c3b99c9fa602cdc6ea6fb8302c42e1c2a74a082cf5823.scope: Deactivated successfully.
Oct 11 01:44:28 compute-0 podman[217480]: 2025-10-11 01:44:28.47136425 +0000 UTC m=+0.226585877 container start 7c5493567fb8a5c5a33fe549cf77ab8b59a14fe72bf578d817dc65dba20d3405 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_villani, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Oct 11 01:44:28 compute-0 sudo[217334]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:28 compute-0 podman[217480]: 2025-10-11 01:44:28.476550784 +0000 UTC m=+0.231772411 container attach 7c5493567fb8a5c5a33fe549cf77ab8b59a14fe72bf578d817dc65dba20d3405 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_villani, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS)
Oct 11 01:44:29 compute-0 ceph-mon[191930]: pgmap v111: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e44 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:44:29 compute-0 nostalgic_villani[217506]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:44:29 compute-0 nostalgic_villani[217506]: --> relative data size: 1.0
Oct 11 01:44:29 compute-0 nostalgic_villani[217506]: --> All data devices are unavailable
Oct 11 01:44:29 compute-0 podman[157119]: time="2025-10-11T01:44:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:44:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:44:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 30883 "" "Go-http-client/1.1"
Oct 11 01:44:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:44:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6253 "" "Go-http-client/1.1"
Oct 11 01:44:29 compute-0 systemd[1]: libpod-7c5493567fb8a5c5a33fe549cf77ab8b59a14fe72bf578d817dc65dba20d3405.scope: Deactivated successfully.
Oct 11 01:44:29 compute-0 systemd[1]: libpod-7c5493567fb8a5c5a33fe549cf77ab8b59a14fe72bf578d817dc65dba20d3405.scope: Consumed 1.257s CPU time.
Oct 11 01:44:29 compute-0 podman[217480]: 2025-10-11 01:44:29.808385454 +0000 UTC m=+1.563607091 container died 7c5493567fb8a5c5a33fe549cf77ab8b59a14fe72bf578d817dc65dba20d3405 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_villani, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20250507)
Oct 11 01:44:29 compute-0 systemd[1]: var-lib-containers-storage-overlay-2a178eb401674ede956ce7f4f2c001bb854493451897186d4d1423ff77de5e22-merged.mount: Deactivated successfully.
Oct 11 01:44:29 compute-0 podman[217480]: 2025-10-11 01:44:29.88290186 +0000 UTC m=+1.638123467 container remove 7c5493567fb8a5c5a33fe549cf77ab8b59a14fe72bf578d817dc65dba20d3405 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_villani, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:44:29 compute-0 systemd[1]: libpod-conmon-7c5493567fb8a5c5a33fe549cf77ab8b59a14fe72bf578d817dc65dba20d3405.scope: Deactivated successfully.
Oct 11 01:44:29 compute-0 sudo[217341]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:30 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.12 scrub starts
Oct 11 01:44:30 compute-0 sudo[217625]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:30 compute-0 sudo[217625]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:30 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.12 scrub ok
Oct 11 01:44:30 compute-0 sudo[217625]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:30 compute-0 sudo[217674]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:44:30 compute-0 sudo[217674]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:30 compute-0 sudo[217674]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:30 compute-0 sudo[217770]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-srpgfrhzmxvrtudmakbendtijrkechgz ; ANSIBLE_ASYNC_DIR=\'~/.ansible_async\' /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760147069.6018212-34061-218643403847538/async_wrapper.py j994717362462 30 /home/zuul/.ansible/tmp/ansible-tmp-1760147069.6018212-34061-218643403847538/AnsiballZ_command.py _'
Oct 11 01:44:30 compute-0 sudo[217726]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:30 compute-0 sudo[217770]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:30 compute-0 sudo[217726]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:30 compute-0 sudo[217726]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:30 compute-0 ceph-mon[191930]: 2.12 scrub starts
Oct 11 01:44:30 compute-0 ceph-mon[191930]: 2.12 scrub ok
Oct 11 01:44:30 compute-0 podman[217773]: 2025-10-11 01:44:30.400591153 +0000 UTC m=+0.087951744 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 01:44:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v112: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:30 compute-0 sudo[217784]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:44:30 compute-0 sudo[217784]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:30 compute-0 podman[217774]: 2025-10-11 01:44:30.416858005 +0000 UTC m=+0.110374238 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, com.redhat.component=ubi9-minimal-container, name=ubi9-minimal, architecture=x86_64, vendor=Red Hat, Inc., io.buildah.version=1.33.7, release=1755695350, config_id=edpm, distribution-scope=public, vcs-type=git, version=9.6, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.openshift.expose-services=, build-date=2025-08-20T13:12:41, container_name=openstack_network_exporter, maintainer=Red Hat, Inc., managed_by=edpm_ansible, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.openshift.tags=minimal rhel9)
Oct 11 01:44:30 compute-0 ansible-async_wrapper.py[217775]: Invoked with j994717362462 30 /home/zuul/.ansible/tmp/ansible-tmp-1760147069.6018212-34061-218643403847538/AnsiballZ_command.py _
Oct 11 01:44:30 compute-0 ansible-async_wrapper.py[217845]: Starting module and watcher
Oct 11 01:44:30 compute-0 ansible-async_wrapper.py[217845]: Start watching 217846 (30)
Oct 11 01:44:30 compute-0 ansible-async_wrapper.py[217846]: Start module (217846)
Oct 11 01:44:30 compute-0 ansible-async_wrapper.py[217775]: Return async_wrapper task started.
Oct 11 01:44:30 compute-0 sudo[217770]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:30 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.1e scrub starts
Oct 11 01:44:30 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.1e scrub ok
Oct 11 01:44:30 compute-0 python3[217847]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   orch status --format json _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:30 compute-0 podman[217866]: 2025-10-11 01:44:30.785147334 +0000 UTC m=+0.099567837 container create 33879d1feeed77febc8390f5facad990109210ccd7641adead6cf354d922283d (image=quay.io/ceph/ceph:v18, name=jovial_yalow, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:30 compute-0 podman[217866]: 2025-10-11 01:44:30.75087974 +0000 UTC m=+0.065300263 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:30 compute-0 systemd[1]: Started libpod-conmon-33879d1feeed77febc8390f5facad990109210ccd7641adead6cf354d922283d.scope.
Oct 11 01:44:30 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a44460fbbaa25d7f64744d02311520693e14ae8d4f64f37ff1c72da3bb3fb341/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a44460fbbaa25d7f64744d02311520693e14ae8d4f64f37ff1c72da3bb3fb341/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:30 compute-0 podman[217866]: 2025-10-11 01:44:30.960817274 +0000 UTC m=+0.275237827 container init 33879d1feeed77febc8390f5facad990109210ccd7641adead6cf354d922283d (image=quay.io/ceph/ceph:v18, name=jovial_yalow, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default)
Oct 11 01:44:30 compute-0 podman[217866]: 2025-10-11 01:44:30.982422094 +0000 UTC m=+0.296842597 container start 33879d1feeed77febc8390f5facad990109210ccd7641adead6cf354d922283d (image=quay.io/ceph/ceph:v18, name=jovial_yalow, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:30 compute-0 podman[217866]: 2025-10-11 01:44:30.988904966 +0000 UTC m=+0.303325459 container attach 33879d1feeed77febc8390f5facad990109210ccd7641adead6cf354d922283d (image=quay.io/ceph/ceph:v18, name=jovial_yalow, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 01:44:31 compute-0 podman[217904]: 2025-10-11 01:44:31.144639985 +0000 UTC m=+0.091503839 container create 48689d2d6f71246dc8705830d7065a9495da14030464169e3440bba9a89763f3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_shaw, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 01:44:31 compute-0 podman[217904]: 2025-10-11 01:44:31.103088415 +0000 UTC m=+0.049952329 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:31 compute-0 systemd[1]: Started libpod-conmon-48689d2d6f71246dc8705830d7065a9495da14030464169e3440bba9a89763f3.scope.
Oct 11 01:44:31 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:31 compute-0 podman[217904]: 2025-10-11 01:44:31.294195682 +0000 UTC m=+0.241059546 container init 48689d2d6f71246dc8705830d7065a9495da14030464169e3440bba9a89763f3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_shaw, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:44:31 compute-0 podman[217904]: 2025-10-11 01:44:31.308978509 +0000 UTC m=+0.255842333 container start 48689d2d6f71246dc8705830d7065a9495da14030464169e3440bba9a89763f3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_shaw, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.vendor=CentOS)
Oct 11 01:44:31 compute-0 podman[217904]: 2025-10-11 01:44:31.314630357 +0000 UTC m=+0.261494211 container attach 48689d2d6f71246dc8705830d7065a9495da14030464169e3440bba9a89763f3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_shaw, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef)
Oct 11 01:44:31 compute-0 bold_shaw[217920]: 167 167
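Note: the "167 167" printed by bold_shaw is a UID/GID pair; 167 is the fixed ceph user and group id in Ceph container builds. cephadm typically learns it by running stat in a throwaway container before chowning files on the host. The exact entrypoint is not logged here, so this is only a hedged reconstruction:

    # hypothetical: print owner uid/gid of /var/lib/ceph inside the image
    podman run --rm --entrypoint stat \
        quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 \
        -c '%u %g' /var/lib/ceph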
Oct 11 01:44:31 compute-0 systemd[1]: libpod-48689d2d6f71246dc8705830d7065a9495da14030464169e3440bba9a89763f3.scope: Deactivated successfully.
Oct 11 01:44:31 compute-0 ceph-mon[191930]: pgmap v112: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:31 compute-0 openstack_network_exporter[159265]: ERROR   01:44:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:44:31 compute-0 openstack_network_exporter[159265]: ERROR   01:44:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:44:31 compute-0 openstack_network_exporter[159265]: ERROR   01:44:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:44:31 compute-0 openstack_network_exporter[159265]: ERROR   01:44:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:44:31 compute-0 podman[217944]: 2025-10-11 01:44:31.431690612 +0000 UTC m=+0.072900749 container died 48689d2d6f71246dc8705830d7065a9495da14030464169e3440bba9a89763f3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_shaw, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 01:44:31 compute-0 systemd[1]: var-lib-containers-storage-overlay-d5e55ccb6ff82cd6db23868f56408dd20a736403d9bada23ae45bea2adc97e25-merged.mount: Deactivated successfully.
Oct 11 01:44:31 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.1a scrub starts
Oct 11 01:44:31 compute-0 podman[217944]: 2025-10-11 01:44:31.51845153 +0000 UTC m=+0.159661587 container remove 48689d2d6f71246dc8705830d7065a9495da14030464169e3440bba9a89763f3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_shaw, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:31 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.1a scrub ok
Oct 11 01:44:31 compute-0 systemd[1]: libpod-conmon-48689d2d6f71246dc8705830d7065a9495da14030464169e3440bba9a89763f3.scope: Deactivated successfully.
Oct 11 01:44:31 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14258 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
Oct 11 01:44:31 compute-0 jovial_yalow[217899]: 
Oct 11 01:44:31 compute-0 jovial_yalow[217899]: {"available": true, "backend": "cephadm", "paused": false, "workers": 10}
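Note: that line is the module's actual stdout: the orchestrator is reachable (available), backed by cephadm, not paused, and running 10 worker threads. A minimal readiness gate over this output, assuming it was captured to orch_status.json and jq is installed on the host (both assumptions, not shown in the log):

    # exit 0 only if available==true and paused==false
    jq -e '.available and (.paused | not)' orch_status.json >/dev/null \
        && echo "orchestrator ready"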
Oct 11 01:44:31 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.1f scrub starts
Oct 11 01:44:31 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 4.1f scrub ok
Oct 11 01:44:31 compute-0 systemd[1]: libpod-33879d1feeed77febc8390f5facad990109210ccd7641adead6cf354d922283d.scope: Deactivated successfully.
Oct 11 01:44:31 compute-0 conmon[217899]: conmon 33879d1feeed77febc83 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-33879d1feeed77febc8390f5facad990109210ccd7641adead6cf354d922283d.scope/container/memory.events
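Note: conmon's memory.events warning is a benign race: for --rm containers the systemd scope (and its cgroup) is torn down as soon as the workload exits, sometimes before conmon gets to read the final OOM counters. On a container that is still running, the file can be inspected directly (path pattern taken from the warning itself):

    CID=33879d1feeed77febc8390f5facad990109210ccd7641adead6cf354d922283d
    cat /sys/fs/cgroup/machine.slice/libpod-${CID}.scope/container/memory.events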
Oct 11 01:44:31 compute-0 podman[217866]: 2025-10-11 01:44:31.660909226 +0000 UTC m=+0.975329739 container died 33879d1feeed77febc8390f5facad990109210ccd7641adead6cf354d922283d (image=quay.io/ceph/ceph:v18, name=jovial_yalow, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:31 compute-0 systemd[1]: var-lib-containers-storage-overlay-a44460fbbaa25d7f64744d02311520693e14ae8d4f64f37ff1c72da3bb3fb341-merged.mount: Deactivated successfully.
Oct 11 01:44:31 compute-0 podman[217866]: 2025-10-11 01:44:31.738484462 +0000 UTC m=+1.052904925 container remove 33879d1feeed77febc8390f5facad990109210ccd7641adead6cf354d922283d (image=quay.io/ceph/ceph:v18, name=jovial_yalow, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:31 compute-0 systemd[1]: libpod-conmon-33879d1feeed77febc8390f5facad990109210ccd7641adead6cf354d922283d.scope: Deactivated successfully.
Oct 11 01:44:31 compute-0 ansible-async_wrapper.py[217846]: Module complete (217846)
Oct 11 01:44:31 compute-0 sudo[218034]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vkbhycdvmizqvqngfmwadsquuwkgbaip ; /usr/bin/python3'
Oct 11 01:44:31 compute-0 sudo[218034]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
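Note: the sudo lines with BECOME-SUCCESS markers are Ansible's become plugin at work: sudo runs a shell that first echoes a random marker, then execs the module interpreter. Ansible discards everything up to the marker, so PAM banners or sudo noise cannot corrupt the module's JSON reply. The pattern is always of this shape (token copied from the sudo line just above; a fresh one is generated per task):

    sudo /bin/sh -c 'echo BECOME-SUCCESS-vkbhycdvmizqvqngfmwadsquuwkgbaip ; /usr/bin/python3'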
Oct 11 01:44:31 compute-0 podman[218006]: 2025-10-11 01:44:31.818612724 +0000 UTC m=+0.073174097 container create cca6e81f7b9b09691cd939c78e316b4a93213e335948954fe493337cf2bc2785 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_gagarin, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:44:31 compute-0 podman[218006]: 2025-10-11 01:44:31.789153382 +0000 UTC m=+0.043714745 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:31 compute-0 systemd[1]: Started libpod-conmon-cca6e81f7b9b09691cd939c78e316b4a93213e335948954fe493337cf2bc2785.scope.
Oct 11 01:44:31 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:31 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d05ae48b2d9cc86613d4c531f19b45fed7c0251e4352e42f909ece3616564ca0/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:31 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d05ae48b2d9cc86613d4c531f19b45fed7c0251e4352e42f909ece3616564ca0/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:31 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d05ae48b2d9cc86613d4c531f19b45fed7c0251e4352e42f909ece3616564ca0/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:31 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d05ae48b2d9cc86613d4c531f19b45fed7c0251e4352e42f909ece3616564ca0/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
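Note: the recurring "supports timestamps until 2038" kernel messages are informational, not errors: each bind mount into a container touches an XFS filesystem created without the bigtime feature, so its inode timestamps cap at 0x7fffffff seconds (2038-01-19). To check whether a given XFS filesystem has bigtime enabled, assuming a reasonably recent xfsprogs is installed on the host:

    xfs_info /var/lib/containers | grep -o 'bigtime=[01]'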
Oct 11 01:44:31 compute-0 python3[218038]: ansible-ansible.legacy.async_status Invoked with jid=j994717362462.217775 mode=status _async_dir=/root/.ansible_async
Oct 11 01:44:31 compute-0 podman[218006]: 2025-10-11 01:44:31.9704997 +0000 UTC m=+0.225061033 container init cca6e81f7b9b09691cd939c78e316b4a93213e335948954fe493337cf2bc2785 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_gagarin, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0)
Oct 11 01:44:31 compute-0 sudo[218034]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:32 compute-0 podman[218006]: 2025-10-11 01:44:32.001021243 +0000 UTC m=+0.255582616 container start cca6e81f7b9b09691cd939c78e316b4a93213e335948954fe493337cf2bc2785 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_gagarin, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 01:44:32 compute-0 podman[218006]: 2025-10-11 01:44:32.008449013 +0000 UTC m=+0.263010366 container attach cca6e81f7b9b09691cd939c78e316b4a93213e335948954fe493337cf2bc2785 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_gagarin, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:44:32 compute-0 sudo[218093]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wwxzmskjomkzcvuxscwfhpbkkieyjvse ; /usr/bin/python3'
Oct 11 01:44:32 compute-0 sudo[218093]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:32 compute-0 ceph-mon[191930]: 4.1e scrub starts
Oct 11 01:44:32 compute-0 ceph-mon[191930]: 4.1e scrub ok
Oct 11 01:44:32 compute-0 ceph-mon[191930]: 3.1a scrub starts
Oct 11 01:44:32 compute-0 ceph-mon[191930]: 3.1a scrub ok
Oct 11 01:44:32 compute-0 ceph-mon[191930]: from='client.14258 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
Oct 11 01:44:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v113: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:32 compute-0 python3[218095]: ansible-ansible.legacy.async_status Invoked with jid=j994717362462.217775 mode=cleanup _async_dir=/root/.ansible_async
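Note: together with the async_wrapper and async_status entries above, this is Ansible's fire-and-poll pattern: the long-running task was launched asynchronously, polled with mode=status until finished, and its state file under /root/.ansible_async is now removed with mode=cleanup. The same poll can be issued ad hoc from the control node (hypothetical inventory name for this host; jid copied from the log):

    ansible compute-0 -b -m async_status -a 'jid=j994717362462.217775 mode=status'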
Oct 11 01:44:32 compute-0 sudo[218093]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]: {
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:     "0": [
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:         {
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "devices": [
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "/dev/loop3"
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             ],
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "lv_name": "ceph_lv0",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "lv_size": "21470642176",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "name": "ceph_lv0",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "tags": {
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.cluster_name": "ceph",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.crush_device_class": "",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.encrypted": "0",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.osd_id": "0",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.type": "block",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.vdo": "0"
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             },
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "type": "block",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "vg_name": "ceph_vg0"
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:         }
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:     ],
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:     "1": [
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:         {
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "devices": [
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "/dev/loop4"
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             ],
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "lv_name": "ceph_lv1",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "lv_size": "21470642176",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "name": "ceph_lv1",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "tags": {
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.cluster_name": "ceph",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.crush_device_class": "",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.encrypted": "0",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.osd_id": "1",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.type": "block",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.vdo": "0"
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             },
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "type": "block",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "vg_name": "ceph_vg1"
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:         }
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:     ],
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:     "2": [
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:         {
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "devices": [
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "/dev/loop5"
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             ],
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "lv_name": "ceph_lv2",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "lv_size": "21470642176",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "name": "ceph_lv2",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "tags": {
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.cluster_name": "ceph",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.crush_device_class": "",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.encrypted": "0",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.osd_id": "2",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.type": "block",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:                 "ceph.vdo": "0"
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             },
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "type": "block",
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:             "vg_name": "ceph_vg2"
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:         }
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]:     ]
Oct 11 01:44:32 compute-0 recursing_gagarin[218042]: }
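Note: recursing_gagarin's JSON looks like `ceph-volume lvm list --format json` output (the exact command for this container is not logged): one entry per OSD id, each naming the backing logical volume, the physical device behind it (/dev/loop3..5, a loopback-based CI layout), and the ceph.* LV tags from which ceph-volume rebuilds OSD metadata. A quick summary over a saved copy, assuming lvm_list.json and jq (both assumptions):

    # one "osd-id  lv-path  device" row per OSD
    jq -r 'to_entries[] | [.key, .value[0].lv_path, .value[0].devices[0]] | @tsv' lvm_list.json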
Oct 11 01:44:32 compute-0 systemd[1]: libpod-cca6e81f7b9b09691cd939c78e316b4a93213e335948954fe493337cf2bc2785.scope: Deactivated successfully.
Oct 11 01:44:32 compute-0 conmon[218042]: conmon cca6e81f7b9b09691cd9 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-cca6e81f7b9b09691cd939c78e316b4a93213e335948954fe493337cf2bc2785.scope/container/memory.events
Oct 11 01:44:32 compute-0 podman[218006]: 2025-10-11 01:44:32.898686513 +0000 UTC m=+1.153247886 container died cca6e81f7b9b09691cd939c78e316b4a93213e335948954fe493337cf2bc2785 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_gagarin, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 01:44:32 compute-0 systemd[1]: var-lib-containers-storage-overlay-d05ae48b2d9cc86613d4c531f19b45fed7c0251e4352e42f909ece3616564ca0-merged.mount: Deactivated successfully.
Oct 11 01:44:33 compute-0 podman[218006]: 2025-10-11 01:44:33.003800224 +0000 UTC m=+1.258361587 container remove cca6e81f7b9b09691cd939c78e316b4a93213e335948954fe493337cf2bc2785 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_gagarin, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0)
Oct 11 01:44:33 compute-0 systemd[1]: libpod-conmon-cca6e81f7b9b09691cd939c78e316b4a93213e335948954fe493337cf2bc2785.scope: Deactivated successfully.
Oct 11 01:44:33 compute-0 sudo[217784]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:33 compute-0 sudo[218134]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rxaxmxahyrlhnmofdhrqkwtlybbhqoek ; /usr/bin/python3'
Oct 11 01:44:33 compute-0 sudo[218134]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:33 compute-0 sudo[218137]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:33 compute-0 sudo[218137]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:33 compute-0 sudo[218137]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:33 compute-0 python3[218136]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   orch status --format json _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:33 compute-0 sudo[218162]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:44:33 compute-0 sudo[218162]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:33 compute-0 sudo[218162]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:33 compute-0 podman[218182]: 2025-10-11 01:44:33.331761161 +0000 UTC m=+0.076431513 container create 6a8dd5340f4aed8178a5d6c51338c5b37d6757ed82821927b761b8ae44cb081a (image=quay.io/ceph/ceph:v18, name=sleepy_einstein, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 01:44:33 compute-0 systemd[1]: Started libpod-conmon-6a8dd5340f4aed8178a5d6c51338c5b37d6757ed82821927b761b8ae44cb081a.scope.
Oct 11 01:44:33 compute-0 podman[218182]: 2025-10-11 01:44:33.305110602 +0000 UTC m=+0.049780984 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:33 compute-0 ceph-mon[191930]: 4.1f scrub starts
Oct 11 01:44:33 compute-0 ceph-mon[191930]: 4.1f scrub ok
Oct 11 01:44:33 compute-0 ceph-mon[191930]: pgmap v113: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:33 compute-0 sudo[218195]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:33 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:33 compute-0 sudo[218195]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/829c3e1d70ef1b2c970c64e775fdd743fc017bd36a13f44edaf300eed3b4f935/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/829c3e1d70ef1b2c970c64e775fdd743fc017bd36a13f44edaf300eed3b4f935/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:33 compute-0 sudo[218195]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:33 compute-0 podman[218182]: 2025-10-11 01:44:33.494114087 +0000 UTC m=+0.238784519 container init 6a8dd5340f4aed8178a5d6c51338c5b37d6757ed82821927b761b8ae44cb081a (image=quay.io/ceph/ceph:v18, name=sleepy_einstein, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 01:44:33 compute-0 podman[218182]: 2025-10-11 01:44:33.519313843 +0000 UTC m=+0.263984225 container start 6a8dd5340f4aed8178a5d6c51338c5b37d6757ed82821927b761b8ae44cb081a (image=quay.io/ceph/ceph:v18, name=sleepy_einstein, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 01:44:33 compute-0 podman[218182]: 2025-10-11 01:44:33.52700005 +0000 UTC m=+0.271670492 container attach 6a8dd5340f4aed8178a5d6c51338c5b37d6757ed82821927b761b8ae44cb081a (image=quay.io/ceph/ceph:v18, name=sleepy_einstein, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:33 compute-0 sudo[218230]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:44:33 compute-0 sudo[218230]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
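Note: here the staged cephadm binary (the copy under /var/lib/ceph/<fsid>/) runs ceph-volume inside the digest-pinned image to enumerate raw OSD devices. The admin-facing equivalent, a sketch assuming the cephadm package is installed on the host:

    cephadm --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 \
        ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json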
Oct 11 01:44:34 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14260 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
Oct 11 01:44:34 compute-0 sleepy_einstein[218225]: 
Oct 11 01:44:34 compute-0 sleepy_einstein[218225]: {"available": true, "backend": "cephadm", "paused": false, "workers": 10}
Oct 11 01:44:34 compute-0 systemd[1]: libpod-6a8dd5340f4aed8178a5d6c51338c5b37d6757ed82821927b761b8ae44cb081a.scope: Deactivated successfully.
Oct 11 01:44:34 compute-0 podman[218182]: 2025-10-11 01:44:34.177809103 +0000 UTC m=+0.922479495 container died 6a8dd5340f4aed8178a5d6c51338c5b37d6757ed82821927b761b8ae44cb081a (image=quay.io/ceph/ceph:v18, name=sleepy_einstein, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=reef)
Oct 11 01:44:34 compute-0 systemd[1]: var-lib-containers-storage-overlay-829c3e1d70ef1b2c970c64e775fdd743fc017bd36a13f44edaf300eed3b4f935-merged.mount: Deactivated successfully.
Oct 11 01:44:34 compute-0 podman[218182]: 2025-10-11 01:44:34.258816861 +0000 UTC m=+1.003487213 container remove 6a8dd5340f4aed8178a5d6c51338c5b37d6757ed82821927b761b8ae44cb081a (image=quay.io/ceph/ceph:v18, name=sleepy_einstein, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 01:44:34 compute-0 podman[218312]: 2025-10-11 01:44:34.270171277 +0000 UTC m=+0.127095203 container create 8b3eee6b6cbc09ff006013b6bad2928fd07686e42a0a72451a6c8ecaa4de0885 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_carson, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:44:34 compute-0 systemd[1]: libpod-conmon-6a8dd5340f4aed8178a5d6c51338c5b37d6757ed82821927b761b8ae44cb081a.scope: Deactivated successfully.
Oct 11 01:44:34 compute-0 sudo[218134]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:34 compute-0 podman[218312]: 2025-10-11 01:44:34.224483595 +0000 UTC m=+0.081407571 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:34 compute-0 systemd[1]: Started libpod-conmon-8b3eee6b6cbc09ff006013b6bad2928fd07686e42a0a72451a6c8ecaa4de0885.scope.
Oct 11 01:44:34 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:34 compute-0 podman[218312]: 2025-10-11 01:44:34.389406915 +0000 UTC m=+0.246330901 container init 8b3eee6b6cbc09ff006013b6bad2928fd07686e42a0a72451a6c8ecaa4de0885 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_carson, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 01:44:34 compute-0 podman[218312]: 2025-10-11 01:44:34.400119782 +0000 UTC m=+0.257043698 container start 8b3eee6b6cbc09ff006013b6bad2928fd07686e42a0a72451a6c8ecaa4de0885 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_carson, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 01:44:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v114: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:34 compute-0 podman[218312]: 2025-10-11 01:44:34.407654355 +0000 UTC m=+0.264578251 container attach 8b3eee6b6cbc09ff006013b6bad2928fd07686e42a0a72451a6c8ecaa4de0885 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_carson, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 01:44:34 compute-0 ceph-mon[191930]: from='client.14260 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
Oct 11 01:44:34 compute-0 angry_carson[218338]: 167 167
Oct 11 01:44:34 compute-0 systemd[1]: libpod-8b3eee6b6cbc09ff006013b6bad2928fd07686e42a0a72451a6c8ecaa4de0885.scope: Deactivated successfully.
Oct 11 01:44:34 compute-0 podman[218312]: 2025-10-11 01:44:34.411580242 +0000 UTC m=+0.268504128 container died 8b3eee6b6cbc09ff006013b6bad2928fd07686e42a0a72451a6c8ecaa4de0885 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_carson, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:44:34 compute-0 systemd[1]: var-lib-containers-storage-overlay-899d2fee9b2989c121cc7ca8ac13140b116878d488bc118df454449bc5374bf3-merged.mount: Deactivated successfully.
Oct 11 01:44:34 compute-0 podman[218312]: 2025-10-11 01:44:34.478466581 +0000 UTC m=+0.335390477 container remove 8b3eee6b6cbc09ff006013b6bad2928fd07686e42a0a72451a6c8ecaa4de0885 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_carson, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2)
Oct 11 01:44:34 compute-0 systemd[1]: libpod-conmon-8b3eee6b6cbc09ff006013b6bad2928fd07686e42a0a72451a6c8ecaa4de0885.scope: Deactivated successfully.
Oct 11 01:44:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e44 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:44:34 compute-0 podman[218360]: 2025-10-11 01:44:34.691917719 +0000 UTC m=+0.065179320 container create c5a81cd75a4a0c6d8299664fcca6fb553bfe9b412c1071bc141d954f005632be (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=clever_black, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.license=GPLv2)
Oct 11 01:44:34 compute-0 podman[218360]: 2025-10-11 01:44:34.665394734 +0000 UTC m=+0.038656335 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:34 compute-0 systemd[1]: Started libpod-conmon-c5a81cd75a4a0c6d8299664fcca6fb553bfe9b412c1071bc141d954f005632be.scope.
Oct 11 01:44:34 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/838274e7f7d801f3e03ec46793db44778c354316c69c7dd8257a4978aee8d329/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/838274e7f7d801f3e03ec46793db44778c354316c69c7dd8257a4978aee8d329/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/838274e7f7d801f3e03ec46793db44778c354316c69c7dd8257a4978aee8d329/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/838274e7f7d801f3e03ec46793db44778c354316c69c7dd8257a4978aee8d329/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:34 compute-0 podman[218360]: 2025-10-11 01:44:34.851671588 +0000 UTC m=+0.224933229 container init c5a81cd75a4a0c6d8299664fcca6fb553bfe9b412c1071bc141d954f005632be (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=clever_black, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:34 compute-0 podman[218360]: 2025-10-11 01:44:34.876886264 +0000 UTC m=+0.250147865 container start c5a81cd75a4a0c6d8299664fcca6fb553bfe9b412c1071bc141d954f005632be (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=clever_black, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True)
Oct 11 01:44:34 compute-0 podman[218360]: 2025-10-11 01:44:34.883068977 +0000 UTC m=+0.256330618 container attach c5a81cd75a4a0c6d8299664fcca6fb553bfe9b412c1071bc141d954f005632be (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=clever_black, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:44:35 compute-0 sudo[218405]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vpbqxlybrxqdgmhbjrxvrgnnbvxjhjmk ; /usr/bin/python3'
Oct 11 01:44:35 compute-0 sudo[218405]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:35 compute-0 python3[218407]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   orch ls --export -f json _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
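Note: this task differs from the earlier ones only in the subcommand: `orch ls --export -f json` (-f is short for --format) dumps the declared service specifications rather than the orchestrator's health. The export is round-trippable: saved to a file, it can be re-applied with `ceph orch apply -i <file>`, which is what makes it useful as a CI artifact. A sketch of capturing it on the host (volumes and image as in the logged command):

    podman run --rm --net=host --volume /etc/ceph:/etc/ceph:z \
        --entrypoint ceph quay.io/ceph/ceph:v18 \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        orch ls --export --format json > service_specs.json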
Oct 11 01:44:35 compute-0 podman[218408]: 2025-10-11 01:44:35.385093576 +0000 UTC m=+0.095225219 container create 17769a3ba1fb6e81a8d94497c51061fc298f5697626e1554b4438e596b9ac718 (image=quay.io/ceph/ceph:v18, name=eager_moser, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:44:35 compute-0 podman[218408]: 2025-10-11 01:44:35.344426873 +0000 UTC m=+0.054558576 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:35 compute-0 systemd[1]: Started libpod-conmon-17769a3ba1fb6e81a8d94497c51061fc298f5697626e1554b4438e596b9ac718.scope.
Oct 11 01:44:35 compute-0 ceph-mon[191930]: pgmap v114: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:35 compute-0 ansible-async_wrapper.py[217845]: Done in kid B.
Oct 11 01:44:35 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/815a1dd65455a05bbe93d58da7dd484d340cc1ffdc860460c4e582bc0f21c769/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/815a1dd65455a05bbe93d58da7dd484d340cc1ffdc860460c4e582bc0f21c769/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:35 compute-0 podman[218408]: 2025-10-11 01:44:35.527557623 +0000 UTC m=+0.237689336 container init 17769a3ba1fb6e81a8d94497c51061fc298f5697626e1554b4438e596b9ac718 (image=quay.io/ceph/ceph:v18, name=eager_moser, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Oct 11 01:44:35 compute-0 podman[218408]: 2025-10-11 01:44:35.545646699 +0000 UTC m=+0.255778352 container start 17769a3ba1fb6e81a8d94497c51061fc298f5697626e1554b4438e596b9ac718 (image=quay.io/ceph/ceph:v18, name=eager_moser, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 01:44:35 compute-0 podman[218408]: 2025-10-11 01:44:35.553837851 +0000 UTC m=+0.263969494 container attach 17769a3ba1fb6e81a8d94497c51061fc298f5697626e1554b4438e596b9ac718 (image=quay.io/ceph/ceph:v18, name=eager_moser, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS)
Oct 11 01:44:35 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.3 scrub starts
Oct 11 01:44:35 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.3 scrub ok
Oct 11 01:44:35 compute-0 clever_black[218377]: {
Oct 11 01:44:35 compute-0 clever_black[218377]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:44:35 compute-0 clever_black[218377]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:35 compute-0 clever_black[218377]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:44:35 compute-0 clever_black[218377]:         "osd_id": 1,
Oct 11 01:44:35 compute-0 clever_black[218377]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:44:35 compute-0 clever_black[218377]:         "type": "bluestore"
Oct 11 01:44:35 compute-0 clever_black[218377]:     },
Oct 11 01:44:35 compute-0 clever_black[218377]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:44:35 compute-0 clever_black[218377]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:35 compute-0 clever_black[218377]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:44:35 compute-0 clever_black[218377]:         "osd_id": 2,
Oct 11 01:44:35 compute-0 clever_black[218377]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:44:35 compute-0 clever_black[218377]:         "type": "bluestore"
Oct 11 01:44:35 compute-0 clever_black[218377]:     },
Oct 11 01:44:35 compute-0 clever_black[218377]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:44:35 compute-0 clever_black[218377]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:35 compute-0 clever_black[218377]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:44:35 compute-0 clever_black[218377]:         "osd_id": 0,
Oct 11 01:44:35 compute-0 clever_black[218377]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:44:35 compute-0 clever_black[218377]:         "type": "bluestore"
Oct 11 01:44:35 compute-0 clever_black[218377]:     }
Oct 11 01:44:35 compute-0 clever_black[218377]: }
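The JSON block above (container clever_black) is the per-OSD inventory that the ceph-volume run reports, keyed by osd_uuid. A minimal Python sketch for summarizing such a payload into an osd_id -> device table, assuming exactly the structure printed above; the embedded sample is trimmed to one of the three entries shown in the log:

    import json

    # One-entry excerpt of the clever_black inventory above (keys and values verbatim).
    raw = """
    {
        "6af45214-b1a1-4565-9175-30c80d9ec207": {
            "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
            "device": "/dev/mapper/ceph_vg1-ceph_lv1",
            "osd_id": 1,
            "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
            "type": "bluestore"
        }
    }
    """

    inventory = json.loads(raw)
    # Sort by osd_id so the table reads osd.0, osd.1, osd.2 when all entries are present.
    for osd_uuid, osd in sorted(inventory.items(), key=lambda kv: kv[1]["osd_id"]):
        print(f"osd.{osd['osd_id']}  {osd['device']}  ({osd['type']}, fsid {osd['ceph_fsid']})")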
Oct 11 01:44:36 compute-0 systemd[1]: libpod-c5a81cd75a4a0c6d8299664fcca6fb553bfe9b412c1071bc141d954f005632be.scope: Deactivated successfully.
Oct 11 01:44:36 compute-0 systemd[1]: libpod-c5a81cd75a4a0c6d8299664fcca6fb553bfe9b412c1071bc141d954f005632be.scope: Consumed 1.161s CPU time.
Oct 11 01:44:36 compute-0 podman[218360]: 2025-10-11 01:44:36.049878893 +0000 UTC m=+1.423140464 container died c5a81cd75a4a0c6d8299664fcca6fb553bfe9b412c1071bc141d954f005632be (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=clever_black, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS)
Oct 11 01:44:36 compute-0 systemd[1]: var-lib-containers-storage-overlay-838274e7f7d801f3e03ec46793db44778c354316c69c7dd8257a4978aee8d329-merged.mount: Deactivated successfully.
Oct 11 01:44:36 compute-0 podman[218360]: 2025-10-11 01:44:36.174314396 +0000 UTC m=+1.547575967 container remove c5a81cd75a4a0c6d8299664fcca6fb553bfe9b412c1071bc141d954f005632be (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=clever_black, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:36 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14262 -' entity='client.admin' cmd=[{"prefix": "orch ls", "export": true, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
Oct 11 01:44:36 compute-0 eager_moser[218422]: 
Oct 11 01:44:36 compute-0 eager_moser[218422]: [{"placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash"}, {"placement": {"hosts": ["compute-0"]}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds"}, {"placement": {"hosts": ["compute-0"]}, "service_name": "mgr", "service_type": "mgr"}, {"placement": {"hosts": ["compute-0"]}, "service_name": "mon", "service_type": "mon"}, {"placement": {"hosts": ["compute-0"]}, "service_id": "default_drive_group", "service_name": "osd.default_drive_group", "service_type": "osd", "spec": {"data_devices": {"paths": ["/dev/ceph_vg0/ceph_lv0", "/dev/ceph_vg1/ceph_lv1", "/dev/ceph_vg2/ceph_lv2"]}, "filter_logic": "AND", "objectstore": "bluestore"}}, {"networks": ["192.168.122.0/24"], "placement": {"hosts": ["compute-0"]}, "service_id": "rgw", "service_name": "rgw.rgw", "service_type": "rgw", "spec": {"rgw_frontend_port": 8082}}]
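The single line above (container eager_moser) is the `orch ls` export requested at 01:44:36: the full set of cephadm service specs (crash, mds.cephfs, mgr, mon, osd.default_drive_group, rgw.rgw). A hedged sketch of picking one service out of such an export, assuming only the field names visible in that line; the embedded list is an excerpt holding just the rgw spec:

    import json

    # rgw element copied from the orch ls export above (field names verbatim).
    export = json.loads("""[
      {"networks": ["192.168.122.0/24"],
       "placement": {"hosts": ["compute-0"]},
       "service_id": "rgw",
       "service_name": "rgw.rgw",
       "service_type": "rgw",
       "spec": {"rgw_frontend_port": 8082}}
    ]""")

    # Find the rgw spec and report where it is placed and which frontend port it uses.
    rgw = next(s for s in export if s["service_type"] == "rgw")
    print(rgw["service_name"], "on", rgw["placement"]["hosts"],
          "port", rgw["spec"]["rgw_frontend_port"])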
Oct 11 01:44:36 compute-0 systemd[1]: libpod-conmon-c5a81cd75a4a0c6d8299664fcca6fb553bfe9b412c1071bc141d954f005632be.scope: Deactivated successfully.
Oct 11 01:44:36 compute-0 systemd[1]: libpod-17769a3ba1fb6e81a8d94497c51061fc298f5697626e1554b4438e596b9ac718.scope: Deactivated successfully.
Oct 11 01:44:36 compute-0 podman[218408]: 2025-10-11 01:44:36.219214935 +0000 UTC m=+0.929346558 container died 17769a3ba1fb6e81a8d94497c51061fc298f5697626e1554b4438e596b9ac718 (image=quay.io/ceph/ceph:v18, name=eager_moser, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:36 compute-0 sudo[218230]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:44:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:44:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:36 compute-0 ceph-mgr[192233]: [progress INFO root] update: starting ev 2d757a48-e673-4c54-9255-f96642d3fbe1 (Updating rgw.rgw deployment (+1 -> 1))
Oct 11 01:44:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get-or-create", "entity": "client.rgw.rgw.compute-0.fahafy", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} v 0) v1
Oct 11 01:44:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.rgw.compute-0.fahafy", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
Oct 11 01:44:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.rgw.compute-0.fahafy", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
Oct 11 01:44:36 compute-0 systemd[1]: var-lib-containers-storage-overlay-815a1dd65455a05bbe93d58da7dd484d340cc1ffdc860460c4e582bc0f21c769-merged.mount: Deactivated successfully.
Oct 11 01:44:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config set, name=rgw_frontends}] v 0) v1
Oct 11 01:44:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:44:36 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:36 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.serve] Deploying daemon rgw.rgw.compute-0.fahafy on compute-0
Oct 11 01:44:36 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Deploying daemon rgw.rgw.compute-0.fahafy on compute-0
Oct 11 01:44:36 compute-0 podman[218408]: 2025-10-11 01:44:36.31565054 +0000 UTC m=+1.025782163 container remove 17769a3ba1fb6e81a8d94497c51061fc298f5697626e1554b4438e596b9ac718 (image=quay.io/ceph/ceph:v18, name=eager_moser, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2)
Oct 11 01:44:36 compute-0 systemd[1]: libpod-conmon-17769a3ba1fb6e81a8d94497c51061fc298f5697626e1554b4438e596b9ac718.scope: Deactivated successfully.
Oct 11 01:44:36 compute-0 sudo[218405]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:36 compute-0 sudo[218504]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v115: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:36 compute-0 podman[218498]: 2025-10-11 01:44:36.409503028 +0000 UTC m=+0.104605197 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, managed_by=edpm_ansible, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true)
Oct 11 01:44:36 compute-0 sudo[218504]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:36 compute-0 sudo[218504]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:36 compute-0 ceph-mon[191930]: from='client.14262 -' entity='client.admin' cmd=[{"prefix": "orch ls", "export": true, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
Oct 11 01:44:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.rgw.compute-0.fahafy", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
Oct 11 01:44:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.rgw.compute-0.fahafy", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
Oct 11 01:44:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:36 compute-0 sudo[218542]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:44:36 compute-0 sudo[218542]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:36 compute-0 sudo[218542]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:36 compute-0 sudo[218567]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:36 compute-0 sudo[218567]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:36 compute-0 sudo[218567]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:36 compute-0 sudo[218592]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 _orch deploy --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:44:36 compute-0 sudo[218592]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:36 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.14 scrub starts
Oct 11 01:44:36 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.14 scrub ok
Oct 11 01:44:37 compute-0 sudo[218665]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xqkotusynxydzqanwckryslcjsvdmbob ; /usr/bin/python3'
Oct 11 01:44:37 compute-0 sudo[218665]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:37 compute-0 python3[218669]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   orch ps -f json _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:37 compute-0 podman[218680]: 2025-10-11 01:44:37.418199734 +0000 UTC m=+0.093000134 container create 794a868b7e21532ccbe94ca36a8e18262604d2d97af8df70894884c007df684e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_spence, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 01:44:37 compute-0 podman[218680]: 2025-10-11 01:44:37.381110996 +0000 UTC m=+0.055911426 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:37 compute-0 podman[218693]: 2025-10-11 01:44:37.473183271 +0000 UTC m=+0.078901316 container create 8c5ed1e125a0efba7092a624202eb1c59f0e640daafdc6211598190db3c6f9e7 (image=quay.io/ceph/ceph:v18, name=tender_dirac, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3)
Oct 11 01:44:37 compute-0 ceph-mon[191930]: 6.3 scrub starts
Oct 11 01:44:37 compute-0 ceph-mon[191930]: 6.3 scrub ok
Oct 11 01:44:37 compute-0 ceph-mon[191930]: Deploying daemon rgw.rgw.compute-0.fahafy on compute-0
Oct 11 01:44:37 compute-0 ceph-mon[191930]: pgmap v115: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:37 compute-0 ceph-mon[191930]: 2.14 scrub starts
Oct 11 01:44:37 compute-0 ceph-mon[191930]: 2.14 scrub ok
Oct 11 01:44:37 compute-0 systemd[1]: Started libpod-conmon-794a868b7e21532ccbe94ca36a8e18262604d2d97af8df70894884c007df684e.scope.
Oct 11 01:44:37 compute-0 podman[218693]: 2025-10-11 01:44:37.44305996 +0000 UTC m=+0.048778085 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:37 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:37 compute-0 systemd[1]: Started libpod-conmon-8c5ed1e125a0efba7092a624202eb1c59f0e640daafdc6211598190db3c6f9e7.scope.
Oct 11 01:44:37 compute-0 podman[218680]: 2025-10-11 01:44:37.560221578 +0000 UTC m=+0.235021978 container init 794a868b7e21532ccbe94ca36a8e18262604d2d97af8df70894884c007df684e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_spence, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True)
Oct 11 01:44:37 compute-0 podman[218680]: 2025-10-11 01:44:37.582997592 +0000 UTC m=+0.257797962 container start 794a868b7e21532ccbe94ca36a8e18262604d2d97af8df70894884c007df684e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_spence, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:44:37 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:37 compute-0 podman[218680]: 2025-10-11 01:44:37.588468124 +0000 UTC m=+0.263268494 container attach 794a868b7e21532ccbe94ca36a8e18262604d2d97af8df70894884c007df684e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_spence, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 01:44:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9f0a161b88083a1e4c42677211324424b7c5637da6132d44369f1ad08b3767d9/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9f0a161b88083a1e4c42677211324424b7c5637da6132d44369f1ad08b3767d9/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:37 compute-0 wonderful_spence[218706]: 167 167
Oct 11 01:44:37 compute-0 podman[218693]: 2025-10-11 01:44:37.625269183 +0000 UTC m=+0.230987248 container init 8c5ed1e125a0efba7092a624202eb1c59f0e640daafdc6211598190db3c6f9e7 (image=quay.io/ceph/ceph:v18, name=tender_dirac, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
Oct 11 01:44:37 compute-0 systemd[1]: libpod-794a868b7e21532ccbe94ca36a8e18262604d2d97af8df70894884c007df684e.scope: Deactivated successfully.
Oct 11 01:44:37 compute-0 podman[218680]: 2025-10-11 01:44:37.6268566 +0000 UTC m=+0.301656970 container died 794a868b7e21532ccbe94ca36a8e18262604d2d97af8df70894884c007df684e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_spence, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 01:44:37 compute-0 podman[218693]: 2025-10-11 01:44:37.645712328 +0000 UTC m=+0.251430383 container start 8c5ed1e125a0efba7092a624202eb1c59f0e640daafdc6211598190db3c6f9e7 (image=quay.io/ceph/ceph:v18, name=tender_dirac, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:44:37 compute-0 podman[218693]: 2025-10-11 01:44:37.650705556 +0000 UTC m=+0.256423601 container attach 8c5ed1e125a0efba7092a624202eb1c59f0e640daafdc6211598190db3c6f9e7 (image=quay.io/ceph/ceph:v18, name=tender_dirac, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:37 compute-0 systemd[1]: var-lib-containers-storage-overlay-da8e7029f44006c4c35afd956b531e7d07faffabb1fc73c3fcbf1641bfde9140-merged.mount: Deactivated successfully.
Oct 11 01:44:37 compute-0 podman[218680]: 2025-10-11 01:44:37.707209258 +0000 UTC m=+0.382009628 container remove 794a868b7e21532ccbe94ca36a8e18262604d2d97af8df70894884c007df684e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_spence, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 01:44:37 compute-0 systemd[1]: libpod-conmon-794a868b7e21532ccbe94ca36a8e18262604d2d97af8df70894884c007df684e.scope: Deactivated successfully.
Oct 11 01:44:37 compute-0 systemd[1]: Reloading.
Oct 11 01:44:37 compute-0 systemd-rc-local-generator[218751]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:44:38 compute-0 systemd-sysv-generator[218756]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:44:38 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14264 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
Oct 11 01:44:38 compute-0 tender_dirac[218711]: 
Oct 11 01:44:38 compute-0 tender_dirac[218711]: [{"container_id": "cba4b470035c", "container_image_digests": ["quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0", "quay.io/ceph/ceph@sha256:7d8bb82696d5d9cbeae2a2828dc12b6835aa2dded890fa3ac5a733cb66b72b1c"], "container_image_id": "0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1", "container_image_name": "quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0", "cpu_percentage": "0.40%", "created": "2025-10-11T01:42:35.381724Z", "daemon_id": "compute-0", "daemon_name": "crash.compute-0", "daemon_type": "crash", "events": ["2025-10-11T01:42:35.456413Z daemon:crash.compute-0 [INFO] \"Deployed crash.compute-0 on host 'compute-0'\""], "hostname": "compute-0", "is_active": false, "last_refresh": "2025-10-11T01:44:26.681480Z", "memory_usage": 11660165, "ports": [], "service_name": "crash", "started": "2025-10-11T01:42:35.216880Z", "status": 1, "status_desc": "running", "systemd_unit": "ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da@crash.compute-0", "version": "18.2.7"}, {"container_id": "c1da5b49478d", "container_image_digests": ["quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0", "quay.io/ceph/ceph@sha256:7d8bb82696d5d9cbeae2a2828dc12b6835aa2dded890fa3ac5a733cb66b72b1c"], "container_image_id": "0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1", "container_image_name": "quay.io/ceph/ceph:v18", "cpu_percentage": "25.95%", "created": "2025-10-11T01:41:13.117960Z", "daemon_id": "compute-0.bzgmgr", "daemon_name": "mgr.compute-0.bzgmgr", "daemon_type": "mgr", "events": ["2025-10-11T01:43:48.675457Z daemon:mgr.compute-0.bzgmgr [INFO] \"Reconfigured mgr.compute-0.bzgmgr on host 'compute-0'\""], "hostname": "compute-0", "is_active": false, "last_refresh": "2025-10-11T01:44:26.681339Z", "memory_usage": 549558681, "ports": [9283, 8765], "service_name": "mgr", "started": "2025-10-11T01:41:12.905692Z", "status": 1, "status_desc": "running", "systemd_unit": "ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da@mgr.compute-0.bzgmgr", "version": "18.2.7"}, {"container_id": "ab2a7db9f9dd", "container_image_digests": ["quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0", "quay.io/ceph/ceph@sha256:7d8bb82696d5d9cbeae2a2828dc12b6835aa2dded890fa3ac5a733cb66b72b1c"], "container_image_id": "0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1", "container_image_name": "quay.io/ceph/ceph:v18", "cpu_percentage": "3.45%", "created": "2025-10-11T01:41:05.362552Z", "daemon_id": "compute-0", "daemon_name": "mon.compute-0", "daemon_type": "mon", "events": ["2025-10-11T01:43:47.407084Z daemon:mon.compute-0 [INFO] \"Reconfigured mon.compute-0 on host 'compute-0'\""], "hostname": "compute-0", "is_active": false, "last_refresh": "2025-10-11T01:44:26.681078Z", "memory_request": 2147483648, "memory_usage": 39531315, "ports": [], "service_name": "mon", "started": "2025-10-11T01:41:09.269199Z", "status": 1, "status_desc": "running", "systemd_unit": "ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da@mon.compute-0", "version": "18.2.7"}, {"container_id": "a0bc7452156b", "container_image_digests": ["quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0", "quay.io/ceph/ceph@sha256:7d8bb82696d5d9cbeae2a2828dc12b6835aa2dded890fa3ac5a733cb66b72b1c"], "container_image_id": "0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1", "container_image_name": "quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0", "cpu_percentage": "3.80%", "created": "2025-10-11T01:43:10.188313Z", "daemon_id": "0", "daemon_name": "osd.0", "daemon_type": "osd", "events": ["2025-10-11T01:43:10.259980Z daemon:osd.0 [INFO] \"Deployed osd.0 on host 'compute-0'\""], "hostname": "compute-0", "is_active": false, "last_refresh": "2025-10-11T01:44:26.681615Z", "memory_request": 4294967296, "memory_usage": 66605547, "ports": [], "service_name": "osd.default_drive_group", "started": "2025-10-11T01:43:09.992369Z", "status": 1, "status_desc": "running", "systemd_unit": "ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da@osd.0", "version": "18.2.7"}, {"container_id": "19dc149c8af7", "container_image_digests": ["quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0", "quay.io/ceph/ceph@sha256:7d8bb82696d5d9cbeae2a2828dc12b6835aa2dded890fa3ac5a733cb66b72b1c"], "container_image_id": "0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1", "container_image_name": "quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0", "cpu_percentage": "4.67%", "created": "2025-10-11T01:43:17.246026Z", "daemon_id": "1", "daemon_name": "osd.1", "daemon_type": "osd", "events": ["2025-10-11T01:43:17.336738Z daemon:osd.1 [INFO] \"Deployed osd.1 on host 'compute-0'\""], "hostname": "compute-0", "is_active": false, "last_refresh": "2025-10-11T01:44:26.681742Z", "memory_request": 4294967296, "memory_usage": 68681728, "ports": [], "service_name": "osd.default_drive_group", "started": "2025-10-11T01:43:17.015517Z", "status": 1, "status_desc": "running", "systemd_unit": "ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da@osd.1", "version": "18.2.7"}, {"container_id": "828dce3fe0dd", "container_image_digests": ["quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0", "quay.io/ceph/ceph@sha256:7d8bb82696d5d9cbeae2a2828dc12b6835aa2dded890fa3ac5a733cb66b72b1c"], "container_image_id": "0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1", "container_image_name": "quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0", "cpu_percentage": "4.28%", "created": "2025-10-11T01:43:24.435951Z", "daemon_id": "2", "daemon_name": "osd.2", "daemon_type": "osd", "events": ["2025-10-11T01:43:24.503350Z daemon:osd.2 [INFO] \"Deployed osd.2 on host 'compute-0'\""], "hostname": "compute-0", "is_active": false, "last_refresh": "2025-10-11T01:44:26.681867Z", "memory_request": 4294967296, "memory_usage": 68660756, "ports": [], "service_name": "osd.default_drive_group", "started": "2025-10-11T01:43:24.246436Z", "status": 1, "status_desc": "running", "systemd_unit": "ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da@osd.2", "version": "18.2.7"}]
Oct 11 01:44:38 compute-0 systemd[1]: libpod-8c5ed1e125a0efba7092a624202eb1c59f0e640daafdc6211598190db3c6f9e7.scope: Deactivated successfully.
Oct 11 01:44:38 compute-0 conmon[218711]: conmon 8c5ed1e125a0efba7092 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-8c5ed1e125a0efba7092a624202eb1c59f0e640daafdc6211598190db3c6f9e7.scope/container/memory.events
Oct 11 01:44:38 compute-0 podman[218693]: 2025-10-11 01:44:38.370678845 +0000 UTC m=+0.976396880 container died 8c5ed1e125a0efba7092a624202eb1c59f0e640daafdc6211598190db3c6f9e7 (image=quay.io/ceph/ceph:v18, name=tender_dirac, ceph=True, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:38 compute-0 systemd[1]: Reloading.
Oct 11 01:44:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v116: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:38 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.5 scrub starts
Oct 11 01:44:38 compute-0 systemd-rc-local-generator[218831]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:44:38 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.5 scrub ok
Oct 11 01:44:38 compute-0 systemd-sysv-generator[218834]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:44:38 compute-0 systemd[1]: var-lib-containers-storage-overlay-9f0a161b88083a1e4c42677211324424b7c5637da6132d44369f1ad08b3767d9-merged.mount: Deactivated successfully.
Oct 11 01:44:38 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.1a scrub starts
Oct 11 01:44:38 compute-0 systemd[1]: Starting Ceph rgw.rgw.compute-0.fahafy for 3c7617c3-7a20-523e-a9de-20c0d6ba41da...
Oct 11 01:44:38 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.1a scrub ok
Oct 11 01:44:38 compute-0 podman[218693]: 2025-10-11 01:44:38.967188811 +0000 UTC m=+1.572906886 container remove 8c5ed1e125a0efba7092a624202eb1c59f0e640daafdc6211598190db3c6f9e7 (image=quay.io/ceph/ceph:v18, name=tender_dirac, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 01:44:38 compute-0 systemd[1]: libpod-conmon-8c5ed1e125a0efba7092a624202eb1c59f0e640daafdc6211598190db3c6f9e7.scope: Deactivated successfully.
Oct 11 01:44:39 compute-0 sudo[218665]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:39 compute-0 podman[218884]: 2025-10-11 01:44:39.391181881 +0000 UTC m=+0.091482949 container create 9991b6cd25db18c2b77683c6c5f05ed3220b856c3c8a06c46c33fbc1d5132d01 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-rgw-rgw-compute-0-fahafy, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2)
Oct 11 01:44:39 compute-0 podman[218884]: 2025-10-11 01:44:39.353510136 +0000 UTC m=+0.053811244 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:39 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4a61ca0fa837540961a0bade505b662e6f163e3dbbaf4fa8ba8cc6b71d3c38c9/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:39 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4a61ca0fa837540961a0bade505b662e6f163e3dbbaf4fa8ba8cc6b71d3c38c9/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:39 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4a61ca0fa837540961a0bade505b662e6f163e3dbbaf4fa8ba8cc6b71d3c38c9/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:39 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4a61ca0fa837540961a0bade505b662e6f163e3dbbaf4fa8ba8cc6b71d3c38c9/merged/var/lib/ceph/radosgw/ceph-rgw.rgw.compute-0.fahafy supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:39 compute-0 ceph-mon[191930]: from='client.14264 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
Oct 11 01:44:39 compute-0 ceph-mon[191930]: pgmap v116: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:39 compute-0 ceph-mon[191930]: 2.1a scrub starts
Oct 11 01:44:39 compute-0 ceph-mon[191930]: 2.1a scrub ok
Oct 11 01:44:39 compute-0 podman[218884]: 2025-10-11 01:44:39.526280529 +0000 UTC m=+0.226581577 container init 9991b6cd25db18c2b77683c6c5f05ed3220b856c3c8a06c46c33fbc1d5132d01 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-rgw-rgw-compute-0-fahafy, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 01:44:39 compute-0 podman[218884]: 2025-10-11 01:44:39.548117366 +0000 UTC m=+0.248418394 container start 9991b6cd25db18c2b77683c6c5f05ed3220b856c3c8a06c46c33fbc1d5132d01 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-rgw-rgw-compute-0-fahafy, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:44:39 compute-0 bash[218884]: 9991b6cd25db18c2b77683c6c5f05ed3220b856c3c8a06c46c33fbc1d5132d01
Oct 11 01:44:39 compute-0 systemd[1]: Started Ceph rgw.rgw.compute-0.fahafy for 3c7617c3-7a20-523e-a9de-20c0d6ba41da.
Oct 11 01:44:39 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.7 scrub starts
Oct 11 01:44:39 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.7 scrub ok
Oct 11 01:44:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e44 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:44:39 compute-0 sudo[218592]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:39 compute-0 radosgw[218904]: deferred set uid:gid to 167:167 (ceph:ceph)
Oct 11 01:44:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:44:39 compute-0 radosgw[218904]: ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable), process radosgw, pid 2
Oct 11 01:44:39 compute-0 radosgw[218904]: framework: beast
Oct 11 01:44:39 compute-0 radosgw[218904]: framework conf key: endpoint, val: 192.168.122.100:8082
Oct 11 01:44:39 compute-0 radosgw[218904]: init_numa not setting numa affinity
Oct 11 01:44:39 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:44:39 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.rgw.rgw}] v 0) v1
Oct 11 01:44:39 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:39 compute-0 ceph-mgr[192233]: [progress INFO root] complete: finished ev 2d757a48-e673-4c54-9255-f96642d3fbe1 (Updating rgw.rgw deployment (+1 -> 1))
Oct 11 01:44:39 compute-0 ceph-mgr[192233]: [progress INFO root] Completed event 2d757a48-e673-4c54-9255-f96642d3fbe1 (Updating rgw.rgw deployment (+1 -> 1)) in 3 seconds
Oct 11 01:44:39 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.services.cephadmservice] Saving service rgw.rgw spec with placement compute-0
Oct 11 01:44:39 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Saving service rgw.rgw spec with placement compute-0
Oct 11 01:44:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.rgw.rgw}] v 0) v1
Oct 11 01:44:39 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.rgw.rgw}] v 0) v1
Oct 11 01:44:39 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:39 compute-0 ceph-mgr[192233]: [progress INFO root] update: starting ev 5b11ec4a-6274-444f-8285-7e7f90df30f4 (Updating mds.cephfs deployment (+1 -> 1))
Oct 11 01:44:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get-or-create", "entity": "mds.cephfs.compute-0.mxkspn", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]} v 0) v1
Oct 11 01:44:39 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get-or-create", "entity": "mds.cephfs.compute-0.mxkspn", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch
Oct 11 01:44:39 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "auth get-or-create", "entity": "mds.cephfs.compute-0.mxkspn", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished
Oct 11 01:44:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:44:39 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:39 compute-0 ceph-mgr[192233]: [cephadm INFO cephadm.serve] Deploying daemon mds.cephfs.compute-0.mxkspn on compute-0
Oct 11 01:44:39 compute-0 ceph-mgr[192233]: log_channel(cephadm) log [INF] : Deploying daemon mds.cephfs.compute-0.mxkspn on compute-0
Oct 11 01:44:39 compute-0 sudo[218966]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:39 compute-0 sudo[218966]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:39 compute-0 sudo[218966]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:39 compute-0 sudo[219013]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-grqctnyzakrqfhvyohkpupqkzdfwurdm ; /usr/bin/python3'
Oct 11 01:44:39 compute-0 sudo[219013]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:39 compute-0 sudo[219016]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:44:39 compute-0 sudo[219016]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:39 compute-0 sudo[219016]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:39 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.1e scrub starts
Oct 11 01:44:39 compute-0 python3[219017]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   -s -f json _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:40 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 2.1e scrub ok
Oct 11 01:44:40 compute-0 sudo[219042]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:40 compute-0 podman[219045]: 2025-10-11 01:44:40.103906697 +0000 UTC m=+0.082902255 container create f14bb7f8e07967dcc240538e975db6d8a78f56cc13128f32dbb1e1a805e67991 (image=quay.io/ceph/ceph:v18, name=dreamy_hofstadter, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True)
Oct 11 01:44:40 compute-0 sudo[219042]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:40 compute-0 sudo[219042]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:40 compute-0 podman[219045]: 2025-10-11 01:44:40.070067195 +0000 UTC m=+0.049062713 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:40 compute-0 systemd[1]: Started libpod-conmon-f14bb7f8e07967dcc240538e975db6d8a78f56cc13128f32dbb1e1a805e67991.scope.
Oct 11 01:44:40 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:40 compute-0 sudo[219079]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 _orch deploy --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da
Oct 11 01:44:40 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b9ff5c6a1e425a5b90c684910b45241e97188e2bc489fd5ba0700ba366a66339/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:40 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b9ff5c6a1e425a5b90c684910b45241e97188e2bc489fd5ba0700ba366a66339/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:40 compute-0 sudo[219079]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:40 compute-0 podman[219045]: 2025-10-11 01:44:40.246978101 +0000 UTC m=+0.225973719 container init f14bb7f8e07967dcc240538e975db6d8a78f56cc13128f32dbb1e1a805e67991 (image=quay.io/ceph/ceph:v18, name=dreamy_hofstadter, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:44:40 compute-0 podman[219045]: 2025-10-11 01:44:40.265110878 +0000 UTC m=+0.244106406 container start f14bb7f8e07967dcc240538e975db6d8a78f56cc13128f32dbb1e1a805e67991 (image=quay.io/ceph/ceph:v18, name=dreamy_hofstadter, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:40 compute-0 podman[219045]: 2025-10-11 01:44:40.271280871 +0000 UTC m=+0.250276429 container attach f14bb7f8e07967dcc240538e975db6d8a78f56cc13128f32dbb1e1a805e67991 (image=quay.io/ceph/ceph:v18, name=dreamy_hofstadter, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2)
Oct 11 01:44:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v117: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:40 compute-0 ceph-mon[191930]: 6.5 scrub starts
Oct 11 01:44:40 compute-0 ceph-mon[191930]: 6.5 scrub ok
Oct 11 01:44:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:40 compute-0 ceph-mon[191930]: Saving service rgw.rgw spec with placement compute-0
Oct 11 01:44:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get-or-create", "entity": "mds.cephfs.compute-0.mxkspn", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch
Oct 11 01:44:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "auth get-or-create", "entity": "mds.cephfs.compute-0.mxkspn", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished
Oct 11 01:44:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:40 compute-0 ceph-mon[191930]: Deploying daemon mds.cephfs.compute-0.mxkspn on compute-0
Oct 11 01:44:40 compute-0 ceph-mon[191930]: 2.1e scrub starts
Oct 11 01:44:40 compute-0 ceph-mon[191930]: 2.1e scrub ok
Oct 11 01:44:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e44 do_prune osdmap full prune enabled
Oct 11 01:44:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e45 e45: 3 total, 3 up, 3 in
Oct 11 01:44:40 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e45: 3 total, 3 up, 3 in
Oct 11 01:44:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"} v 0) v1
Oct 11 01:44:40 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/178430499' entity='client.rgw.rgw.compute-0.fahafy' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch
Oct 11 01:44:40 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 45 pg[8.0( empty local-lis/les=0/0 n=0 ec=45/45 lis/c=0/0 les/c/f=0/0/0 sis=45) [1] r=0 lpr=45 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:40 compute-0 podman[219168]: 2025-10-11 01:44:40.782890394 +0000 UTC m=+0.077774773 container create a47fd7838bf664e5496be2e889b5b4b1279507d55ddef227f086cac501ecdaf9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_grothendieck, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:40 compute-0 systemd[1]: Started libpod-conmon-a47fd7838bf664e5496be2e889b5b4b1279507d55ddef227f086cac501ecdaf9.scope.
Oct 11 01:44:40 compute-0 podman[219168]: 2025-10-11 01:44:40.753006899 +0000 UTC m=+0.047891348 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:40 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json"} v 0) v1
Oct 11 01:44:40 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3630717625' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
Oct 11 01:44:40 compute-0 podman[219168]: 2025-10-11 01:44:40.933802571 +0000 UTC m=+0.228686990 container init a47fd7838bf664e5496be2e889b5b4b1279507d55ddef227f086cac501ecdaf9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_grothendieck, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:40 compute-0 dreamy_hofstadter[219104]: 
Oct 11 01:44:40 compute-0 dreamy_hofstadter[219104]: {"fsid":"3c7617c3-7a20-523e-a9de-20c0d6ba41da","health":{"status":"HEALTH_ERR","checks":{"MDS_ALL_DOWN":{"severity":"HEALTH_ERR","summary":{"message":"1 filesystem is offline","count":1},"muted":false},"MDS_UP_LESS_THAN_MAX":{"severity":"HEALTH_WARN","summary":{"message":"1 filesystem is online with fewer MDS than max_mds","count":1},"muted":false}},"mutes":[]},"election_epoch":5,"quorum":[0],"quorum_names":["compute-0"],"quorum_age":211,"monmap":{"epoch":1,"min_mon_release_name":"reef","num_mons":1},"osdmap":{"epoch":45,"num_osds":3,"num_up_osds":3,"osd_up_since":1760147012,"num_in_osds":3,"osd_in_since":1760146975,"num_remapped_pgs":0},"pgmap":{"pgs_by_state":[{"state_name":"active+clean","count":193}],"num_pgs":193,"num_pools":7,"num_objects":2,"data_bytes":459280,"bytes_used":84217856,"bytes_avail":64327708672,"bytes_total":64411926528},"fsmap":{"epoch":2,"id":1,"up":0,"in":0,"max":1,"by_rank":[],"up:standby":0},"mgrmap":{"available":true,"num_standbys":0,"modules":["cephadm","iostat","nfs","restful"],"services":{}},"servicemap":{"epoch":5,"modified":"2025-10-11T01:44:34.405543+0000","services":{}},"progress_events":{"5b11ec4a-6274-444f-8285-7e7f90df30f4":{"message":"Updating mds.cephfs deployment (+1 -> 1) (0s)\n      [............................] ","progress":0,"add_to_ceph_s":true}}}
Oct 11 01:44:40 compute-0 podman[219168]: 2025-10-11 01:44:40.951144274 +0000 UTC m=+0.246028693 container start a47fd7838bf664e5496be2e889b5b4b1279507d55ddef227f086cac501ecdaf9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_grothendieck, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 01:44:40 compute-0 podman[219168]: 2025-10-11 01:44:40.958061169 +0000 UTC m=+0.252945578 container attach a47fd7838bf664e5496be2e889b5b4b1279507d55ddef227f086cac501ecdaf9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_grothendieck, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:44:40 compute-0 jovial_grothendieck[219184]: 167 167
Oct 11 01:44:40 compute-0 systemd[1]: libpod-a47fd7838bf664e5496be2e889b5b4b1279507d55ddef227f086cac501ecdaf9.scope: Deactivated successfully.
Oct 11 01:44:40 compute-0 podman[219168]: 2025-10-11 01:44:40.964145039 +0000 UTC m=+0.259029448 container died a47fd7838bf664e5496be2e889b5b4b1279507d55ddef227f086cac501ecdaf9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_grothendieck, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:40 compute-0 systemd[1]: libpod-f14bb7f8e07967dcc240538e975db6d8a78f56cc13128f32dbb1e1a805e67991.scope: Deactivated successfully.
Oct 11 01:44:40 compute-0 podman[219045]: 2025-10-11 01:44:40.986641834 +0000 UTC m=+0.965637362 container died f14bb7f8e07967dcc240538e975db6d8a78f56cc13128f32dbb1e1a805e67991 (image=quay.io/ceph/ceph:v18, name=dreamy_hofstadter, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True)
Oct 11 01:44:41 compute-0 systemd[1]: var-lib-containers-storage-overlay-b9ff5c6a1e425a5b90c684910b45241e97188e2bc489fd5ba0700ba366a66339-merged.mount: Deactivated successfully.
Oct 11 01:44:41 compute-0 systemd[1]: var-lib-containers-storage-overlay-bca0bacd24e71551a65b21c14089ba4da89fbb182fadc1716258682d07c66d24-merged.mount: Deactivated successfully.
Oct 11 01:44:41 compute-0 podman[219045]: 2025-10-11 01:44:41.064729096 +0000 UTC m=+1.043724614 container remove f14bb7f8e07967dcc240538e975db6d8a78f56cc13128f32dbb1e1a805e67991 (image=quay.io/ceph/ceph:v18, name=dreamy_hofstadter, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:44:41 compute-0 podman[219168]: 2025-10-11 01:44:41.088447168 +0000 UTC m=+0.383331547 container remove a47fd7838bf664e5496be2e889b5b4b1279507d55ddef227f086cac501ecdaf9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_grothendieck, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:44:41 compute-0 sudo[219013]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:41 compute-0 systemd[1]: libpod-conmon-f14bb7f8e07967dcc240538e975db6d8a78f56cc13128f32dbb1e1a805e67991.scope: Deactivated successfully.
Oct 11 01:44:41 compute-0 systemd[1]: libpod-conmon-a47fd7838bf664e5496be2e889b5b4b1279507d55ddef227f086cac501ecdaf9.scope: Deactivated successfully.
Oct 11 01:44:41 compute-0 systemd[1]: Reloading.
Oct 11 01:44:41 compute-0 podman[219210]: 2025-10-11 01:44:41.182294886 +0000 UTC m=+0.118936492 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., vcs-type=git, version=9.4, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm, release=1214.1726694543, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, architecture=x86_64, name=ubi9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.openshift.tags=base rhel9, release-0.7.12=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, com.redhat.component=ubi9-container, container_name=kepler, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.k8s.display-name=Red Hat Universal Base Image 9, distribution-scope=public, summary=Provides the latest release of Red Hat Universal Base Image 9., io.buildah.version=1.29.0, io.openshift.expose-services=, vendor=Red Hat, Inc., managed_by=edpm_ansible)
Oct 11 01:44:41 compute-0 podman[219205]: 2025-10-11 01:44:41.189677894 +0000 UTC m=+0.132348948 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 01:44:41 compute-0 podman[219225]: 2025-10-11 01:44:41.233963725 +0000 UTC m=+0.130689269 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_managed=true, config_id=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team)
Oct 11 01:44:41 compute-0 systemd-rc-local-generator[219307]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:44:41 compute-0 systemd-sysv-generator[219310]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:44:41 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.1c scrub starts
Oct 11 01:44:41 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 3.1c scrub ok
Oct 11 01:44:41 compute-0 ceph-mon[191930]: 6.7 scrub starts
Oct 11 01:44:41 compute-0 ceph-mon[191930]: 6.7 scrub ok
Oct 11 01:44:41 compute-0 ceph-mon[191930]: pgmap v117: 193 pgs: 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:41 compute-0 ceph-mon[191930]: osdmap e45: 3 total, 3 up, 3 in
Oct 11 01:44:41 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/178430499' entity='client.rgw.rgw.compute-0.fahafy' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch
Oct 11 01:44:41 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3630717625' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
Oct 11 01:44:41 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.9 scrub starts
Oct 11 01:44:41 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.9 scrub ok
Oct 11 01:44:41 compute-0 ceph-mgr[192233]: [progress INFO root] Writing back 11 completed events
Oct 11 01:44:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/progress/completed}] v 0) v1
Oct 11 01:44:41 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:41 compute-0 ceph-mgr[192233]: [progress WARNING root] Starting Global Recovery Event,1 pgs not in active + clean state
Oct 11 01:44:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e45 do_prune osdmap full prune enabled
Oct 11 01:44:41 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/178430499' entity='client.rgw.rgw.compute-0.fahafy' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished
Oct 11 01:44:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e46 e46: 3 total, 3 up, 3 in
Oct 11 01:44:41 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e46: 3 total, 3 up, 3 in
Oct 11 01:44:41 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 46 pg[8.0( empty local-lis/les=45/46 n=0 ec=45/45 lis/c=0/0 les/c/f=0/0/0 sis=45) [1] r=0 lpr=45 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:41 compute-0 systemd[1]: Reloading.
Oct 11 01:44:41 compute-0 systemd-sysv-generator[219363]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:44:41 compute-0 systemd-rc-local-generator[219357]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:44:42 compute-0 sudo[219380]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-plyobtquljanrieypqgxjqwzydtycsgv ; /usr/bin/python3'
Oct 11 01:44:42 compute-0 systemd[1]: Starting Ceph mds.cephfs.compute-0.mxkspn for 3c7617c3-7a20-523e-a9de-20c0d6ba41da...
Oct 11 01:44:42 compute-0 sudo[219380]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:42 compute-0 python3[219388]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   config dump -f json _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v120: 194 pgs: 1 unknown, 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:42 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.a scrub starts
Oct 11 01:44:42 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.a scrub ok
Oct 11 01:44:42 compute-0 podman[219409]: 2025-10-11 01:44:42.544605717 +0000 UTC m=+0.101645968 container create 240da6ee53204ace6bb9a7df7f9f687c1fc111b68be5eeec967267ede720fc65 (image=quay.io/ceph/ceph:v18, name=frosty_clarke, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 01:44:42 compute-0 ceph-mon[191930]: 3.1c scrub starts
Oct 11 01:44:42 compute-0 ceph-mon[191930]: 3.1c scrub ok
Oct 11 01:44:42 compute-0 ceph-mon[191930]: 6.9 scrub starts
Oct 11 01:44:42 compute-0 ceph-mon[191930]: 6.9 scrub ok
Oct 11 01:44:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:42 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/178430499' entity='client.rgw.rgw.compute-0.fahafy' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished
Oct 11 01:44:42 compute-0 ceph-mon[191930]: osdmap e46: 3 total, 3 up, 3 in
Oct 11 01:44:42 compute-0 ceph-mon[191930]: 6.a scrub starts
Oct 11 01:44:42 compute-0 ceph-mon[191930]: 6.a scrub ok
Oct 11 01:44:42 compute-0 podman[219409]: 2025-10-11 01:44:42.51227328 +0000 UTC m=+0.069313571 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:42 compute-0 systemd[1]: Started libpod-conmon-240da6ee53204ace6bb9a7df7f9f687c1fc111b68be5eeec967267ede720fc65.scope.
Oct 11 01:44:42 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/488bc478dc264bde42067352b8e9245b805a21b563bae456e70796ef9a8177de/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/488bc478dc264bde42067352b8e9245b805a21b563bae456e70796ef9a8177de/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:42 compute-0 podman[219447]: 2025-10-11 01:44:42.669836903 +0000 UTC m=+0.085489804 container create 55d9a30272fa11c4b8e3677e4cd0b399b896ea08d7de31bb83a17aec5be7d9b2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mds-cephfs-compute-0-mxkspn, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e46 do_prune osdmap full prune enabled
Oct 11 01:44:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e47 e47: 3 total, 3 up, 3 in
Oct 11 01:44:42 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e47: 3 total, 3 up, 3 in
Oct 11 01:44:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} v 0) v1
Oct 11 01:44:42 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/178430499' entity='client.rgw.rgw.compute-0.fahafy' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch
Oct 11 01:44:42 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 47 pg[9.0( empty local-lis/les=0/0 n=0 ec=47/47 lis/c=0/0 les/c/f=0/0/0 sis=47) [1] r=0 lpr=47 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:42 compute-0 podman[219409]: 2025-10-11 01:44:42.728716068 +0000 UTC m=+0.285756349 container init 240da6ee53204ace6bb9a7df7f9f687c1fc111b68be5eeec967267ede720fc65 (image=quay.io/ceph/ceph:v18, name=frosty_clarke, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:42 compute-0 podman[219447]: 2025-10-11 01:44:42.637955997 +0000 UTC m=+0.053608908 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:42 compute-0 podman[219409]: 2025-10-11 01:44:42.742832612 +0000 UTC m=+0.299872863 container start 240da6ee53204ace6bb9a7df7f9f687c1fc111b68be5eeec967267ede720fc65 (image=quay.io/ceph/ceph:v18, name=frosty_clarke, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0)
Oct 11 01:44:42 compute-0 podman[219409]: 2025-10-11 01:44:42.74777498 +0000 UTC m=+0.304815231 container attach 240da6ee53204ace6bb9a7df7f9f687c1fc111b68be5eeec967267ede720fc65 (image=quay.io/ceph/ceph:v18, name=frosty_clarke, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8b84e96124f394e5f8f6bf56a5fbd0ea499a28acb836a8fd16effdfe946d9e52/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8b84e96124f394e5f8f6bf56a5fbd0ea499a28acb836a8fd16effdfe946d9e52/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8b84e96124f394e5f8f6bf56a5fbd0ea499a28acb836a8fd16effdfe946d9e52/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8b84e96124f394e5f8f6bf56a5fbd0ea499a28acb836a8fd16effdfe946d9e52/merged/var/lib/ceph/mds/ceph-cephfs.compute-0.mxkspn supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:42 compute-0 podman[219447]: 2025-10-11 01:44:42.782811715 +0000 UTC m=+0.198464646 container init 55d9a30272fa11c4b8e3677e4cd0b399b896ea08d7de31bb83a17aec5be7d9b2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mds-cephfs-compute-0-mxkspn, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True)
Oct 11 01:44:42 compute-0 podman[219447]: 2025-10-11 01:44:42.801404747 +0000 UTC m=+0.217057638 container start 55d9a30272fa11c4b8e3677e4cd0b399b896ea08d7de31bb83a17aec5be7d9b2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mds-cephfs-compute-0-mxkspn, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Oct 11 01:44:42 compute-0 bash[219447]: 55d9a30272fa11c4b8e3677e4cd0b399b896ea08d7de31bb83a17aec5be7d9b2
Oct 11 01:44:42 compute-0 systemd[1]: Started Ceph mds.cephfs.compute-0.mxkspn for 3c7617c3-7a20-523e-a9de-20c0d6ba41da.
Oct 11 01:44:42 compute-0 ceph-mds[219472]: set uid:gid to 167:167 (ceph:ceph)
Oct 11 01:44:42 compute-0 ceph-mds[219472]: ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable), process ceph-mds, pid 2
Oct 11 01:44:42 compute-0 ceph-mds[219472]: main not setting numa affinity
Oct 11 01:44:42 compute-0 ceph-mds[219472]: pidfile_write: ignore empty --pid-file
Oct 11 01:44:42 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mds-cephfs-compute-0-mxkspn[219467]: starting mds.cephfs.compute-0.mxkspn at 
Oct 11 01:44:42 compute-0 sudo[219079]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:44:42 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:42 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn Updating MDS map to version 2 from mon.0
Oct 11 01:44:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:44:42 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.mds.cephfs}] v 0) v1
Oct 11 01:44:42 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:42 compute-0 ceph-mgr[192233]: [progress INFO root] complete: finished ev 5b11ec4a-6274-444f-8285-7e7f90df30f4 (Updating mds.cephfs deployment (+1 -> 1))
Oct 11 01:44:42 compute-0 ceph-mgr[192233]: [progress INFO root] Completed event 5b11ec4a-6274-444f-8285-7e7f90df30f4 (Updating mds.cephfs deployment (+1 -> 1)) in 3 seconds
Oct 11 01:44:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config set, name=mds_join_fs}] v 0) v1
Oct 11 01:44:42 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/spec.mds.cephfs}] v 0) v1
Oct 11 01:44:42 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:42 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.6 deep-scrub starts
Oct 11 01:44:42 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.6 deep-scrub ok
Oct 11 01:44:43 compute-0 sudo[219491]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:43 compute-0 sudo[219491]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:43 compute-0 sudo[219491]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:43 compute-0 sudo[219516]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:44:43 compute-0 sudo[219516]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:43 compute-0 sudo[219516]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:43 compute-0 sudo[219560]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:43 compute-0 sudo[219560]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:43 compute-0 sudo[219560]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config dump", "format": "json"} v 0) v1
Oct 11 01:44:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3749887511' entity='client.admin' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
Oct 11 01:44:43 compute-0 frosty_clarke[219457]: 
Oct 11 01:44:43 compute-0 systemd[1]: libpod-240da6ee53204ace6bb9a7df7f9f687c1fc111b68be5eeec967267ede720fc65.scope: Deactivated successfully.
Oct 11 01:44:43 compute-0 frosty_clarke[219457]: [{"section":"global","name":"cluster_network","value":"172.20.0.0/24","level":"advanced","can_update_at_runtime":false,"mask":""},{"section":"global","name":"container_image","value":"quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0","level":"basic","can_update_at_runtime":false,"mask":""},{"section":"global","name":"log_to_file","value":"true","level":"basic","can_update_at_runtime":true,"mask":""},{"section":"global","name":"mon_cluster_log_to_file","value":"true","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"global","name":"ms_bind_ipv4","value":"true","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"global","name":"ms_bind_ipv6","value":"false","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"global","name":"osd_pool_default_size","value":"1","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"global","name":"public_network","value":"192.168.122.0/24","level":"advanced","can_update_at_runtime":false,"mask":""},{"section":"global","name":"rgw_keystone_accepted_admin_roles","value":"ResellerAdmin, swiftoperator","level":"advanced","can_update_at_runtime":false,"mask":""},{"section":"global","name":"rgw_keystone_accepted_roles","value":"member, Member, admin","level":"advanced","can_update_at_runtime":false,"mask":""},{"section":"global","name":"rgw_keystone_admin_domain","value":"default","level":"advanced","can_update_at_runtime":false,"mask":""},{"section":"global","name":"rgw_keystone_admin_password","value":"12345678","level":"advanced","can_update_at_runtime":false,"mask":""},{"section":"global","name":"rgw_keystone_admin_project","value":"service","level":"advanced","can_update_at_runtime":false,"mask":""},{"section":"global","name":"rgw_keystone_admin_user","value":"swift","level":"advanced","can_update_at_runtime":false,"mask":""},{"section":"global","name":"rgw_keystone_api_version","value":"3","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"global","name":"rgw_keystone_implicit_tenants","value":"true","level":"advanced","can_update_at_runtime":false,"mask":""},{"section":"global","name":"rgw_keystone_url","value":"https://keystone-internal.openstack.svc:5000","level":"basic","can_update_at_runtime":false,"mask":""},{"section":"global","name":"rgw_keystone_verify_ssl","value":"false","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"global","name":"rgw_max_attr_name_len","value":"128","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"global","name":"rgw_max_attr_size","value":"1024","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"global","name":"rgw_max_attrs_num_in_req","value":"90","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"global","name":"rgw_s3_auth_use_keystone","value":"true","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"global","name":"rgw_swift_account_in_url","value":"true","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"global","name":"rgw_swift_enforce_content_length","value":"true","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"global","name":"rgw_swift_versioning_enabled","value":"true","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"global","name":"rgw_trust_forwarded_https","value":"true","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"mon","name":"auth_allow_i
nsecure_global_id_reclaim","value":"false","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"mon","name":"mon_warn_on_pool_no_redundancy","value":"false","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"mgr","name":"mgr/cephadm/container_init","value":"True","level":"advanced","can_update_at_runtime":false,"mask":""},{"section":"mgr","name":"mgr/cephadm/migration_current","value":"6","level":"advanced","can_update_at_runtime":false,"mask":""},{"section":"mgr","name":"mgr/cephadm/use_repo_digest","value":"false","level":"advanced","can_update_at_runtime":false,"mask":""},{"section":"mgr","name":"mgr/orchestrator/orchestrator","value":"cephadm","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"mgr","name":"mgr_standby_modules","value":"false","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"osd","name":"osd_memory_target_autotune","value":"true","level":"advanced","can_update_at_runtime":true,"mask":""},{"section":"mds.cephfs","name":"mds_join_fs","value":"cephfs","level":"basic","can_update_at_runtime":true,"mask":""},{"section":"client.rgw.rgw.compute-0.fahafy","name":"rgw_frontends","value":"beast endpoint=192.168.122.100:8082","level":"basic","can_update_at_runtime":false,"mask":""}]
Oct 11 01:44:43 compute-0 podman[219409]: 2025-10-11 01:44:43.402453089 +0000 UTC m=+0.959493360 container died 240da6ee53204ace6bb9a7df7f9f687c1fc111b68be5eeec967267ede720fc65 (image=quay.io/ceph/ceph:v18, name=frosty_clarke, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:43 compute-0 sudo[219585]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:44:43 compute-0 sudo[219585]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:43 compute-0 systemd[1]: var-lib-containers-storage-overlay-488bc478dc264bde42067352b8e9245b805a21b563bae456e70796ef9a8177de-merged.mount: Deactivated successfully.
Oct 11 01:44:43 compute-0 sudo[219585]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:43 compute-0 podman[219409]: 2025-10-11 01:44:43.503340687 +0000 UTC m=+1.060380938 container remove 240da6ee53204ace6bb9a7df7f9f687c1fc111b68be5eeec967267ede720fc65 (image=quay.io/ceph/ceph:v18, name=frosty_clarke, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:44:43 compute-0 systemd[1]: libpod-conmon-240da6ee53204ace6bb9a7df7f9f687c1fc111b68be5eeec967267ede720fc65.scope: Deactivated successfully.
Oct 11 01:44:43 compute-0 sudo[219380]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:43 compute-0 sudo[219622]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:43 compute-0 sudo[219622]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:43 compute-0 sudo[219622]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:43 compute-0 sudo[219648]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ls
Oct 11 01:44:43 compute-0 sudo[219648]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e47 do_prune osdmap full prune enabled
Oct 11 01:44:43 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/178430499' entity='client.rgw.rgw.compute-0.fahafy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
Oct 11 01:44:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e48 e48: 3 total, 3 up, 3 in
Oct 11 01:44:43 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e48: 3 total, 3 up, 3 in
Oct 11 01:44:43 compute-0 ceph-mon[191930]: pgmap v120: 194 pgs: 1 unknown, 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:43 compute-0 ceph-mon[191930]: osdmap e47: 3 total, 3 up, 3 in
Oct 11 01:44:43 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/178430499' entity='client.rgw.rgw.compute-0.fahafy' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch
Oct 11 01:44:43 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:43 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:43 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:43 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:43 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:43 compute-0 ceph-mon[191930]: 5.6 deep-scrub starts
Oct 11 01:44:43 compute-0 ceph-mon[191930]: 5.6 deep-scrub ok
Oct 11 01:44:43 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3749887511' entity='client.admin' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
Oct 11 01:44:43 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 48 pg[9.0( empty local-lis/les=47/48 n=0 ec=47/47 lis/c=0/0 les/c/f=0/0/0 sis=47) [1] r=0 lpr=47 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).mds e3 new map
Oct 11 01:44:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).mds e3 print_map
                                            e3
                                            enable_multiple, ever_enabled_multiple: 1,1
                                            default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2}
                                            legacy client fscid: 1
                                             
                                            Filesystem 'cephfs' (1)
                                            fs_name        cephfs
                                            epoch        2
                                            flags        12 joinable allow_snaps allow_multimds_snaps
                                            created        2025-10-11T01:44:18.031797+0000
                                            modified        2025-10-11T01:44:18.031855+0000
                                            tableserver        0
                                            root        0
                                            session_timeout        60
                                            session_autoclose        300
                                            max_file_size        1099511627776
                                            max_xattr_size        65536
                                            required_client_features        {}
                                            last_failure        0
                                            last_failure_osd_epoch        0
                                            compat        compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,7=mds uses inline data,8=no anchor table,9=file layout v2,10=snaprealm v2}
                                            max_mds        1
                                            in        
                                            up        {}
                                            failed        
                                            damaged        
                                            stopped        
                                            data_pools        [7]
                                            metadata_pool        6
                                            inline_data        disabled
                                            balancer        
                                            bal_rank_mask        -1
                                            standby_count_wanted        0
                                             
                                             
                                            Standby daemons:
                                             
                                            [mds.cephfs.compute-0.mxkspn{-1:14271} state up:standby seq 1 addr [v2:192.168.122.100:6814/3339055507,v1:192.168.122.100:6815/3339055507] compat {c=[1],r=[1],i=[7ff]}]
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn Updating MDS map to version 3 from mon.0
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn Monitors have assigned me to become a standby.
Oct 11 01:44:43 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : mds.? [v2:192.168.122.100:6814/3339055507,v1:192.168.122.100:6815/3339055507] up:boot
Oct 11 01:44:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).mds e3 assigned standby [v2:192.168.122.100:6814/3339055507,v1:192.168.122.100:6815/3339055507] as mds.0
Oct 11 01:44:43 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : daemon mds.cephfs.compute-0.mxkspn assigned to filesystem cephfs as rank 0 (now has 1 ranks)
Oct 11 01:44:43 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : Health check cleared: MDS_ALL_DOWN (was: 1 filesystem is offline)
Oct 11 01:44:43 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : Health check cleared: MDS_UP_LESS_THAN_MAX (was: 1 filesystem is online with fewer MDS than max_mds)
Oct 11 01:44:43 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : Cluster is now healthy
Oct 11 01:44:43 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : fsmap cephfs:0 1 up:standby
Oct 11 01:44:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mds metadata", "who": "cephfs.compute-0.mxkspn"} v 0) v1
Oct 11 01:44:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mds metadata", "who": "cephfs.compute-0.mxkspn"}]: dispatch
Oct 11 01:44:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).mds e3 all = 0
Oct 11 01:44:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).mds e4 new map
Oct 11 01:44:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).mds e4 print_map
                                            e4
                                            enable_multiple, ever_enabled_multiple: 1,1
                                            default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2}
                                            legacy client fscid: 1
                                             
                                            Filesystem 'cephfs' (1)
                                            fs_name        cephfs
                                            epoch        4
                                            flags        12 joinable allow_snaps allow_multimds_snaps
                                            created        2025-10-11T01:44:18.031797+0000
                                            modified        2025-10-11T01:44:43.896533+0000
                                            tableserver        0
                                            root        0
                                            session_timeout        60
                                            session_autoclose        300
                                            max_file_size        1099511627776
                                            max_xattr_size        65536
                                            required_client_features        {}
                                            last_failure        0
                                            last_failure_osd_epoch        0
                                            compat        compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,7=mds uses inline data,8=no anchor table,9=file layout v2,10=snaprealm v2}
                                            max_mds        1
                                            in        0
                                            up        {0=14271}
                                            failed        
                                            damaged        
                                            stopped        
                                            data_pools        [7]
                                            metadata_pool        6
                                            inline_data        disabled
                                            balancer        
                                            bal_rank_mask        -1
                                            standby_count_wanted        0
                                            [mds.cephfs.compute-0.mxkspn{0:14271} state up:creating seq 1 addr [v2:192.168.122.100:6814/3339055507,v1:192.168.122.100:6815/3339055507] compat {c=[1],r=[1],i=[7ff]}]
                                             
                                             
Oct 11 01:44:43 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : fsmap cephfs:1 {0=cephfs.compute-0.mxkspn=up:creating}
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn Updating MDS map to version 4 from mon.0
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.0.4 handle_mds_map i am now mds.0.4
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.0.4 handle_mds_map state change up:standby --> up:creating
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.0.cache creating system inode with ino:0x1
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.0.cache creating system inode with ino:0x100
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.0.cache creating system inode with ino:0x600
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.0.cache creating system inode with ino:0x601
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.0.cache creating system inode with ino:0x602
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.0.cache creating system inode with ino:0x603
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.0.cache creating system inode with ino:0x604
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.0.cache creating system inode with ino:0x605
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.0.cache creating system inode with ino:0x606
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.0.cache creating system inode with ino:0x607
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.0.cache creating system inode with ino:0x608
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.0.cache creating system inode with ino:0x609
Oct 11 01:44:43 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.8 scrub starts
Oct 11 01:44:43 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.8 scrub ok
Oct 11 01:44:43 compute-0 ceph-mds[219472]: mds.0.4 creating_done
Oct 11 01:44:43 compute-0 ceph-mon[191930]: log_channel(cluster) log [INF] : daemon mds.cephfs.compute-0.mxkspn is now active in filesystem cephfs as rank 0
Oct 11 01:44:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v123: 195 pgs: 2 unknown, 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:44 compute-0 sudo[219773]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xqjfrbecpcwfewmyxokjexzfkllfjcmt ; /usr/bin/python3'
Oct 11 01:44:44 compute-0 sudo[219773]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:44 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.10 scrub starts
Oct 11 01:44:44 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.10 scrub ok
Oct 11 01:44:44 compute-0 podman[219778]: 2025-10-11 01:44:44.578699906 +0000 UTC m=+0.133805404 container exec ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e48 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:44:44 compute-0 python3[219780]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   osd get-require-min-compat-client _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:44 compute-0 podman[219778]: 2025-10-11 01:44:44.705348372 +0000 UTC m=+0.260453870 container exec_died ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e48 do_prune osdmap full prune enabled
Oct 11 01:44:44 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/178430499' entity='client.rgw.rgw.compute-0.fahafy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
Oct 11 01:44:44 compute-0 ceph-mon[191930]: osdmap e48: 3 total, 3 up, 3 in
Oct 11 01:44:44 compute-0 ceph-mon[191930]: mds.? [v2:192.168.122.100:6814/3339055507,v1:192.168.122.100:6815/3339055507] up:boot
Oct 11 01:44:44 compute-0 ceph-mon[191930]: daemon mds.cephfs.compute-0.mxkspn assigned to filesystem cephfs as rank 0 (now has 1 ranks)
Oct 11 01:44:44 compute-0 ceph-mon[191930]: Health check cleared: MDS_ALL_DOWN (was: 1 filesystem is offline)
Oct 11 01:44:44 compute-0 ceph-mon[191930]: Health check cleared: MDS_UP_LESS_THAN_MAX (was: 1 filesystem is online with fewer MDS than max_mds)
Oct 11 01:44:44 compute-0 ceph-mon[191930]: Cluster is now healthy
Oct 11 01:44:44 compute-0 ceph-mon[191930]: fsmap cephfs:0 1 up:standby
Oct 11 01:44:44 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "mds metadata", "who": "cephfs.compute-0.mxkspn"}]: dispatch
Oct 11 01:44:44 compute-0 ceph-mon[191930]: fsmap cephfs:1 {0=cephfs.compute-0.mxkspn=up:creating}
Oct 11 01:44:44 compute-0 ceph-mon[191930]: 5.8 scrub starts
Oct 11 01:44:44 compute-0 ceph-mon[191930]: 5.8 scrub ok
Oct 11 01:44:44 compute-0 ceph-mon[191930]: daemon mds.cephfs.compute-0.mxkspn is now active in filesystem cephfs as rank 0
Oct 11 01:44:44 compute-0 ceph-mon[191930]: 6.10 scrub starts
Oct 11 01:44:44 compute-0 ceph-mon[191930]: 6.10 scrub ok
Oct 11 01:44:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e49 e49: 3 total, 3 up, 3 in
Oct 11 01:44:44 compute-0 podman[219797]: 2025-10-11 01:44:44.744391895 +0000 UTC m=+0.109666624 container create 22504f4a47966eeaa4fb37445fa280a8b4fe68f6b8888d2f246499527eddac55 (image=quay.io/ceph/ceph:v18, name=wizardly_hamilton, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:44 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e49: 3 total, 3 up, 3 in
Oct 11 01:44:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} v 0) v1
Oct 11 01:44:44 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/178430499' entity='client.rgw.rgw.compute-0.fahafy' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch
Oct 11 01:44:44 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 49 pg[10.0( empty local-lis/les=0/0 n=0 ec=49/49 lis/c=0/0 les/c/f=0/0/0 sis=49) [2] r=0 lpr=49 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:44 compute-0 podman[219797]: 2025-10-11 01:44:44.717287347 +0000 UTC m=+0.082562076 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:44 compute-0 systemd[1]: Started libpod-conmon-22504f4a47966eeaa4fb37445fa280a8b4fe68f6b8888d2f246499527eddac55.scope.
Oct 11 01:44:44 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/554c5e6211490757796de29516756353eb917aeceae1c2376f30c0ef60e99733/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/554c5e6211490757796de29516756353eb917aeceae1c2376f30c0ef60e99733/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:44 compute-0 podman[219797]: 2025-10-11 01:44:44.892791712 +0000 UTC m=+0.258066461 container init 22504f4a47966eeaa4fb37445fa280a8b4fe68f6b8888d2f246499527eddac55 (image=quay.io/ceph/ceph:v18, name=wizardly_hamilton, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:44:44 compute-0 podman[219797]: 2025-10-11 01:44:44.904160827 +0000 UTC m=+0.269435556 container start 22504f4a47966eeaa4fb37445fa280a8b4fe68f6b8888d2f246499527eddac55 (image=quay.io/ceph/ceph:v18, name=wizardly_hamilton, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_REF=reef)
Oct 11 01:44:44 compute-0 podman[219797]: 2025-10-11 01:44:44.910115134 +0000 UTC m=+0.275389883 container attach 22504f4a47966eeaa4fb37445fa280a8b4fe68f6b8888d2f246499527eddac55 (image=quay.io/ceph/ceph:v18, name=wizardly_hamilton, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).mds e5 new map
Oct 11 01:44:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).mds e5 print_map
                                            e5
                                            enable_multiple, ever_enabled_multiple: 1,1
                                            default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2}
                                            legacy client fscid: 1
                                             
                                            Filesystem 'cephfs' (1)
                                            fs_name        cephfs
                                            epoch        5
                                            flags        12 joinable allow_snaps allow_multimds_snaps
                                            created        2025-10-11T01:44:18.031797+0000
                                            modified        2025-10-11T01:44:44.904492+0000
                                            tableserver        0
                                            root        0
                                            session_timeout        60
                                            session_autoclose        300
                                            max_file_size        1099511627776
                                            max_xattr_size        65536
                                            required_client_features        {}
                                            last_failure        0
                                            last_failure_osd_epoch        0
                                            compat        compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,7=mds uses inline data,8=no anchor table,9=file layout v2,10=snaprealm v2}
                                            max_mds        1
                                            in        0
                                            up        {0=14271}
                                            failed        
                                            damaged        
                                            stopped        
                                            data_pools        [7]
                                            metadata_pool        6
                                            inline_data        disabled
                                            balancer        
                                            bal_rank_mask        -1
                                            standby_count_wanted        0
                                            [mds.cephfs.compute-0.mxkspn{0:14271} state up:active seq 2 join_fscid=1 addr [v2:192.168.122.100:6814/3339055507,v1:192.168.122.100:6815/3339055507] compat {c=[1],r=[1],i=[7ff]}]
                                             
                                             
Oct 11 01:44:44 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn Updating MDS map to version 5 from mon.0
Oct 11 01:44:44 compute-0 ceph-mds[219472]: mds.0.4 handle_mds_map i am now mds.0.4
Oct 11 01:44:44 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : mds.? [v2:192.168.122.100:6814/3339055507,v1:192.168.122.100:6815/3339055507] up:active
Oct 11 01:44:44 compute-0 ceph-mds[219472]: mds.0.4 handle_mds_map state change up:creating --> up:active
Oct 11 01:44:44 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : fsmap cephfs:1 {0=cephfs.compute-0.mxkspn=up:active}
Oct 11 01:44:44 compute-0 ceph-mds[219472]: mds.0.4 recovery_done -- successful recovery!
Oct 11 01:44:44 compute-0 ceph-mds[219472]: mds.0.4 active_start
Oct 11 01:44:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd get-require-min-compat-client"} v 0) v1
Oct 11 01:44:45 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2545543402' entity='client.admin' cmd=[{"prefix": "osd get-require-min-compat-client"}]: dispatch
Oct 11 01:44:45 compute-0 wizardly_hamilton[219835]: mimic
Oct 11 01:44:45 compute-0 systemd[1]: libpod-22504f4a47966eeaa4fb37445fa280a8b4fe68f6b8888d2f246499527eddac55.scope: Deactivated successfully.
Oct 11 01:44:45 compute-0 podman[219797]: 2025-10-11 01:44:45.501567149 +0000 UTC m=+0.866841898 container died 22504f4a47966eeaa4fb37445fa280a8b4fe68f6b8888d2f246499527eddac55 (image=quay.io/ceph/ceph:v18, name=wizardly_hamilton, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:44:45 compute-0 systemd[1]: var-lib-containers-storage-overlay-554c5e6211490757796de29516756353eb917aeceae1c2376f30c0ef60e99733-merged.mount: Deactivated successfully.
Oct 11 01:44:45 compute-0 podman[219797]: 2025-10-11 01:44:45.59268883 +0000 UTC m=+0.957963569 container remove 22504f4a47966eeaa4fb37445fa280a8b4fe68f6b8888d2f246499527eddac55 (image=quay.io/ceph/ceph:v18, name=wizardly_hamilton, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 01:44:45 compute-0 systemd[1]: libpod-conmon-22504f4a47966eeaa4fb37445fa280a8b4fe68f6b8888d2f246499527eddac55.scope: Deactivated successfully.
Oct 11 01:44:45 compute-0 sudo[219773]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e49 do_prune osdmap full prune enabled
Oct 11 01:44:45 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/178430499' entity='client.rgw.rgw.compute-0.fahafy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
Oct 11 01:44:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e50 e50: 3 total, 3 up, 3 in
Oct 11 01:44:45 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e50: 3 total, 3 up, 3 in
Oct 11 01:44:45 compute-0 ceph-mon[191930]: pgmap v123: 195 pgs: 2 unknown, 193 active+clean; 449 KiB data, 80 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:44:45 compute-0 ceph-mon[191930]: osdmap e49: 3 total, 3 up, 3 in
Oct 11 01:44:45 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/178430499' entity='client.rgw.rgw.compute-0.fahafy' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch
Oct 11 01:44:45 compute-0 ceph-mon[191930]: mds.? [v2:192.168.122.100:6814/3339055507,v1:192.168.122.100:6815/3339055507] up:active
Oct 11 01:44:45 compute-0 ceph-mon[191930]: fsmap cephfs:1 {0=cephfs.compute-0.mxkspn=up:active}
Oct 11 01:44:45 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2545543402' entity='client.admin' cmd=[{"prefix": "osd get-require-min-compat-client"}]: dispatch
Oct 11 01:44:45 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 50 pg[10.0( empty local-lis/les=49/50 n=0 ec=49/49 lis/c=0/0 les/c/f=0/0/0 sis=49) [2] r=0 lpr=49 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:45 compute-0 sudo[219648]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:44:45 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:44:45 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:45 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.a scrub starts
Oct 11 01:44:45 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.a scrub ok
Oct 11 01:44:46 compute-0 sudo[219979]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:46 compute-0 sudo[219979]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:46 compute-0 sudo[219979]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:46 compute-0 sudo[220004]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:44:46 compute-0 sudo[220004]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:46 compute-0 sudo[220004]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:46 compute-0 sudo[220042]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:46 compute-0 sudo[220042]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:46 compute-0 sudo[220042]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v126: 196 pgs: 1 unknown, 195 active+clean; 450 KiB data, 80 MiB used, 60 GiB / 60 GiB avail; 1023 B/s rd, 1.2 KiB/s wr, 4 op/s
Oct 11 01:44:46 compute-0 sudo[220068]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 01:44:46 compute-0 sudo[220068]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:46 compute-0 podman[220066]: 2025-10-11 01:44:46.491628185 +0000 UTC m=+0.148628487 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, config_id=edpm, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4)
Oct 11 01:44:46 compute-0 sudo[220135]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pmajlflcofphyouuhbihxaqodlpqxdxv ; /usr/bin/python3'
Oct 11 01:44:46 compute-0 sudo[220135]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:46 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.7 scrub starts
Oct 11 01:44:46 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.7 scrub ok
Oct 11 01:44:46 compute-0 ceph-mgr[192233]: [progress INFO root] Writing back 12 completed events
Oct 11 01:44:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/progress/completed}] v 0) v1
Oct 11 01:44:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:46 compute-0 ceph-mgr[192233]: [progress INFO root] Completed event 2cd3251c-0b23-40fa-b82c-cd15652e4148 (Global Recovery Event) in 5 seconds
Oct 11 01:44:46 compute-0 python3[220137]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   versions -f json _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e50 do_prune osdmap full prune enabled
Oct 11 01:44:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e51 e51: 3 total, 3 up, 3 in
Oct 11 01:44:46 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e51: 3 total, 3 up, 3 in
Oct 11 01:44:46 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/178430499' entity='client.rgw.rgw.compute-0.fahafy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
Oct 11 01:44:46 compute-0 ceph-mon[191930]: osdmap e50: 3 total, 3 up, 3 in
Oct 11 01:44:46 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:46 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:46 compute-0 ceph-mon[191930]: 5.a scrub starts
Oct 11 01:44:46 compute-0 ceph-mon[191930]: 5.a scrub ok
Oct 11 01:44:46 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} v 0) v1
Oct 11 01:44:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1946742169' entity='client.rgw.rgw.compute-0.fahafy' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch
Oct 11 01:44:46 compute-0 podman[220152]: 2025-10-11 01:44:46.870639722 +0000 UTC m=+0.094958090 container create f26b12fe974c9a00b344d709272b7ee39caa924006b37819c88801c2a90dd9dd (image=quay.io/ceph/ceph:v18, name=angry_chebyshev, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS)
Oct 11 01:44:46 compute-0 podman[220152]: 2025-10-11 01:44:46.838438146 +0000 UTC m=+0.062756574 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:46 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.b scrub starts
Oct 11 01:44:46 compute-0 systemd[1]: Started libpod-conmon-f26b12fe974c9a00b344d709272b7ee39caa924006b37819c88801c2a90dd9dd.scope.
Oct 11 01:44:46 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.b scrub ok
Oct 11 01:44:46 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/93d1b181454014df946fe5d06a2ab0af78ebc9d79d21b1931b5fd10ecba8a2b7/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/93d1b181454014df946fe5d06a2ab0af78ebc9d79d21b1931b5fd10ecba8a2b7/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:47 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 51 pg[11.0( empty local-lis/les=0/0 n=0 ec=51/51 lis/c=0/0 les/c/f=0/0/0 sis=51) [1] r=0 lpr=51 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:44:47 compute-0 podman[220152]: 2025-10-11 01:44:47.03966439 +0000 UTC m=+0.263982818 container init f26b12fe974c9a00b344d709272b7ee39caa924006b37819c88801c2a90dd9dd (image=quay.io/ceph/ceph:v18, name=angry_chebyshev, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 01:44:47 compute-0 podman[220152]: 2025-10-11 01:44:47.054426013 +0000 UTC m=+0.278744361 container start f26b12fe974c9a00b344d709272b7ee39caa924006b37819c88801c2a90dd9dd (image=quay.io/ceph/ceph:v18, name=angry_chebyshev, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0)
Oct 11 01:44:47 compute-0 podman[220152]: 2025-10-11 01:44:47.074062075 +0000 UTC m=+0.298380443 container attach f26b12fe974c9a00b344d709272b7ee39caa924006b37819c88801c2a90dd9dd (image=quay.io/ceph/ceph:v18, name=angry_chebyshev, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True)
Oct 11 01:44:47 compute-0 sudo[220068]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:44:47 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:44:47 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:44:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:44:47 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:47 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 812a89f8-6c8d-4394-bf17-c67ff1e5935a does not exist
Oct 11 01:44:47 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 4c1db05b-7b8e-45af-b3ed-57b09d69e3b8 does not exist
Oct 11 01:44:47 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 9f211ccd-93df-4453-8b53-1de4426d5e65 does not exist
Oct 11 01:44:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:44:47 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:44:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:44:47 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:44:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:44:47 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:47 compute-0 sudo[220188]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:47 compute-0 sudo[220188]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:47 compute-0 sudo[220188]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:47 compute-0 sudo[220213]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:44:47 compute-0 sudo[220213]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:47 compute-0 sudo[220213]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:47 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.12 scrub starts
Oct 11 01:44:47 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.12 scrub ok
Oct 11 01:44:47 compute-0 sudo[220257]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:47 compute-0 sudo[220257]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:47 compute-0 sudo[220257]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:47 compute-0 sudo[220282]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 01:44:47 compute-0 sudo[220282]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e51 do_prune osdmap full prune enabled
Oct 11 01:44:47 compute-0 ceph-mon[191930]: pgmap v126: 196 pgs: 1 unknown, 195 active+clean; 450 KiB data, 80 MiB used, 60 GiB / 60 GiB avail; 1023 B/s rd, 1.2 KiB/s wr, 4 op/s
Oct 11 01:44:47 compute-0 ceph-mon[191930]: 7.7 scrub starts
Oct 11 01:44:47 compute-0 ceph-mon[191930]: 7.7 scrub ok
Oct 11 01:44:47 compute-0 ceph-mon[191930]: osdmap e51: 3 total, 3 up, 3 in
Oct 11 01:44:47 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1946742169' entity='client.rgw.rgw.compute-0.fahafy' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch
Oct 11 01:44:47 compute-0 ceph-mon[191930]: 5.b scrub starts
Oct 11 01:44:47 compute-0 ceph-mon[191930]: 5.b scrub ok
Oct 11 01:44:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:44:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:44:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:44:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:47 compute-0 ceph-mon[191930]: 6.12 scrub starts
Oct 11 01:44:47 compute-0 ceph-mon[191930]: 6.12 scrub ok
Oct 11 01:44:47 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1946742169' entity='client.rgw.rgw.compute-0.fahafy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
Oct 11 01:44:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e52 e52: 3 total, 3 up, 3 in
Oct 11 01:44:47 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e52: 3 total, 3 up, 3 in
Oct 11 01:44:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} v 0) v1
Oct 11 01:44:47 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1946742169' entity='client.rgw.rgw.compute-0.fahafy' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch
Oct 11 01:44:47 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 52 pg[11.0( empty local-lis/les=51/52 n=0 ec=51/51 lis/c=0/0 les/c/f=0/0/0 sis=51) [1] r=0 lpr=51 crt=0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:44:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "versions", "format": "json"} v 0) v1
Oct 11 01:44:47 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2286965225' entity='client.admin' cmd=[{"prefix": "versions", "format": "json"}]: dispatch
Oct 11 01:44:47 compute-0 angry_chebyshev[220172]: 
Oct 11 01:44:47 compute-0 angry_chebyshev[220172]: {"mon":{"ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable)":1},"mgr":{"ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable)":1},"osd":{"ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable)":3},"mds":{"ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable)":1},"overall":{"ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable)":6}}
Oct 11 01:44:47 compute-0 systemd[1]: libpod-f26b12fe974c9a00b344d709272b7ee39caa924006b37819c88801c2a90dd9dd.scope: Deactivated successfully.
Oct 11 01:44:47 compute-0 podman[220152]: 2025-10-11 01:44:47.883657138 +0000 UTC m=+1.107975516 container died f26b12fe974c9a00b344d709272b7ee39caa924006b37819c88801c2a90dd9dd (image=quay.io/ceph/ceph:v18, name=angry_chebyshev, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:44:47 compute-0 systemd[1]: var-lib-containers-storage-overlay-93d1b181454014df946fe5d06a2ab0af78ebc9d79d21b1931b5fd10ecba8a2b7-merged.mount: Deactivated successfully.
Oct 11 01:44:47 compute-0 podman[220152]: 2025-10-11 01:44:47.985473354 +0000 UTC m=+1.209791702 container remove f26b12fe974c9a00b344d709272b7ee39caa924006b37819c88801c2a90dd9dd (image=quay.io/ceph/ceph:v18, name=angry_chebyshev, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.license=GPLv2)
Oct 11 01:44:47 compute-0 systemd[1]: libpod-conmon-f26b12fe974c9a00b344d709272b7ee39caa924006b37819c88801c2a90dd9dd.scope: Deactivated successfully.
Oct 11 01:44:48 compute-0 sudo[220135]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:48 compute-0 podman[220359]: 2025-10-11 01:44:48.375142417 +0000 UTC m=+0.129907836 container create fb0ed44dc399072d88db34408862d895ae30bbd27749930cb89792b9e9e20865 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_mccarthy, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:44:48 compute-0 podman[220359]: 2025-10-11 01:44:48.302098188 +0000 UTC m=+0.056863647 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v129: 197 pgs: 1 unknown, 196 active+clean; 452 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 1023 B/s rd, 4.5 KiB/s wr, 9 op/s
Oct 11 01:44:48 compute-0 systemd[1]: Started libpod-conmon-fb0ed44dc399072d88db34408862d895ae30bbd27749930cb89792b9e9e20865.scope.
Oct 11 01:44:48 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:48 compute-0 podman[220359]: 2025-10-11 01:44:48.515136026 +0000 UTC m=+0.269901455 container init fb0ed44dc399072d88db34408862d895ae30bbd27749930cb89792b9e9e20865 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_mccarthy, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:44:48 compute-0 podman[220359]: 2025-10-11 01:44:48.532783679 +0000 UTC m=+0.287549108 container start fb0ed44dc399072d88db34408862d895ae30bbd27749930cb89792b9e9e20865 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_mccarthy, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS)
Oct 11 01:44:48 compute-0 podman[220359]: 2025-10-11 01:44:48.539890656 +0000 UTC m=+0.294656085 container attach fb0ed44dc399072d88db34408862d895ae30bbd27749930cb89792b9e9e20865 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_mccarthy, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:44:48 compute-0 sharp_mccarthy[220375]: 167 167
Oct 11 01:44:48 compute-0 systemd[1]: libpod-fb0ed44dc399072d88db34408862d895ae30bbd27749930cb89792b9e9e20865.scope: Deactivated successfully.
Oct 11 01:44:48 compute-0 podman[220359]: 2025-10-11 01:44:48.549903241 +0000 UTC m=+0.304668660 container died fb0ed44dc399072d88db34408862d895ae30bbd27749930cb89792b9e9e20865 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_mccarthy, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS)
Oct 11 01:44:48 compute-0 systemd[1]: var-lib-containers-storage-overlay-c9f53c0fc364f889bb9a7ca08dbfd2425208378fcccae824dfd0f20d873a6078-merged.mount: Deactivated successfully.
Oct 11 01:44:48 compute-0 podman[220359]: 2025-10-11 01:44:48.64347608 +0000 UTC m=+0.398241509 container remove fb0ed44dc399072d88db34408862d895ae30bbd27749930cb89792b9e9e20865 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_mccarthy, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 01:44:48 compute-0 systemd[1]: libpod-conmon-fb0ed44dc399072d88db34408862d895ae30bbd27749930cb89792b9e9e20865.scope: Deactivated successfully.
Oct 11 01:44:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e52 do_prune osdmap full prune enabled
Oct 11 01:44:48 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='client.? 192.168.122.100:0/1946742169' entity='client.rgw.rgw.compute-0.fahafy' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
Oct 11 01:44:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e53 e53: 3 total, 3 up, 3 in
Oct 11 01:44:48 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e53: 3 total, 3 up, 3 in
Oct 11 01:44:48 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1946742169' entity='client.rgw.rgw.compute-0.fahafy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
Oct 11 01:44:48 compute-0 ceph-mon[191930]: osdmap e52: 3 total, 3 up, 3 in
Oct 11 01:44:48 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1946742169' entity='client.rgw.rgw.compute-0.fahafy' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch
Oct 11 01:44:48 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2286965225' entity='client.admin' cmd=[{"prefix": "versions", "format": "json"}]: dispatch
Oct 11 01:44:48 compute-0 podman[220397]: 2025-10-11 01:44:48.979052577 +0000 UTC m=+0.107804155 container create 0d6eb09568245b823269ff43d110976646292c4046ce33a752c3f43d5ea630ce (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_fermi, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Oct 11 01:44:49 compute-0 podman[220397]: 2025-10-11 01:44:48.943950172 +0000 UTC m=+0.072701800 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:49 compute-0 systemd[1]: Started libpod-conmon-0d6eb09568245b823269ff43d110976646292c4046ce33a752c3f43d5ea630ce.scope.
Oct 11 01:44:49 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/15cce0f80c0eca3fc84873525e17601e4030fab25aa3d089cd07a0cf11f80da2/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/15cce0f80c0eca3fc84873525e17601e4030fab25aa3d089cd07a0cf11f80da2/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/15cce0f80c0eca3fc84873525e17601e4030fab25aa3d089cd07a0cf11f80da2/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/15cce0f80c0eca3fc84873525e17601e4030fab25aa3d089cd07a0cf11f80da2/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/15cce0f80c0eca3fc84873525e17601e4030fab25aa3d089cd07a0cf11f80da2/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:49 compute-0 podman[220397]: 2025-10-11 01:44:49.163551247 +0000 UTC m=+0.292302825 container init 0d6eb09568245b823269ff43d110976646292c4046ce33a752c3f43d5ea630ce (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_fermi, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:44:49 compute-0 radosgw[218904]: LDAP not started since no server URIs were provided in the configuration.
Oct 11 01:44:49 compute-0 radosgw[218904]: framework: beast
Oct 11 01:44:49 compute-0 radosgw[218904]: framework conf key: ssl_certificate, val: config://rgw/cert/$realm/$zone.crt
Oct 11 01:44:49 compute-0 radosgw[218904]: framework conf key: ssl_private_key, val: config://rgw/cert/$realm/$zone.key
Oct 11 01:44:49 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-rgw-rgw-compute-0-fahafy[218900]: 2025-10-11T01:44:49.159+0000 7fb5c3fdd940 -1 LDAP not started since no server URIs were provided in the configuration.
Oct 11 01:44:49 compute-0 podman[220397]: 2025-10-11 01:44:49.194437904 +0000 UTC m=+0.323189472 container start 0d6eb09568245b823269ff43d110976646292c4046ce33a752c3f43d5ea630ce (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_fermi, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507)
Oct 11 01:44:49 compute-0 podman[220397]: 2025-10-11 01:44:49.201571801 +0000 UTC m=+0.330323349 container attach 0d6eb09568245b823269ff43d110976646292c4046ce33a752c3f43d5ea630ce (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_fermi, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:44:49 compute-0 radosgw[218904]: starting handler: beast
Oct 11 01:44:49 compute-0 radosgw[218904]: set uid:gid to 167:167 (ceph:ceph)
Oct 11 01:44:49 compute-0 radosgw[218904]: mgrc service_daemon_register rgw.14277 metadata {arch=x86_64,ceph_release=reef,ceph_version=ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable),ceph_version_short=18.2.7,container_hostname=compute-0,container_image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0,cpu=AMD EPYC-Rome Processor,distro=centos,distro_description=CentOS Stream 9,distro_version=9,frontend_config#0=beast endpoint=192.168.122.100:8082,frontend_type#0=beast,hostname=compute-0,id=rgw.compute-0.fahafy,kernel_description=#1 SMP PREEMPT_DYNAMIC Tue Sep 30 07:37:35 UTC 2025,kernel_version=5.14.0-621.el9.x86_64,mem_swap_kb=1048572,mem_total_kb=7864348,num_handles=1,os=Linux,pid=2,realm_id=,realm_name=,zone_id=2bf3fceb-ebfb-4ffa-a425-a7ac76b37b8c,zone_name=default,zonegroup_id=310e0ce8-86e2-4657-8886-c4cdb654d245,zonegroup_name=default}
Oct 11 01:44:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e53 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:44:49 compute-0 ceph-mon[191930]: pgmap v129: 197 pgs: 1 unknown, 196 active+clean; 452 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 1023 B/s rd, 4.5 KiB/s wr, 9 op/s
Oct 11 01:44:49 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1946742169' entity='client.rgw.rgw.compute-0.fahafy' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
Oct 11 01:44:49 compute-0 ceph-mon[191930]: osdmap e53: 3 total, 3 up, 3 in
Oct 11 01:44:49 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.d scrub starts
Oct 11 01:44:49 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.d scrub ok
Oct 11 01:44:50 compute-0 hungry_fermi[220413]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:44:50 compute-0 hungry_fermi[220413]: --> relative data size: 1.0
Oct 11 01:44:50 compute-0 hungry_fermi[220413]: --> All data devices are unavailable
Oct 11 01:44:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v131: 197 pgs: 1 unknown, 196 active+clean; 452 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 2.8 KiB/s wr, 4 op/s
Oct 11 01:44:50 compute-0 systemd[1]: libpod-0d6eb09568245b823269ff43d110976646292c4046ce33a752c3f43d5ea630ce.scope: Deactivated successfully.
Oct 11 01:44:50 compute-0 podman[220397]: 2025-10-11 01:44:50.43959056 +0000 UTC m=+1.568342138 container died 0d6eb09568245b823269ff43d110976646292c4046ce33a752c3f43d5ea630ce (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_fermi, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default)
Oct 11 01:44:50 compute-0 systemd[1]: libpod-0d6eb09568245b823269ff43d110976646292c4046ce33a752c3f43d5ea630ce.scope: Consumed 1.185s CPU time.
Oct 11 01:44:50 compute-0 systemd[1]: var-lib-containers-storage-overlay-15cce0f80c0eca3fc84873525e17601e4030fab25aa3d089cd07a0cf11f80da2-merged.mount: Deactivated successfully.
Oct 11 01:44:50 compute-0 podman[220397]: 2025-10-11 01:44:50.526116223 +0000 UTC m=+1.654867781 container remove 0d6eb09568245b823269ff43d110976646292c4046ce33a752c3f43d5ea630ce (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_fermi, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:50 compute-0 systemd[1]: libpod-conmon-0d6eb09568245b823269ff43d110976646292c4046ce33a752c3f43d5ea630ce.scope: Deactivated successfully.
Oct 11 01:44:50 compute-0 sudo[220282]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:50 compute-0 sudo[220997]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:50 compute-0 sudo[220997]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:50 compute-0 sudo[220997]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:50 compute-0 sudo[221022]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:44:50 compute-0 sudo[221022]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:50 compute-0 sudo[221022]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:50 compute-0 ceph-mon[191930]: 5.d scrub starts
Oct 11 01:44:50 compute-0 ceph-mon[191930]: 5.d scrub ok
Oct 11 01:44:50 compute-0 sudo[221047]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:50 compute-0 sudo[221047]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:50 compute-0 sudo[221047]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:51 compute-0 sudo[221072]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:44:51 compute-0 sudo[221072]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:51 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.b scrub starts
Oct 11 01:44:51 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.b scrub ok
Oct 11 01:44:51 compute-0 ceph-mgr[192233]: [progress INFO root] Writing back 13 completed events
Oct 11 01:44:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/progress/completed}] v 0) v1
Oct 11 01:44:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:51 compute-0 podman[221136]: 2025-10-11 01:44:51.72235229 +0000 UTC m=+0.087827804 container create fc82fed3b9f4af8ca758afb9b311a0d718be5deef3933a1a8873424b418abec3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_jones, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:44:51 compute-0 podman[221136]: 2025-10-11 01:44:51.687628234 +0000 UTC m=+0.053103788 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:51 compute-0 systemd[1]: Started libpod-conmon-fc82fed3b9f4af8ca758afb9b311a0d718be5deef3933a1a8873424b418abec3.scope.
Oct 11 01:44:51 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:51 compute-0 ceph-mon[191930]: pgmap v131: 197 pgs: 1 unknown, 196 active+clean; 452 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 2.8 KiB/s wr, 4 op/s
Oct 11 01:44:51 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:51 compute-0 podman[221136]: 2025-10-11 01:44:51.886968119 +0000 UTC m=+0.252443673 container init fc82fed3b9f4af8ca758afb9b311a0d718be5deef3933a1a8873424b418abec3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_jones, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 01:44:51 compute-0 podman[221136]: 2025-10-11 01:44:51.90803464 +0000 UTC m=+0.273510144 container start fc82fed3b9f4af8ca758afb9b311a0d718be5deef3933a1a8873424b418abec3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_jones, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default)
Oct 11 01:44:51 compute-0 podman[221136]: 2025-10-11 01:44:51.915764667 +0000 UTC m=+0.281240231 container attach fc82fed3b9f4af8ca758afb9b311a0d718be5deef3933a1a8873424b418abec3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_jones, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS)
Oct 11 01:44:51 compute-0 naughty_jones[221150]: 167 167
Oct 11 01:44:51 compute-0 systemd[1]: libpod-fc82fed3b9f4af8ca758afb9b311a0d718be5deef3933a1a8873424b418abec3.scope: Deactivated successfully.
Oct 11 01:44:51 compute-0 podman[221136]: 2025-10-11 01:44:51.923178983 +0000 UTC m=+0.288654477 container died fc82fed3b9f4af8ca758afb9b311a0d718be5deef3933a1a8873424b418abec3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_jones, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 01:44:51 compute-0 systemd[1]: var-lib-containers-storage-overlay-856e8772aa433bc2b79f30ebe4a394ed793f2ed93fdcae6b0a012841f86b6085-merged.mount: Deactivated successfully.
Oct 11 01:44:52 compute-0 podman[221136]: 2025-10-11 01:44:52.022231301 +0000 UTC m=+0.387706775 container remove fc82fed3b9f4af8ca758afb9b311a0d718be5deef3933a1a8873424b418abec3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_jones, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:52 compute-0 systemd[1]: libpod-conmon-fc82fed3b9f4af8ca758afb9b311a0d718be5deef3933a1a8873424b418abec3.scope: Deactivated successfully.
Oct 11 01:44:52 compute-0 podman[221172]: 2025-10-11 01:44:52.312050675 +0000 UTC m=+0.094263170 container create a176e5cd5e73325950753407302c82058926a8513578691d02ef5a3f0b54618f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_easley, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:44:52 compute-0 podman[221172]: 2025-10-11 01:44:52.266774495 +0000 UTC m=+0.048987040 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:52 compute-0 systemd[1]: Started libpod-conmon-a176e5cd5e73325950753407302c82058926a8513578691d02ef5a3f0b54618f.scope.
Oct 11 01:44:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v132: 197 pgs: 197 active+clean; 456 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 100 KiB/s rd, 7.8 KiB/s wr, 224 op/s
Oct 11 01:44:52 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.d scrub starts
Oct 11 01:44:52 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/068e9a2865524680d0f4675c6ca1793530bdb0846ddf3f73158bee2b61582a68/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/068e9a2865524680d0f4675c6ca1793530bdb0846ddf3f73158bee2b61582a68/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:52 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.d scrub ok
Oct 11 01:44:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/068e9a2865524680d0f4675c6ca1793530bdb0846ddf3f73158bee2b61582a68/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/068e9a2865524680d0f4675c6ca1793530bdb0846ddf3f73158bee2b61582a68/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:52 compute-0 podman[221172]: 2025-10-11 01:44:52.549614893 +0000 UTC m=+0.331827428 container init a176e5cd5e73325950753407302c82058926a8513578691d02ef5a3f0b54618f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_easley, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:52 compute-0 podman[221172]: 2025-10-11 01:44:52.563577177 +0000 UTC m=+0.345789662 container start a176e5cd5e73325950753407302c82058926a8513578691d02ef5a3f0b54618f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_easley, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:52 compute-0 podman[221172]: 2025-10-11 01:44:52.569383855 +0000 UTC m=+0.351596400 container attach a176e5cd5e73325950753407302c82058926a8513578691d02ef5a3f0b54618f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_easley, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:52 compute-0 ceph-mon[191930]: 7.b scrub starts
Oct 11 01:44:52 compute-0 ceph-mon[191930]: 7.b scrub ok
Oct 11 01:44:52 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.e scrub starts
Oct 11 01:44:52 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.e scrub ok
Oct 11 01:44:53 compute-0 wonderful_easley[221188]: {
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:     "0": [
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:         {
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "devices": [
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "/dev/loop3"
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             ],
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "lv_name": "ceph_lv0",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "lv_size": "21470642176",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "name": "ceph_lv0",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "tags": {
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.cluster_name": "ceph",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.crush_device_class": "",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.encrypted": "0",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.osd_id": "0",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.type": "block",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.vdo": "0"
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             },
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "type": "block",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "vg_name": "ceph_vg0"
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:         }
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:     ],
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:     "1": [
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:         {
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "devices": [
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "/dev/loop4"
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             ],
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "lv_name": "ceph_lv1",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "lv_size": "21470642176",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "name": "ceph_lv1",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "tags": {
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.cluster_name": "ceph",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.crush_device_class": "",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.encrypted": "0",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.osd_id": "1",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.type": "block",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.vdo": "0"
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             },
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "type": "block",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "vg_name": "ceph_vg1"
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:         }
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:     ],
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:     "2": [
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:         {
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "devices": [
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "/dev/loop5"
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             ],
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "lv_name": "ceph_lv2",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "lv_size": "21470642176",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "name": "ceph_lv2",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "tags": {
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.cluster_name": "ceph",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.crush_device_class": "",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.encrypted": "0",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.osd_id": "2",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.type": "block",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:                 "ceph.vdo": "0"
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             },
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "type": "block",
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:             "vg_name": "ceph_vg2"
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:         }
Oct 11 01:44:53 compute-0 wonderful_easley[221188]:     ]
Oct 11 01:44:53 compute-0 wonderful_easley[221188]: }
Oct 11 01:44:53 compute-0 podman[221172]: 2025-10-11 01:44:53.379303427 +0000 UTC m=+1.161515902 container died a176e5cd5e73325950753407302c82058926a8513578691d02ef5a3f0b54618f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_easley, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:44:53 compute-0 systemd[1]: libpod-a176e5cd5e73325950753407302c82058926a8513578691d02ef5a3f0b54618f.scope: Deactivated successfully.
Oct 11 01:44:53 compute-0 systemd[1]: var-lib-containers-storage-overlay-068e9a2865524680d0f4675c6ca1793530bdb0846ddf3f73158bee2b61582a68-merged.mount: Deactivated successfully.
Oct 11 01:44:53 compute-0 podman[221172]: 2025-10-11 01:44:53.46553963 +0000 UTC m=+1.247752085 container remove a176e5cd5e73325950753407302c82058926a8513578691d02ef5a3f0b54618f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_easley, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 01:44:53 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.10 scrub starts
Oct 11 01:44:53 compute-0 systemd[1]: libpod-conmon-a176e5cd5e73325950753407302c82058926a8513578691d02ef5a3f0b54618f.scope: Deactivated successfully.
Oct 11 01:44:53 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.10 scrub ok
Oct 11 01:44:53 compute-0 sudo[221072]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:53 compute-0 sudo[221207]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:53 compute-0 sudo[221207]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:53 compute-0 sudo[221207]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:53 compute-0 sudo[221232]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:44:53 compute-0 sudo[221232]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:53 compute-0 sudo[221232]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:53 compute-0 ceph-mon[191930]: pgmap v132: 197 pgs: 197 active+clean; 456 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 100 KiB/s rd, 7.8 KiB/s wr, 224 op/s
Oct 11 01:44:53 compute-0 ceph-mon[191930]: 7.d scrub starts
Oct 11 01:44:53 compute-0 ceph-mon[191930]: 7.d scrub ok
Oct 11 01:44:53 compute-0 ceph-mon[191930]: 5.e scrub starts
Oct 11 01:44:53 compute-0 ceph-mon[191930]: 5.e scrub ok
Oct 11 01:44:53 compute-0 sudo[221257]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:53 compute-0 sudo[221257]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:53 compute-0 sudo[221257]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:54 compute-0 sudo[221282]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:44:54 compute-0 sudo[221282]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v133: 197 pgs: 197 active+clean; 456 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 78 KiB/s rd, 6.2 KiB/s wr, 176 op/s
Oct 11 01:44:54 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.12 scrub starts
Oct 11 01:44:54 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.12 scrub ok
Oct 11 01:44:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e53 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:44:54 compute-0 podman[221344]: 2025-10-11 01:44:54.698395371 +0000 UTC m=+0.087995683 container create 0c04e99293f3d8abb2ee2fa94535d331baeec317d42c5a1b1641493b67a8c1e2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_pascal, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 01:44:54 compute-0 podman[221344]: 2025-10-11 01:44:54.660942817 +0000 UTC m=+0.050543129 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:54 compute-0 systemd[1]: Started libpod-conmon-0c04e99293f3d8abb2ee2fa94535d331baeec317d42c5a1b1641493b67a8c1e2.scope.
Oct 11 01:44:54 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:54 compute-0 podman[221344]: 2025-10-11 01:44:54.844523038 +0000 UTC m=+0.234123390 container init 0c04e99293f3d8abb2ee2fa94535d331baeec317d42c5a1b1641493b67a8c1e2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_pascal, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 01:44:54 compute-0 podman[221344]: 2025-10-11 01:44:54.860422751 +0000 UTC m=+0.250023033 container start 0c04e99293f3d8abb2ee2fa94535d331baeec317d42c5a1b1641493b67a8c1e2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_pascal, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:54 compute-0 podman[221344]: 2025-10-11 01:44:54.865676019 +0000 UTC m=+0.255276381 container attach 0c04e99293f3d8abb2ee2fa94535d331baeec317d42c5a1b1641493b67a8c1e2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_pascal, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:54 compute-0 trusting_pascal[221359]: 167 167
Oct 11 01:44:54 compute-0 systemd[1]: libpod-0c04e99293f3d8abb2ee2fa94535d331baeec317d42c5a1b1641493b67a8c1e2.scope: Deactivated successfully.
Oct 11 01:44:54 compute-0 podman[221344]: 2025-10-11 01:44:54.873664476 +0000 UTC m=+0.263264778 container died 0c04e99293f3d8abb2ee2fa94535d331baeec317d42c5a1b1641493b67a8c1e2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_pascal, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:54 compute-0 systemd[1]: var-lib-containers-storage-overlay-b6da9808ade3e6433d7edb5fa0835207b26ca28e3f998631f567e15e26770fde-merged.mount: Deactivated successfully.
Oct 11 01:44:54 compute-0 ceph-mon[191930]: 7.10 scrub starts
Oct 11 01:44:54 compute-0 ceph-mon[191930]: 7.10 scrub ok
Oct 11 01:44:54 compute-0 podman[221344]: 2025-10-11 01:44:54.941459257 +0000 UTC m=+0.331059539 container remove 0c04e99293f3d8abb2ee2fa94535d331baeec317d42c5a1b1641493b67a8c1e2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_pascal, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 01:44:54 compute-0 systemd[1]: libpod-conmon-0c04e99293f3d8abb2ee2fa94535d331baeec317d42c5a1b1641493b67a8c1e2.scope: Deactivated successfully.
Oct 11 01:44:55 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.10 scrub starts
Oct 11 01:44:55 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.10 scrub ok
Oct 11 01:44:55 compute-0 podman[221382]: 2025-10-11 01:44:55.23651422 +0000 UTC m=+0.080520166 container create 02135ec4363543ace96a4a60ebef3aeec766f3e2ce6b0a760fdbcec82233072c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_goodall, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:44:55 compute-0 podman[221382]: 2025-10-11 01:44:55.208351372 +0000 UTC m=+0.052357358 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:44:55 compute-0 systemd[1]: Started libpod-conmon-02135ec4363543ace96a4a60ebef3aeec766f3e2ce6b0a760fdbcec82233072c.scope.
Oct 11 01:44:55 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9bb5bd7dbc1c7344bde6944c499d6542fde3ada1d120773007b76a790e04b18f/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9bb5bd7dbc1c7344bde6944c499d6542fde3ada1d120773007b76a790e04b18f/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9bb5bd7dbc1c7344bde6944c499d6542fde3ada1d120773007b76a790e04b18f/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9bb5bd7dbc1c7344bde6944c499d6542fde3ada1d120773007b76a790e04b18f/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:55 compute-0 podman[221382]: 2025-10-11 01:44:55.434958215 +0000 UTC m=+0.278964191 container init 02135ec4363543ace96a4a60ebef3aeec766f3e2ce6b0a760fdbcec82233072c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_goodall, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:55 compute-0 podman[221382]: 2025-10-11 01:44:55.459175224 +0000 UTC m=+0.303181190 container start 02135ec4363543ace96a4a60ebef3aeec766f3e2ce6b0a760fdbcec82233072c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_goodall, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 01:44:55 compute-0 podman[221382]: 2025-10-11 01:44:55.467487431 +0000 UTC m=+0.311493437 container attach 02135ec4363543ace96a4a60ebef3aeec766f3e2ce6b0a760fdbcec82233072c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_goodall, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 01:44:55 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.14 scrub starts
Oct 11 01:44:55 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.14 scrub ok
Oct 11 01:44:55 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.16 scrub starts
Oct 11 01:44:55 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.16 scrub ok
Oct 11 01:44:55 compute-0 ceph-mon[191930]: pgmap v133: 197 pgs: 197 active+clean; 456 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 78 KiB/s rd, 6.2 KiB/s wr, 176 op/s
Oct 11 01:44:55 compute-0 ceph-mon[191930]: 7.12 scrub starts
Oct 11 01:44:55 compute-0 ceph-mon[191930]: 7.12 scrub ok
Oct 11 01:44:55 compute-0 ceph-mon[191930]: 5.10 scrub starts
Oct 11 01:44:55 compute-0 ceph-mon[191930]: 5.10 scrub ok
Oct 11 01:44:55 compute-0 ceph-mon[191930]: 7.14 scrub starts
Oct 11 01:44:55 compute-0 ceph-mon[191930]: 7.14 scrub ok
Oct 11 01:44:56 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.17 scrub starts
Oct 11 01:44:56 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.17 scrub ok
Oct 11 01:44:56 compute-0 sudo[221428]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hekgkfoxipfycdjhbeyqlyytwnvrcsqj ; /usr/bin/python3'
Oct 11 01:44:56 compute-0 sudo[221428]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:56 compute-0 python3[221431]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint radosgw-admin quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   user info --uid openstack _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:44:56
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.control', 'cephfs.cephfs.data', '.mgr', 'cephfs.cephfs.meta', 'images', 'volumes', '.rgw.root', 'vms', 'default.rgw.meta', 'backups', 'default.rgw.log']
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 01:44:56 compute-0 podman[221444]: 2025-10-11 01:44:56.413578434 +0000 UTC m=+0.073178320 container create 6a20372c51836d20b56d1c378baff813031bd799b5c616babfd7cd6dbb807743 (image=quay.io/ceph/ceph:v18, name=flamboyant_hypatia, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v134: 197 pgs: 197 active+clean; 456 KiB data, 85 MiB used, 60 GiB / 60 GiB avail; 82 KiB/s rd, 4.0 KiB/s wr, 184 op/s
Oct 11 01:44:56 compute-0 podman[221444]: 2025-10-11 01:44:56.382096827 +0000 UTC m=+0.041696743 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:56 compute-0 systemd[1]: Started libpod-conmon-6a20372c51836d20b56d1c378baff813031bd799b5c616babfd7cd6dbb807743.scope.
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:44:56 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9be3d7d373723e8ecb3482df30930b4caf592220deb68ca6587554d8bcee4c11/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9be3d7d373723e8ecb3482df30930b4caf592220deb68ca6587554d8bcee4c11/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:44:56 compute-0 modest_goodall[221399]: {
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:44:56 compute-0 modest_goodall[221399]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:44:56 compute-0 modest_goodall[221399]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:56 compute-0 modest_goodall[221399]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:44:56 compute-0 modest_goodall[221399]:         "osd_id": 1,
Oct 11 01:44:56 compute-0 modest_goodall[221399]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:44:56 compute-0 modest_goodall[221399]:         "type": "bluestore"
Oct 11 01:44:56 compute-0 modest_goodall[221399]:     },
Oct 11 01:44:56 compute-0 modest_goodall[221399]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:44:56 compute-0 modest_goodall[221399]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:56 compute-0 modest_goodall[221399]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:44:56 compute-0 modest_goodall[221399]:         "osd_id": 2,
Oct 11 01:44:56 compute-0 modest_goodall[221399]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:44:56 compute-0 modest_goodall[221399]:         "type": "bluestore"
Oct 11 01:44:56 compute-0 modest_goodall[221399]:     },
Oct 11 01:44:56 compute-0 modest_goodall[221399]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:44:56 compute-0 modest_goodall[221399]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:44:56 compute-0 modest_goodall[221399]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:44:56 compute-0 modest_goodall[221399]:         "osd_id": 0,
Oct 11 01:44:56 compute-0 modest_goodall[221399]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:44:56 compute-0 modest_goodall[221399]:         "type": "bluestore"
Oct 11 01:44:56 compute-0 modest_goodall[221399]:     }
Oct 11 01:44:56 compute-0 modest_goodall[221399]: }
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:44:56 compute-0 podman[221444]: 2025-10-11 01:44:56.560465331 +0000 UTC m=+0.220065227 container init 6a20372c51836d20b56d1c378baff813031bd799b5c616babfd7cd6dbb807743 (image=quay.io/ceph/ceph:v18, name=flamboyant_hypatia, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:44:56 compute-0 podman[221444]: 2025-10-11 01:44:56.570824156 +0000 UTC m=+0.230424032 container start 6a20372c51836d20b56d1c378baff813031bd799b5c616babfd7cd6dbb807743 (image=quay.io/ceph/ceph:v18, name=flamboyant_hypatia, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:44:56 compute-0 podman[221444]: 2025-10-11 01:44:56.575923024 +0000 UTC m=+0.235522920 container attach 6a20372c51836d20b56d1c378baff813031bd799b5c616babfd7cd6dbb807743 (image=quay.io/ceph/ceph:v18, name=flamboyant_hypatia, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 01:44:56 compute-0 podman[221382]: 2025-10-11 01:44:56.592860687 +0000 UTC m=+1.436866633 container died 02135ec4363543ace96a4a60ebef3aeec766f3e2ce6b0a760fdbcec82233072c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_goodall, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 01:44:56 compute-0 systemd[1]: libpod-02135ec4363543ace96a4a60ebef3aeec766f3e2ce6b0a760fdbcec82233072c.scope: Deactivated successfully.
Oct 11 01:44:56 compute-0 systemd[1]: libpod-02135ec4363543ace96a4a60ebef3aeec766f3e2ce6b0a760fdbcec82233072c.scope: Consumed 1.131s CPU time.
Oct 11 01:44:56 compute-0 systemd[1]: var-lib-containers-storage-overlay-9bb5bd7dbc1c7344bde6944c499d6542fde3ada1d120773007b76a790e04b18f-merged.mount: Deactivated successfully.
Oct 11 01:44:56 compute-0 podman[221382]: 2025-10-11 01:44:56.664067326 +0000 UTC m=+1.508073262 container remove 02135ec4363543ace96a4a60ebef3aeec766f3e2ce6b0a760fdbcec82233072c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_goodall, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:44:56 compute-0 systemd[1]: libpod-conmon-02135ec4363543ace96a4a60ebef3aeec766f3e2ce6b0a760fdbcec82233072c.scope: Deactivated successfully.
Oct 11 01:44:56 compute-0 sudo[221282]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:44:56 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:44:56 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 91cf5725-299e-48f3-b7d6-bfa663996371 does not exist
Oct 11 01:44:56 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 0e59b7ca-6cc2-4086-8b16-94f7e083437a does not exist
Oct 11 01:44:56 compute-0 flamboyant_hypatia[221468]: could not fetch user info: no user info saved
Oct 11 01:44:56 compute-0 sudo[221549]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:56 compute-0 sudo[221549]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:56 compute-0 sudo[221549]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:56 compute-0 ceph-mon[191930]: 6.16 scrub starts
Oct 11 01:44:56 compute-0 ceph-mon[191930]: 6.16 scrub ok
Oct 11 01:44:56 compute-0 ceph-mon[191930]: 5.17 scrub starts
Oct 11 01:44:56 compute-0 ceph-mon[191930]: 5.17 scrub ok
Oct 11 01:44:56 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:56 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:57 compute-0 sudo[221593]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:44:57 compute-0 sudo[221593]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:57 compute-0 sudo[221593]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:57 compute-0 systemd[1]: libpod-6a20372c51836d20b56d1c378baff813031bd799b5c616babfd7cd6dbb807743.scope: Deactivated successfully.
Oct 11 01:44:57 compute-0 podman[221444]: 2025-10-11 01:44:57.02921156 +0000 UTC m=+0.688811436 container died 6a20372c51836d20b56d1c378baff813031bd799b5c616babfd7cd6dbb807743 (image=quay.io/ceph/ceph:v18, name=flamboyant_hypatia, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2)
Oct 11 01:44:57 compute-0 systemd[1]: var-lib-containers-storage-overlay-9be3d7d373723e8ecb3482df30930b4caf592220deb68ca6587554d8bcee4c11-merged.mount: Deactivated successfully.
Oct 11 01:44:57 compute-0 podman[221444]: 2025-10-11 01:44:57.088230784 +0000 UTC m=+0.747830670 container remove 6a20372c51836d20b56d1c378baff813031bd799b5c616babfd7cd6dbb807743 (image=quay.io/ceph/ceph:v18, name=flamboyant_hypatia, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 01:44:57 compute-0 systemd[1]: libpod-conmon-6a20372c51836d20b56d1c378baff813031bd799b5c616babfd7cd6dbb807743.scope: Deactivated successfully.
Oct 11 01:44:57 compute-0 sudo[221428]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:57 compute-0 sudo[221620]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:57 compute-0 sudo[221620]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:57 compute-0 sudo[221620]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:57 compute-0 sudo[221655]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:44:57 compute-0 sudo[221655]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:57 compute-0 sudo[221655]: pam_unix(sudo:session): session closed for user root
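[editor's note] The recurring `sudo /bin/true` and `sudo which python3` pairs above look like the orchestrator probing the host for passwordless sudo and a Python interpreter before shipping the cephadm script to it; that intent is an inference from the pattern, not something the log states. A local sketch of the same two probes:

    import shutil
    import subprocess

    # Probe 1: can we escalate without a password? (-n = never prompt)
    can_sudo = subprocess.run(["sudo", "-n", "/bin/true"]).returncode == 0
    # Probe 2: is a python3 interpreter on PATH?
    python3_path = shutil.which("python3")
    print(f"passwordless sudo: {can_sudo}, python3: {python3_path}")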
Oct 11 01:44:57 compute-0 sudo[221720]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kmyuzzoasylcqkvlpysexrowqfuhatjl ; /usr/bin/python3'
Oct 11 01:44:57 compute-0 sudo[221720]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:44:57 compute-0 sudo[221688]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:57 compute-0 sudo[221688]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:57 compute-0 sudo[221688]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:57 compute-0 sudo[221731]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ls
Oct 11 01:44:57 compute-0 sudo[221731]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:57 compute-0 python3[221728]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint radosgw-admin quay.io/ceph/ceph:v18 --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   user create --uid="openstack" --display-name "openstack" _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:44:57 compute-0 podman[221756]: 2025-10-11 01:44:57.619683266 +0000 UTC m=+0.112614753 container create ede43a6ab86a907de5167b0377152a4132347f0d24ffdd20361c3f8711f01408 (image=quay.io/ceph/ceph:v18, name=jovial_ganguly, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Oct 11 01:44:57 compute-0 podman[221756]: 2025-10-11 01:44:57.568408628 +0000 UTC m=+0.061340135 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Oct 11 01:44:57 compute-0 systemd[1]: Started libpod-conmon-ede43a6ab86a907de5167b0377152a4132347f0d24ffdd20361c3f8711f01408.scope.
Oct 11 01:44:57 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:44:57 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3011aca2e88720f618aa2aca22f801ccd3aa7ad8b28ef4506e993d32b97e4b25/merged/home/assimilate_ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:57 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3011aca2e88720f618aa2aca22f801ccd3aa7ad8b28ef4506e993d32b97e4b25/merged/etc/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:44:57 compute-0 podman[221756]: 2025-10-11 01:44:57.780892457 +0000 UTC m=+0.273823944 container init ede43a6ab86a907de5167b0377152a4132347f0d24ffdd20361c3f8711f01408 (image=quay.io/ceph/ceph:v18, name=jovial_ganguly, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:44:57 compute-0 podman[221756]: 2025-10-11 01:44:57.799633939 +0000 UTC m=+0.292565396 container start ede43a6ab86a907de5167b0377152a4132347f0d24ffdd20361c3f8711f01408 (image=quay.io/ceph/ceph:v18, name=jovial_ganguly, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507)
Oct 11 01:44:57 compute-0 podman[221756]: 2025-10-11 01:44:57.806041786 +0000 UTC m=+0.298973243 container attach ede43a6ab86a907de5167b0377152a4132347f0d24ffdd20361c3f8711f01408 (image=quay.io/ceph/ceph:v18, name=jovial_ganguly, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True)
Oct 11 01:44:57 compute-0 ceph-mon[191930]: pgmap v134: 197 pgs: 197 active+clean; 456 KiB data, 85 MiB used, 60 GiB / 60 GiB avail; 82 KiB/s rd, 4.0 KiB/s wr, 184 op/s
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]: {
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "user_id": "openstack",
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "display_name": "openstack",
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "email": "",
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "suspended": 0,
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "max_buckets": 1000,
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "subusers": [],
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "keys": [
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:         {
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:             "user": "openstack",
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:             "access_key": "YL6HOW72A8BEWVDEJXTZ",
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:             "secret_key": "vkuBP9GgzqyEZRsOwhQIiG3pEDozhlYYGVVsLjE5"
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:         }
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     ],
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "swift_keys": [],
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "caps": [],
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "op_mask": "read, write, delete",
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "default_placement": "",
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "default_storage_class": "",
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "placement_tags": [],
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "bucket_quota": {
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:         "enabled": false,
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:         "check_on_raw": false,
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:         "max_size": -1,
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:         "max_size_kb": 0,
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:         "max_objects": -1
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     },
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "user_quota": {
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:         "enabled": false,
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:         "check_on_raw": false,
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:         "max_size": -1,
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:         "max_size_kb": 0,
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:         "max_objects": -1
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     },
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "temp_url_keys": [],
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "type": "rgw",
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]:     "mfa_ids": []
Oct 11 01:44:58 compute-0 jovial_ganguly[221785]: }
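[editor's note] This is the user document returned by the `radosgw-admin ... user create --uid openstack` call launched through ansible at 01:44:57; the S3 credentials the deployment needs live under keys[0]. A minimal sketch of pulling them out, assuming the JSON has been captured as printed (excerpted below):

    import json

    user_doc = json.loads("""
    {
        "user_id": "openstack",
        "keys": [
            {
                "user": "openstack",
                "access_key": "YL6HOW72A8BEWVDEJXTZ",
                "secret_key": "vkuBP9GgzqyEZRsOwhQIiG3pEDozhlYYGVVsLjE5"
            }
        ]
    }
    """)  # excerpt of the document logged above

    s3_key = user_doc["keys"][0]
    print(s3_key["access_key"], s3_key["secret_key"])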
Oct 11 01:44:58 compute-0 systemd[1]: libpod-ede43a6ab86a907de5167b0377152a4132347f0d24ffdd20361c3f8711f01408.scope: Deactivated successfully.
Oct 11 01:44:58 compute-0 podman[221931]: 2025-10-11 01:44:58.296980746 +0000 UTC m=+0.043155033 container died ede43a6ab86a907de5167b0377152a4132347f0d24ffdd20361c3f8711f01408 (image=quay.io/ceph/ceph:v18, name=jovial_ganguly, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:44:58 compute-0 systemd[1]: var-lib-containers-storage-overlay-3011aca2e88720f618aa2aca22f801ccd3aa7ad8b28ef4506e993d32b97e4b25-merged.mount: Deactivated successfully.
Oct 11 01:44:58 compute-0 podman[221929]: 2025-10-11 01:44:58.348841063 +0000 UTC m=+0.104881886 container exec ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.build-date=20250507)
Oct 11 01:44:58 compute-0 podman[221931]: 2025-10-11 01:44:58.382462949 +0000 UTC m=+0.128637226 container remove ede43a6ab86a907de5167b0377152a4132347f0d24ffdd20361c3f8711f01408 (image=quay.io/ceph/ceph:v18, name=jovial_ganguly, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 01:44:58 compute-0 systemd[1]: libpod-conmon-ede43a6ab86a907de5167b0377152a4132347f0d24ffdd20361c3f8711f01408.scope: Deactivated successfully.
Oct 11 01:44:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v135: 197 pgs: 197 active+clean; 456 KiB data, 85 MiB used, 60 GiB / 60 GiB avail; 70 KiB/s rd, 3.4 KiB/s wr, 158 op/s
Oct 11 01:44:58 compute-0 sudo[221720]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:58 compute-0 podman[221929]: 2025-10-11 01:44:58.490546992 +0000 UTC m=+0.246587765 container exec_died ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:44:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e53 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:44:59 compute-0 sudo[221731]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:44:59 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:44:59 compute-0 podman[157119]: time="2025-10-11T01:44:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:44:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:44:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
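[editor's note] The `GET /v4.9.3/libpod/containers/json?all=true...` lines are a client polling the podman API socket for the full container list. A rough CLI stand-in via `podman ps` (the equivalence is assumed here, not shown in the log):

    import json
    import subprocess

    # CLI stand-in for the libpod containers/json?all=true request logged
    # above; assumes the podman binary is on PATH.
    out = subprocess.run(
        ["podman", "ps", "--all", "--format", "json"],
        capture_output=True, text=True, check=True,
    ).stdout
    for c in json.loads(out):
        print(c["Id"][:12], c.get("Names"), c.get("State"))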
Oct 11 01:44:59 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:44:59 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:44:59 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:44:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:44:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6780 "" "Go-http-client/1.1"
Oct 11 01:44:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:44:59 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:59 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 9e72e77d-a955-4d0f-9d34-06cc25a2f4b9 does not exist
Oct 11 01:44:59 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 3a904bcc-21a3-412a-b22f-b33b729ff572 does not exist
Oct 11 01:44:59 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev ef556730-2eda-4427-b6bb-88ea6308e694 does not exist
Oct 11 01:44:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:44:59 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:44:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:44:59 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:44:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:44:59 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:59 compute-0 sudo[222095]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:44:59 compute-0 sudo[222095]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:44:59 compute-0 sudo[222095]: pam_unix(sudo:session): session closed for user root
Oct 11 01:44:59 compute-0 ceph-mon[191930]: pgmap v135: 197 pgs: 197 active+clean; 456 KiB data, 85 MiB used, 60 GiB / 60 GiB avail; 70 KiB/s rd, 3.4 KiB/s wr, 158 op/s
Oct 11 01:44:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:44:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:44:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:44:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:44:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:44:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:45:00 compute-0 sudo[222120]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:45:00 compute-0 sudo[222120]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:45:00 compute-0 sudo[222120]: pam_unix(sudo:session): session closed for user root
Oct 11 01:45:00 compute-0 sudo[222145]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:45:00 compute-0 sudo[222145]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:45:00 compute-0 sudo[222145]: pam_unix(sudo:session): session closed for user root
Oct 11 01:45:00 compute-0 sudo[222170]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 01:45:00 compute-0 sudo[222170]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
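[editor's note] The `--config-json -` argument above means the cephadm wrapper reads a JSON payload from stdin before running `ceph-volume lvm batch` against the three pre-created logical volumes. A sketch of the equivalent invocation: the argv is copied from the sudo line above, while the payload shape ("config" plus "keyring") is an assumption, since the log never prints what arrives on stdin.

    import json
    import subprocess

    # stdin payload shape is an assumption; contents are placeholders.
    payload = json.dumps({
        "config": "# ceph.conf contents (assumed)",
        "keyring": "# client.bootstrap-osd keyring (assumed)",
    })
    cmd = [
        "sudo", "/bin/python3",
        "/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/"
        "cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d",
        "--env", "CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group",
        "--image", "quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0",
        "--timeout", "895",
        "ceph-volume", "--fsid", "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
        "--config-json", "-", "--",
        "lvm", "batch", "--no-auto",
        "/dev/ceph_vg0/ceph_lv0", "/dev/ceph_vg1/ceph_lv1", "/dev/ceph_vg2/ceph_lv2",
        "--yes", "--no-systemd",
    ]
    subprocess.run(cmd, input=payload, text=True, check=True)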
Oct 11 01:45:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v136: 197 pgs: 197 active+clean; 456 KiB data, 85 MiB used, 60 GiB / 60 GiB avail; 61 KiB/s rd, 2.9 KiB/s wr, 136 op/s
Oct 11 01:45:00 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.16 scrub starts
Oct 11 01:45:00 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.16 scrub ok
Oct 11 01:45:00 compute-0 podman[222234]: 2025-10-11 01:45:00.91857688 +0000 UTC m=+0.088546073 container create 0075e3fe26deb48dfa6f8c7ee0749fec0361b48990ee58572f37df771e97ade9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_engelbart, org.label-schema.schema-version=1.0, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 01:45:00 compute-0 podman[222234]: 2025-10-11 01:45:00.879594396 +0000 UTC m=+0.049563639 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:45:00 compute-0 systemd[1]: Started libpod-conmon-0075e3fe26deb48dfa6f8c7ee0749fec0361b48990ee58572f37df771e97ade9.scope.
Oct 11 01:45:00 compute-0 ceph-mon[191930]: 7.16 scrub starts
Oct 11 01:45:00 compute-0 ceph-mon[191930]: 7.16 scrub ok
Oct 11 01:45:01 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:45:01 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.1b scrub starts
Oct 11 01:45:01 compute-0 podman[222234]: 2025-10-11 01:45:01.066421756 +0000 UTC m=+0.236390959 container init 0075e3fe26deb48dfa6f8c7ee0749fec0361b48990ee58572f37df771e97ade9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_engelbart, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:45:01 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.1b scrub ok
Oct 11 01:45:01 compute-0 podman[222234]: 2025-10-11 01:45:01.078204451 +0000 UTC m=+0.248173614 container start 0075e3fe26deb48dfa6f8c7ee0749fec0361b48990ee58572f37df771e97ade9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_engelbart, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 01:45:01 compute-0 podman[222234]: 2025-10-11 01:45:01.083529699 +0000 UTC m=+0.253498912 container attach 0075e3fe26deb48dfa6f8c7ee0749fec0361b48990ee58572f37df771e97ade9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_engelbart, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20250507, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:45:01 compute-0 trusting_engelbart[222252]: 167 167
Oct 11 01:45:01 compute-0 systemd[1]: libpod-0075e3fe26deb48dfa6f8c7ee0749fec0361b48990ee58572f37df771e97ade9.scope: Deactivated successfully.
Oct 11 01:45:01 compute-0 podman[222234]: 2025-10-11 01:45:01.089588046 +0000 UTC m=+0.259557229 container died 0075e3fe26deb48dfa6f8c7ee0749fec0361b48990ee58572f37df771e97ade9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_engelbart, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:45:01 compute-0 systemd[1]: var-lib-containers-storage-overlay-6a131b3295452156e050dd661d6c6332c146abde72f8c7b4335b1268c19b146d-merged.mount: Deactivated successfully.
Oct 11 01:45:01 compute-0 podman[222248]: 2025-10-11 01:45:01.124317761 +0000 UTC m=+0.121281729 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 01:45:01 compute-0 podman[222234]: 2025-10-11 01:45:01.142685094 +0000 UTC m=+0.312654267 container remove 0075e3fe26deb48dfa6f8c7ee0749fec0361b48990ee58572f37df771e97ade9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_engelbart, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 01:45:01 compute-0 podman[222251]: 2025-10-11 01:45:01.145167502 +0000 UTC m=+0.143128899 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=openstack_network_exporter, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, managed_by=edpm_ansible, build-date=2025-08-20T13:12:41, version=9.6, io.openshift.tags=minimal rhel9, io.buildah.version=1.33.7, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, name=ubi9-minimal, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, com.redhat.component=ubi9-minimal-container, config_id=edpm, url=https://catalog.redhat.com/en/search?searchType=containers, distribution-scope=public, architecture=x86_64, maintainer=Red Hat, Inc., release=1755695350, vcs-type=git, vendor=Red Hat, Inc.)
Oct 11 01:45:01 compute-0 systemd[1]: libpod-conmon-0075e3fe26deb48dfa6f8c7ee0749fec0361b48990ee58572f37df771e97ade9.scope: Deactivated successfully.
Oct 11 01:45:01 compute-0 podman[222317]: 2025-10-11 01:45:01.387653368 +0000 UTC m=+0.071982820 container create 7f3d6b9e852de24650def035ac29d251fe31083bff8fe42a074023016d32898d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=youthful_banzai, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:45:01 compute-0 openstack_network_exporter[159265]: ERROR   01:45:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:45:01 compute-0 openstack_network_exporter[159265]: ERROR   01:45:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:45:01 compute-0 openstack_network_exporter[159265]: ERROR   01:45:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:45:01 compute-0 openstack_network_exporter[159265]: ERROR   01:45:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:45:01 compute-0 openstack_network_exporter[159265]: ERROR   01:45:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:45:01 compute-0 podman[222317]: 2025-10-11 01:45:01.354500402 +0000 UTC m=+0.038829894 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:45:01 compute-0 systemd[1]: Started libpod-conmon-7f3d6b9e852de24650def035ac29d251fe31083bff8fe42a074023016d32898d.scope.
Oct 11 01:45:01 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:45:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd7df91c9904674091936bf84f94a79bc4be712751e88e8d02d8131c9fa00da8/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:45:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd7df91c9904674091936bf84f94a79bc4be712751e88e8d02d8131c9fa00da8/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:45:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd7df91c9904674091936bf84f94a79bc4be712751e88e8d02d8131c9fa00da8/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:45:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd7df91c9904674091936bf84f94a79bc4be712751e88e8d02d8131c9fa00da8/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:45:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd7df91c9904674091936bf84f94a79bc4be712751e88e8d02d8131c9fa00da8/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:45:01 compute-0 podman[222317]: 2025-10-11 01:45:01.577818407 +0000 UTC m=+0.262147879 container init 7f3d6b9e852de24650def035ac29d251fe31083bff8fe42a074023016d32898d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=youthful_banzai, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 01:45:01 compute-0 podman[222317]: 2025-10-11 01:45:01.612934761 +0000 UTC m=+0.297264203 container start 7f3d6b9e852de24650def035ac29d251fe31083bff8fe42a074023016d32898d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=youthful_banzai, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:45:01 compute-0 podman[222317]: 2025-10-11 01:45:01.619500329 +0000 UTC m=+0.303829801 container attach 7f3d6b9e852de24650def035ac29d251fe31083bff8fe42a074023016d32898d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=youthful_banzai, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:45:01 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.18 deep-scrub starts
Oct 11 01:45:01 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.18 deep-scrub ok
Oct 11 01:45:02 compute-0 ceph-mon[191930]: pgmap v136: 197 pgs: 197 active+clean; 456 KiB data, 85 MiB used, 60 GiB / 60 GiB avail; 61 KiB/s rd, 2.9 KiB/s wr, 136 op/s
Oct 11 01:45:02 compute-0 ceph-mon[191930]: 5.1b scrub starts
Oct 11 01:45:02 compute-0 ceph-mon[191930]: 5.1b scrub ok
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v137: 197 pgs: 197 active+clean; 456 KiB data, 85 MiB used, 60 GiB / 60 GiB avail; 60 KiB/s rd, 3.0 KiB/s wr, 133 op/s
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 1)
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 1)
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 1)
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:45:02 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 1)
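[editor's note] Each pg_autoscaler line above follows the same arithmetic: pg target = usage ratio x bias x PG budget, where a budget of 300 exactly reproduces every logged value and is consistent with the default mon_target_pg_per_osd of 100 times this cluster's 3 OSDs (the factor of 300 is inferred from the numbers, not stated in the log). The raw targets are then quantized to a power of two and clamped. A check against the figures above:

    import math

    # (pool, usage_ratio, bias, logged pg target), copied from the lines above
    rows = [
        (".mgr",               7.185749983720779e-06,  1.0, 0.0021557249951162337),
        ("cephfs.cephfs.meta", 5.087256625643029e-07,  4.0, 0.0006104707950771635),
        (".rgw.root",          2.5436283128215145e-07, 1.0, 7.630884938464544e-05),
        ("default.rgw.log",    2.1620840658982875e-06, 1.0, 0.0006486252197694863),
        ("default.rgw.meta",   1.2718141564107572e-07, 4.0, 0.00015261769876929088),
    ]
    PG_BUDGET = 100 * 3  # mon_target_pg_per_osd default (100) x 3 OSDs, inferred

    for pool, ratio, bias, logged in rows:
        assert math.isclose(ratio * bias * PG_BUDGET, logged, rel_tol=1e-9), pool
    print("every logged pg target equals usage_ratio * bias * 300")

The `osd pool set ... pg_num 32` dispatches that follow at 01:45:02-01:45:03 are the autoscaler acting on these targets after quantization, raising the 1-PG RGW pools to 32.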
Oct 11 01:45:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": ".rgw.root", "var": "pg_num", "val": "32"} v 0) v1
Oct 11 01:45:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": ".rgw.root", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:45:02 compute-0 youthful_banzai[222333]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:45:02 compute-0 youthful_banzai[222333]: --> relative data size: 1.0
Oct 11 01:45:02 compute-0 youthful_banzai[222333]: --> All data devices are unavailable
Oct 11 01:45:02 compute-0 systemd[1]: libpod-7f3d6b9e852de24650def035ac29d251fe31083bff8fe42a074023016d32898d.scope: Deactivated successfully.
Oct 11 01:45:02 compute-0 systemd[1]: libpod-7f3d6b9e852de24650def035ac29d251fe31083bff8fe42a074023016d32898d.scope: Consumed 1.240s CPU time.
Oct 11 01:45:02 compute-0 podman[222317]: 2025-10-11 01:45:02.909097924 +0000 UTC m=+1.593427406 container died 7f3d6b9e852de24650def035ac29d251fe31083bff8fe42a074023016d32898d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=youthful_banzai, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default)
Oct 11 01:45:02 compute-0 systemd[1]: var-lib-containers-storage-overlay-fd7df91c9904674091936bf84f94a79bc4be712751e88e8d02d8131c9fa00da8-merged.mount: Deactivated successfully.
Oct 11 01:45:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e53 do_prune osdmap full prune enabled
Oct 11 01:45:03 compute-0 ceph-mon[191930]: 6.18 deep-scrub starts
Oct 11 01:45:03 compute-0 ceph-mon[191930]: 6.18 deep-scrub ok
Oct 11 01:45:03 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": ".rgw.root", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:45:03 compute-0 podman[222317]: 2025-10-11 01:45:03.038384178 +0000 UTC m=+1.722713660 container remove 7f3d6b9e852de24650def035ac29d251fe31083bff8fe42a074023016d32898d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=youthful_banzai, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:45:03 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": ".rgw.root", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:45:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e54 e54: 3 total, 3 up, 3 in
Oct 11 01:45:03 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e54: 3 total, 3 up, 3 in
Oct 11 01:45:03 compute-0 ceph-mgr[192233]: [progress INFO root] update: starting ev ca48587c-bc0a-499d-87f7-9569fe6d4144 (PG autoscaler increasing pool 8 PGs from 1 to 32)
Oct 11 01:45:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pg_num", "val": "32"} v 0) v1
Oct 11 01:45:03 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:45:03 compute-0 systemd[1]: libpod-conmon-7f3d6b9e852de24650def035ac29d251fe31083bff8fe42a074023016d32898d.scope: Deactivated successfully.
Oct 11 01:45:03 compute-0 sudo[222170]: pam_unix(sudo:session): session closed for user root
Oct 11 01:45:03 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.1c scrub starts
Oct 11 01:45:03 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.1c scrub ok
Oct 11 01:45:03 compute-0 sudo[222372]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:45:03 compute-0 sudo[222372]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:45:03 compute-0 sudo[222372]: pam_unix(sudo:session): session closed for user root
Oct 11 01:45:03 compute-0 sudo[222397]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:45:03 compute-0 sudo[222397]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:45:03 compute-0 sudo[222397]: pam_unix(sudo:session): session closed for user root
Oct 11 01:45:03 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.17 deep-scrub starts
Oct 11 01:45:03 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.17 deep-scrub ok
Oct 11 01:45:03 compute-0 sudo[222422]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:45:03 compute-0 sudo[222422]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:45:03 compute-0 sudo[222422]: pam_unix(sudo:session): session closed for user root
Oct 11 01:45:03 compute-0 sudo[222447]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:45:03 compute-0 sudo[222447]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:45:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e54 do_prune osdmap full prune enabled
Oct 11 01:45:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:45:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e55 e55: 3 total, 3 up, 3 in
Oct 11 01:45:04 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e55: 3 total, 3 up, 3 in
Oct 11 01:45:04 compute-0 ceph-mgr[192233]: [progress INFO root] update: starting ev b006774a-b151-4b18-a6a3-d3e49f17dd73 (PG autoscaler increasing pool 9 PGs from 1 to 32)
Oct 11 01:45:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.control", "var": "pg_num", "val": "32"} v 0) v1
Oct 11 01:45:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.control", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:45:04 compute-0 ceph-mon[191930]: pgmap v137: 197 pgs: 197 active+clean; 456 KiB data, 85 MiB used, 60 GiB / 60 GiB avail; 60 KiB/s rd, 3.0 KiB/s wr, 133 op/s
Oct 11 01:45:04 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": ".rgw.root", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:45:04 compute-0 ceph-mon[191930]: osdmap e54: 3 total, 3 up, 3 in
Oct 11 01:45:04 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:45:04 compute-0 ceph-mon[191930]: 7.17 deep-scrub starts
Oct 11 01:45:04 compute-0 ceph-mon[191930]: 7.17 deep-scrub ok
Oct 11 01:45:04 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.1f scrub starts
Oct 11 01:45:04 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 5.1f scrub ok
Oct 11 01:45:04 compute-0 podman[222510]: 2025-10-11 01:45:04.347890575 +0000 UTC m=+0.099518038 container create eb47eed159a9e9d896ca6058952d8526d9f2aae7b52cafca9173db0a8e04a061 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_bose, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:45:04 compute-0 podman[222510]: 2025-10-11 01:45:04.310365022 +0000 UTC m=+0.061992565 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:45:04 compute-0 systemd[1]: Started libpod-conmon-eb47eed159a9e9d896ca6058952d8526d9f2aae7b52cafca9173db0a8e04a061.scope.
Oct 11 01:45:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v140: 197 pgs: 197 active+clean; 456 KiB data, 85 MiB used, 60 GiB / 60 GiB avail; 1.7 KiB/s rd, 255 B/s wr, 2 op/s
Oct 11 01:45:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": ".rgw.root", "var": "pg_num_actual", "val": "32"} v 0) v1
Oct 11 01:45:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": ".rgw.root", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:45:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pg_num_actual", "val": "32"} v 0) v1
Oct 11 01:45:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:45:04 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:45:04 compute-0 podman[222510]: 2025-10-11 01:45:04.493039733 +0000 UTC m=+0.244667276 container init eb47eed159a9e9d896ca6058952d8526d9f2aae7b52cafca9173db0a8e04a061 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_bose, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:45:04 compute-0 podman[222510]: 2025-10-11 01:45:04.513539754 +0000 UTC m=+0.265167247 container start eb47eed159a9e9d896ca6058952d8526d9f2aae7b52cafca9173db0a8e04a061 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_bose, org.label-schema.schema-version=1.0, ceph=True, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2)
Oct 11 01:45:04 compute-0 podman[222510]: 2025-10-11 01:45:04.520658741 +0000 UTC m=+0.272286294 container attach eb47eed159a9e9d896ca6058952d8526d9f2aae7b52cafca9173db0a8e04a061 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_bose, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507)
Oct 11 01:45:04 compute-0 objective_bose[222526]: 167 167
Oct 11 01:45:04 compute-0 systemd[1]: libpod-eb47eed159a9e9d896ca6058952d8526d9f2aae7b52cafca9173db0a8e04a061.scope: Deactivated successfully.
Oct 11 01:45:04 compute-0 podman[222510]: 2025-10-11 01:45:04.527706188 +0000 UTC m=+0.279333681 container died eb47eed159a9e9d896ca6058952d8526d9f2aae7b52cafca9173db0a8e04a061 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_bose, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:45:04 compute-0 systemd[1]: var-lib-containers-storage-overlay-94e82e11bd7bfb9bc241402548ae486f68f36e81a4df0c607f94c16fbef673f1-merged.mount: Deactivated successfully.
Oct 11 01:45:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e55 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:45:04 compute-0 podman[222510]: 2025-10-11 01:45:04.601450997 +0000 UTC m=+0.353078450 container remove eb47eed159a9e9d896ca6058952d8526d9f2aae7b52cafca9173db0a8e04a061 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_bose, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 01:45:04 compute-0 systemd[1]: libpod-conmon-eb47eed159a9e9d896ca6058952d8526d9f2aae7b52cafca9173db0a8e04a061.scope: Deactivated successfully.
Oct 11 01:45:04 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.19 scrub starts
Oct 11 01:45:04 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.19 scrub ok
Oct 11 01:45:04 compute-0 podman[222549]: 2025-10-11 01:45:04.856066757 +0000 UTC m=+0.074906438 container create 7ccc84d008ed4dcee01f3b2e495233b571aea7f8b93ccdce5bd54d07be695610 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_germain, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 01:45:04 compute-0 podman[222549]: 2025-10-11 01:45:04.820341483 +0000 UTC m=+0.039181234 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:45:04 compute-0 systemd[1]: Started libpod-conmon-7ccc84d008ed4dcee01f3b2e495233b571aea7f8b93ccdce5bd54d07be695610.scope.
Oct 11 01:45:04 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:45:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/300f84071983b13e92aedfdfe0d4a78465100a8675564db36d22b172ab18bd99/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:45:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/300f84071983b13e92aedfdfe0d4a78465100a8675564db36d22b172ab18bd99/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:45:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/300f84071983b13e92aedfdfe0d4a78465100a8675564db36d22b172ab18bd99/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:45:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/300f84071983b13e92aedfdfe0d4a78465100a8675564db36d22b172ab18bd99/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:45:05 compute-0 podman[222549]: 2025-10-11 01:45:05.042144847 +0000 UTC m=+0.260984578 container init 7ccc84d008ed4dcee01f3b2e495233b571aea7f8b93ccdce5bd54d07be695610 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_germain, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:45:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e55 do_prune osdmap full prune enabled
Oct 11 01:45:05 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.control", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:45:05 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": ".rgw.root", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:45:05 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:45:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e56 e56: 3 total, 3 up, 3 in
Oct 11 01:45:05 compute-0 podman[222549]: 2025-10-11 01:45:05.070375055 +0000 UTC m=+0.289214716 container start 7ccc84d008ed4dcee01f3b2e495233b571aea7f8b93ccdce5bd54d07be695610 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_germain, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 01:45:05 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e56: 3 total, 3 up, 3 in
Oct 11 01:45:05 compute-0 ceph-mgr[192233]: [progress INFO root] update: starting ev aba7da24-2f59-4cda-a416-d5df8373cafa (PG autoscaler increasing pool 10 PGs from 1 to 32)
Oct 11 01:45:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_num", "val": "32"} v 0) v1
Oct 11 01:45:05 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:45:05 compute-0 podman[222549]: 2025-10-11 01:45:05.08177391 +0000 UTC m=+0.300613591 container attach 7ccc84d008ed4dcee01f3b2e495233b571aea7f8b93ccdce5bd54d07be695610 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_germain, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 01:45:05 compute-0 ceph-mon[191930]: 5.1c scrub starts
Oct 11 01:45:05 compute-0 ceph-mon[191930]: 5.1c scrub ok
Oct 11 01:45:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:45:05 compute-0 ceph-mon[191930]: osdmap e55: 3 total, 3 up, 3 in
Oct 11 01:45:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.control", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:45:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": ".rgw.root", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:45:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:45:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.control", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:45:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": ".rgw.root", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:45:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:45:05 compute-0 ceph-mon[191930]: osdmap e56: 3 total, 3 up, 3 in
Oct 11 01:45:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 56 pg[9.0( v 53'585 (0'0,53'585] local-lis/les=47/48 n=209 ec=47/47 lis/c=47/47 les/c/f=48/48/0 sis=56 pruub=10.532778740s) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 53'584 mlcod 53'584 active pruub 116.890007019s@ mbc={}] start_peering_interval up [1] -> [1], acting [1] -> [1], acting_primary 1 -> 1, up_primary 1 -> 1, role 0 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 56 pg[8.0( v 46'4 (0'0,46'4] local-lis/les=45/46 n=4 ec=45/45 lis/c=45/45 les/c/f=46/46/0 sis=56 pruub=8.510573387s) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 46'3 mlcod 46'3 active pruub 114.868682861s@ mbc={}] start_peering_interval up [1] -> [1], acting [1] -> [1], acting_primary 1 -> 1, up_primary 1 -> 1, role 0 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 56 pg[8.0( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=45/45 lis/c=45/45 les/c/f=46/46/0 sis=56 pruub=8.510573387s) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 46'3 mlcod 0'0 unknown pruub 114.868682861s@ mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:05 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 56 pg[9.0( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=6 ec=47/47 lis/c=47/47 les/c/f=48/48/0 sis=56 pruub=10.532778740s) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 53'584 mlcod 0'0 unknown pruub 116.890007019s@ mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:05 compute-0 laughing_germain[222566]: {
Oct 11 01:45:05 compute-0 laughing_germain[222566]:     "0": [
Oct 11 01:45:05 compute-0 laughing_germain[222566]:         {
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "devices": [
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "/dev/loop3"
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             ],
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "lv_name": "ceph_lv0",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "lv_size": "21470642176",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "name": "ceph_lv0",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "tags": {
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.cluster_name": "ceph",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.crush_device_class": "",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.encrypted": "0",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.osd_id": "0",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.type": "block",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.vdo": "0"
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             },
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "type": "block",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "vg_name": "ceph_vg0"
Oct 11 01:45:05 compute-0 laughing_germain[222566]:         }
Oct 11 01:45:05 compute-0 laughing_germain[222566]:     ],
Oct 11 01:45:05 compute-0 laughing_germain[222566]:     "1": [
Oct 11 01:45:05 compute-0 laughing_germain[222566]:         {
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "devices": [
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "/dev/loop4"
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             ],
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "lv_name": "ceph_lv1",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "lv_size": "21470642176",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "name": "ceph_lv1",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "tags": {
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.cluster_name": "ceph",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.crush_device_class": "",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.encrypted": "0",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.osd_id": "1",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.type": "block",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.vdo": "0"
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             },
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "type": "block",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "vg_name": "ceph_vg1"
Oct 11 01:45:05 compute-0 laughing_germain[222566]:         }
Oct 11 01:45:05 compute-0 laughing_germain[222566]:     ],
Oct 11 01:45:05 compute-0 laughing_germain[222566]:     "2": [
Oct 11 01:45:05 compute-0 laughing_germain[222566]:         {
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "devices": [
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "/dev/loop5"
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             ],
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "lv_name": "ceph_lv2",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "lv_size": "21470642176",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "name": "ceph_lv2",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "tags": {
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.cluster_name": "ceph",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.crush_device_class": "",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.encrypted": "0",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.osd_id": "2",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.type": "block",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:                 "ceph.vdo": "0"
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             },
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "type": "block",
Oct 11 01:45:05 compute-0 laughing_germain[222566]:             "vg_name": "ceph_vg2"
Oct 11 01:45:05 compute-0 laughing_germain[222566]:         }
Oct 11 01:45:05 compute-0 laughing_germain[222566]:     ]
Oct 11 01:45:05 compute-0 laughing_germain[222566]: }
Oct 11 01:45:05 compute-0 systemd[1]: libpod-7ccc84d008ed4dcee01f3b2e495233b571aea7f8b93ccdce5bd54d07be695610.scope: Deactivated successfully.
Oct 11 01:45:05 compute-0 podman[222549]: 2025-10-11 01:45:05.910655794 +0000 UTC m=+1.129495445 container died 7ccc84d008ed4dcee01f3b2e495233b571aea7f8b93ccdce5bd54d07be695610 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_germain, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:45:05 compute-0 systemd[1]: var-lib-containers-storage-overlay-300f84071983b13e92aedfdfe0d4a78465100a8675564db36d22b172ab18bd99-merged.mount: Deactivated successfully.
Oct 11 01:45:06 compute-0 podman[222549]: 2025-10-11 01:45:06.004226324 +0000 UTC m=+1.223065985 container remove 7ccc84d008ed4dcee01f3b2e495233b571aea7f8b93ccdce5bd54d07be695610 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_germain, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS)
Oct 11 01:45:06 compute-0 systemd[1]: libpod-conmon-7ccc84d008ed4dcee01f3b2e495233b571aea7f8b93ccdce5bd54d07be695610.scope: Deactivated successfully.
Oct 11 01:45:06 compute-0 sudo[222447]: pam_unix(sudo:session): session closed for user root
Oct 11 01:45:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e56 do_prune osdmap full prune enabled
Oct 11 01:45:06 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:45:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e57 e57: 3 total, 3 up, 3 in
Oct 11 01:45:06 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e57: 3 total, 3 up, 3 in
Oct 11 01:45:06 compute-0 ceph-mgr[192233]: [progress INFO root] update: starting ev fe8f9bb4-e406-4b34-a1f0-d93b8b67f674 (PG autoscaler increasing pool 11 PGs from 1 to 32)
Oct 11 01:45:06 compute-0 ceph-mgr[192233]: [progress INFO root] complete: finished ev ca48587c-bc0a-499d-87f7-9569fe6d4144 (PG autoscaler increasing pool 8 PGs from 1 to 32)
Oct 11 01:45:06 compute-0 ceph-mgr[192233]: [progress INFO root] Completed event ca48587c-bc0a-499d-87f7-9569fe6d4144 (PG autoscaler increasing pool 8 PGs from 1 to 32) in 3 seconds
Oct 11 01:45:06 compute-0 ceph-mgr[192233]: [progress INFO root] complete: finished ev b006774a-b151-4b18-a6a3-d3e49f17dd73 (PG autoscaler increasing pool 9 PGs from 1 to 32)
Oct 11 01:45:06 compute-0 ceph-mgr[192233]: [progress INFO root] Completed event b006774a-b151-4b18-a6a3-d3e49f17dd73 (PG autoscaler increasing pool 9 PGs from 1 to 32) in 2 seconds
Oct 11 01:45:06 compute-0 ceph-mgr[192233]: [progress INFO root] complete: finished ev aba7da24-2f59-4cda-a416-d5df8373cafa (PG autoscaler increasing pool 10 PGs from 1 to 32)
Oct 11 01:45:06 compute-0 ceph-mgr[192233]: [progress INFO root] Completed event aba7da24-2f59-4cda-a416-d5df8373cafa (PG autoscaler increasing pool 10 PGs from 1 to 32) in 1 seconds
Oct 11 01:45:06 compute-0 ceph-mgr[192233]: [progress INFO root] complete: finished ev fe8f9bb4-e406-4b34-a1f0-d93b8b67f674 (PG autoscaler increasing pool 11 PGs from 1 to 32)
Oct 11 01:45:06 compute-0 ceph-mgr[192233]: [progress INFO root] Completed event fe8f9bb4-e406-4b34-a1f0-d93b8b67f674 (PG autoscaler increasing pool 11 PGs from 1 to 32) in 0 seconds
Oct 11 01:45:06 compute-0 ceph-mon[191930]: 5.1f scrub starts
Oct 11 01:45:06 compute-0 ceph-mon[191930]: 5.1f scrub ok
Oct 11 01:45:06 compute-0 ceph-mon[191930]: pgmap v140: 197 pgs: 197 active+clean; 456 KiB data, 85 MiB used, 60 GiB / 60 GiB avail; 1.7 KiB/s rd, 255 B/s wr, 2 op/s
Oct 11 01:45:06 compute-0 ceph-mon[191930]: 6.19 scrub starts
Oct 11 01:45:06 compute-0 ceph-mon[191930]: 6.19 scrub ok
Oct 11 01:45:06 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_num", "val": "32"}]: dispatch
Oct 11 01:45:06 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_num", "val": "32"}]': finished
Oct 11 01:45:06 compute-0 ceph-mon[191930]: osdmap e57: 3 total, 3 up, 3 in
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.15( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.14( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.15( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.17( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.14( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.16( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.17( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.16( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.11( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.1( v 46'4 (0'0,46'4] local-lis/les=45/46 n=1 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.3( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.2( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=1 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.10( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.3( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=1 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.2( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.c( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.d( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.d( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.c( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.e( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.f( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.8( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.9( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.a( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.b( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.f( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.e( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.a( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.9( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.8( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.b( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.1( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.6( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.6( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.7( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.7( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.5( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.4( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.4( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=1 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.5( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.1a( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.1b( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.18( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.19( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.19( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.18( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.1f( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.1f( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.1c( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.1e( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.1e( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.1d( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.1d( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.1c( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.12( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.13( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.13( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.11( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.12( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.1b( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.10( v 53'585 lc 0'0 (0'0,53'585] local-lis/les=47/48 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.1a( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=45/46 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.14( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.15( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.16( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.14( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.11( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.0( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=47/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 53'584 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.1( v 46'4 (0'0,46'4] local-lis/les=56/57 n=1 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.3( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.10( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.3( v 46'4 (0'0,46'4] local-lis/les=56/57 n=1 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.2( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.2( v 46'4 (0'0,46'4] local-lis/les=56/57 n=1 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.d( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.c( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.d( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.c( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.17( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.e( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.8( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.9( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.a( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.b( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.e( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.f( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.9( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.a( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.8( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.0( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=45/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 46'3 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.b( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.1( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.6( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.5( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.4( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.4( v 46'4 (0'0,46'4] local-lis/les=56/57 n=1 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.5( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.6( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.1a( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.1b( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.18( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.19( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.18( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.1f( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.1e( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.1d( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.1d( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.1c( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.7( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.13( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.12( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.1b( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.11( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.1a( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[9.10( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=47/47 les/c/f=48/48/0 sis=56) [1] r=0 lpr=56 pi=[47,56)/1 crt=53'585 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 57 pg[8.12( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=45/45 les/c/f=46/46/0 sis=56) [1] r=0 lpr=56 pi=[45,56)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:06 compute-0 sudo[222586]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:45:06 compute-0 sudo[222586]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:45:06 compute-0 sudo[222586]: pam_unix(sudo:session): session closed for user root
Oct 11 01:45:06 compute-0 sudo[222611]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:45:06 compute-0 sudo[222611]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:45:06 compute-0 sudo[222611]: pam_unix(sudo:session): session closed for user root
Oct 11 01:45:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v143: 259 pgs: 2 peering, 62 unknown, 195 active+clean; 456 KiB data, 85 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.control", "var": "pg_num_actual", "val": "32"} v 0) v1
Oct 11 01:45:06 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.control", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:45:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_num_actual", "val": "32"} v 0) v1
Oct 11 01:45:06 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:45:06 compute-0 sudo[222636]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:45:06 compute-0 sudo[222636]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:45:06 compute-0 sudo[222636]: pam_unix(sudo:session): session closed for user root
Oct 11 01:45:06 compute-0 ceph-mgr[192233]: [progress INFO root] Writing back 17 completed events
Oct 11 01:45:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/progress/completed}] v 0) v1
Oct 11 01:45:06 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:45:06 compute-0 sudo[222665]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:45:06 compute-0 sudo[222665]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:45:06 compute-0 podman[222660]: 2025-10-11 01:45:06.674876235 +0000 UTC m=+0.146747878 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, config_id=edpm, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']})
Oct 11 01:45:06 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.1a scrub starts
Oct 11 01:45:06 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.1a scrub ok
Oct 11 01:45:07 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e57 do_prune osdmap full prune enabled
Oct 11 01:45:07 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.control", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:45:07 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_num_actual", "val": "32"}]: dispatch
Oct 11 01:45:07 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:45:07 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.control", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:45:07 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:45:07 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e58 e58: 3 total, 3 up, 3 in
Oct 11 01:45:07 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e58: 3 total, 3 up, 3 in
Oct 11 01:45:07 compute-0 podman[222744]: 2025-10-11 01:45:07.281496995 +0000 UTC m=+0.088791133 container create 773a2f7d4f4f904116032634bf42811dd16de5dea832146dfd354beeb67708aa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_einstein, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:45:07 compute-0 podman[222744]: 2025-10-11 01:45:07.239493563 +0000 UTC m=+0.046787751 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:45:07 compute-0 systemd[1]: Started libpod-conmon-773a2f7d4f4f904116032634bf42811dd16de5dea832146dfd354beeb67708aa.scope.
Oct 11 01:45:07 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:45:07 compute-0 podman[222744]: 2025-10-11 01:45:07.429677372 +0000 UTC m=+0.236971590 container init 773a2f7d4f4f904116032634bf42811dd16de5dea832146dfd354beeb67708aa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_einstein, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:45:07 compute-0 podman[222744]: 2025-10-11 01:45:07.446203184 +0000 UTC m=+0.253497312 container start 773a2f7d4f4f904116032634bf42811dd16de5dea832146dfd354beeb67708aa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_einstein, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 01:45:07 compute-0 podman[222744]: 2025-10-11 01:45:07.454144601 +0000 UTC m=+0.261438749 container attach 773a2f7d4f4f904116032634bf42811dd16de5dea832146dfd354beeb67708aa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_einstein, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 01:45:07 compute-0 happy_einstein[222759]: 167 167
Oct 11 01:45:07 compute-0 systemd[1]: libpod-773a2f7d4f4f904116032634bf42811dd16de5dea832146dfd354beeb67708aa.scope: Deactivated successfully.
Oct 11 01:45:07 compute-0 podman[222744]: 2025-10-11 01:45:07.458727679 +0000 UTC m=+0.266021847 container died 773a2f7d4f4f904116032634bf42811dd16de5dea832146dfd354beeb67708aa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_einstein, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 01:45:07 compute-0 systemd[1]: var-lib-containers-storage-overlay-56e29738e72044e50338585df3c87f02f0d01a03dd56867877cce9b02c983892-merged.mount: Deactivated successfully.
Oct 11 01:45:07 compute-0 podman[222744]: 2025-10-11 01:45:07.542965643 +0000 UTC m=+0.350259771 container remove 773a2f7d4f4f904116032634bf42811dd16de5dea832146dfd354beeb67708aa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_einstein, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:45:07 compute-0 systemd[1]: libpod-conmon-773a2f7d4f4f904116032634bf42811dd16de5dea832146dfd354beeb67708aa.scope: Deactivated successfully.
Oct 11 01:45:07 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.1b scrub starts
Oct 11 01:45:07 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 6.1b scrub ok
Oct 11 01:45:07 compute-0 podman[222783]: 2025-10-11 01:45:07.858691727 +0000 UTC m=+0.101812767 container create 7b0bead4f0227441a1fff72c504096ed458e11288140633418fbba4b5826ddbf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_moore, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:45:07 compute-0 podman[222783]: 2025-10-11 01:45:07.812634787 +0000 UTC m=+0.055755827 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.935 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to be longer than the expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.936 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.936 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.937 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f8ed27f97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.937 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb8c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.937 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.938 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.938 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb1a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.938 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb200>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.938 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.938 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed2874260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.938 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.939 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.939 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed3ab42f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.939 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.939 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb350>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb90>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fa390>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb3b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbbf0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 systemd[1]: Started libpod-conmon-7b0bead4f0227441a1fff72c504096ed458e11288140633418fbba4b5826ddbf.scope.
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbc80>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27f9610>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb620>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbe30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbec0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbf50>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.943 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.944 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f8ed27fbad0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.944 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.944 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f8ed27faff0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.945 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.945 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f8ed27fb110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.945 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.945 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f8ed27fb170>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.946 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f8ed27fb1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.946 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f8ed27fb230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f8ed2874230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f8ed27fb290>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f8ed5778d70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f8ed27fb650>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f8ed27fbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f8ed27fb320>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f8ed27fbb60>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f8ed27fa3f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f8ed27fb380>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f8ed27fbbc0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f8ed27fbc50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f8ed27fbce0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f8ed27fbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f8ed27fb590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f8ed27f95e0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f8ed27fb5f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f8ed27fbe00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.955 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f8ed27fbe90>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.955 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.956 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f8ed27fbf20>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.956 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.957 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.959 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.959 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.959 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.959 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:45:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:45:07.959 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
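[editor's note] The DEBUG lines above trace one full ceilometer polling cycle on compute-0: for each pollster the agent runs the local_instances discovery, finds no VMs on this host, skips the meter ("no resources found this cycle"), and finally marks every pollster as finished. A minimal Python sketch of that discover-then-skip loop follows; the names are hypothetical illustrations, not ceilometer's actual classes or call signatures.

    import logging

    logging.basicConfig(level=logging.DEBUG, format="%(levelname)s %(message)s")
    log = logging.getLogger("polling")

    def discover_local_instances():
        # No instances are running on this compute node, so discovery
        # returns an empty list and every pollster below is skipped.
        return []

    def run_polling_cycle(pollsters):
        for name in pollsters:
            log.debug("Executing discovery process for pollster [%s]", name)
            resources = discover_local_instances()
            if not resources:
                log.debug("Skip pollster %s, no resources found this cycle", name)
                continue
            # ...collect and publish samples for each resource here...
        for name in pollsters:
            log.debug("Finished processing pollster [%s].", name)

    run_polling_cycle(["cpu", "memory.usage", "network.incoming.bytes"])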
Oct 11 01:45:07 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:45:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7c7ab65faee873b3283082b32f6e961214923f6377d575a3be96d4fac55a8df0/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:45:08 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7c7ab65faee873b3283082b32f6e961214923f6377d575a3be96d4fac55a8df0/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:45:08 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7c7ab65faee873b3283082b32f6e961214923f6377d575a3be96d4fac55a8df0/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:45:08 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7c7ab65faee873b3283082b32f6e961214923f6377d575a3be96d4fac55a8df0/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
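[editor's note] The xfs remount notices above are the kernel pointing out that these overlay mounts carry 32-bit inode timestamps, whose signed epoch maximum is 0x7fffffff seconds, i.e. 2038-01-19. A one-line check:

    from datetime import datetime, timezone

    # 0x7fffffff is the largest 32-bit signed Unix timestamp.
    print(datetime.fromtimestamp(0x7FFFFFFF, tz=timezone.utc))
    # -> 2038-01-19 03:14:07+00:00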
Oct 11 01:45:08 compute-0 podman[222783]: 2025-10-11 01:45:08.033754072 +0000 UTC m=+0.276875112 container init 7b0bead4f0227441a1fff72c504096ed458e11288140633418fbba4b5826ddbf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_moore, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:45:08 compute-0 podman[222783]: 2025-10-11 01:45:08.060437021 +0000 UTC m=+0.303558041 container start 7b0bead4f0227441a1fff72c504096ed458e11288140633418fbba4b5826ddbf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_moore, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0)
Oct 11 01:45:08 compute-0 podman[222783]: 2025-10-11 01:45:08.066825688 +0000 UTC m=+0.309946728 container attach 7b0bead4f0227441a1fff72c504096ed458e11288140633418fbba4b5826ddbf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_moore, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:45:08 compute-0 ceph-mon[191930]: pgmap v143: 259 pgs: 2 peering, 62 unknown, 195 active+clean; 456 KiB data, 85 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:08 compute-0 ceph-mon[191930]: 6.1a scrub starts
Oct 11 01:45:08 compute-0 ceph-mon[191930]: 6.1a scrub ok
Oct 11 01:45:08 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.control", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:45:08 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_num_actual", "val": "32"}]': finished
Oct 11 01:45:08 compute-0 ceph-mon[191930]: osdmap e58: 3 total, 3 up, 3 in
Oct 11 01:45:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v145: 321 pgs: 2 peering, 124 unknown, 195 active+clean; 456 KiB data, 85 MiB used, 60 GiB / 60 GiB avail
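[editor's note] Note the shape of the mon/mgr pgmap lines: a total PG count followed by per-state counts and usage figures. A self-contained sketch that parses one pgmap payload quoted from this log into a state histogram (the regex is an illustration, not ceph tooling):

    import re

    # A mon pgmap line from this log, payload only.
    line = ("pgmap v143: 259 pgs: 2 peering, 62 unknown, 195 active+clean; "
            "456 KiB data, 85 MiB used, 60 GiB / 60 GiB avail")

    total = int(re.search(r"(\d+) pgs:", line).group(1))
    states = {state: int(count)
              for count, state in re.findall(r"(\d+) ([a-z+]+)[,;]", line)}
    assert sum(states.values()) == total
    print(states)  # {'peering': 2, 'unknown': 62, 'active+clean': 195}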
Oct 11 01:45:08 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 58 pg[10.0( v 53'64 (0'0,53'64] local-lis/les=49/50 n=8 ec=49/49 lis/c=49/49 les/c/f=50/50/0 sis=58 pruub=9.159426689s) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 53'63 mlcod 53'63 active pruub 111.755493164s@ mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [2], acting_primary 2 -> 2, up_primary 2 -> 2, role 0 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:08 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 58 pg[11.0( v 53'2 (0'0,53'2] local-lis/les=51/52 n=2 ec=51/51 lis/c=51/51 les/c/f=52/52/0 sis=58 pruub=11.196599007s) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 53'1 mlcod 53'1 active pruub 121.011955261s@ mbc={}] start_peering_interval up [1] -> [1], acting [1] -> [1], acting_primary 1 -> 1, up_primary 1 -> 1, role 0 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:08 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 58 pg[11.0( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=51/51 lis/c=51/51 les/c/f=52/52/0 sis=58 pruub=11.196599007s) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 53'1 mlcod 0'0 unknown pruub 121.011955261s@ mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:08 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 58 pg[10.0( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=49/49 lis/c=49/49 les/c/f=50/50/0 sis=58 pruub=9.159426689s) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 53'63 mlcod 0'0 unknown pruub 111.755493164s@ mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 elated_moore[222800]: {
Oct 11 01:45:09 compute-0 elated_moore[222800]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:45:09 compute-0 elated_moore[222800]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:45:09 compute-0 elated_moore[222800]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:45:09 compute-0 elated_moore[222800]:         "osd_id": 1,
Oct 11 01:45:09 compute-0 elated_moore[222800]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:45:09 compute-0 elated_moore[222800]:         "type": "bluestore"
Oct 11 01:45:09 compute-0 elated_moore[222800]:     },
Oct 11 01:45:09 compute-0 elated_moore[222800]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:45:09 compute-0 elated_moore[222800]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:45:09 compute-0 elated_moore[222800]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:45:09 compute-0 elated_moore[222800]:         "osd_id": 2,
Oct 11 01:45:09 compute-0 elated_moore[222800]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:45:09 compute-0 elated_moore[222800]:         "type": "bluestore"
Oct 11 01:45:09 compute-0 elated_moore[222800]:     },
Oct 11 01:45:09 compute-0 elated_moore[222800]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:45:09 compute-0 elated_moore[222800]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:45:09 compute-0 elated_moore[222800]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:45:09 compute-0 elated_moore[222800]:         "osd_id": 0,
Oct 11 01:45:09 compute-0 elated_moore[222800]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:45:09 compute-0 elated_moore[222800]:         "type": "bluestore"
Oct 11 01:45:09 compute-0 elated_moore[222800]:     }
Oct 11 01:45:09 compute-0 elated_moore[222800]: }
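[editor's note] The JSON block emitted by the short-lived elated_moore container is an OSD inventory keyed by osd_uuid, mapping each OSD to its cluster fsid, backing LV device, id, and objectstore type. It resembles ceph-volume's JSON listing output, though the exact command is not logged, so that attribution is an assumption. A small sketch that parses one such entry, copied verbatim from the output above:

    import json

    # One entry from the container's JSON output above, verbatim.
    raw = """
    {
        "6af45214-b1a1-4565-9175-30c80d9ec207": {
            "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
            "device": "/dev/mapper/ceph_vg1-ceph_lv1",
            "osd_id": 1,
            "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
            "type": "bluestore"
        }
    }
    """
    for uuid, meta in sorted(json.loads(raw).items(),
                             key=lambda kv: kv[1]["osd_id"]):
        print(f"osd.{meta['osd_id']}: {meta['device']} ({meta['type']})")
    # -> osd.1: /dev/mapper/ceph_vg1-ceph_lv1 (bluestore)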
Oct 11 01:45:09 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 4.1b deep-scrub starts
Oct 11 01:45:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e58 do_prune osdmap full prune enabled
Oct 11 01:45:09 compute-0 ceph-mon[191930]: 6.1b scrub starts
Oct 11 01:45:09 compute-0 ceph-mon[191930]: 6.1b scrub ok
Oct 11 01:45:09 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 4.1b deep-scrub ok
Oct 11 01:45:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e59 e59: 3 total, 3 up, 3 in
Oct 11 01:45:09 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e59: 3 total, 3 up, 3 in
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.1e( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.1b( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.d( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.b( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.a( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.11( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.12( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.10( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.1f( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.1d( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.1c( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.13( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.1a( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.19( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.18( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.7( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=1 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.6( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=1 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.5( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=1 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.4( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=1 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.8( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=1 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.16( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.17( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.15( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.14( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.13( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.1( v 53'2 (0'0,53'2] local-lis/les=51/52 n=1 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.f( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.e( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.d( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.b( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.9( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.c( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.2( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=1 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.8( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.a( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.3( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.4( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.5( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.f( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.9( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.c( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.e( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.1( v 53'64 (0'0,53'64] local-lis/les=49/50 n=1 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.2( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=1 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.7( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.3( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=1 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.14( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.18( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.6( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.16( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.17( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.1a( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.1b( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.1c( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.1d( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.1f( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.10( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.11( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.12( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.19( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.1e( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=51/52 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.d( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.15( v 53'64 lc 0'0 (0'0,53'64] local-lis/les=49/50 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.17( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.15( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.14( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.1( v 53'2 (0'0,53'2] local-lis/les=58/59 n=1 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.0( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=51/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 53'1 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.f( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.e( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.d( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.b( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.c( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.2( v 53'2 (0'0,53'2] local-lis/les=58/59 n=1 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.16( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.8( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.9( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.13( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 systemd[1]: libpod-7b0bead4f0227441a1fff72c504096ed458e11288140633418fbba4b5826ddbf.scope: Deactivated successfully.
Oct 11 01:45:09 compute-0 systemd[1]: libpod-7b0bead4f0227441a1fff72c504096ed458e11288140633418fbba4b5826ddbf.scope: Consumed 1.141s CPU time.
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.1b( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.a( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.12( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.11( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.10( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.1f( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.1d( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.1c( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.1a( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.13( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.19( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.7( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.4( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.6( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.f( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.8( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.9( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.c( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.0( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=49/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 53'63 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.5( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 podman[222783]: 2025-10-11 01:45:09.200587512 +0000 UTC m=+1.443708532 container died 7b0bead4f0227441a1fff72c504096ed458e11288140633418fbba4b5826ddbf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_moore, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef)
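[editor's note] That "container died" event closes the elated_moore lifecycle visible in this log: init and start at 01:45:08, attach, then exit about a second later, with the systemd scope reporting roughly 1.1s of CPU consumed. A hedged sketch for replaying that lifecycle from podman's event log; the flags are standard podman CLI, and the container name is the one journald logged:

    import subprocess

    # Replay recent events (init -> start -> attach -> died) for the
    # one-shot container seen above. --stream=false exits after printing
    # instead of following new events.
    subprocess.run(
        ["podman", "events", "--stream=false", "--since", "10m",
         "--filter", "container=elated_moore"],
        check=True,
    )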
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.1e( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.b( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.e( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.1( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.14( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.2( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.16( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.17( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.18( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.3( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.a( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.4( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.5( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.3( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.7( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.18( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.1a( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 59 pg[10.15( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=49/49 les/c/f=50/50/0 sis=58) [2] r=0 lpr=58 pi=[49,58)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.1b( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.1c( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.1d( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.6( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.1f( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.10( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.11( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.12( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.19( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 59 pg[11.1e( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=51/51 les/c/f=52/52/0 sis=58) [1] r=0 lpr=58 pi=[51,58)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:09 compute-0 systemd[1]: var-lib-containers-storage-overlay-7c7ab65faee873b3283082b32f6e961214923f6377d575a3be96d4fac55a8df0-merged.mount: Deactivated successfully.
Oct 11 01:45:09 compute-0 podman[222783]: 2025-10-11 01:45:09.305010257 +0000 UTC m=+1.548131257 container remove 7b0bead4f0227441a1fff72c504096ed458e11288140633418fbba4b5826ddbf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_moore, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS)
Oct 11 01:45:09 compute-0 systemd[1]: libpod-conmon-7b0bead4f0227441a1fff72c504096ed458e11288140633418fbba4b5826ddbf.scope: Deactivated successfully.
Oct 11 01:45:09 compute-0 sudo[222665]: pam_unix(sudo:session): session closed for user root
Oct 11 01:45:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:45:09 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:45:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:45:09 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:45:09 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 6b9af2bb-0b91-4cc6-8eec-68dd50bac755 does not exist
Oct 11 01:45:09 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 14322bb6-b3a5-49c8-9d37-73351a31bcf7 does not exist
Oct 11 01:45:09 compute-0 sudo[222846]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:45:09 compute-0 sudo[222846]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:45:09 compute-0 sudo[222846]: pam_unix(sudo:session): session closed for user root
Oct 11 01:45:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e59 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:45:09 compute-0 sudo[222871]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:45:09 compute-0 sudo[222871]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:45:09 compute-0 sudo[222871]: pam_unix(sudo:session): session closed for user root
Oct 11 01:45:10 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 4.1a scrub starts
Oct 11 01:45:10 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 4.1a scrub ok
Oct 11 01:45:10 compute-0 ceph-mon[191930]: pgmap v145: 321 pgs: 2 peering, 124 unknown, 195 active+clean; 456 KiB data, 85 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:10 compute-0 ceph-mon[191930]: 4.1b deep-scrub starts
Oct 11 01:45:10 compute-0 ceph-mon[191930]: 4.1b deep-scrub ok
Oct 11 01:45:10 compute-0 ceph-mon[191930]: osdmap e59: 3 total, 3 up, 3 in
Oct 11 01:45:10 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:45:10 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:45:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v147: 321 pgs: 2 peering, 124 unknown, 195 active+clean; 456 KiB data, 85 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:10 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.19 scrub starts
Oct 11 01:45:10 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.19 scrub ok
Oct 11 01:45:10 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.19 scrub starts
Oct 11 01:45:10 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.19 scrub ok
Oct 11 01:45:11 compute-0 ceph-mon[191930]: 4.1a scrub starts
Oct 11 01:45:11 compute-0 ceph-mon[191930]: 4.1a scrub ok
Oct 11 01:45:11 compute-0 ceph-mon[191930]: 7.19 scrub starts
Oct 11 01:45:11 compute-0 ceph-mon[191930]: 7.19 scrub ok
Oct 11 01:45:11 compute-0 sshd-session[222896]: Accepted publickey for zuul from 192.168.122.30 port 48176 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:45:11 compute-0 systemd-logind[804]: New session 41 of user zuul.
Oct 11 01:45:11 compute-0 systemd[1]: Started Session 41 of User zuul.
Oct 11 01:45:11 compute-0 sshd-session[222896]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:45:12 compute-0 ceph-mon[191930]: pgmap v147: 321 pgs: 2 peering, 124 unknown, 195 active+clean; 456 KiB data, 85 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:12 compute-0 ceph-mon[191930]: 2.19 scrub starts
Oct 11 01:45:12 compute-0 ceph-mon[191930]: 2.19 scrub ok
Oct 11 01:45:12 compute-0 podman[222976]: 2025-10-11 01:45:12.246786594 +0000 UTC m=+0.124886008 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 01:45:12 compute-0 podman[222982]: 2025-10-11 01:45:12.259453848 +0000 UTC m=+0.128354956 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, name=ubi9, build-date=2024-09-18T21:23:30, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, vcs-type=git, architecture=x86_64, managed_by=edpm_ansible, maintainer=Red Hat, Inc., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, version=9.4, release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, container_name=kepler, com.redhat.component=ubi9-container, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.openshift.expose-services=, io.openshift.tags=base rhel9, io.buildah.version=1.29.0, vendor=Red Hat, Inc., distribution-scope=public, summary=Provides the latest release of Red Hat Universal Base Image 9., config_id=edpm, release=1214.1726694543, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543)
Oct 11 01:45:12 compute-0 podman[222980]: 2025-10-11 01:45:12.295806213 +0000 UTC m=+0.172224647 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_controller, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible)
Oct 11 01:45:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v148: 321 pgs: 31 unknown, 290 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:12 compute-0 python3.9[223114]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:45:14 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 4.18 scrub starts
Oct 11 01:45:14 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 4.18 scrub ok
Oct 11 01:45:14 compute-0 ceph-mon[191930]: pgmap v148: 321 pgs: 31 unknown, 290 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:14 compute-0 ceph-mon[191930]: 4.18 scrub starts
Oct 11 01:45:14 compute-0 ceph-mon[191930]: 4.18 scrub ok
Oct 11 01:45:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v149: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": ".rgw.root", "var": "pgp_num_actual", "val": "32"} v 0) v1
Oct 11 01:45:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": ".rgw.root", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:45:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.control", "var": "pgp_num_actual", "val": "32"} v 0) v1
Oct 11 01:45:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.control", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:45:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "2"} v 0) v1
Oct 11 01:45:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "2"}]: dispatch
Oct 11 01:45:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pgp_num_actual", "val": "32"} v 0) v1
Oct 11 01:45:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:45:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e59 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:45:15 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 6.f scrub starts
Oct 11 01:45:15 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 6.f scrub ok
Oct 11 01:45:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e59 do_prune osdmap full prune enabled
Oct 11 01:45:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": ".rgw.root", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:45:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.control", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:45:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "2"}]: dispatch
Oct 11 01:45:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:45:15 compute-0 ceph-mon[191930]: 6.f scrub starts
Oct 11 01:45:15 compute-0 ceph-mon[191930]: 6.f scrub ok
Oct 11 01:45:15 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": ".rgw.root", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:45:15 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.control", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:45:15 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "2"}]': finished
Oct 11 01:45:15 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:45:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e60 e60: 3 total, 3 up, 3 in
Oct 11 01:45:15 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e60: 3 total, 3 up, 3 in
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.17( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.930346489s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.361068726s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.17( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.930220604s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.361068726s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.865339279s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 131.296340942s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.14( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.865277290s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.296356201s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.865200996s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.296340942s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.14( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.865191460s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.296356201s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.15( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.929541588s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.361091614s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.15( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.929443359s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.361091614s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.14( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.929373741s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.361236572s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.14( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.929333687s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.361236572s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.15( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.864096642s) [2] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.296356201s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.10( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.876389503s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.308761597s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.15( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.863992691s) [2] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.296356201s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.10( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.876353264s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.308761597s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.874170303s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 131.308227539s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.874105453s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.308227539s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.2( v 53'2 (0'0,53'2] local-lis/les=58/59 n=1 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.927503586s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.362045288s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.2( v 53'2 (0'0,53'2] local-lis/les=58/59 n=1 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.927370071s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.362045288s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.1( v 53'2 (0'0,53'2] local-lis/les=58/59 n=1 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.926666260s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.361755371s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.1( v 53'2 (0'0,53'2] local-lis/les=58/59 n=1 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.926403999s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.361755371s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.2( v 46'4 (0'0,46'4] local-lis/les=56/57 n=1 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.873297691s) [2] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.308776855s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.2( v 46'4 (0'0,46'4] local-lis/les=56/57 n=1 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.873254776s) [2] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.308776855s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.3( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.872982979s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 131.308715820s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.3( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.872927666s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.308715820s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.c( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.872813225s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.309158325s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.c( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.872764587s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.309158325s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.e( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.924837112s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.361953735s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.e( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.924790382s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.361953735s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.d( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.871376038s) [2] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.308792114s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.d( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.871335983s) [2] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.308792114s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.f( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.924221039s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.361900330s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.f( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.924177170s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.361900330s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.e( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.872035027s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.311172485s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.e( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.871980667s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.311172485s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.b( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.922458649s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.361991882s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.b( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.922279358s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.361991882s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.d( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.921815872s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.361968994s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.9( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.871010780s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 131.311187744s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.d( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.921751976s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.361968994s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 sudo[223338]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tuvocpdtdisxwuvbdrupskpxenazckxd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147114.475885-32-242816667626364/AnsiballZ_command.py'
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.9( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.870921135s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.311187744s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.11( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.866825104s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 131.308334351s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.11( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.866670609s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.308334351s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.b( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.869452477s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 131.311187744s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.b( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.869406700s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.311187744s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.f( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.869716644s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.311859131s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.f( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.869680405s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.311859131s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.8( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.919767380s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.362098694s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.8( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.919714928s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.362098694s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.b( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.870368958s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.312850952s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.b( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.870334625s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.312850952s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.9( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.869930267s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.312759399s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.9( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.869898796s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.312759399s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.1( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.869769096s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 131.312866211s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.1( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.869740486s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.312866211s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.3( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.927292824s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.370338440s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.3( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.926807404s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.370338440s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.4( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.925868988s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.370254517s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.d( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.864364624s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 131.309143066s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.d( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.864315987s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.309143066s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.866971970s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 131.311859131s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.9( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.917173386s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.362121582s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.9( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.917115211s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.362121582s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.867943764s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 131.313034058s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.867908478s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.313034058s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.6( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.925126076s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.370483398s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.6( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.925089836s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.370483398s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.4( v 46'4 (0'0,46'4] local-lis/les=56/57 n=1 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.867673874s) [2] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.313140869s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.4( v 46'4 (0'0,46'4] local-lis/les=56/57 n=1 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.867642403s) [2] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.313140869s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.18( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.924386024s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.370376587s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.18( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.924350739s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.370376587s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.5( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.866874695s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 131.313140869s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.1b( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.868032455s) [2] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.314331055s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.5( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.866837502s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.313140869s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.1b( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.867980957s) [2] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.314331055s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.1a( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.923760414s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.370399475s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.1a( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.923725128s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.370399475s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.1b( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.923637390s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.370414734s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.1b( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.923592567s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.370414734s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.867784500s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 131.314682007s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.867752075s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.314682007s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.18( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.867466927s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.314437866s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.18( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.867444038s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.314437866s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.1c( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.922684669s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.370437622s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.1c( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.922629356s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.370437622s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.865762711s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.311859131s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 sudo[223338]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[8.10( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.4( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.922213554s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.370254517s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.864253044s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 131.314468384s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.864200592s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.314468384s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.1f( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.862519264s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.314422607s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.1f( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.862460136s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.314422607s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.6( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.860977173s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.312988281s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.6( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.860920906s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.312988281s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[9.11( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[8.15( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [2] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[9.5( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[8.b( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[11.4( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[11.15( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[9.b( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[11.14( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[11.2( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[9.7( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[11.3( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[8.2( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [2] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[11.d( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[11.8( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[8.d( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [2] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[11.9( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[8.4( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [2] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[11.18( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[8.1b( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [2] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[11.1b( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[11.1c( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[11.b( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[11.1a( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.1e( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.905893326s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active pruub 119.158958435s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.1e( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.905873299s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 119.158958435s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.d( v 59'65 (0'0,59'65] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.893824577s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 53'64 mlcod 53'64 active pruub 119.147140503s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.d( v 59'65 (0'0,59'65] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.893796921s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 53'64 mlcod 0'0 unknown NOTIFY pruub 119.147140503s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[8.9( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.b( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.904473305s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active pruub 119.158615112s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.b( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.904453278s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 119.158615112s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.13( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.904891014s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active pruub 119.159172058s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.13( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.904873848s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 119.159172058s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.12( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.904345512s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active pruub 119.158744812s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.12( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.904331207s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 119.158744812s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.11( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.904279709s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active pruub 119.158782959s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.11( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.904266357s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 119.158782959s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.10( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.904197693s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active pruub 119.158836365s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.10( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.904182434s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 119.158836365s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.1a( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.904236794s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active pruub 119.159118652s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.1a( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.904189110s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 119.159118652s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.19( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.904269218s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active pruub 119.159286499s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.19( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.904254913s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 119.159286499s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[9.17( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[11.6( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[11.1e( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[9.9( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[11.11( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[8.f( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.6( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.901635170s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active pruub 119.159347534s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.6( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.901617050s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 119.159347534s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.7( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.901565552s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active pruub 119.159301758s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.4( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.901504517s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active pruub 119.159355164s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.4( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.901489258s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 119.159355164s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.7( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.901473045s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 119.159301758s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.8( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.901154518s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active pruub 119.159431458s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.8( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.901137352s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 119.159431458s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.f( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.900836945s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active pruub 119.159400940s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.f( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.900819778s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 119.159400940s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.e( v 59'65 (0'0,59'65] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.902868271s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 53'64 mlcod 53'64 active pruub 119.161804199s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.e( v 59'65 (0'0,59'65] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.902841568s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 53'64 mlcod 0'0 unknown NOTIFY pruub 119.161804199s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.9( v 59'65 (0'0,59'65] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.900460243s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 53'64 mlcod 53'64 active pruub 119.159439087s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.9( v 59'65 (0'0,59'65] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.900387764s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 53'64 mlcod 0'0 unknown NOTIFY pruub 119.159439087s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.1( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.902500153s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active pruub 119.161964417s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.1( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.902437210s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 119.161964417s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.14( v 59'65 (0'0,59'65] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.902210236s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 53'64 mlcod 53'64 active pruub 119.162010193s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.14( v 59'65 (0'0,59'65] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.902168274s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 53'64 mlcod 0'0 unknown NOTIFY pruub 119.162010193s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[11.e( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[8.e( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[9.f( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.15( v 59'65 (0'0,59'65] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.906595230s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 53'64 mlcod 53'64 active pruub 119.166587830s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.15( v 59'65 (0'0,59'65] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.906530380s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 53'64 mlcod 0'0 unknown NOTIFY pruub 119.166587830s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.2( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.902297974s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active pruub 119.162055969s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.16( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.901846886s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active pruub 119.162086487s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.2( v 53'64 (0'0,53'64] local-lis/les=58/59 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.901798248s) [1] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 119.162055969s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.16( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.901813507s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 119.162086487s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.17( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.901698112s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active pruub 119.162117004s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[10.17( v 53'64 (0'0,53'64] local-lis/les=58/59 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.901666641s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 119.162117004s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[8.c( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[11.f( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[8.12( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [2] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[9.d( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[9.1( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[11.12( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[11.1( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[9.3( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[9.19( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[11.1f( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[8.11( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [2] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[8.18( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.1e( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.918476105s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.371017456s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[11.17( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[8.14( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 60 pg[8.1c( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [2] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[9.15( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.1e( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.918357849s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.371017456s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[9.1f( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[8.1f( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.1d( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.861694336s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 131.314651489s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[9.1d( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[11.10( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.1d( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.861651421s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.314651489s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[9.13( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.1d( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.861579895s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.314849854s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[10.d( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[10.1e( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.1d( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.861529350s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.314849854s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[10.4( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[10.8( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.1c( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.858926773s) [2] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.314666748s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[10.7( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.10( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.912812233s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.370513916s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[10.e( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[10.9( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.10( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.912756920s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.370513916s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[10.1( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[10.15( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.11( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.912874222s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.370941162s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[10.16( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.11( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.912834167s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.370941162s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[10.17( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[8.1d( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.12( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.856880188s) [2] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.315277100s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[11.19( empty local-lis/les=0/0 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[8.1a( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.12( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.856837273s) [2] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.315277100s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.855730057s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 131.314712524s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.855681419s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.314712524s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.12( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.910783768s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.370971680s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.12( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.910735130s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.370971680s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.1f( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.909224510s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.370491028s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.1f( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.909194946s) [2] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.370491028s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.19( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.909276962s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active pruub 126.370979309s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[11.19( v 53'2 (0'0,53'2] local-lis/les=58/59 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60 pruub=9.909228325s) [0] r=-1 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 126.370979309s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.1a( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.852605820s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.314773560s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[9.1b( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.1a( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.852581978s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.314773560s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 60 pg[8.6( empty local-lis/les=0/0 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.11( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.851955414s) [2] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active pruub 131.314788818s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.11( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.851868629s) [2] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.314788818s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.1b( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.852782249s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 131.314758301s@ mbc={}] start_peering_interval up [1] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[9.1b( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.850844383s) [0] r=-1 lpr=60 pi=[56,60)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.314758301s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[8.1c( v 46'4 (0'0,46'4] local-lis/les=56/57 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60 pruub=14.858515739s) [2] r=-1 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 131.314666748s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[10.b( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[10.13( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[10.12( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[10.11( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[10.10( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[10.1a( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[10.19( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[10.6( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[10.f( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[10.14( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 60 pg[10.2( empty local-lis/les=0/0 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:15 compute-0 python3.9[223340]: ansible-ansible.legacy.command Invoked with _raw_params=set -euxo pipefail
                                             pushd /var/tmp
                                             curl -sL https://github.com/openstack-k8s-operators/repo-setup/archive/refs/heads/main.tar.gz | tar -xz
                                             pushd repo-setup-main
                                             python3 -m venv ./venv
                                             PBR_VERSION=0.0.0 ./venv/bin/pip install ./
                                             ./venv/bin/repo-setup current-podified -b antelope
                                             popd
                                             rm -rf repo-setup-main
                                              _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:45:15 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 4.e scrub starts
Oct 11 01:45:16 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 4.e scrub ok
Oct 11 01:45:16 compute-0 ceph-mon[191930]: pgmap v149: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": ".rgw.root", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:45:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.control", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:45:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "2"}]': finished
Oct 11 01:45:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:45:16 compute-0 ceph-mon[191930]: osdmap e60: 3 total, 3 up, 3 in
Oct 11 01:45:16 compute-0 ceph-mon[191930]: 4.e scrub starts
Oct 11 01:45:16 compute-0 ceph-mon[191930]: 4.e scrub ok
Oct 11 01:45:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e60 do_prune osdmap full prune enabled
Oct 11 01:45:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e61 e61: 3 total, 3 up, 3 in
Oct 11 01:45:16 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e61: 3 total, 3 up, 3 in
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.11( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.11( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.3( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.15( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.15( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.1b( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.1b( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.19( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.19( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.1d( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.1d( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.3( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.3( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.1( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.1( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[11.15( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[8.1c( v 46'4 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [2] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.d( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.d( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.1f( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.1f( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.9( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.9( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.7( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.7( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.3( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.d( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.b( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.d( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.b( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.9( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.9( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.17( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.b( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.b( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.17( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.1( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.5( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.1( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.11( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.5( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.11( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.5( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.5( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.1d( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.1d( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.1b( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[9.1b( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[10.12( v 53'64 (0'0,53'64] local-lis/les=60/61 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[10.14( v 59'65 lc 53'54 (0'0,59'65] local-lis/les=60/61 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=59'65 lcod 0'0 mlcod 0'0 active+degraded m=1 mbc={255={(0+1)=1}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[8.15( v 46'4 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [2] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[11.1f( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[11.b( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[11.1a( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[8.11( v 46'4 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [2] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[11.12( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[8.12( v 46'4 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [2] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[11.11( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[11.1e( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[11.1c( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[11.1b( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[8.1b( v 46'4 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [2] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[11.18( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[8.d( v 46'4 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [2] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[11.8( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[8.4( v 46'4 (0'0,46'4] local-lis/les=60/61 n=1 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [2] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[11.d( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[11.9( v 53'2 lc 0'0 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=53'2 mlcod 0'0 active+degraded m=2 mbc={255={(0+1)=2}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[8.2( v 46'4 (0'0,46'4] local-lis/les=60/61 n=1 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [2] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[11.2( v 53'2 (0'0,53'2] local-lis/les=60/61 n=1 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 61 pg[11.3( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [2] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.13( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.13( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[8.1d( v 46'4 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[10.1( v 53'64 (0'0,53'64] local-lis/les=60/61 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.f( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[10.f( v 53'64 (0'0,53'64] local-lis/les=60/61 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[10.b( v 53'64 (0'0,53'64] local-lis/les=60/61 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[10.2( v 53'64 (0'0,53'64] local-lis/les=60/61 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[10.6( v 53'64 (0'0,53'64] local-lis/les=60/61 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[9.f( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] r=-1 lpr=61 pi=[56,61)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[8.1f( v 46'4 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[11.17( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[8.14( v 46'4 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[10.16( v 53'64 (0'0,53'64] local-lis/les=60/61 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[8.1a( v 46'4 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[11.19( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[8.18( v 46'4 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[10.1e( v 53'64 (0'0,53'64] local-lis/les=60/61 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[11.1( v 53'2 (0'0,53'2] local-lis/les=60/61 n=1 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[11.f( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[8.c( v 46'4 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[10.d( v 59'65 lc 53'50 (0'0,59'65] local-lis/les=60/61 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=59'65 lcod 0'0 mlcod 0'0 active+degraded m=1 mbc={255={(0+1)=1}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[8.e( v 46'4 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[10.17( v 53'64 (0'0,53'64] local-lis/les=60/61 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[11.e( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[10.7( v 53'64 (0'0,53'64] local-lis/les=60/61 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[10.19( v 53'64 (0'0,53'64] local-lis/les=60/61 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[10.1a( v 53'64 (0'0,53'64] local-lis/les=60/61 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[10.10( v 53'64 (0'0,53'64] local-lis/les=60/61 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[10.11( v 53'64 (0'0,53'64] local-lis/les=60/61 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 61 pg[10.13( v 53'64 (0'0,53'64] local-lis/les=60/61 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [1] r=0 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[11.6( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[8.9( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=46'4 mlcod 0'0 active+degraded m=1 mbc={255={(0+1)=1}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[8.6( v 46'4 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[10.4( v 53'64 (0'0,53'64] local-lis/les=60/61 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[11.14( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[8.f( v 46'4 lc 0'0 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=46'4 mlcod 0'0 active+degraded m=2 mbc={255={(0+1)=2}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[10.8( v 53'64 (0'0,53'64] local-lis/les=60/61 n=1 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=53'64 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[10.e( v 59'65 lc 53'48 (0'0,59'65] local-lis/les=60/61 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=59'65 lcod 0'0 mlcod 0'0 active+degraded m=1 mbc={255={(0+1)=1}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[10.9( v 59'65 lc 53'56 (0'0,59'65] local-lis/les=60/61 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=59'65 lcod 0'0 mlcod 0'0 active+degraded m=1 mbc={255={(0+1)=1}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[8.b( v 46'4 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[8.10( v 46'4 (0'0,46'4] local-lis/les=60/61 n=0 ec=56/45 lis/c=56/56 les/c/f=57/57/0 sis=60) [0] r=0 lpr=60 pi=[56,60)/1 crt=46'4 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[11.10( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[11.4( v 53'2 (0'0,53'2] local-lis/les=60/61 n=0 ec=58/51 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=53'2 lcod 0'0 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 61 pg[10.15( v 59'65 lc 53'46 (0'0,59'65] local-lis/les=60/61 n=0 ec=58/49 lis/c=58/58 les/c/f=59/59/0 sis=60) [0] r=0 lpr=60 pi=[58,60)/1 crt=59'65 lcod 0'0 mlcod 0'0 active+degraded m=1 mbc={255={(0+1)=1}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v152: 321 pgs: 11 peering, 310 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:17 compute-0 podman[223351]: 2025-10-11 01:45:17.248049446 +0000 UTC m=+0.136493318 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']})
Oct 11 01:45:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e61 do_prune osdmap full prune enabled
Oct 11 01:45:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e62 e62: 3 total, 3 up, 3 in
Oct 11 01:45:17 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e62: 3 total, 3 up, 3 in
Oct 11 01:45:17 compute-0 ceph-mon[191930]: osdmap e61: 3 total, 3 up, 3 in
Oct 11 01:45:17 compute-0 ceph-mon[191930]: pgmap v152: 321 pgs: 11 peering, 310 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:17 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 62 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] async=[0] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=5}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:17 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 62 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] async=[0] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=6}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:17 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 62 pg[9.5( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] async=[0] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=7}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:17 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 62 pg[9.11( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] async=[0] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=6}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:17 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 62 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] async=[0] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=7}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:17 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 62 pg[9.d( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] async=[0] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=8}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:17 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 62 pg[9.b( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] async=[0] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=5}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:17 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 62 pg[9.1( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] async=[0] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=8}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:17 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 62 pg[9.1d( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] async=[0] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=6}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:17 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 62 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] async=[0] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=11}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:17 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 62 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] async=[0] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=5}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:17 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 62 pg[9.3( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] async=[0] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=9}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:17 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 62 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] async=[0] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=6}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:17 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 62 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] async=[0] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=4}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:17 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 62 pg[9.1b( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] async=[0] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=3}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:17 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 62 pg[9.9( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=61) [0]/[1] async=[0] r=0 lpr=61 pi=[56,61)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=7}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e62 do_prune osdmap full prune enabled
Oct 11 01:45:18 compute-0 ceph-mon[191930]: osdmap e62: 3 total, 3 up, 3 in
Oct 11 01:45:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e63 e63: 3 total, 3 up, 3 in
Oct 11 01:45:18 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e63: 3 total, 3 up, 3 in
Oct 11 01:45:18 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 63 pg[9.11( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63 pruub=15.315114021s) [0] async=[0] r=-1 lpr=63 pi=[56,63)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 134.812057495s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:18 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 63 pg[9.11( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63 pruub=15.315001488s) [0] r=-1 lpr=63 pi=[56,63)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 134.812057495s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:18 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 63 pg[9.d( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63 pruub=15.314266205s) [0] async=[0] r=-1 lpr=63 pi=[56,63)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 134.812057495s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:18 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 63 pg[9.d( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63 pruub=15.314103127s) [0] r=-1 lpr=63 pi=[56,63)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 134.812057495s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:18 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 63 pg[9.b( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63 pruub=15.313959122s) [0] async=[0] r=-1 lpr=63 pi=[56,63)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 134.812225342s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:18 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 63 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63 pruub=15.313995361s) [0] async=[0] r=-1 lpr=63 pi=[56,63)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 134.812118530s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:18 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 63 pg[9.b( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63 pruub=15.313854218s) [0] r=-1 lpr=63 pi=[56,63)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 134.812225342s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:18 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 63 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63 pruub=15.313628197s) [0] r=-1 lpr=63 pi=[56,63)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 134.812118530s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:18 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 63 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63 pruub=15.311907768s) [0] async=[0] r=-1 lpr=63 pi=[56,63)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 134.810882568s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:18 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 63 pg[9.5( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63 pruub=15.312910080s) [0] async=[0] r=-1 lpr=63 pi=[56,63)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 134.811920166s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:18 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 63 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63 pruub=15.311846733s) [0] r=-1 lpr=63 pi=[56,63)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 134.810882568s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:18 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 63 pg[9.5( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63 pruub=15.312747955s) [0] r=-1 lpr=63 pi=[56,63)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 134.811920166s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:18 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 63 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63 pruub=15.299468040s) [0] async=[0] r=-1 lpr=63 pi=[56,63)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 134.799514771s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:18 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 63 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63 pruub=15.299391747s) [0] r=-1 lpr=63 pi=[56,63)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 134.799514771s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:18 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 63 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:18 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 63 pg[9.11( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:18 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 63 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:18 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 63 pg[9.11( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:18 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 63 pg[9.b( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:18 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 63 pg[9.b( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:18 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 63 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:18 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 63 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:18 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 63 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:18 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 63 pg[9.d( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:18 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 63 pg[9.d( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:18 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 63 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:18 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 63 pg[9.5( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:18 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 63 pg[9.5( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v155: 321 pgs: 18 peering, 303 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 480 B/s, 13 objects/s recovering
Oct 11 01:45:19 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 4.a deep-scrub starts
Oct 11 01:45:19 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 4.a deep-scrub ok
Oct 11 01:45:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e63 do_prune osdmap full prune enabled
Oct 11 01:45:19 compute-0 ceph-mon[191930]: osdmap e63: 3 total, 3 up, 3 in
Oct 11 01:45:19 compute-0 ceph-mon[191930]: pgmap v155: 321 pgs: 18 peering, 303 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 480 B/s, 13 objects/s recovering
Oct 11 01:45:19 compute-0 ceph-mon[191930]: 4.a deep-scrub starts
Oct 11 01:45:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e64 e64: 3 total, 3 up, 3 in
Oct 11 01:45:19 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e64: 3 total, 3 up, 3 in
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.292655945s) [0] async=[0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 134.812484741s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.292478561s) [0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 134.812484741s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.292478561s) [0] async=[0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 134.812698364s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.292366982s) [0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 134.812698364s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.3( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.291595459s) [0] async=[0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 134.812515259s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.3( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.291475296s) [0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 134.812515259s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.9( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.290918350s) [0] async=[0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 134.812332153s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.9( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.290820122s) [0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 134.812332153s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.1( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.290288925s) [0] async=[0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 134.812316895s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.1( v 53'585 (0'0,53'585] local-lis/les=61/62 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.290155411s) [0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 134.812316895s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.289752960s) [0] async=[0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 134.812408447s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.289660454s) [0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 134.812408447s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.289596558s) [0] async=[0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 134.812622070s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.289489746s) [0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 134.812622070s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.1d( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.289054871s) [0] async=[0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 134.812362671s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.1d( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.288986206s) [0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 134.812362671s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.1b( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.289031029s) [0] async=[0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 134.812698364s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 64 pg[9.1b( v 53'585 (0'0,53'585] local-lis/les=61/62 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64 pruub=14.288941383s) [0] r=-1 lpr=64 pi=[56,64)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 134.812698364s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.1b( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.1b( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.1d( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.1d( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.3( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.3( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.1( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.1( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.9( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [1] -> [0], acting_primary 1 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.9( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.d( v 53'585 (0'0,53'585] local-lis/les=63/64 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=63/64 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=63/64 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.b( v 53'585 (0'0,53'585] local-lis/les=63/64 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.5( v 53'585 (0'0,53'585] local-lis/les=63/64 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.11( v 53'585 (0'0,53'585] local-lis/les=63/64 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:19 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 64 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=63/64 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=63) [0] r=0 lpr=63 pi=[56,63)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e64 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:45:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e64 do_prune osdmap full prune enabled
Oct 11 01:45:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e65 e65: 3 total, 3 up, 3 in
Oct 11 01:45:20 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e65: 3 total, 3 up, 3 in
Oct 11 01:45:20 compute-0 ceph-mon[191930]: 4.a deep-scrub ok
Oct 11 01:45:20 compute-0 ceph-mon[191930]: osdmap e64: 3 total, 3 up, 3 in
Oct 11 01:45:20 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 65 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:20 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 65 pg[9.1b( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:20 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 65 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:20 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 65 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:20 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 65 pg[9.3( v 53'585 (0'0,53'585] local-lis/les=64/65 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:20 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 65 pg[9.1( v 53'585 (0'0,53'585] local-lis/les=64/65 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:20 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 65 pg[9.1d( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:20 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 65 pg[9.9( v 53'585 (0'0,53'585] local-lis/les=64/65 n=7 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:20 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 65 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=61/56 les/c/f=62/57/0 sis=64) [0] r=0 lpr=64 pi=[56,64)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v158: 321 pgs: 18 peering, 303 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 480 B/s, 13 objects/s recovering
Oct 11 01:45:20 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.16 scrub starts
Oct 11 01:45:20 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.16 scrub ok
Oct 11 01:45:21 compute-0 ceph-mon[191930]: osdmap e65: 3 total, 3 up, 3 in
Oct 11 01:45:21 compute-0 ceph-mon[191930]: pgmap v158: 321 pgs: 18 peering, 303 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 480 B/s, 13 objects/s recovering
Oct 11 01:45:22 compute-0 ceph-mon[191930]: 2.16 scrub starts
Oct 11 01:45:22 compute-0 ceph-mon[191930]: 2.16 scrub ok
Oct 11 01:45:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v159: 321 pgs: 7 peering, 314 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 372 B/s, 10 objects/s recovering
Oct 11 01:45:22 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.1d scrub starts
Oct 11 01:45:22 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.1d scrub ok
Oct 11 01:45:22 compute-0 sudo[223338]: pam_unix(sudo:session): session closed for user root
Oct 11 01:45:23 compute-0 sshd-session[222899]: Connection closed by 192.168.122.30 port 48176
Oct 11 01:45:23 compute-0 sshd-session[222896]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:45:23 compute-0 systemd[1]: session-41.scope: Deactivated successfully.
Oct 11 01:45:23 compute-0 systemd[1]: session-41.scope: Consumed 10.298s CPU time.
Oct 11 01:45:23 compute-0 systemd-logind[804]: Session 41 logged out. Waiting for processes to exit.
Oct 11 01:45:23 compute-0 systemd-logind[804]: Removed session 41.
Oct 11 01:45:23 compute-0 ceph-mon[191930]: pgmap v159: 321 pgs: 7 peering, 314 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 372 B/s, 10 objects/s recovering
Oct 11 01:45:23 compute-0 ceph-mon[191930]: 7.1d scrub starts
Oct 11 01:45:23 compute-0 ceph-mon[191930]: 7.1d scrub ok
Oct 11 01:45:23 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.1e deep-scrub starts
Oct 11 01:45:23 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 7.1e deep-scrub ok
Oct 11 01:45:24 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 6.8 scrub starts
Oct 11 01:45:24 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 6.8 scrub ok
Oct 11 01:45:24 compute-0 ceph-mon[191930]: 7.1e deep-scrub starts
Oct 11 01:45:24 compute-0 ceph-mon[191930]: 7.1e deep-scrub ok
Oct 11 01:45:24 compute-0 ceph-mon[191930]: 6.8 scrub starts
Oct 11 01:45:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v160: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 450 B/s, 18 objects/s recovering
Oct 11 01:45:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "3"} v 0) v1
Oct 11 01:45:24 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "3"}]: dispatch
Oct 11 01:45:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e65 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:45:24 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.18 scrub starts
Oct 11 01:45:24 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.18 scrub ok
Oct 11 01:45:25 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 6.14 scrub starts
Oct 11 01:45:25 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 6.14 scrub ok
Oct 11 01:45:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e65 do_prune osdmap full prune enabled
Oct 11 01:45:25 compute-0 ceph-mon[191930]: 6.8 scrub ok
Oct 11 01:45:25 compute-0 ceph-mon[191930]: pgmap v160: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 450 B/s, 18 objects/s recovering
Oct 11 01:45:25 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "3"}]: dispatch
Oct 11 01:45:25 compute-0 ceph-mon[191930]: 6.14 scrub starts
Oct 11 01:45:25 compute-0 ceph-mon[191930]: 6.14 scrub ok
Oct 11 01:45:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "3"}]': finished
Oct 11 01:45:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e66 e66: 3 total, 3 up, 3 in
Oct 11 01:45:25 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e66: 3 total, 3 up, 3 in
Oct 11 01:45:25 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 5.14 scrub starts
Oct 11 01:45:25 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 5.14 scrub ok
Oct 11 01:45:26 compute-0 PackageKit[186757]: daemon quit
Oct 11 01:45:26 compute-0 systemd[1]: packagekit.service: Deactivated successfully.
Oct 11 01:45:26 compute-0 systemd[1]: packagekit.service: Consumed 1.030s CPU time.
Oct 11 01:45:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v162: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 170 B/s, 8 objects/s recovering
Oct 11 01:45:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "4"} v 0) v1
Oct 11 01:45:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "4"}]: dispatch
Oct 11 01:45:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e66 do_prune osdmap full prune enabled
Oct 11 01:45:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "4"}]': finished
Oct 11 01:45:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e67 e67: 3 total, 3 up, 3 in
Oct 11 01:45:26 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e67: 3 total, 3 up, 3 in
Oct 11 01:45:26 compute-0 ceph-mon[191930]: 2.18 scrub starts
Oct 11 01:45:26 compute-0 ceph-mon[191930]: 2.18 scrub ok
Oct 11 01:45:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "3"}]': finished
Oct 11 01:45:26 compute-0 ceph-mon[191930]: osdmap e66: 3 total, 3 up, 3 in
Oct 11 01:45:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "4"}]: dispatch
Oct 11 01:45:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:45:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:45:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:45:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:45:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:45:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:45:26 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 5.15 deep-scrub starts
Oct 11 01:45:26 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 5.15 deep-scrub ok
Oct 11 01:45:27 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 6.15 scrub starts
Oct 11 01:45:27 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 6.15 scrub ok
Oct 11 01:45:27 compute-0 ceph-mon[191930]: 5.14 scrub starts
Oct 11 01:45:27 compute-0 ceph-mon[191930]: 5.14 scrub ok
Oct 11 01:45:27 compute-0 ceph-mon[191930]: pgmap v162: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 170 B/s, 8 objects/s recovering
Oct 11 01:45:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "4"}]': finished
Oct 11 01:45:27 compute-0 ceph-mon[191930]: osdmap e67: 3 total, 3 up, 3 in
Oct 11 01:45:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v164: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 151 B/s, 7 objects/s recovering
Oct 11 01:45:28 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "5"} v 0) v1
Oct 11 01:45:28 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "5"}]: dispatch
Oct 11 01:45:28 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e67 do_prune osdmap full prune enabled
Oct 11 01:45:28 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "5"}]': finished
Oct 11 01:45:28 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e68 e68: 3 total, 3 up, 3 in
Oct 11 01:45:28 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e68: 3 total, 3 up, 3 in
Oct 11 01:45:28 compute-0 ceph-mon[191930]: 5.15 deep-scrub starts
Oct 11 01:45:28 compute-0 ceph-mon[191930]: 5.15 deep-scrub ok
Oct 11 01:45:28 compute-0 ceph-mon[191930]: 6.15 scrub starts
Oct 11 01:45:28 compute-0 ceph-mon[191930]: 6.15 scrub ok
Oct 11 01:45:28 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "5"}]: dispatch
Oct 11 01:45:28 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.11 scrub starts
Oct 11 01:45:28 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.11 scrub ok
Oct 11 01:45:29 compute-0 ceph-mon[191930]: pgmap v164: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 151 B/s, 7 objects/s recovering
Oct 11 01:45:29 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "5"}]': finished
Oct 11 01:45:29 compute-0 ceph-mon[191930]: osdmap e68: 3 total, 3 up, 3 in
Oct 11 01:45:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e68 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:45:29 compute-0 podman[157119]: time="2025-10-11T01:45:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:45:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:45:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:45:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:45:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6774 "" "Go-http-client/1.1"
Oct 11 01:45:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v166: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "6"} v 0) v1
Oct 11 01:45:30 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "6"}]: dispatch
Oct 11 01:45:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e68 do_prune osdmap full prune enabled
Oct 11 01:45:30 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "6"}]': finished
Oct 11 01:45:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e69 e69: 3 total, 3 up, 3 in
Oct 11 01:45:30 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e69: 3 total, 3 up, 3 in
Oct 11 01:45:30 compute-0 ceph-mon[191930]: 2.11 scrub starts
Oct 11 01:45:30 compute-0 ceph-mon[191930]: 2.11 scrub ok
Oct 11 01:45:30 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "6"}]: dispatch
Oct 11 01:45:30 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 5.7 deep-scrub starts
Oct 11 01:45:30 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 5.7 deep-scrub ok
Oct 11 01:45:31 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 4.13 scrub starts
Oct 11 01:45:31 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 4.13 scrub ok
Oct 11 01:45:31 compute-0 openstack_network_exporter[159265]: ERROR   01:45:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:45:31 compute-0 openstack_network_exporter[159265]: ERROR   01:45:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:45:31 compute-0 openstack_network_exporter[159265]: ERROR   01:45:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:45:31 compute-0 openstack_network_exporter[159265]: ERROR   01:45:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:45:31 compute-0 openstack_network_exporter[159265]: ERROR   01:45:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:45:31 compute-0 ceph-mon[191930]: pgmap v166: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "6"}]': finished
Oct 11 01:45:31 compute-0 ceph-mon[191930]: osdmap e69: 3 total, 3 up, 3 in
Oct 11 01:45:31 compute-0 ceph-mon[191930]: 5.7 deep-scrub starts
Oct 11 01:45:31 compute-0 ceph-mon[191930]: 5.7 deep-scrub ok
Oct 11 01:45:32 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 6.11 scrub starts
Oct 11 01:45:32 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 6.11 scrub ok
Oct 11 01:45:32 compute-0 podman[223420]: 2025-10-11 01:45:32.240141944 +0000 UTC m=+0.117600620 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 01:45:32 compute-0 podman[223421]: 2025-10-11 01:45:32.264586205 +0000 UTC m=+0.141276071 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, io.buildah.version=1.33.7, container_name=openstack_network_exporter, release=1755695350, io.openshift.tags=minimal rhel9, managed_by=edpm_ansible, distribution-scope=public, vcs-type=git, version=9.6, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.expose-services=, architecture=x86_64, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vendor=Red Hat, Inc., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://catalog.redhat.com/en/search?searchType=containers, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, build-date=2025-08-20T13:12:41, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, name=ubi9-minimal)
Oct 11 01:45:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v168: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "7"} v 0) v1
Oct 11 01:45:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "7"}]: dispatch
Oct 11 01:45:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e69 do_prune osdmap full prune enabled
Oct 11 01:45:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "7"}]': finished
Oct 11 01:45:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e70 e70: 3 total, 3 up, 3 in
Oct 11 01:45:32 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e70: 3 total, 3 up, 3 in
Oct 11 01:45:32 compute-0 ceph-mon[191930]: 4.13 scrub starts
Oct 11 01:45:32 compute-0 ceph-mon[191930]: 4.13 scrub ok
Oct 11 01:45:32 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "7"}]: dispatch
Oct 11 01:45:33 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 70 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=70 pruub=12.732912064s) [2] r=-1 lpr=70 pi=[56,70)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 147.309768677s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:33 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 70 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=70 pruub=12.732839584s) [2] r=-1 lpr=70 pi=[56,70)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 147.309768677s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:33 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 70 pg[9.e( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=70 pruub=12.735260010s) [2] r=-1 lpr=70 pi=[56,70)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 147.312698364s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:33 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 70 pg[9.6( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=70 pruub=12.737512589s) [2] r=-1 lpr=70 pi=[56,70)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 147.315414429s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:33 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 70 pg[9.6( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=70 pruub=12.737467766s) [2] r=-1 lpr=70 pi=[56,70)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 147.315414429s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:33 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 70 pg[9.16( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=70) [2] r=0 lpr=70 pi=[56,70)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:33 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 70 pg[9.6( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=70) [2] r=0 lpr=70 pi=[56,70)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:33 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 70 pg[9.1e( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=70) [2] r=0 lpr=70 pi=[56,70)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:33 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 70 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=70 pruub=12.738392830s) [2] r=-1 lpr=70 pi=[56,70)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 147.316925049s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:33 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 70 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=70 pruub=12.738227844s) [2] r=-1 lpr=70 pi=[56,70)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 147.316925049s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:33 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 70 pg[9.e( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=70 pruub=12.735155106s) [2] r=-1 lpr=70 pi=[56,70)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 147.312698364s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:33 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 70 pg[9.e( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=70) [2] r=0 lpr=70 pi=[56,70)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e70 do_prune osdmap full prune enabled
Oct 11 01:45:33 compute-0 ceph-mon[191930]: 6.11 scrub starts
Oct 11 01:45:33 compute-0 ceph-mon[191930]: 6.11 scrub ok
Oct 11 01:45:33 compute-0 ceph-mon[191930]: pgmap v168: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "7"}]': finished
Oct 11 01:45:33 compute-0 ceph-mon[191930]: osdmap e70: 3 total, 3 up, 3 in
Oct 11 01:45:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e71 e71: 3 total, 3 up, 3 in
Oct 11 01:45:33 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e71: 3 total, 3 up, 3 in
Oct 11 01:45:33 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 71 pg[9.e( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] r=-1 lpr=71 pi=[56,71)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:33 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 71 pg[9.e( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] r=-1 lpr=71 pi=[56,71)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:33 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 71 pg[9.1e( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] r=-1 lpr=71 pi=[56,71)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:33 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 71 pg[9.1e( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] r=-1 lpr=71 pi=[56,71)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:33 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 71 pg[9.6( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] r=-1 lpr=71 pi=[56,71)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:33 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 71 pg[9.6( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] r=-1 lpr=71 pi=[56,71)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:33 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 71 pg[9.16( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] r=-1 lpr=71 pi=[56,71)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:33 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 71 pg[9.16( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] r=-1 lpr=71 pi=[56,71)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:33 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 71 pg[9.e( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] r=0 lpr=71 pi=[56,71)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:33 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 71 pg[9.6( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] r=0 lpr=71 pi=[56,71)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:33 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 71 pg[9.6( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] r=0 lpr=71 pi=[56,71)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:33 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 71 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] r=0 lpr=71 pi=[56,71)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:33 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 71 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] r=0 lpr=71 pi=[56,71)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:33 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 71 pg[9.e( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] r=0 lpr=71 pi=[56,71)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:33 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 71 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] r=0 lpr=71 pi=[56,71)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:33 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 71 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] r=0 lpr=71 pi=[56,71)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:34 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 6.13 scrub starts
Oct 11 01:45:34 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 6.13 scrub ok
Oct 11 01:45:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v171: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "8"} v 0) v1
Oct 11 01:45:34 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "8"}]: dispatch
Oct 11 01:45:34 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.1e scrub starts
Oct 11 01:45:34 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.1e scrub ok
Oct 11 01:45:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e71 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:45:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e71 do_prune osdmap full prune enabled
Oct 11 01:45:34 compute-0 ceph-mon[191930]: osdmap e71: 3 total, 3 up, 3 in
Oct 11 01:45:34 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "8"}]: dispatch
Oct 11 01:45:34 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "8"}]': finished
Oct 11 01:45:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e72 e72: 3 total, 3 up, 3 in
Oct 11 01:45:34 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e72: 3 total, 3 up, 3 in
Oct 11 01:45:34 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 72 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=72 pruub=9.733637810s) [2] r=-1 lpr=72 pi=[64,72)/1 crt=53'585 mlcod 0'0 active pruub 152.912918091s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:34 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 72 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=72 pruub=9.733565331s) [2] r=-1 lpr=72 pi=[64,72)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 152.912918091s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:34 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 72 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=72 pruub=9.732717514s) [2] r=-1 lpr=72 pi=[64,72)/1 crt=53'585 mlcod 0'0 active pruub 152.912796021s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:34 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 72 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=72 pruub=9.732669830s) [2] r=-1 lpr=72 pi=[64,72)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 152.912796021s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:34 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 72 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=63/64 n=7 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=72 pruub=8.734333038s) [2] r=-1 lpr=72 pi=[63,72)/1 crt=53'585 mlcod 0'0 active pruub 151.914672852s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:34 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 72 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=63/64 n=7 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=72 pruub=8.728829384s) [2] r=-1 lpr=72 pi=[63,72)/1 crt=53'585 mlcod 0'0 active pruub 151.909210205s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:34 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 72 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=63/64 n=7 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=72 pruub=8.734234810s) [2] r=-1 lpr=72 pi=[63,72)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 151.914672852s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:34 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 72 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=63/64 n=7 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=72 pruub=8.728724480s) [2] r=-1 lpr=72 pi=[63,72)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 151.909210205s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:34 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 72 pg[9.17( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=72) [2] r=0 lpr=72 pi=[64,72)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:34 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 72 pg[9.f( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=72) [2] r=0 lpr=72 pi=[63,72)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:34 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 72 pg[9.7( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=72) [2] r=0 lpr=72 pi=[63,72)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:34 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 72 pg[9.1f( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=72) [2] r=0 lpr=72 pi=[64,72)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:34 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 5.3 scrub starts
Oct 11 01:45:34 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 72 pg[9.6( v 53'585 (0'0,53'585] local-lis/les=71/72 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] async=[2] r=0 lpr=71 pi=[56,71)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=6}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:34 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 72 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=71/72 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] async=[2] r=0 lpr=71 pi=[56,71)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=6}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:34 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 72 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=71/72 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] async=[2] r=0 lpr=71 pi=[56,71)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=6}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:34 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 72 pg[9.e( v 53'585 (0'0,53'585] local-lis/les=71/72 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=71) [2]/[1] async=[2] r=0 lpr=71 pi=[56,71)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=6}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:34 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 5.3 scrub ok
Oct 11 01:45:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e72 do_prune osdmap full prune enabled
Oct 11 01:45:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e73 e73: 3 total, 3 up, 3 in
Oct 11 01:45:35 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.15 deep-scrub starts
Oct 11 01:45:35 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e73: 3 total, 3 up, 3 in
Oct 11 01:45:35 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 73 pg[9.e( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73) [2] r=0 lpr=73 pi=[56,73)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [2] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:35 compute-0 ceph-mon[191930]: 6.13 scrub starts
Oct 11 01:45:35 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 73 pg[9.e( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73) [2] r=0 lpr=73 pi=[56,73)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:35 compute-0 ceph-mon[191930]: 6.13 scrub ok
Oct 11 01:45:35 compute-0 ceph-mon[191930]: pgmap v171: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:35 compute-0 ceph-mon[191930]: 6.1e scrub starts
Oct 11 01:45:35 compute-0 ceph-mon[191930]: 6.1e scrub ok
Oct 11 01:45:35 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "8"}]': finished
Oct 11 01:45:35 compute-0 ceph-mon[191930]: osdmap e72: 3 total, 3 up, 3 in
Oct 11 01:45:35 compute-0 ceph-mon[191930]: 5.3 scrub starts
Oct 11 01:45:35 compute-0 ceph-mon[191930]: 5.3 scrub ok
Oct 11 01:45:35 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 73 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73) [2] r=0 lpr=73 pi=[56,73)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [2] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:35 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 73 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73) [2] r=0 lpr=73 pi=[56,73)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:35 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 73 pg[9.1f( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=73) [2]/[0] r=-1 lpr=73 pi=[64,73)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:35 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 73 pg[9.1f( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=73) [2]/[0] r=-1 lpr=73 pi=[64,73)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:35 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 73 pg[9.6( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73) [2] r=0 lpr=73 pi=[56,73)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [2] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:35 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 73 pg[9.6( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73) [2] r=0 lpr=73 pi=[56,73)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:35 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 73 pg[9.7( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=73) [2]/[0] r=-1 lpr=73 pi=[63,73)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:35 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 73 pg[9.7( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=73) [2]/[0] r=-1 lpr=73 pi=[63,73)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:35 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 73 pg[9.f( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=73) [2]/[0] r=-1 lpr=73 pi=[63,73)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:35 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 73 pg[9.f( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=73) [2]/[0] r=-1 lpr=73 pi=[63,73)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:35 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 73 pg[9.17( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=73) [2]/[0] r=-1 lpr=73 pi=[64,73)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:35 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 73 pg[9.17( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=73) [2]/[0] r=-1 lpr=73 pi=[64,73)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:35 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 73 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73) [2] r=0 lpr=73 pi=[56,73)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [2] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:35 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 73 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73) [2] r=0 lpr=73 pi=[56,73)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:35 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.15 deep-scrub ok
Oct 11 01:45:35 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 73 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=71/72 n=6 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73 pruub=15.018041611s) [2] async=[2] r=-1 lpr=73 pi=[56,73)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 151.852172852s@ mbc={255={}}] start_peering_interval up [2] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:35 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 73 pg[9.6( v 53'585 (0'0,53'585] local-lis/les=71/72 n=7 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73 pruub=15.017793655s) [2] async=[2] r=-1 lpr=73 pi=[56,73)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 151.852096558s@ mbc={255={}}] start_peering_interval up [2] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:35 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 73 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=71/72 n=6 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73 pruub=15.017905235s) [2] r=-1 lpr=73 pi=[56,73)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 151.852172852s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:35 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 73 pg[9.6( v 53'585 (0'0,53'585] local-lis/les=71/72 n=7 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73 pruub=15.017682076s) [2] r=-1 lpr=73 pi=[56,73)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 151.852096558s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:35 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 73 pg[9.e( v 53'585 (0'0,53'585] local-lis/les=71/72 n=7 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73 pruub=15.017831802s) [2] async=[2] r=-1 lpr=73 pi=[56,73)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 151.852355957s@ mbc={255={}}] start_peering_interval up [2] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:35 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 73 pg[9.e( v 53'585 (0'0,53'585] local-lis/les=71/72 n=7 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73 pruub=15.017685890s) [2] r=-1 lpr=73 pi=[56,73)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 151.852355957s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:35 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 73 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=71/72 n=6 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73 pruub=15.017221451s) [2] async=[2] r=-1 lpr=73 pi=[56,73)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 151.852355957s@ mbc={255={}}] start_peering_interval up [2] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:35 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 73 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=71/72 n=6 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73 pruub=15.017123222s) [2] r=-1 lpr=73 pi=[56,73)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 151.852355957s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:35 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 73 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=63/64 n=7 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=73) [2]/[0] r=0 lpr=73 pi=[63,73)/1 crt=53'585 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:35 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 73 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=73) [2]/[0] r=0 lpr=73 pi=[64,73)/1 crt=53'585 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:35 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 73 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=73) [2]/[0] r=0 lpr=73 pi=[64,73)/1 crt=53'585 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:35 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 73 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=63/64 n=7 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=73) [2]/[0] r=0 lpr=73 pi=[63,73)/1 crt=53'585 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:35 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 73 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=63/64 n=7 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=73) [2]/[0] r=0 lpr=73 pi=[63,73)/1 crt=53'585 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:35 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 73 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=63/64 n=7 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=73) [2]/[0] r=0 lpr=73 pi=[63,73)/1 crt=53'585 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:35 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 73 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=73) [2]/[0] r=0 lpr=73 pi=[64,73)/1 crt=53'585 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:35 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 73 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=73) [2]/[0] r=0 lpr=73 pi=[64,73)/1 crt=53'585 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:36 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 4.11 deep-scrub starts
Oct 11 01:45:36 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 4.11 deep-scrub ok
Oct 11 01:45:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v174: 321 pgs: 4 active+remapped, 317 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 54 B/s, 5 objects/s recovering
Oct 11 01:45:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "9"} v 0) v1
Oct 11 01:45:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "9"}]: dispatch
Oct 11 01:45:36 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 4.d scrub starts
Oct 11 01:45:36 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 4.d scrub ok
Oct 11 01:45:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e73 do_prune osdmap full prune enabled
Oct 11 01:45:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "9"}]': finished
Oct 11 01:45:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e74 e74: 3 total, 3 up, 3 in
Oct 11 01:45:36 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e74: 3 total, 3 up, 3 in
Oct 11 01:45:36 compute-0 ceph-mon[191930]: osdmap e73: 3 total, 3 up, 3 in
Oct 11 01:45:36 compute-0 ceph-mon[191930]: 3.15 deep-scrub starts
Oct 11 01:45:36 compute-0 ceph-mon[191930]: 3.15 deep-scrub ok
Oct 11 01:45:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "9"}]: dispatch
Oct 11 01:45:36 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.13 scrub starts
Oct 11 01:45:36 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 74 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=73/74 n=6 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73) [2] r=0 lpr=73 pi=[56,73)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:36 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.13 scrub ok
Oct 11 01:45:36 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 74 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=73/74 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=73) [2]/[0] async=[2] r=0 lpr=73 pi=[64,73)/1 crt=53'585 mlcod 0'0 active+remapped mbc={255={(0+1)=4}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:36 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 74 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=73/74 n=7 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=73) [2]/[0] async=[2] r=0 lpr=73 pi=[63,73)/1 crt=53'585 mlcod 0'0 active+remapped mbc={255={(0+1)=6}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:36 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 74 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=73/74 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=73) [2]/[0] async=[2] r=0 lpr=73 pi=[64,73)/1 crt=53'585 mlcod 0'0 active+remapped mbc={255={(0+1)=6}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:36 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 74 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=73/74 n=7 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=73) [2]/[0] async=[2] r=0 lpr=73 pi=[63,73)/1 crt=53'585 mlcod 0'0 active+remapped mbc={255={(0+1)=7}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:36 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 74 pg[9.e( v 53'585 (0'0,53'585] local-lis/les=73/74 n=7 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73) [2] r=0 lpr=73 pi=[56,73)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:36 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 74 pg[9.6( v 53'585 (0'0,53'585] local-lis/les=73/74 n=7 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73) [2] r=0 lpr=73 pi=[56,73)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:36 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 74 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=73/74 n=6 ec=56/47 lis/c=71/56 les/c/f=72/57/0 sis=73) [2] r=0 lpr=73 pi=[56,73)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:36 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 74 pg[9.8( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=74 pruub=9.207142830s) [2] r=-1 lpr=74 pi=[56,74)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 147.314910889s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:36 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 74 pg[9.8( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=74 pruub=9.207056046s) [2] r=-1 lpr=74 pi=[56,74)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 147.314910889s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:36 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 74 pg[9.8( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=74) [2] r=0 lpr=74 pi=[56,74)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:36 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 74 pg[9.18( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=74 pruub=9.206172943s) [2] r=-1 lpr=74 pi=[56,74)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 147.316085815s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:36 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 74 pg[9.18( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=74 pruub=9.206095695s) [2] r=-1 lpr=74 pi=[56,74)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 147.316085815s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:36 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 74 pg[9.18( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=74) [2] r=0 lpr=74 pi=[56,74)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:37 compute-0 podman[223462]: 2025-10-11 01:45:37.237818578 +0000 UTC m=+0.126052144 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:45:37 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e74 do_prune osdmap full prune enabled
Oct 11 01:45:37 compute-0 ceph-mon[191930]: 4.11 deep-scrub starts
Oct 11 01:45:37 compute-0 ceph-mon[191930]: 4.11 deep-scrub ok
Oct 11 01:45:37 compute-0 ceph-mon[191930]: pgmap v174: 321 pgs: 4 active+remapped, 317 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 54 B/s, 5 objects/s recovering
Oct 11 01:45:37 compute-0 ceph-mon[191930]: 4.d scrub starts
Oct 11 01:45:37 compute-0 ceph-mon[191930]: 4.d scrub ok
Oct 11 01:45:37 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "9"}]': finished
Oct 11 01:45:37 compute-0 ceph-mon[191930]: osdmap e74: 3 total, 3 up, 3 in
Oct 11 01:45:37 compute-0 ceph-mon[191930]: 2.13 scrub starts
Oct 11 01:45:37 compute-0 ceph-mon[191930]: 2.13 scrub ok
Oct 11 01:45:37 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e75 e75: 3 total, 3 up, 3 in
Oct 11 01:45:37 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e75: 3 total, 3 up, 3 in
Oct 11 01:45:37 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 75 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=73/74 n=7 ec=56/47 lis/c=73/63 les/c/f=74/64/0 sis=75 pruub=14.983347893s) [2] async=[2] r=-1 lpr=75 pi=[63,75)/1 crt=53'585 mlcod 53'585 active pruub 161.247360229s@ mbc={255={}}] start_peering_interval up [2] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:37 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 75 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=73/74 n=7 ec=56/47 lis/c=73/63 les/c/f=74/64/0 sis=75 pruub=14.983162880s) [2] r=-1 lpr=75 pi=[63,75)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 161.247360229s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:37 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 75 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=73/74 n=6 ec=56/47 lis/c=73/64 les/c/f=74/65/0 sis=75 pruub=14.982107162s) [2] async=[2] r=-1 lpr=75 pi=[64,75)/1 crt=53'585 mlcod 53'585 active pruub 161.247085571s@ mbc={255={}}] start_peering_interval up [2] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:37 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 75 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=73/74 n=6 ec=56/47 lis/c=73/64 les/c/f=74/65/0 sis=75 pruub=14.982028961s) [2] r=-1 lpr=75 pi=[64,75)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 161.247085571s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:37 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 75 pg[9.8( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=75) [2]/[1] r=0 lpr=75 pi=[56,75)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:37 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 75 pg[9.8( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=75) [2]/[1] r=0 lpr=75 pi=[56,75)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:37 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 75 pg[9.18( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=75) [2]/[1] r=0 lpr=75 pi=[56,75)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:37 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 75 pg[9.18( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=75) [2]/[1] r=0 lpr=75 pi=[56,75)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:37 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 75 pg[9.18( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=75) [2]/[1] r=-1 lpr=75 pi=[56,75)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:37 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 75 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=73/74 n=6 ec=56/47 lis/c=73/64 les/c/f=74/65/0 sis=75 pruub=14.981540680s) [2] async=[2] r=-1 lpr=75 pi=[64,75)/1 crt=53'585 mlcod 53'585 active pruub 161.247283936s@ mbc={255={}}] start_peering_interval up [2] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:37 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 75 pg[9.18( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=75) [2]/[1] r=-1 lpr=75 pi=[56,75)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:37 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 75 pg[9.8( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=75) [2]/[1] r=-1 lpr=75 pi=[56,75)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:37 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 75 pg[9.8( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=75) [2]/[1] r=-1 lpr=75 pi=[56,75)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:37 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 75 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=73/74 n=7 ec=56/47 lis/c=73/63 les/c/f=74/64/0 sis=75 pruub=14.978719711s) [2] async=[2] r=-1 lpr=75 pi=[63,75)/1 crt=53'585 mlcod 53'585 active pruub 161.247222900s@ mbc={255={}}] start_peering_interval up [2] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:37 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 75 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=73/64 les/c/f=74/65/0 sis=75) [2] r=0 lpr=75 pi=[64,75)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [2] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:37 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 75 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=73/64 les/c/f=74/65/0 sis=75) [2] r=0 lpr=75 pi=[64,75)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:37 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 75 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=73/63 les/c/f=74/64/0 sis=75) [2] r=0 lpr=75 pi=[63,75)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [2] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:37 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 75 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=73/63 les/c/f=74/64/0 sis=75) [2] r=0 lpr=75 pi=[63,75)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:37 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 75 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=73/74 n=7 ec=56/47 lis/c=73/63 les/c/f=74/64/0 sis=75 pruub=14.978499413s) [2] r=-1 lpr=75 pi=[63,75)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 161.247222900s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:37 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 75 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=73/63 les/c/f=74/64/0 sis=75) [2] r=0 lpr=75 pi=[63,75)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [2] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:37 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 75 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=73/63 les/c/f=74/64/0 sis=75) [2] r=0 lpr=75 pi=[63,75)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:37 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 75 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=73/64 les/c/f=74/65/0 sis=75) [2] r=0 lpr=75 pi=[64,75)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [2] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:37 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 75 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=73/64 les/c/f=74/65/0 sis=75) [2] r=0 lpr=75 pi=[64,75)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:37 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 75 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=73/74 n=6 ec=56/47 lis/c=73/64 les/c/f=74/65/0 sis=75 pruub=14.979998589s) [2] r=-1 lpr=75 pi=[64,75)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 161.247283936s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v177: 321 pgs: 2 unknown, 319 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 54 B/s, 5 objects/s recovering
Oct 11 01:45:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e75 do_prune osdmap full prune enabled
Oct 11 01:45:38 compute-0 ceph-mon[191930]: osdmap e75: 3 total, 3 up, 3 in
Oct 11 01:45:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e76 e76: 3 total, 3 up, 3 in
Oct 11 01:45:38 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e76: 3 total, 3 up, 3 in
Oct 11 01:45:38 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 76 pg[9.17( v 53'585 (0'0,53'585] local-lis/les=75/76 n=6 ec=56/47 lis/c=73/64 les/c/f=74/65/0 sis=75) [2] r=0 lpr=75 pi=[64,75)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:38 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 76 pg[9.f( v 53'585 (0'0,53'585] local-lis/les=75/76 n=7 ec=56/47 lis/c=73/63 les/c/f=74/64/0 sis=75) [2] r=0 lpr=75 pi=[63,75)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:38 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 76 pg[9.7( v 53'585 (0'0,53'585] local-lis/les=75/76 n=7 ec=56/47 lis/c=73/63 les/c/f=74/64/0 sis=75) [2] r=0 lpr=75 pi=[63,75)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:38 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 76 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=75/76 n=6 ec=56/47 lis/c=73/64 les/c/f=74/65/0 sis=75) [2] r=0 lpr=75 pi=[64,75)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:38 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 76 pg[9.18( v 53'585 (0'0,53'585] local-lis/les=75/76 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=75) [2]/[1] async=[2] r=0 lpr=75 pi=[56,75)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=5}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:38 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 76 pg[9.8( v 53'585 (0'0,53'585] local-lis/les=75/76 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=75) [2]/[1] async=[2] r=0 lpr=75 pi=[56,75)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=9}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:39 compute-0 sshd-session[223482]: Accepted publickey for zuul from 192.168.122.30 port 43360 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:45:39 compute-0 systemd-logind[804]: New session 42 of user zuul.
Oct 11 01:45:39 compute-0 systemd[1]: Started Session 42 of User zuul.
Oct 11 01:45:39 compute-0 sshd-session[223482]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:45:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e76 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:45:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e76 do_prune osdmap full prune enabled
Oct 11 01:45:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e77 e77: 3 total, 3 up, 3 in
Oct 11 01:45:39 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e77: 3 total, 3 up, 3 in
Oct 11 01:45:39 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 77 pg[9.8( v 53'585 (0'0,53'585] local-lis/les=75/76 n=7 ec=56/47 lis/c=75/56 les/c/f=76/57/0 sis=77 pruub=15.155653954s) [2] async=[2] r=-1 lpr=77 pi=[56,77)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 155.945343018s@ mbc={255={}}] start_peering_interval up [2] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:39 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 77 pg[9.8( v 53'585 (0'0,53'585] local-lis/les=75/76 n=7 ec=56/47 lis/c=75/56 les/c/f=76/57/0 sis=77 pruub=15.155481339s) [2] r=-1 lpr=77 pi=[56,77)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 155.945343018s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:39 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 77 pg[9.18( v 53'585 (0'0,53'585] local-lis/les=75/76 n=6 ec=56/47 lis/c=75/56 les/c/f=76/57/0 sis=77 pruub=15.154553413s) [2] async=[2] r=-1 lpr=77 pi=[56,77)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 155.945266724s@ mbc={255={}}] start_peering_interval up [2] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:39 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 77 pg[9.18( v 53'585 (0'0,53'585] local-lis/les=75/76 n=6 ec=56/47 lis/c=75/56 les/c/f=76/57/0 sis=77 pruub=15.154427528s) [2] r=-1 lpr=77 pi=[56,77)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 155.945266724s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:39 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 77 pg[9.8( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=75/56 les/c/f=76/57/0 sis=77) [2] r=0 lpr=77 pi=[56,77)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [2] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:39 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 77 pg[9.8( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=75/56 les/c/f=76/57/0 sis=77) [2] r=0 lpr=77 pi=[56,77)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:39 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 77 pg[9.18( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=75/56 les/c/f=76/57/0 sis=77) [2] r=0 lpr=77 pi=[56,77)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [2] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:39 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 77 pg[9.18( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=75/56 les/c/f=76/57/0 sis=77) [2] r=0 lpr=77 pi=[56,77)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:39 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.f scrub starts
Oct 11 01:45:39 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.f scrub ok
Oct 11 01:45:39 compute-0 ceph-mon[191930]: pgmap v177: 321 pgs: 2 unknown, 319 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 54 B/s, 5 objects/s recovering
Oct 11 01:45:39 compute-0 ceph-mon[191930]: osdmap e76: 3 total, 3 up, 3 in
Oct 11 01:45:39 compute-0 ceph-mon[191930]: osdmap e77: 3 total, 3 up, 3 in
Oct 11 01:45:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v180: 321 pgs: 2 unknown, 319 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e77 do_prune osdmap full prune enabled
Oct 11 01:45:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e78 e78: 3 total, 3 up, 3 in
Oct 11 01:45:40 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.c deep-scrub starts
Oct 11 01:45:40 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e78: 3 total, 3 up, 3 in
Oct 11 01:45:40 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.c deep-scrub ok
Oct 11 01:45:40 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 78 pg[9.8( v 53'585 (0'0,53'585] local-lis/les=77/78 n=7 ec=56/47 lis/c=75/56 les/c/f=76/57/0 sis=77) [2] r=0 lpr=77 pi=[56,77)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:40 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 78 pg[9.18( v 53'585 (0'0,53'585] local-lis/les=77/78 n=6 ec=56/47 lis/c=75/56 les/c/f=76/57/0 sis=77) [2] r=0 lpr=77 pi=[56,77)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:40 compute-0 python3.9[223635]: ansible-ansible.legacy.ping Invoked with data=pong
Oct 11 01:45:40 compute-0 ceph-mon[191930]: 2.f scrub starts
Oct 11 01:45:40 compute-0 ceph-mon[191930]: 2.f scrub ok
Oct 11 01:45:40 compute-0 ceph-mon[191930]: osdmap e78: 3 total, 3 up, 3 in
Oct 11 01:45:41 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.17 deep-scrub starts
Oct 11 01:45:41 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 4.f scrub starts
Oct 11 01:45:41 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.17 deep-scrub ok
Oct 11 01:45:41 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 4.f scrub ok
Oct 11 01:45:41 compute-0 ceph-mon[191930]: pgmap v180: 321 pgs: 2 unknown, 319 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:41 compute-0 ceph-mon[191930]: 6.c deep-scrub starts
Oct 11 01:45:41 compute-0 ceph-mon[191930]: 6.c deep-scrub ok
Oct 11 01:45:42 compute-0 python3.9[223809]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:45:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v182: 321 pgs: 2 unknown, 319 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:42 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.12 scrub starts
Oct 11 01:45:42 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.12 scrub ok
Oct 11 01:45:42 compute-0 ceph-mon[191930]: 3.17 deep-scrub starts
Oct 11 01:45:42 compute-0 ceph-mon[191930]: 4.f scrub starts
Oct 11 01:45:42 compute-0 ceph-mon[191930]: 3.17 deep-scrub ok
Oct 11 01:45:42 compute-0 ceph-mon[191930]: 4.f scrub ok
Oct 11 01:45:43 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 4.1c scrub starts
Oct 11 01:45:43 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 4.1c scrub ok
Oct 11 01:45:43 compute-0 podman[223873]: 2025-10-11 01:45:43.227200733 +0000 UTC m=+0.110682244 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 01:45:43 compute-0 podman[223883]: 2025-10-11 01:45:43.268938823 +0000 UTC m=+0.135096534 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, vendor=Red Hat, Inc., com.redhat.component=ubi9-container, summary=Provides the latest release of Red Hat Universal Base Image 9., release=1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, name=ubi9, managed_by=edpm_ansible, io.openshift.expose-services=, maintainer=Red Hat, Inc., version=9.4, architecture=x86_64, container_name=kepler, distribution-scope=public, io.buildah.version=1.29.0, io.k8s.display-name=Red Hat Universal Base Image 9, build-date=2024-09-18T21:23:30, release-0.7.12=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 01:45:43 compute-0 podman[223880]: 2025-10-11 01:45:43.276450964 +0000 UTC m=+0.159696369 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 01:45:43 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.13 scrub starts
Oct 11 01:45:43 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.13 scrub ok
Oct 11 01:45:43 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.d scrub starts
Oct 11 01:45:43 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.d scrub ok
Oct 11 01:45:43 compute-0 sudo[224028]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-exiiyrrboqlqyywsqswkrovvqdezynvr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147143.0203974-45-15941578037071/AnsiballZ_command.py'
Oct 11 01:45:43 compute-0 sudo[224028]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:45:43 compute-0 ceph-mon[191930]: pgmap v182: 321 pgs: 2 unknown, 319 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:43 compute-0 ceph-mon[191930]: 3.12 scrub starts
Oct 11 01:45:43 compute-0 ceph-mon[191930]: 3.12 scrub ok
Oct 11 01:45:44 compute-0 python3.9[224030]: ansible-ansible.legacy.command Invoked with _raw_params=PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin which growvols _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:45:44 compute-0 sudo[224028]: pam_unix(sudo:session): session closed for user root
Oct 11 01:45:44 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 6.1f scrub starts
Oct 11 01:45:44 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 6.1f scrub ok
Oct 11 01:45:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v183: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 7.3 KiB/s rd, 341 B/s wr, 16 op/s; 128 B/s, 6 objects/s recovering
Oct 11 01:45:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "10"} v 0) v1
Oct 11 01:45:44 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "10"}]: dispatch
Oct 11 01:45:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e78 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:45:44 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.f scrub starts
Oct 11 01:45:44 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.f scrub ok
Oct 11 01:45:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e78 do_prune osdmap full prune enabled
Oct 11 01:45:44 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "10"}]': finished
Oct 11 01:45:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e79 e79: 3 total, 3 up, 3 in
Oct 11 01:45:44 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e79: 3 total, 3 up, 3 in
Oct 11 01:45:44 compute-0 ceph-mon[191930]: 4.1c scrub starts
Oct 11 01:45:44 compute-0 ceph-mon[191930]: 4.1c scrub ok
Oct 11 01:45:44 compute-0 ceph-mon[191930]: 7.13 scrub starts
Oct 11 01:45:44 compute-0 ceph-mon[191930]: 7.13 scrub ok
Oct 11 01:45:44 compute-0 ceph-mon[191930]: 6.d scrub starts
Oct 11 01:45:44 compute-0 ceph-mon[191930]: 6.d scrub ok
Oct 11 01:45:44 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "10"}]: dispatch
Oct 11 01:45:45 compute-0 sudo[224181]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vxfvnenbmansvtisunufkipdhgrdzkhk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147144.4883327-57-127069667452066/AnsiballZ_stat.py'
Oct 11 01:45:45 compute-0 sudo[224181]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:45:45 compute-0 python3.9[224183]: ansible-ansible.builtin.stat Invoked with path=/etc/ansible/facts.d/bootc.fact follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:45:45 compute-0 sudo[224181]: pam_unix(sudo:session): session closed for user root
Oct 11 01:45:45 compute-0 ceph-mon[191930]: 6.1f scrub starts
Oct 11 01:45:45 compute-0 ceph-mon[191930]: 6.1f scrub ok
Oct 11 01:45:45 compute-0 ceph-mon[191930]: pgmap v183: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 7.3 KiB/s rd, 341 B/s wr, 16 op/s; 128 B/s, 6 objects/s recovering
Oct 11 01:45:45 compute-0 ceph-mon[191930]: 3.f scrub starts
Oct 11 01:45:45 compute-0 ceph-mon[191930]: 3.f scrub ok
Oct 11 01:45:45 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "10"}]': finished
Oct 11 01:45:45 compute-0 ceph-mon[191930]: osdmap e79: 3 total, 3 up, 3 in
Oct 11 01:45:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v185: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 6.4 KiB/s rd, 149 B/s wr, 14 op/s; 112 B/s, 5 objects/s recovering
Oct 11 01:45:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "11"} v 0) v1
Oct 11 01:45:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "11"}]: dispatch
Oct 11 01:45:46 compute-0 sudo[224335]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-giuacpauivneisojewrqmsgrsuukroid ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147145.8747067-68-161668436452981/AnsiballZ_file.py'
Oct 11 01:45:46 compute-0 sudo[224335]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:45:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e79 do_prune osdmap full prune enabled
Oct 11 01:45:46 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "11"}]: dispatch
Oct 11 01:45:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "11"}]': finished
Oct 11 01:45:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e80 e80: 3 total, 3 up, 3 in
Oct 11 01:45:46 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e80: 3 total, 3 up, 3 in
Oct 11 01:45:46 compute-0 python3.9[224337]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/log/journal setype=var_log_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
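The file task above ensures /var/log/journal exists as a root-owned directory with mode 0750. A sketch of the same steps outside Ansible (the SELinux type from setype=var_log_t would additionally need chcon(1) or the selinux bindings, which this sketch omits):

    import os, shutil

    def ensure_dir(path="/var/log/journal", mode=0o750,
                   owner="root", group="root"):
        # state=directory with recurse=False: create if missing, then
        # enforce mode and ownership on the directory itself.
        os.makedirs(path, exist_ok=True)
        os.chmod(path, mode)                      # makedirs mode is umask-masked
        shutil.chown(path, user=owner, group=group)  # needs root, like the task

    ensure_dir()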
Oct 11 01:45:46 compute-0 sudo[224335]: pam_unix(sudo:session): session closed for user root
Oct 11 01:45:47 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 5.1e scrub starts
Oct 11 01:45:47 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 5.1e scrub ok
Oct 11 01:45:47 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.2 scrub starts
Oct 11 01:45:47 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.2 scrub ok
Oct 11 01:45:47 compute-0 ceph-mon[191930]: pgmap v185: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 6.4 KiB/s rd, 149 B/s wr, 14 op/s; 112 B/s, 5 objects/s recovering
Oct 11 01:45:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "11"}]': finished
Oct 11 01:45:47 compute-0 ceph-mon[191930]: osdmap e80: 3 total, 3 up, 3 in
Oct 11 01:45:48 compute-0 podman[224461]: 2025-10-11 01:45:48.10656838 +0000 UTC m=+0.137224240 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.4, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
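The health_status=healthy events like the one above come from podman's periodic healthcheck timer running the configured test command. The same check can be triggered once by hand; a small sketch using the standard podman CLI (container name copied from the log):

    import subprocess

    name = "ceilometer_agent_compute"
    # Run the container's configured healthcheck once;
    # exit code 0 means healthy, non-zero means unhealthy.
    result = subprocess.run(["podman", "healthcheck", "run", name])
    print("healthy" if result.returncode == 0 else "unhealthy")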
Oct 11 01:45:48 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.11 scrub starts
Oct 11 01:45:48 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.11 scrub ok
Oct 11 01:45:48 compute-0 python3.9[224501]: ansible-ansible.builtin.service_facts Invoked
Oct 11 01:45:48 compute-0 network[224523]: You are using 'network' service provided by 'network-scripts', which are now deprecated.
Oct 11 01:45:48 compute-0 network[224524]: 'network-scripts' will be removed from distribution in near future.
Oct 11 01:45:48 compute-0 network[224525]: It is advised to switch to 'NetworkManager' instead for network management.
Oct 11 01:45:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v187: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 5.6 KiB/s rd, 0 B/s wr, 11 op/s; 98 B/s, 4 objects/s recovering
Oct 11 01:45:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "12"} v 0) v1
Oct 11 01:45:48 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "12"}]: dispatch
Oct 11 01:45:48 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.6 scrub starts
Oct 11 01:45:48 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.6 scrub ok
Oct 11 01:45:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e80 do_prune osdmap full prune enabled
Oct 11 01:45:48 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "12"}]': finished
Oct 11 01:45:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e81 e81: 3 total, 3 up, 3 in
Oct 11 01:45:48 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e81: 3 total, 3 up, 3 in
Oct 11 01:45:48 compute-0 ceph-mon[191930]: 5.1e scrub starts
Oct 11 01:45:48 compute-0 ceph-mon[191930]: 5.1e scrub ok
Oct 11 01:45:48 compute-0 ceph-mon[191930]: 6.2 scrub starts
Oct 11 01:45:48 compute-0 ceph-mon[191930]: 6.2 scrub ok
Oct 11 01:45:48 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "12"}]: dispatch
Oct 11 01:45:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e81 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:45:49 compute-0 ceph-mon[191930]: 7.11 scrub starts
Oct 11 01:45:49 compute-0 ceph-mon[191930]: 7.11 scrub ok
Oct 11 01:45:49 compute-0 ceph-mon[191930]: pgmap v187: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail; 5.6 KiB/s rd, 0 B/s wr, 11 op/s; 98 B/s, 4 objects/s recovering
Oct 11 01:45:49 compute-0 ceph-mon[191930]: 6.6 scrub starts
Oct 11 01:45:49 compute-0 ceph-mon[191930]: 6.6 scrub ok
Oct 11 01:45:49 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "12"}]': finished
Oct 11 01:45:49 compute-0 ceph-mon[191930]: osdmap e81: 3 total, 3 up, 3 in
Oct 11 01:45:50 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.1c scrub starts
Oct 11 01:45:50 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.1c scrub ok
Oct 11 01:45:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v189: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "13"} v 0) v1
Oct 11 01:45:50 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "13"}]: dispatch
Oct 11 01:45:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e81 do_prune osdmap full prune enabled
Oct 11 01:45:50 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "13"}]': finished
Oct 11 01:45:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e82 e82: 3 total, 3 up, 3 in
Oct 11 01:45:50 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e82: 3 total, 3 up, 3 in
Oct 11 01:45:50 compute-0 ceph-mon[191930]: 7.1c scrub starts
Oct 11 01:45:50 compute-0 ceph-mon[191930]: 7.1c scrub ok
Oct 11 01:45:50 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "13"}]: dispatch
Oct 11 01:45:51 compute-0 ceph-mon[191930]: pgmap v189: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:51 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "13"}]': finished
Oct 11 01:45:51 compute-0 ceph-mon[191930]: osdmap e82: 3 total, 3 up, 3 in
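Each handle_command / dispatch / finished triplet above is the mgr stepping pgp_num_actual on default.rgw.log up by one, producing a new osdmap epoch each time. The same change can be made by hand with the standard ceph CLI; a sketch that just shells out (pool, var, and value copied from the log):

    import subprocess

    def set_pgp_num_actual(pool, value):
        # Equivalent of the mon_command seen in the log:
        # {"prefix": "osd pool set", "pool": pool,
        #  "var": "pgp_num_actual", "val": str(value)}
        subprocess.run(["ceph", "osd", "pool", "set", pool,
                        "pgp_num_actual", str(value)], check=True)

    set_pgp_num_actual("default.rgw.log", 13)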
Oct 11 01:45:52 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 82 pg[9.c( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=82 pruub=9.726855278s) [2] r=-1 lpr=82 pi=[56,82)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 163.311309814s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:52 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 82 pg[9.c( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=82 pruub=9.726771355s) [2] r=-1 lpr=82 pi=[56,82)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 163.311309814s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:52 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 82 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=82 pruub=9.733068466s) [2] r=-1 lpr=82 pi=[56,82)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 163.317871094s@ mbc={}] start_peering_interval up [1] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:52 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 82 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=82 pruub=9.732996941s) [2] r=-1 lpr=82 pi=[56,82)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 163.317871094s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:52 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 82 pg[9.c( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=82) [2] r=0 lpr=82 pi=[56,82)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:52 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 82 pg[9.1c( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=82) [2] r=0 lpr=82 pi=[56,82)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v191: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "14"} v 0) v1
Oct 11 01:45:52 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "14"}]: dispatch
Oct 11 01:45:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e82 do_prune osdmap full prune enabled
Oct 11 01:45:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "14"}]: dispatch
Oct 11 01:45:52 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "14"}]': finished
Oct 11 01:45:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e83 e83: 3 total, 3 up, 3 in
Oct 11 01:45:52 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e83: 3 total, 3 up, 3 in
Oct 11 01:45:52 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 83 pg[9.c( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=83) [2]/[1] r=-1 lpr=83 pi=[56,83)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:52 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 83 pg[9.c( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=83) [2]/[1] r=-1 lpr=83 pi=[56,83)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:52 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 83 pg[9.1c( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=83) [2]/[1] r=-1 lpr=83 pi=[56,83)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:52 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 83 pg[9.1c( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=83) [2]/[1] r=-1 lpr=83 pi=[56,83)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:52 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 83 pg[9.c( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=83) [2]/[1] r=0 lpr=83 pi=[56,83)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:52 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 83 pg[9.c( v 53'585 (0'0,53'585] local-lis/les=56/57 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=83) [2]/[1] r=0 lpr=83 pi=[56,83)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:52 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 83 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=83) [2]/[1] r=0 lpr=83 pi=[56,83)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:52 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 83 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=56/57 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=83) [2]/[1] r=0 lpr=83 pi=[56,83)/1 crt=53'585 lcod 0'0 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
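The osd lines above record PG 9.c and 9.1c repeatedly changing peering intervals as the up/acting sets move between osd.1 and osd.2. When auditing a burst like this, it helps to pull out just the set transitions; a throwaway parser tuned to exactly these lines (not a general Ceph log grammar):

    import re

    PEERING = re.compile(
        r"osd\.(?P<osd>\d+) pg_epoch: (?P<epoch>\d+) "
        r"pg\[(?P<pgid>[0-9a-f]+\.[0-9a-f]+)\(.*"
        r"start_peering_interval up \[(?P<up_from>[\d,]*)\] -> "
        r"\[(?P<up_to>[\d,]*)\], acting \[(?P<acting_from>[\d,]*)\] -> "
        r"\[(?P<acting_to>[\d,]*)\]")

    line = ("osd.2 pg_epoch: 83 pg[9.c( empty local-lis/les=0/0 ) ] "
            "start_peering_interval up [2] -> [2], acting [2] -> [1], "
            "acting_primary 2 -> 1")
    m = PEERING.search(line)
    if m:
        print(m.group("pgid"),
              "acting", m.group("acting_from"), "->", m.group("acting_to"))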
Oct 11 01:45:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e83 do_prune osdmap full prune enabled
Oct 11 01:45:53 compute-0 ceph-mon[191930]: pgmap v191: 321 pgs: 321 active+clean; 456 KiB data, 103 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:53 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "14"}]': finished
Oct 11 01:45:53 compute-0 ceph-mon[191930]: osdmap e83: 3 total, 3 up, 3 in
Oct 11 01:45:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e84 e84: 3 total, 3 up, 3 in
Oct 11 01:45:54 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e84: 3 total, 3 up, 3 in
Oct 11 01:45:54 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 84 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=83/84 n=6 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=83) [2]/[1] async=[2] r=0 lpr=83 pi=[56,83)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=7}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:54 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 84 pg[9.c( v 53'585 (0'0,53'585] local-lis/les=83/84 n=7 ec=56/47 lis/c=56/56 les/c/f=57/57/0 sis=83) [2]/[1] async=[2] r=0 lpr=83 pi=[56,83)/1 crt=53'585 lcod 0'0 mlcod 0'0 active+remapped mbc={255={(0+1)=5}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v194: 321 pgs: 2 unknown, 319 active+clean; 456 KiB data, 104 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:54 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.4 scrub starts
Oct 11 01:45:54 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.4 scrub ok
Oct 11 01:45:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e84 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:45:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e84 do_prune osdmap full prune enabled
Oct 11 01:45:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e85 e85: 3 total, 3 up, 3 in
Oct 11 01:45:54 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e85: 3 total, 3 up, 3 in
Oct 11 01:45:54 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 85 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=83/56 les/c/f=84/57/0 sis=85) [2] r=0 lpr=85 pi=[56,85)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [2] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:54 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 85 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=83/56 les/c/f=84/57/0 sis=85) [2] r=0 lpr=85 pi=[56,85)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:54 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 85 pg[9.c( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=83/56 les/c/f=84/57/0 sis=85) [2] r=0 lpr=85 pi=[56,85)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [2] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:54 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 85 pg[9.c( v 53'585 (0'0,53'585] local-lis/les=0/0 n=7 ec=56/47 lis/c=83/56 les/c/f=84/57/0 sis=85) [2] r=0 lpr=85 pi=[56,85)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:45:54 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 85 pg[9.c( v 53'585 (0'0,53'585] local-lis/les=83/84 n=7 ec=56/47 lis/c=83/56 les/c/f=84/57/0 sis=85 pruub=15.476942062s) [2] async=[2] r=-1 lpr=85 pi=[56,85)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 171.277236938s@ mbc={255={}}] start_peering_interval up [2] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:54 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 85 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=83/84 n=6 ec=56/47 lis/c=83/56 les/c/f=84/57/0 sis=85 pruub=15.476865768s) [2] async=[2] r=-1 lpr=85 pi=[56,85)/1 crt=53'585 lcod 0'0 mlcod 0'0 active pruub 171.277191162s@ mbc={255={}}] start_peering_interval up [2] -> [2], acting [1] -> [2], acting_primary 1 -> 2, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:45:54 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 85 pg[9.c( v 53'585 (0'0,53'585] local-lis/les=83/84 n=7 ec=56/47 lis/c=83/56 les/c/f=84/57/0 sis=85 pruub=15.476826668s) [2] r=-1 lpr=85 pi=[56,85)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 171.277236938s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:54 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 85 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=83/84 n=6 ec=56/47 lis/c=83/56 les/c/f=84/57/0 sis=85 pruub=15.476740837s) [2] r=-1 lpr=85 pi=[56,85)/1 crt=53'585 lcod 0'0 mlcod 0'0 unknown NOTIFY pruub 171.277191162s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:45:54 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 5.5 scrub starts
Oct 11 01:45:54 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 5.5 scrub ok
Oct 11 01:45:54 compute-0 ceph-mon[191930]: osdmap e84: 3 total, 3 up, 3 in
Oct 11 01:45:55 compute-0 ceph-mon[191930]: 6.4 scrub starts
Oct 11 01:45:55 compute-0 ceph-mon[191930]: 6.4 scrub ok
Oct 11 01:45:55 compute-0 ceph-mon[191930]: osdmap e85: 3 total, 3 up, 3 in
Oct 11 01:45:55 compute-0 python3.9[224798]: ansible-ansible.builtin.lineinfile Invoked with line=cloud-init=disabled path=/proc/cmdline state=present backrefs=False create=False backup=False firstmatch=False unsafe_writes=False regexp=None search_string=None insertafter=None insertbefore=None validate=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
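The lineinfile task above targets /proc/cmdline, which is read-only, so it works here as a presence probe: it reports "ok" if cloud-init=disabled is already on the kernel command line and fails otherwise. The direct check is two lines:

    # Was the node booted with cloud-init=disabled on the kernel command line?
    with open("/proc/cmdline") as f:
        tokens = f.read().split()
    print("cloud-init=disabled" in tokens)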
Oct 11 01:45:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e85 do_prune osdmap full prune enabled
Oct 11 01:45:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e86 e86: 3 total, 3 up, 3 in
Oct 11 01:45:55 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e86: 3 total, 3 up, 3 in
Oct 11 01:45:55 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 86 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=85/86 n=6 ec=56/47 lis/c=83/56 les/c/f=84/57/0 sis=85) [2] r=0 lpr=85 pi=[56,85)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:55 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 86 pg[9.c( v 53'585 (0'0,53'585] local-lis/les=85/86 n=7 ec=56/47 lis/c=83/56 les/c/f=84/57/0 sis=85) [2] r=0 lpr=85 pi=[56,85)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:45:56 compute-0 ceph-mon[191930]: pgmap v194: 321 pgs: 2 unknown, 319 active+clean; 456 KiB data, 104 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:56 compute-0 ceph-mon[191930]: 5.5 scrub starts
Oct 11 01:45:56 compute-0 ceph-mon[191930]: 5.5 scrub ok
Oct 11 01:45:56 compute-0 ceph-mon[191930]: osdmap e86: 3 total, 3 up, 3 in
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:45:56
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Some PGs (0.006231) are unknown; try again later
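The balancer's 0.006231 is just the unknown-PG fraction from the pgmap a few lines up (2 unknown out of 321); any unknown PGs make it defer optimization regardless of the 0.05 max-misplaced threshold. Checking the arithmetic:

    unknown, total = 2, 321     # "2 unknown, 319 active+clean" of 321 pgs
    frac = unknown / total
    print(f"{frac:.6f}")        # 0.006231, matching the balancer message
    print(frac > 0.05)          # False; it defers because of the unknowns,
                                # not because the threshold was exceeded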
Oct 11 01:45:56 compute-0 python3.9[224948]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v197: 321 pgs: 2 unknown, 319 active+clean; 456 KiB data, 104 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:45:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:45:56 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.1 scrub starts
Oct 11 01:45:56 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.1 scrub ok
Oct 11 01:45:57 compute-0 ceph-mon[191930]: 6.1 scrub starts
Oct 11 01:45:57 compute-0 ceph-mon[191930]: 6.1 scrub ok
Oct 11 01:45:57 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 4.7 scrub starts
Oct 11 01:45:57 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 4.7 scrub ok
Oct 11 01:45:58 compute-0 ceph-mon[191930]: pgmap v197: 321 pgs: 2 unknown, 319 active+clean; 456 KiB data, 104 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:45:58 compute-0 ceph-mon[191930]: 4.7 scrub starts
Oct 11 01:45:58 compute-0 ceph-mon[191930]: 4.7 scrub ok
Oct 11 01:45:58 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.11 deep-scrub starts
Oct 11 01:45:58 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.11 deep-scrub ok
Oct 11 01:45:58 compute-0 python3.9[225102]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local', 'distribution'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:45:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v198: 321 pgs: 321 active+clean; 456 KiB data, 104 MiB used, 60 GiB / 60 GiB avail; 8.0 KiB/s rd, 373 B/s wr, 18 op/s; 20 B/s, 2 objects/s recovering
Oct 11 01:45:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "15"} v 0) v1
Oct 11 01:45:58 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "15"}]: dispatch
Oct 11 01:45:58 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.9 scrub starts
Oct 11 01:45:58 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.9 scrub ok
Oct 11 01:45:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e86 do_prune osdmap full prune enabled
Oct 11 01:45:59 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "15"}]': finished
Oct 11 01:45:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e87 e87: 3 total, 3 up, 3 in
Oct 11 01:45:59 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e87: 3 total, 3 up, 3 in
Oct 11 01:45:59 compute-0 ceph-mon[191930]: 3.11 deep-scrub starts
Oct 11 01:45:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "15"}]: dispatch
Oct 11 01:45:59 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.15 scrub starts
Oct 11 01:45:59 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.15 scrub ok
Oct 11 01:45:59 compute-0 sudo[225258]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tyvriqrmtkitapazcvbvuxxpjvexwqpu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147158.8362849-116-223040465473584/AnsiballZ_setup.py'
Oct 11 01:45:59 compute-0 sudo[225258]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:45:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e87 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:45:59 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.f scrub starts
Oct 11 01:45:59 compute-0 podman[157119]: time="2025-10-11T01:45:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:45:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:45:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:45:59 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.f scrub ok
Oct 11 01:45:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:45:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6800 "" "Go-http-client/1.1"
Oct 11 01:45:59 compute-0 python3.9[225260]: ansible-ansible.legacy.setup Invoked with filter=['ansible_pkg_mgr'] gather_subset=['!all'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:46:00 compute-0 ceph-mon[191930]: 3.11 deep-scrub ok
Oct 11 01:46:00 compute-0 ceph-mon[191930]: pgmap v198: 321 pgs: 321 active+clean; 456 KiB data, 104 MiB used, 60 GiB / 60 GiB avail; 8.0 KiB/s rd, 373 B/s wr, 18 op/s; 20 B/s, 2 objects/s recovering
Oct 11 01:46:00 compute-0 ceph-mon[191930]: 7.9 scrub starts
Oct 11 01:46:00 compute-0 ceph-mon[191930]: 7.9 scrub ok
Oct 11 01:46:00 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "15"}]': finished
Oct 11 01:46:00 compute-0 ceph-mon[191930]: osdmap e87: 3 total, 3 up, 3 in
Oct 11 01:46:00 compute-0 ceph-mon[191930]: 7.15 scrub starts
Oct 11 01:46:00 compute-0 ceph-mon[191930]: 7.15 scrub ok
Oct 11 01:46:00 compute-0 sudo[225258]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v200: 321 pgs: 321 active+clean; 456 KiB data, 104 MiB used, 60 GiB / 60 GiB avail; 7.3 KiB/s rd, 341 B/s wr, 16 op/s; 18 B/s, 1 objects/s recovering
Oct 11 01:46:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "16"} v 0) v1
Oct 11 01:46:00 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "16"}]: dispatch
Oct 11 01:46:00 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.6 scrub starts
Oct 11 01:46:00 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.6 scrub ok
Oct 11 01:46:00 compute-0 sudo[225343]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xcmqlyvtrgtmnphjydnhiturwfgomjwr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147158.8362849-116-223040465473584/AnsiballZ_dnf.py'
Oct 11 01:46:00 compute-0 sudo[225343]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:46:01 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.18 scrub starts
Oct 11 01:46:01 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.18 scrub ok
Oct 11 01:46:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e87 do_prune osdmap full prune enabled
Oct 11 01:46:01 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "16"}]': finished
Oct 11 01:46:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e88 e88: 3 total, 3 up, 3 in
Oct 11 01:46:01 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e88: 3 total, 3 up, 3 in
Oct 11 01:46:01 compute-0 ceph-mon[191930]: 7.f scrub starts
Oct 11 01:46:01 compute-0 ceph-mon[191930]: 7.f scrub ok
Oct 11 01:46:01 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "16"}]: dispatch
Oct 11 01:46:01 compute-0 python3.9[225345]: ansible-ansible.legacy.dnf Invoked with name=['driverctl', 'lvm2', 'crudini', 'jq', 'nftables', 'NetworkManager', 'openstack-selinux', 'python3-libselinux', 'python3-pyyaml', 'rsync', 'tmpwatch', 'sysstat', 'iproute-tc', 'ksmtuned', 'systemd-container', 'crypto-policies-scripts', 'grubby', 'sos'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
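The dnf task above installs a fixed package set with state=present, which is idempotent for anything already installed. The same outcome outside Ansible, with the package list copied verbatim from the log line:

    import subprocess

    packages = ["driverctl", "lvm2", "crudini", "jq", "nftables",
                "NetworkManager", "openstack-selinux", "python3-libselinux",
                "python3-pyyaml", "rsync", "tmpwatch", "sysstat", "iproute-tc",
                "ksmtuned", "systemd-container", "crypto-policies-scripts",
                "grubby", "sos"]
    # `dnf install -y` is a no-op for packages already at the wanted version,
    # mirroring state=present.
    subprocess.run(["dnf", "install", "-y", *packages], check=True)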
Oct 11 01:46:01 compute-0 openstack_network_exporter[159265]: ERROR   01:46:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:46:01 compute-0 openstack_network_exporter[159265]: ERROR   01:46:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:46:01 compute-0 openstack_network_exporter[159265]: ERROR   01:46:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:46:01 compute-0 openstack_network_exporter[159265]: ERROR   01:46:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath

Oct 11 01:46:01 compute-0 openstack_network_exporter[159265]: ERROR   01:46:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:46:01 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.e scrub starts
Oct 11 01:46:01 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.e scrub ok
Oct 11 01:46:01 compute-0 anacron[26555]: Job `cron.daily' started
Oct 11 01:46:01 compute-0 anacron[26555]: Job `cron.daily' terminated
Oct 11 01:46:02 compute-0 ceph-mon[191930]: pgmap v200: 321 pgs: 321 active+clean; 456 KiB data, 104 MiB used, 60 GiB / 60 GiB avail; 7.3 KiB/s rd, 341 B/s wr, 16 op/s; 18 B/s, 1 objects/s recovering
Oct 11 01:46:02 compute-0 ceph-mon[191930]: 7.6 scrub starts
Oct 11 01:46:02 compute-0 ceph-mon[191930]: 7.6 scrub ok
Oct 11 01:46:02 compute-0 ceph-mon[191930]: 3.18 scrub starts
Oct 11 01:46:02 compute-0 ceph-mon[191930]: 3.18 scrub ok
Oct 11 01:46:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "16"}]': finished
Oct 11 01:46:02 compute-0 ceph-mon[191930]: osdmap e88: 3 total, 3 up, 3 in
Oct 11 01:46:02 compute-0 ceph-mon[191930]: 6.e scrub starts
Oct 11 01:46:02 compute-0 ceph-mon[191930]: 6.e scrub ok
Oct 11 01:46:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v202: 321 pgs: 321 active+clean; 456 KiB data, 104 MiB used, 60 GiB / 60 GiB avail; 6.4 KiB/s rd, 299 B/s wr, 14 op/s; 16 B/s, 1 objects/s recovering
Oct 11 01:46:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "17"} v 0) v1
Oct 11 01:46:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "17"}]: dispatch
Oct 11 01:46:02 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.b scrub starts
Oct 11 01:46:02 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.b scrub ok
Oct 11 01:46:03 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.a deep-scrub starts
Oct 11 01:46:03 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.a deep-scrub ok
Oct 11 01:46:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e88 do_prune osdmap full prune enabled
Oct 11 01:46:03 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "17"}]: dispatch
Oct 11 01:46:03 compute-0 ceph-mon[191930]: 6.b scrub starts
Oct 11 01:46:03 compute-0 ceph-mon[191930]: 6.b scrub ok
Oct 11 01:46:03 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "17"}]': finished
Oct 11 01:46:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e89 e89: 3 total, 3 up, 3 in
Oct 11 01:46:03 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e89: 3 total, 3 up, 3 in
Oct 11 01:46:03 compute-0 podman[225394]: 2025-10-11 01:46:03.253289067 +0000 UTC m=+0.137856440 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 01:46:03 compute-0 podman[225395]: 2025-10-11 01:46:03.279364097 +0000 UTC m=+0.164394371 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.33.7, io.openshift.tags=minimal rhel9, name=ubi9-minimal, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., architecture=x86_64, build-date=2025-08-20T13:12:41, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, version=9.6, container_name=openstack_network_exporter, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, distribution-scope=public, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, maintainer=Red Hat, Inc., vcs-type=git, managed_by=edpm_ansible, com.redhat.component=ubi9-minimal-container, io.openshift.expose-services=, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, url=https://catalog.redhat.com/en/search?searchType=containers)
Oct 11 01:46:03 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 4.8 scrub starts
Oct 11 01:46:03 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 4.8 scrub ok
Oct 11 01:46:03 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.3 scrub starts
Oct 11 01:46:03 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.3 scrub ok
Oct 11 01:46:04 compute-0 ceph-mon[191930]: pgmap v202: 321 pgs: 321 active+clean; 456 KiB data, 104 MiB used, 60 GiB / 60 GiB avail; 6.4 KiB/s rd, 299 B/s wr, 14 op/s; 16 B/s, 1 objects/s recovering
Oct 11 01:46:04 compute-0 ceph-mon[191930]: 7.a deep-scrub starts
Oct 11 01:46:04 compute-0 ceph-mon[191930]: 7.a deep-scrub ok
Oct 11 01:46:04 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "17"}]': finished
Oct 11 01:46:04 compute-0 ceph-mon[191930]: osdmap e89: 3 total, 3 up, 3 in
Oct 11 01:46:04 compute-0 ceph-mon[191930]: 4.8 scrub starts
Oct 11 01:46:04 compute-0 ceph-mon[191930]: 4.8 scrub ok
Oct 11 01:46:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v204: 321 pgs: 321 active+clean; 456 KiB data, 104 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "18"} v 0) v1
Oct 11 01:46:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "18"}]: dispatch
Oct 11 01:46:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e89 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:46:04 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.4 scrub starts
Oct 11 01:46:04 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.4 scrub ok
Oct 11 01:46:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e89 do_prune osdmap full prune enabled
Oct 11 01:46:05 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "18"}]': finished
Oct 11 01:46:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e90 e90: 3 total, 3 up, 3 in
Oct 11 01:46:05 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e90: 3 total, 3 up, 3 in
Oct 11 01:46:05 compute-0 ceph-mon[191930]: 3.3 scrub starts
Oct 11 01:46:05 compute-0 ceph-mon[191930]: 3.3 scrub ok
Oct 11 01:46:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "18"}]: dispatch
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
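Every "pg target" figure in the autoscaler block above falls out of one relation: usage ratio times bias times a root target of 300 PGs. The 300 is an inference from the numbers, consistent with the default mon_target_pg_per_osd of 100 times this cluster's 3 OSDs; the log does not state it directly. Reproducing two of the lines:

    TOTAL = 64411926528      # capacity from the effective_target_ratio lines
    ROOT_PG_TARGET = 300     # assumed: mon_target_pg_per_osd (100) * 3 OSDs

    def pg_target(usage_ratio, bias):
        return usage_ratio * bias * ROOT_PG_TARGET

    # '.mgr': logged as pg target 0.0021557249951162337
    print(pg_target(7.185749983720779e-06, 1.0))
    # 'cephfs.cephfs.meta' (bias 4.0): logged as 0.0006104707950771635
    print(pg_target(5.087256625643029e-07, 4.0))

Both values match the log, which is why every nearly empty pool quantizes to its floor (32, or 16 for the meta pool with its 4.0 bias and tiny usage).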
Oct 11 01:46:06 compute-0 ceph-mon[191930]: pgmap v204: 321 pgs: 321 active+clean; 456 KiB data, 104 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:06 compute-0 ceph-mon[191930]: 7.4 scrub starts
Oct 11 01:46:06 compute-0 ceph-mon[191930]: 7.4 scrub ok
Oct 11 01:46:06 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "18"}]': finished
Oct 11 01:46:06 compute-0 ceph-mon[191930]: osdmap e90: 3 total, 3 up, 3 in
Oct 11 01:46:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v206: 321 pgs: 321 active+clean; 456 KiB data, 104 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "19"} v 0) v1
Oct 11 01:46:06 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "19"}]: dispatch
Oct 11 01:46:06 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.17 scrub starts
Oct 11 01:46:06 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.17 scrub ok
Oct 11 01:46:06 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.c scrub starts
Oct 11 01:46:06 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.c scrub ok
Oct 11 01:46:07 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.5 scrub starts
Oct 11 01:46:07 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.5 scrub ok
Oct 11 01:46:07 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e90 do_prune osdmap full prune enabled
Oct 11 01:46:07 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "19"}]: dispatch
Oct 11 01:46:07 compute-0 ceph-mon[191930]: 6.17 scrub starts
Oct 11 01:46:07 compute-0 ceph-mon[191930]: 6.17 scrub ok
Oct 11 01:46:07 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "19"}]': finished
Oct 11 01:46:07 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e91 e91: 3 total, 3 up, 3 in
Oct 11 01:46:07 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e91: 3 total, 3 up, 3 in
Oct 11 01:46:07 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 4.14 scrub starts
Oct 11 01:46:07 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 4.14 scrub ok
Oct 11 01:46:08 compute-0 ceph-mon[191930]: pgmap v206: 321 pgs: 321 active+clean; 456 KiB data, 104 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:08 compute-0 ceph-mon[191930]: 3.c scrub starts
Oct 11 01:46:08 compute-0 ceph-mon[191930]: 3.c scrub ok
Oct 11 01:46:08 compute-0 ceph-mon[191930]: 7.5 scrub starts
Oct 11 01:46:08 compute-0 ceph-mon[191930]: 7.5 scrub ok
Oct 11 01:46:08 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "19"}]': finished
Oct 11 01:46:08 compute-0 ceph-mon[191930]: osdmap e91: 3 total, 3 up, 3 in
Oct 11 01:46:08 compute-0 ceph-mon[191930]: 4.14 scrub starts
Oct 11 01:46:08 compute-0 ceph-mon[191930]: 4.14 scrub ok
Oct 11 01:46:08 compute-0 podman[225456]: 2025-10-11 01:46:08.258002093 +0000 UTC m=+0.148589288 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 01:46:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v208: 321 pgs: 321 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:08 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "20"} v 0) v1
Oct 11 01:46:08 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "20"}]: dispatch
Oct 11 01:46:08 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 4.12 scrub starts
Oct 11 01:46:08 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 4.12 scrub ok
Oct 11 01:46:08 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.3 scrub starts
Oct 11 01:46:08 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.3 scrub ok
Oct 11 01:46:09 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.16 scrub starts
Oct 11 01:46:09 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.16 scrub ok
Oct 11 01:46:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e91 do_prune osdmap full prune enabled
Oct 11 01:46:09 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "20"}]: dispatch
Oct 11 01:46:09 compute-0 ceph-mon[191930]: 4.12 scrub starts
Oct 11 01:46:09 compute-0 ceph-mon[191930]: 4.12 scrub ok
Oct 11 01:46:09 compute-0 ceph-mon[191930]: 3.16 scrub starts
Oct 11 01:46:09 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "20"}]': finished
Oct 11 01:46:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e92 e92: 3 total, 3 up, 3 in
Oct 11 01:46:09 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e92: 3 total, 3 up, 3 in
Oct 11 01:46:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e92 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:46:09 compute-0 sudo[225476]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:46:09 compute-0 sudo[225476]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:09 compute-0 sudo[225476]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:10 compute-0 sudo[225501]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:46:10 compute-0 sudo[225501]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:10 compute-0 sudo[225501]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:10 compute-0 sudo[225526]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:46:10 compute-0 sudo[225526]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:10 compute-0 sudo[225526]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:10 compute-0 ceph-mon[191930]: pgmap v208: 321 pgs: 321 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:10 compute-0 ceph-mon[191930]: 7.3 scrub starts
Oct 11 01:46:10 compute-0 ceph-mon[191930]: 7.3 scrub ok
Oct 11 01:46:10 compute-0 ceph-mon[191930]: 3.16 scrub ok
Oct 11 01:46:10 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "20"}]': finished
Oct 11 01:46:10 compute-0 ceph-mon[191930]: osdmap e92: 3 total, 3 up, 3 in
Oct 11 01:46:10 compute-0 sudo[225551]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ls
Oct 11 01:46:10 compute-0 sudo[225551]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v210: 321 pgs: 321 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "21"} v 0) v1
Oct 11 01:46:10 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "21"}]: dispatch
Oct 11 01:46:10 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 4.10 scrub starts
Oct 11 01:46:10 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 4.10 scrub ok
Oct 11 01:46:10 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.6 scrub starts
Oct 11 01:46:10 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.6 scrub ok
Oct 11 01:46:11 compute-0 podman[225646]: 2025-10-11 01:46:11.223378768 +0000 UTC m=+0.142643164 container exec ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:46:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e92 do_prune osdmap full prune enabled
Oct 11 01:46:11 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "21"}]: dispatch
Oct 11 01:46:11 compute-0 ceph-mon[191930]: 4.10 scrub starts
Oct 11 01:46:11 compute-0 ceph-mon[191930]: 4.10 scrub ok
Oct 11 01:46:11 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "21"}]': finished
Oct 11 01:46:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e93 e93: 3 total, 3 up, 3 in
Oct 11 01:46:11 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e93: 3 total, 3 up, 3 in
Oct 11 01:46:11 compute-0 podman[225646]: 2025-10-11 01:46:11.387760608 +0000 UTC m=+0.307025014 container exec_died ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:46:11 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 92 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=63/64 n=6 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=92 pruub=11.438323975s) [2] r=-1 lpr=92 pi=[63,92)/1 crt=53'585 mlcod 0'0 active pruub 191.917404175s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:11 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 93 pg[9.13( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=92) [2] r=0 lpr=93 pi=[63,92)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:11 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 93 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=63/64 n=6 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=92 pruub=11.438224792s) [2] r=-1 lpr=92 pi=[63,92)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 191.917404175s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:12 compute-0 ceph-mon[191930]: pgmap v210: 321 pgs: 321 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:12 compute-0 ceph-mon[191930]: 3.6 scrub starts
Oct 11 01:46:12 compute-0 ceph-mon[191930]: 3.6 scrub ok
Oct 11 01:46:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "21"}]': finished
Oct 11 01:46:12 compute-0 ceph-mon[191930]: osdmap e93: 3 total, 3 up, 3 in
Oct 11 01:46:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e93 do_prune osdmap full prune enabled
Oct 11 01:46:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e94 e94: 3 total, 3 up, 3 in
Oct 11 01:46:12 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e94: 3 total, 3 up, 3 in
Oct 11 01:46:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 94 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=63/64 n=6 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=94) [2]/[0] r=0 lpr=94 pi=[63,94)/1 crt=53'585 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:12 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 94 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=63/64 n=6 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=94) [2]/[0] r=0 lpr=94 pi=[63,94)/1 crt=53'585 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 94 pg[9.13( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=94) [2]/[0] r=-1 lpr=94 pi=[63,94)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:12 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 94 pg[9.13( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=94) [2]/[0] r=-1 lpr=94 pi=[63,94)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v213: 321 pgs: 321 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "22"} v 0) v1
Oct 11 01:46:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "22"}]: dispatch
Oct 11 01:46:12 compute-0 sudo[225551]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:46:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:46:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:46:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:46:12 compute-0 sudo[225811]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:46:12 compute-0 sudo[225811]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:12 compute-0 sudo[225811]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:12 compute-0 sudo[225836]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:46:12 compute-0 sudo[225836]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:12 compute-0 sudo[225836]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:13 compute-0 sudo[225861]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:46:13 compute-0 sudo[225861]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:13 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.e scrub starts
Oct 11 01:46:13 compute-0 sudo[225861]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:13 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.e scrub ok
Oct 11 01:46:13 compute-0 sudo[225886]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 01:46:13 compute-0 sudo[225886]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e94 do_prune osdmap full prune enabled
Oct 11 01:46:13 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "22"}]': finished
Oct 11 01:46:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e95 e95: 3 total, 3 up, 3 in
Oct 11 01:46:13 compute-0 ceph-mon[191930]: osdmap e94: 3 total, 3 up, 3 in
Oct 11 01:46:13 compute-0 ceph-mon[191930]: pgmap v213: 321 pgs: 321 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "22"}]: dispatch
Oct 11 01:46:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:46:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:46:13 compute-0 ceph-mon[191930]: 3.e scrub starts
Oct 11 01:46:13 compute-0 ceph-mon[191930]: 3.e scrub ok
Oct 11 01:46:13 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e95: 3 total, 3 up, 3 in
Oct 11 01:46:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 95 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=95 pruub=11.059928894s) [1] r=-1 lpr=95 pi=[64,95)/1 crt=53'585 mlcod 0'0 active pruub 192.908676147s@ mbc={}] start_peering_interval up [0] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 0 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:13 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 95 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=95 pruub=11.059857368s) [1] r=-1 lpr=95 pi=[64,95)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 192.908676147s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:13 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 95 pg[9.15( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=95) [1] r=0 lpr=95 pi=[64,95)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:13 compute-0 sudo[225886]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:46:13 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:46:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:46:13 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:46:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:46:13 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:46:13 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev be7a3581-9dbc-40cc-afda-e792f9b2225a does not exist
Oct 11 01:46:13 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 24bacb4f-e237-4523-848f-ebead24420d6 does not exist
Oct 11 01:46:13 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 0f7c3a8c-bab4-4f1f-b572-d15d415a38c8 does not exist
Oct 11 01:46:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:46:13 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:46:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:46:13 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:46:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:46:13 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:46:14 compute-0 sudo[225941]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:46:14 compute-0 sudo[225941]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:14 compute-0 sudo[225941]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:14 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 95 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=94/95 n=6 ec=56/47 lis/c=63/63 les/c/f=64/64/0 sis=94) [2]/[0] async=[2] r=0 lpr=94 pi=[63,94)/1 crt=53'585 mlcod 0'0 active+remapped mbc={255={(0+1)=5}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:46:14 compute-0 sudo[225986]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:46:14 compute-0 podman[225965]: 2025-10-11 01:46:14.164605874 +0000 UTC m=+0.123826519 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 01:46:14 compute-0 sudo[225986]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:14 compute-0 sudo[225986]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:14 compute-0 podman[225967]: 2025-10-11 01:46:14.174336282 +0000 UTC m=+0.119714426 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, summary=Provides the latest release of Red Hat Universal Base Image 9., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, name=ubi9, io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, vendor=Red Hat, Inc., version=9.4, io.openshift.expose-services=, build-date=2024-09-18T21:23:30, distribution-scope=public, io.openshift.tags=base rhel9, container_name=kepler, architecture=x86_64, config_id=edpm, com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.29.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1214.1726694543, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., managed_by=edpm_ansible)
Oct 11 01:46:14 compute-0 podman[225966]: 2025-10-11 01:46:14.216374155 +0000 UTC m=+0.159505087 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_controller, tcib_managed=true, container_name=ovn_controller, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 01:46:14 compute-0 sudo[226049]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:46:14 compute-0 sudo[226049]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:14 compute-0 sudo[226049]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e95 do_prune osdmap full prune enabled
Oct 11 01:46:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e96 e96: 3 total, 3 up, 3 in
Oct 11 01:46:14 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e96: 3 total, 3 up, 3 in
Oct 11 01:46:14 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 96 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=94/63 les/c/f=95/64/0 sis=96) [2] r=0 lpr=96 pi=[63,96)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [2] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:14 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 96 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=94/63 les/c/f=95/64/0 sis=96) [2] r=0 lpr=96 pi=[63,96)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:14 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 96 pg[9.15( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=96) [1]/[0] r=-1 lpr=96 pi=[64,96)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [1] -> [1], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:14 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 96 pg[9.15( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=96) [1]/[0] r=-1 lpr=96 pi=[64,96)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:14 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 96 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=94/95 n=6 ec=56/47 lis/c=94/63 les/c/f=95/64/0 sis=96 pruub=15.750972748s) [2] async=[2] r=-1 lpr=96 pi=[63,96)/1 crt=53'585 mlcod 53'585 active pruub 198.618499756s@ mbc={255={}}] start_peering_interval up [2] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:14 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 96 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=94/95 n=6 ec=56/47 lis/c=94/63 les/c/f=95/64/0 sis=96 pruub=15.750823021s) [2] r=-1 lpr=96 pi=[63,96)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 198.618499756s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:14 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 96 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=96) [1]/[0] r=0 lpr=96 pi=[64,96)/1 crt=53'585 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [1] -> [1], acting [1] -> [0], acting_primary 1 -> 0, up_primary 1 -> 1, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:14 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 96 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=96) [1]/[0] r=0 lpr=96 pi=[64,96)/1 crt=53'585 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "22"}]': finished
Oct 11 01:46:14 compute-0 ceph-mon[191930]: osdmap e95: 3 total, 3 up, 3 in
Oct 11 01:46:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:46:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:46:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:46:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:46:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:46:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:46:14 compute-0 sudo[226079]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 01:46:14 compute-0 sudo[226079]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v216: 321 pgs: 1 activating+remapped, 320 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail; 5/247 objects misplaced (2.024%)
Oct 11 01:46:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e96 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:46:14 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.1f deep-scrub starts
Oct 11 01:46:14 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.1f deep-scrub ok
Oct 11 01:46:14 compute-0 podman[226139]: 2025-10-11 01:46:14.937453455 +0000 UTC m=+0.074693560 container create e836b1304a6afcbfe6d79b2ac5c82cfbab67ccc45f80ac31820a2505fad77efd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_goldstine, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:46:14 compute-0 podman[226139]: 2025-10-11 01:46:14.900804536 +0000 UTC m=+0.038044691 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:46:15 compute-0 systemd[1]: Started libpod-conmon-e836b1304a6afcbfe6d79b2ac5c82cfbab67ccc45f80ac31820a2505fad77efd.scope.
Oct 11 01:46:15 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:46:15 compute-0 podman[226139]: 2025-10-11 01:46:15.087847223 +0000 UTC m=+0.225087318 container init e836b1304a6afcbfe6d79b2ac5c82cfbab67ccc45f80ac31820a2505fad77efd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_goldstine, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:46:15 compute-0 podman[226139]: 2025-10-11 01:46:15.104336446 +0000 UTC m=+0.241576511 container start e836b1304a6afcbfe6d79b2ac5c82cfbab67ccc45f80ac31820a2505fad77efd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_goldstine, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:46:15 compute-0 podman[226139]: 2025-10-11 01:46:15.1087108 +0000 UTC m=+0.245950915 container attach e836b1304a6afcbfe6d79b2ac5c82cfbab67ccc45f80ac31820a2505fad77efd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_goldstine, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 01:46:15 compute-0 sweet_goldstine[226156]: 167 167
Oct 11 01:46:15 compute-0 systemd[1]: libpod-e836b1304a6afcbfe6d79b2ac5c82cfbab67ccc45f80ac31820a2505fad77efd.scope: Deactivated successfully.
Oct 11 01:46:15 compute-0 podman[226139]: 2025-10-11 01:46:15.116826846 +0000 UTC m=+0.254066951 container died e836b1304a6afcbfe6d79b2ac5c82cfbab67ccc45f80ac31820a2505fad77efd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_goldstine, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=reef)
Oct 11 01:46:15 compute-0 systemd[1]: var-lib-containers-storage-overlay-76303f46890d778fa0f8da559b862736f97e22ed839ad21fcb6f4afb29595022-merged.mount: Deactivated successfully.
Oct 11 01:46:15 compute-0 podman[226139]: 2025-10-11 01:46:15.1971075 +0000 UTC m=+0.334347565 container remove e836b1304a6afcbfe6d79b2ac5c82cfbab67ccc45f80ac31820a2505fad77efd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_goldstine, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:46:15 compute-0 systemd[1]: libpod-conmon-e836b1304a6afcbfe6d79b2ac5c82cfbab67ccc45f80ac31820a2505fad77efd.scope: Deactivated successfully.
Oct 11 01:46:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e96 do_prune osdmap full prune enabled
Oct 11 01:46:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e97 e97: 3 total, 3 up, 3 in
Oct 11 01:46:15 compute-0 ceph-mon[191930]: osdmap e96: 3 total, 3 up, 3 in
Oct 11 01:46:15 compute-0 ceph-mon[191930]: pgmap v216: 321 pgs: 1 activating+remapped, 320 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail; 5/247 objects misplaced (2.024%)
Oct 11 01:46:15 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e97: 3 total, 3 up, 3 in
Oct 11 01:46:15 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 97 pg[9.13( v 53'585 (0'0,53'585] local-lis/les=96/97 n=6 ec=56/47 lis/c=94/63 les/c/f=95/64/0 sis=96) [2] r=0 lpr=96 pi=[63,96)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:46:15 compute-0 podman[226181]: 2025-10-11 01:46:15.486666379 +0000 UTC m=+0.072137568 container create 5728db022049164992dcca99e5bfa996375ba76708a7803d4f4dd4ebfca1c5f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_lumiere, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:46:15 compute-0 podman[226181]: 2025-10-11 01:46:15.454031703 +0000 UTC m=+0.039502942 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:46:15 compute-0 systemd[1]: Started libpod-conmon-5728db022049164992dcca99e5bfa996375ba76708a7803d4f4dd4ebfca1c5f7.scope.
Oct 11 01:46:15 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:46:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6093424966f468e0c5d762e58287a93a10ac847e3fd43483cca579f150ae2c70/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:46:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6093424966f468e0c5d762e58287a93a10ac847e3fd43483cca579f150ae2c70/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:46:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6093424966f468e0c5d762e58287a93a10ac847e3fd43483cca579f150ae2c70/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:46:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6093424966f468e0c5d762e58287a93a10ac847e3fd43483cca579f150ae2c70/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:46:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6093424966f468e0c5d762e58287a93a10ac847e3fd43483cca579f150ae2c70/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:46:15 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.1c deep-scrub starts
Oct 11 01:46:15 compute-0 podman[226181]: 2025-10-11 01:46:15.663184918 +0000 UTC m=+0.248656157 container init 5728db022049164992dcca99e5bfa996375ba76708a7803d4f4dd4ebfca1c5f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_lumiere, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3)
Oct 11 01:46:15 compute-0 podman[226181]: 2025-10-11 01:46:15.676371229 +0000 UTC m=+0.261842378 container start 5728db022049164992dcca99e5bfa996375ba76708a7803d4f4dd4ebfca1c5f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_lumiere, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 01:46:15 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.1c deep-scrub ok
Oct 11 01:46:15 compute-0 podman[226181]: 2025-10-11 01:46:15.681061123 +0000 UTC m=+0.266532362 container attach 5728db022049164992dcca99e5bfa996375ba76708a7803d4f4dd4ebfca1c5f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_lumiere, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:46:15 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 97 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=96/97 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=96) [1]/[0] async=[1] r=0 lpr=96 pi=[64,96)/1 crt=53'585 mlcod 0'0 active+remapped mbc={255={(0+1)=5}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:46:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e97 do_prune osdmap full prune enabled
Oct 11 01:46:16 compute-0 ceph-mon[191930]: 7.1f deep-scrub starts
Oct 11 01:46:16 compute-0 ceph-mon[191930]: 7.1f deep-scrub ok
Oct 11 01:46:16 compute-0 ceph-mon[191930]: osdmap e97: 3 total, 3 up, 3 in
Oct 11 01:46:16 compute-0 ceph-mon[191930]: 6.1c deep-scrub starts
Oct 11 01:46:16 compute-0 ceph-mon[191930]: 6.1c deep-scrub ok
Oct 11 01:46:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e98 e98: 3 total, 3 up, 3 in
Oct 11 01:46:16 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e98: 3 total, 3 up, 3 in
Oct 11 01:46:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 98 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=96/64 les/c/f=97/65/0 sis=98) [1] r=0 lpr=98 pi=[64,98)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [1] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 1 -> 1, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:16 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 98 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=96/64 les/c/f=97/65/0 sis=98) [1] r=0 lpr=98 pi=[64,98)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 98 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=96/97 n=6 ec=56/47 lis/c=96/64 les/c/f=97/65/0 sis=98 pruub=15.290844917s) [1] async=[1] r=-1 lpr=98 pi=[64,98)/1 crt=53'585 mlcod 53'585 active pruub 200.222381592s@ mbc={255={}}] start_peering_interval up [1] -> [1], acting [0] -> [1], acting_primary 0 -> 1, up_primary 1 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:16 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 98 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=96/97 n=6 ec=56/47 lis/c=96/64 les/c/f=97/65/0 sis=98 pruub=15.290600777s) [1] r=-1 lpr=98 pi=[64,98)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 200.222381592s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v219: 321 pgs: 1 activating+remapped, 320 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail; 5/247 objects misplaced (2.024%)
Oct 11 01:46:16 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.a scrub starts
Oct 11 01:46:16 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.a scrub ok
Oct 11 01:46:16 compute-0 recursing_lumiere[226200]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:46:16 compute-0 recursing_lumiere[226200]: --> relative data size: 1.0
Oct 11 01:46:16 compute-0 recursing_lumiere[226200]: --> All data devices are unavailable
Oct 11 01:46:16 compute-0 systemd[1]: libpod-5728db022049164992dcca99e5bfa996375ba76708a7803d4f4dd4ebfca1c5f7.scope: Deactivated successfully.
Oct 11 01:46:16 compute-0 systemd[1]: libpod-5728db022049164992dcca99e5bfa996375ba76708a7803d4f4dd4ebfca1c5f7.scope: Consumed 1.095s CPU time.
Oct 11 01:46:16 compute-0 podman[226181]: 2025-10-11 01:46:16.837063747 +0000 UTC m=+1.422534896 container died 5728db022049164992dcca99e5bfa996375ba76708a7803d4f4dd4ebfca1c5f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_lumiere, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 01:46:16 compute-0 systemd[1]: var-lib-containers-storage-overlay-6093424966f468e0c5d762e58287a93a10ac847e3fd43483cca579f150ae2c70-merged.mount: Deactivated successfully.
Oct 11 01:46:16 compute-0 podman[226181]: 2025-10-11 01:46:16.92911662 +0000 UTC m=+1.514587779 container remove 5728db022049164992dcca99e5bfa996375ba76708a7803d4f4dd4ebfca1c5f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_lumiere, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:46:16 compute-0 systemd[1]: libpod-conmon-5728db022049164992dcca99e5bfa996375ba76708a7803d4f4dd4ebfca1c5f7.scope: Deactivated successfully.
Oct 11 01:46:16 compute-0 sudo[226079]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:17 compute-0 sudo[226255]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:46:17 compute-0 sudo[226255]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:17 compute-0 sudo[226255]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:17 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.8 scrub starts
Oct 11 01:46:17 compute-0 sudo[226280]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:46:17 compute-0 sudo[226280]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:17 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.8 scrub ok
Oct 11 01:46:17 compute-0 sudo[226280]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:17 compute-0 sudo[226307]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:46:17 compute-0 sudo[226307]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:17 compute-0 sudo[226307]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e98 do_prune osdmap full prune enabled
Oct 11 01:46:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e99 e99: 3 total, 3 up, 3 in
Oct 11 01:46:17 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e99: 3 total, 3 up, 3 in
Oct 11 01:46:17 compute-0 ceph-mon[191930]: osdmap e98: 3 total, 3 up, 3 in
Oct 11 01:46:17 compute-0 ceph-mon[191930]: pgmap v219: 321 pgs: 1 activating+remapped, 320 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail; 5/247 objects misplaced (2.024%)
Oct 11 01:46:17 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 99 pg[9.15( v 53'585 (0'0,53'585] local-lis/les=98/99 n=6 ec=56/47 lis/c=96/64 les/c/f=97/65/0 sis=98) [1] r=0 lpr=98 pi=[64,98)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:46:17 compute-0 sudo[226336]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:46:17 compute-0 sudo[226336]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:17 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 4.9 scrub starts
Oct 11 01:46:17 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 4.9 scrub ok
Oct 11 01:46:18 compute-0 podman[226405]: 2025-10-11 01:46:18.138193475 +0000 UTC m=+0.091486852 container create 4b6fae5b420d00d177a441d25c1edb3f422032414f4e3af203ae09dc91bb0a47 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_lamarr, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:46:18 compute-0 podman[226405]: 2025-10-11 01:46:18.10616716 +0000 UTC m=+0.059460537 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:46:18 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.1 scrub starts
Oct 11 01:46:18 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.1 scrub ok
Oct 11 01:46:18 compute-0 systemd[1]: Started libpod-conmon-4b6fae5b420d00d177a441d25c1edb3f422032414f4e3af203ae09dc91bb0a47.scope.
Oct 11 01:46:18 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:46:18 compute-0 podman[226405]: 2025-10-11 01:46:18.292147327 +0000 UTC m=+0.245440744 container init 4b6fae5b420d00d177a441d25c1edb3f422032414f4e3af203ae09dc91bb0a47 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_lamarr, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:46:18 compute-0 podman[226405]: 2025-10-11 01:46:18.317825447 +0000 UTC m=+0.271118784 container start 4b6fae5b420d00d177a441d25c1edb3f422032414f4e3af203ae09dc91bb0a47 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_lamarr, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 01:46:18 compute-0 podman[226405]: 2025-10-11 01:46:18.322736631 +0000 UTC m=+0.276029968 container attach 4b6fae5b420d00d177a441d25c1edb3f422032414f4e3af203ae09dc91bb0a47 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_lamarr, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:46:18 compute-0 frosty_lamarr[226434]: 167 167
Oct 11 01:46:18 compute-0 podman[226421]: 2025-10-11 01:46:18.331465928 +0000 UTC m=+0.119873426 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, container_name=ceilometer_agent_compute, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_managed=true, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image)
Oct 11 01:46:18 compute-0 systemd[1]: libpod-4b6fae5b420d00d177a441d25c1edb3f422032414f4e3af203ae09dc91bb0a47.scope: Deactivated successfully.
Oct 11 01:46:18 compute-0 podman[226405]: 2025-10-11 01:46:18.335623211 +0000 UTC m=+0.288916628 container died 4b6fae5b420d00d177a441d25c1edb3f422032414f4e3af203ae09dc91bb0a47 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_lamarr, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Oct 11 01:46:18 compute-0 systemd[1]: var-lib-containers-storage-overlay-89e32131d705e60a8f85a33b0adbb2c4058eb25cd7e747a039db255e5d8bf9b3-merged.mount: Deactivated successfully.
Oct 11 01:46:18 compute-0 ceph-mon[191930]: 3.a scrub starts
Oct 11 01:46:18 compute-0 ceph-mon[191930]: 3.a scrub ok
Oct 11 01:46:18 compute-0 ceph-mon[191930]: 7.8 scrub starts
Oct 11 01:46:18 compute-0 ceph-mon[191930]: 7.8 scrub ok
Oct 11 01:46:18 compute-0 ceph-mon[191930]: osdmap e99: 3 total, 3 up, 3 in
Oct 11 01:46:18 compute-0 ceph-mon[191930]: 4.9 scrub starts
Oct 11 01:46:18 compute-0 ceph-mon[191930]: 4.9 scrub ok
Oct 11 01:46:18 compute-0 podman[226405]: 2025-10-11 01:46:18.420774189 +0000 UTC m=+0.374067556 container remove 4b6fae5b420d00d177a441d25c1edb3f422032414f4e3af203ae09dc91bb0a47 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_lamarr, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 01:46:18 compute-0 systemd[1]: libpod-conmon-4b6fae5b420d00d177a441d25c1edb3f422032414f4e3af203ae09dc91bb0a47.scope: Deactivated successfully.
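frosty_lamarr is one of the short-lived helpers cephadm launches from the ceph image; its whole start/attach/died/remove cycle spans roughly 20 ms, and the "167 167" it printed is consistent with a uid/gid probe of the in-image ceph user (uid and gid 167 in these images) — an assumption, since the exact command is not logged here. A sketch, assuming journal lines shaped like the podman events above, that pairs start/died events by container ID to measure such lifetimes:

    import re
    from datetime import datetime

    EVENT_RE = re.compile(r"container (?P<event>start|died) (?P<cid>[0-9a-f]{64})")
    TS_RE = re.compile(r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+) \+0000")

    def lifetimes(journal_lines):
        """Yield (container_id, seconds_alive) for paired start/died events."""
        started = {}
        for line in journal_lines:
            ev, ts = EVENT_RE.search(line), TS_RE.search(line)
            if not (ev and ts):
                continue
            # podman prints nanosecond precision; trim to 6 fractional
            # digits so strptime's %f accepts the stamp.
            stamp = datetime.strptime(ts.group(1)[:26], "%Y-%m-%d %H:%M:%S.%f")
            if ev["event"] == "start":
                started[ev["cid"]] = stamp
            elif ev["cid"] in started:
                yield ev["cid"], (stamp - started.pop(ev["cid"])).total_seconds()

Applied to the events above, the 4b6fae5b... container lives about 0.018 s between start and died.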
Oct 11 01:46:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v221: 321 pgs: 321 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail; 26 B/s, 1 objects/s recovering
Oct 11 01:46:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "23"} v 0) v1
Oct 11 01:46:18 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "23"}]: dispatch
Oct 11 01:46:18 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.1d scrub starts
Oct 11 01:46:18 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 6.1d scrub ok
Oct 11 01:46:18 compute-0 podman[226470]: 2025-10-11 01:46:18.718839194 +0000 UTC m=+0.102950652 container create 82a538eb506b7e59a8bbf71fb0a1d4d4810a04e1f8a85714173bb48ccbb5c353 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_kepler, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507)
Oct 11 01:46:18 compute-0 podman[226470]: 2025-10-11 01:46:18.681344195 +0000 UTC m=+0.065455743 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:46:18 compute-0 systemd[1]: Started libpod-conmon-82a538eb506b7e59a8bbf71fb0a1d4d4810a04e1f8a85714173bb48ccbb5c353.scope.
Oct 11 01:46:18 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:46:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/243b64ec021ae92eaf3d184a69eb6eb6990ef5f08bc077f9e4eaf13205ae8e75/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:46:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/243b64ec021ae92eaf3d184a69eb6eb6990ef5f08bc077f9e4eaf13205ae8e75/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:46:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/243b64ec021ae92eaf3d184a69eb6eb6990ef5f08bc077f9e4eaf13205ae8e75/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:46:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/243b64ec021ae92eaf3d184a69eb6eb6990ef5f08bc077f9e4eaf13205ae8e75/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
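These kernel notices fire as podman's bind mounts trigger xfs remounts: without the bigtime feature, xfs stores inode timestamps as signed 32-bit seconds, so the printed limit 0x7fffffff decodes to the 2038 cutoff. Checking the date with nothing but the standard library:

    from datetime import datetime, timezone

    # 0x7fffffff = 2147483647, the largest signed 32-bit time_t.
    limit = datetime.fromtimestamp(0x7FFFFFFF, tz=timezone.utc)
    print(limit.isoformat())  # 2038-01-19T03:14:07+00:00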
Oct 11 01:46:18 compute-0 podman[226470]: 2025-10-11 01:46:18.900204351 +0000 UTC m=+0.284315839 container init 82a538eb506b7e59a8bbf71fb0a1d4d4810a04e1f8a85714173bb48ccbb5c353 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_kepler, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:46:18 compute-0 podman[226470]: 2025-10-11 01:46:18.915889259 +0000 UTC m=+0.300000727 container start 82a538eb506b7e59a8bbf71fb0a1d4d4810a04e1f8a85714173bb48ccbb5c353 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_kepler, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:46:18 compute-0 podman[226470]: 2025-10-11 01:46:18.923559435 +0000 UTC m=+0.307670903 container attach 82a538eb506b7e59a8bbf71fb0a1d4d4810a04e1f8a85714173bb48ccbb5c353 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_kepler, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 01:46:19 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.2 scrub starts
Oct 11 01:46:19 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.2 scrub ok
Oct 11 01:46:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e99 do_prune osdmap full prune enabled
Oct 11 01:46:19 compute-0 ceph-mon[191930]: 7.1 scrub starts
Oct 11 01:46:19 compute-0 ceph-mon[191930]: 7.1 scrub ok
Oct 11 01:46:19 compute-0 ceph-mon[191930]: pgmap v221: 321 pgs: 321 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail; 26 B/s, 1 objects/s recovering
Oct 11 01:46:19 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "23"}]: dispatch
Oct 11 01:46:19 compute-0 ceph-mon[191930]: 6.1d scrub starts
Oct 11 01:46:19 compute-0 ceph-mon[191930]: 6.1d scrub ok
Oct 11 01:46:19 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "23"}]': finished
Oct 11 01:46:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e100 e100: 3 total, 3 up, 3 in
Oct 11 01:46:19 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e100: 3 total, 3 up, 3 in
Oct 11 01:46:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e100 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
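The _set_new_cache_sizes line is the mon's memory autotuner re-splitting its cache budget; the field names suggest incremental-osdmap, full-osdmap, and kv (RocksDB) shares, though that reading is an inference from the names alone. Converted to MiB the split is easier to eyeball (byte counts copied verbatim from the line above):

    # Byte counts copied from the _set_new_cache_sizes line above.
    cache_size, inc_alloc, full_alloc, kv_alloc = (
        1020054731, 348127232, 348127232, 322961408)

    MiB = 1 << 20
    for name, val in (("cache_size", cache_size), ("inc_alloc", inc_alloc),
                      ("full_alloc", full_alloc), ("kv_alloc", kv_alloc)):
        print(f"{name:10} {val / MiB:7.1f} MiB  {val / cache_size:6.1%}")
    # inc + full + kv ~= 972 MiB, i.e. essentially the whole ~973 MiB target.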
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]: {
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:     "0": [
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:         {
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "devices": [
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "/dev/loop3"
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             ],
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "lv_name": "ceph_lv0",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "lv_size": "21470642176",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "name": "ceph_lv0",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "tags": {
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.cluster_name": "ceph",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.crush_device_class": "",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.encrypted": "0",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.osd_id": "0",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.type": "block",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.vdo": "0"
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             },
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "type": "block",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "vg_name": "ceph_vg0"
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:         }
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:     ],
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:     "1": [
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:         {
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "devices": [
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "/dev/loop4"
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             ],
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "lv_name": "ceph_lv1",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "lv_size": "21470642176",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "name": "ceph_lv1",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "tags": {
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.cluster_name": "ceph",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.crush_device_class": "",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.encrypted": "0",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.osd_id": "1",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.type": "block",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.vdo": "0"
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             },
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "type": "block",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "vg_name": "ceph_vg1"
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:         }
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:     ],
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:     "2": [
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:         {
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "devices": [
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "/dev/loop5"
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             ],
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "lv_name": "ceph_lv2",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "lv_size": "21470642176",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "name": "ceph_lv2",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "tags": {
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.cluster_name": "ceph",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.crush_device_class": "",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.encrypted": "0",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.osd_id": "2",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.type": "block",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:                 "ceph.vdo": "0"
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             },
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "type": "block",
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:             "vg_name": "ceph_vg2"
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:         }
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]:     ]
Oct 11 01:46:19 compute-0 compassionate_kepler[226485]: }
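The JSON block above is the helper container echoing what looks like ceph-volume lvm list --format json: one key per OSD id, each entry carrying the LV path, the backing loop device, and the ceph.* LV tags. A small sketch (function and variable names are mine) that reduces it to an osd_id -> device map:

    import json

    def osd_devices(lvm_list_json):
        """Map osd_id -> (lv_path, backing device, osd_fsid) from the payload above."""
        out = {}
        for osd_id, lvs in json.loads(lvm_list_json).items():
            for lv in lvs:
                if lv.get("type") == "block":
                    out[int(osd_id)] = (lv["lv_path"],
                                        lv["devices"][0],
                                        lv["tags"]["ceph.osd_fsid"])
        return out

    # For the payload above:
    # {0: ('/dev/ceph_vg0/ceph_lv0', '/dev/loop3', 'a9c7940d-...'),
    #  1: ('/dev/ceph_vg1/ceph_lv1', '/dev/loop4', '6af45214-...'),
    #  2: ('/dev/ceph_vg2/ceph_lv2', '/dev/loop5', '8fabd243-...')}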
Oct 11 01:46:19 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.1b scrub starts
Oct 11 01:46:19 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.1b scrub ok
Oct 11 01:46:19 compute-0 systemd[1]: libpod-82a538eb506b7e59a8bbf71fb0a1d4d4810a04e1f8a85714173bb48ccbb5c353.scope: Deactivated successfully.
Oct 11 01:46:19 compute-0 podman[226470]: 2025-10-11 01:46:19.763441703 +0000 UTC m=+1.147553231 container died 82a538eb506b7e59a8bbf71fb0a1d4d4810a04e1f8a85714173bb48ccbb5c353 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_kepler, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:46:19 compute-0 systemd[1]: var-lib-containers-storage-overlay-243b64ec021ae92eaf3d184a69eb6eb6990ef5f08bc077f9e4eaf13205ae8e75-merged.mount: Deactivated successfully.
Oct 11 01:46:19 compute-0 podman[226470]: 2025-10-11 01:46:19.861332297 +0000 UTC m=+1.245443755 container remove 82a538eb506b7e59a8bbf71fb0a1d4d4810a04e1f8a85714173bb48ccbb5c353 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_kepler, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Oct 11 01:46:19 compute-0 systemd[1]: libpod-conmon-82a538eb506b7e59a8bbf71fb0a1d4d4810a04e1f8a85714173bb48ccbb5c353.scope: Deactivated successfully.
Oct 11 01:46:19 compute-0 sudo[226336]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:20 compute-0 sudo[226508]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:46:20 compute-0 sudo[226508]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:20 compute-0 sudo[226508]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:20 compute-0 sudo[226533]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:46:20 compute-0 sudo[226533]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:20 compute-0 sudo[226533]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:20 compute-0 sudo[226558]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:46:20 compute-0 sudo[226558]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:20 compute-0 sudo[226558]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:20 compute-0 sudo[226583]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:46:20 compute-0 sudo[226583]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
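The sudo trail shows how the mgr's cephadm module drives this host over its SSH channel: paired /bin/true calls that look like connectivity probes, a which python3 lookup, then python3 running the copied cephadm binary, which wraps ceph-volume in a container (its raw list --format json output appears further down). A sketch that recovers the executed commands from such audit lines:

    import re

    # sudo audit lines look like:
    #   sudo[PID]: USER : PWD=... ; USER=root ; COMMAND=/bin/...
    SUDO_RE = re.compile(r"sudo\[\d+\]: (?P<user>\S+) : .*?COMMAND=(?P<cmd>.+)$")

    def sudo_commands(journal_lines):
        """Yield (invoking user, command); pam session open/close lines are skipped."""
        for line in journal_lines:
            m = SUDO_RE.search(line)
            if m:
                yield m["user"], m["cmd"]

Run over this window it yields ceph-admin's /bin/true probes, /bin/which python3, and the full /bin/python3 ... cephadm ... ceph-volume ... raw list --format json invocation above.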
Oct 11 01:46:20 compute-0 ceph-mon[191930]: 7.2 scrub starts
Oct 11 01:46:20 compute-0 ceph-mon[191930]: 7.2 scrub ok
Oct 11 01:46:20 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "23"}]': finished
Oct 11 01:46:20 compute-0 ceph-mon[191930]: osdmap e100: 3 total, 3 up, 3 in
Oct 11 01:46:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v223: 321 pgs: 321 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail; 21 B/s, 0 objects/s recovering
Oct 11 01:46:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "24"} v 0) v1
Oct 11 01:46:20 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "24"}]: dispatch
Oct 11 01:46:20 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.9 scrub starts
Oct 11 01:46:20 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.1b scrub starts
Oct 11 01:46:20 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 100 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=73/74 n=6 ec=56/47 lis/c=73/73 les/c/f=74/74/0 sis=100 pruub=11.964403152s) [0] r=-1 lpr=100 pi=[73,100)/1 crt=53'585 mlcod 0'0 active pruub 186.679748535s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:20 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 100 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=73/74 n=6 ec=56/47 lis/c=73/73 les/c/f=74/74/0 sis=100 pruub=11.964285851s) [0] r=-1 lpr=100 pi=[73,100)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 186.679748535s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:20 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 100 pg[9.16( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=73/73 les/c/f=74/74/0 sis=100) [0] r=0 lpr=100 pi=[73,100)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
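As pgp_num_actual climbs, placements reshuffle: each start_peering_interval line records the old -> new up/acting sets for pg 9.16 (here up [2] -> [0]), after which the PG state machine re-enters Start, with osd.2 going Stray and osd.0 taking over as Primary. A sketch for extracting those transitions from OSD debug lines of this shape:

    import re

    PEER_RE = re.compile(
        r"pg\[(?P<pgid>\d+\.[0-9a-f]+).*?"
        r"up \[(?P<up_old>[\d,]*)\] -> \[(?P<up_new>[\d,]*)\], "
        r"acting \[(?P<act_old>[\d,]*)\] -> \[(?P<act_new>[\d,]*)\]")

    def peering_changes(journal_lines):
        """Yield (pgid, up_before, up_after, acting_before, acting_after)."""
        for line in journal_lines:
            m = PEER_RE.search(line)
            if m:
                yield (m["pgid"], m["up_old"], m["up_new"],
                       m["act_old"], m["act_new"])

    # The epoch-100 line above yields ('9.16', '2', '0', '2', '0').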
Oct 11 01:46:20 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.9 scrub ok
Oct 11 01:46:20 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.1b scrub ok
Oct 11 01:46:21 compute-0 podman[226647]: 2025-10-11 01:46:21.060212398 +0000 UTC m=+0.089872521 container create fe213109ed74b2e1b434151a75015635bdacbfcf98c6998de93aa4ac28300f4f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_diffie, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:46:21 compute-0 podman[226647]: 2025-10-11 01:46:21.028417152 +0000 UTC m=+0.058077335 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:46:21 compute-0 systemd[1]: Started libpod-conmon-fe213109ed74b2e1b434151a75015635bdacbfcf98c6998de93aa4ac28300f4f.scope.
Oct 11 01:46:21 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:46:21 compute-0 podman[226647]: 2025-10-11 01:46:21.224802823 +0000 UTC m=+0.254463016 container init fe213109ed74b2e1b434151a75015635bdacbfcf98c6998de93aa4ac28300f4f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_diffie, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:46:21 compute-0 podman[226647]: 2025-10-11 01:46:21.242329401 +0000 UTC m=+0.271989524 container start fe213109ed74b2e1b434151a75015635bdacbfcf98c6998de93aa4ac28300f4f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_diffie, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS)
Oct 11 01:46:21 compute-0 podman[226647]: 2025-10-11 01:46:21.249082067 +0000 UTC m=+0.278742350 container attach fe213109ed74b2e1b434151a75015635bdacbfcf98c6998de93aa4ac28300f4f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_diffie, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 01:46:21 compute-0 bold_diffie[226662]: 167 167
Oct 11 01:46:21 compute-0 podman[226647]: 2025-10-11 01:46:21.256089949 +0000 UTC m=+0.285750062 container died fe213109ed74b2e1b434151a75015635bdacbfcf98c6998de93aa4ac28300f4f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_diffie, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 01:46:21 compute-0 systemd[1]: libpod-fe213109ed74b2e1b434151a75015635bdacbfcf98c6998de93aa4ac28300f4f.scope: Deactivated successfully.
Oct 11 01:46:21 compute-0 systemd[1]: var-lib-containers-storage-overlay-b8c0fcd63b39c9871c4d0645c635ba83970989033656488006c0345b5a536392-merged.mount: Deactivated successfully.
Oct 11 01:46:21 compute-0 podman[226647]: 2025-10-11 01:46:21.342371072 +0000 UTC m=+0.372031205 container remove fe213109ed74b2e1b434151a75015635bdacbfcf98c6998de93aa4ac28300f4f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_diffie, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 01:46:21 compute-0 systemd[1]: libpod-conmon-fe213109ed74b2e1b434151a75015635bdacbfcf98c6998de93aa4ac28300f4f.scope: Deactivated successfully.
Oct 11 01:46:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e100 do_prune osdmap full prune enabled
Oct 11 01:46:21 compute-0 ceph-mon[191930]: 3.1b scrub starts
Oct 11 01:46:21 compute-0 ceph-mon[191930]: 3.1b scrub ok
Oct 11 01:46:21 compute-0 ceph-mon[191930]: pgmap v223: 321 pgs: 321 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail; 21 B/s, 0 objects/s recovering
Oct 11 01:46:21 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "24"}]: dispatch
Oct 11 01:46:21 compute-0 ceph-mon[191930]: 2.1b scrub starts
Oct 11 01:46:21 compute-0 ceph-mon[191930]: 2.1b scrub ok
Oct 11 01:46:21 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "24"}]': finished
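This dispatch/finished pattern repeats throughout the window: the mgr raises default.rgw.log's pgp_num_actual one placement group at a time (23, 24, then 25 below), and every finished step lands as a new osdmap epoch. A sketch that replays the ramp from the audit lines:

    import json
    import re

    CMD_RE = re.compile(r"cmd='?\[(?P<body>\{.*?\})\]'?: (?P<phase>dispatch|finished)")

    def pgp_ramp(journal_lines):
        """Yield (pool, value, phase) for 'osd pool set ... pgp_num_actual' steps."""
        for line in journal_lines:
            m = CMD_RE.search(line)
            if not m:
                continue
            cmd = json.loads(m["body"])
            if cmd.get("prefix") == "osd pool set" and cmd.get("var") == "pgp_num_actual":
                yield cmd["pool"], int(cmd["val"]), m["phase"]

    # Over this window: ('default.rgw.log', 23, 'dispatch'), (..., 23, 'finished'),
    # (..., 24, 'dispatch'), ... -- the value rising by one per finished step.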
Oct 11 01:46:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e101 e101: 3 total, 3 up, 3 in
Oct 11 01:46:21 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e101: 3 total, 3 up, 3 in
Oct 11 01:46:21 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 101 pg[9.16( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=73/73 les/c/f=74/74/0 sis=101) [0]/[2] r=-1 lpr=101 pi=[73,101)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:21 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 101 pg[9.16( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=73/73 les/c/f=74/74/0 sis=101) [0]/[2] r=-1 lpr=101 pi=[73,101)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:21 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 101 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=73/74 n=6 ec=56/47 lis/c=73/73 les/c/f=74/74/0 sis=101) [0]/[2] r=0 lpr=101 pi=[73,101)/1 crt=53'585 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:21 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 101 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=73/74 n=6 ec=56/47 lis/c=73/73 les/c/f=74/74/0 sis=101) [0]/[2] r=0 lpr=101 pi=[73,101)/1 crt=53'585 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:21 compute-0 podman[226685]: 2025-10-11 01:46:21.680588505 +0000 UTC m=+0.114483912 container create b036eda6f6cf1cdd7eb2657d35bc4e42647324e4d7177e0f6d93e1c0a33ecd92 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_gates, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 01:46:21 compute-0 podman[226685]: 2025-10-11 01:46:21.644821034 +0000 UTC m=+0.078716461 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:46:21 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.1f scrub starts
Oct 11 01:46:21 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 3.1f scrub ok
Oct 11 01:46:21 compute-0 systemd[1]: Started libpod-conmon-b036eda6f6cf1cdd7eb2657d35bc4e42647324e4d7177e0f6d93e1c0a33ecd92.scope.
Oct 11 01:46:21 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:46:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/df3f589c5031bbe2291378273519f11c542b0cf2884388732047b41f1d860d5b/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:46:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/df3f589c5031bbe2291378273519f11c542b0cf2884388732047b41f1d860d5b/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:46:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/df3f589c5031bbe2291378273519f11c542b0cf2884388732047b41f1d860d5b/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:46:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/df3f589c5031bbe2291378273519f11c542b0cf2884388732047b41f1d860d5b/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:46:21 compute-0 podman[226685]: 2025-10-11 01:46:21.919674488 +0000 UTC m=+0.353569965 container init b036eda6f6cf1cdd7eb2657d35bc4e42647324e4d7177e0f6d93e1c0a33ecd92 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_gates, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 01:46:21 compute-0 podman[226685]: 2025-10-11 01:46:21.937312979 +0000 UTC m=+0.371208376 container start b036eda6f6cf1cdd7eb2657d35bc4e42647324e4d7177e0f6d93e1c0a33ecd92 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_gates, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, ceph=True)
Oct 11 01:46:21 compute-0 podman[226685]: 2025-10-11 01:46:21.952021507 +0000 UTC m=+0.385916974 container attach b036eda6f6cf1cdd7eb2657d35bc4e42647324e4d7177e0f6d93e1c0a33ecd92 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_gates, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2)
Oct 11 01:46:22 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.7 scrub starts
Oct 11 01:46:22 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.7 scrub ok
Oct 11 01:46:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v225: 321 pgs: 321 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail; 36 B/s, 1 objects/s recovering
Oct 11 01:46:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "25"} v 0) v1
Oct 11 01:46:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "25"}]: dispatch
Oct 11 01:46:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e101 do_prune osdmap full prune enabled
Oct 11 01:46:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "25"}]': finished
Oct 11 01:46:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e102 e102: 3 total, 3 up, 3 in
Oct 11 01:46:22 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e102: 3 total, 3 up, 3 in
Oct 11 01:46:22 compute-0 ceph-mon[191930]: 3.9 scrub starts
Oct 11 01:46:22 compute-0 ceph-mon[191930]: 3.9 scrub ok
Oct 11 01:46:22 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "24"}]': finished
Oct 11 01:46:22 compute-0 ceph-mon[191930]: osdmap e101: 3 total, 3 up, 3 in
Oct 11 01:46:22 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "25"}]: dispatch
Oct 11 01:46:22 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 102 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=101/102 n=6 ec=56/47 lis/c=73/73 les/c/f=74/74/0 sis=101) [0]/[2] async=[0] r=0 lpr=101 pi=[73,101)/1 crt=53'585 mlcod 0'0 active+remapped mbc={255={(0+1)=6}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:46:23 compute-0 modest_gates[226701]: {
Oct 11 01:46:23 compute-0 modest_gates[226701]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:46:23 compute-0 modest_gates[226701]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:46:23 compute-0 modest_gates[226701]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:46:23 compute-0 modest_gates[226701]:         "osd_id": 1,
Oct 11 01:46:23 compute-0 modest_gates[226701]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:46:23 compute-0 modest_gates[226701]:         "type": "bluestore"
Oct 11 01:46:23 compute-0 modest_gates[226701]:     },
Oct 11 01:46:23 compute-0 modest_gates[226701]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:46:23 compute-0 modest_gates[226701]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:46:23 compute-0 modest_gates[226701]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:46:23 compute-0 modest_gates[226701]:         "osd_id": 2,
Oct 11 01:46:23 compute-0 modest_gates[226701]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:46:23 compute-0 modest_gates[226701]:         "type": "bluestore"
Oct 11 01:46:23 compute-0 modest_gates[226701]:     },
Oct 11 01:46:23 compute-0 modest_gates[226701]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:46:23 compute-0 modest_gates[226701]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:46:23 compute-0 modest_gates[226701]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:46:23 compute-0 modest_gates[226701]:         "osd_id": 0,
Oct 11 01:46:23 compute-0 modest_gates[226701]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:46:23 compute-0 modest_gates[226701]:         "type": "bluestore"
Oct 11 01:46:23 compute-0 modest_gates[226701]:     }
Oct 11 01:46:23 compute-0 modest_gates[226701]: }
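The second JSON payload matches the raw list --format json requested by the cephadm command above: keyed by OSD fsid, each entry naming the device-mapper path and confirming a bluestore OSD. A sketch cross-checking it against the earlier lvm list output (reusing the osd_devices map from that sketch):

    import json

    def cross_check(raw_list_json, lvm_map):
        """Assert the raw list (keyed by osd fsid) agrees with the lvm-derived map."""
        for osd_uuid, rec in json.loads(raw_list_json).items():
            assert rec["type"] == "bluestore"
            lv_path, backing, osd_fsid = lvm_map[rec["osd_id"]]
            # raw list reports /dev/mapper/<vg>-<lv>, the same LV as lv_path.
            assert osd_fsid == osd_uuid == rec["osd_uuid"]
        print(f"raw list and lvm list agree on {len(lvm_map)} OSDs")

For the two payloads in this window the assertion holds for all three OSDs: fsids a9c7940d..., 6af45214..., and 8fabd243... map to osd 0, 1, and 2 in both views.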
Oct 11 01:46:23 compute-0 systemd[1]: libpod-b036eda6f6cf1cdd7eb2657d35bc4e42647324e4d7177e0f6d93e1c0a33ecd92.scope: Deactivated successfully.
Oct 11 01:46:23 compute-0 systemd[1]: libpod-b036eda6f6cf1cdd7eb2657d35bc4e42647324e4d7177e0f6d93e1c0a33ecd92.scope: Consumed 1.216s CPU time.
Oct 11 01:46:23 compute-0 podman[226685]: 2025-10-11 01:46:23.160332112 +0000 UTC m=+1.594227509 container died b036eda6f6cf1cdd7eb2657d35bc4e42647324e4d7177e0f6d93e1c0a33ecd92 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_gates, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 01:46:23 compute-0 systemd[1]: var-lib-containers-storage-overlay-df3f589c5031bbe2291378273519f11c542b0cf2884388732047b41f1d860d5b-merged.mount: Deactivated successfully.
Oct 11 01:46:23 compute-0 podman[226685]: 2025-10-11 01:46:23.269712414 +0000 UTC m=+1.703607791 container remove b036eda6f6cf1cdd7eb2657d35bc4e42647324e4d7177e0f6d93e1c0a33ecd92 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_gates, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default)
Oct 11 01:46:23 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.8 scrub starts
Oct 11 01:46:23 compute-0 systemd[1]: libpod-conmon-b036eda6f6cf1cdd7eb2657d35bc4e42647324e4d7177e0f6d93e1c0a33ecd92.scope: Deactivated successfully.
Oct 11 01:46:23 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.8 scrub ok
Oct 11 01:46:23 compute-0 sudo[226583]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:46:23 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:46:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:46:23 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:46:23 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 6a5bf750-26a2-45c7-8813-d7f2b00f460b does not exist
Oct 11 01:46:23 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev efef6945-b311-4d37-898e-2cbee9dd7fb4 does not exist
Oct 11 01:46:23 compute-0 sudo[226748]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:46:23 compute-0 sudo[226748]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:23 compute-0 sudo[226748]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e102 do_prune osdmap full prune enabled
Oct 11 01:46:23 compute-0 ceph-mon[191930]: 3.1f scrub starts
Oct 11 01:46:23 compute-0 ceph-mon[191930]: 3.1f scrub ok
Oct 11 01:46:23 compute-0 ceph-mon[191930]: 3.7 scrub starts
Oct 11 01:46:23 compute-0 ceph-mon[191930]: 3.7 scrub ok
Oct 11 01:46:23 compute-0 ceph-mon[191930]: pgmap v225: 321 pgs: 321 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail; 36 B/s, 1 objects/s recovering
Oct 11 01:46:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "25"}]': finished
Oct 11 01:46:23 compute-0 ceph-mon[191930]: osdmap e102: 3 total, 3 up, 3 in
Oct 11 01:46:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:46:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:46:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e103 e103: 3 total, 3 up, 3 in
Oct 11 01:46:23 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e103: 3 total, 3 up, 3 in
Oct 11 01:46:23 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 103 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=101/102 n=6 ec=56/47 lis/c=101/73 les/c/f=102/74/0 sis=103 pruub=14.997951508s) [0] async=[0] r=-1 lpr=103 pi=[73,103)/1 crt=53'585 mlcod 53'585 active pruub 192.502365112s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:23 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 103 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=101/102 n=6 ec=56/47 lis/c=101/73 les/c/f=102/74/0 sis=103 pruub=14.997781754s) [0] r=-1 lpr=103 pi=[73,103)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 192.502365112s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:23 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 103 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=101/73 les/c/f=102/74/0 sis=103) [0] r=0 lpr=103 pi=[73,103)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:23 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 103 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=101/73 les/c/f=102/74/0 sis=103) [0] r=0 lpr=103 pi=[73,103)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:23 compute-0 sudo[226773]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:46:23 compute-0 sudo[226773]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:46:23 compute-0 sudo[226773]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:23 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.1b scrub starts
Oct 11 01:46:23 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.1b scrub ok
Oct 11 01:46:24 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.e scrub starts
Oct 11 01:46:24 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.e scrub ok
Oct 11 01:46:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v228: 321 pgs: 1 active+remapped, 320 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail; 43 B/s, 2 objects/s recovering
Oct 11 01:46:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "26"} v 0) v1
Oct 11 01:46:24 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "26"}]: dispatch
Oct 11 01:46:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e103 do_prune osdmap full prune enabled
Oct 11 01:46:24 compute-0 ceph-mon[191930]: 3.8 scrub starts
Oct 11 01:46:24 compute-0 ceph-mon[191930]: 3.8 scrub ok
Oct 11 01:46:24 compute-0 ceph-mon[191930]: osdmap e103: 3 total, 3 up, 3 in
Oct 11 01:46:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "26"}]: dispatch
Oct 11 01:46:24 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "26"}]': finished
Oct 11 01:46:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e104 e104: 3 total, 3 up, 3 in
Oct 11 01:46:24 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e104: 3 total, 3 up, 3 in
Oct 11 01:46:24 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 104 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=104 pruub=15.802420616s) [2] r=-1 lpr=104 pi=[64,104)/1 crt=53'585 mlcod 0'0 active pruub 208.916641235s@ mbc={}] start_peering_interval up [0] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:24 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 104 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=104 pruub=15.801739693s) [2] r=-1 lpr=104 pi=[64,104)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 208.916641235s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:24 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 104 pg[9.16( v 53'585 (0'0,53'585] local-lis/les=103/104 n=6 ec=56/47 lis/c=101/73 les/c/f=102/74/0 sis=103) [0] r=0 lpr=103 pi=[73,103)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:46:24 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 104 pg[9.19( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=104) [2] r=0 lpr=104 pi=[64,104)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e104 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:46:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e104 do_prune osdmap full prune enabled
Oct 11 01:46:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e105 e105: 3 total, 3 up, 3 in
Oct 11 01:46:24 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e105: 3 total, 3 up, 3 in
Oct 11 01:46:24 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 105 pg[9.19( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=105) [2]/[0] r=-1 lpr=105 pi=[64,105)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:24 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 105 pg[9.19( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=105) [2]/[0] r=-1 lpr=105 pi=[64,105)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:24 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 105 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=105) [2]/[0] r=0 lpr=105 pi=[64,105)/1 crt=53'585 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [2] -> [2], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:24 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 105 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=64/65 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=105) [2]/[0] r=0 lpr=105 pi=[64,105)/1 crt=53'585 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:25 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.1a scrub starts
Oct 11 01:46:25 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.1a scrub ok
Oct 11 01:46:25 compute-0 ceph-mon[191930]: 7.1b scrub starts
Oct 11 01:46:25 compute-0 ceph-mon[191930]: 7.1b scrub ok
Oct 11 01:46:25 compute-0 ceph-mon[191930]: 7.e scrub starts
Oct 11 01:46:25 compute-0 ceph-mon[191930]: 7.e scrub ok
Oct 11 01:46:25 compute-0 ceph-mon[191930]: pgmap v228: 321 pgs: 1 active+remapped, 320 active+clean; 456 KiB data, 139 MiB used, 60 GiB / 60 GiB avail; 43 B/s, 2 objects/s recovering
Oct 11 01:46:25 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "26"}]': finished
Oct 11 01:46:25 compute-0 ceph-mon[191930]: osdmap e104: 3 total, 3 up, 3 in
Oct 11 01:46:25 compute-0 ceph-mon[191930]: osdmap e105: 3 total, 3 up, 3 in
Oct 11 01:46:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e105 do_prune osdmap full prune enabled
Oct 11 01:46:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e106 e106: 3 total, 3 up, 3 in
Oct 11 01:46:25 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e106: 3 total, 3 up, 3 in
Oct 11 01:46:25 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.17 scrub starts
Oct 11 01:46:25 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.17 scrub ok
Oct 11 01:46:26 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 106 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=105/106 n=6 ec=56/47 lis/c=64/64 les/c/f=65/65/0 sis=105) [2]/[0] async=[2] r=0 lpr=105 pi=[64,105)/1 crt=53'585 mlcod 0'0 active+remapped mbc={255={(0+1)=11}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:46:26 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.1d scrub starts
Oct 11 01:46:26 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.1d scrub ok
Oct 11 01:46:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v232: 321 pgs: 1 active+remapped, 320 active+clean; 456 KiB data, 140 MiB used, 60 GiB / 60 GiB avail; 27 B/s, 1 objects/s recovering
Oct 11 01:46:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "27"} v 0) v1
Oct 11 01:46:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "27"}]: dispatch
Oct 11 01:46:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:46:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:46:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:46:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:46:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:46:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:46:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e106 do_prune osdmap full prune enabled
Oct 11 01:46:26 compute-0 ceph-mon[191930]: 7.1a scrub starts
Oct 11 01:46:26 compute-0 ceph-mon[191930]: 7.1a scrub ok
Oct 11 01:46:26 compute-0 ceph-mon[191930]: osdmap e106: 3 total, 3 up, 3 in
Oct 11 01:46:26 compute-0 ceph-mon[191930]: 2.17 scrub starts
Oct 11 01:46:26 compute-0 ceph-mon[191930]: 2.17 scrub ok
Oct 11 01:46:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "27"}]: dispatch
Oct 11 01:46:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "27"}]': finished
Oct 11 01:46:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e107 e107: 3 total, 3 up, 3 in
Oct 11 01:46:26 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e107: 3 total, 3 up, 3 in
Oct 11 01:46:26 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 107 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=105/106 n=6 ec=56/47 lis/c=105/64 les/c/f=106/65/0 sis=107 pruub=15.336351395s) [2] async=[2] r=-1 lpr=107 pi=[64,107)/1 crt=53'585 mlcod 53'585 active pruub 210.546432495s@ mbc={255={}}] start_peering_interval up [2] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:26 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 107 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=105/106 n=6 ec=56/47 lis/c=105/64 les/c/f=106/65/0 sis=107 pruub=15.336169243s) [2] r=-1 lpr=107 pi=[64,107)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 210.546432495s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:26 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 107 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=105/64 les/c/f=106/65/0 sis=107) [2] r=0 lpr=107 pi=[64,107)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [2] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 2 -> 2, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:26 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 107 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=105/64 les/c/f=106/65/0 sis=107) [2] r=0 lpr=107 pi=[64,107)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:26 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.18 deep-scrub starts
Oct 11 01:46:26 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 7.18 deep-scrub ok
Oct 11 01:46:27 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.1e scrub starts
Oct 11 01:46:27 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.1e scrub ok
Oct 11 01:46:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e107 do_prune osdmap full prune enabled
Oct 11 01:46:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e108 e108: 3 total, 3 up, 3 in
Oct 11 01:46:27 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e108: 3 total, 3 up, 3 in
Oct 11 01:46:27 compute-0 ceph-mon[191930]: 3.1d scrub starts
Oct 11 01:46:27 compute-0 ceph-mon[191930]: 3.1d scrub ok
Oct 11 01:46:27 compute-0 ceph-mon[191930]: pgmap v232: 321 pgs: 1 active+remapped, 320 active+clean; 456 KiB data, 140 MiB used, 60 GiB / 60 GiB avail; 27 B/s, 1 objects/s recovering
Oct 11 01:46:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "27"}]': finished
Oct 11 01:46:27 compute-0 ceph-mon[191930]: osdmap e107: 3 total, 3 up, 3 in
Oct 11 01:46:27 compute-0 ceph-mon[191930]: 7.18 deep-scrub starts
Oct 11 01:46:27 compute-0 ceph-mon[191930]: 7.18 deep-scrub ok
Oct 11 01:46:27 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 108 pg[9.19( v 53'585 (0'0,53'585] local-lis/les=107/108 n=6 ec=56/47 lis/c=105/64 les/c/f=106/65/0 sis=107) [2] r=0 lpr=107 pi=[64,107)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:46:28 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.5 scrub starts
Oct 11 01:46:28 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 3.5 scrub ok
Oct 11 01:46:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v235: 321 pgs: 1 activating, 320 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail; 28 B/s, 2 objects/s recovering
Oct 11 01:46:28 compute-0 ceph-mon[191930]: 3.1e scrub starts
Oct 11 01:46:28 compute-0 ceph-mon[191930]: 3.1e scrub ok
Oct 11 01:46:28 compute-0 ceph-mon[191930]: osdmap e108: 3 total, 3 up, 3 in
Oct 11 01:46:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e108 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:46:29 compute-0 ceph-mon[191930]: 3.5 scrub starts
Oct 11 01:46:29 compute-0 ceph-mon[191930]: 3.5 scrub ok
Oct 11 01:46:29 compute-0 ceph-mon[191930]: pgmap v235: 321 pgs: 1 activating, 320 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail; 28 B/s, 2 objects/s recovering
Oct 11 01:46:29 compute-0 podman[157119]: time="2025-10-11T01:46:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:46:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:46:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:46:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:46:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6796 "" "Go-http-client/1.1"
Oct 11 01:46:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v236: 321 pgs: 1 activating, 320 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail; 18 B/s, 1 objects/s recovering
Oct 11 01:46:30 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 5.2 scrub starts
Oct 11 01:46:30 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 5.2 scrub ok
Oct 11 01:46:31 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.c scrub starts
Oct 11 01:46:31 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 7.c scrub ok
Oct 11 01:46:31 compute-0 openstack_network_exporter[159265]: ERROR   01:46:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:46:31 compute-0 openstack_network_exporter[159265]: ERROR   01:46:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:46:31 compute-0 openstack_network_exporter[159265]: ERROR   01:46:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:46:31 compute-0 openstack_network_exporter[159265]: ERROR   01:46:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:46:31 compute-0 openstack_network_exporter[159265]: ERROR   01:46:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:46:31 compute-0 ceph-mon[191930]: pgmap v236: 321 pgs: 1 activating, 320 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail; 18 B/s, 1 objects/s recovering
Oct 11 01:46:31 compute-0 ceph-mon[191930]: 5.2 scrub starts
Oct 11 01:46:31 compute-0 ceph-mon[191930]: 5.2 scrub ok
Oct 11 01:46:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v237: 321 pgs: 1 activating, 320 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail; 16 B/s, 1 objects/s recovering
Oct 11 01:46:32 compute-0 ceph-mon[191930]: 7.c scrub starts
Oct 11 01:46:32 compute-0 ceph-mon[191930]: 7.c scrub ok
Oct 11 01:46:33 compute-0 ceph-mon[191930]: pgmap v237: 321 pgs: 1 activating, 320 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail; 16 B/s, 1 objects/s recovering
Oct 11 01:46:34 compute-0 podman[226825]: 2025-10-11 01:46:34.248128302 +0000 UTC m=+0.131015430 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:46:34 compute-0 podman[226826]: 2025-10-11 01:46:34.280316648 +0000 UTC m=+0.163370570 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.33.7, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, version=9.6, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, managed_by=edpm_ansible, build-date=2025-08-20T13:12:41, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, distribution-scope=public, architecture=x86_64, io.openshift.expose-services=, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, container_name=openstack_network_exporter, name=ubi9-minimal, vendor=Red Hat, Inc., maintainer=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=1755695350, com.redhat.component=ubi9-minimal-container, io.openshift.tags=minimal rhel9, vcs-type=git, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 01:46:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v238: 321 pgs: 321 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail; 13 B/s, 1 objects/s recovering
Oct 11 01:46:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "28"} v 0) v1
Oct 11 01:46:34 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "28"}]: dispatch
Oct 11 01:46:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e108 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:46:34 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.8 scrub starts
Oct 11 01:46:34 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.8 scrub ok
Oct 11 01:46:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e108 do_prune osdmap full prune enabled
Oct 11 01:46:34 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "28"}]: dispatch
Oct 11 01:46:34 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "28"}]': finished
Oct 11 01:46:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e109 e109: 3 total, 3 up, 3 in
Oct 11 01:46:34 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e109: 3 total, 3 up, 3 in
Oct 11 01:46:34 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.11 scrub starts
Oct 11 01:46:34 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.11 scrub ok
Oct 11 01:46:35 compute-0 ceph-mon[191930]: pgmap v238: 321 pgs: 321 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail; 13 B/s, 1 objects/s recovering
Oct 11 01:46:35 compute-0 ceph-mon[191930]: 2.8 scrub starts
Oct 11 01:46:35 compute-0 ceph-mon[191930]: 2.8 scrub ok
Oct 11 01:46:35 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "28"}]': finished
Oct 11 01:46:35 compute-0 ceph-mon[191930]: osdmap e109: 3 total, 3 up, 3 in
Oct 11 01:46:35 compute-0 ceph-mon[191930]: 5.11 scrub starts
Oct 11 01:46:35 compute-0 ceph-mon[191930]: 5.11 scrub ok
Oct 11 01:46:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v240: 321 pgs: 321 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail; 12 B/s, 1 objects/s recovering
Oct 11 01:46:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "29"} v 0) v1
Oct 11 01:46:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "29"}]: dispatch
Oct 11 01:46:36 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 5.4 scrub starts
Oct 11 01:46:36 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 5.4 scrub ok
Oct 11 01:46:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e109 do_prune osdmap full prune enabled
Oct 11 01:46:36 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.1d scrub starts
Oct 11 01:46:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "29"}]: dispatch
Oct 11 01:46:36 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.1d scrub ok
Oct 11 01:46:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "29"}]': finished
Oct 11 01:46:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e110 e110: 3 total, 3 up, 3 in
Oct 11 01:46:36 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e110: 3 total, 3 up, 3 in
Oct 11 01:46:37 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.1c scrub starts
Oct 11 01:46:37 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.1c scrub ok
Oct 11 01:46:37 compute-0 ceph-mon[191930]: pgmap v240: 321 pgs: 321 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail; 12 B/s, 1 objects/s recovering
Oct 11 01:46:37 compute-0 ceph-mon[191930]: 5.4 scrub starts
Oct 11 01:46:37 compute-0 ceph-mon[191930]: 5.4 scrub ok
Oct 11 01:46:37 compute-0 ceph-mon[191930]: 5.1d scrub starts
Oct 11 01:46:37 compute-0 ceph-mon[191930]: 5.1d scrub ok
Oct 11 01:46:37 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "29"}]': finished
Oct 11 01:46:37 compute-0 ceph-mon[191930]: osdmap e110: 3 total, 3 up, 3 in
Oct 11 01:46:38 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 110 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=85/86 n=6 ec=56/47 lis/c=85/85 les/c/f=86/86/0 sis=110 pruub=13.541626930s) [0] r=-1 lpr=110 pi=[85,110)/1 crt=53'585 mlcod 0'0 active pruub 205.612747192s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:38 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 110 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=85/86 n=6 ec=56/47 lis/c=85/85 les/c/f=86/86/0 sis=110 pruub=13.541577339s) [0] r=-1 lpr=110 pi=[85,110)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 205.612747192s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:38 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 110 pg[9.1c( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=85/85 les/c/f=86/86/0 sis=110) [0] r=0 lpr=110 pi=[85,110)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v242: 321 pgs: 1 unknown, 320 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:38 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.15 scrub starts
Oct 11 01:46:38 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.15 scrub ok
Oct 11 01:46:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e110 do_prune osdmap full prune enabled
Oct 11 01:46:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e111 e111: 3 total, 3 up, 3 in
Oct 11 01:46:38 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e111: 3 total, 3 up, 3 in
Oct 11 01:46:38 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 111 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=85/86 n=6 ec=56/47 lis/c=85/85 les/c/f=86/86/0 sis=111) [0]/[2] r=0 lpr=111 pi=[85,111)/1 crt=53'585 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:38 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 111 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=85/86 n=6 ec=56/47 lis/c=85/85 les/c/f=86/86/0 sis=111) [0]/[2] r=0 lpr=111 pi=[85,111)/1 crt=53'585 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:38 compute-0 ceph-mon[191930]: 2.1c scrub starts
Oct 11 01:46:38 compute-0 ceph-mon[191930]: 2.1c scrub ok
Oct 11 01:46:38 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 111 pg[9.1c( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=85/85 les/c/f=86/86/0 sis=111) [0]/[2] r=-1 lpr=111 pi=[85,111)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:38 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 111 pg[9.1c( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=85/85 les/c/f=86/86/0 sis=111) [0]/[2] r=-1 lpr=111 pi=[85,111)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:39 compute-0 podman[226867]: 2025-10-11 01:46:39.292683044 +0000 UTC m=+0.180679803 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_id=edpm, org.label-schema.license=GPLv2, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3)
Oct 11 01:46:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e111 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:46:39 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.1f scrub starts
Oct 11 01:46:39 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.1f scrub ok
Oct 11 01:46:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e111 do_prune osdmap full prune enabled
Oct 11 01:46:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e112 e112: 3 total, 3 up, 3 in
Oct 11 01:46:39 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e112: 3 total, 3 up, 3 in
Oct 11 01:46:39 compute-0 ceph-mon[191930]: pgmap v242: 321 pgs: 1 unknown, 320 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:39 compute-0 ceph-mon[191930]: 2.15 scrub starts
Oct 11 01:46:39 compute-0 ceph-mon[191930]: 2.15 scrub ok
Oct 11 01:46:39 compute-0 ceph-mon[191930]: osdmap e111: 3 total, 3 up, 3 in
Oct 11 01:46:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v245: 321 pgs: 1 unknown, 320 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:40 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 112 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=111/112 n=6 ec=56/47 lis/c=85/85 les/c/f=86/86/0 sis=111) [0]/[2] async=[0] r=0 lpr=111 pi=[85,111)/1 crt=53'585 mlcod 0'0 active+remapped mbc={255={(0+1)=7}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:46:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e112 do_prune osdmap full prune enabled
Oct 11 01:46:40 compute-0 ceph-mon[191930]: 2.1f scrub starts
Oct 11 01:46:40 compute-0 ceph-mon[191930]: 2.1f scrub ok
Oct 11 01:46:40 compute-0 ceph-mon[191930]: osdmap e112: 3 total, 3 up, 3 in
Oct 11 01:46:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e113 e113: 3 total, 3 up, 3 in
Oct 11 01:46:40 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e113: 3 total, 3 up, 3 in
Oct 11 01:46:40 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 113 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=111/112 n=6 ec=56/47 lis/c=111/85 les/c/f=112/86/0 sis=113 pruub=15.710827827s) [0] async=[0] r=-1 lpr=113 pi=[85,113)/1 crt=53'585 mlcod 53'585 active pruub 210.615646362s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:40 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 113 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=111/112 n=6 ec=56/47 lis/c=111/85 les/c/f=112/86/0 sis=113 pruub=15.709301949s) [0] r=-1 lpr=113 pi=[85,113)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 210.615646362s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:40 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 113 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=111/85 les/c/f=112/86/0 sis=113) [0] r=0 lpr=113 pi=[85,113)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:40 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 113 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=111/85 les/c/f=112/86/0 sis=113) [0] r=0 lpr=113 pi=[85,113)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:41 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.1d deep-scrub starts
Oct 11 01:46:41 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.1d deep-scrub ok
Oct 11 01:46:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e113 do_prune osdmap full prune enabled
Oct 11 01:46:41 compute-0 ceph-mon[191930]: pgmap v245: 321 pgs: 1 unknown, 320 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:41 compute-0 ceph-mon[191930]: osdmap e113: 3 total, 3 up, 3 in
Oct 11 01:46:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e114 e114: 3 total, 3 up, 3 in
Oct 11 01:46:41 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e114: 3 total, 3 up, 3 in
Oct 11 01:46:41 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 114 pg[9.1c( v 53'585 (0'0,53'585] local-lis/les=113/114 n=6 ec=56/47 lis/c=111/85 les/c/f=112/86/0 sis=113) [0] r=0 lpr=113 pi=[85,113)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:46:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v248: 321 pgs: 1 unknown, 320 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:42 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.b scrub starts
Oct 11 01:46:42 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 2.b scrub ok
Oct 11 01:46:42 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.12 scrub starts
Oct 11 01:46:42 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.12 scrub ok
Oct 11 01:46:42 compute-0 ceph-mon[191930]: 2.1d deep-scrub starts
Oct 11 01:46:42 compute-0 ceph-mon[191930]: 2.1d deep-scrub ok
Oct 11 01:46:42 compute-0 ceph-mon[191930]: osdmap e114: 3 total, 3 up, 3 in
Oct 11 01:46:43 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.d scrub starts
Oct 11 01:46:43 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.d scrub ok
Oct 11 01:46:43 compute-0 ceph-mon[191930]: pgmap v248: 321 pgs: 1 unknown, 320 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:43 compute-0 ceph-mon[191930]: 2.b scrub starts
Oct 11 01:46:43 compute-0 ceph-mon[191930]: 2.b scrub ok
Oct 11 01:46:43 compute-0 ceph-mon[191930]: 5.12 scrub starts
Oct 11 01:46:43 compute-0 ceph-mon[191930]: 5.12 scrub ok
Oct 11 01:46:44 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.3 scrub starts
Oct 11 01:46:44 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.3 scrub ok
Oct 11 01:46:44 compute-0 sudo[225343]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v249: 321 pgs: 321 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail; 3.9 KiB/s rd, 181 B/s wr, 8 op/s; 39 B/s, 2 objects/s recovering
Oct 11 01:46:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "30"} v 0) v1
Oct 11 01:46:44 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "30"}]: dispatch
Oct 11 01:46:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e114 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:46:44 compute-0 podman[226916]: 2025-10-11 01:46:44.827748288 +0000 UTC m=+0.108236229 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 01:46:44 compute-0 podman[226925]: 2025-10-11 01:46:44.834352621 +0000 UTC m=+0.104724953 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.component=ubi9-container, container_name=kepler, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc., maintainer=Red Hat, Inc., io.openshift.tags=base rhel9, release=1214.1726694543, release-0.7.12=, vcs-type=git, io.buildah.version=1.29.0, version=9.4, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, config_id=edpm, distribution-scope=public, architecture=x86_64, managed_by=edpm_ansible, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, name=ubi9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.openshift.expose-services=, build-date=2024-09-18T21:23:30)
Oct 11 01:46:44 compute-0 podman[226920]: 2025-10-11 01:46:44.892744562 +0000 UTC m=+0.168356037 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3)
Oct 11 01:46:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e114 do_prune osdmap full prune enabled
Oct 11 01:46:44 compute-0 ceph-mon[191930]: 10.d scrub starts
Oct 11 01:46:44 compute-0 ceph-mon[191930]: 10.d scrub ok
Oct 11 01:46:44 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "30"}]: dispatch
Oct 11 01:46:45 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "30"}]': finished
Oct 11 01:46:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e115 e115: 3 total, 3 up, 3 in
Oct 11 01:46:45 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e115: 3 total, 3 up, 3 in
Oct 11 01:46:45 compute-0 sudo[227100]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fcvwvdrbndkmzbvbyxffkwbjjzdrtpjj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147204.7652345-128-265894881303761/AnsiballZ_command.py'
Oct 11 01:46:45 compute-0 sudo[227100]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:46:45 compute-0 python3.9[227102]: ansible-ansible.legacy.command Invoked with _raw_params=rpm -V driverctl lvm2 crudini jq nftables NetworkManager openstack-selinux python3-libselinux python3-pyyaml rsync tmpwatch sysstat iproute-tc ksmtuned systemd-container crypto-policies-scripts grubby sos _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:46:45 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.16 scrub starts
Oct 11 01:46:45 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.16 scrub ok
Oct 11 01:46:46 compute-0 ceph-mon[191930]: 10.3 scrub starts
Oct 11 01:46:46 compute-0 ceph-mon[191930]: 10.3 scrub ok
Oct 11 01:46:46 compute-0 ceph-mon[191930]: pgmap v249: 321 pgs: 321 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail; 3.9 KiB/s rd, 181 B/s wr, 8 op/s; 39 B/s, 2 objects/s recovering
Oct 11 01:46:46 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "30"}]': finished
Oct 11 01:46:46 compute-0 ceph-mon[191930]: osdmap e115: 3 total, 3 up, 3 in
Oct 11 01:46:46 compute-0 ceph-mon[191930]: 5.16 scrub starts
Oct 11 01:46:46 compute-0 ceph-mon[191930]: 5.16 scrub ok
Oct 11 01:46:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v251: 321 pgs: 321 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s rd, 170 B/s wr, 8 op/s; 36 B/s, 2 objects/s recovering
Oct 11 01:46:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "31"} v 0) v1
Oct 11 01:46:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "31"}]: dispatch
Oct 11 01:46:46 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.9 scrub starts
Oct 11 01:46:46 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.9 scrub ok
Oct 11 01:46:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e115 do_prune osdmap full prune enabled
Oct 11 01:46:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "31"}]: dispatch
Oct 11 01:46:47 compute-0 ceph-mon[191930]: 5.9 scrub starts
Oct 11 01:46:47 compute-0 ceph-mon[191930]: 5.9 scrub ok
Oct 11 01:46:47 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "31"}]': finished
Oct 11 01:46:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e116 e116: 3 total, 3 up, 3 in
Oct 11 01:46:47 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e116: 3 total, 3 up, 3 in
Oct 11 01:46:47 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 116 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=73/74 n=6 ec=56/47 lis/c=73/73 les/c/f=74/74/0 sis=116 pruub=9.659255028s) [0] r=-1 lpr=116 pi=[73,116)/1 crt=53'585 mlcod 0'0 active pruub 210.673934937s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:47 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 116 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=73/74 n=6 ec=56/47 lis/c=73/73 les/c/f=74/74/0 sis=116 pruub=9.658277512s) [0] r=-1 lpr=116 pi=[73,116)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 210.673934937s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:47 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 116 pg[9.1e( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=73/73 les/c/f=74/74/0 sis=116) [0] r=0 lpr=116 pi=[73,116)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:47 compute-0 sudo[227100]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e116 do_prune osdmap full prune enabled
Oct 11 01:46:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e117 e117: 3 total, 3 up, 3 in
Oct 11 01:46:48 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e117: 3 total, 3 up, 3 in
Oct 11 01:46:48 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 117 pg[9.1e( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=73/73 les/c/f=74/74/0 sis=117) [0]/[2] r=-1 lpr=117 pi=[73,117)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:48 compute-0 ceph-mon[191930]: pgmap v251: 321 pgs: 321 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s rd, 170 B/s wr, 8 op/s; 36 B/s, 2 objects/s recovering
Oct 11 01:46:48 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "31"}]': finished
Oct 11 01:46:48 compute-0 ceph-mon[191930]: osdmap e116: 3 total, 3 up, 3 in
Oct 11 01:46:48 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 117 pg[9.1e( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=73/73 les/c/f=74/74/0 sis=117) [0]/[2] r=-1 lpr=117 pi=[73,117)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:48 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 117 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=73/74 n=6 ec=56/47 lis/c=73/73 les/c/f=74/74/0 sis=117) [0]/[2] r=0 lpr=117 pi=[73,117)/1 crt=53'585 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:48 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 117 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=73/74 n=6 ec=56/47 lis/c=73/73 les/c/f=74/74/0 sis=117) [0]/[2] r=0 lpr=117 pi=[73,117)/1 crt=53'585 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v254: 321 pgs: 321 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s rd, 170 B/s wr, 8 op/s; 36 B/s, 2 objects/s recovering
Oct 11 01:46:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "32"} v 0) v1
Oct 11 01:46:48 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:46:48 compute-0 sudo[227403]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lhnmcqlwhnvveixnhobvdrlwockfmtrj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147207.615969-136-10768209669368/AnsiballZ_selinux.py'
Oct 11 01:46:48 compute-0 sudo[227403]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:46:48 compute-0 podman[227361]: 2025-10-11 01:46:48.661502111 +0000 UTC m=+0.157540713 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 01:46:48 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.1e scrub starts
Oct 11 01:46:48 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.1e scrub ok
Oct 11 01:46:48 compute-0 python3.9[227408]: ansible-ansible.posix.selinux Invoked with policy=targeted state=enforcing configfile=/etc/selinux/config update_kernel_param=False
Oct 11 01:46:48 compute-0 sudo[227403]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e117 do_prune osdmap full prune enabled
Oct 11 01:46:49 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:46:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e118 e118: 3 total, 3 up, 3 in
Oct 11 01:46:49 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e118: 3 total, 3 up, 3 in
Oct 11 01:46:49 compute-0 ceph-mon[191930]: osdmap e117: 3 total, 3 up, 3 in
Oct 11 01:46:49 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "32"}]: dispatch
Oct 11 01:46:49 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 118 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=75/76 n=6 ec=56/47 lis/c=75/75 les/c/f=76/76/0 sis=118 pruub=9.678158760s) [1] r=-1 lpr=118 pi=[75,118)/1 crt=53'585 mlcod 0'0 active pruub 212.735763550s@ mbc={}] start_peering_interval up [2] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 2 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:49 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 118 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=75/76 n=6 ec=56/47 lis/c=75/75 les/c/f=76/76/0 sis=118 pruub=9.678028107s) [1] r=-1 lpr=118 pi=[75,118)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 212.735763550s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:49 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 118 pg[9.1f( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=75/75 les/c/f=76/76/0 sis=118) [1] r=0 lpr=118 pi=[75,118)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:49 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 118 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=117/118 n=6 ec=56/47 lis/c=73/73 les/c/f=74/74/0 sis=117) [0]/[2] async=[0] r=0 lpr=117 pi=[73,117)/1 crt=53'585 mlcod 0'0 active+remapped mbc={255={(0+1)=6}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:46:49 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.5 scrub starts
Oct 11 01:46:49 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.5 scrub ok
Oct 11 01:46:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e118 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:46:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e118 do_prune osdmap full prune enabled
Oct 11 01:46:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e119 e119: 3 total, 3 up, 3 in
Oct 11 01:46:49 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e119: 3 total, 3 up, 3 in
Oct 11 01:46:49 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 119 pg[9.1f( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=75/75 les/c/f=76/76/0 sis=119) [1]/[2] r=-1 lpr=119 pi=[75,119)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [1] -> [1], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:49 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 119 pg[9.1f( empty local-lis/les=0/0 n=0 ec=56/47 lis/c=75/75 les/c/f=76/76/0 sis=119) [1]/[2] r=-1 lpr=119 pi=[75,119)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:49 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 119 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=117/118 n=6 ec=56/47 lis/c=117/73 les/c/f=118/74/0 sis=119 pruub=15.455020905s) [0] async=[0] r=-1 lpr=119 pi=[73,119)/1 crt=53'585 mlcod 53'585 active pruub 219.071517944s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:49 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 119 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=75/76 n=6 ec=56/47 lis/c=75/75 les/c/f=76/76/0 sis=119) [1]/[2] r=0 lpr=119 pi=[75,119)/1 crt=53'585 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [1] -> [1], acting [1] -> [2], acting_primary 1 -> 2, up_primary 1 -> 1, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:49 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 119 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=75/76 n=6 ec=56/47 lis/c=75/75 les/c/f=76/76/0 sis=119) [1]/[2] r=0 lpr=119 pi=[75,119)/1 crt=53'585 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:49 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 119 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=117/118 n=6 ec=56/47 lis/c=117/73 les/c/f=118/74/0 sis=119 pruub=15.454694748s) [0] r=-1 lpr=119 pi=[73,119)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 219.071517944s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:49 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 119 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=117/73 les/c/f=118/74/0 sis=119) [0] r=0 lpr=119 pi=[73,119)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:49 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 119 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=117/73 les/c/f=118/74/0 sis=119) [0] r=0 lpr=119 pi=[73,119)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:49 compute-0 sudo[227559]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iukfwrmhssjrqvdutedtsvvvzbitjtsg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147209.4039664-147-44185143725952/AnsiballZ_command.py'
Oct 11 01:46:50 compute-0 sudo[227559]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:46:50 compute-0 ceph-mon[191930]: pgmap v254: 321 pgs: 321 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s rd, 170 B/s wr, 8 op/s; 36 B/s, 2 objects/s recovering
Oct 11 01:46:50 compute-0 ceph-mon[191930]: 10.1e scrub starts
Oct 11 01:46:50 compute-0 ceph-mon[191930]: 10.1e scrub ok
Oct 11 01:46:50 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "32"}]': finished
Oct 11 01:46:50 compute-0 ceph-mon[191930]: osdmap e118: 3 total, 3 up, 3 in
Oct 11 01:46:50 compute-0 ceph-mon[191930]: osdmap e119: 3 total, 3 up, 3 in
Oct 11 01:46:50 compute-0 python3.9[227561]: ansible-ansible.legacy.command Invoked with cmd=dd if=/dev/zero of=/swap count=1024 bs=1M creates=/swap _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None removes=None stdin=None
Oct 11 01:46:50 compute-0 sudo[227559]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v257: 321 pgs: 321 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e119 do_prune osdmap full prune enabled
Oct 11 01:46:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e120 e120: 3 total, 3 up, 3 in
Oct 11 01:46:50 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e120: 3 total, 3 up, 3 in
Oct 11 01:46:50 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.4 scrub starts
Oct 11 01:46:50 compute-0 ceph-osd[205667]: osd.0 pg_epoch: 120 pg[9.1e( v 53'585 (0'0,53'585] local-lis/les=119/120 n=6 ec=56/47 lis/c=117/73 les/c/f=118/74/0 sis=119) [0] r=0 lpr=119 pi=[73,119)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:46:50 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.4 scrub ok
Oct 11 01:46:50 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.7 scrub starts
Oct 11 01:46:50 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.7 scrub ok
Oct 11 01:46:50 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 120 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=119/120 n=6 ec=56/47 lis/c=75/75 les/c/f=76/76/0 sis=119) [1]/[2] async=[1] r=0 lpr=119 pi=[75,119)/1 crt=53'585 mlcod 0'0 active+remapped mbc={255={(0+1)=6}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:46:51 compute-0 ceph-mon[191930]: 10.5 scrub starts
Oct 11 01:46:51 compute-0 ceph-mon[191930]: 10.5 scrub ok
Oct 11 01:46:51 compute-0 ceph-mon[191930]: osdmap e120: 3 total, 3 up, 3 in
Oct 11 01:46:51 compute-0 ceph-mon[191930]: 2.7 scrub starts
Oct 11 01:46:51 compute-0 ceph-mon[191930]: 2.7 scrub ok
Oct 11 01:46:51 compute-0 sudo[227711]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-esnbtdjqztdhqykardktaqexqhbsvbiu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147210.5474432-155-63065385819298/AnsiballZ_file.py'
Oct 11 01:46:51 compute-0 sudo[227711]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:46:51 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.a scrub starts
Oct 11 01:46:51 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.a scrub ok
Oct 11 01:46:51 compute-0 python3.9[227713]: ansible-ansible.builtin.file Invoked with group=root mode=0600 owner=root path=/swap recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False state=None _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:46:51 compute-0 sudo[227711]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e120 do_prune osdmap full prune enabled
Oct 11 01:46:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e121 e121: 3 total, 3 up, 3 in
Oct 11 01:46:51 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e121: 3 total, 3 up, 3 in
Oct 11 01:46:51 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 121 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=119/75 les/c/f=120/76/0 sis=121) [1] r=0 lpr=121 pi=[75,121)/1 luod=0'0 crt=53'585 mlcod 0'0 active mbc={}] start_peering_interval up [1] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 1 -> 1, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:51 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 121 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=0/0 n=6 ec=56/47 lis/c=119/75 les/c/f=120/76/0 sis=121) [1] r=0 lpr=121 pi=[75,121)/1 crt=53'585 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Oct 11 01:46:51 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 121 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=119/120 n=6 ec=56/47 lis/c=119/75 les/c/f=120/76/0 sis=121 pruub=15.141231537s) [1] async=[1] r=-1 lpr=121 pi=[75,121)/1 crt=53'585 mlcod 53'585 active pruub 220.776199341s@ mbc={255={}}] start_peering_interval up [1] -> [1], acting [2] -> [1], acting_primary 2 -> 1, up_primary 1 -> 1, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Oct 11 01:46:51 compute-0 ceph-osd[207831]: osd.2 pg_epoch: 121 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=119/120 n=6 ec=56/47 lis/c=119/75 les/c/f=120/76/0 sis=121 pruub=15.141097069s) [1] r=-1 lpr=121 pi=[75,121)/1 crt=53'585 mlcod 0'0 unknown NOTIFY pruub 220.776199341s@ mbc={}] state<Start>: transitioning to Stray
Oct 11 01:46:52 compute-0 ceph-mon[191930]: pgmap v257: 321 pgs: 321 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:52 compute-0 ceph-mon[191930]: 10.4 scrub starts
Oct 11 01:46:52 compute-0 ceph-mon[191930]: 10.4 scrub ok
Oct 11 01:46:52 compute-0 ceph-mon[191930]: osdmap e121: 3 total, 3 up, 3 in
Oct 11 01:46:52 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.c scrub starts
Oct 11 01:46:52 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.c scrub ok
Oct 11 01:46:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v260: 321 pgs: 321 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:52 compute-0 sudo[227863]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jcxmjmaefayasusonnswgujpjiimiwbt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147211.8309565-163-253717164397749/AnsiballZ_mount.py'
Oct 11 01:46:52 compute-0 sudo[227863]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:46:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e121 do_prune osdmap full prune enabled
Oct 11 01:46:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 e122: 3 total, 3 up, 3 in
Oct 11 01:46:52 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e122: 3 total, 3 up, 3 in
Oct 11 01:46:52 compute-0 ceph-osd[206800]: osd.1 pg_epoch: 122 pg[9.1f( v 53'585 (0'0,53'585] local-lis/les=121/122 n=6 ec=56/47 lis/c=119/75 les/c/f=120/76/0 sis=121) [1] r=0 lpr=121 pi=[75,121)/1 crt=53'585 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Oct 11 01:46:52 compute-0 python3.9[227865]: ansible-ansible.posix.mount Invoked with dump=0 fstype=swap name=none opts=sw passno=0 src=/swap state=present path=none boot=True opts_no_log=False backup=False fstab=None
Oct 11 01:46:52 compute-0 sudo[227863]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:53 compute-0 ceph-mon[191930]: 10.a scrub starts
Oct 11 01:46:53 compute-0 ceph-mon[191930]: 10.a scrub ok
Oct 11 01:46:53 compute-0 ceph-mon[191930]: osdmap e122: 3 total, 3 up, 3 in
Oct 11 01:46:54 compute-0 ceph-mon[191930]: 10.c scrub starts
Oct 11 01:46:54 compute-0 ceph-mon[191930]: 10.c scrub ok
Oct 11 01:46:54 compute-0 ceph-mon[191930]: pgmap v260: 321 pgs: 321 active+clean; 456 KiB data, 144 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:46:54 compute-0 sudo[228015]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ltmeclbugdbgngeuxepyaokxehcyecpz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147213.6337543-191-242745971177244/AnsiballZ_file.py'
Oct 11 01:46:54 compute-0 sudo[228015]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:46:54 compute-0 python3.9[228017]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/pki/ca-trust/source/anchors setype=cert_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:46:54 compute-0 sudo[228015]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v262: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 0 B/s, 1 objects/s recovering
Oct 11 01:46:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:46:55 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.18 scrub starts
Oct 11 01:46:55 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.18 scrub ok
Oct 11 01:46:55 compute-0 sudo[228167]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fxrskoyicvagpfkmoleaknfwcavjbgat ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147214.7840772-199-29878899432859/AnsiballZ_stat.py'
Oct 11 01:46:55 compute-0 sudo[228167]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:46:55 compute-0 python3.9[228169]: ansible-ansible.legacy.stat Invoked with path=/etc/pki/ca-trust/source/anchors/tls-ca-bundle.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:46:55 compute-0 sudo[228167]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:56 compute-0 ceph-mon[191930]: pgmap v262: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 0 B/s, 1 objects/s recovering
Oct 11 01:46:56 compute-0 ceph-mon[191930]: 10.18 scrub starts
Oct 11 01:46:56 compute-0 ceph-mon[191930]: 10.18 scrub ok
Oct 11 01:46:56 compute-0 sudo[228245]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-evlooftzhbagbwhaptfgwpqcacqewdtr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147214.7840772-199-29878899432859/AnsiballZ_file.py'
Oct 11 01:46:56 compute-0 sudo[228245]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:46:56
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['.mgr', 'images', 'cephfs.cephfs.data', 'default.rgw.meta', 'volumes', 'backups', 'default.rgw.control', '.rgw.root', 'vms', 'cephfs.cephfs.meta', 'default.rgw.log']
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 01:46:56 compute-0 python3.9[228247]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/pki/ca-trust/source/anchors/tls-ca-bundle.pem _original_basename=tls-ca-bundle.pem recurse=False state=file path=/etc/pki/ca-trust/source/anchors/tls-ca-bundle.pem force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:46:56 compute-0 sudo[228245]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v263: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 18 B/s, 1 objects/s recovering
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:46:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:46:57 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.13 deep-scrub starts
Oct 11 01:46:57 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.13 deep-scrub ok
Oct 11 01:46:57 compute-0 sudo[228397]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zcnlinhklolmqdgpfbycifwwlxscswwf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147217.301026-223-95678837333569/AnsiballZ_getent.py'
Oct 11 01:46:57 compute-0 sudo[228397]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:46:58 compute-0 ceph-mon[191930]: pgmap v263: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 18 B/s, 1 objects/s recovering
Oct 11 01:46:58 compute-0 ceph-mon[191930]: 5.13 deep-scrub starts
Oct 11 01:46:58 compute-0 ceph-mon[191930]: 5.13 deep-scrub ok
Oct 11 01:46:58 compute-0 python3.9[228399]: ansible-ansible.builtin.getent Invoked with database=passwd key=qemu fail_key=True service=None split=None
Oct 11 01:46:58 compute-0 sudo[228397]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v264: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 14 B/s, 1 objects/s recovering
Oct 11 01:46:59 compute-0 sudo[228550]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eqbmaxruvwmpobffwsgszgrqgvbpxmem ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147218.608459-233-92754923367137/AnsiballZ_getent.py'
Oct 11 01:46:59 compute-0 sudo[228550]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:46:59 compute-0 python3.9[228552]: ansible-ansible.builtin.getent Invoked with database=passwd key=hugetlbfs fail_key=True service=None split=None
Oct 11 01:46:59 compute-0 sudo[228550]: pam_unix(sudo:session): session closed for user root
Oct 11 01:46:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:46:59 compute-0 podman[157119]: time="2025-10-11T01:46:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:46:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:46:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:46:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:46:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6807 "" "Go-http-client/1.1"
Oct 11 01:47:00 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.1b scrub starts
Oct 11 01:47:00 compute-0 ceph-mon[191930]: pgmap v264: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 14 B/s, 1 objects/s recovering
Oct 11 01:47:00 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.1b scrub ok
Oct 11 01:47:00 compute-0 sudo[228703]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-komkgvvldgyaqbjexkvvlacdmzkkrfei ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147219.6808202-241-47442120305253/AnsiballZ_group.py'
Oct 11 01:47:00 compute-0 sudo[228703]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v265: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 12 B/s, 1 objects/s recovering
Oct 11 01:47:00 compute-0 python3.9[228705]: ansible-ansible.builtin.group Invoked with gid=42477 name=hugetlbfs state=present force=False system=False local=False non_unique=False gid_min=None gid_max=None
Oct 11 01:47:00 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.d deep-scrub starts
Oct 11 01:47:00 compute-0 sudo[228703]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:00 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.d deep-scrub ok
Oct 11 01:47:01 compute-0 ceph-mon[191930]: 10.1b scrub starts
Oct 11 01:47:01 compute-0 ceph-mon[191930]: 10.1b scrub ok
Oct 11 01:47:01 compute-0 ceph-mon[191930]: 2.d deep-scrub starts
Oct 11 01:47:01 compute-0 ceph-mon[191930]: 2.d deep-scrub ok
Oct 11 01:47:01 compute-0 openstack_network_exporter[159265]: ERROR   01:47:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:47:01 compute-0 openstack_network_exporter[159265]: ERROR   01:47:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:47:01 compute-0 openstack_network_exporter[159265]: ERROR   01:47:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:47:01 compute-0 openstack_network_exporter[159265]: ERROR   01:47:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:47:01 compute-0 openstack_network_exporter[159265]: ERROR   01:47:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:47:01 compute-0 sudo[228855]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uymudjzozbhzridskhmoplmooguutbqk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147221.0902896-250-111974639234478/AnsiballZ_file.py'
Oct 11 01:47:01 compute-0 sudo[228855]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:01 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.6 scrub starts
Oct 11 01:47:01 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.6 scrub ok
Oct 11 01:47:01 compute-0 python3.9[228857]: ansible-ansible.builtin.file Invoked with group=qemu mode=0755 owner=qemu path=/var/lib/vhost_sockets setype=virt_cache_t seuser=system_u state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None serole=None selevel=None attributes=None
Oct 11 01:47:01 compute-0 sudo[228855]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:02 compute-0 ceph-mon[191930]: pgmap v265: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 12 B/s, 1 objects/s recovering
Oct 11 01:47:02 compute-0 ceph-mon[191930]: 2.6 scrub starts
Oct 11 01:47:02 compute-0 ceph-mon[191930]: 2.6 scrub ok
Oct 11 01:47:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v266: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 10 B/s, 1 objects/s recovering
Oct 11 01:47:02 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.8 scrub starts
Oct 11 01:47:02 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.8 scrub ok
Oct 11 01:47:02 compute-0 sudo[229007]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mvhgnfgyselnjkdtzyzrgylocywbmwpj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147222.426646-261-82212553924010/AnsiballZ_dnf.py'
Oct 11 01:47:02 compute-0 sudo[229007]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:03 compute-0 python3.9[229009]: ansible-ansible.legacy.dnf Invoked with name=['dracut-config-generic'] state=absent allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:47:04 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.1c scrub starts
Oct 11 01:47:04 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.1c scrub ok
Oct 11 01:47:04 compute-0 ceph-mon[191930]: pgmap v266: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 10 B/s, 1 objects/s recovering
Oct 11 01:47:04 compute-0 ceph-mon[191930]: 10.8 scrub starts
Oct 11 01:47:04 compute-0 ceph-mon[191930]: 10.8 scrub ok
Oct 11 01:47:04 compute-0 ceph-mon[191930]: 10.1c scrub starts
Oct 11 01:47:04 compute-0 ceph-mon[191930]: 10.1c scrub ok
Oct 11 01:47:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v267: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 9 B/s, 1 objects/s recovering
Oct 11 01:47:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:47:04 compute-0 sudo[229007]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:05 compute-0 podman[229088]: 2025-10-11 01:47:05.283634554 +0000 UTC m=+0.159300481 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, url=https://catalog.redhat.com/en/search?searchType=containers, version=9.6, release=1755695350, architecture=x86_64, build-date=2025-08-20T13:12:41, vendor=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.33.7, managed_by=edpm_ansible, io.openshift.tags=minimal rhel9, name=ubi9-minimal, config_id=edpm, distribution-scope=public, vcs-type=git, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc.)
Oct 11 01:47:05 compute-0 podman[229087]: 2025-10-11 01:47:05.28821568 +0000 UTC m=+0.169781480 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 01:47:05 compute-0 sudo[229200]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vfgtgfiyicbkoevwjwgvusfckqdxkhlr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147224.9458935-269-109780570605562/AnsiballZ_file.py'
Oct 11 01:47:05 compute-0 sudo[229200]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:05 compute-0 python3.9[229202]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/modules-load.d setype=etc_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:47:05 compute-0 sudo[229200]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 01:47:06 compute-0 ceph-mon[191930]: pgmap v267: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 9 B/s, 1 objects/s recovering
Oct 11 01:47:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v268: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 9 B/s, 0 objects/s recovering
Oct 11 01:47:06 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.1 scrub starts
Oct 11 01:47:06 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.1 scrub ok
Oct 11 01:47:06 compute-0 sudo[229352]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gtdvmzgotwwjxicibwgvkzswidzxeshn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147226.1410384-277-116450911374537/AnsiballZ_stat.py'
Oct 11 01:47:06 compute-0 sudo[229352]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:06 compute-0 python3.9[229354]: ansible-ansible.legacy.stat Invoked with path=/etc/modules-load.d/99-edpm.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:47:07 compute-0 sudo[229352]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:07 compute-0 ceph-mon[191930]: 5.1 scrub starts
Oct 11 01:47:07 compute-0 ceph-mon[191930]: 5.1 scrub ok
Oct 11 01:47:07 compute-0 sudo[229430]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hiioykwthlmucenksqfxmwysphkitaxo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147226.1410384-277-116450911374537/AnsiballZ_file.py'
Oct 11 01:47:07 compute-0 sudo[229430]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:07 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.5 scrub starts
Oct 11 01:47:07 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.7 scrub starts
Oct 11 01:47:07 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.5 scrub ok
Oct 11 01:47:07 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.7 scrub ok
Oct 11 01:47:07 compute-0 python3.9[229432]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root setype=etc_t dest=/etc/modules-load.d/99-edpm.conf _original_basename=edpm-modprobe.conf.j2 recurse=False state=file path=/etc/modules-load.d/99-edpm.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:47:07 compute-0 sudo[229430]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.935 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to take longer than expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.936 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.937 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.938 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f8ed27f97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.939 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb8c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.939 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb1a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb200>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed2874260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed3ab42f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb350>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb90>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fa390>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb3b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.943 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.945 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f8ed27fbad0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.945 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f8ed27faff0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.946 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbbf0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbc80>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f8ed27fb110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f8ed27fb170>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.951 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27f9610>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.951 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb620>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f8ed27fb1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.951 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbe30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.953 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbec0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.953 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbf50>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed238a810>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f8ed27fb230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f8ed2874230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.955 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f8ed27fb290>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.955 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f8ed5778d70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.955 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.956 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f8ed27fb650>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.956 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.956 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f8ed27fbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.956 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.956 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f8ed27fb320>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.956 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f8ed27fbb60>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f8ed27fa3f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f8ed27fb380>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f8ed27fbbc0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f8ed27fbc50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f8ed27fbce0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f8ed27fbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f8ed27fb590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f8ed27f95e0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f8ed27fb5f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f8ed27fbe00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f8ed27fbe90>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f8ed27fbf20>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.962 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.962 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.962 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.963 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.963 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.963 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.963 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.963 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:47:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:47:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
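Editor's note: the ceilometer burst above is a single polling cycle. For every pollster the agent first runs the local_instances discovery; because no guests are running on compute-0, discovery returns an empty list, each pollster is skipped with "no resources found this cycle", and the task is then marked finished. A minimal sketch of that control flow (hypothetical names and a simplified structure, not ceilometer's actual classes):

# Sketch of a discovery-then-poll cycle (simplified; the real logic lives in
# ceilometer/polling/manager.py, as the file paths in the log lines show).
from typing import Callable, Iterable

def run_cycle(pollsters: Iterable[str], discover: Callable[[], list]) -> None:
    resources = discover()  # e.g. libvirt instances local to this hypervisor
    for name in pollsters:
        if not resources:
            # Matches the "Skip pollster <name>, no resources found this cycle" lines.
            print(f"Skip pollster {name}, no resources found this cycle")
            continue
        print(f"Polling {name} for {len(resources)} resource(s)")
    for name in pollsters:
        print(f"Finished processing pollster [{name}].")

if __name__ == "__main__":
    run_cycle(["cpu", "memory.usage", "disk.device.usage"], discover=lambda: [])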
Oct 11 01:47:08 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.1d scrub starts
Oct 11 01:47:08 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.1d scrub ok
Oct 11 01:47:08 compute-0 ceph-mon[191930]: pgmap v268: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 9 B/s, 0 objects/s recovering
Oct 11 01:47:08 compute-0 ceph-mon[191930]: 2.5 scrub starts
Oct 11 01:47:08 compute-0 ceph-mon[191930]: 2.5 scrub ok
Oct 11 01:47:08 compute-0 ceph-mon[191930]: 10.1d scrub starts
Oct 11 01:47:08 compute-0 ceph-mon[191930]: 10.1d scrub ok
Oct 11 01:47:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v269: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
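Editor's note: the interleaved "scrub starts"/"scrub ok" pairs are routine per-placement-group consistency checks; each ceph-osd runs them on its own schedule, the mon re-logs what the OSDs report, and ceph-mgr publishes the pgmap summaries. The same state can be inspected, or a scrub requested by hand, with standard ceph commands (sketch; the PG id is taken from the log, and an admin keyring is assumed):

# Sketch: check PG state and manually request a scrub of one PG.
import subprocess

subprocess.run(["ceph", "pg", "stat"], check=True)            # e.g. "321 pgs: 321 active+clean"
subprocess.run(["ceph", "pg", "scrub", "10.1d"], check=True)  # ask the primary OSD to scrub 10.1d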
Oct 11 01:47:08 compute-0 sudo[229583]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ygxxbxvpxqbnesspqenwlvbrxdtgxwgv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147228.064446-290-260586371983611/AnsiballZ_stat.py'
Oct 11 01:47:08 compute-0 sudo[229583]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:08 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.f scrub starts
Oct 11 01:47:08 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.f scrub ok
Oct 11 01:47:08 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.e deep-scrub starts
Oct 11 01:47:08 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.e deep-scrub ok
Oct 11 01:47:08 compute-0 python3.9[229585]: ansible-ansible.legacy.stat Invoked with path=/etc/sysctl.d/99-edpm.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:47:08 compute-0 sudo[229583]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:09 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.1f scrub starts
Oct 11 01:47:09 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 10.1f scrub ok
Oct 11 01:47:09 compute-0 ceph-mon[191930]: 10.7 scrub starts
Oct 11 01:47:09 compute-0 ceph-mon[191930]: 10.7 scrub ok
Oct 11 01:47:09 compute-0 ceph-mon[191930]: pgmap v269: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:09 compute-0 ceph-mon[191930]: 5.f scrub starts
Oct 11 01:47:09 compute-0 ceph-mon[191930]: 5.f scrub ok
Oct 11 01:47:09 compute-0 ceph-mon[191930]: 10.1f scrub starts
Oct 11 01:47:09 compute-0 ceph-mon[191930]: 10.1f scrub ok
Oct 11 01:47:09 compute-0 sudo[229661]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hjbizizljttoifzorehpquerzwiooejc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147228.064446-290-260586371983611/AnsiballZ_file.py'
Oct 11 01:47:09 compute-0 sudo[229661]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:09 compute-0 podman[229663]: 2025-10-11 01:47:09.52577882 +0000 UTC m=+0.118576009 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
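Editor's note: podman emits a health_status event like the one above each time the container's healthcheck timer runs the configured test (here '/openstack/healthcheck ipmi', bind-mounted into the container). The same check can be run on demand (sketch):

# Sketch: run a container's configured healthcheck once, by hand.
# 'podman healthcheck run' exits 0 when the check passes.
import subprocess

rc = subprocess.run(["podman", "healthcheck", "run", "ceilometer_agent_ipmi"]).returncode
print("healthy" if rc == 0 else f"unhealthy (rc={rc})")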
Oct 11 01:47:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:47:09 compute-0 python3.9[229664]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root setype=etc_t dest=/etc/sysctl.d/99-edpm.conf _original_basename=edpm-sysctl.conf.j2 recurse=False state=file path=/etc/sysctl.d/99-edpm.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:47:09 compute-0 sudo[229661]: pam_unix(sudo:session): session closed for user root
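Editor's note: the stat/file pair above is the tail of an idempotent file deploy: ansible's stat module computes a SHA-1 checksum of /etc/sysctl.d/99-edpm.conf (checksum_algorithm=sha1), and since the content already matched the template, the follow-up file task only enforces owner/group/mode and the etc_t SELinux type. The checksum half is easy to reproduce (sketch, not ansible's own code):

# Sketch: the sha1 checksum ansible's stat module reports for a file.
import hashlib

def sha1_of(path: str) -> str:
    h = hashlib.sha1()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(65536), b""):
            h.update(chunk)
    return h.hexdigest()

print(sha1_of("/etc/sysctl.d/99-edpm.conf"))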
Oct 11 01:47:10 compute-0 ceph-mon[191930]: 10.e deep-scrub starts
Oct 11 01:47:10 compute-0 ceph-mon[191930]: 10.e deep-scrub ok
Oct 11 01:47:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v270: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:10 compute-0 sudo[229832]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bdugtkqeqcwlhygbuwbpahmatusqatan ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147230.0778809-305-184323678480472/AnsiballZ_dnf.py'
Oct 11 01:47:10 compute-0 sudo[229832]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:10 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.1a deep-scrub starts
Oct 11 01:47:10 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.1a deep-scrub ok
Oct 11 01:47:10 compute-0 python3.9[229834]: ansible-ansible.legacy.dnf Invoked with name=['tuned', 'tuned-profiles-cpu-partitioning'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
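Editor's note: the dnf task above is declarative (state=present): it installs tuned and tuned-profiles-cpu-partitioning only if they are missing and reports "ok" otherwise. The direct CLI equivalent (sketch):

# Sketch: CLI equivalent of the ansible dnf task ('-y' answers prompts;
# dnf itself is a no-op when the packages are already installed).
import subprocess

subprocess.run(["dnf", "install", "-y", "tuned", "tuned-profiles-cpu-partitioning"], check=True)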
Oct 11 01:47:11 compute-0 ceph-mon[191930]: pgmap v270: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:11 compute-0 ceph-mon[191930]: 5.1a deep-scrub starts
Oct 11 01:47:11 compute-0 ceph-mon[191930]: 5.1a deep-scrub ok
Oct 11 01:47:11 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.10 scrub starts
Oct 11 01:47:11 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.10 scrub ok
Oct 11 01:47:12 compute-0 sudo[229832]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v271: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:12 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.9 scrub starts
Oct 11 01:47:12 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.9 scrub ok
Oct 11 01:47:12 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.b scrub starts
Oct 11 01:47:12 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.b scrub ok
Oct 11 01:47:13 compute-0 python3.9[229985]: ansible-ansible.builtin.stat Invoked with path=/etc/tuned/active_profile follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:47:13 compute-0 ceph-mon[191930]: 8.10 scrub starts
Oct 11 01:47:13 compute-0 ceph-mon[191930]: 8.10 scrub ok
Oct 11 01:47:13 compute-0 ceph-mon[191930]: pgmap v271: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:13 compute-0 ceph-mon[191930]: 2.9 scrub starts
Oct 11 01:47:13 compute-0 ceph-mon[191930]: 2.9 scrub ok
Oct 11 01:47:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v272: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:14 compute-0 ceph-mon[191930]: 8.b scrub starts
Oct 11 01:47:14 compute-0 ceph-mon[191930]: 8.b scrub ok
Oct 11 01:47:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:47:15 compute-0 python3.9[230137]: ansible-ansible.builtin.slurp Invoked with src=/etc/tuned/active_profile
Oct 11 01:47:15 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 8.15 deep-scrub starts
Oct 11 01:47:15 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 8.15 deep-scrub ok
Oct 11 01:47:15 compute-0 podman[230139]: 2025-10-11 01:47:15.228364066 +0000 UTC m=+0.120755772 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 01:47:15 compute-0 podman[230147]: 2025-10-11 01:47:15.267148094 +0000 UTC m=+0.135568359 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, distribution-scope=public, name=ubi9, version=9.4, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.29.0, io.openshift.expose-services=, summary=Provides the latest release of Red Hat Universal Base Image 9., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, release=1214.1726694543, com.redhat.component=ubi9-container, io.openshift.tags=base rhel9, managed_by=edpm_ansible, vcs-type=git, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, config_id=edpm, architecture=x86_64, container_name=kepler, release-0.7.12=, build-date=2024-09-18T21:23:30, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, maintainer=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9)
Oct 11 01:47:15 compute-0 podman[230144]: 2025-10-11 01:47:15.269868047 +0000 UTC m=+0.161651535 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.schema-version=1.0, container_name=ovn_controller, managed_by=edpm_ansible, tcib_managed=true, config_id=ovn_controller, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:47:15 compute-0 ceph-mon[191930]: pgmap v272: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:15 compute-0 ceph-mon[191930]: 8.15 deep-scrub starts
Oct 11 01:47:15 compute-0 ceph-mon[191930]: 8.15 deep-scrub ok
Oct 11 01:47:15 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.19 scrub starts
Oct 11 01:47:15 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.19 scrub ok
Oct 11 01:47:16 compute-0 python3.9[230351]: ansible-ansible.builtin.stat Invoked with path=/etc/tuned/throughput-performance-variables.conf follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:47:16 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.15 scrub starts
Oct 11 01:47:16 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.15 scrub ok
Oct 11 01:47:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v273: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:16 compute-0 ceph-mon[191930]: 5.19 scrub starts
Oct 11 01:47:16 compute-0 ceph-mon[191930]: 5.19 scrub ok
Oct 11 01:47:16 compute-0 ceph-mon[191930]: 11.15 scrub starts
Oct 11 01:47:16 compute-0 ceph-mon[191930]: 11.15 scrub ok
Oct 11 01:47:17 compute-0 sudo[230501]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xaufalzpwxegugmquigxrvoflfrutwlu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147236.5869446-346-181258385450006/AnsiballZ_systemd.py'
Oct 11 01:47:17 compute-0 sudo[230501]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:17 compute-0 ceph-mon[191930]: pgmap v273: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:17 compute-0 python3.9[230503]: ansible-ansible.builtin.systemd Invoked with enabled=True name=tuned state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:47:17 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.4 scrub starts
Oct 11 01:47:17 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.4 scrub ok
Oct 11 01:47:17 compute-0 systemd[1]: Stopping Dynamic System Tuning Daemon...
Oct 11 01:47:18 compute-0 systemd[1]: tuned.service: Deactivated successfully.
Oct 11 01:47:18 compute-0 systemd[1]: Stopped Dynamic System Tuning Daemon.
Oct 11 01:47:18 compute-0 systemd[1]: Starting Dynamic System Tuning Daemon...
Oct 11 01:47:18 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.2 scrub starts
Oct 11 01:47:18 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.2 scrub ok
Oct 11 01:47:18 compute-0 systemd[1]: Started Dynamic System Tuning Daemon.
Oct 11 01:47:18 compute-0 sudo[230501]: pam_unix(sudo:session): session closed for user root
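Editor's note: the Stopping → Deactivated → Starting → Started sequence above is exactly what ansible.builtin.systemd with enabled=True and state=restarted produces for tuned.service. By hand (sketch):

# Sketch: enable tuned at boot and restart it now, as the ansible task did.
import subprocess

subprocess.run(["systemctl", "enable", "tuned"], check=True)
subprocess.run(["systemctl", "restart", "tuned"], check=True)
subprocess.run(["systemctl", "is-active", "tuned"], check=True)  # prints "active" on success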
Oct 11 01:47:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v274: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:18 compute-0 ceph-mon[191930]: 11.4 scrub starts
Oct 11 01:47:18 compute-0 ceph-mon[191930]: 11.4 scrub ok
Oct 11 01:47:18 compute-0 ceph-mon[191930]: 11.2 scrub starts
Oct 11 01:47:18 compute-0 ceph-mon[191930]: 11.2 scrub ok
Oct 11 01:47:18 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.c scrub starts
Oct 11 01:47:18 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.c scrub ok
Oct 11 01:47:19 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.3 scrub starts
Oct 11 01:47:19 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.3 scrub ok
Oct 11 01:47:19 compute-0 podman[230638]: 2025-10-11 01:47:19.263633039 +0000 UTC m=+0.146898897 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS)
Oct 11 01:47:19 compute-0 python3.9[230679]: ansible-ansible.builtin.slurp Invoked with src=/proc/cmdline
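Editor's note: ansible.builtin.slurp reads a remote file and returns its content base64-encoded in the module result; here it fetches /proc/cmdline so the play can check kernel boot parameters before applying the tuned profile. What the module does, in miniature (sketch):

# Sketch: slurp in miniature - read a file and base64-encode it, as the
# module result does; the caller decodes it to recover the raw text.
import base64

with open("/proc/cmdline", "rb") as f:
    encoded = base64.b64encode(f.read()).decode()
print(base64.b64decode(encoded).decode())  # the live kernel command line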
Oct 11 01:47:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:47:19 compute-0 ceph-mon[191930]: pgmap v274: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:19 compute-0 ceph-mon[191930]: 5.c scrub starts
Oct 11 01:47:19 compute-0 ceph-mon[191930]: 5.c scrub ok
Oct 11 01:47:19 compute-0 ceph-mon[191930]: 11.3 scrub starts
Oct 11 01:47:19 compute-0 ceph-mon[191930]: 11.3 scrub ok
Oct 11 01:47:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v275: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:21 compute-0 ceph-mon[191930]: pgmap v275: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:21 compute-0 sudo[230835]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-clldopgczvsasdnpctxulakfpultluct ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147241.2112992-403-18663790291999/AnsiballZ_systemd.py'
Oct 11 01:47:21 compute-0 sudo[230835]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:22 compute-0 systemd[193665]: Created slice User Background Tasks Slice.
Oct 11 01:47:22 compute-0 systemd[193665]: Starting Cleanup of User's Temporary Files and Directories...
Oct 11 01:47:22 compute-0 systemd[193665]: Finished Cleanup of User's Temporary Files and Directories.
Oct 11 01:47:22 compute-0 python3.9[230837]: ansible-ansible.builtin.systemd Invoked with enabled=False name=ksm.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:47:22 compute-0 sudo[230835]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v276: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:23 compute-0 sudo[230990]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uhnjlgjnnqlxxqvpbacnkmmajyerziea ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147242.6873431-403-234225172845654/AnsiballZ_systemd.py'
Oct 11 01:47:23 compute-0 sudo[230990]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:23 compute-0 python3.9[230992]: ansible-ansible.builtin.systemd Invoked with enabled=False name=ksmtuned.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:47:23 compute-0 sudo[230990]: pam_unix(sudo:session): session closed for user root
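Editor's note: stopping and disabling ksm.service and ksmtuned.service turns off kernel samepage merging, which EDPM does on compute nodes so guest memory behaviour stays predictable. KSM can also be switched off directly through the documented sysfs knob (sketch; requires root):

# Sketch: disable kernel samepage merging via sysfs
# (0 = stop ksmd; 2 = stop ksmd and un-merge all previously shared pages).
with open("/sys/kernel/mm/ksm/run", "w") as f:
    f.write("2\n")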
Oct 11 01:47:23 compute-0 ceph-mon[191930]: pgmap v276: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:23 compute-0 sudo[230995]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:47:23 compute-0 sudo[230995]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:47:23 compute-0 sudo[230995]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:23 compute-0 sudo[231037]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:47:23 compute-0 sudo[231037]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:47:23 compute-0 sudo[231037]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:24 compute-0 sudo[231069]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:47:24 compute-0 sudo[231069]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:47:24 compute-0 sudo[231069]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:24 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 8.2 scrub starts
Oct 11 01:47:24 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 8.2 scrub ok
Oct 11 01:47:24 compute-0 sudo[231094]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 01:47:24 compute-0 sudo[231094]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:47:24 compute-0 sshd-session[223485]: Connection closed by 192.168.122.30 port 43360
Oct 11 01:47:24 compute-0 sshd-session[223482]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:47:24 compute-0 systemd[1]: session-42.scope: Deactivated successfully.
Oct 11 01:47:24 compute-0 systemd[1]: session-42.scope: Consumed 1min 25.575s CPU time.
Oct 11 01:47:24 compute-0 systemd-logind[804]: Session 42 logged out. Waiting for processes to exit.
Oct 11 01:47:24 compute-0 systemd-logind[804]: Removed session 42.
Oct 11 01:47:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v277: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:47:24 compute-0 ceph-mon[191930]: 8.2 scrub starts
Oct 11 01:47:24 compute-0 ceph-mon[191930]: 8.2 scrub ok
Oct 11 01:47:24 compute-0 sudo[231094]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:47:24 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:47:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:47:24 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:47:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:47:24 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:47:24 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev cf8f2268-f451-4e76-9b6b-9c437050a340 does not exist
Oct 11 01:47:24 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 4a727c17-f6fb-494c-bc7a-3fcb35f69eb3 does not exist
Oct 11 01:47:24 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 9688309c-e9c0-4347-b3f6-e772f60a83ab does not exist
Oct 11 01:47:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:47:24 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:47:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:47:24 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:47:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:47:24 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
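Editor's note: the handle_command/audit pairs above are cephadm, acting through the mgr, gathering what it needs before creating new OSDs: a minimal ceph.conf, the client.admin and client.bootstrap-osd keyrings, and the set of destroyed OSDs in the tree. Each has a one-to-one CLI equivalent (standard ceph commands; sketch):

# Sketch: CLI equivalents of the mon_commands audited above.
import subprocess

subprocess.run(["ceph", "config", "generate-minimal-conf"], check=True)
subprocess.run(["ceph", "auth", "get", "client.bootstrap-osd"], check=True)
subprocess.run(["ceph", "osd", "tree", "destroyed", "--format", "json"], check=True)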
Oct 11 01:47:25 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.d scrub starts
Oct 11 01:47:25 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.d scrub ok
Oct 11 01:47:25 compute-0 sudo[231150]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:47:25 compute-0 sudo[231150]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:47:25 compute-0 sudo[231150]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:25 compute-0 sudo[231175]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:47:25 compute-0 sudo[231175]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:47:25 compute-0 sudo[231175]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:25 compute-0 sudo[231200]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:47:25 compute-0 sudo[231200]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:47:25 compute-0 sudo[231200]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:25 compute-0 sudo[231225]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 01:47:25 compute-0 sudo[231225]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
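Editor's note: this cephadm call wraps 'ceph-volume lvm batch' in a ceph container: it takes the three pre-created logical volumes and prepares one OSD on each, with --no-systemd because cephadm manages the units itself and --yes to skip the interactive plan review. Stripped of the container plumbing, the inner command is (sketch):

# Sketch: the inner ceph-volume invocation (normally executed inside the
# ceph container by cephadm; the LV paths are from the log line above).
import subprocess

subprocess.run(
    ["ceph-volume", "lvm", "batch", "--no-auto",
     "/dev/ceph_vg0/ceph_lv0", "/dev/ceph_vg1/ceph_lv1", "/dev/ceph_vg2/ceph_lv2",
     "--yes", "--no-systemd"],
    check=True,
)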
Oct 11 01:47:25 compute-0 ceph-mon[191930]: pgmap v277: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:25 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:47:25 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:47:25 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:47:25 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:47:25 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:47:25 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:47:25 compute-0 ceph-mon[191930]: 11.d scrub starts
Oct 11 01:47:25 compute-0 ceph-mon[191930]: 11.d scrub ok
Oct 11 01:47:25 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.a scrub starts
Oct 11 01:47:25 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 2.a scrub ok
Oct 11 01:47:26 compute-0 podman[231289]: 2025-10-11 01:47:26.149987345 +0000 UTC m=+0.072683530 container create b5a3cd82a6bcef29ef224fececa2acfac793bf9f2483eac6e9c0e10007ad0ee7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_goldwasser, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:47:26 compute-0 podman[231289]: 2025-10-11 01:47:26.123392192 +0000 UTC m=+0.046088387 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:47:26 compute-0 systemd[1]: Started libpod-conmon-b5a3cd82a6bcef29ef224fececa2acfac793bf9f2483eac6e9c0e10007ad0ee7.scope.
Oct 11 01:47:26 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:47:26 compute-0 podman[231289]: 2025-10-11 01:47:26.294696198 +0000 UTC m=+0.217392473 container init b5a3cd82a6bcef29ef224fececa2acfac793bf9f2483eac6e9c0e10007ad0ee7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_goldwasser, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:47:26 compute-0 podman[231289]: 2025-10-11 01:47:26.314308611 +0000 UTC m=+0.237004786 container start b5a3cd82a6bcef29ef224fececa2acfac793bf9f2483eac6e9c0e10007ad0ee7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_goldwasser, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2)
Oct 11 01:47:26 compute-0 podman[231289]: 2025-10-11 01:47:26.319785885 +0000 UTC m=+0.242482100 container attach b5a3cd82a6bcef29ef224fececa2acfac793bf9f2483eac6e9c0e10007ad0ee7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_goldwasser, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:47:26 compute-0 goofy_goldwasser[231306]: 167 167
Oct 11 01:47:26 compute-0 systemd[1]: libpod-b5a3cd82a6bcef29ef224fececa2acfac793bf9f2483eac6e9c0e10007ad0ee7.scope: Deactivated successfully.
Oct 11 01:47:26 compute-0 podman[231289]: 2025-10-11 01:47:26.330426544 +0000 UTC m=+0.253122789 container died b5a3cd82a6bcef29ef224fececa2acfac793bf9f2483eac6e9c0e10007ad0ee7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_goldwasser, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 01:47:26 compute-0 systemd[1]: var-lib-containers-storage-overlay-a3bd3a5d3765fb62fccfc8ccd99f437443a69bba83442a8e91e2aa55b2ebafe8-merged.mount: Deactivated successfully.
Oct 11 01:47:26 compute-0 podman[231289]: 2025-10-11 01:47:26.42872664 +0000 UTC m=+0.351422845 container remove b5a3cd82a6bcef29ef224fececa2acfac793bf9f2483eac6e9c0e10007ad0ee7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_goldwasser, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 01:47:26 compute-0 systemd[1]: libpod-conmon-b5a3cd82a6bcef29ef224fececa2acfac793bf9f2483eac6e9c0e10007ad0ee7.scope: Deactivated successfully.
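The lines above are one complete short-lived cephadm helper container (goofy_goldwasser): image pull, create, init, start, attach, exit ("died"), overlay unmount, remove, all inside half a second. A minimal sketch for watching such lifecycles as they happen, assuming `podman events --format json` emits one JSON object per line with capitalised "Time", "Status" and "Name" keys (podman 4.x behaviour; treat the key names as assumptions):

    #!/usr/bin/env python3
    # Sketch: stream podman container lifecycle events, mirroring the
    # create/init/start/attach/died/remove sequence logged above.
    # Assumes `podman events --format json` prints one JSON object per
    # line with "Time", "Status" and "Name" keys (podman 4.x).
    import json
    import subprocess

    proc = subprocess.Popen(
        ["podman", "events", "--format", "json", "--filter", "type=container"],
        stdout=subprocess.PIPE, text=True,
    )
    for line in proc.stdout:
        ev = json.loads(line)
        print(f'{ev.get("Time", "")} {ev.get("Status", ""):<8} {ev.get("Name", "")}')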
Oct 11 01:47:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v278: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:47:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:47:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:47:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:47:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:47:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:47:26 compute-0 podman[231329]: 2025-10-11 01:47:26.710756466 +0000 UTC m=+0.087110070 container create c11c85f2e554ae3dee6f7b24ba5e819c7f34c24012ef95525a880de9b1a72477 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_johnson, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 01:47:26 compute-0 ceph-mon[191930]: 2.a scrub starts
Oct 11 01:47:26 compute-0 ceph-mon[191930]: 2.a scrub ok
Oct 11 01:47:26 compute-0 podman[231329]: 2025-10-11 01:47:26.679318587 +0000 UTC m=+0.055672201 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:47:26 compute-0 systemd[1]: Started libpod-conmon-c11c85f2e554ae3dee6f7b24ba5e819c7f34c24012ef95525a880de9b1a72477.scope.
Oct 11 01:47:26 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:47:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/34d04146b873057be2d9dd0ee3b6d21377a62a177e155328d66c9d4762f47da9/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:47:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/34d04146b873057be2d9dd0ee3b6d21377a62a177e155328d66c9d4762f47da9/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:47:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/34d04146b873057be2d9dd0ee3b6d21377a62a177e155328d66c9d4762f47da9/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:47:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/34d04146b873057be2d9dd0ee3b6d21377a62a177e155328d66c9d4762f47da9/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:47:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/34d04146b873057be2d9dd0ee3b6d21377a62a177e155328d66c9d4762f47da9/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:47:26 compute-0 podman[231329]: 2025-10-11 01:47:26.89284692 +0000 UTC m=+0.269200494 container init c11c85f2e554ae3dee6f7b24ba5e819c7f34c24012ef95525a880de9b1a72477 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_johnson, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507)
Oct 11 01:47:26 compute-0 podman[231329]: 2025-10-11 01:47:26.911362796 +0000 UTC m=+0.287716390 container start c11c85f2e554ae3dee6f7b24ba5e819c7f34c24012ef95525a880de9b1a72477 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_johnson, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 01:47:26 compute-0 podman[231329]: 2025-10-11 01:47:26.919661543 +0000 UTC m=+0.296015107 container attach c11c85f2e554ae3dee6f7b24ba5e819c7f34c24012ef95525a880de9b1a72477 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_johnson, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 01:47:27 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.14 scrub starts
Oct 11 01:47:27 compute-0 ceph-mon[191930]: pgmap v278: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:27 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.14 scrub ok
Oct 11 01:47:27 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.8 scrub starts
Oct 11 01:47:27 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.8 scrub ok
Oct 11 01:47:28 compute-0 wonderful_johnson[231345]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:47:28 compute-0 wonderful_johnson[231345]: --> relative data size: 1.0
Oct 11 01:47:28 compute-0 wonderful_johnson[231345]: --> All data devices are unavailable
Oct 11 01:47:28 compute-0 systemd[1]: libpod-c11c85f2e554ae3dee6f7b24ba5e819c7f34c24012ef95525a880de9b1a72477.scope: Deactivated successfully.
Oct 11 01:47:28 compute-0 podman[231329]: 2025-10-11 01:47:28.284322931 +0000 UTC m=+1.660676525 container died c11c85f2e554ae3dee6f7b24ba5e819c7f34c24012ef95525a880de9b1a72477 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_johnson, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0)
Oct 11 01:47:28 compute-0 systemd[1]: libpod-c11c85f2e554ae3dee6f7b24ba5e819c7f34c24012ef95525a880de9b1a72477.scope: Consumed 1.315s CPU time.
Oct 11 01:47:28 compute-0 systemd[1]: var-lib-containers-storage-overlay-34d04146b873057be2d9dd0ee3b6d21377a62a177e155328d66c9d4762f47da9-merged.mount: Deactivated successfully.
Oct 11 01:47:28 compute-0 podman[231329]: 2025-10-11 01:47:28.398633561 +0000 UTC m=+1.774987125 container remove c11c85f2e554ae3dee6f7b24ba5e819c7f34c24012ef95525a880de9b1a72477 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wonderful_johnson, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 01:47:28 compute-0 systemd[1]: libpod-conmon-c11c85f2e554ae3dee6f7b24ba5e819c7f34c24012ef95525a880de9b1a72477.scope: Deactivated successfully.
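The wonderful_johnson run above is ceph-volume's drive-group pass: it sees 0 physical and 3 LVM data devices and declares them all unavailable, which is expected here since all three LVs already carry OSDs. A minimal sketch for asking the same question directly, assuming the `ceph-volume inventory --format json` report is a list of device objects with "path", "available" and "rejected_reasons" fields (the field names are an assumption):

    #!/usr/bin/env python3
    # Sketch: list which devices ceph-volume would accept for a new OSD
    # and why the others are rejected. Assumes `ceph-volume inventory
    # --format json` returns a list of dicts carrying "path",
    # "available" and "rejected_reasons" keys.
    import json
    import subprocess

    report = subprocess.run(
        ["ceph-volume", "inventory", "--format", "json"],
        capture_output=True, text=True, check=True,
    ).stdout
    for dev in json.loads(report):
        state = ("available" if dev.get("available")
                 else "rejected: " + ", ".join(dev.get("rejected_reasons", [])))
        print(f'{dev.get("path")}: {state}')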
Oct 11 01:47:28 compute-0 sudo[231225]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v279: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:28 compute-0 sudo[231386]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:47:28 compute-0 sudo[231386]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:47:28 compute-0 sudo[231386]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:28 compute-0 sudo[231411]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:47:28 compute-0 sudo[231411]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:47:28 compute-0 sudo[231411]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:28 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.9 scrub starts
Oct 11 01:47:28 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.9 scrub ok
Oct 11 01:47:28 compute-0 ceph-mon[191930]: 11.14 scrub starts
Oct 11 01:47:28 compute-0 ceph-mon[191930]: 11.14 scrub ok
Oct 11 01:47:28 compute-0 ceph-mon[191930]: 11.8 scrub starts
Oct 11 01:47:28 compute-0 ceph-mon[191930]: 11.8 scrub ok
Oct 11 01:47:28 compute-0 sudo[231436]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:47:28 compute-0 sudo[231436]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:47:28 compute-0 sudo[231436]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:28 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 8.d scrub starts
Oct 11 01:47:28 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 8.d scrub ok
Oct 11 01:47:28 compute-0 sudo[231461]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:47:28 compute-0 sudo[231461]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:47:29 compute-0 podman[231526]: 2025-10-11 01:47:29.618027348 +0000 UTC m=+0.095235806 container create f73131693468327613f130e4e3bdaed2f7d0991f1ddcb1a794786c1c727c5701 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hellman, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:47:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:47:29 compute-0 podman[231526]: 2025-10-11 01:47:29.58629167 +0000 UTC m=+0.063500108 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:47:29 compute-0 systemd[1]: Started libpod-conmon-f73131693468327613f130e4e3bdaed2f7d0991f1ddcb1a794786c1c727c5701.scope.
Oct 11 01:47:29 compute-0 podman[157119]: time="2025-10-11T01:47:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:47:29 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:47:29 compute-0 podman[231526]: 2025-10-11 01:47:29.788160376 +0000 UTC m=+0.265368904 container init f73131693468327613f130e4e3bdaed2f7d0991f1ddcb1a794786c1c727c5701 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hellman, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:47:29 compute-0 ceph-mon[191930]: pgmap v279: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:29 compute-0 ceph-mon[191930]: 8.9 scrub starts
Oct 11 01:47:29 compute-0 ceph-mon[191930]: 8.9 scrub ok
Oct 11 01:47:29 compute-0 ceph-mon[191930]: 8.d scrub starts
Oct 11 01:47:29 compute-0 ceph-mon[191930]: 8.d scrub ok
Oct 11 01:47:29 compute-0 podman[231526]: 2025-10-11 01:47:29.811344929 +0000 UTC m=+0.288553377 container start f73131693468327613f130e4e3bdaed2f7d0991f1ddcb1a794786c1c727c5701 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hellman, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Oct 11 01:47:29 compute-0 podman[231526]: 2025-10-11 01:47:29.819364906 +0000 UTC m=+0.296573424 container attach f73131693468327613f130e4e3bdaed2f7d0991f1ddcb1a794786c1c727c5701 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hellman, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:47:29 compute-0 sshd-session[231542]: Accepted publickey for zuul from 192.168.122.30 port 35070 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:47:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:47:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 34191 "" "Go-http-client/1.1"
Oct 11 01:47:29 compute-0 funny_hellman[231543]: 167 167
Oct 11 01:47:29 compute-0 systemd[1]: libpod-f73131693468327613f130e4e3bdaed2f7d0991f1ddcb1a794786c1c727c5701.scope: Deactivated successfully.
Oct 11 01:47:29 compute-0 podman[231526]: 2025-10-11 01:47:29.827121774 +0000 UTC m=+0.304330192 container died f73131693468327613f130e4e3bdaed2f7d0991f1ddcb1a794786c1c727c5701 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hellman, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 01:47:29 compute-0 systemd-logind[804]: New session 43 of user zuul.
Oct 11 01:47:29 compute-0 systemd[1]: Started Session 43 of User zuul.
Oct 11 01:47:29 compute-0 systemd[1]: var-lib-containers-storage-overlay-6c2d608ebc30916e205a13db9f405ada22004a4d46b88bd94160d208569fbb38-merged.mount: Deactivated successfully.
Oct 11 01:47:29 compute-0 rsyslogd[187706]: imjournal from <compute-0:systemd>: begin to drop messages due to rate-limiting
Oct 11 01:47:29 compute-0 sshd-session[231542]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:47:29 compute-0 podman[231526]: 2025-10-11 01:47:29.909024797 +0000 UTC m=+0.386233225 container remove f73131693468327613f130e4e3bdaed2f7d0991f1ddcb1a794786c1c727c5701 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hellman, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 01:47:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:47:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6802 "" "Go-http-client/1.1"
Oct 11 01:47:29 compute-0 systemd[1]: libpod-conmon-f73131693468327613f130e4e3bdaed2f7d0991f1ddcb1a794786c1c727c5701.scope: Deactivated successfully.
Oct 11 01:47:30 compute-0 podman[231602]: 2025-10-11 01:47:30.200837894 +0000 UTC m=+0.087125489 container create ee98d0eeca071ea412f2355bd44014fe4b44ffd264fd26a31d793f7e21dd464e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_shtern, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0)
Oct 11 01:47:30 compute-0 podman[231602]: 2025-10-11 01:47:30.171329879 +0000 UTC m=+0.057617554 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:47:30 compute-0 systemd[1]: Started libpod-conmon-ee98d0eeca071ea412f2355bd44014fe4b44ffd264fd26a31d793f7e21dd464e.scope.
Oct 11 01:47:30 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:47:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f145e1c8f0fc741684ffe4eb01d4fa55af5b4972029200b8a9fe9823cf6a6914/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:47:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f145e1c8f0fc741684ffe4eb01d4fa55af5b4972029200b8a9fe9823cf6a6914/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:47:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f145e1c8f0fc741684ffe4eb01d4fa55af5b4972029200b8a9fe9823cf6a6914/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:47:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f145e1c8f0fc741684ffe4eb01d4fa55af5b4972029200b8a9fe9823cf6a6914/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:47:30 compute-0 podman[231602]: 2025-10-11 01:47:30.410691918 +0000 UTC m=+0.296979593 container init ee98d0eeca071ea412f2355bd44014fe4b44ffd264fd26a31d793f7e21dd464e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_shtern, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, ceph=True)
Oct 11 01:47:30 compute-0 podman[231602]: 2025-10-11 01:47:30.434497399 +0000 UTC m=+0.320785024 container start ee98d0eeca071ea412f2355bd44014fe4b44ffd264fd26a31d793f7e21dd464e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_shtern, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:47:30 compute-0 podman[231602]: 2025-10-11 01:47:30.441818478 +0000 UTC m=+0.328106113 container attach ee98d0eeca071ea412f2355bd44014fe4b44ffd264fd26a31d793f7e21dd464e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_shtern, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:47:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v280: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:31 compute-0 nervous_shtern[231637]: {
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:     "0": [
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:         {
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "devices": [
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "/dev/loop3"
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             ],
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "lv_name": "ceph_lv0",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "lv_size": "21470642176",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "name": "ceph_lv0",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "tags": {
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.cluster_name": "ceph",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.crush_device_class": "",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.encrypted": "0",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.osd_id": "0",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.type": "block",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.vdo": "0"
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             },
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "type": "block",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "vg_name": "ceph_vg0"
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:         }
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:     ],
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:     "1": [
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:         {
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "devices": [
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "/dev/loop4"
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             ],
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "lv_name": "ceph_lv1",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "lv_size": "21470642176",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "name": "ceph_lv1",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "tags": {
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.cluster_name": "ceph",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.crush_device_class": "",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.encrypted": "0",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.osd_id": "1",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.type": "block",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.vdo": "0"
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             },
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "type": "block",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "vg_name": "ceph_vg1"
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:         }
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:     ],
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:     "2": [
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:         {
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "devices": [
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "/dev/loop5"
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             ],
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "lv_name": "ceph_lv2",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "lv_size": "21470642176",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "name": "ceph_lv2",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "tags": {
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.cluster_name": "ceph",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.crush_device_class": "",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.encrypted": "0",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.osd_id": "2",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.type": "block",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:                 "ceph.vdo": "0"
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             },
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "type": "block",
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:             "vg_name": "ceph_vg2"
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:         }
Oct 11 01:47:31 compute-0 nervous_shtern[231637]:     ]
Oct 11 01:47:31 compute-0 nervous_shtern[231637]: }
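The JSON above is the payload of the `ceph-volume ... lvm list --format json` call dispatched through cephadm a few lines earlier: one entry per OSD id, each listing the backing logical volume with its `ceph.*` LV tags both as a raw string and pre-parsed under "tags". A minimal sketch that reduces it to an osd_id/device summary; the keys used ("lv_path", "devices", "tags") are exactly those printed in this log:

    #!/usr/bin/env python3
    # Sketch: condense `ceph-volume lvm list --format json` output (as
    # captured above) into one line per OSD.
    import json
    import sys

    lvm = json.load(sys.stdin)  # pipe the JSON block from the log in here
    for osd_id, lvs in sorted(lvm.items(), key=lambda kv: int(kv[0])):
        for lv in lvs:
            tags = lv.get("tags", {})
            print(f'osd.{osd_id}: {lv.get("lv_path")} '
                  f'on {",".join(lv.get("devices", []))} '
                  f'fsid={tags.get("ceph.osd_fsid")}')

Against the listing above this yields osd.0 on /dev/loop3, osd.1 on /dev/loop4 and osd.2 on /dev/loop5.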
Oct 11 01:47:31 compute-0 systemd[1]: libpod-ee98d0eeca071ea412f2355bd44014fe4b44ffd264fd26a31d793f7e21dd464e.scope: Deactivated successfully.
Oct 11 01:47:31 compute-0 podman[231602]: 2025-10-11 01:47:31.329116045 +0000 UTC m=+1.215403630 container died ee98d0eeca071ea412f2355bd44014fe4b44ffd264fd26a31d793f7e21dd464e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_shtern, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:47:31 compute-0 systemd[1]: var-lib-containers-storage-overlay-f145e1c8f0fc741684ffe4eb01d4fa55af5b4972029200b8a9fe9823cf6a6914-merged.mount: Deactivated successfully.
Oct 11 01:47:31 compute-0 podman[231602]: 2025-10-11 01:47:31.410938359 +0000 UTC m=+1.297225944 container remove ee98d0eeca071ea412f2355bd44014fe4b44ffd264fd26a31d793f7e21dd464e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_shtern, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3)
Oct 11 01:47:31 compute-0 python3.9[231739]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:47:31 compute-0 openstack_network_exporter[159265]: ERROR   01:47:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:47:31 compute-0 openstack_network_exporter[159265]: ERROR   01:47:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:47:31 compute-0 openstack_network_exporter[159265]: ERROR   01:47:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:47:31 compute-0 openstack_network_exporter[159265]: ERROR   01:47:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath

Oct 11 01:47:31 compute-0 openstack_network_exporter[159265]: ERROR   01:47:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:47:31 compute-0 systemd[1]: libpod-conmon-ee98d0eeca071ea412f2355bd44014fe4b44ffd264fd26a31d793f7e21dd464e.scope: Deactivated successfully.
Oct 11 01:47:31 compute-0 sudo[231461]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:31 compute-0 sudo[231760]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:47:31 compute-0 sudo[231760]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:47:31 compute-0 sudo[231760]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:31 compute-0 sudo[231785]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:47:31 compute-0 sudo[231785]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:47:31 compute-0 sudo[231785]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:31 compute-0 ceph-mon[191930]: pgmap v280: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:31 compute-0 sudo[231810]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:47:31 compute-0 sudo[231810]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:47:31 compute-0 sudo[231810]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:31 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.9 scrub starts
Oct 11 01:47:31 compute-0 sudo[231859]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:47:31 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.9 scrub ok
Oct 11 01:47:31 compute-0 sudo[231859]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:47:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v281: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:32 compute-0 podman[231974]: 2025-10-11 01:47:32.532908248 +0000 UTC m=+0.097315330 container create 526fd98980704a85deedb8e2cd86afb2fa638552bd601cb7512e57ec4f5a789d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_robinson, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:47:32 compute-0 podman[231974]: 2025-10-11 01:47:32.495806745 +0000 UTC m=+0.060213847 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:47:32 compute-0 systemd[1]: Started libpod-conmon-526fd98980704a85deedb8e2cd86afb2fa638552bd601cb7512e57ec4f5a789d.scope.
Oct 11 01:47:32 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:47:32 compute-0 podman[231974]: 2025-10-11 01:47:32.693190745 +0000 UTC m=+0.257597867 container init 526fd98980704a85deedb8e2cd86afb2fa638552bd601cb7512e57ec4f5a789d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_robinson, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:47:32 compute-0 podman[231974]: 2025-10-11 01:47:32.710926864 +0000 UTC m=+0.275333936 container start 526fd98980704a85deedb8e2cd86afb2fa638552bd601cb7512e57ec4f5a789d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_robinson, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:47:32 compute-0 podman[231974]: 2025-10-11 01:47:32.718755051 +0000 UTC m=+0.283162173 container attach 526fd98980704a85deedb8e2cd86afb2fa638552bd601cb7512e57ec4f5a789d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_robinson, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 01:47:32 compute-0 strange_robinson[232013]: 167 167
Oct 11 01:47:32 compute-0 systemd[1]: libpod-526fd98980704a85deedb8e2cd86afb2fa638552bd601cb7512e57ec4f5a789d.scope: Deactivated successfully.
Oct 11 01:47:32 compute-0 podman[231974]: 2025-10-11 01:47:32.723590027 +0000 UTC m=+0.287997099 container died 526fd98980704a85deedb8e2cd86afb2fa638552bd601cb7512e57ec4f5a789d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_robinson, CEPH_REF=reef, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0)
Oct 11 01:47:32 compute-0 systemd[1]: var-lib-containers-storage-overlay-75ea5f2775cb9ff8b9a3b852ef56a7b784cca6598fdec675c2a7efcf9bfd76b4-merged.mount: Deactivated successfully.
Oct 11 01:47:32 compute-0 podman[231974]: 2025-10-11 01:47:32.797841293 +0000 UTC m=+0.362248335 container remove 526fd98980704a85deedb8e2cd86afb2fa638552bd601cb7512e57ec4f5a789d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_robinson, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0)
Oct 11 01:47:32 compute-0 ceph-mon[191930]: 11.9 scrub starts
Oct 11 01:47:32 compute-0 ceph-mon[191930]: 11.9 scrub ok
Oct 11 01:47:32 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.18 scrub starts
Oct 11 01:47:32 compute-0 systemd[1]: libpod-conmon-526fd98980704a85deedb8e2cd86afb2fa638552bd601cb7512e57ec4f5a789d.scope: Deactivated successfully.
Oct 11 01:47:32 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 5.18 scrub ok
Oct 11 01:47:32 compute-0 sudo[232080]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vywsubpdntahnoflpqyynbdxetirzvbn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147252.1379423-36-154174126547178/AnsiballZ_getent.py'
Oct 11 01:47:32 compute-0 sudo[232080]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:33 compute-0 podman[232088]: 2025-10-11 01:47:33.013159321 +0000 UTC m=+0.066969368 container create 60d37db2e4a84cdfa784be1a93fc6cc9ba5cb5e1e7015cc1ce3d83290b3af7ad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_nash, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 01:47:33 compute-0 podman[232088]: 2025-10-11 01:47:32.989526969 +0000 UTC m=+0.043337076 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:47:33 compute-0 systemd[1]: Started libpod-conmon-60d37db2e4a84cdfa784be1a93fc6cc9ba5cb5e1e7015cc1ce3d83290b3af7ad.scope.
Oct 11 01:47:33 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:47:33 compute-0 python3.9[232082]: ansible-ansible.builtin.getent Invoked with database=passwd key=openvswitch fail_key=True service=None split=None
Oct 11 01:47:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/be30c8c83cc7b00dbef6a094663afd51c2f785eeda897afd2eac8fdec7f121bc/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:47:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/be30c8c83cc7b00dbef6a094663afd51c2f785eeda897afd2eac8fdec7f121bc/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:47:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/be30c8c83cc7b00dbef6a094663afd51c2f785eeda897afd2eac8fdec7f121bc/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:47:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/be30c8c83cc7b00dbef6a094663afd51c2f785eeda897afd2eac8fdec7f121bc/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:47:33 compute-0 sudo[232080]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:33 compute-0 podman[232088]: 2025-10-11 01:47:33.178039125 +0000 UTC m=+0.231849222 container init 60d37db2e4a84cdfa784be1a93fc6cc9ba5cb5e1e7015cc1ce3d83290b3af7ad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_nash, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0)
Oct 11 01:47:33 compute-0 podman[232088]: 2025-10-11 01:47:33.188350055 +0000 UTC m=+0.242160092 container start 60d37db2e4a84cdfa784be1a93fc6cc9ba5cb5e1e7015cc1ce3d83290b3af7ad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_nash, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_REF=reef)
Oct 11 01:47:33 compute-0 podman[232088]: 2025-10-11 01:47:33.199635062 +0000 UTC m=+0.253445189 container attach 60d37db2e4a84cdfa784be1a93fc6cc9ba5cb5e1e7015cc1ce3d83290b3af7ad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_nash, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:47:33 compute-0 ceph-mon[191930]: pgmap v281: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:33 compute-0 ceph-mon[191930]: 5.18 scrub starts
Oct 11 01:47:33 compute-0 ceph-mon[191930]: 5.18 scrub ok
Oct 11 01:47:34 compute-0 sudo[232272]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-clqppomvhcfudfncemcbofdkpsacchnf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147253.5443687-48-110084898712319/AnsiballZ_setup.py'
Oct 11 01:47:34 compute-0 sudo[232272]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:34 compute-0 happy_nash[232104]: {
Oct 11 01:47:34 compute-0 happy_nash[232104]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:47:34 compute-0 happy_nash[232104]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:47:34 compute-0 happy_nash[232104]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:47:34 compute-0 happy_nash[232104]:         "osd_id": 1,
Oct 11 01:47:34 compute-0 happy_nash[232104]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:47:34 compute-0 happy_nash[232104]:         "type": "bluestore"
Oct 11 01:47:34 compute-0 happy_nash[232104]:     },
Oct 11 01:47:34 compute-0 happy_nash[232104]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:47:34 compute-0 happy_nash[232104]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:47:34 compute-0 happy_nash[232104]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:47:34 compute-0 happy_nash[232104]:         "osd_id": 2,
Oct 11 01:47:34 compute-0 happy_nash[232104]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:47:34 compute-0 happy_nash[232104]:         "type": "bluestore"
Oct 11 01:47:34 compute-0 happy_nash[232104]:     },
Oct 11 01:47:34 compute-0 happy_nash[232104]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:47:34 compute-0 happy_nash[232104]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:47:34 compute-0 happy_nash[232104]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:47:34 compute-0 happy_nash[232104]:         "osd_id": 0,
Oct 11 01:47:34 compute-0 happy_nash[232104]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:47:34 compute-0 happy_nash[232104]:         "type": "bluestore"
Oct 11 01:47:34 compute-0 happy_nash[232104]:     }
Oct 11 01:47:34 compute-0 happy_nash[232104]: }
Oct 11 01:47:34 compute-0 systemd[1]: libpod-60d37db2e4a84cdfa784be1a93fc6cc9ba5cb5e1e7015cc1ce3d83290b3af7ad.scope: Deactivated successfully.
Oct 11 01:47:34 compute-0 systemd[1]: libpod-60d37db2e4a84cdfa784be1a93fc6cc9ba5cb5e1e7015cc1ce3d83290b3af7ad.scope: Consumed 1.147s CPU time.
Oct 11 01:47:34 compute-0 conmon[232104]: conmon 60d37db2e4a84cdfa784 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-60d37db2e4a84cdfa784be1a93fc6cc9ba5cb5e1e7015cc1ce3d83290b3af7ad.scope/container/memory.events
Oct 11 01:47:34 compute-0 podman[232088]: 2025-10-11 01:47:34.35314146 +0000 UTC m=+1.406951497 container died 60d37db2e4a84cdfa784be1a93fc6cc9ba5cb5e1e7015cc1ce3d83290b3af7ad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_nash, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:47:34 compute-0 systemd[1]: var-lib-containers-storage-overlay-be30c8c83cc7b00dbef6a094663afd51c2f785eeda897afd2eac8fdec7f121bc-merged.mount: Deactivated successfully.
Oct 11 01:47:34 compute-0 python3.9[232275]: ansible-ansible.legacy.setup Invoked with filter=['ansible_pkg_mgr'] gather_subset=['!all'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:47:34 compute-0 podman[232088]: 2025-10-11 01:47:34.464112919 +0000 UTC m=+1.517922976 container remove 60d37db2e4a84cdfa784be1a93fc6cc9ba5cb5e1e7015cc1ce3d83290b3af7ad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_nash, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef)
Oct 11 01:47:34 compute-0 systemd[1]: libpod-conmon-60d37db2e4a84cdfa784be1a93fc6cc9ba5cb5e1e7015cc1ce3d83290b3af7ad.scope: Deactivated successfully.
Oct 11 01:47:34 compute-0 sudo[231859]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:47:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v282: 321 pgs: 321 active+clean; 457 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:34 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:47:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:47:34 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:47:34 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 6fb6e792-501b-4f47-a01a-063b7fb1fbdd does not exist
Oct 11 01:47:34 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 9628d89f-58d3-4cd6-9ccd-07eb836abfd7 does not exist
Oct 11 01:47:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:47:34 compute-0 sudo[232308]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:47:34 compute-0 sudo[232308]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:47:34 compute-0 sudo[232308]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:34 compute-0 sudo[232333]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:47:34 compute-0 sudo[232333]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:47:34 compute-0 sudo[232333]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:34 compute-0 sudo[232272]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:34 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 8.4 scrub starts
Oct 11 01:47:34 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 8.4 scrub ok
Oct 11 01:47:35 compute-0 sudo[232460]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dkbfqqpybrlhskmtyxedwqezomtihubo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147253.5443687-48-110084898712319/AnsiballZ_dnf.py'
Oct 11 01:47:35 compute-0 sudo[232460]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:35 compute-0 podman[232405]: 2025-10-11 01:47:35.502035251 +0000 UTC m=+0.120698162 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 01:47:35 compute-0 ceph-mon[191930]: pgmap v282: 321 pgs: 321 active+clean; 457 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:35 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:47:35 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:47:35 compute-0 ceph-mon[191930]: 8.4 scrub starts
Oct 11 01:47:35 compute-0 ceph-mon[191930]: 8.4 scrub ok
Oct 11 01:47:35 compute-0 podman[232406]: 2025-10-11 01:47:35.548960126 +0000 UTC m=+0.165493873 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, managed_by=edpm_ansible, config_id=edpm, io.buildah.version=1.33.7, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., release=1755695350, architecture=x86_64, url=https://catalog.redhat.com/en/search?searchType=containers, maintainer=Red Hat, Inc., distribution-scope=public, container_name=openstack_network_exporter, vcs-type=git, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-minimal-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, build-date=2025-08-20T13:12:41, version=9.6, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.expose-services=, name=ubi9-minimal, vendor=Red Hat, Inc.)
Oct 11 01:47:35 compute-0 python3.9[232474]: ansible-ansible.legacy.dnf Invoked with download_only=True name=['openvswitch'] allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None state=None
Oct 11 01:47:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v283: 321 pgs: 321 active+clean; 457 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:37 compute-0 sudo[232460]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:37 compute-0 ceph-mon[191930]: pgmap v283: 321 pgs: 321 active+clean; 457 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:37 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.1 scrub starts
Oct 11 01:47:37 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.1 scrub ok
Oct 11 01:47:38 compute-0 sudo[232627]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mhpgbgicpskksvtvfsvasowoijdewlfz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147257.4857123-62-173613512593268/AnsiballZ_dnf.py'
Oct 11 01:47:38 compute-0 sudo[232627]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:38 compute-0 python3.9[232629]: ansible-ansible.legacy.dnf Invoked with name=['openvswitch'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:47:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v284: 321 pgs: 321 active+clean; 457 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:38 compute-0 ceph-mon[191930]: 8.1 scrub starts
Oct 11 01:47:38 compute-0 ceph-mon[191930]: 8.1 scrub ok
Oct 11 01:47:39 compute-0 ceph-mon[191930]: pgmap v284: 321 pgs: 321 active+clean; 457 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:47:39 compute-0 sudo[232627]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:40 compute-0 podman[232705]: 2025-10-11 01:47:40.257914803 +0000 UTC m=+0.144313054 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible)
Oct 11 01:47:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v285: 321 pgs: 321 active+clean; 457 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:40 compute-0 sudo[232800]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wlnohvgpuumcotjqnbgasqtpfsuuqjkz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147259.9494057-70-76124634479311/AnsiballZ_systemd.py'
Oct 11 01:47:40 compute-0 sudo[232800]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:41 compute-0 python3.9[232802]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=openvswitch.service state=started daemon_reload=False daemon_reexec=False scope=system no_block=False force=None
Oct 11 01:47:41 compute-0 sudo[232800]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:41 compute-0 ceph-mon[191930]: pgmap v285: 321 pgs: 321 active+clean; 457 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:41 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.18 deep-scrub starts
Oct 11 01:47:41 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.18 deep-scrub ok
Oct 11 01:47:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v286: 321 pgs: 321 active+clean; 457 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:42 compute-0 ceph-mon[191930]: 11.18 deep-scrub starts
Oct 11 01:47:42 compute-0 ceph-mon[191930]: 11.18 deep-scrub ok
Oct 11 01:47:42 compute-0 python3.9[232955]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'selinux'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:47:42 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.3 scrub starts
Oct 11 01:47:42 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.3 scrub ok
Oct 11 01:47:42 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 8.1b scrub starts
Oct 11 01:47:42 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 8.1b scrub ok
Oct 11 01:47:43 compute-0 ceph-mon[191930]: pgmap v286: 321 pgs: 321 active+clean; 457 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:43 compute-0 ceph-mon[191930]: 8.3 scrub starts
Oct 11 01:47:43 compute-0 ceph-mon[191930]: 8.3 scrub ok
Oct 11 01:47:43 compute-0 ceph-mon[191930]: 8.1b scrub starts
Oct 11 01:47:43 compute-0 ceph-mon[191930]: 8.1b scrub ok
Oct 11 01:47:43 compute-0 sudo[233105]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ncjedrgawnpmcfqnldeuybejpmsvxhdv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147263.1910267-88-9341928773443/AnsiballZ_sefcontext.py'
Oct 11 01:47:43 compute-0 sudo[233105]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:44 compute-0 python3.9[233107]: ansible-community.general.sefcontext Invoked with selevel=s0 setype=container_file_t state=present target=/var/lib/edpm-config(/.*)? ignore_selinux_state=False ftype=a reload=True substitute=None seuser=None
Oct 11 01:47:44 compute-0 sudo[233105]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:44 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.6 scrub starts
Oct 11 01:47:44 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.6 scrub ok
Oct 11 01:47:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v287: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:47:44 compute-0 ceph-mon[191930]: 11.6 scrub starts
Oct 11 01:47:44 compute-0 ceph-mon[191930]: 11.6 scrub ok
Oct 11 01:47:45 compute-0 ceph-mon[191930]: pgmap v287: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:45 compute-0 python3.9[233257]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local', 'distribution'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:47:46 compute-0 podman[233264]: 2025-10-11 01:47:46.197951605 +0000 UTC m=+0.097378187 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 01:47:46 compute-0 podman[233266]: 2025-10-11 01:47:46.233644075 +0000 UTC m=+0.106027260 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, release=1214.1726694543, container_name=kepler, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, io.openshift.expose-services=, release-0.7.12=, architecture=x86_64, maintainer=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, com.redhat.component=ubi9-container, io.buildah.version=1.29.0, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9, managed_by=edpm_ansible, version=9.4, distribution-scope=public, build-date=2024-09-18T21:23:30, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.openshift.tags=base rhel9, name=ubi9)
Oct 11 01:47:46 compute-0 podman[233265]: 2025-10-11 01:47:46.281012823 +0000 UTC m=+0.166250809 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_managed=true, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3)
Oct 11 01:47:46 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.f scrub starts
Oct 11 01:47:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v288: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:46 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.f scrub ok
Oct 11 01:47:46 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.5 scrub starts
Oct 11 01:47:46 compute-0 ceph-mon[191930]: 8.f scrub starts
Oct 11 01:47:46 compute-0 ceph-mon[191930]: 8.f scrub ok
Oct 11 01:47:46 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.5 scrub ok
Oct 11 01:47:47 compute-0 sudo[233479]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-runafdpfnqklvusbepvkdrmkdojgjkom ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147266.4767523-106-229329539718061/AnsiballZ_dnf.py'
Oct 11 01:47:47 compute-0 sudo[233479]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:47 compute-0 python3.9[233481]: ansible-ansible.legacy.dnf Invoked with name=['driverctl', 'lvm2', 'crudini', 'jq', 'nftables', 'NetworkManager', 'openstack-selinux', 'python3-libselinux', 'python3-pyyaml', 'rsync', 'tmpwatch', 'sysstat', 'iproute-tc', 'ksmtuned', 'systemd-container', 'crypto-policies-scripts', 'grubby', 'sos'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:47:47 compute-0 ceph-mon[191930]: pgmap v288: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:47 compute-0 ceph-mon[191930]: 8.5 scrub starts
Oct 11 01:47:47 compute-0 ceph-mon[191930]: 8.5 scrub ok
Oct 11 01:47:47 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.1b scrub starts
Oct 11 01:47:47 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.1b scrub ok
Oct 11 01:47:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v289: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:48 compute-0 sudo[233479]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:48 compute-0 ceph-mon[191930]: 11.1b scrub starts
Oct 11 01:47:48 compute-0 ceph-mon[191930]: 11.1b scrub ok
Oct 11 01:47:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:47:49 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.7 scrub starts
Oct 11 01:47:49 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.7 scrub ok
Oct 11 01:47:49 compute-0 ceph-mon[191930]: pgmap v289: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:49 compute-0 sudo[233650]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cgomdaefjxjwpfmvywhjknkfbledyauw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147269.0200949-114-174632791612609/AnsiballZ_command.py'
Oct 11 01:47:49 compute-0 sudo[233650]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:49 compute-0 podman[233607]: 2025-10-11 01:47:49.850510956 +0000 UTC m=+0.180379004 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, container_name=ceilometer_agent_compute)
Oct 11 01:47:50 compute-0 python3.9[233655]: ansible-ansible.legacy.command Invoked with _raw_params=rpm -V driverctl lvm2 crudini jq nftables NetworkManager openstack-selinux python3-libselinux python3-pyyaml rsync tmpwatch sysstat iproute-tc ksmtuned systemd-container crypto-policies-scripts grubby sos _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:47:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v290: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:50 compute-0 ceph-mon[191930]: 8.7 scrub starts
Oct 11 01:47:50 compute-0 ceph-mon[191930]: 8.7 scrub ok
Oct 11 01:47:51 compute-0 sudo[233650]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:51 compute-0 ceph-mon[191930]: pgmap v290: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:51 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.b scrub starts
Oct 11 01:47:51 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.b scrub ok
Oct 11 01:47:52 compute-0 sudo[233940]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hfsazbqrwdaynkopjmdaprfgqlnzbjmd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147271.5682342-122-67644609463349/AnsiballZ_file.py'
Oct 11 01:47:52 compute-0 sudo[233940]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:52 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.e scrub starts
Oct 11 01:47:52 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.e scrub ok
Oct 11 01:47:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v291: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:52 compute-0 python3.9[233942]: ansible-ansible.builtin.file Invoked with mode=0750 path=/var/lib/edpm-config selevel=s0 setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None attributes=None
Oct 11 01:47:52 compute-0 sudo[233940]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:52 compute-0 ceph-mon[191930]: 11.b scrub starts
Oct 11 01:47:52 compute-0 ceph-mon[191930]: 11.b scrub ok
Oct 11 01:47:52 compute-0 ceph-mon[191930]: 11.e scrub starts
Oct 11 01:47:52 compute-0 ceph-mon[191930]: 11.e scrub ok
Oct 11 01:47:53 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.e scrub starts
Oct 11 01:47:53 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.e scrub ok
Oct 11 01:47:53 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.8 scrub starts
Oct 11 01:47:53 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.8 scrub ok
Oct 11 01:47:53 compute-0 ceph-mon[191930]: pgmap v291: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:53 compute-0 ceph-mon[191930]: 8.e scrub starts
Oct 11 01:47:53 compute-0 ceph-mon[191930]: 8.e scrub ok
Oct 11 01:47:53 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.1e scrub starts
Oct 11 01:47:53 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.1e scrub ok
Oct 11 01:47:53 compute-0 python3.9[234092]: ansible-ansible.builtin.stat Invoked with path=/etc/cloud/cloud.cfg.d follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:47:54 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.15 scrub starts
Oct 11 01:47:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v292: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:54 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.15 scrub ok
Oct 11 01:47:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:47:54 compute-0 ceph-mon[191930]: 8.8 scrub starts
Oct 11 01:47:54 compute-0 ceph-mon[191930]: 8.8 scrub ok
Oct 11 01:47:54 compute-0 ceph-mon[191930]: 11.1e scrub starts
Oct 11 01:47:54 compute-0 ceph-mon[191930]: 11.1e scrub ok
Oct 11 01:47:54 compute-0 ceph-mon[191930]: 10.15 scrub starts
Oct 11 01:47:54 compute-0 ceph-mon[191930]: 10.15 scrub ok
Oct 11 01:47:54 compute-0 sudo[234244]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vbiigmwjhsfsgmauhwcmhfstkeyibcqp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147274.2926323-138-190303609987365/AnsiballZ_dnf.py'
Oct 11 01:47:54 compute-0 sudo[234244]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:55 compute-0 python3.9[234246]: ansible-ansible.legacy.dnf Invoked with name=['NetworkManager-ovs'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:47:55 compute-0 ceph-mon[191930]: pgmap v292: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:47:56
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.meta', 'cephfs.cephfs.data', 'cephfs.cephfs.meta', 'default.rgw.control', '.mgr', 'vms', 'images', '.rgw.root', 'volumes', 'default.rgw.log', 'backups']
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:47:56 compute-0 sudo[234244]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v293: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:47:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:47:57 compute-0 sudo[234397]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-joubtrthadcylxlleiclzxodoczzgeim ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147276.894844-147-35511504627037/AnsiballZ_dnf.py'
Oct 11 01:47:57 compute-0 sudo[234397]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:47:57 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.16 scrub starts
Oct 11 01:47:57 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.16 scrub ok
Oct 11 01:47:57 compute-0 python3.9[234399]: ansible-ansible.legacy.dnf Invoked with name=['os-net-config'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:47:57 compute-0 ceph-mon[191930]: pgmap v293: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:57 compute-0 ceph-mon[191930]: 10.16 scrub starts
Oct 11 01:47:57 compute-0 ceph-mon[191930]: 10.16 scrub ok
Oct 11 01:47:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v294: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:59 compute-0 sudo[234397]: pam_unix(sudo:session): session closed for user root
Oct 11 01:47:59 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.a deep-scrub starts
Oct 11 01:47:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:47:59 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.a deep-scrub ok
Oct 11 01:47:59 compute-0 podman[157119]: time="2025-10-11T01:47:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:47:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:47:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:47:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:47:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6809 "" "Go-http-client/1.1"
Oct 11 01:47:59 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.11 deep-scrub starts
Oct 11 01:47:59 compute-0 ceph-mon[191930]: pgmap v294: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:47:59 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.11 deep-scrub ok
Oct 11 01:48:00 compute-0 sudo[234550]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iajuzmwnxinvypmzosgejdrqvtciyhte ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147279.6645365-159-861064946413/AnsiballZ_stat.py'
Oct 11 01:48:00 compute-0 sudo[234550]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:00 compute-0 python3.9[234552]: ansible-ansible.builtin.stat Invoked with path=/var/lib/edpm-config/os-net-config.returncode follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:48:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v295: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:00 compute-0 sudo[234550]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:00 compute-0 ceph-mon[191930]: 8.a deep-scrub starts
Oct 11 01:48:00 compute-0 ceph-mon[191930]: 8.a deep-scrub ok
Oct 11 01:48:00 compute-0 ceph-mon[191930]: 11.11 deep-scrub starts
Oct 11 01:48:00 compute-0 ceph-mon[191930]: 11.11 deep-scrub ok
Oct 11 01:48:00 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 8.12 deep-scrub starts
Oct 11 01:48:00 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 8.12 deep-scrub ok
Oct 11 01:48:01 compute-0 openstack_network_exporter[159265]: ERROR   01:48:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:48:01 compute-0 openstack_network_exporter[159265]: ERROR   01:48:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:48:01 compute-0 openstack_network_exporter[159265]: ERROR   01:48:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:48:01 compute-0 openstack_network_exporter[159265]: ERROR   01:48:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:48:01 compute-0 openstack_network_exporter[159265]: ERROR   01:48:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:48:01 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.13 scrub starts
Oct 11 01:48:01 compute-0 sudo[234704]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-udlrhhrtyfcmjklorqewwnhocdprwwlk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147280.8647168-167-159445704768457/AnsiballZ_slurp.py'
Oct 11 01:48:01 compute-0 sudo[234704]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:01 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.13 scrub ok
Oct 11 01:48:01 compute-0 python3.9[234706]: ansible-ansible.builtin.slurp Invoked with path=/var/lib/edpm-config/os-net-config.returncode src=/var/lib/edpm-config/os-net-config.returncode
Oct 11 01:48:01 compute-0 sudo[234704]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:01 compute-0 ceph-mon[191930]: pgmap v295: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:01 compute-0 ceph-mon[191930]: 8.12 deep-scrub starts
Oct 11 01:48:01 compute-0 ceph-mon[191930]: 8.12 deep-scrub ok
Oct 11 01:48:02 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.9 scrub starts
Oct 11 01:48:02 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.9 scrub ok
Oct 11 01:48:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v296: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:02 compute-0 sshd-session[231559]: Connection closed by 192.168.122.30 port 35070
Oct 11 01:48:02 compute-0 sshd-session[231542]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:48:02 compute-0 systemd[1]: session-43.scope: Deactivated successfully.
Oct 11 01:48:02 compute-0 systemd[1]: session-43.scope: Consumed 27.363s CPU time.
Oct 11 01:48:02 compute-0 systemd-logind[804]: Session 43 logged out. Waiting for processes to exit.
Oct 11 01:48:02 compute-0 systemd-logind[804]: Removed session 43.
Oct 11 01:48:02 compute-0 ceph-mon[191930]: 8.13 scrub starts
Oct 11 01:48:02 compute-0 ceph-mon[191930]: 8.13 scrub ok
Oct 11 01:48:02 compute-0 ceph-mon[191930]: 10.9 scrub starts
Oct 11 01:48:02 compute-0 ceph-mon[191930]: 10.9 scrub ok
Oct 11 01:48:03 compute-0 ceph-mon[191930]: pgmap v296: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:04 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.17 scrub starts
Oct 11 01:48:04 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.17 scrub ok
Oct 11 01:48:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v297: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:04 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.16 deep-scrub starts
Oct 11 01:48:04 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.16 deep-scrub ok
Oct 11 01:48:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:48:04 compute-0 ceph-mon[191930]: 10.17 scrub starts
Oct 11 01:48:04 compute-0 ceph-mon[191930]: 10.17 scrub ok
Oct 11 01:48:05 compute-0 ceph-mon[191930]: pgmap v297: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:05 compute-0 ceph-mon[191930]: 8.16 deep-scrub starts
Oct 11 01:48:05 compute-0 ceph-mon[191930]: 8.16 deep-scrub ok
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 01:48:06 compute-0 podman[234731]: 2025-10-11 01:48:06.257385054 +0000 UTC m=+0.142471118 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 01:48:06 compute-0 podman[234732]: 2025-10-11 01:48:06.279715393 +0000 UTC m=+0.163941896 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, distribution-scope=public, io.openshift.tags=minimal rhel9, container_name=openstack_network_exporter, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.buildah.version=1.33.7, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.6, architecture=x86_64, url=https://catalog.redhat.com/en/search?searchType=containers, config_id=edpm, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.expose-services=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, build-date=2025-08-20T13:12:41, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, managed_by=edpm_ansible, name=ubi9-minimal, com.redhat.component=ubi9-minimal-container, maintainer=Red Hat, Inc., release=1755695350)
Oct 11 01:48:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v298: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:07 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.1 scrub starts
Oct 11 01:48:07 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 10.1 scrub ok
Oct 11 01:48:07 compute-0 ceph-mon[191930]: pgmap v298: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:07 compute-0 ceph-mon[191930]: 10.1 scrub starts
Oct 11 01:48:07 compute-0 ceph-mon[191930]: 10.1 scrub ok
Oct 11 01:48:08 compute-0 sshd-session[234774]: Accepted publickey for zuul from 192.168.122.30 port 37420 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:48:08 compute-0 systemd-logind[804]: New session 44 of user zuul.
Oct 11 01:48:08 compute-0 systemd[1]: Started Session 44 of User zuul.
Oct 11 01:48:08 compute-0 sshd-session[234774]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:48:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v299: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:08 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.12 scrub starts
Oct 11 01:48:08 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.12 scrub ok
Oct 11 01:48:09 compute-0 python3.9[234927]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:48:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:48:09 compute-0 ceph-mon[191930]: pgmap v299: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:09 compute-0 ceph-mon[191930]: 11.12 scrub starts
Oct 11 01:48:09 compute-0 ceph-mon[191930]: 11.12 scrub ok
Oct 11 01:48:10 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.c scrub starts
Oct 11 01:48:10 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.c scrub ok
Oct 11 01:48:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v300: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:10 compute-0 podman[235055]: 2025-10-11 01:48:10.87706381 +0000 UTC m=+0.156100793 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 01:48:11 compute-0 ceph-mon[191930]: 8.c scrub starts
Oct 11 01:48:11 compute-0 ceph-mon[191930]: 8.c scrub ok
Oct 11 01:48:11 compute-0 python3.9[235095]: ansible-ansible.builtin.setup Invoked with filter=['ansible_default_ipv4'] gather_subset=['!all', '!min', 'network'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:48:11 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.1 scrub starts
Oct 11 01:48:11 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.1 scrub ok
Oct 11 01:48:11 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.1f deep-scrub starts
Oct 11 01:48:11 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.1f deep-scrub ok
Oct 11 01:48:12 compute-0 ceph-mon[191930]: pgmap v300: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:12 compute-0 ceph-mon[191930]: 11.1 scrub starts
Oct 11 01:48:12 compute-0 ceph-mon[191930]: 11.1 scrub ok
Oct 11 01:48:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v301: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:13 compute-0 ceph-mon[191930]: 11.1f deep-scrub starts
Oct 11 01:48:13 compute-0 ceph-mon[191930]: 11.1f deep-scrub ok
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #18. Immutable memtables: 0.
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:48:13.064406) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 3] Flushing memtable with next log file: 18
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147293064574, "job": 3, "event": "flush_started", "num_memtables": 1, "num_entries": 7185, "num_deletes": 251, "total_data_size": 8748424, "memory_usage": 8985648, "flush_reason": "Manual Compaction"}
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 3] Level-0 flush table #19: started
Oct 11 01:48:13 compute-0 python3.9[235300]: ansible-ansible.legacy.command Invoked with _raw_params=hostname -f _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147293110541, "cf_name": "default", "job": 3, "event": "table_file_creation", "file_number": 19, "file_size": 7115140, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 140, "largest_seqno": 7322, "table_properties": {"data_size": 7088817, "index_size": 17184, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 8069, "raw_key_size": 74595, "raw_average_key_size": 23, "raw_value_size": 7026841, "raw_average_value_size": 2184, "num_data_blocks": 755, "num_entries": 3217, "num_filter_entries": 3217, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146869, "oldest_key_time": 1760146869, "file_creation_time": 1760147293, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 19, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 3] Flush lasted 46235 microseconds, and 27911 cpu microseconds.
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:48:13.110654) [db/flush_job.cc:967] [default] [JOB 3] Level-0 flush table #19: 7115140 bytes OK
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:48:13.110686) [db/memtable_list.cc:519] [default] Level-0 commit table #19 started
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:48:13.113975) [db/memtable_list.cc:722] [default] Level-0 commit table #19: memtable #1 done
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:48:13.114003) EVENT_LOG_v1 {"time_micros": 1760147293113995, "job": 3, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [3, 0, 0, 0, 0, 0, 0], "immutable_memtables": 0}
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:48:13.114037) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: files[3 0 0 0 0 0 0] max score 0.75
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 3] Try to delete WAL files size 8717234, prev total WAL file size 8717234, number of live WAL files 2.
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000014.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:48:13.117677) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730030' seq:72057594037927935, type:22 .. '7061786F7300323532' seq:0, type:0; will stop at (end)
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 4] Compacting 3@0 files to L6, score -1.00
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 3 Base level 0, inputs: [19(6948KB) 13(52KB) 8(1944B)]
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147293117889, "job": 4, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [19, 13, 8], "score": -1, "input_data_size": 7171237, "oldest_snapshot_seqno": -1}
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 4] Generated table #20: 3032 keys, 7127126 bytes, temperature: kUnknown
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147293174942, "cf_name": "default", "job": 4, "event": "table_file_creation", "file_number": 20, "file_size": 7127126, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 7101249, "index_size": 17191, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 7621, "raw_key_size": 72640, "raw_average_key_size": 23, "raw_value_size": 7040865, "raw_average_value_size": 2322, "num_data_blocks": 757, "num_entries": 3032, "num_filter_entries": 3032, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760147293, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 20, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:48:13.175573) [db/compaction/compaction_job.cc:1663] [default] [JOB 4] Compacted 3@0 files to L6 => 7127126 bytes
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:48:13.178676) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 125.5 rd, 124.7 wr, level 6, files in(3, 0) out(1 +0 blob) MB in(6.8, 0.0 +0.0 blob) out(6.8 +0.0 blob), read-write-amplify(2.0) write-amplify(1.0) OK, records in: 3322, records dropped: 290 output_compression: NoCompression
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:48:13.178715) EVENT_LOG_v1 {"time_micros": 1760147293178695, "job": 4, "event": "compaction_finished", "compaction_time_micros": 57164, "compaction_time_cpu_micros": 35207, "output_level": 6, "num_output_files": 1, "total_output_size": 7127126, "num_input_records": 3322, "num_output_records": 3032, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000019.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147293181797, "job": 4, "event": "table_file_deletion", "file_number": 19}
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000013.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147293181904, "job": 4, "event": "table_file_deletion", "file_number": 13}
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000008.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147293181964, "job": 4, "event": "table_file_deletion", "file_number": 8}
Oct 11 01:48:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:48:13.117326) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:48:13 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.18 scrub starts
Oct 11 01:48:13 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.18 scrub ok
Oct 11 01:48:13 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.17 scrub starts
Oct 11 01:48:13 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.17 scrub ok
Oct 11 01:48:13 compute-0 sshd-session[234777]: Connection closed by 192.168.122.30 port 37420
Oct 11 01:48:13 compute-0 sshd-session[234774]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:48:13 compute-0 systemd[1]: session-44.scope: Deactivated successfully.
Oct 11 01:48:13 compute-0 systemd[1]: session-44.scope: Consumed 4.361s CPU time.
Oct 11 01:48:13 compute-0 systemd-logind[804]: Session 44 logged out. Waiting for processes to exit.
Oct 11 01:48:13 compute-0 systemd-logind[804]: Removed session 44.
Oct 11 01:48:14 compute-0 ceph-mon[191930]: pgmap v301: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:14 compute-0 ceph-mon[191930]: 8.18 scrub starts
Oct 11 01:48:14 compute-0 ceph-mon[191930]: 8.17 scrub starts
Oct 11 01:48:14 compute-0 ceph-mon[191930]: 8.18 scrub ok
Oct 11 01:48:14 compute-0 ceph-mon[191930]: 8.17 scrub ok
Oct 11 01:48:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v302: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:48:14 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 8.11 scrub starts
Oct 11 01:48:14 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 8.11 scrub ok
Oct 11 01:48:16 compute-0 ceph-mon[191930]: pgmap v302: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:16 compute-0 ceph-mon[191930]: 8.11 scrub starts
Oct 11 01:48:16 compute-0 ceph-mon[191930]: 8.11 scrub ok
Oct 11 01:48:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v303: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:17 compute-0 podman[235327]: 2025-10-11 01:48:17.230815022 +0000 UTC m=+0.114853186 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 01:48:17 compute-0 podman[235329]: 2025-10-11 01:48:17.274973804 +0000 UTC m=+0.148213908 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, version=9.4, com.redhat.component=ubi9-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release-0.7.12=, name=ubi9, architecture=x86_64, maintainer=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, release=1214.1726694543, io.openshift.expose-services=, io.k8s.display-name=Red Hat Universal Base Image 9, managed_by=edpm_ansible, build-date=2024-09-18T21:23:30, io.buildah.version=1.29.0, io.openshift.tags=base rhel9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, distribution-scope=public, summary=Provides the latest release of Red Hat Universal Base Image 9., vendor=Red Hat, Inc., container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 01:48:17 compute-0 podman[235328]: 2025-10-11 01:48:17.283496667 +0000 UTC m=+0.162852994 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, container_name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, managed_by=edpm_ansible)
Oct 11 01:48:18 compute-0 ceph-mon[191930]: pgmap v303: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:18 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.14 scrub starts
Oct 11 01:48:18 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.14 scrub ok
Oct 11 01:48:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v304: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:19 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 8.1c scrub starts
Oct 11 01:48:19 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 8.1c scrub ok
Oct 11 01:48:19 compute-0 ceph-mon[191930]: 8.14 scrub starts
Oct 11 01:48:19 compute-0 ceph-mon[191930]: 8.14 scrub ok
Oct 11 01:48:19 compute-0 sshd-session[235392]: Accepted publickey for zuul from 192.168.122.30 port 38946 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:48:19 compute-0 systemd-logind[804]: New session 45 of user zuul.
Oct 11 01:48:19 compute-0 systemd[1]: Started Session 45 of User zuul.
Oct 11 01:48:19 compute-0 sshd-session[235392]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:48:19 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.19 scrub starts
Oct 11 01:48:19 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.19 scrub ok
Oct 11 01:48:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:48:20 compute-0 ceph-mon[191930]: pgmap v304: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:20 compute-0 ceph-mon[191930]: 8.1c scrub starts
Oct 11 01:48:20 compute-0 ceph-mon[191930]: 8.1c scrub ok
Oct 11 01:48:20 compute-0 ceph-mon[191930]: 8.19 scrub starts
Oct 11 01:48:20 compute-0 ceph-mon[191930]: 8.19 scrub ok
Oct 11 01:48:20 compute-0 podman[235472]: 2025-10-11 01:48:20.244210708 +0000 UTC m=+0.131590827 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.schema-version=1.0, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 01:48:20 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.17 scrub starts
Oct 11 01:48:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v305: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:20 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.17 scrub ok
Oct 11 01:48:20 compute-0 python3.9[235565]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:48:21 compute-0 ceph-mon[191930]: 11.17 scrub starts
Oct 11 01:48:21 compute-0 ceph-mon[191930]: 11.17 scrub ok
Oct 11 01:48:21 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.1e deep-scrub starts
Oct 11 01:48:21 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 8.1e deep-scrub ok
Oct 11 01:48:22 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.1a scrub starts
Oct 11 01:48:22 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.1a scrub ok
Oct 11 01:48:22 compute-0 ceph-mon[191930]: pgmap v305: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:22 compute-0 ceph-mon[191930]: 8.1e deep-scrub starts
Oct 11 01:48:22 compute-0 ceph-mon[191930]: 8.1e deep-scrub ok
Oct 11 01:48:22 compute-0 python3.9[235719]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:48:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v306: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:23 compute-0 ceph-mon[191930]: 11.1a scrub starts
Oct 11 01:48:23 compute-0 ceph-mon[191930]: 11.1a scrub ok
Oct 11 01:48:23 compute-0 sudo[235873]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fbybtdlvttadsktswmbnosqkogonpvib ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147303.1392243-40-252013760355448/AnsiballZ_setup.py'
Oct 11 01:48:23 compute-0 sudo[235873]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:24 compute-0 python3.9[235875]: ansible-ansible.legacy.setup Invoked with filter=['ansible_pkg_mgr'] gather_subset=['!all'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:48:24 compute-0 ceph-mon[191930]: pgmap v306: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:24 compute-0 sudo[235873]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v307: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:48:25 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.1c scrub starts
Oct 11 01:48:25 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 11.1c scrub ok
Oct 11 01:48:25 compute-0 sudo[235957]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-srcdwuipwqlgfcpzdluevdmfwaxscayg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147303.1392243-40-252013760355448/AnsiballZ_dnf.py'
Oct 11 01:48:25 compute-0 sudo[235957]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:25 compute-0 ceph-mon[191930]: 11.1c scrub starts
Oct 11 01:48:25 compute-0 ceph-mon[191930]: 11.1c scrub ok
Oct 11 01:48:25 compute-0 python3.9[235959]: ansible-ansible.legacy.dnf Invoked with name=['podman'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:48:26 compute-0 ceph-mon[191930]: pgmap v307: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:26 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.2 scrub starts
Oct 11 01:48:26 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.2 scrub ok
Oct 11 01:48:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:48:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:48:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:48:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:48:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:48:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:48:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v308: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:26 compute-0 sudo[235957]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:27 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.e scrub starts
Oct 11 01:48:27 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.e scrub ok
Oct 11 01:48:27 compute-0 ceph-mon[191930]: 9.2 scrub starts
Oct 11 01:48:27 compute-0 ceph-mon[191930]: 9.2 scrub ok
Oct 11 01:48:27 compute-0 ceph-mon[191930]: 9.e scrub starts
Oct 11 01:48:27 compute-0 ceph-mon[191930]: 9.e scrub ok
Oct 11 01:48:27 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.4 deep-scrub starts
Oct 11 01:48:27 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.4 deep-scrub ok
Oct 11 01:48:27 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.1d scrub starts
Oct 11 01:48:27 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.1d scrub ok
Oct 11 01:48:27 compute-0 sudo[236110]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hwysplgfccwkyvptobidzpdoxuckrgjw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147307.0973833-52-185465514783146/AnsiballZ_setup.py'
Oct 11 01:48:27 compute-0 sudo[236110]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:28 compute-0 python3.9[236112]: ansible-ansible.builtin.setup Invoked with filter=['ansible_interfaces'] gather_subset=['!all', '!min', 'network'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:48:28 compute-0 ceph-mon[191930]: pgmap v308: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:28 compute-0 ceph-mon[191930]: 9.4 deep-scrub starts
Oct 11 01:48:28 compute-0 ceph-mon[191930]: 9.4 deep-scrub ok
Oct 11 01:48:28 compute-0 ceph-mon[191930]: 8.1d scrub starts
Oct 11 01:48:28 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.a scrub starts
Oct 11 01:48:28 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.a scrub ok
Oct 11 01:48:28 compute-0 sudo[236110]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v309: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:28 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.19 deep-scrub starts
Oct 11 01:48:28 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.19 deep-scrub ok
Oct 11 01:48:28 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.6 scrub starts
Oct 11 01:48:29 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.6 scrub ok
Oct 11 01:48:29 compute-0 ceph-mon[191930]: 8.1d scrub ok
Oct 11 01:48:29 compute-0 ceph-mon[191930]: 9.a scrub starts
Oct 11 01:48:29 compute-0 ceph-mon[191930]: 9.a scrub ok
Oct 11 01:48:29 compute-0 ceph-mon[191930]: 9.6 scrub starts
Oct 11 01:48:29 compute-0 ceph-mon[191930]: 9.6 scrub ok
Oct 11 01:48:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:48:29 compute-0 sudo[236313]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pxgjfrfaboafsxhpgzlolwxyoqkpkdtz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147308.9502068-63-199852083545584/AnsiballZ_file.py'
Oct 11 01:48:29 compute-0 sudo[236313]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:29 compute-0 podman[157119]: time="2025-10-11T01:48:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:48:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:48:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:48:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:48:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6816 "" "Go-http-client/1.1"
Oct 11 01:48:29 compute-0 python3.9[236315]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/containers/networks recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:48:29 compute-0 sudo[236313]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:30 compute-0 ceph-mon[191930]: pgmap v309: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:30 compute-0 ceph-mon[191930]: 11.19 deep-scrub starts
Oct 11 01:48:30 compute-0 ceph-mon[191930]: 11.19 deep-scrub ok
Oct 11 01:48:30 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.10 scrub starts
Oct 11 01:48:30 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.10 scrub ok
Oct 11 01:48:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v310: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:31 compute-0 sudo[236465]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aouspaihglqqoqcpvorwteevuhxevilx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147310.2299476-71-62761749728879/AnsiballZ_command.py'
Oct 11 01:48:31 compute-0 sudo[236465]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:31 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.17 scrub starts
Oct 11 01:48:31 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.17 scrub ok
Oct 11 01:48:31 compute-0 python3.9[236467]: ansible-ansible.legacy.command Invoked with _raw_params=podman network inspect podman _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:48:31 compute-0 ceph-mon[191930]: 9.10 scrub starts
Oct 11 01:48:31 compute-0 ceph-mon[191930]: 9.10 scrub ok
Oct 11 01:48:31 compute-0 ceph-mon[191930]: 9.17 scrub starts
Oct 11 01:48:31 compute-0 ceph-mon[191930]: 9.17 scrub ok
Oct 11 01:48:31 compute-0 sudo[236465]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:31 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.12 scrub starts
Oct 11 01:48:31 compute-0 openstack_network_exporter[159265]: ERROR   01:48:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:48:31 compute-0 openstack_network_exporter[159265]: ERROR   01:48:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:48:31 compute-0 openstack_network_exporter[159265]: ERROR   01:48:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:48:31 compute-0 openstack_network_exporter[159265]: ERROR   01:48:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:48:31 compute-0 openstack_network_exporter[159265]: ERROR   01:48:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:48:31 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.12 scrub ok
Oct 11 01:48:32 compute-0 ceph-mon[191930]: pgmap v310: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:32 compute-0 ceph-mon[191930]: 9.12 scrub starts
Oct 11 01:48:32 compute-0 ceph-mon[191930]: 9.12 scrub ok
Oct 11 01:48:32 compute-0 sudo[236631]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jiwimslirdrmkznhzekszzbqowpnpslq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147311.7179868-79-62284225200959/AnsiballZ_stat.py'
Oct 11 01:48:32 compute-0 sudo[236631]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:32 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.1a scrub starts
Oct 11 01:48:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v311: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:32 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.1a scrub ok
Oct 11 01:48:32 compute-0 python3.9[236633]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/networks/podman.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:48:32 compute-0 sudo[236631]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:33 compute-0 sudo[236709]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ngmfvhjsubfleqdgwwyubqmigxzelctb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147311.7179868-79-62284225200959/AnsiballZ_file.py'
Oct 11 01:48:33 compute-0 sudo[236709]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:33 compute-0 python3.9[236711]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/containers/networks/podman.json _original_basename=podman_network_config.j2 recurse=False state=file path=/etc/containers/networks/podman.json force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:48:33 compute-0 ceph-mon[191930]: 8.1a scrub starts
Oct 11 01:48:33 compute-0 ceph-mon[191930]: 8.1a scrub ok
Oct 11 01:48:33 compute-0 sudo[236709]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:33 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.14 scrub starts
Oct 11 01:48:33 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.14 scrub ok
Oct 11 01:48:33 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.f scrub starts
Oct 11 01:48:33 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.f scrub ok
Oct 11 01:48:34 compute-0 sudo[236861]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ukmmytehnjunampwftaeczalnbaqgayu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147313.5891194-91-272709474214530/AnsiballZ_stat.py'
Oct 11 01:48:34 compute-0 sudo[236861]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:34 compute-0 ceph-mon[191930]: pgmap v311: 321 pgs: 321 active+clean; 456 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:34 compute-0 ceph-mon[191930]: 9.14 scrub starts
Oct 11 01:48:34 compute-0 ceph-mon[191930]: 9.14 scrub ok
Oct 11 01:48:34 compute-0 ceph-mon[191930]: 11.f scrub starts
Oct 11 01:48:34 compute-0 ceph-mon[191930]: 11.f scrub ok
Oct 11 01:48:34 compute-0 python3.9[236863]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/registries.conf.d/20-edpm-podman-registries.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:48:34 compute-0 sudo[236861]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v312: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:48:34 compute-0 sudo[236940]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-efrtbgdtityhumptlhcyjzakdtrrrrwh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147313.5891194-91-272709474214530/AnsiballZ_file.py'
Oct 11 01:48:34 compute-0 sudo[236940]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:34 compute-0 sudo[236939]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:48:34 compute-0 sudo[236939]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:34 compute-0 sudo[236939]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:35 compute-0 sudo[236967]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:48:35 compute-0 sudo[236967]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:35 compute-0 sudo[236967]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:35 compute-0 python3.9[236958]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root setype=etc_t dest=/etc/containers/registries.conf.d/20-edpm-podman-registries.conf _original_basename=registries.conf.j2 recurse=False state=file path=/etc/containers/registries.conf.d/20-edpm-podman-registries.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:48:35 compute-0 sudo[236940]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:35 compute-0 sudo[236992]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:48:35 compute-0 sudo[236992]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:35 compute-0 sudo[236992]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:35 compute-0 sudo[237030]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 01:48:35 compute-0 sudo[237030]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:35 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.1f scrub starts
Oct 11 01:48:35 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.1f scrub ok
Oct 11 01:48:35 compute-0 sudo[237030]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:48:35 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:48:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:48:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:48:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:48:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:48:36 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 141c1ca0-f8c9-4ff1-9538-11cd53a89b53 does not exist
Oct 11 01:48:36 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 638d28de-7f97-4fb6-9600-9fb0ebf8dc95 does not exist
Oct 11 01:48:36 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev fffbcd80-58b4-4d3c-9298-19e4907c019e does not exist
Oct 11 01:48:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:48:36 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:48:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:48:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:48:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:48:36 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:48:36 compute-0 sudo[237238]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-alglpsknhmkoxutblhhixohiobbbdhme ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147315.4037163-104-264477887057598/AnsiballZ_ini_file.py'
Oct 11 01:48:36 compute-0 sudo[237238]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:36 compute-0 sudo[237205]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:48:36 compute-0 sudo[237205]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:36 compute-0 sudo[237205]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:36 compute-0 sudo[237248]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:48:36 compute-0 sudo[237248]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:36 compute-0 sudo[237248]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:36 compute-0 python3.9[237245]: ansible-community.general.ini_file Invoked with create=True group=root mode=0644 option=pids_limit owner=root path=/etc/containers/containers.conf section=containers setype=etc_t value=4096 backup=False state=present exclusive=True no_extra_spaces=False ignore_spaces=False allow_no_value=False modify_inactive_option=True follow=False unsafe_writes=False section_has_values=None values=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:48:36 compute-0 ceph-mon[191930]: pgmap v312: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:36 compute-0 ceph-mon[191930]: 8.1f scrub starts
Oct 11 01:48:36 compute-0 ceph-mon[191930]: 8.1f scrub ok
Oct 11 01:48:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:48:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:48:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:48:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:48:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:48:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:48:36 compute-0 sudo[237238]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:36 compute-0 podman[237273]: 2025-10-11 01:48:36.450517912 +0000 UTC m=+0.108864123 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, distribution-scope=public, vcs-type=git, architecture=x86_64, config_id=edpm, io.openshift.tags=minimal rhel9, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, build-date=2025-08-20T13:12:41, name=ubi9-minimal, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.expose-services=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.6, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, managed_by=edpm_ansible, url=https://catalog.redhat.com/en/search?searchType=containers, container_name=openstack_network_exporter, release=1755695350, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.component=ubi9-minimal-container)
Oct 11 01:48:36 compute-0 sudo[237288]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:48:36 compute-0 sudo[237288]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:36 compute-0 podman[237272]: 2025-10-11 01:48:36.470183749 +0000 UTC m=+0.137709151 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 01:48:36 compute-0 sudo[237288]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:36 compute-0 sudo[237365]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 01:48:36 compute-0 sudo[237365]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v313: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:36 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.6 scrub starts
Oct 11 01:48:36 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 8.6 scrub ok
Oct 11 01:48:37 compute-0 podman[237527]: 2025-10-11 01:48:37.094589904 +0000 UTC m=+0.083553278 container create 77157aa04650c947532779326a89f9edd0b5f812b243c99e672c21396dd4a178 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_ride, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 01:48:37 compute-0 podman[237527]: 2025-10-11 01:48:37.062463777 +0000 UTC m=+0.051427211 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:48:37 compute-0 systemd[1]: Started libpod-conmon-77157aa04650c947532779326a89f9edd0b5f812b243c99e672c21396dd4a178.scope.
Oct 11 01:48:37 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:48:37 compute-0 sudo[237572]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tkxgddltlagoveszcdajjybdfoqsqvcf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147316.60695-104-209220103070158/AnsiballZ_ini_file.py'
Oct 11 01:48:37 compute-0 sudo[237572]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:37 compute-0 podman[237527]: 2025-10-11 01:48:37.229715113 +0000 UTC m=+0.218678557 container init 77157aa04650c947532779326a89f9edd0b5f812b243c99e672c21396dd4a178 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_ride, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:48:37 compute-0 podman[237527]: 2025-10-11 01:48:37.249791275 +0000 UTC m=+0.238754679 container start 77157aa04650c947532779326a89f9edd0b5f812b243c99e672c21396dd4a178 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_ride, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:48:37 compute-0 podman[237527]: 2025-10-11 01:48:37.256541906 +0000 UTC m=+0.245505350 container attach 77157aa04650c947532779326a89f9edd0b5f812b243c99e672c21396dd4a178 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_ride, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0)
Oct 11 01:48:37 compute-0 trusting_ride[237573]: 167 167
Oct 11 01:48:37 compute-0 systemd[1]: libpod-77157aa04650c947532779326a89f9edd0b5f812b243c99e672c21396dd4a178.scope: Deactivated successfully.
Oct 11 01:48:37 compute-0 podman[237527]: 2025-10-11 01:48:37.265212161 +0000 UTC m=+0.254175565 container died 77157aa04650c947532779326a89f9edd0b5f812b243c99e672c21396dd4a178 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_ride, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Oct 11 01:48:37 compute-0 systemd[1]: var-lib-containers-storage-overlay-622db65b1a0dc3f1517fe5229d02290414d8d91f33dd53f54a8f34114865a5b1-merged.mount: Deactivated successfully.
Oct 11 01:48:37 compute-0 podman[237527]: 2025-10-11 01:48:37.336969046 +0000 UTC m=+0.325932430 container remove 77157aa04650c947532779326a89f9edd0b5f812b243c99e672c21396dd4a178 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_ride, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:48:37 compute-0 ceph-mon[191930]: pgmap v313: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:37 compute-0 ceph-mon[191930]: 8.6 scrub starts
Oct 11 01:48:37 compute-0 ceph-mon[191930]: 8.6 scrub ok
Oct 11 01:48:37 compute-0 systemd[1]: libpod-conmon-77157aa04650c947532779326a89f9edd0b5f812b243c99e672c21396dd4a178.scope: Deactivated successfully.
Oct 11 01:48:37 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.1a scrub starts
Oct 11 01:48:37 compute-0 python3.9[237577]: ansible-community.general.ini_file Invoked with create=True group=root mode=0644 option=events_logger owner=root path=/etc/containers/containers.conf section=engine setype=etc_t value="journald" backup=False state=present exclusive=True no_extra_spaces=False ignore_spaces=False allow_no_value=False modify_inactive_option=True follow=False unsafe_writes=False section_has_values=None values=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:48:37 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.1a scrub ok
Oct 11 01:48:37 compute-0 sudo[237572]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:37 compute-0 podman[237615]: 2025-10-11 01:48:37.621484615 +0000 UTC m=+0.084429029 container create 9af682a62f50a2a3ba60345a4d22d28d2db8a3f8522abd438c5c5082f49f6f02 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_morse, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:48:37 compute-0 podman[237615]: 2025-10-11 01:48:37.59374378 +0000 UTC m=+0.056688204 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:48:37 compute-0 systemd[1]: Started libpod-conmon-9af682a62f50a2a3ba60345a4d22d28d2db8a3f8522abd438c5c5082f49f6f02.scope.
Oct 11 01:48:37 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:48:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4c0c5ba271a993eeb86c922495300fc07b8fb54c44a1f868c8915f5291a24323/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:48:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4c0c5ba271a993eeb86c922495300fc07b8fb54c44a1f868c8915f5291a24323/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:48:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4c0c5ba271a993eeb86c922495300fc07b8fb54c44a1f868c8915f5291a24323/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:48:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4c0c5ba271a993eeb86c922495300fc07b8fb54c44a1f868c8915f5291a24323/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:48:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4c0c5ba271a993eeb86c922495300fc07b8fb54c44a1f868c8915f5291a24323/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:48:37 compute-0 podman[237615]: 2025-10-11 01:48:37.811054288 +0000 UTC m=+0.273998722 container init 9af682a62f50a2a3ba60345a4d22d28d2db8a3f8522abd438c5c5082f49f6f02 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_morse, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS)
Oct 11 01:48:37 compute-0 podman[237615]: 2025-10-11 01:48:37.834610202 +0000 UTC m=+0.297554586 container start 9af682a62f50a2a3ba60345a4d22d28d2db8a3f8522abd438c5c5082f49f6f02 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_morse, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2)
Oct 11 01:48:37 compute-0 podman[237615]: 2025-10-11 01:48:37.845493263 +0000 UTC m=+0.308437667 container attach 9af682a62f50a2a3ba60345a4d22d28d2db8a3f8522abd438c5c5082f49f6f02 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_morse, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:48:38 compute-0 sudo[237767]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xoihobomfcnpjgzlmvdnsqvquvfuwfiq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147317.6587641-104-111789112828850/AnsiballZ_ini_file.py'
Oct 11 01:48:38 compute-0 sudo[237767]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:38 compute-0 ceph-mon[191930]: 9.1a scrub starts
Oct 11 01:48:38 compute-0 ceph-mon[191930]: 9.1a scrub ok
Oct 11 01:48:38 compute-0 python3.9[237769]: ansible-community.general.ini_file Invoked with create=True group=root mode=0644 option=runtime owner=root path=/etc/containers/containers.conf section=engine setype=etc_t value="crun" backup=False state=present exclusive=True no_extra_spaces=False ignore_spaces=False allow_no_value=False modify_inactive_option=True follow=False unsafe_writes=False section_has_values=None values=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:48:38 compute-0 sudo[237767]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v314: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:39 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.7 scrub starts
Oct 11 01:48:39 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.7 scrub ok
Oct 11 01:48:39 compute-0 modest_morse[237668]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:48:39 compute-0 modest_morse[237668]: --> relative data size: 1.0
Oct 11 01:48:39 compute-0 modest_morse[237668]: --> All data devices are unavailable
Oct 11 01:48:39 compute-0 systemd[1]: libpod-9af682a62f50a2a3ba60345a4d22d28d2db8a3f8522abd438c5c5082f49f6f02.scope: Deactivated successfully.
Oct 11 01:48:39 compute-0 systemd[1]: libpod-9af682a62f50a2a3ba60345a4d22d28d2db8a3f8522abd438c5c5082f49f6f02.scope: Consumed 1.228s CPU time.
Oct 11 01:48:39 compute-0 podman[237615]: 2025-10-11 01:48:39.124990754 +0000 UTC m=+1.587935168 container died 9af682a62f50a2a3ba60345a4d22d28d2db8a3f8522abd438c5c5082f49f6f02 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_morse, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:48:39 compute-0 systemd[1]: var-lib-containers-storage-overlay-4c0c5ba271a993eeb86c922495300fc07b8fb54c44a1f868c8915f5291a24323-merged.mount: Deactivated successfully.
Oct 11 01:48:39 compute-0 podman[237615]: 2025-10-11 01:48:39.245101591 +0000 UTC m=+1.708045975 container remove 9af682a62f50a2a3ba60345a4d22d28d2db8a3f8522abd438c5c5082f49f6f02 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_morse, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 01:48:39 compute-0 systemd[1]: libpod-conmon-9af682a62f50a2a3ba60345a4d22d28d2db8a3f8522abd438c5c5082f49f6f02.scope: Deactivated successfully.
Oct 11 01:48:39 compute-0 sudo[237365]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:39 compute-0 sudo[237956]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cvacmdmkiefrewcxiwmmkmiwargvvwqs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147318.764832-104-204039294564961/AnsiballZ_ini_file.py'
Oct 11 01:48:39 compute-0 sudo[237956]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:39 compute-0 ceph-mon[191930]: pgmap v314: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:39 compute-0 ceph-mon[191930]: 9.7 scrub starts
Oct 11 01:48:39 compute-0 ceph-mon[191930]: 9.7 scrub ok
Oct 11 01:48:39 compute-0 sudo[237957]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:48:39 compute-0 sudo[237957]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:39 compute-0 sudo[237957]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:39 compute-0 sudo[237984]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:48:39 compute-0 sudo[237984]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:39 compute-0 sudo[237984]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:39 compute-0 python3.9[237962]: ansible-community.general.ini_file Invoked with create=True group=root mode=0644 option=network_backend owner=root path=/etc/containers/containers.conf section=network setype=etc_t value="netavark" backup=False state=present exclusive=True no_extra_spaces=False ignore_spaces=False allow_no_value=False modify_inactive_option=True follow=False unsafe_writes=False section_has_values=None values=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:48:39 compute-0 sudo[237956]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:39 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.10 scrub starts
Oct 11 01:48:39 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 11.10 scrub ok
Oct 11 01:48:39 compute-0 sudo[238009]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:48:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:48:39 compute-0 sudo[238009]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:39 compute-0 sudo[238009]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:39 compute-0 sudo[238055]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:48:39 compute-0 sudo[238055]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:40 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.f scrub starts
Oct 11 01:48:40 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.f scrub ok
Oct 11 01:48:40 compute-0 podman[238196]: 2025-10-11 01:48:40.337288624 +0000 UTC m=+0.098878372 container create 66e0cbfcd0b8f9ae3797b43377fd24c9a8994a6d6cd67f8b15d5352eaabae207 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_goodall, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 01:48:40 compute-0 podman[238196]: 2025-10-11 01:48:40.302379104 +0000 UTC m=+0.063968972 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:48:40 compute-0 ceph-mon[191930]: 11.10 scrub starts
Oct 11 01:48:40 compute-0 ceph-mon[191930]: 11.10 scrub ok
Oct 11 01:48:40 compute-0 ceph-mon[191930]: 9.f scrub starts
Oct 11 01:48:40 compute-0 ceph-mon[191930]: 9.f scrub ok
Oct 11 01:48:40 compute-0 systemd[1]: Started libpod-conmon-66e0cbfcd0b8f9ae3797b43377fd24c9a8994a6d6cd67f8b15d5352eaabae207.scope.
Oct 11 01:48:40 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:48:40 compute-0 podman[238196]: 2025-10-11 01:48:40.478044251 +0000 UTC m=+0.239634089 container init 66e0cbfcd0b8f9ae3797b43377fd24c9a8994a6d6cd67f8b15d5352eaabae207 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_goodall, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 01:48:40 compute-0 podman[238196]: 2025-10-11 01:48:40.497986851 +0000 UTC m=+0.259576639 container start 66e0cbfcd0b8f9ae3797b43377fd24c9a8994a6d6cd67f8b15d5352eaabae207 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_goodall, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:48:40 compute-0 podman[238196]: 2025-10-11 01:48:40.505439941 +0000 UTC m=+0.267029779 container attach 66e0cbfcd0b8f9ae3797b43377fd24c9a8994a6d6cd67f8b15d5352eaabae207 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_goodall, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:48:40 compute-0 nervous_goodall[238244]: 167 167
Oct 11 01:48:40 compute-0 systemd[1]: libpod-66e0cbfcd0b8f9ae3797b43377fd24c9a8994a6d6cd67f8b15d5352eaabae207.scope: Deactivated successfully.
Oct 11 01:48:40 compute-0 podman[238196]: 2025-10-11 01:48:40.508905573 +0000 UTC m=+0.270495361 container died 66e0cbfcd0b8f9ae3797b43377fd24c9a8994a6d6cd67f8b15d5352eaabae207 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_goodall, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
Oct 11 01:48:40 compute-0 sudo[238268]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qtdvxewuhacapguaqxnzrjfqqwfsbkkb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147319.991605-135-125752135027627/AnsiballZ_dnf.py'
Oct 11 01:48:40 compute-0 sudo[238268]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:40 compute-0 systemd[1]: var-lib-containers-storage-overlay-f7d16882638d2ba9b1161e516bdff0afc1a73ef448e8ec46962b36839ce427d7-merged.mount: Deactivated successfully.
Oct 11 01:48:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v315: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:40 compute-0 podman[238196]: 2025-10-11 01:48:40.584568015 +0000 UTC m=+0.346157803 container remove 66e0cbfcd0b8f9ae3797b43377fd24c9a8994a6d6cd67f8b15d5352eaabae207 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_goodall, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:48:40 compute-0 systemd[1]: libpod-conmon-66e0cbfcd0b8f9ae3797b43377fd24c9a8994a6d6cd67f8b15d5352eaabae207.scope: Deactivated successfully.
Oct 11 01:48:40 compute-0 podman[238289]: 2025-10-11 01:48:40.837405822 +0000 UTC m=+0.073895582 container create e3414f0a3196549ffd5c5c26c23767106f3825c214dd8c13616183af0e38ccd1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_snyder, org.label-schema.build-date=20250507, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:48:40 compute-0 python3.9[238278]: ansible-ansible.legacy.dnf Invoked with name=['openssh-server'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:48:40 compute-0 podman[238289]: 2025-10-11 01:48:40.801902574 +0000 UTC m=+0.038392374 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:48:40 compute-0 systemd[1]: Started libpod-conmon-e3414f0a3196549ffd5c5c26c23767106f3825c214dd8c13616183af0e38ccd1.scope.
Oct 11 01:48:40 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:48:40 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd051ce3ed6c4b8052b4a7caa9f49ace8b349bc0bff8fcf9c996952ec15f260c/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:48:40 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd051ce3ed6c4b8052b4a7caa9f49ace8b349bc0bff8fcf9c996952ec15f260c/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:48:40 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd051ce3ed6c4b8052b4a7caa9f49ace8b349bc0bff8fcf9c996952ec15f260c/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:48:40 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd051ce3ed6c4b8052b4a7caa9f49ace8b349bc0bff8fcf9c996952ec15f260c/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:48:41 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.18 deep-scrub starts
Oct 11 01:48:41 compute-0 podman[238289]: 2025-10-11 01:48:41.042948299 +0000 UTC m=+0.279438099 container init e3414f0a3196549ffd5c5c26c23767106f3825c214dd8c13616183af0e38ccd1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_snyder, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:48:41 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.18 deep-scrub ok
Oct 11 01:48:41 compute-0 podman[238289]: 2025-10-11 01:48:41.077123511 +0000 UTC m=+0.313613241 container start e3414f0a3196549ffd5c5c26c23767106f3825c214dd8c13616183af0e38ccd1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_snyder, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:48:41 compute-0 podman[238289]: 2025-10-11 01:48:41.082519456 +0000 UTC m=+0.319009186 container attach e3414f0a3196549ffd5c5c26c23767106f3825c214dd8c13616183af0e38ccd1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_snyder, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:48:41 compute-0 podman[238309]: 2025-10-11 01:48:41.112623149 +0000 UTC m=+0.144647625 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi)
Oct 11 01:48:41 compute-0 ceph-mon[191930]: pgmap v315: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:41 compute-0 ceph-mon[191930]: 9.18 deep-scrub starts
Oct 11 01:48:41 compute-0 ceph-mon[191930]: 9.18 deep-scrub ok
Oct 11 01:48:41 compute-0 eager_snyder[238307]: {
Oct 11 01:48:41 compute-0 eager_snyder[238307]:     "0": [
Oct 11 01:48:41 compute-0 eager_snyder[238307]:         {
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "devices": [
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "/dev/loop3"
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             ],
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "lv_name": "ceph_lv0",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "lv_size": "21470642176",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "name": "ceph_lv0",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "tags": {
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.cluster_name": "ceph",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.crush_device_class": "",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.encrypted": "0",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.osd_id": "0",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.type": "block",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.vdo": "0"
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             },
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "type": "block",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "vg_name": "ceph_vg0"
Oct 11 01:48:41 compute-0 eager_snyder[238307]:         }
Oct 11 01:48:41 compute-0 eager_snyder[238307]:     ],
Oct 11 01:48:41 compute-0 eager_snyder[238307]:     "1": [
Oct 11 01:48:41 compute-0 eager_snyder[238307]:         {
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "devices": [
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "/dev/loop4"
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             ],
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "lv_name": "ceph_lv1",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "lv_size": "21470642176",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "name": "ceph_lv1",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "tags": {
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.cluster_name": "ceph",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.crush_device_class": "",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.encrypted": "0",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.osd_id": "1",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.type": "block",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.vdo": "0"
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             },
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "type": "block",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "vg_name": "ceph_vg1"
Oct 11 01:48:41 compute-0 eager_snyder[238307]:         }
Oct 11 01:48:41 compute-0 eager_snyder[238307]:     ],
Oct 11 01:48:41 compute-0 eager_snyder[238307]:     "2": [
Oct 11 01:48:41 compute-0 eager_snyder[238307]:         {
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "devices": [
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "/dev/loop5"
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             ],
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "lv_name": "ceph_lv2",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "lv_size": "21470642176",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "name": "ceph_lv2",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "tags": {
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.cluster_name": "ceph",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.crush_device_class": "",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.encrypted": "0",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.osd_id": "2",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.type": "block",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:                 "ceph.vdo": "0"
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             },
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "type": "block",
Oct 11 01:48:41 compute-0 eager_snyder[238307]:             "vg_name": "ceph_vg2"
Oct 11 01:48:41 compute-0 eager_snyder[238307]:         }
Oct 11 01:48:41 compute-0 eager_snyder[238307]:     ]
Oct 11 01:48:41 compute-0 eager_snyder[238307]: }
Oct 11 01:48:41 compute-0 systemd[1]: libpod-e3414f0a3196549ffd5c5c26c23767106f3825c214dd8c13616183af0e38ccd1.scope: Deactivated successfully.
Oct 11 01:48:41 compute-0 podman[238289]: 2025-10-11 01:48:41.964508114 +0000 UTC m=+1.200997864 container died e3414f0a3196549ffd5c5c26c23767106f3825c214dd8c13616183af0e38ccd1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_snyder, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:48:42 compute-0 systemd[1]: var-lib-containers-storage-overlay-fd051ce3ed6c4b8052b4a7caa9f49ace8b349bc0bff8fcf9c996952ec15f260c-merged.mount: Deactivated successfully.
Oct 11 01:48:42 compute-0 sudo[238268]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:42 compute-0 podman[238289]: 2025-10-11 01:48:42.090533533 +0000 UTC m=+1.327023253 container remove e3414f0a3196549ffd5c5c26c23767106f3825c214dd8c13616183af0e38ccd1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_snyder, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:48:42 compute-0 systemd[1]: libpod-conmon-e3414f0a3196549ffd5c5c26c23767106f3825c214dd8c13616183af0e38ccd1.scope: Deactivated successfully.
Oct 11 01:48:42 compute-0 sudo[238055]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:42 compute-0 sudo[238367]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:48:42 compute-0 sudo[238367]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:42 compute-0 sudo[238367]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:42 compute-0 sudo[238395]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:48:42 compute-0 sudo[238395]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:42 compute-0 sudo[238395]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v316: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:42 compute-0 sudo[238420]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:48:42 compute-0 sudo[238420]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:42 compute-0 sudo[238420]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:42 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.d scrub starts
Oct 11 01:48:42 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.d scrub ok
Oct 11 01:48:42 compute-0 ceph-mon[191930]: 9.d scrub starts
Oct 11 01:48:42 compute-0 sudo[238469]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:48:42 compute-0 sudo[238469]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:43 compute-0 sudo[238632]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ltchbjbeabwojiznocwogigjwdjcgppj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147322.5920026-146-220991316103036/AnsiballZ_setup.py'
Oct 11 01:48:43 compute-0 sudo[238632]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:43 compute-0 podman[238633]: 2025-10-11 01:48:43.281654159 +0000 UTC m=+0.093901343 container create e8b945e24c0a6cfb0a4866bb374781ced8235c1d9c6ce721051f54291b49f86e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_rhodes, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, ceph=True, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:48:43 compute-0 podman[238633]: 2025-10-11 01:48:43.247448976 +0000 UTC m=+0.059696200 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:48:43 compute-0 systemd[1]: Started libpod-conmon-e8b945e24c0a6cfb0a4866bb374781ced8235c1d9c6ce721051f54291b49f86e.scope.
Oct 11 01:48:43 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:48:43 compute-0 podman[238633]: 2025-10-11 01:48:43.427939412 +0000 UTC m=+0.240186646 container init e8b945e24c0a6cfb0a4866bb374781ced8235c1d9c6ce721051f54291b49f86e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_rhodes, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:48:43 compute-0 podman[238633]: 2025-10-11 01:48:43.445691636 +0000 UTC m=+0.257938810 container start e8b945e24c0a6cfb0a4866bb374781ced8235c1d9c6ce721051f54291b49f86e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_rhodes, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, ceph=True)
Oct 11 01:48:43 compute-0 podman[238633]: 2025-10-11 01:48:43.451590997 +0000 UTC m=+0.263838171 container attach e8b945e24c0a6cfb0a4866bb374781ced8235c1d9c6ce721051f54291b49f86e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_rhodes, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507)
Oct 11 01:48:43 compute-0 crazy_rhodes[238651]: 167 167
Oct 11 01:48:43 compute-0 systemd[1]: libpod-e8b945e24c0a6cfb0a4866bb374781ced8235c1d9c6ce721051f54291b49f86e.scope: Deactivated successfully.
Oct 11 01:48:43 compute-0 podman[238633]: 2025-10-11 01:48:43.454198458 +0000 UTC m=+0.266445612 container died e8b945e24c0a6cfb0a4866bb374781ced8235c1d9c6ce721051f54291b49f86e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_rhodes, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True)
Oct 11 01:48:43 compute-0 python3.9[238636]: ansible-setup Invoked with gather_subset=['!all', '!min', 'distribution', 'distribution_major_version', 'distribution_version', 'os_family'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:48:43 compute-0 systemd[1]: var-lib-containers-storage-overlay-3c1c11ed7578dae2a26a21377c59a1fe4b4ca88b70ba52bad58ef1927bdc6252-merged.mount: Deactivated successfully.
Oct 11 01:48:43 compute-0 sudo[238632]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:43 compute-0 podman[238633]: 2025-10-11 01:48:43.544333184 +0000 UTC m=+0.356580328 container remove e8b945e24c0a6cfb0a4866bb374781ced8235c1d9c6ce721051f54291b49f86e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_rhodes, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.license=GPLv2)
Oct 11 01:48:43 compute-0 systemd[1]: libpod-conmon-e8b945e24c0a6cfb0a4866bb374781ced8235c1d9c6ce721051f54291b49f86e.scope: Deactivated successfully.
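The six podman lines above are one complete lifecycle for the short-lived container crazy_rhodes: create, init, start, attach, died, remove. The ceph image is run once (its only output is the "167 167" uid/gid pair on the crazy_rhodes line) and immediately torn down, which is what a cephadm-style probe typically looks like. A minimal sketch for watching this event sequence live, assuming the podman CLI is on PATH and that the JSON field names (Type, Status, Name, ID) match this podman release:

    # Sketch: tail the podman event stream that produced the lines above.
    # Assumes `podman events --format json` emits one JSON object per line and
    # that the field names below match this podman version.
    import json
    import subprocess

    proc = subprocess.Popen(
        ["podman", "events", "--format", "json"],
        stdout=subprocess.PIPE,
        text=True,
    )
    for line in proc.stdout:
        ev = json.loads(line)
        if ev.get("Type") == "container":
            # e.g. "init crazy_rhodes e8b945e24c0a"
            print(ev.get("Status"), ev.get("Name"), str(ev.get("ID", ""))[:12])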
Oct 11 01:48:43 compute-0 ceph-mon[191930]: pgmap v316: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:43 compute-0 ceph-mon[191930]: 9.d scrub ok
Oct 11 01:48:43 compute-0 podman[238699]: 2025-10-11 01:48:43.762442163 +0000 UTC m=+0.080396830 container create 4bc467fdd7b56fbf3252a840ca4b4b3b7897efbfdd09729e3547efe96b51e092 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_solomon, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:48:43 compute-0 podman[238699]: 2025-10-11 01:48:43.733088609 +0000 UTC m=+0.051043396 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:48:43 compute-0 systemd[1]: Started libpod-conmon-4bc467fdd7b56fbf3252a840ca4b4b3b7897efbfdd09729e3547efe96b51e092.scope.
Oct 11 01:48:43 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:48:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/06a70b894326fa559b557aa2c9b95df01f771cce4f2218d0734ddcb59433e49f/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:48:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/06a70b894326fa559b557aa2c9b95df01f771cce4f2218d0734ddcb59433e49f/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:48:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/06a70b894326fa559b557aa2c9b95df01f771cce4f2218d0734ddcb59433e49f/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:48:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/06a70b894326fa559b557aa2c9b95df01f771cce4f2218d0734ddcb59433e49f/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:48:43 compute-0 podman[238699]: 2025-10-11 01:48:43.903025087 +0000 UTC m=+0.220979834 container init 4bc467fdd7b56fbf3252a840ca4b4b3b7897efbfdd09729e3547efe96b51e092 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_solomon, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:48:43 compute-0 podman[238699]: 2025-10-11 01:48:43.922285599 +0000 UTC m=+0.240240306 container start 4bc467fdd7b56fbf3252a840ca4b4b3b7897efbfdd09729e3547efe96b51e092 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_solomon, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:48:43 compute-0 podman[238699]: 2025-10-11 01:48:43.929324924 +0000 UTC m=+0.247279681 container attach 4bc467fdd7b56fbf3252a840ca4b4b3b7897efbfdd09729e3547efe96b51e092 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_solomon, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Oct 11 01:48:44 compute-0 sudo[238844]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-utwxqxwseabhklylbznkrqllvtmdctdb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147323.8001602-154-261985627350736/AnsiballZ_stat.py'
Oct 11 01:48:44 compute-0 sudo[238844]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:44 compute-0 python3.9[238846]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:48:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v317: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:44 compute-0 sudo[238844]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:48:44 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.11 scrub starts
Oct 11 01:48:44 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.11 scrub ok
Oct 11 01:48:45 compute-0 infallible_solomon[238743]: {
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:         "osd_id": 1,
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:         "type": "bluestore"
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:     },
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:         "osd_id": 2,
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:         "type": "bluestore"
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:     },
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:         "osd_id": 0,
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:         "type": "bluestore"
Oct 11 01:48:45 compute-0 infallible_solomon[238743]:     }
Oct 11 01:48:45 compute-0 infallible_solomon[238743]: }
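The JSON block printed by infallible_solomon is a per-OSD inventory keyed by OSD fsid, giving the backing LV device, osd_id, and store type for each of the three bluestore OSDs. The shape matches `ceph-volume raw list --format json` output, though the log does not show the exact command, so treat that as an assumption. A minimal sketch that parses one entry of that block:

    # Sketch: parse one entry of the inventory above (values copied verbatim
    # from the log) and print a per-OSD summary.
    import json

    inventory = json.loads("""
    {
        "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
            "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
            "device": "/dev/mapper/ceph_vg0-ceph_lv0",
            "osd_id": 0,
            "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
            "type": "bluestore"
        }
    }
    """)
    for osd_uuid, osd in sorted(inventory.items(), key=lambda kv: kv[1]["osd_id"]):
        print(f"osd.{osd['osd_id']}: {osd['device']} ({osd['type']})")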
Oct 11 01:48:45 compute-0 systemd[1]: libpod-4bc467fdd7b56fbf3252a840ca4b4b3b7897efbfdd09729e3547efe96b51e092.scope: Deactivated successfully.
Oct 11 01:48:45 compute-0 systemd[1]: libpod-4bc467fdd7b56fbf3252a840ca4b4b3b7897efbfdd09729e3547efe96b51e092.scope: Consumed 1.141s CPU time.
Oct 11 01:48:45 compute-0 podman[238699]: 2025-10-11 01:48:45.069542905 +0000 UTC m=+1.387497592 container died 4bc467fdd7b56fbf3252a840ca4b4b3b7897efbfdd09729e3547efe96b51e092 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_solomon, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 01:48:45 compute-0 systemd[1]: var-lib-containers-storage-overlay-06a70b894326fa559b557aa2c9b95df01f771cce4f2218d0734ddcb59433e49f-merged.mount: Deactivated successfully.
Oct 11 01:48:45 compute-0 podman[238699]: 2025-10-11 01:48:45.170168438 +0000 UTC m=+1.488123125 container remove 4bc467fdd7b56fbf3252a840ca4b4b3b7897efbfdd09729e3547efe96b51e092 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_solomon, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default)
Oct 11 01:48:45 compute-0 systemd[1]: libpod-conmon-4bc467fdd7b56fbf3252a840ca4b4b3b7897efbfdd09729e3547efe96b51e092.scope: Deactivated successfully.
Oct 11 01:48:45 compute-0 sudo[238469]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:48:45 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:48:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:48:45 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
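The two mon_command lines show cephadm persisting the inventory it just gathered into the monitors' config-key store, under host-scoped keys (mgr/cephadm/host.compute-0.devices.0 and mgr/cephadm/host.compute-0). A sketch for reading the stored device inventory back, assuming the usual admin keyring is available and that the stored value is JSON, as cephadm writes it:

    # Sketch: read back the device inventory cephadm stored above. The key name
    # is taken verbatim from the handle_command line; `ceph config-key get`
    # prints the raw stored value on stdout.
    import json
    import subprocess

    key = "mgr/cephadm/host.compute-0.devices.0"
    out = subprocess.run(
        ["ceph", "config-key", "get", key],
        check=True, capture_output=True, text=True,
    ).stdout
    devices = json.loads(out)  # assumption: cephadm stores JSON under this key
    print(json.dumps(devices, indent=2)[:400])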
Oct 11 01:48:45 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev eab86028-8d1d-423b-9e4f-5bdc4549669d does not exist
Oct 11 01:48:45 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 7348f915-89bb-4b61-ad06-0bfa8a4c9be4 does not exist
Oct 11 01:48:45 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 11.5 scrub starts
Oct 11 01:48:45 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 11.5 scrub ok
Oct 11 01:48:45 compute-0 sudo[239010]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:48:45 compute-0 sudo[239010]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:45 compute-0 sudo[239010]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:45 compute-0 sudo[239061]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-egbdfdwuugizfhdlyeduwhiuuenbpxij ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147324.8728974-163-237023927727452/AnsiballZ_stat.py'
Oct 11 01:48:45 compute-0 sudo[239061]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:45 compute-0 sudo[239062]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:48:45 compute-0 sudo[239062]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:48:45 compute-0 sudo[239062]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:45 compute-0 python3.9[239068]: ansible-stat Invoked with path=/sbin/transactional-update follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:48:45 compute-0 sudo[239061]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:45 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.b scrub starts
Oct 11 01:48:45 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.b scrub ok
Oct 11 01:48:46 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.8 scrub starts
Oct 11 01:48:46 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.8 scrub ok
Oct 11 01:48:46 compute-0 ceph-mon[191930]: pgmap v317: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:46 compute-0 ceph-mon[191930]: 9.11 scrub starts
Oct 11 01:48:46 compute-0 ceph-mon[191930]: 9.11 scrub ok
Oct 11 01:48:46 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:48:46 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:48:46 compute-0 ceph-mon[191930]: 11.5 scrub starts
Oct 11 01:48:46 compute-0 ceph-mon[191930]: 11.5 scrub ok
Oct 11 01:48:46 compute-0 ceph-mon[191930]: 9.8 scrub starts
Oct 11 01:48:46 compute-0 ceph-mon[191930]: 9.8 scrub ok
Oct 11 01:48:46 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 11.7 scrub starts
Oct 11 01:48:46 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 11.7 scrub ok
Oct 11 01:48:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v318: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:46 compute-0 sudo[239238]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xwbbxccuiqndnhvfurfaflidibgkdpwk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147325.9985821-173-157898840326361/AnsiballZ_service_facts.py'
Oct 11 01:48:46 compute-0 sudo[239238]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:46 compute-0 python3.9[239240]: ansible-service_facts Invoked
Oct 11 01:48:47 compute-0 network[239257]: You are using the 'network' service provided by 'network-scripts', which is now deprecated.
Oct 11 01:48:47 compute-0 network[239258]: 'network-scripts' will be removed from the distribution in the near future.
Oct 11 01:48:47 compute-0 network[239259]: It is advised to switch to 'NetworkManager' for network management instead.
Oct 11 01:48:47 compute-0 ceph-mon[191930]: 9.b scrub starts
Oct 11 01:48:47 compute-0 ceph-mon[191930]: 9.b scrub ok
Oct 11 01:48:47 compute-0 ceph-mon[191930]: 11.7 scrub starts
Oct 11 01:48:47 compute-0 ceph-mon[191930]: 11.7 scrub ok
Oct 11 01:48:48 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.c scrub starts
Oct 11 01:48:48 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.c scrub ok
Oct 11 01:48:48 compute-0 podman[239265]: 2025-10-11 01:48:48.264604062 +0000 UTC m=+0.111115770 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:48:48 compute-0 podman[239268]: 2025-10-11 01:48:48.274256928 +0000 UTC m=+0.121297003 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., vendor=Red Hat, Inc., build-date=2024-09-18T21:23:30, io.openshift.expose-services=, managed_by=edpm_ansible, release-0.7.12=, com.redhat.component=ubi9-container, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.29.0, io.k8s.display-name=Red Hat Universal Base Image 9, name=ubi9, container_name=kepler, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, vcs-type=git, version=9.4, io.openshift.tags=base rhel9, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., architecture=x86_64, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, config_id=edpm)
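These health_status events come from podman's built-in healthchecks: each edpm_ansible-managed container bind-mounts /var/lib/openstack/healthchecks/<name> at /openstack, and its test runs /openstack/healthcheck with the container name as argument (both visible in the config_data above). A sketch for triggering one check by hand, assuming `podman healthcheck run` behaves as documented (exit code 0 when healthy):

    # Sketch: run the podman_exporter healthcheck outside its timer. podman
    # executes the container's configured test command and reports health via
    # the exit code.
    import subprocess

    result = subprocess.run(["podman", "healthcheck", "run", "podman_exporter"])
    print("healthy" if result.returncode == 0
          else f"unhealthy (rc={result.returncode})")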
Oct 11 01:48:48 compute-0 ceph-mon[191930]: pgmap v318: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:48 compute-0 ceph-mon[191930]: 9.c scrub starts
Oct 11 01:48:48 compute-0 ceph-mon[191930]: 9.c scrub ok
Oct 11 01:48:48 compute-0 podman[239267]: 2025-10-11 01:48:48.3075617 +0000 UTC m=+0.155213062 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
Oct 11 01:48:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v319: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:48:50 compute-0 ceph-mon[191930]: pgmap v319: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:50 compute-0 podman[239389]: 2025-10-11 01:48:50.448037776 +0000 UTC m=+0.128309018 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4)
Oct 11 01:48:50 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 11.a scrub starts
Oct 11 01:48:50 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 11.a scrub ok
Oct 11 01:48:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v320: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:50 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.5 deep-scrub starts
Oct 11 01:48:50 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.5 deep-scrub ok
Oct 11 01:48:51 compute-0 ceph-mon[191930]: 11.a scrub starts
Oct 11 01:48:51 compute-0 ceph-mon[191930]: 11.a scrub ok
Oct 11 01:48:51 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 11.c scrub starts
Oct 11 01:48:51 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 11.c scrub ok
Oct 11 01:48:52 compute-0 ceph-mon[191930]: pgmap v320: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:52 compute-0 ceph-mon[191930]: 9.5 deep-scrub starts
Oct 11 01:48:52 compute-0 ceph-mon[191930]: 9.5 deep-scrub ok
Oct 11 01:48:52 compute-0 ceph-mon[191930]: 11.c scrub starts
Oct 11 01:48:52 compute-0 ceph-mon[191930]: 11.c scrub ok
Oct 11 01:48:52 compute-0 sudo[239238]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v321: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:53 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 11.13 scrub starts
Oct 11 01:48:53 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 11.13 scrub ok
Oct 11 01:48:53 compute-0 sudo[239634]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xigawigwbzygwleasigcyhtobwpkslom ; /bin/bash /home/zuul/.ansible/tmp/ansible-tmp-1760147333.2246192-186-134760703262675/AnsiballZ_timesync_provider.sh /home/zuul/.ansible/tmp/ansible-tmp-1760147333.2246192-186-134760703262675/args'
Oct 11 01:48:53 compute-0 sudo[239634]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:54 compute-0 sudo[239634]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:54 compute-0 ceph-mon[191930]: pgmap v321: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:54 compute-0 ceph-mon[191930]: 11.13 scrub starts
Oct 11 01:48:54 compute-0 ceph-mon[191930]: 11.13 scrub ok
Oct 11 01:48:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v322: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:48:54 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.3 scrub starts
Oct 11 01:48:54 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.3 scrub ok
Oct 11 01:48:55 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.13 scrub starts
Oct 11 01:48:55 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.13 scrub ok
Oct 11 01:48:55 compute-0 sudo[239801]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vdrqnyrjtlbjtwvlpbkbpmlebgrmwpwq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147334.6285603-197-149954077596031/AnsiballZ_dnf.py'
Oct 11 01:48:55 compute-0 sudo[239801]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:55 compute-0 ceph-mon[191930]: pgmap v322: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:55 compute-0 ceph-mon[191930]: 9.13 scrub starts
Oct 11 01:48:55 compute-0 ceph-mon[191930]: 9.13 scrub ok
Oct 11 01:48:55 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 11.16 deep-scrub starts
Oct 11 01:48:55 compute-0 python3.9[239803]: ansible-ansible.legacy.dnf Invoked with name=['chrony'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:48:55 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 11.16 deep-scrub ok
Oct 11 01:48:55 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.9 scrub starts
Oct 11 01:48:55 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.9 scrub ok
Oct 11 01:48:56 compute-0 ceph-mon[191930]: 9.3 scrub starts
Oct 11 01:48:56 compute-0 ceph-mon[191930]: 9.3 scrub ok
Oct 11 01:48:56 compute-0 ceph-mon[191930]: 11.16 deep-scrub starts
Oct 11 01:48:56 compute-0 ceph-mon[191930]: 11.16 deep-scrub ok
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:48:56
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['volumes', '.mgr', 'backups', 'cephfs.cephfs.meta', 'vms', 'default.rgw.control', 'default.rgw.meta', 'cephfs.cephfs.data', '.rgw.root', 'default.rgw.log', 'images']
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 01:48:56 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 11.1d scrub starts
Oct 11 01:48:56 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 11.1d scrub ok
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v323: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:48:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:48:56 compute-0 sudo[239801]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:57 compute-0 ceph-mon[191930]: 9.9 scrub starts
Oct 11 01:48:57 compute-0 ceph-mon[191930]: 9.9 scrub ok
Oct 11 01:48:57 compute-0 ceph-mon[191930]: 11.1d scrub starts
Oct 11 01:48:57 compute-0 ceph-mon[191930]: 11.1d scrub ok
Oct 11 01:48:57 compute-0 ceph-mon[191930]: pgmap v323: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:57 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.19 scrub starts
Oct 11 01:48:57 compute-0 ceph-osd[207831]: log_channel(cluster) log [DBG] : 9.19 scrub ok
Oct 11 01:48:58 compute-0 sudo[239954]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gsiztblqeeouknhszhhhhgyqfhspnlcd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147337.3521252-210-230023126068436/AnsiballZ_package_facts.py'
Oct 11 01:48:58 compute-0 sudo[239954]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:48:58 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.b scrub starts
Oct 11 01:48:58 compute-0 ceph-mon[191930]: 9.19 scrub starts
Oct 11 01:48:58 compute-0 ceph-mon[191930]: 9.19 scrub ok
Oct 11 01:48:58 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.b scrub ok
Oct 11 01:48:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v324: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:58 compute-0 python3.9[239956]: ansible-package_facts Invoked with manager=['auto'] strategy=first
Oct 11 01:48:58 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.1d scrub starts
Oct 11 01:48:58 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.1d scrub ok
Oct 11 01:48:58 compute-0 sudo[239954]: pam_unix(sudo:session): session closed for user root
Oct 11 01:48:59 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.13 scrub starts
Oct 11 01:48:59 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.13 scrub ok
Oct 11 01:48:59 compute-0 ceph-mon[191930]: 10.b scrub starts
Oct 11 01:48:59 compute-0 ceph-mon[191930]: 10.b scrub ok
Oct 11 01:48:59 compute-0 ceph-mon[191930]: pgmap v324: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:48:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:48:59 compute-0 podman[157119]: time="2025-10-11T01:48:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:48:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:48:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:48:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:48:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6820 "" "Go-http-client/1.1"
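The two GET requests against /v4.9.3/libpod/... are the prometheus-podman-exporter scraping the libpod REST API over the socket it was configured with (CONTAINER_HOST=unix:///run/podman/podman.sock in its config_data earlier). A sketch of the same containers query issued through the podman Python bindings, assuming the podman-py package and its docker-py-style attribute names:

    # Sketch: equivalent of GET /libpod/containers/json?all=true above, issued
    # via podman-py instead of raw HTTP over the unix socket.
    from podman import PodmanClient

    with PodmanClient(base_url="unix:///run/podman/podman.sock") as client:
        for ctr in client.containers.list(all=True):
            print(ctr.name, ctr.status)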
Oct 11 01:48:59 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.1b scrub starts
Oct 11 01:48:59 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.1b scrub ok
Oct 11 01:49:00 compute-0 sudo[240106]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fbfszlbtyfklztwsbgfwfmlxkmlizvjs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147339.5302567-220-176562432794517/AnsiballZ_stat.py'
Oct 11 01:49:00 compute-0 sudo[240106]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:00 compute-0 python3.9[240108]: ansible-ansible.legacy.stat Invoked with path=/etc/chrony.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:49:00 compute-0 ceph-mon[191930]: 9.1d scrub starts
Oct 11 01:49:00 compute-0 ceph-mon[191930]: 9.1d scrub ok
Oct 11 01:49:00 compute-0 ceph-mon[191930]: 10.13 scrub starts
Oct 11 01:49:00 compute-0 ceph-mon[191930]: 10.13 scrub ok
Oct 11 01:49:00 compute-0 sudo[240106]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v325: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:00 compute-0 sudo[240184]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qkuexogtnznyjpcinrdxerhyetguelrb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147339.5302567-220-176562432794517/AnsiballZ_file.py'
Oct 11 01:49:00 compute-0 sudo[240184]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:01 compute-0 python3.9[240186]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/etc/chrony.conf _original_basename=chrony.conf.j2 recurse=False state=file path=/etc/chrony.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:49:01 compute-0 sudo[240184]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:01 compute-0 openstack_network_exporter[159265]: ERROR   01:49:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:49:01 compute-0 openstack_network_exporter[159265]: ERROR   01:49:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:49:01 compute-0 openstack_network_exporter[159265]: ERROR   01:49:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:49:01 compute-0 openstack_network_exporter[159265]: ERROR   01:49:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:49:01 compute-0 openstack_network_exporter[159265]: ERROR   01:49:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:49:01 compute-0 ceph-mon[191930]: 9.1b scrub starts
Oct 11 01:49:01 compute-0 ceph-mon[191930]: 9.1b scrub ok
Oct 11 01:49:01 compute-0 ceph-mon[191930]: pgmap v325: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:01 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.1 scrub starts
Oct 11 01:49:01 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.1 scrub ok
Oct 11 01:49:02 compute-0 sudo[240336]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lwgxzijuixvxmcqxwotzuiknolgxqhjh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147341.5833063-232-191544020017802/AnsiballZ_stat.py'
Oct 11 01:49:02 compute-0 sudo[240336]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:02 compute-0 python3.9[240338]: ansible-ansible.legacy.stat Invoked with path=/etc/sysconfig/chronyd follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:49:02 compute-0 sudo[240336]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v326: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:02 compute-0 ceph-mon[191930]: 9.1 scrub starts
Oct 11 01:49:02 compute-0 ceph-mon[191930]: 9.1 scrub ok
Oct 11 01:49:02 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.16 scrub starts
Oct 11 01:49:02 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.16 scrub ok
Oct 11 01:49:02 compute-0 sudo[240414]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ydnxfpvehwbphqkfyvvspzolkjkmqsyn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147341.5833063-232-191544020017802/AnsiballZ_file.py'
Oct 11 01:49:02 compute-0 sudo[240414]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:03 compute-0 python3.9[240416]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/etc/sysconfig/chronyd _original_basename=chronyd.sysconfig.j2 recurse=False state=file path=/etc/sysconfig/chronyd force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:49:03 compute-0 sudo[240414]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:03 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.12 deep-scrub starts
Oct 11 01:49:03 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.12 deep-scrub ok
Oct 11 01:49:03 compute-0 ceph-mon[191930]: pgmap v326: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:03 compute-0 ceph-mon[191930]: 9.16 scrub starts
Oct 11 01:49:03 compute-0 ceph-mon[191930]: 9.16 scrub ok
Oct 11 01:49:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v327: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:04 compute-0 sudo[240566]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xhjphybiewyzedclticaxdntszkinecw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147343.8316994-250-121088523757749/AnsiballZ_lineinfile.py'
Oct 11 01:49:04 compute-0 sudo[240566]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:49:04 compute-0 ceph-mon[191930]: 10.12 deep-scrub starts
Oct 11 01:49:04 compute-0 ceph-mon[191930]: 10.12 deep-scrub ok
Oct 11 01:49:04 compute-0 python3.9[240568]: ansible-lineinfile Invoked with backup=True create=True dest=/etc/sysconfig/network line=PEERNTP=no mode=0644 regexp=^PEERNTP= state=present path=/etc/sysconfig/network backrefs=False firstmatch=False unsafe_writes=False search_string=None insertafter=None insertbefore=None validate=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
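The lineinfile invocation above enforces PEERNTP=no in /etc/sysconfig/network: with regexp=^PEERNTP= it rewrites the last line matching that pattern, and with create=True it appends the line (creating the file) when nothing matches. A simplified sketch of those semantics, skipping the backup copy and atomic-write handling the real module does:

    # Sketch: the effective edit made by ansible-lineinfile above
    # (path=/etc/sysconfig/network, regexp=^PEERNTP=, line=PEERNTP=no,
    # create=True). Simplified: no backup file, no atomic replace.
    import os
    import re

    path = "/etc/sysconfig/network"
    lines = []
    if os.path.exists(path):
        with open(path) as f:
            lines = f.read().splitlines()

    matches = [i for i, l in enumerate(lines) if re.search(r"^PEERNTP=", l)]
    if matches:
        lines[matches[-1]] = "PEERNTP=no"  # lineinfile replaces the last match
    else:
        lines.append("PEERNTP=no")         # create/append when absent

    with open(path, "w") as f:
        f.write("\n".join(lines) + "\n")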
Oct 11 01:49:04 compute-0 sudo[240566]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:05 compute-0 ceph-mon[191930]: pgmap v327: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:05 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.1c scrub starts
Oct 11 01:49:05 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.1c scrub ok
Oct 11 01:49:06 compute-0 sudo[240718]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dphgxrgaxfmusshsjphgwlesgvdxzubt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147345.5321245-265-148942318851806/AnsiballZ_setup.py'
Oct 11 01:49:06 compute-0 sudo[240718]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
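Each "pg target" above is capacity_ratio * bias * the PG budget of the CRUSH root. Taking the default mon_target_pg_per_osd=100 and this cluster's 3 OSDs (an assumption; the log shows only the ratios and results) gives a budget of 300, which reproduces the logged numbers exactly. The target is then quantized to a power of two, subject to per-pool minimums and change thresholds, which is why most pools here stay at their current 32:

    # Sketch: reproduce the pg_autoscaler targets logged above.
    # Assumption: root PG budget = mon_target_pg_per_osd (default 100) * 3 OSDs.
    ROOT_PG_BUDGET = 100 * 3

    def pg_target(capacity_ratio: float, bias: float) -> float:
        return capacity_ratio * bias * ROOT_PG_BUDGET

    print(pg_target(7.185749983720779e-06, 1.0))   # .mgr: 0.0021557..., quantized to 1
    print(pg_target(5.087256625643029e-07, 4.0))   # cephfs.cephfs.meta: 0.0006104..., -> 16
    print(pg_target(2.1620840658982875e-06, 1.0))  # default.rgw.log: 0.0006486..., stays 32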
Oct 11 01:49:06 compute-0 python3.9[240720]: ansible-ansible.legacy.setup Invoked with gather_subset=['!all'] filter=['ansible_service_mgr'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:49:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v328: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:06 compute-0 ceph-mon[191930]: 9.1c scrub starts
Oct 11 01:49:06 compute-0 ceph-mon[191930]: 9.1c scrub ok
Oct 11 01:49:06 compute-0 sudo[240718]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:07 compute-0 podman[240730]: 2025-10-11 01:49:07.2501513 +0000 UTC m=+0.122518686 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, name=ubi9-minimal, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, com.redhat.component=ubi9-minimal-container, vcs-type=git, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, config_id=edpm, build-date=2025-08-20T13:12:41, url=https://catalog.redhat.com/en/search?searchType=containers, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, managed_by=edpm_ansible, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vendor=Red Hat, Inc., maintainer=Red Hat, Inc., io.openshift.expose-services=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., container_name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, io.buildah.version=1.33.7, release=1755695350, io.openshift.tags=minimal rhel9, version=9.6)
Oct 11 01:49:07 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.10 deep-scrub starts
Oct 11 01:49:07 compute-0 podman[240729]: 2025-10-11 01:49:07.265049204 +0000 UTC m=+0.141180663 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 01:49:07 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.10 deep-scrub ok
Oct 11 01:49:07 compute-0 sudo[240847]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oopkugfebifrbpfgesgtcvqergbupkoy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147345.5321245-265-148942318851806/AnsiballZ_systemd.py'
Oct 11 01:49:07 compute-0 sudo[240847]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:07 compute-0 ceph-mon[191930]: pgmap v328: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.936 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads available to execute them, so the polling process can be expected to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.938 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.938 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.939 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f8ed27f97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb8c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb1a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb200>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed2874260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed3ab42f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb350>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb90>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fa390>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb3b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbbf0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbc80>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27f9610>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.942 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb620>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbe30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbec0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbf50>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed25ecda0>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.943 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f8ed27fbad0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.944 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.944 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f8ed27faff0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.944 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.945 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f8ed27fb110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.945 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.945 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f8ed27fb170>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.945 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.945 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f8ed27fb1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.945 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.945 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f8ed27fb230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.945 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.945 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f8ed2874230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.945 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f8ed27fb290>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.946 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f8ed5778d70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.946 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f8ed27fb650>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.946 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f8ed27fbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.946 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f8ed27fb320>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f8ed27fbb60>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f8ed27fa3f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f8ed27fb380>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f8ed27fbbc0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f8ed27fbc50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f8ed27fbce0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f8ed27fbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f8ed27fb590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f8ed27f95e0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f8ed27fb5f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f8ed27fbe00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f8ed27fbe90>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f8ed27fbf20>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.950 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.950 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.950 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.950 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.951 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.951 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.951 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.951 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.951 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.951 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.951 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.951 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.951 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.951 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.952 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.952 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.952 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.952 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.952 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.952 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.952 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.952 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.953 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.953 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.953 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:49:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:49:07.953 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
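Taken together, the register / discover / skip / finished lines above are one complete polling cycle: each pollster is registered with the shared executor, the local_instances discovery runs and its empty result is cached, and every pollster is then skipped because this compute node currently hosts no instances. A compressed sketch of that control flow under those assumptions (simplified; the real logic lives in ceilometer/polling/manager.py):

    # Simplified sketch of the cycle traced by the DEBUG lines above.
    discovery_cache = {}

    def discover(method):
        # local_instances returns [] here because no VMs run on this node;
        # the result is cached so later pollsters in the cycle reuse it.
        return discovery_cache.setdefault(method, [])

    def run_pollster(name, discovery_method="local_instances"):
        resources = discover(discovery_method)
        if not resources:
            print(f"Skip pollster {name}, no resources found this cycle")
            return []
        return [f"{name} sample for {r}" for r in resources]

    for name in ("disk.device.capacity", "cpu", "memory.usage"):
        run_pollster(name)
        print(f"Finished processing pollster [{name}].")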
Oct 11 01:49:07 compute-0 python3.9[240849]: ansible-ansible.legacy.systemd Invoked with enabled=True name=chronyd state=started daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
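The systemd module call above (enabled=True, state=started for chronyd) has roughly the same effect as `systemctl enable --now chronyd`; a hedged sketch of driving that from Python with a plain subprocess wrapper (not the module's actual implementation, which also reports changed/unchanged):

    import subprocess

    def ensure_unit(name: str) -> None:
        # Roughly enabled=True + state=started from the log line above.
        subprocess.run(["systemctl", "enable", name], check=True)
        subprocess.run(["systemctl", "start", name], check=True)
        # verify, analogous to the module confirming the final state
        subprocess.run(["systemctl", "is-active", "--quiet", name], check=True)

    # ensure_unit("chronyd")  # needs root, hence the sudo wrapper in the log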
Oct 11 01:49:08 compute-0 sudo[240847]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v329: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:08 compute-0 sshd-session[235395]: Connection closed by 192.168.122.30 port 38946
Oct 11 01:49:08 compute-0 sshd-session[235392]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:49:08 compute-0 systemd[1]: session-45.scope: Deactivated successfully.
Oct 11 01:49:08 compute-0 systemd[1]: session-45.scope: Consumed 39.911s CPU time.
Oct 11 01:49:08 compute-0 systemd-logind[804]: Session 45 logged out. Waiting for processes to exit.
Oct 11 01:49:08 compute-0 systemd-logind[804]: Removed session 45.
Oct 11 01:49:08 compute-0 ceph-mon[191930]: 10.10 deep-scrub starts
Oct 11 01:49:08 compute-0 ceph-mon[191930]: 10.10 deep-scrub ok
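Scrub events arrive as starts/ok pairs per placement group (10.10 above, 9.1e and 10.11 below); a sketch for pairing them while scanning a journal export, e.g. to spot PGs whose scrubs start but never report ok (timestamps are kept in the raw string form these lines use):

    import re

    SCRUB = re.compile(
        r"^(\w+ \d+ [\d:]+) .* (\d+\.[0-9a-f]+) (deep-scrub|scrub) (starts|ok)$"
    )

    def pair_scrubs(lines):
        open_scrubs, completed = {}, []
        for line in lines:
            m = SCRUB.search(line)
            if not m:
                continue
            ts, pg, kind, phase = m.groups()
            if phase == "starts":
                open_scrubs[(pg, kind)] = ts
            else:
                completed.append((pg, kind, open_scrubs.pop((pg, kind), None), ts))
        return completed, open_scrubs  # leftovers in open_scrubs never finished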
Oct 11 01:49:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:49:09 compute-0 ceph-mon[191930]: pgmap v329: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:09 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.1e scrub starts
Oct 11 01:49:09 compute-0 ceph-osd[205667]: log_channel(cluster) log [DBG] : 9.1e scrub ok
Oct 11 01:49:10 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.11 scrub starts
Oct 11 01:49:10 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.11 scrub ok
Oct 11 01:49:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v330: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:10 compute-0 ceph-mon[191930]: 9.1e scrub starts
Oct 11 01:49:10 compute-0 ceph-mon[191930]: 9.1e scrub ok
Oct 11 01:49:11 compute-0 ceph-mon[191930]: 10.11 scrub starts
Oct 11 01:49:11 compute-0 ceph-mon[191930]: 10.11 scrub ok
Oct 11 01:49:11 compute-0 ceph-mon[191930]: pgmap v330: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:12 compute-0 podman[240877]: 2025-10-11 01:49:12.247798018 +0000 UTC m=+0.137327362 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=edpm, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.license=GPLv2, container_name=ceilometer_agent_ipmi)
Oct 11 01:49:12 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.1a scrub starts
Oct 11 01:49:12 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.1a scrub ok
Oct 11 01:49:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v331: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:13 compute-0 ceph-mon[191930]: 10.1a scrub starts
Oct 11 01:49:13 compute-0 ceph-mon[191930]: 10.1a scrub ok
Oct 11 01:49:13 compute-0 ceph-mon[191930]: pgmap v331: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:14 compute-0 sshd-session[240897]: Accepted publickey for zuul from 192.168.122.30 port 39844 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:49:14 compute-0 systemd-logind[804]: New session 46 of user zuul.
Oct 11 01:49:14 compute-0 systemd[1]: Started Session 46 of User zuul.
Oct 11 01:49:14 compute-0 sshd-session[240897]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
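The 'Accepted publickey' line above records the key's fingerprint in OpenSSH's SHA256 notation: the base64-encoded SHA-256 digest of the raw key blob, with '=' padding stripped. A stdlib sketch for reproducing that from an authorized_keys entry to confirm which key logged in:

    import base64, hashlib

    def ssh_fingerprint(authorized_keys_line: str) -> str:
        # Fields are: key-type, base64 blob, optional comment.
        blob_b64 = authorized_keys_line.split()[1]
        digest = hashlib.sha256(base64.b64decode(blob_b64)).digest()
        return "SHA256:" + base64.b64encode(digest).decode().rstrip("=")

    # ssh_fingerprint(open("/home/zuul/.ssh/authorized_keys").readline())
    # -> 'SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w' for the key above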
Oct 11 01:49:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v332: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:49:15 compute-0 sudo[241050]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vxbpzfmxishuuxqmktunxgwpzpvufdgo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147354.47564-22-106317095608395/AnsiballZ_file.py'
Oct 11 01:49:15 compute-0 sudo[241050]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:15 compute-0 python3.9[241052]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:49:15 compute-0 sudo[241050]: pam_unix(sudo:session): session closed for user root
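The ansible.builtin.file task above ensures /var/lib/edpm-config/firewall exists as a root-owned 0750 directory. In plain Python the same effect is roughly the following sketch (not the module's implementation, which additionally reports whether anything changed):

    import os
    from pathlib import Path

    def ensure_dir(path: str, mode: int = 0o750, uid: int = 0, gid: int = 0) -> None:
        p = Path(path)
        p.mkdir(parents=True, exist_ok=True)
        os.chmod(p, mode)      # mkdir's mode argument is filtered by umask
        os.chown(p, uid, gid)  # root:root, matching owner=root group=root above

    # ensure_dir("/var/lib/edpm-config/firewall")  # needs root, hence sudo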
Oct 11 01:49:15 compute-0 ceph-mon[191930]: pgmap v332: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v333: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:16 compute-0 sudo[241202]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pklteytdbsybqnjorpoolcwovodgdrzf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147355.8577275-34-40532623025926/AnsiballZ_stat.py'
Oct 11 01:49:16 compute-0 sudo[241202]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:16 compute-0 python3.9[241204]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/ceph-networks.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:49:16 compute-0 sudo[241202]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:17 compute-0 sudo[241280]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-shyevwpmcesdwvumxvawjirifxubwgmw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147355.8577275-34-40532623025926/AnsiballZ_file.py'
Oct 11 01:49:17 compute-0 sudo[241280]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:17 compute-0 python3.9[241282]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/ceph-networks.yaml _original_basename=firewall.yaml.j2 recurse=False state=file path=/var/lib/edpm-config/firewall/ceph-networks.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:49:17 compute-0 sudo[241280]: pam_unix(sudo:session): session closed for user root
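The stat + file pair above is the usual tail of a template copy: Ansible first stats the rendered destination, then touches only the metadata (mode 0644) and only if it differs. A hedged sketch of that check-before-change pattern:

    import os, stat

    def ensure_mode(path: str, mode: int = 0o644) -> bool:
        current = stat.S_IMODE(os.stat(path).st_mode)
        if current == mode:
            return False   # nothing to do, like an 'ok' task result
        os.chmod(path, mode)
        return True        # change applied, like a 'changed' task result

    # ensure_mode("/var/lib/edpm-config/firewall/ceph-networks.yaml")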
Oct 11 01:49:17 compute-0 ceph-mon[191930]: pgmap v333: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:18 compute-0 sshd-session[240900]: Connection closed by 192.168.122.30 port 39844
Oct 11 01:49:18 compute-0 sshd-session[240897]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:49:18 compute-0 systemd[1]: session-46.scope: Deactivated successfully.
Oct 11 01:49:18 compute-0 systemd[1]: session-46.scope: Consumed 2.983s CPU time.
Oct 11 01:49:18 compute-0 systemd-logind[804]: Session 46 logged out. Waiting for processes to exit.
Oct 11 01:49:18 compute-0 systemd-logind[804]: Removed session 46.
Oct 11 01:49:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v334: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:19 compute-0 podman[241307]: 2025-10-11 01:49:19.253970454 +0000 UTC m=+0.131966922 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 01:49:19 compute-0 podman[241309]: 2025-10-11 01:49:19.268181759 +0000 UTC m=+0.148789438 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.component=ubi9-container, release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., architecture=x86_64, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, release-0.7.12=, managed_by=edpm_ansible, config_id=edpm, version=9.4, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, io.k8s.display-name=Red Hat Universal Base Image 9, name=ubi9, vendor=Red Hat, Inc., distribution-scope=public, io.buildah.version=1.29.0, container_name=kepler, io.openshift.expose-services=, maintainer=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, build-date=2024-09-18T21:23:30, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543)
Oct 11 01:49:19 compute-0 podman[241308]: 2025-10-11 01:49:19.31695073 +0000 UTC m=+0.194271249 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_controller, config_id=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible)
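Each health_status=healthy line above is podman reporting the result of the configured healthcheck command ('test': '/openstack/healthcheck ...') run inside the container. The current status can be read back with podman inspect; a sketch via subprocess, noting that the Go-template path below matches recent podman releases and may differ on older ones:

    import subprocess

    def container_health(name: str) -> str:
        out = subprocess.run(
            ["podman", "inspect", "--format", "{{.State.Health.Status}}", name],
            capture_output=True, text=True, check=True,
        )
        return out.stdout.strip()  # e.g. "healthy", as in the journal lines

    # for c in ("node_exporter", "podman_exporter", "kepler", "ovn_controller"):
    #     print(c, container_health(c))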
Oct 11 01:49:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:49:19 compute-0 ceph-mon[191930]: pgmap v334: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v335: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:21 compute-0 podman[241378]: 2025-10-11 01:49:21.270082349 +0000 UTC m=+0.156933374 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, config_id=edpm, io.buildah.version=1.41.4, tcib_managed=true, org.label-schema.name=CentOS Stream 10 Base Image, maintainer=OpenStack Kubernetes Operator team)
Oct 11 01:49:21 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.19 scrub starts
Oct 11 01:49:21 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.19 scrub ok
Oct 11 01:49:21 compute-0 ceph-mon[191930]: pgmap v335: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:22 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.6 scrub starts
Oct 11 01:49:22 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.6 scrub ok
Oct 11 01:49:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v336: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:22 compute-0 ceph-mon[191930]: 10.19 scrub starts
Oct 11 01:49:22 compute-0 ceph-mon[191930]: 10.19 scrub ok
Oct 11 01:49:23 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.14 scrub starts
Oct 11 01:49:23 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.14 scrub ok
Oct 11 01:49:23 compute-0 sshd-session[241397]: Accepted publickey for zuul from 192.168.122.30 port 52652 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:49:23 compute-0 systemd-logind[804]: New session 47 of user zuul.
Oct 11 01:49:23 compute-0 systemd[1]: Started Session 47 of User zuul.
Oct 11 01:49:23 compute-0 sshd-session[241397]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:49:23 compute-0 ceph-mon[191930]: 10.6 scrub starts
Oct 11 01:49:23 compute-0 ceph-mon[191930]: 10.6 scrub ok
Oct 11 01:49:23 compute-0 ceph-mon[191930]: pgmap v336: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v337: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:49:24 compute-0 python3.9[241550]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:49:24 compute-0 ceph-mon[191930]: 10.14 scrub starts
Oct 11 01:49:24 compute-0 ceph-mon[191930]: 10.14 scrub ok
Oct 11 01:49:25 compute-0 ceph-mon[191930]: pgmap v337: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:26 compute-0 sudo[241704]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vhabwouzjngiloqpvxlcfrsuonbqblca ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147365.5753596-33-280355055228960/AnsiballZ_file.py'
Oct 11 01:49:26 compute-0 sudo[241704]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:49:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:49:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:49:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:49:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:49:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:49:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v338: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:26 compute-0 python3.9[241706]: ansible-ansible.builtin.file Invoked with group=zuul mode=0770 owner=zuul path=/root/.config/containers recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:49:26 compute-0 sudo[241704]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:27 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.f scrub starts
Oct 11 01:49:27 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.f scrub ok
Oct 11 01:49:27 compute-0 sudo[241879]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-imlprodpgigeqfbmimukkyzqlnfuyepw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147367.0552905-41-97402185190704/AnsiballZ_stat.py'
Oct 11 01:49:27 compute-0 sudo[241879]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:27 compute-0 ceph-mon[191930]: pgmap v338: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:28 compute-0 python3.9[241881]: ansible-ansible.legacy.stat Invoked with path=/root/.config/containers/auth.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:49:28 compute-0 sudo[241879]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:28 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.2 scrub starts
Oct 11 01:49:28 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 10.2 scrub ok
Oct 11 01:49:28 compute-0 sudo[241957]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-smhdmsedpwsbzsmsqmrjyyorjbbscrfr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147367.0552905-41-97402185190704/AnsiballZ_file.py'
Oct 11 01:49:28 compute-0 sudo[241957]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v339: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:28 compute-0 python3.9[241959]: ansible-ansible.legacy.file Invoked with group=zuul mode=0660 owner=zuul dest=/root/.config/containers/auth.json _original_basename=.qrgmtx_h recurse=False state=file path=/root/.config/containers/auth.json force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:49:28 compute-0 sudo[241957]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:28 compute-0 ceph-mon[191930]: 10.f scrub starts
Oct 11 01:49:28 compute-0 ceph-mon[191930]: 10.f scrub ok
Oct 11 01:49:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:49:29 compute-0 podman[157119]: time="2025-10-11T01:49:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:49:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:49:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:49:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:49:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6824 "" "Go-http-client/1.1"
Oct 11 01:49:30 compute-0 ceph-mon[191930]: 10.2 scrub starts
Oct 11 01:49:30 compute-0 ceph-mon[191930]: 10.2 scrub ok
Oct 11 01:49:30 compute-0 ceph-mon[191930]: pgmap v339: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:30 compute-0 sudo[242109]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pwcrwsuintbfglubknxgzbejcacgpfpu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147369.342571-61-167603460915880/AnsiballZ_stat.py'
Oct 11 01:49:30 compute-0 sudo[242109]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:30 compute-0 python3.9[242111]: ansible-ansible.legacy.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:49:30 compute-0 sudo[242109]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v340: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:30 compute-0 sudo[242187]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bsdwkdbmgafyezxbqjhzleeeugbldgin ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147369.342571-61-167603460915880/AnsiballZ_file.py'
Oct 11 01:49:30 compute-0 sudo[242187]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:30 compute-0 python3.9[242189]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/etc/sysconfig/podman_drop_in _original_basename=.adhuxj8i recurse=False state=file path=/etc/sysconfig/podman_drop_in force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:49:31 compute-0 sudo[242187]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:31 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.15 scrub starts
Oct 11 01:49:31 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.15 scrub ok
Oct 11 01:49:31 compute-0 openstack_network_exporter[159265]: ERROR   01:49:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:49:31 compute-0 openstack_network_exporter[159265]: ERROR   01:49:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:49:31 compute-0 openstack_network_exporter[159265]: ERROR   01:49:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:49:31 compute-0 openstack_network_exporter[159265]: ERROR   01:49:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:49:31 compute-0 openstack_network_exporter[159265]: ERROR   01:49:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:49:31 compute-0 sudo[242339]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yolpfetmqwjjrmrlcfjiddztnygvbqyx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147371.365243-74-144082439190263/AnsiballZ_file.py'
Oct 11 01:49:31 compute-0 sudo[242339]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:32 compute-0 ceph-mon[191930]: pgmap v340: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:32 compute-0 python3.9[242341]: ansible-ansible.builtin.file Invoked with path=/var/local/libexec recurse=True setype=container_file_t state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:49:32 compute-0 sudo[242339]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v341: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:33 compute-0 sudo[242491]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xztcktopgctjgtyqxptntffutmvlamcr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147372.4724278-82-46083227879045/AnsiballZ_stat.py'
Oct 11 01:49:33 compute-0 sudo[242491]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:33 compute-0 ceph-mon[191930]: 9.15 scrub starts
Oct 11 01:49:33 compute-0 ceph-mon[191930]: 9.15 scrub ok
Oct 11 01:49:33 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.1f scrub starts
Oct 11 01:49:33 compute-0 ceph-osd[206800]: log_channel(cluster) log [DBG] : 9.1f scrub ok
Oct 11 01:49:33 compute-0 python3.9[242493]: ansible-ansible.legacy.stat Invoked with path=/var/local/libexec/edpm-container-shutdown follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:49:33 compute-0 sudo[242491]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:33 compute-0 sudo[242569]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mtwxzssumwtmctrzwisarcneesitedkj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147372.4724278-82-46083227879045/AnsiballZ_file.py'
Oct 11 01:49:33 compute-0 sudo[242569]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:34 compute-0 python3.9[242571]: ansible-ansible.legacy.file Invoked with group=root mode=0700 owner=root setype=container_file_t dest=/var/local/libexec/edpm-container-shutdown _original_basename=edpm-container-shutdown recurse=False state=file path=/var/local/libexec/edpm-container-shutdown force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:49:34 compute-0 ceph-mon[191930]: pgmap v341: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:34 compute-0 sudo[242569]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v342: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:49:34 compute-0 sudo[242721]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uzvxenpuqrjjplzxndojfwnzvqptkoak ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147374.3261127-82-132516991370315/AnsiballZ_stat.py'
Oct 11 01:49:34 compute-0 sudo[242721]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:35 compute-0 ceph-mon[191930]: 9.1f scrub starts
Oct 11 01:49:35 compute-0 ceph-mon[191930]: 9.1f scrub ok
Oct 11 01:49:35 compute-0 python3.9[242723]: ansible-ansible.legacy.stat Invoked with path=/var/local/libexec/edpm-start-podman-container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:49:35 compute-0 sudo[242721]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:35 compute-0 sudo[242799]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-geqjautsfrximtleyjvqiawfpyddfppk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147374.3261127-82-132516991370315/AnsiballZ_file.py'
Oct 11 01:49:35 compute-0 sudo[242799]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:35 compute-0 python3.9[242801]: ansible-ansible.legacy.file Invoked with group=root mode=0700 owner=root setype=container_file_t dest=/var/local/libexec/edpm-start-podman-container _original_basename=edpm-start-podman-container recurse=False state=file path=/var/local/libexec/edpm-start-podman-container force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:49:35 compute-0 sudo[242799]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:36 compute-0 ceph-mon[191930]: pgmap v342: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v343: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:36 compute-0 sudo[242951]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-brrqzcszgndhnnxhfmqdibpgmlapsirm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147376.0937586-105-225422656621190/AnsiballZ_file.py'
Oct 11 01:49:36 compute-0 sudo[242951]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:36 compute-0 python3.9[242953]: ansible-ansible.builtin.file Invoked with mode=420 path=/etc/systemd/system-preset state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:49:36 compute-0 sudo[242951]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:37 compute-0 sudo[243128]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tqkatehyuqgvktqhsufchjehygdtgqxc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147377.2304082-113-236683283081516/AnsiballZ_stat.py'
Oct 11 01:49:37 compute-0 sudo[243128]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:37 compute-0 podman[243078]: 2025-10-11 01:49:37.739285134 +0000 UTC m=+0.100692966 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.tags=minimal rhel9, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=openstack_network_exporter, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.buildah.version=1.33.7, vendor=Red Hat, Inc., io.openshift.expose-services=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, config_id=edpm, com.redhat.component=ubi9-minimal-container, managed_by=edpm_ansible, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-type=git, maintainer=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, architecture=x86_64, name=ubi9-minimal, release=1755695350, version=9.6, distribution-scope=public, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b)
Oct 11 01:49:37 compute-0 podman[243077]: 2025-10-11 01:49:37.754444724 +0000 UTC m=+0.116230551 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 01:49:37 compute-0 python3.9[243143]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/edpm-container-shutdown.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:49:37 compute-0 sudo[243128]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:38 compute-0 ceph-mon[191930]: pgmap v343: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:38 compute-0 sudo[243223]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bsijpyuhoqvexbmtefzvxjhcfvcuakkk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147377.2304082-113-236683283081516/AnsiballZ_file.py'
Oct 11 01:49:38 compute-0 sudo[243223]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v344: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:38 compute-0 python3.9[243225]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/edpm-container-shutdown.service _original_basename=edpm-container-shutdown-service recurse=False state=file path=/etc/systemd/system/edpm-container-shutdown.service force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:49:38 compute-0 sudo[243223]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:49:39 compute-0 sudo[243375]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lnwvarrqdipkkbjfhxpnrsbpyrtgbkqg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147379.196629-125-67509213893491/AnsiballZ_stat.py'
Oct 11 01:49:39 compute-0 sudo[243375]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:40 compute-0 python3.9[243377]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system-preset/91-edpm-container-shutdown.preset follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:49:40 compute-0 rsyslogd[187706]: imjournal: 1638 messages lost due to rate-limiting (20000 allowed within 600 seconds)
Oct 11 01:49:40 compute-0 sudo[243375]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:40 compute-0 ceph-mon[191930]: pgmap v344: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:40 compute-0 sudo[243453]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-luvbpmhkwsfzinvomnlptntruolutaon ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147379.196629-125-67509213893491/AnsiballZ_file.py'
Oct 11 01:49:40 compute-0 sudo[243453]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v345: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:40 compute-0 python3.9[243455]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system-preset/91-edpm-container-shutdown.preset _original_basename=91-edpm-container-shutdown-preset recurse=False state=file path=/etc/systemd/system-preset/91-edpm-container-shutdown.preset force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:49:40 compute-0 sudo[243453]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:41 compute-0 sudo[243605]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kpjdyqasteypoujciwkureaohmrkmphz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147381.089614-137-186577772189093/AnsiballZ_systemd.py'
Oct 11 01:49:41 compute-0 sudo[243605]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:42 compute-0 ceph-mon[191930]: pgmap v345: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:42 compute-0 python3.9[243607]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=edpm-container-shutdown state=started daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:49:42 compute-0 systemd[1]: Reloading.
Oct 11 01:49:42 compute-0 podman[243609]: 2025-10-11 01:49:42.494862157 +0000 UTC m=+0.166170394 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, config_id=edpm, io.buildah.version=1.41.3, managed_by=edpm_ansible)
Oct 11 01:49:42 compute-0 systemd-rc-local-generator[243657]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:49:42 compute-0 systemd-sysv-generator[243661]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:49:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v346: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:42 compute-0 sudo[243605]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:43 compute-0 sudo[243815]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-icbdonbqjlkjyucixdjrofypefuokeck ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147383.244438-145-197774417041885/AnsiballZ_stat.py'
Oct 11 01:49:43 compute-0 sudo[243815]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:44 compute-0 ceph-mon[191930]: pgmap v346: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:44 compute-0 python3.9[243817]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/netns-placeholder.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:49:44 compute-0 sudo[243815]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v347: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:49:44 compute-0 sudo[243893]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-owkuqinwsyfypnbsplkqfyzcbwdahkbx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147383.244438-145-197774417041885/AnsiballZ_file.py'
Oct 11 01:49:44 compute-0 sudo[243893]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:44 compute-0 python3.9[243895]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/netns-placeholder.service _original_basename=netns-placeholder-service recurse=False state=file path=/etc/systemd/system/netns-placeholder.service force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:49:44 compute-0 sudo[243893]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:45 compute-0 sudo[244008]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:49:45 compute-0 sudo[244008]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:45 compute-0 sudo[244008]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:45 compute-0 sudo[244078]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xikzycxbuzouygrkquclnbdykcbkmckf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147385.2455654-157-239319969885688/AnsiballZ_stat.py'
Oct 11 01:49:45 compute-0 sudo[244078]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:45 compute-0 sudo[244065]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:49:45 compute-0 sudo[244065]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:45 compute-0 sudo[244065]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:45 compute-0 sudo[244098]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:49:45 compute-0 sudo[244098]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:45 compute-0 sudo[244098]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:45 compute-0 sudo[244123]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 01:49:45 compute-0 python3.9[244093]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system-preset/91-netns-placeholder.preset follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:49:45 compute-0 sudo[244123]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:46 compute-0 sudo[244078]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:46 compute-0 ceph-mon[191930]: pgmap v347: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:46 compute-0 sudo[244239]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dsengyswwsirynkbphvwlqdsmupswecr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147385.2455654-157-239319969885688/AnsiballZ_file.py'
Oct 11 01:49:46 compute-0 sudo[244239]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:46 compute-0 sudo[244123]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:49:46 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:49:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:49:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:49:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:49:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:49:46 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev cbc2e2d6-62a3-45e6-aca2-0f9d9a56f981 does not exist
Oct 11 01:49:46 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 2519169e-3552-48a7-92cf-1659d9d9e24d does not exist
Oct 11 01:49:46 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 4e9a0340-5237-42b0-a32e-868a06e62e69 does not exist
Oct 11 01:49:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:49:46 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:49:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:49:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:49:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:49:46 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:49:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v348: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:46 compute-0 python3.9[244243]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system-preset/91-netns-placeholder.preset _original_basename=91-netns-placeholder-preset recurse=False state=file path=/etc/systemd/system-preset/91-netns-placeholder.preset force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:49:46 compute-0 sudo[244239]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:46 compute-0 sudo[244256]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:49:46 compute-0 sudo[244256]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:46 compute-0 sudo[244256]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:46 compute-0 sudo[244284]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:49:46 compute-0 sudo[244284]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:46 compute-0 sudo[244284]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:46 compute-0 sudo[244330]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:49:46 compute-0 sudo[244330]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:46 compute-0 sudo[244330]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:47 compute-0 sudo[244378]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 01:49:47 compute-0 sudo[244378]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:49:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:49:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:49:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:49:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:49:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:49:47 compute-0 sudo[244551]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-errhwonbtkvnltvjkqoezemmahxgpygg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147386.9742951-169-93241184680018/AnsiballZ_systemd.py'
Oct 11 01:49:47 compute-0 sudo[244551]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:47 compute-0 podman[244527]: 2025-10-11 01:49:47.555483431 +0000 UTC m=+0.056815880 container create 1680abc1962b7eccaca43f69c42ce294a092db092f1a2474cadcba4e5dc39fd1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_boyd, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:49:47 compute-0 systemd[1]: Started libpod-conmon-1680abc1962b7eccaca43f69c42ce294a092db092f1a2474cadcba4e5dc39fd1.scope.
Oct 11 01:49:47 compute-0 podman[244527]: 2025-10-11 01:49:47.531473165 +0000 UTC m=+0.032805654 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:49:47 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:49:47 compute-0 podman[244527]: 2025-10-11 01:49:47.688492397 +0000 UTC m=+0.189824866 container init 1680abc1962b7eccaca43f69c42ce294a092db092f1a2474cadcba4e5dc39fd1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_boyd, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:49:47 compute-0 podman[244527]: 2025-10-11 01:49:47.705337322 +0000 UTC m=+0.206669761 container start 1680abc1962b7eccaca43f69c42ce294a092db092f1a2474cadcba4e5dc39fd1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_boyd, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_REF=reef, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default)
Oct 11 01:49:47 compute-0 podman[244527]: 2025-10-11 01:49:47.709539308 +0000 UTC m=+0.210871747 container attach 1680abc1962b7eccaca43f69c42ce294a092db092f1a2474cadcba4e5dc39fd1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_boyd, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 01:49:47 compute-0 wizardly_boyd[244560]: 167 167
Oct 11 01:49:47 compute-0 systemd[1]: libpod-1680abc1962b7eccaca43f69c42ce294a092db092f1a2474cadcba4e5dc39fd1.scope: Deactivated successfully.
Oct 11 01:49:47 compute-0 conmon[244560]: conmon 1680abc1962b7eccaca4 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-1680abc1962b7eccaca43f69c42ce294a092db092f1a2474cadcba4e5dc39fd1.scope/container/memory.events
Oct 11 01:49:47 compute-0 podman[244527]: 2025-10-11 01:49:47.718564451 +0000 UTC m=+0.219896890 container died 1680abc1962b7eccaca43f69c42ce294a092db092f1a2474cadcba4e5dc39fd1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_boyd, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:49:47 compute-0 systemd[1]: var-lib-containers-storage-overlay-9f47392b06501dbc3789bea2b4dfa6c135832e6294231549efd3079d675679dd-merged.mount: Deactivated successfully.
Oct 11 01:49:47 compute-0 podman[244527]: 2025-10-11 01:49:47.786991895 +0000 UTC m=+0.288324334 container remove 1680abc1962b7eccaca43f69c42ce294a092db092f1a2474cadcba4e5dc39fd1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_boyd, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:49:47 compute-0 systemd[1]: libpod-conmon-1680abc1962b7eccaca43f69c42ce294a092db092f1a2474cadcba4e5dc39fd1.scope: Deactivated successfully.
Oct 11 01:49:47 compute-0 python3.9[244556]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=netns-placeholder state=started daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:49:47 compute-0 systemd[1]: Reloading.
Oct 11 01:49:48 compute-0 podman[244584]: 2025-10-11 01:49:48.035152647 +0000 UTC m=+0.084227276 container create 31eaf012fe4bd0ad920b12a0f977d3c46eb05a53b5643731455a78727ff7b959 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_mendel, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:49:48 compute-0 systemd-sysv-generator[244626]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:49:48 compute-0 systemd-rc-local-generator[244621]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:49:48 compute-0 podman[244584]: 2025-10-11 01:49:48.015898288 +0000 UTC m=+0.064972937 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:49:48 compute-0 ceph-mon[191930]: pgmap v348: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:48 compute-0 systemd[1]: Started libpod-conmon-31eaf012fe4bd0ad920b12a0f977d3c46eb05a53b5643731455a78727ff7b959.scope.
Oct 11 01:49:48 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:49:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ff9375f1a9e205b310b33954055a0dc7522a4bb2b6fbcb6dd679dbf1f649829d/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:49:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ff9375f1a9e205b310b33954055a0dc7522a4bb2b6fbcb6dd679dbf1f649829d/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:49:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ff9375f1a9e205b310b33954055a0dc7522a4bb2b6fbcb6dd679dbf1f649829d/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:49:48 compute-0 systemd[1]: Starting Create netns directory...
Oct 11 01:49:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ff9375f1a9e205b310b33954055a0dc7522a4bb2b6fbcb6dd679dbf1f649829d/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:49:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ff9375f1a9e205b310b33954055a0dc7522a4bb2b6fbcb6dd679dbf1f649829d/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:49:48 compute-0 systemd[1]: run-netns-placeholder.mount: Deactivated successfully.
Oct 11 01:49:48 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Oct 11 01:49:48 compute-0 systemd[1]: Finished Create netns directory.
Oct 11 01:49:48 compute-0 podman[244584]: 2025-10-11 01:49:48.566353761 +0000 UTC m=+0.615428430 container init 31eaf012fe4bd0ad920b12a0f977d3c46eb05a53b5643731455a78727ff7b959 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_mendel, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 01:49:48 compute-0 podman[244584]: 2025-10-11 01:49:48.594143061 +0000 UTC m=+0.643217730 container start 31eaf012fe4bd0ad920b12a0f977d3c46eb05a53b5643731455a78727ff7b959 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_mendel, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0)
Oct 11 01:49:48 compute-0 podman[244584]: 2025-10-11 01:49:48.601086285 +0000 UTC m=+0.650161004 container attach 31eaf012fe4bd0ad920b12a0f977d3c46eb05a53b5643731455a78727ff7b959 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_mendel, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 01:49:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v349: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:48 compute-0 sudo[244551]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:49 compute-0 podman[244784]: 2025-10-11 01:49:49.633193983 +0000 UTC m=+0.095931258 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.openshift.expose-services=, release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., version=9.4, architecture=x86_64, maintainer=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, managed_by=edpm_ansible, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.29.0, release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, name=ubi9, vendor=Red Hat, Inc., com.redhat.component=ubi9-container, config_id=edpm, container_name=kepler, io.k8s.display-name=Red Hat Universal Base Image 9, distribution-scope=public, vcs-type=git, build-date=2024-09-18T21:23:30)
Oct 11 01:49:49 compute-0 podman[244774]: 2025-10-11 01:49:49.662065116 +0000 UTC m=+0.133780025 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 01:49:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:49:49 compute-0 podman[244781]: 2025-10-11 01:49:49.680829511 +0000 UTC m=+0.150819606 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_controller, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
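Annotation: the three health_status=healthy events above come from podman's periodic healthcheck timers; each container's config_data wires a test script (/openstack/healthcheck ...) into the check. A minimal sketch of reading that status back, assuming a container named "kepler" as seen here and noting that the JSON field is State.Health on recent podman but State.Healthcheck on older builds:

    import json
    import subprocess

    def container_health(name: str) -> str:
        """Return the healthcheck status string for one podman container.

        Assumes `podman` is on PATH and the container defines a healthcheck;
        the field name differs across podman versions, so both spellings
        are tried.
        """
        out = subprocess.run(
            ["podman", "inspect", name],
            check=True, capture_output=True, text=True,
        ).stdout
        state = json.loads(out)[0]["State"]
        health = state.get("Health") or state.get("Healthcheck") or {}
        return health.get("Status", "unknown")

    if __name__ == "__main__":
        # Prints e.g. "healthy", matching the health_status in this log.
        print(container_health("kepler"))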
Oct 11 01:49:49 compute-0 python3.9[244834]: ansible-ansible.builtin.service_facts Invoked
Oct 11 01:49:49 compute-0 network[244897]: You are using 'network' service provided by 'network-scripts', which are now deprecated.
Oct 11 01:49:49 compute-0 network[244898]: 'network-scripts' will be removed from distribution in near future.
Oct 11 01:49:49 compute-0 network[244899]: It is advised to switch to 'NetworkManager' instead for network management.
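Annotation: the warning above is emitted verbatim by the legacy "network" service from the network-scripts package, which RHEL 9 ships only for compatibility; the message itself points at NetworkManager as the replacement. As an illustrative sketch (not taken from this host), active NetworkManager connections could be enumerated like so, assuming nmcli is installed:

    import subprocess

    # List NetworkManager connections in terse, parseable form.
    # -t: terse output, -f: restrict to the named fields.
    out = subprocess.run(
        ["nmcli", "-t", "-f", "NAME,DEVICE,TYPE", "connection", "show"],
        check=True, capture_output=True, text=True,
    ).stdout

    for line in out.strip().splitlines():
        # Naive split; fine for connection names without escaped colons.
        name, device, ctype = line.split(":", 2)
        print(f"{name!r} on {device or '(inactive)'} [{ctype}]")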
Oct 11 01:49:49 compute-0 frosty_mendel[244636]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:49:49 compute-0 frosty_mendel[244636]: --> relative data size: 1.0
Oct 11 01:49:49 compute-0 frosty_mendel[244636]: --> All data devices are unavailable
Oct 11 01:49:50 compute-0 podman[244584]: 2025-10-11 01:49:50.045029345 +0000 UTC m=+2.094104004 container died 31eaf012fe4bd0ad920b12a0f977d3c46eb05a53b5643731455a78727ff7b959 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_mendel, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef)
Oct 11 01:49:50 compute-0 ceph-mon[191930]: pgmap v349: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v350: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:50 compute-0 sshd-session[244923]: banner exchange: Connection from 195.178.110.15 port 46964: invalid format
Oct 11 01:49:50 compute-0 sshd-session[244924]: banner exchange: Connection from 195.178.110.15 port 46976: invalid format
Oct 11 01:49:50 compute-0 systemd[1]: libpod-31eaf012fe4bd0ad920b12a0f977d3c46eb05a53b5643731455a78727ff7b959.scope: Deactivated successfully.
Oct 11 01:49:50 compute-0 systemd[1]: libpod-31eaf012fe4bd0ad920b12a0f977d3c46eb05a53b5643731455a78727ff7b959.scope: Consumed 1.364s CPU time.
Oct 11 01:49:50 compute-0 systemd[1]: var-lib-containers-storage-overlay-ff9375f1a9e205b310b33954055a0dc7522a4bb2b6fbcb6dd679dbf1f649829d-merged.mount: Deactivated successfully.
Oct 11 01:49:50 compute-0 podman[244584]: 2025-10-11 01:49:50.937977352 +0000 UTC m=+2.987051981 container remove 31eaf012fe4bd0ad920b12a0f977d3c46eb05a53b5643731455a78727ff7b959 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_mendel, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default)
Oct 11 01:49:50 compute-0 systemd[1]: libpod-conmon-31eaf012fe4bd0ad920b12a0f977d3c46eb05a53b5643731455a78727ff7b959.scope: Deactivated successfully.
Oct 11 01:49:50 compute-0 sudo[244378]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:51 compute-0 sudo[244928]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:49:51 compute-0 sudo[244928]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:51 compute-0 sudo[244928]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:51 compute-0 sudo[244957]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:49:51 compute-0 sudo[244957]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:51 compute-0 sudo[244957]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:51 compute-0 sudo[244991]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:49:51 compute-0 podman[244984]: 2025-10-11 01:49:51.505022203 +0000 UTC m=+0.148189236 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, config_id=edpm, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.build-date=20251007)
Oct 11 01:49:51 compute-0 sudo[244991]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:51 compute-0 sudo[244991]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:51 compute-0 sudo[245033]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:49:51 compute-0 sudo[245033]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:52 compute-0 ceph-mon[191930]: pgmap v350: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:52 compute-0 podman[245110]: 2025-10-11 01:49:52.212006846 +0000 UTC m=+0.087801881 container create 1fe64c27c7c2aaae173ddee4a72ef0ca187dbf9301693306f9211b98f68290f2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=charming_mclaren, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 01:49:52 compute-0 podman[245110]: 2025-10-11 01:49:52.179693585 +0000 UTC m=+0.055488620 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:49:52 compute-0 systemd[1]: Started libpod-conmon-1fe64c27c7c2aaae173ddee4a72ef0ca187dbf9301693306f9211b98f68290f2.scope.
Oct 11 01:49:52 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:49:52 compute-0 podman[245110]: 2025-10-11 01:49:52.373443531 +0000 UTC m=+0.249238616 container init 1fe64c27c7c2aaae173ddee4a72ef0ca187dbf9301693306f9211b98f68290f2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=charming_mclaren, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:49:52 compute-0 podman[245110]: 2025-10-11 01:49:52.387127584 +0000 UTC m=+0.262922589 container start 1fe64c27c7c2aaae173ddee4a72ef0ca187dbf9301693306f9211b98f68290f2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=charming_mclaren, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 01:49:52 compute-0 podman[245110]: 2025-10-11 01:49:52.395592086 +0000 UTC m=+0.271387131 container attach 1fe64c27c7c2aaae173ddee4a72ef0ca187dbf9301693306f9211b98f68290f2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=charming_mclaren, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:49:52 compute-0 charming_mclaren[245131]: 167 167
Oct 11 01:49:52 compute-0 podman[245110]: 2025-10-11 01:49:52.401639794 +0000 UTC m=+0.277434829 container died 1fe64c27c7c2aaae173ddee4a72ef0ca187dbf9301693306f9211b98f68290f2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=charming_mclaren, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0)
Oct 11 01:49:52 compute-0 systemd[1]: libpod-1fe64c27c7c2aaae173ddee4a72ef0ca187dbf9301693306f9211b98f68290f2.scope: Deactivated successfully.
Oct 11 01:49:52 compute-0 systemd[1]: var-lib-containers-storage-overlay-dfb2785dbb753eb68b1aa4c18725a481ff4279fcf01c710ee969ba37609610d7-merged.mount: Deactivated successfully.
Oct 11 01:49:52 compute-0 podman[245110]: 2025-10-11 01:49:52.489005001 +0000 UTC m=+0.364800006 container remove 1fe64c27c7c2aaae173ddee4a72ef0ca187dbf9301693306f9211b98f68290f2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=charming_mclaren, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 01:49:52 compute-0 systemd[1]: libpod-conmon-1fe64c27c7c2aaae173ddee4a72ef0ca187dbf9301693306f9211b98f68290f2.scope: Deactivated successfully.
Oct 11 01:49:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v351: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:52 compute-0 podman[245166]: 2025-10-11 01:49:52.700839472 +0000 UTC m=+0.062629232 container create 6423d325020be14a8f3eb7eb8fd1895d21af7ef7f4a220ac01a2b52c75281425 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_kirch, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 01:49:52 compute-0 podman[245166]: 2025-10-11 01:49:52.670884726 +0000 UTC m=+0.032674566 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:49:52 compute-0 systemd[1]: Started libpod-conmon-6423d325020be14a8f3eb7eb8fd1895d21af7ef7f4a220ac01a2b52c75281425.scope.
Oct 11 01:49:52 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:49:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3c443b5660067f742f74c69dbbf639953d4f27668a7219ea8ca57e3a873e5e92/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:49:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3c443b5660067f742f74c69dbbf639953d4f27668a7219ea8ca57e3a873e5e92/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:49:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3c443b5660067f742f74c69dbbf639953d4f27668a7219ea8ca57e3a873e5e92/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:49:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3c443b5660067f742f74c69dbbf639953d4f27668a7219ea8ca57e3a873e5e92/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:49:52 compute-0 podman[245166]: 2025-10-11 01:49:52.839507541 +0000 UTC m=+0.201297311 container init 6423d325020be14a8f3eb7eb8fd1895d21af7ef7f4a220ac01a2b52c75281425 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_kirch, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 01:49:52 compute-0 podman[245166]: 2025-10-11 01:49:52.852975987 +0000 UTC m=+0.214765737 container start 6423d325020be14a8f3eb7eb8fd1895d21af7ef7f4a220ac01a2b52c75281425 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_kirch, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:49:52 compute-0 podman[245166]: 2025-10-11 01:49:52.858288797 +0000 UTC m=+0.220078607 container attach 6423d325020be14a8f3eb7eb8fd1895d21af7ef7f4a220ac01a2b52c75281425 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_kirch, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 01:49:53 compute-0 romantic_kirch[245188]: {
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:     "0": [
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:         {
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "devices": [
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "/dev/loop3"
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             ],
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "lv_name": "ceph_lv0",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "lv_size": "21470642176",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "name": "ceph_lv0",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "tags": {
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.cluster_name": "ceph",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.crush_device_class": "",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.encrypted": "0",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.osd_id": "0",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.type": "block",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.vdo": "0"
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             },
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "type": "block",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "vg_name": "ceph_vg0"
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:         }
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:     ],
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:     "1": [
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:         {
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "devices": [
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "/dev/loop4"
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             ],
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "lv_name": "ceph_lv1",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "lv_size": "21470642176",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "name": "ceph_lv1",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "tags": {
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.cluster_name": "ceph",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.crush_device_class": "",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.encrypted": "0",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.osd_id": "1",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.type": "block",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.vdo": "0"
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             },
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "type": "block",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "vg_name": "ceph_vg1"
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:         }
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:     ],
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:     "2": [
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:         {
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "devices": [
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "/dev/loop5"
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             ],
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "lv_name": "ceph_lv2",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "lv_size": "21470642176",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "name": "ceph_lv2",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "tags": {
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.cluster_name": "ceph",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.crush_device_class": "",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.encrypted": "0",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.osd_id": "2",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.type": "block",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:                 "ceph.vdo": "0"
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             },
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "type": "block",
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:             "vg_name": "ceph_vg2"
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:         }
Oct 11 01:49:53 compute-0 romantic_kirch[245188]:     ]
Oct 11 01:49:53 compute-0 romantic_kirch[245188]: }
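Annotation: the JSON block above is the payload of the `ceph-volume ... lvm list --format json` call logged at 01:49:51: a map from OSD id to a list of LV records, where lv_tags is simply the flattened key=value form of the tags object. A small sketch of consuming that shape, assuming the JSON has been captured to a string named raw:

    import json

    def parse_lv_tags(lv_tags: str) -> dict:
        """Expand the comma-separated 'ceph.key=value' string into a dict,
        mirroring the 'tags' object that ceph-volume also emits."""
        return dict(item.split("=", 1) for item in lv_tags.split(",") if item)

    def osd_devices(raw: str) -> dict:
        """Map OSD id -> (lv_path, backing physical devices) from
        `ceph-volume lvm list --format json` output."""
        data = json.loads(raw)
        return {
            osd_id: (lv["lv_path"], lv["devices"])
            for osd_id, lvs in data.items()
            for lv in lvs
            if lv["type"] == "block"
        }

    # For the log above this yields e.g.
    #   {"0": ("/dev/ceph_vg0/ceph_lv0", ["/dev/loop3"]), ...}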
Oct 11 01:49:53 compute-0 systemd[1]: libpod-6423d325020be14a8f3eb7eb8fd1895d21af7ef7f4a220ac01a2b52c75281425.scope: Deactivated successfully.
Oct 11 01:49:53 compute-0 podman[245166]: 2025-10-11 01:49:53.702879265 +0000 UTC m=+1.064669045 container died 6423d325020be14a8f3eb7eb8fd1895d21af7ef7f4a220ac01a2b52c75281425 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_kirch, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:49:53 compute-0 systemd[1]: var-lib-containers-storage-overlay-3c443b5660067f742f74c69dbbf639953d4f27668a7219ea8ca57e3a873e5e92-merged.mount: Deactivated successfully.
Oct 11 01:49:53 compute-0 podman[245166]: 2025-10-11 01:49:53.816698802 +0000 UTC m=+1.178488572 container remove 6423d325020be14a8f3eb7eb8fd1895d21af7ef7f4a220ac01a2b52c75281425 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_kirch, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 01:49:53 compute-0 systemd[1]: libpod-conmon-6423d325020be14a8f3eb7eb8fd1895d21af7ef7f4a220ac01a2b52c75281425.scope: Deactivated successfully.
Oct 11 01:49:53 compute-0 sudo[245033]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:53 compute-0 sudo[245244]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:49:53 compute-0 sudo[245244]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:53 compute-0 sudo[245244]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:54 compute-0 sudo[245273]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:49:54 compute-0 sudo[245273]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:54 compute-0 sudo[245273]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:54 compute-0 sudo[245302]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:49:54 compute-0 ceph-mon[191930]: pgmap v351: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:54 compute-0 sudo[245302]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:54 compute-0 sudo[245302]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:54 compute-0 sudo[245330]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:49:54 compute-0 sudo[245330]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v352: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:49:54 compute-0 podman[245409]: 2025-10-11 01:49:54.823617401 +0000 UTC m=+0.075648635 container create ed026f9207ea8723973487a6fbcd5161de21cdc40e6510ea300f9da6c5a0fa84 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_mcclintock, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:49:54 compute-0 systemd[1]: Started libpod-conmon-ed026f9207ea8723973487a6fbcd5161de21cdc40e6510ea300f9da6c5a0fa84.scope.
Oct 11 01:49:54 compute-0 podman[245409]: 2025-10-11 01:49:54.802138347 +0000 UTC m=+0.054169571 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:49:54 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:49:54 compute-0 podman[245409]: 2025-10-11 01:49:54.956088415 +0000 UTC m=+0.208119679 container init ed026f9207ea8723973487a6fbcd5161de21cdc40e6510ea300f9da6c5a0fa84 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_mcclintock, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef)
Oct 11 01:49:54 compute-0 podman[245409]: 2025-10-11 01:49:54.973711218 +0000 UTC m=+0.225742472 container start ed026f9207ea8723973487a6fbcd5161de21cdc40e6510ea300f9da6c5a0fa84 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_mcclintock, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:49:54 compute-0 podman[245409]: 2025-10-11 01:49:54.979782836 +0000 UTC m=+0.231814080 container attach ed026f9207ea8723973487a6fbcd5161de21cdc40e6510ea300f9da6c5a0fa84 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_mcclintock, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:49:54 compute-0 vigorous_mcclintock[245450]: 167 167
Oct 11 01:49:54 compute-0 systemd[1]: libpod-ed026f9207ea8723973487a6fbcd5161de21cdc40e6510ea300f9da6c5a0fa84.scope: Deactivated successfully.
Oct 11 01:49:54 compute-0 podman[245409]: 2025-10-11 01:49:54.987777465 +0000 UTC m=+0.239808729 container died ed026f9207ea8723973487a6fbcd5161de21cdc40e6510ea300f9da6c5a0fa84 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_mcclintock, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 01:49:55 compute-0 systemd[1]: var-lib-containers-storage-overlay-318e3c15cf97284b6faa7f0cc896fbf0f5ead5647f19770d70e6cfe609a7c998-merged.mount: Deactivated successfully.
Oct 11 01:49:55 compute-0 podman[245409]: 2025-10-11 01:49:55.069907121 +0000 UTC m=+0.321938385 container remove ed026f9207ea8723973487a6fbcd5161de21cdc40e6510ea300f9da6c5a0fa84 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_mcclintock, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:49:55 compute-0 systemd[1]: libpod-conmon-ed026f9207ea8723973487a6fbcd5161de21cdc40e6510ea300f9da6c5a0fa84.scope: Deactivated successfully.
Oct 11 01:49:55 compute-0 podman[245474]: 2025-10-11 01:49:55.346398204 +0000 UTC m=+0.084229756 container create 711a1148bdc1f0055267e7adb1ca5b5421159c10657fba15c1e0ff3ed39ffe25 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_ritchie, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:49:55 compute-0 podman[245474]: 2025-10-11 01:49:55.310873112 +0000 UTC m=+0.048704704 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:49:55 compute-0 systemd[1]: Started libpod-conmon-711a1148bdc1f0055267e7adb1ca5b5421159c10657fba15c1e0ff3ed39ffe25.scope.
Oct 11 01:49:55 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:49:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/152ea7839142dceab67bec751591cab5f9d477794df7dcec75969a0bd0877dd3/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:49:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/152ea7839142dceab67bec751591cab5f9d477794df7dcec75969a0bd0877dd3/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:49:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/152ea7839142dceab67bec751591cab5f9d477794df7dcec75969a0bd0877dd3/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:49:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/152ea7839142dceab67bec751591cab5f9d477794df7dcec75969a0bd0877dd3/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:49:55 compute-0 podman[245474]: 2025-10-11 01:49:55.513870537 +0000 UTC m=+0.251702139 container init 711a1148bdc1f0055267e7adb1ca5b5421159c10657fba15c1e0ff3ed39ffe25 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_ritchie, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:49:55 compute-0 podman[245474]: 2025-10-11 01:49:55.537061596 +0000 UTC m=+0.274893098 container start 711a1148bdc1f0055267e7adb1ca5b5421159c10657fba15c1e0ff3ed39ffe25 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_ritchie, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:49:55 compute-0 podman[245474]: 2025-10-11 01:49:55.544360676 +0000 UTC m=+0.282192268 container attach 711a1148bdc1f0055267e7adb1ca5b5421159c10657fba15c1e0ff3ed39ffe25 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_ritchie, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:49:56 compute-0 sudo[245620]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-htinnfeivilwwpuzbazsoqvqdknqvybt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147395.480375-195-272377372157729/AnsiballZ_stat.py'
Oct 11 01:49:56 compute-0 sudo[245620]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:56 compute-0 ceph-mon[191930]: pgmap v352: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:56 compute-0 python3.9[245622]: ansible-ansible.legacy.stat Invoked with path=/etc/ssh/sshd_config follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:49:56 compute-0 sudo[245620]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:49:56
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.control', 'cephfs.cephfs.data', 'default.rgw.meta', 'images', 'volumes', 'vms', 'backups', '.rgw.root', 'cephfs.cephfs.meta', 'default.rgw.log', '.mgr']
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
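Annotation: "prepared 0/10 changes" means the upmap optimizer evaluated the listed pools and found nothing to move within the configured max-misplaced ratio of 0.050000, i.e. at most 5% of PGs misplaced at any one time. A hedged sketch of querying the same state from the CLI, assuming admin keyring access on this node:

    import json
    import subprocess

    # `ceph balancer status` reports the active mode and last optimization;
    # mgr commands accept --format json for machine-readable output.
    out = subprocess.run(
        ["ceph", "balancer", "status", "--format", "json"],
        check=True, capture_output=True, text=True,
    ).stdout
    status = json.loads(out)
    print(status.get("mode"), status.get("active"))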
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v353: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:49:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]: {
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:         "osd_id": 1,
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:         "type": "bluestore"
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:     },
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:         "osd_id": 2,
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:         "type": "bluestore"
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:     },
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:         "osd_id": 0,
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:         "type": "bluestore"
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]:     }
Oct 11 01:49:56 compute-0 optimistic_ritchie[245508]: }
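Annotation: this second JSON payload is from the `ceph-volume ... raw list --format json` call at 01:49:54. Unlike the lvm listing it is keyed by osd_uuid rather than OSD id, and it reports the device-mapper path (/dev/mapper/ceph_vgN-ceph_lvN) instead of the LV path. A minimal cross-check against the earlier listing, under the same assumption that the JSON has been captured as a string named raw:

    import json

    def raw_osds(raw: str) -> dict:
        """Map osd_id -> (osd_uuid, device) from
        `ceph-volume raw list --format json` output."""
        return {
            rec["osd_id"]: (uuid, rec["device"])
            for uuid, rec in json.loads(raw).items()
            if rec.get("type") == "bluestore"
        }

    # For this host: {1: ('6af45214-...', '/dev/mapper/ceph_vg1-ceph_lv1'),
    #                 2: (...), 0: (...)}, three bluestore OSDs, matching
    # the three LVM data devices reported in the lvm listing above.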
Oct 11 01:49:56 compute-0 sudo[245724]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fsmgqnxbmnxsggnynfvtlculadpadngt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147395.480375-195-272377372157729/AnsiballZ_file.py'
Oct 11 01:49:56 compute-0 sudo[245724]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:56 compute-0 systemd[1]: libpod-711a1148bdc1f0055267e7adb1ca5b5421159c10657fba15c1e0ff3ed39ffe25.scope: Deactivated successfully.
Oct 11 01:49:56 compute-0 podman[245474]: 2025-10-11 01:49:56.855681714 +0000 UTC m=+1.593513256 container died 711a1148bdc1f0055267e7adb1ca5b5421159c10657fba15c1e0ff3ed39ffe25 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_ritchie, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 01:49:56 compute-0 systemd[1]: libpod-711a1148bdc1f0055267e7adb1ca5b5421159c10657fba15c1e0ff3ed39ffe25.scope: Consumed 1.305s CPU time.
Oct 11 01:49:56 compute-0 systemd[1]: var-lib-containers-storage-overlay-152ea7839142dceab67bec751591cab5f9d477794df7dcec75969a0bd0877dd3-merged.mount: Deactivated successfully.
Oct 11 01:49:56 compute-0 podman[245474]: 2025-10-11 01:49:56.973393921 +0000 UTC m=+1.711225443 container remove 711a1148bdc1f0055267e7adb1ca5b5421159c10657fba15c1e0ff3ed39ffe25 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_ritchie, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 01:49:56 compute-0 systemd[1]: libpod-conmon-711a1148bdc1f0055267e7adb1ca5b5421159c10657fba15c1e0ff3ed39ffe25.scope: Deactivated successfully.
Oct 11 01:49:57 compute-0 sudo[245330]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:57 compute-0 python3.9[245728]: ansible-ansible.legacy.file Invoked with mode=0600 dest=/etc/ssh/sshd_config _original_basename=sshd_config_block.j2 recurse=False state=file path=/etc/ssh/sshd_config force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:49:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:49:57 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:49:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:49:57 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:49:57 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 22743fa1-a81e-4f29-98d5-0842057d9dd5 does not exist
Oct 11 01:49:57 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 3dba4474-91d6-4a1c-9d45-b54d6911d208 does not exist
Oct 11 01:49:57 compute-0 sudo[245724]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:57 compute-0 sudo[245741]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:49:57 compute-0 sudo[245741]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:57 compute-0 sudo[245741]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:57 compute-0 sudo[245788]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:49:57 compute-0 sudo[245788]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:49:57 compute-0 sudo[245788]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:58 compute-0 ceph-mon[191930]: pgmap v353: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:49:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:49:58 compute-0 sudo[245940]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yyitfelxmtwtpwhgfwnmnlvlphvabrcw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147397.5555253-208-74995431506670/AnsiballZ_file.py'
Oct 11 01:49:58 compute-0 sudo[245940]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:58 compute-0 python3.9[245942]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:49:58 compute-0 sudo[245940]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:58 compute-0 sshd-session[187757]: Received disconnect from 38.102.83.70 port 42574:11: disconnected by user
Oct 11 01:49:58 compute-0 sshd-session[187757]: Disconnected from user zuul 38.102.83.70 port 42574
Oct 11 01:49:58 compute-0 sshd-session[187754]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:49:58 compute-0 systemd[1]: session-25.scope: Deactivated successfully.
Oct 11 01:49:58 compute-0 systemd[1]: session-25.scope: Consumed 2min 55.992s CPU time.
Oct 11 01:49:58 compute-0 systemd-logind[804]: Session 25 logged out. Waiting for processes to exit.
Oct 11 01:49:58 compute-0 systemd-logind[804]: Removed session 25.
Oct 11 01:49:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v354: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:49:59 compute-0 sudo[246092]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ytztgdexvyfjdobwifovezynzkpuauik ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147398.6505594-216-51992859124044/AnsiballZ_stat.py'
Oct 11 01:49:59 compute-0 sudo[246092]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:49:59 compute-0 python3.9[246094]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/sshd-networks.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:49:59 compute-0 sudo[246092]: pam_unix(sudo:session): session closed for user root
Oct 11 01:49:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:49:59 compute-0 podman[157119]: time="2025-10-11T01:49:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:49:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:49:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:49:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:49:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6829 "" "Go-http-client/1.1"
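The two GET requests above are a metrics client polling podman's libpod REST API over its unix socket. A minimal sketch of the same container listing in Python, assuming the default root socket at /run/podman/podman.sock (the path the podman_exporter container is configured with further down in this log); the endpoint and query string are taken from the logged request line:

    import http.client
    import json
    import socket

    SOCKET_PATH = "/run/podman/podman.sock"  # assumed default root socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        """HTTPConnection variant that connects to a unix-domain socket."""
        def __init__(self, path):
            super().__init__("localhost")
            self.unix_path = path

        def connect(self):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(self.unix_path)
            self.sock = sock

    conn = UnixHTTPConnection(SOCKET_PATH)
    # Same endpoint as the logged request: list every container, external excluded.
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true&external=false")
    body = conn.getresponse().read()
    print(len(json.loads(body)), "containers")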
Oct 11 01:50:00 compute-0 sudo[246170]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uagxpwffoiotgkebbwlaqkmqcfgfkxxc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147398.6505594-216-51992859124044/AnsiballZ_file.py'
Oct 11 01:50:00 compute-0 sudo[246170]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:00 compute-0 ceph-mon[191930]: pgmap v354: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:00 compute-0 python3.9[246172]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/var/lib/edpm-config/firewall/sshd-networks.yaml _original_basename=firewall.yaml.j2 recurse=False state=file path=/var/lib/edpm-config/firewall/sshd-networks.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:50:00 compute-0 sudo[246170]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v355: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:01 compute-0 openstack_network_exporter[159265]: ERROR   01:50:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:50:01 compute-0 openstack_network_exporter[159265]: ERROR   01:50:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:50:01 compute-0 openstack_network_exporter[159265]: ERROR   01:50:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:50:01 compute-0 openstack_network_exporter[159265]: ERROR   01:50:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:50:01 compute-0 openstack_network_exporter[159265]: ERROR   01:50:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:50:01 compute-0 sudo[246322]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wudprofvzccoeimvhykdjgcfvegaewpe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147400.7244623-231-145754052602426/AnsiballZ_timezone.py'
Oct 11 01:50:01 compute-0 sudo[246322]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:01 compute-0 python3.9[246324]: ansible-community.general.timezone Invoked with name=UTC hwclock=None
Oct 11 01:50:01 compute-0 systemd[1]: Starting Time & Date Service...
Oct 11 01:50:01 compute-0 systemd[1]: Started Time & Date Service.
Oct 11 01:50:02 compute-0 sudo[246322]: pam_unix(sudo:session): session closed for user root
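The timezone task explains the Time & Date Service activity around it: on systemd hosts the module works through timedatectl, which talks to systemd-timedated over D-Bus, so the unit starts on demand here and is deactivated again once idle (see the systemd-timedated line further down). A rough sketch of that strategy, assuming a timedatectl new enough to support show --value:

    import subprocess

    def set_timezone(name: str) -> None:
        # Query first so the operation stays idempotent, like the module.
        current = subprocess.run(
            ["timedatectl", "show", "--property=Timezone", "--value"],
            capture_output=True, text=True, check=True,
        ).stdout.strip()
        if current != name:
            subprocess.run(["timedatectl", "set-timezone", name], check=True)

    set_timezone("UTC")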
Oct 11 01:50:02 compute-0 ceph-mon[191930]: pgmap v355: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v356: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:03 compute-0 sudo[246478]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ewqyvaqzxigmnhvnsxerzqwecmfmiuyn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147402.4405398-240-134807497108710/AnsiballZ_file.py'
Oct 11 01:50:03 compute-0 rsyslogd[187706]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 01:50:03 compute-0 sudo[246478]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:03 compute-0 python3.9[246481]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:50:03 compute-0 sudo[246478]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:04 compute-0 sudo[246631]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yluganjlfgapqwvtjbapqqbwyjkerqld ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147403.5101032-248-223969370144521/AnsiballZ_stat.py'
Oct 11 01:50:04 compute-0 sudo[246631]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:04 compute-0 ceph-mon[191930]: pgmap v356: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:04 compute-0 python3.9[246633]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:50:04 compute-0 sudo[246631]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v357: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:50:04 compute-0 sudo[246709]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cmwxojpryaoxrkiqccervsnjqekbhevv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147403.5101032-248-223969370144521/AnsiballZ_file.py'
Oct 11 01:50:04 compute-0 sudo[246709]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:04 compute-0 python3.9[246711]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml _original_basename=base-rules.yaml.j2 recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:50:05 compute-0 sudo[246709]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:05 compute-0 sudo[246861]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ciqpjobvwicmvssvcibnpkdghihdewhe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147405.2944968-260-260363388108293/AnsiballZ_stat.py'
Oct 11 01:50:05 compute-0 sudo[246861]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:06 compute-0 python3.9[246863]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:50:06 compute-0 sudo[246861]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:06 compute-0 ceph-mon[191930]: pgmap v357: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
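Each pg_autoscaler line above turns a pool's share of raw space (times its bias) into a PG target and then quantizes it; every quantized value in this pass is a power of two. A rough sketch of that last step, assuming simple round-up-to-power-of-two with a floor; the real pg_autoscaler additionally applies per-pool minimums and a change threshold, which is why a near-zero target can still quantize to 16 or 32 in the lines above:

    def quantize_pg_num(raw_target: float, floor: int = 1) -> int:
        """Round a raw PG target up to a power of two, never below `floor`.

        Sketch only: per-pool minimums and hysteresis in the real
        autoscaler are not modeled here.
        """
        n = max(int(round(raw_target)), floor)
        p = 1
        while p < n:
            p *= 2
        return p

    print(quantize_pg_num(0.0021557))  # -> 1, matching the '.mgr' pool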
Oct 11 01:50:06 compute-0 sudo[246939]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jjipmgolupmgkfzgwzocfwvlnakjlkyn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147405.2944968-260-260363388108293/AnsiballZ_file.py'
Oct 11 01:50:06 compute-0 sudo[246939]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v358: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:06 compute-0 python3.9[246941]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml _original_basename=.n0r0uxgp recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:50:06 compute-0 sudo[246939]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:07 compute-0 sudo[247091]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nagbkfrfrvjbaurkrgfxmglxfgwtiqhy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147407.2123485-272-145291895989601/AnsiballZ_stat.py'
Oct 11 01:50:07 compute-0 sudo[247091]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:07 compute-0 python3.9[247093]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/iptables.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:50:08 compute-0 sudo[247091]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:08 compute-0 ceph-mon[191930]: pgmap v358: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:08 compute-0 podman[247096]: 2025-10-11 01:50:08.243790168 +0000 UTC m=+0.139954374 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 01:50:08 compute-0 podman[247097]: 2025-10-11 01:50:08.25100888 +0000 UTC m=+0.130492247 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.expose-services=, architecture=x86_64, managed_by=edpm_ansible, config_id=edpm, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, url=https://catalog.redhat.com/en/search?searchType=containers, container_name=openstack_network_exporter, name=ubi9-minimal, version=9.6, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.component=ubi9-minimal-container, io.openshift.tags=minimal rhel9, build-date=2025-08-20T13:12:41, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., vcs-type=git, vendor=Red Hat, Inc., distribution-scope=public, io.buildah.version=1.33.7)
Oct 11 01:50:08 compute-0 sudo[247214]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yicvdjhckovgrzemsvbsmlczupeqdlpx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147407.2123485-272-145291895989601/AnsiballZ_file.py'
Oct 11 01:50:08 compute-0 sudo[247214]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v359: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:08 compute-0 python3.9[247216]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/iptables.nft _original_basename=iptables.nft recurse=False state=file path=/etc/nftables/iptables.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:50:08 compute-0 sudo[247214]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:50:09 compute-0 sudo[247366]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-okjjhtobxnqejhmtgggytfcplqpuswec ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147409.1189916-285-254240062201719/AnsiballZ_command.py'
Oct 11 01:50:09 compute-0 sudo[247366]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:10 compute-0 python3.9[247368]: ansible-ansible.legacy.command Invoked with _raw_params=nft -j list ruleset _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:50:10 compute-0 ceph-mon[191930]: pgmap v359: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:10 compute-0 sudo[247366]: pam_unix(sudo:session): session closed for user root
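The command task above dumps the live ruleset as JSON (nft -j list ruleset), which is easier to compare against a desired state than the plain-text listing. A minimal consumer sketch, assuming libnftables' JSON format with its single top-level "nftables" array of one-key objects:

    import json
    import subprocess

    out = subprocess.run(
        ["nft", "-j", "list", "ruleset"],
        capture_output=True, text=True, check=True,
    ).stdout

    # Entries are one-key objects: {"metainfo": ...}, {"table": ...},
    # {"chain": ...}, {"rule": ...}
    for entry in json.loads(out)["nftables"]:
        if "table" in entry:
            print(entry["table"]["family"], entry["table"]["name"])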
Oct 11 01:50:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v360: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:11 compute-0 sudo[247519]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-etgnbqaaqrtzmieezhhhfboyqwtduqzl ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760147410.522723-293-40303020738196/AnsiballZ_edpm_nftables_from_files.py'
Oct 11 01:50:11 compute-0 sudo[247519]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:11 compute-0 python3[247521]: ansible-edpm_nftables_from_files Invoked with src=/var/lib/edpm-config/firewall
Oct 11 01:50:11 compute-0 sudo[247519]: pam_unix(sudo:session): session closed for user root
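edpm_nftables_from_files gathers the rule fragments that the earlier tasks staged under /var/lib/edpm-config/firewall (sshd-networks.yaml, edpm-nftables-base.yaml, edpm-nftables-user-rules.yaml). A hedged approximation of that gathering step, assuming each fragment is a YAML list of rule entries that is simply concatenated; the real module ships in the edpm-ansible collection and may validate more than this:

    import pathlib
    import yaml  # PyYAML

    def rules_from_files(src: str) -> list:
        rules = []
        # Sorted for a stable rule order across runs.
        for path in sorted(pathlib.Path(src).glob("*.yaml")):
            rules.extend(yaml.safe_load(path.read_text()) or [])
        return rules

    print(len(rules_from_files("/var/lib/edpm-config/firewall")), "rule entries")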
Oct 11 01:50:12 compute-0 ceph-mon[191930]: pgmap v360: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:12 compute-0 sudo[247671]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eyhlcmqycsnjsyumbtrarsghxezdmrap ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147411.8825488-301-156345661473996/AnsiballZ_stat.py'
Oct 11 01:50:12 compute-0 sudo[247671]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v361: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:12 compute-0 python3.9[247673]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:50:12 compute-0 sudo[247671]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:13 compute-0 podman[247700]: 2025-10-11 01:50:13.252399272 +0000 UTC m=+0.133562358 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_managed=true)
Oct 11 01:50:13 compute-0 sudo[247768]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-scmrbzkvptcjtdfsdvxglaualmimshjm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147411.8825488-301-156345661473996/AnsiballZ_file.py'
Oct 11 01:50:13 compute-0 sudo[247768]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:13 compute-0 python3.9[247771]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-jumps.nft _original_basename=jump-chain.j2 recurse=False state=file path=/etc/nftables/edpm-jumps.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:50:13 compute-0 sudo[247768]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:14 compute-0 ceph-mon[191930]: pgmap v361: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:14 compute-0 sudo[247921]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-erutnqommghpzrytntjcmkcpjvnliior ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147413.9382708-313-69126820461198/AnsiballZ_stat.py'
Oct 11 01:50:14 compute-0 sudo[247921]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v362: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:50:14 compute-0 python3.9[247923]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-update-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:50:14 compute-0 sudo[247921]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:15 compute-0 sudo[247999]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vdzuvgrzyahwgweyzvdlhnigoxhffamv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147413.9382708-313-69126820461198/AnsiballZ_file.py'
Oct 11 01:50:15 compute-0 sudo[247999]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:15 compute-0 python3.9[248001]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-update-jumps.nft _original_basename=jump-chain.j2 recurse=False state=file path=/etc/nftables/edpm-update-jumps.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:50:15 compute-0 sudo[247999]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:16 compute-0 ceph-mon[191930]: pgmap v362: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:16 compute-0 sudo[248151]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ndrfuenyxzpzkueejhyegfuocilubsde ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147415.8997533-325-183023320578258/AnsiballZ_stat.py'
Oct 11 01:50:16 compute-0 sudo[248151]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v363: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:16 compute-0 python3.9[248153]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-flushes.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:50:16 compute-0 sudo[248151]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:17 compute-0 sudo[248229]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bvjrtbgvzmzuqeltolfxivufkioaempf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147415.8997533-325-183023320578258/AnsiballZ_file.py'
Oct 11 01:50:17 compute-0 sudo[248229]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:17 compute-0 python3.9[248231]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-flushes.nft _original_basename=flush-chain.j2 recurse=False state=file path=/etc/nftables/edpm-flushes.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:50:17 compute-0 sudo[248229]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:18 compute-0 ceph-mon[191930]: pgmap v363: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:18 compute-0 sudo[248381]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fwuzofsvkpqtvilylsngzeuixlwavjhl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147417.753202-337-229654578687313/AnsiballZ_stat.py'
Oct 11 01:50:18 compute-0 sudo[248381]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v364: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:18 compute-0 python3.9[248383]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-chains.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:50:18 compute-0 sudo[248381]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:19 compute-0 sudo[248459]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hamhohghfgoygfzbahdybotkwleyhwju ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147417.753202-337-229654578687313/AnsiballZ_file.py'
Oct 11 01:50:19 compute-0 sudo[248459]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:19 compute-0 python3.9[248461]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-chains.nft _original_basename=chains.j2 recurse=False state=file path=/etc/nftables/edpm-chains.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:50:19 compute-0 sudo[248459]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:50:20 compute-0 ceph-mon[191930]: pgmap v364: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:20 compute-0 podman[248574]: 2025-10-11 01:50:20.212502886 +0000 UTC m=+0.102643328 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 01:50:20 compute-0 podman[248584]: 2025-10-11 01:50:20.23788923 +0000 UTC m=+0.106038398 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., io.openshift.tags=base rhel9, config_id=edpm, com.redhat.component=ubi9-container, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, architecture=x86_64, io.openshift.expose-services=, build-date=2024-09-18T21:23:30, distribution-scope=public, name=ubi9, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=1214.1726694543, version=9.4, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9, container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9., managed_by=edpm_ansible, release-0.7.12=, io.buildah.version=1.29.0)
Oct 11 01:50:20 compute-0 sudo[248675]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rostkmzglcikbfwonlcfwingmamjhmgh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147419.646718-349-59738007311624/AnsiballZ_stat.py'
Oct 11 01:50:20 compute-0 sudo[248675]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:20 compute-0 podman[248581]: 2025-10-11 01:50:20.281341576 +0000 UTC m=+0.155290845 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 01:50:20 compute-0 python3.9[248679]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-rules.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:50:20 compute-0 sudo[248675]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v365: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:21 compute-0 sudo[248755]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-soeduhgrviroakgmfervadlclfnbuxeg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147419.646718-349-59738007311624/AnsiballZ_file.py'
Oct 11 01:50:21 compute-0 sudo[248755]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:21 compute-0 python3.9[248757]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-rules.nft _original_basename=ruleset.j2 recurse=False state=file path=/etc/nftables/edpm-rules.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:50:21 compute-0 sudo[248755]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:22 compute-0 sudo[248923]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ktzwazxwfabfhfgcqlpbwqgqbkrnhkii ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147421.5942438-362-96661907670716/AnsiballZ_command.py'
Oct 11 01:50:22 compute-0 sudo[248923]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:22 compute-0 podman[248881]: 2025-10-11 01:50:22.167316903 +0000 UTC m=+0.153418536 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team)
Oct 11 01:50:22 compute-0 ceph-mon[191930]: pgmap v365: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:22 compute-0 python3.9[248928]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail; cat /etc/nftables/edpm-chains.nft /etc/nftables/edpm-flushes.nft /etc/nftables/edpm-rules.nft /etc/nftables/edpm-update-jumps.nft /etc/nftables/edpm-jumps.nft | nft -c -f - _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:50:22 compute-0 sudo[248923]: pam_unix(sudo:session): session closed for user root
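The check above concatenates the five generated files in include order and feeds them to nft -c -f -, so the combined ruleset is parsed and type-checked without touching the kernel; set -o pipefail keeps a failing cat from being masked by the pipe. The same validation, sketched in Python:

    import subprocess

    FILES = [
        "/etc/nftables/edpm-chains.nft",
        "/etc/nftables/edpm-flushes.nft",
        "/etc/nftables/edpm-rules.nft",
        "/etc/nftables/edpm-update-jumps.nft",
        "/etc/nftables/edpm-jumps.nft",
    ]

    combined = "".join(open(f).read() for f in FILES)
    # -c: check only, nothing is loaded; -f -: read the ruleset from stdin.
    subprocess.run(["nft", "-c", "-f", "-"], input=combined, text=True, check=True)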
Oct 11 01:50:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v366: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:23 compute-0 sudo[249081]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ikpwuumhaokftwljpixrjithipkyaacn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147422.6969528-370-268987862721433/AnsiballZ_blockinfile.py'
Oct 11 01:50:23 compute-0 sudo[249081]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:23 compute-0 python3.9[249083]: ansible-ansible.builtin.blockinfile Invoked with backup=False block=include "/etc/nftables/iptables.nft"
                                             include "/etc/nftables/edpm-chains.nft"
                                             include "/etc/nftables/edpm-rules.nft"
                                             include "/etc/nftables/edpm-jumps.nft"
                                              path=/etc/sysconfig/nftables.conf validate=nft -c -f %s state=present marker=# {mark} ANSIBLE MANAGED BLOCK create=False marker_begin=BEGIN marker_end=END append_newline=False prepend_newline=False unsafe_writes=False insertafter=None insertbefore=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:50:23 compute-0 sudo[249081]: pam_unix(sudo:session): session closed for user root
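Given marker='# {mark} ANSIBLE MANAGED BLOCK' and validate='nft -c -f %s', the blockinfile task above should leave /etc/sysconfig/nftables.conf with a block like the following, so the persistent boot-time config pulls in the base iptables translation plus the edpm chains, rules and jumps (the flush and update-jump files are not part of the boot path):

    # BEGIN ANSIBLE MANAGED BLOCK
    include "/etc/nftables/iptables.nft"
    include "/etc/nftables/edpm-chains.nft"
    include "/etc/nftables/edpm-rules.nft"
    include "/etc/nftables/edpm-jumps.nft"
    # END ANSIBLE MANAGED BLOCK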
Oct 11 01:50:24 compute-0 ceph-mon[191930]: pgmap v366: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:24 compute-0 sudo[249233]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mszsfrwrtddklrsekmgwxqenouksteqk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147424.0369954-379-198197985174386/AnsiballZ_file.py'
Oct 11 01:50:24 compute-0 sudo[249233]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v367: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:50:24 compute-0 python3.9[249235]: ansible-ansible.builtin.file Invoked with group=hugetlbfs mode=0775 owner=zuul path=/dev/hugepages1G state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:50:24 compute-0 sudo[249233]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:25 compute-0 sudo[249385]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ziolndwjrcalpqpvduteswfrzqjxucck ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147425.2206078-379-222154024985147/AnsiballZ_file.py'
Oct 11 01:50:25 compute-0 sudo[249385]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:25 compute-0 python3.9[249387]: ansible-ansible.builtin.file Invoked with group=hugetlbfs mode=0775 owner=zuul path=/dev/hugepages2M state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:50:26 compute-0 sudo[249385]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:26 compute-0 ceph-mon[191930]: pgmap v367: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:50:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:50:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:50:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:50:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:50:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:50:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v368: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:27 compute-0 sudo[249537]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lydodmgxmccreoskdirmaifochwxejyi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147426.3543377-394-32087404411480/AnsiballZ_mount.py'
Oct 11 01:50:27 compute-0 sudo[249537]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:27 compute-0 python3.9[249539]: ansible-ansible.posix.mount Invoked with fstype=hugetlbfs opts=pagesize=1G path=/dev/hugepages1G src=none state=mounted boot=True dump=0 opts_no_log=False passno=0 backup=False fstab=None
Oct 11 01:50:27 compute-0 sudo[249537]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:28 compute-0 sudo[249689]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-axgvlikmwzbwdwrvpvozhuurrxcyggkn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147427.6071103-394-190903034320883/AnsiballZ_mount.py'
Oct 11 01:50:28 compute-0 sudo[249689]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:28 compute-0 ceph-mon[191930]: pgmap v368: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:28 compute-0 python3.9[249691]: ansible-ansible.posix.mount Invoked with fstype=hugetlbfs opts=pagesize=2M path=/dev/hugepages2M src=none state=mounted boot=True dump=0 opts_no_log=False passno=0 backup=False fstab=None
Oct 11 01:50:28 compute-0 sudo[249689]: pam_unix(sudo:session): session closed for user root
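With state=mounted and boot=True, ansible.posix.mount both performs the mount and persists it; given src=none, fstype=hugetlbfs and the pagesize options above (dump=0, passno=0), the resulting /etc/fstab entries would be roughly:

    none /dev/hugepages1G hugetlbfs pagesize=1G 0 0
    none /dev/hugepages2M hugetlbfs pagesize=2M 0 0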
Oct 11 01:50:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v369: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:28 compute-0 sshd-session[241400]: Connection closed by 192.168.122.30 port 52652
Oct 11 01:50:28 compute-0 sshd-session[241397]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:50:28 compute-0 systemd[1]: session-47.scope: Deactivated successfully.
Oct 11 01:50:28 compute-0 systemd[1]: session-47.scope: Consumed 54.440s CPU time.
Oct 11 01:50:28 compute-0 systemd-logind[804]: Session 47 logged out. Waiting for processes to exit.
Oct 11 01:50:28 compute-0 systemd-logind[804]: Removed session 47.
Oct 11 01:50:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:50:29 compute-0 podman[157119]: time="2025-10-11T01:50:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:50:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:50:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:50:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:50:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6825 "" "Go-http-client/1.1"
Oct 11 01:50:30 compute-0 ceph-mon[191930]: pgmap v369: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v370: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:31 compute-0 openstack_network_exporter[159265]: ERROR   01:50:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:50:31 compute-0 openstack_network_exporter[159265]: ERROR   01:50:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:50:31 compute-0 openstack_network_exporter[159265]: ERROR   01:50:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:50:31 compute-0 openstack_network_exporter[159265]: ERROR   01:50:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:50:31 compute-0 openstack_network_exporter[159265]: ERROR   01:50:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:50:31 compute-0 systemd[1]: systemd-timedated.service: Deactivated successfully.
Oct 11 01:50:32 compute-0 ceph-mon[191930]: pgmap v370: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v371: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:33 compute-0 sshd-session[249718]: Accepted publickey for zuul from 192.168.122.30 port 33876 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:50:34 compute-0 systemd-logind[804]: New session 48 of user zuul.
Oct 11 01:50:34 compute-0 systemd[1]: Started Session 48 of User zuul.
Oct 11 01:50:34 compute-0 sshd-session[249718]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:50:34 compute-0 ceph-mon[191930]: pgmap v371: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v372: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:50:35 compute-0 sudo[249871]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zaaaymzkwcoetrqskeovuvkgbreyeyry ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147434.212207-16-27233292259236/AnsiballZ_tempfile.py'
Oct 11 01:50:35 compute-0 sudo[249871]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:35 compute-0 python3.9[249873]: ansible-ansible.builtin.tempfile Invoked with state=file prefix=ansible. suffix= path=None
Oct 11 01:50:35 compute-0 sudo[249871]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:36 compute-0 sudo[250023]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qwdgaclmbvksyqqpoukymdjpfcfjdsor ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147435.5927823-28-142543399584242/AnsiballZ_stat.py'
Oct 11 01:50:36 compute-0 sudo[250023]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:36 compute-0 ceph-mon[191930]: pgmap v372: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:36 compute-0 python3.9[250025]: ansible-ansible.builtin.stat Invoked with path=/etc/ssh/ssh_known_hosts follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:50:36 compute-0 sudo[250023]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v373: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:37 compute-0 sudo[250177]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zfddlynjvmtmeqjukrepbgrhhzawlshv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147436.9256017-36-115865169798996/AnsiballZ_slurp.py'
Oct 11 01:50:37 compute-0 sudo[250177]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:37 compute-0 python3.9[250179]: ansible-ansible.builtin.slurp Invoked with src=/etc/ssh/ssh_known_hosts
Oct 11 01:50:37 compute-0 sudo[250177]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:38 compute-0 ceph-mon[191930]: pgmap v373: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v374: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:38 compute-0 podman[250303]: 2025-10-11 01:50:38.742769675 +0000 UTC m=+0.109017274 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
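[annotation] This health_status event is podman's periodic healthcheck of node_exporter; the test command and its mount are in the config_data above. A sketch for re-running the same check by hand, assuming only the container_name from the event:

    import subprocess

    # 'podman healthcheck run' executes the container's configured test
    # ('/openstack/healthcheck node_exporter' here) and exits 0 when healthy.
    r = subprocess.run(["podman", "healthcheck", "run", "node_exporter"])
    print("healthy" if r.returncode == 0 else "unhealthy")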
Oct 11 01:50:38 compute-0 sudo[250354]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sdteclhmqoabcvmrhszajvhxillagxds ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147438.2098002-44-250711429169945/AnsiballZ_stat.py'
Oct 11 01:50:38 compute-0 sudo[250354]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:38 compute-0 podman[250304]: 2025-10-11 01:50:38.79037782 +0000 UTC m=+0.150474300 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.expose-services=, io.buildah.version=1.33.7, maintainer=Red Hat, Inc., release=1755695350, version=9.6, url=https://catalog.redhat.com/en/search?searchType=containers, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, config_id=edpm, container_name=openstack_network_exporter, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, distribution-scope=public, name=ubi9-minimal, vendor=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-type=git, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.openshift.tags=minimal rhel9, build-date=2025-08-20T13:12:41)
Oct 11 01:50:38 compute-0 python3.9[250370]: ansible-ansible.legacy.stat Invoked with path=/tmp/ansible.y2edue0c follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:50:38 compute-0 sudo[250354]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:50:39 compute-0 sudo[250493]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ohjjmbrzliucluodmriyjqvhwvsqxqye ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147438.2098002-44-250711429169945/AnsiballZ_copy.py'
Oct 11 01:50:39 compute-0 sudo[250493]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:40 compute-0 python3.9[250495]: ansible-ansible.legacy.copy Invoked with dest=/tmp/ansible.y2edue0c mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760147438.2098002-44-250711429169945/.source.y2edue0c _original_basename=.bwubzv8e follow=False checksum=a15ede4d79f7c06c94878b6b67886804d72f6da3 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:50:40 compute-0 sudo[250493]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:40 compute-0 ceph-mon[191930]: pgmap v374: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v375: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:41 compute-0 sudo[250645]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-icudqaduuopntgegmqdrnqcfpexcbmyl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147440.5253377-59-101677396940075/AnsiballZ_setup.py'
Oct 11 01:50:41 compute-0 sudo[250645]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:41 compute-0 ceph-mon[191930]: pgmap v375: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:41 compute-0 python3.9[250647]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'ssh_host_key_rsa_public', 'ssh_host_key_ed25519_public', 'ssh_host_key_ecdsa_public'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:50:41 compute-0 sudo[250645]: pam_unix(sudo:session): session closed for user root
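[annotation] The setup call above gathers only the three SSH host-key facts ('!all' and '!min' suppress everything else); Ansible reads those facts from the public key files under /etc/ssh. A sketch of the equivalent direct read, assuming the standard OpenSSH key file layout:

    import glob

    for pub in sorted(glob.glob("/etc/ssh/ssh_host_*_key.pub")):
        with open(pub) as f:
            keytype, key = f.read().split()[:2]
        print(pub, keytype, key[:24] + "...")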
Oct 11 01:50:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v376: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:42 compute-0 sudo[250797]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-evimasqzqbtkkpcelurkpnzltjacntca ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147442.144484-68-1643638297838/AnsiballZ_blockinfile.py'
Oct 11 01:50:42 compute-0 sudo[250797]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:43 compute-0 python3.9[250799]: ansible-ansible.builtin.blockinfile Invoked with block=compute-0.ctlplane.example.com,192.168.122.100,compute-0* ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDCIdnNJXEzty4CiVEsQZKTsrvDMbMIfWnM6SmxDoDiaBSKjE0gIdIyszcedOqlaH5lXEVeZ6fus0sCQfkBMbju4l7W6IePjHdglc1yQ217jhLN1W7FgWyQ1JAkOEeNwnCwt5yGoqPA4fqm3z+EQ2MyCrpiYZ3y3GF1AT0EOJY+BlcEteWbn8iSJi/MnErCUbyN2BLQKNR+S5HUIlOztpgfDXDYHBzZXoWzBrL5yUtkr3R2lcz7vpcVZZkkq6xH00zMoBtbqcZWJ0Aj21Luo3oo/wnyBcuD9+hRyj9/C6KnM6jtVxWUo09w3S8IyG9y5GBBJ+uzOZbQ9piGsxnTFwC9B7IRFzx2H+QcKWFK+i+HUSiK1KOvXBF+a/owkP83YlGBYvNoiUq/c+vl31EAdDUtbu/bBHk2N00CRhrKWme2O8A1dc7PNhaW/rUzoZL87Hp49wdreMb5uxWHlA0vUJweOxVVHVwkAKdnLpKyeV6JqLzVvWH2tm5a4G4vhUlLIqM=
                                             compute-0.ctlplane.example.com,192.168.122.100,compute-0* ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGo7eC2eXuR6HNsLolVokRdWYpxD4UNnKEqOwYb+hR8L
                                             compute-0.ctlplane.example.com,192.168.122.100,compute-0* ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKNcUmGzkxmH914R7kTLqjEeDLEAUeCYxOthKxvUCNxgbJBfHePifvSlCMmzlBeBDi+9zUqh+p6KJjBIHHSgO5I=
                                              create=True mode=0644 path=/tmp/ansible.y2edue0c state=present marker=# {mark} ANSIBLE MANAGED BLOCK backup=False marker_begin=BEGIN marker_end=END append_newline=False prepend_newline=False unsafe_writes=False insertafter=None insertbefore=None validate=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:50:43 compute-0 sudo[250797]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:43 compute-0 ceph-mon[191930]: pgmap v376: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:44 compute-0 sudo[250965]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xkfylnpgaiyoyoombnokzrvusdbbzjbc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147443.4196699-76-259620925568594/AnsiballZ_command.py'
Oct 11 01:50:44 compute-0 sudo[250965]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:44 compute-0 podman[250923]: 2025-10-11 01:50:44.232524671 +0000 UTC m=+0.160623030 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS)
Oct 11 01:50:44 compute-0 python3.9[250971]: ansible-ansible.legacy.command Invoked with _raw_params=cat '/tmp/ansible.y2edue0c' > /etc/ssh/ssh_known_hosts _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:50:44 compute-0 sudo[250965]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v377: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:50:45 compute-0 sudo[251123]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xehmsqrpgbrxpbdkvxemwtaolclsfygs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147444.7829359-84-185495387189288/AnsiballZ_file.py'
Oct 11 01:50:45 compute-0 sudo[251123]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:45 compute-0 python3.9[251125]: ansible-ansible.builtin.file Invoked with path=/tmp/ansible.y2edue0c state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:50:45 compute-0 ceph-mon[191930]: pgmap v377: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:45 compute-0 sudo[251123]: pam_unix(sudo:session): session closed for user root
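[annotation] Taken together, the tasks in session 48 implement a known_hosts distribution pattern: create a temp file (tempfile), copy in the current rendering, insert the gathered host keys between ANSIBLE MANAGED BLOCK markers (blockinfile), cat the temp file over /etc/ssh/ssh_known_hosts, then delete it. A condensed sketch of that sequence with the key material elided and error handling omitted; paths and marker text are the ones visible above:

    import shutil
    import tempfile

    # Host-key lines as gathered by the setup call; real values elided here.
    entries = [
        "compute-0.ctlplane.example.com,192.168.122.100,compute-0* ssh-ed25519 ...",
    ]
    block = "\n".join(["# BEGIN ANSIBLE MANAGED BLOCK", *entries, "# END ANSIBLE MANAGED BLOCK"])

    with tempfile.NamedTemporaryFile("w", prefix="ansible.", delete=False) as tmp:
        tmp.write(block + "\n")
    # The play does the equivalent with "cat '<tmp>' > /etc/ssh/ssh_known_hosts".
    shutil.move(tmp.name, "/etc/ssh/ssh_known_hosts")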
Oct 11 01:50:46 compute-0 sshd-session[249721]: Connection closed by 192.168.122.30 port 33876
Oct 11 01:50:46 compute-0 sshd-session[249718]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:50:46 compute-0 systemd[1]: session-48.scope: Deactivated successfully.
Oct 11 01:50:46 compute-0 systemd[1]: session-48.scope: Consumed 9.530s CPU time.
Oct 11 01:50:46 compute-0 systemd-logind[804]: Session 48 logged out. Waiting for processes to exit.
Oct 11 01:50:46 compute-0 systemd-logind[804]: Removed session 48.
Oct 11 01:50:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v378: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:47 compute-0 ceph-mon[191930]: pgmap v378: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v379: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #21. Immutable memtables: 0.
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:50:49.698142) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 5] Flushing memtable with next log file: 21
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147449698308, "job": 5, "event": "flush_started", "num_memtables": 1, "num_entries": 1705, "num_deletes": 252, "total_data_size": 2380639, "memory_usage": 2422896, "flush_reason": "Manual Compaction"}
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 5] Level-0 flush table #22: started
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147449714781, "cf_name": "default", "job": 5, "event": "table_file_creation", "file_number": 22, "file_size": 1398137, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 7323, "largest_seqno": 9027, "table_properties": {"data_size": 1392498, "index_size": 2522, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 2053, "raw_key_size": 16745, "raw_average_key_size": 20, "raw_value_size": 1379034, "raw_average_value_size": 1725, "num_data_blocks": 118, "num_entries": 799, "num_filter_entries": 799, "num_deletions": 252, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760147294, "oldest_key_time": 1760147294, "file_creation_time": 1760147449, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 22, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 5] Flush lasted 16734 microseconds, and 8480 cpu microseconds.
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:50:49.714880) [db/flush_job.cc:967] [default] [JOB 5] Level-0 flush table #22: 1398137 bytes OK
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:50:49.714909) [db/memtable_list.cc:519] [default] Level-0 commit table #22 started
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:50:49.717876) [db/memtable_list.cc:722] [default] Level-0 commit table #22: memtable #1 done
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:50:49.717899) EVENT_LOG_v1 {"time_micros": 1760147449717892, "job": 5, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:50:49.717930) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 5] Try to delete WAL files size 2373012, prev total WAL file size 2373012, number of live WAL files 2.
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000018.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:50:49.719496) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6D6772737461740030' seq:72057594037927935, type:22 .. '6D67727374617400323533' seq:0, type:0; will stop at (end)
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 6] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 5 Base level 0, inputs: [22(1365KB)], [20(6960KB)]
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147449719577, "job": 6, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [22], "files_L6": [20], "score": -1, "input_data_size": 8525263, "oldest_snapshot_seqno": -1}
Oct 11 01:50:49 compute-0 ceph-mon[191930]: pgmap v379: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 6] Generated table #23: 3387 keys, 6821636 bytes, temperature: kUnknown
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147449776424, "cf_name": "default", "job": 6, "event": "table_file_creation", "file_number": 23, "file_size": 6821636, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 6795667, "index_size": 16387, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 8517, "raw_key_size": 81000, "raw_average_key_size": 23, "raw_value_size": 6731115, "raw_average_value_size": 1987, "num_data_blocks": 728, "num_entries": 3387, "num_filter_entries": 3387, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760147449, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 23, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:50:49.776711) [db/compaction/compaction_job.cc:1663] [default] [JOB 6] Compacted 1@0 + 1@6 files to L6 => 6821636 bytes
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:50:49.781523) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 149.8 rd, 119.9 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(1.3, 6.8 +0.0 blob) out(6.5 +0.0 blob), read-write-amplify(11.0) write-amplify(4.9) OK, records in: 3831, records dropped: 444 output_compression: NoCompression
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:50:49.781588) EVENT_LOG_v1 {"time_micros": 1760147449781560, "job": 6, "event": "compaction_finished", "compaction_time_micros": 56914, "compaction_time_cpu_micros": 36005, "output_level": 6, "num_output_files": 1, "total_output_size": 6821636, "num_input_records": 3831, "num_output_records": 3387, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000022.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147449783328, "job": 6, "event": "table_file_deletion", "file_number": 22}
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000020.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147449786526, "job": 6, "event": "table_file_deletion", "file_number": 20}
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:50:49.719181) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:50:49.786783) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:50:49.786791) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:50:49.786794) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:50:49.786798) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:50:49 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:50:49.786801) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
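[annotation] The EVENT_LOG_v1 payloads in this rocksdb burst are plain JSON after the marker, so the amplification figures printed for JOB 6 (write-amplify 4.9, read-write-amplify 11.0) can be recomputed from them: output bytes over the 1398137-byte L0 input table (#22), and input-plus-output bytes over the same denominator. A parsing sketch, assuming this journal has been dumped to a hypothetical compute-0.log:

    import json
    import re

    events = []
    with open("compute-0.log") as f:   # hypothetical dump of this journal
        for line in f:
            m = re.search(r"EVENT_LOG_v1 (\{.*\})", line)
            if m:
                events.append(json.loads(m.group(1)))

    start = next(e for e in events if e["event"] == "compaction_started" and e["job"] == 6)
    done = next(e for e in events if e["event"] == "compaction_finished" and e["job"] == 6)
    l0_in = 1398137   # size of flushed table #22 from the flush job above
    print("write-amplify      %.1f" % (done["total_output_size"] / l0_in))
    print("read-write-amplify %.1f" % ((start["input_data_size"] + done["total_output_size"]) / l0_in))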
Oct 11 01:50:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v380: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:51 compute-0 podman[251151]: 2025-10-11 01:50:51.237648626 +0000 UTC m=+0.119180944 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:50:51 compute-0 podman[251153]: 2025-10-11 01:50:51.279695985 +0000 UTC m=+0.150196919 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.tags=base rhel9, io.buildah.version=1.29.0, name=ubi9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., version=9.4, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, summary=Provides the latest release of Red Hat Universal Base Image 9., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-type=git, config_id=edpm, container_name=kepler, distribution-scope=public, release-0.7.12=, architecture=x86_64, maintainer=Red Hat, Inc., release=1214.1726694543, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, managed_by=edpm_ansible, build-date=2024-09-18T21:23:30)
Oct 11 01:50:51 compute-0 podman[251152]: 2025-10-11 01:50:51.320385119 +0000 UTC m=+0.196937402 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_id=ovn_controller)
Oct 11 01:50:51 compute-0 ceph-mon[191930]: pgmap v380: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:51 compute-0 sshd-session[251220]: Accepted publickey for zuul from 192.168.122.30 port 35912 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:50:51 compute-0 systemd-logind[804]: New session 49 of user zuul.
Oct 11 01:50:52 compute-0 systemd[1]: Started Session 49 of User zuul.
Oct 11 01:50:52 compute-0 sshd-session[251220]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:50:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v381: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:53 compute-0 podman[251347]: 2025-10-11 01:50:53.285990968 +0000 UTC m=+0.174257852 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true)
Oct 11 01:50:53 compute-0 python3.9[251387]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:50:53 compute-0 ceph-mon[191930]: pgmap v381: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v382: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:50:54 compute-0 sudo[251546]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-egvrwotqlksnalamogcjdqtitsqjndmb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147454.1265972-32-70691660741417/AnsiballZ_systemd.py'
Oct 11 01:50:55 compute-0 sudo[251546]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:55 compute-0 python3.9[251548]: ansible-ansible.builtin.systemd Invoked with enabled=True name=sshd daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None masked=None
Oct 11 01:50:55 compute-0 sudo[251546]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:55 compute-0 ceph-mon[191930]: pgmap v382: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:56 compute-0 sudo[251700]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-harmprreclrazvvjhbmmcuybclszycki ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147455.7057014-40-267242402654818/AnsiballZ_systemd.py'
Oct 11 01:50:56 compute-0 sudo[251700]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:50:56
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['vms', 'default.rgw.log', 'default.rgw.meta', 'cephfs.cephfs.meta', 'volumes', '.rgw.root', '.mgr', 'default.rgw.control', 'backups', 'images', 'cephfs.cephfs.data']
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
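[annotation] This is one pass of the mgr balancer module: upmap mode, a 5% max-misplaced budget, and nothing to do (0 of 10 prepared changes) since all 321 PGs are already active+clean. The same state is visible from the CLI; a sketch assuming admin credentials on this node:

    import json
    import subprocess

    status = json.loads(subprocess.run(
        ["ceph", "balancer", "status", "--format", "json"],
        capture_output=True, text=True, check=True).stdout)
    print(status["mode"], "active:", status["active"])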
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v383: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:50:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:50:56 compute-0 python3.9[251702]: ansible-ansible.builtin.systemd Invoked with name=sshd state=started daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 01:50:56 compute-0 sudo[251700]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:57 compute-0 sudo[251780]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:50:57 compute-0 sudo[251780]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:50:57 compute-0 sudo[251780]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:57 compute-0 sudo[251805]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:50:57 compute-0 sudo[251805]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:50:57 compute-0 sudo[251805]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:57 compute-0 sudo[251853]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:50:57 compute-0 sudo[251853]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:50:57 compute-0 sudo[251853]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:57 compute-0 ceph-mon[191930]: pgmap v383: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:57 compute-0 sudo[251902]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 01:50:57 compute-0 sudo[251902]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:50:57 compute-0 sudo[251952]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vrqawkmqccmivrbavouvacnlseoeveoe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147457.1470356-49-16341021228196/AnsiballZ_command.py'
Oct 11 01:50:57 compute-0 sudo[251952]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:58 compute-0 python3.9[251955]: ansible-ansible.legacy.command Invoked with _raw_params=nft -f /etc/nftables/edpm-chains.nft _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:50:58 compute-0 sudo[251952]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:58 compute-0 sudo[251902]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:50:58 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:50:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:50:58 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:50:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:50:58 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:50:58 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 670268a2-cc29-40f1-b39f-249d4d29fc9a does not exist
Oct 11 01:50:58 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev fec74e50-01be-43e8-a5e1-cb09758809d8 does not exist
Oct 11 01:50:58 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 528c4f5b-0962-42b5-95d1-17dc23519aa0 does not exist
Oct 11 01:50:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:50:58 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:50:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:50:58 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:50:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:50:58 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:50:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v384: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:58 compute-0 sudo[252062]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:50:58 compute-0 sudo[252062]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:50:58 compute-0 sudo[252062]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:50:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:50:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:50:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:50:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:50:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:50:58 compute-0 sudo[252087]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:50:58 compute-0 sudo[252087]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:50:58 compute-0 sudo[252087]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:59 compute-0 sudo[252135]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:50:59 compute-0 sudo[252135]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:50:59 compute-0 sudo[252135]: pam_unix(sudo:session): session closed for user root
Oct 11 01:50:59 compute-0 sudo[252184]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 01:50:59 compute-0 sudo[252184]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:50:59 compute-0 sudo[252235]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rjktjuacvdgerrillkpirtidowwdlptl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147458.511612-57-241531297983958/AnsiballZ_stat.py'
Oct 11 01:50:59 compute-0 sudo[252235]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:50:59 compute-0 python3.9[252237]: ansible-ansible.builtin.stat Invoked with path=/etc/nftables/edpm-rules.nft.changed follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:50:59 compute-0 sudo[252235]: pam_unix(sudo:session): session closed for user root
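[annotation] The nft run at 01:50:58 loads the EDPM chain definitions, and the stat above then checks for a '.changed' marker next to the rule file. A sketch of that apply-then-check pattern; the rules path /etc/nftables/edpm-rules.nft and the marker semantics are inferred from the file names in these lines, not taken from the role's source:

    import os
    import subprocess

    # Load chain definitions, then reload the rule set only if a marker says
    # the rendered rules changed since the last apply (assumed semantics).
    subprocess.run(["nft", "-f", "/etc/nftables/edpm-chains.nft"], check=True)
    if os.path.exists("/etc/nftables/edpm-rules.nft.changed"):
        subprocess.run(["nft", "-f", "/etc/nftables/edpm-rules.nft"], check=True)
        os.remove("/etc/nftables/edpm-rules.nft.changed")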
Oct 11 01:50:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:50:59 compute-0 podman[157119]: time="2025-10-11T01:50:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:50:59 compute-0 podman[252301]: 2025-10-11 01:50:59.757669596 +0000 UTC m=+0.089718364 container create 0c0f4c1abc401b4b812086a20c61e0e48e77bb80b3fc7de81e24c772005340ad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=focused_goldwasser, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 01:50:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:50:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:50:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:50:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6822 "" "Go-http-client/1.1"
Oct 11 01:50:59 compute-0 podman[252301]: 2025-10-11 01:50:59.713210951 +0000 UTC m=+0.045259769 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:50:59 compute-0 systemd[1]: Started libpod-conmon-0c0f4c1abc401b4b812086a20c61e0e48e77bb80b3fc7de81e24c772005340ad.scope.
Oct 11 01:50:59 compute-0 ceph-mon[191930]: pgmap v384: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:50:59 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:50:59 compute-0 podman[252301]: 2025-10-11 01:50:59.925799259 +0000 UTC m=+0.257848087 container init 0c0f4c1abc401b4b812086a20c61e0e48e77bb80b3fc7de81e24c772005340ad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=focused_goldwasser, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:50:59 compute-0 podman[252301]: 2025-10-11 01:50:59.940699044 +0000 UTC m=+0.272747812 container start 0c0f4c1abc401b4b812086a20c61e0e48e77bb80b3fc7de81e24c772005340ad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=focused_goldwasser, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, OSD_FLAVOR=default)
Oct 11 01:50:59 compute-0 podman[252301]: 2025-10-11 01:50:59.947440365 +0000 UTC m=+0.279489133 container attach 0c0f4c1abc401b4b812086a20c61e0e48e77bb80b3fc7de81e24c772005340ad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=focused_goldwasser, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, ceph=True, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default)
Oct 11 01:50:59 compute-0 focused_goldwasser[252338]: 167 167
Oct 11 01:50:59 compute-0 systemd[1]: libpod-0c0f4c1abc401b4b812086a20c61e0e48e77bb80b3fc7de81e24c772005340ad.scope: Deactivated successfully.
Oct 11 01:50:59 compute-0 podman[252301]: 2025-10-11 01:50:59.956650733 +0000 UTC m=+0.288699501 container died 0c0f4c1abc401b4b812086a20c61e0e48e77bb80b3fc7de81e24c772005340ad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=focused_goldwasser, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS)
Oct 11 01:51:00 compute-0 systemd[1]: var-lib-containers-storage-overlay-b64b5f407626c2a6fc5c26321558462e495656009b9de3895cf5bec7bea34a67-merged.mount: Deactivated successfully.
Oct 11 01:51:00 compute-0 podman[252301]: 2025-10-11 01:51:00.037557449 +0000 UTC m=+0.369606187 container remove 0c0f4c1abc401b4b812086a20c61e0e48e77bb80b3fc7de81e24c772005340ad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=focused_goldwasser, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:51:00 compute-0 systemd[1]: libpod-conmon-0c0f4c1abc401b4b812086a20c61e0e48e77bb80b3fc7de81e24c772005340ad.scope: Deactivated successfully.
Oct 11 01:51:00 compute-0 podman[252392]: 2025-10-11 01:51:00.32985756 +0000 UTC m=+0.105409102 container create dd8130b7a17bd6aa09b25eac008c3d4f73046954d36597540be78c8ba3dc3d50 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gifted_hawking, CEPH_REF=reef, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:51:00 compute-0 podman[252392]: 2025-10-11 01:51:00.287443451 +0000 UTC m=+0.062995063 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:51:00 compute-0 systemd[1]: Started libpod-conmon-dd8130b7a17bd6aa09b25eac008c3d4f73046954d36597540be78c8ba3dc3d50.scope.
Oct 11 01:51:00 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:51:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/345e22c3f3e86773bfd2c71575d74cf81685b43185ac63f3b3c3923e45534462/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:51:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/345e22c3f3e86773bfd2c71575d74cf81685b43185ac63f3b3c3923e45534462/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:51:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/345e22c3f3e86773bfd2c71575d74cf81685b43185ac63f3b3c3923e45534462/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:51:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/345e22c3f3e86773bfd2c71575d74cf81685b43185ac63f3b3c3923e45534462/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:51:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/345e22c3f3e86773bfd2c71575d74cf81685b43185ac63f3b3c3923e45534462/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:51:00 compute-0 podman[252392]: 2025-10-11 01:51:00.509185366 +0000 UTC m=+0.284736948 container init dd8130b7a17bd6aa09b25eac008c3d4f73046954d36597540be78c8ba3dc3d50 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gifted_hawking, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:51:00 compute-0 podman[252392]: 2025-10-11 01:51:00.542126037 +0000 UTC m=+0.317677559 container start dd8130b7a17bd6aa09b25eac008c3d4f73046954d36597540be78c8ba3dc3d50 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gifted_hawking, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3)
Oct 11 01:51:00 compute-0 podman[252392]: 2025-10-11 01:51:00.551975427 +0000 UTC m=+0.327526979 container attach dd8130b7a17bd6aa09b25eac008c3d4f73046954d36597540be78c8ba3dc3d50 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gifted_hawking, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default)
Oct 11 01:51:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v385: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:00 compute-0 sudo[252486]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iczkhhikmkdhyicgkqihkqvqphezoiou ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147459.849313-66-276594217518522/AnsiballZ_file.py'
Oct 11 01:51:00 compute-0 sudo[252486]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:00 compute-0 python3.9[252488]: ansible-ansible.builtin.file Invoked with path=/etc/nftables/edpm-rules.nft.changed state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:51:01 compute-0 sudo[252486]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:01 compute-0 sshd-session[251223]: Connection closed by 192.168.122.30 port 35912
Oct 11 01:51:01 compute-0 openstack_network_exporter[159265]: ERROR   01:51:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:51:01 compute-0 openstack_network_exporter[159265]: ERROR   01:51:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:51:01 compute-0 sshd-session[251220]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:51:01 compute-0 openstack_network_exporter[159265]: ERROR   01:51:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:51:01 compute-0 openstack_network_exporter[159265]: ERROR   01:51:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:51:01 compute-0 openstack_network_exporter[159265]: ERROR   01:51:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:51:01 compute-0 systemd[1]: session-49.scope: Deactivated successfully.
Oct 11 01:51:01 compute-0 systemd[1]: session-49.scope: Consumed 7.188s CPU time.
Oct 11 01:51:01 compute-0 systemd-logind[804]: Session 49 logged out. Waiting for processes to exit.
Oct 11 01:51:01 compute-0 systemd-logind[804]: Removed session 49.
Oct 11 01:51:01 compute-0 ceph-mon[191930]: pgmap v385: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:01 compute-0 gifted_hawking[252431]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:51:01 compute-0 gifted_hawking[252431]: --> relative data size: 1.0
Oct 11 01:51:01 compute-0 gifted_hawking[252431]: --> All data devices are unavailable
Oct 11 01:51:01 compute-0 systemd[1]: libpod-dd8130b7a17bd6aa09b25eac008c3d4f73046954d36597540be78c8ba3dc3d50.scope: Deactivated successfully.
Oct 11 01:51:01 compute-0 systemd[1]: libpod-dd8130b7a17bd6aa09b25eac008c3d4f73046954d36597540be78c8ba3dc3d50.scope: Consumed 1.357s CPU time.
Oct 11 01:51:01 compute-0 conmon[252431]: conmon dd8130b7a17bd6aa09b2 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-dd8130b7a17bd6aa09b25eac008c3d4f73046954d36597540be78c8ba3dc3d50.scope/container/memory.events
Oct 11 01:51:01 compute-0 podman[252392]: 2025-10-11 01:51:01.955478832 +0000 UTC m=+1.731030514 container died dd8130b7a17bd6aa09b25eac008c3d4f73046954d36597540be78c8ba3dc3d50 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gifted_hawking, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 01:51:02 compute-0 systemd[1]: var-lib-containers-storage-overlay-345e22c3f3e86773bfd2c71575d74cf81685b43185ac63f3b3c3923e45534462-merged.mount: Deactivated successfully.
Oct 11 01:51:02 compute-0 podman[252392]: 2025-10-11 01:51:02.060320482 +0000 UTC m=+1.835872024 container remove dd8130b7a17bd6aa09b25eac008c3d4f73046954d36597540be78c8ba3dc3d50 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gifted_hawking, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507)
Oct 11 01:51:02 compute-0 systemd[1]: libpod-conmon-dd8130b7a17bd6aa09b25eac008c3d4f73046954d36597540be78c8ba3dc3d50.scope: Deactivated successfully.
Oct 11 01:51:02 compute-0 sudo[252184]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:02 compute-0 sudo[252551]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:51:02 compute-0 sudo[252551]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:51:02 compute-0 sudo[252551]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:02 compute-0 sudo[252576]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:51:02 compute-0 sudo[252576]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:51:02 compute-0 sudo[252576]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:02 compute-0 sudo[252601]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:51:02 compute-0 sudo[252601]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:51:02 compute-0 sudo[252601]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v386: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:02 compute-0 sudo[252626]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:51:02 compute-0 sudo[252626]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:51:03 compute-0 podman[252691]: 2025-10-11 01:51:03.281025712 +0000 UTC m=+0.094039508 container create a576e5d87e0b76098806e29ed1dc56e56e96d5bd0cecded4d6bc8305d60d4073 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_bouman, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:51:03 compute-0 podman[252691]: 2025-10-11 01:51:03.243779108 +0000 UTC m=+0.056792954 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:51:03 compute-0 systemd[1]: Started libpod-conmon-a576e5d87e0b76098806e29ed1dc56e56e96d5bd0cecded4d6bc8305d60d4073.scope.
Oct 11 01:51:03 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:51:03 compute-0 podman[252691]: 2025-10-11 01:51:03.436443525 +0000 UTC m=+0.249457381 container init a576e5d87e0b76098806e29ed1dc56e56e96d5bd0cecded4d6bc8305d60d4073 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_bouman, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True)
Oct 11 01:51:03 compute-0 podman[252691]: 2025-10-11 01:51:03.455299383 +0000 UTC m=+0.268313169 container start a576e5d87e0b76098806e29ed1dc56e56e96d5bd0cecded4d6bc8305d60d4073 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_bouman, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS)
Oct 11 01:51:03 compute-0 podman[252691]: 2025-10-11 01:51:03.463037396 +0000 UTC m=+0.276051232 container attach a576e5d87e0b76098806e29ed1dc56e56e96d5bd0cecded4d6bc8305d60d4073 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_bouman, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:51:03 compute-0 serene_bouman[252707]: 167 167
Oct 11 01:51:03 compute-0 systemd[1]: libpod-a576e5d87e0b76098806e29ed1dc56e56e96d5bd0cecded4d6bc8305d60d4073.scope: Deactivated successfully.
Oct 11 01:51:03 compute-0 podman[252691]: 2025-10-11 01:51:03.466594647 +0000 UTC m=+0.279608433 container died a576e5d87e0b76098806e29ed1dc56e56e96d5bd0cecded4d6bc8305d60d4073 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_bouman, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:51:03 compute-0 systemd[1]: var-lib-containers-storage-overlay-5a61425c598eea22fd7a13e7b85ae47b324e1138a70d26763ec20249230fa4c7-merged.mount: Deactivated successfully.
Oct 11 01:51:03 compute-0 podman[252691]: 2025-10-11 01:51:03.548786087 +0000 UTC m=+0.361799893 container remove a576e5d87e0b76098806e29ed1dc56e56e96d5bd0cecded4d6bc8305d60d4073 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_bouman, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:51:03 compute-0 systemd[1]: libpod-conmon-a576e5d87e0b76098806e29ed1dc56e56e96d5bd0cecded4d6bc8305d60d4073.scope: Deactivated successfully.
Oct 11 01:51:03 compute-0 podman[252730]: 2025-10-11 01:51:03.867453778 +0000 UTC m=+0.095125710 container create 5ee5c377dd4fbf123229315f8aff4234f5c018e5b0e0b2d227fff0220f2cccd3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_swartz, org.label-schema.vendor=CentOS, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:51:03 compute-0 ceph-mon[191930]: pgmap v386: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:03 compute-0 podman[252730]: 2025-10-11 01:51:03.83179589 +0000 UTC m=+0.059467862 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:51:03 compute-0 systemd[1]: Started libpod-conmon-5ee5c377dd4fbf123229315f8aff4234f5c018e5b0e0b2d227fff0220f2cccd3.scope.
Oct 11 01:51:04 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:51:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3d16166c458af7966f646964a4c5fe78a07a355d89e0f9d1cd058ef0c94db865/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:51:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3d16166c458af7966f646964a4c5fe78a07a355d89e0f9d1cd058ef0c94db865/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:51:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3d16166c458af7966f646964a4c5fe78a07a355d89e0f9d1cd058ef0c94db865/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:51:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3d16166c458af7966f646964a4c5fe78a07a355d89e0f9d1cd058ef0c94db865/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:51:04 compute-0 podman[252730]: 2025-10-11 01:51:04.035143349 +0000 UTC m=+0.262815321 container init 5ee5c377dd4fbf123229315f8aff4234f5c018e5b0e0b2d227fff0220f2cccd3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_swartz, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 01:51:04 compute-0 podman[252730]: 2025-10-11 01:51:04.055406111 +0000 UTC m=+0.283078033 container start 5ee5c377dd4fbf123229315f8aff4234f5c018e5b0e0b2d227fff0220f2cccd3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_swartz, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:51:04 compute-0 podman[252730]: 2025-10-11 01:51:04.062533713 +0000 UTC m=+0.290205675 container attach 5ee5c377dd4fbf123229315f8aff4234f5c018e5b0e0b2d227fff0220f2cccd3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_swartz, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0)
Oct 11 01:51:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v387: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:51:04 compute-0 gracious_swartz[252746]: {
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:     "0": [
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:         {
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "devices": [
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "/dev/loop3"
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             ],
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "lv_name": "ceph_lv0",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "lv_size": "21470642176",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "name": "ceph_lv0",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "tags": {
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.cluster_name": "ceph",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.crush_device_class": "",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.encrypted": "0",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.osd_id": "0",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.type": "block",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.vdo": "0"
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             },
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "type": "block",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "vg_name": "ceph_vg0"
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:         }
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:     ],
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:     "1": [
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:         {
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "devices": [
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "/dev/loop4"
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             ],
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "lv_name": "ceph_lv1",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "lv_size": "21470642176",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "name": "ceph_lv1",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "tags": {
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.cluster_name": "ceph",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.crush_device_class": "",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.encrypted": "0",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.osd_id": "1",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.type": "block",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.vdo": "0"
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             },
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "type": "block",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "vg_name": "ceph_vg1"
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:         }
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:     ],
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:     "2": [
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:         {
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "devices": [
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "/dev/loop5"
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             ],
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "lv_name": "ceph_lv2",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "lv_size": "21470642176",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "name": "ceph_lv2",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "tags": {
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.cluster_name": "ceph",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.crush_device_class": "",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.encrypted": "0",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.osd_id": "2",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.type": "block",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:                 "ceph.vdo": "0"
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             },
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "type": "block",
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:             "vg_name": "ceph_vg2"
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:         }
Oct 11 01:51:04 compute-0 gracious_swartz[252746]:     ]
Oct 11 01:51:04 compute-0 gracious_swartz[252746]: }
Oct 11 01:51:04 compute-0 systemd[1]: libpod-5ee5c377dd4fbf123229315f8aff4234f5c018e5b0e0b2d227fff0220f2cccd3.scope: Deactivated successfully.
Oct 11 01:51:04 compute-0 podman[252755]: 2025-10-11 01:51:04.995713305 +0000 UTC m=+0.080380615 container died 5ee5c377dd4fbf123229315f8aff4234f5c018e5b0e0b2d227fff0220f2cccd3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_swartz, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 01:51:05 compute-0 systemd[1]: var-lib-containers-storage-overlay-3d16166c458af7966f646964a4c5fe78a07a355d89e0f9d1cd058ef0c94db865-merged.mount: Deactivated successfully.
Oct 11 01:51:05 compute-0 podman[252755]: 2025-10-11 01:51:05.102465771 +0000 UTC m=+0.187133061 container remove 5ee5c377dd4fbf123229315f8aff4234f5c018e5b0e0b2d227fff0220f2cccd3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_swartz, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507)
Oct 11 01:51:05 compute-0 systemd[1]: libpod-conmon-5ee5c377dd4fbf123229315f8aff4234f5c018e5b0e0b2d227fff0220f2cccd3.scope: Deactivated successfully.
Oct 11 01:51:05 compute-0 sudo[252626]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:05 compute-0 sudo[252769]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:51:05 compute-0 sudo[252769]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:51:05 compute-0 sudo[252769]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:05 compute-0 sudo[252794]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:51:05 compute-0 sudo[252794]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:51:05 compute-0 sudo[252794]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:05 compute-0 sudo[252819]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:51:05 compute-0 sudo[252819]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:51:05 compute-0 sudo[252819]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:05 compute-0 sudo[252844]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:51:05 compute-0 sudo[252844]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:51:05 compute-0 ceph-mon[191930]: pgmap v387: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 01:51:06 compute-0 podman[252910]: 2025-10-11 01:51:06.425574173 +0000 UTC m=+0.087758839 container create eb587b2f8821a3aef0f1d990547b991cf3c0619cdfeb32c06b4bcbdb3077e6c9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_cannon, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2)
Oct 11 01:51:06 compute-0 podman[252910]: 2025-10-11 01:51:06.390835077 +0000 UTC m=+0.053019833 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:51:06 compute-0 systemd[1]: Started libpod-conmon-eb587b2f8821a3aef0f1d990547b991cf3c0619cdfeb32c06b4bcbdb3077e6c9.scope.
Oct 11 01:51:06 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:51:06 compute-0 podman[252910]: 2025-10-11 01:51:06.572119179 +0000 UTC m=+0.234303925 container init eb587b2f8821a3aef0f1d990547b991cf3c0619cdfeb32c06b4bcbdb3077e6c9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_cannon, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:51:06 compute-0 podman[252910]: 2025-10-11 01:51:06.590326545 +0000 UTC m=+0.252511231 container start eb587b2f8821a3aef0f1d990547b991cf3c0619cdfeb32c06b4bcbdb3077e6c9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_cannon, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 01:51:06 compute-0 podman[252910]: 2025-10-11 01:51:06.596512633 +0000 UTC m=+0.258697289 container attach eb587b2f8821a3aef0f1d990547b991cf3c0619cdfeb32c06b4bcbdb3077e6c9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_cannon, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:51:06 compute-0 nervous_cannon[252924]: 167 167
Oct 11 01:51:06 compute-0 systemd[1]: libpod-eb587b2f8821a3aef0f1d990547b991cf3c0619cdfeb32c06b4bcbdb3077e6c9.scope: Deactivated successfully.
Oct 11 01:51:06 compute-0 podman[252910]: 2025-10-11 01:51:06.602482142 +0000 UTC m=+0.264666798 container died eb587b2f8821a3aef0f1d990547b991cf3c0619cdfeb32c06b4bcbdb3077e6c9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_cannon, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:51:06 compute-0 systemd[1]: var-lib-containers-storage-overlay-acdc43acee3ecb81e7ed55cc599303f2176d6c4d5df4c5b170afbed7fd7381da-merged.mount: Deactivated successfully.
Oct 11 01:51:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v388: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:06 compute-0 podman[252910]: 2025-10-11 01:51:06.670090548 +0000 UTC m=+0.332275244 container remove eb587b2f8821a3aef0f1d990547b991cf3c0619cdfeb32c06b4bcbdb3077e6c9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_cannon, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 01:51:06 compute-0 systemd[1]: libpod-conmon-eb587b2f8821a3aef0f1d990547b991cf3c0619cdfeb32c06b4bcbdb3077e6c9.scope: Deactivated successfully.
Oct 11 01:51:06 compute-0 podman[252949]: 2025-10-11 01:51:06.927395952 +0000 UTC m=+0.080298976 container create f95ee1d0de3b612bc43f6ae68084da28f22ac216004b3d2c5f66275aecc15016 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_lovelace, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 01:51:06 compute-0 podman[252949]: 2025-10-11 01:51:06.8940874 +0000 UTC m=+0.046990474 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:51:07 compute-0 systemd[1]: Started libpod-conmon-f95ee1d0de3b612bc43f6ae68084da28f22ac216004b3d2c5f66275aecc15016.scope.
Oct 11 01:51:07 compute-0 sshd-session[252961]: Accepted publickey for zuul from 192.168.122.30 port 54182 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:51:07 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:51:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5a36162546b2e180d01ffc48375293fd7f0b0169f4fe5f8ee380fd9fdac3fbb5/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:51:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5a36162546b2e180d01ffc48375293fd7f0b0169f4fe5f8ee380fd9fdac3fbb5/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:51:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5a36162546b2e180d01ffc48375293fd7f0b0169f4fe5f8ee380fd9fdac3fbb5/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:51:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5a36162546b2e180d01ffc48375293fd7f0b0169f4fe5f8ee380fd9fdac3fbb5/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:51:07 compute-0 systemd-logind[804]: New session 50 of user zuul.
Oct 11 01:51:07 compute-0 systemd[1]: Started Session 50 of User zuul.
Oct 11 01:51:07 compute-0 podman[252949]: 2025-10-11 01:51:07.121733964 +0000 UTC m=+0.274636988 container init f95ee1d0de3b612bc43f6ae68084da28f22ac216004b3d2c5f66275aecc15016 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_lovelace, io.buildah.version=1.39.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:51:07 compute-0 sshd-session[252961]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:51:07 compute-0 podman[252949]: 2025-10-11 01:51:07.144061412 +0000 UTC m=+0.296964426 container start f95ee1d0de3b612bc43f6ae68084da28f22ac216004b3d2c5f66275aecc15016 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_lovelace, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:51:07 compute-0 podman[252949]: 2025-10-11 01:51:07.150385191 +0000 UTC m=+0.303288215 container attach f95ee1d0de3b612bc43f6ae68084da28f22ac216004b3d2c5f66275aecc15016 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_lovelace, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS)
Oct 11 01:51:07 compute-0 ceph-mon[191930]: pgmap v388: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.937 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to be longer than the expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.939 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.939 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.940 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f8ed27f97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb8c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb1a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb200>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed2874260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed3ab42f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb350>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb90>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fa390>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.942 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb3b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbbf0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbc80>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27f9610>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb620>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f8ed27fbad0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f8ed27faff0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.949 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbe30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.951 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbec0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.952 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbf50>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed2329b50>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f8ed27fb110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f8ed27fb170>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f8ed27fb1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f8ed27fb230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f8ed2874230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.955 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f8ed27fb290>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.955 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.956 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f8ed5778d70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.956 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.956 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f8ed27fb650>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.956 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f8ed27fbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f8ed27fb320>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f8ed27fbb60>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f8ed27fa3f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f8ed27fb380>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f8ed27fbbc0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f8ed27fbc50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f8ed27fbce0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f8ed27fbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f8ed27fb590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f8ed27f95e0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f8ed27fb5f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f8ed27fbe00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f8ed27fbe90>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.962 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.962 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f8ed27fbf20>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.962 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.963 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.963 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.963 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.963 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.963 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:51:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]: {
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:         "osd_id": 1,
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:         "type": "bluestore"
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:     },
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:         "osd_id": 2,
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:         "type": "bluestore"
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:     },
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:         "osd_id": 0,
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:         "type": "bluestore"
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]:     }
Oct 11 01:51:08 compute-0 affectionate_lovelace[252968]: }
Oct 11 01:51:08 compute-0 systemd[1]: libpod-f95ee1d0de3b612bc43f6ae68084da28f22ac216004b3d2c5f66275aecc15016.scope: Deactivated successfully.
Oct 11 01:51:08 compute-0 systemd[1]: libpod-f95ee1d0de3b612bc43f6ae68084da28f22ac216004b3d2c5f66275aecc15016.scope: Consumed 1.197s CPU time.
Oct 11 01:51:08 compute-0 podman[252949]: 2025-10-11 01:51:08.339921145 +0000 UTC m=+1.492824139 container died f95ee1d0de3b612bc43f6ae68084da28f22ac216004b3d2c5f66275aecc15016 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_lovelace, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:51:08 compute-0 systemd[1]: var-lib-containers-storage-overlay-5a36162546b2e180d01ffc48375293fd7f0b0169f4fe5f8ee380fd9fdac3fbb5-merged.mount: Deactivated successfully.
Oct 11 01:51:08 compute-0 podman[252949]: 2025-10-11 01:51:08.450881603 +0000 UTC m=+1.603784597 container remove f95ee1d0de3b612bc43f6ae68084da28f22ac216004b3d2c5f66275aecc15016 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_lovelace, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:51:08 compute-0 systemd[1]: libpod-conmon-f95ee1d0de3b612bc43f6ae68084da28f22ac216004b3d2c5f66275aecc15016.scope: Deactivated successfully.
Oct 11 01:51:08 compute-0 sudo[252844]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:08 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:51:08 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:51:08 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:51:08 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:51:08 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev f88efe12-2ecb-4682-b49d-47d942fd9555 does not exist
Oct 11 01:51:08 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev b9c79b0b-3496-446e-bf47-7e86c4afbabd does not exist
Oct 11 01:51:08 compute-0 python3.9[253150]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:51:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v389: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:08 compute-0 sudo[253166]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:51:08 compute-0 sudo[253166]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:51:08 compute-0 sudo[253166]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:08 compute-0 sudo[253195]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:51:08 compute-0 sudo[253195]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:51:08 compute-0 sudo[253195]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:08 compute-0 podman[253219]: 2025-10-11 01:51:08.990917028 +0000 UTC m=+0.097052396 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 01:51:09 compute-0 podman[253220]: 2025-10-11 01:51:09.02088647 +0000 UTC m=+0.130208868 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc., vcs-type=git, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, version=9.6, maintainer=Red Hat, Inc., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.component=ubi9-minimal-container, container_name=openstack_network_exporter, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, name=ubi9-minimal, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, build-date=2025-08-20T13:12:41, config_id=edpm, distribution-scope=public, io.openshift.expose-services=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., managed_by=edpm_ansible, architecture=x86_64)
Oct 11 01:51:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 01:51:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 600.0 total, 600.0 interval
                                            Cumulative writes: 2039 writes, 9058 keys, 2039 commit groups, 1.0 writes per commit group, ingest: 0.01 GB, 0.02 MB/s
                                            Cumulative WAL: 2039 writes, 2039 syncs, 1.00 writes per sync, written: 0.01 GB, 0.02 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 2039 writes, 9058 keys, 2039 commit groups, 1.0 writes per commit group, ingest: 10.87 MB, 0.02 MB/s
                                            Interval WAL: 2039 writes, 2039 syncs, 1.00 writes per sync, written: 0.01 GB, 0.02 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0    121.5      0.07              0.04         3    0.022       0      0       0.0       0.0
                                              L6      1/0    6.51 MB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.6    131.2    116.6      0.11              0.07         2    0.057    7153    734       0.0       0.0
                                             Sum      1/0    6.51 MB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   2.6     82.5    118.4      0.18              0.11         5    0.036    7153    734       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   2.6     84.5    121.0      0.18              0.11         4    0.044    7153    734       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Low      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0    131.2    116.6      0.11              0.07         2    0.057    7153    734       0.0       0.0
                                            High      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0    128.9      0.06              0.04         2    0.031       0      0       0.0       0.0
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0     12.0      0.00              0.00         1    0.004       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.0 total, 600.0 interval
                                            Flush(GB): cumulative 0.008, interval 0.008
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.02 GB write, 0.04 MB/s write, 0.01 GB read, 0.02 MB/s read, 0.2 seconds
                                            Interval compaction: 0.02 GB write, 0.04 MB/s write, 0.01 GB read, 0.02 MB/s read, 0.2 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x55816e47f1f0#2 capacity: 308.00 MB usage: 645.52 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 0 last_secs: 8.7e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(37,554.55 KB,0.175828%) FilterBlock(6,27.80 KB,0.00881344%) IndexBlock(6,63.17 KB,0.0200296%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
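[Annotation: the indented block above is RocksDB's periodic statistics dump as re-logged by ceph-mon; the per-level latency histogram that normally follows this header is not captured in this excerpt. A minimal sketch (hypothetical helper, not part of Ceph or RocksDB) of pulling the headline counters out of such a dump, assuming the dump text is available as one string:]

import re

def summarize_rocksdb_dump(text: str) -> dict:
    # Hypothetical helper: extract a few headline numbers from a RocksDB
    # stats dump like the one above (Uptime, cumulative flush volume,
    # and the Stalls(count) counters). Field names follow the dump text.
    out = {}
    m = re.search(r"Uptime\(secs\): ([\d.]+) total", text)
    if m:
        out["uptime_secs"] = float(m.group(1))
    m = re.search(r"Flush\(GB\): cumulative ([\d.]+)", text)
    if m:
        out["flush_gb_cumulative"] = float(m.group(1))
    m = re.search(r"Stalls\(count\): (.+)", text)
    if m:
        # Approximate: pairs like "0 level0_slowdown"; multi-word labels
        # ("stop for pending_compaction_bytes") are truncated to one token.
        out["stalls"] = {name: int(n)
                         for n, name in re.findall(r"(\d+) ([a-z0-9_]+)", m.group(1))}
    return out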
Oct 11 01:51:09 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:51:09 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:51:09 compute-0 ceph-mon[191930]: pgmap v389: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:51:09 compute-0 sudo[253411]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aumppvftpoqbejvliitwxuncmzdlyiyj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147469.2673643-34-58246613004337/AnsiballZ_setup.py'
Oct 11 01:51:09 compute-0 sudo[253411]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:10 compute-0 python3.9[253413]: ansible-ansible.legacy.setup Invoked with filter=['ansible_pkg_mgr'] gather_subset=['!all'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:51:10 compute-0 sudo[253411]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v390: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:11 compute-0 sudo[253495]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nxtnxexzlbhpwmoaphyiknmamrjdvznh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147469.2673643-34-58246613004337/AnsiballZ_dnf.py'
Oct 11 01:51:11 compute-0 sudo[253495]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:11 compute-0 python3.9[253497]: ansible-ansible.legacy.dnf Invoked with name=['yum-utils'] allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None state=None
Oct 11 01:51:11 compute-0 ceph-mon[191930]: pgmap v390: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v391: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:12 compute-0 sudo[253495]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:13 compute-0 ceph-mon[191930]: pgmap v391: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:13 compute-0 python3.9[253648]: ansible-ansible.legacy.command Invoked with _raw_params=needs-restarting -r _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
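[Annotation: the `needs-restarting -r` run above is how the play decides whether the node wants a reboot; per the dnf-utils convention, the command exits 1 when a reboot is advised and 0 when it is not. A minimal sketch of the same check from Python (hypothetical wrapper, not part of the playbook):]

import subprocess

def reboot_required() -> bool:
    # `needs-restarting -r` (dnf-utils): exit 1 = reboot advised,
    # exit 0 = not needed; anything else is treated as an error here.
    proc = subprocess.run(["needs-restarting", "-r"],
                          capture_output=True, text=True)
    if proc.returncode not in (0, 1):
        raise RuntimeError(proc.stderr.strip())
    return proc.returncode == 1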
Oct 11 01:51:14 compute-0 podman[253650]: 2025-10-11 01:51:14.453471953 +0000 UTC m=+0.106266175 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.schema-version=1.0, tcib_managed=true, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
Oct 11 01:51:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v392: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:51:15 compute-0 python3.9[253819]: ansible-ansible.builtin.find Invoked with paths=['/var/lib/openstack/reboot_required/'] patterns=[] read_whole_file=False file_type=file age_stamp=mtime recurse=False hidden=False follow=False get_checksum=False checksum_algorithm=sha1 use_regex=False exact_mode=True excludes=None contains=None age=None size=None depth=None mode=None encoding=None limit=None
Oct 11 01:51:15 compute-0 ceph-mon[191930]: pgmap v392: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v393: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:16 compute-0 python3.9[253969]: ansible-ansible.builtin.stat Invoked with path=/var/lib/config-data/puppet-generated follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:51:17 compute-0 ceph-mon[191930]: pgmap v393: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:17 compute-0 python3.9[254119]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/config follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:51:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v394: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:19 compute-0 sshd-session[252973]: Connection closed by 192.168.122.30 port 54182
Oct 11 01:51:19 compute-0 sshd-session[252961]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:51:19 compute-0 systemd[1]: session-50.scope: Deactivated successfully.
Oct 11 01:51:19 compute-0 systemd[1]: session-50.scope: Consumed 9.437s CPU time.
Oct 11 01:51:19 compute-0 systemd-logind[804]: Session 50 logged out. Waiting for processes to exit.
Oct 11 01:51:19 compute-0 systemd-logind[804]: Removed session 50.
Oct 11 01:51:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:51:19 compute-0 ceph-mon[191930]: pgmap v394: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v395: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:21 compute-0 ceph-mon[191930]: pgmap v395: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:22 compute-0 podman[254147]: 2025-10-11 01:51:22.281143372 +0000 UTC m=+0.158638080 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 01:51:22 compute-0 podman[254149]: 2025-10-11 01:51:22.287455229 +0000 UTC m=+0.155957054 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., config_id=edpm, maintainer=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.buildah.version=1.29.0, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.component=ubi9-container, container_name=kepler, io.openshift.expose-services=, io.openshift.tags=base rhel9, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, version=9.4, build-date=2024-09-18T21:23:30, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, managed_by=edpm_ansible, vcs-type=git, vendor=Red Hat, Inc.)
Oct 11 01:51:22 compute-0 podman[254148]: 2025-10-11 01:51:22.315214709 +0000 UTC m=+0.188019975 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_managed=true, config_id=ovn_controller, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, container_name=ovn_controller, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']})
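[Annotation: the podman health_status records above embed each container's config_data as a Python-literal dict (single quotes, True/False), so it can be recovered with ast.literal_eval once the balanced braces are located. A sketch, assuming the journal line is available as a string and that no brace characters occur inside quoted values, which holds for the records above:]

import ast

def extract_config_data(journal_line: str) -> dict:
    # Locate the balanced {...} that follows "config_data=" and parse it
    # as a Python literal. journal_line.index raises ValueError if the
    # field is absent.
    start = journal_line.index("config_data=") + len("config_data=")
    depth = 0
    for i in range(start, len(journal_line)):
        if journal_line[i] == "{":
            depth += 1
        elif journal_line[i] == "}":
            depth -= 1
            if depth == 0:
                return ast.literal_eval(journal_line[start:i + 1])
    raise ValueError("unbalanced config_data braces")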
Oct 11 01:51:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v396: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:23 compute-0 ceph-mon[191930]: pgmap v396: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:24 compute-0 podman[254215]: 2025-10-11 01:51:24.264392727 +0000 UTC m=+0.149785287 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image)
Oct 11 01:51:24 compute-0 sshd-session[254235]: Accepted publickey for zuul from 192.168.122.30 port 40180 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:51:24 compute-0 systemd-logind[804]: New session 51 of user zuul.
Oct 11 01:51:24 compute-0 systemd[1]: Started Session 51 of User zuul.
Oct 11 01:51:24 compute-0 sshd-session[254235]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:51:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v397: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:51:25 compute-0 ceph-mon[191930]: pgmap v397: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:25 compute-0 python3.9[254388]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:51:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:51:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:51:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:51:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:51:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:51:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:51:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v398: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:27 compute-0 ceph-mon[191930]: pgmap v398: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:27 compute-0 sudo[254542]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ffgjxyeliooybfxlipiunyhgnkvzvohr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147487.2138495-50-266244084904413/AnsiballZ_file.py'
Oct 11 01:51:27 compute-0 sudo[254542]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:28 compute-0 python3.9[254544]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/ovn/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:51:28 compute-0 sudo[254542]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v399: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:28 compute-0 sudo[254694]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vgkcsxdhvvvnzdohmrfktnlwcxqufbti ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147488.4079065-50-76702351927432/AnsiballZ_file.py'
Oct 11 01:51:28 compute-0 sudo[254694]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:29 compute-0 python3.9[254696]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/ovn/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:51:29 compute-0 sudo[254694]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:51:29 compute-0 podman[157119]: time="2025-10-11T01:51:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:51:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:51:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:51:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:51:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6827 "" "Go-http-client/1.1"
Oct 11 01:51:29 compute-0 ceph-mon[191930]: pgmap v399: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:30 compute-0 sudo[254846]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nzhydczsnklwqdqkxrgymwjphyumgelm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147489.499178-65-195858471512405/AnsiballZ_stat.py'
Oct 11 01:51:30 compute-0 sudo[254846]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:30 compute-0 python3.9[254848]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/ovn/default/tls.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:51:30 compute-0 sudo[254846]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v400: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:31 compute-0 sudo[254924]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wwlxtyayrifxaeipuzbfuqdlvxohojoz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147489.499178-65-195858471512405/AnsiballZ_file.py'
Oct 11 01:51:31 compute-0 sudo[254924]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:31 compute-0 python3.9[254926]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/var/lib/openstack/certs/ovn/default/tls.crt _original_basename=compute-0.ctlplane.example.com-tls.crt recurse=False state=file path=/var/lib/openstack/certs/ovn/default/tls.crt force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:51:31 compute-0 sudo[254924]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:31 compute-0 openstack_network_exporter[159265]: ERROR   01:51:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:51:31 compute-0 openstack_network_exporter[159265]: ERROR   01:51:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:51:31 compute-0 openstack_network_exporter[159265]: ERROR   01:51:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:51:31 compute-0 openstack_network_exporter[159265]: ERROR   01:51:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:51:31 compute-0 openstack_network_exporter[159265]: ERROR   01:51:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:51:31 compute-0 ceph-mon[191930]: pgmap v400: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:32 compute-0 sudo[255076]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ztdsengwxezxeszoynoaqjeroatwuzls ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147491.586835-65-55112907797066/AnsiballZ_stat.py'
Oct 11 01:51:32 compute-0 sudo[255076]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:32 compute-0 python3.9[255078]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/ovn/default/ca.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:51:32 compute-0 sudo[255076]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v401: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:32 compute-0 sudo[255154]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jppqpxlkpaucpeqoaxwmabityrjjjvcq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147491.586835-65-55112907797066/AnsiballZ_file.py'
Oct 11 01:51:32 compute-0 sudo[255154]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:33 compute-0 python3.9[255156]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/var/lib/openstack/certs/ovn/default/ca.crt _original_basename=compute-0.ctlplane.example.com-ca.crt recurse=False state=file path=/var/lib/openstack/certs/ovn/default/ca.crt force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:51:33 compute-0 sudo[255154]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:33 compute-0 ceph-mon[191930]: pgmap v401: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:34 compute-0 sudo[255306]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gkchevjjnzpuqalqytckehpvhilxbjoa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147493.4121838-65-66827372378092/AnsiballZ_stat.py'
Oct 11 01:51:34 compute-0 sudo[255306]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:34 compute-0 python3.9[255308]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/ovn/default/tls.key follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:51:34 compute-0 sudo[255306]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v402: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:51:34 compute-0 sudo[255384]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wwaqxkrubabuymkfsicmmlazksjygctq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147493.4121838-65-66827372378092/AnsiballZ_file.py'
Oct 11 01:51:34 compute-0 sudo[255384]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:35 compute-0 python3.9[255386]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/var/lib/openstack/certs/ovn/default/tls.key _original_basename=compute-0.ctlplane.example.com-tls.key recurse=False state=file path=/var/lib/openstack/certs/ovn/default/tls.key force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:51:35 compute-0 sudo[255384]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:35 compute-0 sudo[255536]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yqjndfohxrmbcsendvmbuccwrlyiquld ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147495.36143-100-8839148723343/AnsiballZ_file.py'
Oct 11 01:51:35 compute-0 sudo[255536]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:35 compute-0 ceph-mon[191930]: pgmap v402: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:36 compute-0 python3.9[255538]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/telemetry/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:51:36 compute-0 sudo[255536]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v403: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:36 compute-0 sudo[255688]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yyylxdobeivituflrfeceykuqjppgpov ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147496.4294915-100-270867733140370/AnsiballZ_file.py'
Oct 11 01:51:36 compute-0 sudo[255688]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:37 compute-0 python3.9[255690]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/telemetry/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:51:37 compute-0 sudo[255688]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:37 compute-0 ceph-mon[191930]: pgmap v403: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:38 compute-0 sudo[255840]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ngzxtuzvrtsqysvdlflfndfekxutkozh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147497.5699923-115-102311527942789/AnsiballZ_stat.py'
Oct 11 01:51:38 compute-0 sudo[255840]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:38 compute-0 python3.9[255842]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/telemetry/default/tls.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:51:38 compute-0 sudo[255840]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v404: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:38 compute-0 sudo[255918]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oikpwzydipqeueatsvkuuvpvxcqbkkxj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147497.5699923-115-102311527942789/AnsiballZ_file.py'
Oct 11 01:51:38 compute-0 sudo[255918]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:39 compute-0 python3.9[255920]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/var/lib/openstack/certs/telemetry/default/tls.crt _original_basename=compute-0.ctlplane.example.com-tls.crt recurse=False state=file path=/var/lib/openstack/certs/telemetry/default/tls.crt force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:51:39 compute-0 sudo[255918]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:39 compute-0 podman[255922]: 2025-10-11 01:51:39.26110914 +0000 UTC m=+0.136939514 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, container_name=openstack_network_exporter, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.expose-services=, maintainer=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, config_id=edpm, url=https://catalog.redhat.com/en/search?searchType=containers, build-date=2025-08-20T13:12:41, version=9.6, vcs-type=git, vendor=Red Hat, Inc., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, name=ubi9-minimal, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.buildah.version=1.33.7, architecture=x86_64, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, managed_by=edpm_ansible, release=1755695350, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 01:51:39 compute-0 podman[255921]: 2025-10-11 01:51:39.275664557 +0000 UTC m=+0.157935278 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 01:51:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:51:39 compute-0 sudo[256112]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hthwjqeyapkcqwjklxerqsmmlzuvgdna ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147499.4102795-115-171195798383251/AnsiballZ_stat.py'
Oct 11 01:51:39 compute-0 sudo[256112]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:39 compute-0 ceph-mon[191930]: pgmap v404: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:40 compute-0 python3.9[256114]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/telemetry/default/ca.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:51:40 compute-0 sudo[256112]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:40 compute-0 sudo[256190]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bouaknexewuvcxwlferrrfbiebfiytyj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147499.4102795-115-171195798383251/AnsiballZ_file.py'
Oct 11 01:51:40 compute-0 sudo[256190]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v405: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:40 compute-0 python3.9[256192]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/var/lib/openstack/certs/telemetry/default/ca.crt _original_basename=compute-0.ctlplane.example.com-ca.crt recurse=False state=file path=/var/lib/openstack/certs/telemetry/default/ca.crt force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:51:40 compute-0 sudo[256190]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:41 compute-0 sudo[256342]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cmfcuneknqhfmqumiwwttofynsirdvqc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147501.2262049-115-28725403830716/AnsiballZ_stat.py'
Oct 11 01:51:41 compute-0 sudo[256342]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:41 compute-0 ceph-mon[191930]: pgmap v405: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:42 compute-0 python3.9[256344]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/telemetry/default/tls.key follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:51:42 compute-0 sudo[256342]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:42 compute-0 sudo[256420]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-liyugvrmsccfrhptgzudtyjxwsznqelh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147501.2262049-115-28725403830716/AnsiballZ_file.py'
Oct 11 01:51:42 compute-0 sudo[256420]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v406: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:42 compute-0 python3.9[256422]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/var/lib/openstack/certs/telemetry/default/tls.key _original_basename=compute-0.ctlplane.example.com-tls.key recurse=False state=file path=/var/lib/openstack/certs/telemetry/default/tls.key force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:51:42 compute-0 sudo[256420]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:43 compute-0 sudo[256572]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jhyzfiqfiatrrqfldslkjwlmwsuwwmyt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147503.1628058-150-252076799418730/AnsiballZ_file.py'
Oct 11 01:51:43 compute-0 sudo[256572]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:43 compute-0 python3.9[256574]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/neutron-metadata/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:51:43 compute-0 sudo[256572]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:44 compute-0 ceph-mon[191930]: pgmap v406: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v407: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:51:44 compute-0 sudo[256736]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-meyubkbxcyiqucuyffwdxvnnwusrsfhe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147504.2009978-150-263790653915670/AnsiballZ_file.py'
Oct 11 01:51:44 compute-0 sudo[256736]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:44 compute-0 podman[256698]: 2025-10-11 01:51:44.866369715 +0000 UTC m=+0.190211112 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, tcib_managed=true)
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #24. Immutable memtables: 0.
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:51:45.027523) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 7] Flushing memtable with next log file: 24
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147505027607, "job": 7, "event": "flush_started", "num_memtables": 1, "num_entries": 685, "num_deletes": 251, "total_data_size": 843999, "memory_usage": 856480, "flush_reason": "Manual Compaction"}
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 7] Level-0 flush table #25: started
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147505034190, "cf_name": "default", "job": 7, "event": "table_file_creation", "file_number": 25, "file_size": 836577, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 9028, "largest_seqno": 9712, "table_properties": {"data_size": 832977, "index_size": 1444, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1093, "raw_key_size": 7787, "raw_average_key_size": 18, "raw_value_size": 825806, "raw_average_value_size": 1961, "num_data_blocks": 67, "num_entries": 421, "num_filter_entries": 421, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760147449, "oldest_key_time": 1760147449, "file_creation_time": 1760147505, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 25, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 7] Flush lasted 6726 microseconds, and 3230 cpu microseconds.
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:51:45.034273) [db/flush_job.cc:967] [default] [JOB 7] Level-0 flush table #25: 836577 bytes OK
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:51:45.034291) [db/memtable_list.cc:519] [default] Level-0 commit table #25 started
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:51:45.036868) [db/memtable_list.cc:722] [default] Level-0 commit table #25: memtable #1 done
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:51:45.036879) EVENT_LOG_v1 {"time_micros": 1760147505036875, "job": 7, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:51:45.036899) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 7] Try to delete WAL files size 840417, prev total WAL file size 840417, number of live WAL files 2.
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000021.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:51:45.037641) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F7300323531' seq:72057594037927935, type:22 .. '7061786F7300353033' seq:0, type:0; will stop at (end)
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 8] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 7 Base level 0, inputs: [25(816KB)], [23(6661KB)]
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147505037685, "job": 8, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [25], "files_L6": [23], "score": -1, "input_data_size": 7658213, "oldest_snapshot_seqno": -1}
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 8] Generated table #26: 3294 keys, 6136334 bytes, temperature: kUnknown
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147505065518, "cf_name": "default", "job": 8, "event": "table_file_creation", "file_number": 26, "file_size": 6136334, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 6112281, "index_size": 14728, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 8261, "raw_key_size": 79862, "raw_average_key_size": 24, "raw_value_size": 6050579, "raw_average_value_size": 1836, "num_data_blocks": 644, "num_entries": 3294, "num_filter_entries": 3294, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760147505, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 26, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:51:45.065682) [db/compaction/compaction_job.cc:1663] [default] [JOB 8] Compacted 1@0 + 1@6 files to L6 => 6136334 bytes
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:51:45.067489) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 274.8 rd, 220.2 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(0.8, 6.5 +0.0 blob) out(5.9 +0.0 blob), read-write-amplify(16.5) write-amplify(7.3) OK, records in: 3808, records dropped: 514 output_compression: NoCompression
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:51:45.067508) EVENT_LOG_v1 {"time_micros": 1760147505067499, "job": 8, "event": "compaction_finished", "compaction_time_micros": 27869, "compaction_time_cpu_micros": 14150, "output_level": 6, "num_output_files": 1, "total_output_size": 6136334, "num_input_records": 3808, "num_output_records": 3294, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000025.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147505067759, "job": 8, "event": "table_file_deletion", "file_number": 25}
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000023.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147505068970, "job": 8, "event": "table_file_deletion", "file_number": 23}
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:51:45.037495) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:51:45.069348) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:51:45.069356) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:51:45.069360) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:51:45.069363) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:51:45 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:51:45.069366) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:51:45 compute-0 python3.9[256742]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/neutron-metadata/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:51:45 compute-0 sudo[256736]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:46 compute-0 ceph-mon[191930]: pgmap v407: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:46 compute-0 sudo[256893]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jfowianvcrzhpzpizksgtexqyuzzxuan ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147505.503298-165-89551079164750/AnsiballZ_stat.py'
Oct 11 01:51:46 compute-0 sudo[256893]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:46 compute-0 python3.9[256895]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/neutron-metadata/default/tls.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:51:46 compute-0 sudo[256893]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v408: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:47 compute-0 sudo[257016]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rfcopkvkaxmeutqihhojnfgjvnycqyat ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147505.503298-165-89551079164750/AnsiballZ_copy.py'
Oct 11 01:51:47 compute-0 sudo[257016]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:47 compute-0 python3.9[257018]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/certs/neutron-metadata/default/tls.crt group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760147505.503298-165-89551079164750/.source.crt _original_basename=compute-0.ctlplane.example.com-tls.crt follow=False checksum=3a7583000764d72410d5d551ca52e13b6b7c13c1 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:51:47 compute-0 sudo[257016]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:48 compute-0 ceph-mon[191930]: pgmap v408: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:48 compute-0 sudo[257168]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ixvsgmslgridkcursqfgjkmpmbrrradt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147507.685313-165-172941362186523/AnsiballZ_stat.py'
Oct 11 01:51:48 compute-0 sudo[257168]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:48 compute-0 python3.9[257170]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/neutron-metadata/default/ca.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:51:48 compute-0 sudo[257168]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v409: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:49 compute-0 sudo[257291]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fwkvkppjiolqslvdhrlitqeaqxsneahc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147507.685313-165-172941362186523/AnsiballZ_copy.py'
Oct 11 01:51:49 compute-0 sudo[257291]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:49 compute-0 python3.9[257293]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/certs/neutron-metadata/default/ca.crt group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760147507.685313-165-172941362186523/.source.crt _original_basename=compute-0.ctlplane.example.com-ca.crt follow=False checksum=dd1ca38573e4329f99bc06f928c8186c0e5d69d9 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:51:49 compute-0 sudo[257291]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:51:50 compute-0 ceph-mon[191930]: pgmap v409: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:50 compute-0 sudo[257444]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zccedapsrhewlyznndgxuqdhftlpplzb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147509.7161891-165-25471342753607/AnsiballZ_stat.py'
Oct 11 01:51:50 compute-0 sudo[257444]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:50 compute-0 python3.9[257446]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/neutron-metadata/default/tls.key follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:51:50 compute-0 sudo[257444]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v410: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:51 compute-0 sudo[257567]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gyrjoyufmrmfcateffjiisaakuwzhdyd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147509.7161891-165-25471342753607/AnsiballZ_copy.py'
Oct 11 01:51:51 compute-0 sudo[257567]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:51 compute-0 python3.9[257569]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/certs/neutron-metadata/default/tls.key group=root mode=0600 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760147509.7161891-165-25471342753607/.source.key _original_basename=compute-0.ctlplane.example.com-tls.key follow=False checksum=edce2d7bd42c10fda094bc5c0314f198e1f23cce backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:51:51 compute-0 sudo[257567]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:52 compute-0 ceph-mon[191930]: pgmap v410: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:52 compute-0 podman[257693]: 2025-10-11 01:51:52.534926793 +0000 UTC m=+0.120860893 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 01:51:52 compute-0 sudo[257766]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yqovtdfhqwrdsomfhiehstwmaqwlkaze ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147511.9446504-209-206652821057407/AnsiballZ_file.py'
Oct 11 01:51:52 compute-0 podman[257695]: 2025-10-11 01:51:52.552471848 +0000 UTC m=+0.119783690 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, release=1214.1726694543, name=ubi9, maintainer=Red Hat, Inc., vcs-type=git, release-0.7.12=, summary=Provides the latest release of Red Hat Universal Base Image 9., architecture=x86_64, container_name=kepler, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2024-09-18T21:23:30, vendor=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.openshift.tags=base rhel9, managed_by=edpm_ansible, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.openshift.expose-services=, config_id=edpm, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, io.buildah.version=1.29.0, version=9.4, com.redhat.component=ubi9-container)
Oct 11 01:51:52 compute-0 sudo[257766]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:52 compute-0 podman[257694]: 2025-10-11 01:51:52.58810868 +0000 UTC m=+0.162810921 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, tcib_managed=true, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3)
Oct 11 01:51:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v411: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:52 compute-0 python3.9[257784]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/libvirt/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:51:52 compute-0 sudo[257766]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:53 compute-0 sudo[257937]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-emhypfilkiukpgukvxnjzpwwrlswdufy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147513.0919893-209-13684065794069/AnsiballZ_file.py'
Oct 11 01:51:53 compute-0 sudo[257937]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:53 compute-0 python3.9[257939]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/libvirt/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:51:53 compute-0 sudo[257937]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:54 compute-0 ceph-mon[191930]: pgmap v411: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:51:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v412: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:54 compute-0 sudo[258107]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yupdobnzxqzsvljjcudmqtghsgljlmgf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147514.2462254-224-208086976958663/AnsiballZ_stat.py'
Oct 11 01:51:54 compute-0 sudo[258107]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:54 compute-0 podman[258063]: 2025-10-11 01:51:54.866184215 +0000 UTC m=+0.142516339 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_compute, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible)
Oct 11 01:51:55 compute-0 python3.9[258110]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/libvirt/default/tls.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:51:55 compute-0 sudo[258107]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:55 compute-0 sudo[258186]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oaahwkiwyqqksjzhhuwytiwpfvwglkpz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147514.2462254-224-208086976958663/AnsiballZ_file.py'
Oct 11 01:51:55 compute-0 sudo[258186]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:55 compute-0 python3.9[258188]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/var/lib/openstack/certs/libvirt/default/tls.crt _original_basename=compute-0.ctlplane.example.com-tls.crt recurse=False state=file path=/var/lib/openstack/certs/libvirt/default/tls.crt force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:51:55 compute-0 sudo[258186]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:56 compute-0 ceph-mon[191930]: pgmap v412: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:51:56
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['cephfs.cephfs.data', 'images', 'default.rgw.log', 'default.rgw.control', 'vms', '.rgw.root', '.mgr', 'default.rgw.meta', 'cephfs.cephfs.meta', 'backups', 'volumes']
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:51:56 compute-0 sudo[258338]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ufjjitcyahzdgjuiyojwswmyoivengac ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147516.0411386-224-182650797611201/AnsiballZ_stat.py'
Oct 11 01:51:56 compute-0 sudo[258338]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:51:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v413: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:56 compute-0 python3.9[258340]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/libvirt/default/ca.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:51:56 compute-0 sudo[258338]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:57 compute-0 sudo[258416]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ritypdmpxxtjkupyeycenewwqnivrdtc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147516.0411386-224-182650797611201/AnsiballZ_file.py'
Oct 11 01:51:57 compute-0 sudo[258416]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:57 compute-0 python3.9[258418]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/var/lib/openstack/certs/libvirt/default/ca.crt _original_basename=compute-0.ctlplane.example.com-ca.crt recurse=False state=file path=/var/lib/openstack/certs/libvirt/default/ca.crt force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:51:57 compute-0 sudo[258416]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:58 compute-0 ceph-mon[191930]: pgmap v413: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:58 compute-0 sudo[258569]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nwakyuboudxtsegefghawbrczvfpwlsk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147517.7875953-224-67867973555989/AnsiballZ_stat.py'
Oct 11 01:51:58 compute-0 sudo[258569]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:58 compute-0 python3.9[258571]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/libvirt/default/tls.key follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:51:58 compute-0 sudo[258569]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v414: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:51:59 compute-0 sudo[258647]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tzcbzunzchmrtlqmmrgimbhgqhnzdepc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147517.7875953-224-67867973555989/AnsiballZ_file.py'
Oct 11 01:51:59 compute-0 sudo[258647]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:51:59 compute-0 python3.9[258649]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/var/lib/openstack/certs/libvirt/default/tls.key _original_basename=compute-0.ctlplane.example.com-tls.key recurse=False state=file path=/var/lib/openstack/certs/libvirt/default/tls.key force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:51:59 compute-0 sudo[258647]: pam_unix(sudo:session): session closed for user root
Oct 11 01:51:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:51:59 compute-0 podman[157119]: time="2025-10-11T01:51:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:51:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:51:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:51:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:51:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6825 "" "Go-http-client/1.1"
Oct 11 01:52:00 compute-0 ceph-mon[191930]: pgmap v414: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:00 compute-0 sudo[258799]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yllwuiwjvefpuizwsgpwieguhndbmbwb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147519.7185605-259-13580640577505/AnsiballZ_file.py'
Oct 11 01:52:00 compute-0 sudo[258799]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:00 compute-0 python3.9[258801]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/telemetry-power-monitoring/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:52:00 compute-0 sudo[258799]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v415: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:01 compute-0 openstack_network_exporter[159265]: ERROR   01:52:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:52:01 compute-0 openstack_network_exporter[159265]: ERROR   01:52:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:52:01 compute-0 openstack_network_exporter[159265]: ERROR   01:52:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:52:01 compute-0 openstack_network_exporter[159265]: ERROR   01:52:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:52:01 compute-0 openstack_network_exporter[159265]: ERROR   01:52:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:52:01 compute-0 sudo[258951]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rgtmkmgpkesuczkvxlxznjjbywevrxiq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147520.7788126-259-79967068186939/AnsiballZ_file.py'
Oct 11 01:52:01 compute-0 sudo[258951]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:01 compute-0 python3.9[258953]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/certs/telemetry-power-monitoring/default setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:52:01 compute-0 sudo[258951]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:02 compute-0 ceph-mon[191930]: pgmap v415: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v416: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:02 compute-0 sudo[259103]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-psknujiocgvifnxaxnoduzdybetckuvh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147522.105622-274-50714208623660/AnsiballZ_stat.py'
Oct 11 01:52:02 compute-0 sudo[259103]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:03 compute-0 python3.9[259105]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:52:03 compute-0 sudo[259103]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:03 compute-0 sudo[259181]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xwmrmxytjdvncsecmlcaxqbiiikppicp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147522.105622-274-50714208623660/AnsiballZ_file.py'
Oct 11 01:52:03 compute-0 sudo[259181]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:03 compute-0 python3.9[259183]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.crt _original_basename=compute-0.ctlplane.example.com-tls.crt recurse=False state=file path=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.crt force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:52:03 compute-0 sudo[259181]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:04 compute-0 ceph-mon[191930]: pgmap v416: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:04 compute-0 sudo[259333]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-opvqmdwdmqeiwkptocyeazatltwirhmk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147524.0676005-274-185242285680000/AnsiballZ_stat.py'
Oct 11 01:52:04 compute-0 sudo[259333]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:52:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v417: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:04 compute-0 python3.9[259335]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/telemetry-power-monitoring/default/ca.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:52:04 compute-0 sudo[259333]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:05 compute-0 sudo[259411]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zoozbdvvteqfqtitzvzviirnujrtkrhb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147524.0676005-274-185242285680000/AnsiballZ_file.py'
Oct 11 01:52:05 compute-0 sudo[259411]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:05 compute-0 python3.9[259413]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/var/lib/openstack/certs/telemetry-power-monitoring/default/ca.crt _original_basename=compute-0.ctlplane.example.com-ca.crt recurse=False state=file path=/var/lib/openstack/certs/telemetry-power-monitoring/default/ca.crt force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:52:05 compute-0 sudo[259411]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:06 compute-0 ceph-mon[191930]: pgmap v417: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 01:52:06 compute-0 sudo[259563]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lbyychbjptbeqoghacrdfqvydaakneai ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147525.9487321-274-252962747550309/AnsiballZ_stat.py'
Oct 11 01:52:06 compute-0 sudo[259563]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v418: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:06 compute-0 python3.9[259565]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.key follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:52:06 compute-0 sudo[259563]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:07 compute-0 sudo[259641]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tdqmdsabpzionqsffvxooqirggqpqbmn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147525.9487321-274-252962747550309/AnsiballZ_file.py'
Oct 11 01:52:07 compute-0 sudo[259641]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:07 compute-0 python3.9[259643]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.key _original_basename=compute-0.ctlplane.example.com-tls.key recurse=False state=file path=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.key force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:52:07 compute-0 sudo[259641]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:08 compute-0 ceph-mon[191930]: pgmap v418: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v419: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:09 compute-0 sudo[259769]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:52:09 compute-0 sudo[259816]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vgrelhlupfosfmbcfqpiumvzsxasltlc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147528.5032048-325-148008788499779/AnsiballZ_file.py'
Oct 11 01:52:09 compute-0 sudo[259769]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:09 compute-0 sudo[259816]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:09 compute-0 sudo[259769]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:09 compute-0 sudo[259821]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:52:09 compute-0 sudo[259821]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:09 compute-0 sudo[259821]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:09 compute-0 python3.9[259820]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/cacerts/telemetry-power-monitoring setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:52:09 compute-0 sudo[259816]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:09 compute-0 sudo[259846]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:52:09 compute-0 sudo[259846]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:09 compute-0 sudo[259846]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:09 compute-0 sudo[259897]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 01:52:09 compute-0 sudo[259897]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:09 compute-0 podman[259875]: 2025-10-11 01:52:09.486674265 +0000 UTC m=+0.112912442 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, version=9.6, config_id=edpm, container_name=openstack_network_exporter, io.openshift.tags=minimal rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-minimal-container, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, maintainer=Red Hat, Inc., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, distribution-scope=public, managed_by=edpm_ansible, io.buildah.version=1.33.7, vendor=Red Hat, Inc., vcs-type=git, architecture=x86_64, url=https://catalog.redhat.com/en/search?searchType=containers, build-date=2025-08-20T13:12:41, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, io.openshift.expose-services=, release=1755695350)
Oct 11 01:52:09 compute-0 podman[259870]: 2025-10-11 01:52:09.497850374 +0000 UTC m=+0.137544186 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 01:52:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:52:10 compute-0 sudo[260118]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qxxlnulrnpafvrkpheehrcfmmohlbbbb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147529.5860221-333-182942059414553/AnsiballZ_stat.py'
Oct 11 01:52:10 compute-0 sudo[260118]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:10 compute-0 sudo[259897]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:52:10 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:52:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:52:10 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:52:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:52:10 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:52:10 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 4ee9e5d4-aa98-48db-a053-67d8c18ecc1d does not exist
Oct 11 01:52:10 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 04c06f23-963c-4728-a925-d6a376623a50 does not exist
Oct 11 01:52:10 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 4dd17803-1c06-451a-8c45-f241f3bbbcfb does not exist
Oct 11 01:52:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:52:10 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:52:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:52:10 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:52:10 compute-0 ceph-mon[191930]: pgmap v419: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:10 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:52:10 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:52:10 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:52:10 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:52:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:52:10 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:52:10 compute-0 python3.9[260120]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:52:10 compute-0 sudo[260121]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:52:10 compute-0 sudo[260121]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:10 compute-0 sudo[260121]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:10 compute-0 sudo[260118]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:10 compute-0 sudo[260148]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:52:10 compute-0 sudo[260148]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:10 compute-0 sudo[260148]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:10 compute-0 sudo[260196]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:52:10 compute-0 sudo[260196]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:10 compute-0 sudo[260196]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v420: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:10 compute-0 sudo[260221]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 01:52:10 compute-0 sudo[260221]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:10 compute-0 sudo[260298]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gqcgmyxyaiqapspppxctnxqljxmlgonh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147529.5860221-333-182942059414553/AnsiballZ_file.py'
Oct 11 01:52:10 compute-0 sudo[260298]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:11 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:52:11 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:52:11 compute-0 python3.9[260307]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem _original_basename=tls-ca-bundle.pem recurse=False state=file path=/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:52:11 compute-0 sudo[260298]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:11 compute-0 podman[260345]: 2025-10-11 01:52:11.450648021 +0000 UTC m=+0.092489680 container create 1115ab15eff4480ac66453b083965ed3a0e4d7e1539b65c81decd2ae288587b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_bhaskara, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_REF=reef)
Oct 11 01:52:11 compute-0 podman[260345]: 2025-10-11 01:52:11.406478267 +0000 UTC m=+0.048320006 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:52:11 compute-0 systemd[1]: Started libpod-conmon-1115ab15eff4480ac66453b083965ed3a0e4d7e1539b65c81decd2ae288587b7.scope.
Oct 11 01:52:11 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:52:11 compute-0 podman[260345]: 2025-10-11 01:52:11.607762176 +0000 UTC m=+0.249603865 container init 1115ab15eff4480ac66453b083965ed3a0e4d7e1539b65c81decd2ae288587b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_bhaskara, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True)
Oct 11 01:52:11 compute-0 podman[260345]: 2025-10-11 01:52:11.630556955 +0000 UTC m=+0.272398614 container start 1115ab15eff4480ac66453b083965ed3a0e4d7e1539b65c81decd2ae288587b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_bhaskara, org.label-schema.build-date=20250507, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 01:52:11 compute-0 podman[260345]: 2025-10-11 01:52:11.636824291 +0000 UTC m=+0.278665990 container attach 1115ab15eff4480ac66453b083965ed3a0e4d7e1539b65c81decd2ae288587b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_bhaskara, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 01:52:11 compute-0 quirky_bhaskara[260379]: 167 167
Oct 11 01:52:11 compute-0 systemd[1]: libpod-1115ab15eff4480ac66453b083965ed3a0e4d7e1539b65c81decd2ae288587b7.scope: Deactivated successfully.
Oct 11 01:52:11 compute-0 podman[260345]: 2025-10-11 01:52:11.645674224 +0000 UTC m=+0.287515903 container died 1115ab15eff4480ac66453b083965ed3a0e4d7e1539b65c81decd2ae288587b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_bhaskara, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:52:11 compute-0 systemd[1]: var-lib-containers-storage-overlay-12ccae1ea367ef68a645ee6c1fa9f632ca52c074cc880a21afc024a7c8439cce-merged.mount: Deactivated successfully.
Oct 11 01:52:11 compute-0 podman[260345]: 2025-10-11 01:52:11.720455047 +0000 UTC m=+0.362296706 container remove 1115ab15eff4480ac66453b083965ed3a0e4d7e1539b65c81decd2ae288587b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_bhaskara, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:52:11 compute-0 systemd[1]: libpod-conmon-1115ab15eff4480ac66453b083965ed3a0e4d7e1539b65c81decd2ae288587b7.scope: Deactivated successfully.
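The create, init, start, attach, died, remove sequence for quirky_bhaskara, wrapped in matching libpod-conmon scope events, is the footprint of a one-shot container that cephadm launches, reads a single line from ("167 167", the ceph uid/gid pair), and discards. A rough stand-in under plain podman — the stat probe is an assumption, since the actual entrypoint cephadm used is not logged here:

    import subprocess

    IMAGE = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")

    # --rm reproduces the died -> remove tail of the lifecycle above;
    # printing the owner uid/gid of a ceph-owned path is an assumed
    # stand-in for whatever probe actually emitted "167 167".
    out = subprocess.run(
        ["podman", "run", "--rm", "--entrypoint", "stat", IMAGE,
         "-c", "%u %g", "/var/lib/ceph"],
        capture_output=True, text=True, check=True,
    ).stdout.strip()
    print(out)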
Oct 11 01:52:12 compute-0 podman[260476]: 2025-10-11 01:52:12.026007415 +0000 UTC m=+0.090049693 container create 53a36d1d03cff75400c9e63e93befe17a219ad719bef68958ceebb9518bc2a5e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_elgamal, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507)
Oct 11 01:52:12 compute-0 podman[260476]: 2025-10-11 01:52:11.991423916 +0000 UTC m=+0.055466254 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:52:12 compute-0 systemd[1]: Started libpod-conmon-53a36d1d03cff75400c9e63e93befe17a219ad719bef68958ceebb9518bc2a5e.scope.
Oct 11 01:52:12 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:52:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/578e2624ac50045567b08dd7fcdb61b507f61868563b2afb4ab7be19a1c71cd4/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:52:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/578e2624ac50045567b08dd7fcdb61b507f61868563b2afb4ab7be19a1c71cd4/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:52:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/578e2624ac50045567b08dd7fcdb61b507f61868563b2afb4ab7be19a1c71cd4/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:52:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/578e2624ac50045567b08dd7fcdb61b507f61868563b2afb4ab7be19a1c71cd4/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:52:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/578e2624ac50045567b08dd7fcdb61b507f61868563b2afb4ab7be19a1c71cd4/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
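For reference, the 0x7fffffff in these xfs remount warnings is the 32-bit signed time_t ceiling; XFS filesystems created without the bigtime feature cannot represent inode timestamps past it. The cutoff date falls out directly:

    import datetime

    # Largest 32-bit signed time_t, as quoted in the kernel messages above.
    limit = datetime.datetime.fromtimestamp(0x7FFFFFFF, tz=datetime.timezone.utc)
    print(limit.isoformat())  # 2038-01-19T03:14:07+00:00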
Oct 11 01:52:12 compute-0 sudo[260545]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mnugohjtotfmilkwnufifwfirtqlpcdg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147531.6506486-346-20372959268350/AnsiballZ_file.py'
Oct 11 01:52:12 compute-0 sudo[260545]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:12 compute-0 podman[260476]: 2025-10-11 01:52:12.217565329 +0000 UTC m=+0.281607637 container init 53a36d1d03cff75400c9e63e93befe17a219ad719bef68958ceebb9518bc2a5e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_elgamal, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2)
Oct 11 01:52:12 compute-0 ceph-mon[191930]: pgmap v420: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:12 compute-0 podman[260476]: 2025-10-11 01:52:12.240855029 +0000 UTC m=+0.304897297 container start 53a36d1d03cff75400c9e63e93befe17a219ad719bef68958ceebb9518bc2a5e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_elgamal, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 01:52:12 compute-0 podman[260476]: 2025-10-11 01:52:12.261766953 +0000 UTC m=+0.325809251 container attach 53a36d1d03cff75400c9e63e93befe17a219ad719bef68958ceebb9518bc2a5e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_elgamal, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:52:12 compute-0 python3.9[260547]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/cacerts/bootstrap setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:52:12 compute-0 sudo[260545]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v421: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:13 compute-0 sudo[260713]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pjycawwlipgfwswgiyedeifjjefbagac ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147532.7445092-354-153960867953995/AnsiballZ_stat.py'
Oct 11 01:52:13 compute-0 sudo[260713]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:13 compute-0 python3.9[260717]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/cacerts/bootstrap/tls-ca-bundle.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:52:13 compute-0 admiring_elgamal[260522]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:52:13 compute-0 admiring_elgamal[260522]: --> relative data size: 1.0
Oct 11 01:52:13 compute-0 admiring_elgamal[260522]: --> All data devices are unavailable
Oct 11 01:52:13 compute-0 sudo[260713]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:13 compute-0 systemd[1]: libpod-53a36d1d03cff75400c9e63e93befe17a219ad719bef68958ceebb9518bc2a5e.scope: Deactivated successfully.
Oct 11 01:52:13 compute-0 systemd[1]: libpod-53a36d1d03cff75400c9e63e93befe17a219ad719bef68958ceebb9518bc2a5e.scope: Consumed 1.330s CPU time.
Oct 11 01:52:13 compute-0 podman[260476]: 2025-10-11 01:52:13.637898722 +0000 UTC m=+1.701941040 container died 53a36d1d03cff75400c9e63e93befe17a219ad719bef68958ceebb9518bc2a5e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_elgamal, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, ceph=True)
Oct 11 01:52:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-578e2624ac50045567b08dd7fcdb61b507f61868563b2afb4ab7be19a1c71cd4-merged.mount: Deactivated successfully.
Oct 11 01:52:13 compute-0 podman[260476]: 2025-10-11 01:52:13.743719295 +0000 UTC m=+1.807761563 container remove 53a36d1d03cff75400c9e63e93befe17a219ad719bef68958ceebb9518bc2a5e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_elgamal, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef)
Oct 11 01:52:13 compute-0 systemd[1]: libpod-conmon-53a36d1d03cff75400c9e63e93befe17a219ad719bef68958ceebb9518bc2a5e.scope: Deactivated successfully.
Oct 11 01:52:13 compute-0 sudo[260221]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:13 compute-0 sudo[260762]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:52:13 compute-0 sudo[260762]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:13 compute-0 sudo[260762]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:14 compute-0 sudo[260814]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:52:14 compute-0 sudo[260814]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:14 compute-0 sudo[260860]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iwcrxhgyjgyogjrnembknjdnrmzwhvwq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147532.7445092-354-153960867953995/AnsiballZ_file.py'
Oct 11 01:52:14 compute-0 sudo[260814]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:14 compute-0 sudo[260860]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:14 compute-0 ceph-mon[191930]: pgmap v421: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:14 compute-0 sudo[260865]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:52:14 compute-0 sudo[260865]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:14 compute-0 sudo[260865]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:14 compute-0 python3.9[260864]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/var/lib/openstack/cacerts/bootstrap/tls-ca-bundle.pem _original_basename=tls-ca-bundle.pem recurse=False state=file path=/var/lib/openstack/cacerts/bootstrap/tls-ca-bundle.pem force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:52:14 compute-0 sudo[260860]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:14 compute-0 sudo[260890]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:52:14 compute-0 sudo[260890]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:52:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v422: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:15 compute-0 podman[261017]: 2025-10-11 01:52:15.010672923 +0000 UTC m=+0.076238018 container create b5fa10a3c4ff6554e6ce4794d597b0390c20129d6b52040a6e04353da41d82ea (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_napier, CEPH_REF=reef, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:52:15 compute-0 podman[261017]: 2025-10-11 01:52:14.979564563 +0000 UTC m=+0.045129658 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:52:15 compute-0 systemd[1]: Started libpod-conmon-b5fa10a3c4ff6554e6ce4794d597b0390c20129d6b52040a6e04353da41d82ea.scope.
Oct 11 01:52:15 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:52:15 compute-0 podman[261017]: 2025-10-11 01:52:15.149635811 +0000 UTC m=+0.215200896 container init b5fa10a3c4ff6554e6ce4794d597b0390c20129d6b52040a6e04353da41d82ea (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_napier, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Oct 11 01:52:15 compute-0 podman[261017]: 2025-10-11 01:52:15.16851689 +0000 UTC m=+0.234081975 container start b5fa10a3c4ff6554e6ce4794d597b0390c20129d6b52040a6e04353da41d82ea (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_napier, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507)
Oct 11 01:52:15 compute-0 podman[261017]: 2025-10-11 01:52:15.176874351 +0000 UTC m=+0.242439416 container attach b5fa10a3c4ff6554e6ce4794d597b0390c20129d6b52040a6e04353da41d82ea (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_napier, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:52:15 compute-0 sleepy_napier[261068]: 167 167
Oct 11 01:52:15 compute-0 systemd[1]: libpod-b5fa10a3c4ff6554e6ce4794d597b0390c20129d6b52040a6e04353da41d82ea.scope: Deactivated successfully.
Oct 11 01:52:15 compute-0 conmon[261068]: conmon b5fa10a3c4ff6554e6ce <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-b5fa10a3c4ff6554e6ce4794d597b0390c20129d6b52040a6e04353da41d82ea.scope/container/memory.events
Oct 11 01:52:15 compute-0 podman[261017]: 2025-10-11 01:52:15.186186165 +0000 UTC m=+0.251751250 container died b5fa10a3c4ff6554e6ce4794d597b0390c20129d6b52040a6e04353da41d82ea (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_napier, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:52:15 compute-0 podman[261059]: 2025-10-11 01:52:15.237883199 +0000 UTC m=+0.156815176 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi)
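This health_status event also embeds the container's full edpm config_data as a Python-literal dict (single-quoted, so not valid JSON). When mining these events, ast.literal_eval handles that representation; the excerpt below is truncated from the line above for brevity:

    import ast

    # Truncated excerpt of the config_data field from the event above.
    raw = ("{'image': 'quay.io/podified-antelope-centos9/"
           "openstack-ceilometer-ipmi:current-podified', "
           "'volumes': ['/etc/hosts:/etc/hosts:ro', '/dev/log:/dev/log']}")

    config = ast.literal_eval(raw)
    for vol in config["volumes"]:
        src, dst, *opts = vol.split(":")
        print(f"{src} -> {dst} ({opts[0] if opts else 'rw'})")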
Oct 11 01:52:15 compute-0 systemd[1]: var-lib-containers-storage-overlay-5b07ce223583f0e45dbab177218f23d3767a69835bd167f8e6aa1731ebac4b38-merged.mount: Deactivated successfully.
Oct 11 01:52:15 compute-0 podman[261017]: 2025-10-11 01:52:15.265106399 +0000 UTC m=+0.330671464 container remove b5fa10a3c4ff6554e6ce4794d597b0390c20129d6b52040a6e04353da41d82ea (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_napier, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:52:15 compute-0 systemd[1]: libpod-conmon-b5fa10a3c4ff6554e6ce4794d597b0390c20129d6b52040a6e04353da41d82ea.scope: Deactivated successfully.
Oct 11 01:52:15 compute-0 sudo[261152]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tjtccfdhjthafibkvuuchhmssyuqhmok ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147534.8462224-367-256628677673266/AnsiballZ_file.py'
Oct 11 01:52:15 compute-0 sudo[261152]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:15 compute-0 podman[261160]: 2025-10-11 01:52:15.55628387 +0000 UTC m=+0.108662251 container create 34a957dfda5064d56de4560f98e29e48dfa0281ccb511399a5498a84121eef06 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_hertz, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2)
Oct 11 01:52:15 compute-0 podman[261160]: 2025-10-11 01:52:15.526321783 +0000 UTC m=+0.078700214 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:52:15 compute-0 systemd[1]: Started libpod-conmon-34a957dfda5064d56de4560f98e29e48dfa0281ccb511399a5498a84121eef06.scope.
Oct 11 01:52:15 compute-0 python3.9[261157]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/cacerts/nova setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:52:15 compute-0 sudo[261152]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:15 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:52:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9248fa0509c2809bc20ff245d902983965d59344223e65cb24d0c77388e04849/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:52:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9248fa0509c2809bc20ff245d902983965d59344223e65cb24d0c77388e04849/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:52:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9248fa0509c2809bc20ff245d902983965d59344223e65cb24d0c77388e04849/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:52:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9248fa0509c2809bc20ff245d902983965d59344223e65cb24d0c77388e04849/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:52:15 compute-0 podman[261160]: 2025-10-11 01:52:15.763664995 +0000 UTC m=+0.316043426 container init 34a957dfda5064d56de4560f98e29e48dfa0281ccb511399a5498a84121eef06 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_hertz, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 01:52:15 compute-0 podman[261160]: 2025-10-11 01:52:15.78507403 +0000 UTC m=+0.337452421 container start 34a957dfda5064d56de4560f98e29e48dfa0281ccb511399a5498a84121eef06 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_hertz, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:52:15 compute-0 podman[261160]: 2025-10-11 01:52:15.790954195 +0000 UTC m=+0.343332666 container attach 34a957dfda5064d56de4560f98e29e48dfa0281ccb511399a5498a84121eef06 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_hertz, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True)
Oct 11 01:52:16 compute-0 ceph-mon[191930]: pgmap v422: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:16 compute-0 sudo[261332]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lxvystbdvfaxqmmymgwyvdsroktxmccl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147535.994963-375-157794989138956/AnsiballZ_stat.py'
Oct 11 01:52:16 compute-0 sudo[261332]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:16 compute-0 loving_hertz[261176]: {
Oct 11 01:52:16 compute-0 loving_hertz[261176]:     "0": [
Oct 11 01:52:16 compute-0 loving_hertz[261176]:         {
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "devices": [
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "/dev/loop3"
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             ],
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "lv_name": "ceph_lv0",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "lv_size": "21470642176",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "name": "ceph_lv0",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "tags": {
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.cluster_name": "ceph",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.crush_device_class": "",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.encrypted": "0",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.osd_id": "0",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.type": "block",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.vdo": "0"
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             },
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "type": "block",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "vg_name": "ceph_vg0"
Oct 11 01:52:16 compute-0 loving_hertz[261176]:         }
Oct 11 01:52:16 compute-0 loving_hertz[261176]:     ],
Oct 11 01:52:16 compute-0 loving_hertz[261176]:     "1": [
Oct 11 01:52:16 compute-0 loving_hertz[261176]:         {
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "devices": [
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "/dev/loop4"
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             ],
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "lv_name": "ceph_lv1",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "lv_size": "21470642176",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "name": "ceph_lv1",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "tags": {
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.cluster_name": "ceph",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.crush_device_class": "",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.encrypted": "0",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.osd_id": "1",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.type": "block",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.vdo": "0"
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             },
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "type": "block",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "vg_name": "ceph_vg1"
Oct 11 01:52:16 compute-0 loving_hertz[261176]:         }
Oct 11 01:52:16 compute-0 loving_hertz[261176]:     ],
Oct 11 01:52:16 compute-0 loving_hertz[261176]:     "2": [
Oct 11 01:52:16 compute-0 loving_hertz[261176]:         {
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "devices": [
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "/dev/loop5"
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             ],
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "lv_name": "ceph_lv2",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "lv_size": "21470642176",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "name": "ceph_lv2",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "tags": {
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.cluster_name": "ceph",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.crush_device_class": "",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.encrypted": "0",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.osd_id": "2",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.type": "block",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:                 "ceph.vdo": "0"
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             },
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "type": "block",
Oct 11 01:52:16 compute-0 loving_hertz[261176]:             "vg_name": "ceph_vg2"
Oct 11 01:52:16 compute-0 loving_hertz[261176]:         }
Oct 11 01:52:16 compute-0 loving_hertz[261176]:     ]
Oct 11 01:52:16 compute-0 loving_hertz[261176]: }
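Reassembled without the journal prefixes, the loving_hertz output above is the JSON document returned by the `ceph-volume ... lvm list --format json` call logged at 01:52:14: a map from OSD id to its logical volumes, with the authoritative metadata duplicated in the tags. A small sketch that reduces it to an OSD inventory, assuming the JSON body has first been saved to lvm_list.json:

    import json

    with open("lvm_list.json") as fh:
        lvm_list = json.load(fh)

    for osd_id in sorted(lvm_list, key=int):
        for vol in lvm_list[osd_id]:
            tags = vol["tags"]
            print(f"osd.{osd_id}: {vol['lv_path']} "
                  f"on {','.join(vol['devices'])} "
                  f"osd_fsid={tags['ceph.osd_fsid']} "
                  f"encrypted={tags['ceph.encrypted']}")

Run against the listing above, this would report osd.0 through osd.2 on /dev/loop3-5, matching the "0 physical, 3 LVM" summary printed earlier by admiring_elgamal.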
Oct 11 01:52:16 compute-0 systemd[1]: libpod-34a957dfda5064d56de4560f98e29e48dfa0281ccb511399a5498a84121eef06.scope: Deactivated successfully.
Oct 11 01:52:16 compute-0 podman[261160]: 2025-10-11 01:52:16.651626154 +0000 UTC m=+1.204004545 container died 34a957dfda5064d56de4560f98e29e48dfa0281ccb511399a5498a84121eef06 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_hertz, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:52:16 compute-0 systemd[1]: var-lib-containers-storage-overlay-9248fa0509c2809bc20ff245d902983965d59344223e65cb24d0c77388e04849-merged.mount: Deactivated successfully.
Oct 11 01:52:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v423: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:16 compute-0 podman[261160]: 2025-10-11 01:52:16.753889537 +0000 UTC m=+1.306267928 container remove 34a957dfda5064d56de4560f98e29e48dfa0281ccb511399a5498a84121eef06 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_hertz, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=reef)
Oct 11 01:52:16 compute-0 systemd[1]: libpod-conmon-34a957dfda5064d56de4560f98e29e48dfa0281ccb511399a5498a84121eef06.scope: Deactivated successfully.
Oct 11 01:52:16 compute-0 sudo[260890]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:16 compute-0 python3.9[261336]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/cacerts/nova/tls-ca-bundle.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:52:16 compute-0 sudo[261351]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:52:16 compute-0 sudo[261332]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:16 compute-0 sudo[261351]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:16 compute-0 sudo[261351]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:17 compute-0 sudo[261376]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:52:17 compute-0 sudo[261376]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:17 compute-0 sudo[261376]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:17 compute-0 sudo[261424]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:52:17 compute-0 sudo[261424]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:17 compute-0 sudo[261424]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:17 compute-0 sudo[261473]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:52:17 compute-0 sudo[261473]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:17 compute-0 sudo[261595]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nqvtqpftukpejruvpdyrtvfmditvehvc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147535.994963-375-157794989138956/AnsiballZ_copy.py'
Oct 11 01:52:17 compute-0 sudo[261595]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:17 compute-0 podman[261610]: 2025-10-11 01:52:17.782329471 +0000 UTC m=+0.077950478 container create 68d7da1d5da8a3ecfa6e17aa350264a5318bce7bdec34b8f3aafbe66d68f1421 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_roentgen, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 01:52:17 compute-0 python3.9[261604]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/cacerts/nova/tls-ca-bundle.pem group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760147535.994963-375-157794989138956/.source.pem _original_basename=tls-ca-bundle.pem follow=False checksum=f57cfa4065467101f7ba494c2f61c0e2e8a6dad5 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:52:17 compute-0 podman[261610]: 2025-10-11 01:52:17.747065647 +0000 UTC m=+0.042686664 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:52:17 compute-0 systemd[1]: Started libpod-conmon-68d7da1d5da8a3ecfa6e17aa350264a5318bce7bdec34b8f3aafbe66d68f1421.scope.
Oct 11 01:52:17 compute-0 sudo[261595]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:17 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:52:17 compute-0 podman[261610]: 2025-10-11 01:52:17.959997451 +0000 UTC m=+0.255618488 container init 68d7da1d5da8a3ecfa6e17aa350264a5318bce7bdec34b8f3aafbe66d68f1421 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_roentgen, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3)
Oct 11 01:52:17 compute-0 podman[261610]: 2025-10-11 01:52:17.979767262 +0000 UTC m=+0.275388289 container start 68d7da1d5da8a3ecfa6e17aa350264a5318bce7bdec34b8f3aafbe66d68f1421 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_roentgen, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 01:52:17 compute-0 podman[261610]: 2025-10-11 01:52:17.988171964 +0000 UTC m=+0.283793041 container attach 68d7da1d5da8a3ecfa6e17aa350264a5318bce7bdec34b8f3aafbe66d68f1421 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_roentgen, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:52:17 compute-0 friendly_roentgen[261626]: 167 167
Oct 11 01:52:17 compute-0 systemd[1]: libpod-68d7da1d5da8a3ecfa6e17aa350264a5318bce7bdec34b8f3aafbe66d68f1421.scope: Deactivated successfully.
Oct 11 01:52:17 compute-0 podman[261610]: 2025-10-11 01:52:17.992469402 +0000 UTC m=+0.288090439 container died 68d7da1d5da8a3ecfa6e17aa350264a5318bce7bdec34b8f3aafbe66d68f1421 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_roentgen, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3)
Oct 11 01:52:18 compute-0 systemd[1]: var-lib-containers-storage-overlay-6cc02efda3062172cfa0a7e18767276609b3f0d124950be89127c654b755c32c-merged.mount: Deactivated successfully.
Oct 11 01:52:18 compute-0 podman[261610]: 2025-10-11 01:52:18.071035212 +0000 UTC m=+0.366656219 container remove 68d7da1d5da8a3ecfa6e17aa350264a5318bce7bdec34b8f3aafbe66d68f1421 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_roentgen, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:52:18 compute-0 systemd[1]: libpod-conmon-68d7da1d5da8a3ecfa6e17aa350264a5318bce7bdec34b8f3aafbe66d68f1421.scope: Deactivated successfully.
Oct 11 01:52:18 compute-0 ceph-mon[191930]: pgmap v423: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:18 compute-0 podman[261707]: 2025-10-11 01:52:18.349220774 +0000 UTC m=+0.090749020 container create 30d70ecf800d88e6ce2a540a88f00e258f64f234a1649ae2a3153d6cae392254 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_northcutt, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 01:52:18 compute-0 podman[261707]: 2025-10-11 01:52:18.312057737 +0000 UTC m=+0.053585983 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:52:18 compute-0 systemd[1]: Started libpod-conmon-30d70ecf800d88e6ce2a540a88f00e258f64f234a1649ae2a3153d6cae392254.scope.
Oct 11 01:52:18 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:52:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/efb0e2f1f06cb180da4f9cc19b74e3eec364b2c495796e9dca47c20a85f90a78/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:52:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/efb0e2f1f06cb180da4f9cc19b74e3eec364b2c495796e9dca47c20a85f90a78/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:52:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/efb0e2f1f06cb180da4f9cc19b74e3eec364b2c495796e9dca47c20a85f90a78/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:52:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/efb0e2f1f06cb180da4f9cc19b74e3eec364b2c495796e9dca47c20a85f90a78/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:52:18 compute-0 podman[261707]: 2025-10-11 01:52:18.494826443 +0000 UTC m=+0.236354729 container init 30d70ecf800d88e6ce2a540a88f00e258f64f234a1649ae2a3153d6cae392254 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_northcutt, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:52:18 compute-0 podman[261707]: 2025-10-11 01:52:18.518695117 +0000 UTC m=+0.260223333 container start 30d70ecf800d88e6ce2a540a88f00e258f64f234a1649ae2a3153d6cae392254 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_northcutt, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:52:18 compute-0 podman[261707]: 2025-10-11 01:52:18.526186688 +0000 UTC m=+0.267714934 container attach 30d70ecf800d88e6ce2a540a88f00e258f64f234a1649ae2a3153d6cae392254 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_northcutt, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:52:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v424: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:18 compute-0 sudo[261819]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jtukjtoawgjdeetwdzjnwkceiopfjvbb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147538.182733-391-169839649774106/AnsiballZ_file.py'
Oct 11 01:52:18 compute-0 sudo[261819]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:18 compute-0 python3.9[261821]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/cacerts/ovn setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:52:19 compute-0 sudo[261819]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]: {
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:         "osd_id": 1,
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:         "type": "bluestore"
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:     },
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:         "osd_id": 2,
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:         "type": "bluestore"
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:     },
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:         "osd_id": 0,
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:         "type": "bluestore"
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]:     }
Oct 11 01:52:19 compute-0 laughing_northcutt[261762]: }
Oct 11 01:52:19 compute-0 systemd[1]: libpod-30d70ecf800d88e6ce2a540a88f00e258f64f234a1649ae2a3153d6cae392254.scope: Deactivated successfully.
Oct 11 01:52:19 compute-0 systemd[1]: libpod-30d70ecf800d88e6ce2a540a88f00e258f64f234a1649ae2a3153d6cae392254.scope: Consumed 1.290s CPU time.
Oct 11 01:52:19 compute-0 podman[261707]: 2025-10-11 01:52:19.827165354 +0000 UTC m=+1.568693560 container died 30d70ecf800d88e6ce2a540a88f00e258f64f234a1649ae2a3153d6cae392254 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_northcutt, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 01:52:19 compute-0 systemd[1]: var-lib-containers-storage-overlay-efb0e2f1f06cb180da4f9cc19b74e3eec364b2c495796e9dca47c20a85f90a78-merged.mount: Deactivated successfully.
Oct 11 01:52:19 compute-0 podman[261707]: 2025-10-11 01:52:19.914827932 +0000 UTC m=+1.656356128 container remove 30d70ecf800d88e6ce2a540a88f00e258f64f234a1649ae2a3153d6cae392254 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_northcutt, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 01:52:19 compute-0 systemd[1]: libpod-conmon-30d70ecf800d88e6ce2a540a88f00e258f64f234a1649ae2a3153d6cae392254.scope: Deactivated successfully.
Oct 11 01:52:19 compute-0 sudo[262011]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-djnsnwnrwkztmitxikkhymaiaizeottc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147539.3714678-399-213287222278435/AnsiballZ_stat.py'
Oct 11 01:52:19 compute-0 sudo[262011]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:19 compute-0 sudo[261473]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:52:19 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:52:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:52:19 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:52:20 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 4ddba267-c881-4803-acbd-4c713e70c1f3 does not exist
Oct 11 01:52:20 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 340c41de-6780-4eb8-95f1-0b66d7d91c34 does not exist
Oct 11 01:52:20 compute-0 sudo[262016]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:52:20 compute-0 sudo[262016]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:20 compute-0 sudo[262016]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:20 compute-0 python3.9[262015]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:52:20 compute-0 sudo[262011]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:20 compute-0 sudo[262041]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:52:20 compute-0 sudo[262041]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:52:20 compute-0 sudo[262041]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:20 compute-0 ceph-mon[191930]: pgmap v424: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:20 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:52:20 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:52:20 compute-0 sudo[262141]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kvrvakzyudgwjijucydkpvcdquxhxkjh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147539.3714678-399-213287222278435/AnsiballZ_file.py'
Oct 11 01:52:20 compute-0 sudo[262141]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v425: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:20 compute-0 python3.9[262143]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem _original_basename=tls-ca-bundle.pem recurse=False state=file path=/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:52:20 compute-0 sudo[262141]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:21 compute-0 sudo[262293]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fyfeeedlhjbzhzjgtewdsgahxptcsupa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147541.232596-412-38396123967311/AnsiballZ_file.py'
Oct 11 01:52:21 compute-0 sudo[262293]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:22 compute-0 python3.9[262295]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/cacerts/telemetry setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:52:22 compute-0 sudo[262293]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:22 compute-0 ceph-mon[191930]: pgmap v425: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v426: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:22 compute-0 sudo[262488]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cixucnpjirphrylwrxsmuurqkotyndvl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147542.380719-420-234872555537498/AnsiballZ_stat.py'
Oct 11 01:52:23 compute-0 sudo[262488]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:23 compute-0 podman[262419]: 2025-10-11 01:52:23.019120797 +0000 UTC m=+0.150737597 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 01:52:23 compute-0 podman[262421]: 2025-10-11 01:52:23.026661789 +0000 UTC m=+0.150510572 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, com.redhat.component=ubi9-container, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, maintainer=Red Hat, Inc., managed_by=edpm_ansible, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, vendor=Red Hat, Inc., version=9.4, container_name=kepler, io.k8s.display-name=Red Hat Universal Base Image 9, release=1214.1726694543, io.buildah.version=1.29.0, io.openshift.expose-services=, name=ubi9, architecture=x86_64, io.openshift.tags=base rhel9, summary=Provides the latest release of Red Hat Universal Base Image 9., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release-0.7.12=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543)
Oct 11 01:52:23 compute-0 podman[262420]: 2025-10-11 01:52:23.067550071 +0000 UTC m=+0.194083136 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']})
Oct 11 01:52:23 compute-0 python3.9[262505]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:52:23 compute-0 sudo[262488]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:23 compute-0 sudo[262586]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fistpenltljjffoliatxrlgavmjsvbil ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147542.380719-420-234872555537498/AnsiballZ_file.py'
Oct 11 01:52:23 compute-0 sudo[262586]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:24 compute-0 python3.9[262588]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem _original_basename=tls-ca-bundle.pem recurse=False state=file path=/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:52:24 compute-0 sudo[262586]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:24 compute-0 ceph-mon[191930]: pgmap v426: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:52:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v427: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:25 compute-0 sudo[262738]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ssedfcwcusctvsgekrlcyumpyxhfamrd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147544.4161184-433-157847721400017/AnsiballZ_file.py'
Oct 11 01:52:25 compute-0 sudo[262738]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:25 compute-0 podman[262740]: 2025-10-11 01:52:25.191823614 +0000 UTC m=+0.140982015 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251007, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 01:52:25 compute-0 python3.9[262741]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/cacerts/neutron-metadata setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:52:25 compute-0 sudo[262738]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:26 compute-0 sudo[262910]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gsvgqvjgnzjjstkezvyctkovkwehimgy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147545.643163-441-156235588810864/AnsiballZ_stat.py'
Oct 11 01:52:26 compute-0 sudo[262910]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:26 compute-0 ceph-mon[191930]: pgmap v427: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:26 compute-0 python3.9[262912]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:52:26 compute-0 sudo[262910]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:52:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:52:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:52:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:52:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:52:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:52:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v428: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:27 compute-0 sudo[263033]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ojucqbwiweymsdoqunojntzieykonvvc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147545.643163-441-156235588810864/AnsiballZ_copy.py'
Oct 11 01:52:27 compute-0 sudo[263033]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:27 compute-0 python3.9[263035]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem group=root mode=0644 owner=root src=/home/zuul/.ansible/tmp/ansible-tmp-1760147545.643163-441-156235588810864/.source.pem _original_basename=tls-ca-bundle.pem follow=False checksum=f57cfa4065467101f7ba494c2f61c0e2e8a6dad5 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:52:27 compute-0 sudo[263033]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:28 compute-0 sudo[263185]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nhljaylxkvzmdbqirvxpdwcfwmfmemba ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147547.8119512-457-18293137054727/AnsiballZ_file.py'
Oct 11 01:52:28 compute-0 sudo[263185]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:28 compute-0 ceph-mon[191930]: pgmap v428: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:28 compute-0 python3.9[263187]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/cacerts/repo-setup setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:52:28 compute-0 sudo[263185]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v429: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:29 compute-0 sudo[263337]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ejgmyqleonytygjwwksetrglxdnnzbvy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147548.8104935-465-267127691068560/AnsiballZ_stat.py'
Oct 11 01:52:29 compute-0 sudo[263337]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:29 compute-0 python3.9[263339]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/cacerts/repo-setup/tls-ca-bundle.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:52:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:52:29 compute-0 podman[157119]: time="2025-10-11T01:52:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:52:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:52:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:52:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:52:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6820 "" "Go-http-client/1.1"
Oct 11 01:52:29 compute-0 sudo[263337]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:30 compute-0 sudo[263415]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-djgiuacefjifjluldszjwhlscuwhaiwe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147548.8104935-465-267127691068560/AnsiballZ_file.py'
Oct 11 01:52:30 compute-0 sudo[263415]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:30 compute-0 ceph-mon[191930]: pgmap v429: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:30 compute-0 python3.9[263417]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/var/lib/openstack/cacerts/repo-setup/tls-ca-bundle.pem _original_basename=tls-ca-bundle.pem recurse=False state=file path=/var/lib/openstack/cacerts/repo-setup/tls-ca-bundle.pem force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:52:30 compute-0 sudo[263415]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v430: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:31 compute-0 sudo[263567]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yiestmelajoznoeaufqdfjxqepcrcjed ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147550.824687-478-211205542415847/AnsiballZ_file.py'
Oct 11 01:52:31 compute-0 sudo[263567]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:31 compute-0 openstack_network_exporter[159265]: ERROR   01:52:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:52:31 compute-0 openstack_network_exporter[159265]: ERROR   01:52:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:52:31 compute-0 openstack_network_exporter[159265]: ERROR   01:52:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:52:31 compute-0 openstack_network_exporter[159265]: ERROR   01:52:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:52:31 compute-0 openstack_network_exporter[159265]: ERROR   01:52:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:52:31 compute-0 python3.9[263569]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/var/lib/openstack/cacerts/libvirt setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:52:31 compute-0 sudo[263567]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:32 compute-0 ceph-mon[191930]: pgmap v430: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:32 compute-0 sudo[263719]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ilrqodkewgzukcwszonocjufofssrcjy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147551.9612815-486-150362681649919/AnsiballZ_stat.py'
Oct 11 01:52:32 compute-0 sudo[263719]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v431: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:32 compute-0 python3.9[263721]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/cacerts/libvirt/tls-ca-bundle.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:52:32 compute-0 sudo[263719]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:33 compute-0 sudo[263797]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kqyntsnyyhemnsxuvuzsbzknbexkvmnd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147551.9612815-486-150362681649919/AnsiballZ_file.py'
Oct 11 01:52:33 compute-0 sudo[263797]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:33 compute-0 python3.9[263799]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/var/lib/openstack/cacerts/libvirt/tls-ca-bundle.pem _original_basename=tls-ca-bundle.pem recurse=False state=file path=/var/lib/openstack/cacerts/libvirt/tls-ca-bundle.pem force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:52:33 compute-0 sudo[263797]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:33 compute-0 sshd-session[254238]: Connection closed by 192.168.122.30 port 40180
Oct 11 01:52:34 compute-0 sshd-session[254235]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:52:34 compute-0 systemd[1]: session-51.scope: Deactivated successfully.
Oct 11 01:52:34 compute-0 systemd[1]: session-51.scope: Consumed 1min 2.000s CPU time.
Oct 11 01:52:34 compute-0 systemd-logind[804]: Session 51 logged out. Waiting for processes to exit.
Oct 11 01:52:34 compute-0 systemd-logind[804]: Removed session 51.
Oct 11 01:52:34 compute-0 ceph-mon[191930]: pgmap v431: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:52:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v432: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:36 compute-0 ceph-mon[191930]: pgmap v432: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v433: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:36 compute-0 unix_chkpwd[263826]: password check failed for user (root)
Oct 11 01:52:36 compute-0 sshd-session[263824]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 01:52:38 compute-0 ceph-mon[191930]: pgmap v433: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v434: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:39 compute-0 sshd-session[263824]: Failed password for root from 193.46.255.217 port 19186 ssh2
Oct 11 01:52:39 compute-0 sshd-session[263827]: Accepted publickey for zuul from 192.168.122.30 port 34076 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:52:39 compute-0 systemd-logind[804]: New session 52 of user zuul.
Oct 11 01:52:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:52:39 compute-0 systemd[1]: Started Session 52 of User zuul.
Oct 11 01:52:39 compute-0 sshd-session[263827]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:52:39 compute-0 podman[263829]: 2025-10-11 01:52:39.798547322 +0000 UTC m=+0.137821102 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 01:52:39 compute-0 podman[263830]: 2025-10-11 01:52:39.808867628 +0000 UTC m=+0.146793578 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, config_id=edpm, managed_by=edpm_ansible, vcs-type=git, release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.openshift.tags=minimal rhel9, io.buildah.version=1.33.7, name=ubi9-minimal, vendor=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.6, io.openshift.expose-services=, build-date=2025-08-20T13:12:41, maintainer=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, architecture=x86_64, distribution-scope=public, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, container_name=openstack_network_exporter)
Oct 11 01:52:40 compute-0 unix_chkpwd[263928]: password check failed for user (root)
Oct 11 01:52:40 compute-0 ceph-mon[191930]: pgmap v434: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:40 compute-0 sudo[264026]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-egodbdppcaznqwztftccwblsucftkyhw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147559.9165854-22-231770778662448/AnsiballZ_file.py'
Oct 11 01:52:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v435: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:40 compute-0 sudo[264026]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:40 compute-0 python3.9[264028]: ansible-ansible.builtin.file Invoked with mode=0755 path=/var/lib/openstack/config/ceph state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:52:40 compute-0 sudo[264026]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:41 compute-0 ceph-mon[191930]: pgmap v435: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:41 compute-0 sudo[264178]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-unjxbuajpsfqulldgasnwidtevsdsgan ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147561.2891202-34-248771228092361/AnsiballZ_stat.py'
Oct 11 01:52:42 compute-0 sudo[264178]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:42 compute-0 sshd-session[263824]: Failed password for root from 193.46.255.217 port 19186 ssh2
Oct 11 01:52:42 compute-0 python3.9[264180]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/ceph/ceph.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:52:42 compute-0 sudo[264178]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v436: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:43 compute-0 sudo[264301]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-quoyjjkfusgjgyqexwjajmdqgjbdayst ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147561.2891202-34-248771228092361/AnsiballZ_copy.py'
Oct 11 01:52:43 compute-0 sudo[264301]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:43 compute-0 unix_chkpwd[264304]: password check failed for user (root)
Oct 11 01:52:43 compute-0 python3.9[264303]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/ceph/ceph.conf mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760147561.2891202-34-248771228092361/.source.conf _original_basename=ceph.conf follow=False checksum=4608e5502903673aab556c915c0ec98adc63de05 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:52:43 compute-0 sudo[264301]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:43 compute-0 ceph-mon[191930]: pgmap v436: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:44 compute-0 sudo[264454]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-potyxnkwwdlouspgtalhggacocjitfab ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147563.7289891-34-82247536774698/AnsiballZ_stat.py'
Oct 11 01:52:44 compute-0 sudo[264454]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:44 compute-0 python3.9[264456]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/ceph/ceph.client.openstack.keyring follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:52:44 compute-0 sudo[264454]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:52:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v437: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:45 compute-0 sshd-session[263824]: Failed password for root from 193.46.255.217 port 19186 ssh2
Oct 11 01:52:45 compute-0 sudo[264577]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ogycdgsbrupixpklzfdlknfhwtjgkroc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147563.7289891-34-82247536774698/AnsiballZ_copy.py'
Oct 11 01:52:45 compute-0 sudo[264577]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:45 compute-0 podman[264579]: 2025-10-11 01:52:45.540666937 +0000 UTC m=+0.128362188 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 01:52:45 compute-0 python3.9[264580]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/ceph/ceph.client.openstack.keyring mode=0600 src=/home/zuul/.ansible/tmp/ansible-tmp-1760147563.7289891-34-82247536774698/.source.keyring _original_basename=ceph.client.openstack.keyring follow=False checksum=5a4e28cd9e72beb8739807af57732c755958e969 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:52:45 compute-0 sudo[264577]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:45 compute-0 ceph-mon[191930]: pgmap v437: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:46 compute-0 sshd-session[263857]: Connection closed by 192.168.122.30 port 34076
Oct 11 01:52:46 compute-0 sshd-session[263827]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:52:46 compute-0 systemd[1]: session-52.scope: Deactivated successfully.
Oct 11 01:52:46 compute-0 systemd[1]: session-52.scope: Consumed 5.158s CPU time.
Oct 11 01:52:46 compute-0 systemd-logind[804]: Session 52 logged out. Waiting for processes to exit.
Oct 11 01:52:46 compute-0 systemd-logind[804]: Removed session 52.
Oct 11 01:52:46 compute-0 sshd-session[263824]: Received disconnect from 193.46.255.217 port 19186:11:  [preauth]
Oct 11 01:52:46 compute-0 sshd-session[263824]: Disconnected from authenticating user root 193.46.255.217 port 19186 [preauth]
Oct 11 01:52:46 compute-0 sshd-session[263824]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 01:52:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v438: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:47 compute-0 unix_chkpwd[264627]: password check failed for user (root)
Oct 11 01:52:47 compute-0 sshd-session[264625]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 01:52:47 compute-0 ceph-mon[191930]: pgmap v438: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v439: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:49 compute-0 sshd-session[264625]: Failed password for root from 193.46.255.217 port 35126 ssh2
Oct 11 01:52:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:52:49 compute-0 ceph-mon[191930]: pgmap v439: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:50 compute-0 unix_chkpwd[264629]: password check failed for user (root)
Oct 11 01:52:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v440: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:51 compute-0 sshd-session[264630]: Accepted publickey for zuul from 192.168.122.30 port 38744 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:52:51 compute-0 systemd-logind[804]: New session 53 of user zuul.
Oct 11 01:52:51 compute-0 systemd[1]: Started Session 53 of User zuul.
Oct 11 01:52:51 compute-0 sshd-session[264630]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:52:51 compute-0 ceph-mon[191930]: pgmap v440: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v441: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:52 compute-0 sshd-session[264625]: Failed password for root from 193.46.255.217 port 35126 ssh2
Oct 11 01:52:53 compute-0 python3.9[264783]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:52:53 compute-0 podman[264785]: 2025-10-11 01:52:53.224067641 +0000 UTC m=+0.109279762 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.expose-services=, maintainer=Red Hat, Inc., summary=Provides the latest release of Red Hat Universal Base Image 9., version=9.4, com.redhat.component=ubi9-container, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, build-date=2024-09-18T21:23:30, io.openshift.tags=base rhel9, config_id=edpm, release-0.7.12=, architecture=x86_64, container_name=kepler, distribution-scope=public, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, io.buildah.version=1.29.0, release=1214.1726694543, io.k8s.display-name=Red Hat Universal Base Image 9, managed_by=edpm_ansible, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 01:52:53 compute-0 podman[264784]: 2025-10-11 01:52:53.23893362 +0000 UTC m=+0.127137769 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 01:52:53 compute-0 podman[264786]: 2025-10-11 01:52:53.28321015 +0000 UTC m=+0.157845460 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=ovn_controller, io.buildah.version=1.41.3)
Oct 11 01:52:53 compute-0 unix_chkpwd[264898]: password check failed for user (root)
Oct 11 01:52:53 compute-0 ceph-mon[191930]: pgmap v441: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:54 compute-0 sudo[265001]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nxniolqhlfpoehqrjoiqbytvimkyvnit ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147573.900031-34-156880287587805/AnsiballZ_file.py'
Oct 11 01:52:54 compute-0 sudo[265001]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:52:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v442: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:54 compute-0 python3.9[265003]: ansible-ansible.builtin.file Invoked with group=zuul mode=0750 owner=zuul path=/var/lib/edpm-config/firewall setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:52:54 compute-0 sudo[265001]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:55 compute-0 sudo[265169]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rkojimbscksqdcjygeuexfhtuhtoumsw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147575.1264238-34-65761760582802/AnsiballZ_file.py'
Oct 11 01:52:55 compute-0 sudo[265169]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:55 compute-0 podman[265127]: 2025-10-11 01:52:55.692440841 +0000 UTC m=+0.129721599 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2)
Oct 11 01:52:55 compute-0 sshd-session[264625]: Failed password for root from 193.46.255.217 port 35126 ssh2
Oct 11 01:52:55 compute-0 python3.9[265174]: ansible-ansible.builtin.file Invoked with group=openvswitch owner=openvswitch path=/var/lib/openvswitch/ovn setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:52:55 compute-0 ceph-mon[191930]: pgmap v442: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:55 compute-0 sudo[265169]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:52:56
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.control', 'default.rgw.meta', 'vms', 'images', 'cephfs.cephfs.meta', 'default.rgw.log', 'cephfs.cephfs.data', '.mgr', '.rgw.root', 'volumes', 'backups']
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:52:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v443: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:57 compute-0 sshd-session[264625]: Received disconnect from 193.46.255.217 port 35126:11:  [preauth]
Oct 11 01:52:57 compute-0 sshd-session[264625]: Disconnected from authenticating user root 193.46.255.217 port 35126 [preauth]
Oct 11 01:52:57 compute-0 sshd-session[264625]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 01:52:57 compute-0 python3.9[265324]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'selinux'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:52:57 compute-0 ceph-mon[191930]: pgmap v443: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:57 compute-0 unix_chkpwd[265426]: password check failed for user (root)
Oct 11 01:52:57 compute-0 sshd-session[265330]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 01:52:58 compute-0 sudo[265477]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qhaterwaemhuzzcjgnruikqxmeuodrns ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147577.4831858-57-269497027084499/AnsiballZ_seboolean.py'
Oct 11 01:52:58 compute-0 sudo[265477]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:52:58 compute-0 python3.9[265479]: ansible-ansible.posix.seboolean Invoked with name=virt_sandbox_use_netlink persistent=True state=True ignore_selinux_state=False
Oct 11 01:52:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v444: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:52:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:52:59 compute-0 podman[157119]: time="2025-10-11T01:52:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:52:59 compute-0 sudo[265477]: pam_unix(sudo:session): session closed for user root
Oct 11 01:52:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:52:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:52:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:52:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6821 "" "Go-http-client/1.1"
Oct 11 01:52:59 compute-0 ceph-mon[191930]: pgmap v444: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:00 compute-0 sshd-session[265330]: Failed password for root from 193.46.255.217 port 37272 ssh2
Oct 11 01:53:00 compute-0 sudo[265629]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dkipbngwuxxjpauxlggmpsopvfjwmkam ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147580.1742408-67-178184541777193/AnsiballZ_setup.py'
Oct 11 01:53:00 compute-0 sudo[265629]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v445: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:01 compute-0 python3.9[265631]: ansible-ansible.legacy.setup Invoked with filter=['ansible_pkg_mgr'] gather_subset=['!all'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:53:01 compute-0 unix_chkpwd[265640]: password check failed for user (root)
Oct 11 01:53:01 compute-0 openstack_network_exporter[159265]: ERROR   01:53:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:53:01 compute-0 openstack_network_exporter[159265]: ERROR   01:53:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:53:01 compute-0 openstack_network_exporter[159265]: ERROR   01:53:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:53:01 compute-0 openstack_network_exporter[159265]: ERROR   01:53:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:53:01 compute-0 openstack_network_exporter[159265]: ERROR   01:53:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:53:01 compute-0 sudo[265629]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:01 compute-0 ceph-mon[191930]: pgmap v445: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:02 compute-0 sudo[265714]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jtuqskwbhsnrsgoyogbiuakudpfhaicb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147580.1742408-67-178184541777193/AnsiballZ_dnf.py'
Oct 11 01:53:02 compute-0 sudo[265714]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:02 compute-0 python3.9[265716]: ansible-ansible.legacy.dnf Invoked with name=['openvswitch'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:53:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v446: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:03 compute-0 sshd-session[265330]: Failed password for root from 193.46.255.217 port 37272 ssh2
Oct 11 01:53:03 compute-0 sudo[265714]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:03 compute-0 ceph-mon[191930]: pgmap v446: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:04 compute-0 unix_chkpwd[265794]: password check failed for user (root)
Oct 11 01:53:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:53:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v447: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:04 compute-0 sudo[265868]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jtfwgfvhgyvuqnlakokpyflbhnescgdl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147584.0140584-79-238309550124555/AnsiballZ_systemd.py'
Oct 11 01:53:04 compute-0 sudo[265868]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:05 compute-0 python3.9[265870]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=openvswitch.service state=started daemon_reload=False daemon_reexec=False scope=system no_block=False force=None
Oct 11 01:53:05 compute-0 sudo[265868]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:05 compute-0 ceph-mon[191930]: pgmap v447: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 01:53:06 compute-0 sudo[266023]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kvjxzwvetobzyhtoasqhioanvlpezgvu ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760147585.6861932-87-204688873409948/AnsiballZ_edpm_nftables_snippet.py'
Oct 11 01:53:06 compute-0 sudo[266023]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:06 compute-0 python3[266025]: ansible-osp.edpm.edpm_nftables_snippet Invoked with content=- rule_name: 118 neutron vxlan networks
                                             rule:
                                               proto: udp
                                               dport: 4789
                                           - rule_name: 119 neutron geneve networks
                                             rule:
                                               proto: udp
                                               dport: 6081
                                               state: ["UNTRACKED"]
                                           - rule_name: 120 neutron geneve networks no conntrack
                                             rule:
                                               proto: udp
                                               dport: 6081
                                               table: raw
                                               chain: OUTPUT
                                               jump: NOTRACK
                                               action: append
                                               state: []
                                           - rule_name: 121 neutron geneve networks no conntrack
                                             rule:
                                               proto: udp
                                               dport: 6081
                                               table: raw
                                               chain: PREROUTING
                                               jump: NOTRACK
                                               action: append
                                               state: []
                                            dest=/var/lib/edpm-config/firewall/ovn.yaml state=present
Oct 11 01:53:06 compute-0 sudo[266023]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:06 compute-0 sshd-session[265330]: Failed password for root from 193.46.255.217 port 37272 ssh2
Oct 11 01:53:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v448: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:07 compute-0 sshd-session[265330]: Received disconnect from 193.46.255.217 port 37272:11:  [preauth]
Oct 11 01:53:07 compute-0 sshd-session[265330]: Disconnected from authenticating user root 193.46.255.217 port 37272 [preauth]
Oct 11 01:53:07 compute-0 sshd-session[265330]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 01:53:07 compute-0 sudo[266175]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-upbzrutejkgtbjzqtrfvlzygexwjfrnq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147587.1036823-96-209860113786643/AnsiballZ_file.py'
Oct 11 01:53:07 compute-0 sudo[266175]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:07 compute-0 python3.9[266177]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.938 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to be longer than the expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.939 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.939 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.940 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f8ed27f97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb8c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 sudo[266175]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb1a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb200>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed2874260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed3ab42f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb350>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb90>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fa390>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb3b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbbf0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbc80>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27f9610>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb620>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbe30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbec0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbf50>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f8ed27fbad0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f8ed27faff0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f8ed27fb110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f8ed27fb170>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f8ed27fb1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f8ed27fb230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f8ed2874230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f8ed27fb290>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f8ed5778d70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f8ed27fb650>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f8ed27fbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f8ed27fb320>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f8ed27fbb60>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f8ed27fa3f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f8ed27fb380>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f8ed27fbbc0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.955 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f8ed27fbc50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.955 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f8ed27fbce0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.955 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.956 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f8ed27fbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.956 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.956 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f8ed27fb590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.956 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.956 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f8ed27f95e0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.956 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f8ed27fb5f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f8ed27fbe00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f8ed27fbe90>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f8ed27fbf20>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.958 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.959 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.959 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.959 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.959 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.959 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.960 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.961 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.961 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.961 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.961 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.961 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.961 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.961 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.962 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.962 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.962 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.962 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.962 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:53:07.962 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:53:08 compute-0 ceph-mon[191930]: pgmap v448: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v449: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:08 compute-0 sudo[266328]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vwycpztgqwtxxjlfndrrrdfajmzktzja ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147588.2237964-104-212502181919527/AnsiballZ_stat.py'
Oct 11 01:53:08 compute-0 sudo[266328]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:09 compute-0 python3.9[266330]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:53:09 compute-0 sudo[266328]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:53:09 compute-0 sudo[266406]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jwhrpwqfsxiivnirkjyewpujbplkavpp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147588.2237964-104-212502181919527/AnsiballZ_file.py'
Oct 11 01:53:09 compute-0 sudo[266406]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:09 compute-0 python3.9[266408]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml _original_basename=base-rules.yaml.j2 recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:53:10 compute-0 sudo[266406]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:10 compute-0 ceph-mon[191930]: pgmap v449: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:10 compute-0 podman[266422]: 2025-10-11 01:53:10.213281711 +0000 UTC m=+0.100656216 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 01:53:10 compute-0 podman[266430]: 2025-10-11 01:53:10.273675067 +0000 UTC m=+0.152811824 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-type=git, io.buildah.version=1.33.7, io.openshift.expose-services=, version=9.6, managed_by=edpm_ansible, architecture=x86_64, build-date=2025-08-20T13:12:41, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.tags=minimal rhel9, vendor=Red Hat, Inc., container_name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-minimal-container, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, name=ubi9-minimal, url=https://catalog.redhat.com/en/search?searchType=containers, config_id=edpm, maintainer=Red Hat, Inc., release=1755695350, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 01:53:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v450: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:10 compute-0 sudo[266598]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ezirnstzrfijgafnuqxlnkzecjfswepp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147590.3075676-116-123623284907428/AnsiballZ_stat.py'
Oct 11 01:53:10 compute-0 sudo[266598]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:11 compute-0 python3.9[266600]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:53:11 compute-0 sudo[266598]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:11 compute-0 sudo[266676]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gwxvgmpkrripbepsernmvjlczpenjilm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147590.3075676-116-123623284907428/AnsiballZ_file.py'
Oct 11 01:53:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 01:53:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Cumulative writes: 5427 writes, 23K keys, 5427 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.03 MB/s
                                            Cumulative WAL: 5427 writes, 779 syncs, 6.97 writes per sync, written: 0.02 GB, 0.03 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 5427 writes, 23K keys, 5427 commit groups, 1.0 writes per commit group, ingest: 18.45 MB, 0.03 MB/s
                                            Interval WAL: 5427 writes, 779 syncs, 6.97 writes per sync, written: 0.02 GB, 0.03 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      2/0    2.63 KB   0.2      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                             Sum      2/0    2.63 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
                                            
                                            ** Compaction Stats [m-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-0] **
                                            
                                            ** Compaction Stats [m-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-1] **
                                            
                                            ** Compaction Stats [m-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-2] **
                                            
                                            ** Compaction Stats [p-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.56 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.01              0.00         1    0.006       0      0       0.0       0.0
                                             Sum      1/0    1.56 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.01              0.00         1    0.006       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.3      0.01              0.00         1    0.006       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-0] **
                                            
                                            ** Compaction Stats [p-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-1] **
                                            
                                            ** Compaction Stats [p-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-2] **
                                            
                                            ** Compaction Stats [O-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 2 last_secs: 9e-06 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-0] **
                                            
                                            ** Compaction Stats [O-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 2 last_secs: 9e-06 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-1] **
                                            
                                            ** Compaction Stats [O-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.25 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Sum      1/0    1.25 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.2      0.00              0.00         1    0.005       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 2 last_secs: 9e-06 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-2] **
                                            
                                            ** Compaction Stats [L] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [L] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [L] **
                                            
                                            ** Compaction Stats [P] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [P] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [P] **
Oct 11 01:53:11 compute-0 sudo[266676]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:11 compute-0 python3.9[266678]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml _original_basename=.3s4dsuky recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:53:11 compute-0 sudo[266676]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:12 compute-0 ceph-mon[191930]: pgmap v450: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:12 compute-0 sudo[266828]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ekdprqmjhesgvmfzwlauibrgrhswbrja ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147592.0555282-128-131163543368550/AnsiballZ_stat.py'
Oct 11 01:53:12 compute-0 sudo[266828]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v451: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:12 compute-0 python3.9[266830]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/iptables.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:53:12 compute-0 sudo[266828]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:13 compute-0 sudo[266906]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ddvnmbmsamxnuiswghieicwtmfrimtmx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147592.0555282-128-131163543368550/AnsiballZ_file.py'
Oct 11 01:53:13 compute-0 sudo[266906]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:13 compute-0 python3.9[266908]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/iptables.nft _original_basename=iptables.nft recurse=False state=file path=/etc/nftables/iptables.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:53:13 compute-0 sudo[266906]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:14 compute-0 ceph-mon[191930]: pgmap v451: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:53:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v452: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:14 compute-0 sudo[267058]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xfoilmghryejyfhsjzgsbjsdhshfjqhb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147593.9875014-141-207063870659707/AnsiballZ_command.py'
Oct 11 01:53:14 compute-0 sudo[267058]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:15 compute-0 python3.9[267060]: ansible-ansible.legacy.command Invoked with _raw_params=nft -j list ruleset _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:53:15 compute-0 sudo[267058]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:16 compute-0 ceph-mon[191930]: pgmap v452: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:16 compute-0 sudo[267228]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-blwkofcahmaetrermulomekatmllbagi ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760147595.4264328-149-78461739068367/AnsiballZ_edpm_nftables_from_files.py'
Oct 11 01:53:16 compute-0 sudo[267228]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:16 compute-0 podman[267185]: 2025-10-11 01:53:16.211220356 +0000 UTC m=+0.132138009 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm, org.label-schema.schema-version=1.0)
Oct 11 01:53:16 compute-0 python3[267233]: ansible-edpm_nftables_from_files Invoked with src=/var/lib/edpm-config/firewall
Oct 11 01:53:16 compute-0 sudo[267228]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v453: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:17 compute-0 sudo[267383]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nxzypbejiyocpeiixhekcnqilhaneytz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147596.7411242-157-149779396544415/AnsiballZ_stat.py'
Oct 11 01:53:17 compute-0 sudo[267383]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:17 compute-0 python3.9[267385]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:53:17 compute-0 sudo[267383]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:18 compute-0 sudo[267461]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rqqxlxhdyqaeieoymsfzpwkpwuczobos ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147596.7411242-157-149779396544415/AnsiballZ_file.py'
Oct 11 01:53:18 compute-0 ceph-mon[191930]: pgmap v453: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:18 compute-0 sudo[267461]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:18 compute-0 python3.9[267463]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-jumps.nft _original_basename=jump-chain.j2 recurse=False state=file path=/etc/nftables/edpm-jumps.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:53:18 compute-0 sudo[267461]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v454: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 01:53:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Cumulative writes: 6767 writes, 28K keys, 6767 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.03 MB/s
                                            Cumulative WAL: 6767 writes, 1159 syncs, 5.84 writes per sync, written: 0.02 GB, 0.03 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 6767 writes, 28K keys, 6767 commit groups, 1.0 writes per commit group, ingest: 19.56 MB, 0.03 MB/s
                                            Interval WAL: 6767 writes, 1159 syncs, 5.84 writes per sync, written: 0.02 GB, 0.03 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      2/0    2.63 KB   0.2      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.01              0.00         1    0.005       0      0       0.0       0.0
                                             Sum      2/0    2.63 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.01              0.00         1    0.005       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.2      0.01              0.00         1    0.005       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
                                            
                                            ** Compaction Stats [m-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-0] **
                                            
                                            ** Compaction Stats [m-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-1] **
                                            
                                            ** Compaction Stats [m-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-2] **
                                            
                                            ** Compaction Stats [p-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.56 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Sum      1/0    1.56 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.3      0.00              0.00         1    0.005       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-0] **
                                            
                                            ** Compaction Stats [p-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-1] **
                                            
                                            ** Compaction Stats [p-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-2] **
                                            
                                            ** Compaction Stats [O-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 2 last_secs: 1.2e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-0] **
                                            
                                            ** Compaction Stats [O-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 2 last_secs: 1.2e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-1] **
                                            
                                            ** Compaction Stats [O-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.25 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.00              0.00         1    0.004       0      0       0.0       0.0
                                             Sum      1/0    1.25 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.00              0.00         1    0.004       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.3      0.00              0.00         1    0.004       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 2 last_secs: 1.2e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-2] **
                                            
                                            ** Compaction Stats [L] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [L] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [L] **
                                            
                                            ** Compaction Stats [P] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [P] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [P] **
Oct 11 01:53:19 compute-0 sudo[267613]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kwpxrkuiltknxzsqxqwhnwuvmxicklrq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147598.6444418-169-164752014328051/AnsiballZ_stat.py'
Oct 11 01:53:19 compute-0 sudo[267613]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:19 compute-0 python3.9[267615]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-update-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:53:19 compute-0 sudo[267613]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:53:19 compute-0 sudo[267692]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ooyplswjwteqkqiulvapxlrwnlefbniv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147598.6444418-169-164752014328051/AnsiballZ_file.py'
Oct 11 01:53:19 compute-0 sudo[267692]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:20 compute-0 ceph-mon[191930]: pgmap v454: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:20 compute-0 python3.9[267694]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-update-jumps.nft _original_basename=jump-chain.j2 recurse=False state=file path=/etc/nftables/edpm-update-jumps.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:53:20 compute-0 sudo[267692]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:20 compute-0 sudo[267698]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:53:20 compute-0 sudo[267698]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:20 compute-0 sudo[267698]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:20 compute-0 sudo[267744]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:53:20 compute-0 sudo[267744]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:20 compute-0 sudo[267744]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:20 compute-0 sudo[267793]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:53:20 compute-0 sudo[267793]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:20 compute-0 sudo[267793]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:20 compute-0 sudo[267846]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host
Oct 11 01:53:20 compute-0 sudo[267846]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v455: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:20 compute-0 sudo[267958]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mdvyjracmvytjbskzreemtrvknenucse ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147600.4434588-181-195804324173313/AnsiballZ_stat.py'
Oct 11 01:53:21 compute-0 sudo[267958]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:21 compute-0 sudo[267846]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:53:21 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:53:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:53:21 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:53:21 compute-0 sudo[267965]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:53:21 compute-0 sudo[267965]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:21 compute-0 sudo[267965]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:21 compute-0 python3.9[267964]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-flushes.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:53:21 compute-0 sudo[267990]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:53:21 compute-0 sudo[267990]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:21 compute-0 sudo[267990]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:21 compute-0 sudo[267958]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:21 compute-0 sudo[268017]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:53:21 compute-0 sudo[268017]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:21 compute-0 sudo[268017]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:21 compute-0 sudo[268060]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 01:53:21 compute-0 sudo[268060]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:21 compute-0 sudo[268142]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kfuepdatcpwlehgljxpvlxphuvydumsq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147600.4434588-181-195804324173313/AnsiballZ_file.py'
Oct 11 01:53:21 compute-0 sudo[268142]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:21 compute-0 python3.9[268152]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-flushes.nft _original_basename=flush-chain.j2 recurse=False state=file path=/etc/nftables/edpm-flushes.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:53:21 compute-0 sudo[268142]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:22 compute-0 sudo[268060]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:22 compute-0 ceph-mon[191930]: pgmap v455: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:22 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:53:22 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:53:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:53:22 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:53:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:53:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:53:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:53:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:53:22 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev b5b71102-2b41-4429-bad0-ebc8e5aa47e3 does not exist
Oct 11 01:53:22 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 67a64f6b-027b-4351-8b76-e45beff5b0eb does not exist
Oct 11 01:53:22 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 3f63865a-57f2-4cd8-bf12-766063c40b77 does not exist
Oct 11 01:53:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:53:22 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:53:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:53:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:53:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:53:22 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:53:22 compute-0 sudo[268202]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:53:22 compute-0 sudo[268202]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:22 compute-0 sudo[268202]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:22 compute-0 sudo[268258]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:53:22 compute-0 sudo[268258]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:22 compute-0 sudo[268258]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:22 compute-0 sudo[268303]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:53:22 compute-0 sudo[268303]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:22 compute-0 sudo[268303]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:22 compute-0 sudo[268347]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 01:53:22 compute-0 sudo[268347]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:22 compute-0 sudo[268422]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ibwmvenbznnspuirkybllpjblpeaxbyg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147602.2001812-193-10738377922177/AnsiballZ_stat.py'
Oct 11 01:53:22 compute-0 sudo[268422]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v456: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:22 compute-0 python3.9[268431]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-chains.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:53:22 compute-0 sudo[268422]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:53:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:53:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:53:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:53:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:53:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:53:23 compute-0 podman[268466]: 2025-10-11 01:53:23.085569544 +0000 UTC m=+0.077020956 container create 9072e5241109c6da4f49cef96725840875a3a461484c5399c48d6a97f6a5cafa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_lalande, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 01:53:23 compute-0 podman[268466]: 2025-10-11 01:53:23.046843133 +0000 UTC m=+0.038294555 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:53:23 compute-0 systemd[1]: Started libpod-conmon-9072e5241109c6da4f49cef96725840875a3a461484c5399c48d6a97f6a5cafa.scope.
Oct 11 01:53:23 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:53:23 compute-0 podman[268466]: 2025-10-11 01:53:23.224008523 +0000 UTC m=+0.215459965 container init 9072e5241109c6da4f49cef96725840875a3a461484c5399c48d6a97f6a5cafa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_lalande, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:53:23 compute-0 podman[268466]: 2025-10-11 01:53:23.242767714 +0000 UTC m=+0.234219126 container start 9072e5241109c6da4f49cef96725840875a3a461484c5399c48d6a97f6a5cafa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_lalande, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:53:23 compute-0 podman[268466]: 2025-10-11 01:53:23.250473644 +0000 UTC m=+0.241925096 container attach 9072e5241109c6da4f49cef96725840875a3a461484c5399c48d6a97f6a5cafa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_lalande, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:53:23 compute-0 zealous_lalande[268506]: 167 167
Oct 11 01:53:23 compute-0 systemd[1]: libpod-9072e5241109c6da4f49cef96725840875a3a461484c5399c48d6a97f6a5cafa.scope: Deactivated successfully.
Oct 11 01:53:23 compute-0 podman[268466]: 2025-10-11 01:53:23.254904664 +0000 UTC m=+0.246356066 container died 9072e5241109c6da4f49cef96725840875a3a461484c5399c48d6a97f6a5cafa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_lalande, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:53:23 compute-0 systemd[1]: var-lib-containers-storage-overlay-875af0c29b2c543f5532248108eb40e06619dc7f02dec90a06393b36502bf521-merged.mount: Deactivated successfully.
Oct 11 01:53:23 compute-0 podman[268466]: 2025-10-11 01:53:23.326392847 +0000 UTC m=+0.317844229 container remove 9072e5241109c6da4f49cef96725840875a3a461484c5399c48d6a97f6a5cafa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_lalande, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:53:23 compute-0 systemd[1]: libpod-conmon-9072e5241109c6da4f49cef96725840875a3a461484c5399c48d6a97f6a5cafa.scope: Deactivated successfully.
Oct 11 01:53:23 compute-0 sudo[268619]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dpiwzplvtpbtkffrlcitxdulrueqprsg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147602.2001812-193-10738377922177/AnsiballZ_file.py'
Oct 11 01:53:23 compute-0 sudo[268619]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:23 compute-0 podman[268535]: 2025-10-11 01:53:23.427597248 +0000 UTC m=+0.125922821 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:53:23 compute-0 podman[268542]: 2025-10-11 01:53:23.436799671 +0000 UTC m=+0.138624969 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, release=1214.1726694543, config_id=edpm, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.openshift.tags=base rhel9, container_name=kepler, distribution-scope=public, version=9.4, com.redhat.component=ubi9-container, build-date=2024-09-18T21:23:30, vendor=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, architecture=x86_64, maintainer=Red Hat, Inc., managed_by=edpm_ansible, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, summary=Provides the latest release of Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.buildah.version=1.29.0, io.openshift.expose-services=, name=ubi9, io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=)
Oct 11 01:53:23 compute-0 podman[268559]: 2025-10-11 01:53:23.463289937 +0000 UTC m=+0.126093868 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, managed_by=edpm_ansible, org.label-schema.schema-version=1.0)
Oct 11 01:53:23 compute-0 podman[268647]: 2025-10-11 01:53:23.560061227 +0000 UTC m=+0.081992170 container create adcb6743aef1d6d80a9b5eb4ef7647bc6765a6dabcb28932dd3135061ced52f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_volhard, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:53:23 compute-0 podman[268647]: 2025-10-11 01:53:23.51031823 +0000 UTC m=+0.032249143 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:53:23 compute-0 python3.9[268635]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-chains.nft _original_basename=chains.j2 recurse=False state=file path=/etc/nftables/edpm-chains.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:53:23 compute-0 systemd[1]: Started libpod-conmon-adcb6743aef1d6d80a9b5eb4ef7647bc6765a6dabcb28932dd3135061ced52f6.scope.
Oct 11 01:53:23 compute-0 sudo[268619]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:23 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:53:23 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2a21810b4b945f7783094b67f03feb6f31b4f4407bfd55fce6175bb20931cba2/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:53:23 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2a21810b4b945f7783094b67f03feb6f31b4f4407bfd55fce6175bb20931cba2/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:53:23 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2a21810b4b945f7783094b67f03feb6f31b4f4407bfd55fce6175bb20931cba2/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:53:23 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2a21810b4b945f7783094b67f03feb6f31b4f4407bfd55fce6175bb20931cba2/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:53:23 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2a21810b4b945f7783094b67f03feb6f31b4f4407bfd55fce6175bb20931cba2/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:53:23 compute-0 podman[268647]: 2025-10-11 01:53:23.713080447 +0000 UTC m=+0.235011410 container init adcb6743aef1d6d80a9b5eb4ef7647bc6765a6dabcb28932dd3135061ced52f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_volhard, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:53:23 compute-0 podman[268647]: 2025-10-11 01:53:23.738428024 +0000 UTC m=+0.260358927 container start adcb6743aef1d6d80a9b5eb4ef7647bc6765a6dabcb28932dd3135061ced52f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_volhard, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 01:53:23 compute-0 podman[268647]: 2025-10-11 01:53:23.743999672 +0000 UTC m=+0.265930575 container attach adcb6743aef1d6d80a9b5eb4ef7647bc6765a6dabcb28932dd3135061ced52f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_volhard, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 01:53:24 compute-0 ceph-mon[191930]: pgmap v456: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:24 compute-0 sudo[268825]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-skuuwsrisrkbyssnngeveafuldjazsza ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147603.9469717-205-80717578525900/AnsiballZ_stat.py'
Oct 11 01:53:24 compute-0 sudo[268825]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:53:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v457: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:24 compute-0 python3.9[268831]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-rules.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:53:24 compute-0 sudo[268825]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:24 compute-0 boring_volhard[268661]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:53:24 compute-0 boring_volhard[268661]: --> relative data size: 1.0
Oct 11 01:53:24 compute-0 boring_volhard[268661]: --> All data devices are unavailable
Oct 11 01:53:24 compute-0 systemd[1]: libpod-adcb6743aef1d6d80a9b5eb4ef7647bc6765a6dabcb28932dd3135061ced52f6.scope: Deactivated successfully.
Oct 11 01:53:24 compute-0 systemd[1]: libpod-adcb6743aef1d6d80a9b5eb4ef7647bc6765a6dabcb28932dd3135061ced52f6.scope: Consumed 1.172s CPU time.
Oct 11 01:53:25 compute-0 podman[268647]: 2025-10-11 01:53:25.000012342 +0000 UTC m=+1.521943295 container died adcb6743aef1d6d80a9b5eb4ef7647bc6765a6dabcb28932dd3135061ced52f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_volhard, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:53:25 compute-0 systemd[1]: var-lib-containers-storage-overlay-2a21810b4b945f7783094b67f03feb6f31b4f4407bfd55fce6175bb20931cba2-merged.mount: Deactivated successfully.
Oct 11 01:53:25 compute-0 podman[268647]: 2025-10-11 01:53:25.113153921 +0000 UTC m=+1.635084854 container remove adcb6743aef1d6d80a9b5eb4ef7647bc6765a6dabcb28932dd3135061ced52f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_volhard, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 01:53:25 compute-0 systemd[1]: libpod-conmon-adcb6743aef1d6d80a9b5eb4ef7647bc6765a6dabcb28932dd3135061ced52f6.scope: Deactivated successfully.
Oct 11 01:53:25 compute-0 sudo[268347]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:25 compute-0 sudo[268904]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:53:25 compute-0 sudo[268904]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:25 compute-0 sudo[268904]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:25 compute-0 sudo[268960]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wdxidoveziceyutypfctynjwedashfbm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147603.9469717-205-80717578525900/AnsiballZ_file.py'
Oct 11 01:53:25 compute-0 sudo[268960]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:25 compute-0 sudo[268956]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:53:25 compute-0 sudo[268956]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:25 compute-0 sudo[268956]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:25 compute-0 sudo[268984]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:53:25 compute-0 sudo[268984]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:25 compute-0 sudo[268984]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:25 compute-0 python3.9[268971]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-rules.nft _original_basename=ruleset.j2 recurse=False state=file path=/etc/nftables/edpm-rules.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:53:25 compute-0 sudo[268960]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:25 compute-0 sudo[269009]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:53:25 compute-0 sudo[269009]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:25 compute-0 podman[269050]: 2025-10-11 01:53:25.907775509 +0000 UTC m=+0.135614941 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, tcib_managed=true, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']})
Oct 11 01:53:26 compute-0 ceph-mgr[192233]: [devicehealth INFO root] Check health
Oct 11 01:53:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 01:53:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 600.2 total, 600.0 interval
                                            Cumulative writes: 5548 writes, 24K keys, 5548 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.03 MB/s
                                            Cumulative WAL: 5548 writes, 830 syncs, 6.68 writes per sync, written: 0.02 GB, 0.03 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 5548 writes, 24K keys, 5548 commit groups, 1.0 writes per commit group, ingest: 18.49 MB, 0.03 MB/s
                                            Interval WAL: 5548 writes, 830 syncs, 6.68 writes per sync, written: 0.02 GB, 0.03 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      2/0    2.63 KB   0.2      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.014       0      0       0.0       0.0
                                             Sum      2/0    2.63 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.014       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.1      0.01              0.00         1    0.014       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
                                            
                                            ** Compaction Stats [m-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-0] **
                                            
                                            ** Compaction Stats [m-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-1] **
                                            
                                            ** Compaction Stats [m-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-2] **
                                            
                                            ** Compaction Stats [p-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.56 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.01              0.00         1    0.009       0      0       0.0       0.0
                                             Sum      1/0    1.56 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.01              0.00         1    0.009       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.2      0.01              0.00         1    0.009       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-0] **
                                            
                                            ** Compaction Stats [p-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-1] **
                                            
                                            ** Compaction Stats [p-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-2] **
                                            
                                            ** Compaction Stats [O-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 2 last_secs: 8e-06 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-0] **
                                            
                                            ** Compaction Stats [O-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 2 last_secs: 8e-06 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-1] **
                                            
                                            ** Compaction Stats [O-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.25 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                             Sum      1/0    1.25 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 2 last_secs: 8e-06 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-2] **
                                            
                                            ** Compaction Stats [L] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [L] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.005       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [L] **
                                            
                                            ** Compaction Stats [P] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [P] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 600.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 2 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [P] **
Oct 11 01:53:26 compute-0 ceph-mon[191930]: pgmap v457: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:26 compute-0 podman[269168]: 2025-10-11 01:53:26.237319579 +0000 UTC m=+0.075643421 container create e8a8ded772a7bb5d9aefb5ba2fb1f7b4b35bde98d59a08dc0f6892b59ebb4722 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_cerf, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 01:53:26 compute-0 podman[269168]: 2025-10-11 01:53:26.208024517 +0000 UTC m=+0.046348429 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:53:26 compute-0 systemd[1]: Started libpod-conmon-e8a8ded772a7bb5d9aefb5ba2fb1f7b4b35bde98d59a08dc0f6892b59ebb4722.scope.
Oct 11 01:53:26 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:53:26 compute-0 podman[269168]: 2025-10-11 01:53:26.421530917 +0000 UTC m=+0.259854759 container init e8a8ded772a7bb5d9aefb5ba2fb1f7b4b35bde98d59a08dc0f6892b59ebb4722 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_cerf, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:53:26 compute-0 podman[269168]: 2025-10-11 01:53:26.43689945 +0000 UTC m=+0.275223282 container start e8a8ded772a7bb5d9aefb5ba2fb1f7b4b35bde98d59a08dc0f6892b59ebb4722 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_cerf, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 01:53:26 compute-0 podman[269168]: 2025-10-11 01:53:26.444702095 +0000 UTC m=+0.283025927 container attach e8a8ded772a7bb5d9aefb5ba2fb1f7b4b35bde98d59a08dc0f6892b59ebb4722 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_cerf, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 01:53:26 compute-0 goofy_cerf[269224]: 167 167
Oct 11 01:53:26 compute-0 systemd[1]: libpod-e8a8ded772a7bb5d9aefb5ba2fb1f7b4b35bde98d59a08dc0f6892b59ebb4722.scope: Deactivated successfully.
Oct 11 01:53:26 compute-0 podman[269168]: 2025-10-11 01:53:26.447798517 +0000 UTC m=+0.286122370 container died e8a8ded772a7bb5d9aefb5ba2fb1f7b4b35bde98d59a08dc0f6892b59ebb4722 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_cerf, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 01:53:26 compute-0 systemd[1]: var-lib-containers-storage-overlay-1d6fb556f470d184c4c02247bceb90d499affa2404f7134e64fcfab90bd88f58-merged.mount: Deactivated successfully.
Oct 11 01:53:26 compute-0 podman[269168]: 2025-10-11 01:53:26.507959226 +0000 UTC m=+0.346283058 container remove e8a8ded772a7bb5d9aefb5ba2fb1f7b4b35bde98d59a08dc0f6892b59ebb4722 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_cerf, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 01:53:26 compute-0 systemd[1]: libpod-conmon-e8a8ded772a7bb5d9aefb5ba2fb1f7b4b35bde98d59a08dc0f6892b59ebb4722.scope: Deactivated successfully.
Oct 11 01:53:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:53:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:53:26 compute-0 sudo[269270]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ohiqcyzleujygyfrgktmrtdwqvnugvsy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147606.000945-218-17985688399268/AnsiballZ_command.py'
Oct 11 01:53:26 compute-0 sudo[269270]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:53:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:53:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:53:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:53:26 compute-0 python3.9[269277]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail; cat /etc/nftables/edpm-chains.nft /etc/nftables/edpm-flushes.nft /etc/nftables/edpm-rules.nft /etc/nftables/edpm-update-jumps.nft /etc/nftables/edpm-jumps.nft | nft -c -f - _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:53:26 compute-0 podman[269283]: 2025-10-11 01:53:26.763876881 +0000 UTC m=+0.075856804 container create b4f636cceefa619a52613431b4a2c8c985b3a24a7cccb095f7c8620729b3a0a6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_hugle, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:53:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v458: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:26 compute-0 podman[269283]: 2025-10-11 01:53:26.734390179 +0000 UTC m=+0.046370202 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:53:26 compute-0 sudo[269270]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:26 compute-0 systemd[1]: Started libpod-conmon-b4f636cceefa619a52613431b4a2c8c985b3a24a7cccb095f7c8620729b3a0a6.scope.
Oct 11 01:53:26 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:53:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/de69aa40bd716fbce60eb629db5876f84bcafb113b9194eebfa103fbfedbd9d0/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:53:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/de69aa40bd716fbce60eb629db5876f84bcafb113b9194eebfa103fbfedbd9d0/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:53:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/de69aa40bd716fbce60eb629db5876f84bcafb113b9194eebfa103fbfedbd9d0/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:53:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/de69aa40bd716fbce60eb629db5876f84bcafb113b9194eebfa103fbfedbd9d0/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:53:26 compute-0 podman[269283]: 2025-10-11 01:53:26.927595457 +0000 UTC m=+0.239575410 container init b4f636cceefa619a52613431b4a2c8c985b3a24a7cccb095f7c8620729b3a0a6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_hugle, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:53:26 compute-0 podman[269283]: 2025-10-11 01:53:26.960991748 +0000 UTC m=+0.272971661 container start b4f636cceefa619a52613431b4a2c8c985b3a24a7cccb095f7c8620729b3a0a6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_hugle, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 01:53:26 compute-0 podman[269283]: 2025-10-11 01:53:26.965547507 +0000 UTC m=+0.277527460 container attach b4f636cceefa619a52613431b4a2c8c985b3a24a7cccb095f7c8620729b3a0a6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_hugle, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 01:53:27 compute-0 pensive_hugle[269302]: {
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:     "0": [
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:         {
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "devices": [
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "/dev/loop3"
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             ],
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "lv_name": "ceph_lv0",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "lv_size": "21470642176",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "name": "ceph_lv0",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "tags": {
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.cluster_name": "ceph",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.crush_device_class": "",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.encrypted": "0",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.osd_id": "0",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.type": "block",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.vdo": "0"
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             },
Oct 11 01:53:27 compute-0 sudo[269460]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pfvygmptjphjdhmnlxuvzwgexkwechqr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147607.085586-226-111734638829773/AnsiballZ_blockinfile.py'
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "type": "block",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "vg_name": "ceph_vg0"
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:         }
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:     ],
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:     "1": [
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:         {
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "devices": [
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "/dev/loop4"
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             ],
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "lv_name": "ceph_lv1",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "lv_size": "21470642176",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "name": "ceph_lv1",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "tags": {
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.cluster_name": "ceph",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.crush_device_class": "",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.encrypted": "0",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.osd_id": "1",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.type": "block",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.vdo": "0"
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             },
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "type": "block",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "vg_name": "ceph_vg1"
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:         }
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:     ],
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:     "2": [
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:         {
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "devices": [
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "/dev/loop5"
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             ],
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "lv_name": "ceph_lv2",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "lv_size": "21470642176",
Oct 11 01:53:27 compute-0 sudo[269460]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "name": "ceph_lv2",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "tags": {
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.cluster_name": "ceph",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.crush_device_class": "",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.encrypted": "0",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.osd_id": "2",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.type": "block",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:                 "ceph.vdo": "0"
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             },
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "type": "block",
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:             "vg_name": "ceph_vg2"
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:         }
Oct 11 01:53:27 compute-0 pensive_hugle[269302]:     ]
Oct 11 01:53:27 compute-0 pensive_hugle[269302]: }
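[annotation] The JSON emitted above by the pensive_hugle container is `ceph-volume lvm list --format json` output: a map of OSD id to its logical volumes, with the same metadata carried twice, once flattened in `lv_tags` and once parsed in `tags`. Note `lv_size` 21470642176 bytes is ~20 GiB, and three such OSDs account for the "60 GiB / 60 GiB avail" in the pgmap lines. A minimal sketch of extracting an osd_id-to-block-device map (the input filename is hypothetical):

    import json

    # Minimal sketch: map osd_id -> block device path from
    # `ceph-volume lvm list --format json` output (filename is hypothetical).
    with open("lvm_list.json") as f:
        lvm_list = json.load(f)

    osd_devices = {
        osd_id: lv["lv_path"]
        for osd_id, lvs in lvm_list.items()
        for lv in lvs
        if lv.get("tags", {}).get("ceph.type") == "block"
    }
    print(osd_devices)  # e.g. {'1': '/dev/ceph_vg1/ceph_lv1', '2': '/dev/ceph_vg2/ceph_lv2'}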
Oct 11 01:53:27 compute-0 systemd[1]: libpod-b4f636cceefa619a52613431b4a2c8c985b3a24a7cccb095f7c8620729b3a0a6.scope: Deactivated successfully.
Oct 11 01:53:27 compute-0 podman[269283]: 2025-10-11 01:53:27.826289891 +0000 UTC m=+1.138269834 container died b4f636cceefa619a52613431b4a2c8c985b3a24a7cccb095f7c8620729b3a0a6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_hugle, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 01:53:27 compute-0 systemd[1]: var-lib-containers-storage-overlay-de69aa40bd716fbce60eb629db5876f84bcafb113b9194eebfa103fbfedbd9d0-merged.mount: Deactivated successfully.
Oct 11 01:53:27 compute-0 podman[269283]: 2025-10-11 01:53:27.918528906 +0000 UTC m=+1.230508829 container remove b4f636cceefa619a52613431b4a2c8c985b3a24a7cccb095f7c8620729b3a0a6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_hugle, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef)
Oct 11 01:53:27 compute-0 systemd[1]: libpod-conmon-b4f636cceefa619a52613431b4a2c8c985b3a24a7cccb095f7c8620729b3a0a6.scope: Deactivated successfully.
Oct 11 01:53:27 compute-0 sudo[269009]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:28 compute-0 sudo[269476]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:53:28 compute-0 sudo[269476]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:28 compute-0 sudo[269476]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:28 compute-0 python3.9[269462]: ansible-ansible.builtin.blockinfile Invoked with backup=False block=include "/etc/nftables/iptables.nft"
                                             include "/etc/nftables/edpm-chains.nft"
                                             include "/etc/nftables/edpm-rules.nft"
                                             include "/etc/nftables/edpm-jumps.nft"
                                              path=/etc/sysconfig/nftables.conf validate=nft -c -f %s state=present marker=# {mark} ANSIBLE MANAGED BLOCK create=False marker_begin=BEGIN marker_end=END append_newline=False prepend_newline=False unsafe_writes=False insertafter=None insertbefore=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
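[annotation] This blockinfile invocation wraps the four nftables `include` lines between marker comments in /etc/sysconfig/nftables.conf (the markers expand to `# BEGIN ANSIBLE MANAGED BLOCK` / `# END ANSIBLE MANAGED BLOCK`), validating the candidate file with `nft -c -f %s` before it replaces the original. A rough sketch of the replace-between-markers idempotency trick, not Ansible's actual implementation:

    # Rough sketch of blockinfile's replace-between-markers behaviour
    # (not Ansible's actual implementation).
    def set_managed_block(text, block, begin="# BEGIN ANSIBLE MANAGED BLOCK",
                          end="# END ANSIBLE MANAGED BLOCK"):
        lines = text.splitlines()
        managed = [begin, *block.splitlines(), end]
        if begin in lines and end in lines:
            i, j = lines.index(begin), lines.index(end)
            lines[i:j + 1] = managed      # rerun: replace the existing block in place
        else:
            lines += managed              # first run: append at end of file
        return "\n".join(lines) + "\n"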
Oct 11 01:53:28 compute-0 sudo[269460]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:28 compute-0 ceph-mon[191930]: pgmap v458: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:28 compute-0 sudo[269501]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:53:28 compute-0 sudo[269501]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:28 compute-0 sudo[269501]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:28 compute-0 sudo[269541]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:53:28 compute-0 sudo[269541]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:28 compute-0 sudo[269541]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:28 compute-0 sudo[269575]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:53:28 compute-0 sudo[269575]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v459: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:28 compute-0 podman[269739]: 2025-10-11 01:53:28.873033902 +0000 UTC m=+0.060368122 container create 85af800b00ba263d519131355a81d520dffe2128d8a5cf108c85fb10d0e3fd87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_roentgen, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:53:28 compute-0 systemd[1]: Started libpod-conmon-85af800b00ba263d519131355a81d520dffe2128d8a5cf108c85fb10d0e3fd87.scope.
Oct 11 01:53:28 compute-0 sudo[269781]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dsitumbcfmqgoebdmrocymwplheeuheg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147608.4501748-235-70165322516593/AnsiballZ_command.py'
Oct 11 01:53:28 compute-0 sudo[269781]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:28 compute-0 podman[269739]: 2025-10-11 01:53:28.853078054 +0000 UTC m=+0.040412294 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:53:28 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:53:28 compute-0 podman[269739]: 2025-10-11 01:53:28.985889967 +0000 UTC m=+0.173224277 container init 85af800b00ba263d519131355a81d520dffe2128d8a5cf108c85fb10d0e3fd87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_roentgen, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.license=GPLv2)
Oct 11 01:53:29 compute-0 podman[269739]: 2025-10-11 01:53:29.003807198 +0000 UTC m=+0.191141418 container start 85af800b00ba263d519131355a81d520dffe2128d8a5cf108c85fb10d0e3fd87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_roentgen, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:53:29 compute-0 podman[269739]: 2025-10-11 01:53:29.007872401 +0000 UTC m=+0.195206661 container attach 85af800b00ba263d519131355a81d520dffe2128d8a5cf108c85fb10d0e3fd87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_roentgen, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:53:29 compute-0 optimistic_roentgen[269783]: 167 167
Oct 11 01:53:29 compute-0 systemd[1]: libpod-85af800b00ba263d519131355a81d520dffe2128d8a5cf108c85fb10d0e3fd87.scope: Deactivated successfully.
Oct 11 01:53:29 compute-0 podman[269739]: 2025-10-11 01:53:29.017771252 +0000 UTC m=+0.205105532 container died 85af800b00ba263d519131355a81d520dffe2128d8a5cf108c85fb10d0e3fd87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_roentgen, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:53:29 compute-0 systemd[1]: var-lib-containers-storage-overlay-5839c04e0fdc12ea27d815002ff2a959795a7a043704413afe92c9f7d9860e60-merged.mount: Deactivated successfully.
Oct 11 01:53:29 compute-0 podman[269739]: 2025-10-11 01:53:29.104494358 +0000 UTC m=+0.291828618 container remove 85af800b00ba263d519131355a81d520dffe2128d8a5cf108c85fb10d0e3fd87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_roentgen, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.license=GPLv2)
Oct 11 01:53:29 compute-0 systemd[1]: libpod-conmon-85af800b00ba263d519131355a81d520dffe2128d8a5cf108c85fb10d0e3fd87.scope: Deactivated successfully.
Oct 11 01:53:29 compute-0 python3.9[269785]: ansible-ansible.legacy.command Invoked with _raw_params=nft -f /etc/nftables/edpm-chains.nft _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:53:29 compute-0 sudo[269781]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:29 compute-0 podman[269809]: 2025-10-11 01:53:29.295195506 +0000 UTC m=+0.061482066 container create ff8fc848024d6042d9e7136d877704b723719f3374fd68112f6b89747f23fe4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_yalow, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.vendor=CentOS)
Oct 11 01:53:29 compute-0 systemd[1]: Started libpod-conmon-ff8fc848024d6042d9e7136d877704b723719f3374fd68112f6b89747f23fe4c.scope.
Oct 11 01:53:29 compute-0 podman[269809]: 2025-10-11 01:53:29.26966577 +0000 UTC m=+0.035952310 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:53:29 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:53:29 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c1ecb6beb2d6f63c384dfce8aaf73ecabc058d67b1effd613b86c47ce610c36d/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:53:29 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c1ecb6beb2d6f63c384dfce8aaf73ecabc058d67b1effd613b86c47ce610c36d/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:53:29 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c1ecb6beb2d6f63c384dfce8aaf73ecabc058d67b1effd613b86c47ce610c36d/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:53:29 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c1ecb6beb2d6f63c384dfce8aaf73ecabc058d67b1effd613b86c47ce610c36d/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
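[annotation] The kernel warns here because these XFS filesystems use 32-bit signed inode timestamps; 0x7fffffff seconds after the Unix epoch is the classic Y2038 limit. Quick check:

    from datetime import datetime, timezone

    # 0x7fffffff = 2147483647 s after the epoch: the Y2038 rollover point.
    print(datetime.fromtimestamp(0x7fffffff, tz=timezone.utc))
    # 2038-01-19 03:14:07+00:00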
Oct 11 01:53:29 compute-0 podman[269809]: 2025-10-11 01:53:29.441996068 +0000 UTC m=+0.208282608 container init ff8fc848024d6042d9e7136d877704b723719f3374fd68112f6b89747f23fe4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_yalow, ceph=True, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:53:29 compute-0 podman[269809]: 2025-10-11 01:53:29.46448543 +0000 UTC m=+0.230771950 container start ff8fc848024d6042d9e7136d877704b723719f3374fd68112f6b89747f23fe4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_yalow, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default)
Oct 11 01:53:29 compute-0 podman[269809]: 2025-10-11 01:53:29.469413868 +0000 UTC m=+0.235700388 container attach ff8fc848024d6042d9e7136d877704b723719f3374fd68112f6b89747f23fe4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_yalow, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True)
Oct 11 01:53:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:53:29 compute-0 podman[157119]: time="2025-10-11T01:53:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:53:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:53:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 34387 "" "Go-http-client/1.1"
Oct 11 01:53:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:53:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 7246 "" "Go-http-client/1.1"
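[annotation] These two GETs are the podman system service answering libpod REST calls over its unix socket (a metrics collector listing containers, then pulling stats). A stdlib-only sketch of issuing the same containers/json request, assuming the usual socket path /run/podman/podman.sock and the API version shown in the log:

    import http.client
    import socket

    # Stdlib-only sketch: libpod REST over the podman unix socket
    # (socket path assumed; API version taken from the log).
    class UnixHTTPConnection(http.client.HTTPConnection):
        def __init__(self, path):
            super().__init__("localhost")
            self.unix_path = path

        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self.unix_path)

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    resp = conn.getresponse()
    print(resp.status, len(resp.read()), "bytes")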
Oct 11 01:53:30 compute-0 sudo[269980]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gwktyscqrmvbvqwojlxaincjnnemeioc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147609.5256245-243-70395321908557/AnsiballZ_stat.py'
Oct 11 01:53:30 compute-0 sudo[269980]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:30 compute-0 ceph-mon[191930]: pgmap v459: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:30 compute-0 python3.9[269982]: ansible-ansible.builtin.stat Invoked with path=/etc/nftables/edpm-rules.nft.changed follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:53:30 compute-0 sudo[269980]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:30 compute-0 hungry_yalow[269849]: {
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:         "osd_id": 1,
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:         "type": "bluestore"
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:     },
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:         "osd_id": 2,
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:         "type": "bluestore"
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:     },
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:         "osd_id": 0,
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:         "type": "bluestore"
Oct 11 01:53:30 compute-0 hungry_yalow[269849]:     }
Oct 11 01:53:30 compute-0 hungry_yalow[269849]: }
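[annotation] The hungry_yalow container's JSON is `ceph-volume raw list`, keyed by OSD fsid rather than id; it should agree with the `lvm list` output earlier (osd_fsid there matches osd_uuid here, and each lv maps to its /dev/mapper device). A sketch cross-checking the two listings, assuming both JSON documents have been captured in full and parsed as lvm_list and raw_list:

    # Sketch: cross-check `lvm list` against `raw list` output
    # (lvm_list / raw_list assumed already parsed from the JSON above).
    for fsid, osd in raw_list.items():
        lvs = lvm_list.get(str(osd["osd_id"]), [])
        tags = lvs[0]["tags"] if lvs else {}
        assert tags.get("ceph.osd_fsid") == fsid, f"OSD {osd['osd_id']}: fsid mismatch"
    print("all OSDs consistent across lvm list and raw list")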
Oct 11 01:53:30 compute-0 systemd[1]: libpod-ff8fc848024d6042d9e7136d877704b723719f3374fd68112f6b89747f23fe4c.scope: Deactivated successfully.
Oct 11 01:53:30 compute-0 podman[269809]: 2025-10-11 01:53:30.687694892 +0000 UTC m=+1.453981452 container died ff8fc848024d6042d9e7136d877704b723719f3374fd68112f6b89747f23fe4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_yalow, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:53:30 compute-0 systemd[1]: libpod-ff8fc848024d6042d9e7136d877704b723719f3374fd68112f6b89747f23fe4c.scope: Consumed 1.232s CPU time.
Oct 11 01:53:30 compute-0 systemd[1]: var-lib-containers-storage-overlay-c1ecb6beb2d6f63c384dfce8aaf73ecabc058d67b1effd613b86c47ce610c36d-merged.mount: Deactivated successfully.
Oct 11 01:53:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v460: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:30 compute-0 podman[269809]: 2025-10-11 01:53:30.814302979 +0000 UTC m=+1.580589509 container remove ff8fc848024d6042d9e7136d877704b723719f3374fd68112f6b89747f23fe4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_yalow, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:53:30 compute-0 systemd[1]: libpod-conmon-ff8fc848024d6042d9e7136d877704b723719f3374fd68112f6b89747f23fe4c.scope: Deactivated successfully.
Oct 11 01:53:30 compute-0 sudo[269575]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:53:30 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:53:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:53:30 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:53:30 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev a26077b5-9485-4d5e-a3e1-903174916c49 does not exist
Oct 11 01:53:30 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev b339d576-914b-41aa-b91b-0915dcbbcdeb does not exist
Oct 11 01:53:31 compute-0 sudo[270116]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:53:31 compute-0 sudo[270116]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:31 compute-0 sudo[270116]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:31 compute-0 sudo[270158]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:53:31 compute-0 sudo[270158]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:53:31 compute-0 sudo[270158]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:31 compute-0 sudo[270221]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ssqxgppdzhjsvubflwqlhusvrnywshqp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147610.681831-252-212491702572832/AnsiballZ_file.py'
Oct 11 01:53:31 compute-0 sudo[270221]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:31 compute-0 openstack_network_exporter[159265]: ERROR   01:53:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:53:31 compute-0 openstack_network_exporter[159265]: ERROR   01:53:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:53:31 compute-0 openstack_network_exporter[159265]: ERROR   01:53:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:53:31 compute-0 openstack_network_exporter[159265]: ERROR   01:53:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:53:31 compute-0 openstack_network_exporter[159265]: 
Oct 11 01:53:31 compute-0 openstack_network_exporter[159265]: ERROR   01:53:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:53:31 compute-0 openstack_network_exporter[159265]: 
Oct 11 01:53:31 compute-0 python3.9[270223]: ansible-ansible.builtin.file Invoked with path=/etc/nftables/edpm-rules.nft.changed state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:53:31 compute-0 sudo[270221]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:31 compute-0 ceph-mon[191930]: pgmap v460: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:53:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:53:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v461: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:33 compute-0 python3.9[270373]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'machine'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:53:33 compute-0 ceph-mon[191930]: pgmap v461: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:34 compute-0 sudo[270524]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-edmvpvobvuhgpvyblzsnktribvewbnjp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147613.7158012-292-126128122225679/AnsiballZ_command.py'
Oct 11 01:53:34 compute-0 sudo[270524]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:34 compute-0 python3.9[270526]: ansible-ansible.legacy.command Invoked with _raw_params=ovs-vsctl set open . external_ids:hostname=compute-0.ctlplane.example.com external_ids:ovn-bridge=br-int external_ids:ovn-bridge-mappings=datacentre:br-ex external_ids:ovn-chassis-mac-mappings="datacentre:2e:0a:c0:16:5a:16" external_ids:ovn-encap-ip=172.19.0.100 external_ids:ovn-encap-type=geneve external_ids:ovn-encap-tos=0 external_ids:ovn-match-northd-version=False external_ids:ovn-monitor-all=True external_ids:ovn-remote=ssl:ovsdbserver-sb.openstack.svc:6642 external_ids:ovn-remote-probe-interval=60000 external_ids:ovn-ofctrl-wait-before-clear=8000 external_ids:rundir=/var/run/openvswitch 
                                              _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:53:34 compute-0 ovs-vsctl[270527]: ovs|00001|vsctl|INFO|Called as ovs-vsctl set open . external_ids:hostname=compute-0.ctlplane.example.com external_ids:ovn-bridge=br-int external_ids:ovn-bridge-mappings=datacentre:br-ex external_ids:ovn-chassis-mac-mappings=datacentre:2e:0a:c0:16:5a:16 external_ids:ovn-encap-ip=172.19.0.100 external_ids:ovn-encap-type=geneve external_ids:ovn-encap-tos=0 external_ids:ovn-match-northd-version=False external_ids:ovn-monitor-all=True external_ids:ovn-remote=ssl:ovsdbserver-sb.openstack.svc:6642 external_ids:ovn-remote-probe-interval=60000 external_ids:ovn-ofctrl-wait-before-clear=8000 external_ids:rundir=/var/run/openvswitch
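[annotation] This ovs-vsctl call stamps the chassis-level OVN settings into the external_ids column of the Open_vSwitch table. A sketch rebuilding the same invocation from a dict, using a representative subset of the keys and values copied from the log:

    import subprocess

    # Sketch: rebuild the ovs-vsctl invocation from key/value pairs
    # (subset of the external_ids shown in the log).
    external_ids = {
        "hostname": "compute-0.ctlplane.example.com",
        "ovn-bridge": "br-int",
        "ovn-bridge-mappings": "datacentre:br-ex",
        "ovn-encap-ip": "172.19.0.100",
        "ovn-encap-type": "geneve",
        "ovn-remote": "ssl:ovsdbserver-sb.openstack.svc:6642",
        "ovn-monitor-all": "True",
    }
    cmd = ["ovs-vsctl", "set", "open", "."] + [
        f"external_ids:{key}={value}" for key, value in external_ids.items()
    ]
    subprocess.run(cmd, check=True)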
Oct 11 01:53:34 compute-0 sudo[270524]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:53:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v462: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:35 compute-0 sudo[270677]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fngtaetwwngshmbeskuwjlnlvslukpza ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147614.9145458-301-274820354830026/AnsiballZ_command.py'
Oct 11 01:53:35 compute-0 sudo[270677]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:35 compute-0 python3.9[270679]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail
                                             ovs-vsctl show | grep -q "Manager"
                                              _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
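[annotation] The follow-up task is a pure presence check: does `ovs-vsctl show` mention a Manager, i.e. has an ovsdb manager connection been configured. The same check without a shell pipeline:

    import subprocess

    # Equivalent of `ovs-vsctl show | grep -q "Manager"` without a shell.
    out = subprocess.run(["ovs-vsctl", "show"],
                         capture_output=True, text=True, check=True)
    print("manager configured:", "Manager" in out.stdout)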
Oct 11 01:53:35 compute-0 sudo[270677]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:35 compute-0 ceph-mon[191930]: pgmap v462: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v463: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:36 compute-0 python3.9[270832]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:53:37 compute-0 sudo[270984]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wgzyyfypeljmxshwvuzimbdunbgmugzh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147617.2924216-319-119111322881008/AnsiballZ_file.py'
Oct 11 01:53:37 compute-0 sudo[270984]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:37 compute-0 ceph-mon[191930]: pgmap v463: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:38 compute-0 python3.9[270986]: ansible-ansible.builtin.file Invoked with path=/var/local/libexec recurse=True setype=container_file_t state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:53:38 compute-0 sudo[270984]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:38 compute-0 sshd-session[271063]: banner exchange: Connection from 64.62.156.52 port 46286: invalid format
Oct 11 01:53:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v464: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:38 compute-0 sudo[271137]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eqjtylmjyrmqzcmefotcfxkhxuycwvlb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147618.3832977-327-38992226946905/AnsiballZ_stat.py'
Oct 11 01:53:38 compute-0 sudo[271137]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:39 compute-0 python3.9[271139]: ansible-ansible.legacy.stat Invoked with path=/var/local/libexec/edpm-container-shutdown follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:53:39 compute-0 sudo[271137]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:39 compute-0 sudo[271215]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-knguvekkakuycprnpqqnotxnihordobk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147618.3832977-327-38992226946905/AnsiballZ_file.py'
Oct 11 01:53:39 compute-0 sudo[271215]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:53:39 compute-0 python3.9[271217]: ansible-ansible.legacy.file Invoked with group=root mode=0700 owner=root setype=container_file_t dest=/var/local/libexec/edpm-container-shutdown _original_basename=edpm-container-shutdown recurse=False state=file path=/var/local/libexec/edpm-container-shutdown force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:53:39 compute-0 sudo[271215]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:39 compute-0 ceph-mon[191930]: pgmap v464: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:40 compute-0 sudo[271400]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xjifculbfxjrfkexbkurmnkjyvhzzvqx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147620.208263-327-109593450757324/AnsiballZ_stat.py'
Oct 11 01:53:40 compute-0 sudo[271400]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:40 compute-0 podman[271342]: 2025-10-11 01:53:40.723974534 +0000 UTC m=+0.121683961 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, version=9.6, maintainer=Red Hat, Inc., config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-type=git, architecture=x86_64, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, distribution-scope=public, io.openshift.expose-services=, io.openshift.tags=minimal rhel9, name=ubi9-minimal, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=openstack_network_exporter, release=1755695350, com.redhat.component=ubi9-minimal-container, vendor=Red Hat, Inc., build-date=2025-08-20T13:12:41, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, managed_by=edpm_ansible, url=https://catalog.redhat.com/en/search?searchType=containers)
Oct 11 01:53:40 compute-0 podman[271341]: 2025-10-11 01:53:40.741646256 +0000 UTC m=+0.138978034 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
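[annotation] These health_status events carry each container's EDPM config in a config_data label rendered as a Python dict literal (single quotes, bare True/False), so json.loads fails on it but ast.literal_eval recovers it. Sketch with a shortened label taken from the node_exporter event:

    import ast

    # config_data labels in these events are Python dict literals
    # (single quotes, True/False), so ast.literal_eval parses them where json fails.
    label = "{'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True}"
    config = ast.literal_eval(label)
    print(config["image"], config["recreate"])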
Oct 11 01:53:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v465: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:40 compute-0 python3.9[271412]: ansible-ansible.legacy.stat Invoked with path=/var/local/libexec/edpm-start-podman-container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:53:40 compute-0 sudo[271400]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:41 compute-0 sudo[271490]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fqopuvugtwyjuourtgifdeelbxxttgeu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147620.208263-327-109593450757324/AnsiballZ_file.py'
Oct 11 01:53:41 compute-0 sudo[271490]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:41 compute-0 python3.9[271492]: ansible-ansible.legacy.file Invoked with group=root mode=0700 owner=root setype=container_file_t dest=/var/local/libexec/edpm-start-podman-container _original_basename=edpm-start-podman-container recurse=False state=file path=/var/local/libexec/edpm-start-podman-container force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:53:41 compute-0 sudo[271490]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:41 compute-0 ceph-mon[191930]: pgmap v465: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:42 compute-0 sudo[271642]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oergsgocebjybtnlwneurapezourleke ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147622.0056176-350-63027035329640/AnsiballZ_file.py'
Oct 11 01:53:42 compute-0 sudo[271642]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:42 compute-0 python3.9[271644]: ansible-ansible.builtin.file Invoked with mode=420 path=/etc/systemd/system-preset state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
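[annotation] mode=420 here is not corruption: an unquoted `mode: 0644` in YAML parses as an octal integer, and Ansible logs the value in decimal, 0o644 == 420. Check:

    # 0o644 (rw-r--r--) rendered in decimal is 420, which is what Ansible
    # logs for an unquoted YAML `mode: 0644`.
    assert 0o644 == 420
    print(oct(420))  # 0o644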
Oct 11 01:53:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v466: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:42 compute-0 sudo[271642]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:43 compute-0 sudo[271794]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-selcwtjuqpjqusljnkyhyyfbkirxzpbm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147623.1157088-358-63741253803943/AnsiballZ_stat.py'
Oct 11 01:53:43 compute-0 sudo[271794]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:43 compute-0 python3.9[271796]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/edpm-container-shutdown.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:53:43 compute-0 ceph-mon[191930]: pgmap v466: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:44 compute-0 sudo[271794]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:44 compute-0 sudo[271872]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-glkwbmdmvgnoxqclnmaopxgqqsoamtac ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147623.1157088-358-63741253803943/AnsiballZ_file.py'
Oct 11 01:53:44 compute-0 sudo[271872]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:44 compute-0 python3.9[271874]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/edpm-container-shutdown.service _original_basename=edpm-container-shutdown-service recurse=False state=file path=/etc/systemd/system/edpm-container-shutdown.service force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:53:44 compute-0 sudo[271872]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:53:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v467: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:45 compute-0 sudo[272024]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yjmayzmrswwjblgkjyzmbssvkjdbmtny ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147625.038704-370-234487483807484/AnsiballZ_stat.py'
Oct 11 01:53:45 compute-0 sudo[272024]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:45 compute-0 python3.9[272026]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system-preset/91-edpm-container-shutdown.preset follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:53:45 compute-0 sudo[272024]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:45 compute-0 ceph-mon[191930]: pgmap v467: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:46 compute-0 sudo[272119]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vtzxabmkludzwbaxnzevbnyzzbwiyypm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147625.038704-370-234487483807484/AnsiballZ_file.py'
Oct 11 01:53:46 compute-0 sudo[272119]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:46 compute-0 podman[272076]: 2025-10-11 01:53:46.427930507 +0000 UTC m=+0.142782607 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_id=edpm)
Oct 11 01:53:46 compute-0 python3.9[272122]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system-preset/91-edpm-container-shutdown.preset _original_basename=91-edpm-container-shutdown-preset recurse=False state=file path=/etc/systemd/system-preset/91-edpm-container-shutdown.preset force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:53:46 compute-0 sudo[272119]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v468: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:47 compute-0 sudo[272272]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mtxyfvmnjndukixbzbmrtjcajowpelxs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147626.9493659-382-211101556432806/AnsiballZ_systemd.py'
Oct 11 01:53:47 compute-0 sudo[272272]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:47 compute-0 python3.9[272274]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=edpm-container-shutdown state=started daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:53:47 compute-0 systemd[1]: Reloading.
Oct 11 01:53:47 compute-0 ceph-mon[191930]: pgmap v468: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:48 compute-0 systemd-sysv-generator[272305]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:53:48 compute-0 systemd-rc-local-generator[272302]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:53:48 compute-0 sudo[272272]: pam_unix(sudo:session): session closed for user root
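[annotation] Having copied the unit file and its 91-edpm-container-shutdown.preset, the play's ansible.builtin.systemd call reloads systemd and enables/starts the unit; the "Reloading." line plus the sysv/rc-local generator chatter is the reload itself. What the module call amounts to, done by hand:

    import subprocess

    # Rough equivalent of the ansible.builtin.systemd invocation above
    # (unit name from the log).
    for cmd in (["systemctl", "daemon-reload"],
                ["systemctl", "enable", "--now", "edpm-container-shutdown"]):
        subprocess.run(cmd, check=True)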
Oct 11 01:53:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v469: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:49 compute-0 sudo[272462]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vmoowdglugvfwjsqgvzdlwykdtusqirr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147628.8499093-390-24877500184102/AnsiballZ_stat.py'
Oct 11 01:53:49 compute-0 sudo[272462]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:49 compute-0 python3.9[272464]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/netns-placeholder.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:53:49 compute-0 sudo[272462]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:53:50 compute-0 ceph-mon[191930]: pgmap v469: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:50 compute-0 sudo[272541]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rgdpatlixqzzctyuylcqwwkvgnjklaqe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147628.8499093-390-24877500184102/AnsiballZ_file.py'
Oct 11 01:53:50 compute-0 sudo[272541]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:50 compute-0 python3.9[272543]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/netns-placeholder.service _original_basename=netns-placeholder-service recurse=False state=file path=/etc/systemd/system/netns-placeholder.service force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:53:50 compute-0 sudo[272541]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v470: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:51 compute-0 sudo[272693]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ieqofxmoadhzgtrgkoiwjtpiokeawbtc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147630.705708-402-237501019739887/AnsiballZ_stat.py'
Oct 11 01:53:51 compute-0 sudo[272693]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:51 compute-0 python3.9[272695]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system-preset/91-netns-placeholder.preset follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:53:51 compute-0 sudo[272693]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:52 compute-0 ceph-mon[191930]: pgmap v470: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:52 compute-0 sudo[272771]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bteuiqzggvgncijpassyvaiwdwkuvnlo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147630.705708-402-237501019739887/AnsiballZ_file.py'
Oct 11 01:53:52 compute-0 sudo[272771]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:52 compute-0 python3.9[272773]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system-preset/91-netns-placeholder.preset _original_basename=91-netns-placeholder-preset recurse=False state=file path=/etc/systemd/system-preset/91-netns-placeholder.preset force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:53:52 compute-0 sudo[272771]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v471: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:53 compute-0 sudo[272923]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hgizfqyvdgcbvwbgntkddneossvuwast ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147632.6456964-414-69121997826715/AnsiballZ_systemd.py'
Oct 11 01:53:53 compute-0 sudo[272923]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:53 compute-0 python3.9[272925]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=netns-placeholder state=started daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:53:53 compute-0 systemd[1]: Reloading.
Oct 11 01:53:53 compute-0 systemd-rc-local-generator[273003]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:53:53 compute-0 systemd-sysv-generator[273008]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:53:53 compute-0 podman[272927]: 2025-10-11 01:53:53.777595325 +0000 UTC m=+0.166685124 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 01:53:53 compute-0 podman[272929]: 2025-10-11 01:53:53.799156321 +0000 UTC m=+0.173437458 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.29.0, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=kepler, vendor=Red Hat, Inc., config_id=edpm, architecture=x86_64, com.redhat.component=ubi9-container, summary=Provides the latest release of Red Hat Universal Base Image 9., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, managed_by=edpm_ansible, distribution-scope=public, vcs-type=git, io.openshift.tags=base rhel9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, maintainer=Red Hat, Inc., name=ubi9, version=9.4, io.openshift.expose-services=, build-date=2024-09-18T21:23:30, release=1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f)
Oct 11 01:53:53 compute-0 podman[272928]: 2025-10-11 01:53:53.830083797 +0000 UTC m=+0.203426076 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, config_id=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller)
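The three health_status events above come from podman's per-container healthcheck timers; in each container's config_data, the 'healthcheck' entry maps to podman's --health-cmd flag plus a read-only bind mount of the healthcheck script at /openstack. A minimal sketch of that mapping, assuming a dict shaped like the logged config_data (render_healthcheck_args is a hypothetical helper, not edpm_ansible code):

# Sketch: translate an edpm-style 'healthcheck' config entry into podman
# CLI arguments. Dict shape mirrors the config_data logged above;
# render_healthcheck_args is a hypothetical helper for illustration.
def render_healthcheck_args(config_data):
    args = []
    hc = config_data.get("healthcheck")
    if hc:
        # The test command becomes podman's --health-cmd ...
        args += ["--health-cmd", hc["test"]]
        # ... and the mount is bound read-only at /openstack, matching
        # the ':/openstack:ro,z' entries in the logged volumes lists.
        args += ["--volume", f"{hc['mount']}:/openstack:ro,z"]
    return args

config_data = {
    "healthcheck": {
        "test": "/openstack/healthcheck podman_exporter",
        "mount": "/var/lib/openstack/healthchecks/podman_exporter",
    },
}
print(render_healthcheck_args(config_data))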
Oct 11 01:53:54 compute-0 ceph-mon[191930]: pgmap v471: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:54 compute-0 systemd[1]: Starting Create netns directory...
Oct 11 01:53:54 compute-0 systemd[1]: run-netns-placeholder.mount: Deactivated successfully.
Oct 11 01:53:54 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Oct 11 01:53:54 compute-0 systemd[1]: Finished Create netns directory.
Oct 11 01:53:54 compute-0 sudo[272923]: pam_unix(sudo:session): session closed for user root
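The logged ansible.builtin.systemd invocation (daemon_reload=True, enabled=True, state=started) corresponds to a daemon-reload followed by enable-and-start; netns-placeholder is evidently a oneshot unit, which is why systemd reports it deactivated immediately before "Finished Create netns directory". The equivalent sequence, sketched with subprocess:

# Sketch: systemctl sequence equivalent to the logged
# ansible.builtin.systemd call. 'enable --now' combines enable+start.
import subprocess

subprocess.run(["systemctl", "daemon-reload"], check=True)
subprocess.run(["systemctl", "enable", "--now", "netns-placeholder.service"],
               check=True)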
Oct 11 01:53:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:53:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v472: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:55 compute-0 sudo[273179]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ljocsvbdkztbftlzakpnrlyaohsvmqor ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147634.5416052-424-150055397113578/AnsiballZ_file.py'
Oct 11 01:53:55 compute-0 sudo[273179]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:55 compute-0 python3.9[273181]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/openstack/healthchecks setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:53:55 compute-0 sudo[273179]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:56 compute-0 ceph-mon[191930]: pgmap v472: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:56 compute-0 sudo[273345]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hvzmvmsphnykqbmsdcofzrfnnbmiyajk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147635.672153-432-88007204964204/AnsiballZ_stat.py'
Oct 11 01:53:56 compute-0 podman[273305]: 2025-10-11 01:53:56.245817161 +0000 UTC m=+0.134729877 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_compute, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.vendor=CentOS, config_id=edpm, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image)
Oct 11 01:53:56 compute-0 sudo[273345]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:53:56
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.log', '.rgw.root', 'default.rgw.meta', 'cephfs.cephfs.meta', 'vms', 'cephfs.cephfs.data', '.mgr', 'images', 'volumes', 'backups', 'default.rgw.control']
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
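"Mode upmap, max misplaced 0.050000" with "prepared 0/10 changes" means the balancer considered up to 10 upmap changes this pass and found none to make on an already-clean cluster (all 321 PGs active+clean). One way to confirm the same state from the CLI, sketched with the ceph tool's structured output (assumes an admin keyring is available on the host):

# Sketch: query the mgr balancer state; '-f json' is the ceph CLI's
# standard structured-output flag.
import json, subprocess

out = subprocess.run(
    ["ceph", "balancer", "status", "-f", "json"],
    check=True, capture_output=True, text=True,
).stdout
status = json.loads(out)
print(status["mode"], status["active"])  # e.g. 'upmap' True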
Oct 11 01:53:56 compute-0 python3.9[273352]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/ovn_controller/healthcheck follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:53:56 compute-0 sudo[273345]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:53:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v473: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:57 compute-0 sudo[273428]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rwuproxjjhsxqdkykujmcsiutzlvydek ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147635.672153-432-88007204964204/AnsiballZ_file.py'
Oct 11 01:53:57 compute-0 sudo[273428]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:57 compute-0 python3.9[273430]: ansible-ansible.legacy.file Invoked with group=zuul mode=0700 owner=zuul setype=container_file_t dest=/var/lib/openstack/healthchecks/ovn_controller/ _original_basename=healthcheck recurse=False state=file path=/var/lib/openstack/healthchecks/ovn_controller/ force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:53:57 compute-0 sudo[273428]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:58 compute-0 ceph-mon[191930]: pgmap v473: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:58 compute-0 sudo[273580]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-alegtthcjnacgalpnqgkeyalviesbkcz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147637.7410612-446-163517483614816/AnsiballZ_file.py'
Oct 11 01:53:58 compute-0 sudo[273580]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:58 compute-0 python3.9[273582]: ansible-ansible.builtin.file Invoked with path=/var/lib/kolla/config_files recurse=True setype=container_file_t state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:53:58 compute-0 sudo[273580]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v474: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:53:59 compute-0 sudo[273732]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gpwoybbziuaeafeiuatzuyzmfqisroro ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147638.8417838-454-19805966122860/AnsiballZ_stat.py'
Oct 11 01:53:59 compute-0 sudo[273732]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:53:59 compute-0 python3.9[273734]: ansible-ansible.legacy.stat Invoked with path=/var/lib/kolla/config_files/ovn_controller.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:53:59 compute-0 sudo[273732]: pam_unix(sudo:session): session closed for user root
Oct 11 01:53:59 compute-0 podman[157119]: time="2025-10-11T01:53:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:53:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:53:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:53:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:53:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:53:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6833 "" "Go-http-client/1.1"
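These two GETs are the prometheus-podman-exporter polling the libpod REST API over /run/podman/podman.sock. The same endpoints can be queried directly over the unix socket; a minimal sketch using only the standard library (UnixHTTPConnection is a local helper, not a stdlib class):

# Sketch: hit the libpod endpoint from the log over the podman socket.
import http.client
import json
import socket

class UnixHTTPConnection(http.client.HTTPConnection):
    """HTTPConnection that dials a unix socket instead of TCP."""
    def __init__(self, path):
        super().__init__("localhost")
        self.unix_path = path

    def connect(self):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(self.unix_path)
        self.sock = sock

conn = UnixHTTPConnection("/run/podman/podman.sock")
# Same endpoint the exporter requests in the log lines above.
conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
resp = conn.getresponse()
for c in json.loads(resp.read()):
    print(c["Names"], c["State"])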
Oct 11 01:54:00 compute-0 ceph-mon[191930]: pgmap v474: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:00 compute-0 sudo[273810]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jtouitojajdscfpqpyawazklaldjryui ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147638.8417838-454-19805966122860/AnsiballZ_file.py'
Oct 11 01:54:00 compute-0 sudo[273810]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:00 compute-0 python3.9[273812]: ansible-ansible.legacy.file Invoked with mode=0600 dest=/var/lib/kolla/config_files/ovn_controller.json _original_basename=.e8upn68z recurse=False state=file path=/var/lib/kolla/config_files/ovn_controller.json force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:54:00 compute-0 sudo[273810]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v475: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:01 compute-0 sudo[273962]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-npbytssxrcnngpqoluilahcglfueqbzk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147640.6938121-466-184503211776682/AnsiballZ_file.py'
Oct 11 01:54:01 compute-0 sudo[273962]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:01 compute-0 openstack_network_exporter[159265]: ERROR   01:54:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:54:01 compute-0 openstack_network_exporter[159265]: ERROR   01:54:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:54:01 compute-0 openstack_network_exporter[159265]: ERROR   01:54:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:54:01 compute-0 openstack_network_exporter[159265]: ERROR   01:54:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:54:01 compute-0 openstack_network_exporter[159265]: ERROR   01:54:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
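These exporter errors are expected on a compute node: ovn-northd and the OVN databases run on the control plane, so the appctl probes find no local control sockets, and the dpif-netdev calls fail because no userspace (PMD) datapath exists here. A quick check for the socket the probe is looking for, assuming the conventional OVN runtime directory:

# Sketch: look for the ovn-northd control socket the exporter probes.
# /var/run/ovn is the usual OVN runtime dir; adjust if relocated.
import glob

sockets = glob.glob("/var/run/ovn/ovn-northd.*.ctl")
print(sockets or "no ovn-northd control socket here (expected on compute nodes)")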
Oct 11 01:54:01 compute-0 python3.9[273964]: ansible-ansible.builtin.file Invoked with mode=0755 path=/var/lib/edpm-config/container-startup-config/ovn_controller state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:54:01 compute-0 sudo[273962]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:02 compute-0 ceph-mon[191930]: pgmap v475: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:02 compute-0 sudo[274114]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-muyxwdfhavqgokdvdhxgaycrafkfadpq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147641.8696287-474-62169179407771/AnsiballZ_stat.py'
Oct 11 01:54:02 compute-0 sudo[274114]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:02 compute-0 sudo[274114]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v476: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:03 compute-0 sudo[274192]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xhlrijratlkomepepsotkslqctrvkjkh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147641.8696287-474-62169179407771/AnsiballZ_file.py'
Oct 11 01:54:03 compute-0 sudo[274192]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:03 compute-0 sudo[274192]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:04 compute-0 ceph-mon[191930]: pgmap v476: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:54:04 compute-0 sudo[274344]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vvsszyzqxedmawsdvprlkugnveesllvn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147644.00239-488-61860034537228/AnsiballZ_container_config_data.py'
Oct 11 01:54:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v477: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:04 compute-0 sudo[274344]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:05 compute-0 python3.9[274346]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/edpm-config/container-startup-config/ovn_controller config_pattern=*.json debug=False
Oct 11 01:54:05 compute-0 sudo[274344]: pam_unix(sudo:session): session closed for user root
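ansible-container_config_data gathers the per-container startup JSON that the edpm_container_manage step below consumes. A rough approximation of its behaviour given the logged arguments (config_path, config_pattern=*.json, empty config_overrides); this is an illustration, not the module's actual code:

# Sketch: collect files matching config_pattern under config_path and
# return parsed JSON keyed by container name, with overrides applied.
import glob, json, os

def load_container_configs(config_path, config_pattern="*.json", overrides=None):
    configs = {}
    for path in sorted(glob.glob(os.path.join(config_path, config_pattern))):
        name = os.path.splitext(os.path.basename(path))[0]
        with open(path) as f:
            configs[name] = json.load(f)
    configs.update(overrides or {})
    return configs

print(load_container_configs(
    "/var/lib/edpm-config/container-startup-config/ovn_controller"))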
Oct 11 01:54:06 compute-0 ceph-mon[191930]: pgmap v477: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:06 compute-0 sudo[274496]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qliwoijziabzrsgwffynkethieharewd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147645.464658-497-207776140247794/AnsiballZ_container_config_hash.py'
Oct 11 01:54:06 compute-0 sudo[274496]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
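The pg_autoscaler figures above are internally consistent: each pool's raw pg target is usage_ratio * bias * N, where N = 300 fits every logged line exactly (consistent with the default mon_target_pg_per_osd of 100 on a 3-OSD cluster; the multiplier is inferred from the numbers, not printed by the module). For cephfs.cephfs.meta, 5.087256625643029e-07 * 4.0 * 300 = 0.0006104707950771635, as logged, before quantization toward a power of two. A check of the arithmetic:

# Sketch: reproduce the pg_autoscaler targets from the log lines above.
# TARGET_PGS = 300 is an inference (100 PGs/OSD * 3 OSDs), not log output.
ratios = {
    ".mgr":               (7.185749983720779e-06, 1.0),
    "cephfs.cephfs.meta": (5.087256625643029e-07, 4.0),
    "default.rgw.meta":   (1.2718141564107572e-07, 4.0),
}
TARGET_PGS = 300

for pool, (usage_ratio, bias) in ratios.items():
    pg_target = usage_ratio * bias * TARGET_PGS
    print(f"{pool}: pg target {pg_target}")  # matches the logged values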
Oct 11 01:54:06 compute-0 python3.9[274498]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
Oct 11 01:54:06 compute-0 sudo[274496]: pam_unix(sudo:session): session closed for user root
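ansible-container_config_hash derives a digest of the rendered configuration under config_vol_prefix, so a container is recreated only when its config actually changed. One plausible shape of such a digest, sketched here; the real module's algorithm may differ:

# Sketch: a stable digest over every file under config_vol_prefix,
# shown as an illustration of the idea, not the module's exact code.
import hashlib, os

def config_hash(config_vol_prefix="/var/lib/config-data"):
    digest = hashlib.sha256()
    for root, _dirs, files in sorted(os.walk(config_vol_prefix)):
        for name in sorted(files):
            path = os.path.join(root, name)
            digest.update(path.encode())
            with open(path, "rb") as f:
                digest.update(f.read())
    return digest.hexdigest()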
Oct 11 01:54:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v478: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:07 compute-0 sudo[274648]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jvpjpnmxxybnfrzrgduawtltuqrgwniz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147646.7975376-506-270978197928787/AnsiballZ_podman_container_info.py'
Oct 11 01:54:07 compute-0 sudo[274648]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:07 compute-0 python3.9[274650]: ansible-containers.podman.podman_container_info Invoked with executable=podman name=None
Oct 11 01:54:08 compute-0 ceph-mon[191930]: pgmap v478: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:08 compute-0 sudo[274648]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v479: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:54:09 compute-0 sudo[274826]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ezyyavvcohzpjinlwwvhkdqhgswffuyd ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760147649.1595507-519-31567672903071/AnsiballZ_edpm_container_manage.py'
Oct 11 01:54:09 compute-0 sudo[274826]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:10 compute-0 ceph-mon[191930]: pgmap v479: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:10 compute-0 python3[274828]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/edpm-config/container-startup-config/ovn_controller config_id=ovn_controller config_overrides={} config_patterns=*.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 01:54:10 compute-0 python3[274828]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: [
                                                {
                                                     "Id": "3b86aea1acd0e80af91d8a3efa79cc99f54489e3c22377193c4282a256797350",
                                                     "Digest": "sha256:1f610ed4ebf657334da87dfd95b3dc5299fb3540ec1433ae3db34f0f247d8abf",
                                                     "RepoTags": [
                                                          "quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified"
                                                     ],
                                                     "RepoDigests": [
                                                          "quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:1f610ed4ebf657334da87dfd95b3dc5299fb3540ec1433ae3db34f0f247d8abf"
                                                     ],
                                                     "Parent": "",
                                                     "Comment": "",
                                                     "Created": "2025-10-10T06:45:05.562867456Z",
                                                     "Config": {
                                                          "User": "root",
                                                          "Env": [
                                                               "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
                                                               "LANG=en_US.UTF-8",
                                                               "TZ=UTC",
                                                               "container=oci"
                                                          ],
                                                          "Entrypoint": [
                                                               "dumb-init",
                                                               "--single-child",
                                                               "--"
                                                          ],
                                                          "Cmd": [
                                                               "kolla_start"
                                                          ],
                                                          "Labels": {
                                                               "io.buildah.version": "1.41.3",
                                                               "maintainer": "OpenStack Kubernetes Operator team",
                                                               "org.label-schema.build-date": "20251009",
                                                               "org.label-schema.license": "GPLv2",
                                                               "org.label-schema.name": "CentOS Stream 9 Base Image",
                                                               "org.label-schema.schema-version": "1.0",
                                                               "org.label-schema.vendor": "CentOS",
                                                               "tcib_build_tag": "c4b77291aeca5591ac860bd4127cec2f",
                                                               "tcib_managed": "true"
                                                          },
                                                          "StopSignal": "SIGTERM"
                                                     },
                                                     "Version": "",
                                                     "Author": "",
                                                     "Architecture": "amd64",
                                                     "Os": "linux",
                                                     "Size": 345589703,
                                                     "VirtualSize": 345589703,
                                                     "GraphDriver": {
                                                          "Name": "overlay",
                                                          "Data": {
                                                               "LowerDir": "/var/lib/containers/storage/overlay/4eae16a1a1bebf220e0e2776d142170ff50fdf4a29ecedfef58b13e383c2dea5/diff:/var/lib/containers/storage/overlay/c2ad6f8b1a6091551e22adfb2e6ce479ea8bed05ed630c600bde534ea0820278/diff:/var/lib/containers/storage/overlay/f3f40f6483bf6d587286da9e86e40878c2aaaf723da5aa2364fff24f5ea28424/diff",
                                                               "UpperDir": "/var/lib/containers/storage/overlay/196eba3e63acde37d1b6ff275b74a4528a29fe849571325f0cc9b33e4578d4ce/diff",
                                                               "WorkDir": "/var/lib/containers/storage/overlay/196eba3e63acde37d1b6ff275b74a4528a29fe849571325f0cc9b33e4578d4ce/work"
                                                          }
                                                     },
                                                     "RootFS": {
                                                          "Type": "layers",
                                                          "Layers": [
                                                               "sha256:f3f40f6483bf6d587286da9e86e40878c2aaaf723da5aa2364fff24f5ea28424",
                                                               "sha256:3a9d73afb8795f4b13d74c2653e4fc76293cf6011ed9e4a2a730031f9b5a587e",
                                                               "sha256:6d133c87d65a4b4f07263aad540d76db8bdc1e1bb8172cf1b92920a277165dd0",
                                                               "sha256:80aa3c91a4f50069ff6474e0c81303bb72cd077373680779a7645a58d46917be"
                                                          ]
                                                     },
                                                     "Labels": {
                                                          "io.buildah.version": "1.41.3",
                                                          "maintainer": "OpenStack Kubernetes Operator team",
                                                          "org.label-schema.build-date": "20251009",
                                                          "org.label-schema.license": "GPLv2",
                                                          "org.label-schema.name": "CentOS Stream 9 Base Image",
                                                          "org.label-schema.schema-version": "1.0",
                                                          "org.label-schema.vendor": "CentOS",
                                                          "tcib_build_tag": "c4b77291aeca5591ac860bd4127cec2f",
                                                          "tcib_managed": "true"
                                                     },
                                                     "Annotations": {},
                                                     "ManifestType": "application/vnd.docker.distribution.manifest.v2+json",
                                                     "User": "root",
                                                     "History": [
                                                          {
                                                               "created": "2025-10-09T00:18:03.867908726Z",
                                                               "created_by": "/bin/sh -c #(nop) ADD file:b2e608b9da8e087a764c2aebbd9c2cc9181047f5b301f1dab77fdf098a28268b in / ",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-09T00:18:03.868015697Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL org.label-schema.schema-version=\"1.0\"     org.label-schema.name=\"CentOS Stream 9 Base Image\"     org.label-schema.vendor=\"CentOS\"     org.label-schema.license=\"GPLv2\"     org.label-schema.build-date=\"20251009\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-09T00:18:07.890794359Z",
                                                               "created_by": "/bin/sh -c #(nop) CMD [\"/bin/bash\"]"
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:06.074259055Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL maintainer=\"OpenStack Kubernetes Operator team\"",
                                                               "comment": "FROM quay.io/centos/centos:stream9",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:06.074278165Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL tcib_managed=true",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:06.074291595Z",
                                                               "created_by": "/bin/sh -c #(nop) ENV LANG=\"en_US.UTF-8\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:06.074304206Z",
                                                               "created_by": "/bin/sh -c #(nop) ENV TZ=\"UTC\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:06.074315366Z",
                                                               "created_by": "/bin/sh -c #(nop) ENV container=\"oci\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:06.074330277Z",
                                                               "created_by": "/bin/sh -c #(nop) USER root",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:06.426277446Z",
                                                               "created_by": "/bin/sh -c if [ -f \"/etc/yum.repos.d/ubi.repo\" ]; then rm -f /etc/yum.repos.d/ubi.repo && dnf clean all && rm -rf /var/cache/dnf; fi",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:45.068043089Z",
                                                               "created_by": "/bin/sh -c dnf install -y crudini && crudini --del /etc/dnf/dnf.conf main override_install_langs && crudini --set /etc/dnf/dnf.conf main clean_requirements_on_remove True && crudini --set /etc/dnf/dnf.conf main exactarch 1 && crudini --set /etc/dnf/dnf.conf main gpgcheck 1 && crudini --set /etc/dnf/dnf.conf main install_weak_deps False && if [ 'centos' == 'centos' ];then crudini --set /etc/dnf/dnf.conf main best False; fi && crudini --set /etc/dnf/dnf.conf main installonly_limit 0 && crudini --set /etc/dnf/dnf.conf main keepcache 0 && crudini --set /etc/dnf/dnf.conf main obsoletes 1 && crudini --set /etc/dnf/dnf.conf main plugins 1 && crudini --set /etc/dnf/dnf.conf main skip_missing_names_on_install False && crudini --set /etc/dnf/dnf.conf main tsflags nodocs",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:48.840534792Z",
                                                               "created_by": "/bin/sh -c dnf install -y ca-certificates dumb-init glibc-langpack-en procps-ng python3 sudo util-linux-user which python-tcib-containers",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:49.254426116Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/uid_gid_manage.sh /usr/local/bin/uid_gid_manage",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:49.615669407Z",
                                                               "created_by": "/bin/sh -c chmod 755 /usr/local/bin/uid_gid_manage",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:50.32351585Z",
                                                               "created_by": "/bin/sh -c bash /usr/local/bin/uid_gid_manage kolla hugetlbfs libvirt qemu",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:50.95150669Z",
                                                               "created_by": "/bin/sh -c touch /usr/local/bin/kolla_extend_start && chmod 755 /usr/local/bin/kolla_extend_start",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:51.275453253Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/set_configs.py /usr/local/bin/kolla_set_configs",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:51.585556245Z",
                                                               "created_by": "/bin/sh -c chmod 755 /usr/local/bin/kolla_set_configs",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:51.900910818Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/start.sh /usr/local/bin/kolla_start",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:52.204754907Z",
                                                               "created_by": "/bin/sh -c chmod 755 /usr/local/bin/kolla_start",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:52.491429185Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/httpd_setup.sh /usr/local/bin/kolla_httpd_setup",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:52.847171086Z",
                                                               "created_by": "/bin/sh -c chmod 755 /usr/local/bin/kolla_httpd_setup",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:53.137059418Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/copy_cacerts.sh /usr/local/bin/kolla_copy_cacerts",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:53.44892538Z",
                                                               "created_by": "/bin/sh -c chmod 755 /usr/local/bin/kolla_copy_cacerts",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:53.826190187Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/sudoers /etc/sudoers",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:54.222427047Z",
                                                               "created_by": "/bin/sh -c chmod 440 /etc/sudoers",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:54.586798968Z",
                                                               "created_by": "/bin/sh -c sed -ri '/^(passwd:|group:)/ s/systemd//g' /etc/nsswitch.conf",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:58.557228847Z",
                                                               "created_by": "/bin/sh -c dnf -y reinstall which && rpm -e --nodeps tzdata && dnf -y install tzdata",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:59.14899644Z",
                                                               "created_by": "/bin/sh -c if [ ! -f \"/etc/localtime\" ]; then ln -s /usr/share/zoneinfo/Etc/UTC /etc/localtime; fi",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:59.683368642Z",
                                                               "created_by": "/bin/sh -c mkdir -p /openstack",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:13:01.385446652Z",
                                                               "created_by": "/bin/sh -c if [ 'centos' == 'centos' ];then if [ -n \"$(rpm -qa redhat-release)\" ];then rpm -e --nodeps redhat-release; fi ; dnf -y install centos-stream-release; fi",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:13:03.734832392Z",
                                                               "created_by": "/bin/sh -c dnf update --excludepkgs redhat-release -y && dnf clean all && rm -rf /var/cache/dnf",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:13:03.734976356Z",
                                                               "created_by": "/bin/sh -c #(nop) STOPSIGNAL SIGTERM",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:13:03.734988536Z",
                                                               "created_by": "/bin/sh -c #(nop) ENTRYPOINT [\"dumb-init\", \"--single-child\", \"--\"]",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:13:03.734997136Z",
                                                               "created_by": "/bin/sh -c #(nop) CMD [\"kolla_start\"]",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:13:04.949823794Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL \"tcib_build_tag\"=\"c4b77291aeca5591ac860bd4127cec2f\""
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:15:14.765927351Z",
                                                               "created_by": "/bin/sh -c #(nop) USER root",
                                                               "comment": "FROM quay.rdoproject.org/podified-antelope-centos9/openstack-base:c4b77291aeca5591ac860bd4127cec2f",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:16:36.432887411Z",
                                                               "created_by": "/bin/sh -c dnf -y install openvswitch openvswitch-ovn-common python3-netifaces python3-openvswitch tcpdump && dnf clean all && rm -rf /var/cache/dnf",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:16:38.542295901Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL \"tcib_build_tag\"=\"c4b77291aeca5591ac860bd4127cec2f\""
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:44:27.436985309Z",
                                                               "created_by": "/bin/sh -c #(nop) USER root",
                                                               "comment": "FROM quay.rdoproject.org/podified-antelope-centos9/openstack-ovn-base:c4b77291aeca5591ac860bd4127cec2f",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:45:05.560563155Z",
                                                               "created_by": "/bin/sh -c dnf -y install openvswitch-ovn-host && dnf clean all && rm -rf /var/cache/dnf",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:45:06.845333585Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL \"tcib_build_tag\"=\"c4b77291aeca5591ac860bd4127cec2f\""
                                                          }
                                                     ],
                                                     "NamesHistory": [
                                                          "quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified"
                                                     ]
                                                }
                                           ]
                                           : quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified
Oct 11 01:54:10 compute-0 sudo[274826]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v480: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:11 compute-0 podman[274937]: 2025-10-11 01:54:11.242364174 +0000 UTC m=+0.120478983 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 01:54:11 compute-0 podman[274943]: 2025-10-11 01:54:11.247517753 +0000 UTC m=+0.129859482 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.openshift.expose-services=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc., version=9.6, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.33.7, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, container_name=openstack_network_exporter, architecture=x86_64, com.redhat.component=ubi9-minimal-container, managed_by=edpm_ansible, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, io.openshift.tags=minimal rhel9, vcs-type=git, release=1755695350)
Oct 11 01:54:11 compute-0 sudo[275081]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-anxnefheetypswzbafrpkazgkfbhwgkp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147651.0635824-527-137388068495720/AnsiballZ_stat.py'
Oct 11 01:54:11 compute-0 sudo[275081]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:11 compute-0 python3.9[275083]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:54:11 compute-0 sudo[275081]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:12 compute-0 ceph-mon[191930]: pgmap v480: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v481: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:12 compute-0 sudo[275235]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lirwaefgmylnrmsvxgxtuyyazznkkjme ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147652.2634969-536-63522208697685/AnsiballZ_file.py'
Oct 11 01:54:12 compute-0 sudo[275235]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:13 compute-0 python3.9[275237]: ansible-file Invoked with path=/etc/systemd/system/edpm_ovn_controller.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:54:13 compute-0 sudo[275235]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:13 compute-0 sudo[275311]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lnkmtckxwnmrjvjputskzciwjztcprfj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147652.2634969-536-63522208697685/AnsiballZ_stat.py'
Oct 11 01:54:13 compute-0 sudo[275311]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:13 compute-0 python3.9[275313]: ansible-stat Invoked with path=/etc/systemd/system/edpm_ovn_controller_healthcheck.timer follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:54:13 compute-0 sudo[275311]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:14 compute-0 ceph-mon[191930]: pgmap v481: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:54:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v482: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:14 compute-0 sudo[275462]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dbzsmfctdxsnejxgzbjkhmzvwqvvtcgz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147653.9821079-536-262136314720124/AnsiballZ_copy.py'
Oct 11 01:54:14 compute-0 sudo[275462]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:15 compute-0 python3.9[275464]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760147653.9821079-536-262136314720124/source dest=/etc/systemd/system/edpm_ovn_controller.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:54:15 compute-0 sudo[275462]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:15 compute-0 sudo[275538]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-znkvzbhucpjjzttltblxqptjllquvpqj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147653.9821079-536-262136314720124/AnsiballZ_systemd.py'
Oct 11 01:54:15 compute-0 sudo[275538]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:15 compute-0 python3.9[275540]: ansible-systemd Invoked with state=started name=edpm_ovn_controller.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:54:16 compute-0 sudo[275538]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:16 compute-0 ceph-mon[191930]: pgmap v482: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v483: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:16 compute-0 sudo[275707]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ggacfdvzrutrfdfmqxsvsxhtsevcvdxk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147656.3084757-560-31259255445655/AnsiballZ_command.py'
Oct 11 01:54:16 compute-0 sudo[275707]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:16 compute-0 podman[275666]: 2025-10-11 01:54:16.871531637 +0000 UTC m=+0.132409076 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, managed_by=edpm_ansible, org.label-schema.schema-version=1.0)
Oct 11 01:54:17 compute-0 python3.9[275713]: ansible-ansible.legacy.command Invoked with _raw_params=ovs-vsctl remove open . other_config hw-offload _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:54:17 compute-0 ovs-vsctl[275715]: ovs|00001|vsctl|INFO|Called as ovs-vsctl remove open . other_config hw-offload
Oct 11 01:54:17 compute-0 sudo[275707]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:17 compute-0 sudo[275865]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-adseipbzwaplnoidjruxjeyfekmfpoxj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147657.4275649-568-87003078565845/AnsiballZ_command.py'
Oct 11 01:54:17 compute-0 sudo[275865]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:18 compute-0 ceph-mon[191930]: pgmap v483: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:18 compute-0 python3.9[275867]: ansible-ansible.legacy.command Invoked with _raw_params=ovs-vsctl get Open_vSwitch . external_ids:ovn-cms-options | sed 's/\"//g' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:54:18 compute-0 ovs-vsctl[275869]: ovs|00001|db_ctl_base|ERR|no key "ovn-cms-options" in Open_vSwitch record "." column external_ids
Oct 11 01:54:18 compute-0 sudo[275865]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v484: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:19 compute-0 sudo[276020]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hbtrhtbhxjcbbtbzcrktygzewggvlboy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147658.8300633-582-56841679866507/AnsiballZ_command.py'
Oct 11 01:54:19 compute-0 sudo[276020]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:54:19 compute-0 python3.9[276023]: ansible-ansible.legacy.command Invoked with _raw_params=ovs-vsctl remove Open_vSwitch . external_ids ovn-cms-options _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:54:19 compute-0 ovs-vsctl[276024]: ovs|00001|vsctl|INFO|Called as ovs-vsctl remove Open_vSwitch . external_ids ovn-cms-options
Oct 11 01:54:19 compute-0 sudo[276020]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:20 compute-0 ceph-mon[191930]: pgmap v484: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:20 compute-0 sshd-session[264633]: Connection closed by 192.168.122.30 port 38744
Oct 11 01:54:20 compute-0 sshd-session[264630]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:54:20 compute-0 systemd[1]: session-53.scope: Deactivated successfully.
Oct 11 01:54:20 compute-0 systemd[1]: session-53.scope: Consumed 1min 15.383s CPU time.
Oct 11 01:54:20 compute-0 systemd-logind[804]: Session 53 logged out. Waiting for processes to exit.
Oct 11 01:54:20 compute-0 systemd-logind[804]: Removed session 53.
Oct 11 01:54:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v485: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:22 compute-0 ceph-mon[191930]: pgmap v485: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v486: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:24 compute-0 podman[276049]: 2025-10-11 01:54:24.243030741 +0000 UTC m=+0.129462238 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 01:54:24 compute-0 ceph-mon[191930]: pgmap v486: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:24 compute-0 podman[276051]: 2025-10-11 01:54:24.257905753 +0000 UTC m=+0.133925440 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, release=1214.1726694543, io.openshift.tags=base rhel9, io.buildah.version=1.29.0, io.k8s.display-name=Red Hat Universal Base Image 9, build-date=2024-09-18T21:23:30, managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, name=ubi9, version=9.4, distribution-scope=public, maintainer=Red Hat, Inc., vcs-type=git, com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.openshift.expose-services=, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., config_id=edpm, container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 01:54:24 compute-0 podman[276050]: 2025-10-11 01:54:24.347042155 +0000 UTC m=+0.231061989 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, managed_by=edpm_ansible, container_name=ovn_controller, tcib_managed=true)
Oct 11 01:54:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:54:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v487: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:25 compute-0 sshd-session[276119]: Accepted publickey for zuul from 192.168.122.30 port 57158 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:54:25 compute-0 systemd-logind[804]: New session 54 of user zuul.
Oct 11 01:54:25 compute-0 systemd[1]: Started Session 54 of User zuul.
Oct 11 01:54:25 compute-0 sshd-session[276119]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:54:26 compute-0 ceph-mon[191930]: pgmap v487: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:54:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:54:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:54:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:54:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:54:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:54:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v488: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:27 compute-0 podman[276246]: 2025-10-11 01:54:27.056875348 +0000 UTC m=+0.129100927 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, org.label-schema.license=GPLv2)
Oct 11 01:54:27 compute-0 python3.9[276286]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:54:28 compute-0 ceph-mon[191930]: pgmap v488: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v489: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:28 compute-0 sudo[276446]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-osbzfzandcbdmsfjdgtetyotwokfnyjo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147668.1968188-34-161990752457681/AnsiballZ_file.py'
Oct 11 01:54:28 compute-0 sudo[276446]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:29 compute-0 python3.9[276448]: ansible-ansible.builtin.file Invoked with group=zuul owner=zuul path=/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:54:29 compute-0 sudo[276446]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:29 compute-0 podman[157119]: time="2025-10-11T01:54:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:54:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:54:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:54:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:54:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:54:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6835 "" "Go-http-client/1.1"
Oct 11 01:54:29 compute-0 sudo[276598]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dncxraqdnxuikscvrpfqipseqbtdkkbz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147669.352409-34-130181664862683/AnsiballZ_file.py'
Oct 11 01:54:29 compute-0 sudo[276598]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:30 compute-0 python3.9[276600]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/neutron setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:54:30 compute-0 sudo[276598]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:30 compute-0 ceph-mon[191930]: pgmap v489: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v490: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:31 compute-0 sudo[276750]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vpimolixvfpxabbmrfvzchkkileulxao ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147670.4313824-34-17859459268114/AnsiballZ_file.py'
Oct 11 01:54:31 compute-0 sudo[276750]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:31 compute-0 python3.9[276752]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/neutron/kill_scripts setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:54:31 compute-0 sudo[276750]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:31 compute-0 sudo[276753]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:54:31 compute-0 sudo[276753]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:31 compute-0 sudo[276753]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:31 compute-0 sudo[276778]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:54:31 compute-0 openstack_network_exporter[159265]: ERROR   01:54:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:54:31 compute-0 openstack_network_exporter[159265]: ERROR   01:54:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:54:31 compute-0 openstack_network_exporter[159265]: ERROR   01:54:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:54:31 compute-0 openstack_network_exporter[159265]: ERROR   01:54:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:54:31 compute-0 openstack_network_exporter[159265]: ERROR   01:54:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:54:31 compute-0 sudo[276778]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:31 compute-0 sudo[276778]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:31 compute-0 sudo[276827]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:54:31 compute-0 sudo[276827]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:31 compute-0 sudo[276827]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:31 compute-0 sudo[276875]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 01:54:31 compute-0 sudo[276875]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:32 compute-0 sudo[277020]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-afrnyndottsyfpdwqyuzofprockaqlnb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147671.6133816-34-137474190088150/AnsiballZ_file.py'
Oct 11 01:54:32 compute-0 sudo[277020]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:32 compute-0 sudo[276875]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:32 compute-0 ceph-mon[191930]: pgmap v490: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"} v 0) v1
Oct 11 01:54:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"}]: dispatch
Oct 11 01:54:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:54:32 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:54:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:54:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:54:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:54:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:54:32 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev f81e657a-db5b-4f45-911c-670e45279cc4 does not exist
Oct 11 01:54:32 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 59806275-0e2c-4843-95ab-d41754abff7d does not exist
Oct 11 01:54:32 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 7ff87f8e-62f1-41ce-9cb2-a4ddd06062d6 does not exist
Oct 11 01:54:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:54:32 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:54:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:54:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:54:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:54:32 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:54:32 compute-0 python3.9[277024]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/neutron/ovn-metadata-proxy setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:54:32 compute-0 sudo[277020]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:32 compute-0 sudo[277037]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:54:32 compute-0 sudo[277037]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:32 compute-0 sudo[277037]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:32 compute-0 sudo[277083]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:54:32 compute-0 sudo[277083]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:32 compute-0 sudo[277083]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:32 compute-0 sudo[277131]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:54:32 compute-0 sudo[277131]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:32 compute-0 sudo[277131]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v491: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:32 compute-0 sudo[277185]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 01:54:32 compute-0 sudo[277185]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:33 compute-0 sudo[277308]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-chpyjrkadwmgzegrkbkxoehsplpxzwjn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147672.6625128-34-268591293243516/AnsiballZ_file.py'
Oct 11 01:54:33 compute-0 sudo[277308]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"}]: dispatch
Oct 11 01:54:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:54:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:54:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:54:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:54:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:54:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:54:33 compute-0 python3.9[277311]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/neutron/external/pids setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:54:33 compute-0 podman[277326]: 2025-10-11 01:54:33.382078925 +0000 UTC m=+0.085529841 container create 299a2583d7e1552a43c5b8febe7b76af8e3cf53448197c1d4853f30ccc224f31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_ardinghelli, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:54:33 compute-0 sudo[277308]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:33 compute-0 podman[277326]: 2025-10-11 01:54:33.341049082 +0000 UTC m=+0.044500098 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:54:33 compute-0 systemd[1]: Started libpod-conmon-299a2583d7e1552a43c5b8febe7b76af8e3cf53448197c1d4853f30ccc224f31.scope.
Oct 11 01:54:33 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:54:33 compute-0 podman[277326]: 2025-10-11 01:54:33.525734499 +0000 UTC m=+0.229185505 container init 299a2583d7e1552a43c5b8febe7b76af8e3cf53448197c1d4853f30ccc224f31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_ardinghelli, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_REF=reef)
Oct 11 01:54:33 compute-0 podman[277326]: 2025-10-11 01:54:33.536358481 +0000 UTC m=+0.239809427 container start 299a2583d7e1552a43c5b8febe7b76af8e3cf53448197c1d4853f30ccc224f31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_ardinghelli, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:54:33 compute-0 podman[277326]: 2025-10-11 01:54:33.543177604 +0000 UTC m=+0.246628560 container attach 299a2583d7e1552a43c5b8febe7b76af8e3cf53448197c1d4853f30ccc224f31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_ardinghelli, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507)
Oct 11 01:54:33 compute-0 silly_ardinghelli[277349]: 167 167
Oct 11 01:54:33 compute-0 systemd[1]: libpod-299a2583d7e1552a43c5b8febe7b76af8e3cf53448197c1d4853f30ccc224f31.scope: Deactivated successfully.
Oct 11 01:54:33 compute-0 podman[277326]: 2025-10-11 01:54:33.545498427 +0000 UTC m=+0.248949433 container died 299a2583d7e1552a43c5b8febe7b76af8e3cf53448197c1d4853f30ccc224f31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_ardinghelli, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 01:54:33 compute-0 systemd[1]: var-lib-containers-storage-overlay-6046f3b468a7e1814e5ba4195ae381fd5dbc4083ef9426951a2097fc74d01816-merged.mount: Deactivated successfully.
Oct 11 01:54:33 compute-0 podman[277326]: 2025-10-11 01:54:33.628761917 +0000 UTC m=+0.332212843 container remove 299a2583d7e1552a43c5b8febe7b76af8e3cf53448197c1d4853f30ccc224f31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_ardinghelli, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 01:54:33 compute-0 systemd[1]: libpod-conmon-299a2583d7e1552a43c5b8febe7b76af8e3cf53448197c1d4853f30ccc224f31.scope: Deactivated successfully.
Oct 11 01:54:33 compute-0 podman[277434]: 2025-10-11 01:54:33.895629375 +0000 UTC m=+0.086068816 container create 723d76f85e7a699953ec4cccf7d8dcd32ed583a6f27924a2d3a72a706b9af3cf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sad_murdock, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0)
Oct 11 01:54:33 compute-0 podman[277434]: 2025-10-11 01:54:33.867329327 +0000 UTC m=+0.057768778 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:54:33 compute-0 systemd[1]: Started libpod-conmon-723d76f85e7a699953ec4cccf7d8dcd32ed583a6f27924a2d3a72a706b9af3cf.scope.
Oct 11 01:54:34 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:54:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/91c281ef1947c0aa26de52992487488a13ffcea78bb1865001d7c23167c57b9d/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:54:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/91c281ef1947c0aa26de52992487488a13ffcea78bb1865001d7c23167c57b9d/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:54:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/91c281ef1947c0aa26de52992487488a13ffcea78bb1865001d7c23167c57b9d/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:54:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/91c281ef1947c0aa26de52992487488a13ffcea78bb1865001d7c23167c57b9d/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:54:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/91c281ef1947c0aa26de52992487488a13ffcea78bb1865001d7c23167c57b9d/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:54:34 compute-0 podman[277434]: 2025-10-11 01:54:34.077101838 +0000 UTC m=+0.267541319 container init 723d76f85e7a699953ec4cccf7d8dcd32ed583a6f27924a2d3a72a706b9af3cf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sad_murdock, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 01:54:34 compute-0 podman[277434]: 2025-10-11 01:54:34.11134036 +0000 UTC m=+0.301779801 container start 723d76f85e7a699953ec4cccf7d8dcd32ed583a6f27924a2d3a72a706b9af3cf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sad_murdock, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default)
Oct 11 01:54:34 compute-0 podman[277434]: 2025-10-11 01:54:34.118528289 +0000 UTC m=+0.308967740 container attach 723d76f85e7a699953ec4cccf7d8dcd32ed583a6f27924a2d3a72a706b9af3cf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sad_murdock, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507)
Oct 11 01:54:34 compute-0 ceph-mon[191930]: pgmap v491: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:34 compute-0 python3.9[277534]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'selinux'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:54:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:54:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v492: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:35 compute-0 sad_murdock[277479]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:54:35 compute-0 sad_murdock[277479]: --> relative data size: 1.0
Oct 11 01:54:35 compute-0 sad_murdock[277479]: --> All data devices are unavailable
Oct 11 01:54:35 compute-0 systemd[1]: libpod-723d76f85e7a699953ec4cccf7d8dcd32ed583a6f27924a2d3a72a706b9af3cf.scope: Deactivated successfully.
Oct 11 01:54:35 compute-0 systemd[1]: libpod-723d76f85e7a699953ec4cccf7d8dcd32ed583a6f27924a2d3a72a706b9af3cf.scope: Consumed 1.203s CPU time.
Oct 11 01:54:35 compute-0 podman[277434]: 2025-10-11 01:54:35.384467641 +0000 UTC m=+1.574907092 container died 723d76f85e7a699953ec4cccf7d8dcd32ed583a6f27924a2d3a72a706b9af3cf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sad_murdock, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 01:54:35 compute-0 systemd[1]: var-lib-containers-storage-overlay-91c281ef1947c0aa26de52992487488a13ffcea78bb1865001d7c23167c57b9d-merged.mount: Deactivated successfully.
Oct 11 01:54:35 compute-0 podman[277434]: 2025-10-11 01:54:35.482675195 +0000 UTC m=+1.673114616 container remove 723d76f85e7a699953ec4cccf7d8dcd32ed583a6f27924a2d3a72a706b9af3cf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sad_murdock, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default)
Oct 11 01:54:35 compute-0 systemd[1]: libpod-conmon-723d76f85e7a699953ec4cccf7d8dcd32ed583a6f27924a2d3a72a706b9af3cf.scope: Deactivated successfully.
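The podman lines above trace one short-lived cephadm helper container (sad_murdock) through start -> attach -> died -> remove. As a rough illustration only (not part of the log; the regex is an assumption about the journal line shape, not a podman interface), per-container lifecycles can be reconstructed from such lines:

    import re
    from collections import defaultdict

    # Sketch: group podman "container <event> <id>" journal lines by
    # container id and record the event order (create -> init -> start
    # -> attach -> died -> remove).
    EVENT_RE = re.compile(
        r"container (?P<event>create|init|start|attach|died|remove) "
        r"(?P<cid>[0-9a-f]{64})")

    def lifecycles(lines):
        seq = defaultdict(list)
        for line in lines:
            m = EVENT_RE.search(line)
            if m:
                seq[m.group("cid")].append(m.group("event"))
        return dict(seq)

    # Fed the lines above, container 723d76f8... yields
    # ['start', 'attach', 'died', 'remove'] (its create/init events
    # precede this excerpt).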
Oct 11 01:54:35 compute-0 sudo[277185]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:35 compute-0 sudo[277696]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:54:35 compute-0 sudo[277696]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:35 compute-0 sudo[277696]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:35 compute-0 sudo[277746]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nugdyhawqogdyrqhlyzlsjtxjbmfhzzt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147674.9046848-78-71874556631526/AnsiballZ_seboolean.py'
Oct 11 01:54:35 compute-0 sudo[277746]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:35 compute-0 sudo[277748]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:54:35 compute-0 sudo[277748]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:35 compute-0 sudo[277748]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:35 compute-0 sudo[277775]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:54:35 compute-0 sudo[277775]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:35 compute-0 sudo[277775]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:35 compute-0 python3.9[277755]: ansible-ansible.posix.seboolean Invoked with name=virt_sandbox_use_netlink persistent=True state=True ignore_selinux_state=False
Oct 11 01:54:35 compute-0 sudo[277800]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:54:36 compute-0 sudo[277800]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:36 compute-0 ceph-mon[191930]: pgmap v492: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:36 compute-0 podman[277864]: 2025-10-11 01:54:36.568310314 +0000 UTC m=+0.083772144 container create 83f1bef84e59dcf26a8c778e3b770c82bafb2b94010c8c5714a55630a5ba4fbd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_liskov, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:54:36 compute-0 sudo[277746]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:36 compute-0 podman[277864]: 2025-10-11 01:54:36.531201135 +0000 UTC m=+0.046663055 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:54:36 compute-0 systemd[1]: Started libpod-conmon-83f1bef84e59dcf26a8c778e3b770c82bafb2b94010c8c5714a55630a5ba4fbd.scope.
Oct 11 01:54:36 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:54:36 compute-0 podman[277864]: 2025-10-11 01:54:36.698941978 +0000 UTC m=+0.214403838 container init 83f1bef84e59dcf26a8c778e3b770c82bafb2b94010c8c5714a55630a5ba4fbd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_liskov, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:54:36 compute-0 podman[277864]: 2025-10-11 01:54:36.712165796 +0000 UTC m=+0.227627656 container start 83f1bef84e59dcf26a8c778e3b770c82bafb2b94010c8c5714a55630a5ba4fbd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_liskov, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 01:54:36 compute-0 podman[277864]: 2025-10-11 01:54:36.719547834 +0000 UTC m=+0.235009704 container attach 83f1bef84e59dcf26a8c778e3b770c82bafb2b94010c8c5714a55630a5ba4fbd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_liskov, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 01:54:36 compute-0 condescending_liskov[277882]: 167 167
Oct 11 01:54:36 compute-0 systemd[1]: libpod-83f1bef84e59dcf26a8c778e3b770c82bafb2b94010c8c5714a55630a5ba4fbd.scope: Deactivated successfully.
Oct 11 01:54:36 compute-0 podman[277864]: 2025-10-11 01:54:36.734369593 +0000 UTC m=+0.249831423 container died 83f1bef84e59dcf26a8c778e3b770c82bafb2b94010c8c5714a55630a5ba4fbd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_liskov, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:54:36 compute-0 systemd[1]: var-lib-containers-storage-overlay-c6d9f1541bcc91999e609d94180a82e71d3522831ae707148b287b8aae0ccdc8-merged.mount: Deactivated successfully.
Oct 11 01:54:36 compute-0 podman[277864]: 2025-10-11 01:54:36.809767273 +0000 UTC m=+0.325229143 container remove 83f1bef84e59dcf26a8c778e3b770c82bafb2b94010c8c5714a55630a5ba4fbd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_liskov, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 01:54:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v493: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:36 compute-0 systemd[1]: libpod-conmon-83f1bef84e59dcf26a8c778e3b770c82bafb2b94010c8c5714a55630a5ba4fbd.scope: Deactivated successfully.
Oct 11 01:54:37 compute-0 podman[277951]: 2025-10-11 01:54:37.065381281 +0000 UTC m=+0.091905025 container create 91df772c95b43094466ed9d4af8518907bc42e11a15acea1ed369acbf509e4ae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=beautiful_euler, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2)
Oct 11 01:54:37 compute-0 podman[277951]: 2025-10-11 01:54:37.036877194 +0000 UTC m=+0.063400938 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:54:37 compute-0 systemd[1]: Started libpod-conmon-91df772c95b43094466ed9d4af8518907bc42e11a15acea1ed369acbf509e4ae.scope.
Oct 11 01:54:37 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:54:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9defe012588cbc0d3cf2ede6bde2a8113d40058f39fde74e91c171cb2861af77/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:54:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9defe012588cbc0d3cf2ede6bde2a8113d40058f39fde74e91c171cb2861af77/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:54:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9defe012588cbc0d3cf2ede6bde2a8113d40058f39fde74e91c171cb2861af77/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:54:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9defe012588cbc0d3cf2ede6bde2a8113d40058f39fde74e91c171cb2861af77/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:54:37 compute-0 podman[277951]: 2025-10-11 01:54:37.252937035 +0000 UTC m=+0.279460789 container init 91df772c95b43094466ed9d4af8518907bc42e11a15acea1ed369acbf509e4ae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=beautiful_euler, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.vendor=CentOS)
Oct 11 01:54:37 compute-0 podman[277951]: 2025-10-11 01:54:37.283006811 +0000 UTC m=+0.309530555 container start 91df772c95b43094466ed9d4af8518907bc42e11a15acea1ed369acbf509e4ae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=beautiful_euler, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507)
Oct 11 01:54:37 compute-0 podman[277951]: 2025-10-11 01:54:37.290717794 +0000 UTC m=+0.317241598 container attach 91df772c95b43094466ed9d4af8518907bc42e11a15acea1ed369acbf509e4ae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=beautiful_euler, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 01:54:37 compute-0 python3.9[278072]: ansible-ansible.legacy.stat Invoked with path=/var/lib/neutron/ovn_metadata_haproxy_wrapper follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:54:38 compute-0 beautiful_euler[277994]: {
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:     "0": [
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:         {
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "devices": [
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "/dev/loop3"
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             ],
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "lv_name": "ceph_lv0",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "lv_size": "21470642176",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "name": "ceph_lv0",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "tags": {
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.cluster_name": "ceph",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.crush_device_class": "",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.encrypted": "0",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.osd_id": "0",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.type": "block",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.vdo": "0"
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             },
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "type": "block",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "vg_name": "ceph_vg0"
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:         }
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:     ],
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:     "1": [
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:         {
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "devices": [
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "/dev/loop4"
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             ],
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "lv_name": "ceph_lv1",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "lv_size": "21470642176",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "name": "ceph_lv1",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "tags": {
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.cluster_name": "ceph",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.crush_device_class": "",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.encrypted": "0",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.osd_id": "1",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.type": "block",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.vdo": "0"
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             },
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "type": "block",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "vg_name": "ceph_vg1"
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:         }
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:     ],
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:     "2": [
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:         {
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "devices": [
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "/dev/loop5"
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             ],
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "lv_name": "ceph_lv2",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "lv_size": "21470642176",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "name": "ceph_lv2",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "tags": {
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.cluster_name": "ceph",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.crush_device_class": "",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.encrypted": "0",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.osd_id": "2",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.type": "block",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:                 "ceph.vdo": "0"
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             },
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "type": "block",
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:             "vg_name": "ceph_vg2"
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:         }
Oct 11 01:54:38 compute-0 beautiful_euler[277994]:     ]
Oct 11 01:54:38 compute-0 beautiful_euler[277994]: }
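The JSON block above is the output of the `ceph-volume ... lvm list --format json` call logged at 01:54:35: a dict keyed by OSD id ("0", "1", "2"), each value a list of backing LVs with their ceph.* tags. A minimal sketch of extracting the OSD-to-device mapping, assuming the JSON has been saved to a hypothetical lvm_list.json:

    import json

    # Sketch: walk `ceph-volume lvm list --format json` output. Top-level
    # keys are OSD ids; each maps to a list of LVs, and the ceph.*
    # metadata lives under "tags".
    with open("lvm_list.json") as f:   # hypothetical capture of the JSON above
        lvm = json.load(f)

    for osd_id, lvs in sorted(lvm.items(), key=lambda kv: int(kv[0])):
        for lv in lvs:
            tags = lv["tags"]
            print(f"osd.{osd_id}: lv={lv['lv_path']}"
                  f" devices={','.join(lv['devices'])}"
                  f" osd_fsid={tags['ceph.osd_fsid']}")

    # For the data above: osd.0 on /dev/ceph_vg0/ceph_lv0 (/dev/loop3),
    # osd.1 on /dev/ceph_vg1/ceph_lv1 (/dev/loop4), osd.2 on
    # /dev/ceph_vg2/ceph_lv2 (/dev/loop5).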
Oct 11 01:54:38 compute-0 systemd[1]: libpod-91df772c95b43094466ed9d4af8518907bc42e11a15acea1ed369acbf509e4ae.scope: Deactivated successfully.
Oct 11 01:54:38 compute-0 podman[277951]: 2025-10-11 01:54:38.179477886 +0000 UTC m=+1.206001640 container died 91df772c95b43094466ed9d4af8518907bc42e11a15acea1ed369acbf509e4ae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=beautiful_euler, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 01:54:38 compute-0 systemd[1]: var-lib-containers-storage-overlay-9defe012588cbc0d3cf2ede6bde2a8113d40058f39fde74e91c171cb2861af77-merged.mount: Deactivated successfully.
Oct 11 01:54:38 compute-0 podman[277951]: 2025-10-11 01:54:38.278476085 +0000 UTC m=+1.304999819 container remove 91df772c95b43094466ed9d4af8518907bc42e11a15acea1ed369acbf509e4ae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=beautiful_euler, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2)
Oct 11 01:54:38 compute-0 systemd[1]: libpod-conmon-91df772c95b43094466ed9d4af8518907bc42e11a15acea1ed369acbf509e4ae.scope: Deactivated successfully.
Oct 11 01:54:38 compute-0 sudo[277800]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:38 compute-0 ceph-mon[191930]: pgmap v493: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:38 compute-0 sudo[278137]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:54:38 compute-0 sudo[278137]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:38 compute-0 sudo[278137]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:38 compute-0 sudo[278187]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:54:38 compute-0 sudo[278187]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:38 compute-0 sudo[278187]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:38 compute-0 sudo[278241]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:54:38 compute-0 sudo[278241]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:38 compute-0 sudo[278241]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v494: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:38 compute-0 sudo[278284]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:54:38 compute-0 sudo[278284]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:38 compute-0 python3.9[278275]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/neutron/ovn_metadata_haproxy_wrapper mode=0755 setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760147676.9227715-86-145089225083873/.source follow=False _original_basename=haproxy.j2 checksum=95c62e64c8f82dd9393a560d1b052dc98d38f810 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:54:39 compute-0 podman[278422]: 2025-10-11 01:54:39.411837804 +0000 UTC m=+0.061511084 container create 3a353fd9d99994da046658425d42bee76264b6852de3255699172b878d2d42e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_morse, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2)
Oct 11 01:54:39 compute-0 systemd[1]: Started libpod-conmon-3a353fd9d99994da046658425d42bee76264b6852de3255699172b878d2d42e5.scope.
Oct 11 01:54:39 compute-0 podman[278422]: 2025-10-11 01:54:39.393946579 +0000 UTC m=+0.043619879 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:54:39 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:54:39 compute-0 podman[278422]: 2025-10-11 01:54:39.530213534 +0000 UTC m=+0.179886854 container init 3a353fd9d99994da046658425d42bee76264b6852de3255699172b878d2d42e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_morse, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:54:39 compute-0 podman[278422]: 2025-10-11 01:54:39.548838012 +0000 UTC m=+0.198511322 container start 3a353fd9d99994da046658425d42bee76264b6852de3255699172b878d2d42e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_morse, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507)
Oct 11 01:54:39 compute-0 quizzical_morse[278461]: 167 167
Oct 11 01:54:39 compute-0 podman[278422]: 2025-10-11 01:54:39.556401078 +0000 UTC m=+0.206074368 container attach 3a353fd9d99994da046658425d42bee76264b6852de3255699172b878d2d42e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_morse, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:54:39 compute-0 systemd[1]: libpod-3a353fd9d99994da046658425d42bee76264b6852de3255699172b878d2d42e5.scope: Deactivated successfully.
Oct 11 01:54:39 compute-0 podman[278422]: 2025-10-11 01:54:39.567342434 +0000 UTC m=+0.217015744 container died 3a353fd9d99994da046658425d42bee76264b6852de3255699172b878d2d42e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_morse, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_REF=reef)
Oct 11 01:54:39 compute-0 systemd[1]: var-lib-containers-storage-overlay-891a97df6de17eba6baf70a6fea85d51df241616d557a88ea771d1c78636b05f-merged.mount: Deactivated successfully.
Oct 11 01:54:39 compute-0 podman[278422]: 2025-10-11 01:54:39.647925965 +0000 UTC m=+0.297599255 container remove 3a353fd9d99994da046658425d42bee76264b6852de3255699172b878d2d42e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_morse, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:54:39 compute-0 systemd[1]: libpod-conmon-3a353fd9d99994da046658425d42bee76264b6852de3255699172b878d2d42e5.scope: Deactivated successfully.
Oct 11 01:54:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:54:39 compute-0 podman[278535]: 2025-10-11 01:54:39.923120933 +0000 UTC m=+0.084941275 container create a22522a4b25015cb4c76da0ceb5bb1368c602380b1c75601db9d5dd21b79ea5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hardcore_joliot, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:54:39 compute-0 python3.9[278529]: ansible-ansible.legacy.stat Invoked with path=/var/lib/neutron/kill_scripts/haproxy-kill follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:54:39 compute-0 podman[278535]: 2025-10-11 01:54:39.880367974 +0000 UTC m=+0.042188276 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:54:39 compute-0 systemd[1]: Started libpod-conmon-a22522a4b25015cb4c76da0ceb5bb1368c602380b1c75601db9d5dd21b79ea5c.scope.
Oct 11 01:54:40 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:54:40 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f428f1885b3ade215e9f4801b5c06b3c48a029350a4d8370d3e3ba370d291075/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:54:40 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f428f1885b3ade215e9f4801b5c06b3c48a029350a4d8370d3e3ba370d291075/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:54:40 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f428f1885b3ade215e9f4801b5c06b3c48a029350a4d8370d3e3ba370d291075/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:54:40 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f428f1885b3ade215e9f4801b5c06b3c48a029350a4d8370d3e3ba370d291075/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:54:40 compute-0 podman[278535]: 2025-10-11 01:54:40.102569557 +0000 UTC m=+0.264389919 container init a22522a4b25015cb4c76da0ceb5bb1368c602380b1c75601db9d5dd21b79ea5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hardcore_joliot, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 01:54:40 compute-0 podman[278535]: 2025-10-11 01:54:40.123596821 +0000 UTC m=+0.285417113 container start a22522a4b25015cb4c76da0ceb5bb1368c602380b1c75601db9d5dd21b79ea5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hardcore_joliot, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:54:40 compute-0 podman[278535]: 2025-10-11 01:54:40.128831824 +0000 UTC m=+0.290652226 container attach a22522a4b25015cb4c76da0ceb5bb1368c602380b1c75601db9d5dd21b79ea5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hardcore_joliot, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 01:54:40 compute-0 ceph-mon[191930]: pgmap v494: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:40 compute-0 python3.9[278677]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/neutron/kill_scripts/haproxy-kill mode=0755 setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760147679.2030199-101-188208637690375/.source follow=False _original_basename=kill-script.j2 checksum=2dfb5489f491f61b95691c3bf95fa1fe48ff3700 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:54:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v495: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]: {
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:         "osd_id": 1,
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:         "type": "bluestore"
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:     },
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:         "osd_id": 2,
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:         "type": "bluestore"
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:     },
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:         "osd_id": 0,
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:         "type": "bluestore"
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]:     }
Oct 11 01:54:41 compute-0 hardcore_joliot[278556]: }
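The `raw list` output above reports the same three bluestore OSDs, but keyed by osd_uuid and pointing at the device-mapper paths rather than the LV paths. A hedged cross-check of the two listings (file names again hypothetical captures of the JSON shown in this log):

    import json

    # Sketch: `raw list` is keyed by osd_uuid; `lvm list` tags carry the
    # same uuid as ceph.osd_fsid. Cross-check that both describe the
    # same OSDs.
    with open("lvm_list.json") as f:
        lvm = json.load(f)
    with open("raw_list.json") as f:
        raw = json.load(f)

    uuid_to_id = {lv["tags"]["ceph.osd_fsid"]: osd_id
                  for osd_id, lvs in lvm.items() for lv in lvs}

    for osd_uuid, entry in raw.items():
        assert uuid_to_id[osd_uuid] == str(entry["osd_id"])
        print(f"osd.{entry['osd_id']} ({entry['type']}) on {entry['device']}")

    # e.g. osd.1 (bluestore) on /dev/mapper/ceph_vg1-ceph_lv1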
Oct 11 01:54:41 compute-0 systemd[1]: libpod-a22522a4b25015cb4c76da0ceb5bb1368c602380b1c75601db9d5dd21b79ea5c.scope: Deactivated successfully.
Oct 11 01:54:41 compute-0 systemd[1]: libpod-a22522a4b25015cb4c76da0ceb5bb1368c602380b1c75601db9d5dd21b79ea5c.scope: Consumed 1.176s CPU time.
Oct 11 01:54:41 compute-0 podman[278535]: 2025-10-11 01:54:41.307861064 +0000 UTC m=+1.469681356 container died a22522a4b25015cb4c76da0ceb5bb1368c602380b1c75601db9d5dd21b79ea5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hardcore_joliot, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, ceph=True)
Oct 11 01:54:41 compute-0 systemd[1]: var-lib-containers-storage-overlay-f428f1885b3ade215e9f4801b5c06b3c48a029350a4d8370d3e3ba370d291075-merged.mount: Deactivated successfully.
Oct 11 01:54:41 compute-0 podman[278535]: 2025-10-11 01:54:41.398456229 +0000 UTC m=+1.560276531 container remove a22522a4b25015cb4c76da0ceb5bb1368c602380b1c75601db9d5dd21b79ea5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hardcore_joliot, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 01:54:41 compute-0 systemd[1]: libpod-conmon-a22522a4b25015cb4c76da0ceb5bb1368c602380b1c75601db9d5dd21b79ea5c.scope: Deactivated successfully.
Oct 11 01:54:41 compute-0 sudo[278284]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:54:41 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:54:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:54:41 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:54:41 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev c0c47a75-cafb-43c2-b32c-9a5cb644aaab does not exist
Oct 11 01:54:41 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 22c716a5-ba35-419f-bd63-4709a6ebf4b1 does not exist
Oct 11 01:54:41 compute-0 podman[278767]: 2025-10-11 01:54:41.466300934 +0000 UTC m=+0.116215485 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 01:54:41 compute-0 podman[278779]: 2025-10-11 01:54:41.474643615 +0000 UTC m=+0.123177755 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.33.7, url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, managed_by=edpm_ansible, name=ubi9-minimal, maintainer=Red Hat, Inc., build-date=2025-08-20T13:12:41, config_id=edpm, version=9.6, io.openshift.expose-services=, architecture=x86_64, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, release=1755695350, vcs-type=git, container_name=openstack_network_exporter, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.component=ubi9-minimal-container, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public)
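The two `container health_status` events above fold each container's entire config into a single journal line; when scanning, usually only name= and health_status= matter. A heuristic sketch (the regex is mine, not a podman-defined format) for pulling those two fields out:

    import re

    # Heuristic sketch: fish name and health_status out of podman
    # "container health_status" journal lines. The parenthesised payload
    # is comma-separated key=value pairs; this regex assumes that shape.
    HEALTH_RE = re.compile(r"container health_status .*?"
                           r"\bname=(?P<name>[^,)]+).*?"
                           r"\bhealth_status=(?P<status>[^,)]+)")

    def health_events(lines):
        for line in lines:
            m = HEALTH_RE.search(line)
            if m:
                yield m.group("name"), m.group("status")

    # For the two events above this yields
    # ('node_exporter', 'healthy') and
    # ('openstack_network_exporter', 'healthy').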
Oct 11 01:54:41 compute-0 sudo[278857]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:54:41 compute-0 sudo[278857]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:41 compute-0 sudo[278857]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:41 compute-0 sudo[278902]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:54:41 compute-0 sudo[278902]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:54:41 compute-0 sudo[278902]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:41 compute-0 sudo[278960]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aydexbicjtujwbxvwltdiffiajrqczkc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147681.2581491-118-1083289701299/AnsiballZ_setup.py'
Oct 11 01:54:41 compute-0 sudo[278960]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:42 compute-0 python3.9[278962]: ansible-ansible.legacy.setup Invoked with filter=['ansible_pkg_mgr'] gather_subset=['!all'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:54:42 compute-0 ceph-mon[191930]: pgmap v495: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:54:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:54:42 compute-0 sudo[278960]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v496: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:43 compute-0 sudo[279044]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xmmevuuqkvebhlkfvuatknskjdlqdxrp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147681.2581491-118-1083289701299/AnsiballZ_dnf.py'
Oct 11 01:54:43 compute-0 sudo[279044]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:43 compute-0 python3.9[279046]: ansible-ansible.legacy.dnf Invoked with name=['openvswitch'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:54:44 compute-0 ceph-mon[191930]: pgmap v496: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:54:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v497: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:44 compute-0 sudo[279044]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:46 compute-0 sudo[279197]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-krybsqrbsflfwjwulwrjayezurdpjjjs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147685.1728067-130-122355369459252/AnsiballZ_systemd.py'
Oct 11 01:54:46 compute-0 sudo[279197]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:46 compute-0 python3.9[279199]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=openvswitch.service state=started daemon_reload=False daemon_reexec=False scope=system no_block=False force=None
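enabled=True plus state=started in the systemd module maps onto a single systemctl operation (sketch):

    import subprocess

    # Enable the unit for future boots and start it now, as the logged call requests.
    subprocess.run(["systemctl", "enable", "--now", "openvswitch.service"], check=True)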
Oct 11 01:54:46 compute-0 ceph-mon[191930]: pgmap v497: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:46 compute-0 sudo[279197]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v498: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:47 compute-0 podman[279203]: 2025-10-11 01:54:47.248799275 +0000 UTC m=+0.133734543 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']})
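The container health_status events are emitted when podman's healthcheck timer runs the configured test ('/openstack/healthcheck ipmi' here). The same check can be triggered by hand (sketch; container name taken from the log):

    import subprocess

    # Exit status 0 means the healthcheck passed.
    proc = subprocess.run(
        ["podman", "healthcheck", "run", "ceilometer_agent_ipmi"],
        capture_output=True, text=True,
    )
    print("healthy" if proc.returncode == 0 else "unhealthy: " + (proc.stdout or proc.stderr))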
Oct 11 01:54:48 compute-0 ceph-mon[191930]: pgmap v498: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:48 compute-0 python3.9[279372]: ansible-ansible.legacy.stat Invoked with path=/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent/01-rootwrap.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:54:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v499: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:49 compute-0 python3.9[279493]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent/01-rootwrap.conf mode=0644 setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760147687.7578373-138-37737098745638/.source.conf follow=False _original_basename=rootwrap.conf.j2 checksum=11f2cfb4b7d97b2cef3c2c2d88089e6999cffe22 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None attributes=None
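The stat/copy pairs in this section are Ansible's idempotent copy: hash the destination first, rewrite only when the SHA-1 differs from the staged source. The comparison reduces to roughly this (sketch with hypothetical paths):

    import hashlib
    import pathlib
    import shutil

    def sha1_of(path):
        # SHA-1 of the file's bytes, or None when the file does not exist yet.
        if not path.exists():
            return None
        return hashlib.sha1(path.read_bytes()).hexdigest()

    src = pathlib.Path("/tmp/.source.conf")       # hypothetical staged source
    dest = pathlib.Path("/tmp/01-rootwrap.conf")  # hypothetical destination
    if sha1_of(src) != sha1_of(dest):
        shutil.copy2(src, dest)  # copy only when checksums differ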
Oct 11 01:54:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:54:50 compute-0 ceph-mon[191930]: pgmap v499: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:54:50 compute-0 python3.9[279645]: ansible-ansible.legacy.stat Invoked with path=/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent/01-neutron-ovn-metadata-agent.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:54:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v500: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 5.2 KiB/s rd, 0 B/s wr, 8 op/s
Oct 11 01:54:51 compute-0 python3.9[279766]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent/01-neutron-ovn-metadata-agent.conf mode=0644 setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760147689.7275221-138-54194372644397/.source.conf follow=False _original_basename=neutron-ovn-metadata-agent.conf.j2 checksum=8bc979abbe81c2cf3993a225517a7e2483e20443 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:54:51 compute-0 ceph-mon[191930]: pgmap v500: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 5.2 KiB/s rd, 0 B/s wr, 8 op/s
Oct 11 01:54:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v501: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 5.2 KiB/s rd, 0 B/s wr, 8 op/s
Oct 11 01:54:53 compute-0 python3.9[279916]: ansible-ansible.legacy.stat Invoked with path=/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent/10-neutron-metadata.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:54:53 compute-0 ceph-mon[191930]: pgmap v501: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 5.2 KiB/s rd, 0 B/s wr, 8 op/s
Oct 11 01:54:54 compute-0 python3.9[280037]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent/10-neutron-metadata.conf mode=0644 setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760147692.4046905-182-61391132660902/.source.conf _original_basename=10-neutron-metadata.conf follow=False checksum=ca7d4d155f5b812fab1a3b70e34adb495d291b8d backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:54:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:54:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v502: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 0 B/s wr, 49 op/s
Oct 11 01:54:55 compute-0 podman[280163]: 2025-10-11 01:54:55.090879474 +0000 UTC m=+0.128346514 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, distribution-scope=public, version=9.4, name=ubi9, vcs-type=git, architecture=x86_64, managed_by=edpm_ansible, container_name=kepler, io.openshift.tags=base rhel9, io.openshift.expose-services=, build-date=2024-09-18T21:23:30, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9, maintainer=Red Hat, Inc., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-container, release-0.7.12=, io.buildah.version=1.29.0, config_id=edpm, summary=Provides the latest release of Red Hat Universal Base Image 9.)
Oct 11 01:54:55 compute-0 podman[280161]: 2025-10-11 01:54:55.09100976 +0000 UTC m=+0.134408373 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 01:54:55 compute-0 podman[280162]: 2025-10-11 01:54:55.13647052 +0000 UTC m=+0.175728839 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:54:55 compute-0 python3.9[280230]: ansible-ansible.legacy.stat Invoked with path=/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent/05-nova-metadata.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:54:55 compute-0 ceph-mon[191930]: pgmap v502: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 0 B/s wr, 49 op/s
Oct 11 01:54:56 compute-0 python3.9[280378]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent/05-nova-metadata.conf mode=0644 setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760147694.3214076-182-117764388400443/.source.conf _original_basename=05-nova-metadata.conf follow=False checksum=a14d6b38898a379cd37fc0bf365d17f10859446f backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:54:56
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.log', 'volumes', 'images', 'default.rgw.control', '.rgw.root', 'cephfs.cephfs.meta', 'default.rgw.meta', 'backups', 'cephfs.cephfs.data', 'vms', '.mgr']
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
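Those balancer lines are one optimizer pass: mode upmap, a 5% ceiling on misplaced objects, and 0 of 10 candidate changes prepared because the PGs are already balanced. The same state is queryable from the mgr (sketch; assumes the ceph CLI and an admin keyring on the node):

    import json
    import subprocess

    # "ceph balancer status" reports the active mode, plans, and last optimize result.
    out = subprocess.run(
        ["ceph", "balancer", "status", "-f", "json"],
        capture_output=True, text=True, check=True,
    ).stdout
    print(json.dumps(json.loads(out), indent=2))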
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
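The rbd_support handlers above reload trash-purge and mirror-snapshot schedules for each RBD pool (vms, volumes, backups, images); the empty start_after means a full reload. The loaded schedules can be listed per pool (sketch):

    import subprocess

    # Show any trash purge schedules for the pools named in the log.
    for pool in ("vms", "volumes", "backups", "images"):
        subprocess.run(["rbd", "trash", "purge", "schedule", "ls", "--pool", pool])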
Oct 11 01:54:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v503: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 01:54:57 compute-0 python3.9[280528]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:54:57 compute-0 ceph-mon[191930]: pgmap v503: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 01:54:58 compute-0 sudo[280697]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sthedopxpjhedqfqazjrrruucuhdlycu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147697.5510087-220-151888683464201/AnsiballZ_file.py'
Oct 11 01:54:58 compute-0 podman[280654]: 2025-10-11 01:54:58.126098702 +0000 UTC m=+0.121768381 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.license=GPLv2)
Oct 11 01:54:58 compute-0 sudo[280697]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:58 compute-0 python3.9[280701]: ansible-ansible.builtin.file Invoked with path=/var/local/libexec recurse=True setype=container_file_t state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:54:58 compute-0 sudo[280697]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v504: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 01:54:59 compute-0 sudo[280852]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wcxbnuwsfikqhmkkpnonkrsxpjkdcdtk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147698.7454863-228-30623985450087/AnsiballZ_stat.py'
Oct 11 01:54:59 compute-0 sudo[280852]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:54:59 compute-0 python3.9[280854]: ansible-ansible.legacy.stat Invoked with path=/var/local/libexec/edpm-container-shutdown follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:54:59 compute-0 sudo[280852]: pam_unix(sudo:session): session closed for user root
Oct 11 01:54:59 compute-0 podman[157119]: time="2025-10-11T01:54:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:54:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:54:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:54:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:54:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:54:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6839 "" "Go-http-client/1.1"
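The HTTP access lines are the podman system service answering libpod REST calls over its unix socket, here a container listing and a stats snapshot. The podman Python client wraps the same endpoints (sketch; assumes the podman package and the default root socket path):

    from podman import PodmanClient

    # Same query as the logged GET /libpod/containers/json?all=true.
    with PodmanClient(base_url="unix:///run/podman/podman.sock") as client:
        for ctr in client.containers.list(all=True):
            print(ctr.name, ctr.status)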
Oct 11 01:54:59 compute-0 ceph-mon[191930]: pgmap v504: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 01:54:59 compute-0 sudo[280930]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nkdeksxpnhjbvupezgidvjzerbfyzhau ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147698.7454863-228-30623985450087/AnsiballZ_file.py'
Oct 11 01:54:59 compute-0 sudo[280930]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:00 compute-0 python3.9[280932]: ansible-ansible.legacy.file Invoked with group=root mode=0700 owner=root setype=container_file_t dest=/var/local/libexec/edpm-container-shutdown _original_basename=edpm-container-shutdown recurse=False state=file path=/var/local/libexec/edpm-container-shutdown force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
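The ansible.legacy.file call pins the shutdown helper to root:root, mode 0700, and SELinux type container_file_t. Outside Ansible that is approximately (sketch):

    import os
    import subprocess

    path = "/var/local/libexec/edpm-container-shutdown"
    os.chown(path, 0, 0)   # owner=root, group=root
    os.chmod(path, 0o700)  # mode=0700
    # setype=container_file_t; chcon changes the type on the live file
    subprocess.run(["chcon", "-t", "container_file_t", path], check=True)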
Oct 11 01:55:00 compute-0 sudo[280930]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v505: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 01:55:01 compute-0 sudo[281082]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sgppbyjsfltzpxtgllbvftrutdvfdmou ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147700.4399405-228-126875128471529/AnsiballZ_stat.py'
Oct 11 01:55:01 compute-0 sudo[281082]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:01 compute-0 python3.9[281084]: ansible-ansible.legacy.stat Invoked with path=/var/local/libexec/edpm-start-podman-container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:55:01 compute-0 sudo[281082]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:01 compute-0 openstack_network_exporter[159265]: ERROR   01:55:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:55:01 compute-0 openstack_network_exporter[159265]: ERROR   01:55:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:55:01 compute-0 openstack_network_exporter[159265]: ERROR   01:55:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:55:01 compute-0 openstack_network_exporter[159265]: ERROR   01:55:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:55:01 compute-0 openstack_network_exporter[159265]: ERROR   01:55:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
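The exporter errors mean its ovs-appctl-style calls found no ovn-northd or ovsdb-server control sockets, which is expected on a compute node that runs only ovn-controller; the datapath errors follow for the same reason. The sockets it probes for look like this (sketch; the run directories are the conventional defaults and may differ):

    import glob

    # Daemon control sockets are created as <name>.<pid>.ctl in each run dir.
    for pattern in ("/var/run/ovn/ovn-northd.*.ctl",
                    "/var/run/openvswitch/ovsdb-server.*.ctl"):
        matches = glob.glob(pattern)
        print(pattern, "->", matches or "no control socket found")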
Oct 11 01:55:01 compute-0 sudo[281160]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-omphvsvcpbpmjpcqzcontnlndrexdyhs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147700.4399405-228-126875128471529/AnsiballZ_file.py'
Oct 11 01:55:01 compute-0 sudo[281160]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:01 compute-0 ceph-mon[191930]: pgmap v505: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 01:55:02 compute-0 python3.9[281162]: ansible-ansible.legacy.file Invoked with group=root mode=0700 owner=root setype=container_file_t dest=/var/local/libexec/edpm-start-podman-container _original_basename=edpm-start-podman-container recurse=False state=file path=/var/local/libexec/edpm-start-podman-container force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:55:02 compute-0 sudo[281160]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v506: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 0 B/s wr, 50 op/s
Oct 11 01:55:03 compute-0 sudo[281312]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ykeqbuwrukewbswiaijpqpdiystgllfb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147702.492933-251-86966511402210/AnsiballZ_file.py'
Oct 11 01:55:03 compute-0 sudo[281312]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:03 compute-0 python3.9[281314]: ansible-ansible.builtin.file Invoked with mode=420 path=/etc/systemd/system-preset state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
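mode=420 in that invocation is not a typo: Ansible logs the integer it received, and 420 decimal equals 0o644, which is how an unquoted 0644 arrives from YAML. A one-line check:

    # 420 decimal == 0o644; quoting the mode ('0644') in playbooks avoids the surprise.
    assert 420 == 0o644
    print(oct(420))  # -> 0o644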
Oct 11 01:55:03 compute-0 sudo[281312]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:03 compute-0 ceph-mon[191930]: pgmap v506: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 0 B/s wr, 50 op/s
Oct 11 01:55:04 compute-0 sudo[281464]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eknpcxrrhqnrwfatubmgqvkkvhprmwjj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147703.7012563-259-217128454641536/AnsiballZ_stat.py'
Oct 11 01:55:04 compute-0 sudo[281464]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:04 compute-0 python3.9[281466]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/edpm-container-shutdown.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:55:04 compute-0 sudo[281464]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:55:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v507: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 0 B/s wr, 50 op/s
Oct 11 01:55:05 compute-0 sudo[281542]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rhcvocgwnocgnlshmmybdanryohcleiu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147703.7012563-259-217128454641536/AnsiballZ_file.py'
Oct 11 01:55:05 compute-0 sudo[281542]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:05 compute-0 python3.9[281544]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/edpm-container-shutdown.service _original_basename=edpm-container-shutdown-service recurse=False state=file path=/etc/systemd/system/edpm-container-shutdown.service force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:55:05 compute-0 sudo[281542]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:05 compute-0 ceph-mon[191930]: pgmap v507: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 0 B/s wr, 50 op/s
Oct 11 01:55:06 compute-0 sudo[281694]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hfehaurodycwzixxuqvdkoxvymqygvis ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147705.666096-271-160617331199704/AnsiballZ_stat.py'
Oct 11 01:55:06 compute-0 sudo[281694]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
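Each pg_autoscaler pair is one pool's target computation: the pool's share of raw capacity times its bias gives a fractional PG target, which is then quantized. The '.mgr' line reproduces exactly if the cluster-wide budget is 300 PGs (e.g. mon_target_pg_per_osd=100 across 3 OSDs, an assumption that matches the logged numbers); the real autoscaler also enforces per-pool minimums, which is why near-zero targets elsewhere sit at 16 or 32. A toy version of the arithmetic:

    def quantize(target):
        # Round up to the nearest power of two, never below 1.
        pgs = 1
        while pgs < target:
            pgs *= 2
        return pgs

    capacity_ratio = 7.185749983720779e-06   # '.mgr' usage share from the log
    bias = 1.0
    pg_target = capacity_ratio * bias * 300  # assumed 100 PGs/OSD * 3 OSDs
    print(pg_target, "->", quantize(pg_target))  # 0.002155... -> 1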
Oct 11 01:55:06 compute-0 python3.9[281696]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system-preset/91-edpm-container-shutdown.preset follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:55:06 compute-0 sudo[281694]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v508: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 6.0 KiB/s rd, 0 B/s wr, 9 op/s
Oct 11 01:55:06 compute-0 sudo[281772]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-czasrhnaejvgmxkiuimkeyqmxctzmovs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147705.666096-271-160617331199704/AnsiballZ_file.py'
Oct 11 01:55:07 compute-0 sudo[281772]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:07 compute-0 python3.9[281774]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system-preset/91-edpm-container-shutdown.preset _original_basename=91-edpm-container-shutdown-preset recurse=False state=file path=/etc/systemd/system-preset/91-edpm-container-shutdown.preset force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
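The preset installed above decides whether 'systemctl preset' enables the unit by default; preset files are plain enable/disable lines matched in lexical order. Its likely content, inferred from the file name (an assumption, not taken from the log):

    import pathlib

    preset = pathlib.Path("/etc/systemd/system-preset/91-edpm-container-shutdown.preset")
    # Assumed content: a single "enable" rule for the matching unit.
    preset.write_text("enable edpm-container-shutdown.service\n")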
Oct 11 01:55:07 compute-0 sudo[281772]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.939 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to be longer than the expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.940 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.940 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.941 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f8ed27f97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb8c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb1a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb200>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed2874260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed3ab42f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb350>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb90>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fa390>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.945 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f8ed27fbad0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f8ed27faff0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb3b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.949 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbbf0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.950 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbc80>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f8ed27fb110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f8ed27fb170>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f8ed27fb1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f8ed27fb230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f8ed2874230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f8ed27fb290>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f8ed5778d70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f8ed27fb650>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f8ed27fbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f8ed27fb320>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.955 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.950 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.956 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.956 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.956 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27f9610>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.957 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb620>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.957 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbe30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.957 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbec0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.958 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbf50>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f8ed27fbb60>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f8ed27fa3f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f8ed27fb380>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f8ed27fbbc0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f8ed27fbc50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f8ed27fbce0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f8ed27fbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f8ed27fb590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f8ed27f95e0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.962 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f8ed27fb5f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.962 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.962 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f8ed27fbe00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.962 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.962 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f8ed27fbe90>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.963 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.963 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f8ed27fbf20>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.963 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:55:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:55:08 compute-0 ceph-mon[191930]: pgmap v508: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 6.0 KiB/s rd, 0 B/s wr, 9 op/s
Oct 11 01:55:08 compute-0 sudo[281925]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qoyacpdmbxotnwskwqqujpjbpcsflcbp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147707.5568054-283-39937361257329/AnsiballZ_systemd.py'
Oct 11 01:55:08 compute-0 sudo[281925]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:08 compute-0 python3.9[281927]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=edpm-container-shutdown state=started daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:55:08 compute-0 systemd[1]: Reloading.
Oct 11 01:55:08 compute-0 systemd-rc-local-generator[281951]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:55:08 compute-0 systemd-sysv-generator[281959]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:55:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v509: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:09 compute-0 sudo[281925]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:55:10 compute-0 ceph-mon[191930]: pgmap v509: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:10 compute-0 sudo[282114]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mukevppurtpnkphxrfdosaltolbozxsj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147709.4680297-291-95814851757389/AnsiballZ_stat.py'
Oct 11 01:55:10 compute-0 sudo[282114]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:10 compute-0 python3.9[282116]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/netns-placeholder.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:55:10 compute-0 sudo[282114]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:10 compute-0 sudo[282192]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lvjyykyqmhyknsgdgxllofkmcunebscm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147709.4680297-291-95814851757389/AnsiballZ_file.py'
Oct 11 01:55:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v510: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:10 compute-0 sudo[282192]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:11 compute-0 python3.9[282194]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/netns-placeholder.service _original_basename=netns-placeholder-service recurse=False state=file path=/etc/systemd/system/netns-placeholder.service force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:55:11 compute-0 sudo[282192]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:11 compute-0 podman[282319]: 2025-10-11 01:55:11.950984145 +0000 UTC m=+0.121787933 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, distribution-scope=public, io.buildah.version=1.33.7, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, version=9.6, container_name=openstack_network_exporter, managed_by=edpm_ansible, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.openshift.tags=minimal rhel9, architecture=x86_64, maintainer=Red Hat, Inc., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.expose-services=, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, build-date=2025-08-20T13:12:41, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, vcs-type=git, config_id=edpm, release=1755695350, url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., com.redhat.component=ubi9-minimal-container)
Oct 11 01:55:11 compute-0 podman[282318]: 2025-10-11 01:55:11.951903856 +0000 UTC m=+0.125496498 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 01:55:11 compute-0 sudo[282376]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mbjsjzolobsyzhdwknnrnvpwgvnbowly ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147711.3606849-303-232175016525165/AnsiballZ_stat.py'
Oct 11 01:55:11 compute-0 sudo[282376]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:12 compute-0 ceph-mon[191930]: pgmap v510: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:12 compute-0 python3.9[282387]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system-preset/91-netns-placeholder.preset follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:55:12 compute-0 sudo[282376]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:12 compute-0 sudo[282464]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lpvrfoyaypwbloridsvafcoodvnhhhwv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147711.3606849-303-232175016525165/AnsiballZ_file.py'
Oct 11 01:55:12 compute-0 sudo[282464]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v511: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:12 compute-0 python3.9[282466]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system-preset/91-netns-placeholder.preset _original_basename=91-netns-placeholder-preset recurse=False state=file path=/etc/systemd/system-preset/91-netns-placeholder.preset force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:55:12 compute-0 sudo[282464]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:13 compute-0 sudo[282616]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tlfumgrqmukvaxeuyacysrjuyrsfxgtm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147713.2639937-315-186462234725648/AnsiballZ_systemd.py'
Oct 11 01:55:13 compute-0 sudo[282616]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:14 compute-0 ceph-mon[191930]: pgmap v511: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:14 compute-0 python3.9[282618]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=netns-placeholder state=started daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:55:14 compute-0 systemd[1]: Reloading.
Oct 11 01:55:14 compute-0 systemd-rc-local-generator[282644]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:55:14 compute-0 systemd-sysv-generator[282650]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:55:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:55:14 compute-0 systemd[1]: Starting Create netns directory...
Oct 11 01:55:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v512: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:14 compute-0 systemd[1]: run-netns-placeholder.mount: Deactivated successfully.
Oct 11 01:55:14 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Oct 11 01:55:14 compute-0 systemd[1]: Finished Create netns directory.
Oct 11 01:55:14 compute-0 sudo[282616]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:15 compute-0 sudo[282808]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ysaoraqwkiwhrzpxdplptmnrzndoxtez ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147715.3114004-325-200736291335994/AnsiballZ_file.py'
Oct 11 01:55:15 compute-0 sudo[282808]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:16 compute-0 ceph-mon[191930]: pgmap v512: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:16 compute-0 python3.9[282810]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/openstack/healthchecks setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:55:16 compute-0 sudo[282808]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v513: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:17 compute-0 sudo[282960]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jxcjtaojwtmuferzveozpxqzumzytesj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147716.4686506-333-205934288441042/AnsiballZ_stat.py'
Oct 11 01:55:17 compute-0 sudo[282960]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:17 compute-0 python3.9[282962]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/ovn_metadata_agent/healthcheck follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:55:17 compute-0 sudo[282960]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:18 compute-0 podman[283057]: 2025-10-11 01:55:18.072832317 +0000 UTC m=+0.111265565 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_id=edpm, container_name=ceilometer_agent_ipmi, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
Oct 11 01:55:18 compute-0 ceph-mon[191930]: pgmap v513: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:18 compute-0 sudo[283101]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zvfaitutkprknviosrxwxnbqqgllbyin ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147716.4686506-333-205934288441042/AnsiballZ_copy.py'
Oct 11 01:55:18 compute-0 sudo[283101]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:18 compute-0 python3.9[283104]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/healthchecks/ovn_metadata_agent/ group=zuul mode=0700 owner=zuul setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760147716.4686506-333-205934288441042/.source _original_basename=healthcheck follow=False checksum=898a5a1fcd473cf731177fc866e3bd7ebf20a131 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:55:18 compute-0 sudo[283101]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v514: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:19 compute-0 sudo[283254]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gwpcllvoigeijoaiehelpqfssedgmtyy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147718.7896864-350-225460319440895/AnsiballZ_file.py'
Oct 11 01:55:19 compute-0 sudo[283254]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:19 compute-0 python3.9[283256]: ansible-ansible.builtin.file Invoked with path=/var/lib/kolla/config_files recurse=True setype=container_file_t state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:55:19 compute-0 sudo[283254]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:55:20 compute-0 ceph-mon[191930]: pgmap v514: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:20 compute-0 sudo[283407]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dbfqpwdtsdidvftcuklxnhosmmuktcrp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147720.00451-358-171794152221992/AnsiballZ_stat.py'
Oct 11 01:55:20 compute-0 sudo[283407]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:20 compute-0 python3.9[283409]: ansible-ansible.legacy.stat Invoked with path=/var/lib/kolla/config_files/ovn_metadata_agent.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:55:20 compute-0 sudo[283407]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v515: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:21 compute-0 sudo[283530]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rjzklxlabfecpuvutkauugrdlwexkahj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147720.00451-358-171794152221992/AnsiballZ_copy.py'
Oct 11 01:55:21 compute-0 sudo[283530]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:21 compute-0 python3.9[283532]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/kolla/config_files/ovn_metadata_agent.json mode=0600 src=/home/zuul/.ansible/tmp/ansible-tmp-1760147720.00451-358-171794152221992/.source.json _original_basename=.i7qejej7 follow=False checksum=a908ef151ded3a33ae6c9ac8be72a35e5e33b9dc backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:55:21 compute-0 sudo[283530]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:22 compute-0 ceph-mon[191930]: pgmap v515: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:22 compute-0 sudo[283682]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gvqsofkrjocnjumupegqxduhsweqeyaf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147722.0163908-373-52432901540162/AnsiballZ_file.py'
Oct 11 01:55:22 compute-0 sudo[283682]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:22 compute-0 python3.9[283684]: ansible-ansible.builtin.file Invoked with mode=0755 path=/var/lib/edpm-config/container-startup-config/ovn_metadata_agent state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:55:22 compute-0 sudo[283682]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v516: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:23 compute-0 sudo[283834]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-npreunxctfturvephhqpxpnephqzwxxk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147723.1633935-381-263486377146394/AnsiballZ_stat.py'
Oct 11 01:55:23 compute-0 sudo[283834]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:23 compute-0 sudo[283834]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:24 compute-0 ceph-mon[191930]: pgmap v516: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:24 compute-0 sudo[283957]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wrcsjkgdwaizoppyyhginlgizdayfqgf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147723.1633935-381-263486377146394/AnsiballZ_copy.py'
Oct 11 01:55:24 compute-0 sudo[283957]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:55:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v517: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:24 compute-0 sudo[283957]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:26 compute-0 ceph-mon[191930]: pgmap v517: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:26 compute-0 sudo[284136]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jtgkjcecbzvemhsnjuiqxzzrdlmevztf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147725.3906553-398-233111278701668/AnsiballZ_container_config_data.py'
Oct 11 01:55:26 compute-0 sudo[284136]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:26 compute-0 podman[284084]: 2025-10-11 01:55:26.206043202 +0000 UTC m=+0.129481034 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 01:55:26 compute-0 podman[284092]: 2025-10-11 01:55:26.249162878 +0000 UTC m=+0.147767597 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, version=9.4, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, com.redhat.component=ubi9-container, build-date=2024-09-18T21:23:30, distribution-scope=public, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=kepler, name=ubi9, io.buildah.version=1.29.0, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, maintainer=Red Hat, Inc., summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, managed_by=edpm_ansible, release=1214.1726694543, vendor=Red Hat, Inc., io.openshift.tags=base rhel9)
Oct 11 01:55:26 compute-0 podman[284090]: 2025-10-11 01:55:26.298587304 +0000 UTC m=+0.209866076 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_controller, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 01:55:26 compute-0 python3.9[284154]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/edpm-config/container-startup-config/ovn_metadata_agent config_pattern=*.json debug=False
Oct 11 01:55:26 compute-0 sudo[284136]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:55:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:55:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:55:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:55:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:55:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:55:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v518: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:27 compute-0 sudo[284326]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wrybdaxogcftlzbmuchzahubtylldedo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147726.7386212-407-92051390577911/AnsiballZ_container_config_hash.py'
Oct 11 01:55:27 compute-0 sudo[284326]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:27 compute-0 python3.9[284328]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
Oct 11 01:55:27 compute-0 sudo[284326]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:28 compute-0 ceph-mon[191930]: pgmap v518: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v519: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:28 compute-0 sudo[284491]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nhxmgkiegohxnuaaauvlcdcwknlwjfwd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147728.2053413-416-281176881324025/AnsiballZ_podman_container_info.py'
Oct 11 01:55:28 compute-0 sudo[284491]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:28 compute-0 podman[284452]: 2025-10-11 01:55:28.987986725 +0000 UTC m=+0.146532972 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_compute, managed_by=edpm_ansible)
Oct 11 01:55:29 compute-0 python3.9[284497]: ansible-containers.podman.podman_container_info Invoked with executable=podman name=None
Oct 11 01:55:29 compute-0 sudo[284491]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:29 compute-0 podman[157119]: time="2025-10-11T01:55:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:55:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:55:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 32820 "" "Go-http-client/1.1"
Oct 11 01:55:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:55:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:55:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 6828 "" "Go-http-client/1.1"
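The two access-log lines from podman[157119] are the libpod REST service answering containers/json and containers/stats over its unix socket (the Go-http-client user agent is another local process, likely the health/metrics tooling). The same API can be queried directly; a minimal sketch using only the standard library, assuming the default rootful socket at /run/podman/podman.sock:

    import http.client
    import json
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        """Just enough HTTP-over-unix-socket to talk to the libpod API."""
        def __init__(self, socket_path: str):
            super().__init__("localhost")
            self.socket_path = socket_path

        def connect(self):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(self.socket_path)
            self.sock = sock

    conn = UnixHTTPConnection("/run/podman/podman.sock")  # assumed rootful default path
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    for ctr in json.loads(conn.getresponse().read()):
        print(ctr["Names"], ctr["State"])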
Oct 11 01:55:30 compute-0 ceph-mon[191930]: pgmap v519: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v520: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:31 compute-0 sudo[284676]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ihwrovehvhsnlyvotphbsybbwkbnejmv ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760147730.5085948-429-176394173526103/AnsiballZ_edpm_container_manage.py'
Oct 11 01:55:31 compute-0 sudo[284676]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:31 compute-0 openstack_network_exporter[159265]: ERROR   01:55:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:55:31 compute-0 openstack_network_exporter[159265]: ERROR   01:55:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:55:31 compute-0 openstack_network_exporter[159265]: ERROR   01:55:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:55:31 compute-0 openstack_network_exporter[159265]: ERROR   01:55:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:55:31 compute-0 openstack_network_exporter[159265]: ERROR   01:55:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
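These openstack_network_exporter errors are expected on a compute node: the exporter probes OVS/OVN daemons through control sockets under the /run/openvswitch and /run/ovn mounts visible in its container definition later in this log, but ovn-northd only runs on control-plane nodes, and the dpif-netdev/pmd-* appctl calls only answer when a userspace (netdev/DPDK) datapath exists, which this kernel-datapath host does not have. They recur on every scrape and indicate absent collectors rather than a fault.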
Oct 11 01:55:31 compute-0 python3[284678]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/edpm-config/container-startup-config/ovn_metadata_agent config_id=ovn_metadata_agent config_overrides={} config_patterns=*.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 01:55:32 compute-0 ceph-mon[191930]: pgmap v520: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v521: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:34 compute-0 ceph-mon[191930]: pgmap v521: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:55:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v522: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:36 compute-0 ceph-mon[191930]: pgmap v522: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v523: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:38 compute-0 ceph-mon[191930]: pgmap v523: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v524: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:55:40 compute-0 ceph-mon[191930]: pgmap v524: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v525: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #27. Immutable memtables: 0.
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:55:41.405992) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 9] Flushing memtable with next log file: 27
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147741406027, "job": 9, "event": "flush_started", "num_memtables": 1, "num_entries": 2034, "num_deletes": 251, "total_data_size": 3492980, "memory_usage": 3541384, "flush_reason": "Manual Compaction"}
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 9] Level-0 flush table #28: started
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147741423191, "cf_name": "default", "job": 9, "event": "table_file_creation", "file_number": 28, "file_size": 3428326, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 9713, "largest_seqno": 11746, "table_properties": {"data_size": 3419051, "index_size": 5897, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 2309, "raw_key_size": 17701, "raw_average_key_size": 19, "raw_value_size": 3400762, "raw_average_value_size": 3732, "num_data_blocks": 267, "num_entries": 911, "num_filter_entries": 911, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760147506, "oldest_key_time": 1760147506, "file_creation_time": 1760147741, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 28, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 9] Flush lasted 17288 microseconds, and 7732 cpu microseconds.
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:55:41.423283) [db/flush_job.cc:967] [default] [JOB 9] Level-0 flush table #28: 3428326 bytes OK
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:55:41.423302) [db/memtable_list.cc:519] [default] Level-0 commit table #28 started
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:55:41.425158) [db/memtable_list.cc:722] [default] Level-0 commit table #28: memtable #1 done
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:55:41.425169) EVENT_LOG_v1 {"time_micros": 1760147741425166, "job": 9, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:55:41.425187) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 9] Try to delete WAL files size 3484515, prev total WAL file size 3484515, number of live WAL files 2.
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000024.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:55:41.426375) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F7300353032' seq:72057594037927935, type:22 .. '7061786F7300373534' seq:0, type:0; will stop at (end)
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 10] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 9 Base level 0, inputs: [28(3347KB)], [26(5992KB)]
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147741426410, "job": 10, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [28], "files_L6": [26], "score": -1, "input_data_size": 9564660, "oldest_snapshot_seqno": -1}
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 10] Generated table #29: 3691 keys, 7805064 bytes, temperature: kUnknown
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147741461991, "cf_name": "default", "job": 10, "event": "table_file_creation", "file_number": 29, "file_size": 7805064, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 7776843, "index_size": 17871, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 9285, "raw_key_size": 88638, "raw_average_key_size": 24, "raw_value_size": 7706642, "raw_average_value_size": 2087, "num_data_blocks": 773, "num_entries": 3691, "num_filter_entries": 3691, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760147741, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 29, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:55:41.462200) [db/compaction/compaction_job.cc:1663] [default] [JOB 10] Compacted 1@0 + 1@6 files to L6 => 7805064 bytes
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:55:41.464446) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 268.3 rd, 218.9 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(3.3, 5.9 +0.0 blob) out(7.4 +0.0 blob), read-write-amplify(5.1) write-amplify(2.3) OK, records in: 4205, records dropped: 514 output_compression: NoCompression
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:55:41.464465) EVENT_LOG_v1 {"time_micros": 1760147741464455, "job": 10, "event": "compaction_finished", "compaction_time_micros": 35648, "compaction_time_cpu_micros": 18291, "output_level": 6, "num_output_files": 1, "total_output_size": 7805064, "num_input_records": 4205, "num_output_records": 3691, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000028.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147741465129, "job": 10, "event": "table_file_deletion", "file_number": 28}
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000026.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147741466271, "job": 10, "event": "table_file_deletion", "file_number": 26}
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:55:41.426187) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:55:41.466420) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:55:41.466427) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:55:41.466429) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:55:41.466431) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:55:41 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:55:41.466433) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
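The RocksDB block above is one routine manual compaction inside ceph-mon's store: job 9 flushes a ~3.4 MB memtable to L0 table #28, job 10 merges #28 with L6 table #26 into #29 (dropping 514 of 4205 records, mostly tombstones), and the WAL plus both input tables are deleted. The reported amplification follows from the byte counts: write-amplify = 7805064 / 3428326 ≈ 2.3 (output over the new L0 input), and read-write-amplify = (9564660 + 7805064) / 3428326 ≈ 5.1 (all bytes read plus written, over the new input). The EVENT_LOG_v1 payloads are plain JSON, so they can be mined straight from the journal; a minimal sketch, assuming the mon's journal is piped in on stdin:

    import json
    import re
    import sys

    EVENT = re.compile(r"EVENT_LOG_v1 (\{.*\})\s*$")

    # e.g. journalctl -t ceph-mon --no-pager | python3 rocksdb_events.py
    for line in sys.stdin:
        m = EVENT.search(line)
        if not m:
            continue
        ev = json.loads(m.group(1))
        keys = ("job", "event", "file_number", "total_data_size", "total_output_size")
        print({k: ev[k] for k in keys if k in ev})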
Oct 11 01:55:41 compute-0 sudo[284750]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:55:41 compute-0 sudo[284750]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:55:41 compute-0 sudo[284750]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:41 compute-0 sudo[284775]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:55:41 compute-0 sudo[284775]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:55:41 compute-0 sudo[284775]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:42 compute-0 sudo[284814]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:55:42 compute-0 sudo[284814]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:55:42 compute-0 sudo[284814]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:42 compute-0 sudo[284854]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 01:55:42 compute-0 sudo[284854]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:55:42 compute-0 ceph-mon[191930]: pgmap v525: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v526: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:43 compute-0 podman[284838]: 2025-10-11 01:55:43.478088832 +0000 UTC m=+1.378324542 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:55:43 compute-0 podman[284839]: 2025-10-11 01:55:43.502339252 +0000 UTC m=+1.402651925 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, version=9.6, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, managed_by=edpm_ansible, io.openshift.expose-services=, com.redhat.component=ubi9-minimal-container, name=ubi9-minimal, build-date=2025-08-20T13:12:41, release=1755695350, io.openshift.tags=minimal rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-type=git, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.buildah.version=1.33.7, container_name=openstack_network_exporter, url=https://catalog.redhat.com/en/search?searchType=containers, distribution-scope=public, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vendor=Red Hat, Inc., architecture=x86_64)
Oct 11 01:55:43 compute-0 ceph-mon[191930]: pgmap v526: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:43 compute-0 podman[284690]: 2025-10-11 01:55:43.675491206 +0000 UTC m=+11.871626662 image pull 1061e4fafe13e0b9aa1ef2c904ba4ad70c44f3e87b1d831f16c6db34937f4022 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Oct 11 01:55:43 compute-0 sudo[284854]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:55:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:55:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:55:43 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:55:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:55:43 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:55:43 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 1c75ed29-ad6c-4f47-bfdb-ca64172f4935 does not exist
Oct 11 01:55:43 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev f3524b69-7afe-4609-a4f5-a5d7437e3099 does not exist
Oct 11 01:55:43 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 9f8d9f20-9721-4308-aa09-c68a6d72cf8d does not exist
Oct 11 01:55:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:55:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:55:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:55:43 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:55:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:55:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:55:44 compute-0 podman[284960]: 2025-10-11 01:55:44.010562574 +0000 UTC m=+0.112698128 container create c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, maintainer=OpenStack Kubernetes Operator team, container_name=ovn_metadata_agent, tcib_managed=true, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, config_id=ovn_metadata_agent)
Oct 11 01:55:44 compute-0 podman[284960]: 2025-10-11 01:55:43.96092228 +0000 UTC m=+0.063057854 image pull 1061e4fafe13e0b9aa1ef2c904ba4ad70c44f3e87b1d831f16c6db34937f4022 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Oct 11 01:55:44 compute-0 python3[284678]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman create --name ovn_metadata_agent --cgroupns=host --conmon-pidfile /run/ovn_metadata_agent.pid --env KOLLA_CONFIG_STRATEGY=COPY_ALWAYS --env EDPM_CONFIG_HASH=0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d --healthcheck-command /openstack/healthcheck --label config_id=ovn_metadata_agent --label container_name=ovn_metadata_agent --label managed_by=edpm_ansible --label config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']} --log-driver journald --log-level info --network host --pid host --privileged=True --user root --volume /run/openvswitch:/run/openvswitch:z --volume /var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z --volume /run/netns:/run/netns:shared --volume /var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro --volume /var/lib/neutron:/var/lib/neutron:shared,z --volume /var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro --volume /var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro --volume /var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z --volume /var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z --volume /var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z --volume /var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z --volume /var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
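The PODMAN-CONTAINER-DEBUG line shows how edpm_container_manage turns the JSON definition from /var/lib/edpm-config/container-startup-config/ovn_metadata_agent into a `podman create` invocation, stamping the full definition onto the container as the config_data label. Note the label value is a Python-literal dict (single quotes, bare True), not JSON, so it round-trips with ast.literal_eval rather than json.loads; a minimal sketch of recovering it from the running container, assuming podman is on PATH:

    import ast
    import json
    import subprocess

    name = "ovn_metadata_agent"
    labels = json.loads(subprocess.run(["podman", "inspect", name],
                                       check=True, capture_output=True,
                                       text=True).stdout)[0]["Config"]["Labels"]

    # The label holds a Python literal, not JSON, hence ast.literal_eval.
    config = ast.literal_eval(labels["config_data"])
    print(config["image"])
    for vol in config["volumes"]:
        print(" ", vol)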
Oct 11 01:55:44 compute-0 sudo[284971]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:55:44 compute-0 sudo[284971]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:55:44 compute-0 sudo[284971]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:44 compute-0 sudo[285011]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:55:44 compute-0 sudo[284676]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:44 compute-0 sudo[285011]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:55:44 compute-0 sudo[285011]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:44 compute-0 sudo[285049]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:55:44 compute-0 sudo[285049]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:55:44 compute-0 sudo[285049]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:44 compute-0 sudo[285098]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 01:55:44 compute-0 sudo[285098]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:55:44 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:55:44 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:55:44 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:55:44 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:55:44 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:55:44 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:55:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:55:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v527: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:45 compute-0 sudo[285300]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lqyjnhsraekvewayopkpxknitftjqkvk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147744.5572824-437-50534164027811/AnsiballZ_stat.py'
Oct 11 01:55:45 compute-0 sudo[285300]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:45 compute-0 podman[285271]: 2025-10-11 01:55:45.206373264 +0000 UTC m=+0.092635868 container create 02b2dc7acc3e33c4daaa087bc2970e39a1ee58c4d6858cac7fe1ce3e3e282a67 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_black, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Oct 11 01:55:45 compute-0 podman[285271]: 2025-10-11 01:55:45.168792572 +0000 UTC m=+0.055055256 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:55:45 compute-0 systemd[1]: Started libpod-conmon-02b2dc7acc3e33c4daaa087bc2970e39a1ee58c4d6858cac7fe1ce3e3e282a67.scope.
Oct 11 01:55:45 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:55:45 compute-0 podman[285271]: 2025-10-11 01:55:45.369611828 +0000 UTC m=+0.255874512 container init 02b2dc7acc3e33c4daaa087bc2970e39a1ee58c4d6858cac7fe1ce3e3e282a67 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_black, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3)
Oct 11 01:55:45 compute-0 podman[285271]: 2025-10-11 01:55:45.391216124 +0000 UTC m=+0.277478718 container start 02b2dc7acc3e33c4daaa087bc2970e39a1ee58c4d6858cac7fe1ce3e3e282a67 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_black, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 01:55:45 compute-0 unruffled_black[285308]: 167 167
Oct 11 01:55:45 compute-0 systemd[1]: libpod-02b2dc7acc3e33c4daaa087bc2970e39a1ee58c4d6858cac7fe1ce3e3e282a67.scope: Deactivated successfully.
Oct 11 01:55:45 compute-0 podman[285271]: 2025-10-11 01:55:45.404920725 +0000 UTC m=+0.291183359 container attach 02b2dc7acc3e33c4daaa087bc2970e39a1ee58c4d6858cac7fe1ce3e3e282a67 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_black, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:55:45 compute-0 podman[285271]: 2025-10-11 01:55:45.406464901 +0000 UTC m=+0.292727535 container died 02b2dc7acc3e33c4daaa087bc2970e39a1ee58c4d6858cac7fe1ce3e3e282a67 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_black, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 01:55:45 compute-0 python3.9[285305]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:55:45 compute-0 sudo[285300]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:45 compute-0 systemd[1]: var-lib-containers-storage-overlay-cea2291e342be3a04ea133e9728fe0ef6190321b125778ae56776721c0fb0dfa-merged.mount: Deactivated successfully.
Oct 11 01:55:45 compute-0 podman[285271]: 2025-10-11 01:55:45.529506391 +0000 UTC m=+0.415768995 container remove 02b2dc7acc3e33c4daaa087bc2970e39a1ee58c4d6858cac7fe1ce3e3e282a67 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_black, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:55:45 compute-0 systemd[1]: libpod-conmon-02b2dc7acc3e33c4daaa087bc2970e39a1ee58c4d6858cac7fe1ce3e3e282a67.scope: Deactivated successfully.
Oct 11 01:55:45 compute-0 ceph-mon[191930]: pgmap v527: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:45 compute-0 podman[285356]: 2025-10-11 01:55:45.86196409 +0000 UTC m=+0.109349216 container create 33d788ea2f308c3ae4ebcf58ec0efdb095d0d4efec3955dcd81027d832fbae73 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=determined_kepler, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 01:55:45 compute-0 podman[285356]: 2025-10-11 01:55:45.828503308 +0000 UTC m=+0.075888474 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:55:45 compute-0 systemd[1]: Started libpod-conmon-33d788ea2f308c3ae4ebcf58ec0efdb095d0d4efec3955dcd81027d832fbae73.scope.
Oct 11 01:55:45 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:55:46 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/087ffba8f3e0c8c0138958144af03080f8a465857011ba07f28d967d22272770/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:55:46 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/087ffba8f3e0c8c0138958144af03080f8a465857011ba07f28d967d22272770/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:55:46 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/087ffba8f3e0c8c0138958144af03080f8a465857011ba07f28d967d22272770/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:55:46 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/087ffba8f3e0c8c0138958144af03080f8a465857011ba07f28d967d22272770/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:55:46 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/087ffba8f3e0c8c0138958144af03080f8a465857011ba07f28d967d22272770/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:55:46 compute-0 podman[285356]: 2025-10-11 01:55:46.035185624 +0000 UTC m=+0.282570780 container init 33d788ea2f308c3ae4ebcf58ec0efdb095d0d4efec3955dcd81027d832fbae73 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=determined_kepler, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:55:46 compute-0 podman[285356]: 2025-10-11 01:55:46.060552568 +0000 UTC m=+0.307937694 container start 33d788ea2f308c3ae4ebcf58ec0efdb095d0d4efec3955dcd81027d832fbae73 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=determined_kepler, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 01:55:46 compute-0 podman[285356]: 2025-10-11 01:55:46.068144863 +0000 UTC m=+0.315529989 container attach 33d788ea2f308c3ae4ebcf58ec0efdb095d0d4efec3955dcd81027d832fbae73 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=determined_kepler, io.buildah.version=1.39.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0)
Oct 11 01:55:46 compute-0 sudo[285502]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vfnegwhufhxjgafdqtvkguzhetxevsze ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147745.9283175-446-138231813285298/AnsiballZ_file.py'
Oct 11 01:55:46 compute-0 sudo[285502]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:46 compute-0 python3.9[285504]: ansible-file Invoked with path=/etc/systemd/system/edpm_ovn_metadata_agent.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:55:46 compute-0 sudo[285502]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v528: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:47 compute-0 sudo[285599]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fhabealaehiizpwgeiqaedgatpzkwfed ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147745.9283175-446-138231813285298/AnsiballZ_stat.py'
Oct 11 01:55:47 compute-0 sudo[285599]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:47 compute-0 determined_kepler[285398]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:55:47 compute-0 determined_kepler[285398]: --> relative data size: 1.0
Oct 11 01:55:47 compute-0 determined_kepler[285398]: --> All data devices are unavailable
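The "All data devices are unavailable" report is the idempotent path of `ceph-volume lvm batch`: all three LVs passed in (0 physical, 3 LVM) are rejected as candidates, most commonly because they already carry OSD data from an earlier pass, so with --yes the run deploys nothing and exits cleanly; cephadm then falls back to auditing the existing OSDs with the `lvm list` call below.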
Oct 11 01:55:47 compute-0 systemd[1]: libpod-33d788ea2f308c3ae4ebcf58ec0efdb095d0d4efec3955dcd81027d832fbae73.scope: Deactivated successfully.
Oct 11 01:55:47 compute-0 systemd[1]: libpod-33d788ea2f308c3ae4ebcf58ec0efdb095d0d4efec3955dcd81027d832fbae73.scope: Consumed 1.263s CPU time.
Oct 11 01:55:47 compute-0 podman[285356]: 2025-10-11 01:55:47.391784676 +0000 UTC m=+1.639169802 container died 33d788ea2f308c3ae4ebcf58ec0efdb095d0d4efec3955dcd81027d832fbae73 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=determined_kepler, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:55:47 compute-0 systemd[1]: var-lib-containers-storage-overlay-087ffba8f3e0c8c0138958144af03080f8a465857011ba07f28d967d22272770-merged.mount: Deactivated successfully.
Oct 11 01:55:47 compute-0 podman[285356]: 2025-10-11 01:55:47.500592501 +0000 UTC m=+1.747977597 container remove 33d788ea2f308c3ae4ebcf58ec0efdb095d0d4efec3955dcd81027d832fbae73 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=determined_kepler, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=reef)
Oct 11 01:55:47 compute-0 systemd[1]: libpod-conmon-33d788ea2f308c3ae4ebcf58ec0efdb095d0d4efec3955dcd81027d832fbae73.scope: Deactivated successfully.
Oct 11 01:55:47 compute-0 python3.9[285603]: ansible-stat Invoked with path=/etc/systemd/system/edpm_ovn_metadata_agent_healthcheck.timer follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:55:47 compute-0 sudo[285098]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:47 compute-0 sudo[285599]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:47 compute-0 sudo[285616]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:55:47 compute-0 sudo[285616]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:55:47 compute-0 sudo[285616]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:47 compute-0 sudo[285668]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:55:47 compute-0 sudo[285668]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:55:47 compute-0 sudo[285668]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:47 compute-0 ceph-mon[191930]: pgmap v528: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:47 compute-0 sudo[285718]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:55:47 compute-0 sudo[285718]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:55:47 compute-0 sudo[285718]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:48 compute-0 sudo[285748]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:55:48 compute-0 sudo[285748]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
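`ceph-volume lvm list --format json`, invoked above through the copied-in cephadm binary, reports the discovered OSDs as a JSON object keyed by OSD id, each value a list of device records. A minimal sketch of summarizing that output, assuming the reef-era field names (type, lv_path, and the ceph.* tags):

    import json
    import subprocess

    # In practice cephadm runs this inside the ceph container; shown bare for brevity.
    out = subprocess.run(["ceph-volume", "lvm", "list", "--format", "json"],
                         check=True, capture_output=True, text=True).stdout

    for osd_id, devices in sorted(json.loads(out).items()):
        for dev in devices:
            tags = dev.get("tags", {})
            print(f"osd.{osd_id}", dev.get("type"), dev.get("lv_path"),
                  "osd_fsid=" + tags.get("ceph.osd_fsid", "?"))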
Oct 11 01:55:48 compute-0 podman[285791]: 2025-10-11 01:55:48.253216491 +0000 UTC m=+0.109780749 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_ipmi, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_id=edpm, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 01:55:48 compute-0 sudo[285909]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jjqpwddoiriokberaqtljrmcedzqfvqs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147747.7021384-446-173187157701497/AnsiballZ_copy.py'
Oct 11 01:55:48 compute-0 sudo[285909]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:48 compute-0 podman[285926]: 2025-10-11 01:55:48.701732825 +0000 UTC m=+0.091026104 container create c382e9ca427dc0562e6df2ec7d4e2019115c4760367a0313845202fbec8acafb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_dewdney, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507)
Oct 11 01:55:48 compute-0 python3.9[285918]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760147747.7021384-446-173187157701497/source dest=/etc/systemd/system/edpm_ovn_metadata_agent.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:55:48 compute-0 sudo[285909]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:48 compute-0 podman[285926]: 2025-10-11 01:55:48.666666641 +0000 UTC m=+0.055959960 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:55:48 compute-0 systemd[1]: Started libpod-conmon-c382e9ca427dc0562e6df2ec7d4e2019115c4760367a0313845202fbec8acafb.scope.
Oct 11 01:55:48 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:55:48 compute-0 podman[285926]: 2025-10-11 01:55:48.858128162 +0000 UTC m=+0.247421491 container init c382e9ca427dc0562e6df2ec7d4e2019115c4760367a0313845202fbec8acafb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_dewdney, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2)
Oct 11 01:55:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v529: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:48 compute-0 podman[285926]: 2025-10-11 01:55:48.877561876 +0000 UTC m=+0.266855145 container start c382e9ca427dc0562e6df2ec7d4e2019115c4760367a0313845202fbec8acafb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_dewdney, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:55:48 compute-0 podman[285926]: 2025-10-11 01:55:48.883896527 +0000 UTC m=+0.273189856 container attach c382e9ca427dc0562e6df2ec7d4e2019115c4760367a0313845202fbec8acafb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_dewdney, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 01:55:48 compute-0 romantic_dewdney[285941]: 167 167
Oct 11 01:55:48 compute-0 systemd[1]: libpod-c382e9ca427dc0562e6df2ec7d4e2019115c4760367a0313845202fbec8acafb.scope: Deactivated successfully.
Oct 11 01:55:48 compute-0 podman[285926]: 2025-10-11 01:55:48.890132846 +0000 UTC m=+0.279426115 container died c382e9ca427dc0562e6df2ec7d4e2019115c4760367a0313845202fbec8acafb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_dewdney, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default)
Oct 11 01:55:48 compute-0 systemd[1]: var-lib-containers-storage-overlay-f8825bb8a4e33e7eda9c63dc298bfbe696caccd3d84ab5382c00b7868081b73c-merged.mount: Deactivated successfully.
Oct 11 01:55:48 compute-0 podman[285926]: 2025-10-11 01:55:48.970483464 +0000 UTC m=+0.359776733 container remove c382e9ca427dc0562e6df2ec7d4e2019115c4760367a0313845202fbec8acafb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_dewdney, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 01:55:48 compute-0 systemd[1]: libpod-conmon-c382e9ca427dc0562e6df2ec7d4e2019115c4760367a0313845202fbec8acafb.scope: Deactivated successfully.
Oct 11 01:55:49 compute-0 podman[286011]: 2025-10-11 01:55:49.206441835 +0000 UTC m=+0.063800950 container create e0945e383f3c03e8898caabecfd2731f3984250662eb72a89cfbaa78da0bd2cd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_meninsky, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef)
Oct 11 01:55:49 compute-0 podman[286011]: 2025-10-11 01:55:49.182594106 +0000 UTC m=+0.039953301 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:55:49 compute-0 systemd[1]: Started libpod-conmon-e0945e383f3c03e8898caabecfd2731f3984250662eb72a89cfbaa78da0bd2cd.scope.
Oct 11 01:55:49 compute-0 sudo[286051]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xwjmmpytuixeqgbymjtdehzkywqejrdh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147747.7021384-446-173187157701497/AnsiballZ_systemd.py'
Oct 11 01:55:49 compute-0 sudo[286051]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:49 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:55:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/41a52c432d4631b4307e7d26009a1aba5b92b071ae69118a0366b2011499a0e8/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:55:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/41a52c432d4631b4307e7d26009a1aba5b92b071ae69118a0366b2011499a0e8/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:55:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/41a52c432d4631b4307e7d26009a1aba5b92b071ae69118a0366b2011499a0e8/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:55:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/41a52c432d4631b4307e7d26009a1aba5b92b071ae69118a0366b2011499a0e8/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:55:49 compute-0 podman[286011]: 2025-10-11 01:55:49.385990237 +0000 UTC m=+0.243349432 container init e0945e383f3c03e8898caabecfd2731f3984250662eb72a89cfbaa78da0bd2cd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_meninsky, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:55:49 compute-0 podman[286011]: 2025-10-11 01:55:49.412527768 +0000 UTC m=+0.269886923 container start e0945e383f3c03e8898caabecfd2731f3984250662eb72a89cfbaa78da0bd2cd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_meninsky, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 01:55:49 compute-0 podman[286011]: 2025-10-11 01:55:49.420832466 +0000 UTC m=+0.278191681 container attach e0945e383f3c03e8898caabecfd2731f3984250662eb72a89cfbaa78da0bd2cd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_meninsky, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, org.label-schema.license=GPLv2)
Oct 11 01:55:49 compute-0 python3.9[286057]: ansible-systemd Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 01:55:49 compute-0 systemd[1]: Reloading.
Oct 11 01:55:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:55:49 compute-0 systemd-rc-local-generator[286091]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:55:49 compute-0 systemd-sysv-generator[286094]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:55:49 compute-0 ceph-mon[191930]: pgmap v529: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:50 compute-0 sudo[286051]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]: {
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:     "0": [
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:         {
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "devices": [
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "/dev/loop3"
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             ],
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "lv_name": "ceph_lv0",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "lv_size": "21470642176",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "name": "ceph_lv0",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "tags": {
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.cluster_name": "ceph",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.crush_device_class": "",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.encrypted": "0",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.osd_id": "0",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.type": "block",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.vdo": "0"
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             },
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "type": "block",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "vg_name": "ceph_vg0"
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:         }
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:     ],
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:     "1": [
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:         {
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "devices": [
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "/dev/loop4"
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             ],
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "lv_name": "ceph_lv1",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "lv_size": "21470642176",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "name": "ceph_lv1",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "tags": {
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.cluster_name": "ceph",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.crush_device_class": "",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.encrypted": "0",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.osd_id": "1",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.type": "block",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.vdo": "0"
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             },
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "type": "block",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "vg_name": "ceph_vg1"
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:         }
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:     ],
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:     "2": [
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:         {
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "devices": [
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "/dev/loop5"
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             ],
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "lv_name": "ceph_lv2",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "lv_size": "21470642176",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "name": "ceph_lv2",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "tags": {
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.cluster_name": "ceph",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.crush_device_class": "",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.encrypted": "0",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.osd_id": "2",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.type": "block",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:                 "ceph.vdo": "0"
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             },
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "type": "block",
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:             "vg_name": "ceph_vg2"
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:         }
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]:     ]
Oct 11 01:55:50 compute-0 thirsty_meninsky[286055]: }
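The JSON printed by thirsty_meninsky maps each OSD id to the logical volume backing it. Its shape matches ceph-volume's lvm-list JSON output, though the log does not show the exact subcommand this container ran, so that reading is an assumption. A short sketch that reduces output of this shape to an OSD-to-device map (key names "type", "lv_path", "devices" are taken from the listing above):

    import json

    def osd_devices(listing: str) -> dict:
        """Map OSD id -> block LV path and backing devices."""
        data = json.loads(listing)
        out = {}
        for osd_id, lvs in data.items():
            for lv in lvs:
                if lv.get("type") == "block":
                    out[int(osd_id)] = {
                        "lv_path": lv["lv_path"],
                        "devices": lv["devices"],
                    }
        return out

Applied to the output above it gives {0: {'lv_path': '/dev/ceph_vg0/ceph_lv0', 'devices': ['/dev/loop3']}, 1: ..., 2: ...}.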
Oct 11 01:55:50 compute-0 systemd[1]: libpod-e0945e383f3c03e8898caabecfd2731f3984250662eb72a89cfbaa78da0bd2cd.scope: Deactivated successfully.
Oct 11 01:55:50 compute-0 podman[286011]: 2025-10-11 01:55:50.296813991 +0000 UTC m=+1.154173146 container died e0945e383f3c03e8898caabecfd2731f3984250662eb72a89cfbaa78da0bd2cd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_meninsky, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:55:50 compute-0 systemd[1]: var-lib-containers-storage-overlay-41a52c432d4631b4307e7d26009a1aba5b92b071ae69118a0366b2011499a0e8-merged.mount: Deactivated successfully.
Oct 11 01:55:50 compute-0 podman[286011]: 2025-10-11 01:55:50.435786678 +0000 UTC m=+1.293145783 container remove e0945e383f3c03e8898caabecfd2731f3984250662eb72a89cfbaa78da0bd2cd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_meninsky, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507)
Oct 11 01:55:50 compute-0 systemd[1]: libpod-conmon-e0945e383f3c03e8898caabecfd2731f3984250662eb72a89cfbaa78da0bd2cd.scope: Deactivated successfully.
Oct 11 01:55:50 compute-0 sudo[285748]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:50 compute-0 sudo[286201]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xcttogxacjfxlwnkixunuunvptigucdg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147747.7021384-446-173187157701497/AnsiballZ_systemd.py'
Oct 11 01:55:50 compute-0 sudo[286201]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:55:50 compute-0 sudo[286175]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:55:50 compute-0 sudo[286175]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:55:50 compute-0 sudo[286175]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:50 compute-0 sudo[286214]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:55:50 compute-0 sudo[286214]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:55:50 compute-0 sudo[286214]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v530: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:50 compute-0 sudo[286239]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:55:50 compute-0 python3.9[286211]: ansible-systemd Invoked with state=restarted name=edpm_ovn_metadata_agent.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:55:50 compute-0 sudo[286239]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:55:50 compute-0 sudo[286239]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:51 compute-0 systemd[1]: Reloading.
Oct 11 01:55:51 compute-0 systemd-rc-local-generator[286318]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:55:51 compute-0 systemd-sysv-generator[286321]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:55:51 compute-0 sudo[286265]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:55:51 compute-0 systemd[1]: Starting ovn_metadata_agent container...
Oct 11 01:55:51 compute-0 sudo[286265]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:55:51 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:55:51 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8b57374b61c2741d590a9793ebdbeceb8a5ecc325d09f6937eb66c92991a4abb/merged/etc/neutron.conf.d supports timestamps until 2038 (0x7fffffff)
Oct 11 01:55:51 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8b57374b61c2741d590a9793ebdbeceb8a5ecc325d09f6937eb66c92991a4abb/merged/var/lib/neutron supports timestamps until 2038 (0x7fffffff)
Oct 11 01:55:51 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3.
Oct 11 01:55:51 compute-0 podman[286329]: 2025-10-11 01:55:51.895727983 +0000 UTC m=+0.255427208 container init c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, container_name=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS)
Oct 11 01:55:51 compute-0 ovn_metadata_agent[286344]: + sudo -E kolla_set_configs
Oct 11 01:55:51 compute-0 ceph-mon[191930]: pgmap v530: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:51 compute-0 podman[286329]: 2025-10-11 01:55:51.968370586 +0000 UTC m=+0.328069821 container start c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_id=ovn_metadata_agent, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_metadata_agent, org.label-schema.build-date=20251009, tcib_managed=true)
Oct 11 01:55:51 compute-0 edpm-start-podman-container[286329]: ovn_metadata_agent
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: INFO:__main__:Loading config file at /var/lib/kolla/config_files/config.json
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: INFO:__main__:Validating config file
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: INFO:__main__:Kolla config strategy set to: COPY_ALWAYS
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: INFO:__main__:Copying service configuration files
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: INFO:__main__:Deleting /etc/neutron/rootwrap.conf
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: INFO:__main__:Copying /etc/neutron.conf.d/01-rootwrap.conf to /etc/neutron/rootwrap.conf
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: INFO:__main__:Setting permission for /etc/neutron/rootwrap.conf
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: INFO:__main__:Writing out command to execute
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: INFO:__main__:Setting permission for /var/lib/neutron
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: INFO:__main__:Setting permission for /var/lib/neutron/kill_scripts
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: INFO:__main__:Setting permission for /var/lib/neutron/ovn-metadata-proxy
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: INFO:__main__:Setting permission for /var/lib/neutron/external
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: INFO:__main__:Setting permission for /var/lib/neutron/ovn_metadata_haproxy_wrapper
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: INFO:__main__:Setting permission for /var/lib/neutron/kill_scripts/haproxy-kill
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: INFO:__main__:Setting permission for /var/lib/neutron/external/pids
Oct 11 01:55:52 compute-0 edpm-start-podman-container[286328]: Creating additional drop-in dependency for "ovn_metadata_agent" (c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3)
Oct 11 01:55:52 compute-0 podman[286368]: 2025-10-11 01:55:52.071970837 +0000 UTC m=+0.106758873 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, container_name=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']})
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: ++ cat /run_command
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: + CMD=neutron-ovn-metadata-agent
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: + ARGS=
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: + sudo kolla_copy_cacerts
Oct 11 01:55:52 compute-0 systemd[1]: Reloading.
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: + [[ ! -n '' ]]
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: + . kolla_extend_start
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: Running command: 'neutron-ovn-metadata-agent'
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: + echo 'Running command: '\''neutron-ovn-metadata-agent'\'''
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: + umask 0022
Oct 11 01:55:52 compute-0 ovn_metadata_agent[286344]: + exec neutron-ovn-metadata-agent
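The trace above shows the kolla start pattern: kolla_set_configs writes the service command to /run_command, and the start script cats it into CMD and execs it. A sketch of that final step, inferred from this trace rather than from kolla source:

    import os
    import shlex

    def exec_run_command(path: str = "/run_command") -> None:
        # Read the command kolla_set_configs wrote out, then replace this
        # process with it -- the equivalent of `exec neutron-ovn-metadata-agent`.
        with open(path) as f:
            cmd = shlex.split(f.read().strip())
        os.execvp(cmd[0], cmd)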
Oct 11 01:55:52 compute-0 podman[286431]: 2025-10-11 01:55:52.219934453 +0000 UTC m=+0.067771899 container create d20a13c00426c99f9d102e6dde37e74ab7e69d549a87b4b5770cc8ad072bfe3b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_yonath, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 01:55:52 compute-0 systemd-sysv-generator[286467]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:55:52 compute-0 systemd-rc-local-generator[286464]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:55:52 compute-0 podman[286431]: 2025-10-11 01:55:52.193704277 +0000 UTC m=+0.041541713 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:55:52 compute-0 systemd[1]: Started ovn_metadata_agent container.
Oct 11 01:55:52 compute-0 systemd[1]: Started libpod-conmon-d20a13c00426c99f9d102e6dde37e74ab7e69d549a87b4b5770cc8ad072bfe3b.scope.
Oct 11 01:55:52 compute-0 sudo[286201]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:52 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:55:52 compute-0 podman[286431]: 2025-10-11 01:55:52.670506241 +0000 UTC m=+0.518343737 container init d20a13c00426c99f9d102e6dde37e74ab7e69d549a87b4b5770cc8ad072bfe3b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_yonath, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:55:52 compute-0 podman[286431]: 2025-10-11 01:55:52.689724127 +0000 UTC m=+0.537561533 container start d20a13c00426c99f9d102e6dde37e74ab7e69d549a87b4b5770cc8ad072bfe3b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_yonath, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:55:52 compute-0 intelligent_yonath[286483]: 167 167
Oct 11 01:55:52 compute-0 systemd[1]: libpod-d20a13c00426c99f9d102e6dde37e74ab7e69d549a87b4b5770cc8ad072bfe3b.scope: Deactivated successfully.
Oct 11 01:55:52 compute-0 podman[286431]: 2025-10-11 01:55:52.698804501 +0000 UTC m=+0.546641947 container attach d20a13c00426c99f9d102e6dde37e74ab7e69d549a87b4b5770cc8ad072bfe3b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_yonath, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:55:52 compute-0 podman[286431]: 2025-10-11 01:55:52.701182572 +0000 UTC m=+0.549020028 container died d20a13c00426c99f9d102e6dde37e74ab7e69d549a87b4b5770cc8ad072bfe3b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_yonath, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0)
Oct 11 01:55:52 compute-0 systemd[1]: var-lib-containers-storage-overlay-d20de208737f68e0b273c34b91496e4bda76f58ba1a5dff437659d224cd56e75-merged.mount: Deactivated successfully.
Oct 11 01:55:52 compute-0 podman[286431]: 2025-10-11 01:55:52.775196181 +0000 UTC m=+0.623033597 container remove d20a13c00426c99f9d102e6dde37e74ab7e69d549a87b4b5770cc8ad072bfe3b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_yonath, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 01:55:52 compute-0 systemd[1]: libpod-conmon-d20a13c00426c99f9d102e6dde37e74ab7e69d549a87b4b5770cc8ad072bfe3b.scope: Deactivated successfully.
Oct 11 01:55:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v531: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:53 compute-0 podman[286530]: 2025-10-11 01:55:53.05709971 +0000 UTC m=+0.069099933 container create 9a8a8c220fa3231367b3318cc3078730be72129490ab69305486cc2ce72d4eaa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_wiles, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:55:53 compute-0 sshd-session[276122]: Connection closed by 192.168.122.30 port 57158
Oct 11 01:55:53 compute-0 sshd-session[276119]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:55:53 compute-0 systemd[1]: session-54.scope: Deactivated successfully.
Oct 11 01:55:53 compute-0 systemd[1]: session-54.scope: Consumed 1min 36.348s CPU time.
Oct 11 01:55:53 compute-0 systemd-logind[804]: Session 54 logged out. Waiting for processes to exit.
Oct 11 01:55:53 compute-0 systemd-logind[804]: Removed session 54.
Oct 11 01:55:53 compute-0 systemd[1]: Started libpod-conmon-9a8a8c220fa3231367b3318cc3078730be72129490ab69305486cc2ce72d4eaa.scope.
Oct 11 01:55:53 compute-0 podman[286530]: 2025-10-11 01:55:53.027200959 +0000 UTC m=+0.039201182 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:55:53 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:55:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4d098a109f61e3a90c30dd22d3f085abec4f591e9623a1ac3c531ca735b79fa6/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:55:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4d098a109f61e3a90c30dd22d3f085abec4f591e9623a1ac3c531ca735b79fa6/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:55:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4d098a109f61e3a90c30dd22d3f085abec4f591e9623a1ac3c531ca735b79fa6/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:55:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4d098a109f61e3a90c30dd22d3f085abec4f591e9623a1ac3c531ca735b79fa6/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:55:53 compute-0 podman[286530]: 2025-10-11 01:55:53.238742184 +0000 UTC m=+0.250742457 container init 9a8a8c220fa3231367b3318cc3078730be72129490ab69305486cc2ce72d4eaa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_wiles, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:55:53 compute-0 podman[286530]: 2025-10-11 01:55:53.260658019 +0000 UTC m=+0.272658232 container start 9a8a8c220fa3231367b3318cc3078730be72129490ab69305486cc2ce72d4eaa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_wiles, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:55:53 compute-0 podman[286530]: 2025-10-11 01:55:53.269929548 +0000 UTC m=+0.281929821 container attach 9a8a8c220fa3231367b3318cc3078730be72129490ab69305486cc2ce72d4eaa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_wiles, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default)
Oct 11 01:55:53 compute-0 ceph-mon[191930]: pgmap v531: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:54 compute-0 brave_wiles[286547]: {
Oct 11 01:55:54 compute-0 brave_wiles[286547]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:55:54 compute-0 brave_wiles[286547]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:55:54 compute-0 brave_wiles[286547]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:55:54 compute-0 brave_wiles[286547]:         "osd_id": 1,
Oct 11 01:55:54 compute-0 brave_wiles[286547]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:55:54 compute-0 brave_wiles[286547]:         "type": "bluestore"
Oct 11 01:55:54 compute-0 brave_wiles[286547]:     },
Oct 11 01:55:54 compute-0 brave_wiles[286547]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:55:54 compute-0 brave_wiles[286547]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:55:54 compute-0 brave_wiles[286547]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:55:54 compute-0 brave_wiles[286547]:         "osd_id": 2,
Oct 11 01:55:54 compute-0 brave_wiles[286547]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:55:54 compute-0 brave_wiles[286547]:         "type": "bluestore"
Oct 11 01:55:54 compute-0 brave_wiles[286547]:     },
Oct 11 01:55:54 compute-0 brave_wiles[286547]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:55:54 compute-0 brave_wiles[286547]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:55:54 compute-0 brave_wiles[286547]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:55:54 compute-0 brave_wiles[286547]:         "osd_id": 0,
Oct 11 01:55:54 compute-0 brave_wiles[286547]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:55:54 compute-0 brave_wiles[286547]:         "type": "bluestore"
Oct 11 01:55:54 compute-0 brave_wiles[286547]:     }
Oct 11 01:55:54 compute-0 brave_wiles[286547]: }
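This second listing (brave_wiles) is keyed by osd_uuid rather than OSD id, consistent with the `ceph-volume ... raw list --format json` call cephadm issues a few lines above. A sketch, assuming only the two JSON shapes shown in this log, that cross-checks the earlier LVM view against this raw view:

    import json

    def check_osds(lvm_json: str, raw_json: str) -> list:
        # Every ceph.osd_fsid tag in the LVM listing should appear as an
        # osd_uuid key in the raw listing, with a matching osd_id.
        lvm, raw = json.loads(lvm_json), json.loads(raw_json)
        problems = []
        for osd_id, lvs in lvm.items():
            for lv in lvs:
                fsid = lv["tags"]["ceph.osd_fsid"]
                entry = raw.get(fsid)
                if entry is None:
                    problems.append(f"osd.{osd_id}: {fsid} not in raw list")
                elif str(entry["osd_id"]) != osd_id:
                    problems.append(
                        f"osd.{osd_id}: raw list says osd_id={entry['osd_id']}"
                    )
        return problems

On the two listings in this log the check comes back empty: all three osd_fsid tags (osd 0, 1, 2) appear as raw osd_uuid keys with matching ids.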
Oct 11 01:55:54 compute-0 systemd[1]: libpod-9a8a8c220fa3231367b3318cc3078730be72129490ab69305486cc2ce72d4eaa.scope: Deactivated successfully.
Oct 11 01:55:54 compute-0 systemd[1]: libpod-9a8a8c220fa3231367b3318cc3078730be72129490ab69305486cc2ce72d4eaa.scope: Consumed 1.236s CPU time.
Oct 11 01:55:54 compute-0 podman[286530]: 2025-10-11 01:55:54.497949213 +0000 UTC m=+1.509949406 container died 9a8a8c220fa3231367b3318cc3078730be72129490ab69305486cc2ce72d4eaa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_wiles, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 01:55:54 compute-0 systemd[1]: var-lib-containers-storage-overlay-4d098a109f61e3a90c30dd22d3f085abec4f591e9623a1ac3c531ca735b79fa6-merged.mount: Deactivated successfully.
Oct 11 01:55:54 compute-0 podman[286530]: 2025-10-11 01:55:54.607548939 +0000 UTC m=+1.619549142 container remove 9a8a8c220fa3231367b3318cc3078730be72129490ab69305486cc2ce72d4eaa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_wiles, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default)
Oct 11 01:55:54 compute-0 systemd[1]: libpod-conmon-9a8a8c220fa3231367b3318cc3078730be72129490ab69305486cc2ce72d4eaa.scope: Deactivated successfully.
Oct 11 01:55:54 compute-0 sudo[286265]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:55:54 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:55:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:55:54 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:55:54 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 775a403f-a6d3-4432-9db1-59a371f06f2c does not exist
Oct 11 01:55:54 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 887ad084-27e3-4205-9bca-374ec4e26592 does not exist
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.770 286362 INFO neutron.common.config [-] Logging enabled!
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.770 286362 INFO neutron.common.config [-] /usr/bin/neutron-ovn-metadata-agent version 22.2.2.dev43
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.770 286362 DEBUG neutron.common.config [-] command line: /usr/bin/neutron-ovn-metadata-agent setup_logging /usr/lib/python3.9/site-packages/neutron/common/config.py:123
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.771 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2589
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.771 286362 DEBUG neutron.agent.ovn.metadata_agent [-] Configuration options gathered from: log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2590
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.771 286362 DEBUG neutron.agent.ovn.metadata_agent [-] command line args: [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2591
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.771 286362 DEBUG neutron.agent.ovn.metadata_agent [-] config files: ['/etc/neutron/neutron.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2592
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.771 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ================================================================================ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2594
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.771 286362 DEBUG neutron.agent.ovn.metadata_agent [-] agent_down_time                = 75 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.771 286362 DEBUG neutron.agent.ovn.metadata_agent [-] allow_bulk                     = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.772 286362 DEBUG neutron.agent.ovn.metadata_agent [-] api_extensions_path            =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.772 286362 DEBUG neutron.agent.ovn.metadata_agent [-] api_paste_config               = api-paste.ini log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.772 286362 DEBUG neutron.agent.ovn.metadata_agent [-] api_workers                    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.772 286362 DEBUG neutron.agent.ovn.metadata_agent [-] auth_ca_cert                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.772 286362 DEBUG neutron.agent.ovn.metadata_agent [-] auth_strategy                  = keystone log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.772 286362 DEBUG neutron.agent.ovn.metadata_agent [-] backlog                        = 4096 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.772 286362 DEBUG neutron.agent.ovn.metadata_agent [-] base_mac                       = fa:16:3e:00:00:00 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.772 286362 DEBUG neutron.agent.ovn.metadata_agent [-] bind_host                      = 0.0.0.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.772 286362 DEBUG neutron.agent.ovn.metadata_agent [-] bind_port                      = 9696 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.772 286362 DEBUG neutron.agent.ovn.metadata_agent [-] client_socket_timeout          = 900 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.773 286362 DEBUG neutron.agent.ovn.metadata_agent [-] config_dir                     = ['/etc/neutron.conf.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.773 286362 DEBUG neutron.agent.ovn.metadata_agent [-] config_file                    = ['/etc/neutron/neutron.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.773 286362 DEBUG neutron.agent.ovn.metadata_agent [-] config_source                  = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.773 286362 DEBUG neutron.agent.ovn.metadata_agent [-] control_exchange               = neutron log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.773 286362 DEBUG neutron.agent.ovn.metadata_agent [-] core_plugin                    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.773 286362 DEBUG neutron.agent.ovn.metadata_agent [-] debug                          = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.773 286362 DEBUG neutron.agent.ovn.metadata_agent [-] default_availability_zones     = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.773 286362 DEBUG neutron.agent.ovn.metadata_agent [-] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'OFPHandler=INFO', 'OfctlService=INFO', 'os_ken.base.app_manager=INFO', 'os_ken.controller.controller=INFO'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.773 286362 DEBUG neutron.agent.ovn.metadata_agent [-] dhcp_agent_notification        = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.774 286362 DEBUG neutron.agent.ovn.metadata_agent [-] dhcp_lease_duration            = 86400 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.774 286362 DEBUG neutron.agent.ovn.metadata_agent [-] dhcp_load_type                 = networks log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.774 286362 DEBUG neutron.agent.ovn.metadata_agent [-] dns_domain                     = openstacklocal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.774 286362 DEBUG neutron.agent.ovn.metadata_agent [-] enable_new_agents              = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.774 286362 DEBUG neutron.agent.ovn.metadata_agent [-] enable_traditional_dhcp        = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.774 286362 DEBUG neutron.agent.ovn.metadata_agent [-] external_dns_driver            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.774 286362 DEBUG neutron.agent.ovn.metadata_agent [-] external_pids                  = /var/lib/neutron/external/pids log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.774 286362 DEBUG neutron.agent.ovn.metadata_agent [-] filter_validation              = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.775 286362 DEBUG neutron.agent.ovn.metadata_agent [-] global_physnet_mtu             = 1500 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.775 286362 DEBUG neutron.agent.ovn.metadata_agent [-] host                           = compute-0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.775 286362 DEBUG neutron.agent.ovn.metadata_agent [-] http_retries                   = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.775 286362 DEBUG neutron.agent.ovn.metadata_agent [-] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.775 286362 DEBUG neutron.agent.ovn.metadata_agent [-] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.775 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ipam_driver                    = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.775 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ipv6_pd_enabled                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.775 286362 DEBUG neutron.agent.ovn.metadata_agent [-] log_config_append              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.776 286362 DEBUG neutron.agent.ovn.metadata_agent [-] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.776 286362 DEBUG neutron.agent.ovn.metadata_agent [-] log_dir                        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.776 286362 DEBUG neutron.agent.ovn.metadata_agent [-] log_file                       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.776 286362 DEBUG neutron.agent.ovn.metadata_agent [-] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.776 286362 DEBUG neutron.agent.ovn.metadata_agent [-] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.776 286362 DEBUG neutron.agent.ovn.metadata_agent [-] log_rotation_type              = none log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.776 286362 DEBUG neutron.agent.ovn.metadata_agent [-] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.776 286362 DEBUG neutron.agent.ovn.metadata_agent [-] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.776 286362 DEBUG neutron.agent.ovn.metadata_agent [-] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.776 286362 DEBUG neutron.agent.ovn.metadata_agent [-] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.777 286362 DEBUG neutron.agent.ovn.metadata_agent [-] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.777 286362 DEBUG neutron.agent.ovn.metadata_agent [-] max_dns_nameservers            = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.777 286362 DEBUG neutron.agent.ovn.metadata_agent [-] max_header_line                = 16384 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.777 286362 DEBUG neutron.agent.ovn.metadata_agent [-] max_logfile_count              = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.777 286362 DEBUG neutron.agent.ovn.metadata_agent [-] max_logfile_size_mb            = 200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.777 286362 DEBUG neutron.agent.ovn.metadata_agent [-] max_subnet_host_routes         = 20 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.777 286362 DEBUG neutron.agent.ovn.metadata_agent [-] metadata_backlog               = 4096 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.777 286362 DEBUG neutron.agent.ovn.metadata_agent [-] metadata_proxy_group           =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.778 286362 DEBUG neutron.agent.ovn.metadata_agent [-] metadata_proxy_shared_secret   = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.778 286362 DEBUG neutron.agent.ovn.metadata_agent [-] metadata_proxy_socket          = /var/lib/neutron/metadata_proxy log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.778 286362 DEBUG neutron.agent.ovn.metadata_agent [-] metadata_proxy_socket_mode     = deduce log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.778 286362 DEBUG neutron.agent.ovn.metadata_agent [-] metadata_proxy_user            =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.778 286362 DEBUG neutron.agent.ovn.metadata_agent [-] metadata_workers               = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.778 286362 DEBUG neutron.agent.ovn.metadata_agent [-] network_link_prefix            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.778 286362 DEBUG neutron.agent.ovn.metadata_agent [-] notify_nova_on_port_data_changes = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.778 286362 DEBUG neutron.agent.ovn.metadata_agent [-] notify_nova_on_port_status_changes = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.779 286362 DEBUG neutron.agent.ovn.metadata_agent [-] nova_client_cert               =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.779 286362 DEBUG neutron.agent.ovn.metadata_agent [-] nova_client_priv_key           =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.779 286362 DEBUG neutron.agent.ovn.metadata_agent [-] nova_metadata_host             = nova-metadata-internal.openstack.svc log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.779 286362 DEBUG neutron.agent.ovn.metadata_agent [-] nova_metadata_insecure         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.779 286362 DEBUG neutron.agent.ovn.metadata_agent [-] nova_metadata_port             = 8775 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.779 286362 DEBUG neutron.agent.ovn.metadata_agent [-] nova_metadata_protocol         = https log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.779 286362 DEBUG neutron.agent.ovn.metadata_agent [-] pagination_max_limit           = -1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.779 286362 DEBUG neutron.agent.ovn.metadata_agent [-] periodic_fuzzy_delay           = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.779 286362 DEBUG neutron.agent.ovn.metadata_agent [-] periodic_interval              = 40 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.780 286362 DEBUG neutron.agent.ovn.metadata_agent [-] publish_errors                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.780 286362 DEBUG neutron.agent.ovn.metadata_agent [-] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.780 286362 DEBUG neutron.agent.ovn.metadata_agent [-] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.780 286362 DEBUG neutron.agent.ovn.metadata_agent [-] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.780 286362 DEBUG neutron.agent.ovn.metadata_agent [-] retry_until_window             = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.780 286362 DEBUG neutron.agent.ovn.metadata_agent [-] rpc_resources_processing_step  = 20 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.780 286362 DEBUG neutron.agent.ovn.metadata_agent [-] rpc_response_max_timeout       = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.780 286362 DEBUG neutron.agent.ovn.metadata_agent [-] rpc_state_report_workers       = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.780 286362 DEBUG neutron.agent.ovn.metadata_agent [-] rpc_workers                    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.780 286362 DEBUG neutron.agent.ovn.metadata_agent [-] send_events_interval           = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.781 286362 DEBUG neutron.agent.ovn.metadata_agent [-] service_plugins                = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.781 286362 DEBUG neutron.agent.ovn.metadata_agent [-] setproctitle                   = on log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.781 286362 DEBUG neutron.agent.ovn.metadata_agent [-] state_path                     = /var/lib/neutron log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.781 286362 DEBUG neutron.agent.ovn.metadata_agent [-] syslog_log_facility            = syslog log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.781 286362 DEBUG neutron.agent.ovn.metadata_agent [-] tcp_keepidle                   = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.781 286362 DEBUG neutron.agent.ovn.metadata_agent [-] transport_url                  = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.781 286362 DEBUG neutron.agent.ovn.metadata_agent [-] use_eventlog                   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.781 286362 DEBUG neutron.agent.ovn.metadata_agent [-] use_journal                    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.782 286362 DEBUG neutron.agent.ovn.metadata_agent [-] use_json                       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.782 286362 DEBUG neutron.agent.ovn.metadata_agent [-] use_ssl                        = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.782 286362 DEBUG neutron.agent.ovn.metadata_agent [-] use_stderr                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.782 286362 DEBUG neutron.agent.ovn.metadata_agent [-] use_syslog                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.782 286362 DEBUG neutron.agent.ovn.metadata_agent [-] vlan_transparent               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.782 286362 DEBUG neutron.agent.ovn.metadata_agent [-] watch_log_file                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.782 286362 DEBUG neutron.agent.ovn.metadata_agent [-] wsgi_default_pool_size         = 100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.782 286362 DEBUG neutron.agent.ovn.metadata_agent [-] wsgi_keep_alive                = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.782 286362 DEBUG neutron.agent.ovn.metadata_agent [-] wsgi_log_format                = %(client_ip)s "%(request_line)s" status: %(status_code)s  len: %(body_length)s time: %(wall_seconds).7f log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.783 286362 DEBUG neutron.agent.ovn.metadata_agent [-] wsgi_server_debug              = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.783 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_concurrency.disable_process_locking = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.783 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_concurrency.lock_path     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.783 286362 DEBUG neutron.agent.ovn.metadata_agent [-] profiler.connection_string     = messaging:// log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.783 286362 DEBUG neutron.agent.ovn.metadata_agent [-] profiler.enabled               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.783 286362 DEBUG neutron.agent.ovn.metadata_agent [-] profiler.es_doc_type           = notification log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.783 286362 DEBUG neutron.agent.ovn.metadata_agent [-] profiler.es_scroll_size        = 10000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.783 286362 DEBUG neutron.agent.ovn.metadata_agent [-] profiler.es_scroll_time        = 2m log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.784 286362 DEBUG neutron.agent.ovn.metadata_agent [-] profiler.filter_error_trace    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.784 286362 DEBUG neutron.agent.ovn.metadata_agent [-] profiler.hmac_keys             = SECRET_KEY log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.784 286362 DEBUG neutron.agent.ovn.metadata_agent [-] profiler.sentinel_service_name = mymaster log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.784 286362 DEBUG neutron.agent.ovn.metadata_agent [-] profiler.socket_timeout        = 0.1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.784 286362 DEBUG neutron.agent.ovn.metadata_agent [-] profiler.trace_sqlalchemy      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.784 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_policy.enforce_new_defaults = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.784 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_policy.enforce_scope      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.784 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_policy.policy_default_rule = default log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.784 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_policy.policy_dirs        = ['policy.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.785 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_policy.policy_file        = policy.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.785 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_policy.remote_content_type = application/x-www-form-urlencoded log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.785 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_policy.remote_ssl_ca_crt_file = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.785 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_policy.remote_ssl_client_crt_file = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.785 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_policy.remote_ssl_client_key_file = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.785 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_policy.remote_ssl_verify_server_crt = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.785 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_metrics.metrics_buffer_size = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.785 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_metrics.metrics_enabled = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.785 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_metrics.metrics_process_name =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.785 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_metrics.metrics_socket_file = /var/tmp/metrics_collector.sock log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.786 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_metrics.metrics_thread_stop_timeout = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.786 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_middleware.http_basic_auth_user_file = /etc/htpasswd log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.786 286362 DEBUG neutron.agent.ovn.metadata_agent [-] service_providers.service_provider = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.786 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep.capabilities           = [21, 12, 1, 2, 19] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.786 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep.group                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.786 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep.helper_command         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.786 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep.logger_name            = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.786 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep.thread_pool_size       = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.786 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep.user                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.787 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_dhcp_release.capabilities = [21, 12] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.787 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_dhcp_release.group     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.787 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_dhcp_release.helper_command = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.787 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_dhcp_release.logger_name = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.787 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_dhcp_release.thread_pool_size = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.787 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_dhcp_release.user      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.787 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_ovs_vsctl.capabilities = [21, 12] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.787 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_ovs_vsctl.group        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.787 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_ovs_vsctl.helper_command = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.787 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_ovs_vsctl.logger_name  = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.788 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_ovs_vsctl.thread_pool_size = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.788 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_ovs_vsctl.user         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.788 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_namespace.capabilities = [21] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.788 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_namespace.group        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.788 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_namespace.helper_command = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.788 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_namespace.logger_name  = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.788 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_namespace.thread_pool_size = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.788 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_namespace.user         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.788 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_conntrack.capabilities = [12] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.788 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_conntrack.group        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.789 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_conntrack.helper_command = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.789 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_conntrack.logger_name  = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.789 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_conntrack.thread_pool_size = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.789 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_conntrack.user         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.789 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_link.capabilities      = [12, 21] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.789 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_link.group             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.789 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_link.helper_command    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.790 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_link.logger_name       = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.790 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_link.thread_pool_size  = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.790 286362 DEBUG neutron.agent.ovn.metadata_agent [-] privsep_link.user              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.790 286362 DEBUG neutron.agent.ovn.metadata_agent [-] AGENT.check_child_processes_action = respawn log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.790 286362 DEBUG neutron.agent.ovn.metadata_agent [-] AGENT.check_child_processes_interval = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.790 286362 DEBUG neutron.agent.ovn.metadata_agent [-] AGENT.comment_iptables_rules   = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.791 286362 DEBUG neutron.agent.ovn.metadata_agent [-] AGENT.debug_iptables_rules     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.791 286362 DEBUG neutron.agent.ovn.metadata_agent [-] AGENT.kill_scripts_path        = /etc/neutron/kill_scripts/ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.791 286362 DEBUG neutron.agent.ovn.metadata_agent [-] AGENT.root_helper              = sudo neutron-rootwrap /etc/neutron/rootwrap.conf log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.791 286362 DEBUG neutron.agent.ovn.metadata_agent [-] AGENT.root_helper_daemon       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.791 286362 DEBUG neutron.agent.ovn.metadata_agent [-] AGENT.use_helper_for_ns_read   = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.791 286362 DEBUG neutron.agent.ovn.metadata_agent [-] AGENT.use_random_fully         = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.791 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_versionedobjects.fatal_exception_format_errors = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.791 286362 DEBUG neutron.agent.ovn.metadata_agent [-] QUOTAS.default_quota           = -1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.792 286362 DEBUG neutron.agent.ovn.metadata_agent [-] QUOTAS.quota_driver            = neutron.db.quota.driver_nolock.DbQuotaNoLockDriver log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.792 286362 DEBUG neutron.agent.ovn.metadata_agent [-] QUOTAS.quota_network           = 100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.792 286362 DEBUG neutron.agent.ovn.metadata_agent [-] QUOTAS.quota_port              = 500 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.792 286362 DEBUG neutron.agent.ovn.metadata_agent [-] QUOTAS.quota_security_group    = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.792 286362 DEBUG neutron.agent.ovn.metadata_agent [-] QUOTAS.quota_security_group_rule = 100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.793 286362 DEBUG neutron.agent.ovn.metadata_agent [-] QUOTAS.quota_subnet            = 100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.793 286362 DEBUG neutron.agent.ovn.metadata_agent [-] QUOTAS.track_quota_usage       = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.793 286362 DEBUG neutron.agent.ovn.metadata_agent [-] nova.auth_section              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.793 286362 DEBUG neutron.agent.ovn.metadata_agent [-] nova.auth_type                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.793 286362 DEBUG neutron.agent.ovn.metadata_agent [-] nova.cafile                    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.793 286362 DEBUG neutron.agent.ovn.metadata_agent [-] nova.certfile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.793 286362 DEBUG neutron.agent.ovn.metadata_agent [-] nova.collect_timing            = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.794 286362 DEBUG neutron.agent.ovn.metadata_agent [-] nova.endpoint_type             = public log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.794 286362 DEBUG neutron.agent.ovn.metadata_agent [-] nova.insecure                  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.794 286362 DEBUG neutron.agent.ovn.metadata_agent [-] nova.keyfile                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.794 286362 DEBUG neutron.agent.ovn.metadata_agent [-] nova.region_name               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.794 286362 DEBUG neutron.agent.ovn.metadata_agent [-] nova.split_loggers             = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.794 286362 DEBUG neutron.agent.ovn.metadata_agent [-] nova.timeout                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.794 286362 DEBUG neutron.agent.ovn.metadata_agent [-] placement.auth_section         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.794 286362 DEBUG neutron.agent.ovn.metadata_agent [-] placement.auth_type            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.794 286362 DEBUG neutron.agent.ovn.metadata_agent [-] placement.cafile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.795 286362 DEBUG neutron.agent.ovn.metadata_agent [-] placement.certfile             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.795 286362 DEBUG neutron.agent.ovn.metadata_agent [-] placement.collect_timing       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.795 286362 DEBUG neutron.agent.ovn.metadata_agent [-] placement.endpoint_type        = public log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.795 286362 DEBUG neutron.agent.ovn.metadata_agent [-] placement.insecure             = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.795 286362 DEBUG neutron.agent.ovn.metadata_agent [-] placement.keyfile              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.795 286362 DEBUG neutron.agent.ovn.metadata_agent [-] placement.region_name          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.795 286362 DEBUG neutron.agent.ovn.metadata_agent [-] placement.split_loggers        = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.795 286362 DEBUG neutron.agent.ovn.metadata_agent [-] placement.timeout              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.795 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.auth_section            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.796 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.auth_type               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.796 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.cafile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.796 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.certfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.796 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.collect_timing          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.796 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.connect_retries         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.796 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.connect_retry_delay     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.796 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.enable_notifications    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.796 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.endpoint_override       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.796 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.797 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.interface               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.797 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.keyfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.797 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.max_version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.797 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.min_version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.797 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.region_name             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.797 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.service_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.797 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.service_type            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.797 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.split_loggers           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.798 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.status_code_retries     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.798 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.status_code_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.798 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.timeout                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.798 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.valid_interfaces        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.798 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ironic.version                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.798 286362 DEBUG neutron.agent.ovn.metadata_agent [-] cli_script.dry_run             = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.798 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.allow_stateless_action_supported = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.798 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.dhcp_default_lease_time    = 43200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.799 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.disable_ovn_dhcp_for_baremetal_ports = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.799 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.dns_servers                = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.799 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.enable_distributed_floating_ip = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.799 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.neutron_sync_mode          = log log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.799 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovn_dhcp4_global_options   = {} log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.799 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovn_dhcp6_global_options   = {} log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.799 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovn_emit_need_to_frag      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.799 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovn_l3_mode                = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.800 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovn_l3_scheduler           = leastloaded log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.800 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovn_metadata_enabled       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.800 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovn_nb_ca_cert             =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.800 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovn_nb_certificate         =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.800 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovn_nb_connection          = tcp:127.0.0.1:6641 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.800 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovn_nb_private_key         =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.800 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovn_sb_ca_cert             = /etc/pki/tls/certs/ovndbca.crt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.800 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovn_sb_certificate         = /etc/pki/tls/certs/ovndb.crt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.801 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovn_sb_connection          = ssl:ovsdbserver-sb.openstack.svc:6642 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.801 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovn_sb_private_key         = /etc/pki/tls/private/ovndb.key log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.801 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovsdb_connection_timeout   = 180 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.801 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovsdb_log_level            = INFO log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.801 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovsdb_probe_interval       = 60000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.801 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.ovsdb_retry_max_interval   = 180 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.801 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.vhost_sock_dir             = /var/run/openvswitch log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.801 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovn.vif_type                   = ovs log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.802 286362 DEBUG neutron.agent.ovn.metadata_agent [-] OVS.bridge_mac_table_size      = 50000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.802 286362 DEBUG neutron.agent.ovn.metadata_agent [-] OVS.igmp_snooping_enable       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.802 286362 DEBUG neutron.agent.ovn.metadata_agent [-] OVS.ovsdb_timeout              = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.802 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovs.ovsdb_connection           = tcp:127.0.0.1:6640 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.802 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ovs.ovsdb_connection_timeout   = 180 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.802 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.amqp_auto_delete = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.802 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.amqp_durable_queues = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.802 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.conn_pool_min_size = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.802 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.conn_pool_ttl = 1200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.803 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.direct_mandatory_flag = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.803 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.enable_cancel_on_failover = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.803 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.heartbeat_in_pthread = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.803 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.heartbeat_rate = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.803 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.heartbeat_timeout_threshold = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.803 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.kombu_compression = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.803 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.kombu_failover_strategy = round-robin log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.803 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.kombu_missing_consumer_retry_timeout = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.803 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.kombu_reconnect_delay = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.804 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.rabbit_ha_queues = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.804 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.rabbit_interval_max = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.804 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.rabbit_login_method = AMQPLAIN log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.804 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.rabbit_qos_prefetch_count = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.804 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.rabbit_quorum_delivery_limit = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.804 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.rabbit_quorum_max_memory_bytes = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.804 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.rabbit_quorum_max_memory_length = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.804 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.rabbit_quorum_queue = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.804 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.rabbit_retry_backoff = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.805 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.rabbit_retry_interval = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.805 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.rabbit_transient_queues_ttl = 1800 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.805 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.rpc_conn_pool_size = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.805 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.ssl      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.805 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.ssl_ca_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.805 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.ssl_cert_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.805 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.ssl_enforce_fips_mode = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.805 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.ssl_key_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.805 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_rabbit.ssl_version =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.806 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_notifications.driver = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.806 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_notifications.retry = -1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.806 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_notifications.topics = ['notifications'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.806 286362 DEBUG neutron.agent.ovn.metadata_agent [-] oslo_messaging_notifications.transport_url = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.806 286362 DEBUG neutron.agent.ovn.metadata_agent [-] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2613
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.817 286362 DEBUG ovsdbapp.backend.ovs_idl [-] Created schema index Bridge.name autocreate_indices /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/__init__.py:106
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.817 286362 DEBUG ovsdbapp.backend.ovs_idl [-] Created schema index Port.name autocreate_indices /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/__init__.py:106
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.817 286362 DEBUG ovsdbapp.backend.ovs_idl [-] Created schema index Interface.name autocreate_indices /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/__init__.py:106
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.817 286362 INFO ovsdbapp.backend.ovs_idl.vlog [-] tcp:127.0.0.1:6640: connecting...
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.818 286362 INFO ovsdbapp.backend.ovs_idl.vlog [-] tcp:127.0.0.1:6640: connected
Oct 11 01:55:54 compute-0 sudo[286592]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:55:54 compute-0 sudo[286592]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.831 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Loaded chassis name 47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6 (UUID: 47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6) and ovn bridge br-int. _load_config /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:309
Oct 11 01:55:54 compute-0 sudo[286592]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.861 286362 INFO neutron.agent.ovn.metadata.ovsdb [-] Getting OvsdbSbOvnIdl for MetadataAgent with retry
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.862 286362 DEBUG ovsdbapp.backend.ovs_idl [-] Created lookup_table index Chassis.name autocreate_indices /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/__init__.py:87
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.862 286362 DEBUG ovsdbapp.backend.ovs_idl [-] Created schema index Datapath_Binding.tunnel_key autocreate_indices /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/__init__.py:106
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.862 286362 DEBUG ovsdbapp.backend.ovs_idl [-] Created schema index Chassis_Private.name autocreate_indices /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/__init__.py:106
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.866 286362 INFO ovsdbapp.backend.ovs_idl.vlog [-] ssl:ovsdbserver-sb.openstack.svc:6642: connecting...
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.872 286362 INFO ovsdbapp.backend.ovs_idl.vlog [-] ssl:ovsdbserver-sb.openstack.svc:6642: connected
Oct 11 01:55:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v532: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.878 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched CREATE: ChassisPrivateCreateEvent(events=('create',), table='Chassis_Private', conditions=(('name', '=', '47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6'),), old_conditions=None), priority=20 to row=Chassis_Private(chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], external_ids={}, name=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, nb_cfg_timestamp=1760146112378, nb_cfg=1) old= matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.880 286362 DEBUG neutron_lib.callbacks.manager [-] Subscribe: <bound method MetadataProxyHandler.post_fork_initialize of <neutron.agent.ovn.metadata.server.MetadataProxyHandler object at 0x7f7f70ffae80>> process after_init 55550000, False subscribe /usr/lib/python3.9/site-packages/neutron_lib/callbacks/manager.py:52
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.881 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "singleton_lock" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.881 286362 DEBUG oslo_concurrency.lockutils [-] Acquired lock "singleton_lock" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.881 286362 DEBUG oslo_concurrency.lockutils [-] Releasing lock "singleton_lock" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.882 286362 INFO oslo_service.service [-] Starting 1 workers
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.888 286362 DEBUG oslo_service.service [-] Started child 286629 _start_child /usr/lib/python3.9/site-packages/oslo_service/service.py:575
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.892 286362 INFO oslo.privsep.daemon [-] Running privsep helper: ['sudo', 'neutron-rootwrap', '/etc/neutron/rootwrap.conf', 'privsep-helper', '--config-file', '/etc/neutron/neutron.conf', '--config-dir', '/etc/neutron.conf.d', '--privsep_context', 'neutron.privileged.namespace_cmd', '--privsep_sock_path', '/tmp/tmpp4qofotq/privsep.sock']
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.897 286629 DEBUG neutron_lib.callbacks.manager [-] Publish callbacks ['neutron.agent.ovn.metadata.server.MetadataProxyHandler.post_fork_initialize-1936382'] for process (None), after_init _notify_loop /usr/lib/python3.9/site-packages/neutron_lib/callbacks/manager.py:184
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.931 286629 INFO neutron.agent.ovn.metadata.ovsdb [-] Getting OvsdbSbOvnIdl for MetadataAgent with retry
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.932 286629 DEBUG ovsdbapp.backend.ovs_idl [-] Created lookup_table index Chassis.name autocreate_indices /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/__init__.py:87
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.932 286629 DEBUG ovsdbapp.backend.ovs_idl [-] Created schema index Datapath_Binding.tunnel_key autocreate_indices /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/__init__.py:106
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.936 286629 INFO ovsdbapp.backend.ovs_idl.vlog [-] ssl:ovsdbserver-sb.openstack.svc:6642: connecting...
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.945 286629 INFO ovsdbapp.backend.ovs_idl.vlog [-] ssl:ovsdbserver-sb.openstack.svc:6642: connected
Oct 11 01:55:54 compute-0 sudo[286617]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:55:54 compute-0 sudo[286617]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:54.957 286629 INFO eventlet.wsgi.server [-] (286629) wsgi starting up on http:/var/lib/neutron/metadata_proxy
Oct 11 01:55:54 compute-0 sudo[286617]: pam_unix(sudo:session): session closed for user root
Oct 11 01:55:55 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:55.656 286362 INFO oslo.privsep.daemon [-] Spawned new privsep daemon via rootwrap
Oct 11 01:55:55 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:55.658 286362 DEBUG oslo.privsep.daemon [-] Accepted privsep connection to /tmp/tmpp4qofotq/privsep.sock __init__ /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:362
Oct 11 01:55:55 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:55.493 286647 INFO oslo.privsep.daemon [-] privsep daemon starting
Oct 11 01:55:55 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:55.501 286647 INFO oslo.privsep.daemon [-] privsep process running with uid/gid: 0/0
Oct 11 01:55:55 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:55.506 286647 INFO oslo.privsep.daemon [-] privsep process running with capabilities (eff/prm/inh): CAP_SYS_ADMIN/CAP_SYS_ADMIN/none
Oct 11 01:55:55 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:55.506 286647 INFO oslo.privsep.daemon [-] privsep daemon running as pid 286647
Oct 11 01:55:55 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:55.663 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[49f219ec-b209-482f-8ebf-960b834fa9ca]: (2,) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 01:55:55 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:55:55 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:55:55 compute-0 ceph-mon[191930]: pgmap v532: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.293 286647 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "context-manager" by "neutron_lib.db.api._create_context_manager" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.293 286647 DEBUG oslo_concurrency.lockutils [-] Lock "context-manager" acquired by "neutron_lib.db.api._create_context_manager" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.293 286647 DEBUG oslo_concurrency.lockutils [-] Lock "context-manager" "released" by "neutron_lib.db.api._create_context_manager" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:55:56
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['volumes', 'default.rgw.log', 'vms', 'default.rgw.control', 'cephfs.cephfs.meta', '.rgw.root', 'backups', '.mgr', 'default.rgw.meta', 'cephfs.cephfs.data', 'images']
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.871 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[4684a770-9fc9-4f69-934d-6437c08e334c]: (4, []) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 01:55:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v533: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.875 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbAddCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, column=external_ids, values=({'neutron:ovn-metadata-id': 'b4bb09ed-ff76-577b-a650-a3afd94910ca'},)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.936 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-bridge': 'br-int'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.953 286362 DEBUG oslo_service.service [-] Full set of CONF: wait /usr/lib/python3.9/site-packages/oslo_service/service.py:649
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.953 286362 DEBUG oslo_service.service [-] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2589
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.954 286362 DEBUG oslo_service.service [-] Configuration options gathered from: log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2590
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.954 286362 DEBUG oslo_service.service [-] command line args: [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2591
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.954 286362 DEBUG oslo_service.service [-] config files: ['/etc/neutron/neutron.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2592
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.954 286362 DEBUG oslo_service.service [-] ================================================================================ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2594
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.955 286362 DEBUG oslo_service.service [-] agent_down_time                = 75 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.955 286362 DEBUG oslo_service.service [-] allow_bulk                     = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.955 286362 DEBUG oslo_service.service [-] api_extensions_path            =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.956 286362 DEBUG oslo_service.service [-] api_paste_config               = api-paste.ini log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.956 286362 DEBUG oslo_service.service [-] api_workers                    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.956 286362 DEBUG oslo_service.service [-] auth_ca_cert                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.956 286362 DEBUG oslo_service.service [-] auth_strategy                  = keystone log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.957 286362 DEBUG oslo_service.service [-] backlog                        = 4096 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.957 286362 DEBUG oslo_service.service [-] base_mac                       = fa:16:3e:00:00:00 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.957 286362 DEBUG oslo_service.service [-] bind_host                      = 0.0.0.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.958 286362 DEBUG oslo_service.service [-] bind_port                      = 9696 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.958 286362 DEBUG oslo_service.service [-] client_socket_timeout          = 900 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.958 286362 DEBUG oslo_service.service [-] config_dir                     = ['/etc/neutron.conf.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.959 286362 DEBUG oslo_service.service [-] config_file                    = ['/etc/neutron/neutron.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.959 286362 DEBUG oslo_service.service [-] config_source                  = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.959 286362 DEBUG oslo_service.service [-] control_exchange               = neutron log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.959 286362 DEBUG oslo_service.service [-] core_plugin                    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.960 286362 DEBUG oslo_service.service [-] debug                          = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.960 286362 DEBUG oslo_service.service [-] default_availability_zones     = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.960 286362 DEBUG oslo_service.service [-] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'OFPHandler=INFO', 'OfctlService=INFO', 'os_ken.base.app_manager=INFO', 'os_ken.controller.controller=INFO'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.961 286362 DEBUG oslo_service.service [-] dhcp_agent_notification        = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.961 286362 DEBUG oslo_service.service [-] dhcp_lease_duration            = 86400 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.961 286362 DEBUG oslo_service.service [-] dhcp_load_type                 = networks log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.961 286362 DEBUG oslo_service.service [-] dns_domain                     = openstacklocal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.962 286362 DEBUG oslo_service.service [-] enable_new_agents              = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.962 286362 DEBUG oslo_service.service [-] enable_traditional_dhcp        = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.962 286362 DEBUG oslo_service.service [-] external_dns_driver            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.963 286362 DEBUG oslo_service.service [-] external_pids                  = /var/lib/neutron/external/pids log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.963 286362 DEBUG oslo_service.service [-] filter_validation              = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.963 286362 DEBUG oslo_service.service [-] global_physnet_mtu             = 1500 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.963 286362 DEBUG oslo_service.service [-] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.964 286362 DEBUG oslo_service.service [-] host                           = compute-0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.964 286362 DEBUG oslo_service.service [-] http_retries                   = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.964 286362 DEBUG oslo_service.service [-] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.965 286362 DEBUG oslo_service.service [-] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.965 286362 DEBUG oslo_service.service [-] ipam_driver                    = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.965 286362 DEBUG oslo_service.service [-] ipv6_pd_enabled                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.965 286362 DEBUG oslo_service.service [-] log_config_append              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.966 286362 DEBUG oslo_service.service [-] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.966 286362 DEBUG oslo_service.service [-] log_dir                        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.966 286362 DEBUG oslo_service.service [-] log_file                       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.966 286362 DEBUG oslo_service.service [-] log_options                    = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.967 286362 DEBUG oslo_service.service [-] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.967 286362 DEBUG oslo_service.service [-] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.967 286362 DEBUG oslo_service.service [-] log_rotation_type              = none log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.968 286362 DEBUG oslo_service.service [-] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.968 286362 DEBUG oslo_service.service [-] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.968 286362 DEBUG oslo_service.service [-] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.968 286362 DEBUG oslo_service.service [-] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.969 286362 DEBUG oslo_service.service [-] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.969 286362 DEBUG oslo_service.service [-] max_dns_nameservers            = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.969 286362 DEBUG oslo_service.service [-] max_header_line                = 16384 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.969 286362 DEBUG oslo_service.service [-] max_logfile_count              = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.970 286362 DEBUG oslo_service.service [-] max_logfile_size_mb            = 200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.970 286362 DEBUG oslo_service.service [-] max_subnet_host_routes         = 20 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.970 286362 DEBUG oslo_service.service [-] metadata_backlog               = 4096 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.970 286362 DEBUG oslo_service.service [-] metadata_proxy_group           =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.971 286362 DEBUG oslo_service.service [-] metadata_proxy_shared_secret   = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.971 286362 DEBUG oslo_service.service [-] metadata_proxy_socket          = /var/lib/neutron/metadata_proxy log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.972 286362 DEBUG oslo_service.service [-] metadata_proxy_socket_mode     = deduce log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.972 286362 DEBUG oslo_service.service [-] metadata_proxy_user            =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.972 286362 DEBUG oslo_service.service [-] metadata_workers               = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.973 286362 DEBUG oslo_service.service [-] network_link_prefix            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.973 286362 DEBUG oslo_service.service [-] notify_nova_on_port_data_changes = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.973 286362 DEBUG oslo_service.service [-] notify_nova_on_port_status_changes = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.973 286362 DEBUG oslo_service.service [-] nova_client_cert               =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.974 286362 DEBUG oslo_service.service [-] nova_client_priv_key           =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.974 286362 DEBUG oslo_service.service [-] nova_metadata_host             = nova-metadata-internal.openstack.svc log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.975 286362 DEBUG oslo_service.service [-] nova_metadata_insecure         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.975 286362 DEBUG oslo_service.service [-] nova_metadata_port             = 8775 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.975 286362 DEBUG oslo_service.service [-] nova_metadata_protocol         = https log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.976 286362 DEBUG oslo_service.service [-] pagination_max_limit           = -1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.976 286362 DEBUG oslo_service.service [-] periodic_fuzzy_delay           = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.976 286362 DEBUG oslo_service.service [-] periodic_interval              = 40 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.976 286362 DEBUG oslo_service.service [-] publish_errors                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.977 286362 DEBUG oslo_service.service [-] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.977 286362 DEBUG oslo_service.service [-] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.977 286362 DEBUG oslo_service.service [-] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.977 286362 DEBUG oslo_service.service [-] retry_until_window             = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.978 286362 DEBUG oslo_service.service [-] rpc_resources_processing_step  = 20 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.978 286362 DEBUG oslo_service.service [-] rpc_response_max_timeout       = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.978 286362 DEBUG oslo_service.service [-] rpc_state_report_workers       = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.978 286362 DEBUG oslo_service.service [-] rpc_workers                    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.979 286362 DEBUG oslo_service.service [-] send_events_interval           = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.979 286362 DEBUG oslo_service.service [-] service_plugins                = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.979 286362 DEBUG oslo_service.service [-] setproctitle                   = on log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.979 286362 DEBUG oslo_service.service [-] state_path                     = /var/lib/neutron log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.980 286362 DEBUG oslo_service.service [-] syslog_log_facility            = syslog log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.980 286362 DEBUG oslo_service.service [-] tcp_keepidle                   = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.980 286362 DEBUG oslo_service.service [-] transport_url                  = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.980 286362 DEBUG oslo_service.service [-] use_eventlog                   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.981 286362 DEBUG oslo_service.service [-] use_journal                    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.981 286362 DEBUG oslo_service.service [-] use_json                       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.981 286362 DEBUG oslo_service.service [-] use_ssl                        = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.981 286362 DEBUG oslo_service.service [-] use_stderr                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.982 286362 DEBUG oslo_service.service [-] use_syslog                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.982 286362 DEBUG oslo_service.service [-] vlan_transparent               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.982 286362 DEBUG oslo_service.service [-] watch_log_file                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.983 286362 DEBUG oslo_service.service [-] wsgi_default_pool_size         = 100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.983 286362 DEBUG oslo_service.service [-] wsgi_keep_alive                = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.984 286362 DEBUG oslo_service.service [-] wsgi_log_format                = %(client_ip)s "%(request_line)s" status: %(status_code)s  len: %(body_length)s time: %(wall_seconds).7f log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.984 286362 DEBUG oslo_service.service [-] wsgi_server_debug              = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.984 286362 DEBUG oslo_service.service [-] oslo_concurrency.disable_process_locking = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.985 286362 DEBUG oslo_service.service [-] oslo_concurrency.lock_path     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.985 286362 DEBUG oslo_service.service [-] profiler.connection_string     = messaging:// log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.985 286362 DEBUG oslo_service.service [-] profiler.enabled               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.986 286362 DEBUG oslo_service.service [-] profiler.es_doc_type           = notification log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.986 286362 DEBUG oslo_service.service [-] profiler.es_scroll_size        = 10000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.986 286362 DEBUG oslo_service.service [-] profiler.es_scroll_time        = 2m log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.987 286362 DEBUG oslo_service.service [-] profiler.filter_error_trace    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.987 286362 DEBUG oslo_service.service [-] profiler.hmac_keys             = SECRET_KEY log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.987 286362 DEBUG oslo_service.service [-] profiler.sentinel_service_name = mymaster log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.988 286362 DEBUG oslo_service.service [-] profiler.socket_timeout        = 0.1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.988 286362 DEBUG oslo_service.service [-] profiler.trace_sqlalchemy      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.988 286362 DEBUG oslo_service.service [-] oslo_policy.enforce_new_defaults = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.989 286362 DEBUG oslo_service.service [-] oslo_policy.enforce_scope      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.989 286362 DEBUG oslo_service.service [-] oslo_policy.policy_default_rule = default log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.989 286362 DEBUG oslo_service.service [-] oslo_policy.policy_dirs        = ['policy.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.990 286362 DEBUG oslo_service.service [-] oslo_policy.policy_file        = policy.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.990 286362 DEBUG oslo_service.service [-] oslo_policy.remote_content_type = application/x-www-form-urlencoded log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.990 286362 DEBUG oslo_service.service [-] oslo_policy.remote_ssl_ca_crt_file = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.990 286362 DEBUG oslo_service.service [-] oslo_policy.remote_ssl_client_crt_file = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.991 286362 DEBUG oslo_service.service [-] oslo_policy.remote_ssl_client_key_file = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.991 286362 DEBUG oslo_service.service [-] oslo_policy.remote_ssl_verify_server_crt = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.991 286362 DEBUG oslo_service.service [-] oslo_messaging_metrics.metrics_buffer_size = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.992 286362 DEBUG oslo_service.service [-] oslo_messaging_metrics.metrics_enabled = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.992 286362 DEBUG oslo_service.service [-] oslo_messaging_metrics.metrics_process_name =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.993 286362 DEBUG oslo_service.service [-] oslo_messaging_metrics.metrics_socket_file = /var/tmp/metrics_collector.sock log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.993 286362 DEBUG oslo_service.service [-] oslo_messaging_metrics.metrics_thread_stop_timeout = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.993 286362 DEBUG oslo_service.service [-] oslo_middleware.http_basic_auth_user_file = /etc/htpasswd log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.993 286362 DEBUG oslo_service.service [-] service_providers.service_provider = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.994 286362 DEBUG oslo_service.service [-] privsep.capabilities           = [21, 12, 1, 2, 19] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.994 286362 DEBUG oslo_service.service [-] privsep.group                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.994 286362 DEBUG oslo_service.service [-] privsep.helper_command         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.995 286362 DEBUG oslo_service.service [-] privsep.logger_name            = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.995 286362 DEBUG oslo_service.service [-] privsep.thread_pool_size       = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.995 286362 DEBUG oslo_service.service [-] privsep.user                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.995 286362 DEBUG oslo_service.service [-] privsep_dhcp_release.capabilities = [21, 12] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.996 286362 DEBUG oslo_service.service [-] privsep_dhcp_release.group     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.996 286362 DEBUG oslo_service.service [-] privsep_dhcp_release.helper_command = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.996 286362 DEBUG oslo_service.service [-] privsep_dhcp_release.logger_name = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.997 286362 DEBUG oslo_service.service [-] privsep_dhcp_release.thread_pool_size = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.997 286362 DEBUG oslo_service.service [-] privsep_dhcp_release.user      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.997 286362 DEBUG oslo_service.service [-] privsep_ovs_vsctl.capabilities = [21, 12] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.997 286362 DEBUG oslo_service.service [-] privsep_ovs_vsctl.group        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.998 286362 DEBUG oslo_service.service [-] privsep_ovs_vsctl.helper_command = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.998 286362 DEBUG oslo_service.service [-] privsep_ovs_vsctl.logger_name  = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.998 286362 DEBUG oslo_service.service [-] privsep_ovs_vsctl.thread_pool_size = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.999 286362 DEBUG oslo_service.service [-] privsep_ovs_vsctl.user         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.999 286362 DEBUG oslo_service.service [-] privsep_namespace.capabilities = [21] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.999 286362 DEBUG oslo_service.service [-] privsep_namespace.group        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.999 286362 DEBUG oslo_service.service [-] privsep_namespace.helper_command = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.999 286362 DEBUG oslo_service.service [-] privsep_namespace.logger_name  = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:56.999 286362 DEBUG oslo_service.service [-] privsep_namespace.thread_pool_size = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.000 286362 DEBUG oslo_service.service [-] privsep_namespace.user         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.000 286362 DEBUG oslo_service.service [-] privsep_conntrack.capabilities = [12] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.000 286362 DEBUG oslo_service.service [-] privsep_conntrack.group        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.000 286362 DEBUG oslo_service.service [-] privsep_conntrack.helper_command = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.000 286362 DEBUG oslo_service.service [-] privsep_conntrack.logger_name  = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.000 286362 DEBUG oslo_service.service [-] privsep_conntrack.thread_pool_size = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.001 286362 DEBUG oslo_service.service [-] privsep_conntrack.user         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.001 286362 DEBUG oslo_service.service [-] privsep_link.capabilities      = [12, 21] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.001 286362 DEBUG oslo_service.service [-] privsep_link.group             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.001 286362 DEBUG oslo_service.service [-] privsep_link.helper_command    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.001 286362 DEBUG oslo_service.service [-] privsep_link.logger_name       = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.001 286362 DEBUG oslo_service.service [-] privsep_link.thread_pool_size  = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.001 286362 DEBUG oslo_service.service [-] privsep_link.user              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.002 286362 DEBUG oslo_service.service [-] AGENT.check_child_processes_action = respawn log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.002 286362 DEBUG oslo_service.service [-] AGENT.check_child_processes_interval = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.002 286362 DEBUG oslo_service.service [-] AGENT.comment_iptables_rules   = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.002 286362 DEBUG oslo_service.service [-] AGENT.debug_iptables_rules     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.002 286362 DEBUG oslo_service.service [-] AGENT.kill_scripts_path        = /etc/neutron/kill_scripts/ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.003 286362 DEBUG oslo_service.service [-] AGENT.root_helper              = sudo neutron-rootwrap /etc/neutron/rootwrap.conf log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.003 286362 DEBUG oslo_service.service [-] AGENT.root_helper_daemon       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.003 286362 DEBUG oslo_service.service [-] AGENT.use_helper_for_ns_read   = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.003 286362 DEBUG oslo_service.service [-] AGENT.use_random_fully         = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.003 286362 DEBUG oslo_service.service [-] oslo_versionedobjects.fatal_exception_format_errors = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.003 286362 DEBUG oslo_service.service [-] QUOTAS.default_quota           = -1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.004 286362 DEBUG oslo_service.service [-] QUOTAS.quota_driver            = neutron.db.quota.driver_nolock.DbQuotaNoLockDriver log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.004 286362 DEBUG oslo_service.service [-] QUOTAS.quota_network           = 100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.004 286362 DEBUG oslo_service.service [-] QUOTAS.quota_port              = 500 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.004 286362 DEBUG oslo_service.service [-] QUOTAS.quota_security_group    = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.004 286362 DEBUG oslo_service.service [-] QUOTAS.quota_security_group_rule = 100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.004 286362 DEBUG oslo_service.service [-] QUOTAS.quota_subnet            = 100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.005 286362 DEBUG oslo_service.service [-] QUOTAS.track_quota_usage       = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.005 286362 DEBUG oslo_service.service [-] nova.auth_section              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.005 286362 DEBUG oslo_service.service [-] nova.auth_type                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.005 286362 DEBUG oslo_service.service [-] nova.cafile                    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.005 286362 DEBUG oslo_service.service [-] nova.certfile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.005 286362 DEBUG oslo_service.service [-] nova.collect_timing            = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.006 286362 DEBUG oslo_service.service [-] nova.endpoint_type             = public log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.006 286362 DEBUG oslo_service.service [-] nova.insecure                  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.006 286362 DEBUG oslo_service.service [-] nova.keyfile                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.006 286362 DEBUG oslo_service.service [-] nova.region_name               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.006 286362 DEBUG oslo_service.service [-] nova.split_loggers             = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.006 286362 DEBUG oslo_service.service [-] nova.timeout                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.007 286362 DEBUG oslo_service.service [-] placement.auth_section         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.007 286362 DEBUG oslo_service.service [-] placement.auth_type            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.007 286362 DEBUG oslo_service.service [-] placement.cafile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.007 286362 DEBUG oslo_service.service [-] placement.certfile             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.007 286362 DEBUG oslo_service.service [-] placement.collect_timing       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.007 286362 DEBUG oslo_service.service [-] placement.endpoint_type        = public log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.008 286362 DEBUG oslo_service.service [-] placement.insecure             = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.008 286362 DEBUG oslo_service.service [-] placement.keyfile              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.008 286362 DEBUG oslo_service.service [-] placement.region_name          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.008 286362 DEBUG oslo_service.service [-] placement.split_loggers        = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.008 286362 DEBUG oslo_service.service [-] placement.timeout              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.008 286362 DEBUG oslo_service.service [-] ironic.auth_section            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.008 286362 DEBUG oslo_service.service [-] ironic.auth_type               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.009 286362 DEBUG oslo_service.service [-] ironic.cafile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.009 286362 DEBUG oslo_service.service [-] ironic.certfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.009 286362 DEBUG oslo_service.service [-] ironic.collect_timing          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.009 286362 DEBUG oslo_service.service [-] ironic.connect_retries         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.009 286362 DEBUG oslo_service.service [-] ironic.connect_retry_delay     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.009 286362 DEBUG oslo_service.service [-] ironic.enable_notifications    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.010 286362 DEBUG oslo_service.service [-] ironic.endpoint_override       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.010 286362 DEBUG oslo_service.service [-] ironic.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.010 286362 DEBUG oslo_service.service [-] ironic.interface               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.010 286362 DEBUG oslo_service.service [-] ironic.keyfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.010 286362 DEBUG oslo_service.service [-] ironic.max_version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.010 286362 DEBUG oslo_service.service [-] ironic.min_version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.010 286362 DEBUG oslo_service.service [-] ironic.region_name             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.011 286362 DEBUG oslo_service.service [-] ironic.service_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.011 286362 DEBUG oslo_service.service [-] ironic.service_type            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.011 286362 DEBUG oslo_service.service [-] ironic.split_loggers           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.011 286362 DEBUG oslo_service.service [-] ironic.status_code_retries     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.011 286362 DEBUG oslo_service.service [-] ironic.status_code_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.011 286362 DEBUG oslo_service.service [-] ironic.timeout                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.012 286362 DEBUG oslo_service.service [-] ironic.valid_interfaces        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.012 286362 DEBUG oslo_service.service [-] ironic.version                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.012 286362 DEBUG oslo_service.service [-] cli_script.dry_run             = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.012 286362 DEBUG oslo_service.service [-] ovn.allow_stateless_action_supported = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.012 286362 DEBUG oslo_service.service [-] ovn.dhcp_default_lease_time    = 43200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.012 286362 DEBUG oslo_service.service [-] ovn.disable_ovn_dhcp_for_baremetal_ports = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.012 286362 DEBUG oslo_service.service [-] ovn.dns_servers                = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.013 286362 DEBUG oslo_service.service [-] ovn.enable_distributed_floating_ip = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.013 286362 DEBUG oslo_service.service [-] ovn.neutron_sync_mode          = log log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.013 286362 DEBUG oslo_service.service [-] ovn.ovn_dhcp4_global_options   = {} log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.013 286362 DEBUG oslo_service.service [-] ovn.ovn_dhcp6_global_options   = {} log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.013 286362 DEBUG oslo_service.service [-] ovn.ovn_emit_need_to_frag      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.013 286362 DEBUG oslo_service.service [-] ovn.ovn_l3_mode                = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.014 286362 DEBUG oslo_service.service [-] ovn.ovn_l3_scheduler           = leastloaded log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.014 286362 DEBUG oslo_service.service [-] ovn.ovn_metadata_enabled       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.014 286362 DEBUG oslo_service.service [-] ovn.ovn_nb_ca_cert             =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.014 286362 DEBUG oslo_service.service [-] ovn.ovn_nb_certificate         =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.014 286362 DEBUG oslo_service.service [-] ovn.ovn_nb_connection          = tcp:127.0.0.1:6641 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.014 286362 DEBUG oslo_service.service [-] ovn.ovn_nb_private_key         =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.015 286362 DEBUG oslo_service.service [-] ovn.ovn_sb_ca_cert             = /etc/pki/tls/certs/ovndbca.crt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.015 286362 DEBUG oslo_service.service [-] ovn.ovn_sb_certificate         = /etc/pki/tls/certs/ovndb.crt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.015 286362 DEBUG oslo_service.service [-] ovn.ovn_sb_connection          = ssl:ovsdbserver-sb.openstack.svc:6642 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.015 286362 DEBUG oslo_service.service [-] ovn.ovn_sb_private_key         = /etc/pki/tls/private/ovndb.key log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.015 286362 DEBUG oslo_service.service [-] ovn.ovsdb_connection_timeout   = 180 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.015 286362 DEBUG oslo_service.service [-] ovn.ovsdb_log_level            = INFO log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.016 286362 DEBUG oslo_service.service [-] ovn.ovsdb_probe_interval       = 60000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.016 286362 DEBUG oslo_service.service [-] ovn.ovsdb_retry_max_interval   = 180 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.016 286362 DEBUG oslo_service.service [-] ovn.vhost_sock_dir             = /var/run/openvswitch log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.016 286362 DEBUG oslo_service.service [-] ovn.vif_type                   = ovs log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.016 286362 DEBUG oslo_service.service [-] OVS.bridge_mac_table_size      = 50000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.016 286362 DEBUG oslo_service.service [-] OVS.igmp_snooping_enable       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.016 286362 DEBUG oslo_service.service [-] OVS.ovsdb_timeout              = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.017 286362 DEBUG oslo_service.service [-] ovs.ovsdb_connection           = tcp:127.0.0.1:6640 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.017 286362 DEBUG oslo_service.service [-] ovs.ovsdb_connection_timeout   = 180 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.017 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.amqp_auto_delete = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.017 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.amqp_durable_queues = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.017 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.conn_pool_min_size = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.018 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.conn_pool_ttl = 1200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.018 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.direct_mandatory_flag = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.018 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.enable_cancel_on_failover = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.018 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.heartbeat_in_pthread = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.018 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.heartbeat_rate = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.018 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.heartbeat_timeout_threshold = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.019 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.kombu_compression = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.019 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.kombu_failover_strategy = round-robin log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.019 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.kombu_missing_consumer_retry_timeout = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.019 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.kombu_reconnect_delay = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.019 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.rabbit_ha_queues = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.019 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.rabbit_interval_max = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.020 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.rabbit_login_method = AMQPLAIN log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.020 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.rabbit_qos_prefetch_count = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.020 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.rabbit_quorum_delivery_limit = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.020 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.rabbit_quorum_max_memory_bytes = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.020 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.rabbit_quorum_max_memory_length = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.020 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.rabbit_quorum_queue = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.021 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.rabbit_retry_backoff = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.021 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.rabbit_retry_interval = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.021 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.rabbit_transient_queues_ttl = 1800 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.021 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.rpc_conn_pool_size = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.021 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.ssl      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.021 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.ssl_ca_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.021 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.ssl_cert_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.022 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.ssl_enforce_fips_mode = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.022 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.ssl_key_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.022 286362 DEBUG oslo_service.service [-] oslo_messaging_rabbit.ssl_version =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.022 286362 DEBUG oslo_service.service [-] oslo_messaging_notifications.driver = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.022 286362 DEBUG oslo_service.service [-] oslo_messaging_notifications.retry = -1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.022 286362 DEBUG oslo_service.service [-] oslo_messaging_notifications.topics = ['notifications'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.023 286362 DEBUG oslo_service.service [-] oslo_messaging_notifications.transport_url = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 01:55:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:55:57.023 286362 DEBUG oslo_service.service [-] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2613
Oct 11 01:55:57 compute-0 podman[286652]: 2025-10-11 01:55:57.245185863 +0000 UTC m=+0.139219341 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:55:57 compute-0 podman[286653]: 2025-10-11 01:55:57.276054519 +0000 UTC m=+0.154713540 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 01:55:57 compute-0 podman[286654]: 2025-10-11 01:55:57.2791999 +0000 UTC m=+0.152592296 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, build-date=2024-09-18T21:23:30, release=1214.1726694543, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, version=9.4, name=ubi9, io.openshift.tags=base rhel9, io.openshift.expose-services=, io.buildah.version=1.29.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., managed_by=edpm_ansible, config_id=edpm, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, distribution-scope=public, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9, com.redhat.component=ubi9-container, release-0.7.12=, container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9., architecture=x86_64, vcs-type=git, vendor=Red Hat, Inc.)
Oct 11 01:55:57 compute-0 ceph-mon[191930]: pgmap v533: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v534: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:55:59 compute-0 podman[286715]: 2025-10-11 01:55:59.255167453 +0000 UTC m=+0.135497835 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true)
Oct 11 01:55:59 compute-0 sshd-session[286734]: Accepted publickey for zuul from 192.168.122.30 port 46324 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:55:59 compute-0 systemd-logind[804]: New session 55 of user zuul.
Oct 11 01:55:59 compute-0 systemd[1]: Started Session 55 of User zuul.
Oct 11 01:55:59 compute-0 sshd-session[286734]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:55:59 compute-0 podman[157119]: time="2025-10-11T01:55:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:55:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:55:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 35732 "" "Go-http-client/1.1"
Oct 11 01:55:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:55:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:55:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 7257 "" "Go-http-client/1.1"
Oct 11 01:55:59 compute-0 ceph-mon[191930]: pgmap v534: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v535: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:01 compute-0 python3.9[286887]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:56:01 compute-0 openstack_network_exporter[159265]: ERROR   01:56:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:56:01 compute-0 openstack_network_exporter[159265]: ERROR   01:56:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:56:01 compute-0 openstack_network_exporter[159265]: ERROR   01:56:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:56:01 compute-0 openstack_network_exporter[159265]: ERROR   01:56:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:56:01 compute-0 openstack_network_exporter[159265]: ERROR   01:56:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:56:01 compute-0 ceph-mon[191930]: pgmap v535: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:02 compute-0 sudo[287041]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kubdpetpolyizqvelvzheqwzkfacxqxp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147761.963438-34-167649282321570/AnsiballZ_command.py'
Oct 11 01:56:02 compute-0 sudo[287041]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v536: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:02 compute-0 python3.9[287043]: ansible-ansible.legacy.command Invoked with _raw_params=podman ps -a --filter name=^nova_virtlogd$ --format \{\{.Names\}\} _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:56:03 compute-0 sudo[287041]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:04 compute-0 ceph-mon[191930]: pgmap v536: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:04 compute-0 sudo[287204]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pfaizutighlyhusrxswtcresyjzdspaj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147763.5760953-45-132716679764582/AnsiballZ_systemd_service.py'
Oct 11 01:56:04 compute-0 sudo[287204]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:04 compute-0 python3.9[287206]: ansible-ansible.builtin.systemd_service Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 01:56:04 compute-0 systemd[1]: Reloading.
Oct 11 01:56:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:56:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v537: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:04 compute-0 systemd-rc-local-generator[287234]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:56:04 compute-0 systemd-sysv-generator[287237]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:56:05 compute-0 sudo[287204]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:06 compute-0 ceph-mon[191930]: pgmap v537: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
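
    [annotation] Each pg_autoscaler line above records a pool's share of raw capacity, its bias, the raw PG target, and the power-of-two value it quantizes to. The raw targets in this log are consistent with capacity_ratio * bias * 300 (e.g. 7.185749983720779e-06 * 1.0 * 300 = 0.0021557...); the factor 300 is an inference from the logged numbers, not stated in the log (plausibly the root's PG budget on this small cluster). A minimal sketch of that arithmetic, with a simplified quantization step (the real autoscaler also applies a change threshold before acting):

        import math

        ROOT_PG_BUDGET = 300  # inferred from the logged ratios; an assumption

        def pg_target(capacity_ratio, bias, pg_min=1):
            """Reproduce the raw/quantized PG targets shown in the autoscaler log."""
            raw = capacity_ratio * bias * ROOT_PG_BUDGET
            # Quantize up to a power of two, never below the pool's minimum.
            quantized = max(pg_min, 2 ** math.ceil(math.log2(raw))) if raw >= 1 else pg_min
            return raw, quantized

        # '.mgr' line: using 7.185749983720779e-06 of space, bias 1.0
        print(pg_target(7.185749983720779e-06, 1.0))             # (0.0021557..., 1)
        # 'cephfs.cephfs.meta' line: bias 4.0, quantized to 16 (pg_num_min=16)
        print(pg_target(5.087256625643029e-07, 4.0, pg_min=16))  # (0.0006104..., 16)
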
Oct 11 01:56:06 compute-0 python3.9[287392]: ansible-ansible.builtin.service_facts Invoked
Oct 11 01:56:06 compute-0 network[287409]: You are using 'network' service provided by 'network-scripts', which are now deprecated.
Oct 11 01:56:06 compute-0 network[287410]: 'network-scripts' will be removed from distribution in near future.
Oct 11 01:56:06 compute-0 network[287411]: It is advised to switch to 'NetworkManager' instead for network management.
Oct 11 01:56:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v538: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:08 compute-0 ceph-mon[191930]: pgmap v538: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v539: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:56:10 compute-0 ceph-mon[191930]: pgmap v539: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v540: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:12 compute-0 ceph-mon[191930]: pgmap v540: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v541: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:13 compute-0 sudo[287683]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dgpkkchciylhpgvdbegeklajsijiixpo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147772.4776692-64-225916015705443/AnsiballZ_systemd_service.py'
Oct 11 01:56:13 compute-0 sudo[287683]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:13 compute-0 python3.9[287685]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_libvirt.target state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:56:13 compute-0 sudo[287683]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:14 compute-0 ceph-mon[191930]: pgmap v541: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:14 compute-0 podman[287786]: 2025-10-11 01:56:14.241284442 +0000 UTC m=+0.121375650 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 01:56:14 compute-0 podman[287791]: 2025-10-11 01:56:14.267748505 +0000 UTC m=+0.144045696 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, distribution-scope=public, build-date=2025-08-20T13:12:41, io.openshift.expose-services=, maintainer=Red Hat, Inc., release=1755695350, container_name=openstack_network_exporter, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, version=9.6, config_id=edpm, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, architecture=x86_64, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.tags=minimal rhel9, com.redhat.component=ubi9-minimal-container, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, managed_by=edpm_ansible, io.buildah.version=1.33.7, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, vcs-type=git, vendor=Red Hat, Inc., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers)
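
    [annotation] The container health_status=healthy entries above are podman periodically executing each container's configured healthcheck (the 'healthcheck' key in the logged config_data) and recording the result. The same check can be triggered on demand; a minimal sketch for the two exporters seen here:

        import subprocess

        # 'podman healthcheck run' executes the container's configured check;
        # exit code 0 means healthy, non-zero means unhealthy.
        for name in ("node_exporter", "openstack_network_exporter"):
            result = subprocess.run(["podman", "healthcheck", "run", name])
            print(name, "healthy" if result.returncode == 0 else "unhealthy")
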
Oct 11 01:56:14 compute-0 sudo[287878]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-obpborppryxhznlmocpcxydrqhyotbxm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147773.7528424-64-114297525586363/AnsiballZ_systemd_service.py'
Oct 11 01:56:14 compute-0 sudo[287878]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:14 compute-0 python3.9[287880]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_virtlogd_wrapper.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:56:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:56:14 compute-0 sudo[287878]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v542: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:15 compute-0 sudo[288031]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ecocykldczrfpsvnqqenqdjyuuueujxn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147775.099482-64-158593544865157/AnsiballZ_systemd_service.py'
Oct 11 01:56:15 compute-0 sudo[288031]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:16 compute-0 ceph-mon[191930]: pgmap v542: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:16 compute-0 python3.9[288033]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_virtnodedevd.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:56:16 compute-0 sudo[288031]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v543: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:17 compute-0 sudo[288184]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pqzvtklucxuetmsvbevdyawzwinrbbgl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147776.4600537-64-193308435007482/AnsiballZ_systemd_service.py'
Oct 11 01:56:17 compute-0 sudo[288184]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:17 compute-0 python3.9[288186]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_virtproxyd.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:56:17 compute-0 sudo[288184]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:18 compute-0 ceph-mon[191930]: pgmap v543: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:18 compute-0 sudo[288337]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-affsjrdyphnytutfqraprzvaxdkxecvg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147777.7291703-64-191700207029687/AnsiballZ_systemd_service.py'
Oct 11 01:56:18 compute-0 sudo[288337]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:18 compute-0 podman[288339]: 2025-10-11 01:56:18.506835343 +0000 UTC m=+0.139312783 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.3, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.license=GPLv2, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 01:56:18 compute-0 python3.9[288340]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_virtqemud.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:56:18 compute-0 sudo[288337]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v544: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:19 compute-0 sudo[288511]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zosjkqyjpljcskiwcgawsfkhsylelqqk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147779.1742377-64-206805658968983/AnsiballZ_systemd_service.py'
Oct 11 01:56:19 compute-0 sudo[288511]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:56:20 compute-0 python3.9[288513]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_virtsecretd.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 01:56:20 compute-0 ceph-mon[191930]: pgmap v544: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:20 compute-0 sudo[288511]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v545: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:20 compute-0 sudo[288664]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-unvkxfoufngoazjhifdpbqxisefewanb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147780.422941-64-240879962222964/AnsiballZ_systemd_service.py'
Oct 11 01:56:20 compute-0 sudo[288664]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:21 compute-0 python3.9[288666]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_virtstoraged.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
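
    [annotation] The ansible-ansible.builtin.systemd_service invocations above walk the tripleo_nova_* units one by one with state=stopped and enabled=False. A minimal sketch of the equivalent systemctl sequence for the units named in this log:

        import subprocess

        UNITS = (
            "tripleo_nova_libvirt.target",
            "tripleo_nova_virtlogd_wrapper.service",
            "tripleo_nova_virtnodedevd.service",
            "tripleo_nova_virtproxyd.service",
            "tripleo_nova_virtqemud.service",
            "tripleo_nova_virtsecretd.service",
            "tripleo_nova_virtstoraged.service",
        )

        for unit in UNITS:
            # state=stopped and enabled=False in the logged module calls map to:
            subprocess.run(["systemctl", "stop", unit], check=False)
            subprocess.run(["systemctl", "disable", unit], check=False)
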
Oct 11 01:56:21 compute-0 sudo[288664]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:22 compute-0 ceph-mon[191930]: pgmap v545: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:22 compute-0 sudo[288834]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mkoaplfbxcqlpjlrgxycarozltjzkkux ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147781.9064002-116-253776827212453/AnsiballZ_file.py'
Oct 11 01:56:22 compute-0 sudo[288834]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:22 compute-0 podman[288791]: 2025-10-11 01:56:22.72258918 +0000 UTC m=+0.142916547 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.build-date=20251009, container_name=ovn_metadata_agent, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
Oct 11 01:56:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v546: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:22 compute-0 python3.9[288838]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_libvirt.target state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:56:22 compute-0 sudo[288834]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:23 compute-0 sudo[288988]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dlblhockfywnsenbujyzcqubxheumipt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147783.2084012-116-252595586817493/AnsiballZ_file.py'
Oct 11 01:56:23 compute-0 sudo[288988]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:24 compute-0 ceph-mon[191930]: pgmap v546: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:24 compute-0 python3.9[288990]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_virtlogd_wrapper.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:56:24 compute-0 sudo[288988]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:56:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v547: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:25 compute-0 sudo[289140]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dsvcpqaoadvqhcqymjanpmxtsdmatvcr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147784.487022-116-77950690727253/AnsiballZ_file.py'
Oct 11 01:56:25 compute-0 sudo[289140]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:25 compute-0 python3.9[289142]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_virtnodedevd.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:56:25 compute-0 sudo[289140]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:26 compute-0 sudo[289292]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ncrvrqreobulgewkohzbexoeuyuzfolt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147785.558557-116-68895357891672/AnsiballZ_file.py'
Oct 11 01:56:26 compute-0 sudo[289292]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:26 compute-0 ceph-mon[191930]: pgmap v547: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:26 compute-0 python3.9[289294]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_virtproxyd.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:56:26 compute-0 sudo[289292]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:56:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:56:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:56:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:56:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:56:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:56:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v548: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:27 compute-0 sudo[289444]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fbrceklcmvnbeeybalbubiubinjeslme ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147786.6245854-116-15348619858086/AnsiballZ_file.py'
Oct 11 01:56:27 compute-0 sudo[289444]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:27 compute-0 python3.9[289446]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_virtqemud.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:56:27 compute-0 sudo[289444]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:28 compute-0 podman[289570]: 2025-10-11 01:56:28.179333495 +0000 UTC m=+0.094678942 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:56:28 compute-0 ceph-mon[191930]: pgmap v548: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:28 compute-0 sudo[289646]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jismqsajhcvdtriqqopilzqgkowlxhlw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147787.6440315-116-42804089505522/AnsiballZ_file.py'
Oct 11 01:56:28 compute-0 sudo[289646]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:28 compute-0 podman[289574]: 2025-10-11 01:56:28.227070496 +0000 UTC m=+0.118122457 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, name=ubi9, architecture=x86_64, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, distribution-scope=public, managed_by=edpm_ansible, config_id=edpm, build-date=2024-09-18T21:23:30, io.k8s.display-name=Red Hat Universal Base Image 9, maintainer=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, container_name=kepler, io.openshift.expose-services=, version=9.4, com.redhat.component=ubi9-container, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.buildah.version=1.29.0, release-0.7.12=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, summary=Provides the latest release of Red Hat Universal Base Image 9., io.openshift.tags=base rhel9, release=1214.1726694543, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 01:56:28 compute-0 podman[289571]: 2025-10-11 01:56:28.287848943 +0000 UTC m=+0.187775752 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ovn_controller)
Oct 11 01:56:28 compute-0 python3.9[289656]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_virtsecretd.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:56:28 compute-0 sudo[289646]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v549: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:29 compute-0 sudo[289809]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yuwrhwgloamshosjnfzrpriukjxbtvhk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147788.7345357-116-219385458633692/AnsiballZ_file.py'
Oct 11 01:56:29 compute-0 sudo[289809]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:29 compute-0 podman[289811]: 2025-10-11 01:56:29.512196145 +0000 UTC m=+0.149267290 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 01:56:29 compute-0 python3.9[289812]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_virtstoraged.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:56:29 compute-0 sudo[289809]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:29 compute-0 podman[157119]: time="2025-10-11T01:56:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:56:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:56:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 35732 "" "Go-http-client/1.1"
Oct 11 01:56:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:56:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 7269 "" "Go-http-client/1.1"
Oct 11 01:56:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:56:30 compute-0 ceph-mon[191930]: pgmap v549: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:30 compute-0 sudo[289980]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-btwgifbcojazbicthvibqhrbfbroywei ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147789.8824387-166-127047650926008/AnsiballZ_file.py'
Oct 11 01:56:30 compute-0 sudo[289980]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:30 compute-0 python3.9[289982]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_libvirt.target state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:56:30 compute-0 sudo[289980]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v550: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:31 compute-0 openstack_network_exporter[159265]: ERROR   01:56:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:56:31 compute-0 openstack_network_exporter[159265]: ERROR   01:56:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:56:31 compute-0 openstack_network_exporter[159265]: ERROR   01:56:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath

Oct 11 01:56:31 compute-0 openstack_network_exporter[159265]: ERROR   01:56:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:56:31 compute-0 openstack_network_exporter[159265]: ERROR   01:56:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:56:31 compute-0 sudo[290132]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sfchzzfoxjldxjsiovhkhvbxyribsecc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147790.9215448-166-120322519629102/AnsiballZ_file.py'
Oct 11 01:56:31 compute-0 sudo[290132]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:31 compute-0 python3.9[290134]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_virtlogd_wrapper.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:56:31 compute-0 sudo[290132]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:32 compute-0 ceph-mon[191930]: pgmap v550: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:32 compute-0 sudo[290284]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-appkpjpdysigsrnkcyajrrgbpztnhqab ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147791.9535403-166-23787627729775/AnsiballZ_file.py'
Oct 11 01:56:32 compute-0 sudo[290284]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:32 compute-0 python3.9[290286]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_virtnodedevd.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:56:32 compute-0 sudo[290284]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v551: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:33 compute-0 sudo[290436]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eeayzcosmymfsehthdqddcshotaguedx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147792.9405437-166-138723512975876/AnsiballZ_file.py'
Oct 11 01:56:33 compute-0 sudo[290436]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:33 compute-0 python3.9[290438]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_virtproxyd.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:56:33 compute-0 sudo[290436]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:34 compute-0 ceph-mon[191930]: pgmap v551: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:34 compute-0 sudo[290588]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-coontlszbbzmvndskieafgstoeqtclds ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147793.9356854-166-261337822472341/AnsiballZ_file.py'
Oct 11 01:56:34 compute-0 sudo[290588]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:34 compute-0 python3.9[290590]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_virtqemud.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:56:34 compute-0 sudo[290588]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:56:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v552: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:35 compute-0 sudo[290740]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vuuqzdeelxfklkjlksspitiasqolkpop ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147794.952169-166-255347588669128/AnsiballZ_file.py'
Oct 11 01:56:35 compute-0 sudo[290740]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:35 compute-0 python3.9[290742]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_virtsecretd.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:56:35 compute-0 sudo[290740]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:36 compute-0 ceph-mon[191930]: pgmap v552: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:36 compute-0 sudo[290892]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wfiqhctujfncqjlpohzdvobdlodewmfn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147795.9280562-166-241447114820870/AnsiballZ_file.py'
Oct 11 01:56:36 compute-0 sudo[290892]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:36 compute-0 python3.9[290894]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_virtstoraged.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
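
    [annotation] Having stopped the units, the play deletes each unit file from /usr/lib/systemd/system and then from /etc/systemd/system (the ansible.builtin.file calls with state=absent above), and follows up with a daemon reload. A minimal sketch of the same cleanup:

        from pathlib import Path
        import subprocess

        UNITS = (
            "tripleo_nova_libvirt.target",
            "tripleo_nova_virtlogd_wrapper.service",
            "tripleo_nova_virtnodedevd.service",
            "tripleo_nova_virtproxyd.service",
            "tripleo_nova_virtqemud.service",
            "tripleo_nova_virtsecretd.service",
            "tripleo_nova_virtstoraged.service",
        )

        for unit in UNITS:
            for base in ("/usr/lib/systemd/system", "/etc/systemd/system"):
                # state=absent semantics: remove if present, ignore if missing.
                Path(base, unit).unlink(missing_ok=True)

        # Matches the daemon_reload=True call (systemd logs "Reloading." below).
        subprocess.run(["systemctl", "daemon-reload"], check=False)
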
Oct 11 01:56:36 compute-0 sudo[290892]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v553: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:37 compute-0 sudo[291044]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-snegtwdcmzxgegfndcjdmdmzaxcxkjkg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147797.152273-217-44292859133885/AnsiballZ_command.py'
Oct 11 01:56:37 compute-0 sudo[291044]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:37 compute-0 python3.9[291046]: ansible-ansible.legacy.command Invoked with _raw_params=if systemctl is-active certmonger.service; then
                                               systemctl disable --now certmonger.service
                                               test -f /etc/systemd/system/certmonger.service || systemctl mask certmonger.service
                                             fi
                                              _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
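
    [annotation] The certmonger task above is a small shell guard: only if the service is active does it disable it, and it masks the service only when no unit file exists under /etc/systemd/system. The same logic in Python, as a sketch:

        import os.path
        import subprocess

        def run(*argv):
            return subprocess.run(argv, check=False).returncode

        # Mirror the logged shell: disable only if active; mask only if no
        # /etc/systemd/system/certmonger.service unit file exists.
        if run("systemctl", "is-active", "certmonger.service") == 0:
            run("systemctl", "disable", "--now", "certmonger.service")
            if not os.path.isfile("/etc/systemd/system/certmonger.service"):
                run("systemctl", "mask", "certmonger.service")
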
Oct 11 01:56:38 compute-0 sudo[291044]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:38 compute-0 ceph-mon[191930]: pgmap v553: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v554: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:39 compute-0 python3.9[291198]: ansible-ansible.builtin.find Invoked with file_type=any hidden=True paths=['/var/lib/certmonger/requests'] patterns=[] read_whole_file=False age_stamp=mtime recurse=False follow=False get_checksum=False checksum_algorithm=sha1 use_regex=False exact_mode=True excludes=None contains=None age=None size=None depth=None mode=None encoding=None limit=None
Oct 11 01:56:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:56:40 compute-0 sudo[291348]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oiqdkqwhstlwqfeecldhezqstuqrdsjl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147799.6656744-235-174518600385949/AnsiballZ_systemd_service.py'
Oct 11 01:56:40 compute-0 sudo[291348]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:40 compute-0 ceph-mon[191930]: pgmap v554: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:40 compute-0 python3.9[291350]: ansible-ansible.builtin.systemd_service Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 01:56:40 compute-0 systemd[1]: Reloading.
Oct 11 01:56:40 compute-0 systemd-rc-local-generator[291374]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 01:56:40 compute-0 systemd-sysv-generator[291379]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 01:56:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v555: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:41 compute-0 sudo[291348]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:41 compute-0 sudo[291534]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qywusaqyweiwsffdfwtncvcxoiugbjww ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147801.3520463-243-204708652991954/AnsiballZ_command.py'
Oct 11 01:56:41 compute-0 sudo[291534]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:42 compute-0 python3.9[291536]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_libvirt.target _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:56:42 compute-0 ceph-mon[191930]: pgmap v555: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:42 compute-0 sudo[291534]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v556: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:43 compute-0 sudo[291687]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vwedammajkuaqvkcyxquueqeacondfdi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147802.5402794-243-61952295609296/AnsiballZ_command.py'
Oct 11 01:56:43 compute-0 sudo[291687]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:43 compute-0 python3.9[291689]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_virtlogd_wrapper.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:56:43 compute-0 sudo[291687]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:44 compute-0 sudo[291840]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jzayaiyylimoiiiilukqqokenhztsmdg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147803.659445-243-272609934279695/AnsiballZ_command.py'
Oct 11 01:56:44 compute-0 sudo[291840]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:44 compute-0 ceph-mon[191930]: pgmap v556: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:44 compute-0 python3.9[291842]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_virtnodedevd.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:56:44 compute-0 sudo[291840]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:44 compute-0 podman[291845]: 2025-10-11 01:56:44.609578016 +0000 UTC m=+0.118080344 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, url=https://catalog.redhat.com/en/search?searchType=containers, io.openshift.expose-services=, build-date=2025-08-20T13:12:41, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vcs-type=git, vendor=Red Hat, Inc., release=1755695350, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=minimal rhel9, architecture=x86_64, config_id=edpm, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.buildah.version=1.33.7, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, name=ubi9-minimal, version=9.6, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., container_name=openstack_network_exporter, distribution-scope=public, com.redhat.component=ubi9-minimal-container, maintainer=Red Hat, Inc., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 01:56:44 compute-0 podman[291844]: 2025-10-11 01:56:44.635863186 +0000 UTC m=+0.138464126 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 01:56:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:56:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v557: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:45 compute-0 sudo[292035]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yvpgccimosiwkuogyjcbzjcgzimsklyp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147804.727939-243-82812166019600/AnsiballZ_command.py'
Oct 11 01:56:45 compute-0 sudo[292035]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:45 compute-0 python3.9[292037]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_virtproxyd.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:56:45 compute-0 sudo[292035]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:46 compute-0 ceph-mon[191930]: pgmap v557: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:46 compute-0 sudo[292188]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ytyxfdrsquvrilfnkxhtgethpywnvgcg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147805.8312552-243-205956531719971/AnsiballZ_command.py'
Oct 11 01:56:46 compute-0 sudo[292188]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:46 compute-0 python3.9[292190]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_virtqemud.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:56:46 compute-0 sudo[292188]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v558: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:47 compute-0 sudo[292341]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-frxixawngdvgwtsysgbfqrasdjqfeams ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147807.01922-243-28658166320806/AnsiballZ_command.py'
Oct 11 01:56:47 compute-0 sudo[292341]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:47 compute-0 python3.9[292343]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_virtsecretd.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:56:47 compute-0 sudo[292341]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:48 compute-0 ceph-mon[191930]: pgmap v558: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:48 compute-0 sudo[292494]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wvtjcctetnlgogtprheymtpaufomqmac ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147808.1500733-243-38952610341208/AnsiballZ_command.py'
Oct 11 01:56:48 compute-0 sudo[292494]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:48 compute-0 podman[292496]: 2025-10-11 01:56:48.845639062 +0000 UTC m=+0.156905847 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 01:56:48 compute-0 python3.9[292497]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_virtstoraged.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:56:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v559: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:48 compute-0 sudo[292494]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:56:50 compute-0 sudo[292668]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rxncdoxpsfsxfuzxxmjnbhsewrqhdgae ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147809.420824-297-134606780875816/AnsiballZ_getent.py'
Oct 11 01:56:50 compute-0 sudo[292668]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:50 compute-0 python3.9[292670]: ansible-ansible.builtin.getent Invoked with database=passwd key=libvirt fail_key=True service=None split=None
Oct 11 01:56:50 compute-0 ceph-mon[191930]: pgmap v559: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:50 compute-0 sudo[292668]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v560: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:51 compute-0 sudo[292821]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dyxuvjfhgzpogkwxvqhlgwyvssidmrad ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147810.8137417-310-52909465114968/AnsiballZ_setup.py'
Oct 11 01:56:51 compute-0 sudo[292821]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:51 compute-0 python3.9[292823]: ansible-ansible.legacy.setup Invoked with filter=['ansible_pkg_mgr'] gather_subset=['!all'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 01:56:52 compute-0 sudo[292821]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:52 compute-0 ceph-mon[191930]: pgmap v560: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:52 compute-0 sudo[292905]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-velbdishgwagumqlkfcwvuvsnuvcixgs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147810.8137417-310-52909465114968/AnsiballZ_dnf.py'
Oct 11 01:56:52 compute-0 sudo[292905]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v561: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:53 compute-0 podman[292907]: 2025-10-11 01:56:53.001371703 +0000 UTC m=+0.146727727 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.vendor=CentOS, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, tcib_managed=true, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible)
Oct 11 01:56:53 compute-0 python3.9[292908]: ansible-ansible.legacy.dnf Invoked with name=['libvirt ', 'libvirt-admin ', 'libvirt-client ', 'libvirt-daemon ', 'qemu-kvm', 'qemu-img', 'libguestfs', 'libseccomp', 'swtpm', 'swtpm-tools', 'edk2-ovmf', 'ceph-common', 'cyrus-sasl-scram'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 01:56:54 compute-0 ceph-mon[191930]: pgmap v561: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:54 compute-0 sudo[292905]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:56:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:56:54.808 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 01:56:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:56:54.809 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 01:56:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:56:54.810 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 01:56:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v562: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:55 compute-0 sudo[292999]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:56:55 compute-0 sudo[292999]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:56:55 compute-0 sudo[292999]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:55 compute-0 sudo[293025]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:56:55 compute-0 sudo[293025]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:56:55 compute-0 sudo[293025]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:55 compute-0 sudo[293050]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:56:55 compute-0 sudo[293050]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:56:55 compute-0 sudo[293050]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:55 compute-0 sudo[293087]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ls
Oct 11 01:56:55 compute-0 sudo[293087]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:56:55 compute-0 sudo[293185]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ifexphjcwasxtfuqxwyiixphjlxcgznh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147814.8657684-322-197824714289035/AnsiballZ_systemd.py'
Oct 11 01:56:55 compute-0 sudo[293185]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:56 compute-0 python3.9[293189]: ansible-ansible.builtin.systemd Invoked with enabled=False masked=True name=libvirtd state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None
Oct 11 01:56:56 compute-0 sudo[293185]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:56 compute-0 ceph-mon[191930]: pgmap v562: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:56 compute-0 podman[293249]: 2025-10-11 01:56:56.421745583 +0000 UTC m=+0.115047662 container exec ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:56:56
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['vms', 'cephfs.cephfs.meta', 'backups', 'default.rgw.control', '.rgw.root', 'cephfs.cephfs.data', 'default.rgw.meta', 'volumes', 'images', 'default.rgw.log', '.mgr']
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:56:56 compute-0 podman[293249]: 2025-10-11 01:56:56.580733121 +0000 UTC m=+0.274035190 container exec_died ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:56:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v563: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:57 compute-0 sudo[293479]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wcuqnkvpuztgkzyngcxlqelrpxenqxjb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147816.4878201-322-68412790904650/AnsiballZ_systemd.py'
Oct 11 01:56:57 compute-0 sudo[293479]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:57 compute-0 python3.9[293484]: ansible-ansible.builtin.systemd Invoked with enabled=False masked=True name=libvirtd-tcp.socket state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None
Oct 11 01:56:57 compute-0 sudo[293479]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:57 compute-0 sudo[293087]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:56:57 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:56:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:56:57 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:56:57 compute-0 sudo[293604]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:56:57 compute-0 sudo[293604]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:56:57 compute-0 sudo[293604]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:57 compute-0 sudo[293658]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:56:57 compute-0 sudo[293658]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:56:57 compute-0 sudo[293658]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:58 compute-0 sudo[293706]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:56:58 compute-0 sudo[293706]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:56:58 compute-0 sudo[293706]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:58 compute-0 sudo[293739]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 01:56:58 compute-0 sudo[293739]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:56:58 compute-0 sudo[293806]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rpwmhybazstinvhllioynamajsnmvwbv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147817.7444787-322-80632821193723/AnsiballZ_systemd.py'
Oct 11 01:56:58 compute-0 sudo[293806]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:58 compute-0 ceph-mon[191930]: pgmap v563: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:56:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:56:58 compute-0 podman[293809]: 2025-10-11 01:56:58.417650477 +0000 UTC m=+0.121719232 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., name=ubi9, release=1214.1726694543, vcs-type=git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, managed_by=edpm_ansible, com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, summary=Provides the latest release of Red Hat Universal Base Image 9., version=9.4, config_id=edpm, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, build-date=2024-09-18T21:23:30, container_name=kepler, io.buildah.version=1.29.0, release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., distribution-scope=public, io.openshift.tags=base rhel9, architecture=x86_64)
Oct 11 01:56:58 compute-0 podman[293808]: 2025-10-11 01:56:58.42586098 +0000 UTC m=+0.122464660 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 01:56:58 compute-0 podman[293862]: 2025-10-11 01:56:58.564844781 +0000 UTC m=+0.134376822 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 01:56:58 compute-0 python3.9[293813]: ansible-ansible.builtin.systemd Invoked with enabled=False masked=True name=libvirtd-tls.socket state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None
Oct 11 01:56:58 compute-0 sudo[293739]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:58 compute-0 sudo[293806]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:56:58 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:56:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:56:58 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:56:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:56:58 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:56:58 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 73dccdc4-078a-452a-842d-aae707f2c570 does not exist
Oct 11 01:56:58 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev be489984-06a0-4c99-8c2e-ba746f3aa459 does not exist
Oct 11 01:56:58 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev afff48c5-1ee6-4057-89f8-398c52fbcd15 does not exist
Oct 11 01:56:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:56:58 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:56:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:56:58 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:56:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:56:58 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:56:58 compute-0 sudo[293913]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:56:58 compute-0 sudo[293913]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:56:58 compute-0 sudo[293913]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v564: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:56:59 compute-0 sudo[293962]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:56:59 compute-0 sudo[293962]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:56:59 compute-0 sudo[293962]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:59 compute-0 sudo[294025]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:56:59 compute-0 sudo[294025]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:56:59 compute-0 sudo[294025]: pam_unix(sudo:session): session closed for user root
Oct 11 01:56:59 compute-0 sudo[294079]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 01:56:59 compute-0 sudo[294079]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:56:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:56:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:56:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:56:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:56:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:56:59 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:56:59 compute-0 sudo[294176]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aszqpemllfkqrzobedqbwwsettrdntos ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147818.9530137-322-256040513032207/AnsiballZ_systemd.py'
Oct 11 01:56:59 compute-0 sudo[294176]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:56:59 compute-0 podman[157119]: time="2025-10-11T01:56:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:56:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:56:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 35732 "" "Go-http-client/1.1"
Oct 11 01:56:59 compute-0 podman[294182]: 2025-10-11 01:56:59.776478514 +0000 UTC m=+0.124371648 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible, config_id=edpm, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, io.buildah.version=1.41.4)
Oct 11 01:56:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:56:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 7267 "" "Go-http-client/1.1"
Oct 11 01:56:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:56:59 compute-0 podman[294215]: 2025-10-11 01:56:59.898593883 +0000 UTC m=+0.067850653 container create 9348d0a855183782130065047f311741965d400d82051e8af5cdce6871584a27 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_khorana, OSD_FLAVOR=default, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:56:59 compute-0 systemd[1]: Started libpod-conmon-9348d0a855183782130065047f311741965d400d82051e8af5cdce6871584a27.scope.
Oct 11 01:56:59 compute-0 podman[294215]: 2025-10-11 01:56:59.873870229 +0000 UTC m=+0.043127029 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:56:59 compute-0 python3.9[294184]: ansible-ansible.builtin.systemd Invoked with enabled=False masked=True name=virtproxyd-tcp.socket state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None
Oct 11 01:56:59 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:57:00 compute-0 podman[294215]: 2025-10-11 01:57:00.021217435 +0000 UTC m=+0.190474285 container init 9348d0a855183782130065047f311741965d400d82051e8af5cdce6871584a27 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_khorana, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:57:00 compute-0 podman[294215]: 2025-10-11 01:57:00.033583403 +0000 UTC m=+0.202840173 container start 9348d0a855183782130065047f311741965d400d82051e8af5cdce6871584a27 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_khorana, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:57:00 compute-0 podman[294215]: 2025-10-11 01:57:00.039268979 +0000 UTC m=+0.208525749 container attach 9348d0a855183782130065047f311741965d400d82051e8af5cdce6871584a27 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_khorana, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:57:00 compute-0 jolly_khorana[294231]: 167 167
Oct 11 01:57:00 compute-0 systemd[1]: libpod-9348d0a855183782130065047f311741965d400d82051e8af5cdce6871584a27.scope: Deactivated successfully.
Oct 11 01:57:00 compute-0 podman[294215]: 2025-10-11 01:57:00.049889963 +0000 UTC m=+0.219146723 container died 9348d0a855183782130065047f311741965d400d82051e8af5cdce6871584a27 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_khorana, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:57:00 compute-0 systemd[1]: var-lib-containers-storage-overlay-65f4338128035700b4c8f6224f2a4e7cca152e8801152ff8aa559de784aca68f-merged.mount: Deactivated successfully.
Oct 11 01:57:00 compute-0 sudo[294176]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:00 compute-0 podman[294215]: 2025-10-11 01:57:00.136681187 +0000 UTC m=+0.305937957 container remove 9348d0a855183782130065047f311741965d400d82051e8af5cdce6871584a27 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_khorana, CEPH_REF=reef, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:57:00 compute-0 systemd[1]: libpod-conmon-9348d0a855183782130065047f311741965d400d82051e8af5cdce6871584a27.scope: Deactivated successfully.
Oct 11 01:57:00 compute-0 ceph-mon[191930]: pgmap v564: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:00 compute-0 podman[294282]: 2025-10-11 01:57:00.431757585 +0000 UTC m=+0.097337048 container create 42fee5d4df34a5336d5a67e5d5ff593652e85ad533563282d9301ec7bcd40615 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=epic_taussig, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 01:57:00 compute-0 podman[294282]: 2025-10-11 01:57:00.397557154 +0000 UTC m=+0.063136637 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:57:00 compute-0 systemd[1]: Started libpod-conmon-42fee5d4df34a5336d5a67e5d5ff593652e85ad533563282d9301ec7bcd40615.scope.
Oct 11 01:57:00 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:57:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8e961b7e8aa477d3939fbdbc2a8a906458697dfe264c2e93f9a793b182679934/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:57:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8e961b7e8aa477d3939fbdbc2a8a906458697dfe264c2e93f9a793b182679934/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:57:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8e961b7e8aa477d3939fbdbc2a8a906458697dfe264c2e93f9a793b182679934/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:57:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8e961b7e8aa477d3939fbdbc2a8a906458697dfe264c2e93f9a793b182679934/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:57:00 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8e961b7e8aa477d3939fbdbc2a8a906458697dfe264c2e93f9a793b182679934/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:57:00 compute-0 podman[294282]: 2025-10-11 01:57:00.639592783 +0000 UTC m=+0.305172246 container init 42fee5d4df34a5336d5a67e5d5ff593652e85ad533563282d9301ec7bcd40615 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=epic_taussig, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:57:00 compute-0 podman[294282]: 2025-10-11 01:57:00.666720605 +0000 UTC m=+0.332300038 container start 42fee5d4df34a5336d5a67e5d5ff593652e85ad533563282d9301ec7bcd40615 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=epic_taussig, io.buildah.version=1.39.3, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:57:00 compute-0 podman[294282]: 2025-10-11 01:57:00.675224171 +0000 UTC m=+0.340803644 container attach 42fee5d4df34a5336d5a67e5d5ff593652e85ad533563282d9301ec7bcd40615 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=epic_taussig, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0)
Oct 11 01:57:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v565: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:00 compute-0 sudo[294427]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rtwnffoezgqnmlbnemmnnzvfotwdonef ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147820.4037542-351-108804658010623/AnsiballZ_systemd.py'
Oct 11 01:57:00 compute-0 sudo[294427]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:01 compute-0 python3.9[294429]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtlogd.service daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:01 compute-0 openstack_network_exporter[159265]: ERROR   01:57:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:57:01 compute-0 openstack_network_exporter[159265]: ERROR   01:57:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:57:01 compute-0 openstack_network_exporter[159265]: ERROR   01:57:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:57:01 compute-0 openstack_network_exporter[159265]: ERROR   01:57:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:57:01 compute-0 openstack_network_exporter[159265]: ERROR   01:57:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:57:01 compute-0 sudo[294427]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:01 compute-0 epic_taussig[294345]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:57:01 compute-0 epic_taussig[294345]: --> relative data size: 1.0
Oct 11 01:57:01 compute-0 epic_taussig[294345]: --> All data devices are unavailable
Oct 11 01:57:01 compute-0 systemd[1]: libpod-42fee5d4df34a5336d5a67e5d5ff593652e85ad533563282d9301ec7bcd40615.scope: Deactivated successfully.
Oct 11 01:57:01 compute-0 systemd[1]: libpod-42fee5d4df34a5336d5a67e5d5ff593652e85ad533563282d9301ec7bcd40615.scope: Consumed 1.183s CPU time.
Oct 11 01:57:01 compute-0 podman[294282]: 2025-10-11 01:57:01.954011726 +0000 UTC m=+1.619591189 container died 42fee5d4df34a5336d5a67e5d5ff593652e85ad533563282d9301ec7bcd40615 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=epic_taussig, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0)
Oct 11 01:57:02 compute-0 systemd[1]: var-lib-containers-storage-overlay-8e961b7e8aa477d3939fbdbc2a8a906458697dfe264c2e93f9a793b182679934-merged.mount: Deactivated successfully.
Oct 11 01:57:02 compute-0 podman[294282]: 2025-10-11 01:57:02.069802715 +0000 UTC m=+1.735382148 container remove 42fee5d4df34a5336d5a67e5d5ff593652e85ad533563282d9301ec7bcd40615 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=epic_taussig, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True)
Oct 11 01:57:02 compute-0 systemd[1]: libpod-conmon-42fee5d4df34a5336d5a67e5d5ff593652e85ad533563282d9301ec7bcd40615.scope: Deactivated successfully.
Oct 11 01:57:02 compute-0 sudo[294079]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:02 compute-0 sudo[294570]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:57:02 compute-0 sudo[294570]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:57:02 compute-0 sudo[294570]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:02 compute-0 sudo[294626]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:57:02 compute-0 sudo[294626]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:57:02 compute-0 sudo[294626]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:02 compute-0 sudo[294668]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jriymqijjxnezvdbhjavxmnzeqckcldq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147821.7235458-351-229897085254090/AnsiballZ_systemd.py'
Oct 11 01:57:02 compute-0 sudo[294668]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:02 compute-0 ceph-mon[191930]: pgmap v565: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:02 compute-0 sudo[294673]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:57:02 compute-0 sudo[294673]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:57:02 compute-0 sudo[294673]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:02 compute-0 sudo[294698]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:57:02 compute-0 sudo[294698]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:57:02 compute-0 python3.9[294672]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtnodedevd.service daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v566: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:02 compute-0 sudo[294668]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:03 compute-0 podman[294789]: 2025-10-11 01:57:03.200655317 +0000 UTC m=+0.061793132 container create ebdbfcb23379f01a8149299adedf8a831cccdea8ac06e1f766982b360455dc9d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_swartz, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default)
Oct 11 01:57:03 compute-0 systemd[1]: Started libpod-conmon-ebdbfcb23379f01a8149299adedf8a831cccdea8ac06e1f766982b360455dc9d.scope.
Oct 11 01:57:03 compute-0 podman[294789]: 2025-10-11 01:57:03.174470201 +0000 UTC m=+0.035607976 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:57:03 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:57:03 compute-0 podman[294789]: 2025-10-11 01:57:03.356872433 +0000 UTC m=+0.218010208 container init ebdbfcb23379f01a8149299adedf8a831cccdea8ac06e1f766982b360455dc9d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_swartz, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2)
Oct 11 01:57:03 compute-0 podman[294789]: 2025-10-11 01:57:03.373634739 +0000 UTC m=+0.234772514 container start ebdbfcb23379f01a8149299adedf8a831cccdea8ac06e1f766982b360455dc9d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_swartz, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:57:03 compute-0 podman[294789]: 2025-10-11 01:57:03.377411598 +0000 UTC m=+0.238549373 container attach ebdbfcb23379f01a8149299adedf8a831cccdea8ac06e1f766982b360455dc9d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_swartz, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 01:57:03 compute-0 angry_swartz[294830]: 167 167
Oct 11 01:57:03 compute-0 systemd[1]: libpod-ebdbfcb23379f01a8149299adedf8a831cccdea8ac06e1f766982b360455dc9d.scope: Deactivated successfully.
Oct 11 01:57:03 compute-0 podman[294789]: 2025-10-11 01:57:03.388457213 +0000 UTC m=+0.249595018 container died ebdbfcb23379f01a8149299adedf8a831cccdea8ac06e1f766982b360455dc9d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_swartz, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:57:03 compute-0 systemd[1]: var-lib-containers-storage-overlay-d248e98af928b426f9bfe63176d84ae43908dc90eed5c8ef2bc96028c28f97b2-merged.mount: Deactivated successfully.
Oct 11 01:57:03 compute-0 podman[294789]: 2025-10-11 01:57:03.470850469 +0000 UTC m=+0.331988274 container remove ebdbfcb23379f01a8149299adedf8a831cccdea8ac06e1f766982b360455dc9d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_swartz, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:57:03 compute-0 systemd[1]: libpod-conmon-ebdbfcb23379f01a8149299adedf8a831cccdea8ac06e1f766982b360455dc9d.scope: Deactivated successfully.
Oct 11 01:57:03 compute-0 podman[294924]: 2025-10-11 01:57:03.716338869 +0000 UTC m=+0.059011938 container create 3e7edbe753e039eb68e6c0e80d729713e89ff5ab4b2f629639dba8eeefcc6d13 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_dubinsky, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:57:03 compute-0 systemd[1]: Started libpod-conmon-3e7edbe753e039eb68e6c0e80d729713e89ff5ab4b2f629639dba8eeefcc6d13.scope.
Oct 11 01:57:03 compute-0 podman[294924]: 2025-10-11 01:57:03.692643215 +0000 UTC m=+0.035316304 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:57:03 compute-0 sudo[294967]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yeqeeisblbyqoveezujiqbhfkjephavb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147823.2225084-351-1965852304803/AnsiballZ_systemd.py'
Oct 11 01:57:03 compute-0 sudo[294967]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:03 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:57:03 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/aba8ce77e6a4fe8398029183d7fabb991ce53d9a1e3770fb5b3c8b0a65d7ec08/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:57:03 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/aba8ce77e6a4fe8398029183d7fabb991ce53d9a1e3770fb5b3c8b0a65d7ec08/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:57:03 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/aba8ce77e6a4fe8398029183d7fabb991ce53d9a1e3770fb5b3c8b0a65d7ec08/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:57:03 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/aba8ce77e6a4fe8398029183d7fabb991ce53d9a1e3770fb5b3c8b0a65d7ec08/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:57:03 compute-0 podman[294924]: 2025-10-11 01:57:03.863380701 +0000 UTC m=+0.206053780 container init 3e7edbe753e039eb68e6c0e80d729713e89ff5ab4b2f629639dba8eeefcc6d13 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_dubinsky, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=reef)
Oct 11 01:57:03 compute-0 podman[294924]: 2025-10-11 01:57:03.89137957 +0000 UTC m=+0.234052639 container start 3e7edbe753e039eb68e6c0e80d729713e89ff5ab4b2f629639dba8eeefcc6d13 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_dubinsky, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2)
Oct 11 01:57:03 compute-0 podman[294924]: 2025-10-11 01:57:03.900400591 +0000 UTC m=+0.243073700 container attach 3e7edbe753e039eb68e6c0e80d729713e89ff5ab4b2f629639dba8eeefcc6d13 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_dubinsky, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 01:57:04 compute-0 python3.9[294972]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtproxyd.service daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:04 compute-0 sudo[294967]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:04 compute-0 ceph-mon[191930]: pgmap v566: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]: {
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:     "0": [
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:         {
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "devices": [
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "/dev/loop3"
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             ],
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "lv_name": "ceph_lv0",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "lv_size": "21470642176",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "name": "ceph_lv0",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "tags": {
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.cluster_name": "ceph",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.crush_device_class": "",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.encrypted": "0",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.osd_id": "0",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.type": "block",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.vdo": "0"
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             },
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "type": "block",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "vg_name": "ceph_vg0"
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:         }
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:     ],
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:     "1": [
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:         {
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "devices": [
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "/dev/loop4"
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             ],
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "lv_name": "ceph_lv1",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "lv_size": "21470642176",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "name": "ceph_lv1",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "tags": {
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.cluster_name": "ceph",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.crush_device_class": "",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.encrypted": "0",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.osd_id": "1",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.type": "block",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.vdo": "0"
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             },
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "type": "block",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "vg_name": "ceph_vg1"
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:         }
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:     ],
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:     "2": [
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:         {
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "devices": [
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "/dev/loop5"
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             ],
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "lv_name": "ceph_lv2",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "lv_size": "21470642176",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "name": "ceph_lv2",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "tags": {
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.cluster_name": "ceph",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.crush_device_class": "",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.encrypted": "0",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.osd_id": "2",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.type": "block",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:                 "ceph.vdo": "0"
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             },
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "type": "block",
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:             "vg_name": "ceph_vg2"
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:         }
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]:     ]
Oct 11 01:57:04 compute-0 xenodochial_dubinsky[294968]: }
Oct 11 01:57:04 compute-0 systemd[1]: libpod-3e7edbe753e039eb68e6c0e80d729713e89ff5ab4b2f629639dba8eeefcc6d13.scope: Deactivated successfully.
Oct 11 01:57:04 compute-0 podman[294924]: 2025-10-11 01:57:04.758456695 +0000 UTC m=+1.101129774 container died 3e7edbe753e039eb68e6c0e80d729713e89ff5ab4b2f629639dba8eeefcc6d13 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_dubinsky, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:57:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:57:04 compute-0 systemd[1]: var-lib-containers-storage-overlay-aba8ce77e6a4fe8398029183d7fabb991ce53d9a1e3770fb5b3c8b0a65d7ec08-merged.mount: Deactivated successfully.
Oct 11 01:57:04 compute-0 podman[294924]: 2025-10-11 01:57:04.843400281 +0000 UTC m=+1.186073350 container remove 3e7edbe753e039eb68e6c0e80d729713e89ff5ab4b2f629639dba8eeefcc6d13 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_dubinsky, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:57:04 compute-0 systemd[1]: libpod-conmon-3e7edbe753e039eb68e6c0e80d729713e89ff5ab4b2f629639dba8eeefcc6d13.scope: Deactivated successfully.
Oct 11 01:57:04 compute-0 sudo[294698]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v567: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:04 compute-0 sudo[295114]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:57:05 compute-0 sudo[295114]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:57:05 compute-0 sudo[295114]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:05 compute-0 sudo[295175]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qmgduxfbaqlwfmyaqnfzsqstqbdgvthf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147824.571382-351-6755057946712/AnsiballZ_systemd.py'
Oct 11 01:57:05 compute-0 sudo[295175]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:05 compute-0 sudo[295166]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:57:05 compute-0 sudo[295166]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:57:05 compute-0 sudo[295166]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:05 compute-0 sudo[295197]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:57:05 compute-0 sudo[295197]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:57:05 compute-0 sudo[295197]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:05 compute-0 sudo[295222]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:57:05 compute-0 sudo[295222]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:57:05 compute-0 python3.9[295189]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtqemud.service daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:05 compute-0 sudo[295175]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:05 compute-0 podman[295314]: 2025-10-11 01:57:05.846145594 +0000 UTC m=+0.086284812 container create dd6d7f85d682a4cc34150f065388b944246bada96356e57fb3ce878bbdfb3690 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_leakey, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:57:05 compute-0 podman[295314]: 2025-10-11 01:57:05.815213588 +0000 UTC m=+0.055352796 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:57:05 compute-0 systemd[1]: Started libpod-conmon-dd6d7f85d682a4cc34150f065388b944246bada96356e57fb3ce878bbdfb3690.scope.
Oct 11 01:57:05 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:57:05 compute-0 podman[295314]: 2025-10-11 01:57:05.980727224 +0000 UTC m=+0.220866442 container init dd6d7f85d682a4cc34150f065388b944246bada96356e57fb3ce878bbdfb3690 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_leakey, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 01:57:05 compute-0 podman[295314]: 2025-10-11 01:57:05.995109874 +0000 UTC m=+0.235249082 container start dd6d7f85d682a4cc34150f065388b944246bada96356e57fb3ce878bbdfb3690 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_leakey, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:57:06 compute-0 podman[295314]: 2025-10-11 01:57:06.002042902 +0000 UTC m=+0.242182120 container attach dd6d7f85d682a4cc34150f065388b944246bada96356e57fb3ce878bbdfb3690 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_leakey, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507)
Oct 11 01:57:06 compute-0 competent_leakey[295370]: 167 167
Oct 11 01:57:06 compute-0 systemd[1]: libpod-dd6d7f85d682a4cc34150f065388b944246bada96356e57fb3ce878bbdfb3690.scope: Deactivated successfully.
Oct 11 01:57:06 compute-0 podman[295314]: 2025-10-11 01:57:06.006976719 +0000 UTC m=+0.247115927 container died dd6d7f85d682a4cc34150f065388b944246bada96356e57fb3ce878bbdfb3690 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_leakey, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:57:06 compute-0 systemd[1]: var-lib-containers-storage-overlay-9d71e1da4c2b5f4739fe24cab518950aba54c42a6ab3e554a5cd68a5ad315c49-merged.mount: Deactivated successfully.
Oct 11 01:57:06 compute-0 podman[295314]: 2025-10-11 01:57:06.089017833 +0000 UTC m=+0.329157051 container remove dd6d7f85d682a4cc34150f065388b944246bada96356e57fb3ce878bbdfb3690 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_leakey, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 01:57:06 compute-0 systemd[1]: libpod-conmon-dd6d7f85d682a4cc34150f065388b944246bada96356e57fb3ce878bbdfb3690.scope: Deactivated successfully.
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 01:57:06 compute-0 podman[295452]: 2025-10-11 01:57:06.378646009 +0000 UTC m=+0.073609980 container create 478a696967df3f54d4de50a7483aaffbc2a36fd535de09abfa31c4255cfb5e11 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_williamson, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 01:57:06 compute-0 sudo[295488]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lodckoazjcpkbqxsayglxayslqrxcumc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147825.8271468-351-75236699242306/AnsiballZ_systemd.py'
Oct 11 01:57:06 compute-0 sudo[295488]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:06 compute-0 podman[295452]: 2025-10-11 01:57:06.353433554 +0000 UTC m=+0.048397505 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:57:06 compute-0 systemd[1]: Started libpod-conmon-478a696967df3f54d4de50a7483aaffbc2a36fd535de09abfa31c4255cfb5e11.scope.
Oct 11 01:57:06 compute-0 ceph-mon[191930]: pgmap v567: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:06 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:57:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ff8bc18e5dc474f8a6e5c145e35c3c30b5371c38449b4bf53df4042710e397d3/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:57:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ff8bc18e5dc474f8a6e5c145e35c3c30b5371c38449b4bf53df4042710e397d3/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:57:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ff8bc18e5dc474f8a6e5c145e35c3c30b5371c38449b4bf53df4042710e397d3/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:57:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ff8bc18e5dc474f8a6e5c145e35c3c30b5371c38449b4bf53df4042710e397d3/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:57:06 compute-0 podman[295452]: 2025-10-11 01:57:06.615390448 +0000 UTC m=+0.310354469 container init 478a696967df3f54d4de50a7483aaffbc2a36fd535de09abfa31c4255cfb5e11 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_williamson, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 01:57:06 compute-0 podman[295452]: 2025-10-11 01:57:06.643999036 +0000 UTC m=+0.338962997 container start 478a696967df3f54d4de50a7483aaffbc2a36fd535de09abfa31c4255cfb5e11 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_williamson, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:57:06 compute-0 podman[295452]: 2025-10-11 01:57:06.658216393 +0000 UTC m=+0.353180494 container attach 478a696967df3f54d4de50a7483aaffbc2a36fd535de09abfa31c4255cfb5e11 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_williamson, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, ceph=True, org.label-schema.license=GPLv2)
Oct 11 01:57:06 compute-0 python3.9[295490]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtsecretd.service daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:06 compute-0 sudo[295488]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v568: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:07 compute-0 sudo[295674]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fkgraqsmqxtgssmsqwyyvlysvzfyqlwm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147827.2304366-387-178307270043349/AnsiballZ_systemd.py'
Oct 11 01:57:07 compute-0 sudo[295674]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:07 compute-0 boring_williamson[295494]: {
Oct 11 01:57:07 compute-0 boring_williamson[295494]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:57:07 compute-0 boring_williamson[295494]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:57:07 compute-0 boring_williamson[295494]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:57:07 compute-0 boring_williamson[295494]:         "osd_id": 1,
Oct 11 01:57:07 compute-0 boring_williamson[295494]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:57:07 compute-0 boring_williamson[295494]:         "type": "bluestore"
Oct 11 01:57:07 compute-0 boring_williamson[295494]:     },
Oct 11 01:57:07 compute-0 boring_williamson[295494]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:57:07 compute-0 boring_williamson[295494]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:57:07 compute-0 boring_williamson[295494]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:57:07 compute-0 boring_williamson[295494]:         "osd_id": 2,
Oct 11 01:57:07 compute-0 boring_williamson[295494]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:57:07 compute-0 boring_williamson[295494]:         "type": "bluestore"
Oct 11 01:57:07 compute-0 boring_williamson[295494]:     },
Oct 11 01:57:07 compute-0 boring_williamson[295494]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:57:07 compute-0 boring_williamson[295494]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:57:07 compute-0 boring_williamson[295494]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:57:07 compute-0 boring_williamson[295494]:         "osd_id": 0,
Oct 11 01:57:07 compute-0 boring_williamson[295494]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:57:07 compute-0 boring_williamson[295494]:         "type": "bluestore"
Oct 11 01:57:07 compute-0 boring_williamson[295494]:     }
Oct 11 01:57:07 compute-0 boring_williamson[295494]: }
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.940 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to take longer than expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.941 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f8ed27f97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb8c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb1a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb200>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed2874260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed3ab42f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb350>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb90>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fa390>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 systemd[1]: libpod-478a696967df3f54d4de50a7483aaffbc2a36fd535de09abfa31c4255cfb5e11.scope: Deactivated successfully.
Oct 11 01:57:07 compute-0 systemd[1]: libpod-478a696967df3f54d4de50a7483aaffbc2a36fd535de09abfa31c4255cfb5e11.scope: Consumed 1.294s CPU time.
Oct 11 01:57:07 compute-0 podman[295452]: 2025-10-11 01:57:07.949183268 +0000 UTC m=+1.644147219 container died 478a696967df3f54d4de50a7483aaffbc2a36fd535de09abfa31c4255cfb5e11 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_williamson, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.950 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb3b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.950 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbbf0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.950 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbc80>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.951 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.951 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.951 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.952 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27f9610>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.952 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb620>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.952 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbe30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.952 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbec0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.953 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbf50>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.946 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f8ed27fbad0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f8ed27faff0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f8ed27fb110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f8ed27fb170>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f8ed27fb1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.955 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f8ed27fb230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.955 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f8ed2874230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.956 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.956 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f8ed27fb290>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.956 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.956 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f8ed5778d70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.956 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.956 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f8ed27fb650>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f8ed27fbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f8ed27fb320>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f8ed27fbb60>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f8ed27fa3f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f8ed27fb380>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f8ed27fbbc0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f8ed27fbc50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f8ed27fbce0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f8ed27fbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f8ed27fb590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f8ed27f95e0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f8ed27fb5f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f8ed27fbe00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f8ed27fbe90>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.962 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f8ed27fbf20>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.962 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.962 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.962 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.963 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.963 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.963 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.963 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:57:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:57:08 compute-0 systemd[1]: var-lib-containers-storage-overlay-ff8bc18e5dc474f8a6e5c145e35c3c30b5371c38449b4bf53df4042710e397d3-merged.mount: Deactivated successfully.
Oct 11 01:57:08 compute-0 podman[295452]: 2025-10-11 01:57:08.05247239 +0000 UTC m=+1.747436341 container remove 478a696967df3f54d4de50a7483aaffbc2a36fd535de09abfa31c4255cfb5e11 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_williamson, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.39.3)
Oct 11 01:57:08 compute-0 systemd[1]: libpod-conmon-478a696967df3f54d4de50a7483aaffbc2a36fd535de09abfa31c4255cfb5e11.scope: Deactivated successfully.
Oct 11 01:57:08 compute-0 sudo[295222]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:08 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:57:08 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:57:08 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:57:08 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:57:08 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 85c13434-c8de-49e3-ac86-009e88f2ff1b does not exist
Oct 11 01:57:08 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 3d63b770-43a4-48c8-8455-e8df8562ffb2 does not exist
Oct 11 01:57:08 compute-0 python3.9[295679]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtproxyd-tls.socket state=started daemon_reload=False daemon_reexec=False scope=system no_block=False force=None
Oct 11 01:57:08 compute-0 sudo[295694]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:57:08 compute-0 sudo[295694]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:57:08 compute-0 sudo[295694]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:08 compute-0 sudo[295674]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:08 compute-0 sudo[295722]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:57:08 compute-0 sudo[295722]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:57:08 compute-0 sudo[295722]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:08 compute-0 ceph-mon[191930]: pgmap v568: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:08 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:57:08 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:57:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v569: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:09 compute-0 sudo[295896]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pheojpttsjrkrpgjnlqxeywzfejpuujb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147828.6292095-395-25200597210164/AnsiballZ_systemd.py'
Oct 11 01:57:09 compute-0 sudo[295896]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:09 compute-0 python3.9[295898]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtlogd.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:09 compute-0 sudo[295896]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:57:10 compute-0 ceph-mon[191930]: pgmap v569: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:10 compute-0 sudo[296051]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kelvlkhyrodvwodlbxryxpgofspnkijo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147830.049799-395-72499195139910/AnsiballZ_systemd.py'
Oct 11 01:57:10 compute-0 sudo[296051]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:10 compute-0 python3.9[296053]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtlogd-admin.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v570: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:11 compute-0 sudo[296051]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:11 compute-0 sudo[296206]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ngqbouuirxtpmbzcmhdneabtyiubpgqf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147831.288516-395-192042395403471/AnsiballZ_systemd.py'
Oct 11 01:57:11 compute-0 sudo[296206]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:12 compute-0 python3.9[296208]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtnodedevd.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:12 compute-0 sudo[296206]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:12 compute-0 ceph-mon[191930]: pgmap v570: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v571: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:13 compute-0 sudo[296361]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jgcaircfqcohpwvpkjkyutqtipkwgvbv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147832.6568496-395-101584946994986/AnsiballZ_systemd.py'
Oct 11 01:57:13 compute-0 sudo[296361]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:13 compute-0 python3.9[296363]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtnodedevd-ro.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:13 compute-0 sudo[296361]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:14 compute-0 ceph-mon[191930]: pgmap v571: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:14 compute-0 sudo[296516]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tuyfqdrgqlmhocizmirqzdoqihkzgxiw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147833.9956331-395-214883237081953/AnsiballZ_systemd.py'
Oct 11 01:57:14 compute-0 sudo[296516]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:57:14 compute-0 podman[296520]: 2025-10-11 01:57:14.858432878 +0000 UTC m=+0.127744581 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-type=git, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.buildah.version=1.33.7, maintainer=Red Hat, Inc., name=ubi9-minimal, version=9.6, distribution-scope=public, io.openshift.tags=minimal rhel9, build-date=2025-08-20T13:12:41, release=1755695350, com.redhat.component=ubi9-minimal-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, managed_by=edpm_ansible, vendor=Red Hat, Inc., container_name=openstack_network_exporter, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.expose-services=, url=https://catalog.redhat.com/en/search?searchType=containers)
Oct 11 01:57:14 compute-0 podman[296519]: 2025-10-11 01:57:14.872023851 +0000 UTC m=+0.140129188 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 01:57:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v572: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:14 compute-0 python3.9[296518]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtnodedevd-admin.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:15 compute-0 sudo[296516]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:15 compute-0 sudo[296714]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-htiwsyykopcxlgdviuuwjrgkgwvoltko ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147835.3221943-395-228776503490519/AnsiballZ_systemd.py'
Oct 11 01:57:15 compute-0 sudo[296714]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:16 compute-0 python3.9[296716]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtproxyd.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:16 compute-0 sudo[296714]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:16 compute-0 ceph-mon[191930]: pgmap v572: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v573: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:17 compute-0 sudo[296869]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jmvbtpiccaehthiuoqextoscjytumdol ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147836.6563194-395-192694761321359/AnsiballZ_systemd.py'
Oct 11 01:57:17 compute-0 sudo[296869]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:17 compute-0 python3.9[296871]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtproxyd-ro.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:17 compute-0 sudo[296869]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:18 compute-0 ceph-mon[191930]: pgmap v573: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:18 compute-0 sudo[297024]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qwugliocxllmrbbgohgxwusgwqzyyypw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147838.0161905-395-46160512020739/AnsiballZ_systemd.py'
Oct 11 01:57:18 compute-0 sudo[297024]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v574: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:18 compute-0 python3.9[297026]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtproxyd-admin.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:19 compute-0 sudo[297024]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:19 compute-0 podman[297028]: 2025-10-11 01:57:19.132107213 +0000 UTC m=+0.129679784 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, container_name=ceilometer_agent_ipmi, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, org.label-schema.vendor=CentOS)
Oct 11 01:57:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:57:19 compute-0 sudo[297199]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bgzrfltccdfjpawfratgwlztvuhoqjjl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147839.394313-395-63018996932085/AnsiballZ_systemd.py'
Oct 11 01:57:19 compute-0 sudo[297199]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:20 compute-0 python3.9[297201]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtqemud.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:20 compute-0 sudo[297199]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:20 compute-0 ceph-mon[191930]: pgmap v574: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v575: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:21 compute-0 sudo[297354]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ferxeydhmwnikfrnqcoddksbjxtdvsxl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147840.694211-395-198651084869408/AnsiballZ_systemd.py'
Oct 11 01:57:21 compute-0 sudo[297354]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:21 compute-0 python3.9[297356]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtqemud-ro.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:21 compute-0 sudo[297354]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:22 compute-0 ceph-mon[191930]: pgmap v575: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:22 compute-0 sudo[297509]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uabbfwoejapmkrkyyjtgskmdvycdimso ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147842.118187-395-83190455631249/AnsiballZ_systemd.py'
Oct 11 01:57:22 compute-0 sudo[297509]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v576: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:23 compute-0 python3.9[297511]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtqemud-admin.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:23 compute-0 sudo[297509]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:23 compute-0 podman[297513]: 2025-10-11 01:57:23.245420617 +0000 UTC m=+0.146196146 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 01:57:23 compute-0 ceph-mon[191930]: pgmap v576: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:24 compute-0 sudo[297683]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tyxdcsjvnnihrqhzqgxrjktkssviklfw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147843.437031-395-60572046330711/AnsiballZ_systemd.py'
Oct 11 01:57:24 compute-0 sudo[297683]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:24 compute-0 python3.9[297685]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtsecretd.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:24 compute-0 sudo[297683]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:57:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v577: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:25 compute-0 sudo[297838]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xbyrrbmaezzhesftavbkawcuifiswgjl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147844.701536-395-239085094391821/AnsiballZ_systemd.py'
Oct 11 01:57:25 compute-0 sudo[297838]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:25 compute-0 python3.9[297840]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtsecretd-ro.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:25 compute-0 sudo[297838]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:26 compute-0 ceph-mon[191930]: pgmap v577: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:57:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:57:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:57:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:57:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:57:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:57:26 compute-0 sudo[297993]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eaeovswfhgneaunijqmxrwlfxjpkmptv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147846.0556827-395-279894935769064/AnsiballZ_systemd.py'
Oct 11 01:57:26 compute-0 sudo[297993]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v578: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:26 compute-0 python3.9[297995]: ansible-ansible.builtin.systemd Invoked with enabled=True masked=False name=virtsecretd-admin.socket daemon_reload=False daemon_reexec=False scope=system no_block=False state=None force=None
Oct 11 01:57:27 compute-0 sudo[297993]: pam_unix(sudo:session): session closed for user root
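The five ansible.builtin.systemd invocations above (01:57:21-01:57:26) enable and unmask the modular libvirt socket units one at a time, without starting them or reloading systemd (state=None, daemon_reload=False). A minimal sketch of a task that would leave this footprint, assuming a loop over the unit names; the task name and loop structure are assumptions, while the module parameters are copied from the log:

    - name: Enable libvirt modular daemon sockets  # hypothetical task name
      become: true
      ansible.builtin.systemd:
        name: "{{ item }}"
        enabled: true
        masked: false
      loop:
        - virtqemud-ro.socket
        - virtqemud-admin.socket
        - virtsecretd.socket
        - virtsecretd-ro.socket
        - virtsecretd-admin.socket

Each iteration runs under its own sudo session, which is why every invocation above is bracketed by a pam_unix session open/close pair.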
Oct 11 01:57:28 compute-0 ceph-mon[191930]: pgmap v578: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:28 compute-0 sudo[298148]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yyzzxomeeqcgciobfqehouknbwkwzbmh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147847.5889323-497-208546901504555/AnsiballZ_file.py'
Oct 11 01:57:28 compute-0 sudo[298148]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:28 compute-0 python3.9[298150]: ansible-ansible.builtin.file Invoked with group=root owner=root path=/etc/tmpfiles.d/ setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:57:28 compute-0 sudo[298148]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v579: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:29 compute-0 podman[298175]: 2025-10-11 01:57:29.239526433 +0000 UTC m=+0.128363858 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 01:57:29 compute-0 podman[298177]: 2025-10-11 01:57:29.254391799 +0000 UTC m=+0.129657402 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., managed_by=edpm_ansible, name=ubi9, version=9.4, io.openshift.expose-services=, io.openshift.tags=base rhel9, release=1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, architecture=x86_64, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, container_name=kepler, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vendor=Red Hat, Inc., build-date=2024-09-18T21:23:30, com.redhat.component=ubi9-container, io.buildah.version=1.29.0, release-0.7.12=, config_id=edpm, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9)
Oct 11 01:57:29 compute-0 podman[298176]: 2025-10-11 01:57:29.278656438 +0000 UTC m=+0.159089629 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team)
Oct 11 01:57:29 compute-0 sudo[298365]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nhrdgcirljlsfyehnlytryxzsmfuteuu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147849.1998322-497-138682964343353/AnsiballZ_file.py'
Oct 11 01:57:29 compute-0 podman[157119]: time="2025-10-11T01:57:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:57:29 compute-0 sudo[298365]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:57:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 35732 "" "Go-http-client/1.1"
Oct 11 01:57:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:57:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 7278 "" "Go-http-client/1.1"
Oct 11 01:57:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:57:29 compute-0 python3.9[298367]: ansible-ansible.builtin.file Invoked with group=root owner=root path=/var/lib/edpm-config/firewall setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:57:29 compute-0 sudo[298365]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:30 compute-0 ceph-mon[191930]: pgmap v579: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:30 compute-0 podman[298385]: 2025-10-11 01:57:30.259877762 +0000 UTC m=+0.150139793 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, io.buildah.version=1.41.4, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, tcib_managed=true, config_id=edpm)
Oct 11 01:57:30 compute-0 sudo[298537]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ykyxgwsgngeukppwqusmqzrzrhqyhzpt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147850.2477624-497-127056294303909/AnsiballZ_file.py'
Oct 11 01:57:30 compute-0 sudo[298537]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v580: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:31 compute-0 python3.9[298539]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/pki/libvirt setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:57:31 compute-0 sudo[298537]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:31 compute-0 openstack_network_exporter[159265]: ERROR   01:57:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:57:31 compute-0 openstack_network_exporter[159265]: ERROR   01:57:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:57:31 compute-0 openstack_network_exporter[159265]: ERROR   01:57:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:57:31 compute-0 openstack_network_exporter[159265]: ERROR   01:57:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:57:31 compute-0 openstack_network_exporter[159265]: ERROR   01:57:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:57:32 compute-0 ceph-mon[191930]: pgmap v580: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v581: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:32 compute-0 sudo[298689]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-umtfwlmtcufptwjuoyfvuskrjesurlqf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147852.3842342-497-150435966440037/AnsiballZ_file.py'
Oct 11 01:57:32 compute-0 sudo[298689]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:33 compute-0 python3.9[298691]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/pki/libvirt/private setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:57:33 compute-0 sudo[298689]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:34 compute-0 sudo[298841]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zkgmregqtkbloxynayswmtvpyujikejo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147853.4901066-497-8280003363554/AnsiballZ_file.py'
Oct 11 01:57:34 compute-0 sudo[298841]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:34 compute-0 ceph-mon[191930]: pgmap v581: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:34 compute-0 python3.9[298843]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/pki/CA setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:57:34 compute-0 sudo[298841]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:57:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v582: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:35 compute-0 sudo[298993]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fqsiokowxgwkyhrqnydrirfiqsxtybvn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147854.5386536-497-66283067966117/AnsiballZ_file.py'
Oct 11 01:57:35 compute-0 sudo[298993]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:35 compute-0 python3.9[298995]: ansible-ansible.builtin.file Invoked with group=qemu owner=root path=/etc/pki/qemu setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:57:35 compute-0 sudo[298993]: pam_unix(sudo:session): session closed for user root
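The ansible.builtin.file invocations at 01:57:28-01:57:35 create the host-side directories (/etc/tmpfiles.d/, /var/lib/edpm-config/firewall, /etc/pki/libvirt, /etc/pki/libvirt/private, /etc/pki/CA, /etc/pki/qemu) with root ownership and the container_file_t SELinux type, so that the containerized services can access them. A sketch of the equivalent task for one of these entries, with parameters taken from the 01:57:31 invocation; only the task name is an assumption:

    - name: Create libvirt PKI directory  # hypothetical task name
      become: true
      ansible.builtin.file:
        path: /etc/pki/libvirt
        state: directory
        owner: root
        group: root
        mode: "0755"
        setype: container_file_t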
Oct 11 01:57:36 compute-0 ceph-mon[191930]: pgmap v582: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:36 compute-0 sudo[299145]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bmoneorfelgbajzquaruqwwsaitdyyph ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147855.65753-540-230776154590204/AnsiballZ_stat.py'
Oct 11 01:57:36 compute-0 sudo[299145]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:36 compute-0 python3.9[299147]: ansible-ansible.legacy.stat Invoked with path=/etc/libvirt/virtlogd.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:57:36 compute-0 sudo[299145]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v583: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:37 compute-0 sudo[299223]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-curqfzylqugjmhjnfoaxtqudrwhfkarz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147855.65753-540-230776154590204/AnsiballZ_file.py'
Oct 11 01:57:37 compute-0 sudo[299223]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:37 compute-0 python3.9[299225]: ansible-ansible.legacy.file Invoked with group=libvirt mode=0640 owner=libvirt dest=/etc/libvirt/virtlogd.conf _original_basename=virtlogd.conf recurse=False state=file path=/etc/libvirt/virtlogd.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:57:37 compute-0 sudo[299223]: pam_unix(sudo:session): session closed for user root
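The paired ansible.legacy.stat / ansible.legacy.file entries above are the usual on-host footprint of an ansible.builtin.template or copy task: the stat call checksums the destination, the rendered file is transferred only when it differs, and the trailing file call enforces ownership and mode (_original_basename records the source file name). The same pattern repeats below for virtnodedevd.conf, virtproxyd.conf, virtqemud.conf, qemu.conf, virtsecretd.conf, auth.conf, and the SASL config. A sketch of the kind of task behind the virtlogd.conf pair, assuming a template source of the same name:

    - name: Configure virtlogd  # hypothetical task name
      become: true
      ansible.builtin.template:
        src: virtlogd.conf  # source name inferred from _original_basename
        dest: /etc/libvirt/virtlogd.conf
        owner: libvirt
        group: libvirt
        mode: "0640"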
Oct 11 01:57:38 compute-0 ceph-mon[191930]: pgmap v583: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:38 compute-0 sudo[299375]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ayeolgrhwfgpwfqsukywqotksqgegyjd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147857.7720408-540-73437191180231/AnsiballZ_stat.py'
Oct 11 01:57:38 compute-0 sudo[299375]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:38 compute-0 python3.9[299377]: ansible-ansible.legacy.stat Invoked with path=/etc/libvirt/virtnodedevd.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:57:38 compute-0 sudo[299375]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v584: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:39 compute-0 sudo[299453]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lyxihpnofvwwrqaybveywsmpvpbcxhww ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147857.7720408-540-73437191180231/AnsiballZ_file.py'
Oct 11 01:57:39 compute-0 sudo[299453]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:39 compute-0 python3.9[299455]: ansible-ansible.legacy.file Invoked with group=libvirt mode=0640 owner=libvirt dest=/etc/libvirt/virtnodedevd.conf _original_basename=virtnodedevd.conf recurse=False state=file path=/etc/libvirt/virtnodedevd.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:57:39 compute-0 sudo[299453]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:57:40 compute-0 ceph-mon[191930]: pgmap v584: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:40 compute-0 sudo[299605]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qgifuxpegbabayayysbqshtmenxyxjxy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147859.6716688-540-3469371678437/AnsiballZ_stat.py'
Oct 11 01:57:40 compute-0 sudo[299605]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:40 compute-0 python3.9[299607]: ansible-ansible.legacy.stat Invoked with path=/etc/libvirt/virtproxyd.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:57:40 compute-0 sudo[299605]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v585: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:41 compute-0 sudo[299683]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bbdrtbcxczatyhbiutlywytrvlaiwelw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147859.6716688-540-3469371678437/AnsiballZ_file.py'
Oct 11 01:57:41 compute-0 sudo[299683]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:41 compute-0 python3.9[299685]: ansible-ansible.legacy.file Invoked with group=libvirt mode=0640 owner=libvirt dest=/etc/libvirt/virtproxyd.conf _original_basename=virtproxyd.conf recurse=False state=file path=/etc/libvirt/virtproxyd.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:57:41 compute-0 sudo[299683]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:42 compute-0 ceph-mon[191930]: pgmap v585: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:42 compute-0 sudo[299835]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pdnmnionqckrzwcuadhgvmqnewiyfcik ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147861.6065543-540-154881200848862/AnsiballZ_stat.py'
Oct 11 01:57:42 compute-0 sudo[299835]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:42 compute-0 python3.9[299837]: ansible-ansible.legacy.stat Invoked with path=/etc/libvirt/virtqemud.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:57:42 compute-0 sudo[299835]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v586: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:43 compute-0 sudo[299913]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gwtbvckqmzecvuzihkhowjwkodbgapqc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147861.6065543-540-154881200848862/AnsiballZ_file.py'
Oct 11 01:57:43 compute-0 sudo[299913]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:44 compute-0 ceph-mon[191930]: pgmap v586: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:44 compute-0 python3.9[299915]: ansible-ansible.legacy.file Invoked with group=libvirt mode=0640 owner=libvirt dest=/etc/libvirt/virtqemud.conf _original_basename=virtqemud.conf recurse=False state=file path=/etc/libvirt/virtqemud.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:57:44 compute-0 sudo[299913]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:57:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v587: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:45 compute-0 podman[299993]: 2025-10-11 01:57:45.24879954 +0000 UTC m=+0.130196640 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=minimal rhel9, release=1755695350, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, container_name=openstack_network_exporter, name=ubi9-minimal, distribution-scope=public, url=https://catalog.redhat.com/en/search?searchType=containers, managed_by=edpm_ansible, version=9.6, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, vcs-type=git, io.buildah.version=1.33.7, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_id=edpm, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal)
Oct 11 01:57:45 compute-0 podman[299992]: 2025-10-11 01:57:45.28920279 +0000 UTC m=+0.179081860 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:57:45 compute-0 sudo[300107]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qiodhzncykokpiphmmhigkrakgewfflw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147864.439356-540-125822976608605/AnsiballZ_stat.py'
Oct 11 01:57:45 compute-0 sudo[300107]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:45 compute-0 python3.9[300109]: ansible-ansible.legacy.stat Invoked with path=/etc/libvirt/qemu.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:57:45 compute-0 sudo[300107]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:46 compute-0 ceph-mon[191930]: pgmap v587: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:46 compute-0 sudo[300185]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-genemycdgafnersrxawrnglqazsizece ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147864.439356-540-125822976608605/AnsiballZ_file.py'
Oct 11 01:57:46 compute-0 sudo[300185]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:46 compute-0 python3.9[300187]: ansible-ansible.legacy.file Invoked with group=libvirt mode=0640 owner=libvirt dest=/etc/libvirt/qemu.conf _original_basename=qemu.conf.j2 recurse=False state=file path=/etc/libvirt/qemu.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:57:46 compute-0 sudo[300185]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v588: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:47 compute-0 sudo[300337]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-umsonsyffrhwocmduafbqivnkugptjvw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147866.9615502-540-168853109841700/AnsiballZ_stat.py'
Oct 11 01:57:47 compute-0 sudo[300337]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:47 compute-0 python3.9[300339]: ansible-ansible.legacy.stat Invoked with path=/etc/libvirt/virtsecretd.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:57:47 compute-0 sudo[300337]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:48 compute-0 ceph-mon[191930]: pgmap v588: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:48 compute-0 sudo[300415]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ddwpuqsezpjjqghowgzxmndypthsowqs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147866.9615502-540-168853109841700/AnsiballZ_file.py'
Oct 11 01:57:48 compute-0 sudo[300415]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:48 compute-0 python3.9[300417]: ansible-ansible.legacy.file Invoked with group=libvirt mode=0640 owner=libvirt dest=/etc/libvirt/virtsecretd.conf _original_basename=virtsecretd.conf recurse=False state=file path=/etc/libvirt/virtsecretd.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:57:48 compute-0 sudo[300415]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v589: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:49 compute-0 sudo[300580]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dxskdndqniotfjslrocwsdqjagtfzdcm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147868.9904363-540-228492642451343/AnsiballZ_stat.py'
Oct 11 01:57:49 compute-0 sudo[300580]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:49 compute-0 podman[300541]: 2025-10-11 01:57:49.607344197 +0000 UTC m=+0.142250129 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_ipmi)
Oct 11 01:57:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:57:49 compute-0 python3.9[300589]: ansible-ansible.legacy.stat Invoked with path=/etc/libvirt/auth.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:57:49 compute-0 sudo[300580]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:50 compute-0 ceph-mon[191930]: pgmap v589: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:50 compute-0 sudo[300666]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jvpnakysoqrfjaqzrfxafzpnxeeikunj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147868.9904363-540-228492642451343/AnsiballZ_file.py'
Oct 11 01:57:50 compute-0 sudo[300666]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:50 compute-0 python3.9[300668]: ansible-ansible.legacy.file Invoked with group=libvirt mode=0600 owner=libvirt dest=/etc/libvirt/auth.conf _original_basename=auth.conf recurse=False state=file path=/etc/libvirt/auth.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:57:50 compute-0 sudo[300666]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v590: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:51 compute-0 sudo[300818]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kzwmuenadxccwqhzktyjqttxveyaolwo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147870.9081697-540-264185048360008/AnsiballZ_stat.py'
Oct 11 01:57:51 compute-0 sudo[300818]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:51 compute-0 python3.9[300820]: ansible-ansible.legacy.stat Invoked with path=/etc/sasl2/libvirt.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:57:51 compute-0 sudo[300818]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:52 compute-0 sudo[300896]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yglcrcmcjwlktpvkrutolpmeytbljcdg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147870.9081697-540-264185048360008/AnsiballZ_file.py'
Oct 11 01:57:52 compute-0 sudo[300896]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:52 compute-0 ceph-mon[191930]: pgmap v590: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:52 compute-0 python3.9[300898]: ansible-ansible.legacy.file Invoked with group=libvirt mode=0640 owner=libvirt dest=/etc/sasl2/libvirt.conf _original_basename=sasl_libvirt.conf recurse=False state=file path=/etc/sasl2/libvirt.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:57:52 compute-0 sudo[300896]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v591: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:53 compute-0 sudo[301048]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dyhmsktsyxeboutzukwmpigrpknhygpb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147872.6707294-629-24761104060690/AnsiballZ_command.py'
Oct 11 01:57:53 compute-0 sudo[301048]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:53 compute-0 podman[301050]: 2025-10-11 01:57:53.472900392 +0000 UTC m=+0.153680639 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, org.label-schema.schema-version=1.0, container_name=ovn_metadata_agent, io.buildah.version=1.41.3)
Oct 11 01:57:53 compute-0 python3.9[301051]: ansible-ansible.legacy.command Invoked with cmd=saslpasswd2 -f /etc/libvirt/passwd.db -p -a libvirt -u openstack migration stdin=12345678 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None
Oct 11 01:57:53 compute-0 sudo[301048]: pam_unix(sudo:session): session closed for user root
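The ansible.legacy.command entry above provisions a libvirt SASL credential, apparently for live migration given the user name: saslpasswd2 writes user "migration" in the "openstack" realm into /etc/libvirt/passwd.db for the libvirt service, taking the password from stdin (-p) rather than the command line. Note that the module's logged parameters still expose the stdin value in cleartext (a CI placeholder here). A sketch of the task, under the assumption that the password comes from a variable; the task name, variable name, and no_log choice are assumptions:

    - name: Create libvirt SASL user for migration  # hypothetical task name
      become: true
      no_log: true  # assumption: keep the secret out of the journal
      ansible.builtin.command:
        cmd: saslpasswd2 -f /etc/libvirt/passwd.db -p -a libvirt -u openstack migration
        stdin: "{{ migration_password }}"  # hypothetical variable; the log shows a placeholder value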
Oct 11 01:57:54 compute-0 ceph-mon[191930]: pgmap v591: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:57:54.810 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 01:57:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:57:54.811 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 01:57:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:57:54.811 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 01:57:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:57:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v592: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:55 compute-0 sudo[301220]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eypceinfrppnoxcvtyaganpcpjimbbhr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147873.898104-638-268265849792777/AnsiballZ_file.py'
Oct 11 01:57:55 compute-0 sudo[301220]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:55 compute-0 python3.9[301222]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtlogd.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:57:55 compute-0 sudo[301220]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:56 compute-0 ceph-mon[191930]: pgmap v592: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:56 compute-0 sudo[301372]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aqwvjjetuguwnfjkgrdgpxlgfsoufdbp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147875.7684152-638-219559431329665/AnsiballZ_file.py'
Oct 11 01:57:56 compute-0 sudo[301372]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:57:56
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['volumes', 'default.rgw.log', 'cephfs.cephfs.data', 'default.rgw.meta', 'default.rgw.control', 'backups', '.rgw.root', 'cephfs.cephfs.meta', '.mgr', 'vms', 'images']
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 01:57:56 compute-0 python3.9[301374]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtlogd-admin.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:57:56 compute-0 sudo[301372]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:57:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v593: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:57 compute-0 sudo[301524]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ncmxcddgmguixmkfmqlrwnfhkzmzwnet ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147877.263479-638-272939041771688/AnsiballZ_file.py'
Oct 11 01:57:57 compute-0 sudo[301524]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:58 compute-0 python3.9[301526]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtnodedevd.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:57:58 compute-0 sudo[301524]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:58 compute-0 ceph-mon[191930]: pgmap v593: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:58 compute-0 sudo[301676]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sevfbbgbaqfckpuquflhgtbklddcrmrk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147878.3018239-638-177351485586638/AnsiballZ_file.py'
Oct 11 01:57:58 compute-0 sudo[301676]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:57:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v594: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:57:59 compute-0 python3.9[301678]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtnodedevd-ro.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:57:59 compute-0 sudo[301676]: pam_unix(sudo:session): session closed for user root
Oct 11 01:57:59 compute-0 podman[157119]: time="2025-10-11T01:57:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:57:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:57:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 35732 "" "Go-http-client/1.1"
Oct 11 01:57:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:57:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 7272 "" "Go-http-client/1.1"
Oct 11 01:57:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:58:00 compute-0 podman[301802]: 2025-10-11 01:58:00.032598832 +0000 UTC m=+0.109606468 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:58:00 compute-0 sudo[301875]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-toxrpoocgdkzzvdcqtvghtygkmeufkrf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147879.4415083-638-46514110759279/AnsiballZ_file.py'
Oct 11 01:58:00 compute-0 sudo[301875]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:00 compute-0 podman[301804]: 2025-10-11 01:58:00.072036216 +0000 UTC m=+0.136168339 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, vcs-type=git, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, build-date=2024-09-18T21:23:30, com.redhat.component=ubi9-container, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9, version=9.4, io.openshift.expose-services=, summary=Provides the latest release of Red Hat Universal Base Image 9., container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, release=1214.1726694543, release-0.7.12=, io.buildah.version=1.29.0, name=ubi9, maintainer=Red Hat, Inc., architecture=x86_64)
Oct 11 01:58:00 compute-0 podman[301803]: 2025-10-11 01:58:00.136564755 +0000 UTC m=+0.202488858 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, container_name=ovn_controller, org.label-schema.license=GPLv2, tcib_managed=true, config_id=ovn_controller)
Oct 11 01:58:00 compute-0 ceph-mon[191930]: pgmap v594: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:00 compute-0 python3.9[301890]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtnodedevd-admin.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:00 compute-0 sudo[301875]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v595: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:01 compute-0 podman[302004]: 2025-10-11 01:58:01.266040941 +0000 UTC m=+0.151615175 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0)
Oct 11 01:58:01 compute-0 sudo[302067]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hfgmwthrznuzbbhpjpkacnpvcjliblct ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147880.7393577-638-269482561655354/AnsiballZ_file.py'
Oct 11 01:58:01 compute-0 sudo[302067]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:01 compute-0 openstack_network_exporter[159265]: ERROR   01:58:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:58:01 compute-0 openstack_network_exporter[159265]: ERROR   01:58:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:58:01 compute-0 openstack_network_exporter[159265]: ERROR   01:58:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:58:01 compute-0 openstack_network_exporter[159265]: ERROR   01:58:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:58:01 compute-0 openstack_network_exporter[159265]: ERROR   01:58:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:58:01 compute-0 python3.9[302069]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtproxyd.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:01 compute-0 sudo[302067]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:02 compute-0 ceph-mon[191930]: pgmap v595: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:02 compute-0 sudo[302219]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-byxgcuqfrcofshfqcymmnmdooxustuev ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147881.8693578-638-163069254185060/AnsiballZ_file.py'
Oct 11 01:58:02 compute-0 sudo[302219]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:02 compute-0 python3.9[302221]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtproxyd-ro.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:02 compute-0 sudo[302219]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v596: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:03 compute-0 sudo[302371]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fgzwnclxbzcqoxfeaiwicdfpqolsstsc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147882.9813025-638-138963360079508/AnsiballZ_file.py'
Oct 11 01:58:03 compute-0 sudo[302371]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:03 compute-0 python3.9[302373]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtproxyd-admin.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:03 compute-0 sudo[302371]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:04 compute-0 ceph-mon[191930]: pgmap v596: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:04 compute-0 sudo[302523]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ituukiwwjywvzhpbpjkihmviglumzouj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147884.1049128-638-57248500048795/AnsiballZ_file.py'
Oct 11 01:58:04 compute-0 sudo[302523]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:58:04 compute-0 python3.9[302525]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtqemud.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:04 compute-0 sudo[302523]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v597: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:05 compute-0 sudo[302675]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vvipezvcjgeyerxqoaecwnbgkjdvhqxs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147885.2045715-638-220216655605487/AnsiballZ_file.py'
Oct 11 01:58:05 compute-0 sudo[302675]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:06 compute-0 python3.9[302677]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtqemud-ro.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:06 compute-0 sudo[302675]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:06 compute-0 ceph-mon[191930]: pgmap v597: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 01:58:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v598: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:07 compute-0 sudo[302827]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yxlvrnzrguwgmlrcusmxkueyijcxidil ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147886.2875447-638-117149447983013/AnsiballZ_file.py'
Oct 11 01:58:07 compute-0 sudo[302827]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:07 compute-0 python3.9[302829]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtqemud-admin.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:07 compute-0 sudo[302827]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:08 compute-0 ceph-mon[191930]: pgmap v598: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:08 compute-0 sudo[302924]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:58:08 compute-0 sudo[302924]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:08 compute-0 sudo[302924]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:08 compute-0 sudo[302954]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:58:08 compute-0 sudo[302954]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:08 compute-0 sudo[302954]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:08 compute-0 sudo[302979]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:58:08 compute-0 sudo[302979]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:08 compute-0 sudo[302979]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:08 compute-0 sudo[303004]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 01:58:08 compute-0 sudo[303004]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v599: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:09 compute-0 sudo[303092]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fhdhlmtuitswxtvcxhktkuwszdmpqesd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147888.2151427-638-166958178729378/AnsiballZ_file.py'
Oct 11 01:58:09 compute-0 sudo[303092]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:09 compute-0 python3.9[303094]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtsecretd.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:09 compute-0 sudo[303092]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:09 compute-0 sudo[303004]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:58:09 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:58:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:58:09 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:58:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:58:09 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:58:09 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 8109509e-2ab7-4015-a00a-e42879c92469 does not exist
Oct 11 01:58:09 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev fc759b36-b39d-4c81-8779-fe59f64a55fb does not exist
Oct 11 01:58:09 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 0d5f72d4-1d99-4721-8f2e-b1e088ef1611 does not exist
Oct 11 01:58:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:58:09 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:58:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:58:09 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:58:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:58:09 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:58:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:58:09 compute-0 sudo[303154]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:58:09 compute-0 sudo[303154]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:09 compute-0 sudo[303154]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:10 compute-0 sudo[303207]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:58:10 compute-0 sudo[303207]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:10 compute-0 sudo[303207]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:10 compute-0 sudo[303261]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:58:10 compute-0 sudo[303261]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:10 compute-0 sudo[303261]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:10 compute-0 sudo[303310]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 01:58:10 compute-0 sudo[303310]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:10 compute-0 sudo[303359]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qsqdfjgxeqhwsjrkbzvhskvcektyhead ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147889.790473-638-24081798692487/AnsiballZ_file.py'
Oct 11 01:58:10 compute-0 sudo[303359]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:10 compute-0 ceph-mon[191930]: pgmap v599: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:10 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:58:10 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:58:10 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:58:10 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:58:10 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:58:10 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:58:10 compute-0 python3.9[303363]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtsecretd-ro.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:10 compute-0 sudo[303359]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:10 compute-0 podman[303434]: 2025-10-11 01:58:10.912532104 +0000 UTC m=+0.107753637 container create 98ef8c1a4742862bf0f34d5574dc011e0813b0da8cba55066c5a7d2723327ead (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_sammet, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 01:58:10 compute-0 podman[303434]: 2025-10-11 01:58:10.873311494 +0000 UTC m=+0.068533077 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:58:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v600: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:10 compute-0 systemd[1]: Started libpod-conmon-98ef8c1a4742862bf0f34d5574dc011e0813b0da8cba55066c5a7d2723327ead.scope.
Oct 11 01:58:11 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:58:11 compute-0 podman[303434]: 2025-10-11 01:58:11.062715384 +0000 UTC m=+0.257936887 container init 98ef8c1a4742862bf0f34d5574dc011e0813b0da8cba55066c5a7d2723327ead (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_sammet, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507)
Oct 11 01:58:11 compute-0 podman[303434]: 2025-10-11 01:58:11.081083449 +0000 UTC m=+0.276304942 container start 98ef8c1a4742862bf0f34d5574dc011e0813b0da8cba55066c5a7d2723327ead (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_sammet, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:58:11 compute-0 podman[303434]: 2025-10-11 01:58:11.086941126 +0000 UTC m=+0.282162649 container attach 98ef8c1a4742862bf0f34d5574dc011e0813b0da8cba55066c5a7d2723327ead (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_sammet, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:58:11 compute-0 keen_sammet[303495]: 167 167
Oct 11 01:58:11 compute-0 systemd[1]: libpod-98ef8c1a4742862bf0f34d5574dc011e0813b0da8cba55066c5a7d2723327ead.scope: Deactivated successfully.
Oct 11 01:58:11 compute-0 podman[303434]: 2025-10-11 01:58:11.094164826 +0000 UTC m=+0.289386309 container died 98ef8c1a4742862bf0f34d5574dc011e0813b0da8cba55066c5a7d2723327ead (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_sammet, ceph=True, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0)
Oct 11 01:58:11 compute-0 systemd[1]: var-lib-containers-storage-overlay-7843e26ca5900948ff1bc783ea9cc0c0c5a8a46718f267d4a2b0058b492959df-merged.mount: Deactivated successfully.
Oct 11 01:58:11 compute-0 podman[303434]: 2025-10-11 01:58:11.175479654 +0000 UTC m=+0.370701177 container remove 98ef8c1a4742862bf0f34d5574dc011e0813b0da8cba55066c5a7d2723327ead (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_sammet, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0)
Oct 11 01:58:11 compute-0 systemd[1]: libpod-conmon-98ef8c1a4742862bf0f34d5574dc011e0813b0da8cba55066c5a7d2723327ead.scope: Deactivated successfully.
Oct 11 01:58:11 compute-0 sudo[303589]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xqcrmamyuqlraxykqhjaybxbjfnldzjd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147890.8414743-638-15546258065410/AnsiballZ_file.py'
Oct 11 01:58:11 compute-0 sudo[303589]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:11 compute-0 podman[303592]: 2025-10-11 01:58:11.463744043 +0000 UTC m=+0.080904473 container create 27e22e02b8308cc6a15f4b95db4198e01d354713c454eb625ca77ba0e7864919 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_cannon, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:58:11 compute-0 podman[303592]: 2025-10-11 01:58:11.436358549 +0000 UTC m=+0.053518989 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:58:11 compute-0 systemd[1]: Started libpod-conmon-27e22e02b8308cc6a15f4b95db4198e01d354713c454eb625ca77ba0e7864919.scope.
Oct 11 01:58:11 compute-0 python3.9[303595]: ansible-ansible.builtin.file Invoked with group=root mode=0755 owner=root path=/etc/systemd/system/virtsecretd-admin.socket.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:11 compute-0 sudo[303589]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:11 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:58:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/550de30725c67f6afdf198d1b324e1233d8c8a146bc65e356257be2ffd68759d/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:58:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/550de30725c67f6afdf198d1b324e1233d8c8a146bc65e356257be2ffd68759d/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:58:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/550de30725c67f6afdf198d1b324e1233d8c8a146bc65e356257be2ffd68759d/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:58:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/550de30725c67f6afdf198d1b324e1233d8c8a146bc65e356257be2ffd68759d/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:58:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/550de30725c67f6afdf198d1b324e1233d8c8a146bc65e356257be2ffd68759d/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:58:11 compute-0 podman[303592]: 2025-10-11 01:58:11.672570515 +0000 UTC m=+0.289730995 container init 27e22e02b8308cc6a15f4b95db4198e01d354713c454eb625ca77ba0e7864919 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_cannon, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 01:58:11 compute-0 podman[303592]: 2025-10-11 01:58:11.693457001 +0000 UTC m=+0.310617441 container start 27e22e02b8308cc6a15f4b95db4198e01d354713c454eb625ca77ba0e7864919 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_cannon, ceph=True, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 01:58:11 compute-0 podman[303592]: 2025-10-11 01:58:11.700342605 +0000 UTC m=+0.317503095 container attach 27e22e02b8308cc6a15f4b95db4198e01d354713c454eb625ca77ba0e7864919 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_cannon, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:58:12 compute-0 ceph-mon[191930]: pgmap v600: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:12 compute-0 sudo[303768]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pqgbdufdpzytpqfzbdszvwtxbcctjsxf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147891.90377-737-41217240199162/AnsiballZ_stat.py'
Oct 11 01:58:12 compute-0 sudo[303768]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:12 compute-0 python3.9[303772]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtlogd.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:58:12 compute-0 sudo[303768]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:12 compute-0 confident_cannon[303610]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:58:12 compute-0 confident_cannon[303610]: --> relative data size: 1.0
Oct 11 01:58:12 compute-0 confident_cannon[303610]: --> All data devices are unavailable
Oct 11 01:58:12 compute-0 systemd[1]: libpod-27e22e02b8308cc6a15f4b95db4198e01d354713c454eb625ca77ba0e7864919.scope: Deactivated successfully.
Oct 11 01:58:12 compute-0 systemd[1]: libpod-27e22e02b8308cc6a15f4b95db4198e01d354713c454eb625ca77ba0e7864919.scope: Consumed 1.186s CPU time.
Oct 11 01:58:12 compute-0 podman[303592]: 2025-10-11 01:58:12.938105026 +0000 UTC m=+1.555265436 container died 27e22e02b8308cc6a15f4b95db4198e01d354713c454eb625ca77ba0e7864919 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_cannon, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 01:58:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v601: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:12 compute-0 systemd[1]: var-lib-containers-storage-overlay-550de30725c67f6afdf198d1b324e1233d8c8a146bc65e356257be2ffd68759d-merged.mount: Deactivated successfully.
Oct 11 01:58:13 compute-0 podman[303592]: 2025-10-11 01:58:13.050783194 +0000 UTC m=+1.667943604 container remove 27e22e02b8308cc6a15f4b95db4198e01d354713c454eb625ca77ba0e7864919 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_cannon, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:58:13 compute-0 systemd[1]: libpod-conmon-27e22e02b8308cc6a15f4b95db4198e01d354713c454eb625ca77ba0e7864919.scope: Deactivated successfully.
Oct 11 01:58:13 compute-0 sudo[303310]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:13 compute-0 sudo[303892]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vxjsuceqiuxavsibskteldofdsquglia ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147891.90377-737-41217240199162/AnsiballZ_file.py'
Oct 11 01:58:13 compute-0 sudo[303892]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:13 compute-0 sudo[303869]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:58:13 compute-0 sudo[303869]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:13 compute-0 sudo[303869]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:13 compute-0 sudo[303907]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:58:13 compute-0 sudo[303907]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:13 compute-0 sudo[303907]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:13 compute-0 python3.9[303904]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/virtlogd.socket.d/override.conf _original_basename=libvirt-socket.unit.j2 recurse=False state=file path=/etc/systemd/system/virtlogd.socket.d/override.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:13 compute-0 sudo[303932]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:58:13 compute-0 sudo[303932]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:13 compute-0 sudo[303892]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:13 compute-0 sudo[303932]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:13 compute-0 sudo[303957]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:58:13 compute-0 sudo[303957]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:14 compute-0 podman[304120]: 2025-10-11 01:58:14.11101441 +0000 UTC m=+0.060305441 container create a598562c78a5126dfafaffa178f6491d6392c511f52c5900c322c27252baed15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_bardeen, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 01:58:14 compute-0 systemd[1]: Started libpod-conmon-a598562c78a5126dfafaffa178f6491d6392c511f52c5900c322c27252baed15.scope.
Oct 11 01:58:14 compute-0 podman[304120]: 2025-10-11 01:58:14.08871746 +0000 UTC m=+0.038008511 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:58:14 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:58:14 compute-0 sudo[304189]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zmjffrfdabydpefdpgbaynkrmmbihkem ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147893.7996898-737-123613554448323/AnsiballZ_stat.py'
Oct 11 01:58:14 compute-0 sudo[304189]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:14 compute-0 podman[304120]: 2025-10-11 01:58:14.244742287 +0000 UTC m=+0.194033358 container init a598562c78a5126dfafaffa178f6491d6392c511f52c5900c322c27252baed15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_bardeen, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3)
Oct 11 01:58:14 compute-0 podman[304120]: 2025-10-11 01:58:14.25876042 +0000 UTC m=+0.208051451 container start a598562c78a5126dfafaffa178f6491d6392c511f52c5900c322c27252baed15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_bardeen, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True)
Oct 11 01:58:14 compute-0 podman[304120]: 2025-10-11 01:58:14.262895158 +0000 UTC m=+0.212186219 container attach a598562c78a5126dfafaffa178f6491d6392c511f52c5900c322c27252baed15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_bardeen, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:58:14 compute-0 gracious_bardeen[304180]: 167 167
Oct 11 01:58:14 compute-0 systemd[1]: libpod-a598562c78a5126dfafaffa178f6491d6392c511f52c5900c322c27252baed15.scope: Deactivated successfully.
Oct 11 01:58:14 compute-0 conmon[304180]: conmon a598562c78a5126dfafa <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-a598562c78a5126dfafaffa178f6491d6392c511f52c5900c322c27252baed15.scope/container/memory.events
Oct 11 01:58:14 compute-0 podman[304120]: 2025-10-11 01:58:14.268002453 +0000 UTC m=+0.217293484 container died a598562c78a5126dfafaffa178f6491d6392c511f52c5900c322c27252baed15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_bardeen, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:58:14 compute-0 systemd[1]: var-lib-containers-storage-overlay-ae5b3e10e94f516af3f82c5502106b980c09ad1c5844eded5416539540c45a1c-merged.mount: Deactivated successfully.
Oct 11 01:58:14 compute-0 podman[304120]: 2025-10-11 01:58:14.321068522 +0000 UTC m=+0.270359553 container remove a598562c78a5126dfafaffa178f6491d6392c511f52c5900c322c27252baed15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_bardeen, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=reef, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:58:14 compute-0 systemd[1]: libpod-conmon-a598562c78a5126dfafaffa178f6491d6392c511f52c5900c322c27252baed15.scope: Deactivated successfully.
Oct 11 01:58:14 compute-0 ceph-mon[191930]: pgmap v601: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:14 compute-0 python3.9[304191]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtlogd-admin.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:58:14 compute-0 sudo[304189]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:14 compute-0 podman[304212]: 2025-10-11 01:58:14.544410535 +0000 UTC m=+0.073255875 container create 2843a94b8733100a55e8e50850fecaaa73a394ab390e0e4bb849f41a3bd50fff (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_kepler, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 01:58:14 compute-0 systemd[1]: Started libpod-conmon-2843a94b8733100a55e8e50850fecaaa73a394ab390e0e4bb849f41a3bd50fff.scope.
Oct 11 01:58:14 compute-0 podman[304212]: 2025-10-11 01:58:14.514714823 +0000 UTC m=+0.043560193 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:58:14 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:58:14 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d01b74521a6fff4f0705d597a10078f36944d65285c0ad7ba45124f242959ecf/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:58:14 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d01b74521a6fff4f0705d597a10078f36944d65285c0ad7ba45124f242959ecf/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:58:14 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d01b74521a6fff4f0705d597a10078f36944d65285c0ad7ba45124f242959ecf/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:58:14 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d01b74521a6fff4f0705d597a10078f36944d65285c0ad7ba45124f242959ecf/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:58:14 compute-0 podman[304212]: 2025-10-11 01:58:14.706167857 +0000 UTC m=+0.235013257 container init 2843a94b8733100a55e8e50850fecaaa73a394ab390e0e4bb849f41a3bd50fff (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_kepler, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 01:58:14 compute-0 podman[304212]: 2025-10-11 01:58:14.727033083 +0000 UTC m=+0.255878423 container start 2843a94b8733100a55e8e50850fecaaa73a394ab390e0e4bb849f41a3bd50fff (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_kepler, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:58:14 compute-0 podman[304212]: 2025-10-11 01:58:14.733201725 +0000 UTC m=+0.262047095 container attach 2843a94b8733100a55e8e50850fecaaa73a394ab390e0e4bb849f41a3bd50fff (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_kepler, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:58:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:58:14 compute-0 sudo[304308]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lkntakveifzkyywdzovpfspwgarhfmcw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147893.7996898-737-123613554448323/AnsiballZ_file.py'
Oct 11 01:58:14 compute-0 sudo[304308]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v602: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:15 compute-0 python3.9[304310]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/virtlogd-admin.socket.d/override.conf _original_basename=libvirt-socket.unit.j2 recurse=False state=file path=/etc/systemd/system/virtlogd-admin.socket.d/override.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:15 compute-0 sudo[304308]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:15 compute-0 practical_kepler[304250]: {
Oct 11 01:58:15 compute-0 practical_kepler[304250]:     "0": [
Oct 11 01:58:15 compute-0 practical_kepler[304250]:         {
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "devices": [
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "/dev/loop3"
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             ],
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "lv_name": "ceph_lv0",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "lv_size": "21470642176",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "name": "ceph_lv0",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "tags": {
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.cluster_name": "ceph",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.crush_device_class": "",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.encrypted": "0",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.osd_id": "0",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.type": "block",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.vdo": "0"
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             },
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "type": "block",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "vg_name": "ceph_vg0"
Oct 11 01:58:15 compute-0 practical_kepler[304250]:         }
Oct 11 01:58:15 compute-0 practical_kepler[304250]:     ],
Oct 11 01:58:15 compute-0 practical_kepler[304250]:     "1": [
Oct 11 01:58:15 compute-0 practical_kepler[304250]:         {
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "devices": [
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "/dev/loop4"
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             ],
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "lv_name": "ceph_lv1",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "lv_size": "21470642176",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "name": "ceph_lv1",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "tags": {
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.cluster_name": "ceph",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.crush_device_class": "",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.encrypted": "0",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.osd_id": "1",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.type": "block",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.vdo": "0"
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             },
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "type": "block",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "vg_name": "ceph_vg1"
Oct 11 01:58:15 compute-0 practical_kepler[304250]:         }
Oct 11 01:58:15 compute-0 practical_kepler[304250]:     ],
Oct 11 01:58:15 compute-0 practical_kepler[304250]:     "2": [
Oct 11 01:58:15 compute-0 practical_kepler[304250]:         {
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "devices": [
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "/dev/loop5"
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             ],
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "lv_name": "ceph_lv2",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "lv_size": "21470642176",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "name": "ceph_lv2",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "tags": {
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.cluster_name": "ceph",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.crush_device_class": "",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.encrypted": "0",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.osd_id": "2",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.type": "block",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:                 "ceph.vdo": "0"
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             },
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "type": "block",
Oct 11 01:58:15 compute-0 practical_kepler[304250]:             "vg_name": "ceph_vg2"
Oct 11 01:58:15 compute-0 practical_kepler[304250]:         }
Oct 11 01:58:15 compute-0 practical_kepler[304250]:     ]
Oct 11 01:58:15 compute-0 practical_kepler[304250]: }
Oct 11 01:58:15 compute-0 systemd[1]: libpod-2843a94b8733100a55e8e50850fecaaa73a394ab390e0e4bb849f41a3bd50fff.scope: Deactivated successfully.
Oct 11 01:58:15 compute-0 podman[304212]: 2025-10-11 01:58:15.667994793 +0000 UTC m=+1.196840143 container died 2843a94b8733100a55e8e50850fecaaa73a394ab390e0e4bb849f41a3bd50fff (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_kepler, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:58:15 compute-0 systemd[1]: var-lib-containers-storage-overlay-d01b74521a6fff4f0705d597a10078f36944d65285c0ad7ba45124f242959ecf-merged.mount: Deactivated successfully.
Oct 11 01:58:15 compute-0 podman[304212]: 2025-10-11 01:58:15.764118826 +0000 UTC m=+1.292964166 container remove 2843a94b8733100a55e8e50850fecaaa73a394ab390e0e4bb849f41a3bd50fff (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_kepler, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 01:58:15 compute-0 systemd[1]: libpod-conmon-2843a94b8733100a55e8e50850fecaaa73a394ab390e0e4bb849f41a3bd50fff.scope: Deactivated successfully.
Oct 11 01:58:15 compute-0 sudo[303957]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:15 compute-0 podman[304410]: 2025-10-11 01:58:15.838621932 +0000 UTC m=+0.106545418 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 01:58:15 compute-0 podman[304421]: 2025-10-11 01:58:15.85180091 +0000 UTC m=+0.117971377 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, url=https://catalog.redhat.com/en/search?searchType=containers, version=9.6, build-date=2025-08-20T13:12:41, com.redhat.component=ubi9-minimal-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=openstack_network_exporter, release=1755695350, io.openshift.expose-services=, managed_by=edpm_ansible, io.openshift.tags=minimal rhel9, vcs-type=git, vendor=Red Hat, Inc., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, maintainer=Red Hat, Inc., name=ubi9-minimal, config_id=edpm, io.buildah.version=1.33.7, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, distribution-scope=public, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9.)
Oct 11 01:58:15 compute-0 sudo[304487]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:58:15 compute-0 sudo[304487]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:15 compute-0 sudo[304487]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:15 compute-0 sudo[304551]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zcwjrevrxknsnwdpkcaknygyswrbtmss ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147895.439212-737-211111438809735/AnsiballZ_stat.py'
Oct 11 01:58:15 compute-0 sudo[304551]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:16 compute-0 sudo[304544]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:58:16 compute-0 sudo[304544]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:16 compute-0 sudo[304544]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:16 compute-0 sudo[304574]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:58:16 compute-0 sudo[304574]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:16 compute-0 sudo[304574]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:16 compute-0 python3.9[304566]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtnodedevd.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:58:16 compute-0 sudo[304551]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:16 compute-0 sudo[304599]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:58:16 compute-0 sudo[304599]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:16 compute-0 ceph-mon[191930]: pgmap v602: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:16 compute-0 sudo[304725]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wtgwxnknsqsyxgvwomgiwdvrqkcebwqg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147895.439212-737-211111438809735/AnsiballZ_file.py'
Oct 11 01:58:16 compute-0 sudo[304725]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:16 compute-0 podman[304739]: 2025-10-11 01:58:16.814637953 +0000 UTC m=+0.088890355 container create 485865e005ee373f35f1d94e98c626a06c74b0b753410d22fbec8f42d5d70b59 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=tender_allen, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 01:58:16 compute-0 podman[304739]: 2025-10-11 01:58:16.774752362 +0000 UTC m=+0.049004874 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:58:16 compute-0 systemd[1]: Started libpod-conmon-485865e005ee373f35f1d94e98c626a06c74b0b753410d22fbec8f42d5d70b59.scope.
Oct 11 01:58:16 compute-0 python3.9[304736]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/virtnodedevd.socket.d/override.conf _original_basename=libvirt-socket.unit.j2 recurse=False state=file path=/etc/systemd/system/virtnodedevd.socket.d/override.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:16 compute-0 sudo[304725]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:16 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:58:16 compute-0 podman[304739]: 2025-10-11 01:58:16.949181083 +0000 UTC m=+0.223433525 container init 485865e005ee373f35f1d94e98c626a06c74b0b753410d22fbec8f42d5d70b59 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=tender_allen, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:58:16 compute-0 podman[304739]: 2025-10-11 01:58:16.959338152 +0000 UTC m=+0.233590544 container start 485865e005ee373f35f1d94e98c626a06c74b0b753410d22fbec8f42d5d70b59 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=tender_allen, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:58:16 compute-0 podman[304739]: 2025-10-11 01:58:16.964573549 +0000 UTC m=+0.238826241 container attach 485865e005ee373f35f1d94e98c626a06c74b0b753410d22fbec8f42d5d70b59 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=tender_allen, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 01:58:16 compute-0 tender_allen[304754]: 167 167
Oct 11 01:58:16 compute-0 systemd[1]: libpod-485865e005ee373f35f1d94e98c626a06c74b0b753410d22fbec8f42d5d70b59.scope: Deactivated successfully.
Oct 11 01:58:16 compute-0 podman[304739]: 2025-10-11 01:58:16.974090556 +0000 UTC m=+0.248342978 container died 485865e005ee373f35f1d94e98c626a06c74b0b753410d22fbec8f42d5d70b59 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=tender_allen, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:58:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v603: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:17 compute-0 systemd[1]: var-lib-containers-storage-overlay-a02f46015303e1948c2bd398e93ad446d29c3f48000646d85a6776fa542de08d-merged.mount: Deactivated successfully.
Oct 11 01:58:17 compute-0 podman[304739]: 2025-10-11 01:58:17.046551048 +0000 UTC m=+0.320803450 container remove 485865e005ee373f35f1d94e98c626a06c74b0b753410d22fbec8f42d5d70b59 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=tender_allen, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef)
Oct 11 01:58:17 compute-0 systemd[1]: libpod-conmon-485865e005ee373f35f1d94e98c626a06c74b0b753410d22fbec8f42d5d70b59.scope: Deactivated successfully.
Oct 11 01:58:17 compute-0 podman[304814]: 2025-10-11 01:58:17.308752605 +0000 UTC m=+0.066568615 container create 2f0d784512589cff47e1ee4e3eedeb2b77be756b60804266c374fc0d45e562d6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stupefied_galois, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 01:58:17 compute-0 podman[304814]: 2025-10-11 01:58:17.278788188 +0000 UTC m=+0.036604288 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:58:17 compute-0 systemd[1]: Started libpod-conmon-2f0d784512589cff47e1ee4e3eedeb2b77be756b60804266c374fc0d45e562d6.scope.
Oct 11 01:58:17 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:58:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e32aab09973cab9656c740ab0f1865da6d970c91ea92bc1a1517c547c5654c64/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:58:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e32aab09973cab9656c740ab0f1865da6d970c91ea92bc1a1517c547c5654c64/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:58:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e32aab09973cab9656c740ab0f1865da6d970c91ea92bc1a1517c547c5654c64/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:58:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e32aab09973cab9656c740ab0f1865da6d970c91ea92bc1a1517c547c5654c64/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:58:17 compute-0 podman[304814]: 2025-10-11 01:58:17.517709848 +0000 UTC m=+0.275525898 container init 2f0d784512589cff47e1ee4e3eedeb2b77be756b60804266c374fc0d45e562d6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stupefied_galois, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:58:17 compute-0 podman[304814]: 2025-10-11 01:58:17.537838802 +0000 UTC m=+0.295654832 container start 2f0d784512589cff47e1ee4e3eedeb2b77be756b60804266c374fc0d45e562d6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stupefied_galois, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default)
Oct 11 01:58:17 compute-0 podman[304814]: 2025-10-11 01:58:17.546969973 +0000 UTC m=+0.304786083 container attach 2f0d784512589cff47e1ee4e3eedeb2b77be756b60804266c374fc0d45e562d6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stupefied_galois, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:58:18 compute-0 ceph-mon[191930]: pgmap v603: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:18 compute-0 stupefied_galois[304869]: {
Oct 11 01:58:18 compute-0 sudo[304973]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-piyhvaxkunclqxmsfkykovrxhgllmtkp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147897.267393-737-67413425744528/AnsiballZ_stat.py'
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:         "osd_id": 1,
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:         "type": "bluestore"
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:     },
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:         "osd_id": 2,
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:         "type": "bluestore"
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:     },
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:         "osd_id": 0,
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:         "type": "bluestore"
Oct 11 01:58:18 compute-0 stupefied_galois[304869]:     }
Oct 11 01:58:18 compute-0 stupefied_galois[304869]: }
Oct 11 01:58:18 compute-0 sudo[304973]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:18 compute-0 systemd[1]: libpod-2f0d784512589cff47e1ee4e3eedeb2b77be756b60804266c374fc0d45e562d6.scope: Deactivated successfully.
Oct 11 01:58:18 compute-0 systemd[1]: libpod-2f0d784512589cff47e1ee4e3eedeb2b77be756b60804266c374fc0d45e562d6.scope: Consumed 1.158s CPU time.
Oct 11 01:58:18 compute-0 podman[304814]: 2025-10-11 01:58:18.698803349 +0000 UTC m=+1.456619429 container died 2f0d784512589cff47e1ee4e3eedeb2b77be756b60804266c374fc0d45e562d6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stupefied_galois, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_REF=reef)
Oct 11 01:58:18 compute-0 systemd[1]: var-lib-containers-storage-overlay-e32aab09973cab9656c740ab0f1865da6d970c91ea92bc1a1517c547c5654c64-merged.mount: Deactivated successfully.
Oct 11 01:58:18 compute-0 podman[304814]: 2025-10-11 01:58:18.814902784 +0000 UTC m=+1.572718794 container remove 2f0d784512589cff47e1ee4e3eedeb2b77be756b60804266c374fc0d45e562d6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stupefied_galois, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 01:58:18 compute-0 systemd[1]: libpod-conmon-2f0d784512589cff47e1ee4e3eedeb2b77be756b60804266c374fc0d45e562d6.scope: Deactivated successfully.
Oct 11 01:58:18 compute-0 sudo[304599]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:58:18 compute-0 python3.9[304977]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtnodedevd-ro.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:58:18 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:58:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:58:18 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:58:18 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 43b90c51-5486-45fb-9ffc-2b4f3a5fc669 does not exist
Oct 11 01:58:18 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev fa1f8c56-4e7d-4858-b51e-8ad7c2ce1fbc does not exist
Oct 11 01:58:18 compute-0 sudo[304973]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v604: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:19 compute-0 sudo[304992]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:58:19 compute-0 sudo[304992]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:19 compute-0 sudo[304992]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:19 compute-0 sudo[305026]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:58:19 compute-0 sudo[305026]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:58:19 compute-0 sudo[305026]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:19 compute-0 sudo[305116]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hkmjupfssvgovvgffffldxpjnvuojnxx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147897.267393-737-67413425744528/AnsiballZ_file.py'
Oct 11 01:58:19 compute-0 sudo[305116]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:19 compute-0 python3.9[305118]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/virtnodedevd-ro.socket.d/override.conf _original_basename=libvirt-socket.unit.j2 recurse=False state=file path=/etc/systemd/system/virtnodedevd-ro.socket.d/override.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:19 compute-0 sudo[305116]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:58:19 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:58:19 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:58:19 compute-0 ceph-mon[191930]: pgmap v604: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:20 compute-0 podman[305196]: 2025-10-11 01:58:20.257027952 +0000 UTC m=+0.134706794 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 01:58:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v605: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:21 compute-0 sudo[305289]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tccrbgdcpsgoisgqytqdaacxvqpofjom ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147899.8870065-737-48872949978387/AnsiballZ_stat.py'
Oct 11 01:58:21 compute-0 sudo[305289]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:21 compute-0 python3.9[305291]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtnodedevd-admin.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:58:21 compute-0 sudo[305289]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:21 compute-0 sudo[305367]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mpjsqtjljxktnsejdiwhjybupouxcrhh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147899.8870065-737-48872949978387/AnsiballZ_file.py'
Oct 11 01:58:21 compute-0 sudo[305367]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:22 compute-0 ceph-mon[191930]: pgmap v605: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:22 compute-0 python3.9[305369]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/virtnodedevd-admin.socket.d/override.conf _original_basename=libvirt-socket.unit.j2 recurse=False state=file path=/etc/systemd/system/virtnodedevd-admin.socket.d/override.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:22 compute-0 sudo[305367]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:22 compute-0 sudo[305519]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-onulenffoqribibmcpjndzbscxpjuruk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147902.3579724-737-30863288264955/AnsiballZ_stat.py'
Oct 11 01:58:22 compute-0 sudo[305519]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v606: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:23 compute-0 python3.9[305521]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtproxyd.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:58:23 compute-0 sudo[305519]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:23 compute-0 sudo[305612]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jasuzdyvwnbxdobukylkwgtztxbdegui ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147902.3579724-737-30863288264955/AnsiballZ_file.py'
Oct 11 01:58:23 compute-0 podman[305571]: 2025-10-11 01:58:23.736597248 +0000 UTC m=+0.129192463 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, container_name=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:58:23 compute-0 sudo[305612]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:23 compute-0 python3.9[305617]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/virtproxyd.socket.d/override.conf _original_basename=libvirt-socket.unit.j2 recurse=False state=file path=/etc/systemd/system/virtproxyd.socket.d/override.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:24 compute-0 sudo[305612]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:24 compute-0 ceph-mon[191930]: pgmap v606: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:24 compute-0 sudo[305767]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xafuvrkdhngibjqdunxnigvyegjustvk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147904.2360032-737-231758465897980/AnsiballZ_stat.py'
Oct 11 01:58:24 compute-0 sudo[305767]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:58:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v607: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:25 compute-0 python3.9[305769]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtproxyd-ro.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:58:25 compute-0 sudo[305767]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:25 compute-0 sudo[305845]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rlrxwpchbwzrqjatyfdxwjvduxobdruh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147904.2360032-737-231758465897980/AnsiballZ_file.py'
Oct 11 01:58:25 compute-0 sudo[305845]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:25 compute-0 python3.9[305847]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/virtproxyd-ro.socket.d/override.conf _original_basename=libvirt-socket.unit.j2 recurse=False state=file path=/etc/systemd/system/virtproxyd-ro.socket.d/override.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:25 compute-0 sudo[305845]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:26 compute-0 ceph-mon[191930]: pgmap v607: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:58:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:58:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:58:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:58:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:58:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:58:26 compute-0 sudo[305997]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vomfxkjonknxyvjjtwgktnpllgzxzpgd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147906.0761476-737-109731845545963/AnsiballZ_stat.py'
Oct 11 01:58:26 compute-0 sudo[305997]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:26 compute-0 python3.9[305999]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtproxyd-admin.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:58:26 compute-0 sudo[305997]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v608: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:27 compute-0 sudo[306075]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xhsbqevmtwegwokildevtnwqijamfsiw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147906.0761476-737-109731845545963/AnsiballZ_file.py'
Oct 11 01:58:27 compute-0 sudo[306075]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:27 compute-0 python3.9[306077]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/virtproxyd-admin.socket.d/override.conf _original_basename=libvirt-socket.unit.j2 recurse=False state=file path=/etc/systemd/system/virtproxyd-admin.socket.d/override.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:27 compute-0 sudo[306075]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:28 compute-0 ceph-mon[191930]: pgmap v608: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:28 compute-0 sudo[306227]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hkwizxqbxnvzixrsrdwdrqizcwanxpws ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147907.7456806-737-51664753624173/AnsiballZ_stat.py'
Oct 11 01:58:28 compute-0 sudo[306227]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:28 compute-0 python3.9[306229]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtqemud.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:58:28 compute-0 sudo[306227]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v609: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:29 compute-0 sudo[306305]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ofmvxinbtyuiokxhsaexjmdnociwvjch ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147907.7456806-737-51664753624173/AnsiballZ_file.py'
Oct 11 01:58:29 compute-0 sudo[306305]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:29 compute-0 python3.9[306307]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/virtqemud.socket.d/override.conf _original_basename=libvirt-socket.unit.j2 recurse=False state=file path=/etc/systemd/system/virtqemud.socket.d/override.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:29 compute-0 sudo[306305]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:29 compute-0 podman[157119]: time="2025-10-11T01:58:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:58:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:58:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 35732 "" "Go-http-client/1.1"
Oct 11 01:58:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:58:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 7273 "" "Go-http-client/1.1"
Oct 11 01:58:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:58:30 compute-0 ceph-mon[191930]: pgmap v609: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:30 compute-0 podman[306431]: 2025-10-11 01:58:30.230785821 +0000 UTC m=+0.124733899 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
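
The podman[306431] line is a scheduled health check run for the podman_exporter container; health_status=healthy and health_failing_streak=0 are taken from the healthcheck defined in its config_data. To reproduce what the check does by hand (container name from the log; the health fields follow podman's Docker-compatible schema):

    # execute the container's configured healthcheck once and report the result
    podman healthcheck run podman_exporter

    # read back the recorded health state and failing streak
    podman inspect --format '{{.State.Health.Status}} {{.State.Health.FailingStreak}}' podman_exporter
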
Oct 11 01:58:30 compute-0 sudo[306472]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-emiepnevisoctujfprdkwqlmjqlsmnjt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147909.5504558-737-52735834162682/AnsiballZ_stat.py'
Oct 11 01:58:30 compute-0 sudo[306472]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:30 compute-0 podman[306483]: 2025-10-11 01:58:30.409941982 +0000 UTC m=+0.126521949 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, summary=Provides the latest release of Red Hat Universal Base Image 9., architecture=x86_64, io.openshift.expose-services=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., release-0.7.12=, name=ubi9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, version=9.4, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, build-date=2024-09-18T21:23:30, managed_by=edpm_ansible, com.redhat.component=ubi9-container, io.k8s.display-name=Red Hat Universal Base Image 9, io.buildah.version=1.29.0, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., config_id=edpm, container_name=kepler, release=1214.1726694543, io.openshift.tags=base rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public)
Oct 11 01:58:30 compute-0 podman[306482]: 2025-10-11 01:58:30.464176191 +0000 UTC m=+0.188601148 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_managed=true, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.build-date=20251009)
Oct 11 01:58:30 compute-0 python3.9[306488]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtqemud-ro.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:58:30 compute-0 sudo[306472]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v610: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:31 compute-0 sudo[306599]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pmiqzhstfyamsntgaryzpvufbatdmkgj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147909.5504558-737-52735834162682/AnsiballZ_file.py'
Oct 11 01:58:31 compute-0 sudo[306599]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:31 compute-0 python3.9[306601]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/virtqemud-ro.socket.d/override.conf _original_basename=libvirt-socket.unit.j2 recurse=False state=file path=/etc/systemd/system/virtqemud-ro.socket.d/override.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:31 compute-0 sudo[306599]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:31 compute-0 openstack_network_exporter[159265]: ERROR   01:58:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:58:31 compute-0 openstack_network_exporter[159265]: ERROR   01:58:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:58:31 compute-0 openstack_network_exporter[159265]: ERROR   01:58:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:58:31 compute-0 openstack_network_exporter[159265]: ERROR   01:58:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 01:58:31 compute-0 openstack_network_exporter[159265]: ERROR   01:58:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
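
These openstack_network_exporter errors are failed collector probes rather than crashes: the messages suggest this compute node runs no local ovn-northd (so there is no control socket to query) and no userspace (netdev) datapath (so the PMD statistics do not exist). The equivalent manual probes, assuming standard OVS/OVN tooling on the host:

    # PMD queue/performance stats exist only with a userspace (DPDK) datapath
    ovs-appctl dpif-netdev/pmd-rxq-show
    ovs-appctl dpif-netdev/pmd-perf-show

    # ovn-northd normally runs on controller nodes; on a compute node this
    # fails with the same "no control socket" symptom
    ovn-appctl -t ovn-northd status
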
Oct 11 01:58:32 compute-0 ceph-mon[191930]: pgmap v610: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:32 compute-0 podman[306626]: 2025-10-11 01:58:32.246543199 +0000 UTC m=+0.137549591 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 01:58:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v611: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:33 compute-0 sudo[306770]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wksxmatnkhbstkzbbuakjatxefnjlqft ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147912.5838869-737-168360529547904/AnsiballZ_stat.py'
Oct 11 01:58:33 compute-0 sudo[306770]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:33 compute-0 python3.9[306772]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtqemud-admin.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:58:33 compute-0 sudo[306770]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:33 compute-0 sudo[306848]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sgvlbqdgmknejxmcjvmxeafidxsuyfds ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147912.5838869-737-168360529547904/AnsiballZ_file.py'
Oct 11 01:58:33 compute-0 sudo[306848]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:34 compute-0 python3.9[306850]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/virtqemud-admin.socket.d/override.conf _original_basename=libvirt-socket.unit.j2 recurse=False state=file path=/etc/systemd/system/virtqemud-admin.socket.d/override.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:34 compute-0 ceph-mon[191930]: pgmap v611: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:34 compute-0 sudo[306848]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:58:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v612: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:35 compute-0 sudo[307000]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oouneptpkcwtmdnxpnwrmtybwglegrga ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147914.8012257-737-274459680774294/AnsiballZ_stat.py'
Oct 11 01:58:35 compute-0 sudo[307000]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:35 compute-0 python3.9[307002]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtsecretd.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:58:35 compute-0 sudo[307000]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:36 compute-0 sudo[307078]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uuaadwmcbrxbsqxoofrzsjtzhrvcsrlm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147914.8012257-737-274459680774294/AnsiballZ_file.py'
Oct 11 01:58:36 compute-0 sudo[307078]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:36 compute-0 ceph-mon[191930]: pgmap v612: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:36 compute-0 python3.9[307080]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/virtsecretd.socket.d/override.conf _original_basename=libvirt-socket.unit.j2 recurse=False state=file path=/etc/systemd/system/virtsecretd.socket.d/override.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:36 compute-0 sudo[307078]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v613: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:37 compute-0 sudo[307230]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oixrvqvgkkubljyqwnbzjngqxrazheqj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147916.5087314-737-219045412829088/AnsiballZ_stat.py'
Oct 11 01:58:37 compute-0 sudo[307230]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:37 compute-0 python3.9[307232]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtsecretd-ro.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:58:37 compute-0 sudo[307230]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:37 compute-0 sudo[307308]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bgiotusugzczkugqdrkrfeibsrehlxte ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147916.5087314-737-219045412829088/AnsiballZ_file.py'
Oct 11 01:58:37 compute-0 sudo[307308]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:38 compute-0 python3.9[307310]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/virtsecretd-ro.socket.d/override.conf _original_basename=libvirt-socket.unit.j2 recurse=False state=file path=/etc/systemd/system/virtsecretd-ro.socket.d/override.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:38 compute-0 sudo[307308]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:38 compute-0 ceph-mon[191930]: pgmap v613: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:38 compute-0 sudo[307460]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ndijwxnksvejodajviliwtweajhezvix ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147918.2866428-737-208969814323203/AnsiballZ_stat.py'
Oct 11 01:58:38 compute-0 sudo[307460]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v614: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:39 compute-0 python3.9[307462]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virtsecretd-admin.socket.d/override.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:58:39 compute-0 sudo[307460]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:39 compute-0 sudo[307538]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ucnvqrsledcojkhvdaulpfmfwiqvsddk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147918.2866428-737-208969814323203/AnsiballZ_file.py'
Oct 11 01:58:39 compute-0 sudo[307538]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:58:39 compute-0 python3.9[307540]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/virtsecretd-admin.socket.d/override.conf _original_basename=libvirt-socket.unit.j2 recurse=False state=file path=/etc/systemd/system/virtsecretd-admin.socket.d/override.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:39 compute-0 sudo[307538]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:40 compute-0 ceph-mon[191930]: pgmap v614: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:40 compute-0 python3.9[307690]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail
                                             ls -lRZ /run/libvirt | grep -E ':container_\S+_t'
                                              _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
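
The ad-hoc command above scans the SELinux labels under /run/libvirt for any container_*_t type, i.e. it verifies that container processes have not relabeled libvirt's runtime directory. Reproduced by hand, with a hedged follow-up should stray labels turn up:

    # list contexts recursively; any match means a file carries a container_* type
    ls -lRZ /run/libvirt | grep -E ':container_\S+_t'

    # restore the policy's default contexts if something is mislabeled
    restorecon -Rv /run/libvirt
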
Oct 11 01:58:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v615: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:42 compute-0 sudo[307843]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ktuztnczriblzsnefgkgflulitjjeuqa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147921.3404944-901-132086817266536/AnsiballZ_seboolean.py'
Oct 11 01:58:42 compute-0 sudo[307843]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:42 compute-0 ceph-mon[191930]: pgmap v615: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:42 compute-0 python3.9[307845]: ansible-ansible.posix.seboolean Invoked with name=os_enable_vtpm persistent=True state=True ignore_selinux_state=False
Oct 11 01:58:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v616: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:43 compute-0 sudo[307843]: pam_unix(sudo:session): session closed for user root
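
ansible.posix.seboolean with persistent=True is the module form of setsebool -P; the os_enable_vtpm boolean (per its name) gates vTPM/swtpm use by SELinux-confined instances. The same change from a shell:

    # -P persists the boolean across reboots
    setsebool -P os_enable_vtpm on

    # verify the current value
    getsebool os_enable_vtpm
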
Oct 11 01:58:44 compute-0 sudo[307995]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dkeuhtkgcpsnnikapexxjxewcnmvumjp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147923.4934394-909-14819323570855/AnsiballZ_copy.py'
Oct 11 01:58:44 compute-0 sudo[307995]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:44 compute-0 ceph-mon[191930]: pgmap v616: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:44 compute-0 python3.9[307997]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/libvirt/servercert.pem group=root mode=0644 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/tls.crt backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:44 compute-0 sudo[307995]: pam_unix(sudo:session): session closed for user root
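
The run of ansible.legacy.copy tasks that follows fans a single TLS pair under /var/lib/openstack/certs/libvirt/default out to the paths libvirt expects, keeping the server key at 0600 (remote_src=True, so the source already sits on the host). The shell equivalent of the first two copies, using install(1) to set owner, group and mode in one step:

    install -o root -g root -m 0644 /var/lib/openstack/certs/libvirt/default/tls.crt /etc/pki/libvirt/servercert.pem
    install -o root -g root -m 0600 /var/lib/openstack/certs/libvirt/default/tls.key /etc/pki/libvirt/private/serverkey.pem
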
Oct 11 01:58:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:58:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v617: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:45 compute-0 sudo[308147]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-usfyxqdpaizmpbswclnfuolvbjhwsyab ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147924.7750416-909-144504709066477/AnsiballZ_copy.py'
Oct 11 01:58:45 compute-0 sudo[308147]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:45 compute-0 python3.9[308149]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/libvirt/private/serverkey.pem group=root mode=0600 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/tls.key backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:45 compute-0 sudo[308147]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:46 compute-0 podman[308244]: 2025-10-11 01:58:46.230990839 +0000 UTC m=+0.118143322 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 01:58:46 compute-0 podman[308250]: 2025-10-11 01:58:46.260592416 +0000 UTC m=+0.147186737 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.component=ubi9-minimal-container, build-date=2025-08-20T13:12:41, config_id=edpm, distribution-scope=public, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.openshift.expose-services=, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, architecture=x86_64, io.buildah.version=1.33.7, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., managed_by=edpm_ansible, name=ubi9-minimal, version=9.6, vcs-type=git, io.openshift.tags=minimal rhel9, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=openstack_network_exporter, vendor=Red Hat, Inc.)
Oct 11 01:58:46 compute-0 ceph-mon[191930]: pgmap v617: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v618: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:47 compute-0 sudo[308346]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-flmfoedjskbvrwczwghlleqlvuqvzcws ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147925.8587024-909-182713368594903/AnsiballZ_copy.py'
Oct 11 01:58:47 compute-0 sudo[308346]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:47 compute-0 python3.9[308348]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/libvirt/clientcert.pem group=root mode=0644 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/tls.crt backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:47 compute-0 sudo[308346]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:48 compute-0 ceph-mon[191930]: pgmap v618: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:48 compute-0 sudo[308498]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jbmxxvzmbzdysfsithtjgjgwnqizavsg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147927.9624326-909-94742542838800/AnsiballZ_copy.py'
Oct 11 01:58:48 compute-0 sudo[308498]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:48 compute-0 python3.9[308500]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/libvirt/private/clientkey.pem group=root mode=0644 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/tls.key backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:48 compute-0 sudo[308498]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v619: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:58:50 compute-0 ceph-mon[191930]: pgmap v619: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:50 compute-0 sudo[308666]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-frkgummwsrtqfeupaocxrvlgfyxflrwu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147929.1624405-909-120362386166827/AnsiballZ_copy.py'
Oct 11 01:58:50 compute-0 sudo[308666]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:50 compute-0 podman[308625]: 2025-10-11 01:58:50.559112168 +0000 UTC m=+0.134760994 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm, maintainer=OpenStack Kubernetes Operator team)
Oct 11 01:58:50 compute-0 python3.9[308672]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/CA/cacert.pem group=root mode=0644 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/ca.crt backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:50 compute-0 sudo[308666]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v620: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:51 compute-0 sudo[308822]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kdjrgfltahcwepywyezucatcwqkwblnv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147931.1595154-945-13781315090103/AnsiballZ_copy.py'
Oct 11 01:58:51 compute-0 sudo[308822]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:52 compute-0 python3.9[308824]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/qemu/server-cert.pem group=qemu mode=0640 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/tls.crt backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:52 compute-0 sudo[308822]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:52 compute-0 ceph-mon[191930]: pgmap v620: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:52 compute-0 sudo[308974]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-spriotebomhwyhnbkqmvzltzjstpwjbi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147932.35362-945-650319988382/AnsiballZ_copy.py'
Oct 11 01:58:52 compute-0 sudo[308974]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v621: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:53 compute-0 python3.9[308976]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/qemu/server-key.pem group=qemu mode=0640 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/tls.key backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:53 compute-0 sudo[308974]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:54 compute-0 sudo[309141]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qyslpntivdripjnbpurilsuscgbfhqna ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147933.4892325-945-174835142546521/AnsiballZ_copy.py'
Oct 11 01:58:54 compute-0 sudo[309141]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:54 compute-0 podman[309100]: 2025-10-11 01:58:54.129080985 +0000 UTC m=+0.123486243 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, tcib_managed=true)
Oct 11 01:58:54 compute-0 ceph-mon[191930]: pgmap v621: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:54 compute-0 python3.9[309146]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/qemu/client-cert.pem group=qemu mode=0640 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/tls.crt backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:54 compute-0 sudo[309141]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:58:54.811 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 01:58:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:58:54.812 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 01:58:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:58:54.812 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 01:58:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:58:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v622: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:55 compute-0 sudo[309296]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mqriuajrmquazhqjulzcbiygspohhqhr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147934.6247563-945-44291620343818/AnsiballZ_copy.py'
Oct 11 01:58:55 compute-0 sudo[309296]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:55 compute-0 python3.9[309298]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/qemu/client-key.pem group=qemu mode=0640 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/tls.key backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:55 compute-0 sudo[309296]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:56 compute-0 sudo[309448]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mziziczicefnvllreuklfybpzhofowxm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147935.6793182-945-269387588426167/AnsiballZ_copy.py'
Oct 11 01:58:56 compute-0 sudo[309448]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:56 compute-0 ceph-mon[191930]: pgmap v622: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:58:56
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['volumes', 'default.rgw.control', 'cephfs.cephfs.data', 'images', 'default.rgw.meta', '.mgr', 'backups', 'cephfs.cephfs.meta', 'vms', '.rgw.root', 'default.rgw.log']
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
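
This balancer pass evaluated every listed pool in upmap mode and prepared no changes (0 of an allowed 10), meaning PG placement is already within the 0.05 misplaced target. The operator-facing commands that correspond to what the log shows, assuming a standard Ceph CLI:

    # report mode, activity and the last optimization attempt
    ceph balancer status

    # the settings reflected in the log lines above
    ceph balancer mode upmap
    ceph config set mgr target_max_misplaced_ratio 0.05
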
Oct 11 01:58:56 compute-0 python3.9[309450]: ansible-ansible.legacy.copy Invoked with dest=/etc/pki/qemu/ca-cert.pem group=qemu mode=0640 owner=root remote_src=True src=/var/lib/openstack/certs/libvirt/default/ca.crt backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:56 compute-0 sudo[309448]: pam_unix(sudo:session): session closed for user root
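
With this task the QEMU-side set is complete: ca-cert.pem, server-cert.pem/server-key.pem and client-cert.pem/client-key.pem all live under /etc/pki/qemu/ as root:qemu 0640, readable by the qemu user but not world-readable. A quick audit, assuming that layout:

    # expect five PEM files, each root:qemu with mode 0640
    ls -l /etc/pki/qemu/
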
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:58:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:58:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v623: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:57 compute-0 sudo[309600]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eihzumouhboazloaxnexjlgoftscuvzk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147936.920494-982-136423983900293/AnsiballZ_file.py'
Oct 11 01:58:57 compute-0 sudo[309600]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:57 compute-0 python3.9[309602]: ansible-ansible.builtin.file Invoked with mode=0755 path=/var/lib/openstack/config/ceph state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:58:57 compute-0 sudo[309600]: pam_unix(sudo:session): session closed for user root
Oct 11 01:58:58 compute-0 ceph-mon[191930]: pgmap v623: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:58 compute-0 sudo[309752]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-acbpdrjgidatwdxbonkzfgoqpksptbuf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147937.9781096-990-42005542155146/AnsiballZ_find.py'
Oct 11 01:58:58 compute-0 sudo[309752]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:58 compute-0 python3.9[309754]: ansible-ansible.builtin.find Invoked with paths=['/var/lib/openstack/config/ceph'] patterns=['*.conf'] read_whole_file=False file_type=file age_stamp=mtime recurse=False hidden=False follow=False get_checksum=False checksum_algorithm=sha1 use_regex=False exact_mode=True excludes=None contains=None age=None size=None depth=None mode=None encoding=None limit=None
Oct 11 01:58:58 compute-0 sudo[309752]: pam_unix(sudo:session): session closed for user root
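
ansible.builtin.find with recurse=False and get_checksum=False is a flat existence scan of the directory created just above. The matching shell probe:

    # list regular *.conf files one level deep, no checksums
    find /var/lib/openstack/config/ceph -maxdepth 1 -type f -name '*.conf'
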
Oct 11 01:58:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v624: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:58:59 compute-0 sudo[309904]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xdlpegzquuxohxyllymhzqhdscimcmja ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147939.0685368-998-161893418879665/AnsiballZ_command.py'
Oct 11 01:58:59 compute-0 sudo[309904]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:58:59 compute-0 podman[157119]: time="2025-10-11T01:58:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:58:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:58:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 35732 "" "Go-http-client/1.1"
Oct 11 01:58:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:58:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 7277 "" "Go-http-client/1.1"
Oct 11 01:58:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:58:59 compute-0 python3.9[309906]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail;
                                             echo ceph
                                             awk -F '=' '/fsid/ {print $2}' /var/lib/openstack/config/ceph/ceph.conf | xargs
                                              _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:59:00 compute-0 sudo[309904]: pam_unix(sudo:session): session closed for user root
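
The two-line pipeline above first echoes the cluster name (ceph) and then extracts the fsid from the deployed ceph.conf; xargs with no arguments trims the whitespace awk's '=' split leaves around the value, and set -o pipefail makes an awk failure fail the whole task. Run on its own:

    # prints the bare fsid (the value itself is deployment-specific)
    awk -F '=' '/fsid/ {print $2}' /var/lib/openstack/config/ceph/ceph.conf | xargs
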
Oct 11 01:59:00 compute-0 ceph-mon[191930]: pgmap v624: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v625: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:01 compute-0 podman[309963]: 2025-10-11 01:59:01.223882882 +0000 UTC m=+0.112105743 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 01:59:01 compute-0 podman[309972]: 2025-10-11 01:59:01.242941097 +0000 UTC m=+0.118592436 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.component=ubi9-container, container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=1214.1726694543, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-type=git, io.openshift.expose-services=, io.openshift.tags=base rhel9, io.buildah.version=1.29.0, managed_by=edpm_ansible, version=9.4, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, build-date=2024-09-18T21:23:30, maintainer=Red Hat, Inc., summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, distribution-scope=public, config_id=edpm)
Oct 11 01:59:01 compute-0 podman[309967]: 2025-10-11 01:59:01.290359207 +0000 UTC m=+0.168542558 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_id=ovn_controller, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team)
Oct 11 01:59:01 compute-0 openstack_network_exporter[159265]: ERROR   01:59:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:59:01 compute-0 openstack_network_exporter[159265]: ERROR   01:59:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:59:01 compute-0 openstack_network_exporter[159265]: ERROR   01:59:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:59:01 compute-0 openstack_network_exporter[159265]: ERROR   01:59:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:59:01 compute-0 openstack_network_exporter[159265]: ERROR   01:59:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
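
The exporter errors above share one cause: before issuing an appctl command, the exporter looks for the target daemon's control socket (a file named <daemon>.<pid>.ctl in that daemon's run directory) and finds none for ovsdb-server or ovn-northd, the latter because northd does not run on a compute node at all. A minimal sketch of that lookup, assuming the conventional /var/run/openvswitch run directory (an OVS default, not taken from this log):

    import glob
    import os
    from typing import Optional

    RUNDIR = "/var/run/openvswitch"  # assumed default; ovn-northd would use /var/run/ovn instead

    def find_ctl_socket(daemon: str) -> Optional[str]:
        # appctl-style clients target '<daemon>.<pid>.ctl'; with no match the
        # call fails like 'no control socket files found' in the log above.
        matches = glob.glob(os.path.join(RUNDIR, f"{daemon}.*.ctl"))
        return matches[0] if matches else None

    for daemon in ("ovsdb-server", "ovs-vswitchd", "ovn-northd"):
        print(daemon, "->", find_ctl_socket(daemon) or "no control socket files found")

The dpif-netdev errors ("please specify an existing datapath") are the follow-on case: the call reaches ovs-vswitchd, but no userspace (netdev) datapath exists yet to report PMD stats for.
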
Oct 11 01:59:01 compute-0 python3.9[310125]: ansible-ansible.builtin.find Invoked with paths=['/var/lib/openstack/config/ceph'] patterns=['*.keyring'] read_whole_file=False file_type=file age_stamp=mtime recurse=False hidden=False follow=False get_checksum=False checksum_algorithm=sha1 use_regex=False exact_mode=True excludes=None contains=None age=None size=None depth=None mode=None encoding=None limit=None
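
The find task above scans /var/lib/openstack/config/ceph for Ceph keyring files, non-recursively and files only. The same filter is easy to reproduce locally when debugging why a keyring was (or was not) picked up; a minimal sketch mirroring the logged module arguments:

    from pathlib import Path

    def find_keyrings(path: str = "/var/lib/openstack/config/ceph") -> list[Path]:
        # patterns=['*.keyring'], file_type=file, recurse=False -- the same
        # constraints as the ansible.builtin.find invocation logged above
        # (hidden-file handling aside)
        return sorted(p for p in Path(path).glob("*.keyring") if p.is_file())

    print(find_keyrings())
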
Oct 11 01:59:02 compute-0 ceph-mon[191930]: pgmap v625: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v626: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:03 compute-0 podman[310238]: 2025-10-11 01:59:03.235381847 +0000 UTC m=+0.128619716 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, managed_by=edpm_ansible)
Oct 11 01:59:03 compute-0 python3.9[310294]: ansible-ansible.legacy.stat Invoked with path=/tmp/secret.xml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:59:04 compute-0 ceph-mon[191930]: pgmap v626: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:04 compute-0 python3.9[310415]: ansible-ansible.legacy.copy Invoked with dest=/tmp/secret.xml mode=0600 src=/home/zuul/.ansible/tmp/ansible-tmp-1760147942.65199-1017-149763394615933/.source.xml follow=False _original_basename=secret.xml.j2 checksum=3f551e5cea9e36cde17383b56e0046d02a19f883 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:59:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:59:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v627: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:05 compute-0 sudo[310565]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cabifjgjkjxmiqfqfixxzjfrtidypjaa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147944.732643-1032-225870806528288/AnsiballZ_command.py'
Oct 11 01:59:05 compute-0 sudo[310565]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:05 compute-0 python3.9[310567]: ansible-ansible.legacy.command Invoked with _raw_params=virsh secret-undefine 3c7617c3-7a20-523e-a9de-20c0d6ba41da
                                             virsh secret-define --file /tmp/secret.xml
                                              _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
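
The two-step virsh sequence above rotates the Ceph secret inside libvirt: the existing secret UUID is undefined first, then redefined from the freshly copied /tmp/secret.xml (which the later file task removes, and whose first use triggers systemd to start the libvirt secret daemon below). A minimal sketch of the same sequence, reusing the UUID and path from the log:

    import subprocess

    SECRET_UUID = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"
    SECRET_XML = "/tmp/secret.xml"

    # secret-undefine may fail harmlessly if the secret was never defined,
    # so its exit status is not checked; secret-define must succeed.
    subprocess.run(["virsh", "secret-undefine", SECRET_UUID], check=False)
    subprocess.run(["virsh", "secret-define", "--file", SECRET_XML], check=True)
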
Oct 11 01:59:05 compute-0 polkitd[6240]: Registered Authentication Agent for unix-process:310569:429337 (system bus name :1.4062 [/usr/bin/pkttyagent --process 310569 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Oct 11 01:59:05 compute-0 polkitd[6240]: Unregistered Authentication Agent for unix-process:310569:429337 (system bus name :1.4062, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Oct 11 01:59:05 compute-0 systemd[1]: Starting libvirt secret daemon...
Oct 11 01:59:05 compute-0 systemd[1]: Started libvirt secret daemon.
Oct 11 01:59:05 compute-0 polkitd[6240]: Registered Authentication Agent for unix-process:310568:429336 (system bus name :1.4063 [/usr/bin/pkttyagent --process 310568 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Oct 11 01:59:05 compute-0 polkitd[6240]: Unregistered Authentication Agent for unix-process:310568:429336 (system bus name :1.4063, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Oct 11 01:59:05 compute-0 sudo[310565]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 01:59:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
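
The autoscaler arithmetic above reduces to a proportion: each pool's raw pg target is its fraction of cluster capacity times a per-OSD PG budget spread across the OSDs, scaled by the pool's bias, then quantized. The logged values are consistent with mon_target_pg_per_osd=100 and 3 OSDs (both inferred from the numbers, not stated in the log); the real autoscaler additionally respects per-pool pg_num_min and only changes pg_num when the target drifts far from the current value, which is why most pools above stay at their current 32. A worked sketch:

    def pg_target(usage_ratio: float, bias: float,
                  pg_per_osd: int = 100, num_osds: int = 3) -> float:
        # pg_per_osd and num_osds are assumed values inferred from the log
        return usage_ratio * pg_per_osd * num_osds * bias

    def quantize(target: float, minimum: int = 1) -> int:
        # round up to the next power of two, never below 'minimum'
        pgs = minimum
        while pgs < target:
            pgs *= 2
        return pgs

    print(pg_target(7.185749983720779e-06, 1.0))  # 0.0021557249951162337, as logged for '.mgr'
    print(pg_target(5.087256625643029e-07, 4.0))  # 0.0006104707950771635, as logged for 'cephfs.cephfs.meta'
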
Oct 11 01:59:06 compute-0 ceph-mon[191930]: pgmap v627: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v628: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:07 compute-0 python3.9[310746]: ansible-ansible.builtin.file Invoked with path=/tmp/secret.xml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:59:07 compute-0 sudo[310896]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eljeiqlamtihvwlrefszeskaplfykanr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147947.410432-1048-32209575402274/AnsiballZ_command.py'
Oct 11 01:59:07 compute-0 sudo[310896]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.940 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to be longer than the expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.941 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.941 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f8ed27f97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb8c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.945 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f8ed27fbad0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.946 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f8ed27faff0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.946 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb1a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f8ed27fb110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f8ed27fb170>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb200>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.949 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f8ed27fb1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f8ed27fb230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.949 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed2874260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.951 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f8ed2874230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f8ed27fb290>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.952 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.953 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed3ab42f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f8ed5778d70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f8ed27fb650>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.954 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.955 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb350>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f8ed27fbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.956 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.956 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f8ed27fb320>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.956 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.955 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb90>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.957 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fa390>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f8ed27fbb60>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f8ed27fa3f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.957 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb3b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.958 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbbf0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f8ed27fb380>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f8ed27fbbc0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.959 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbc80>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': [], 'disk.root.size': [], 'network.incoming.packets.error': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.960 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': [], 'disk.root.size': [], 'network.incoming.packets.error': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f8ed27fbc50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f8ed27fbce0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.961 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': [], 'disk.root.size': [], 'network.incoming.packets.error': [], 'network.outgoing.bytes': [], 'network.outgoing.bytes.delta': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.962 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': [], 'disk.root.size': [], 'network.incoming.packets.error': [], 'network.outgoing.bytes': [], 'network.outgoing.bytes.delta': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.962 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f8ed27fbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.963 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.963 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f8ed27fb590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.963 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.963 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27f9610>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': [], 'disk.root.size': [], 'network.incoming.packets.error': [], 'network.outgoing.bytes': [], 'network.outgoing.bytes.delta': [], 'network.outgoing.bytes.rate': [], 'memory.usage': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.964 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb620>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': [], 'disk.root.size': [], 'network.incoming.packets.error': [], 'network.outgoing.bytes': [], 'network.outgoing.bytes.delta': [], 'network.outgoing.bytes.rate': [], 'memory.usage': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.964 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f8ed27f95e0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.965 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.965 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f8ed27fb5f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.965 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.964 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbe30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': [], 'disk.root.size': [], 'network.incoming.packets.error': [], 'network.outgoing.bytes': [], 'network.outgoing.bytes.delta': [], 'network.outgoing.bytes.rate': [], 'memory.usage': [], 'cpu': [], 'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.965 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbec0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': [], 'disk.root.size': [], 'network.incoming.packets.error': [], 'network.outgoing.bytes': [], 'network.outgoing.bytes.delta': [], 'network.outgoing.bytes.rate': [], 'memory.usage': [], 'cpu': [], 'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.966 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbf50>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.packets': [], 'disk.ephemeral.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': [], 'disk.root.size': [], 'network.incoming.packets.error': [], 'network.outgoing.bytes': [], 'network.outgoing.bytes.delta': [], 'network.outgoing.bytes.rate': [], 'memory.usage': [], 'cpu': [], 'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.966 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f8ed27fbe00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.966 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.966 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f8ed27fbe90>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.966 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.966 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f8ed27fbf20>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.966 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.970 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.970 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.970 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.970 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.970 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.970 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 01:59:07.970 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 01:59:08 compute-0 sudo[310896]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:08 compute-0 ceph-mon[191930]: pgmap v628: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v629: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:09 compute-0 sudo[311050]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pxzanprwzmtimbsxrfuflmvuqysxjvyf ; FSID=3c7617c3-7a20-523e-a9de-20c0d6ba41da KEY=AQCDteloAAAAABAAqDHfKJKrnOb6idG+XXtwTw== /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147948.497778-1056-114200431424239/AnsiballZ_command.py'
Oct 11 01:59:09 compute-0 sudo[311050]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:09 compute-0 polkitd[6240]: Registered Authentication Agent for unix-process:311053:429709 (system bus name :1.4066 [/usr/bin/pkttyagent --process 311053 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Oct 11 01:59:09 compute-0 polkitd[6240]: Unregistered Authentication Agent for unix-process:311053:429709 (system bus name :1.4066, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Oct 11 01:59:09 compute-0 sudo[311050]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #30. Immutable memtables: 0.
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:59:09.858638) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 11] Flushing memtable with next log file: 30
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147949858738, "job": 11, "event": "flush_started", "num_memtables": 1, "num_entries": 1830, "num_deletes": 250, "total_data_size": 3103488, "memory_usage": 3146152, "flush_reason": "Manual Compaction"}
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 11] Level-0 flush table #31: started
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147949876027, "cf_name": "default", "job": 11, "event": "table_file_creation", "file_number": 31, "file_size": 1755677, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 11747, "largest_seqno": 13576, "table_properties": {"data_size": 1749753, "index_size": 2999, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1861, "raw_key_size": 14863, "raw_average_key_size": 20, "raw_value_size": 1736631, "raw_average_value_size": 2349, "num_data_blocks": 140, "num_entries": 739, "num_filter_entries": 739, "num_deletions": 250, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760147742, "oldest_key_time": 1760147742, "file_creation_time": 1760147949, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 31, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 11] Flush lasted 17493 microseconds, and 11031 cpu microseconds.
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:59:09.876134) [db/flush_job.cc:967] [default] [JOB 11] Level-0 flush table #31: 1755677 bytes OK
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:59:09.876166) [db/memtable_list.cc:519] [default] Level-0 commit table #31 started
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:59:09.878852) [db/memtable_list.cc:722] [default] Level-0 commit table #31: memtable #1 done
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:59:09.878877) EVENT_LOG_v1 {"time_micros": 1760147949878870, "job": 11, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:59:09.878904) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 11] Try to delete WAL files size 3095756, prev total WAL file size 3095756, number of live WAL files 2.
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000027.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:59:09.881022) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6D67727374617400323532' seq:72057594037927935, type:22 .. '6D67727374617400353033' seq:0, type:0; will stop at (end)
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 12] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 11 Base level 0, inputs: [31(1714KB)], [29(7622KB)]
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147949881126, "job": 12, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [31], "files_L6": [29], "score": -1, "input_data_size": 9560741, "oldest_snapshot_seqno": -1}
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 12] Generated table #32: 4016 keys, 7549173 bytes, temperature: kUnknown
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147949934508, "cf_name": "default", "job": 12, "event": "table_file_creation", "file_number": 32, "file_size": 7549173, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 7520648, "index_size": 17407, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 10053, "raw_key_size": 95470, "raw_average_key_size": 23, "raw_value_size": 7446513, "raw_average_value_size": 1854, "num_data_blocks": 758, "num_entries": 4016, "num_filter_entries": 4016, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760147949, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 32, "seqno_to_time_mapping": "N/A"}}
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:59:09.934827) [db/compaction/compaction_job.cc:1663] [default] [JOB 12] Compacted 1@0 + 1@6 files to L6 => 7549173 bytes
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:59:09.936616) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 178.8 rd, 141.2 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(1.7, 7.4 +0.0 blob) out(7.2 +0.0 blob), read-write-amplify(9.7) write-amplify(4.3) OK, records in: 4430, records dropped: 414 output_compression: NoCompression
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:59:09.936637) EVENT_LOG_v1 {"time_micros": 1760147949936625, "job": 12, "event": "compaction_finished", "compaction_time_micros": 53474, "compaction_time_cpu_micros": 34746, "output_level": 6, "num_output_files": 1, "total_output_size": 7549173, "num_input_records": 4430, "num_output_records": 4016, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000031.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147949937068, "job": 12, "event": "table_file_deletion", "file_number": 31}
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000029.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760147949938357, "job": 12, "event": "table_file_deletion", "file_number": 29}
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:59:09.880774) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:59:09.938593) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:59:09.938604) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:59:09.938608) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:59:09.938611) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:59:09 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-01:59:09.938614) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 01:59:10 compute-0 sudo[311208]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qrdqevlbiddlcisutxqufeuyyelefhqj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147949.833181-1064-272773186364242/AnsiballZ_copy.py'
Oct 11 01:59:10 compute-0 ceph-mon[191930]: pgmap v629: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:10 compute-0 sudo[311208]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:10 compute-0 python3.9[311210]: ansible-ansible.legacy.copy Invoked with dest=/etc/ceph/ceph.conf group=root mode=0644 owner=root remote_src=True src=/var/lib/openstack/config/ceph/ceph.conf backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:59:10 compute-0 sudo[311208]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v630: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:11 compute-0 sudo[311360]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-erjdrgsimplhzzprfvajwnoxyeowcgis ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147950.9969573-1072-171331916665438/AnsiballZ_stat.py'
Oct 11 01:59:11 compute-0 sudo[311360]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:11 compute-0 python3.9[311362]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/libvirt.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:59:11 compute-0 sudo[311360]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:12 compute-0 sudo[311438]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hsxjdjflcfchipsgamaookmqsdnlhkdp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147950.9969573-1072-171331916665438/AnsiballZ_file.py'
Oct 11 01:59:12 compute-0 sudo[311438]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:12 compute-0 ceph-mon[191930]: pgmap v630: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:12 compute-0 python3.9[311440]: ansible-ansible.legacy.file Invoked with mode=0640 dest=/var/lib/edpm-config/firewall/libvirt.yaml _original_basename=firewall.yaml.j2 recurse=False state=file path=/var/lib/edpm-config/firewall/libvirt.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:59:12 compute-0 sudo[311438]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v631: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:13 compute-0 sudo[311590]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rcbztwhfoynmytakedxvesjvqzjpgusz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147952.9544485-1085-87721627065995/AnsiballZ_file.py'
Oct 11 01:59:13 compute-0 sudo[311590]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:13 compute-0 python3.9[311592]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:59:13 compute-0 sudo[311590]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:14 compute-0 ceph-mon[191930]: pgmap v631: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:59:14 compute-0 sudo[311742]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-amktpkexrzheruthccmscosksvfwovap ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147954.2663171-1093-266664503383554/AnsiballZ_stat.py'
Oct 11 01:59:14 compute-0 sudo[311742]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v632: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:15 compute-0 python3.9[311744]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:59:15 compute-0 sudo[311742]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:15 compute-0 sudo[311820]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-reejkaguygoiauamvwdszvvivtgdnypa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147954.2663171-1093-266664503383554/AnsiballZ_file.py'
Oct 11 01:59:15 compute-0 sudo[311820]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:15 compute-0 python3.9[311822]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml _original_basename=base-rules.yaml.j2 recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:59:15 compute-0 sudo[311820]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:16 compute-0 ceph-mon[191930]: pgmap v632: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v633: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:17 compute-0 podman[311923]: 2025-10-11 01:59:17.246811743 +0000 UTC m=+0.127750539 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 01:59:17 compute-0 podman[311928]: 2025-10-11 01:59:17.27972864 +0000 UTC m=+0.156343191 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, managed_by=edpm_ansible, build-date=2025-08-20T13:12:41, com.redhat.component=ubi9-minimal-container, config_id=edpm, io.openshift.tags=minimal rhel9, io.buildah.version=1.33.7, url=https://catalog.redhat.com/en/search?searchType=containers, vcs-type=git, name=ubi9-minimal, release=1755695350, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, version=9.6, architecture=x86_64, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vendor=Red Hat, Inc., container_name=openstack_network_exporter, distribution-scope=public)
Oct 11 01:59:17 compute-0 sudo[312018]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gyakeevkyxudywwtwgalsrddmavthrdm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147956.7195337-1105-153742891263266/AnsiballZ_stat.py'
Oct 11 01:59:17 compute-0 sudo[312018]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:17 compute-0 python3.9[312020]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:59:17 compute-0 sudo[312018]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:18 compute-0 sudo[312096]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bqsetrliagvkatlckrhpyravzzboazui ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147956.7195337-1105-153742891263266/AnsiballZ_file.py'
Oct 11 01:59:18 compute-0 sudo[312096]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:18 compute-0 python3.9[312098]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml _original_basename=.22kllovr recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:59:18 compute-0 sudo[312096]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:18 compute-0 ceph-mon[191930]: pgmap v633: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v634: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:19 compute-0 sudo[312261]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-axrmacbcycdujihlzxfrumkmraoneyln ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147958.6700606-1117-242294862425987/AnsiballZ_stat.py'
Oct 11 01:59:19 compute-0 sudo[312261]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:19 compute-0 sudo[312237]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:59:19 compute-0 sudo[312237]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:19 compute-0 sudo[312237]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:19 compute-0 sudo[312276]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:59:19 compute-0 sudo[312276]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:19 compute-0 sudo[312276]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:19 compute-0 python3.9[312273]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/iptables.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:59:19 compute-0 sudo[312261]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:19 compute-0 sudo[312301]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:59:19 compute-0 sudo[312301]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:19 compute-0 sudo[312301]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:19 compute-0 sudo[312334]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 01:59:19 compute-0 sudo[312334]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:59:19 compute-0 sudo[312434]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nrgksolynzujglksmxixlybxoudsmjfb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147958.6700606-1117-242294862425987/AnsiballZ_file.py'
Oct 11 01:59:19 compute-0 sudo[312434]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:20 compute-0 python3.9[312441]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/iptables.nft _original_basename=iptables.nft recurse=False state=file path=/etc/nftables/iptables.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:59:20 compute-0 sudo[312434]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:20 compute-0 sudo[312334]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:59:20 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:59:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 01:59:20 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:59:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 01:59:20 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:59:20 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 93236a30-fb73-4313-ba29-bf5ec4f7a343 does not exist
Oct 11 01:59:20 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 0a34da67-5532-4f0a-84ba-c22dd6b52224 does not exist
Oct 11 01:59:20 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 200e6bc8-71b3-4a46-b9ad-517371352401 does not exist
Oct 11 01:59:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 01:59:20 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:59:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 01:59:20 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:59:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 01:59:20 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:59:20 compute-0 ceph-mon[191930]: pgmap v634: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:20 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:59:20 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 01:59:20 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:59:20 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 01:59:20 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 01:59:20 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 01:59:20 compute-0 sudo[312491]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:59:20 compute-0 sudo[312491]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:20 compute-0 sudo[312491]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:20 compute-0 sudo[312555]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:59:20 compute-0 sudo[312555]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:20 compute-0 sudo[312555]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:20 compute-0 podman[312539]: 2025-10-11 01:59:20.766541552 +0000 UTC m=+0.102128482 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm)
Oct 11 01:59:20 compute-0 sudo[312625]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:59:20 compute-0 sudo[312625]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:20 compute-0 sudo[312625]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:21 compute-0 sudo[312659]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 01:59:21 compute-0 sudo[312659]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v635: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:21 compute-0 sudo[312730]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fgcgwbgskznltnrgxfrifgszorizweqh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147960.5612519-1130-73891252900112/AnsiballZ_command.py'
Oct 11 01:59:21 compute-0 sudo[312730]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:21 compute-0 python3.9[312732]: ansible-ansible.legacy.command Invoked with _raw_params=nft -j list ruleset _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:59:21 compute-0 sudo[312730]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:21 compute-0 podman[312786]: 2025-10-11 01:59:21.593204539 +0000 UTC m=+0.089236945 container create 6cfc653b69eca5e0f3b1560e98a9afa9e2b68ac90eeafee694c01ace871dc010 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_khorana, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:59:21 compute-0 podman[312786]: 2025-10-11 01:59:21.562218213 +0000 UTC m=+0.058250679 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:59:21 compute-0 systemd[1]: Started libpod-conmon-6cfc653b69eca5e0f3b1560e98a9afa9e2b68ac90eeafee694c01ace871dc010.scope.
Oct 11 01:59:21 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:59:21 compute-0 podman[312786]: 2025-10-11 01:59:21.745714117 +0000 UTC m=+0.241746573 container init 6cfc653b69eca5e0f3b1560e98a9afa9e2b68ac90eeafee694c01ace871dc010 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_khorana, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 01:59:21 compute-0 podman[312786]: 2025-10-11 01:59:21.762041682 +0000 UTC m=+0.258074078 container start 6cfc653b69eca5e0f3b1560e98a9afa9e2b68ac90eeafee694c01ace871dc010 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_khorana, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3)
Oct 11 01:59:21 compute-0 podman[312786]: 2025-10-11 01:59:21.768111611 +0000 UTC m=+0.264144037 container attach 6cfc653b69eca5e0f3b1560e98a9afa9e2b68ac90eeafee694c01ace871dc010 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_khorana, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 01:59:21 compute-0 boring_khorana[312817]: 167 167
Oct 11 01:59:21 compute-0 systemd[1]: libpod-6cfc653b69eca5e0f3b1560e98a9afa9e2b68ac90eeafee694c01ace871dc010.scope: Deactivated successfully.
Oct 11 01:59:21 compute-0 conmon[312817]: conmon 6cfc653b69eca5e0f3b1 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-6cfc653b69eca5e0f3b1560e98a9afa9e2b68ac90eeafee694c01ace871dc010.scope/container/memory.events
Oct 11 01:59:21 compute-0 podman[312786]: 2025-10-11 01:59:21.772100922 +0000 UTC m=+0.268133318 container died 6cfc653b69eca5e0f3b1560e98a9afa9e2b68ac90eeafee694c01ace871dc010 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_khorana, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:59:21 compute-0 systemd[1]: var-lib-containers-storage-overlay-f8edbccf6f697f173a69b3b2128ef8f233b62935a1cb2fb2cf481840dd6ba188-merged.mount: Deactivated successfully.
Oct 11 01:59:21 compute-0 podman[312786]: 2025-10-11 01:59:21.858654096 +0000 UTC m=+0.354686522 container remove 6cfc653b69eca5e0f3b1560e98a9afa9e2b68ac90eeafee694c01ace871dc010 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_khorana, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 01:59:21 compute-0 systemd[1]: libpod-conmon-6cfc653b69eca5e0f3b1560e98a9afa9e2b68ac90eeafee694c01ace871dc010.scope: Deactivated successfully.
Oct 11 01:59:22 compute-0 podman[312887]: 2025-10-11 01:59:22.156904301 +0000 UTC m=+0.086340568 container create d94c3a63c18b96c7e6e7f08845471c8ce9bc7666267d9cf8b48d474b4062fc76 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_clarke, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2)
Oct 11 01:59:22 compute-0 podman[312887]: 2025-10-11 01:59:22.113043938 +0000 UTC m=+0.042480255 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:59:22 compute-0 systemd[1]: Started libpod-conmon-d94c3a63c18b96c7e6e7f08845471c8ce9bc7666267d9cf8b48d474b4062fc76.scope.
Oct 11 01:59:22 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:59:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9b9f549c9a1117829443efc047a030ba188b93a64a8784ce69709283ace92875/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:59:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9b9f549c9a1117829443efc047a030ba188b93a64a8784ce69709283ace92875/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:59:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9b9f549c9a1117829443efc047a030ba188b93a64a8784ce69709283ace92875/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:59:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9b9f549c9a1117829443efc047a030ba188b93a64a8784ce69709283ace92875/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:59:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9b9f549c9a1117829443efc047a030ba188b93a64a8784ce69709283ace92875/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 01:59:22 compute-0 podman[312887]: 2025-10-11 01:59:22.321448269 +0000 UTC m=+0.250884616 container init d94c3a63c18b96c7e6e7f08845471c8ce9bc7666267d9cf8b48d474b4062fc76 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_clarke, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507)
Oct 11 01:59:22 compute-0 podman[312887]: 2025-10-11 01:59:22.34338848 +0000 UTC m=+0.272824747 container start d94c3a63c18b96c7e6e7f08845471c8ce9bc7666267d9cf8b48d474b4062fc76 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_clarke, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:59:22 compute-0 podman[312887]: 2025-10-11 01:59:22.349625177 +0000 UTC m=+0.279061474 container attach d94c3a63c18b96c7e6e7f08845471c8ce9bc7666267d9cf8b48d474b4062fc76 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_clarke, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 01:59:22 compute-0 sudo[312981]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jbagmxflhjkzuznvfqxnluynodwnjseo ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760147961.7917397-1138-206583291519901/AnsiballZ_edpm_nftables_from_files.py'
Oct 11 01:59:22 compute-0 sudo[312981]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:22 compute-0 ceph-mon[191930]: pgmap v635: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:22 compute-0 python3[312983]: ansible-edpm_nftables_from_files Invoked with src=/var/lib/edpm-config/firewall
Oct 11 01:59:22 compute-0 sudo[312981]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v636: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:23 compute-0 elastic_clarke[312932]: --> passed data devices: 0 physical, 3 LVM
Oct 11 01:59:23 compute-0 elastic_clarke[312932]: --> relative data size: 1.0
Oct 11 01:59:23 compute-0 elastic_clarke[312932]: --> All data devices are unavailable
Oct 11 01:59:23 compute-0 systemd[1]: libpod-d94c3a63c18b96c7e6e7f08845471c8ce9bc7666267d9cf8b48d474b4062fc76.scope: Deactivated successfully.
Oct 11 01:59:23 compute-0 podman[312887]: 2025-10-11 01:59:23.612319208 +0000 UTC m=+1.541755515 container died d94c3a63c18b96c7e6e7f08845471c8ce9bc7666267d9cf8b48d474b4062fc76 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_clarke, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2)
Oct 11 01:59:23 compute-0 systemd[1]: libpod-d94c3a63c18b96c7e6e7f08845471c8ce9bc7666267d9cf8b48d474b4062fc76.scope: Consumed 1.190s CPU time.
Oct 11 01:59:23 compute-0 systemd[1]: var-lib-containers-storage-overlay-9b9f549c9a1117829443efc047a030ba188b93a64a8784ce69709283ace92875-merged.mount: Deactivated successfully.
Oct 11 01:59:23 compute-0 sudo[313166]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mjcmafidolmunasakummmfyzzrawpmgq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147963.0638287-1146-189847302082765/AnsiballZ_stat.py'
Oct 11 01:59:23 compute-0 podman[312887]: 2025-10-11 01:59:23.719565933 +0000 UTC m=+1.649002210 container remove d94c3a63c18b96c7e6e7f08845471c8ce9bc7666267d9cf8b48d474b4062fc76 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_clarke, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:59:23 compute-0 sudo[313166]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:23 compute-0 systemd[1]: libpod-conmon-d94c3a63c18b96c7e6e7f08845471c8ce9bc7666267d9cf8b48d474b4062fc76.scope: Deactivated successfully.
Oct 11 01:59:23 compute-0 sudo[312659]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:23 compute-0 sudo[313172]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:59:23 compute-0 sudo[313172]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:23 compute-0 python3.9[313171]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:59:23 compute-0 sudo[313172]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:23 compute-0 sudo[313166]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:24 compute-0 sudo[313198]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:59:24 compute-0 sudo[313198]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:24 compute-0 sudo[313198]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:24 compute-0 sudo[313224]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:59:24 compute-0 sudo[313224]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:24 compute-0 sudo[313224]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:24 compute-0 sudo[313250]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 01:59:24 compute-0 sudo[313250]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:24 compute-0 podman[313248]: 2025-10-11 01:59:24.400276135 +0000 UTC m=+0.134791424 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
Oct 11 01:59:24 compute-0 ceph-mon[191930]: pgmap v636: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:59:24 compute-0 podman[313385]: 2025-10-11 01:59:24.893148148 +0000 UTC m=+0.081697728 container create 743dbee486c3ad7a33a24cb42f1fad306f7fc3f219acd6b2d57133a64b320f5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_poincare, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 01:59:24 compute-0 sudo[313415]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ylaqemysdvgcfektsiotuhrsysmynzwq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147963.0638287-1146-189847302082765/AnsiballZ_file.py'
Oct 11 01:59:24 compute-0 sudo[313415]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:24 compute-0 podman[313385]: 2025-10-11 01:59:24.863459952 +0000 UTC m=+0.052009542 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:59:24 compute-0 systemd[1]: Started libpod-conmon-743dbee486c3ad7a33a24cb42f1fad306f7fc3f219acd6b2d57133a64b320f5c.scope.
Oct 11 01:59:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v637: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:25 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:59:25 compute-0 podman[313385]: 2025-10-11 01:59:25.080464044 +0000 UTC m=+0.269013624 container init 743dbee486c3ad7a33a24cb42f1fad306f7fc3f219acd6b2d57133a64b320f5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_poincare, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:59:25 compute-0 podman[313385]: 2025-10-11 01:59:25.09945969 +0000 UTC m=+0.288009240 container start 743dbee486c3ad7a33a24cb42f1fad306f7fc3f219acd6b2d57133a64b320f5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_poincare, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:59:25 compute-0 trusting_poincare[313420]: 167 167
Oct 11 01:59:25 compute-0 systemd[1]: libpod-743dbee486c3ad7a33a24cb42f1fad306f7fc3f219acd6b2d57133a64b320f5c.scope: Deactivated successfully.
Oct 11 01:59:25 compute-0 podman[313385]: 2025-10-11 01:59:25.114553334 +0000 UTC m=+0.303102884 container attach 743dbee486c3ad7a33a24cb42f1fad306f7fc3f219acd6b2d57133a64b320f5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_poincare, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507)
Oct 11 01:59:25 compute-0 podman[313385]: 2025-10-11 01:59:25.127579359 +0000 UTC m=+0.316128909 container died 743dbee486c3ad7a33a24cb42f1fad306f7fc3f219acd6b2d57133a64b320f5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_poincare, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3)
Oct 11 01:59:25 compute-0 python3.9[313417]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-jumps.nft _original_basename=jump-chain.j2 recurse=False state=file path=/etc/nftables/edpm-jumps.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:59:25 compute-0 systemd[1]: var-lib-containers-storage-overlay-c6cc1a0f74f3e377368e881e4640266fe9f6a8117dd26479b4f89907518bf0e4-merged.mount: Deactivated successfully.
Oct 11 01:59:25 compute-0 sudo[313415]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:25 compute-0 podman[313385]: 2025-10-11 01:59:25.195820308 +0000 UTC m=+0.384369878 container remove 743dbee486c3ad7a33a24cb42f1fad306f7fc3f219acd6b2d57133a64b320f5c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_poincare, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2)
Oct 11 01:59:25 compute-0 systemd[1]: libpod-conmon-743dbee486c3ad7a33a24cb42f1fad306f7fc3f219acd6b2d57133a64b320f5c.scope: Deactivated successfully.
Oct 11 01:59:25 compute-0 podman[313467]: 2025-10-11 01:59:25.429754027 +0000 UTC m=+0.085690309 container create 7758ca21721a7241cacddf05da041412d825aa75e2a4d969373c310874af02b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_euler, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0)
Oct 11 01:59:25 compute-0 podman[313467]: 2025-10-11 01:59:25.400180809 +0000 UTC m=+0.056117061 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:59:25 compute-0 systemd[1]: Started libpod-conmon-7758ca21721a7241cacddf05da041412d825aa75e2a4d969373c310874af02b7.scope.
Oct 11 01:59:25 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:59:25 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6f942aa03a1acf5e9ab3faff24e54101ecffd4c06d3c09a0267ba4a331159a80/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:59:25 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6f942aa03a1acf5e9ab3faff24e54101ecffd4c06d3c09a0267ba4a331159a80/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:59:25 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6f942aa03a1acf5e9ab3faff24e54101ecffd4c06d3c09a0267ba4a331159a80/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:59:25 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6f942aa03a1acf5e9ab3faff24e54101ecffd4c06d3c09a0267ba4a331159a80/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:59:25 compute-0 podman[313467]: 2025-10-11 01:59:25.590964073 +0000 UTC m=+0.246900355 container init 7758ca21721a7241cacddf05da041412d825aa75e2a4d969373c310874af02b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_euler, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2)
Oct 11 01:59:25 compute-0 podman[313467]: 2025-10-11 01:59:25.618042807 +0000 UTC m=+0.273979079 container start 7758ca21721a7241cacddf05da041412d825aa75e2a4d969373c310874af02b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_euler, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 01:59:25 compute-0 podman[313467]: 2025-10-11 01:59:25.625115552 +0000 UTC m=+0.281051804 container attach 7758ca21721a7241cacddf05da041412d825aa75e2a4d969373c310874af02b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_euler, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0)
Oct 11 01:59:26 compute-0 wizardly_euler[313509]: {
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:     "0": [
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:         {
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "devices": [
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "/dev/loop3"
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             ],
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "lv_name": "ceph_lv0",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "lv_size": "21470642176",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "name": "ceph_lv0",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "tags": {
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.cluster_name": "ceph",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.crush_device_class": "",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.encrypted": "0",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.osd_id": "0",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.type": "block",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.vdo": "0"
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             },
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "type": "block",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "vg_name": "ceph_vg0"
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:         }
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:     ],
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:     "1": [
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:         {
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "devices": [
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "/dev/loop4"
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             ],
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "lv_name": "ceph_lv1",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "lv_size": "21470642176",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "name": "ceph_lv1",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "tags": {
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.cluster_name": "ceph",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.crush_device_class": "",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.encrypted": "0",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.osd_id": "1",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.type": "block",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.vdo": "0"
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             },
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "type": "block",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "vg_name": "ceph_vg1"
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:         }
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:     ],
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:     "2": [
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:         {
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "devices": [
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "/dev/loop5"
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             ],
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "lv_name": "ceph_lv2",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "lv_size": "21470642176",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "name": "ceph_lv2",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "tags": {
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.cluster_name": "ceph",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.crush_device_class": "",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.encrypted": "0",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.osd_id": "2",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.type": "block",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:                 "ceph.vdo": "0"
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             },
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "type": "block",
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:             "vg_name": "ceph_vg2"
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:         }
Oct 11 01:59:26 compute-0 wizardly_euler[313509]:     ]
Oct 11 01:59:26 compute-0 wizardly_euler[313509]: }
Oct 11 01:59:26 compute-0 systemd[1]: libpod-7758ca21721a7241cacddf05da041412d825aa75e2a4d969373c310874af02b7.scope: Deactivated successfully.
Oct 11 01:59:26 compute-0 podman[313467]: 2025-10-11 01:59:26.445515073 +0000 UTC m=+1.101451355 container died 7758ca21721a7241cacddf05da041412d825aa75e2a4d969373c310874af02b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_euler, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:59:26 compute-0 systemd[1]: var-lib-containers-storage-overlay-6f942aa03a1acf5e9ab3faff24e54101ecffd4c06d3c09a0267ba4a331159a80-merged.mount: Deactivated successfully.
Oct 11 01:59:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:59:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:59:26 compute-0 ceph-mon[191930]: pgmap v637: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:26 compute-0 podman[313467]: 2025-10-11 01:59:26.566072358 +0000 UTC m=+1.222008640 container remove 7758ca21721a7241cacddf05da041412d825aa75e2a4d969373c310874af02b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_euler, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 01:59:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:59:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:59:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:59:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:59:26 compute-0 systemd[1]: libpod-conmon-7758ca21721a7241cacddf05da041412d825aa75e2a4d969373c310874af02b7.scope: Deactivated successfully.
Oct 11 01:59:26 compute-0 sudo[313250]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:26 compute-0 sudo[313644]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hfuuznvmqzffyvxduitqofmcielmvgfz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147965.4648857-1158-261127478147294/AnsiballZ_stat.py'
Oct 11 01:59:26 compute-0 sudo[313644]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:26 compute-0 sudo[313621]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:59:26 compute-0 sudo[313621]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:26 compute-0 sudo[313621]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:26 compute-0 sudo[313659]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 01:59:26 compute-0 sudo[313659]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:26 compute-0 sudo[313659]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:26 compute-0 python3.9[313656]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-update-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:59:26 compute-0 sudo[313644]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v638: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:27 compute-0 sudo[313685]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:59:27 compute-0 sudo[313685]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:27 compute-0 sudo[313685]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:27 compute-0 sudo[313728]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 01:59:27 compute-0 sudo[313728]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:27 compute-0 sudo[313817]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iqiqiipwzetrymxxuqyefjdfxhjrcmal ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147965.4648857-1158-261127478147294/AnsiballZ_file.py'
Oct 11 01:59:27 compute-0 sudo[313817]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:27 compute-0 python3.9[313822]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-update-jumps.nft _original_basename=jump-chain.j2 recurse=False state=file path=/etc/nftables/edpm-update-jumps.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:59:27 compute-0 sudo[313817]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:27 compute-0 podman[313851]: 2025-10-11 01:59:27.802323275 +0000 UTC m=+0.091134577 container create b607b71c8ccfc679867376e6f53b83428a62ffe2ad4446daa4a4d51d13d8dbef (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_albattani, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 01:59:27 compute-0 podman[313851]: 2025-10-11 01:59:27.771381908 +0000 UTC m=+0.060193250 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:59:27 compute-0 systemd[1]: Started libpod-conmon-b607b71c8ccfc679867376e6f53b83428a62ffe2ad4446daa4a4d51d13d8dbef.scope.
Oct 11 01:59:27 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:59:27 compute-0 podman[313851]: 2025-10-11 01:59:27.962131993 +0000 UTC m=+0.250943355 container init b607b71c8ccfc679867376e6f53b83428a62ffe2ad4446daa4a4d51d13d8dbef (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_albattani, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 01:59:27 compute-0 podman[313851]: 2025-10-11 01:59:27.980762024 +0000 UTC m=+0.269573286 container start b607b71c8ccfc679867376e6f53b83428a62ffe2ad4446daa4a4d51d13d8dbef (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_albattani, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True)
Oct 11 01:59:27 compute-0 podman[313851]: 2025-10-11 01:59:27.984993471 +0000 UTC m=+0.273804763 container attach b607b71c8ccfc679867376e6f53b83428a62ffe2ad4446daa4a4d51d13d8dbef (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_albattani, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS)
Oct 11 01:59:27 compute-0 recursing_albattani[313891]: 167 167
Oct 11 01:59:27 compute-0 systemd[1]: libpod-b607b71c8ccfc679867376e6f53b83428a62ffe2ad4446daa4a4d51d13d8dbef.scope: Deactivated successfully.
Oct 11 01:59:27 compute-0 podman[313851]: 2025-10-11 01:59:27.9951201 +0000 UTC m=+0.283931412 container died b607b71c8ccfc679867376e6f53b83428a62ffe2ad4446daa4a4d51d13d8dbef (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_albattani, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:59:28 compute-0 systemd[1]: var-lib-containers-storage-overlay-3fb848e96f48737cb2b010faacb9bb2a70851259a3c8caab458642da06d4239e-merged.mount: Deactivated successfully.
Oct 11 01:59:28 compute-0 podman[313851]: 2025-10-11 01:59:28.058093967 +0000 UTC m=+0.346905259 container remove b607b71c8ccfc679867376e6f53b83428a62ffe2ad4446daa4a4d51d13d8dbef (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_albattani, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 01:59:28 compute-0 systemd[1]: libpod-conmon-b607b71c8ccfc679867376e6f53b83428a62ffe2ad4446daa4a4d51d13d8dbef.scope: Deactivated successfully.
Oct 11 01:59:28 compute-0 podman[313974]: 2025-10-11 01:59:28.320691817 +0000 UTC m=+0.080715153 container create 07f56d607316762e96b502bb5ddc6044911fd178c7c6655e38153777d0844c87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_perlman, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 01:59:28 compute-0 podman[313974]: 2025-10-11 01:59:28.288627457 +0000 UTC m=+0.048650883 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 01:59:28 compute-0 systemd[1]: Started libpod-conmon-07f56d607316762e96b502bb5ddc6044911fd178c7c6655e38153777d0844c87.scope.
Oct 11 01:59:28 compute-0 systemd[1]: Started libcrun container.
Oct 11 01:59:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bf2051f74cadfb57dad31c7526ce88cb9fdf49e0688cc9fcca861713ab8f418e/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 01:59:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bf2051f74cadfb57dad31c7526ce88cb9fdf49e0688cc9fcca861713ab8f418e/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 01:59:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bf2051f74cadfb57dad31c7526ce88cb9fdf49e0688cc9fcca861713ab8f418e/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 01:59:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bf2051f74cadfb57dad31c7526ce88cb9fdf49e0688cc9fcca861713ab8f418e/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 01:59:28 compute-0 podman[313974]: 2025-10-11 01:59:28.49484854 +0000 UTC m=+0.254871936 container init 07f56d607316762e96b502bb5ddc6044911fd178c7c6655e38153777d0844c87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_perlman, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:59:28 compute-0 podman[313974]: 2025-10-11 01:59:28.513443482 +0000 UTC m=+0.273466808 container start 07f56d607316762e96b502bb5ddc6044911fd178c7c6655e38153777d0844c87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_perlman, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 01:59:28 compute-0 podman[313974]: 2025-10-11 01:59:28.518046533 +0000 UTC m=+0.278069949 container attach 07f56d607316762e96b502bb5ddc6044911fd178c7c6655e38153777d0844c87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_perlman, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 01:59:28 compute-0 ceph-mon[191930]: pgmap v638: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:28 compute-0 sudo[314061]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tgtzripykdfhorygdvmwaahvzqxiuzpn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147968.037725-1170-221139585142007/AnsiballZ_stat.py'
Oct 11 01:59:28 compute-0 sudo[314061]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:28 compute-0 python3.9[314063]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-flushes.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:59:28 compute-0 sudo[314061]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v639: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:29 compute-0 sudo[314147]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eveyyqlsrrlymezvobweveuuvbidsfmm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147968.037725-1170-221139585142007/AnsiballZ_file.py'
Oct 11 01:59:29 compute-0 sudo[314147]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:29 compute-0 python3.9[314152]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-flushes.nft _original_basename=flush-chain.j2 recurse=False state=file path=/etc/nftables/edpm-flushes.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:59:29 compute-0 sudo[314147]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:29 compute-0 nice_perlman[314006]: {
Oct 11 01:59:29 compute-0 nice_perlman[314006]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 01:59:29 compute-0 nice_perlman[314006]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:59:29 compute-0 nice_perlman[314006]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 01:59:29 compute-0 nice_perlman[314006]:         "osd_id": 1,
Oct 11 01:59:29 compute-0 nice_perlman[314006]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 01:59:29 compute-0 nice_perlman[314006]:         "type": "bluestore"
Oct 11 01:59:29 compute-0 nice_perlman[314006]:     },
Oct 11 01:59:29 compute-0 nice_perlman[314006]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 01:59:29 compute-0 nice_perlman[314006]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:59:29 compute-0 nice_perlman[314006]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 01:59:29 compute-0 nice_perlman[314006]:         "osd_id": 2,
Oct 11 01:59:29 compute-0 nice_perlman[314006]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 01:59:29 compute-0 nice_perlman[314006]:         "type": "bluestore"
Oct 11 01:59:29 compute-0 nice_perlman[314006]:     },
Oct 11 01:59:29 compute-0 nice_perlman[314006]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 01:59:29 compute-0 nice_perlman[314006]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 01:59:29 compute-0 nice_perlman[314006]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 01:59:29 compute-0 nice_perlman[314006]:         "osd_id": 0,
Oct 11 01:59:29 compute-0 nice_perlman[314006]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 01:59:29 compute-0 nice_perlman[314006]:         "type": "bluestore"
Oct 11 01:59:29 compute-0 nice_perlman[314006]:     }
Oct 11 01:59:29 compute-0 nice_perlman[314006]: }
Oct 11 01:59:29 compute-0 podman[157119]: time="2025-10-11T01:59:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:59:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:59:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 37299 "" "Go-http-client/1.1"
Oct 11 01:59:29 compute-0 podman[157119]: @ - - [11/Oct/2025:01:59:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 7278 "" "Go-http-client/1.1"
Oct 11 01:59:29 compute-0 systemd[1]: libpod-07f56d607316762e96b502bb5ddc6044911fd178c7c6655e38153777d0844c87.scope: Deactivated successfully.
Oct 11 01:59:29 compute-0 systemd[1]: libpod-07f56d607316762e96b502bb5ddc6044911fd178c7c6655e38153777d0844c87.scope: Consumed 1.261s CPU time.
Oct 11 01:59:29 compute-0 podman[313974]: 2025-10-11 01:59:29.797692599 +0000 UTC m=+1.557715955 container died 07f56d607316762e96b502bb5ddc6044911fd178c7c6655e38153777d0844c87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_perlman, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS)
Oct 11 01:59:29 compute-0 systemd[1]: var-lib-containers-storage-overlay-bf2051f74cadfb57dad31c7526ce88cb9fdf49e0688cc9fcca861713ab8f418e-merged.mount: Deactivated successfully.
Oct 11 01:59:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:59:29 compute-0 podman[313974]: 2025-10-11 01:59:29.874615058 +0000 UTC m=+1.634638394 container remove 07f56d607316762e96b502bb5ddc6044911fd178c7c6655e38153777d0844c87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_perlman, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 01:59:29 compute-0 systemd[1]: libpod-conmon-07f56d607316762e96b502bb5ddc6044911fd178c7c6655e38153777d0844c87.scope: Deactivated successfully.
Oct 11 01:59:29 compute-0 sudo[313728]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 01:59:29 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:59:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 01:59:29 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:59:29 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev a787e54e-8e1a-4f00-8e7a-8cdda4c8f174 does not exist
Oct 11 01:59:29 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 8e09de89-89dd-4ab7-8b72-67559a8d2d99 does not exist
Oct 11 01:59:30 compute-0 sudo[314219]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 01:59:30 compute-0 sudo[314219]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:30 compute-0 sudo[314219]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:30 compute-0 sudo[314267]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 01:59:30 compute-0 sudo[314267]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 01:59:30 compute-0 sudo[314267]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:30 compute-0 sudo[314381]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oxxssbsyobzggujidotfyyizxrlbjdyw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147970.0075672-1182-81063235646010/AnsiballZ_stat.py'
Oct 11 01:59:30 compute-0 sudo[314381]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:30 compute-0 ceph-mon[191930]: pgmap v639: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:30 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:59:30 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 01:59:30 compute-0 python3.9[314383]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-chains.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:59:30 compute-0 sudo[314381]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v640: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:31 compute-0 sudo[314459]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wurtwucflbddfdvjqrlxsccgqumifuqg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147970.0075672-1182-81063235646010/AnsiballZ_file.py'
Oct 11 01:59:31 compute-0 sudo[314459]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:31 compute-0 openstack_network_exporter[159265]: ERROR   01:59:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:59:31 compute-0 openstack_network_exporter[159265]: ERROR   01:59:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 01:59:31 compute-0 openstack_network_exporter[159265]: ERROR   01:59:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 01:59:31 compute-0 openstack_network_exporter[159265]: ERROR   01:59:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 01:59:31 compute-0 openstack_network_exporter[159265]: ERROR   01:59:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
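These exporter errors recur throughout the log: openstack_network_exporter probes the OVS/OVN daemon control sockets, and on a compute node ovn-northd (a control-plane daemon) has none to offer, while the datapath queries fail because no userspace (dpif-netdev) datapath exists here. A sketch of the same probe, with the socket locations assumed from stock OVS/OVN packaging rather than taken from this log:

#!/usr/bin/env python3
# Sketch: look for the daemon control sockets the exporter probes.
# The glob patterns are assumptions based on conventional OVS/OVN
# runtime directories; the exporter maps /var/run/openvswitch and
# /var/lib/openvswitch/ovn into its container.
import glob

PROBES = {
    "ovn-northd": "/var/run/ovn/ovn-northd.*.ctl",
    "ovsdb-server": "/var/run/openvswitch/ovsdb-server.*.ctl",
    "ovs-vswitchd": "/var/run/openvswitch/ovs-vswitchd.*.ctl",
}

for daemon, pattern in PROBES.items():
    matches = glob.glob(pattern)
    print(f"{daemon:14s}", matches[0] if matches else "no control socket found")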
Oct 11 01:59:31 compute-0 podman[314461]: 2025-10-11 01:59:31.46465075 +0000 UTC m=+0.128754604 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 01:59:31 compute-0 podman[314462]: 2025-10-11 01:59:31.484748159 +0000 UTC m=+0.141988846 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.component=ubi9-container, distribution-scope=public, release=1214.1726694543, io.openshift.tags=base rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9, managed_by=edpm_ansible, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., io.openshift.expose-services=, maintainer=Red Hat, Inc., name=ubi9, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.29.0, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_id=edpm, version=9.4, release-0.7.12=, build-date=2024-09-18T21:23:30)
Oct 11 01:59:31 compute-0 podman[314463]: 2025-10-11 01:59:31.534946848 +0000 UTC m=+0.185446726 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, config_id=ovn_controller, org.label-schema.schema-version=1.0, container_name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3)
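Each health_status event above is podman running the healthcheck command configured for the container (the test script mounted at /openstack/healthcheck in each config_data). The same check can be triggered by hand; a sketch, using the ovn_controller name from the log:

#!/usr/bin/env python3
# Sketch: run a container's configured healthcheck on demand. This is
# the same check the periodic health_status events report on; exit
# code 0 means healthy. Container name taken from the log above.
import subprocess

result = subprocess.run(["podman", "healthcheck", "run", "ovn_controller"])
print("healthy" if result.returncode == 0
      else f"unhealthy (rc={result.returncode})")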
Oct 11 01:59:31 compute-0 python3.9[314464]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-chains.nft _original_basename=chains.j2 recurse=False state=file path=/etc/nftables/edpm-chains.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:59:31 compute-0 sudo[314459]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:32 compute-0 ceph-mon[191930]: pgmap v640: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:32 compute-0 sudo[314676]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-elcdpdutiiegfkqsycxgpmxggtwhbdfr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147971.98985-1194-215549729475971/AnsiballZ_stat.py'
Oct 11 01:59:32 compute-0 sudo[314676]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:32 compute-0 python3.9[314678]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-rules.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:59:32 compute-0 sudo[314676]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v641: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:33 compute-0 sudo[314754]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ymmsndkikcaefkmvnuywqchfopauvidv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147971.98985-1194-215549729475971/AnsiballZ_file.py'
Oct 11 01:59:33 compute-0 sudo[314754]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:33 compute-0 podman[314756]: 2025-10-11 01:59:33.523521014 +0000 UTC m=+0.127516643 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm, container_name=ceilometer_agent_compute)
Oct 11 01:59:33 compute-0 python3.9[314757]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-rules.nft _original_basename=ruleset.j2 recurse=False state=file path=/etc/nftables/edpm-rules.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:59:33 compute-0 sudo[314754]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:34 compute-0 sudo[314926]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dvyjklnactikdsertwspmrdpurfcncjn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147973.9886718-1207-93826153882744/AnsiballZ_command.py'
Oct 11 01:59:34 compute-0 sudo[314926]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:34 compute-0 ceph-mon[191930]: pgmap v641: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:34 compute-0 python3.9[314928]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail; cat /etc/nftables/edpm-chains.nft /etc/nftables/edpm-flushes.nft /etc/nftables/edpm-rules.nft /etc/nftables/edpm-update-jumps.nft /etc/nftables/edpm-jumps.nft | nft -c -f - _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
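The command above validates the assembled EDPM ruleset without applying it: the five fragment files are concatenated and fed to nft in check-only mode. A Python sketch of the same step:

#!/usr/bin/env python3
# Sketch of the validation step logged above: concatenate the EDPM
# nftables fragments and feed them to `nft -c -f -` (-c = check only;
# nothing is committed to the kernel ruleset).
import subprocess

FRAGMENTS = [
    "/etc/nftables/edpm-chains.nft",
    "/etc/nftables/edpm-flushes.nft",
    "/etc/nftables/edpm-rules.nft",
    "/etc/nftables/edpm-update-jumps.nft",
    "/etc/nftables/edpm-jumps.nft",
]

ruleset = b"".join(open(path, "rb").read() for path in FRAGMENTS)
result = subprocess.run(["nft", "-c", "-f", "-"], input=ruleset,
                        capture_output=True)
if result.returncode != 0:
    raise SystemExit("validation failed:\n" + result.stderr.decode())
print("ruleset parses cleanly")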
Oct 11 01:59:34 compute-0 sudo[314926]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:59:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v642: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:35 compute-0 sudo[315081]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dwkkqbizfspcqeskdbzwhputqslswflb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147975.0992928-1215-186397634860546/AnsiballZ_blockinfile.py'
Oct 11 01:59:35 compute-0 sudo[315081]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:36 compute-0 python3.9[315083]: ansible-ansible.builtin.blockinfile Invoked with backup=False block=include "/etc/nftables/iptables.nft"
                                             include "/etc/nftables/edpm-chains.nft"
                                             include "/etc/nftables/edpm-rules.nft"
                                             include "/etc/nftables/edpm-jumps.nft"
                                              path=/etc/sysconfig/nftables.conf validate=nft -c -f %s state=present marker=# {mark} ANSIBLE MANAGED BLOCK create=False marker_begin=BEGIN marker_end=END append_newline=False prepend_newline=False unsafe_writes=False insertafter=None insertbefore=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
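Given the module arguments above (marker '# {mark} ANSIBLE MANAGED BLOCK' with BEGIN/END marks and the four include lines), the block maintained in /etc/sysconfig/nftables.conf should render as follows, with the whole file checked through the validate command 'nft -c -f %s' before being moved into place:

# BEGIN ANSIBLE MANAGED BLOCK
include "/etc/nftables/iptables.nft"
include "/etc/nftables/edpm-chains.nft"
include "/etc/nftables/edpm-rules.nft"
include "/etc/nftables/edpm-jumps.nft"
# END ANSIBLE MANAGED BLOCK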
Oct 11 01:59:36 compute-0 sudo[315081]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:36 compute-0 ceph-mon[191930]: pgmap v642: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:36 compute-0 sudo[315233]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qadgvpuyfslpuwodgbijygziwwrfyqhf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147976.409124-1224-251607142275544/AnsiballZ_command.py'
Oct 11 01:59:36 compute-0 sudo[315233]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v643: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:37 compute-0 python3.9[315235]: ansible-ansible.legacy.command Invoked with _raw_params=nft -f /etc/nftables/edpm-chains.nft _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 01:59:37 compute-0 sudo[315233]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:38 compute-0 sudo[315386]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ixsineivewmmxyrwsakuoftuurnhfrgv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147977.49383-1232-226002984644608/AnsiballZ_stat.py'
Oct 11 01:59:38 compute-0 sudo[315386]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:38 compute-0 python3.9[315388]: ansible-ansible.builtin.stat Invoked with path=/etc/nftables/edpm-rules.nft.changed follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 01:59:38 compute-0 sudo[315386]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:38 compute-0 ceph-mon[191930]: pgmap v643: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v644: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:39 compute-0 sudo[315538]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lcqdxwzdjusxwtsbhsnutjckcvexjdso ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147978.6241143-1241-45232303273269/AnsiballZ_file.py'
Oct 11 01:59:39 compute-0 sudo[315538]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:39 compute-0 python3.9[315540]: ansible-ansible.builtin.file Invoked with path=/etc/nftables/edpm-rules.nft.changed state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:59:39 compute-0 sudo[315538]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:39 compute-0 ceph-mon[191930]: pgmap v644: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:59:40 compute-0 sudo[315690]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rlaqmpslknlgqcdwnululzaqfjmcixpg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147979.878627-1249-151296984729371/AnsiballZ_stat.py'
Oct 11 01:59:40 compute-0 sudo[315690]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:40 compute-0 python3.9[315692]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/edpm_libvirt.target follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:59:40 compute-0 sudo[315690]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v645: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:41 compute-0 sudo[315768]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xhvptzjtzflvtvxupyshzndcqkpztkxh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147979.878627-1249-151296984729371/AnsiballZ_file.py'
Oct 11 01:59:41 compute-0 sudo[315768]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:41 compute-0 python3.9[315770]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/etc/systemd/system/edpm_libvirt.target _original_basename=edpm_libvirt.target recurse=False state=file path=/etc/systemd/system/edpm_libvirt.target force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:59:41 compute-0 sudo[315768]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:42 compute-0 ceph-mon[191930]: pgmap v645: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:42 compute-0 sudo[315920]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lqtiycjldtapeksapapjdjzyroldfmtx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147981.6976976-1261-176976649108608/AnsiballZ_stat.py'
Oct 11 01:59:42 compute-0 sudo[315920]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:42 compute-0 python3.9[315922]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/edpm_libvirt_guests.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:59:42 compute-0 sudo[315920]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:43 compute-0 sudo[315998]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hussnyorhgtashtapjiefsvqvoxgvgxr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147981.6976976-1261-176976649108608/AnsiballZ_file.py'
Oct 11 01:59:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v646: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:43 compute-0 sudo[315998]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:43 compute-0 python3.9[316000]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/etc/systemd/system/edpm_libvirt_guests.service _original_basename=edpm_libvirt_guests.service recurse=False state=file path=/etc/systemd/system/edpm_libvirt_guests.service force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:59:43 compute-0 sudo[315998]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:44 compute-0 ceph-mon[191930]: pgmap v646: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:44 compute-0 sudo[316150]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gmzlmqozggawczaaddhehsuuzumqrrwb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147983.5646946-1273-165676363162533/AnsiballZ_stat.py'
Oct 11 01:59:44 compute-0 sudo[316150]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:44 compute-0 python3.9[316152]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/virt-guest-shutdown.target follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 01:59:44 compute-0 sudo[316150]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:59:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v647: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:45 compute-0 sudo[316228]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bvksubnpjwlnqrrzfbsrhuvujfhmaaji ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147983.5646946-1273-165676363162533/AnsiballZ_file.py'
Oct 11 01:59:45 compute-0 sudo[316228]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:45 compute-0 python3.9[316230]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/etc/systemd/system/virt-guest-shutdown.target _original_basename=virt-guest-shutdown.target recurse=False state=file path=/etc/systemd/system/virt-guest-shutdown.target force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 01:59:45 compute-0 sudo[316228]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:46 compute-0 ceph-mon[191930]: pgmap v647: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:46 compute-0 sshd-session[286737]: Connection closed by 192.168.122.30 port 46324
Oct 11 01:59:46 compute-0 sshd-session[286734]: pam_unix(sshd:session): session closed for user zuul
Oct 11 01:59:46 compute-0 systemd[1]: session-55.scope: Deactivated successfully.
Oct 11 01:59:46 compute-0 systemd[1]: session-55.scope: Consumed 3min 9.978s CPU time.
Oct 11 01:59:46 compute-0 systemd-logind[804]: Session 55 logged out. Waiting for processes to exit.
Oct 11 01:59:46 compute-0 systemd-logind[804]: Removed session 55.
Oct 11 01:59:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v648: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:48 compute-0 ceph-mon[191930]: pgmap v648: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:48 compute-0 podman[316255]: 2025-10-11 01:59:48.244152516 +0000 UTC m=+0.122520807 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 01:59:48 compute-0 podman[316256]: 2025-10-11 01:59:48.27063823 +0000 UTC m=+0.149273307 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.tags=minimal rhel9, name=ubi9-minimal, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, vcs-type=git, container_name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., managed_by=edpm_ansible, version=9.6, com.redhat.component=ubi9-minimal-container, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, config_id=edpm, build-date=2025-08-20T13:12:41, io.buildah.version=1.33.7, io.openshift.expose-services=, distribution-scope=public, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers)
Oct 11 01:59:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v649: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:59:50 compute-0 ceph-mon[191930]: pgmap v649: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v650: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:51 compute-0 podman[316298]: 2025-10-11 01:59:51.272803445 +0000 UTC m=+0.160053896 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_id=edpm, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 01:59:52 compute-0 sshd-session[316317]: Accepted publickey for zuul from 192.168.122.30 port 43262 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 01:59:52 compute-0 systemd-logind[804]: New session 56 of user zuul.
Oct 11 01:59:52 compute-0 systemd[1]: Started Session 56 of User zuul.
Oct 11 01:59:52 compute-0 sshd-session[316317]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 01:59:52 compute-0 ceph-mon[191930]: pgmap v650: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v651: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:53 compute-0 python3.9[316470]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 01:59:54 compute-0 ceph-mon[191930]: pgmap v651: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:59:54.813 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 01:59:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:59:54.814 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 01:59:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 01:59:54.814 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 01:59:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 01:59:54 compute-0 sudo[316641]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qnkpdmqlwesurmtkrfwlmgduuqptxbnz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147994.2266107-34-37471855896760/AnsiballZ_file.py'
Oct 11 01:59:54 compute-0 podman[316598]: 2025-10-11 01:59:54.962742719 +0000 UTC m=+0.110850911 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, container_name=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, config_id=ovn_metadata_agent, org.label-schema.license=GPLv2, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Oct 11 01:59:54 compute-0 sudo[316641]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v652: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:55 compute-0 python3.9[316645]: ansible-ansible.builtin.file Invoked with mode=0755 path=/etc/iscsi setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:59:55 compute-0 sudo[316641]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:56 compute-0 sudo[316795]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wapbyjlmimgvsibatceuplllwoidnmny ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147995.4503448-34-4622182626600/AnsiballZ_file.py'
Oct 11 01:59:56 compute-0 sudo[316795]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:56 compute-0 ceph-mon[191930]: pgmap v652: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:56 compute-0 python3.9[316797]: ansible-ansible.builtin.file Invoked with path=/etc/target setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:59:56 compute-0 sudo[316795]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_01:59:56
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['vms', 'default.rgw.meta', 'default.rgw.log', 'cephfs.cephfs.data', '.rgw.root', 'images', '.mgr', 'cephfs.cephfs.meta', 'volumes', 'backups', 'default.rgw.control']
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
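The balancer pass above ran in upmap mode against the listed pools and prepared 0 of a possible 10 changes, meaning the PG distribution already needed no optimization. A hedged sketch for inspecting the same state with the stock ceph CLI (the JSON field names are an assumption and may differ across Ceph releases):

#!/usr/bin/env python3
# Sketch: query balancer state with the ceph CLI; assumes an admin
# keyring is available on the node.
import json
import subprocess

out = subprocess.run(["ceph", "balancer", "status", "--format", "json"],
                     capture_output=True, check=True, text=True)
status = json.loads(out.stdout)
print("mode:  ", status.get("mode"))     # e.g. "upmap", as in the log
print("active:", status.get("active"))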
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:59:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 01:59:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v653: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:57 compute-0 sudo[316947]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-plmeqdyfbbsevpdxcxsffpighlsfiiou ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147996.5601878-34-115469785085854/AnsiballZ_file.py'
Oct 11 01:59:57 compute-0 sudo[316947]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:57 compute-0 python3.9[316949]: ansible-ansible.builtin.file Invoked with path=/var/lib/iscsi setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 01:59:57 compute-0 sudo[316947]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:58 compute-0 ceph-mon[191930]: pgmap v653: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:58 compute-0 sudo[317099]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dwmuaqvhbslzqpbswegnebzlnlzftwly ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147998.2693048-34-168792757840084/AnsiballZ_file.py'
Oct 11 01:59:58 compute-0 sudo[317099]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 01:59:58 compute-0 python3.9[317101]: ansible-ansible.builtin.file Invoked with mode=0755 path=/var/lib/config-data selevel=s0 setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None attributes=None
Oct 11 01:59:59 compute-0 sudo[317099]: pam_unix(sudo:session): session closed for user root
Oct 11 01:59:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v654: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 01:59:59 compute-0 podman[157119]: time="2025-10-11T01:59:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 01:59:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:59:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 35732 "" "Go-http-client/1.1"
Oct 11 01:59:59 compute-0 podman[157119]: @ - - [11/Oct/2025:01:59:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 7268 "" "Go-http-client/1.1"
Oct 11 01:59:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:00:00 compute-0 ceph-mon[191930]: pgmap v654: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:00 compute-0 sudo[317251]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-juifcniaryytywvfpbfgbgbcvpcavywz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760147999.2729104-34-110680936677371/AnsiballZ_file.py'
Oct 11 02:00:00 compute-0 sudo[317251]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:00 compute-0 python3.9[317253]: ansible-ansible.builtin.file Invoked with mode=0755 path=/var/lib/config-data/ansible-generated/iscsid setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:00:00 compute-0 sudo[317251]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v655: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:01 compute-0 openstack_network_exporter[159265]: ERROR   02:00:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:00:01 compute-0 openstack_network_exporter[159265]: ERROR   02:00:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:00:01 compute-0 openstack_network_exporter[159265]: ERROR   02:00:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:00:01 compute-0 openstack_network_exporter[159265]: ERROR   02:00:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:00:01 compute-0 openstack_network_exporter[159265]: ERROR   02:00:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:00:01 compute-0 sudo[317449]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-geeulenrwgyvnlptizxvoqrkqchkmaqu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148001.0063937-70-158101691330296/AnsiballZ_stat.py'
Oct 11 02:00:01 compute-0 sudo[317449]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:01 compute-0 podman[317377]: 2025-10-11 02:00:01.924360935 +0000 UTC m=+0.122651278 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:00:01 compute-0 podman[317379]: 2025-10-11 02:00:01.932957405 +0000 UTC m=+0.118349574 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, name=ubi9, com.redhat.component=ubi9-container, config_id=edpm, container_name=kepler, io.k8s.display-name=Red Hat Universal Base Image 9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, version=9.4, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, vendor=Red Hat, Inc., build-date=2024-09-18T21:23:30, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=base rhel9, managed_by=edpm_ansible, distribution-scope=public, io.buildah.version=1.29.0, release=1214.1726694543, release-0.7.12=, summary=Provides the latest release of Red Hat Universal Base Image 9.)
Oct 11 02:00:01 compute-0 podman[317378]: 2025-10-11 02:00:01.96846777 +0000 UTC m=+0.159921843 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']})
Oct 11 02:00:02 compute-0 python3.9[317467]: ansible-ansible.builtin.stat Invoked with path=/lib/systemd/system/iscsid.socket follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:00:02 compute-0 sudo[317449]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:02 compute-0 ceph-mon[191930]: pgmap v655: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v656: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:03 compute-0 sudo[317626]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lgewzxcsdeitstyzzdeohexzmtacbqnb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148002.4531965-78-93665098382862/AnsiballZ_systemd.py'
Oct 11 02:00:03 compute-0 sudo[317626]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:03 compute-0 python3.9[317628]: ansible-ansible.builtin.systemd Invoked with enabled=False name=iscsid.socket state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
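The systemd module call above (state=stopped, enabled=False) is roughly what a single 'systemctl disable --now iscsid.socket' does; the "Reloading." line that follows is systemd re-reading unit files after the enablement change. A sketch of the equivalent:

#!/usr/bin/env python3
# Rough shell equivalent of the ansible.builtin.systemd call above:
# stop the socket unit and remove its enablement symlinks in one step.
import subprocess

subprocess.run(["systemctl", "disable", "--now", "iscsid.socket"],
               check=True)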
Oct 11 02:00:03 compute-0 systemd[1]: Reloading.
Oct 11 02:00:03 compute-0 podman[317630]: 2025-10-11 02:00:03.809726694 +0000 UTC m=+0.139291009 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007)
Oct 11 02:00:03 compute-0 systemd-sysv-generator[317680]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:00:03 compute-0 systemd-rc-local-generator[317674]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:00:04 compute-0 ceph-mon[191930]: pgmap v656: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:04 compute-0 sudo[317626]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:00:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v657: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:05 compute-0 sudo[317835]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zokrcnsfsaytpotvphoscdtmkhatmwkx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148004.5743625-86-261832206659090/AnsiballZ_service_facts.py'
Oct 11 02:00:05 compute-0 sudo[317835]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:05 compute-0 python3.9[317837]: ansible-ansible.builtin.service_facts Invoked
Oct 11 02:00:05 compute-0 network[317854]: You are using 'network' service provided by 'network-scripts', which are now deprecated.
Oct 11 02:00:05 compute-0 network[317855]: 'network-scripts' will be removed from distribution in near future.
Oct 11 02:00:05 compute-0 network[317856]: It is advised to switch to 'NetworkManager' instead for network management.
Oct 11 02:00:06 compute-0 ceph-mon[191930]: pgmap v657: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:00:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:00:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v658: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:08 compute-0 ceph-mon[191930]: pgmap v658: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v659: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:00:10 compute-0 ceph-mon[191930]: pgmap v659: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v660: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:11 compute-0 sudo[317835]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:12 compute-0 ceph-mon[191930]: pgmap v660: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v661: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:13 compute-0 sudo[318128]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xwxdhkhqssvtobuyqgxuayernxmhaava ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148012.6682186-94-692397541362/AnsiballZ_systemd.py'
Oct 11 02:00:13 compute-0 sudo[318128]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:13 compute-0 python3.9[318130]: ansible-ansible.builtin.systemd Invoked with enabled=False name=iscsi-starter.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:00:13 compute-0 systemd[1]: Reloading.
Oct 11 02:00:13 compute-0 systemd-rc-local-generator[318161]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:00:13 compute-0 systemd-sysv-generator[318164]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:00:14 compute-0 sudo[318128]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:14 compute-0 ceph-mon[191930]: pgmap v661: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:00:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v662: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:15 compute-0 python3.9[318319]: ansible-ansible.builtin.stat Invoked with path=/etc/iscsi/.initiator_reset follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:00:16 compute-0 ceph-mon[191930]: pgmap v662: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:16 compute-0 sudo[318469]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xpvdfzqrfofnhqrzqxqbhtxnteltweam ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148015.6704245-111-77242336189/AnsiballZ_podman_container.py'
Oct 11 02:00:16 compute-0 sudo[318469]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:16 compute-0 python3.9[318471]: ansible-containers.podman.podman_container Invoked with command=/usr/sbin/iscsi-iname detach=False image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified name=iscsid_config rm=True tty=True executable=podman state=started debug=False force_restart=False force_delete=True generate_systemd={} image_strict=False recreate=False annotation=None arch=None attach=None authfile=None blkio_weight=None blkio_weight_device=None cap_add=None cap_drop=None cgroup_conf=None cgroup_parent=None cgroupns=None cgroups=None chrootdirs=None cidfile=None cmd_args=None conmon_pidfile=None cpu_period=None cpu_quota=None cpu_rt_period=None cpu_rt_runtime=None cpu_shares=None cpus=None cpuset_cpus=None cpuset_mems=None decryption_key=None delete_depend=None delete_time=None delete_volumes=None detach_keys=None device=None device_cgroup_rule=None device_read_bps=None device_read_iops=None device_write_bps=None device_write_iops=None dns=None dns_option=None dns_search=None entrypoint=None env=None env_file=None env_host=None env_merge=None etc_hosts=None expose=None gidmap=None gpus=None group_add=None group_entry=None healthcheck=None healthcheck_interval=None healthcheck_retries=None healthcheck_start_period=None health_startup_cmd=None health_startup_interval=None health_startup_retries=None health_startup_success=None health_startup_timeout=None healthcheck_timeout=None healthcheck_failure_action=None hooks_dir=None hostname=None hostuser=None http_proxy=None image_volume=None init=None init_ctr=None init_path=None interactive=None ip=None ip6=None ipc=None kernel_memory=None label=None label_file=None log_driver=None log_level=None log_opt=None mac_address=None memory=None memory_reservation=None memory_swap=None memory_swappiness=None mount=None network=None network_aliases=None no_healthcheck=None no_hosts=None oom_kill_disable=None oom_score_adj=None os=None passwd=None passwd_entry=None personality=None pid=None pid_file=None pids_limit=None platform=None pod=None pod_id_file=None preserve_fd=None preserve_fds=None privileged=None publish=None publish_all=None pull=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None rdt_class=None read_only=None read_only_tmpfs=None requires=None restart_policy=None restart_time=None retry=None retry_delay=None rmi=None rootfs=None seccomp_policy=None secrets=NOT_LOGGING_PARAMETER sdnotify=None security_opt=None shm_size=None shm_size_systemd=None sig_proxy=None stop_signal=None stop_timeout=None stop_time=None subgidname=None subuidname=None sysctl=None systemd=None timeout=None timezone=None tls_verify=None tmpfs=None uidmap=None ulimit=None umask=None unsetenv=None unsetenv_all=None user=None userns=None uts=None variant=None volume=None volumes_from=None workdir=None
Oct 11 02:00:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v663: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:17 compute-0 rsyslogd[187706]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 02:00:18 compute-0 ceph-mon[191930]: pgmap v663: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:18 compute-0 podman[318482]: 2025-10-11 02:00:18.903663348 +0000 UTC m=+2.142552488 image pull 5773abc4300b61c01f3353a0b9239f9a404bb272790b280574e4c56f72edaa72 quay.io/podified-antelope-centos9/openstack-iscsid:current-podified
Oct 11 02:00:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v664: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:19 compute-0 podman[318535]: 2025-10-11 02:00:19.111110121 +0000 UTC m=+0.069991601 container create 3c6568fcf2af5bbd334c534e0fbd3ff5a48e9de5198b048d6c06486ebbfca70f (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid_config, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 02:00:19 compute-0 podman[318535]: 2025-10-11 02:00:19.075093524 +0000 UTC m=+0.033975084 image pull 5773abc4300b61c01f3353a0b9239f9a404bb272790b280574e4c56f72edaa72 quay.io/podified-antelope-centos9/openstack-iscsid:current-podified
Oct 11 02:00:19 compute-0 NetworkManager[44908]: <info>  [1760148019.1775] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/21)
Oct 11 02:00:19 compute-0 kernel: podman0: port 1(veth0) entered blocking state
Oct 11 02:00:19 compute-0 kernel: podman0: port 1(veth0) entered disabled state
Oct 11 02:00:19 compute-0 kernel: veth0: entered allmulticast mode
Oct 11 02:00:19 compute-0 kernel: veth0: entered promiscuous mode
Oct 11 02:00:19 compute-0 kernel: podman0: port 1(veth0) entered blocking state
Oct 11 02:00:19 compute-0 kernel: podman0: port 1(veth0) entered forwarding state
Oct 11 02:00:19 compute-0 NetworkManager[44908]: <info>  [1760148019.2044] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/22)
Oct 11 02:00:19 compute-0 NetworkManager[44908]: <info>  [1760148019.2054] device (veth0): carrier: link connected
Oct 11 02:00:19 compute-0 NetworkManager[44908]: <info>  [1760148019.2057] device (podman0): carrier: link connected
Oct 11 02:00:19 compute-0 podman[318548]: 2025-10-11 02:00:19.222894346 +0000 UTC m=+0.118594338 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.tags=minimal rhel9, managed_by=edpm_ansible, vendor=Red Hat, Inc., architecture=x86_64, url=https://catalog.redhat.com/en/search?searchType=containers, build-date=2025-08-20T13:12:41, maintainer=Red Hat, Inc., config_id=edpm, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, version=9.6, com.redhat.component=ubi9-minimal-container, distribution-scope=public, container_name=openstack_network_exporter, vcs-type=git, io.buildah.version=1.33.7, name=ubi9-minimal, release=1755695350, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 02:00:19 compute-0 systemd-udevd[318602]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:00:19 compute-0 systemd-udevd[318605]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:00:19 compute-0 podman[318547]: 2025-10-11 02:00:19.24229727 +0000 UTC m=+0.137967534 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:00:19 compute-0 NetworkManager[44908]: <info>  [1760148019.2551] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 02:00:19 compute-0 NetworkManager[44908]: <info>  [1760148019.2566] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external')
Oct 11 02:00:19 compute-0 NetworkManager[44908]: <info>  [1760148019.2576] device (podman0): Activation: starting connection 'podman0' (8a2937b6-dba6-4b95-a958-397c470729ad)
Oct 11 02:00:19 compute-0 NetworkManager[44908]: <info>  [1760148019.2580] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external')
Oct 11 02:00:19 compute-0 NetworkManager[44908]: <info>  [1760148019.2611] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external')
Oct 11 02:00:19 compute-0 NetworkManager[44908]: <info>  [1760148019.2614] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external')
Oct 11 02:00:19 compute-0 NetworkManager[44908]: <info>  [1760148019.2619] device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external')
Oct 11 02:00:19 compute-0 systemd[1]: Starting Network Manager Script Dispatcher Service...
Oct 11 02:00:19 compute-0 systemd[1]: Started Network Manager Script Dispatcher Service.
Oct 11 02:00:19 compute-0 NetworkManager[44908]: <info>  [1760148019.3002] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external')
Oct 11 02:00:19 compute-0 NetworkManager[44908]: <info>  [1760148019.3007] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external')
Oct 11 02:00:19 compute-0 NetworkManager[44908]: <info>  [1760148019.3018] device (podman0): Activation: successful, device activated.
Oct 11 02:00:19 compute-0 systemd[1]: iscsi.service: Unit cannot be reloaded because it is inactive.
Oct 11 02:00:19 compute-0 systemd[1]: Started libpod-conmon-3c6568fcf2af5bbd334c534e0fbd3ff5a48e9de5198b048d6c06486ebbfca70f.scope.
Oct 11 02:00:19 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:00:19 compute-0 podman[318535]: 2025-10-11 02:00:19.758708598 +0000 UTC m=+0.717590098 container init 3c6568fcf2af5bbd334c534e0fbd3ff5a48e9de5198b048d6c06486ebbfca70f (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid_config, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
Oct 11 02:00:19 compute-0 podman[318535]: 2025-10-11 02:00:19.77884618 +0000 UTC m=+0.737727670 container start 3c6568fcf2af5bbd334c534e0fbd3ff5a48e9de5198b048d6c06486ebbfca70f (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid_config, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 02:00:19 compute-0 podman[318535]: 2025-10-11 02:00:19.784218794 +0000 UTC m=+0.743100294 container attach 3c6568fcf2af5bbd334c534e0fbd3ff5a48e9de5198b048d6c06486ebbfca70f (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid_config, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:00:19 compute-0 iscsid_config[318736]: iqn.1994-05.com.redhat:30f82dd389a0
Oct 11 02:00:19 compute-0 systemd[1]: libpod-3c6568fcf2af5bbd334c534e0fbd3ff5a48e9de5198b048d6c06486ebbfca70f.scope: Deactivated successfully.
Oct 11 02:00:19 compute-0 conmon[318736]: conmon 3c6568fcf2af5bbd334c <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-3c6568fcf2af5bbd334c534e0fbd3ff5a48e9de5198b048d6c06486ebbfca70f.scope/container/memory.events
Oct 11 02:00:19 compute-0 podman[318535]: 2025-10-11 02:00:19.793714082 +0000 UTC m=+0.752595592 container died 3c6568fcf2af5bbd334c534e0fbd3ff5a48e9de5198b048d6c06486ebbfca70f (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid_config, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:00:19 compute-0 kernel: podman0: port 1(veth0) entered disabled state
Oct 11 02:00:19 compute-0 kernel: veth0 (unregistering): left allmulticast mode
Oct 11 02:00:19 compute-0 kernel: veth0 (unregistering): left promiscuous mode
Oct 11 02:00:19 compute-0 kernel: podman0: port 1(veth0) entered disabled state
Oct 11 02:00:19 compute-0 NetworkManager[44908]: <info>  [1760148019.8651] device (podman0): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Oct 11 02:00:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:00:20 compute-0 systemd[1]: run-netns-netns\x2dedaf26dd\x2d9ba1\x2d2a1b\x2dfd31\x2dfd4772aedabb.mount: Deactivated successfully.
Oct 11 02:00:20 compute-0 systemd[1]: var-lib-containers-storage-overlay-a7dd21c536465b7def09871bbd7f5a78efb698024ba95b56de47b714a2659408-merged.mount: Deactivated successfully.
Oct 11 02:00:20 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-3c6568fcf2af5bbd334c534e0fbd3ff5a48e9de5198b048d6c06486ebbfca70f-userdata-shm.mount: Deactivated successfully.
Oct 11 02:00:20 compute-0 podman[318535]: 2025-10-11 02:00:20.321741458 +0000 UTC m=+1.280622938 container remove 3c6568fcf2af5bbd334c534e0fbd3ff5a48e9de5198b048d6c06486ebbfca70f (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid_config, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:00:20 compute-0 systemd[1]: libpod-conmon-3c6568fcf2af5bbd334c534e0fbd3ff5a48e9de5198b048d6c06486ebbfca70f.scope: Deactivated successfully.
Oct 11 02:00:20 compute-0 python3.9[318471]: ansible-containers.podman.podman_container PODMAN-CONTAINER-DEBUG: podman run --name iscsid_config --detach=False --rm --tty=True quay.io/podified-antelope-centos9/openstack-iscsid:current-podified /usr/sbin/iscsi-iname
Oct 11 02:00:20 compute-0 ceph-mon[191930]: pgmap v664: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:20 compute-0 python3.9[318471]: ansible-containers.podman.podman_container PODMAN-CONTAINER-DEBUG: Error generating systemd: 
                                             DEPRECATED command:
                                             It is recommended to use Quadlets for running containers and pods under systemd.
                                             
                                             Please refer to podman-systemd.unit(5) for details.
                                             Error: iscsid_config does not refer to a container or pod: no pod with name or ID iscsid_config found: no such pod: no container with name or ID "iscsid_config" found: no such container
Oct 11 02:00:20 compute-0 sudo[318469]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v665: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:21 compute-0 sudo[318971]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pbddccttjpochdkgvllorpgshooaylxu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148020.8295097-119-36606963754537/AnsiballZ_stat.py'
Oct 11 02:00:21 compute-0 sudo[318971]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:21 compute-0 podman[318973]: 2025-10-11 02:00:21.606042906 +0000 UTC m=+0.159596512 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, tcib_managed=true, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, config_id=edpm)
Oct 11 02:00:21 compute-0 python3.9[318974]: ansible-ansible.legacy.stat Invoked with path=/etc/iscsi/initiatorname.iscsi follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:00:21 compute-0 sudo[318971]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:22 compute-0 ceph-mon[191930]: pgmap v665: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:22 compute-0 sudo[319115]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ajtjqrptrubvtkvxsfdtnntaqnlxgbnj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148020.8295097-119-36606963754537/AnsiballZ_copy.py'
Oct 11 02:00:22 compute-0 sudo[319115]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:22 compute-0 python3.9[319117]: ansible-ansible.legacy.copy Invoked with dest=/etc/iscsi/initiatorname.iscsi mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760148020.8295097-119-36606963754537/.source.iscsi _original_basename=.1lutwoqm follow=False checksum=b1a3ee39018707f730c4f9148c445bf3bff72599 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:00:23 compute-0 sudo[319115]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v666: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:24 compute-0 sudo[319267]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tryxctbmbopozpxglokehrdfsnkecsyl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148023.2759824-134-99591337210689/AnsiballZ_file.py'
Oct 11 02:00:24 compute-0 sudo[319267]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:24 compute-0 ceph-mon[191930]: pgmap v666: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:24 compute-0 python3.9[319269]: ansible-ansible.builtin.file Invoked with mode=0600 path=/etc/iscsi/.initiator_reset state=touch recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:00:24 compute-0 sudo[319267]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:00:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v667: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:25 compute-0 podman[319352]: 2025-10-11 02:00:25.234563632 +0000 UTC m=+0.119451361 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_metadata_agent, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_managed=true, config_id=ovn_metadata_agent, managed_by=edpm_ansible)
Oct 11 02:00:25 compute-0 python3.9[319438]: ansible-ansible.builtin.stat Invoked with path=/etc/iscsi/iscsid.conf follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:00:26 compute-0 ceph-mon[191930]: pgmap v667: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:00:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:00:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:00:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:00:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:00:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:00:26 compute-0 sudo[319590]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iigxvjsmveojyybwqmsbfqskgfxqwrmb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148026.0233176-151-124960777002346/AnsiballZ_lineinfile.py'
Oct 11 02:00:26 compute-0 sudo[319590]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:26 compute-0 python3.9[319592]: ansible-ansible.builtin.lineinfile Invoked with insertafter=^#node.session.auth.chap.algs line=node.session.auth.chap_algs = SHA3-256,SHA256,SHA1,MD5 path=/etc/iscsi/iscsid.conf regexp=^node.session.auth.chap_algs state=present backrefs=False create=False backup=False firstmatch=False unsafe_writes=False search_string=None insertbefore=None validate=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:00:27 compute-0 sudo[319590]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v668: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:28 compute-0 sudo[319742]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qniujpzrfclqkbtjkqcbnffjtdafrysa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148027.4193687-160-248148965183928/AnsiballZ_file.py'
Oct 11 02:00:28 compute-0 sudo[319742]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:28 compute-0 python3.9[319744]: ansible-ansible.builtin.file Invoked with path=/var/local/libexec recurse=True setype=container_file_t state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:00:28 compute-0 sudo[319742]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:28 compute-0 ceph-mon[191930]: pgmap v668: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v669: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:29 compute-0 sudo[319894]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-knnzbmhejhlfjnefkrpzytpdkgdlmwqi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148028.519176-168-154723936844447/AnsiballZ_stat.py'
Oct 11 02:00:29 compute-0 sudo[319894]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:29 compute-0 python3.9[319896]: ansible-ansible.legacy.stat Invoked with path=/var/local/libexec/edpm-container-shutdown follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:00:29 compute-0 sudo[319894]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:29 compute-0 podman[157119]: time="2025-10-11T02:00:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:00:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:00:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 35732 "" "Go-http-client/1.1"
Oct 11 02:00:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:00:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 7261 "" "Go-http-client/1.1"
Oct 11 02:00:29 compute-0 sudo[319972]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dofurtuyyoxbuixludgdndvqhhxoksud ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148028.519176-168-154723936844447/AnsiballZ_file.py'
Oct 11 02:00:29 compute-0 sudo[319972]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:00:29 compute-0 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.
Oct 11 02:00:30 compute-0 python3.9[319974]: ansible-ansible.legacy.file Invoked with group=root mode=0700 owner=root setype=container_file_t dest=/var/local/libexec/edpm-container-shutdown _original_basename=edpm-container-shutdown recurse=False state=file path=/var/local/libexec/edpm-container-shutdown force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:00:30 compute-0 sudo[319972]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:30 compute-0 sudo[320011]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:00:30 compute-0 sudo[320011]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:30 compute-0 sudo[320011]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:30 compute-0 ceph-mon[191930]: pgmap v669: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:30 compute-0 sudo[320070]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:00:30 compute-0 sudo[320070]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:30 compute-0 sudo[320070]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:30 compute-0 sudo[320124]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:00:30 compute-0 sudo[320124]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:30 compute-0 sudo[320124]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:30 compute-0 sudo[320149]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:00:30 compute-0 sudo[320149]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:30 compute-0 sudo[320224]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ruzpqnbgspczlnqpalckqxbxjlphrtvs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148030.2711012-168-134092342027518/AnsiballZ_stat.py'
Oct 11 02:00:30 compute-0 sudo[320224]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:30 compute-0 python3.9[320226]: ansible-ansible.legacy.stat Invoked with path=/var/local/libexec/edpm-start-podman-container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:00:31 compute-0 sudo[320224]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v670: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:31 compute-0 sudo[320149]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:00:31 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:00:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:00:31 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:00:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:00:31 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:00:31 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 861b727a-02ed-4119-83d3-2d9ca0dff16d does not exist
Oct 11 02:00:31 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 773e3a92-81bb-44e8-91ef-c91bb0bd9133 does not exist
Oct 11 02:00:31 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev e31fc7fa-5a6c-41b2-b5c5-ff0c1811c70f does not exist
Oct 11 02:00:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:00:31 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:00:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:00:31 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:00:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:00:31 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:00:31 compute-0 sudo[320333]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-barlmfgghzhgqixeojoebtivfnmqojhk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148030.2711012-168-134092342027518/AnsiballZ_file.py'
Oct 11 02:00:31 compute-0 sudo[320333]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:31 compute-0 openstack_network_exporter[159265]: ERROR   02:00:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:00:31 compute-0 openstack_network_exporter[159265]: ERROR   02:00:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:00:31 compute-0 openstack_network_exporter[159265]: ERROR   02:00:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:00:31 compute-0 openstack_network_exporter[159265]: ERROR   02:00:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:00:31 compute-0 openstack_network_exporter[159265]: ERROR   02:00:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:00:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:00:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:00:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:00:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:00:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:00:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:00:31 compute-0 sudo[320334]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:00:31 compute-0 sudo[320334]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:31 compute-0 sudo[320334]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:31 compute-0 sudo[320361]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:00:31 compute-0 sudo[320361]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:31 compute-0 sudo[320361]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:31 compute-0 python3.9[320348]: ansible-ansible.legacy.file Invoked with group=root mode=0700 owner=root setype=container_file_t dest=/var/local/libexec/edpm-start-podman-container _original_basename=edpm-start-podman-container recurse=False state=file path=/var/local/libexec/edpm-start-podman-container force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:00:31 compute-0 sudo[320333]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:31 compute-0 sudo[320386]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:00:31 compute-0 sudo[320386]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:31 compute-0 sudo[320386]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:31 compute-0 sudo[320412]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:00:31 compute-0 sudo[320412]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:32 compute-0 podman[320528]: 2025-10-11 02:00:32.243437592 +0000 UTC m=+0.132814109 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:00:32 compute-0 podman[320535]: 2025-10-11 02:00:32.257418695 +0000 UTC m=+0.135318916 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.component=ubi9-container, release-0.7.12=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.4, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, release=1214.1726694543, config_id=edpm, architecture=x86_64, io.buildah.version=1.29.0, io.k8s.display-name=Red Hat Universal Base Image 9, managed_by=edpm_ansible, name=ubi9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, container_name=kepler, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-type=git, io.openshift.expose-services=, build-date=2024-09-18T21:23:30, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vendor=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, maintainer=Red Hat, Inc.)
Oct 11 02:00:32 compute-0 podman[320532]: 2025-10-11 02:00:32.272863275 +0000 UTC m=+0.149127039 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.license=GPLv2, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:00:32 compute-0 podman[320633]: 2025-10-11 02:00:32.356011834 +0000 UTC m=+0.072914304 container create 2bb79fa6e1bf8c78f06f347d6f917baf8b9f16489cb9db52a4acfe06ec7d8bf3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_noether, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:00:32 compute-0 podman[320633]: 2025-10-11 02:00:32.324499941 +0000 UTC m=+0.041402491 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:00:32 compute-0 systemd[1]: Started libpod-conmon-2bb79fa6e1bf8c78f06f347d6f917baf8b9f16489cb9db52a4acfe06ec7d8bf3.scope.
Oct 11 02:00:32 compute-0 ceph-mon[191930]: pgmap v670: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:32 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:00:32 compute-0 sudo[320704]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jqkmlltwlfvysmxrvpovabrkhywmfkzl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148031.968436-191-139373174501610/AnsiballZ_file.py'
Oct 11 02:00:32 compute-0 podman[320633]: 2025-10-11 02:00:32.511527228 +0000 UTC m=+0.228429778 container init 2bb79fa6e1bf8c78f06f347d6f917baf8b9f16489cb9db52a4acfe06ec7d8bf3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_noether, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:00:32 compute-0 sudo[320704]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:32 compute-0 podman[320633]: 2025-10-11 02:00:32.528990982 +0000 UTC m=+0.245893452 container start 2bb79fa6e1bf8c78f06f347d6f917baf8b9f16489cb9db52a4acfe06ec7d8bf3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_noether, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:00:32 compute-0 podman[320633]: 2025-10-11 02:00:32.534132049 +0000 UTC m=+0.251034519 container attach 2bb79fa6e1bf8c78f06f347d6f917baf8b9f16489cb9db52a4acfe06ec7d8bf3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_noether, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507)
Oct 11 02:00:32 compute-0 eager_noether[320691]: 167 167
Oct 11 02:00:32 compute-0 systemd[1]: libpod-2bb79fa6e1bf8c78f06f347d6f917baf8b9f16489cb9db52a4acfe06ec7d8bf3.scope: Deactivated successfully.
Oct 11 02:00:32 compute-0 podman[320633]: 2025-10-11 02:00:32.543647616 +0000 UTC m=+0.260550086 container died 2bb79fa6e1bf8c78f06f347d6f917baf8b9f16489cb9db52a4acfe06ec7d8bf3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_noether, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 02:00:32 compute-0 systemd[1]: var-lib-containers-storage-overlay-f95504e6e085e0def78b2148999b75f581ec77c8c560797a4cdfa4fa43d2f5ce-merged.mount: Deactivated successfully.
Oct 11 02:00:32 compute-0 podman[320633]: 2025-10-11 02:00:32.60286287 +0000 UTC m=+0.319765340 container remove 2bb79fa6e1bf8c78f06f347d6f917baf8b9f16489cb9db52a4acfe06ec7d8bf3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_noether, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:00:32 compute-0 systemd[1]: libpod-conmon-2bb79fa6e1bf8c78f06f347d6f917baf8b9f16489cb9db52a4acfe06ec7d8bf3.scope: Deactivated successfully.
Oct 11 02:00:32 compute-0 python3.9[320707]: ansible-ansible.builtin.file Invoked with mode=420 path=/etc/systemd/system-preset state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:00:32 compute-0 sudo[320704]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:32 compute-0 podman[320726]: 2025-10-11 02:00:32.868445589 +0000 UTC m=+0.070297993 container create b7afe594df651779b747b433346bafc3b90f4e6b0ffcfcf669ece8bcc507a43f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_northcutt, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 02:00:32 compute-0 systemd[1]: Started libpod-conmon-b7afe594df651779b747b433346bafc3b90f4e6b0ffcfcf669ece8bcc507a43f.scope.
Oct 11 02:00:32 compute-0 podman[320726]: 2025-10-11 02:00:32.843124912 +0000 UTC m=+0.044977306 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:00:32 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:00:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d3aab22319ab2f2a5b79034ca011d6869d602c32318b8fc4e586f041000363f3/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:00:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d3aab22319ab2f2a5b79034ca011d6869d602c32318b8fc4e586f041000363f3/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:00:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d3aab22319ab2f2a5b79034ca011d6869d602c32318b8fc4e586f041000363f3/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:00:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d3aab22319ab2f2a5b79034ca011d6869d602c32318b8fc4e586f041000363f3/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:00:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d3aab22319ab2f2a5b79034ca011d6869d602c32318b8fc4e586f041000363f3/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:00:33 compute-0 podman[320726]: 2025-10-11 02:00:33.002632798 +0000 UTC m=+0.204485172 container init b7afe594df651779b747b433346bafc3b90f4e6b0ffcfcf669ece8bcc507a43f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_northcutt, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 02:00:33 compute-0 podman[320726]: 2025-10-11 02:00:33.031621496 +0000 UTC m=+0.233473870 container start b7afe594df651779b747b433346bafc3b90f4e6b0ffcfcf669ece8bcc507a43f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_northcutt, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:00:33 compute-0 podman[320726]: 2025-10-11 02:00:33.035677884 +0000 UTC m=+0.237530258 container attach b7afe594df651779b747b433346bafc3b90f4e6b0ffcfcf669ece8bcc507a43f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_northcutt, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 02:00:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v671: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:33 compute-0 sudo[320904]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-twdoxyjudpbnddgoxjjffqciphnphkkt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148033.4119127-199-95829205139976/AnsiballZ_stat.py'
Oct 11 02:00:33 compute-0 sudo[320904]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:34 compute-0 python3.9[320909]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/edpm-container-shutdown.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:00:34 compute-0 sudo[320904]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:34 compute-0 nostalgic_northcutt[320742]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:00:34 compute-0 nostalgic_northcutt[320742]: --> relative data size: 1.0
Oct 11 02:00:34 compute-0 nostalgic_northcutt[320742]: --> All data devices are unavailable
Oct 11 02:00:34 compute-0 systemd[1]: libpod-b7afe594df651779b747b433346bafc3b90f4e6b0ffcfcf669ece8bcc507a43f.scope: Deactivated successfully.
Oct 11 02:00:34 compute-0 conmon[320742]: conmon b7afe594df651779b747 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-b7afe594df651779b747b433346bafc3b90f4e6b0ffcfcf669ece8bcc507a43f.scope/container/memory.events
Oct 11 02:00:34 compute-0 systemd[1]: libpod-b7afe594df651779b747b433346bafc3b90f4e6b0ffcfcf669ece8bcc507a43f.scope: Consumed 1.280s CPU time.
Oct 11 02:00:34 compute-0 podman[320726]: 2025-10-11 02:00:34.417999843 +0000 UTC m=+1.619852257 container died b7afe594df651779b747b433346bafc3b90f4e6b0ffcfcf669ece8bcc507a43f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_northcutt, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0)
Oct 11 02:00:34 compute-0 ceph-mon[191930]: pgmap v671: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:34 compute-0 systemd[1]: var-lib-containers-storage-overlay-d3aab22319ab2f2a5b79034ca011d6869d602c32318b8fc4e586f041000363f3-merged.mount: Deactivated successfully.
Oct 11 02:00:34 compute-0 podman[320726]: 2025-10-11 02:00:34.543690347 +0000 UTC m=+1.745542721 container remove b7afe594df651779b747b433346bafc3b90f4e6b0ffcfcf669ece8bcc507a43f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_northcutt, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:00:34 compute-0 systemd[1]: libpod-conmon-b7afe594df651779b747b433346bafc3b90f4e6b0ffcfcf669ece8bcc507a43f.scope: Deactivated successfully.
Oct 11 02:00:34 compute-0 sudo[320412]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:34 compute-0 podman[320962]: 2025-10-11 02:00:34.59994437 +0000 UTC m=+0.141317094 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, container_name=ceilometer_agent_compute, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.4, org.label-schema.license=GPLv2)
Oct 11 02:00:34 compute-0 sudo[321003]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:00:34 compute-0 sudo[321003]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:34 compute-0 sudo[321003]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:34 compute-0 sudo[321028]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:00:34 compute-0 sudo[321028]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:34 compute-0 sudo[321028]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #33. Immutable memtables: 0.
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:00:34.887154) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 13] Flushing memtable with next log file: 33
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148034887283, "job": 13, "event": "flush_started", "num_memtables": 1, "num_entries": 1161, "num_deletes": 506, "total_data_size": 1252674, "memory_usage": 1286064, "flush_reason": "Manual Compaction"}
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 13] Level-0 flush table #34: started
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148034898592, "cf_name": "default", "job": 13, "event": "table_file_creation", "file_number": 34, "file_size": 1240490, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 13577, "largest_seqno": 14737, "table_properties": {"data_size": 1235338, "index_size": 2164, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1925, "raw_key_size": 13542, "raw_average_key_size": 17, "raw_value_size": 1223033, "raw_average_value_size": 1611, "num_data_blocks": 99, "num_entries": 759, "num_filter_entries": 759, "num_deletions": 506, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760147950, "oldest_key_time": 1760147950, "file_creation_time": 1760148034, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 34, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 13] Flush lasted 11538 microseconds, and 6812 cpu microseconds.
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:00:34.898701) [db/flush_job.cc:967] [default] [JOB 13] Level-0 flush table #34: 1240490 bytes OK
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:00:34.898735) [db/memtable_list.cc:519] [default] Level-0 commit table #34 started
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:00:34.901417) [db/memtable_list.cc:722] [default] Level-0 commit table #34: memtable #1 done
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:00:34.901441) EVENT_LOG_v1 {"time_micros": 1760148034901434, "job": 13, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:00:34.901469) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 13] Try to delete WAL files size 1246249, prev total WAL file size 1246249, number of live WAL files 2.
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000030.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:00:34.902648) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6C6F676D0030' seq:72057594037927935, type:22 .. '6C6F676D00323532' seq:0, type:0; will stop at (end)
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 14] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 13 Base level 0, inputs: [34(1211KB)], [32(7372KB)]
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148034902742, "job": 14, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [34], "files_L6": [32], "score": -1, "input_data_size": 8789663, "oldest_snapshot_seqno": -1}
Oct 11 02:00:34 compute-0 sudo[321053]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 14] Generated table #35: 3750 keys, 6902565 bytes, temperature: kUnknown
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148034948566, "cf_name": "default", "job": 14, "event": "table_file_creation", "file_number": 35, "file_size": 6902565, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 6875952, "index_size": 16136, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 9413, "raw_key_size": 92067, "raw_average_key_size": 24, "raw_value_size": 6806421, "raw_average_value_size": 1815, "num_data_blocks": 684, "num_entries": 3750, "num_filter_entries": 3750, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760148034, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 35, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:00:34.949012) [db/compaction/compaction_job.cc:1663] [default] [JOB 14] Compacted 1@0 + 1@6 files to L6 => 6902565 bytes
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:00:34.951601) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 191.2 rd, 150.1 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(1.2, 7.2 +0.0 blob) out(6.6 +0.0 blob), read-write-amplify(12.7) write-amplify(5.6) OK, records in: 4775, records dropped: 1025 output_compression: NoCompression
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:00:34.951634) EVENT_LOG_v1 {"time_micros": 1760148034951619, "job": 14, "event": "compaction_finished", "compaction_time_micros": 45976, "compaction_time_cpu_micros": 34145, "output_level": 6, "num_output_files": 1, "total_output_size": 6902565, "num_input_records": 4775, "num_output_records": 3750, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000034.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148034952160, "job": 14, "event": "table_file_deletion", "file_number": 34}
Oct 11 02:00:34 compute-0 sudo[321053]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000032.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148034955392, "job": 14, "event": "table_file_deletion", "file_number": 32}
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:00:34.902425) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:00:34.955584) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:00:34.955592) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:00:34.955594) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:00:34.955597) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:00:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:00:34.955600) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:00:34 compute-0 sudo[321053]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v672: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:35 compute-0 sudo[321078]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:00:35 compute-0 sudo[321078]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:35 compute-0 sudo[321136]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-guuwbwelquzhjcmdgihrhzojwyafmdvu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148033.4119127-199-95829205139976/AnsiballZ_file.py'
Oct 11 02:00:35 compute-0 sudo[321136]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:35 compute-0 python3.9[321141]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/edpm-container-shutdown.service _original_basename=edpm-container-shutdown-service recurse=False state=file path=/etc/systemd/system/edpm-container-shutdown.service force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:00:35 compute-0 sudo[321136]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:35 compute-0 podman[321170]: 2025-10-11 02:00:35.638067273 +0000 UTC m=+0.077081756 container create c7f154712ef479192127ffbd7d470ef450cd032252fcd01d46abe8dbb82ce7a2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_hawking, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 02:00:35 compute-0 podman[321170]: 2025-10-11 02:00:35.607202513 +0000 UTC m=+0.046216976 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:00:35 compute-0 systemd[1]: Started libpod-conmon-c7f154712ef479192127ffbd7d470ef450cd032252fcd01d46abe8dbb82ce7a2.scope.
Oct 11 02:00:35 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:00:35 compute-0 podman[321170]: 2025-10-11 02:00:35.781569432 +0000 UTC m=+0.220583985 container init c7f154712ef479192127ffbd7d470ef450cd032252fcd01d46abe8dbb82ce7a2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_hawking, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 02:00:35 compute-0 podman[321170]: 2025-10-11 02:00:35.791893483 +0000 UTC m=+0.230907966 container start c7f154712ef479192127ffbd7d470ef450cd032252fcd01d46abe8dbb82ce7a2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_hawking, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:00:35 compute-0 podman[321170]: 2025-10-11 02:00:35.797920999 +0000 UTC m=+0.236935542 container attach c7f154712ef479192127ffbd7d470ef450cd032252fcd01d46abe8dbb82ce7a2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_hawking, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:00:35 compute-0 dazzling_hawking[321210]: 167 167
Oct 11 02:00:35 compute-0 systemd[1]: libpod-c7f154712ef479192127ffbd7d470ef450cd032252fcd01d46abe8dbb82ce7a2.scope: Deactivated successfully.
Oct 11 02:00:35 compute-0 podman[321170]: 2025-10-11 02:00:35.802353077 +0000 UTC m=+0.241367570 container died c7f154712ef479192127ffbd7d470ef450cd032252fcd01d46abe8dbb82ce7a2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_hawking, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2)
Oct 11 02:00:35 compute-0 systemd[1]: var-lib-containers-storage-overlay-dafad2306693314cc7c105dca7751c70bb03f74d1d75b2465930abcb632d24a9-merged.mount: Deactivated successfully.
Oct 11 02:00:35 compute-0 podman[321170]: 2025-10-11 02:00:35.880749527 +0000 UTC m=+0.319764040 container remove c7f154712ef479192127ffbd7d470ef450cd032252fcd01d46abe8dbb82ce7a2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_hawking, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 02:00:35 compute-0 ceph-mon[191930]: pgmap v672: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:35 compute-0 systemd[1]: libpod-conmon-c7f154712ef479192127ffbd7d470ef450cd032252fcd01d46abe8dbb82ce7a2.scope: Deactivated successfully.
Oct 11 02:00:36 compute-0 podman[321279]: 2025-10-11 02:00:36.169563341 +0000 UTC m=+0.091883692 container create c1009c1360f0389871d21d1690f7a47c9ba0b6f5e49e2017cbdf1683f953e52b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_brown, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:00:36 compute-0 podman[321279]: 2025-10-11 02:00:36.135306504 +0000 UTC m=+0.057626865 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:00:36 compute-0 systemd[1]: Started libpod-conmon-c1009c1360f0389871d21d1690f7a47c9ba0b6f5e49e2017cbdf1683f953e52b.scope.
Oct 11 02:00:36 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:00:36 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6a0cbd7751fa9e197e73a5e8973093d5dc543851e7c2c48909133ea88ef19f73/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:00:36 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6a0cbd7751fa9e197e73a5e8973093d5dc543851e7c2c48909133ea88ef19f73/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:00:36 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6a0cbd7751fa9e197e73a5e8973093d5dc543851e7c2c48909133ea88ef19f73/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:00:36 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6a0cbd7751fa9e197e73a5e8973093d5dc543851e7c2c48909133ea88ef19f73/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:00:36 compute-0 podman[321279]: 2025-10-11 02:00:36.311020646 +0000 UTC m=+0.233341087 container init c1009c1360f0389871d21d1690f7a47c9ba0b6f5e49e2017cbdf1683f953e52b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_brown, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default)
Oct 11 02:00:36 compute-0 podman[321279]: 2025-10-11 02:00:36.332137242 +0000 UTC m=+0.254457623 container start c1009c1360f0389871d21d1690f7a47c9ba0b6f5e49e2017cbdf1683f953e52b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_brown, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 02:00:36 compute-0 podman[321279]: 2025-10-11 02:00:36.338607153 +0000 UTC m=+0.260927534 container attach c1009c1360f0389871d21d1690f7a47c9ba0b6f5e49e2017cbdf1683f953e52b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_brown, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 02:00:36 compute-0 sudo[321379]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gmgvlwvybycoqsnnrqgkrnabqaehqomm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148035.9640737-211-224155548689848/AnsiballZ_stat.py'
Oct 11 02:00:36 compute-0 sudo[321379]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:36 compute-0 python3.9[321381]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system-preset/91-edpm-container-shutdown.preset follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:00:36 compute-0 sudo[321379]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v673: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:37 compute-0 strange_brown[321324]: {
Oct 11 02:00:37 compute-0 strange_brown[321324]:     "0": [
Oct 11 02:00:37 compute-0 strange_brown[321324]:         {
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "devices": [
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "/dev/loop3"
Oct 11 02:00:37 compute-0 strange_brown[321324]:             ],
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "lv_name": "ceph_lv0",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "lv_size": "21470642176",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "name": "ceph_lv0",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "tags": {
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.cluster_name": "ceph",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.crush_device_class": "",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.encrypted": "0",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.osd_id": "0",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.type": "block",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.vdo": "0"
Oct 11 02:00:37 compute-0 strange_brown[321324]:             },
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "type": "block",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "vg_name": "ceph_vg0"
Oct 11 02:00:37 compute-0 strange_brown[321324]:         }
Oct 11 02:00:37 compute-0 strange_brown[321324]:     ],
Oct 11 02:00:37 compute-0 strange_brown[321324]:     "1": [
Oct 11 02:00:37 compute-0 strange_brown[321324]:         {
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "devices": [
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "/dev/loop4"
Oct 11 02:00:37 compute-0 strange_brown[321324]:             ],
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "lv_name": "ceph_lv1",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "lv_size": "21470642176",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "name": "ceph_lv1",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "tags": {
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.cluster_name": "ceph",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.crush_device_class": "",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.encrypted": "0",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.osd_id": "1",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.type": "block",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.vdo": "0"
Oct 11 02:00:37 compute-0 strange_brown[321324]:             },
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "type": "block",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "vg_name": "ceph_vg1"
Oct 11 02:00:37 compute-0 strange_brown[321324]:         }
Oct 11 02:00:37 compute-0 strange_brown[321324]:     ],
Oct 11 02:00:37 compute-0 strange_brown[321324]:     "2": [
Oct 11 02:00:37 compute-0 strange_brown[321324]:         {
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "devices": [
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "/dev/loop5"
Oct 11 02:00:37 compute-0 strange_brown[321324]:             ],
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "lv_name": "ceph_lv2",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "lv_size": "21470642176",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "name": "ceph_lv2",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "tags": {
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.cluster_name": "ceph",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.crush_device_class": "",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.encrypted": "0",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.osd_id": "2",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.type": "block",
Oct 11 02:00:37 compute-0 strange_brown[321324]:                 "ceph.vdo": "0"
Oct 11 02:00:37 compute-0 strange_brown[321324]:             },
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "type": "block",
Oct 11 02:00:37 compute-0 strange_brown[321324]:             "vg_name": "ceph_vg2"
Oct 11 02:00:37 compute-0 strange_brown[321324]:         }
Oct 11 02:00:37 compute-0 strange_brown[321324]:     ]
Oct 11 02:00:37 compute-0 strange_brown[321324]: }
Oct 11 02:00:37 compute-0 systemd[1]: libpod-c1009c1360f0389871d21d1690f7a47c9ba0b6f5e49e2017cbdf1683f953e52b.scope: Deactivated successfully.
Oct 11 02:00:37 compute-0 podman[321279]: 2025-10-11 02:00:37.181978149 +0000 UTC m=+1.104298520 container died c1009c1360f0389871d21d1690f7a47c9ba0b6f5e49e2017cbdf1683f953e52b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_brown, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507)
Oct 11 02:00:37 compute-0 systemd[1]: var-lib-containers-storage-overlay-6a0cbd7751fa9e197e73a5e8973093d5dc543851e7c2c48909133ea88ef19f73-merged.mount: Deactivated successfully.
Oct 11 02:00:37 compute-0 sudo[321470]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zwmsnnfxjejypdluzadimuzkrjkjdsxn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148035.9640737-211-224155548689848/AnsiballZ_file.py'
Oct 11 02:00:37 compute-0 sudo[321470]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:37 compute-0 podman[321279]: 2025-10-11 02:00:37.308539883 +0000 UTC m=+1.230860244 container remove c1009c1360f0389871d21d1690f7a47c9ba0b6f5e49e2017cbdf1683f953e52b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_brown, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:00:37 compute-0 systemd[1]: libpod-conmon-c1009c1360f0389871d21d1690f7a47c9ba0b6f5e49e2017cbdf1683f953e52b.scope: Deactivated successfully.
Oct 11 02:00:37 compute-0 sudo[321078]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:37 compute-0 sudo[321479]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:00:37 compute-0 sudo[321479]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:37 compute-0 sudo[321479]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:37 compute-0 python3.9[321474]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system-preset/91-edpm-container-shutdown.preset _original_basename=91-edpm-container-shutdown-preset recurse=False state=file path=/etc/systemd/system-preset/91-edpm-container-shutdown.preset force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:00:37 compute-0 sudo[321470]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:37 compute-0 sudo[321504]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:00:37 compute-0 sudo[321504]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:37 compute-0 sudo[321504]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:37 compute-0 sudo[321535]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:00:37 compute-0 sudo[321535]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:37 compute-0 sudo[321535]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:37 compute-0 sudo[321578]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:00:37 compute-0 sudo[321578]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:38 compute-0 ceph-mon[191930]: pgmap v673: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:38 compute-0 podman[321750]: 2025-10-11 02:00:38.257343439 +0000 UTC m=+0.071028721 container create e27e59c916f9daaf1ab24c02f2937b1fdebac2635e85c415f6255e888d3df25b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_gould, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:00:38 compute-0 sudo[321778]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ezftdyxfravtsnknbtqqctyfaqwyympg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148037.7848043-223-104684603467539/AnsiballZ_systemd.py'
Oct 11 02:00:38 compute-0 sudo[321778]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:38 compute-0 systemd[1]: Started libpod-conmon-e27e59c916f9daaf1ab24c02f2937b1fdebac2635e85c415f6255e888d3df25b.scope.
Oct 11 02:00:38 compute-0 podman[321750]: 2025-10-11 02:00:38.227077025 +0000 UTC m=+0.040762357 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:00:38 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:00:38 compute-0 podman[321750]: 2025-10-11 02:00:38.399414219 +0000 UTC m=+0.213099581 container init e27e59c916f9daaf1ab24c02f2937b1fdebac2635e85c415f6255e888d3df25b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_gould, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 02:00:38 compute-0 podman[321750]: 2025-10-11 02:00:38.421173769 +0000 UTC m=+0.234859061 container start e27e59c916f9daaf1ab24c02f2937b1fdebac2635e85c415f6255e888d3df25b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_gould, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:00:38 compute-0 podman[321750]: 2025-10-11 02:00:38.427394344 +0000 UTC m=+0.241079716 container attach e27e59c916f9daaf1ab24c02f2937b1fdebac2635e85c415f6255e888d3df25b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_gould, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3)
Oct 11 02:00:38 compute-0 hopeful_gould[321785]: 167 167
Oct 11 02:00:38 compute-0 systemd[1]: libpod-e27e59c916f9daaf1ab24c02f2937b1fdebac2635e85c415f6255e888d3df25b.scope: Deactivated successfully.
Oct 11 02:00:38 compute-0 podman[321750]: 2025-10-11 02:00:38.432620896 +0000 UTC m=+0.246306178 container died e27e59c916f9daaf1ab24c02f2937b1fdebac2635e85c415f6255e888d3df25b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_gould, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 02:00:38 compute-0 systemd[1]: var-lib-containers-storage-overlay-cd5d44227d957a392bf40d1ae0e240016accfff979ed6e20d101ce1c4e89aaad-merged.mount: Deactivated successfully.
Oct 11 02:00:38 compute-0 podman[321750]: 2025-10-11 02:00:38.503292037 +0000 UTC m=+0.316977319 container remove e27e59c916f9daaf1ab24c02f2937b1fdebac2635e85c415f6255e888d3df25b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_gould, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 02:00:38 compute-0 systemd[1]: libpod-conmon-e27e59c916f9daaf1ab24c02f2937b1fdebac2635e85c415f6255e888d3df25b.scope: Deactivated successfully.
Oct 11 02:00:38 compute-0 python3.9[321782]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=edpm-container-shutdown state=started daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:00:38 compute-0 systemd[1]: Reloading.
Oct 11 02:00:38 compute-0 systemd-rc-local-generator[321844]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:00:38 compute-0 systemd-sysv-generator[321849]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:00:38 compute-0 podman[321810]: 2025-10-11 02:00:38.78387794 +0000 UTC m=+0.079875397 container create 2b5bdda4bc07c46c6405a6051ca9f53cdbe5dd66b797f7597c1eb024f107bef5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_northcutt, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:00:38 compute-0 podman[321810]: 2025-10-11 02:00:38.75896417 +0000 UTC m=+0.054961637 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:00:39 compute-0 systemd[1]: Started libpod-conmon-2b5bdda4bc07c46c6405a6051ca9f53cdbe5dd66b797f7597c1eb024f107bef5.scope.
Oct 11 02:00:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v674: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:39 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:00:39 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a6e310d1c817666408dd12090c12aed89f8a54a8b4ed10abedd00322ffd7f995/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:00:39 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a6e310d1c817666408dd12090c12aed89f8a54a8b4ed10abedd00322ffd7f995/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:00:39 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a6e310d1c817666408dd12090c12aed89f8a54a8b4ed10abedd00322ffd7f995/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:00:39 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a6e310d1c817666408dd12090c12aed89f8a54a8b4ed10abedd00322ffd7f995/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:00:39 compute-0 podman[321810]: 2025-10-11 02:00:39.148696581 +0000 UTC m=+0.444694028 container init 2b5bdda4bc07c46c6405a6051ca9f53cdbe5dd66b797f7597c1eb024f107bef5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_northcutt, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:00:39 compute-0 sudo[321778]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:39 compute-0 podman[321810]: 2025-10-11 02:00:39.164978622 +0000 UTC m=+0.460976069 container start 2b5bdda4bc07c46c6405a6051ca9f53cdbe5dd66b797f7597c1eb024f107bef5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_northcutt, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:00:39 compute-0 podman[321810]: 2025-10-11 02:00:39.173673827 +0000 UTC m=+0.469671304 container attach 2b5bdda4bc07c46c6405a6051ca9f53cdbe5dd66b797f7597c1eb024f107bef5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_northcutt, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:00:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:00:40 compute-0 sudo[322024]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lhscaufscldnlvhpniazlwdgtarlhakx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148039.4889874-231-160220759688955/AnsiballZ_stat.py'
Oct 11 02:00:40 compute-0 sudo[322024]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:40 compute-0 ceph-mon[191930]: pgmap v674: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:40 compute-0 python3.9[322027]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/netns-placeholder.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:00:40 compute-0 sudo[322024]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]: {
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:         "osd_id": 1,
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:         "type": "bluestore"
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:     },
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:         "osd_id": 2,
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:         "type": "bluestore"
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:     },
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:         "osd_id": 0,
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:         "type": "bluestore"
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]:     }
Oct 11 02:00:40 compute-0 amazing_northcutt[321862]: }
Oct 11 02:00:40 compute-0 systemd[1]: libpod-2b5bdda4bc07c46c6405a6051ca9f53cdbe5dd66b797f7597c1eb024f107bef5.scope: Deactivated successfully.
Oct 11 02:00:40 compute-0 systemd[1]: libpod-2b5bdda4bc07c46c6405a6051ca9f53cdbe5dd66b797f7597c1eb024f107bef5.scope: Consumed 1.244s CPU time.
Oct 11 02:00:40 compute-0 podman[321810]: 2025-10-11 02:00:40.408684005 +0000 UTC m=+1.704681492 container died 2b5bdda4bc07c46c6405a6051ca9f53cdbe5dd66b797f7597c1eb024f107bef5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_northcutt, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:00:40 compute-0 systemd[1]: var-lib-containers-storage-overlay-a6e310d1c817666408dd12090c12aed89f8a54a8b4ed10abedd00322ffd7f995-merged.mount: Deactivated successfully.
Oct 11 02:00:40 compute-0 podman[321810]: 2025-10-11 02:00:40.512182644 +0000 UTC m=+1.808180091 container remove 2b5bdda4bc07c46c6405a6051ca9f53cdbe5dd66b797f7597c1eb024f107bef5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_northcutt, io.buildah.version=1.39.3, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:00:40 compute-0 systemd[1]: libpod-conmon-2b5bdda4bc07c46c6405a6051ca9f53cdbe5dd66b797f7597c1eb024f107bef5.scope: Deactivated successfully.
Oct 11 02:00:40 compute-0 sudo[321578]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:00:40 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:00:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:00:40 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:00:40 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 3dc3623f-51d8-4b85-8770-ddcbdef75898 does not exist
Oct 11 02:00:40 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 6b243789-eea2-4b51-ad94-cac96ed3e2ee does not exist
Oct 11 02:00:40 compute-0 sudo[322110]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:00:40 compute-0 sudo[322110]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:40 compute-0 sudo[322110]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:40 compute-0 sudo[322159]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-royhzzzcckizjiavvgnmnujhcszwedze ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148039.4889874-231-160220759688955/AnsiballZ_file.py'
Oct 11 02:00:40 compute-0 sudo[322159]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:40 compute-0 sudo[322163]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:00:40 compute-0 sudo[322163]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:00:40 compute-0 sudo[322163]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:40 compute-0 python3.9[322166]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/netns-placeholder.service _original_basename=netns-placeholder-service recurse=False state=file path=/etc/systemd/system/netns-placeholder.service force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:00:41 compute-0 sudo[322159]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v675: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:00:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:00:42 compute-0 sudo[322338]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aebpdngmptecizuwvfrbbtfqddpxexcb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148041.3858902-243-25843881306667/AnsiballZ_stat.py'
Oct 11 02:00:42 compute-0 sudo[322338]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:42 compute-0 python3.9[322340]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system-preset/91-netns-placeholder.preset follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:00:42 compute-0 sudo[322338]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:42 compute-0 ceph-mon[191930]: pgmap v675: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:42 compute-0 sudo[322416]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vhjvkvdjxrkfznsbniyxyhqotmwowmgs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148041.3858902-243-25843881306667/AnsiballZ_file.py'
Oct 11 02:00:42 compute-0 sudo[322416]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:42 compute-0 python3.9[322418]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system-preset/91-netns-placeholder.preset _original_basename=91-netns-placeholder-preset recurse=False state=file path=/etc/systemd/system-preset/91-netns-placeholder.preset force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:00:42 compute-0 sudo[322416]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v676: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:43 compute-0 sudo[322568]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mfelfviaalkfdgwtdrdxrqgdlrcxncwd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148043.302182-255-252496854368718/AnsiballZ_systemd.py'
Oct 11 02:00:43 compute-0 sudo[322568]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:44 compute-0 python3.9[322570]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=netns-placeholder state=started daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:00:44 compute-0 systemd[1]: Reloading.
Oct 11 02:00:44 compute-0 systemd-sysv-generator[322596]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:00:44 compute-0 systemd-rc-local-generator[322591]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:00:44 compute-0 ceph-mon[191930]: pgmap v676: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:00:44 compute-0 systemd[1]: Starting Create netns directory...
Oct 11 02:00:44 compute-0 systemd[1]: run-netns-placeholder.mount: Deactivated successfully.
Oct 11 02:00:44 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Oct 11 02:00:44 compute-0 systemd[1]: Finished Create netns directory.
Oct 11 02:00:45 compute-0 sudo[322568]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v677: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:46 compute-0 sudo[322762]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aeldxxboalltxdrpzhdpqaudlydazdvs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148045.4883933-265-211814842185483/AnsiballZ_file.py'
Oct 11 02:00:46 compute-0 sudo[322762]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:46 compute-0 python3.9[322764]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/openstack/healthchecks setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:00:46 compute-0 sudo[322762]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:46 compute-0 ceph-mon[191930]: pgmap v677: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v678: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:48 compute-0 sudo[322914]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sbsacwqljimsysxieziyaervjwdwcjoi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148047.7093296-273-249502469019610/AnsiballZ_stat.py'
Oct 11 02:00:48 compute-0 sudo[322914]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:48 compute-0 python3.9[322916]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/iscsid/healthcheck follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:00:48 compute-0 sudo[322914]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:48 compute-0 ceph-mon[191930]: pgmap v678: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v679: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:49 compute-0 sudo[323037]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jdxqqtfgbcbgrdgjilvedxersthhgtps ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148047.7093296-273-249502469019610/AnsiballZ_copy.py'
Oct 11 02:00:49 compute-0 sudo[323037]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:49 compute-0 podman[323040]: 2025-10-11 02:00:49.473494206 +0000 UTC m=+0.108562446 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, url=https://catalog.redhat.com/en/search?searchType=containers, build-date=2025-08-20T13:12:41, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vendor=Red Hat, Inc., version=9.6, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=openstack_network_exporter, io.buildah.version=1.33.7, release=1755695350, com.redhat.component=ubi9-minimal-container, distribution-scope=public, maintainer=Red Hat, Inc., managed_by=edpm_ansible, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, name=ubi9-minimal, config_id=edpm, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.expose-services=, architecture=x86_64, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=minimal rhel9)
Oct 11 02:00:49 compute-0 podman[323039]: 2025-10-11 02:00:49.487871419 +0000 UTC m=+0.131127165 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:00:49 compute-0 python3.9[323041]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/healthchecks/iscsid/ group=zuul mode=0700 owner=zuul setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760148047.7093296-273-249502469019610/.source _original_basename=healthcheck follow=False checksum=2e1237e7fe015c809b173c52e24cfb87132f4344 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:00:49 compute-0 sudo[323037]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:00:50 compute-0 ceph-mon[191930]: pgmap v679: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v680: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:51 compute-0 sudo[323232]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pkpujoqlibiqysfyrpesmkphetdewrkz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148050.9709773-290-248697614731441/AnsiballZ_file.py'
Oct 11 02:00:51 compute-0 sudo[323232]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:51 compute-0 python3.9[323234]: ansible-ansible.builtin.file Invoked with path=/var/lib/kolla/config_files recurse=True setype=container_file_t state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:00:51 compute-0 sudo[323232]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:52 compute-0 podman[323282]: 2025-10-11 02:00:52.246744026 +0000 UTC m=+0.136325569 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ceilometer_agent_ipmi, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 02:00:52 compute-0 sudo[323403]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rfqmohblhznlyxmlfgsbjgcbzcnpuagb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148052.0644944-298-110593907885963/AnsiballZ_stat.py'
Oct 11 02:00:52 compute-0 sudo[323403]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:52 compute-0 ceph-mon[191930]: pgmap v680: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:52 compute-0 python3.9[323405]: ansible-ansible.legacy.stat Invoked with path=/var/lib/kolla/config_files/iscsid.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:00:52 compute-0 sudo[323403]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v681: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:53 compute-0 sudo[323526]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mkbslxynnienewwvfcgvkcdsxtawftzo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148052.0644944-298-110593907885963/AnsiballZ_copy.py'
Oct 11 02:00:53 compute-0 sudo[323526]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:53 compute-0 python3.9[323528]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/kolla/config_files/iscsid.json mode=0600 src=/home/zuul/.ansible/tmp/ansible-tmp-1760148052.0644944-298-110593907885963/.source.json _original_basename=.e7buw42_ follow=False checksum=80e4f97460718c7e5c66b21ef8b846eba0e0dbc8 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:00:53 compute-0 sudo[323526]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:53 compute-0 ceph-mon[191930]: pgmap v681: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:54 compute-0 sudo[323678]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bigerzndefrpkneduzqarodjfnzpspqe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148053.9319067-313-278795091839231/AnsiballZ_file.py'
Oct 11 02:00:54 compute-0 sudo[323678]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:54 compute-0 python3.9[323680]: ansible-ansible.builtin.file Invoked with mode=0755 path=/var/lib/edpm-config/container-startup-config/iscsid state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:00:54 compute-0 sudo[323678]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:00:54.815 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:00:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:00:54.817 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:00:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:00:54.817 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:00:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:00:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v682: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:55 compute-0 podman[323804]: 2025-10-11 02:00:55.592873565 +0000 UTC m=+0.101144447 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_managed=true)
Oct 11 02:00:55 compute-0 sudo[323849]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qzdhyuzzxhgppfizpityjdpnrlmpdqrq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148055.032372-321-37664879097312/AnsiballZ_stat.py'
Oct 11 02:00:55 compute-0 sudo[323849]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:55 compute-0 sudo[323849]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:56 compute-0 ceph-mon[191930]: pgmap v682: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:00:56
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['cephfs.cephfs.meta', 'volumes', '.rgw.root', 'backups', 'images', 'default.rgw.control', 'vms', 'default.rgw.log', 'default.rgw.meta', '.mgr', 'cephfs.cephfs.data']
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:00:56 compute-0 sudo[323972]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-loxofuyaihmibloihlznxazjazyzjpwj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148055.032372-321-37664879097312/AnsiballZ_copy.py'
Oct 11 02:00:56 compute-0 sudo[323972]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:00:56 compute-0 sudo[323972]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:00:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:00:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v683: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:57 compute-0 sudo[324124]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jvptxgenjwyfgrestgjqgmyyohdykfnz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148057.210442-338-12202385605488/AnsiballZ_container_config_data.py'
Oct 11 02:00:57 compute-0 sudo[324124]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:58 compute-0 ceph-mon[191930]: pgmap v683: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:58 compute-0 python3.9[324126]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/edpm-config/container-startup-config/iscsid config_pattern=*.json debug=False
Oct 11 02:00:58 compute-0 sudo[324124]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v684: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:00:59 compute-0 sudo[324276]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-groemxebwbijslvikwaebxdczfsqlfad ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148058.5680752-347-175388814056138/AnsiballZ_container_config_hash.py'
Oct 11 02:00:59 compute-0 sudo[324276]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:00:59 compute-0 python3.9[324278]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
Oct 11 02:00:59 compute-0 sudo[324276]: pam_unix(sudo:session): session closed for user root
Oct 11 02:00:59 compute-0 podman[157119]: time="2025-10-11T02:00:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:00:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:00:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 35732 "" "Go-http-client/1.1"
Oct 11 02:00:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:00:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 7277 "" "Go-http-client/1.1"
Oct 11 02:00:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:01:00 compute-0 ceph-mon[191930]: pgmap v684: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:00 compute-0 sudo[324428]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-waqzaahcfycvmrmhfuudghaaodcjbxxo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148059.8907273-356-237635174448858/AnsiballZ_podman_container_info.py'
Oct 11 02:01:00 compute-0 sudo[324428]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:00 compute-0 python3.9[324430]: ansible-containers.podman.podman_container_info Invoked with executable=podman name=None
Oct 11 02:01:01 compute-0 sudo[324428]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v685: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:01 compute-0 openstack_network_exporter[159265]: ERROR   02:01:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:01:01 compute-0 openstack_network_exporter[159265]: ERROR   02:01:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:01:01 compute-0 openstack_network_exporter[159265]: ERROR   02:01:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:01:01 compute-0 openstack_network_exporter[159265]: ERROR   02:01:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:01:01 compute-0 openstack_network_exporter[159265]: 
Oct 11 02:01:01 compute-0 openstack_network_exporter[159265]: ERROR   02:01:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:01:01 compute-0 openstack_network_exporter[159265]: 
Oct 11 02:01:01 compute-0 CROND[324480]: (root) CMD (run-parts /etc/cron.hourly)
Oct 11 02:01:01 compute-0 run-parts[324483]: (/etc/cron.hourly) starting 0anacron
Oct 11 02:01:01 compute-0 run-parts[324489]: (/etc/cron.hourly) finished 0anacron
Oct 11 02:01:01 compute-0 CROND[324479]: (root) CMDEND (run-parts /etc/cron.hourly)
Oct 11 02:01:02 compute-0 ceph-mon[191930]: pgmap v685: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:02 compute-0 podman[324589]: 2025-10-11 02:01:02.964273822 +0000 UTC m=+0.109813600 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:01:02 compute-0 sudo[324658]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sbbslijhszklejsnjoayqzxveusmueka ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760148062.1596258-369-140625768451787/AnsiballZ_edpm_container_manage.py'
Oct 11 02:01:02 compute-0 sudo[324658]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:03 compute-0 podman[324591]: 2025-10-11 02:01:03.000936962 +0000 UTC m=+0.135591544 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, name=ubi9, release=1214.1726694543, managed_by=edpm_ansible, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, version=9.4, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, build-date=2024-09-18T21:23:30, com.redhat.component=ubi9-container, maintainer=Red Hat, Inc., summary=Provides the latest release of Red Hat Universal Base Image 9., container_name=kepler, io.buildah.version=1.29.0, io.openshift.expose-services=, io.k8s.display-name=Red Hat Universal Base Image 9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.openshift.tags=base rhel9, release-0.7.12=, vcs-type=git, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f)
Oct 11 02:01:03 compute-0 podman[324590]: 2025-10-11 02:01:03.016015162 +0000 UTC m=+0.154619356 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, config_id=ovn_controller, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, org.label-schema.build-date=20251009)
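The three health_status events above are emitted by podman's healthcheck timers: each run executes the configured test command (here the /openstack/healthcheck wrapper) inside the container and records the result plus a failing streak. A minimal sketch for reading the same status by hand, assuming podman is on PATH and the container was created with a healthcheck (newer podman reports it under State.Health, older releases under State.Healthcheck):

    # Minimal sketch: read a container's health the way these journal events
    # report it. Assumes `podman` is on PATH; containers created without a
    # healthcheck carry neither key, so we fall back to a placeholder string.
    import json
    import subprocess

    def health_status(container: str) -> str:
        out = subprocess.run(
            ["podman", "inspect", container],
            check=True, capture_output=True, text=True,
        ).stdout
        state = json.loads(out)[0].get("State", {})
        health = state.get("Health") or state.get("Healthcheck") or {}
        return health.get("Status", "no healthcheck")

    for name in ("podman_exporter", "kepler", "ovn_controller"):
        print(name, "->", health_status(name))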
Oct 11 02:01:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v686: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:03 compute-0 python3[324675]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/edpm-config/container-startup-config/iscsid config_id=iscsid config_overrides={} config_patterns=*.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 02:01:03 compute-0 podman[324711]: 2025-10-11 02:01:03.538085806 +0000 UTC m=+0.047062772 image pull 5773abc4300b61c01f3353a0b9239f9a404bb272790b280574e4c56f72edaa72 quay.io/podified-antelope-centos9/openstack-iscsid:current-podified
Oct 11 02:01:03 compute-0 podman[324711]: 2025-10-11 02:01:03.629901073 +0000 UTC m=+0.138878019 container create b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, container_name=iscsid, io.buildah.version=1.41.3, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=iscsid, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:01:03 compute-0 python3[324675]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman create --name iscsid --conmon-pidfile /run/iscsid.pid --env KOLLA_CONFIG_STRATEGY=COPY_ALWAYS --healthcheck-command /openstack/healthcheck --label config_id=iscsid --label container_name=iscsid --label managed_by=edpm_ansible --label config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']} --log-driver journald --log-level info --network host --privileged=True --volume /etc/hosts:/etc/hosts:ro --volume /etc/localtime:/etc/localtime:ro --volume /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro --volume /etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro --volume /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro --volume /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro --volume /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro --volume /dev/log:/dev/log --volume /var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro --volume /dev:/dev --volume /run:/run --volume /sys:/sys --volume /lib/modules:/lib/modules:ro --volume /etc/iscsi:/etc/iscsi:z --volume /etc/target:/etc/target:z --volume /var/lib/iscsi:/var/lib/iscsi:z --volume /var/lib/openstack/healthchecks/iscsid:/openstack:ro,z quay.io/podified-antelope-centos9/openstack-iscsid:current-podified
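The PODMAN-CONTAINER-DEBUG line above shows how edpm_container_manage renders the config_data dict into a podman create invocation: each environment entry becomes an --env flag, healthcheck.test becomes --healthcheck-command, net becomes --network, and each volume entry becomes a --volume flag. A rough sketch of that mapping (labels omitted for brevity; hypothetical helper, not the module's actual code):

    # Rough sketch of the config_data -> `podman create` mapping visible in
    # the debug line above. Hypothetical helper; the real module also emits
    # --label flags and handles many more keys.
    def podman_create_args(name: str, cfg: dict) -> list[str]:
        args = ["podman", "create", "--name", name,
                "--conmon-pidfile", f"/run/{name}.pid"]
        for key, val in cfg.get("environment", {}).items():
            args += ["--env", f"{key}={val}"]
        if "healthcheck" in cfg:
            args += ["--healthcheck-command", cfg["healthcheck"]["test"]]
        args += ["--log-driver", "journald", "--log-level", "info"]
        if cfg.get("net"):
            args += ["--network", cfg["net"]]
        if cfg.get("privileged"):
            args.append("--privileged=True")
        for vol in cfg.get("volumes", []):
            args += ["--volume", vol]
        args.append(cfg["image"])
        return args

    cfg = {"image": "quay.io/podified-antelope-centos9/openstack-iscsid:current-podified",
           "net": "host", "privileged": True,
           "environment": {"KOLLA_CONFIG_STRATEGY": "COPY_ALWAYS"},
           "healthcheck": {"test": "/openstack/healthcheck"},
           "volumes": ["/etc/iscsi:/etc/iscsi:z"]}
    print(" ".join(podman_create_args("iscsid", cfg)))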
Oct 11 02:01:03 compute-0 sudo[324658]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:04 compute-0 ceph-mon[191930]: pgmap v686: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:04 compute-0 sudo[324896]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kzxrffutcjfejuuesxyytddmfdktmsue ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148064.17461-377-179373023250208/AnsiballZ_stat.py'
Oct 11 02:01:04 compute-0 sudo[324896]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:04 compute-0 podman[324898]: 2025-10-11 02:01:04.851548429 +0000 UTC m=+0.129974254 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, managed_by=edpm_ansible)
Oct 11 02:01:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:01:04 compute-0 python3.9[324899]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:01:04 compute-0 sudo[324896]: pam_unix(sudo:session): session closed for user root
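The zuul sudo entries all follow Ansible's become handshake: the wrapper echoes a one-off BECOME-SUCCESS-<token> marker before executing the AnsiballZ payload, and the controller discards everything on stdout up to that marker so login banners or shell profile noise cannot corrupt the module's JSON reply. A generic sketch of the same idea (illustration only, not Ansible's implementation):

    # Generic sketch of the BECOME-SUCCESS handshake seen in the sudo lines:
    # run a payload behind a random marker, keep only the output after it.
    import secrets
    import subprocess

    def run_behind_marker(payload: str) -> str:
        marker = "BECOME-SUCCESS-" + secrets.token_hex(16)
        proc = subprocess.run(
            ["/bin/sh", "-c", f"echo {marker} ; {payload}"],
            capture_output=True, text=True, check=True,
        )
        # Everything before the marker (banners, MOTD, profile chatter) is noise.
        return proc.stdout.partition(marker + "\n")[2]

    print(run_behind_marker("echo '{\"changed\": false}'"))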
Oct 11 02:01:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v687: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:05 compute-0 sudo[325070]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ybcuzqmmmcsoxgwerfuickeyufmixfre ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148065.2888136-386-199863373756531/AnsiballZ_file.py'
Oct 11 02:01:05 compute-0 sudo[325070]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:06 compute-0 python3.9[325072]: ansible-file Invoked with path=/etc/systemd/system/edpm_iscsid.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:01:06 compute-0 sudo[325070]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:06 compute-0 ceph-mon[191930]: pgmap v687: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:01:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
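The pg_autoscaler lines above all apply the same arithmetic: raw PG target = capacity ratio x bias x overall PG budget. A budget of 300 reproduces the logged targets exactly, which would fit, for example, 3 OSDs at the default mon_target_pg_per_osd of 100 (an assumption; the budget itself is not printed). The raw target is then rounded to a power of two subject to per-pool floors, and a pool is only resized when it is off by a large factor, which is why 'cephfs.cephfs.meta' reports a target of 16 yet stays at 32. A sketch of the multiplication:

    # Sketch of the pg_autoscaler arithmetic in the lines above. The real
    # autoscaler adds per-pool minimums and a keep-unless-far-off rule on top.
    def raw_pg_target(capacity_ratio: float, bias: float, pg_budget: int = 300) -> float:
        return capacity_ratio * bias * pg_budget

    def round_up_pow2(target: float, floor: int = 1) -> int:
        pgs = floor
        while pgs < target:
            pgs *= 2
        return pgs

    print(raw_pg_target(7.185749983720779e-06, 1.0))  # 0.0021557... ('.mgr', as logged)
    print(raw_pg_target(5.087256625643029e-07, 4.0))  # 0.0006104... (cephfs.meta, as logged)
    print(round_up_pow2(raw_pg_target(7.185749983720779e-06, 1.0)))  # 1, as logged for '.mgr'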
Oct 11 02:01:06 compute-0 sudo[325146]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rftpuxwozmbbbfajcnkjvenlpcubdyzu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148065.2888136-386-199863373756531/AnsiballZ_stat.py'
Oct 11 02:01:06 compute-0 sudo[325146]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:06 compute-0 python3.9[325148]: ansible-stat Invoked with path=/etc/systemd/system/edpm_iscsid_healthcheck.timer follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:01:06 compute-0 sudo[325146]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v688: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:07 compute-0 sudo[325297]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vvnqrrwetbwnjwubxwtnxppnqwkorvxr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148067.0180094-386-171913118608767/AnsiballZ_copy.py'
Oct 11 02:01:07 compute-0 sudo[325297]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.941 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is larger than the number of worker threads available to execute them; expect this polling cycle to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.942 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.942 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.943 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f8ed27f97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb8c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb1a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb200>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed2874260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed3ab42f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb350>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb90>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fa390>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb3b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbbf0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f8ed27fbad0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f8ed27faff0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f8ed27fb110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbc80>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.951 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f8ed27fb170>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f8ed27fb1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.953 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.953 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f8ed27fb230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f8ed2874230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.952 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.955 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.955 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27f9610>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.955 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb620>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.955 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbe30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.956 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbec0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.956 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbf50>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.956 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f8ed27fb290>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f8ed5778d70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f8ed27fb650>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f8ed27fbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f8ed27fb320>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f8ed27fbb60>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f8ed27fa3f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f8ed27fb380>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f8ed27fbbc0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f8ed27fbc50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f8ed27fbce0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f8ed27fbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f8ed27fb590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.962 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f8ed27f95e0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.962 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.962 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f8ed27fb5f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.962 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.962 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f8ed27fbe00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.962 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.963 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f8ed27fbe90>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.963 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.963 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f8ed27fbf20>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.963 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.964 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:01:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:01:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
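The run of DEBUG lines above is the ceilometer compute agent finishing one polling interval: execute_polling_task_processing runs every configured pollster (disk.device.*, network.*, cpu, memory.usage, power.state) against the instances on this host and logs one completion line per meter. A minimal sketch of that loop, with hypothetical pollster callables standing in for the real plugin classes in ceilometer/polling/manager.py:

    # Sketch only: names and pollster callables here are hypothetical stand-ins
    # for ceilometer's plugin-based pollsters; the logging mirrors the lines above.
    import logging

    logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)s %(message)s")
    LOG = logging.getLogger("ceilometer.polling.manager")

    def publish(samples):
        # placeholder publisher: the real agent hands samples to a pipeline sink
        pass

    def execute_polling_task_processing(pollsters, resources):
        for name, pollster in pollsters.items():
            try:
                samples = pollster(resources)   # collect samples for this meter
                publish(samples)
            except Exception:
                LOG.exception("Continue after error from %s", name)
            finally:
                LOG.debug("Finished processing pollster [%s].", name)

    if __name__ == "__main__":
        pollsters = {
            "power.state": lambda res: [(r, "power.state", 1) for r in res],
            "memory.usage": lambda res: [(r, "memory.usage", 512) for r in res],
        }
        execute_polling_task_processing(pollsters, resources=["instance-0001"])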
Oct 11 02:01:08 compute-0 python3.9[325299]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760148067.0180094-386-171913118608767/source dest=/etc/systemd/system/edpm_iscsid.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:01:08 compute-0 sudo[325297]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:08 compute-0 ceph-mon[191930]: pgmap v688: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:08 compute-0 sudo[325374]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hcxdswiaiyrinfuqigylkoqxynxmvloc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148067.0180094-386-171913118608767/AnsiballZ_systemd.py'
Oct 11 02:01:08 compute-0 sudo[325374]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:08 compute-0 python3.9[325376]: ansible-systemd Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 02:01:08 compute-0 systemd[1]: Reloading.
Oct 11 02:01:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v689: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:09 compute-0 systemd-rc-local-generator[325400]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:01:09 compute-0 systemd-sysv-generator[325409]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update the package to include a native systemd unit file in order to make it safer and more robust.
Oct 11 02:01:09 compute-0 sudo[325374]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:09 compute-0 systemd[1]: virtsecretd.service: Deactivated successfully.
Oct 11 02:01:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:01:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 1200.0 total, 600.0 interval
                                            Cumulative writes: 3321 writes, 14K keys, 3321 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.02 MB/s
                                            Cumulative WAL: 3321 writes, 3321 syncs, 1.00 writes per sync, written: 0.02 GB, 0.02 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 1282 writes, 5811 keys, 1282 commit groups, 1.0 writes per commit group, ingest: 8.50 MB, 0.01 MB/s
                                            Interval WAL: 1282 writes, 1282 syncs, 1.00 writes per sync, written: 0.01 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0    125.5      0.12              0.07         7    0.017       0      0       0.0       0.0
                                              L6      1/0    6.58 MB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   2.7    176.5    145.8      0.28              0.17         6    0.046     24K   3201       0.0       0.0
                                             Sum      1/0    6.58 MB   0.0      0.0     0.0      0.0       0.1      0.0       0.0   3.7    123.0    139.6      0.40              0.24        13    0.031     24K   3201       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   4.9    157.1    157.4      0.22              0.13         8    0.027     17K   2467       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Low      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0    176.5    145.8      0.28              0.17         6    0.046     24K   3201       0.0       0.0
                                            High      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0    129.7      0.12              0.07         6    0.019       0      0       0.0       0.0
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0     12.0      0.00              0.00         1    0.004       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.0 total, 600.0 interval
                                            Flush(GB): cumulative 0.015, interval 0.007
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.05 GB write, 0.05 MB/s write, 0.05 GB read, 0.04 MB/s read, 0.4 seconds
                                            Interval compaction: 0.03 GB write, 0.06 MB/s write, 0.03 GB read, 0.06 MB/s read, 0.2 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x55816e47f1f0#2 capacity: 308.00 MB usage: 1.74 MB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 0 last_secs: 7.3e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(102,1.52 MB,0.493731%) FilterBlock(14,74.67 KB,0.0236759%) IndexBlock(14,148.55 KB,0.0470991%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
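The MB/s figures in the DB Stats block are simply cumulative bytes divided by uptime. A quick check against the numbers in the dump (values copied from the log; the arithmetic is ours):

    # Sanity-check of the RocksDB "DB Stats" rates above.
    uptime_s = 1200.0
    ingest_gb = 0.02
    print(f"cumulative ingest rate: {ingest_gb * 1024 / uptime_s:.2f} MB/s")  # 0.02 MB/s, as logged

    interval_s = 600.0
    interval_mb = 8.50
    print(f"interval ingest rate: {interval_mb / interval_s:.2f} MB/s")       # 0.01 MB/s, as logged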
Oct 11 02:01:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
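The _set_new_cache_sizes line is the monitor's cache autotuner republishing its budget. The allocations are exact MiB values, and kv_alloc corresponds to the 308.00 MB BinnedLRUCache capacity reported in the RocksDB block-cache stats above. A quick cross-check (numbers copied from the log):

    # Cross-check of mon cache autotuning figures from the log line above.
    MiB = 1 << 20
    cache_size, inc_alloc, full_alloc, kv_alloc = 1020054731, 348127232, 348127232, 322961408

    for name, val in [("inc_alloc", inc_alloc), ("full_alloc", full_alloc), ("kv_alloc", kv_alloc)]:
        print(f"{name}: {val / MiB:.0f} MiB")   # 332, 332, 308 MiB
    assert inc_alloc + full_alloc + kv_alloc <= cache_size  # allocations fit the budget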
Oct 11 02:01:09 compute-0 sudo[325488]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-obamdbqsjlfvkmgexcgazxgqkeqaoqqk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148067.0180094-386-171913118608767/AnsiballZ_systemd.py'
Oct 11 02:01:10 compute-0 sudo[325488]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:10 compute-0 python3.9[325490]: ansible-systemd Invoked with state=restarted name=edpm_iscsid.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:01:10 compute-0 ceph-mon[191930]: pgmap v689: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:10 compute-0 systemd[1]: Reloading.
Oct 11 02:01:10 compute-0 systemd-rc-local-generator[325514]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:01:10 compute-0 systemd-sysv-generator[325517]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update the package to include a native systemd unit file in order to make it safer and more robust.
Oct 11 02:01:10 compute-0 systemd[1]: Starting iscsid container...
Oct 11 02:01:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v690: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:11 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:01:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9fab292fc1c9c31e78c29cfe5a0680c73a053bfcee2f89334e1bb89b98b9535c/merged/etc/target supports timestamps until 2038 (0x7fffffff)
Oct 11 02:01:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9fab292fc1c9c31e78c29cfe5a0680c73a053bfcee2f89334e1bb89b98b9535c/merged/etc/iscsi supports timestamps until 2038 (0x7fffffff)
Oct 11 02:01:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9fab292fc1c9c31e78c29cfe5a0680c73a053bfcee2f89334e1bb89b98b9535c/merged/var/lib/iscsi supports timestamps until 2038 (0x7fffffff)
Oct 11 02:01:11 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e.
Oct 11 02:01:11 compute-0 podman[325530]: 2025-10-11 02:01:11.197159064 +0000 UTC m=+0.245581564 container init b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, container_name=iscsid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, managed_by=edpm_ansible, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
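The container init record carries the full config_data dict that edpm_ansible rendered for this service. As an illustration only (not the actual edpm_ansible code), here is how a subset of those keys would map onto podman arguments; the image, mounts, and environment are taken from the log entry, while the mapping function itself is hypothetical:

    # Illustrative mapping of a few config_data keys to podman CLI arguments.
    config = {
        "image": "quay.io/podified-antelope-centos9/openstack-iscsid:current-podified",
        "net": "host",
        "privileged": True,
        "restart": "always",
        "volumes": ["/etc/iscsi:/etc/iscsi:z", "/var/lib/iscsi:/var/lib/iscsi:z"],
        "environment": {"KOLLA_CONFIG_STRATEGY": "COPY_ALWAYS"},
    }

    def to_podman_args(name, cfg):
        args = ["podman", "run", "--name", name, "--detach"]
        if cfg.get("net"):
            args += ["--net", cfg["net"]]
        if cfg.get("privileged"):
            args.append("--privileged")
        if cfg.get("restart"):
            args += ["--restart", cfg["restart"]]
        for vol in cfg.get("volumes", []):
            args += ["--volume", vol]
        for key, val in cfg.get("environment", {}).items():
            args += ["--env", f"{key}={val}"]
        return args + [cfg["image"]]

    print(" ".join(to_podman_args("iscsid", config)))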
Oct 11 02:01:11 compute-0 iscsid[325545]: + sudo -E kolla_set_configs
Oct 11 02:01:11 compute-0 podman[325530]: 2025-10-11 02:01:11.2513006 +0000 UTC m=+0.299723050 container start b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, container_name=iscsid, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_managed=true)
Oct 11 02:01:11 compute-0 sudo[325551]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_set_configs
Oct 11 02:01:11 compute-0 podman[325530]: iscsid
Oct 11 02:01:11 compute-0 systemd[1]: Started iscsid container.
Oct 11 02:01:11 compute-0 systemd[1]: Created slice User Slice of UID 0.
Oct 11 02:01:11 compute-0 systemd[1]: Starting User Runtime Directory /run/user/0...
Oct 11 02:01:11 compute-0 sudo[325488]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:11 compute-0 systemd[1]: Finished User Runtime Directory /run/user/0.
Oct 11 02:01:11 compute-0 systemd[1]: Starting User Manager for UID 0...
Oct 11 02:01:11 compute-0 systemd[325569]: pam_unix(systemd-user:session): session opened for user root(uid=0) by root(uid=0)
Oct 11 02:01:11 compute-0 podman[325552]: 2025-10-11 02:01:11.435049975 +0000 UTC m=+0.161016804 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=starting, health_failing_streak=1, health_log=, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.vendor=CentOS, config_id=iscsid, io.buildah.version=1.41.3, managed_by=edpm_ansible, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=iscsid)
Oct 11 02:01:11 compute-0 systemd[1]: b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e-2cb90dd4ddd924db.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 02:01:11 compute-0 systemd[1]: b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e-2cb90dd4ddd924db.service: Failed with result 'exit-code'.
Oct 11 02:01:11 compute-0 systemd[325569]: Queued start job for default target Main User Target.
Oct 11 02:01:11 compute-0 systemd[325569]: Created slice User Application Slice.
Oct 11 02:01:11 compute-0 systemd[325569]: Mark boot as successful after the user session has run 2 minutes was skipped because of an unmet condition check (ConditionUser=!@system).
Oct 11 02:01:11 compute-0 systemd[325569]: Started Daily Cleanup of User's Temporary Directories.
Oct 11 02:01:11 compute-0 systemd[325569]: Reached target Paths.
Oct 11 02:01:11 compute-0 systemd[325569]: Reached target Timers.
Oct 11 02:01:11 compute-0 systemd[325569]: Starting D-Bus User Message Bus Socket...
Oct 11 02:01:11 compute-0 systemd[325569]: Starting Create User's Volatile Files and Directories...
Oct 11 02:01:11 compute-0 systemd[325569]: Listening on D-Bus User Message Bus Socket.
Oct 11 02:01:11 compute-0 systemd[325569]: Reached target Sockets.
Oct 11 02:01:11 compute-0 systemd[325569]: Finished Create User's Volatile Files and Directories.
Oct 11 02:01:11 compute-0 systemd[325569]: Reached target Basic System.
Oct 11 02:01:11 compute-0 systemd[325569]: Reached target Main User Target.
Oct 11 02:01:11 compute-0 systemd[325569]: Startup finished in 235ms.
Oct 11 02:01:11 compute-0 systemd[1]: Started User Manager for UID 0.
Oct 11 02:01:11 compute-0 systemd[1]: Started Session c3 of User root.
Oct 11 02:01:11 compute-0 sudo[325551]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Oct 11 02:01:11 compute-0 iscsid[325545]: INFO:__main__:Loading config file at /var/lib/kolla/config_files/config.json
Oct 11 02:01:11 compute-0 iscsid[325545]: INFO:__main__:Validating config file
Oct 11 02:01:11 compute-0 iscsid[325545]: INFO:__main__:Kolla config strategy set to: COPY_ALWAYS
Oct 11 02:01:11 compute-0 iscsid[325545]: INFO:__main__:Writing out command to execute
Oct 11 02:01:11 compute-0 sudo[325551]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:11 compute-0 systemd[1]: session-c3.scope: Deactivated successfully.
Oct 11 02:01:11 compute-0 iscsid[325545]: ++ cat /run_command
Oct 11 02:01:11 compute-0 iscsid[325545]: + CMD='/usr/sbin/iscsid -f'
Oct 11 02:01:11 compute-0 iscsid[325545]: + ARGS=
Oct 11 02:01:11 compute-0 iscsid[325545]: + sudo kolla_copy_cacerts
Oct 11 02:01:11 compute-0 sudo[325667]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_copy_cacerts
Oct 11 02:01:11 compute-0 systemd[1]: Started Session c4 of User root.
Oct 11 02:01:11 compute-0 sudo[325667]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Oct 11 02:01:11 compute-0 sudo[325667]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:11 compute-0 iscsid[325545]: + [[ ! -n '' ]]
Oct 11 02:01:11 compute-0 iscsid[325545]: + . kolla_extend_start
Oct 11 02:01:11 compute-0 systemd[1]: session-c4.scope: Deactivated successfully.
Oct 11 02:01:11 compute-0 iscsid[325545]: ++ [[ ! -f /etc/iscsi/initiatorname.iscsi ]]
Oct 11 02:01:11 compute-0 iscsid[325545]: + echo 'Running command: '\''/usr/sbin/iscsid -f'\'''
Oct 11 02:01:11 compute-0 iscsid[325545]: Running command: '/usr/sbin/iscsid -f'
Oct 11 02:01:11 compute-0 iscsid[325545]: + umask 0022
Oct 11 02:01:11 compute-0 iscsid[325545]: + exec /usr/sbin/iscsid -f
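The iscsid[325545] lines from "Loading config file" through "exec /usr/sbin/iscsid -f" trace kolla's standard container entrypoint: load /var/lib/kolla/config_files/config.json, validate it, copy configs under the COPY_ALWAYS strategy, write the service command to /run_command, then exec it so the service replaces the shell as PID 1. A condensed, simplified sketch of that flow (the real logic lives in kolla_set_configs and kolla_start):

    # Simplified sketch of the kolla entrypoint sequence logged above.
    import json
    import os
    import shutil

    def kolla_start(config_path="/var/lib/kolla/config_files/config.json"):
        with open(config_path) as f:
            config = json.load(f)                      # "Loading config file"
        for item in config.get("config_files", []):    # COPY_ALWAYS: refresh configs
            shutil.copy(item["source"], item["dest"])
        command = config["command"]                    # e.g. "/usr/sbin/iscsid -f"
        with open("/run_command", "w") as f:           # "Writing out command to execute"
            f.write(command)
        os.execvp(command.split()[0], command.split()) # service becomes PID 1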
Oct 11 02:01:11 compute-0 kernel: Loading iSCSI transport class v2.0-870.
Oct 11 02:01:12 compute-0 ceph-mon[191930]: pgmap v690: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:12 compute-0 python3.9[325753]: ansible-ansible.builtin.stat Invoked with path=/etc/iscsi/.iscsid_restart_required follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:01:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v691: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:13 compute-0 sudo[325903]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-irlpnorxwclavosnhotooaezevyqhnfg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148072.875193-423-163742446444879/AnsiballZ_file.py'
Oct 11 02:01:13 compute-0 sudo[325903]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:13 compute-0 python3.9[325905]: ansible-ansible.builtin.file Invoked with path=/etc/iscsi/.iscsid_restart_required state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
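The stat on /etc/iscsi/.iscsid_restart_required followed by file state=absent is a marker-file handshake: an earlier task touches the marker when the iscsid configuration changes, the play restarts edpm_iscsid.service, and the marker is removed once the restart has happened. The same idea in a few lines of Python (the path and unit name come from the log; the helper itself is ours):

    # Marker-file restart guard, mirroring the stat / restart / absent tasks above.
    import os
    import subprocess

    MARKER = "/etc/iscsi/.iscsid_restart_required"

    def restart_if_required():
        if os.path.exists(MARKER):                      # ansible: stat
            subprocess.run(["systemctl", "restart", "edpm_iscsid.service"], check=True)
            os.remove(MARKER)                           # ansible: file state=absent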
Oct 11 02:01:13 compute-0 sudo[325903]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:14 compute-0 ceph-mon[191930]: pgmap v691: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:14 compute-0 sudo[326055]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gsjdipwifwbmxcvyexopwqqqizzwynef ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148074.224224-434-195714537172508/AnsiballZ_service_facts.py'
Oct 11 02:01:14 compute-0 sudo[326055]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:01:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v692: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:15 compute-0 python3.9[326057]: ansible-ansible.builtin.service_facts Invoked
Oct 11 02:01:15 compute-0 network[326074]: You are using the 'network' service provided by 'network-scripts', which is now deprecated.
Oct 11 02:01:15 compute-0 network[326075]: 'network-scripts' will be removed from the distribution in the near future.
Oct 11 02:01:15 compute-0 network[326076]: It is advised to switch to 'NetworkManager' for network management.
Oct 11 02:01:16 compute-0 ceph-mon[191930]: pgmap v692: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v693: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:18 compute-0 ceph-mon[191930]: pgmap v693: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v694: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:19 compute-0 podman[326145]: 2025-10-11 02:01:19.680061114 +0000 UTC m=+0.135006687 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., architecture=x86_64, url=https://catalog.redhat.com/en/search?searchType=containers, io.buildah.version=1.33.7, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., release=1755695350, vcs-type=git, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, vendor=Red Hat, Inc., io.openshift.tags=minimal rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, version=9.6, io.openshift.expose-services=, name=ubi9-minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, config_id=edpm, build-date=2025-08-20T13:12:41, com.redhat.component=ubi9-minimal-container, managed_by=edpm_ansible, container_name=openstack_network_exporter, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal)
Oct 11 02:01:19 compute-0 podman[326146]: 2025-10-11 02:01:19.703848127 +0000 UTC m=+0.132500430 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
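The node_exporter command line above narrows the systemd collector with --collector.systemd.unit-include. The pattern can be exercised directly; the regex is copied from the log, and the sample unit names are ours:

    # Exercising node_exporter's systemd unit-include regex from the log above.
    import re

    pattern = re.compile(r"(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\.service")
    for unit in ["edpm_iscsid.service", "virtsecretd.service", "sshd.service"]:
        print(unit, bool(pattern.fullmatch(unit)))
    # edpm_iscsid.service True, virtsecretd.service True, sshd.service False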
Oct 11 02:01:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:01:20 compute-0 ceph-mon[191930]: pgmap v694: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v695: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:21 compute-0 sudo[326055]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:22 compute-0 systemd[1]: Stopping User Manager for UID 0...
Oct 11 02:01:22 compute-0 systemd[325569]: Activating special unit Exit the Session...
Oct 11 02:01:22 compute-0 systemd[325569]: Stopped target Main User Target.
Oct 11 02:01:22 compute-0 systemd[325569]: Stopped target Basic System.
Oct 11 02:01:22 compute-0 systemd[325569]: Stopped target Paths.
Oct 11 02:01:22 compute-0 systemd[325569]: Stopped target Sockets.
Oct 11 02:01:22 compute-0 systemd[325569]: Stopped target Timers.
Oct 11 02:01:22 compute-0 systemd[325569]: Stopped Daily Cleanup of User's Temporary Directories.
Oct 11 02:01:22 compute-0 systemd[325569]: Closed D-Bus User Message Bus Socket.
Oct 11 02:01:22 compute-0 systemd[325569]: Stopped Create User's Volatile Files and Directories.
Oct 11 02:01:22 compute-0 systemd[325569]: Removed slice User Application Slice.
Oct 11 02:01:22 compute-0 systemd[325569]: Reached target Shutdown.
Oct 11 02:01:22 compute-0 systemd[325569]: Finished Exit the Session.
Oct 11 02:01:22 compute-0 systemd[325569]: Reached target Exit the Session.
Oct 11 02:01:22 compute-0 systemd[1]: user@0.service: Deactivated successfully.
Oct 11 02:01:22 compute-0 systemd[1]: Stopped User Manager for UID 0.
Oct 11 02:01:22 compute-0 systemd[1]: Stopping User Runtime Directory /run/user/0...
Oct 11 02:01:22 compute-0 systemd[1]: run-user-0.mount: Deactivated successfully.
Oct 11 02:01:22 compute-0 systemd[1]: user-runtime-dir@0.service: Deactivated successfully.
Oct 11 02:01:22 compute-0 systemd[1]: Stopped User Runtime Directory /run/user/0.
Oct 11 02:01:22 compute-0 systemd[1]: Removed slice User Slice of UID 0.
Oct 11 02:01:22 compute-0 ceph-mon[191930]: pgmap v695: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:22 compute-0 sudo[326407]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mvjimoktcnfafayttlifbtrmwrrfvisq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148082.1990318-444-192699548463636/AnsiballZ_file.py'
Oct 11 02:01:22 compute-0 sudo[326407]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:22 compute-0 podman[326367]: 2025-10-11 02:01:22.937090293 +0000 UTC m=+0.148460811 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_id=edpm, managed_by=edpm_ansible)
Oct 11 02:01:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v696: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:23 compute-0 python3.9[326413]: ansible-ansible.builtin.file Invoked with mode=0755 path=/etc/modules-load.d selevel=s0 setype=etc_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None attributes=None
Oct 11 02:01:23 compute-0 sudo[326407]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:24 compute-0 sudo[326564]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-giexjuqejzgdqncrfwfhycmkffpfaact ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148083.490808-452-209153853327411/AnsiballZ_modprobe.py'
Oct 11 02:01:24 compute-0 sudo[326564]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:24 compute-0 python3.9[326566]: ansible-community.general.modprobe Invoked with name=dm-multipath state=present params= persistent=disabled
Oct 11 02:01:24 compute-0 sudo[326564]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:24 compute-0 ceph-mon[191930]: pgmap v696: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:01:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v697: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:25 compute-0 sudo[326720]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zupecbruykogoswaljfryllrbyzpdjnb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148084.8079178-460-100099274501566/AnsiballZ_stat.py'
Oct 11 02:01:25 compute-0 sudo[326720]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:25 compute-0 python3.9[326722]: ansible-ansible.legacy.stat Invoked with path=/etc/modules-load.d/dm-multipath.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:01:25 compute-0 sudo[326720]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:26 compute-0 podman[326793]: 2025-10-11 02:01:26.238053828 +0000 UTC m=+0.128056413 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_managed=true)
Oct 11 02:01:26 compute-0 sudo[326863]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nkikdxlvibwrftncrhowhirgzxchztnd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148084.8079178-460-100099274501566/AnsiballZ_copy.py'
Oct 11 02:01:26 compute-0 sudo[326863]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:26 compute-0 ceph-mon[191930]: pgmap v697: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:01:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:01:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:01:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:01:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:01:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:01:26 compute-0 python3.9[326865]: ansible-ansible.legacy.copy Invoked with dest=/etc/modules-load.d/dm-multipath.conf mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760148084.8079178-460-100099274501566/.source.conf follow=False _original_basename=module-load.conf.j2 checksum=065061c60917e4f67cecc70d12ce55e42f9d0b3f backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:01:26 compute-0 sudo[326863]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v698: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:27 compute-0 sudo[327015]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gelgemxemgrvesmbrauhdzvycxkxnhnp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148086.978476-476-30982956322413/AnsiballZ_lineinfile.py'
Oct 11 02:01:27 compute-0 sudo[327015]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:27 compute-0 python3.9[327017]: ansible-ansible.builtin.lineinfile Invoked with create=True dest=/etc/modules line=dm-multipath  mode=0644 state=present path=/etc/modules backrefs=False backup=False firstmatch=False unsafe_writes=False regexp=None search_string=None insertafter=None insertbefore=None validate=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:01:27 compute-0 sudo[327015]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:28 compute-0 ceph-mon[191930]: pgmap v698: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:28 compute-0 sudo[327167]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bbjddfingteidjsmxbidfjgvsxmugsad ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148088.1338022-484-271804766464525/AnsiballZ_systemd.py'
Oct 11 02:01:28 compute-0 sudo[327167]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:29 compute-0 python3.9[327169]: ansible-ansible.builtin.systemd Invoked with name=systemd-modules-load.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 02:01:29 compute-0 systemd[1]: systemd-modules-load.service: Deactivated successfully.
Oct 11 02:01:29 compute-0 systemd[1]: Stopped Load Kernel Modules.
Oct 11 02:01:29 compute-0 systemd[1]: Stopping Load Kernel Modules...
Oct 11 02:01:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v699: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:29 compute-0 systemd[1]: Starting Load Kernel Modules...
Oct 11 02:01:29 compute-0 systemd[1]: Finished Load Kernel Modules.
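The tasks from 02:01:23 through 02:01:29 make dm-multipath available both immediately and across reboots: create /etc/modules-load.d, modprobe the module, drop dm-multipath.conf into modules-load.d, append the name to /etc/modules, and restart systemd-modules-load.service so the new configuration is picked up. A compact equivalent of that sequence (a sketch, not the edpm_ansible implementation):

    # Load a kernel module now and persist it across reboots, as the play does.
    import subprocess
    from pathlib import Path

    def ensure_module_persistent(name="dm-multipath"):
        subprocess.run(["modprobe", name], check=True)          # load now
        conf = Path("/etc/modules-load.d") / f"{name}.conf"
        conf.parent.mkdir(mode=0o755, exist_ok=True)
        conf.write_text(name + "\n")                            # load on every boot
        conf.chmod(0o644)
        # systemd-modules-load re-reads /etc/modules-load.d on restart
        subprocess.run(["systemctl", "restart", "systemd-modules-load.service"], check=True)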
Oct 11 02:01:29 compute-0 sudo[327167]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:29 compute-0 podman[157119]: time="2025-10-11T02:01:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:01:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:01:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 38200 "" "Go-http-client/1.1"
Oct 11 02:01:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:01:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 7686 "" "Go-http-client/1.1"
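The podman[157119] entries are the podman system service answering libpod REST calls over its unix socket; the prometheus-podman-exporter config later in the log points CONTAINER_HOST at unix:///run/podman/podman.sock. The same query can be issued from Python's standard library, assuming that default rootful socket path:

    # Querying the libpod REST endpoint from the log over podman's unix socket.
    import http.client
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        def __init__(self, socket_path):
            super().__init__("localhost")
            self.socket_path = socket_path

        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self.socket_path)

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    print(conn.getresponse().read()[:200])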
Oct 11 02:01:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:01:30 compute-0 ceph-mon[191930]: pgmap v699: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:30 compute-0 sudo[327323]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gsgbdubouwmxkspvzrxerabvbdwstnnp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148089.481532-492-98319462075219/AnsiballZ_file.py'
Oct 11 02:01:30 compute-0 sudo[327323]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:31 compute-0 python3.9[327325]: ansible-ansible.builtin.file Invoked with mode=0755 path=/etc/multipath setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:01:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v700: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:31 compute-0 sudo[327323]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:31 compute-0 openstack_network_exporter[159265]: ERROR   02:01:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:01:31 compute-0 openstack_network_exporter[159265]: ERROR   02:01:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:01:31 compute-0 openstack_network_exporter[159265]: ERROR   02:01:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:01:31 compute-0 openstack_network_exporter[159265]: ERROR   02:01:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:01:31 compute-0 openstack_network_exporter[159265]: ERROR   02:01:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:01:32 compute-0 sudo[327475]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kvfdlxodvypalbvxretahwiqwrttymyn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148091.510167-501-182508740908482/AnsiballZ_stat.py'
Oct 11 02:01:32 compute-0 sudo[327475]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:32 compute-0 python3.9[327477]: ansible-ansible.builtin.stat Invoked with path=/etc/multipath.conf follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:01:32 compute-0 sudo[327475]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:32 compute-0 ceph-mon[191930]: pgmap v700: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v701: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:33 compute-0 podman[327480]: 2025-10-11 02:01:33.254021864 +0000 UTC m=+0.122959469 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.expose-services=, build-date=2024-09-18T21:23:30, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.component=ubi9-container, io.buildah.version=1.29.0, name=ubi9, architecture=x86_64, release=1214.1726694543, version=9.4, io.k8s.display-name=Red Hat Universal Base Image 9, summary=Provides the latest release of Red Hat Universal Base Image 9., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, managed_by=edpm_ansible, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, maintainer=Red Hat, Inc., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-type=git, vendor=Red Hat, Inc., container_name=kepler, distribution-scope=public, io.openshift.tags=base rhel9, release-0.7.12=)
Oct 11 02:01:33 compute-0 podman[327478]: 2025-10-11 02:01:33.255878974 +0000 UTC m=+0.137036899 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:01:33 compute-0 podman[327479]: 2025-10-11 02:01:33.292835177 +0000 UTC m=+0.171240593 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ovn_controller, io.buildah.version=1.41.3)
Oct 11 02:01:34 compute-0 sudo[327693]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-byxajgunandfmytavocdqfwnezvahwwg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148093.5741994-510-44741784327482/AnsiballZ_stat.py'
Oct 11 02:01:34 compute-0 sudo[327693]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:34 compute-0 python3.9[327695]: ansible-ansible.builtin.stat Invoked with path=/etc/multipath.conf follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:01:34 compute-0 sudo[327693]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:34 compute-0 ceph-mon[191930]: pgmap v701: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:01:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v702: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:35 compute-0 podman[327801]: 2025-10-11 02:01:35.247592452 +0000 UTC m=+0.132746813 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.vendor=CentOS)
Oct 11 02:01:35 compute-0 sudo[327864]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wlmxuqwvgezctfosaidcpmyaqwrjwzsn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148094.7080777-518-6231393948219/AnsiballZ_stat.py'
Oct 11 02:01:35 compute-0 sudo[327864]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:35 compute-0 python3.9[327866]: ansible-ansible.legacy.stat Invoked with path=/etc/multipath.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:01:35 compute-0 sudo[327864]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:36 compute-0 sudo[327987]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-athsocxnlzjltmgmagjsyszvsummhfvm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148094.7080777-518-6231393948219/AnsiballZ_copy.py'
Oct 11 02:01:36 compute-0 sudo[327987]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:36 compute-0 python3.9[327989]: ansible-ansible.legacy.copy Invoked with dest=/etc/multipath.conf mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760148094.7080777-518-6231393948219/.source.conf _original_basename=multipath.conf follow=False checksum=bf02ab264d3d648048a81f3bacec8bc58db93162 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:01:36 compute-0 sudo[327987]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:36 compute-0 ceph-mon[191930]: pgmap v702: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v703: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:37 compute-0 sudo[328139]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aturyefckucbzlchgudoicwfqvhhjppu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148096.809565-533-90546605294097/AnsiballZ_command.py'
Oct 11 02:01:37 compute-0 sudo[328139]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:37 compute-0 python3.9[328141]: ansible-ansible.legacy.command Invoked with _raw_params=grep -q '^blacklist\s*{' /etc/multipath.conf _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:01:37 compute-0 sudo[328139]: pam_unix(sudo:session): session closed for user root
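The command task above is a guard: grep -q exits 0 only if /etc/multipath.conf already opens a blacklist section, and the playbook uses that result to decide whether the lineinfile/replace tasks that follow need to create one. Reproduced directly in shell against the file installed by the copy task:

    if grep -q '^blacklist\s*{' /etc/multipath.conf; then
        echo 'blacklist section present'
    else
        echo 'blacklist section missing'   # the edits below then create it
    fi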
Oct 11 02:01:38 compute-0 ceph-mon[191930]: pgmap v703: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:38 compute-0 sudo[328292]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-svuxysmisjefebsoyzwirlhvawbaydgm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148098.0786142-541-87108414524353/AnsiballZ_lineinfile.py'
Oct 11 02:01:38 compute-0 sudo[328292]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:38 compute-0 python3.9[328294]: ansible-ansible.builtin.lineinfile Invoked with line=blacklist { path=/etc/multipath.conf state=present backrefs=False create=False backup=False firstmatch=False unsafe_writes=False regexp=None search_string=None insertafter=None insertbefore=None validate=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:01:38 compute-0 sudo[328292]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v704: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:01:39 compute-0 sudo[328444]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fvursshpvuerboxyiciqklljntmeiwqi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148099.2159488-549-164302085157488/AnsiballZ_replace.py'
Oct 11 02:01:40 compute-0 sudo[328444]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:40 compute-0 python3.9[328446]: ansible-ansible.builtin.replace Invoked with path=/etc/multipath.conf regexp=^(blacklist {) replace=\1\n} backup=False encoding=utf-8 unsafe_writes=False after=None before=None validate=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:01:40 compute-0 sudo[328444]: pam_unix(sudo:session): session closed for user root
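Paired with the lineinfile task at 02:01:38, this replace synthesizes an empty section: lineinfile appends the literal line 'blacklist {', and replace rewrites '^(blacklist {)' to the same line followed by '}' on the next line. A second replace at 02:01:41 below then drops any 'devnode ".*"' entry found inside the section, since that pattern would blacklist every device. Net effect on the file, as implied by the task arguments (a reconstruction, not a dump from the host):

    blacklist {
    }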
Oct 11 02:01:40 compute-0 ceph-mon[191930]: pgmap v704: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:40 compute-0 sudo[328556]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:01:40 compute-0 sudo[328556]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:01:40 compute-0 sudo[328556]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:41 compute-0 sudo[328631]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cflwdudhucnwfniqybyjzlqofzlxmqts ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148100.5667868-557-100233106611205/AnsiballZ_replace.py'
Oct 11 02:01:41 compute-0 sudo[328631]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v705: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:41 compute-0 sudo[328613]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:01:41 compute-0 sudo[328613]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:01:41 compute-0 sudo[328613]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:41 compute-0 sudo[328651]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:01:41 compute-0 sudo[328651]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:01:41 compute-0 sudo[328651]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:41 compute-0 python3.9[328648]: ansible-ansible.builtin.replace Invoked with path=/etc/multipath.conf regexp=^blacklist\s*{\n[\s]+devnode \"\.\*\" replace=blacklist { backup=False encoding=utf-8 unsafe_writes=False after=None before=None validate=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:01:41 compute-0 sudo[328631]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:41 compute-0 sudo[328676]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:01:41 compute-0 sudo[328676]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:01:41 compute-0 unix_chkpwd[328724]: password check failed for user (root)
Oct 11 02:01:41 compute-0 sshd-session[328546]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
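The unix_chkpwd/sshd-session pair is background noise rather than deployment activity: an external host, 193.46.255.217, is probing root password logins over SSH, and the same source fails again at 02:01:43 and 02:01:46 below. One way to enumerate such attempts, assuming the journald identifiers used in this log and a standard util-linux lastb:

    journalctl -t sshd-session --since '02:00' | grep 'Failed password'
    lastb -i | head    # failed logins recorded in /var/log/btmp, with source addresses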
Oct 11 02:01:42 compute-0 sudo[328676]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:01:42 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:01:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:01:42 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:01:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:01:42 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:01:42 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 4809bd7f-d880-4bd1-bd26-668fb8b35f3c does not exist
Oct 11 02:01:42 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 041887d9-3a8c-47c6-b88c-2dbe43dfdac8 does not exist
Oct 11 02:01:42 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 5d9da0db-6edf-4b09-bf6f-49ff4b437835 does not exist
Oct 11 02:01:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:01:42 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:01:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:01:42 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:01:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:01:42 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:01:42 compute-0 podman[328831]: 2025-10-11 02:01:42.229633519 +0000 UTC m=+0.114427748 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=iscsid, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS)
Oct 11 02:01:42 compute-0 sudo[328864]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:01:42 compute-0 sudo[328864]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:01:42 compute-0 sudo[328864]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:42 compute-0 sudo[328923]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ckhwfqxiqjoezzxewatcavscpffwwtea ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148101.7645748-566-119732707451088/AnsiballZ_lineinfile.py'
Oct 11 02:01:42 compute-0 sudo[328923]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:42 compute-0 sudo[328924]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:01:42 compute-0 sudo[328924]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:01:42 compute-0 sudo[328924]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:42 compute-0 sudo[328951]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:01:42 compute-0 sudo[328951]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:01:42 compute-0 sudo[328951]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:42 compute-0 python3.9[328934]: ansible-ansible.builtin.lineinfile Invoked with firstmatch=True insertafter=^defaults line=        find_multipaths yes path=/etc/multipath.conf regexp=^\s+find_multipaths state=present backrefs=False create=False backup=False unsafe_writes=False search_string=None insertbefore=None validate=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:01:42 compute-0 sudo[328923]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:42 compute-0 sudo[328976]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:01:42 compute-0 sudo[328976]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
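This is cephadm's OSD-creation path: the mgr's cephadm module ships a content-addressed copy of the cephadm binary into /var/lib/ceph/<fsid>/ and runs it via sudo as ceph-admin, wrapping 'ceph-volume lvm batch' over three pre-made LVs, with the drive-group name carried in CEPH_VOLUME_OSDSPEC_AFFINITY and credentials fed as JSON on stdin ('--config-json -'). The same action can be previewed without changes; a sketch, assuming a cephadm binary on PATH (--report is ceph-volume's dry-run flag):

    cephadm ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- \
        lvm batch --report --no-auto \
        /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2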
Oct 11 02:01:42 compute-0 ceph-mon[191930]: pgmap v705: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:01:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:01:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:01:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:01:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:01:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:01:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v706: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:43 compute-0 sshd-session[328546]: Failed password for root from 193.46.255.217 port 26308 ssh2
Oct 11 02:01:43 compute-0 podman[329040]: 2025-10-11 02:01:43.205621532 +0000 UTC m=+0.091930139 container create 334175f217495d7cb916634d9ec6794265b78ecd1d98ff20d07c48df3b7ae7c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_burnell, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2)
Oct 11 02:01:43 compute-0 podman[329040]: 2025-10-11 02:01:43.162738696 +0000 UTC m=+0.049047403 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:01:43 compute-0 systemd[1]: Started libpod-conmon-334175f217495d7cb916634d9ec6794265b78ecd1d98ff20d07c48df3b7ae7c3.scope.
Oct 11 02:01:43 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:01:43 compute-0 podman[329040]: 2025-10-11 02:01:43.380157349 +0000 UTC m=+0.266465986 container init 334175f217495d7cb916634d9ec6794265b78ecd1d98ff20d07c48df3b7ae7c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_burnell, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:01:43 compute-0 podman[329040]: 2025-10-11 02:01:43.405353087 +0000 UTC m=+0.291661694 container start 334175f217495d7cb916634d9ec6794265b78ecd1d98ff20d07c48df3b7ae7c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_burnell, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:01:43 compute-0 podman[329040]: 2025-10-11 02:01:43.411088068 +0000 UTC m=+0.297396705 container attach 334175f217495d7cb916634d9ec6794265b78ecd1d98ff20d07c48df3b7ae7c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_burnell, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0)
Oct 11 02:01:43 compute-0 wizardly_burnell[329079]: 167 167
Oct 11 02:01:43 compute-0 systemd[1]: libpod-334175f217495d7cb916634d9ec6794265b78ecd1d98ff20d07c48df3b7ae7c3.scope: Deactivated successfully.
Oct 11 02:01:43 compute-0 podman[329040]: 2025-10-11 02:01:43.420972753 +0000 UTC m=+0.307281380 container died 334175f217495d7cb916634d9ec6794265b78ecd1d98ff20d07c48df3b7ae7c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_burnell, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default)
Oct 11 02:01:43 compute-0 systemd[1]: var-lib-containers-storage-overlay-fca179ad5ece46c3f5dc961dc7b3a15da7a6c4b51f41ee34dd48e619063fc036-merged.mount: Deactivated successfully.
Oct 11 02:01:43 compute-0 podman[329040]: 2025-10-11 02:01:43.489045407 +0000 UTC m=+0.375354004 container remove 334175f217495d7cb916634d9ec6794265b78ecd1d98ff20d07c48df3b7ae7c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=wizardly_burnell, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:01:43 compute-0 systemd[1]: libpod-conmon-334175f217495d7cb916634d9ec6794265b78ecd1d98ff20d07c48df3b7ae7c3.scope: Deactivated successfully.
Oct 11 02:01:43 compute-0 podman[329159]: 2025-10-11 02:01:43.733307166 +0000 UTC m=+0.076881089 container create c74961cf960f297d9cc4c3d42c3e053a9d1255d37fe3bf1d4b1c0485dab9cdfc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_wilson, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:01:43 compute-0 podman[329159]: 2025-10-11 02:01:43.700982732 +0000 UTC m=+0.044556695 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:01:43 compute-0 systemd[1]: Started libpod-conmon-c74961cf960f297d9cc4c3d42c3e053a9d1255d37fe3bf1d4b1c0485dab9cdfc.scope.
Oct 11 02:01:43 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:01:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fdff55633b7a07e5f539bf84c968c69f23208bbfe8ae9d5f00ca7e64540f94c1/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:01:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fdff55633b7a07e5f539bf84c968c69f23208bbfe8ae9d5f00ca7e64540f94c1/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:01:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fdff55633b7a07e5f539bf84c968c69f23208bbfe8ae9d5f00ca7e64540f94c1/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:01:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fdff55633b7a07e5f539bf84c968c69f23208bbfe8ae9d5f00ca7e64540f94c1/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:01:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fdff55633b7a07e5f539bf84c968c69f23208bbfe8ae9d5f00ca7e64540f94c1/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:01:43 compute-0 podman[329159]: 2025-10-11 02:01:43.944569843 +0000 UTC m=+0.288143786 container init c74961cf960f297d9cc4c3d42c3e053a9d1255d37fe3bf1d4b1c0485dab9cdfc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_wilson, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2)
Oct 11 02:01:43 compute-0 podman[329159]: 2025-10-11 02:01:43.969199365 +0000 UTC m=+0.312773268 container start c74961cf960f297d9cc4c3d42c3e053a9d1255d37fe3bf1d4b1c0485dab9cdfc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_wilson, io.buildah.version=1.39.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 02:01:43 compute-0 podman[329159]: 2025-10-11 02:01:43.974208339 +0000 UTC m=+0.317782272 container attach c74961cf960f297d9cc4c3d42c3e053a9d1255d37fe3bf1d4b1c0485dab9cdfc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_wilson, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:01:44 compute-0 sudo[329247]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lidxxcpclsuepdfizomfgugcdsaoivse ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148103.432248-566-273804133539168/AnsiballZ_lineinfile.py'
Oct 11 02:01:44 compute-0 sudo[329247]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:44 compute-0 python3.9[329249]: ansible-ansible.builtin.lineinfile Invoked with firstmatch=True insertafter=^defaults line=        recheck_wwid yes path=/etc/multipath.conf regexp=^\s+recheck_wwid state=present backrefs=False create=False backup=False unsafe_writes=False search_string=None insertbefore=None validate=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:01:44 compute-0 sudo[329247]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:44 compute-0 ceph-mon[191930]: pgmap v706: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:44 compute-0 unix_chkpwd[329330]: password check failed for user (root)
Oct 11 02:01:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:01:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v707: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:45 compute-0 goofy_wilson[329215]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:01:45 compute-0 goofy_wilson[329215]: --> relative data size: 1.0
Oct 11 02:01:45 compute-0 goofy_wilson[329215]: --> All data devices are unavailable
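So the batch run creates nothing: ceph-volume inspects the three LVs, finds them already carrying ceph.* LVM tags (visible in the lvm list output further down), and classifies them as unavailable, the expected outcome of an idempotent re-run against devices that already back OSDs 0-2. The tags can be checked directly; a sketch, assuming the volume groups named in the batch command:

    # non-empty lv_tags (ceph.osd_id=..., ceph.osd_fsid=...) mark an LV as consumed
    sudo lvs -o lv_name,vg_name,lv_tags ceph_vg0 ceph_vg1 ceph_vg2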
Oct 11 02:01:45 compute-0 systemd[1]: libpod-c74961cf960f297d9cc4c3d42c3e053a9d1255d37fe3bf1d4b1c0485dab9cdfc.scope: Deactivated successfully.
Oct 11 02:01:45 compute-0 systemd[1]: libpod-c74961cf960f297d9cc4c3d42c3e053a9d1255d37fe3bf1d4b1c0485dab9cdfc.scope: Consumed 1.303s CPU time.
Oct 11 02:01:45 compute-0 podman[329159]: 2025-10-11 02:01:45.339953877 +0000 UTC m=+1.683527860 container died c74961cf960f297d9cc4c3d42c3e053a9d1255d37fe3bf1d4b1c0485dab9cdfc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_wilson, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0)
Oct 11 02:01:45 compute-0 systemd[1]: var-lib-containers-storage-overlay-fdff55633b7a07e5f539bf84c968c69f23208bbfe8ae9d5f00ca7e64540f94c1-merged.mount: Deactivated successfully.
Oct 11 02:01:45 compute-0 podman[329159]: 2025-10-11 02:01:45.447722573 +0000 UTC m=+1.791296526 container remove c74961cf960f297d9cc4c3d42c3e053a9d1255d37fe3bf1d4b1c0485dab9cdfc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_wilson, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:01:45 compute-0 systemd[1]: libpod-conmon-c74961cf960f297d9cc4c3d42c3e053a9d1255d37fe3bf1d4b1c0485dab9cdfc.scope: Deactivated successfully.
Oct 11 02:01:45 compute-0 sudo[328976]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:45 compute-0 sudo[329363]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:01:45 compute-0 sudo[329363]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:01:45 compute-0 sudo[329363]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:45 compute-0 sudo[329389]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:01:45 compute-0 sudo[329389]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:01:45 compute-0 sudo[329389]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:45 compute-0 sudo[329436]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:01:45 compute-0 sudo[329436]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:01:45 compute-0 sudo[329436]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:46 compute-0 sudo[329485]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:01:46 compute-0 sudo[329485]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:01:46 compute-0 sudo[329534]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-riuqqhofqdpdvuzzrcdddlwaoncyjtzc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148104.5199218-566-144853041233625/AnsiballZ_lineinfile.py'
Oct 11 02:01:46 compute-0 sudo[329534]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:46 compute-0 python3.9[329538]: ansible-ansible.builtin.lineinfile Invoked with firstmatch=True insertafter=^defaults line=        skip_kpartx yes path=/etc/multipath.conf regexp=^\s+skip_kpartx state=present backrefs=False create=False backup=False unsafe_writes=False search_string=None insertbefore=None validate=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:01:46 compute-0 sudo[329534]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:46 compute-0 podman[329624]: 2025-10-11 02:01:46.645946081 +0000 UTC m=+0.074251261 container create 8654c155e8d4b38ee61e39efcc1693571252c648c71c1df25527619387fec5d6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gifted_sammet, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:01:46 compute-0 ceph-mon[191930]: pgmap v707: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:46 compute-0 podman[329624]: 2025-10-11 02:01:46.61863412 +0000 UTC m=+0.046939310 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:01:46 compute-0 systemd[1]: Started libpod-conmon-8654c155e8d4b38ee61e39efcc1693571252c648c71c1df25527619387fec5d6.scope.
Oct 11 02:01:46 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:01:46 compute-0 podman[329624]: 2025-10-11 02:01:46.810749804 +0000 UTC m=+0.239055044 container init 8654c155e8d4b38ee61e39efcc1693571252c648c71c1df25527619387fec5d6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gifted_sammet, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:01:46 compute-0 podman[329624]: 2025-10-11 02:01:46.824570711 +0000 UTC m=+0.252875861 container start 8654c155e8d4b38ee61e39efcc1693571252c648c71c1df25527619387fec5d6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gifted_sammet, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507)
Oct 11 02:01:46 compute-0 podman[329624]: 2025-10-11 02:01:46.830584175 +0000 UTC m=+0.258889345 container attach 8654c155e8d4b38ee61e39efcc1693571252c648c71c1df25527619387fec5d6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gifted_sammet, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS)
Oct 11 02:01:46 compute-0 sshd-session[328546]: Failed password for root from 193.46.255.217 port 26308 ssh2
Oct 11 02:01:46 compute-0 gifted_sammet[329671]: 167 167
Oct 11 02:01:46 compute-0 systemd[1]: libpod-8654c155e8d4b38ee61e39efcc1693571252c648c71c1df25527619387fec5d6.scope: Deactivated successfully.
Oct 11 02:01:46 compute-0 podman[329624]: 2025-10-11 02:01:46.84238456 +0000 UTC m=+0.270689740 container died 8654c155e8d4b38ee61e39efcc1693571252c648c71c1df25527619387fec5d6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gifted_sammet, ceph=True, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:01:46 compute-0 systemd[1]: var-lib-containers-storage-overlay-dba1ea371780b74e9dbaee77e361422f93f0edceeb8641611f0ecdf2789c4e51-merged.mount: Deactivated successfully.
Oct 11 02:01:46 compute-0 podman[329624]: 2025-10-11 02:01:46.911362484 +0000 UTC m=+0.339667664 container remove 8654c155e8d4b38ee61e39efcc1693571252c648c71c1df25527619387fec5d6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gifted_sammet, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 02:01:46 compute-0 systemd[1]: libpod-conmon-8654c155e8d4b38ee61e39efcc1693571252c648c71c1df25527619387fec5d6.scope: Deactivated successfully.
Oct 11 02:01:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v708: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:47 compute-0 podman[329741]: 2025-10-11 02:01:47.231123036 +0000 UTC m=+0.093130712 container create b724408389c03ee7bcacafde90261dc486bce53f9ea9e2ca9697ac5f1e6e2127 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_williams, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 02:01:47 compute-0 podman[329741]: 2025-10-11 02:01:47.193565536 +0000 UTC m=+0.055573272 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:01:47 compute-0 systemd[1]: Started libpod-conmon-b724408389c03ee7bcacafde90261dc486bce53f9ea9e2ca9697ac5f1e6e2127.scope.
Oct 11 02:01:47 compute-0 sudo[329780]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tzywbvokpqphywtozobezuxssuvjxixq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148106.5821776-566-64897076985124/AnsiballZ_lineinfile.py'
Oct 11 02:01:47 compute-0 sudo[329780]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:47 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:01:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/610d6d643af349b18cb5d9a31dfa2fd081f299bc0b808d28c8fe44141cd3261f/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:01:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/610d6d643af349b18cb5d9a31dfa2fd081f299bc0b808d28c8fe44141cd3261f/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:01:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/610d6d643af349b18cb5d9a31dfa2fd081f299bc0b808d28c8fe44141cd3261f/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:01:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/610d6d643af349b18cb5d9a31dfa2fd081f299bc0b808d28c8fe44141cd3261f/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:01:47 compute-0 podman[329741]: 2025-10-11 02:01:47.385830182 +0000 UTC m=+0.247837828 container init b724408389c03ee7bcacafde90261dc486bce53f9ea9e2ca9697ac5f1e6e2127 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_williams, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2)
Oct 11 02:01:47 compute-0 podman[329741]: 2025-10-11 02:01:47.410785537 +0000 UTC m=+0.272793193 container start b724408389c03ee7bcacafde90261dc486bce53f9ea9e2ca9697ac5f1e6e2127 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_williams, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2)
Oct 11 02:01:47 compute-0 podman[329741]: 2025-10-11 02:01:47.415703269 +0000 UTC m=+0.277710945 container attach b724408389c03ee7bcacafde90261dc486bce53f9ea9e2ca9697ac5f1e6e2127 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_williams, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 02:01:47 compute-0 python3.9[329786]: ansible-ansible.builtin.lineinfile Invoked with firstmatch=True insertafter=^defaults line=        user_friendly_names no path=/etc/multipath.conf regexp=^\s+user_friendly_names state=present backrefs=False create=False backup=False unsafe_writes=False search_string=None insertbefore=None validate=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:01:47 compute-0 sudo[329780]: pam_unix(sudo:session): session closed for user root
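With user_friendly_names this completes a series of four lineinfile tasks (02:01:42 through 02:01:47) that pin multipath defaults: each uses insertafter=^defaults plus firstmatch and a '^\s+<key>' regexp, so a re-run updates the existing line in place instead of appending a duplicate. Together with the blacklist edits earlier, the intended /etc/multipath.conf shape is roughly the following (a reconstruction from the task arguments; line order inside defaults depends on what the file already contained):

    defaults {
            find_multipaths yes
            recheck_wwid yes
            skip_kpartx yes
            user_friendly_names no
    }
    blacklist {
    }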
Oct 11 02:01:47 compute-0 unix_chkpwd[329866]: password check failed for user (root)
Oct 11 02:01:48 compute-0 heuristic_williams[329784]: {
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:     "0": [
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:         {
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "devices": [
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "/dev/loop3"
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             ],
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "lv_name": "ceph_lv0",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "lv_size": "21470642176",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "name": "ceph_lv0",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "tags": {
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.cluster_name": "ceph",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.crush_device_class": "",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.encrypted": "0",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.osd_id": "0",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.type": "block",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.vdo": "0"
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             },
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "type": "block",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "vg_name": "ceph_vg0"
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:         }
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:     ],
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:     "1": [
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:         {
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "devices": [
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "/dev/loop4"
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             ],
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "lv_name": "ceph_lv1",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "lv_size": "21470642176",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "name": "ceph_lv1",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "tags": {
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.cluster_name": "ceph",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.crush_device_class": "",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.encrypted": "0",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.osd_id": "1",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.type": "block",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.vdo": "0"
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             },
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "type": "block",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "vg_name": "ceph_vg1"
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:         }
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:     ],
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:     "2": [
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:         {
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "devices": [
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "/dev/loop5"
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             ],
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "lv_name": "ceph_lv2",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "lv_size": "21470642176",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "name": "ceph_lv2",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "tags": {
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.cluster_name": "ceph",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.crush_device_class": "",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.encrypted": "0",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.osd_id": "2",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.type": "block",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:                 "ceph.vdo": "0"
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             },
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "type": "block",
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:             "vg_name": "ceph_vg2"
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:         }
Oct 11 02:01:48 compute-0 heuristic_williams[329784]:     ]
Oct 11 02:01:48 compute-0 heuristic_williams[329784]: }
Oct 11 02:01:48 compute-0 systemd[1]: libpod-b724408389c03ee7bcacafde90261dc486bce53f9ea9e2ca9697ac5f1e6e2127.scope: Deactivated successfully.
Oct 11 02:01:48 compute-0 podman[329741]: 2025-10-11 02:01:48.234159886 +0000 UTC m=+1.096167562 container died b724408389c03ee7bcacafde90261dc486bce53f9ea9e2ca9697ac5f1e6e2127 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_williams, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2)
Oct 11 02:01:48 compute-0 systemd[1]: var-lib-containers-storage-overlay-610d6d643af349b18cb5d9a31dfa2fd081f299bc0b808d28c8fe44141cd3261f-merged.mount: Deactivated successfully.
Oct 11 02:01:48 compute-0 sudo[329955]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ztfsvznhrvyiwlnrfjoaisajahijoaus ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148107.8141985-595-208232691474554/AnsiballZ_stat.py'
Oct 11 02:01:48 compute-0 podman[329741]: 2025-10-11 02:01:48.377499401 +0000 UTC m=+1.239507047 container remove b724408389c03ee7bcacafde90261dc486bce53f9ea9e2ca9697ac5f1e6e2127 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_williams, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Oct 11 02:01:48 compute-0 sudo[329955]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:48 compute-0 systemd[1]: libpod-conmon-b724408389c03ee7bcacafde90261dc486bce53f9ea9e2ca9697ac5f1e6e2127.scope: Deactivated successfully.
Oct 11 02:01:48 compute-0 sudo[329485]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:48 compute-0 sudo[329958]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:01:48 compute-0 sudo[329958]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:01:48 compute-0 sudo[329958]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:48 compute-0 python3.9[329957]: ansible-ansible.builtin.stat Invoked with path=/etc/multipath.conf follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:01:48 compute-0 sudo[329983]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:01:48 compute-0 sudo[329983]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:01:48 compute-0 ceph-mon[191930]: pgmap v708: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:48 compute-0 sudo[329983]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:48 compute-0 sudo[329955]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:48 compute-0 sudo[330010]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:01:48 compute-0 sudo[330010]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:01:48 compute-0 sudo[330010]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:48 compute-0 sudo[330058]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:01:48 compute-0 sudo[330058]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:01:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v709: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:49 compute-0 podman[330223]: 2025-10-11 02:01:49.480741707 +0000 UTC m=+0.087687754 container create d2b78a1311447373d48d64d4f3916c11933c4615d41467241049bc5fdc213feb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_banach, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:01:49 compute-0 sudo[330261]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hmlikpkzyrlhrraqvltnilootyjgdjyc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148108.9747894-603-101296440235794/AnsiballZ_file.py'
Oct 11 02:01:49 compute-0 sudo[330261]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:49 compute-0 podman[330223]: 2025-10-11 02:01:49.449194001 +0000 UTC m=+0.056140038 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:01:49 compute-0 systemd[1]: Started libpod-conmon-d2b78a1311447373d48d64d4f3916c11933c4615d41467241049bc5fdc213feb.scope.
Oct 11 02:01:49 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:01:49 compute-0 podman[330223]: 2025-10-11 02:01:49.620601905 +0000 UTC m=+0.227547942 container init d2b78a1311447373d48d64d4f3916c11933c4615d41467241049bc5fdc213feb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_banach, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:01:49 compute-0 podman[330223]: 2025-10-11 02:01:49.637321283 +0000 UTC m=+0.244267300 container start d2b78a1311447373d48d64d4f3916c11933c4615d41467241049bc5fdc213feb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_banach, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:01:49 compute-0 podman[330223]: 2025-10-11 02:01:49.643071964 +0000 UTC m=+0.250017981 container attach d2b78a1311447373d48d64d4f3916c11933c4615d41467241049bc5fdc213feb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_banach, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0)
Oct 11 02:01:49 compute-0 cranky_banach[330269]: 167 167
Oct 11 02:01:49 compute-0 podman[330223]: 2025-10-11 02:01:49.652328052 +0000 UTC m=+0.259274069 container died d2b78a1311447373d48d64d4f3916c11933c4615d41467241049bc5fdc213feb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_banach, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:01:49 compute-0 systemd[1]: libpod-d2b78a1311447373d48d64d4f3916c11933c4615d41467241049bc5fdc213feb.scope: Deactivated successfully.
Oct 11 02:01:49 compute-0 systemd[1]: var-lib-containers-storage-overlay-31d5cafd8393ed7e9db47e681cb9ed0da39998d0f968cac3bbc47ecd0d9f3d68-merged.mount: Deactivated successfully.
Oct 11 02:01:49 compute-0 python3.9[330268]: ansible-ansible.builtin.file Invoked with mode=0644 path=/etc/multipath/.multipath_restart_required state=touch recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:01:49 compute-0 podman[330223]: 2025-10-11 02:01:49.752425457 +0000 UTC m=+0.359371504 container remove d2b78a1311447373d48d64d4f3916c11933c4615d41467241049bc5fdc213feb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_banach, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:01:49 compute-0 systemd[1]: libpod-conmon-d2b78a1311447373d48d64d4f3916c11933c4615d41467241049bc5fdc213feb.scope: Deactivated successfully.
Oct 11 02:01:49 compute-0 sudo[330261]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:49 compute-0 podman[330283]: 2025-10-11 02:01:49.872520545 +0000 UTC m=+0.122573285 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.expose-services=, url=https://catalog.redhat.com/en/search?searchType=containers, config_id=edpm, distribution-scope=public, release=1755695350, vcs-type=git, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.tags=minimal rhel9, managed_by=edpm_ansible, architecture=x86_64, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, name=ubi9-minimal, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, container_name=openstack_network_exporter, vendor=Red Hat, Inc., build-date=2025-08-20T13:12:41, com.redhat.component=ubi9-minimal-container, io.buildah.version=1.33.7, version=9.6)
Oct 11 02:01:49 compute-0 podman[330285]: 2025-10-11 02:01:49.903676216 +0000 UTC m=+0.130496779 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:01:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:01:49 compute-0 podman[330359]: 2025-10-11 02:01:49.993499312 +0000 UTC m=+0.067191336 container create 43a5717269d6d3de601a23cda20fc4179ca5afa0877afb6020337bba2557ad72 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_galois, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:01:50 compute-0 systemd[1]: Started libpod-conmon-43a5717269d6d3de601a23cda20fc4179ca5afa0877afb6020337bba2557ad72.scope.
Oct 11 02:01:50 compute-0 podman[330359]: 2025-10-11 02:01:49.968329964 +0000 UTC m=+0.042022038 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:01:50 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:01:50 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4529fe74ff983651424d53e59fa837475412d627c6a2f494878c067287da6cd1/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:01:50 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4529fe74ff983651424d53e59fa837475412d627c6a2f494878c067287da6cd1/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:01:50 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4529fe74ff983651424d53e59fa837475412d627c6a2f494878c067287da6cd1/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:01:50 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4529fe74ff983651424d53e59fa837475412d627c6a2f494878c067287da6cd1/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:01:50 compute-0 podman[330359]: 2025-10-11 02:01:50.135521813 +0000 UTC m=+0.209213857 container init 43a5717269d6d3de601a23cda20fc4179ca5afa0877afb6020337bba2557ad72 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_galois, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:01:50 compute-0 podman[330359]: 2025-10-11 02:01:50.154395553 +0000 UTC m=+0.228087567 container start 43a5717269d6d3de601a23cda20fc4179ca5afa0877afb6020337bba2557ad72 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_galois, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:01:50 compute-0 podman[330359]: 2025-10-11 02:01:50.159020293 +0000 UTC m=+0.232712317 container attach 43a5717269d6d3de601a23cda20fc4179ca5afa0877afb6020337bba2557ad72 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_galois, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 02:01:50 compute-0 sshd-session[328546]: Failed password for root from 193.46.255.217 port 26308 ssh2
Oct 11 02:01:50 compute-0 ceph-mon[191930]: pgmap v709: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:50 compute-0 sudo[330505]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-udbafqoefkhsmfmzldfbmuejawpokawa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148110.124739-612-239876039035999/AnsiballZ_file.py'
Oct 11 02:01:50 compute-0 sudo[330505]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:51 compute-0 python3.9[330507]: ansible-ansible.builtin.file Invoked with path=/var/local/libexec recurse=True setype=container_file_t state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:01:51 compute-0 sudo[330505]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v710: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:51 compute-0 sshd-session[328546]: Received disconnect from 193.46.255.217 port 26308:11:  [preauth]
Oct 11 02:01:51 compute-0 sshd-session[328546]: Disconnected from authenticating user root 193.46.255.217 port 26308 [preauth]
Oct 11 02:01:51 compute-0 sshd-session[328546]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 02:01:51 compute-0 angry_galois[330382]: {
Oct 11 02:01:51 compute-0 angry_galois[330382]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:01:51 compute-0 angry_galois[330382]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:01:51 compute-0 angry_galois[330382]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:01:51 compute-0 angry_galois[330382]:         "osd_id": 1,
Oct 11 02:01:51 compute-0 angry_galois[330382]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:01:51 compute-0 angry_galois[330382]:         "type": "bluestore"
Oct 11 02:01:51 compute-0 angry_galois[330382]:     },
Oct 11 02:01:51 compute-0 angry_galois[330382]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:01:51 compute-0 angry_galois[330382]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:01:51 compute-0 angry_galois[330382]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:01:51 compute-0 angry_galois[330382]:         "osd_id": 2,
Oct 11 02:01:51 compute-0 angry_galois[330382]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:01:51 compute-0 angry_galois[330382]:         "type": "bluestore"
Oct 11 02:01:51 compute-0 angry_galois[330382]:     },
Oct 11 02:01:51 compute-0 angry_galois[330382]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:01:51 compute-0 angry_galois[330382]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:01:51 compute-0 angry_galois[330382]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:01:51 compute-0 angry_galois[330382]:         "osd_id": 0,
Oct 11 02:01:51 compute-0 angry_galois[330382]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:01:51 compute-0 angry_galois[330382]:         "type": "bluestore"
Oct 11 02:01:51 compute-0 angry_galois[330382]:     }
Oct 11 02:01:51 compute-0 angry_galois[330382]: }
Oct 11 02:01:51 compute-0 systemd[1]: libpod-43a5717269d6d3de601a23cda20fc4179ca5afa0877afb6020337bba2557ad72.scope: Deactivated successfully.
Oct 11 02:01:51 compute-0 systemd[1]: libpod-43a5717269d6d3de601a23cda20fc4179ca5afa0877afb6020337bba2557ad72.scope: Consumed 1.180s CPU time.
Oct 11 02:01:51 compute-0 podman[330359]: 2025-10-11 02:01:51.33561003 +0000 UTC m=+1.409302154 container died 43a5717269d6d3de601a23cda20fc4179ca5afa0877afb6020337bba2557ad72 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_galois, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:01:51 compute-0 systemd[1]: var-lib-containers-storage-overlay-4529fe74ff983651424d53e59fa837475412d627c6a2f494878c067287da6cd1-merged.mount: Deactivated successfully.
Oct 11 02:01:51 compute-0 podman[330359]: 2025-10-11 02:01:51.448208977 +0000 UTC m=+1.521901001 container remove 43a5717269d6d3de601a23cda20fc4179ca5afa0877afb6020337bba2557ad72 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_galois, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 02:01:51 compute-0 systemd[1]: libpod-conmon-43a5717269d6d3de601a23cda20fc4179ca5afa0877afb6020337bba2557ad72.scope: Deactivated successfully.
Oct 11 02:01:51 compute-0 sudo[330058]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:01:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:01:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:01:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:01:51 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 2e4ea372-7258-46fa-93e2-d74c28ef3140 does not exist
Oct 11 02:01:51 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev a8b467b6-d356-487e-b008-a20ab98cafbc does not exist
Oct 11 02:01:51 compute-0 sudo[330636]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:01:51 compute-0 sudo[330636]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:01:51 compute-0 sudo[330636]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:51 compute-0 sudo[330685]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:01:51 compute-0 sudo[330685]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:01:51 compute-0 sudo[330685]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:51 compute-0 sudo[330747]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cxkgbvxwyindpdiomiljoixfqbrmrvgw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148111.3297796-620-68027431229875/AnsiballZ_stat.py'
Oct 11 02:01:51 compute-0 sudo[330747]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:51 compute-0 unix_chkpwd[330750]: password check failed for user (root)
Oct 11 02:01:52 compute-0 sshd-session[330570]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 02:01:52 compute-0 python3.9[330749]: ansible-ansible.legacy.stat Invoked with path=/var/local/libexec/edpm-container-shutdown follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:01:52 compute-0 sudo[330747]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:52 compute-0 ceph-mon[191930]: pgmap v710: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:01:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:01:52 compute-0 sudo[330826]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sgizopdtpsmzxcodazafccbkazehnrzs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148111.3297796-620-68027431229875/AnsiballZ_file.py'
Oct 11 02:01:52 compute-0 sudo[330826]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:52 compute-0 python3.9[330828]: ansible-ansible.legacy.file Invoked with group=root mode=0700 owner=root setype=container_file_t dest=/var/local/libexec/edpm-container-shutdown _original_basename=edpm-container-shutdown recurse=False state=file path=/var/local/libexec/edpm-container-shutdown force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:01:52 compute-0 sudo[330826]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v711: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:53 compute-0 podman[330853]: 2025-10-11 02:01:53.270936907 +0000 UTC m=+0.147834393 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=edpm, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009)
Oct 11 02:01:53 compute-0 sshd-session[330570]: Failed password for root from 193.46.255.217 port 50798 ssh2
Oct 11 02:01:53 compute-0 unix_chkpwd[330980]: password check failed for user (root)
Oct 11 02:01:53 compute-0 sudo[330998]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ulgoieelbgsofnafmvzalffvoaqlkofe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148113.2131047-620-34404414101276/AnsiballZ_stat.py'
Oct 11 02:01:53 compute-0 sudo[330998]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:53 compute-0 python3.9[331000]: ansible-ansible.legacy.stat Invoked with path=/var/local/libexec/edpm-start-podman-container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:01:54 compute-0 sudo[330998]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:54 compute-0 ceph-mon[191930]: pgmap v711: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:54 compute-0 sudo[331076]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uipdndkdiuopqoxectbiekrwsgloupjl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148113.2131047-620-34404414101276/AnsiballZ_file.py'
Oct 11 02:01:54 compute-0 sudo[331076]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:54 compute-0 python3.9[331078]: ansible-ansible.legacy.file Invoked with group=root mode=0700 owner=root setype=container_file_t dest=/var/local/libexec/edpm-start-podman-container _original_basename=edpm-start-podman-container recurse=False state=file path=/var/local/libexec/edpm-start-podman-container force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:01:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:01:54.816 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:01:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:01:54.819 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.003s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:01:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:01:54.819 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:01:54 compute-0 sudo[331076]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:01:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v712: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:55 compute-0 sshd-session[330570]: Failed password for root from 193.46.255.217 port 50798 ssh2
Oct 11 02:01:56 compute-0 sudo[331228]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gcrqppwriaqwskpfgyyltpzcpejtbtkb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148115.637352-643-254669561494381/AnsiballZ_file.py'
Oct 11 02:01:56 compute-0 sudo[331228]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:56 compute-0 python3.9[331230]: ansible-ansible.builtin.file Invoked with mode=420 path=/etc/systemd/system-preset state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:01:56 compute-0 sudo[331228]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:01:56
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['.mgr', 'backups', 'images', 'default.rgw.control', 'volumes', 'cephfs.cephfs.meta', '.rgw.root', 'default.rgw.log', 'vms', 'default.rgw.meta', 'cephfs.cephfs.data']
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:01:56 compute-0 ceph-mon[191930]: pgmap v712: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:01:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:01:56 compute-0 unix_chkpwd[331255]: password check failed for user (root)
Oct 11 02:01:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v713: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:57 compute-0 podman[331256]: 2025-10-11 02:01:57.225151114 +0000 UTC m=+0.119936167 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 02:01:57 compute-0 sudo[331401]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mntcwqhcbrfszjnczrbnmhrupdiodtew ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148117.3550746-651-251317636347997/AnsiballZ_stat.py'
Oct 11 02:01:57 compute-0 sudo[331401]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:58 compute-0 python3.9[331403]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/edpm-container-shutdown.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:01:58 compute-0 sudo[331401]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:58 compute-0 ceph-mon[191930]: pgmap v713: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:58 compute-0 sudo[331479]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yhfxbmbenduyfjhugwsieanfmrngvmns ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148117.3550746-651-251317636347997/AnsiballZ_file.py'
Oct 11 02:01:58 compute-0 sudo[331479]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:58 compute-0 python3.9[331481]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/edpm-container-shutdown.service _original_basename=edpm-container-shutdown-service recurse=False state=file path=/etc/systemd/system/edpm-container-shutdown.service force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:01:58 compute-0 sudo[331479]: pam_unix(sudo:session): session closed for user root
Oct 11 02:01:58 compute-0 sshd-session[330570]: Failed password for root from 193.46.255.217 port 50798 ssh2
Oct 11 02:01:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v714: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:01:59 compute-0 sudo[331631]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cegnubtbgejuhflxrrwyhvthxfyzfjwr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148119.1085765-663-91486500651089/AnsiballZ_stat.py'
Oct 11 02:01:59 compute-0 sudo[331631]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:01:59 compute-0 podman[157119]: time="2025-10-11T02:01:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:01:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:01:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 38199 "" "Go-http-client/1.1"
Oct 11 02:01:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:01:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 7694 "" "Go-http-client/1.1"
Oct 11 02:01:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:01:59 compute-0 python3.9[331633]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system-preset/91-edpm-container-shutdown.preset follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:01:59 compute-0 sudo[331631]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:00 compute-0 sshd-session[330570]: Received disconnect from 193.46.255.217 port 50798:11:  [preauth]
Oct 11 02:02:00 compute-0 sshd-session[330570]: Disconnected from authenticating user root 193.46.255.217 port 50798 [preauth]
Oct 11 02:02:00 compute-0 sshd-session[330570]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 02:02:00 compute-0 sudo[331711]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cbjpxfjzpislagbvypxicswnukteymhm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148119.1085765-663-91486500651089/AnsiballZ_file.py'
Oct 11 02:02:00 compute-0 sudo[331711]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:00 compute-0 ceph-mon[191930]: pgmap v714: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:00 compute-0 python3.9[331713]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system-preset/91-edpm-container-shutdown.preset _original_basename=91-edpm-container-shutdown-preset recurse=False state=file path=/etc/systemd/system-preset/91-edpm-container-shutdown.preset force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:02:00 compute-0 sudo[331711]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:00 compute-0 unix_chkpwd[331784]: password check failed for user (root)
Oct 11 02:02:00 compute-0 sshd-session[331672]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 02:02:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v715: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:01 compute-0 openstack_network_exporter[159265]: ERROR   02:02:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:02:01 compute-0 openstack_network_exporter[159265]: ERROR   02:02:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:02:01 compute-0 openstack_network_exporter[159265]: ERROR   02:02:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:02:01 compute-0 openstack_network_exporter[159265]: ERROR   02:02:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:02:01 compute-0 openstack_network_exporter[159265]: 
Oct 11 02:02:01 compute-0 openstack_network_exporter[159265]: ERROR   02:02:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:02:01 compute-0 openstack_network_exporter[159265]: 
Oct 11 02:02:01 compute-0 sudo[331864]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bnvrygvnazvfbubqaodzqtncbdhyasez ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148120.8946664-675-169467688616004/AnsiballZ_systemd.py'
Oct 11 02:02:01 compute-0 sudo[331864]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:01 compute-0 python3.9[331866]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=edpm-container-shutdown state=started daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:02:01 compute-0 systemd[1]: Reloading.
Oct 11 02:02:02 compute-0 systemd-rc-local-generator[331891]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:02:02 compute-0 systemd-sysv-generator[331895]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:02:02 compute-0 sudo[331864]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:02 compute-0 ceph-mon[191930]: pgmap v715: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:02 compute-0 sshd-session[331672]: Failed password for root from 193.46.255.217 port 50804 ssh2
Oct 11 02:02:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v716: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:03 compute-0 sudo[332053]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bwodryllwoxenttqclvqcdcspksvutou ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148122.7802484-683-174161526507729/AnsiballZ_stat.py'
Oct 11 02:02:03 compute-0 sudo[332053]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:03 compute-0 podman[332055]: 2025-10-11 02:02:03.529704746 +0000 UTC m=+0.109068141 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:02:03 compute-0 podman[332057]: 2025-10-11 02:02:03.545308099 +0000 UTC m=+0.113381993 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, build-date=2024-09-18T21:23:30, io.openshift.expose-services=, managed_by=edpm_ansible, config_id=edpm, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, io.openshift.tags=base rhel9, architecture=x86_64, distribution-scope=public, name=ubi9, io.buildah.version=1.29.0, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vendor=Red Hat, Inc., version=9.4, maintainer=Red Hat, Inc., release=1214.1726694543, com.redhat.component=ubi9-container, container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, summary=Provides the latest release of Red Hat Universal Base Image 9.)
Oct 11 02:02:03 compute-0 podman[332056]: 2025-10-11 02:02:03.603972007 +0000 UTC m=+0.170490504 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 02:02:03 compute-0 python3.9[332063]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system/netns-placeholder.service follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:02:03 compute-0 sudo[332053]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:04 compute-0 sudo[332195]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uhmcqyorntgswxisdmferaknmlxcpgjm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148122.7802484-683-174161526507729/AnsiballZ_file.py'
Oct 11 02:02:04 compute-0 sudo[332195]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:04 compute-0 unix_chkpwd[332198]: password check failed for user (root)
Oct 11 02:02:04 compute-0 python3.9[332197]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system/netns-placeholder.service _original_basename=netns-placeholder-service recurse=False state=file path=/etc/systemd/system/netns-placeholder.service force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:02:04 compute-0 sudo[332195]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:04 compute-0 ceph-mon[191930]: pgmap v716: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:02:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v717: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:05 compute-0 sudo[332348]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ipzuvtxdalzlhotxsvuqmmxzttngirsz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148124.703971-695-21541054601732/AnsiballZ_stat.py'
Oct 11 02:02:05 compute-0 sudo[332348]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:05 compute-0 podman[332350]: 2025-10-11 02:02:05.446904095 +0000 UTC m=+0.113387154 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:02:05 compute-0 python3.9[332351]: ansible-ansible.legacy.stat Invoked with path=/etc/systemd/system-preset/91-netns-placeholder.preset follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:02:05 compute-0 sudo[332348]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:06 compute-0 sshd-session[331672]: Failed password for root from 193.46.255.217 port 50804 ssh2
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:02:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
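[annotation] The pg_autoscaler run above follows one arithmetic pattern: each pool's ideal PG count is its usage ratio times its bias times a cluster-wide PG budget, and the last field of every effective_target_ratio line (64411926528 bytes, i.e. roughly the 60 GiB capacity the pgmap lines report) is the capacity those ratios are computed against. A minimal sketch of that product, assuming the budget here is 300 (e.g. the default mon_target_pg_per_osd of 100 across 3 OSDs — that constant is inferred from the logged values, not taken from the Ceph source):

    # Reproduce the "pg target" values the pg_autoscaler logs above.
    # The factor of 300 is an assumption that fits every line in this run.
    def pg_target(usage_ratio: float, bias: float, pg_budget: int = 300) -> float:
        return usage_ratio * bias * pg_budget

    # Pool '.mgr': "using 7.185749983720779e-06 of space, bias 1.0"
    print(pg_target(7.185749983720779e-06, 1.0))  # ~0.0021557249951162337, as logged
    # Pool 'cephfs.cephfs.meta': bias 4.0
    print(pg_target(5.087256625643029e-07, 4.0))  # ~0.0006104707950771635, as logged

The "quantized to N" figure is that ideal rounded to a power of two and clamped against the pool's current pg_num; the exact rounding rules live in the Ceph mgr module and are not reproduced here.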
Oct 11 02:02:06 compute-0 sudo[332447]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kfrfvlzrpydkldivcxouaahwiczmiigu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148124.703971-695-21541054601732/AnsiballZ_file.py'
Oct 11 02:02:06 compute-0 sudo[332447]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:06 compute-0 ceph-mon[191930]: pgmap v717: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:06 compute-0 python3.9[332449]: ansible-ansible.legacy.file Invoked with group=root mode=0644 owner=root dest=/etc/systemd/system-preset/91-netns-placeholder.preset _original_basename=91-netns-placeholder-preset recurse=False state=file path=/etc/systemd/system-preset/91-netns-placeholder.preset force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:02:06 compute-0 sudo[332447]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v718: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:07 compute-0 unix_chkpwd[332549]: password check failed for user (root)
Oct 11 02:02:08 compute-0 sudo[332600]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ffcbbkjghdpnpjwtscswcbfuqvdstulk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148127.1019034-707-199976321897216/AnsiballZ_systemd.py'
Oct 11 02:02:08 compute-0 sudo[332600]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:08 compute-0 python3.9[332602]: ansible-ansible.builtin.systemd Invoked with daemon_reload=True enabled=True name=netns-placeholder state=started daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:02:08 compute-0 ceph-mon[191930]: pgmap v718: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:08 compute-0 systemd[1]: Reloading.
Oct 11 02:02:08 compute-0 systemd-sysv-generator[332631]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:02:08 compute-0 systemd-rc-local-generator[332627]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:02:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v719: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:09 compute-0 systemd[1]: Starting Create netns directory...
Oct 11 02:02:09 compute-0 systemd[1]: run-netns-placeholder.mount: Deactivated successfully.
Oct 11 02:02:09 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Oct 11 02:02:09 compute-0 systemd[1]: Finished Create netns directory.
Oct 11 02:02:09 compute-0 sudo[332600]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:09 compute-0 sshd-session[331672]: Failed password for root from 193.46.255.217 port 50804 ssh2
Oct 11 02:02:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:02:10 compute-0 sudo[332793]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sxegvkzkmzvtjfcospmdfrbutmihuzei ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148129.7461216-717-215430271984242/AnsiballZ_file.py'
Oct 11 02:02:10 compute-0 sudo[332793]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:10 compute-0 python3.9[332795]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/openstack/healthchecks setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:02:10 compute-0 sshd-session[331672]: Received disconnect from 193.46.255.217 port 50804:11:  [preauth]
Oct 11 02:02:10 compute-0 sshd-session[331672]: Disconnected from authenticating user root 193.46.255.217 port 50804 [preauth]
Oct 11 02:02:10 compute-0 sshd-session[331672]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 02:02:10 compute-0 sudo[332793]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:10 compute-0 ceph-mon[191930]: pgmap v719: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v720: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:11 compute-0 sudo[332945]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jagnsquvzmovjbwkvwhpbejprnfvpelt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148130.9281254-725-233256166406670/AnsiballZ_stat.py'
Oct 11 02:02:11 compute-0 sudo[332945]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:11 compute-0 python3.9[332947]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/multipathd/healthcheck follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:02:11 compute-0 sudo[332945]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:12 compute-0 sudo[333080]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dwbtvwyctfwvkiuxjpixwrdfkdvfppkt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148130.9281254-725-233256166406670/AnsiballZ_copy.py'
Oct 11 02:02:12 compute-0 sudo[333080]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:12 compute-0 podman[333042]: 2025-10-11 02:02:12.558635968 +0000 UTC m=+0.175107805 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:02:12 compute-0 ceph-mon[191930]: pgmap v720: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:12 compute-0 python3.9[333089]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/healthchecks/multipathd/ group=zuul mode=0700 owner=zuul setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760148130.9281254-725-233256166406670/.source _original_basename=healthcheck follow=False checksum=af9d0c1c8f3cb0e30ce9609be9d5b01924d0d23f backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:02:12 compute-0 sudo[333080]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v721: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:13 compute-0 sudo[333240]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dvfxahruwrlfozmqxmfzqbqmphhumbhm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148133.3512874-742-228733122688776/AnsiballZ_file.py'
Oct 11 02:02:13 compute-0 sudo[333240]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:14 compute-0 python3.9[333242]: ansible-ansible.builtin.file Invoked with path=/var/lib/kolla/config_files recurse=True setype=container_file_t state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:02:14 compute-0 sudo[333240]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:14 compute-0 ceph-mon[191930]: pgmap v721: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:02:15 compute-0 sudo[333392]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-txesvewhcauuhgafwdheygatwwwrjuvg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148134.5474946-750-7723764905559/AnsiballZ_stat.py'
Oct 11 02:02:15 compute-0 sudo[333392]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v722: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:15 compute-0 python3.9[333394]: ansible-ansible.legacy.stat Invoked with path=/var/lib/kolla/config_files/multipathd.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:02:15 compute-0 sudo[333392]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:16 compute-0 sudo[333515]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gpplfexukykifwagzwcwmbaitdqaztlp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148134.5474946-750-7723764905559/AnsiballZ_copy.py'
Oct 11 02:02:16 compute-0 sudo[333515]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:16 compute-0 python3.9[333517]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/kolla/config_files/multipathd.json mode=0600 src=/home/zuul/.ansible/tmp/ansible-tmp-1760148134.5474946-750-7723764905559/.source.json _original_basename=.j78f6bsi follow=False checksum=3f7959ee8ac9757398adcc451c3b416c957d7c14 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
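[annotation] The stat/copy task pairs in this stretch compare SHA-1 digests (checksum_algorithm=sha1) to decide whether a file needs rewriting; the copy above records checksum=3f7959ee8ac9757398adcc451c3b416c957d7c14 for multipathd.json. A small sketch of the same check, with the path taken from the log line:

    import hashlib

    def sha1sum(path: str, bufsize: int = 65536) -> str:
        """Stream a file through SHA-1, as Ansible's stat/copy checksumming does."""
        h = hashlib.sha1()
        with open(path, "rb") as f:
            while chunk := f.read(bufsize):
                h.update(chunk)
        return h.hexdigest()

    # Should print 3f7959ee8ac9757398adcc451c3b416c957d7c14 as long as the
    # deployed file still matches what this task copied.
    print(sha1sum("/var/lib/kolla/config_files/multipathd.json"))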
Oct 11 02:02:16 compute-0 sudo[333515]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:16 compute-0 ceph-mon[191930]: pgmap v722: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v723: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:17 compute-0 sudo[333667]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oyoanhhawgvrnqryznlyeocroiefkzdv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148136.6528156-765-193653291604521/AnsiballZ_file.py'
Oct 11 02:02:17 compute-0 sudo[333667]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:17 compute-0 python3.9[333669]: ansible-ansible.builtin.file Invoked with mode=0755 path=/var/lib/edpm-config/container-startup-config/multipathd state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:02:17 compute-0 sudo[333667]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:18 compute-0 ceph-mon[191930]: pgmap v723: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:19 compute-0 sudo[333819]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xpwdfszwqsnhloagurbsmarfdqrhxufp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148137.8254182-773-122298734849784/AnsiballZ_stat.py'
Oct 11 02:02:19 compute-0 sudo[333819]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v724: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:19 compute-0 sudo[333819]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:02:19 compute-0 sudo[333943]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-crxnpnqbvfnmlrlqwwutdtnxsqybivpq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148137.8254182-773-122298734849784/AnsiballZ_copy.py'
Oct 11 02:02:19 compute-0 sudo[333943]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:20 compute-0 podman[333945]: 2025-10-11 02:02:20.131487388 +0000 UTC m=+0.147794643 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:02:20 compute-0 podman[333946]: 2025-10-11 02:02:20.143198227 +0000 UTC m=+0.155347150 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, architecture=x86_64, com.redhat.component=ubi9-minimal-container, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.6, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, name=ubi9-minimal, container_name=openstack_network_exporter, io.buildah.version=1.33.7, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, io.openshift.expose-services=, io.openshift.tags=minimal rhel9, vcs-type=git, vendor=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., maintainer=Red Hat, Inc., release=1755695350, config_id=edpm, managed_by=edpm_ansible)
Oct 11 02:02:20 compute-0 sudo[333943]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:20 compute-0 ceph-mon[191930]: pgmap v724: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v725: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:22 compute-0 sudo[334134]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qbqdladhwqrrhsdwdowafeejrgcdbede ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148141.5494037-790-272887131071061/AnsiballZ_container_config_data.py'
Oct 11 02:02:22 compute-0 sudo[334134]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:22 compute-0 python3.9[334136]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/edpm-config/container-startup-config/multipathd config_pattern=*.json debug=False
Oct 11 02:02:22 compute-0 sudo[334134]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:22 compute-0 ceph-mon[191930]: pgmap v725: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v726: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:23 compute-0 sudo[334286]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rutefpadrhukcngavncjdufljevmnhkl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148142.729139-799-63349589748370/AnsiballZ_container_config_hash.py'
Oct 11 02:02:23 compute-0 sudo[334286]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:23 compute-0 podman[334288]: 2025-10-11 02:02:23.502423873 +0000 UTC m=+0.148628772 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']})
Oct 11 02:02:23 compute-0 python3.9[334289]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
Oct 11 02:02:23 compute-0 sudo[334286]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:23 compute-0 ceph-mon[191930]: pgmap v726: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:24 compute-0 sudo[334456]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gtorkmtncvouptxghvimpgaacabtfazk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148143.9708042-808-31753291481558/AnsiballZ_podman_container_info.py'
Oct 11 02:02:24 compute-0 sudo[334456]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:24 compute-0 python3.9[334458]: ansible-containers.podman.podman_container_info Invoked with executable=podman name=None
Oct 11 02:02:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:02:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v727: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:25 compute-0 sudo[334456]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:26 compute-0 ceph-mon[191930]: pgmap v727: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:02:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:02:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:02:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:02:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:02:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:02:26 compute-0 sudo[334634]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mbevempnozftukawmzsbcachmyiwgnup ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760148146.293602-821-25852755271024/AnsiballZ_edpm_container_manage.py'
Oct 11 02:02:26 compute-0 sudo[334634]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v728: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:27 compute-0 python3[334636]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/edpm-config/container-startup-config/multipathd config_id=multipathd config_overrides={} config_patterns=*.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 02:02:28 compute-0 podman[334669]: 2025-10-11 02:02:28.218008803 +0000 UTC m=+0.105716417 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_managed=true, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, config_id=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, managed_by=edpm_ansible)
Oct 11 02:02:28 compute-0 ceph-mon[191930]: pgmap v728: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:29 compute-0 podman[334651]: 2025-10-11 02:02:29.119665061 +0000 UTC m=+1.793480604 image pull afce23cfe475a7c4b16d233ab936a7b07069ccb13842b1c95ba43e4b3f92adfb quay.io/podified-antelope-centos9/openstack-multipathd:current-podified
Oct 11 02:02:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v729: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:29 compute-0 podman[334724]: 2025-10-11 02:02:29.39547344 +0000 UTC m=+0.093846067 container create 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, io.buildah.version=1.41.3, tcib_managed=true, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, config_id=multipathd, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.license=GPLv2, container_name=multipathd, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:02:29 compute-0 podman[334724]: 2025-10-11 02:02:29.342093413 +0000 UTC m=+0.040466060 image pull afce23cfe475a7c4b16d233ab936a7b07069ccb13842b1c95ba43e4b3f92adfb quay.io/podified-antelope-centos9/openstack-multipathd:current-podified
Oct 11 02:02:29 compute-0 python3[334636]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman create --name multipathd --conmon-pidfile /run/multipathd.pid --env KOLLA_CONFIG_STRATEGY=COPY_ALWAYS --healthcheck-command /openstack/healthcheck --label config_id=multipathd --label container_name=multipathd --label managed_by=edpm_ansible --label config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']} --log-driver journald --log-level info --network host --privileged=True --volume /etc/hosts:/etc/hosts:ro --volume /etc/localtime:/etc/localtime:ro --volume /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro --volume /etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro --volume /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro --volume /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro --volume /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro --volume /dev/log:/dev/log --volume /var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro --volume /dev:/dev --volume /run/udev:/run/udev --volume /sys:/sys --volume /lib/modules:/lib/modules:ro --volume /etc/iscsi:/etc/iscsi:ro --volume /var/lib/iscsi:/var/lib/iscsi:z --volume /etc/multipath:/etc/multipath:z --volume /etc/multipath.conf:/etc/multipath.conf:ro --volume /var/lib/openstack/healthchecks/multipathd:/openstack:ro,z quay.io/podified-antelope-centos9/openstack-multipathd:current-podified
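[annotation] The PODMAN-CONTAINER-DEBUG line above shows how edpm_container_manage flattens its config_data dict into a podman create command. A rough sketch of that mapping for the subset of keys used here — an illustration consistent with the logged command, not the module's actual code:

    import shlex

    def podman_create_argv(name: str, cfg: dict) -> list[str]:
        """Translate a config_data-style dict into `podman create` arguments."""
        argv = ["podman", "create", "--name", name]
        for key, value in cfg.get("environment", {}).items():
            argv += ["--env", f"{key}={value}"]
        if "test" in cfg.get("healthcheck", {}):
            argv += ["--healthcheck-command", cfg["healthcheck"]["test"]]
        argv += ["--log-driver", "journald", "--log-level", "info"]
        if "net" in cfg:
            argv += ["--network", cfg["net"]]
        if cfg.get("privileged"):
            argv += ["--privileged=True"]
        for volume in cfg.get("volumes", []):
            argv += ["--volume", volume]
        argv.append(cfg["image"])
        return argv

    cfg = {
        "environment": {"KOLLA_CONFIG_STRATEGY": "COPY_ALWAYS"},
        "healthcheck": {"test": "/openstack/healthcheck"},
        "image": "quay.io/podified-antelope-centos9/openstack-multipathd:current-podified",
        "net": "host", "privileged": True,
        "volumes": ["/etc/hosts:/etc/hosts:ro", "/dev:/dev"],
    }
    print(shlex.join(podman_create_argv("multipathd", cfg)))

The real invocation additionally carries --conmon-pidfile and the config_id/container_name/managed_by/config_data labels visible in the line above.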
Oct 11 02:02:29 compute-0 sudo[334634]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:29 compute-0 podman[157119]: time="2025-10-11T02:02:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:02:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:02:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 40781 "" "Go-http-client/1.1"
Oct 11 02:02:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:02:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 7693 "" "Go-http-client/1.1"
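[annotation] The two GET lines above are the podman system service answering libpod REST calls over /run/podman/podman.sock; given the podman_exporter container configured earlier with CONTAINER_HOST=unix:///run/podman/podman.sock, that exporter is the likely client. A stdlib-only sketch of the same query (run as a user who can read the socket):

    import http.client
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        """HTTPConnection that dials a Unix socket instead of TCP."""
        def __init__(self, socket_path: str):
            super().__init__("localhost")  # host only feeds the Host: header
            self.socket_path = socket_path

        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self.socket_path)

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    resp = conn.getresponse()
    print(resp.status, len(resp.read()))  # expect 200 and a payload size, cf. "200 40781" above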
Oct 11 02:02:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:02:30 compute-0 ceph-mon[191930]: pgmap v729: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:30 compute-0 sudo[334910]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lyjbjdpbigeeetihrnrvamxhqoimrzva ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148150.007436-829-237663780334226/AnsiballZ_stat.py'
Oct 11 02:02:30 compute-0 sudo[334910]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:30 compute-0 python3.9[334912]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:02:30 compute-0 sudo[334910]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v730: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:31 compute-0 openstack_network_exporter[159265]: ERROR   02:02:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:02:31 compute-0 openstack_network_exporter[159265]: ERROR   02:02:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:02:31 compute-0 openstack_network_exporter[159265]: ERROR   02:02:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:02:31 compute-0 openstack_network_exporter[159265]: ERROR   02:02:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:02:31 compute-0 openstack_network_exporter[159265]: ERROR   02:02:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
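[annotation] The openstack_network_exporter errors above mean it found no ovs-appctl control sockets for ovsdb-server or ovn-northd, and the dpif-netdev calls fail because no userspace (netdev) datapath exists here — plausible on a compute node that runs only ovn-controller against the kernel datapath. A quick check of what is actually present, with the host rundirs assumed from the volume mounts in the exporter's config_data earlier:

    from pathlib import Path

    # Rundirs taken from the exporter's volume list above; adjust if yours differ.
    for rundir in ("/var/run/openvswitch", "/var/lib/openvswitch/ovn"):
        sockets = sorted(p.name for p in Path(rundir).glob("*.ctl"))
        print(rundir, "->", sockets or "no control sockets")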
Oct 11 02:02:32 compute-0 ceph-mon[191930]: pgmap v730: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:32 compute-0 sudo[335064]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jtrmusxqjljqucmqxflhaqviayfbldjx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148151.2322516-838-4598041506122/AnsiballZ_file.py'
Oct 11 02:02:32 compute-0 sudo[335064]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:32 compute-0 python3.9[335066]: ansible-file Invoked with path=/etc/systemd/system/edpm_multipathd.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:02:32 compute-0 sudo[335064]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v731: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:33 compute-0 sudo[335140]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sapduiryckkxzaedzpkwuzhhaqbyerpd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148151.2322516-838-4598041506122/AnsiballZ_stat.py'
Oct 11 02:02:33 compute-0 sudo[335140]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:33 compute-0 python3.9[335142]: ansible-stat Invoked with path=/etc/systemd/system/edpm_multipathd_healthcheck.timer follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:02:33 compute-0 sudo[335140]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:34 compute-0 podman[335195]: 2025-10-11 02:02:34.223354016 +0000 UTC m=+0.100720805 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:02:34 compute-0 podman[335203]: 2025-10-11 02:02:34.253459294 +0000 UTC m=+0.125329713 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, container_name=kepler, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, maintainer=Red Hat, Inc., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1214.1726694543, vendor=Red Hat, Inc., config_id=edpm, io.buildah.version=1.29.0, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, name=ubi9, release-0.7.12=, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-type=git, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, managed_by=edpm_ansible, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9, build-date=2024-09-18T21:23:30, com.redhat.component=ubi9-container, io.openshift.expose-services=, io.openshift.tags=base rhel9, distribution-scope=public, version=9.4)
Oct 11 02:02:34 compute-0 ceph-mon[191930]: pgmap v731: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:34 compute-0 podman[335201]: 2025-10-11 02:02:34.296301669 +0000 UTC m=+0.171412432 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, container_name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_controller)
Oct 11 02:02:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:02:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v732: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:35 compute-0 sudo[335355]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sxsccxojfuurricdlsxeepgvvqimvsdh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148153.7702055-838-27124695376869/AnsiballZ_copy.py'
Oct 11 02:02:35 compute-0 sudo[335355]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:35 compute-0 python3.9[335357]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760148153.7702055-838-27124695376869/source dest=/etc/systemd/system/edpm_multipathd.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:02:35 compute-0 sudo[335355]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:36 compute-0 sudo[335445]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pdoozengxrujraitejovkxhltgbqxcoz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148153.7702055-838-27124695376869/AnsiballZ_systemd.py'
Oct 11 02:02:36 compute-0 sudo[335445]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:36 compute-0 podman[335405]: 2025-10-11 02:02:36.101355313 +0000 UTC m=+0.145924597 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.schema-version=1.0)
Oct 11 02:02:36 compute-0 ceph-mon[191930]: pgmap v732: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:36 compute-0 python3.9[335450]: ansible-systemd Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 02:02:36 compute-0 systemd[1]: Reloading.
Oct 11 02:02:36 compute-0 systemd-sysv-generator[335480]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:02:36 compute-0 systemd-rc-local-generator[335477]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:02:36 compute-0 sudo[335445]: pam_unix(sudo:session): session closed for user root
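This task ran ansible-systemd with daemon_reload=True, and systemd re-runs every unit generator on each reload; that is why the systemd-sysv-generator and systemd-rc-local-generator messages recur after every "Reloading." line in this log rather than signalling a new problem. The whole task reduces to one call (sketch, assuming root and systemctl on PATH):

    import subprocess

    def daemon_reload() -> None:
        # Re-runs all generators (systemd-sysv-generator,
        # systemd-rc-local-generator, ...) and re-reads unit files.
        subprocess.run(["systemctl", "daemon-reload"], check=True)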
Oct 11 02:02:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v733: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:37 compute-0 sudo[335561]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sgvykeyytndtvnehzixrwmtgbghisfzp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148153.7702055-838-27124695376869/AnsiballZ_systemd.py'
Oct 11 02:02:37 compute-0 sudo[335561]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:37 compute-0 python3.9[335563]: ansible-systemd Invoked with state=restarted name=edpm_multipathd.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:02:37 compute-0 systemd[1]: Reloading.
Oct 11 02:02:38 compute-0 systemd-sysv-generator[335594]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:02:38 compute-0 systemd-rc-local-generator[335591]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:02:38 compute-0 ceph-mon[191930]: pgmap v733: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:38 compute-0 systemd[1]: Starting multipathd container...
Oct 11 02:02:38 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:02:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b113886ba2feff84fc91d1f94810d45b50dca798955c4f62cd2fadad6579baa8/merged/etc/multipath supports timestamps until 2038 (0x7fffffff)
Oct 11 02:02:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b113886ba2feff84fc91d1f94810d45b50dca798955c4f62cd2fadad6579baa8/merged/var/lib/iscsi supports timestamps until 2038 (0x7fffffff)
Oct 11 02:02:38 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c.
Oct 11 02:02:38 compute-0 podman[335602]: 2025-10-11 02:02:38.819989824 +0000 UTC m=+0.265675627 container init 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=multipathd, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 02:02:38 compute-0 multipathd[335617]: + sudo -E kolla_set_configs
Oct 11 02:02:38 compute-0 podman[335602]: 2025-10-11 02:02:38.869580807 +0000 UTC m=+0.315266590 container start 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, org.label-schema.vendor=CentOS, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009)
Oct 11 02:02:38 compute-0 sudo[335623]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_set_configs
Oct 11 02:02:38 compute-0 sudo[335623]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Oct 11 02:02:38 compute-0 sudo[335623]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Oct 11 02:02:38 compute-0 podman[335602]: multipathd
Oct 11 02:02:38 compute-0 systemd[1]: Started multipathd container.
Oct 11 02:02:38 compute-0 multipathd[335617]: INFO:__main__:Loading config file at /var/lib/kolla/config_files/config.json
Oct 11 02:02:38 compute-0 multipathd[335617]: INFO:__main__:Validating config file
Oct 11 02:02:38 compute-0 multipathd[335617]: INFO:__main__:Kolla config strategy set to: COPY_ALWAYS
Oct 11 02:02:38 compute-0 multipathd[335617]: INFO:__main__:Writing out command to execute
Oct 11 02:02:38 compute-0 sudo[335623]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:38 compute-0 multipathd[335617]: ++ cat /run_command
Oct 11 02:02:38 compute-0 multipathd[335617]: + CMD='/usr/sbin/multipathd -d'
Oct 11 02:02:38 compute-0 multipathd[335617]: + ARGS=
Oct 11 02:02:38 compute-0 multipathd[335617]: + sudo kolla_copy_cacerts
Oct 11 02:02:38 compute-0 sudo[335561]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:38 compute-0 sudo[335642]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_copy_cacerts
Oct 11 02:02:38 compute-0 sudo[335642]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Oct 11 02:02:38 compute-0 sudo[335642]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Oct 11 02:02:38 compute-0 sudo[335642]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:38 compute-0 multipathd[335617]: + [[ ! -n '' ]]
Oct 11 02:02:38 compute-0 multipathd[335617]: + . kolla_extend_start
Oct 11 02:02:38 compute-0 multipathd[335617]: Running command: '/usr/sbin/multipathd -d'
Oct 11 02:02:38 compute-0 multipathd[335617]: + echo 'Running command: '\''/usr/sbin/multipathd -d'\'''
Oct 11 02:02:38 compute-0 multipathd[335617]: + umask 0022
Oct 11 02:02:38 compute-0 multipathd[335617]: + exec /usr/sbin/multipathd -d
Oct 11 02:02:39 compute-0 multipathd[335617]: 4506.800789 | --------start up--------
Oct 11 02:02:39 compute-0 multipathd[335617]: 4506.800819 | read /etc/multipath.conf
Oct 11 02:02:39 compute-0 multipathd[335617]: 4506.813425 | path checkers start up
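The '+'-prefixed lines are the container's kolla start trace: kolla_set_configs validates /var/lib/kolla/config_files/config.json and copies files in (strategy COPY_ALWAYS), the command to run is read back from /run_command, kolla_copy_cacerts installs CA bundles, and the wrapper finally execs multipathd in the foreground. A minimal sketch of the tail end of that flow; only the /run_command path, the umask, and the command string come from the log, the helper name is illustrative:

    import os
    import shlex

    def exec_run_command(path: str = "/run_command") -> None:
        with open(path) as f:
            cmd = f.read().strip()   # here: "/usr/sbin/multipathd -d"
        os.umask(0o022)              # matches the 'umask 0022' in the trace
        argv = shlex.split(cmd)
        os.execv(argv[0], argv)      # replaces the process, like shell 'exec'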
Oct 11 02:02:39 compute-0 podman[335624]: 2025-10-11 02:02:39.039457601 +0000 UTC m=+0.144333349 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=starting, health_failing_streak=1, health_log=, tcib_managed=true, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS)
Oct 11 02:02:39 compute-0 systemd[1]: 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c-b46218734197af3.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 02:02:39 compute-0 systemd[1]: 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c-b46218734197af3.service: Failed with result 'exit-code'.
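The failed unit here is not the multipathd container itself: podman drives each health check through a transient systemd service/timer pair named after the container ID, and its status=1/FAILURE mirrors the health_status=starting / health_failing_streak=1 event just above, i.e. the first check fired before multipathd finished starting. The recorded state can be read back like this (sketch; assumes podman on PATH and the container name from the log):

    import json
    import subprocess

    def health_state(name: str = "multipathd") -> str:
        out = subprocess.run(
            ["podman", "inspect", "--format", "{{json .State.Health}}", name],
            capture_output=True, text=True, check=True,
        ).stdout
        return json.loads(out)["Status"]  # "starting", "healthy", "unhealthy"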
Oct 11 02:02:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v734: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:02:40 compute-0 python3.9[335806]: ansible-ansible.builtin.stat Invoked with path=/etc/multipath/.multipath_restart_required follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:02:40 compute-0 ceph-mon[191930]: pgmap v734: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:40 compute-0 sudo[335958]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tpedfyhdyqyhqfiscvkwaxtqjfevqffs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148160.366782-874-247098926257652/AnsiballZ_command.py'
Oct 11 02:02:40 compute-0 sudo[335958]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v735: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:41 compute-0 python3.9[335960]: ansible-ansible.legacy.command Invoked with _raw_params=podman ps --filter volume=/etc/multipath.conf --format {{.Names}} _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:02:41 compute-0 sudo[335958]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:42 compute-0 sudo[336123]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ssixnumllmszdhygdvvardlhyafdvovd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148161.7281137-882-213702710441990/AnsiballZ_systemd.py'
Oct 11 02:02:42 compute-0 sudo[336123]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:42 compute-0 ceph-mon[191930]: pgmap v735: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:42 compute-0 python3.9[336125]: ansible-ansible.builtin.systemd Invoked with name=edpm_multipathd state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 02:02:42 compute-0 systemd[1]: Stopping multipathd container...
Oct 11 02:02:42 compute-0 multipathd[335617]: 4510.576507 | exit (signal)
Oct 11 02:02:42 compute-0 multipathd[335617]: 4510.576733 | --------shut down-------
Oct 11 02:02:42 compute-0 podman[336127]: 2025-10-11 02:02:42.821383219 +0000 UTC m=+0.133448058 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, config_id=iscsid, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_managed=true)
Oct 11 02:02:42 compute-0 systemd[1]: libpod-1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c.scope: Deactivated successfully.
Oct 11 02:02:42 compute-0 podman[336135]: 2025-10-11 02:02:42.840903665 +0000 UTC m=+0.115463210 container died 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, container_name=multipathd, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:02:42 compute-0 systemd[1]: 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c-b46218734197af3.timer: Deactivated successfully.
Oct 11 02:02:42 compute-0 systemd[1]: Stopped /usr/bin/podman healthcheck run 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c.
Oct 11 02:02:42 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c-userdata-shm.mount: Deactivated successfully.
Oct 11 02:02:42 compute-0 systemd[1]: var-lib-containers-storage-overlay-b113886ba2feff84fc91d1f94810d45b50dca798955c4f62cd2fadad6579baa8-merged.mount: Deactivated successfully.
Oct 11 02:02:42 compute-0 podman[336135]: 2025-10-11 02:02:42.928223153 +0000 UTC m=+0.202782708 container cleanup 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_id=multipathd, org.label-schema.vendor=CentOS)
Oct 11 02:02:42 compute-0 podman[336135]: multipathd
Oct 11 02:02:43 compute-0 podman[336172]: multipathd
Oct 11 02:02:43 compute-0 systemd[1]: edpm_multipathd.service: Deactivated successfully.
Oct 11 02:02:43 compute-0 systemd[1]: Stopped multipathd container.
Oct 11 02:02:43 compute-0 systemd[1]: Starting multipathd container...
Oct 11 02:02:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v736: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:43 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:02:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b113886ba2feff84fc91d1f94810d45b50dca798955c4f62cd2fadad6579baa8/merged/etc/multipath supports timestamps until 2038 (0x7fffffff)
Oct 11 02:02:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b113886ba2feff84fc91d1f94810d45b50dca798955c4f62cd2fadad6579baa8/merged/var/lib/iscsi supports timestamps until 2038 (0x7fffffff)
Oct 11 02:02:43 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c.
Oct 11 02:02:43 compute-0 podman[336182]: 2025-10-11 02:02:43.259732335 +0000 UTC m=+0.200201642 container init 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=multipathd, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:02:43 compute-0 multipathd[336197]: + sudo -E kolla_set_configs
Oct 11 02:02:43 compute-0 podman[336182]: 2025-10-11 02:02:43.300365094 +0000 UTC m=+0.240834301 container start 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=multipathd, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:02:43 compute-0 podman[336182]: multipathd
Oct 11 02:02:43 compute-0 systemd[1]: Started multipathd container.
Oct 11 02:02:43 compute-0 sudo[336203]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_set_configs
Oct 11 02:02:43 compute-0 sudo[336203]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Oct 11 02:02:43 compute-0 sudo[336203]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Oct 11 02:02:43 compute-0 sudo[336123]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:43 compute-0 multipathd[336197]: INFO:__main__:Loading config file at /var/lib/kolla/config_files/config.json
Oct 11 02:02:43 compute-0 multipathd[336197]: INFO:__main__:Validating config file
Oct 11 02:02:43 compute-0 multipathd[336197]: INFO:__main__:Kolla config strategy set to: COPY_ALWAYS
Oct 11 02:02:43 compute-0 multipathd[336197]: INFO:__main__:Writing out command to execute
Oct 11 02:02:43 compute-0 sudo[336203]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:43 compute-0 multipathd[336197]: ++ cat /run_command
Oct 11 02:02:43 compute-0 multipathd[336197]: + CMD='/usr/sbin/multipathd -d'
Oct 11 02:02:43 compute-0 multipathd[336197]: + ARGS=
Oct 11 02:02:43 compute-0 multipathd[336197]: + sudo kolla_copy_cacerts
Oct 11 02:02:43 compute-0 sudo[336225]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_copy_cacerts
Oct 11 02:02:43 compute-0 sudo[336225]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Oct 11 02:02:43 compute-0 sudo[336225]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Oct 11 02:02:43 compute-0 sudo[336225]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:43 compute-0 multipathd[336197]: + [[ ! -n '' ]]
Oct 11 02:02:43 compute-0 multipathd[336197]: + . kolla_extend_start
Oct 11 02:02:43 compute-0 multipathd[336197]: Running command: '/usr/sbin/multipathd -d'
Oct 11 02:02:43 compute-0 multipathd[336197]: + echo 'Running command: '\''/usr/sbin/multipathd -d'\'''
Oct 11 02:02:43 compute-0 multipathd[336197]: + umask 0022
Oct 11 02:02:43 compute-0 multipathd[336197]: + exec /usr/sbin/multipathd -d
Oct 11 02:02:43 compute-0 podman[336204]: 2025-10-11 02:02:43.455682954 +0000 UTC m=+0.129265006 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=starting, health_failing_streak=1, health_log=, container_name=multipathd, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=multipathd, io.buildah.version=1.41.3, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:02:43 compute-0 systemd[1]: 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c-968737eb8dc3fff.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 02:02:43 compute-0 systemd[1]: 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c-968737eb8dc3fff.service: Failed with result 'exit-code'.
Oct 11 02:02:43 compute-0 multipathd[336197]: 4511.248236 | --------start up--------
Oct 11 02:02:43 compute-0 multipathd[336197]: 4511.248273 | read /etc/multipath.conf
Oct 11 02:02:43 compute-0 multipathd[336197]: 4511.262088 | path checkers start up
Oct 11 02:02:44 compute-0 sudo[336385]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-veizixwzphqkfdstzezgapjuioinsdgy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148163.7167299-890-258591749120820/AnsiballZ_file.py'
Oct 11 02:02:44 compute-0 sudo[336385]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:44 compute-0 ceph-mon[191930]: pgmap v736: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:44 compute-0 python3.9[336387]: ansible-ansible.builtin.file Invoked with path=/etc/multipath/.multipath_restart_required state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:02:44 compute-0 sudo[336385]: pam_unix(sudo:session): session closed for user root
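Taken together, the tasks at 02:02:40-02:02:44 form a restart-handler pattern: stat the marker file /etc/multipath/.multipath_restart_required, find which containers mount /etc/multipath.conf, restart the edpm_multipathd service, and finally delete the marker so the restart happens at most once per configuration change. Condensed into one sketch (restart_if_flagged is an illustrative name; the paths, filter, and unit name are taken from the log):

    import os
    import subprocess

    FLAG = "/etc/multipath/.multipath_restart_required"

    def restart_if_flagged() -> None:
        if not os.path.exists(FLAG):
            return
        names = subprocess.run(
            ["podman", "ps", "--filter", "volume=/etc/multipath.conf",
             "--format", "{{.Names}}"],
            capture_output=True, text=True, check=True,
        ).stdout.split()
        if names:                           # e.g. ["multipathd"]
            subprocess.run(["systemctl", "restart", "edpm_multipathd"],
                           check=True)
        os.remove(FLAG)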
Oct 11 02:02:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:02:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v737: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:46 compute-0 ceph-mon[191930]: pgmap v737: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:46 compute-0 sudo[336537]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xcemhygfddhmhphaacvmymdmmmqktaej ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148165.1702244-902-165285619136/AnsiballZ_file.py'
Oct 11 02:02:46 compute-0 sudo[336537]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:46 compute-0 python3.9[336539]: ansible-ansible.builtin.file Invoked with mode=0755 path=/etc/modules-load.d selevel=s0 setype=etc_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None attributes=None
Oct 11 02:02:46 compute-0 sudo[336537]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v738: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:47 compute-0 sudo[336689]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xpnwvbruuyiffpwpirttehzxoqimfuhc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148167.2869763-910-182986744723864/AnsiballZ_modprobe.py'
Oct 11 02:02:47 compute-0 sudo[336689]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:48 compute-0 python3.9[336691]: ansible-community.general.modprobe Invoked with name=nvme-fabrics state=present params= persistent=disabled
Oct 11 02:02:48 compute-0 kernel: Key type psk registered
Oct 11 02:02:48 compute-0 sudo[336689]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:48 compute-0 ceph-mon[191930]: pgmap v738: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v739: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:49 compute-0 sudo[336855]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rszxivwhkghicunxjokoqfepdfnbnchx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148168.8519692-918-233441240245230/AnsiballZ_stat.py'
Oct 11 02:02:49 compute-0 sudo[336855]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:49 compute-0 python3.9[336857]: ansible-ansible.legacy.stat Invoked with path=/etc/modules-load.d/nvme-fabrics.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:02:49 compute-0 sudo[336855]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:02:50 compute-0 sudo[337017]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xncxaolcboabspjnluqumimpnnswlasz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148168.8519692-918-233441240245230/AnsiballZ_copy.py'
Oct 11 02:02:50 compute-0 sudo[337017]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:50 compute-0 podman[336953]: 2025-10-11 02:02:50.380823424 +0000 UTC m=+0.133114389 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:02:50 compute-0 ceph-mon[191930]: pgmap v739: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:50 compute-0 podman[336954]: 2025-10-11 02:02:50.415790843 +0000 UTC m=+0.147641854 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://catalog.redhat.com/en/search?searchType=containers, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., config_id=edpm, build-date=2025-08-20T13:12:41, io.buildah.version=1.33.7, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, architecture=x86_64, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc., distribution-scope=public, managed_by=edpm_ansible, com.redhat.component=ubi9-minimal-container, vcs-type=git, release=1755695350, container_name=openstack_network_exporter, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.openshift.expose-services=, name=ubi9-minimal, version=9.6, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:02:50 compute-0 python3.9[337023]: ansible-ansible.legacy.copy Invoked with dest=/etc/modules-load.d/nvme-fabrics.conf mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760148168.8519692-918-233441240245230/.source.conf follow=False _original_basename=module-load.conf.j2 checksum=783c778f0c68cc414f35486f234cbb1cf3f9bbff backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:02:50 compute-0 sudo[337017]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v740: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:51 compute-0 sudo[337173]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tsibbhvufwysotgvgbrehshctjzrruax ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148171.0172877-934-125989908049298/AnsiballZ_lineinfile.py'
Oct 11 02:02:51 compute-0 sudo[337173]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:51 compute-0 python3.9[337175]: ansible-ansible.builtin.lineinfile Invoked with create=True dest=/etc/modules line=nvme-fabrics  mode=0644 state=present path=/etc/modules backrefs=False backup=False firstmatch=False unsafe_writes=False regexp=None search_string=None insertafter=None insertbefore=None validate=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:02:51 compute-0 sudo[337173]: pam_unix(sudo:session): session closed for user root
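This group makes the nvme-fabrics module both active and persistent: modprobe loads it now (the adjacent "Key type psk registered" kernel line appears as the module and its dependencies load), the file under /etc/modules-load.d/ has systemd load it on every boot, and the lineinfile task keeps the legacy /etc/modules list in step. The systemd-modules-load.service restart a few lines below applies the new config without a reboot. Roughly (sketch, assumes root; persist_module is an illustrative name):

    import pathlib
    import subprocess

    def persist_module(mod: str = "nvme-fabrics") -> None:
        subprocess.run(["modprobe", mod], check=True)        # load immediately
        conf = pathlib.Path("/etc/modules-load.d") / f"{mod}.conf"
        conf.write_text(mod + "\n")                          # load on boot
        subprocess.run(
            ["systemctl", "restart", "systemd-modules-load.service"],
            check=True)                                      # apply now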
Oct 11 02:02:51 compute-0 sudo[337176]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:02:51 compute-0 sudo[337176]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:02:51 compute-0 sudo[337176]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:52 compute-0 sudo[337214]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:02:52 compute-0 sudo[337214]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:02:52 compute-0 sudo[337214]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:52 compute-0 sudo[337250]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:02:52 compute-0 sudo[337250]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:02:52 compute-0 sudo[337250]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:52 compute-0 sudo[337304]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:02:52 compute-0 sudo[337304]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:02:52 compute-0 ceph-mon[191930]: pgmap v740: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:52 compute-0 sudo[337439]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iixxewopojsnenheefzkwxoadnpsnxvx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148172.1666384-942-8482870032563/AnsiballZ_systemd.py'
Oct 11 02:02:52 compute-0 sudo[337439]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:52 compute-0 sudo[337304]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:02:52 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:02:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:02:52 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:02:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:02:52 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:02:52 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 1ed82cb3-9db4-4ed1-a7b2-2c261e8999c4 does not exist
Oct 11 02:02:52 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 9c51c9f2-353b-40b0-a215-f3f3caca5dd0 does not exist
Oct 11 02:02:52 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev b8a4694f-c652-46a3-ba2a-43cdf96607f7 does not exist
Oct 11 02:02:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:02:52 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:02:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:02:52 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:02:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:02:52 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
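Each command a mgr module sends to the mon shows up twice here: once as handle_command and once on the audit log channel ([DBG] for read-only queries, [INF] for ones that touch state). This burst is cephadm collecting what it needs before creating OSDs: a minimal ceph.conf, the client.admin and client.bootstrap-osd keyrings, and a check for destroyed OSDs in the tree. The conf query is reproducible from any host with an admin keyring (sketch):

    import subprocess

    minimal_conf = subprocess.run(
        ["ceph", "config", "generate-minimal-conf"],
        capture_output=True, text=True, check=True,
    ).stdout
    print(minimal_conf)  # short [global] section: fsid and mon_host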
Oct 11 02:02:52 compute-0 python3.9[337443]: ansible-ansible.builtin.systemd Invoked with name=systemd-modules-load.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 02:02:52 compute-0 systemd[1]: systemd-modules-load.service: Deactivated successfully.
Oct 11 02:02:52 compute-0 systemd[1]: Stopped Load Kernel Modules.
Oct 11 02:02:52 compute-0 systemd[1]: Stopping Load Kernel Modules...
Oct 11 02:02:52 compute-0 systemd[1]: Starting Load Kernel Modules...
Oct 11 02:02:53 compute-0 sudo[337458]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:02:53 compute-0 sudo[337458]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:02:53 compute-0 systemd[1]: Finished Load Kernel Modules.
Oct 11 02:02:53 compute-0 sudo[337458]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:53 compute-0 sudo[337439]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:53 compute-0 sudo[337487]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:02:53 compute-0 sudo[337487]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:02:53 compute-0 sudo[337487]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v741: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:53 compute-0 sudo[337535]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:02:53 compute-0 sudo[337535]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:02:53 compute-0 sudo[337535]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:53 compute-0 sudo[337561]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:02:53 compute-0 sudo[337561]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:02:53 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:02:53 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:02:53 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:02:53 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:02:53 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:02:53 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
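The ceph-admin sudo line at 02:02:53 is the actual OSD creation: the cephadm binary copied to /var/lib/ceph/<fsid>/ runs ceph-volume inside the ceph container, batching three pre-created logical volumes into OSDs; --no-systemd is passed because cephadm generates its own per-OSD units afterwards, and the short-lived loving_faraday/unruffled_hoover containers below are that invocation at work. A simplified wrapper, leaving out the --image and --config-json plumbing visible in the log (batch_create_osds is an illustrative name):

    import subprocess

    FSID = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"
    LVS = ["/dev/ceph_vg0/ceph_lv0", "/dev/ceph_vg1/ceph_lv1",
           "/dev/ceph_vg2/ceph_lv2"]

    def batch_create_osds(cephadm_path: str) -> None:
        # 'lvm batch' prepares one OSD per LV; --no-auto keeps the given
        # device list as-is instead of letting ceph-volume regroup it.
        subprocess.run(
            ["python3", cephadm_path, "--timeout", "895",
             "ceph-volume", "--fsid", FSID, "--",
             "lvm", "batch", "--no-auto", *LVS, "--yes", "--no-systemd"],
            check=True)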
Oct 11 02:02:54 compute-0 podman[337724]: 2025-10-11 02:02:54.026714398 +0000 UTC m=+0.087189649 container create befeb2b296557396d9c3ad79fa66948a9793ca5f6631f95b37c80c3b0a3d2ac2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_faraday, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2)
Oct 11 02:02:54 compute-0 podman[337724]: 2025-10-11 02:02:53.993823246 +0000 UTC m=+0.054298557 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:02:54 compute-0 systemd[1]: Started libpod-conmon-befeb2b296557396d9c3ad79fa66948a9793ca5f6631f95b37c80c3b0a3d2ac2.scope.
Oct 11 02:02:54 compute-0 sudo[337778]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kipzvodtdboufdhwspikafgutzxvhsio ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148173.4753265-950-254933534578831/AnsiballZ_setup.py'
Oct 11 02:02:54 compute-0 sudo[337778]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:54 compute-0 podman[337726]: 2025-10-11 02:02:54.109179684 +0000 UTC m=+0.132573449 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_id=edpm, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:02:54 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:02:54 compute-0 podman[337724]: 2025-10-11 02:02:54.168173022 +0000 UTC m=+0.228648273 container init befeb2b296557396d9c3ad79fa66948a9793ca5f6631f95b37c80c3b0a3d2ac2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_faraday, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0)
Oct 11 02:02:54 compute-0 podman[337724]: 2025-10-11 02:02:54.180882469 +0000 UTC m=+0.241357720 container start befeb2b296557396d9c3ad79fa66948a9793ca5f6631f95b37c80c3b0a3d2ac2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_faraday, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 02:02:54 compute-0 podman[337724]: 2025-10-11 02:02:54.18651668 +0000 UTC m=+0.246992001 container attach befeb2b296557396d9c3ad79fa66948a9793ca5f6631f95b37c80c3b0a3d2ac2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_faraday, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:02:54 compute-0 loving_faraday[337785]: 167 167
Oct 11 02:02:54 compute-0 systemd[1]: libpod-befeb2b296557396d9c3ad79fa66948a9793ca5f6631f95b37c80c3b0a3d2ac2.scope: Deactivated successfully.
Oct 11 02:02:54 compute-0 podman[337724]: 2025-10-11 02:02:54.196207643 +0000 UTC m=+0.256682924 container died befeb2b296557396d9c3ad79fa66948a9793ca5f6631f95b37c80c3b0a3d2ac2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_faraday, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 02:02:54 compute-0 systemd[1]: var-lib-containers-storage-overlay-ee5215c453ff5c73ea3653670a1717c027a6458411dddcdfaeae562c244dfc45-merged.mount: Deactivated successfully.
Oct 11 02:02:54 compute-0 podman[337724]: 2025-10-11 02:02:54.259201973 +0000 UTC m=+0.319677214 container remove befeb2b296557396d9c3ad79fa66948a9793ca5f6631f95b37c80c3b0a3d2ac2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_faraday, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 02:02:54 compute-0 systemd[1]: libpod-conmon-befeb2b296557396d9c3ad79fa66948a9793ca5f6631f95b37c80c3b0a3d2ac2.scope: Deactivated successfully.
Oct 11 02:02:54 compute-0 ceph-mon[191930]: pgmap v741: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:54 compute-0 python3.9[337787]: ansible-ansible.legacy.setup Invoked with filter=['ansible_pkg_mgr'] gather_subset=['!all'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 02:02:54 compute-0 podman[337810]: 2025-10-11 02:02:54.537602768 +0000 UTC m=+0.087409949 container create 46b867310f6165acd8b4a1501abae3dba01aee0a54e26ea29a0d7a028bb7909d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_hoover, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 02:02:54 compute-0 podman[337810]: 2025-10-11 02:02:54.502486069 +0000 UTC m=+0.052293390 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:02:54 compute-0 systemd[1]: Started libpod-conmon-46b867310f6165acd8b4a1501abae3dba01aee0a54e26ea29a0d7a028bb7909d.scope.
Oct 11 02:02:54 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:02:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2823a0ddf4750f85290a5c8543b02155714242c70e07d227e9f9315fdf3736ba/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:02:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2823a0ddf4750f85290a5c8543b02155714242c70e07d227e9f9315fdf3736ba/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:02:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2823a0ddf4750f85290a5c8543b02155714242c70e07d227e9f9315fdf3736ba/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:02:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2823a0ddf4750f85290a5c8543b02155714242c70e07d227e9f9315fdf3736ba/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:02:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2823a0ddf4750f85290a5c8543b02155714242c70e07d227e9f9315fdf3736ba/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:02:54 compute-0 podman[337810]: 2025-10-11 02:02:54.719126431 +0000 UTC m=+0.268933582 container init 46b867310f6165acd8b4a1501abae3dba01aee0a54e26ea29a0d7a028bb7909d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_hoover, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:02:54 compute-0 podman[337810]: 2025-10-11 02:02:54.742463771 +0000 UTC m=+0.292270912 container start 46b867310f6165acd8b4a1501abae3dba01aee0a54e26ea29a0d7a028bb7909d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_hoover, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True)
Oct 11 02:02:54 compute-0 podman[337810]: 2025-10-11 02:02:54.748422911 +0000 UTC m=+0.298230082 container attach 46b867310f6165acd8b4a1501abae3dba01aee0a54e26ea29a0d7a028bb7909d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_hoover, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2)
Oct 11 02:02:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:02:54.818 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:02:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:02:54.819 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:02:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:02:54.819 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:02:54 compute-0 sudo[337778]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:02:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v742: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:55 compute-0 sudo[337919]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-upqcddkzjprhfomsjczpqnfvywtwdfhy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148173.4753265-950-254933534578831/AnsiballZ_dnf.py'
Oct 11 02:02:55 compute-0 sudo[337919]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:02:55 compute-0 python3.9[337923]: ansible-ansible.legacy.dnf Invoked with name=['nvme-cli'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 02:02:56 compute-0 unruffled_hoover[337834]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:02:56 compute-0 unruffled_hoover[337834]: --> relative data size: 1.0
Oct 11 02:02:56 compute-0 unruffled_hoover[337834]: --> All data devices are unavailable
Oct 11 02:02:56 compute-0 systemd[1]: libpod-46b867310f6165acd8b4a1501abae3dba01aee0a54e26ea29a0d7a028bb7909d.scope: Deactivated successfully.
Oct 11 02:02:56 compute-0 systemd[1]: libpod-46b867310f6165acd8b4a1501abae3dba01aee0a54e26ea29a0d7a028bb7909d.scope: Consumed 1.335s CPU time.
Oct 11 02:02:56 compute-0 podman[337810]: 2025-10-11 02:02:56.163879864 +0000 UTC m=+1.713687055 container died 46b867310f6165acd8b4a1501abae3dba01aee0a54e26ea29a0d7a028bb7909d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_hoover, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, OSD_FLAVOR=default)
Oct 11 02:02:56 compute-0 systemd[1]: var-lib-containers-storage-overlay-2823a0ddf4750f85290a5c8543b02155714242c70e07d227e9f9315fdf3736ba-merged.mount: Deactivated successfully.
Oct 11 02:02:56 compute-0 podman[337810]: 2025-10-11 02:02:56.27517313 +0000 UTC m=+1.824980291 container remove 46b867310f6165acd8b4a1501abae3dba01aee0a54e26ea29a0d7a028bb7909d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_hoover, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0)
Oct 11 02:02:56 compute-0 systemd[1]: libpod-conmon-46b867310f6165acd8b4a1501abae3dba01aee0a54e26ea29a0d7a028bb7909d.scope: Deactivated successfully.
Oct 11 02:02:56 compute-0 sudo[337561]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:56 compute-0 ceph-mon[191930]: pgmap v742: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:56 compute-0 sudo[337951]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:02:56 compute-0 sudo[337951]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:02:56 compute-0 sudo[337951]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:02:56
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['backups', 'default.rgw.meta', 'default.rgw.log', 'cephfs.cephfs.meta', 'volumes', 'cephfs.cephfs.data', 'vms', '.mgr', '.rgw.root', 'images', 'default.rgw.control']
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:02:56 compute-0 sudo[337976]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:02:56 compute-0 sudo[337976]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:02:56 compute-0 sudo[337976]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:56 compute-0 sudo[338001]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:02:56 compute-0 sudo[338001]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:02:56 compute-0 sudo[338001]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:02:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:02:56 compute-0 sudo[338026]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:02:56 compute-0 sudo[338026]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:02:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v743: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:57 compute-0 podman[338092]: 2025-10-11 02:02:57.558114433 +0000 UTC m=+0.083414556 container create 3f9e9b59f36d37b91d45a3ab27b5ca8a4266ff9e88d39f765b05f9551b2e137e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_zhukovsky, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:02:57 compute-0 systemd[1]: Started libpod-conmon-3f9e9b59f36d37b91d45a3ab27b5ca8a4266ff9e88d39f765b05f9551b2e137e.scope.
Oct 11 02:02:57 compute-0 podman[338092]: 2025-10-11 02:02:57.532292138 +0000 UTC m=+0.057592301 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:02:57 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:02:57 compute-0 podman[338092]: 2025-10-11 02:02:57.694368225 +0000 UTC m=+0.219668378 container init 3f9e9b59f36d37b91d45a3ab27b5ca8a4266ff9e88d39f765b05f9551b2e137e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_zhukovsky, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2)
Oct 11 02:02:57 compute-0 podman[338092]: 2025-10-11 02:02:57.7202839 +0000 UTC m=+0.245584013 container start 3f9e9b59f36d37b91d45a3ab27b5ca8a4266ff9e88d39f765b05f9551b2e137e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_zhukovsky, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 02:02:57 compute-0 systemd[1]: libpod-3f9e9b59f36d37b91d45a3ab27b5ca8a4266ff9e88d39f765b05f9551b2e137e.scope: Deactivated successfully.
Oct 11 02:02:57 compute-0 infallible_zhukovsky[338108]: 167 167
Oct 11 02:02:57 compute-0 conmon[338108]: conmon 3f9e9b59f36d37b91d45 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-3f9e9b59f36d37b91d45a3ab27b5ca8a4266ff9e88d39f765b05f9551b2e137e.scope/container/memory.events
Oct 11 02:02:57 compute-0 podman[338092]: 2025-10-11 02:02:57.73151059 +0000 UTC m=+0.256810703 container attach 3f9e9b59f36d37b91d45a3ab27b5ca8a4266ff9e88d39f765b05f9551b2e137e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_zhukovsky, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:02:57 compute-0 podman[338092]: 2025-10-11 02:02:57.73185341 +0000 UTC m=+0.257153523 container died 3f9e9b59f36d37b91d45a3ab27b5ca8a4266ff9e88d39f765b05f9551b2e137e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_zhukovsky, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:02:57 compute-0 systemd[1]: var-lib-containers-storage-overlay-d16182dc235ef42d6d3034eadeb1c2535b489eece068f0d8848e81bf123cff3f-merged.mount: Deactivated successfully.
Oct 11 02:02:57 compute-0 podman[338092]: 2025-10-11 02:02:57.788518081 +0000 UTC m=+0.313818194 container remove 3f9e9b59f36d37b91d45a3ab27b5ca8a4266ff9e88d39f765b05f9551b2e137e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_zhukovsky, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:02:57 compute-0 systemd[1]: libpod-conmon-3f9e9b59f36d37b91d45a3ab27b5ca8a4266ff9e88d39f765b05f9551b2e137e.scope: Deactivated successfully.
Oct 11 02:02:58 compute-0 podman[338132]: 2025-10-11 02:02:58.042664148 +0000 UTC m=+0.079873922 container create d9092d7c790dc1cb419888209b5253b3fadc5b113e432b0696adf2f5069f53dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_shaw, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:02:58 compute-0 podman[338132]: 2025-10-11 02:02:58.015974655 +0000 UTC m=+0.053184469 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:02:58 compute-0 systemd[1]: Started libpod-conmon-d9092d7c790dc1cb419888209b5253b3fadc5b113e432b0696adf2f5069f53dc.scope.
Oct 11 02:02:58 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:02:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/316e7e93b4a13a2b99119cd70bbedc7fc6b34eb02531962e2a9f500d5ef1c82b/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:02:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/316e7e93b4a13a2b99119cd70bbedc7fc6b34eb02531962e2a9f500d5ef1c82b/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:02:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/316e7e93b4a13a2b99119cd70bbedc7fc6b34eb02531962e2a9f500d5ef1c82b/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:02:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/316e7e93b4a13a2b99119cd70bbedc7fc6b34eb02531962e2a9f500d5ef1c82b/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:02:58 compute-0 podman[338132]: 2025-10-11 02:02:58.171905093 +0000 UTC m=+0.209114877 container init d9092d7c790dc1cb419888209b5253b3fadc5b113e432b0696adf2f5069f53dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_shaw, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, OSD_FLAVOR=default)
Oct 11 02:02:58 compute-0 podman[338132]: 2025-10-11 02:02:58.191448649 +0000 UTC m=+0.228658453 container start d9092d7c790dc1cb419888209b5253b3fadc5b113e432b0696adf2f5069f53dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_shaw, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 02:02:58 compute-0 podman[338132]: 2025-10-11 02:02:58.198957526 +0000 UTC m=+0.236167310 container attach d9092d7c790dc1cb419888209b5253b3fadc5b113e432b0696adf2f5069f53dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_shaw, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:02:58 compute-0 systemd[1]: Reloading.
Oct 11 02:02:58 compute-0 ceph-mon[191930]: pgmap v743: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:58 compute-0 podman[338155]: 2025-10-11 02:02:58.515902473 +0000 UTC m=+0.165111733 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_id=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, io.buildah.version=1.41.3)
Oct 11 02:02:58 compute-0 systemd-rc-local-generator[338196]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:02:58 compute-0 systemd-sysv-generator[338202]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:02:58 compute-0 systemd[1]: Reloading.
Oct 11 02:02:58 compute-0 systemd-sysv-generator[338234]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:02:58 compute-0 systemd-rc-local-generator[338231]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]: {
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:     "0": [
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:         {
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "devices": [
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "/dev/loop3"
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             ],
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "lv_name": "ceph_lv0",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "lv_size": "21470642176",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "name": "ceph_lv0",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "tags": {
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.cluster_name": "ceph",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.crush_device_class": "",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.encrypted": "0",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.osd_id": "0",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.type": "block",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.vdo": "0"
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             },
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "type": "block",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "vg_name": "ceph_vg0"
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:         }
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:     ],
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:     "1": [
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:         {
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "devices": [
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "/dev/loop4"
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             ],
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "lv_name": "ceph_lv1",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "lv_size": "21470642176",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "name": "ceph_lv1",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "tags": {
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.cluster_name": "ceph",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.crush_device_class": "",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.encrypted": "0",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.osd_id": "1",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.type": "block",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.vdo": "0"
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             },
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "type": "block",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "vg_name": "ceph_vg1"
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:         }
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:     ],
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:     "2": [
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:         {
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "devices": [
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "/dev/loop5"
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             ],
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "lv_name": "ceph_lv2",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "lv_size": "21470642176",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "name": "ceph_lv2",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "tags": {
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.cluster_name": "ceph",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.crush_device_class": "",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.encrypted": "0",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.osd_id": "2",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.type": "block",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:                 "ceph.vdo": "0"
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             },
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "type": "block",
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:             "vg_name": "ceph_vg2"
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:         }
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]:     ]
Oct 11 02:02:59 compute-0 nostalgic_shaw[338148]: }
Oct 11 02:02:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v744: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:02:59 compute-0 systemd[1]: libpod-d9092d7c790dc1cb419888209b5253b3fadc5b113e432b0696adf2f5069f53dc.scope: Deactivated successfully.
Oct 11 02:02:59 compute-0 podman[338248]: 2025-10-11 02:02:59.25221571 +0000 UTC m=+0.058596259 container died d9092d7c790dc1cb419888209b5253b3fadc5b113e432b0696adf2f5069f53dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_shaw, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:02:59 compute-0 systemd[1]: var-lib-containers-storage-overlay-316e7e93b4a13a2b99119cd70bbedc7fc6b34eb02531962e2a9f500d5ef1c82b-merged.mount: Deactivated successfully.
Oct 11 02:02:59 compute-0 podman[338248]: 2025-10-11 02:02:59.348408412 +0000 UTC m=+0.154788941 container remove d9092d7c790dc1cb419888209b5253b3fadc5b113e432b0696adf2f5069f53dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_shaw, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:02:59 compute-0 systemd[1]: libpod-conmon-d9092d7c790dc1cb419888209b5253b3fadc5b113e432b0696adf2f5069f53dc.scope: Deactivated successfully.
Oct 11 02:02:59 compute-0 sudo[338026]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:59 compute-0 systemd-logind[804]: Watching system buttons on /dev/input/event0 (Power Button)
Oct 11 02:02:59 compute-0 systemd-logind[804]: Watching system buttons on /dev/input/event1 (AT Translated Set 2 keyboard)
Oct 11 02:02:59 compute-0 lvm[338321]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Oct 11 02:02:59 compute-0 lvm[338321]: VG ceph_vg0 finished
Oct 11 02:02:59 compute-0 lvm[338320]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Oct 11 02:02:59 compute-0 lvm[338320]: VG ceph_vg1 finished
Oct 11 02:02:59 compute-0 lvm[338322]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Oct 11 02:02:59 compute-0 lvm[338322]: VG ceph_vg2 finished
Oct 11 02:02:59 compute-0 sudo[338298]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:02:59 compute-0 sudo[338298]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:02:59 compute-0 sudo[338298]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:59 compute-0 sudo[338343]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:02:59 compute-0 sudo[338343]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:02:59 compute-0 sudo[338343]: pam_unix(sudo:session): session closed for user root
Oct 11 02:02:59 compute-0 systemd[1]: Started /usr/bin/systemctl start man-db-cache-update.
Oct 11 02:02:59 compute-0 systemd[1]: Starting man-db-cache-update.service...
Oct 11 02:02:59 compute-0 podman[157119]: time="2025-10-11T02:02:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:02:59 compute-0 systemd[1]: Reloading.
Oct 11 02:02:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:02:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 40790 "" "Go-http-client/1.1"
Oct 11 02:02:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:02:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8106 "" "Go-http-client/1.1"
Oct 11 02:02:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:02:59 compute-0 systemd-rc-local-generator[338426]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:02:59 compute-0 systemd-sysv-generator[338431]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:03:00 compute-0 sudo[338375]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:03:00 compute-0 systemd[1]: Queuing reload/restart jobs for marked units…
Oct 11 02:03:00 compute-0 sudo[338375]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:03:00 compute-0 sudo[338375]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:00 compute-0 sudo[338618]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:03:00 compute-0 sudo[338618]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:03:00 compute-0 ceph-mon[191930]: pgmap v744: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:00 compute-0 systemd[1]: Starting PackageKit Daemon...
Oct 11 02:03:00 compute-0 PackageKit[338804]: daemon start
Oct 11 02:03:00 compute-0 systemd[1]: Started PackageKit Daemon.
Oct 11 02:03:00 compute-0 sudo[337919]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:00 compute-0 podman[339034]: 2025-10-11 02:03:00.932333781 +0000 UTC m=+0.072963584 container create e7d912d088962818e4eb4e5b928b768575752a72e943769e8f288611ea16548e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_beaver, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, ceph=True)
Oct 11 02:03:00 compute-0 systemd[1]: Started libpod-conmon-e7d912d088962818e4eb4e5b928b768575752a72e943769e8f288611ea16548e.scope.
Oct 11 02:03:00 compute-0 podman[339034]: 2025-10-11 02:03:00.906584666 +0000 UTC m=+0.047214499 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:03:01 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:03:01 compute-0 podman[339034]: 2025-10-11 02:03:01.055199297 +0000 UTC m=+0.195829130 container init e7d912d088962818e4eb4e5b928b768575752a72e943769e8f288611ea16548e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_beaver, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:03:01 compute-0 podman[339034]: 2025-10-11 02:03:01.07053592 +0000 UTC m=+0.211165723 container start e7d912d088962818e4eb4e5b928b768575752a72e943769e8f288611ea16548e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_beaver, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:03:01 compute-0 podman[339034]: 2025-10-11 02:03:01.076918609 +0000 UTC m=+0.217548452 container attach e7d912d088962818e4eb4e5b928b768575752a72e943769e8f288611ea16548e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_beaver, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 02:03:01 compute-0 jovial_beaver[339172]: 167 167
Oct 11 02:03:01 compute-0 systemd[1]: libpod-e7d912d088962818e4eb4e5b928b768575752a72e943769e8f288611ea16548e.scope: Deactivated successfully.
Oct 11 02:03:01 compute-0 podman[339034]: 2025-10-11 02:03:01.083452897 +0000 UTC m=+0.224082700 container died e7d912d088962818e4eb4e5b928b768575752a72e943769e8f288611ea16548e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_beaver, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:03:01 compute-0 systemd[1]: var-lib-containers-storage-overlay-4a9eb889275274dd8109c4049f7203ba8e46a00e9383f1b2c79414c1838daf37-merged.mount: Deactivated successfully.
Oct 11 02:03:01 compute-0 podman[339034]: 2025-10-11 02:03:01.148814774 +0000 UTC m=+0.289444577 container remove e7d912d088962818e4eb4e5b928b768575752a72e943769e8f288611ea16548e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_beaver, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 02:03:01 compute-0 systemd[1]: libpod-conmon-e7d912d088962818e4eb4e5b928b768575752a72e943769e8f288611ea16548e.scope: Deactivated successfully.
Oct 11 02:03:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v745: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:01 compute-0 podman[339439]: 2025-10-11 02:03:01.369633539 +0000 UTC m=+0.068019983 container create e472d985c3641c55a9aad03c3857ffb010cc001c3d927f09fc2ca6c57e4734a1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_goldberg, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:03:01 compute-0 openstack_network_exporter[159265]: ERROR   02:03:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:03:01 compute-0 openstack_network_exporter[159265]: ERROR   02:03:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:03:01 compute-0 openstack_network_exporter[159265]: ERROR   02:03:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:03:01 compute-0 openstack_network_exporter[159265]: ERROR   02:03:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:03:01 compute-0 openstack_network_exporter[159265]: ERROR   02:03:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:03:01 compute-0 podman[339439]: 2025-10-11 02:03:01.342003327 +0000 UTC m=+0.040389801 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:03:01 compute-0 systemd[1]: Started libpod-conmon-e472d985c3641c55a9aad03c3857ffb010cc001c3d927f09fc2ca6c57e4734a1.scope.
Oct 11 02:03:01 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:03:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/72cbbe0ccc671b1a0f181d79d568f1ae84e6bfc0593f3771962ba978bb39c440/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:03:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/72cbbe0ccc671b1a0f181d79d568f1ae84e6bfc0593f3771962ba978bb39c440/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:03:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/72cbbe0ccc671b1a0f181d79d568f1ae84e6bfc0593f3771962ba978bb39c440/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:03:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/72cbbe0ccc671b1a0f181d79d568f1ae84e6bfc0593f3771962ba978bb39c440/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:03:01 compute-0 podman[339439]: 2025-10-11 02:03:01.522537842 +0000 UTC m=+0.220924286 container init e472d985c3641c55a9aad03c3857ffb010cc001c3d927f09fc2ca6c57e4734a1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_goldberg, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS)
Oct 11 02:03:01 compute-0 podman[339439]: 2025-10-11 02:03:01.538342645 +0000 UTC m=+0.236729079 container start e472d985c3641c55a9aad03c3857ffb010cc001c3d927f09fc2ca6c57e4734a1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_goldberg, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:03:01 compute-0 podman[339439]: 2025-10-11 02:03:01.542438927 +0000 UTC m=+0.240825391 container attach e472d985c3641c55a9aad03c3857ffb010cc001c3d927f09fc2ca6c57e4734a1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_goldberg, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:03:02 compute-0 systemd[1]: man-db-cache-update.service: Deactivated successfully.
Oct 11 02:03:02 compute-0 systemd[1]: Finished man-db-cache-update.service.
Oct 11 02:03:02 compute-0 systemd[1]: man-db-cache-update.service: Consumed 2.606s CPU time.
Oct 11 02:03:02 compute-0 systemd[1]: run-rde7613c691ad4772bdbc0cd25ead0f3f.service: Deactivated successfully.
Oct 11 02:03:02 compute-0 sudo[339857]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lvfjbatkigtpqjwagimkciwqdxitbfzv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148181.1431549-962-179422733191104/AnsiballZ_file.py'
Oct 11 02:03:02 compute-0 sudo[339857]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:02 compute-0 ceph-mon[191930]: pgmap v745: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:02 compute-0 python3.9[339862]: ansible-ansible.builtin.file Invoked with mode=0600 path=/etc/iscsi/.iscsid_restart_required state=touch recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]: {
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:         "osd_id": 1,
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:         "type": "bluestore"
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:     },
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:         "osd_id": 2,
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:         "type": "bluestore"
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:     },
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:         "osd_id": 0,
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:         "type": "bluestore"
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]:     }
Oct 11 02:03:02 compute-0 quizzical_goldberg[339555]: }
Oct 11 02:03:02 compute-0 sudo[339857]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:02 compute-0 systemd[1]: libpod-e472d985c3641c55a9aad03c3857ffb010cc001c3d927f09fc2ca6c57e4734a1.scope: Deactivated successfully.
Oct 11 02:03:02 compute-0 systemd[1]: libpod-e472d985c3641c55a9aad03c3857ffb010cc001c3d927f09fc2ca6c57e4734a1.scope: Consumed 1.120s CPU time.
Oct 11 02:03:02 compute-0 podman[339439]: 2025-10-11 02:03:02.668644185 +0000 UTC m=+1.367030659 container died e472d985c3641c55a9aad03c3857ffb010cc001c3d927f09fc2ca6c57e4734a1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_goldberg, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=reef)
Oct 11 02:03:02 compute-0 systemd[1]: var-lib-containers-storage-overlay-72cbbe0ccc671b1a0f181d79d568f1ae84e6bfc0593f3771962ba978bb39c440-merged.mount: Deactivated successfully.
Oct 11 02:03:02 compute-0 podman[339439]: 2025-10-11 02:03:02.743938134 +0000 UTC m=+1.442324568 container remove e472d985c3641c55a9aad03c3857ffb010cc001c3d927f09fc2ca6c57e4734a1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_goldberg, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:03:02 compute-0 systemd[1]: libpod-conmon-e472d985c3641c55a9aad03c3857ffb010cc001c3d927f09fc2ca6c57e4734a1.scope: Deactivated successfully.
Oct 11 02:03:02 compute-0 sudo[338618]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:03:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:03:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:03:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:03:02 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev bd6bec1a-d686-4d86-910a-19fa13c08d13 does not exist
Oct 11 02:03:02 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev a07e374c-7cf4-496d-abcc-6efc454a0113 does not exist
Oct 11 02:03:02 compute-0 sudo[339911]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:03:02 compute-0 sudo[339911]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:03:02 compute-0 sudo[339911]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:03 compute-0 sudo[339959]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:03:03 compute-0 sudo[339959]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:03:03 compute-0 sudo[339959]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v746: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:03 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:03:03 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:03:03 compute-0 ceph-mon[191930]: pgmap v746: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:03 compute-0 python3.9[340086]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 02:03:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:03:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v747: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:05 compute-0 sudo[340285]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nqjtvoxxxyyapgpbxcxbmpldwsjvdjdj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148184.6417265-980-81819113475858/AnsiballZ_file.py'
Oct 11 02:03:05 compute-0 podman[340214]: 2025-10-11 02:03:05.217434667 +0000 UTC m=+0.120759354 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:03:05 compute-0 sudo[340285]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:05 compute-0 podman[340216]: 2025-10-11 02:03:05.258575341 +0000 UTC m=+0.152179848 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., release=1214.1726694543, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.29.0, io.k8s.display-name=Red Hat Universal Base Image 9, build-date=2024-09-18T21:23:30, com.redhat.component=ubi9-container, vcs-type=git, config_id=edpm, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, release-0.7.12=, version=9.4, summary=Provides the latest release of Red Hat Universal Base Image 9., container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, managed_by=edpm_ansible, name=ubi9, io.openshift.expose-services=)
Oct 11 02:03:05 compute-0 podman[340215]: 2025-10-11 02:03:05.262193228 +0000 UTC m=+0.165829758 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, container_name=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller)
Oct 11 02:03:05 compute-0 python3.9[340305]: ansible-ansible.builtin.file Invoked with mode=0644 path=/etc/ssh/ssh_known_hosts state=touch recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:05 compute-0 sudo[340285]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:06 compute-0 ceph-mon[191930]: pgmap v747: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:03:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:03:07 compute-0 sudo[340471]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rtyhmqdbfoqvexdkxhqlztbpyjacxudc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148186.0392678-991-226757691490121/AnsiballZ_systemd_service.py'
Oct 11 02:03:07 compute-0 sudo[340471]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:07 compute-0 podman[340432]: 2025-10-11 02:03:07.031824744 +0000 UTC m=+0.145332697 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, config_id=edpm, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:03:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v748: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:07 compute-0 python3.9[340478]: ansible-ansible.builtin.systemd_service Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 02:03:07 compute-0 systemd[1]: Reloading.
Oct 11 02:03:07 compute-0 systemd-rc-local-generator[340506]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:03:07 compute-0 systemd-sysv-generator[340510]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.942 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to be longer than the expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.943 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.943 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.944 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f8ed27f97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb8c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb1a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb200>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed2874260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.946 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.950 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.951 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed3ab42f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.951 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.952 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb350>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.952 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb90>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.952 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fa390>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.952 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb3b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.953 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbbf0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.953 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbc80>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.953 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.953 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.953 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.954 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27f9610>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.954 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb620>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.954 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbe30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.954 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbec0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.954 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbf50>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f8ed27fbad0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.955 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.955 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f8ed27faff0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.956 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.956 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f8ed27fb110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.956 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.956 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f8ed27fb170>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.956 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f8ed27fb1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f8ed27fb230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f8ed2874230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f8ed27fb290>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f8ed5778d70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f8ed27fb650>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f8ed27fbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f8ed27fb320>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f8ed27fbb60>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f8ed27fa3f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f8ed27fb380>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f8ed27fbbc0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f8ed27fbc50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.962 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f8ed27fbce0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.962 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.962 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f8ed27fbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.962 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.962 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f8ed27fb590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.963 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.963 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f8ed27f95e0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.963 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.963 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f8ed27fb5f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.963 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.964 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f8ed27fbe00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.964 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.964 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f8ed27fbe90>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.964 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.964 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f8ed27fbf20>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.964 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.965 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.966 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.970 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:03:07.970 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:03:07 compute-0 sudo[340471]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:08 compute-0 ceph-mon[191930]: pgmap v748: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:09 compute-0 python3.9[340664]: ansible-ansible.builtin.service_facts Invoked
Oct 11 02:03:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v749: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:09 compute-0 network[340681]: You are using the 'network' service provided by 'network-scripts', which is now deprecated.
Oct 11 02:03:09 compute-0 network[340682]: 'network-scripts' will be removed from the distribution in the near future.
Oct 11 02:03:09 compute-0 network[340683]: It is advised to switch to 'NetworkManager' for network management.
Oct 11 02:03:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:03:10 compute-0 ceph-mon[191930]: pgmap v749: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v750: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:03:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Cumulative writes: 5639 writes, 23K keys, 5639 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.02 MB/s
                                            Cumulative WAL: 5639 writes, 885 syncs, 6.37 writes per sync, written: 0.02 GB, 0.02 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 212 writes, 318 keys, 212 commit groups, 1.0 writes per commit group, ingest: 0.11 MB, 0.00 MB/s
                                            Interval WAL: 212 writes, 106 syncs, 2.00 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      2/0    2.63 KB   0.2      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                             Sum      2/0    2.63 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 4e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
                                            
                                            ** Compaction Stats [m-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 4e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-0] **
                                            
                                            ** Compaction Stats [m-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 4e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-1] **
                                            
                                            ** Compaction Stats [m-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 4e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-2] **
                                            
                                            ** Compaction Stats [p-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.56 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.01              0.00         1    0.006       0      0       0.0       0.0
                                             Sum      1/0    1.56 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.01              0.00         1    0.006       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.3      0.01              0.00         1    0.006       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 4e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-0] **
                                            
                                            ** Compaction Stats [p-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 4e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-1] **
                                            
                                            ** Compaction Stats [p-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 4e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-2] **
                                            
                                            ** Compaction Stats [O-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 2 last_secs: 8e-06 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-0] **
                                            
                                            ** Compaction Stats [O-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 2 last_secs: 8e-06 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-1] **
                                            
                                            ** Compaction Stats [O-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.25 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Sum      1/0    1.25 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.2      0.00              0.00         1    0.005       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 2 last_secs: 8e-06 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-2] **
                                            
                                            ** Compaction Stats [L] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [L] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 4e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [L] **
                                            
                                            ** Compaction Stats [P] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [P] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5602580c4dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 4e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [P] **
Oct 11 02:03:12 compute-0 ceph-mon[191930]: pgmap v750: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
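The pgmap lines that ceph-mon and ceph-mgr repeat throughout this capture all follow one fixed shape. A minimal Python sketch for turning them into numbers, assuming exactly the "pgmap vN: N pgs: ...; ... data, ... used, ... / ... avail" wording shown in this journal (the sample line is copied from the entry above); this is a convenience for reading the capture, not a Ceph API:

#!/usr/bin/env python3
"""Sketch: parse the recurring ceph pgmap lines in this journal.
Assumes the exact wording seen above; not a general Ceph parser."""
import re

PGMAP = re.compile(
    r"pgmap v(\d+): (\d+) pgs: (.+?); (\S+ \S+) data, (\S+ \S+) used, "
    r"(\S+ \S+) / (\S+ \S+) avail"
)

# Sample copied verbatim from the log entry above.
line = ("pgmap v750: 321 pgs: 321 active+clean; 455 KiB data, "
        "148 MiB used, 60 GiB / 60 GiB avail")
m = PGMAP.search(line)
if m:
    version, pgs, states, data, used, free, total = m.groups()
    print(f"v{version}: {pgs} pgs ({states}), {used} used of {total}")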
Oct 11 02:03:13 compute-0 podman[340759]: 2025-10-11 02:03:13.076122572 +0000 UTC m=+0.150396119 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, container_name=iscsid, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, tcib_managed=true, io.buildah.version=1.41.3)
Oct 11 02:03:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v751: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:13 compute-0 podman[340796]: 2025-10-11 02:03:13.695792041 +0000 UTC m=+0.146327718 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=multipathd, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
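Both podman health_status entries above come from podman's scheduled healthcheck, which runs the configured test command ('/openstack/healthcheck', per the healthcheck key in config_data) inside each container. A hedged sketch for spot-checking the same status from the host using standard podman subcommands; the container names iscsid and multipathd are taken from the log, and this assumes access to the same rootful podman instance that emitted these journal lines:

#!/usr/bin/env python3
"""Sketch: query the health state podman logged above for the iscsid
and multipathd containers via `podman inspect`."""
import json
import subprocess

for name in ("iscsid", "multipathd"):  # container_name values from the log
    out = subprocess.run(
        ["podman", "inspect", "--format", "{{json .State.Health}}", name],
        capture_output=True, text=True, check=True,
    ).stdout
    health = json.loads(out) or {}  # null if the container has no healthcheck
    print(name, health.get("Status"), "failing_streak:", health.get("FailingStreak"))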
Oct 11 02:03:14 compute-0 ceph-mon[191930]: pgmap v751: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:03:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v752: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:15 compute-0 sudo[340999]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jjtzeajbqtcgmrlhoyomtngldzfymxft ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148195.30601-1010-104831358927075/AnsiballZ_systemd_service.py'
Oct 11 02:03:15 compute-0 sudo[340999]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:16 compute-0 python3.9[341001]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_compute.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:03:16 compute-0 sudo[340999]: pam_unix(sudo:session): session closed for user root
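The sudo/python3.9 triplet above records ansible.builtin.systemd_service stopping and disabling tripleo_nova_compute.service (state=stopped, enabled=False); the same pattern repeats below for the other tripleo_nova_* units. A minimal sketch of the equivalent action without Ansible, assuming systemctl is on PATH and root privileges as the surrounding pam_unix session lines imply:

#!/usr/bin/env python3
"""Sketch: replicate the logged systemd_service call (state=stopped,
enabled=False) with plain systemctl. Unit name is taken verbatim from
the ansible invocation above; running this for real requires root."""
import subprocess

UNIT = "tripleo_nova_compute.service"  # from the log entry above

def stop_and_disable(unit: str) -> None:
    # state=stopped  -> systemctl stop <unit>
    subprocess.run(["systemctl", "stop", unit], check=True)
    # enabled=False  -> systemctl disable <unit>
    subprocess.run(["systemctl", "disable", unit], check=True)

if __name__ == "__main__":
    stop_and_disable(UNIT)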
Oct 11 02:03:16 compute-0 ceph-mon[191930]: pgmap v752: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v753: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:17 compute-0 sudo[341152]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hbeqfotbbxbsjobdxvvfmfhlcammvsvl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148196.618065-1010-158583141669691/AnsiballZ_systemd_service.py'
Oct 11 02:03:17 compute-0 sudo[341152]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:17 compute-0 python3.9[341154]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_migration_target.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:03:17 compute-0 sudo[341152]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:18 compute-0 ceph-mon[191930]: pgmap v753: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:18 compute-0 sudo[341305]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-veausexvxenjkmywynhdhshjokhmwknj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148197.9255097-1010-76490361860048/AnsiballZ_systemd_service.py'
Oct 11 02:03:18 compute-0 sudo[341305]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:03:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Cumulative writes: 6947 writes, 28K keys, 6947 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.02 MB/s
                                            Cumulative WAL: 6947 writes, 1249 syncs, 5.56 writes per sync, written: 0.02 GB, 0.02 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 180 writes, 271 keys, 180 commit groups, 1.0 writes per commit group, ingest: 0.09 MB, 0.00 MB/s
                                            Interval WAL: 180 writes, 90 syncs, 2.00 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      2/0    2.63 KB   0.2      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.01              0.00         1    0.005       0      0       0.0       0.0
                                             Sum      2/0    2.63 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.01              0.00         1    0.005       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.2      0.01              0.00         1    0.005       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
                                            
                                            ** Compaction Stats [m-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-0] **
                                            
                                            ** Compaction Stats [m-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-1] **
                                            
                                            ** Compaction Stats [m-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-2] **
                                            
                                            ** Compaction Stats [p-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.56 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Sum      1/0    1.56 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.3      0.00              0.00         1    0.005       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-0] **
                                            
                                            ** Compaction Stats [p-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-1] **
                                            
                                            ** Compaction Stats [p-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-2] **
                                            
                                            ** Compaction Stats [O-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 2 last_secs: 1.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-0] **
                                            
                                            ** Compaction Stats [O-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 2 last_secs: 1.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-1] **
                                            
                                            ** Compaction Stats [O-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.25 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.00              0.00         1    0.004       0      0       0.0       0.0
                                             Sum      1/0    1.25 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.3      0.00              0.00         1    0.004       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.3      0.00              0.00         1    0.004       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 2 last_secs: 1.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-2] **
                                            
                                            ** Compaction Stats [L] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [L] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.002       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [L] **
                                            
                                            ** Compaction Stats [P] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [P] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.1 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x559c9de16dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 5.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [P] **
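Each ceph-osd RocksDB dump above recurs every 600 s (the interval figure in every Uptime line) and always opens with a "** DB Stats **" section before the per-column-family compaction tables. A small parsing sketch for pulling the throughput figures out of a captured journal; the regexes assume exactly the line layout shown in this dump and are not a general RocksDB stats parser:

#!/usr/bin/env python3
"""Sketch: extract uptime and cumulative write throughput from the
'** DB Stats **' section of a RocksDB stats dump as logged above.
Usage: python3 parse_db_stats.py < journal.txt"""
import re
import sys

UPTIME = re.compile(r"Uptime\(secs\): ([\d.]+) total, ([\d.]+) interval")
WRITES = re.compile(
    r"Cumulative writes: (\d+) writes, (\S+) keys, .* "
    r"ingest: ([\d.]+) GB, ([\d.]+) MB/s"
)

def scan(lines):
    for line in lines:
        m = UPTIME.search(line)
        if m:
            print(f"uptime: {m.group(1)}s total, {m.group(2)}s interval")
        m = WRITES.search(line)
        if m:
            writes, keys, gb, rate = m.groups()
            print(f"writes={writes} keys={keys} ingest={gb} GB ({rate} MB/s)")

if __name__ == "__main__":
    scan(sys.stdin)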
Oct 11 02:03:18 compute-0 python3.9[341307]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_api_cron.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:03:18 compute-0 sudo[341305]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v754: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:19 compute-0 sudo[341459]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mvmrutqzveglzfquhenxgafsefrodosy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148199.193294-1010-84814144782607/AnsiballZ_systemd_service.py'
Oct 11 02:03:19 compute-0 sudo[341459]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:03:20 compute-0 python3.9[341461]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_api.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:03:20 compute-0 sudo[341459]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:20 compute-0 ceph-mon[191930]: pgmap v754: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:21 compute-0 sudo[341642]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iwappikpjtwyvmejddrkjovcltsizqpi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148200.3970838-1010-140415136185039/AnsiballZ_systemd_service.py'
Oct 11 02:03:21 compute-0 sudo[341642]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:21 compute-0 podman[341587]: 2025-10-11 02:03:21.026031726 +0000 UTC m=+0.133363494 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.expose-services=, architecture=x86_64, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, url=https://catalog.redhat.com/en/search?searchType=containers, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, vendor=Red Hat, Inc., vcs-type=git, managed_by=edpm_ansible, release=1755695350, maintainer=Red Hat, Inc., io.openshift.tags=minimal rhel9, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, container_name=openstack_network_exporter, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_id=edpm, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, version=9.6, com.redhat.component=ubi9-minimal-container, name=ubi9-minimal, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, io.buildah.version=1.33.7)
Oct 11 02:03:21 compute-0 podman[341586]: 2025-10-11 02:03:21.026126677 +0000 UTC m=+0.136007681 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
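[editor's note] The two health_status=healthy events above come from podman's periodic container healthchecks (driven by transient systemd timers on hosts like this one) running the 'test' command listed in each container's config_data. A minimal sketch for re-running those checks by hand, assuming the container names from the log and a host with the podman CLI:

    import subprocess

    def is_healthy(name: str) -> bool:
        # 'podman healthcheck run' executes the container's configured
        # healthcheck command and exits 0 when it passes.
        return subprocess.run(
            ["podman", "healthcheck", "run", name],
            capture_output=True, text=True,
        ).returncode == 0

    for name in ("openstack_network_exporter", "node_exporter"):
        print(name, "healthy" if is_healthy(name) else "unhealthy")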
Oct 11 02:03:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v755: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:21 compute-0 python3.9[341655]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_conductor.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:03:21 compute-0 sudo[341642]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:22 compute-0 sudo[341806]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fadvlrsijkctenbgqhcxgzogcjyoqdhl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148201.6625006-1010-206845922726674/AnsiballZ_systemd_service.py'
Oct 11 02:03:22 compute-0 sudo[341806]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:22 compute-0 ceph-mon[191930]: pgmap v755: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:22 compute-0 python3.9[341808]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_metadata.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:03:22 compute-0 sudo[341806]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v756: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:23 compute-0 sudo[341959]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-leuykufsvtirzifxnwubspyrurnemnhe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148202.942417-1010-122243417358898/AnsiballZ_systemd_service.py'
Oct 11 02:03:23 compute-0 sudo[341959]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:23 compute-0 python3.9[341961]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_scheduler.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:03:23 compute-0 sudo[341959]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:24 compute-0 ceph-mon[191930]: pgmap v756: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:03:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v757: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:25 compute-0 podman[342086]: 2025-10-11 02:03:25.215467471 +0000 UTC m=+0.117153267 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_id=edpm)
Oct 11 02:03:25 compute-0 sudo[342126]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-emfqhrwrbmidshralewzuwrfnqirvwek ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148204.5935626-1010-4905159966327/AnsiballZ_systemd_service.py'
Oct 11 02:03:25 compute-0 sudo[342126]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:25 compute-0 python3.9[342131]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_nova_vnc_proxy.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:03:25 compute-0 sudo[342126]: pam_unix(sudo:session): session closed for user root
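[editor's note] Each ansible-ansible.builtin.systemd_service invocation in this window carries enabled=False and state=stopped, i.e. the play is stopping and disabling the old tripleo_nova_* units one by one, each wrapped in its own sudo session. A minimal sketch of the equivalent direct systemctl calls, assuming only the unit names seen above:

    import subprocess

    # Units retired in this log window, in the order they were invoked.
    UNITS = [
        "tripleo_nova_api.service",
        "tripleo_nova_conductor.service",
        "tripleo_nova_metadata.service",
        "tripleo_nova_scheduler.service",
        "tripleo_nova_vnc_proxy.service",
    ]

    for unit in UNITS:
        # state=stopped  -> systemctl stop
        # enabled=False  -> systemctl disable
        subprocess.run(["systemctl", "stop", unit], check=False)
        subprocess.run(["systemctl", "disable", unit], check=False)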
Oct 11 02:03:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:03:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 1200.2 total, 600.0 interval
                                            Cumulative writes: 5728 writes, 24K keys, 5728 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.02 MB/s
                                            Cumulative WAL: 5728 writes, 920 syncs, 6.23 writes per sync, written: 0.02 GB, 0.02 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 180 writes, 270 keys, 180 commit groups, 1.0 writes per commit group, ingest: 0.09 MB, 0.00 MB/s
                                            Interval WAL: 180 writes, 90 syncs, 2.00 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      2/0    2.63 KB   0.2      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.014       0      0       0.0       0.0
                                             Sum      2/0    2.63 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.014       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.1      0.01              0.00         1    0.014       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 6.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
                                            
                                            ** Compaction Stats [m-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 6.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-0] **
                                            
                                            ** Compaction Stats [m-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 6.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-1] **
                                            
                                            ** Compaction Stats [m-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [m-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 6.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [m-2] **
                                            
                                            ** Compaction Stats [p-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.56 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.01              0.00         1    0.009       0      0       0.0       0.0
                                             Sum      1/0    1.56 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.2      0.01              0.00         1    0.009       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.2      0.01              0.00         1    0.009       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 6.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-0] **
                                            
                                            ** Compaction Stats [p-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 6.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-1] **
                                            
                                            ** Compaction Stats [p-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [p-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 6.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [p-2] **
                                            
                                            ** Compaction Stats [O-0] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-0] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 2 last_secs: 0.000115 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-0] **
                                            
                                            ** Compaction Stats [O-1] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-1] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 2 last_secs: 0.000115 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-1] **
                                            
                                            ** Compaction Stats [O-2] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      1/0    1.25 KB   0.1      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                             Sum      1/0    1.25 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [O-2] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.1      0.01              0.00         1    0.011       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856430#2 capacity: 224.00 MB usage: 0.45 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 2 last_secs: 0.000115 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1,0.20 KB,8.85555e-05%) FilterBlock(1,0.11 KB,4.76837e-05%) IndexBlock(1,0.14 KB,6.13076e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [O-2] **
                                            
                                            ** Compaction Stats [L] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.005       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [L] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         1    0.005       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 6.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [L] **
                                            
                                            ** Compaction Stats [P] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Sum      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0      0.0      0.00              0.00         0    0.000       0      0       0.0       0.0
                                            
                                            ** Compaction Stats [P] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1200.2 total, 600.0 interval
                                            Flush(GB): cumulative 0.000, interval 0.000
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x5626b1856dd0#2 capacity: 1.12 GB usage: 2.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 3 last_copies: 8 last_secs: 6.1e-05 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(3,1.42 KB,0.000120534%) FilterBlock(3,0.33 KB,2.78155e-05%) IndexBlock(3,0.34 KB,2.914e-05%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [P] **
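[editor's note] The block above is RocksDB's periodic "DUMPING STATS" report from the ceph-osd's embedded key-value store: one ** DB Stats ** header followed by per-column-family compaction tables and block-cache lines, emitted here on a 600 s interval (Uptime ... 600.0 interval). The near-zero flush/compaction figures and ~2 KB of cache usage are consistent with the essentially idle cluster shown in the surrounding pgmap lines (455 KiB of data). A minimal sketch for pulling the headline counters out of one such block, where 'dump' is assumed to hold the raw text of a single DUMPING STATS report:

    import re

    def parse_db_stats(dump: str) -> dict:
        """Extract headline counters from a RocksDB stats dump."""
        stats = {}
        m = re.search(r"Uptime\(secs\): ([\d.]+) total, ([\d.]+) interval", dump)
        if m:
            stats["uptime_s"], stats["interval_s"] = float(m[1]), float(m[2])
        m = re.search(r"Cumulative writes: (\d+) writes", dump)
        if m:
            stats["writes"] = int(m[1])
        m = re.search(r"Cumulative WAL: (\d+) writes, (\d+) syncs", dump)
        if m:
            stats["wal_writes"], stats["wal_syncs"] = int(m[1]), int(m[2])
        return stats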
Oct 11 02:03:26 compute-0 ceph-mgr[192233]: [devicehealth INFO root] Check health
Oct 11 02:03:26 compute-0 ceph-mon[191930]: pgmap v757: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:03:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:03:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:03:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:03:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:03:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:03:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v758: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:27 compute-0 sudo[342282]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rolvobjkmyyoernxcehqcmzoynqnqrtp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148207.0706015-1069-32129193400213/AnsiballZ_file.py'
Oct 11 02:03:27 compute-0 sudo[342282]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:27 compute-0 python3.9[342284]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_compute.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:27 compute-0 sudo[342282]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:28 compute-0 ceph-mon[191930]: pgmap v758: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:28 compute-0 sudo[342434]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fyozdksgugvpalbciinwttfqfmmoueoo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148208.148844-1069-208221351365985/AnsiballZ_file.py'
Oct 11 02:03:28 compute-0 sudo[342434]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:28 compute-0 python3.9[342436]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_migration_target.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:28 compute-0 sudo[342434]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v759: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:29 compute-0 podman[342461]: 2025-10-11 02:03:29.255961542 +0000 UTC m=+0.145182636 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_metadata_agent, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_id=ovn_metadata_agent, io.buildah.version=1.41.3)
Oct 11 02:03:29 compute-0 podman[157119]: time="2025-10-11T02:03:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:03:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:03:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 40789 "" "Go-http-client/1.1"
Oct 11 02:03:29 compute-0 sudo[342605]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ukzduwaraohwxklutuyhpiiboaxccpzy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148209.1970913-1069-20607915882400/AnsiballZ_file.py'
Oct 11 02:03:29 compute-0 sudo[342605]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:03:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8108 "" "Go-http-client/1.1"
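[editor's note] The two HTTP access lines from podman[157119] are the libpod REST API being polled over the podman socket (the podman_exporter container later in this log mounts /run/podman/podman.sock, which is assumed to be the socket in use here). A hedged sketch of the same containers/json query, shelling out to curl's --unix-socket support:

```python
# Query the libpod endpoint seen in the log over the podman API socket.
# Socket path taken from the podman_exporter config in this log; adjust
# for other hosts.
import json, subprocess

out = subprocess.run(
    ["curl", "-s", "--unix-socket", "/run/podman/podman.sock",
     "http://d/v4.9.3/libpod/containers/json?all=true"],
    check=True, capture_output=True, text=True,
).stdout
for c in json.loads(out):
    print(c["Id"][:12], c["Names"], c["State"])
```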
Oct 11 02:03:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:03:30 compute-0 python3.9[342607]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_api_cron.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:30 compute-0 sudo[342605]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:30 compute-0 ceph-mon[191930]: pgmap v759: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:31 compute-0 sudo[342757]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nobsnzfmvwsiranlgokdyjiocqzpisnm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148210.4215777-1069-40609457923464/AnsiballZ_file.py'
Oct 11 02:03:31 compute-0 sudo[342757]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v760: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:31 compute-0 python3.9[342759]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_api.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:31 compute-0 sudo[342757]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:31 compute-0 openstack_network_exporter[159265]: ERROR   02:03:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:03:31 compute-0 openstack_network_exporter[159265]: ERROR   02:03:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:03:31 compute-0 openstack_network_exporter[159265]: ERROR   02:03:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:03:31 compute-0 openstack_network_exporter[159265]: ERROR   02:03:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:03:31 compute-0 openstack_network_exporter[159265]: ERROR   02:03:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
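[editor's note] The exporter ERRORs above recur throughout this log: the collector probes OVS/OVN control sockets before issuing appctl calls, and on this compute node ovn-northd does not run locally and there is no userspace (dpif-netdev) datapath, so the probes fail harmlessly. A small sketch of that pre-check; the glob patterns are the conventional rundir locations and are an assumption here:

```python
# Probe for OVS/OVN control sockets the way the exporter does before
# calling appctl; on a compute node without ovn-northd nothing matches.
import glob

for pattern in ("/run/openvswitch/ovsdb-server.*.ctl",
                "/run/ovn/ovn-northd.*.ctl"):
    hits = glob.glob(pattern)
    print(pattern, "->", hits or "no control socket files found")
```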
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #36. Immutable memtables: 0.
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:03:31.464556) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 15] Flushing memtable with next log file: 36
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148211464636, "job": 15, "event": "flush_started", "num_memtables": 1, "num_entries": 1585, "num_deletes": 251, "total_data_size": 2633730, "memory_usage": 2663600, "flush_reason": "Manual Compaction"}
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 15] Level-0 flush table #37: started
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148211478752, "cf_name": "default", "job": 15, "event": "table_file_creation", "file_number": 37, "file_size": 2587989, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 14738, "largest_seqno": 16322, "table_properties": {"data_size": 2580560, "index_size": 4436, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1925, "raw_key_size": 14616, "raw_average_key_size": 19, "raw_value_size": 2565894, "raw_average_value_size": 3439, "num_data_blocks": 203, "num_entries": 746, "num_filter_entries": 746, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760148035, "oldest_key_time": 1760148035, "file_creation_time": 1760148211, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 37, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 15] Flush lasted 14238 microseconds, and 7330 cpu microseconds.
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:03:31.478816) [db/flush_job.cc:967] [default] [JOB 15] Level-0 flush table #37: 2587989 bytes OK
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:03:31.478836) [db/memtable_list.cc:519] [default] Level-0 commit table #37 started
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:03:31.481138) [db/memtable_list.cc:722] [default] Level-0 commit table #37: memtable #1 done
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:03:31.481153) EVENT_LOG_v1 {"time_micros": 1760148211481148, "job": 15, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:03:31.481173) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 15] Try to delete WAL files size 2626896, prev total WAL file size 2626896, number of live WAL files 2.
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000033.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:03:31.482324) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730031303034' seq:72057594037927935, type:22 .. '7061786F730031323536' seq:0, type:0; will stop at (end)
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 16] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 15 Base level 0, inputs: [37(2527KB)], [35(6740KB)]
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148211482430, "job": 16, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [37], "files_L6": [35], "score": -1, "input_data_size": 9490554, "oldest_snapshot_seqno": -1}
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 16] Generated table #38: 3982 keys, 7725029 bytes, temperature: kUnknown
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148211532717, "cf_name": "default", "job": 16, "event": "table_file_creation", "file_number": 38, "file_size": 7725029, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 7696127, "index_size": 17848, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 9989, "raw_key_size": 97256, "raw_average_key_size": 24, "raw_value_size": 7621685, "raw_average_value_size": 1914, "num_data_blocks": 757, "num_entries": 3982, "num_filter_entries": 3982, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760148211, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 38, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:03:31.532981) [db/compaction/compaction_job.cc:1663] [default] [JOB 16] Compacted 1@0 + 1@6 files to L6 => 7725029 bytes
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:03:31.534746) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 188.5 rd, 153.4 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(2.5, 6.6 +0.0 blob) out(7.4 +0.0 blob), read-write-amplify(6.7) write-amplify(3.0) OK, records in: 4496, records dropped: 514 output_compression: NoCompression
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:03:31.534764) EVENT_LOG_v1 {"time_micros": 1760148211534754, "job": 16, "event": "compaction_finished", "compaction_time_micros": 50352, "compaction_time_cpu_micros": 35208, "output_level": 6, "num_output_files": 1, "total_output_size": 7725029, "num_input_records": 4496, "num_output_records": 3982, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000037.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148211535501, "job": 16, "event": "table_file_deletion", "file_number": 37}
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000035.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148211537540, "job": 16, "event": "table_file_deletion", "file_number": 35}
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:03:31.482045) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:03:31.537752) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:03:31.537759) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:03:31.537762) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:03:31.537764) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:03:31 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:03:31.537766) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
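[editor's note] The JOB 15/16 sequence above is a routine ceph-mon store compaction: a manual-compaction request flushes the memtable to L0 (file 37), compacts it with the existing L6 file (35) into a new L6 file (38), then deletes the inputs. The amplification and throughput figures in the JOB 16 summary can be reproduced from the logged byte counts:

```python
# Worked check of the JOB 16 summary against the logged event data.
l0_in = 2587989                    # file 37 (L0 flush output)
total_in = 9490554                 # input_data_size from compaction_started
l6_in = total_in - l0_in           # file 35 at L6
out = 7725029                      # file 38 (compaction output)
t = 50352e-6                       # compaction_time_micros in seconds

print(round(out / l0_in, 2))                    # ~2.98 -> write-amplify(3.0)
print(round((l0_in + l6_in + out) / l0_in, 2))  # ~6.65 -> read-write-amplify(6.7)
print(round(total_in / t / 1e6, 1),             # ~188.5 MB/s rd
      round(out / t / 1e6, 1))                  # ~153.4 MB/s wr
```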
Oct 11 02:03:32 compute-0 sudo[342909]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-upqknrjdmldlkfozhebrczfciougjujb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148211.505967-1069-239410891961473/AnsiballZ_file.py'
Oct 11 02:03:32 compute-0 sudo[342909]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:32 compute-0 python3.9[342911]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_conductor.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:32 compute-0 sudo[342909]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:32 compute-0 ceph-mon[191930]: pgmap v760: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:32 compute-0 sudo[343061]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-acuujefcwnstowgecuesooekxyorpswx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148212.5368233-1069-110988525359470/AnsiballZ_file.py'
Oct 11 02:03:32 compute-0 sudo[343061]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v761: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:33 compute-0 python3.9[343063]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_metadata.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:33 compute-0 sudo[343061]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:34 compute-0 sudo[343213]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-epwhkefebwmzgonbrckjkcsuqndnkwkv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148213.4919975-1069-263826029576336/AnsiballZ_file.py'
Oct 11 02:03:34 compute-0 sudo[343213]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:34 compute-0 python3.9[343215]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_scheduler.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:34 compute-0 sudo[343213]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:34 compute-0 ceph-mon[191930]: pgmap v761: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:03:35 compute-0 sudo[343365]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bowkrypwlzqwvvzcefppjibqmxwwqyit ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148214.6413622-1069-212284512362587/AnsiballZ_file.py'
Oct 11 02:03:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v762: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:35 compute-0 sudo[343365]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:35 compute-0 python3.9[343367]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_nova_vnc_proxy.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:35 compute-0 sudo[343365]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:36 compute-0 podman[343481]: 2025-10-11 02:03:36.212746052 +0000 UTC m=+0.098018010 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, container_name=kepler, io.openshift.expose-services=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=base rhel9, io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., maintainer=Red Hat, Inc., managed_by=edpm_ansible, config_id=edpm, com.redhat.component=ubi9-container, io.buildah.version=1.29.0, vcs-type=git, version=9.4, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, distribution-scope=public, release=1214.1726694543, name=ubi9, architecture=x86_64, build-date=2024-09-18T21:23:30, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:03:36 compute-0 podman[343471]: 2025-10-11 02:03:36.23387566 +0000 UTC m=+0.127158420 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:03:36 compute-0 podman[343476]: 2025-10-11 02:03:36.24555227 +0000 UTC m=+0.136656198 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.schema-version=1.0)
Oct 11 02:03:36 compute-0 sudo[343579]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gotcmwudfolvkpvscflinjwuwwhynjpq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148215.7776334-1126-124379111378610/AnsiballZ_file.py'
Oct 11 02:03:36 compute-0 sudo[343579]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:36 compute-0 python3.9[343581]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_compute.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:36 compute-0 sudo[343579]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:36 compute-0 ceph-mon[191930]: pgmap v762: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v763: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:37 compute-0 podman[343685]: 2025-10-11 02:03:37.268669532 +0000 UTC m=+0.157670324 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, config_id=edpm, container_name=ceilometer_agent_compute, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 02:03:37 compute-0 sudo[343751]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gomlmgwmeztifknbgrgrzuqlrhfvcyrp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148216.7761388-1126-202065564192012/AnsiballZ_file.py'
Oct 11 02:03:37 compute-0 sudo[343751]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:37 compute-0 python3.9[343753]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_migration_target.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:37 compute-0 sudo[343751]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:38 compute-0 sudo[343903]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gbhacyfkibtinpabhfrrwhkqbbwypdre ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148217.8469524-1126-230236254565494/AnsiballZ_file.py'
Oct 11 02:03:38 compute-0 sudo[343903]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:38 compute-0 ceph-mon[191930]: pgmap v763: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:38 compute-0 python3.9[343905]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_api_cron.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:38 compute-0 sudo[343903]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v764: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:39 compute-0 sudo[344055]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ffrvqjlffqyqlmfoyvfaevfckeyrgels ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148218.9908252-1126-122270747588025/AnsiballZ_file.py'
Oct 11 02:03:39 compute-0 sudo[344055]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:39 compute-0 python3.9[344057]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_api.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:39 compute-0 sudo[344055]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:03:40 compute-0 ceph-mon[191930]: pgmap v764: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:40 compute-0 sudo[344207]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ymryoegrsasazkxmfanbkjknygkirmif ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148219.9955792-1126-11051009210146/AnsiballZ_file.py'
Oct 11 02:03:40 compute-0 sudo[344207]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:40 compute-0 python3.9[344209]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_conductor.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:40 compute-0 sudo[344207]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v765: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:41 compute-0 sudo[344359]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wxnyqdncvwidvualywvapzyzdlcifpkb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148221.0625732-1126-101388826458368/AnsiballZ_file.py'
Oct 11 02:03:41 compute-0 sudo[344359]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:41 compute-0 python3.9[344361]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_metadata.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:41 compute-0 sudo[344359]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:42 compute-0 auditd[699]: Audit daemon rotating log files
Oct 11 02:03:42 compute-0 ceph-mon[191930]: pgmap v765: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:42 compute-0 sudo[344511]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cgygvaldmqacgbyiuatkifudhsqrxqhv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148222.1097488-1126-243954129337105/AnsiballZ_file.py'
Oct 11 02:03:42 compute-0 sudo[344511]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:42 compute-0 python3.9[344513]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_scheduler.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:42 compute-0 sudo[344511]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v766: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:43 compute-0 sudo[344676]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-elktvohwgphnofeykgggnmcfnksqzfkw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148223.136021-1126-113942915475769/AnsiballZ_file.py'
Oct 11 02:03:43 compute-0 sudo[344676]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:43 compute-0 podman[344637]: 2025-10-11 02:03:43.720118191 +0000 UTC m=+0.137273674 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, container_name=iscsid, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:03:43 compute-0 podman[344686]: 2025-10-11 02:03:43.874101236 +0000 UTC m=+0.115761582 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_id=multipathd, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, container_name=multipathd, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:03:43 compute-0 python3.9[344684]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_nova_vnc_proxy.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:03:43 compute-0 sudo[344676]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:44 compute-0 ceph-mon[191930]: pgmap v766: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:03:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v767: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:45 compute-0 sudo[344853]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-soltclitsfkqgdrvfwrguqevbgljtbvx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148224.2640827-1184-184333147044109/AnsiballZ_command.py'
Oct 11 02:03:45 compute-0 sudo[344853]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:46 compute-0 python3.9[344855]: ansible-ansible.legacy.command Invoked with _raw_params=if systemctl is-active certmonger.service; then
                                               systemctl disable --now certmonger.service
                                               test -f /etc/systemd/system/certmonger.service || systemctl mask certmonger.service
                                             fi
                                              _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:03:46 compute-0 sudo[344853]: pam_unix(sudo:session): session closed for user root
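[editor's note] The shell run at 02:03:46 above only acts when certmonger is active: it disables and stops the unit, then masks it unless a local override unit file exists under /etc/systemd/system. An equivalent Python sketch, reconstructed from the logged commands (not the playbook's actual code):

```python
# Certmonger cleanup as logged: disable only if active, mask unless a
# local unit file exists (the `test -f ... || systemctl mask` branch).
import os, subprocess

def active(unit: str) -> bool:
    return subprocess.run(["systemctl", "is-active", unit]).returncode == 0

unit = "certmonger.service"
if active(unit):
    subprocess.run(["systemctl", "disable", "--now", unit], check=True)
    if not os.path.exists(f"/etc/systemd/system/{unit}"):
        subprocess.run(["systemctl", "mask", unit], check=True)
```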
Oct 11 02:03:46 compute-0 ceph-mon[191930]: pgmap v767: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v768: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:48 compute-0 python3.9[345007]: ansible-ansible.builtin.find Invoked with file_type=any hidden=True paths=['/var/lib/certmonger/requests'] patterns=[] read_whole_file=False age_stamp=mtime recurse=False follow=False get_checksum=False checksum_algorithm=sha1 use_regex=False exact_mode=True excludes=None contains=None age=None size=None depth=None mode=None encoding=None limit=None
Oct 11 02:03:48 compute-0 ceph-mon[191930]: pgmap v768: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v769: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:49 compute-0 sudo[345158]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-angwbdlwghpoxjhiuilkyywmrsyrrpvl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148228.9846625-1202-171022820742986/AnsiballZ_systemd_service.py'
Oct 11 02:03:49 compute-0 sudo[345158]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:03:49 compute-0 python3.9[345160]: ansible-ansible.builtin.systemd_service Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 02:03:49 compute-0 systemd[1]: Reloading.
Oct 11 02:03:50 compute-0 systemd-rc-local-generator[345187]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:03:50 compute-0 systemd-sysv-generator[345192]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:03:50 compute-0 sudo[345158]: pam_unix(sudo:session): session closed for user root
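[editor's note] The systemd_service call at 02:03:49 passes daemon_reload=True with no unit name, i.e. a bare daemon-reload that makes systemd re-read unit files and re-run its generators after the deletions; the rc.local and SysV network messages at 02:03:50 are ordinary generator output during that reload, not errors. The equivalent direct call:

```python
# Equivalent of the daemon_reload=True invocation logged above.
import subprocess
subprocess.run(["systemctl", "daemon-reload"], check=True)
```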
Oct 11 02:03:50 compute-0 ceph-mon[191930]: pgmap v769: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v770: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:51 compute-0 podman[345296]: 2025-10-11 02:03:51.227061515 +0000 UTC m=+0.117480820 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:03:51 compute-0 podman[345297]: 2025-10-11 02:03:51.272024058 +0000 UTC m=+0.149325808 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, version=9.6, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, distribution-scope=public, vcs-type=git, com.redhat.component=ubi9-minimal-container, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., build-date=2025-08-20T13:12:41, container_name=openstack_network_exporter, release=1755695350, config_id=edpm, managed_by=edpm_ansible, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.buildah.version=1.33.7, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, architecture=x86_64, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://catalog.redhat.com/en/search?searchType=containers, io.openshift.expose-services=, name=ubi9-minimal, io.openshift.tags=minimal rhel9)
Oct 11 02:03:51 compute-0 sudo[345389]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sofxtrkujldgfuwobzjqrblgwrxqynjt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148230.838799-1210-7672768661069/AnsiballZ_command.py'
Oct 11 02:03:51 compute-0 sudo[345389]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:51 compute-0 python3.9[345391]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_compute.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:03:51 compute-0 sudo[345389]: pam_unix(sudo:session): session closed for user root
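[editor's note] After removing each unit file, the playbook runs `systemctl reset-failed <unit>` so any lingering failed state is dropped and the deleted tripleo_nova_* units no longer appear in `systemctl --failed`. The log shows one command per unit; a compact equivalent covering the units reset in this run:

```python
# One reset-failed per removed unit, matching the series of commands
# logged between 02:03:51 and 02:03:59.
import subprocess

UNITS = ["tripleo_nova_compute", "tripleo_nova_migration_target",
         "tripleo_nova_api_cron", "tripleo_nova_api",
         "tripleo_nova_conductor", "tripleo_nova_metadata",
         "tripleo_nova_scheduler"]
for u in UNITS:
    subprocess.run(["/usr/bin/systemctl", "reset-failed", f"{u}.service"],
                   check=True)
```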
Oct 11 02:03:52 compute-0 sudo[345542]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-janyhdadziqpkzyafigxgrtdnysftfzb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148232.0092645-1210-107222494243311/AnsiballZ_command.py'
Oct 11 02:03:52 compute-0 sudo[345542]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:52 compute-0 ceph-mon[191930]: pgmap v770: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:52 compute-0 python3.9[345544]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_migration_target.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:03:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v771: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:53 compute-0 sudo[345542]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:54 compute-0 ceph-mon[191930]: pgmap v771: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:54 compute-0 sudo[345695]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wvcdgbryslkmrmgirgvhrlzumospepkb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148234.0992694-1210-33118469543815/AnsiballZ_command.py'
Oct 11 02:03:54 compute-0 sudo[345695]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:03:54.819 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:03:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:03:54.820 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:03:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:03:54.820 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:03:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:03:54 compute-0 python3.9[345697]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_api_cron.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:03:54 compute-0 sudo[345695]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v772: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:55 compute-0 sudo[345863]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qstyvlyismqtkupcdljwclvxlyetuteb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148235.2614374-1210-147413821831752/AnsiballZ_command.py'
Oct 11 02:03:55 compute-0 sudo[345863]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:55 compute-0 podman[345822]: 2025-10-11 02:03:55.885154224 +0000 UTC m=+0.151058106 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm)
Oct 11 02:03:56 compute-0 python3.9[345867]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_api.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:03:56 compute-0 sudo[345863]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:03:56
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.log', 'volumes', 'cephfs.cephfs.data', '.mgr', 'images', '.rgw.root', 'default.rgw.control', 'default.rgw.meta', 'vms', 'backups', 'cephfs.cephfs.meta']
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
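[editor's note] The balancer tick above shows upmap mode evaluating the listed pools and preparing 0 of its 10 candidate changes, meaning PG placement is already even at this cluster size. The mode and activity can be read back with the ceph CLI; a minimal check (assumes the ceph CLI and an admin keyring are available on the host):

```python
# Read back the balancer state reported in the mgr log above.
import json, subprocess

status = json.loads(subprocess.run(
    ["ceph", "balancer", "status", "--format", "json"],
    check=True, capture_output=True, text=True).stdout)
print(status["mode"], status["active"])   # expect: upmap True
```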
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:03:56 compute-0 ceph-mon[191930]: pgmap v772: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:03:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:03:56 compute-0 sudo[346019]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jdgnvdozhmyqnsqviqyywbecvezuortw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148236.4093862-1210-262251230417665/AnsiballZ_command.py'
Oct 11 02:03:56 compute-0 sudo[346019]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:57 compute-0 python3.9[346021]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_conductor.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:03:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v773: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:57 compute-0 sudo[346019]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:58 compute-0 sudo[346172]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nmwbtjjtzqiowxagzlcbaiawirusipre ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148237.5634835-1210-109393610243992/AnsiballZ_command.py'
Oct 11 02:03:58 compute-0 sudo[346172]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:58 compute-0 python3.9[346174]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_metadata.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:03:58 compute-0 sudo[346172]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:58 compute-0 ceph-mon[191930]: pgmap v773: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:59 compute-0 sudo[346325]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oyijztksoclxymjokxsevbdsrstaayoj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148238.6744018-1210-197831170270252/AnsiballZ_command.py'
Oct 11 02:03:59 compute-0 sudo[346325]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:03:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v774: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:03:59 compute-0 python3.9[346327]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_scheduler.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:03:59 compute-0 sudo[346325]: pam_unix(sudo:session): session closed for user root
Oct 11 02:03:59 compute-0 podman[346329]: 2025-10-11 02:03:59.507254649 +0000 UTC m=+0.105264225 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:03:59 compute-0 podman[157119]: time="2025-10-11T02:03:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:03:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:03:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 40789 "" "Go-http-client/1.1"
Oct 11 02:03:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:03:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8099 "" "Go-http-client/1.1"
Oct 11 02:03:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:04:00 compute-0 ceph-mon[191930]: pgmap v774: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:01 compute-0 sudo[346497]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-opchfbpcuvzlwwmshewsnqbfabeowdca ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148240.6147625-1210-44886151027010/AnsiballZ_command.py'
Oct 11 02:04:01 compute-0 sudo[346497]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v775: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:01 compute-0 python3.9[346499]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_nova_vnc_proxy.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:04:01 compute-0 openstack_network_exporter[159265]: ERROR   02:04:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:04:01 compute-0 openstack_network_exporter[159265]: ERROR   02:04:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:04:01 compute-0 openstack_network_exporter[159265]: ERROR   02:04:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:04:01 compute-0 openstack_network_exporter[159265]: ERROR   02:04:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:04:01 compute-0 openstack_network_exporter[159265]: ERROR   02:04:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:04:01 compute-0 sudo[346497]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:02 compute-0 ceph-mon[191930]: pgmap v775: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:03 compute-0 sudo[346580]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:04:03 compute-0 sudo[346580]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:03 compute-0 sudo[346580]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v776: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:03 compute-0 sudo[346632]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:04:03 compute-0 sudo[346632]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:03 compute-0 sudo[346632]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:03 compute-0 sudo[346721]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nqlvdxllyxkjojrpemxtaniyudegusjm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148242.8969524-1289-220204177582955/AnsiballZ_file.py'
Oct 11 02:04:03 compute-0 sudo[346721]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:03 compute-0 sudo[346683]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:04:03 compute-0 sudo[346683]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:03 compute-0 sudo[346683]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:03 compute-0 sudo[346728]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host
Oct 11 02:04:03 compute-0 sudo[346728]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:03 compute-0 python3.9[346725]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/openstack/config/nova setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:04:03 compute-0 sudo[346721]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:03 compute-0 sudo[346728]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:04:03 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:04:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:04:03 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:04:04 compute-0 sudo[346834]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:04:04 compute-0 sudo[346834]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:04 compute-0 sudo[346834]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:04 compute-0 sudo[346877]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:04:04 compute-0 sudo[346877]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:04 compute-0 sudo[346877]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:04 compute-0 sudo[346927]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:04:04 compute-0 sudo[346927]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:04 compute-0 sudo[346927]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:04 compute-0 sudo[347002]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ittypzfxoyzizalfwjjvqwhhqtdsojeg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148243.9230328-1289-79199100790599/AnsiballZ_file.py'
Oct 11 02:04:04 compute-0 sudo[347002]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:04 compute-0 sudo[346985]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:04:04 compute-0 sudo[346985]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:04 compute-0 python3.9[347019]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/openstack/config/containers setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:04:04 compute-0 sudo[347002]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:04 compute-0 ceph-mon[191930]: pgmap v776: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:04 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:04:04 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:04:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:04:05 compute-0 sudo[346985]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:04:05 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:04:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:04:05 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:04:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:04:05 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:04:05 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 5e0d65a8-a911-4d7d-b7d5-c0f3ab7023f2 does not exist
Oct 11 02:04:05 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev fae46999-35bb-4848-8da6-911940987595 does not exist
Oct 11 02:04:05 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 6d08bc25-d130-459c-a8f5-1658a73c1141 does not exist
Oct 11 02:04:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:04:05 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:04:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:04:05 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:04:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:04:05 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:04:05 compute-0 sudo[347155]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:04:05 compute-0 sudo[347155]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:05 compute-0 sudo[347155]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v777: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:05 compute-0 sudo[347206]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:04:05 compute-0 sudo[347248]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zgnpmvvnyxujnnslckttmwgifxughvle ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148244.866331-1289-5435553826526/AnsiballZ_file.py'
Oct 11 02:04:05 compute-0 sudo[347206]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:05 compute-0 sudo[347248]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:05 compute-0 sudo[347206]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:05 compute-0 sudo[347253]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:04:05 compute-0 sudo[347253]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:05 compute-0 sudo[347253]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:05 compute-0 python3.9[347252]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/openstack/config/nova_nvme_cleaner setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:04:05 compute-0 sudo[347278]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:04:05 compute-0 sudo[347278]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:05 compute-0 sudo[347248]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:04:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:04:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:04:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:04:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:04:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:04:06 compute-0 podman[347407]: 2025-10-11 02:04:06.078673603 +0000 UTC m=+0.073903761 container create f43e5f47b62fbcb67c94ff54d84ab7fdff54ef4561c0ee5d663d959b9c737364 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_feynman, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2)
Oct 11 02:04:06 compute-0 podman[347407]: 2025-10-11 02:04:06.039098766 +0000 UTC m=+0.034328904 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:04:06 compute-0 systemd[1]: Started libpod-conmon-f43e5f47b62fbcb67c94ff54d84ab7fdff54ef4561c0ee5d663d959b9c737364.scope.
Oct 11 02:04:06 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:04:06 compute-0 podman[347407]: 2025-10-11 02:04:06.230460236 +0000 UTC m=+0.225690424 container init f43e5f47b62fbcb67c94ff54d84ab7fdff54ef4561c0ee5d663d959b9c737364 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_feynman, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:04:06 compute-0 podman[347407]: 2025-10-11 02:04:06.249392451 +0000 UTC m=+0.244622609 container start f43e5f47b62fbcb67c94ff54d84ab7fdff54ef4561c0ee5d663d959b9c737364 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_feynman, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:04:06 compute-0 podman[347407]: 2025-10-11 02:04:06.257299092 +0000 UTC m=+0.252529250 container attach f43e5f47b62fbcb67c94ff54d84ab7fdff54ef4561c0ee5d663d959b9c737364 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_feynman, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 02:04:06 compute-0 unruffled_feynman[347453]: 167 167
Oct 11 02:04:06 compute-0 systemd[1]: libpod-f43e5f47b62fbcb67c94ff54d84ab7fdff54ef4561c0ee5d663d959b9c737364.scope: Deactivated successfully.
Oct 11 02:04:06 compute-0 podman[347407]: 2025-10-11 02:04:06.260197472 +0000 UTC m=+0.255427630 container died f43e5f47b62fbcb67c94ff54d84ab7fdff54ef4561c0ee5d663d959b9c737364 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_feynman, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:04:06 compute-0 systemd[1]: var-lib-containers-storage-overlay-a6ba32217de024ac750072ff3273d633810d1d6dc77b6bacfdc57f34670d2faf-merged.mount: Deactivated successfully.
Oct 11 02:04:06 compute-0 podman[347407]: 2025-10-11 02:04:06.354377442 +0000 UTC m=+0.349607570 container remove f43e5f47b62fbcb67c94ff54d84ab7fdff54ef4561c0ee5d663d959b9c737364 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_feynman, org.label-schema.build-date=20250507, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:04:06 compute-0 systemd[1]: libpod-conmon-f43e5f47b62fbcb67c94ff54d84ab7fdff54ef4561c0ee5d663d959b9c737364.scope: Deactivated successfully.
Oct 11 02:04:06 compute-0 podman[347480]: 2025-10-11 02:04:06.4406606 +0000 UTC m=+0.125923648 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, container_name=kepler, io.buildah.version=1.29.0, architecture=x86_64, distribution-scope=public, maintainer=Red Hat, Inc., summary=Provides the latest release of Red Hat Universal Base Image 9., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, build-date=2024-09-18T21:23:30, com.redhat.component=ubi9-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, managed_by=edpm_ansible, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.tags=base rhel9, name=ubi9, release=1214.1726694543, vendor=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.openshift.expose-services=, config_id=edpm, release-0.7.12=, version=9.4, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f)
Oct 11 02:04:06 compute-0 podman[347469]: 2025-10-11 02:04:06.446798243 +0000 UTC m=+0.134595477 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:04:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:04:06 compute-0 podman[347475]: 2025-10-11 02:04:06.471311965 +0000 UTC m=+0.162615265 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, container_name=ovn_controller, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:04:06 compute-0 sudo[347589]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ugmowopygqbziaoykczmfzjxyczuhmuk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148245.9262657-1311-182263095827936/AnsiballZ_file.py'
Oct 11 02:04:06 compute-0 sudo[347589]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:06 compute-0 podman[347599]: 2025-10-11 02:04:06.56410499 +0000 UTC m=+0.061320312 container create c497c4feeb50d14f97bb9161554b764af2599cf62941f6fc26b42592f8cca828 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_faraday, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507)
Oct 11 02:04:06 compute-0 podman[347599]: 2025-10-11 02:04:06.536672638 +0000 UTC m=+0.033887980 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:04:06 compute-0 systemd[1]: Started libpod-conmon-c497c4feeb50d14f97bb9161554b764af2599cf62941f6fc26b42592f8cca828.scope.
Oct 11 02:04:06 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:04:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b793c610c46d664e786a056e55241ca63efbbbf5153c761607e0a8fd02070cf6/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:04:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b793c610c46d664e786a056e55241ca63efbbbf5153c761607e0a8fd02070cf6/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:04:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b793c610c46d664e786a056e55241ca63efbbbf5153c761607e0a8fd02070cf6/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:04:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b793c610c46d664e786a056e55241ca63efbbbf5153c761607e0a8fd02070cf6/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:04:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b793c610c46d664e786a056e55241ca63efbbbf5153c761607e0a8fd02070cf6/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:04:06 compute-0 python3.9[347598]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/nova setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:04:06 compute-0 ceph-mon[191930]: pgmap v777: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:06 compute-0 podman[347599]: 2025-10-11 02:04:06.718301268 +0000 UTC m=+0.215516600 container init c497c4feeb50d14f97bb9161554b764af2599cf62941f6fc26b42592f8cca828 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_faraday, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 02:04:06 compute-0 sudo[347589]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:06 compute-0 podman[347599]: 2025-10-11 02:04:06.754282678 +0000 UTC m=+0.251498040 container start c497c4feeb50d14f97bb9161554b764af2599cf62941f6fc26b42592f8cca828 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_faraday, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:04:06 compute-0 podman[347599]: 2025-10-11 02:04:06.760572983 +0000 UTC m=+0.257788305 container attach c497c4feeb50d14f97bb9161554b764af2599cf62941f6fc26b42592f8cca828 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_faraday, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:04:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v778: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:07 compute-0 sudo[347794]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wbxrulqaqutitflvyljvosdvqrpjsmpl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148247.0261312-1311-96414067334474/AnsiballZ_file.py'
Oct 11 02:04:07 compute-0 sudo[347794]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:07 compute-0 podman[347750]: 2025-10-11 02:04:07.663057433 +0000 UTC m=+0.177731161 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.build-date=20251007)
Oct 11 02:04:07 compute-0 python3.9[347805]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/_nova_secontext setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:04:07 compute-0 sudo[347794]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:07 compute-0 competent_faraday[347615]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:04:07 compute-0 competent_faraday[347615]: --> relative data size: 1.0
Oct 11 02:04:07 compute-0 competent_faraday[347615]: --> All data devices are unavailable
Oct 11 02:04:07 compute-0 systemd[1]: libpod-c497c4feeb50d14f97bb9161554b764af2599cf62941f6fc26b42592f8cca828.scope: Deactivated successfully.
Oct 11 02:04:07 compute-0 systemd[1]: libpod-c497c4feeb50d14f97bb9161554b764af2599cf62941f6fc26b42592f8cca828.scope: Consumed 1.129s CPU time.
Oct 11 02:04:07 compute-0 podman[347599]: 2025-10-11 02:04:07.939962673 +0000 UTC m=+1.437178005 container died c497c4feeb50d14f97bb9161554b764af2599cf62941f6fc26b42592f8cca828 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_faraday, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 02:04:07 compute-0 systemd[1]: var-lib-containers-storage-overlay-b793c610c46d664e786a056e55241ca63efbbbf5153c761607e0a8fd02070cf6-merged.mount: Deactivated successfully.
Oct 11 02:04:08 compute-0 podman[347599]: 2025-10-11 02:04:08.045332758 +0000 UTC m=+1.542548070 container remove c497c4feeb50d14f97bb9161554b764af2599cf62941f6fc26b42592f8cca828 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_faraday, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 02:04:08 compute-0 systemd[1]: libpod-conmon-c497c4feeb50d14f97bb9161554b764af2599cf62941f6fc26b42592f8cca828.scope: Deactivated successfully.
Oct 11 02:04:08 compute-0 sudo[347278]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:08 compute-0 sudo[347878]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:04:08 compute-0 sudo[347878]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:08 compute-0 sudo[347878]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:08 compute-0 sudo[347927]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:04:08 compute-0 sudo[347927]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:08 compute-0 sudo[347927]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:08 compute-0 sudo[347980]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:04:08 compute-0 sudo[347980]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:08 compute-0 sudo[347980]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:08 compute-0 sudo[348029]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:04:08 compute-0 sudo[348029]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:08 compute-0 sudo[348080]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sqlkgpggasuozlinsjleeocpjfvcsobm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148248.1301398-1311-132336230735845/AnsiballZ_file.py'
Oct 11 02:04:08 compute-0 sudo[348080]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:08 compute-0 ceph-mon[191930]: pgmap v778: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:08 compute-0 python3.9[348082]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/nova/instances setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:04:08 compute-0 sudo[348080]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v779: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:09 compute-0 podman[348164]: 2025-10-11 02:04:09.231724445 +0000 UTC m=+0.069317125 container create 092fa46f134a9bbf34e06bf7d66222a68b9aafb2097776af9c597ff3192b5f28 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_chandrasekhar, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 02:04:09 compute-0 systemd[1]: Started libpod-conmon-092fa46f134a9bbf34e06bf7d66222a68b9aafb2097776af9c597ff3192b5f28.scope.
Oct 11 02:04:09 compute-0 podman[348164]: 2025-10-11 02:04:09.207600363 +0000 UTC m=+0.045193073 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:04:09 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:04:09 compute-0 podman[348164]: 2025-10-11 02:04:09.375199613 +0000 UTC m=+0.212792323 container init 092fa46f134a9bbf34e06bf7d66222a68b9aafb2097776af9c597ff3192b5f28 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_chandrasekhar, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:04:09 compute-0 podman[348164]: 2025-10-11 02:04:09.394455184 +0000 UTC m=+0.232047894 container start 092fa46f134a9bbf34e06bf7d66222a68b9aafb2097776af9c597ff3192b5f28 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_chandrasekhar, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True)
Oct 11 02:04:09 compute-0 podman[348164]: 2025-10-11 02:04:09.401679879 +0000 UTC m=+0.239272559 container attach 092fa46f134a9bbf34e06bf7d66222a68b9aafb2097776af9c597ff3192b5f28 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_chandrasekhar, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True)
Oct 11 02:04:09 compute-0 thirsty_chandrasekhar[348213]: 167 167
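
The short-lived ceph containers in this run (thirsty_chandrasekhar here, mystifying_dirac further down) print "167 167" and exit immediately; 167 is the uid and gid of the ceph user in these images, so this looks like cephadm probing the image for the ceph uid/gid pair. A minimal sketch of how such a probe could be reproduced against the same image, assuming a stat-based check (the log does not show the exact command the probe ran):

    import subprocess

    # Assumption: reproduce the uid/gid probe by stat-ing /var/lib/ceph inside
    # the same image the log shows being pulled. Expected output: "167 167".
    image = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")
    out = subprocess.run(
        ["podman", "run", "--rm", "--entrypoint", "stat",
         image, "-c", "%u %g", "/var/lib/ceph"],
        capture_output=True, text=True, check=True,
    ).stdout.strip()
    uid, gid = out.split()
    print(f"ceph user in image runs as uid={uid} gid={gid}")
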
Oct 11 02:04:09 compute-0 systemd[1]: libpod-092fa46f134a9bbf34e06bf7d66222a68b9aafb2097776af9c597ff3192b5f28.scope: Deactivated successfully.
Oct 11 02:04:09 compute-0 podman[348164]: 2025-10-11 02:04:09.409328409 +0000 UTC m=+0.246921109 container died 092fa46f134a9bbf34e06bf7d66222a68b9aafb2097776af9c597ff3192b5f28 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_chandrasekhar, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 02:04:09 compute-0 systemd[1]: var-lib-containers-storage-overlay-4b7c5712479a9deb8895caa7cd617c07f969f5cfa3cb9f994d6c5134708b308f-merged.mount: Deactivated successfully.
Oct 11 02:04:09 compute-0 podman[348164]: 2025-10-11 02:04:09.488215853 +0000 UTC m=+0.325808563 container remove 092fa46f134a9bbf34e06bf7d66222a68b9aafb2097776af9c597ff3192b5f28 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_chandrasekhar, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 02:04:09 compute-0 systemd[1]: libpod-conmon-092fa46f134a9bbf34e06bf7d66222a68b9aafb2097776af9c597ff3192b5f28.scope: Deactivated successfully.
Oct 11 02:04:09 compute-0 sudo[348319]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wxhlneasxucaxieqodmrklvinhzsawub ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148249.1694622-1311-59640393836189/AnsiballZ_file.py'
Oct 11 02:04:09 compute-0 sudo[348319]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:09 compute-0 podman[348294]: 2025-10-11 02:04:09.768452359 +0000 UTC m=+0.095696540 container create 543ab336e8677d19f2d742dea46a9994d28570f462e9821291183476a0a9054e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ecstatic_fermi, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507)
Oct 11 02:04:09 compute-0 podman[348294]: 2025-10-11 02:04:09.728900386 +0000 UTC m=+0.056144647 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:04:09 compute-0 systemd[1]: Started libpod-conmon-543ab336e8677d19f2d742dea46a9994d28570f462e9821291183476a0a9054e.scope.
Oct 11 02:04:09 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:04:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4f01da75a193d77e258cc3752981efdcdd71eb5424e2d0aedf94b96a799b6a0d/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:04:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4f01da75a193d77e258cc3752981efdcdd71eb5424e2d0aedf94b96a799b6a0d/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:04:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4f01da75a193d77e258cc3752981efdcdd71eb5424e2d0aedf94b96a799b6a0d/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:04:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4f01da75a193d77e258cc3752981efdcdd71eb5424e2d0aedf94b96a799b6a0d/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:04:09 compute-0 podman[348294]: 2025-10-11 02:04:09.947415057 +0000 UTC m=+0.274659308 container init 543ab336e8677d19f2d742dea46a9994d28570f462e9821291183476a0a9054e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ecstatic_fermi, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, OSD_FLAVOR=default)
Oct 11 02:04:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:04:09 compute-0 podman[348294]: 2025-10-11 02:04:09.971512529 +0000 UTC m=+0.298756710 container start 543ab336e8677d19f2d742dea46a9994d28570f462e9821291183476a0a9054e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ecstatic_fermi, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0)
Oct 11 02:04:09 compute-0 podman[348294]: 2025-10-11 02:04:09.977425371 +0000 UTC m=+0.304669622 container attach 543ab336e8677d19f2d742dea46a9994d28570f462e9821291183476a0a9054e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ecstatic_fermi, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507)
Oct 11 02:04:10 compute-0 python3.9[348324]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/etc/ceph setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
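
The ansible-ansible.builtin.file record above shows the task ensuring /etc/ceph exists as a root-owned 0750 directory with SELinux type container_file_t (recurse=False). Roughly what that invocation amounts to, as a Python sketch; the real module uses libselinux bindings, so the chcon call here is a stand-in:

    import os
    import shutil
    import subprocess

    path = "/etc/ceph"

    os.makedirs(path, exist_ok=True)               # state=directory
    os.chmod(path, 0o750)                          # mode=0750
    shutil.chown(path, user="root", group="root")  # owner=root group=root
    # setype=container_file_t; chcon is an approximation of the module's
    # SELinux handling, applied non-recursively to match recurse=False.
    subprocess.run(["chcon", "-t", "container_file_t", path], check=True)

The later file tasks in this section (/etc/multipath, /etc/iscsi, /var/lib/iscsi, /etc/nvme, /run/openvswitch) follow the same shape with zuul ownership and no explicit mode.
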
Oct 11 02:04:10 compute-0 sudo[348319]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:10 compute-0 ceph-mon[191930]: pgmap v779: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]: {
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:     "0": [
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:         {
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "devices": [
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "/dev/loop3"
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             ],
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "lv_name": "ceph_lv0",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "lv_size": "21470642176",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "name": "ceph_lv0",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "tags": {
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.cluster_name": "ceph",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.crush_device_class": "",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.encrypted": "0",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.osd_id": "0",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.type": "block",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.vdo": "0"
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             },
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "type": "block",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "vg_name": "ceph_vg0"
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:         }
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:     ],
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:     "1": [
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:         {
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "devices": [
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "/dev/loop4"
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             ],
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "lv_name": "ceph_lv1",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "lv_size": "21470642176",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "name": "ceph_lv1",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "tags": {
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.cluster_name": "ceph",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.crush_device_class": "",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.encrypted": "0",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.osd_id": "1",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.type": "block",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.vdo": "0"
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             },
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "type": "block",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "vg_name": "ceph_vg1"
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:         }
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:     ],
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:     "2": [
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:         {
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "devices": [
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "/dev/loop5"
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             ],
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "lv_name": "ceph_lv2",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "lv_size": "21470642176",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "name": "ceph_lv2",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "tags": {
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.cluster_name": "ceph",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.crush_device_class": "",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.encrypted": "0",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.osd_id": "2",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.type": "block",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:                 "ceph.vdo": "0"
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             },
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "type": "block",
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:             "vg_name": "ceph_vg2"
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:         }
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]:     ]
Oct 11 02:04:10 compute-0 ecstatic_fermi[348329]: }
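
The JSON emitted by the ecstatic_fermi container maps OSD ids ("0", "1", "2") to LVM metadata; the shape matches a `ceph-volume lvm list --format json` listing (an assumption, since the log does not show this container's command line). A minimal sketch that extracts the device each OSD sits on from output shaped like the block above:

    import json
    import sys

    # Reads a listing like the one above on stdin:
    # {"0": [{"lv_path": ..., "devices": [...], "tags": {...}}], ...}
    listing = json.load(sys.stdin)
    for osd_id, lvs in sorted(listing.items(), key=lambda kv: int(kv[0])):
        for lv in lvs:
            tags = lv.get("tags", {})
            print(f"osd.{osd_id}: lv={lv['lv_path']} "
                  f"devices={','.join(lv['devices'])} "
                  f"osd_fsid={tags.get('ceph.osd_fsid', '?')}")

Against the listing above this would report /dev/loop3, /dev/loop4 and /dev/loop5 backing osd.0, osd.1 and osd.2 respectively.
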
Oct 11 02:04:10 compute-0 systemd[1]: libpod-543ab336e8677d19f2d742dea46a9994d28570f462e9821291183476a0a9054e.scope: Deactivated successfully.
Oct 11 02:04:10 compute-0 podman[348294]: 2025-10-11 02:04:10.843795836 +0000 UTC m=+1.171040007 container died 543ab336e8677d19f2d742dea46a9994d28570f462e9821291183476a0a9054e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ecstatic_fermi, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.39.3)
Oct 11 02:04:10 compute-0 systemd[1]: var-lib-containers-storage-overlay-4f01da75a193d77e258cc3752981efdcdd71eb5424e2d0aedf94b96a799b6a0d-merged.mount: Deactivated successfully.
Oct 11 02:04:10 compute-0 podman[348294]: 2025-10-11 02:04:10.927151036 +0000 UTC m=+1.254395207 container remove 543ab336e8677d19f2d742dea46a9994d28570f462e9821291183476a0a9054e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=ecstatic_fermi, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 02:04:10 compute-0 systemd[1]: libpod-conmon-543ab336e8677d19f2d742dea46a9994d28570f462e9821291183476a0a9054e.scope: Deactivated successfully.
Oct 11 02:04:10 compute-0 sudo[348029]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:10 compute-0 sudo[348499]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iqkgrxhjylxjxkjlwxqgvmngkekztzch ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148250.3919363-1311-44385110533786/AnsiballZ_file.py'
Oct 11 02:04:10 compute-0 sudo[348499]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:11 compute-0 sudo[348501]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:04:11 compute-0 sudo[348501]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:11 compute-0 sudo[348501]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:11 compute-0 python3.9[348502]: ansible-ansible.builtin.file Invoked with group=zuul owner=zuul path=/etc/multipath setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:04:11 compute-0 sudo[348499]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v780: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:11 compute-0 sudo[348527]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:04:11 compute-0 sudo[348527]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:11 compute-0 sudo[348527]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:11 compute-0 sudo[348559]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:04:11 compute-0 sudo[348559]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:11 compute-0 sudo[348559]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:11 compute-0 sudo[348604]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:04:11 compute-0 sudo[348604]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:11 compute-0 sudo[348789]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iwyibylxvfvjxzdgduruyqyrvzvmbbxv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148251.4035447-1311-28387701306043/AnsiballZ_file.py'
Oct 11 02:04:11 compute-0 sudo[348789]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:11 compute-0 podman[348790]: 2025-10-11 02:04:11.998587722 +0000 UTC m=+0.086545355 container create 6bac3886207ec96192ad9dd4d7f590d1b5cbb42bbe136e7970457221dc965c1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_dirac, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 02:04:12 compute-0 podman[348790]: 2025-10-11 02:04:11.963473605 +0000 UTC m=+0.051431298 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:04:12 compute-0 systemd[1]: Started libpod-conmon-6bac3886207ec96192ad9dd4d7f590d1b5cbb42bbe136e7970457221dc965c1b.scope.
Oct 11 02:04:12 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:04:12 compute-0 podman[348790]: 2025-10-11 02:04:12.159628403 +0000 UTC m=+0.247586046 container init 6bac3886207ec96192ad9dd4d7f590d1b5cbb42bbe136e7970457221dc965c1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_dirac, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 02:04:12 compute-0 python3.9[348798]: ansible-ansible.builtin.file Invoked with group=zuul owner=zuul path=/etc/iscsi setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:04:12 compute-0 podman[348790]: 2025-10-11 02:04:12.177204607 +0000 UTC m=+0.265162200 container start 6bac3886207ec96192ad9dd4d7f590d1b5cbb42bbe136e7970457221dc965c1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_dirac, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 02:04:12 compute-0 podman[348790]: 2025-10-11 02:04:12.182729344 +0000 UTC m=+0.270686977 container attach 6bac3886207ec96192ad9dd4d7f590d1b5cbb42bbe136e7970457221dc965c1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_dirac, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 02:04:12 compute-0 mystifying_dirac[348808]: 167 167
Oct 11 02:04:12 compute-0 systemd[1]: libpod-6bac3886207ec96192ad9dd4d7f590d1b5cbb42bbe136e7970457221dc965c1b.scope: Deactivated successfully.
Oct 11 02:04:12 compute-0 podman[348790]: 2025-10-11 02:04:12.19575078 +0000 UTC m=+0.283708413 container died 6bac3886207ec96192ad9dd4d7f590d1b5cbb42bbe136e7970457221dc965c1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_dirac, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:04:12 compute-0 sudo[348789]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:12 compute-0 systemd[1]: var-lib-containers-storage-overlay-9d5ab8d5de1536373d96f081fa1eda606bd0680ac00e6e84bfa04f7c368b9a6e-merged.mount: Deactivated successfully.
Oct 11 02:04:12 compute-0 podman[348790]: 2025-10-11 02:04:12.268423149 +0000 UTC m=+0.356380772 container remove 6bac3886207ec96192ad9dd4d7f590d1b5cbb42bbe136e7970457221dc965c1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_dirac, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 02:04:12 compute-0 systemd[1]: libpod-conmon-6bac3886207ec96192ad9dd4d7f590d1b5cbb42bbe136e7970457221dc965c1b.scope: Deactivated successfully.
Oct 11 02:04:12 compute-0 podman[348854]: 2025-10-11 02:04:12.501935577 +0000 UTC m=+0.066325464 container create 8ca6c41ee62ffed83ad31b24ded27c56fd66a096d3810fcd6c994e7e005f5972 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_galileo, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2)
Oct 11 02:04:12 compute-0 systemd[1]: Started libpod-conmon-8ca6c41ee62ffed83ad31b24ded27c56fd66a096d3810fcd6c994e7e005f5972.scope.
Oct 11 02:04:12 compute-0 podman[348854]: 2025-10-11 02:04:12.479827216 +0000 UTC m=+0.044217203 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:04:12 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:04:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd5d52cc0373aad0c45c123c3d07722a6d0b95ca362a976d63967face54bcd2a/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:04:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd5d52cc0373aad0c45c123c3d07722a6d0b95ca362a976d63967face54bcd2a/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:04:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd5d52cc0373aad0c45c123c3d07722a6d0b95ca362a976d63967face54bcd2a/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:04:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd5d52cc0373aad0c45c123c3d07722a6d0b95ca362a976d63967face54bcd2a/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:04:12 compute-0 podman[348854]: 2025-10-11 02:04:12.685939038 +0000 UTC m=+0.250329005 container init 8ca6c41ee62ffed83ad31b24ded27c56fd66a096d3810fcd6c994e7e005f5972 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_galileo, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 02:04:12 compute-0 podman[348854]: 2025-10-11 02:04:12.707509293 +0000 UTC m=+0.271899190 container start 8ca6c41ee62ffed83ad31b24ded27c56fd66a096d3810fcd6c994e7e005f5972 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_galileo, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 02:04:12 compute-0 podman[348854]: 2025-10-11 02:04:12.714407735 +0000 UTC m=+0.278797662 container attach 8ca6c41ee62ffed83ad31b24ded27c56fd66a096d3810fcd6c994e7e005f5972 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_galileo, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507)
Oct 11 02:04:12 compute-0 ceph-mon[191930]: pgmap v780: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v781: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:13 compute-0 sudo[349014]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ouggaqftbvwarpzwxfqomoybqufjihpe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148253.1704264-1311-246136297793270/AnsiballZ_file.py'
Oct 11 02:04:13 compute-0 sudo[349014]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:13 compute-0 fervent_galileo[348870]: {
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:         "osd_id": 1,
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:         "type": "bluestore"
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:     },
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:         "osd_id": 2,
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:         "type": "bluestore"
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:     },
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:         "osd_id": 0,
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:         "type": "bluestore"
Oct 11 02:04:13 compute-0 fervent_galileo[348870]:     }
Oct 11 02:04:13 compute-0 fervent_galileo[348870]: }
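
This second JSON document, printed by fervent_galileo, is keyed by osd_uuid and corresponds to the `ceph-volume ... -- raw list --format json` cephadm invocation logged at 02:04:11 under the ceph-admin sudo session. A sketch that cross-checks it against the LVM listing earlier in this section, matching each raw entry's osd_uuid to the ceph.osd_fsid tag for the same osd_id (the filenames are hypothetical; each would hold one of the two JSON documents from the log):

    import json

    with open("lvm_list.json") as f:
        lvm = json.load(f)   # keyed by osd_id: {"0": [{...}], ...}
    with open("raw_list.json") as f:
        raw = json.load(f)   # keyed by osd_uuid: {uuid: {"osd_id": 0, ...}}

    for uuid, entry in sorted(raw.items(), key=lambda kv: kv[1]["osd_id"]):
        osd_id = str(entry["osd_id"])
        lvm_fsids = {lv["tags"].get("ceph.osd_fsid")
                     for lv in lvm.get(osd_id, [])}
        status = "ok" if uuid in lvm_fsids else "MISMATCH"
        print(f"osd.{osd_id} {entry['device']} "
              f"type={entry['type']} uuid={uuid} [{status}]")

For the data above all three OSDs agree: for example osd.1's uuid 6af45214-b1a1-4565-9175-30c80d9ec207 appears both as osd_uuid here and as ceph.osd_fsid on /dev/ceph_vg1/ceph_lv1.
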
Oct 11 02:04:13 compute-0 systemd[1]: libpod-8ca6c41ee62ffed83ad31b24ded27c56fd66a096d3810fcd6c994e7e005f5972.scope: Deactivated successfully.
Oct 11 02:04:13 compute-0 podman[348854]: 2025-10-11 02:04:13.892795858 +0000 UTC m=+1.457185735 container died 8ca6c41ee62ffed83ad31b24ded27c56fd66a096d3810fcd6c994e7e005f5972 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_galileo, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:04:13 compute-0 systemd[1]: libpod-8ca6c41ee62ffed83ad31b24ded27c56fd66a096d3810fcd6c994e7e005f5972.scope: Consumed 1.175s CPU time.
Oct 11 02:04:13 compute-0 python3.9[349019]: ansible-ansible.builtin.file Invoked with group=zuul owner=zuul path=/var/lib/iscsi setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:04:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-fd5d52cc0373aad0c45c123c3d07722a6d0b95ca362a976d63967face54bcd2a-merged.mount: Deactivated successfully.
Oct 11 02:04:13 compute-0 sudo[349014]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:13 compute-0 podman[348854]: 2025-10-11 02:04:13.980351962 +0000 UTC m=+1.544741849 container remove 8ca6c41ee62ffed83ad31b24ded27c56fd66a096d3810fcd6c994e7e005f5972 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_galileo, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:04:13 compute-0 systemd[1]: libpod-conmon-8ca6c41ee62ffed83ad31b24ded27c56fd66a096d3810fcd6c994e7e005f5972.scope: Deactivated successfully.
Oct 11 02:04:14 compute-0 sudo[348604]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:04:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:04:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:04:14 compute-0 podman[349032]: 2025-10-11 02:04:14.060258207 +0000 UTC m=+0.111363654 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, container_name=multipathd, io.buildah.version=1.41.3, org.label-schema.license=GPLv2)
Oct 11 02:04:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:04:14 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev eb6dc4f9-51e4-4cfe-96f4-cbe4e2d70dc1 does not exist
Oct 11 02:04:14 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 5d2aa23f-645f-4ec7-846f-f0f8db74c4ac does not exist
Oct 11 02:04:14 compute-0 podman[349040]: 2025-10-11 02:04:14.078944432 +0000 UTC m=+0.118167395 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, config_id=iscsid, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true)
Oct 11 02:04:14 compute-0 sudo[349105]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:04:14 compute-0 sudo[349105]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:14 compute-0 sudo[349105]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:14 compute-0 sudo[349151]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:04:14 compute-0 sudo[349151]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:04:14 compute-0 sudo[349151]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:14 compute-0 sudo[349280]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aqvkpvfcgdvophyxeodlgpjhkdnpndxp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148254.1852248-1311-74617227254903/AnsiballZ_file.py'
Oct 11 02:04:14 compute-0 sudo[349280]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:14 compute-0 ceph-mon[191930]: pgmap v781: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:04:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:04:14 compute-0 python3.9[349282]: ansible-ansible.builtin.file Invoked with group=zuul owner=zuul path=/etc/nvme setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:04:14 compute-0 sudo[349280]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:04:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v782: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:16 compute-0 sudo[349432]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-igisgeihkilioppslebwvnfhdwosllnt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148255.6594465-1311-81377571901967/AnsiballZ_file.py'
Oct 11 02:04:16 compute-0 sudo[349432]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:16 compute-0 python3.9[349434]: ansible-ansible.builtin.file Invoked with group=zuul owner=zuul path=/run/openvswitch setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:04:16 compute-0 sudo[349432]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:16 compute-0 ceph-mon[191930]: pgmap v782: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v783: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:18 compute-0 ceph-mon[191930]: pgmap v783: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v784: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:04:20 compute-0 ceph-mon[191930]: pgmap v784: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v785: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:21 compute-0 ceph-mon[191930]: pgmap v785: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:22 compute-0 podman[349515]: 2025-10-11 02:04:22.252887309 +0000 UTC m=+0.135133472 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:04:22 compute-0 podman[349519]: 2025-10-11 02:04:22.274644676 +0000 UTC m=+0.162528648 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, release=1755695350, vendor=Red Hat, Inc., io.openshift.tags=minimal rhel9, architecture=x86_64, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=openstack_network_exporter, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.buildah.version=1.33.7, maintainer=Red Hat, Inc., version=9.6, com.redhat.component=ubi9-minimal-container, config_id=edpm, build-date=2025-08-20T13:12:41, name=ubi9-minimal, vcs-type=git, managed_by=edpm_ansible, io.openshift.expose-services=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 02:04:22 compute-0 sudo[349628]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ytollbpddiqiafiggypsqqqrdjpgaujc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148261.6102529-1514-149775223841085/AnsiballZ_getent.py'
Oct 11 02:04:22 compute-0 sudo[349628]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:22 compute-0 python3.9[349630]: ansible-ansible.builtin.getent Invoked with database=passwd key=nova fail_key=True service=None split=None
Oct 11 02:04:22 compute-0 sudo[349628]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v786: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:24 compute-0 ceph-mon[191930]: pgmap v786: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:24 compute-0 sudo[349781]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zvznzywltgewpmeunfrybutomjjgxoue ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148263.5852633-1522-28321281710176/AnsiballZ_group.py'
Oct 11 02:04:24 compute-0 sudo[349781]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:24 compute-0 python3.9[349783]: ansible-ansible.builtin.group Invoked with gid=42436 name=nova state=present force=False system=False local=False non_unique=False gid_min=None gid_max=None
Oct 11 02:04:24 compute-0 groupadd[349784]: group added to /etc/group: name=nova, GID=42436
Oct 11 02:04:24 compute-0 groupadd[349784]: group added to /etc/gshadow: name=nova
Oct 11 02:04:24 compute-0 groupadd[349784]: new group: name=nova, GID=42436
Oct 11 02:04:24 compute-0 sudo[349781]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:04:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v787: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:26 compute-0 podman[349907]: 2025-10-11 02:04:26.206842477 +0000 UTC m=+0.097262264 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, config_id=edpm, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:04:26 compute-0 sudo[349958]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kehalstramdoatmzzssjwpgnszlncnfv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148265.552568-1530-126261595231176/AnsiballZ_user.py'
Oct 11 02:04:26 compute-0 sudo[349958]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:26 compute-0 ceph-mon[191930]: pgmap v787: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:26 compute-0 python3.9[349960]: ansible-ansible.builtin.user Invoked with comment=nova user group=nova groups=['libvirt'] name=nova shell=/bin/sh state=present uid=42436 non_unique=False force=False remove=False create_home=True system=False move_home=False append=False ssh_key_bits=0 ssh_key_type=rsa ssh_key_comment=ansible-generated on compute-0 update_password=always home=None password=NOT_LOGGING_PARAMETER login_class=None password_expire_max=None password_expire_min=None password_expire_warn=None hidden=None seuser=None skeleton=None generate_ssh_key=None ssh_key_file=None ssh_key_passphrase=NOT_LOGGING_PARAMETER expires=None password_lock=None local=None profile=None authorization=None role=None umask=None password_expire_account_disable=None uid_min=None uid_max=None
Oct 11 02:04:26 compute-0 useradd[349962]: new user: name=nova, UID=42436, GID=42436, home=/home/nova, shell=/bin/sh, from=/dev/pts/0
Oct 11 02:04:26 compute-0 useradd[349962]: add 'nova' to group 'libvirt'
Oct 11 02:04:26 compute-0 useradd[349962]: add 'nova' to shadow group 'libvirt'
Oct 11 02:04:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:04:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:04:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:04:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:04:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:04:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:04:26 compute-0 sudo[349958]: pam_unix(sudo:session): session closed for user root
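Taken together, the group/user module calls and the groupadd/useradd records above amount to an idempotent "ensure group nova (GID 42436), then user nova (UID 42436, shell /bin/sh, supplementary group libvirt)" sequence. A sketch under those logged values (requires root; groupadd/useradd are the real tools, the wrapper is illustrative):

import grp, pwd, subprocess

def ensure_group(name, gid):
    try:
        grp.getgrnam(name)          # already present: nothing to do
    except KeyError:
        subprocess.run(["groupadd", "--gid", str(gid), name], check=True)

def ensure_user(name, uid, gid, groups):
    try:
        pwd.getpwnam(name)          # already present: nothing to do
    except KeyError:
        subprocess.run(["useradd", "--uid", str(uid), "--gid", str(gid),
                        "--groups", ",".join(groups), "--shell", "/bin/sh",
                        "--comment", "nova user", name], check=True)

ensure_group("nova", 42436)
ensure_user("nova", 42436, 42436, ["libvirt"])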
Oct 11 02:04:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v788: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:27 compute-0 sshd-session[349993]: Accepted publickey for zuul from 192.168.122.30 port 42182 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 02:04:27 compute-0 systemd-logind[804]: New session 58 of user zuul.
Oct 11 02:04:27 compute-0 systemd[1]: Started Session 58 of User zuul.
Oct 11 02:04:27 compute-0 sshd-session[349993]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 02:04:27 compute-0 sshd-session[349996]: Received disconnect from 192.168.122.30 port 42182:11: disconnected by user
Oct 11 02:04:27 compute-0 sshd-session[349996]: Disconnected from user zuul 192.168.122.30 port 42182
Oct 11 02:04:27 compute-0 sshd-session[349993]: pam_unix(sshd:session): session closed for user zuul
Oct 11 02:04:27 compute-0 systemd[1]: session-58.scope: Deactivated successfully.
Oct 11 02:04:27 compute-0 systemd-logind[804]: Session 58 logged out. Waiting for processes to exit.
Oct 11 02:04:27 compute-0 systemd-logind[804]: Removed session 58.
Oct 11 02:04:28 compute-0 ceph-mon[191930]: pgmap v788: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:28 compute-0 python3.9[350146]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/nova/config.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:04:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v789: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:29 compute-0 python3.9[350267]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/nova/config.json mode=0644 setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760148267.9760017-1555-182575304395081/.source.json follow=False _original_basename=config.json.j2 checksum=2c2474b5f24ef7c9ed37f49680082593e0d1100b backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None attributes=None
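The stat/copy pair above is Ansible's change detection: the destination is hashed first (get_checksum=True, checksum_algorithm=sha1), and the file is rewritten only when that hash differs from the source checksum. A minimal sketch of the pattern; the helper names are mine:

import hashlib, os, shutil
from typing import Optional

def sha1_of(path: str) -> Optional[str]:
    if not os.path.isfile(path):
        return None
    h = hashlib.sha1()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(65536), b""):
            h.update(chunk)
    return h.hexdigest()

def copy_if_changed(src: str, dest: str, mode: int = 0o644) -> bool:
    if sha1_of(dest) == sha1_of(src):
        return False              # checksums match: task reports "ok"
    shutil.copy2(src, dest)
    os.chmod(dest, mode)
    return True                   # content replaced: task reports "changed"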
Oct 11 02:04:29 compute-0 podman[157119]: time="2025-10-11T02:04:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:04:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:04:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 40789 "" "Go-http-client/1.1"
Oct 11 02:04:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:04:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8109 "" "Go-http-client/1.1"
Oct 11 02:04:29 compute-0 podman[350268]: 2025-10-11 02:04:29.910820597 +0000 UTC m=+0.132370983 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:04:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:04:30 compute-0 ceph-mon[191930]: pgmap v789: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:30 compute-0 python3.9[350435]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/nova/nova-blank.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:04:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v790: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:31 compute-0 openstack_network_exporter[159265]: ERROR   02:04:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:04:31 compute-0 openstack_network_exporter[159265]: ERROR   02:04:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:04:31 compute-0 openstack_network_exporter[159265]: ERROR   02:04:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:04:31 compute-0 openstack_network_exporter[159265]: ERROR   02:04:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:04:31 compute-0 openstack_network_exporter[159265]: ERROR   02:04:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
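These exporter errors all reduce to a missing control socket: ovs/ovn appctl calls target a <daemon>.<pid>.ctl socket in the daemon's run directory, and neither ovn-northd nor ovsdb-server has one on a compute node. A small probe along those lines; the run directories match the volume mounts logged above, though treating them as this exporter's exact search paths is an assumption:

import glob

def find_ctl(rundir, daemon):
    socks = glob.glob(f"{rundir}/{daemon}.*.ctl")
    if not socks:
        raise FileNotFoundError(
            f"no control socket files found for {daemon} in {rundir}")
    return socks[0]

for rundir, daemon in [("/run/ovn", "ovn-northd"),
                       ("/run/openvswitch", "ovsdb-server")]:
    try:
        print(find_ctl(rundir, daemon))
    except FileNotFoundError as err:
        print("ERROR:", err)   # same failure mode the exporter logs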
Oct 11 02:04:31 compute-0 python3.9[350511]: ansible-ansible.legacy.file Invoked with mode=0644 setype=container_file_t dest=/var/lib/openstack/config/nova/nova-blank.conf _original_basename=nova-blank.conf recurse=False state=file path=/var/lib/openstack/config/nova/nova-blank.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:04:32 compute-0 ceph-mon[191930]: pgmap v790: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:32 compute-0 python3.9[350661]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/nova/ssh-config follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:04:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v791: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:33 compute-0 python3.9[350782]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/nova/ssh-config mode=0644 setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760148271.7007134-1555-9613202923510/.source follow=False _original_basename=ssh-config checksum=4297f735c41bdc1ff52d72e6f623a02242f37958 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:04:34 compute-0 ceph-mon[191930]: pgmap v791: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:04:35 compute-0 python3.9[350932]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/nova/02-nova-host-specific.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:04:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v792: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:36 compute-0 python3.9[351053]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/nova/02-nova-host-specific.conf mode=0644 setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760148273.6229632-1555-268903782889971/.source.conf follow=False _original_basename=02-nova-host-specific.conf.j2 checksum=1feba546d0beacad9258164ab79b8a747685ccc8 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:04:36 compute-0 ceph-mon[191930]: pgmap v792: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:37 compute-0 podman[351130]: 2025-10-11 02:04:37.227005589 +0000 UTC m=+0.112594096 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:04:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v793: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:37 compute-0 podman[351132]: 2025-10-11 02:04:37.277146533 +0000 UTC m=+0.152262091 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, container_name=kepler, architecture=x86_64, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, release=1214.1726694543, name=ubi9, config_id=edpm, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-type=git, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release-0.7.12=, version=9.4, io.buildah.version=1.29.0, io.openshift.expose-services=, io.k8s.display-name=Red Hat Universal Base Image 9, vendor=Red Hat, Inc., build-date=2024-09-18T21:23:30, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., io.openshift.tags=base rhel9, com.redhat.component=ubi9-container)
Oct 11 02:04:37 compute-0 podman[351131]: 2025-10-11 02:04:37.348976343 +0000 UTC m=+0.229548238 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, managed_by=edpm_ansible, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:04:37 compute-0 python3.9[351267]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/nova/nova_statedir_ownership.py follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:04:38 compute-0 podman[351319]: 2025-10-11 02:04:38.271199501 +0000 UTC m=+0.162746180 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm)
Oct 11 02:04:38 compute-0 ceph-mon[191930]: pgmap v793: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:38 compute-0 python3.9[351407]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/nova/nova_statedir_ownership.py mode=0644 setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760148276.8307536-1555-84357541205147/.source.py follow=False _original_basename=nova_statedir_ownership.py checksum=c6c8a3cfefa5efd60ceb1408c4e977becedb71e2 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:04:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v794: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:39 compute-0 sudo[351557]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xuzepvhdsxjxuiruexcpwwqumwvvygbb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148279.0436797-1624-24596888643085/AnsiballZ_file.py'
Oct 11 02:04:39 compute-0 sudo[351557]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:39 compute-0 python3.9[351559]: ansible-ansible.builtin.file Invoked with group=nova mode=0700 owner=nova path=/home/nova/.ssh state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:04:39 compute-0 sudo[351557]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:04:40 compute-0 ceph-mon[191930]: pgmap v794: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:40 compute-0 sudo[351709]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ntubaezlslbeyfrgbkhcbgbnfgdupbxp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148280.1386554-1632-231174522902179/AnsiballZ_copy.py'
Oct 11 02:04:40 compute-0 sudo[351709]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:40 compute-0 python3.9[351711]: ansible-ansible.legacy.copy Invoked with dest=/home/nova/.ssh/authorized_keys group=nova mode=0600 owner=nova remote_src=True src=/var/lib/openstack/config/nova/ssh-publickey backup=False force=True follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:04:40 compute-0 sudo[351709]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v795: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:41 compute-0 sudo[351861]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tcecqbymrnltvfcvafdafayjultmdoki ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148281.2131417-1640-11831034030352/AnsiballZ_stat.py'
Oct 11 02:04:41 compute-0 sudo[351861]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:41 compute-0 python3.9[351863]: ansible-ansible.builtin.stat Invoked with path=/var/lib/nova/compute_id follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:04:41 compute-0 sudo[351861]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:42 compute-0 ceph-mon[191930]: pgmap v795: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:42 compute-0 sudo[352013]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uehflawbtlymkimiqlcjybhmtfdjhoca ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148282.2755501-1648-51600365582072/AnsiballZ_stat.py'
Oct 11 02:04:42 compute-0 sudo[352013]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:43 compute-0 python3.9[352015]: ansible-ansible.legacy.stat Invoked with path=/var/lib/nova/compute_id follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:04:43 compute-0 sudo[352013]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v796: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:43 compute-0 sudo[352136]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jprnyxxjnqrscwiasvrluntlrtnckmxn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148282.2755501-1648-51600365582072/AnsiballZ_copy.py'
Oct 11 02:04:43 compute-0 sudo[352136]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:44 compute-0 python3.9[352138]: ansible-ansible.legacy.copy Invoked with attributes=+i dest=/var/lib/nova/compute_id group=nova mode=0400 owner=nova src=/home/zuul/.ansible/tmp/ansible-tmp-1760148282.2755501-1648-51600365582072/.source _original_basename=.z4grk3oh follow=False checksum=8b1ba0b00ae8208c47f4573f1b89ed869043b5be backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None seuser=None serole=None selevel=None setype=None
Oct 11 02:04:44 compute-0 sudo[352136]: pam_unix(sudo:session): session closed for user root
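The compute_id copy above sets mode 0400, owner nova, and attributes=+i: once the stable compute UUID is written, the file is made immutable so it cannot be altered or replaced in place. A sketch of that sequence (chattr is the real tool behind the attributes option; the wrapper and its name are illustrative, and it needs root plus the nova user to exist):

import os, shutil, subprocess

def write_compute_id(path, uuid_text):
    with open(path, "w") as f:
        f.write(uuid_text + "\n")
    os.chmod(path, 0o400)                               # mode=0400
    shutil.chown(path, user="nova", group="nova")       # owner/group nova
    subprocess.run(["chattr", "+i", path], check=True)  # attributes=+i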
Oct 11 02:04:44 compute-0 podman[352142]: 2025-10-11 02:04:44.216603303 +0000 UTC m=+0.098088995 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, config_id=iscsid, container_name=iscsid, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0)
Oct 11 02:04:44 compute-0 podman[352141]: 2025-10-11 02:04:44.234843444 +0000 UTC m=+0.121955395 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, container_name=multipathd, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:04:44 compute-0 ceph-mon[191930]: pgmap v796: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:04:45 compute-0 python3.9[352326]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/cacerts/nova/tls-ca-bundle.pem follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:04:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v797: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:46 compute-0 ceph-mon[191930]: pgmap v797: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:46 compute-0 python3.9[352478]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/containers/nova_compute.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:04:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v798: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:48 compute-0 ceph-mon[191930]: pgmap v798: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:48 compute-0 python3.9[352599]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/containers/nova_compute.json mode=0644 setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760148285.4779975-1674-251333923750020/.source.json follow=False _original_basename=nova_compute.json.j2 checksum=f022386746472553146d29f689b545df70fa8a60 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:04:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v799: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:49 compute-0 python3.9[352749]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/containers/nova_compute_init.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:04:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:04:50 compute-0 python3.9[352871]: ansible-ansible.legacy.copy Invoked with dest=/var/lib/openstack/config/containers/nova_compute_init.json mode=0700 setype=container_file_t src=/home/zuul/.ansible/tmp/ansible-tmp-1760148288.7824984-1689-21719773245291/.source.json follow=False _original_basename=nova_compute_init.json.j2 checksum=60b024e6db49dc6e700fc0d50263944d98d4c034 backup=False force=True remote_src=False unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:04:50 compute-0 ceph-mon[191930]: pgmap v799: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:04:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v800: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 5.0 KiB/s rd, 0 B/s wr, 8 op/s
Oct 11 02:04:51 compute-0 sudo[353021]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qojbtbscsaoutjabqzgzdxhysijrluor ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148290.9044833-1706-187473780362656/AnsiballZ_container_config_data.py'
Oct 11 02:04:51 compute-0 sudo[353021]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:51 compute-0 python3.9[353023]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/openstack/config/containers config_pattern=nova_compute_init.json debug=False
Oct 11 02:04:51 compute-0 sudo[353021]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:52 compute-0 ceph-mon[191930]: pgmap v800: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 5.0 KiB/s rd, 0 B/s wr, 8 op/s
Oct 11 02:04:52 compute-0 sudo[353204]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lcjycfigshzudqyqamnhazagcxldnpkm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148292.140455-1715-129960647464493/AnsiballZ_container_config_hash.py'
Oct 11 02:04:52 compute-0 sudo[353204]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:52 compute-0 podman[353147]: 2025-10-11 02:04:52.760839847 +0000 UTC m=+0.135043531 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:04:52 compute-0 podman[353148]: 2025-10-11 02:04:52.793706561 +0000 UTC m=+0.162242465 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, io.buildah.version=1.33.7, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vcs-type=git, url=https://catalog.redhat.com/en/search?searchType=containers, version=9.6, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, architecture=x86_64, config_id=edpm, vendor=Red Hat, Inc., container_name=openstack_network_exporter, distribution-scope=public, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-minimal-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.expose-services=, io.openshift.tags=minimal rhel9, build-date=2025-08-20T13:12:41, maintainer=Red Hat, Inc.)
Oct 11 02:04:52 compute-0 python3.9[353219]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
Oct 11 02:04:53 compute-0 sudo[353204]: pam_unix(sudo:session): session closed for user root
Oct 11 02:04:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v801: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 16 KiB/s rd, 0 B/s wr, 27 op/s
Oct 11 02:04:53 compute-0 sudo[353369]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vpgkmbkxrsceerxbtqtprlhdhcxjhblc ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760148293.4403965-1725-35396641062245/AnsiballZ_edpm_container_manage.py'
Oct 11 02:04:53 compute-0 sudo[353369]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:04:54 compute-0 python3[353371]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/openstack/config/containers config_id=edpm config_overrides={} config_patterns=nova_compute_init.json log_base_path=/var/log/containers/stdouts debug=False
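The edpm_container_manage call above is driven by config_dir plus config_patterns: definitions matching nova_compute_init.json under /var/lib/openstack/config/containers are collected and parsed before any container is started. A sketch of just that discovery step (the real module also handles concurrency, config_id labels, and the podman lifecycle, which this omits):

import glob, json, os

def load_container_configs(config_dir, pattern):
    configs = {}
    for path in sorted(glob.glob(os.path.join(config_dir, pattern))):
        name = os.path.splitext(os.path.basename(path))[0]
        with open(path) as f:
            configs[name] = json.load(f)   # one container definition per file
    return configs

configs = load_container_configs("/var/lib/openstack/config/containers",
                                 "nova_compute_init.json")
print(list(configs))   # -> ['nova_compute_init']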
Oct 11 02:04:54 compute-0 ceph-mon[191930]: pgmap v801: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 16 KiB/s rd, 0 B/s wr, 27 op/s
Oct 11 02:04:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:04:54.820 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:04:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:04:54.821 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:04:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:04:54.822 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
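The three lockutils lines above trace one protected call: the wait to acquire "_check_child_processes", then the hold time until release. A threading.Lock stand-in that reproduces that accounting; this is an analogue of oslo.concurrency's behavior, not its code:

import threading, time

_lock = threading.Lock()

def with_timed_lock(name, func):
    t0 = time.monotonic()
    with _lock:
        waited = time.monotonic() - t0
        print(f'Lock "{name}" acquired :: waited {waited:.3f}s')
        t1 = time.monotonic()
        try:
            return func()
        finally:
            held = time.monotonic() - t1
            print(f'Lock "{name}" released :: held {held:.3f}s')

with_timed_lock("_check_child_processes", lambda: time.sleep(0.001))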
Oct 11 02:04:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:04:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v802: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 29 KiB/s rd, 0 B/s wr, 49 op/s
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:04:56
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['vms', 'default.rgw.control', 'cephfs.cephfs.meta', 'default.rgw.log', 'default.rgw.meta', 'backups', '.mgr', 'images', '.rgw.root', 'volumes', 'cephfs.cephfs.data']
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:04:56 compute-0 ceph-mon[191930]: pgmap v802: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 29 KiB/s rd, 0 B/s wr, 49 op/s
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:04:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:04:57 compute-0 podman[353406]: 2025-10-11 02:04:57.1670811 +0000 UTC m=+0.068027151 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, tcib_managed=true)
Oct 11 02:04:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v803: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:04:58 compute-0 ceph-mon[191930]: pgmap v803: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:04:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v804: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:04:59 compute-0 podman[157119]: time="2025-10-11T02:04:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:04:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:04:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 40789 "" "Go-http-client/1.1"
Oct 11 02:04:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:04:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8115 "" "Go-http-client/1.1"
Oct 11 02:04:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:05:00 compute-0 ceph-mon[191930]: pgmap v804: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:05:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v805: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:05:01 compute-0 openstack_network_exporter[159265]: ERROR   02:05:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:05:01 compute-0 openstack_network_exporter[159265]: ERROR   02:05:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:05:01 compute-0 openstack_network_exporter[159265]: ERROR   02:05:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:05:01 compute-0 openstack_network_exporter[159265]: ERROR   02:05:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:05:01 compute-0 openstack_network_exporter[159265]: ERROR   02:05:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:05:02 compute-0 ceph-mon[191930]: pgmap v805: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:05:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v806: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 31 KiB/s rd, 0 B/s wr, 51 op/s
Oct 11 02:05:03 compute-0 podman[353442]: 2025-10-11 02:05:03.640690317 +0000 UTC m=+3.544837181 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, managed_by=edpm_ansible)
Oct 11 02:05:04 compute-0 ceph-mon[191930]: pgmap v806: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 31 KiB/s rd, 0 B/s wr, 51 op/s
Oct 11 02:05:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:05:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v807: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 0 B/s wr, 32 op/s
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:05:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:05:06 compute-0 ceph-mon[191930]: pgmap v807: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 0 B/s wr, 32 op/s
Oct 11 02:05:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v808: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 6.2 KiB/s rd, 0 B/s wr, 10 op/s
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.942 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to take longer than expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.943 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.944 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f8ed27f97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.944 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb8c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb1a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb200>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed2874260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed3ab42f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb350>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb90>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fa390>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb3b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbbf0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbc80>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.946 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.947 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f8ed27fbad0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27f9610>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb620>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.947 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.948 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f8ed27faff0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbe30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbec0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.948 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f8ed27fb110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f8ed27fb170>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f8ed27fb1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f8ed27fb230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f8ed2874230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.949 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f8ed27fb290>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbf50>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'power.state': [], 'disk.device.write.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.949 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f8ed5778d70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f8ed27fb650>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f8ed27fbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f8ed27fb320>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f8ed27fbb60>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.950 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.950 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f8ed27fa3f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f8ed27fb380>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f8ed27fbbc0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f8ed27fbc50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f8ed27fbce0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f8ed27fbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f8ed27fb590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.951 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f8ed27f95e0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.951 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f8ed27fb5f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f8ed27fbe00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f8ed27fbe90>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.952 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f8ed27fbf20>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.952 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.952 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.952 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.953 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.953 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.953 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.953 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.953 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.953 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.953 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.953 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.953 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.954 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.955 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:05:07.955 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:05:08 compute-0 ceph-mon[191930]: pgmap v808: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 6.2 KiB/s rd, 0 B/s wr, 10 op/s
Oct 11 02:05:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v809: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:09 compute-0 ceph-mon[191930]: pgmap v809: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:05:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v810: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:11 compute-0 podman[353474]: 2025-10-11 02:05:11.463792712 +0000 UTC m=+3.357966689 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:05:11 compute-0 podman[353476]: 2025-10-11 02:05:11.474501584 +0000 UTC m=+3.370384519 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.component=ubi9-container, io.openshift.tags=base rhel9, container_name=kepler, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2024-09-18T21:23:30, io.openshift.expose-services=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, architecture=x86_64, release=1214.1726694543, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, summary=Provides the latest release of Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, vcs-type=git, maintainer=Red Hat, Inc., version=9.4, io.buildah.version=1.29.0, managed_by=edpm_ansible, name=ubi9, vendor=Red Hat, Inc.)
Oct 11 02:05:11 compute-0 podman[353502]: 2025-10-11 02:05:11.480706589 +0000 UTC m=+2.379483273 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, org.label-schema.name=CentOS Stream 10 Base Image, io.buildah.version=1.41.4, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute)
Oct 11 02:05:11 compute-0 podman[353475]: 2025-10-11 02:05:11.496027869 +0000 UTC m=+3.390256237 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_id=ovn_controller, container_name=ovn_controller, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:05:11 compute-0 podman[353383]: 2025-10-11 02:05:11.515790185 +0000 UTC m=+17.124690938 image pull 95311272d2962a6b8537a6d19b94bc44c5c3621a6e21a2e983fd64d147646bc9 quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified
Oct 11 02:05:11 compute-0 podman[353577]: 2025-10-11 02:05:11.836587345 +0000 UTC m=+0.126992557 container create ebc3893876b006f2554389d6534fd757431488dafe40f7ef25b5026f5246c4b8 (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute_init, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, container_name=nova_compute_init, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': False, 'user': 'root', 'restart': 'never', 'command': 'bash -c $* -- eval python3 /sbin/nova_statedir_ownership.py | logger -t nova_compute_init', 'net': 'none', 'security_opt': ['label=disable'], 'detach': False, 'environment': {'NOVA_STATEDIR_OWNERSHIP_SKIP': '/var/lib/nova/compute_id', '__OS_DEBUG': False}, 'volumes': ['/dev/log:/dev/log', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/_nova_secontext:/var/lib/_nova_secontext:shared,z', '/var/lib/openstack/config/nova/nova_statedir_ownership.py:/sbin/nova_statedir_ownership.py:z']}, config_id=edpm, io.buildah.version=1.41.3, managed_by=edpm_ansible)
Oct 11 02:05:11 compute-0 podman[353577]: 2025-10-11 02:05:11.771309383 +0000 UTC m=+0.061714645 image pull 95311272d2962a6b8537a6d19b94bc44c5c3621a6e21a2e983fd64d147646bc9 quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified
Oct 11 02:05:11 compute-0 python3[353371]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman create --name nova_compute_init --conmon-pidfile /run/nova_compute_init.pid --env NOVA_STATEDIR_OWNERSHIP_SKIP=/var/lib/nova/compute_id --env __OS_DEBUG=False --label config_id=edpm --label container_name=nova_compute_init --label managed_by=edpm_ansible --label config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': False, 'user': 'root', 'restart': 'never', 'command': 'bash -c $* -- eval python3 /sbin/nova_statedir_ownership.py | logger -t nova_compute_init', 'net': 'none', 'security_opt': ['label=disable'], 'detach': False, 'environment': {'NOVA_STATEDIR_OWNERSHIP_SKIP': '/var/lib/nova/compute_id', '__OS_DEBUG': False}, 'volumes': ['/dev/log:/dev/log', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/_nova_secontext:/var/lib/_nova_secontext:shared,z', '/var/lib/openstack/config/nova/nova_statedir_ownership.py:/sbin/nova_statedir_ownership.py:z']} --log-driver journald --log-level info --network none --privileged=False --security-opt label=disable --user root --volume /dev/log:/dev/log --volume /var/lib/nova:/var/lib/nova:shared --volume /var/lib/_nova_secontext:/var/lib/_nova_secontext:shared,z --volume /var/lib/openstack/config/nova/nova_statedir_ownership.py:/sbin/nova_statedir_ownership.py:z quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified bash -c $* -- eval python3 /sbin/nova_statedir_ownership.py | logger -t nova_compute_init
Oct 11 02:05:12 compute-0 sudo[353369]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:12 compute-0 ceph-mon[191930]: pgmap v810: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:13 compute-0 sudo[353765]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kjtrndcfebfjlgvwzxnrkxmxjpimgreu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148312.4362257-1733-148049667622571/AnsiballZ_stat.py'
Oct 11 02:05:13 compute-0 sudo[353765]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:05:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v811: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:13 compute-0 python3.9[353767]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:05:13 compute-0 sudo[353765]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:14 compute-0 ceph-mon[191930]: pgmap v811: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:14 compute-0 sudo[353794]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:05:14 compute-0 sudo[353794]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:14 compute-0 sudo[353794]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:14 compute-0 podman[353819]: 2025-10-11 02:05:14.523686247 +0000 UTC m=+0.100737324 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, container_name=multipathd, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_id=multipathd, org.label-schema.build-date=20251009, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
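
Entries like the one above are emitted each time podman runs a container's configured healthcheck (here the 'test': '/openstack/healthcheck' script mounted from /var/lib/openstack/healthchecks). The same status can be read back on demand via podman inspect; a minimal sketch, assuming podman is on PATH and tolerating the state-key rename between podman releases:

    import json
    import subprocess

    def health_status(name="multipathd"):
        out = subprocess.run(["podman", "inspect", name], capture_output=True,
                             check=True, text=True).stdout
        state = json.loads(out)[0]["State"]
        # Older podman exposes this as "Healthcheck", newer as "Health".
        health = state.get("Health") or state.get("Healthcheck") or {}
        return health.get("Status", "unknown")

    if __name__ == "__main__":
        print(health_status())          # e.g. "healthy", matching the log line
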
Oct 11 02:05:14 compute-0 sudo[353849]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:05:14 compute-0 sudo[353849]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:14 compute-0 sudo[353849]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:14 compute-0 podman[353824]: 2025-10-11 02:05:14.550297497 +0000 UTC m=+0.116275177 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=iscsid, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, config_id=iscsid, io.buildah.version=1.41.3, org.label-schema.build-date=20251009)
Oct 11 02:05:14 compute-0 sudo[353929]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:05:14 compute-0 sudo[353929]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:14 compute-0 sudo[353929]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:14 compute-0 sudo[353978]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:05:14 compute-0 sudo[353978]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:14 compute-0 sudo[354055]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hzmjcelpseluestvtzbdzptajutcpigw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148314.4791434-1745-239043064859015/AnsiballZ_container_config_data.py'
Oct 11 02:05:14 compute-0 sudo[354055]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:05:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:05:15 compute-0 python3.9[354064]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/openstack/config/containers config_pattern=nova_compute.json debug=False
Oct 11 02:05:15 compute-0 sudo[354055]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v812: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:15 compute-0 sudo[353978]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"} v 0) v1
Oct 11 02:05:15 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"}]: dispatch
Oct 11 02:05:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:05:15 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:05:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:05:15 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:05:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:05:15 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:05:15 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev c328130e-19c2-4a89-8210-2d2636644b83 does not exist
Oct 11 02:05:15 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 60825553-a0bc-4532-a87b-f2c85751da2d does not exist
Oct 11 02:05:15 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 39dc795f-e0f2-49f2-883c-6a673bbb3883 does not exist
Oct 11 02:05:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:05:15 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:05:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:05:15 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:05:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:05:15 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:05:15 compute-0 sudo[354135]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:05:15 compute-0 sudo[354135]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:15 compute-0 sudo[354135]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:15 compute-0 sudo[354178]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:05:15 compute-0 sudo[354178]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:15 compute-0 sudo[354178]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:15 compute-0 sudo[354227]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:05:15 compute-0 sudo[354227]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:15 compute-0 sudo[354227]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:16 compute-0 sudo[354280]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:05:16 compute-0 sudo[354280]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:16 compute-0 sudo[354337]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wosliikhehouuotxkxkxcfokkylqbrmd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148315.5999172-1754-79374679693081/AnsiballZ_container_config_hash.py'
Oct 11 02:05:16 compute-0 sudo[354337]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:05:16 compute-0 ceph-mon[191930]: pgmap v812: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"}]: dispatch
Oct 11 02:05:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:05:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:05:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:05:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:05:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:05:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:05:16 compute-0 python3.9[354339]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
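
The ansible-container_config_hash module invoked above walks the rendered config volumes under /var/lib/config-data so containers can be restarted whenever their configuration changes. The exact algorithm is not visible in the log; the following is only a plausible equivalent, assuming one digest per tree computed over file paths and contents:

    import hashlib
    import os

    def tree_hash(prefix="/var/lib/config-data"):
        h = hashlib.sha256()
        for dirpath, _dirnames, filenames in sorted(os.walk(prefix)):
            for name in sorted(filenames):
                path = os.path.join(dirpath, name)
                h.update(path.encode())      # bind each file's content to its location
                with open(path, "rb") as f:
                    h.update(f.read())
        return h.hexdigest()

    if __name__ == "__main__":
        print(tree_hash())
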
Oct 11 02:05:16 compute-0 sudo[354337]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:16 compute-0 podman[354405]: 2025-10-11 02:05:16.735420542 +0000 UTC m=+0.090127672 container create 5ae64d91d674a727abe4da9f201cf4810f519030b4182bfa53bd955158629fe1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_banzai, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:05:16 compute-0 podman[354405]: 2025-10-11 02:05:16.68784701 +0000 UTC m=+0.042554140 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:05:16 compute-0 systemd[1]: Started libpod-conmon-5ae64d91d674a727abe4da9f201cf4810f519030b4182bfa53bd955158629fe1.scope.
Oct 11 02:05:16 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:05:16 compute-0 podman[354405]: 2025-10-11 02:05:16.891535029 +0000 UTC m=+0.246242199 container init 5ae64d91d674a727abe4da9f201cf4810f519030b4182bfa53bd955158629fe1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_banzai, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:05:16 compute-0 podman[354405]: 2025-10-11 02:05:16.910429358 +0000 UTC m=+0.265136458 container start 5ae64d91d674a727abe4da9f201cf4810f519030b4182bfa53bd955158629fe1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_banzai, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0)
Oct 11 02:05:16 compute-0 podman[354405]: 2025-10-11 02:05:16.915653294 +0000 UTC m=+0.270360474 container attach 5ae64d91d674a727abe4da9f201cf4810f519030b4182bfa53bd955158629fe1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_banzai, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:05:16 compute-0 systemd[1]: libpod-5ae64d91d674a727abe4da9f201cf4810f519030b4182bfa53bd955158629fe1.scope: Deactivated successfully.
Oct 11 02:05:16 compute-0 jolly_banzai[354420]: 167 167
Oct 11 02:05:16 compute-0 conmon[354420]: conmon 5ae64d91d674a727abe4 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-5ae64d91d674a727abe4da9f201cf4810f519030b4182bfa53bd955158629fe1.scope/container/memory.events
Oct 11 02:05:16 compute-0 podman[354405]: 2025-10-11 02:05:16.926528898 +0000 UTC m=+0.281235998 container died 5ae64d91d674a727abe4da9f201cf4810f519030b4182bfa53bd955158629fe1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_banzai, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:05:16 compute-0 systemd[1]: var-lib-containers-storage-overlay-5d7cab2214d824aee1e55fb22308ceda790ff09683528ff7f9c38bc24cefba42-merged.mount: Deactivated successfully.
Oct 11 02:05:16 compute-0 podman[354405]: 2025-10-11 02:05:16.999460568 +0000 UTC m=+0.354167668 container remove 5ae64d91d674a727abe4da9f201cf4810f519030b4182bfa53bd955158629fe1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_banzai, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_REF=reef)
Oct 11 02:05:17 compute-0 systemd[1]: libpod-conmon-5ae64d91d674a727abe4da9f201cf4810f519030b4182bfa53bd955158629fe1.scope: Deactivated successfully.
Oct 11 02:05:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v813: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:17 compute-0 podman[354518]: 2025-10-11 02:05:17.303406865 +0000 UTC m=+0.110624388 container create bdea0813622a7346e750412db09036ffeab0a0554a0a16386d360a1e9c1f3b9f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_joliot, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:05:17 compute-0 podman[354518]: 2025-10-11 02:05:17.265893459 +0000 UTC m=+0.073110982 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:05:17 compute-0 systemd[1]: Started libpod-conmon-bdea0813622a7346e750412db09036ffeab0a0554a0a16386d360a1e9c1f3b9f.scope.
Oct 11 02:05:17 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:05:17 compute-0 sudo[354586]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lxzgkygtmsljhmckxpsjiaupdwgcxsxx ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760148316.8977144-1764-88855506214688/AnsiballZ_edpm_container_manage.py'
Oct 11 02:05:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5068e486ce8624ff53be3b99dc3860e46c69e3f06ad0b99c26ce918e9162a9b3/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5068e486ce8624ff53be3b99dc3860e46c69e3f06ad0b99c26ce918e9162a9b3/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5068e486ce8624ff53be3b99dc3860e46c69e3f06ad0b99c26ce918e9162a9b3/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5068e486ce8624ff53be3b99dc3860e46c69e3f06ad0b99c26ce918e9162a9b3/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5068e486ce8624ff53be3b99dc3860e46c69e3f06ad0b99c26ce918e9162a9b3/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:17 compute-0 sudo[354586]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:05:17 compute-0 podman[354518]: 2025-10-11 02:05:17.518930999 +0000 UTC m=+0.326148562 container init bdea0813622a7346e750412db09036ffeab0a0554a0a16386d360a1e9c1f3b9f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_joliot, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 02:05:17 compute-0 podman[354518]: 2025-10-11 02:05:17.54459042 +0000 UTC m=+0.351807933 container start bdea0813622a7346e750412db09036ffeab0a0554a0a16386d360a1e9c1f3b9f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_joliot, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:05:17 compute-0 podman[354518]: 2025-10-11 02:05:17.553684376 +0000 UTC m=+0.360901899 container attach bdea0813622a7346e750412db09036ffeab0a0554a0a16386d360a1e9c1f3b9f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_joliot, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3)
Oct 11 02:05:17 compute-0 python3[354589]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/openstack/config/containers config_id=edpm config_overrides={} config_patterns=nova_compute.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 02:05:18 compute-0 podman[354623]: 2025-10-11 02:05:18.235414689 +0000 UTC m=+0.131010734 container create 33206644db1c18e3480ef44964f3a6a1eb2aea2f1be2a206e1475b915a3d4955 (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': True, 'user': 'nova', 'restart': 'always', 'command': 'kolla_start', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'volumes': ['/var/lib/openstack/config/nova:/var/lib/kolla/config_files:ro', '/var/lib/openstack/cacerts/nova/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/etc/localtime:/etc/localtime:ro', '/lib/modules:/lib/modules:ro', '/dev:/dev', '/var/lib/libvirt:/var/lib/libvirt', '/run/libvirt:/run/libvirt:shared', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/etc/iscsi:/etc/iscsi:ro', '/etc/nvme:/etc/nvme', '/var/lib/openstack/config/ceph:/var/lib/kolla/config_files/ceph:ro', '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro']}, io.buildah.version=1.41.3, managed_by=edpm_ansible, container_name=nova_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm)
Oct 11 02:05:18 compute-0 podman[354623]: 2025-10-11 02:05:18.165672783 +0000 UTC m=+0.061268868 image pull 95311272d2962a6b8537a6d19b94bc44c5c3621a6e21a2e983fd64d147646bc9 quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified
Oct 11 02:05:18 compute-0 python3[354589]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman create --name nova_compute --conmon-pidfile /run/nova_compute.pid --env KOLLA_CONFIG_STRATEGY=COPY_ALWAYS --label config_id=edpm --label container_name=nova_compute --label managed_by=edpm_ansible --label config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': True, 'user': 'nova', 'restart': 'always', 'command': 'kolla_start', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'volumes': ['/var/lib/openstack/config/nova:/var/lib/kolla/config_files:ro', '/var/lib/openstack/cacerts/nova/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/etc/localtime:/etc/localtime:ro', '/lib/modules:/lib/modules:ro', '/dev:/dev', '/var/lib/libvirt:/var/lib/libvirt', '/run/libvirt:/run/libvirt:shared', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/etc/iscsi:/etc/iscsi:ro', '/etc/nvme:/etc/nvme', '/var/lib/openstack/config/ceph:/var/lib/kolla/config_files/ceph:ro', '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro']} --log-driver journald --log-level info --network host --privileged=True --user nova --volume /var/lib/openstack/config/nova:/var/lib/kolla/config_files:ro --volume /var/lib/openstack/cacerts/nova/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z --volume /etc/localtime:/etc/localtime:ro --volume /lib/modules:/lib/modules:ro --volume /dev:/dev --volume /var/lib/libvirt:/var/lib/libvirt --volume /run/libvirt:/run/libvirt:shared --volume /var/lib/nova:/var/lib/nova:shared --volume /var/lib/iscsi:/var/lib/iscsi:z --volume /etc/multipath:/etc/multipath:z --volume /etc/multipath.conf:/etc/multipath.conf:ro --volume /etc/iscsi:/etc/iscsi:ro --volume /etc/nvme:/etc/nvme --volume /var/lib/openstack/config/ceph:/var/lib/kolla/config_files/ceph:ro --volume /etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified kolla_start
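
Note that the config_data label passed to podman create above is the Python repr of the config dict (single quotes), not JSON. Reading it back from the running container therefore needs ast.literal_eval rather than json.loads; a sketch, assuming the nova_compute container from this log exists:

    import ast
    import json
    import subprocess

    def container_config(name="nova_compute"):
        out = subprocess.run(["podman", "inspect", name], capture_output=True,
                             check=True, text=True).stdout
        labels = json.loads(out)[0]["Config"]["Labels"]
        # The label body uses Python literal syntax, so json.loads would fail here.
        return ast.literal_eval(labels["config_data"])

    if __name__ == "__main__":
        print(container_config()["volumes"])
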
Oct 11 02:05:18 compute-0 ceph-mon[191930]: pgmap v813: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:18 compute-0 sudo[354586]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:18 compute-0 heuristic_joliot[354584]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:05:18 compute-0 heuristic_joliot[354584]: --> relative data size: 1.0
Oct 11 02:05:18 compute-0 heuristic_joliot[354584]: --> All data devices are unavailable
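
"All data devices are unavailable" here is expected rather than an error: the three LVs named in the lvm batch command already carry OSDs (their ceph.osd_id tags appear in the lvm list output further down), so ceph-volume has nothing new to deploy and the cephadm run exits cleanly. Whether an LV is already claimed can be checked straight from LVM metadata; a sketch, assuming the lvs tool from lvm2 is installed:

    import json
    import subprocess

    def claimed_osd_lvs():
        out = subprocess.run(
            ["lvs", "-o", "lv_path,lv_tags", "--reportformat", "json"],
            capture_output=True, check=True, text=True).stdout
        lvs = json.loads(out)["report"][0]["lv"]
        # ceph-volume tags prepared LVs with ceph.osd_id=<n> among other keys.
        return [lv["lv_path"] for lv in lvs if "ceph.osd_id=" in lv["lv_tags"]]

    if __name__ == "__main__":
        print(claimed_osd_lvs())
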
Oct 11 02:05:18 compute-0 systemd[1]: libpod-bdea0813622a7346e750412db09036ffeab0a0554a0a16386d360a1e9c1f3b9f.scope: Deactivated successfully.
Oct 11 02:05:18 compute-0 podman[354518]: 2025-10-11 02:05:18.831923513 +0000 UTC m=+1.639141036 container died bdea0813622a7346e750412db09036ffeab0a0554a0a16386d360a1e9c1f3b9f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_joliot, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 02:05:18 compute-0 systemd[1]: libpod-bdea0813622a7346e750412db09036ffeab0a0554a0a16386d360a1e9c1f3b9f.scope: Consumed 1.206s CPU time.
Oct 11 02:05:18 compute-0 systemd[1]: var-lib-containers-storage-overlay-5068e486ce8624ff53be3b99dc3860e46c69e3f06ad0b99c26ce918e9162a9b3-merged.mount: Deactivated successfully.
Oct 11 02:05:18 compute-0 podman[354518]: 2025-10-11 02:05:18.949404222 +0000 UTC m=+1.756621705 container remove bdea0813622a7346e750412db09036ffeab0a0554a0a16386d360a1e9c1f3b9f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_joliot, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, ceph=True)
Oct 11 02:05:18 compute-0 systemd[1]: libpod-conmon-bdea0813622a7346e750412db09036ffeab0a0554a0a16386d360a1e9c1f3b9f.scope: Deactivated successfully.
Oct 11 02:05:19 compute-0 sudo[354280]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:19 compute-0 sudo[354795]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:05:19 compute-0 sudo[354795]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:19 compute-0 sudo[354795]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v814: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:19 compute-0 sudo[354844]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:05:19 compute-0 sudo[354844]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:19 compute-0 sudo[354844]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:19 compute-0 sudo[354895]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qevybtpjszphudshonsryyomghdquadn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148318.803388-1772-20577961714906/AnsiballZ_stat.py'
Oct 11 02:05:19 compute-0 sudo[354895]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:05:19 compute-0 sudo[354896]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:05:19 compute-0 sudo[354896]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:19 compute-0 sudo[354896]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:19 compute-0 sudo[354923]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:05:19 compute-0 sudo[354923]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:19 compute-0 python3.9[354905]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:05:19 compute-0 sudo[354895]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:05:20 compute-0 podman[355037]: 2025-10-11 02:05:20.129392932 +0000 UTC m=+0.075857152 container create 135489d380a20a4f753baa4dd7ad1c158222b5e0149f1834681356e74aaf4a98 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_beaver, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 02:05:20 compute-0 podman[355037]: 2025-10-11 02:05:20.100872031 +0000 UTC m=+0.047336261 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:05:20 compute-0 systemd[1]: Started libpod-conmon-135489d380a20a4f753baa4dd7ad1c158222b5e0149f1834681356e74aaf4a98.scope.
Oct 11 02:05:20 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:05:20 compute-0 podman[355037]: 2025-10-11 02:05:20.24672703 +0000 UTC m=+0.193191320 container init 135489d380a20a4f753baa4dd7ad1c158222b5e0149f1834681356e74aaf4a98 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_beaver, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 02:05:20 compute-0 podman[355037]: 2025-10-11 02:05:20.261620677 +0000 UTC m=+0.208084907 container start 135489d380a20a4f753baa4dd7ad1c158222b5e0149f1834681356e74aaf4a98 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_beaver, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:05:20 compute-0 podman[355037]: 2025-10-11 02:05:20.267768782 +0000 UTC m=+0.214233062 container attach 135489d380a20a4f753baa4dd7ad1c158222b5e0149f1834681356e74aaf4a98 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_beaver, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 02:05:20 compute-0 admiring_beaver[355082]: 167 167
Oct 11 02:05:20 compute-0 systemd[1]: libpod-135489d380a20a4f753baa4dd7ad1c158222b5e0149f1834681356e74aaf4a98.scope: Deactivated successfully.
Oct 11 02:05:20 compute-0 podman[355037]: 2025-10-11 02:05:20.271139357 +0000 UTC m=+0.217603587 container died 135489d380a20a4f753baa4dd7ad1c158222b5e0149f1834681356e74aaf4a98 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_beaver, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_REF=reef)
Oct 11 02:05:20 compute-0 systemd[1]: var-lib-containers-storage-overlay-84c426e5e797cf6b77a8738568e7049945e1db0025e48cd7447fbad0d0c173d8-merged.mount: Deactivated successfully.
Oct 11 02:05:20 compute-0 podman[355037]: 2025-10-11 02:05:20.340019524 +0000 UTC m=+0.286483744 container remove 135489d380a20a4f753baa4dd7ad1c158222b5e0149f1834681356e74aaf4a98 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_beaver, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:05:20 compute-0 systemd[1]: libpod-conmon-135489d380a20a4f753baa4dd7ad1c158222b5e0149f1834681356e74aaf4a98.scope: Deactivated successfully.
Oct 11 02:05:20 compute-0 ceph-mon[191930]: pgmap v814: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:20 compute-0 sudo[355189]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fwkgpdtjdlhjiljdanhafrsmnwwhwmjq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148320.0540583-1781-271359456446669/AnsiballZ_file.py'
Oct 11 02:05:20 compute-0 sudo[355189]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:05:20 compute-0 podman[355155]: 2025-10-11 02:05:20.616955076 +0000 UTC m=+0.090381955 container create 88fd6209d270d902729328d53e525610b80fa50f4dee2d76efd440dfd6b91806 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_bassi, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:05:20 compute-0 podman[355155]: 2025-10-11 02:05:20.580712074 +0000 UTC m=+0.054139023 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:05:20 compute-0 systemd[1]: Started libpod-conmon-88fd6209d270d902729328d53e525610b80fa50f4dee2d76efd440dfd6b91806.scope.
Oct 11 02:05:20 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:05:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/251d8f868ecb479059120bfe63d6f969563817efa4d6ddf1cfddb7595886faec/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/251d8f868ecb479059120bfe63d6f969563817efa4d6ddf1cfddb7595886faec/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/251d8f868ecb479059120bfe63d6f969563817efa4d6ddf1cfddb7595886faec/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/251d8f868ecb479059120bfe63d6f969563817efa4d6ddf1cfddb7595886faec/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:20 compute-0 podman[355155]: 2025-10-11 02:05:20.755780711 +0000 UTC m=+0.229207590 container init 88fd6209d270d902729328d53e525610b80fa50f4dee2d76efd440dfd6b91806 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_bassi, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 02:05:20 compute-0 podman[355155]: 2025-10-11 02:05:20.777591081 +0000 UTC m=+0.251017950 container start 88fd6209d270d902729328d53e525610b80fa50f4dee2d76efd440dfd6b91806 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_bassi, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 02:05:20 compute-0 podman[355155]: 2025-10-11 02:05:20.783983688 +0000 UTC m=+0.257410577 container attach 88fd6209d270d902729328d53e525610b80fa50f4dee2d76efd440dfd6b91806 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_bassi, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0)
Oct 11 02:05:20 compute-0 python3.9[355193]: ansible-file Invoked with path=/etc/systemd/system/edpm_nova_compute.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:05:20 compute-0 sudo[355189]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v815: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:21 compute-0 laughing_bassi[355196]: {
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:     "0": [
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:         {
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "devices": [
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "/dev/loop3"
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             ],
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "lv_name": "ceph_lv0",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "lv_size": "21470642176",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "name": "ceph_lv0",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "tags": {
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.cluster_name": "ceph",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.crush_device_class": "",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.encrypted": "0",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.osd_id": "0",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.type": "block",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.vdo": "0"
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             },
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "type": "block",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "vg_name": "ceph_vg0"
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:         }
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:     ],
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:     "1": [
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:         {
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "devices": [
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "/dev/loop4"
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             ],
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "lv_name": "ceph_lv1",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "lv_size": "21470642176",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "name": "ceph_lv1",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "tags": {
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.cluster_name": "ceph",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.crush_device_class": "",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.encrypted": "0",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.osd_id": "1",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.type": "block",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.vdo": "0"
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             },
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "type": "block",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "vg_name": "ceph_vg1"
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:         }
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:     ],
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:     "2": [
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:         {
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "devices": [
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "/dev/loop5"
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             ],
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "lv_name": "ceph_lv2",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "lv_size": "21470642176",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "name": "ceph_lv2",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "tags": {
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.cluster_name": "ceph",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.crush_device_class": "",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.encrypted": "0",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.osd_id": "2",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.type": "block",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:                 "ceph.vdo": "0"
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             },
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "type": "block",
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:             "vg_name": "ceph_vg2"
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:         }
Oct 11 02:05:21 compute-0 laughing_bassi[355196]:     ]
Oct 11 02:05:21 compute-0 laughing_bassi[355196]: }
Oct 11 02:05:21 compute-0 systemd[1]: libpod-88fd6209d270d902729328d53e525610b80fa50f4dee2d76efd440dfd6b91806.scope: Deactivated successfully.
Oct 11 02:05:21 compute-0 podman[355155]: 2025-10-11 02:05:21.721124016 +0000 UTC m=+1.194550895 container died 88fd6209d270d902729328d53e525610b80fa50f4dee2d76efd440dfd6b91806 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_bassi, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507)
Oct 11 02:05:21 compute-0 systemd[1]: var-lib-containers-storage-overlay-251d8f868ecb479059120bfe63d6f969563817efa4d6ddf1cfddb7595886faec-merged.mount: Deactivated successfully.
Oct 11 02:05:21 compute-0 podman[355155]: 2025-10-11 02:05:21.836937948 +0000 UTC m=+1.310364817 container remove 88fd6209d270d902729328d53e525610b80fa50f4dee2d76efd440dfd6b91806 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_bassi, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:05:21 compute-0 systemd[1]: libpod-conmon-88fd6209d270d902729328d53e525610b80fa50f4dee2d76efd440dfd6b91806.scope: Deactivated successfully.
Oct 11 02:05:21 compute-0 sudo[355366]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cpmtzvvknrompauekuvnfhuhwzkihrxv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148321.0150142-1781-14180953696485/AnsiballZ_copy.py'
Oct 11 02:05:21 compute-0 sudo[354923]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:21 compute-0 sudo[355366]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:05:22 compute-0 sudo[355369]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:05:22 compute-0 sudo[355369]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:22 compute-0 sudo[355369]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:22 compute-0 python3.9[355368]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760148321.0150142-1781-14180953696485/source dest=/etc/systemd/system/edpm_nova_compute.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:05:22 compute-0 sudo[355394]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:05:22 compute-0 sudo[355394]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:22 compute-0 sudo[355394]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:22 compute-0 sudo[355366]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:22 compute-0 sudo[355419]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:05:22 compute-0 sudo[355419]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:22 compute-0 sudo[355419]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:22 compute-0 sudo[355467]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:05:22 compute-0 sudo[355467]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:22 compute-0 ceph-mon[191930]: pgmap v815: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:22 compute-0 sudo[355552]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eispcxfodvyfhrdarklphufrbegxmfmb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148321.0150142-1781-14180953696485/AnsiballZ_systemd.py'
Oct 11 02:05:22 compute-0 sudo[355552]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:05:22 compute-0 python3.9[355559]: ansible-systemd Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 02:05:23 compute-0 systemd[1]: Reloading.
Oct 11 02:05:23 compute-0 podman[355586]: 2025-10-11 02:05:23.076485597 +0000 UTC m=+0.112102614 container create 2a876b2dc9b2588749980438e99e51d8cab664998e1c38f7f6b0d099fa5a4f4a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_darwin, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=reef)
Oct 11 02:05:23 compute-0 podman[355586]: 2025-10-11 02:05:23.034032259 +0000 UTC m=+0.069649336 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:05:23 compute-0 podman[355600]: 2025-10-11 02:05:23.149582638 +0000 UTC m=+0.106989200 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vendor=Red Hat, Inc., maintainer=Red Hat, Inc., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, build-date=2025-08-20T13:12:41, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, container_name=openstack_network_exporter, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, managed_by=edpm_ansible, name=ubi9-minimal, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, version=9.6, com.redhat.component=ubi9-minimal-container, io.openshift.tags=minimal rhel9, distribution-scope=public, release=1755695350, vcs-type=git, config_id=edpm, io.buildah.version=1.33.7, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., architecture=x86_64, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b)
Oct 11 02:05:23 compute-0 podman[355598]: 2025-10-11 02:05:23.168842262 +0000 UTC m=+0.126416855 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:05:23 compute-0 systemd-sysv-generator[355666]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:05:23 compute-0 systemd-rc-local-generator[355663]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:05:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v816: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:23 compute-0 systemd[1]: Started libpod-conmon-2a876b2dc9b2588749980438e99e51d8cab664998e1c38f7f6b0d099fa5a4f4a.scope.
Oct 11 02:05:23 compute-0 sudo[355552]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:23 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:05:23 compute-0 podman[355586]: 2025-10-11 02:05:23.582623096 +0000 UTC m=+0.618240163 container init 2a876b2dc9b2588749980438e99e51d8cab664998e1c38f7f6b0d099fa5a4f4a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_darwin, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 02:05:23 compute-0 podman[355586]: 2025-10-11 02:05:23.605148174 +0000 UTC m=+0.640765191 container start 2a876b2dc9b2588749980438e99e51d8cab664998e1c38f7f6b0d099fa5a4f4a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_darwin, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:05:23 compute-0 podman[355586]: 2025-10-11 02:05:23.613169139 +0000 UTC m=+0.648786166 container attach 2a876b2dc9b2588749980438e99e51d8cab664998e1c38f7f6b0d099fa5a4f4a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_darwin, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.vendor=CentOS)
Oct 11 02:05:23 compute-0 silly_darwin[355676]: 167 167
Oct 11 02:05:23 compute-0 systemd[1]: libpod-2a876b2dc9b2588749980438e99e51d8cab664998e1c38f7f6b0d099fa5a4f4a.scope: Deactivated successfully.
Oct 11 02:05:23 compute-0 podman[355586]: 2025-10-11 02:05:23.620520266 +0000 UTC m=+0.656137313 container died 2a876b2dc9b2588749980438e99e51d8cab664998e1c38f7f6b0d099fa5a4f4a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_darwin, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_REF=reef)
Oct 11 02:05:23 compute-0 systemd[1]: var-lib-containers-storage-overlay-dd367dee0d03eb59f048170b81b043a8eae71b98ead7f843802626ccde0a7ada-merged.mount: Deactivated successfully.
Oct 11 02:05:23 compute-0 podman[355586]: 2025-10-11 02:05:23.707444753 +0000 UTC m=+0.743061770 container remove 2a876b2dc9b2588749980438e99e51d8cab664998e1c38f7f6b0d099fa5a4f4a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_darwin, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:05:23 compute-0 systemd[1]: libpod-conmon-2a876b2dc9b2588749980438e99e51d8cab664998e1c38f7f6b0d099fa5a4f4a.scope: Deactivated successfully.
Oct 11 02:05:24 compute-0 podman[355745]: 2025-10-11 02:05:24.028210578 +0000 UTC m=+0.092136343 container create e5edbcf01b97c9a7ce6af011a7b83ce95a1f5d5f1d755d569870ccef31924940 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_mayer, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=reef)
Oct 11 02:05:24 compute-0 podman[355745]: 2025-10-11 02:05:23.996518073 +0000 UTC m=+0.060443878 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:05:24 compute-0 sudo[355783]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jfqqqzzknccugaebnipwakauivnolrvx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148321.0150142-1781-14180953696485/AnsiballZ_systemd.py'
Oct 11 02:05:24 compute-0 sudo[355783]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:05:24 compute-0 systemd[1]: Started libpod-conmon-e5edbcf01b97c9a7ce6af011a7b83ce95a1f5d5f1d755d569870ccef31924940.scope.
Oct 11 02:05:24 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:05:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ed7962554fe41481adc37b052440c76d73e4c7e6a5720784bd7e00d6ba05cd4a/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ed7962554fe41481adc37b052440c76d73e4c7e6a5720784bd7e00d6ba05cd4a/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ed7962554fe41481adc37b052440c76d73e4c7e6a5720784bd7e00d6ba05cd4a/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ed7962554fe41481adc37b052440c76d73e4c7e6a5720784bd7e00d6ba05cd4a/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:24 compute-0 podman[355745]: 2025-10-11 02:05:24.21129585 +0000 UTC m=+0.275221675 container init e5edbcf01b97c9a7ce6af011a7b83ce95a1f5d5f1d755d569870ccef31924940 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_mayer, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:05:24 compute-0 podman[355745]: 2025-10-11 02:05:24.236311734 +0000 UTC m=+0.300237529 container start e5edbcf01b97c9a7ce6af011a7b83ce95a1f5d5f1d755d569870ccef31924940 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_mayer, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:05:24 compute-0 podman[355745]: 2025-10-11 02:05:24.24355622 +0000 UTC m=+0.307482025 container attach e5edbcf01b97c9a7ce6af011a7b83ce95a1f5d5f1d755d569870ccef31924940 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_mayer, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True)
Oct 11 02:05:24 compute-0 ceph-mon[191930]: pgmap v816: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:24 compute-0 python3.9[355788]: ansible-systemd Invoked with state=restarted name=edpm_nova_compute.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:05:24 compute-0 systemd[1]: Reloading.
Oct 11 02:05:24 compute-0 systemd-rc-local-generator[355821]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:05:24 compute-0 systemd-sysv-generator[355826]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:05:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:05:25 compute-0 systemd[1]: Starting nova_compute container...
Oct 11 02:05:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v817: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:25 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:05:25 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d44f800e10782acb3d4d088c5d8deb67cdce0dbf2953c4eeea2e1749a4c9150e/merged/etc/nvme supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:25 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d44f800e10782acb3d4d088c5d8deb67cdce0dbf2953c4eeea2e1749a4c9150e/merged/etc/multipath supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:25 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d44f800e10782acb3d4d088c5d8deb67cdce0dbf2953c4eeea2e1749a4c9150e/merged/var/lib/libvirt supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:25 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d44f800e10782acb3d4d088c5d8deb67cdce0dbf2953c4eeea2e1749a4c9150e/merged/var/lib/nova supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:25 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d44f800e10782acb3d4d088c5d8deb67cdce0dbf2953c4eeea2e1749a4c9150e/merged/var/lib/iscsi supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:25 compute-0 podman[355844]: 2025-10-11 02:05:25.350158946 +0000 UTC m=+0.184097233 container init 33206644db1c18e3480ef44964f3a6a1eb2aea2f1be2a206e1475b915a3d4955 (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute, org.label-schema.license=GPLv2, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': True, 'user': 'nova', 'restart': 'always', 'command': 'kolla_start', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'volumes': ['/var/lib/openstack/config/nova:/var/lib/kolla/config_files:ro', '/var/lib/openstack/cacerts/nova/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/etc/localtime:/etc/localtime:ro', '/lib/modules:/lib/modules:ro', '/dev:/dev', '/var/lib/libvirt:/var/lib/libvirt', '/run/libvirt:/run/libvirt:shared', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/etc/iscsi:/etc/iscsi:ro', '/etc/nvme:/etc/nvme', '/var/lib/openstack/config/ceph:/var/lib/kolla/config_files/ceph:ro', '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro']}, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, container_name=nova_compute, io.buildah.version=1.41.3)
Oct 11 02:05:25 compute-0 podman[355844]: 2025-10-11 02:05:25.374416832 +0000 UTC m=+0.208355099 container start 33206644db1c18e3480ef44964f3a6a1eb2aea2f1be2a206e1475b915a3d4955 (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': True, 'user': 'nova', 'restart': 'always', 'command': 'kolla_start', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'volumes': ['/var/lib/openstack/config/nova:/var/lib/kolla/config_files:ro', '/var/lib/openstack/cacerts/nova/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/etc/localtime:/etc/localtime:ro', '/lib/modules:/lib/modules:ro', '/dev:/dev', '/var/lib/libvirt:/var/lib/libvirt', '/run/libvirt:/run/libvirt:shared', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/etc/iscsi:/etc/iscsi:ro', '/etc/nvme:/etc/nvme', '/var/lib/openstack/config/ceph:/var/lib/kolla/config_files/ceph:ro', '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro']}, container_name=nova_compute, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, config_id=edpm)
Oct 11 02:05:25 compute-0 podman[355844]: nova_compute
Oct 11 02:05:25 compute-0 nova_compute[355868]: + sudo -E kolla_set_configs
Oct 11 02:05:25 compute-0 systemd[1]: Started nova_compute container.
Oct 11 02:05:25 compute-0 jolly_mayer[355789]: {
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:         "osd_id": 1,
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:         "type": "bluestore"
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:     },
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:         "osd_id": 2,
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:         "type": "bluestore"
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:     },
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:         "osd_id": 0,
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:         "type": "bluestore"
Oct 11 02:05:25 compute-0 jolly_mayer[355789]:     }
Oct 11 02:05:25 compute-0 jolly_mayer[355789]: }
Oct 11 02:05:25 compute-0 systemd[1]: libpod-e5edbcf01b97c9a7ce6af011a7b83ce95a1f5d5f1d755d569870ccef31924940.scope: Deactivated successfully.
Oct 11 02:05:25 compute-0 systemd[1]: libpod-e5edbcf01b97c9a7ce6af011a7b83ce95a1f5d5f1d755d569870ccef31924940.scope: Consumed 1.149s CPU time.
Oct 11 02:05:25 compute-0 podman[355745]: 2025-10-11 02:05:25.439193155 +0000 UTC m=+1.503118940 container died e5edbcf01b97c9a7ce6af011a7b83ce95a1f5d5f1d755d569870ccef31924940 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_mayer, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:05:25 compute-0 sudo[355783]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Loading config file at /var/lib/kolla/config_files/config.json
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Validating config file
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Kolla config strategy set to: COPY_ALWAYS
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Copying service configuration files
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Deleting /etc/nova/nova.conf
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Copying /var/lib/kolla/config_files/nova-blank.conf to /etc/nova/nova.conf
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Setting permission for /etc/nova/nova.conf
Oct 11 02:05:25 compute-0 systemd[1]: var-lib-containers-storage-overlay-ed7962554fe41481adc37b052440c76d73e4c7e6a5720784bd7e00d6ba05cd4a-merged.mount: Deactivated successfully.
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Copying /var/lib/kolla/config_files/01-nova.conf to /etc/nova/nova.conf.d/01-nova.conf
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Setting permission for /etc/nova/nova.conf.d/01-nova.conf
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Copying /var/lib/kolla/config_files/03-ceph-nova.conf to /etc/nova/nova.conf.d/03-ceph-nova.conf
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Setting permission for /etc/nova/nova.conf.d/03-ceph-nova.conf
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Copying /var/lib/kolla/config_files/25-nova-extra.conf to /etc/nova/nova.conf.d/25-nova-extra.conf
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Setting permission for /etc/nova/nova.conf.d/25-nova-extra.conf
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Copying /var/lib/kolla/config_files/nova-blank.conf to /etc/nova/nova.conf.d/nova-blank.conf
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Setting permission for /etc/nova/nova.conf.d/nova-blank.conf
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Copying /var/lib/kolla/config_files/02-nova-host-specific.conf to /etc/nova/nova.conf.d/02-nova-host-specific.conf
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Setting permission for /etc/nova/nova.conf.d/02-nova-host-specific.conf
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Deleting /etc/ceph
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Creating directory /etc/ceph
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Setting permission for /etc/ceph
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Copying /var/lib/kolla/config_files/ceph/ceph.conf to /etc/ceph/ceph.conf
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Setting permission for /etc/ceph/ceph.conf
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Copying /var/lib/kolla/config_files/ceph/ceph.client.openstack.keyring to /etc/ceph/ceph.client.openstack.keyring
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Setting permission for /etc/ceph/ceph.client.openstack.keyring
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Copying /var/lib/kolla/config_files/ssh-privatekey to /var/lib/nova/.ssh/ssh-privatekey
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Setting permission for /var/lib/nova/.ssh/ssh-privatekey
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Copying /var/lib/kolla/config_files/ssh-config to /var/lib/nova/.ssh/config
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Setting permission for /var/lib/nova/.ssh/config
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Writing out command to execute
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Setting permission for /etc/ceph/ceph.conf
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Setting permission for /etc/ceph/ceph.client.openstack.keyring
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Setting permission for /var/lib/nova/.ssh/
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Setting permission for /var/lib/nova/.ssh/ssh-privatekey
Oct 11 02:05:25 compute-0 nova_compute[355868]: INFO:__main__:Setting permission for /var/lib/nova/.ssh/config
Oct 11 02:05:25 compute-0 nova_compute[355868]: ++ cat /run_command
Oct 11 02:05:25 compute-0 nova_compute[355868]: + CMD=nova-compute
Oct 11 02:05:25 compute-0 nova_compute[355868]: + ARGS=
Oct 11 02:05:25 compute-0 nova_compute[355868]: + sudo kolla_copy_cacerts
Oct 11 02:05:25 compute-0 podman[355745]: 2025-10-11 02:05:25.537984338 +0000 UTC m=+1.601910093 container remove e5edbcf01b97c9a7ce6af011a7b83ce95a1f5d5f1d755d569870ccef31924940 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jolly_mayer, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:05:25 compute-0 systemd[1]: libpod-conmon-e5edbcf01b97c9a7ce6af011a7b83ce95a1f5d5f1d755d569870ccef31924940.scope: Deactivated successfully.
Oct 11 02:05:25 compute-0 nova_compute[355868]: + [[ ! -n '' ]]
Oct 11 02:05:25 compute-0 nova_compute[355868]: + . kolla_extend_start
Oct 11 02:05:25 compute-0 nova_compute[355868]: Running command: 'nova-compute'
Oct 11 02:05:25 compute-0 nova_compute[355868]: + echo 'Running command: '\''nova-compute'\'''
Oct 11 02:05:25 compute-0 nova_compute[355868]: + umask 0022
Oct 11 02:05:25 compute-0 nova_compute[355868]: + exec nova-compute
Oct 11 02:05:25 compute-0 sudo[355467]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:05:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:05:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:05:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:05:25 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 2b75ee19-4cfe-4c48-9cf3-58097e906aa6 does not exist
Oct 11 02:05:25 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 86ef8f04-9c54-4bb9-a56b-3f05543de57f does not exist
Oct 11 02:05:25 compute-0 sudo[355925]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:05:25 compute-0 sudo[355925]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:25 compute-0 sudo[355925]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:25 compute-0 sudo[355950]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:05:25 compute-0 sudo[355950]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:05:25 compute-0 sudo[355950]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:05:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:05:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:05:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:05:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:05:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:05:26 compute-0 ceph-mon[191930]: pgmap v817: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:05:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:05:26 compute-0 python3.9[356101]: ansible-ansible.builtin.stat Invoked with path=/etc/systemd/system/edpm_nova_nvme_cleaner_healthcheck.service follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:05:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v818: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:27 compute-0 nova_compute[355868]: 2025-10-11 02:05:27.865 2 DEBUG os_vif [-] Loaded VIF plugin class '<class 'vif_plug_linux_bridge.linux_bridge.LinuxBridgePlugin'>' with name 'linux_bridge' initialize /usr/lib/python3.9/site-packages/os_vif/__init__.py:44
Oct 11 02:05:27 compute-0 nova_compute[355868]: 2025-10-11 02:05:27.866 2 DEBUG os_vif [-] Loaded VIF plugin class '<class 'vif_plug_noop.noop.NoOpPlugin'>' with name 'noop' initialize /usr/lib/python3.9/site-packages/os_vif/__init__.py:44
Oct 11 02:05:27 compute-0 nova_compute[355868]: 2025-10-11 02:05:27.866 2 DEBUG os_vif [-] Loaded VIF plugin class '<class 'vif_plug_ovs.ovs.OvsPlugin'>' with name 'ovs' initialize /usr/lib/python3.9/site-packages/os_vif/__init__.py:44
Oct 11 02:05:27 compute-0 nova_compute[355868]: 2025-10-11 02:05:27.867 2 INFO os_vif [-] Loaded VIF plugins: linux_bridge, noop, ovs
Oct 11 02:05:28 compute-0 nova_compute[355868]: 2025-10-11 02:05:28.053 2 DEBUG oslo_concurrency.processutils [-] Running cmd (subprocess): grep -F node.session.scan /sbin/iscsiadm execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:05:28 compute-0 nova_compute[355868]: 2025-10-11 02:05:28.094 2 DEBUG oslo_concurrency.processutils [-] CMD "grep -F node.session.scan /sbin/iscsiadm" returned: 0 in 0.041s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:05:28 compute-0 podman[356183]: 2025-10-11 02:05:28.29717985 +0000 UTC m=+0.182415186 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi)
Oct 11 02:05:28 compute-0 ceph-mon[191930]: pgmap v818: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:28 compute-0 python3.9[356274]: ansible-ansible.builtin.stat Invoked with path=/etc/systemd/system/edpm_nova_nvme_cleaner.service follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.000 2 INFO nova.virt.driver [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] Loading compute driver 'libvirt.LibvirtDriver'
Oct 11 02:05:29 compute-0 unix_chkpwd[356299]: password check failed for user (root)
Oct 11 02:05:29 compute-0 sshd-session[356180]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.176 2 INFO nova.compute.provider_config [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] No provider configs found in /etc/nova/provider_config/. If files are present, ensure the Nova process has access.
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.198 2 DEBUG oslo_concurrency.lockutils [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] Acquiring lock "singleton_lock" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.198 2 DEBUG oslo_concurrency.lockutils [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] Acquired lock "singleton_lock" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.198 2 DEBUG oslo_concurrency.lockutils [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] Releasing lock "singleton_lock" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.199 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] Full set of CONF: _wait_for_exit_or_signal /usr/lib/python3.9/site-packages/oslo_service/service.py:362
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.199 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2589
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.199 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] Configuration options gathered from: log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2590
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.199 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] command line args: [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2591
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.199 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] config files: ['/etc/nova/nova.conf', '/etc/nova/nova-compute.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2592
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.200 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ================================================================================ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2594
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.200 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] allow_resize_to_same_host      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.200 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] arq_binding_timeout            = 300 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.200 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] backdoor_port                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.200 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] backdoor_socket                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.201 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] block_device_allocate_retries  = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.201 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] block_device_allocate_retries_interval = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.201 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cert                           = self.pem log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.201 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] compute_driver                 = libvirt.LibvirtDriver log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.201 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] compute_monitors               = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.201 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] config_dir                     = ['/etc/nova/nova.conf.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.202 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] config_drive_format            = iso9660 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.202 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] config_file                    = ['/etc/nova/nova.conf', '/etc/nova/nova-compute.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.202 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] config_source                  = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.202 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] console_host                   = compute-0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.202 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] control_exchange               = nova log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.202 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cpu_allocation_ratio           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.202 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] daemon                         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.202 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] debug                          = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.203 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] default_access_ip_network_name = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.203 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] default_availability_zone      = nova log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.203 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] default_ephemeral_format       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.203 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'glanceclient=WARN', 'oslo.privsep.daemon=INFO'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.203 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] default_schedule_zone          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.203 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] disk_allocation_ratio          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.204 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] enable_new_services            = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.204 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] enabled_apis                   = ['osapi_compute', 'metadata'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.204 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] enabled_ssl_apis               = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.204 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] flat_injected                  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.204 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] force_config_drive             = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.204 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] force_raw_images               = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.204 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.205 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] heal_instance_info_cache_interval = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.205 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] host                           = compute-0.ctlplane.example.com log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.205 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] initial_cpu_allocation_ratio   = 4.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.205 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] initial_disk_allocation_ratio  = 0.9 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.205 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] initial_ram_allocation_ratio   = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.205 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] injected_network_template      = /usr/lib/python3.9/site-packages/nova/virt/interfaces.template log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.206 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] instance_build_timeout         = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.206 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] instance_delete_interval       = 300 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.206 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.206 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] instance_name_template         = instance-%08x log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.206 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] instance_usage_audit           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.206 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] instance_usage_audit_period    = month log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.206 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.207 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] instances_path                 = /var/lib/nova/instances log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.207 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] internal_service_availability_zone = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.207 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] key                            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.207 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] live_migration_retry_count     = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.207 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] log_config_append              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.207 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.208 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] log_dir                        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.208 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] log_file                       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.208 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] log_options                    = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.208 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.208 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.208 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] log_rotation_type              = size log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.208 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.209 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.209 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.209 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.209 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.209 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] long_rpc_timeout               = 1800 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.209 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] max_concurrent_builds          = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.210 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] max_concurrent_live_migrations = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.210 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] max_concurrent_snapshots       = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.210 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] max_local_block_devices        = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.210 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] max_logfile_count              = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.210 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] max_logfile_size_mb            = 20 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.210 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] maximum_instance_delete_attempts = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.211 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] metadata_listen                = 0.0.0.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.211 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] metadata_listen_port           = 8775 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.211 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] metadata_workers               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.211 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] migrate_max_retries            = -1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.211 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] mkisofs_cmd                    = /usr/bin/mkisofs log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.211 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] my_block_storage_ip            = 192.168.122.100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.211 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] my_ip                          = 192.168.122.100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.212 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] network_allocate_retries       = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.212 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] non_inheritable_image_properties = ['cache_in_nova', 'bittorrent'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.212 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] osapi_compute_listen           = 0.0.0.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.212 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] osapi_compute_listen_port      = 8774 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.212 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] osapi_compute_unique_server_name_scope =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.212 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] osapi_compute_workers          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.212 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] password_length                = 12 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.213 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] periodic_enable                = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.213 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] periodic_fuzzy_delay           = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.213 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] pointer_model                  = usbtablet log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.213 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] preallocate_images             = none log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.213 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] publish_errors                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.213 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] pybasedir                      = /usr/lib/python3.9/site-packages log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.213 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ram_allocation_ratio           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.214 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.214 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.214 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.214 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] reboot_timeout                 = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.214 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] reclaim_instance_interval      = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.214 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] record                         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.214 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] reimage_timeout_per_gb         = 20 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.215 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] report_interval                = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.215 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] rescue_timeout                 = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.215 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] reserved_host_cpus             = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.215 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] reserved_host_disk_mb          = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.215 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] reserved_host_memory_mb        = 512 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.215 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] reserved_huge_pages            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.216 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] resize_confirm_window          = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.216 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] resize_fs_using_block_device   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.216 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] resume_guests_state_on_host_boot = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.216 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] rootwrap_config                = /etc/nova/rootwrap.conf log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.216 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] rpc_response_timeout           = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.216 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] run_external_periodic_tasks    = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.216 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] running_deleted_instance_action = reap log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.217 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] running_deleted_instance_poll_interval = 1800 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.217 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] running_deleted_instance_timeout = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.217 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] scheduler_instance_sync_interval = 120 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.217 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] service_down_time              = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.217 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] servicegroup_driver            = db log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.217 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] shelved_offload_time           = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.218 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] shelved_poll_interval          = 3600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.218 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] shutdown_timeout               = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.218 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] source_is_ipv6                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.218 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ssl_only                       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.218 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] state_path                     = /var/lib/nova log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.218 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] sync_power_state_interval      = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.218 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] sync_power_state_pool_size     = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.218 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] syslog_log_facility            = LOG_USER log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.219 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] tempdir                        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.219 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] timeout_nbd                    = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.219 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] transport_url                  = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.219 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] update_resources_interval      = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.219 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] use_cow_images                 = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.219 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] use_eventlog                   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.219 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] use_journal                    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.220 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] use_json                       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.220 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] use_rootwrap_daemon            = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.220 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] use_stderr                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.220 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] use_syslog                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.220 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vcpu_pin_set                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.220 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vif_plugging_is_fatal          = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.220 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vif_plugging_timeout           = 300 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.221 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] virt_mkfs                      = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.221 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] volume_usage_poll_interval     = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.221 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] watch_log_file                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.221 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] web                            = /usr/share/spice-html5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.221 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_concurrency.disable_process_locking = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.221 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_concurrency.lock_path     = /var/lib/nova/tmp log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.222 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_metrics.metrics_buffer_size = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.222 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_metrics.metrics_enabled = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.222 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_metrics.metrics_process_name =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.222 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_metrics.metrics_socket_file = /var/tmp/metrics_collector.sock log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.222 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_metrics.metrics_thread_stop_timeout = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.222 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.auth_strategy              = keystone log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.223 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.compute_link_prefix        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.223 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.config_drive_skip_versions = 1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.223 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.dhcp_domain                =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.223 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.enable_instance_password   = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.223 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.glance_link_prefix         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.224 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.instance_list_cells_batch_fixed_size = 100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.224 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.instance_list_cells_batch_strategy = distributed log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.224 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.instance_list_per_project_cells = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.224 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.list_records_by_skipping_down_cells = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.224 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.local_metadata_per_cell    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.224 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.max_limit                  = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.224 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.metadata_cache_expiration  = 15 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.225 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.neutron_default_tenant_id  = default log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.225 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.use_forwarded_for          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.225 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.use_neutron_default_nets   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.225 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.vendordata_dynamic_connect_timeout = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.225 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.vendordata_dynamic_failure_fatal = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.225 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.vendordata_dynamic_read_timeout = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.225 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.vendordata_dynamic_ssl_certfile =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.226 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.vendordata_dynamic_targets = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.226 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.vendordata_jsonfile_path   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.226 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api.vendordata_providers       = ['StaticJSON'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.226 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.backend                  = oslo_cache.dict log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.226 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.backend_argument         = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.226 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.config_prefix            = cache.oslo log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.227 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.dead_timeout             = 60.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.227 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.debug_cache_backend      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.227 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.enable_retry_client      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.227 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.enable_socket_keepalive  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.227 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.enabled                  = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.227 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.expiration_time          = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.227 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.hashclient_retry_attempts = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.228 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.hashclient_retry_delay   = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.228 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.memcache_dead_retry      = 300 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.228 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.memcache_password        =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.228 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.memcache_pool_connection_get_timeout = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.228 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.memcache_pool_flush_on_reconnect = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.228 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.memcache_pool_maxsize    = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.228 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.memcache_pool_unused_timeout = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.229 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.memcache_sasl_enabled    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.229 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.memcache_servers         = ['localhost:11211'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.229 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.memcache_socket_timeout  = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.229 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.memcache_username        =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.229 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.proxies                  = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.229 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.retry_attempts           = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.229 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.retry_delay              = 0.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.230 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.socket_keepalive_count   = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.230 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.socket_keepalive_idle    = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.230 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.socket_keepalive_interval = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.230 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.tls_allowed_ciphers      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.230 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.tls_cafile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.230 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.tls_certfile             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.230 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.tls_enabled              = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.231 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cache.tls_keyfile              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.231 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cinder.auth_section            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.231 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cinder.auth_type               = password log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.231 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cinder.cafile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.231 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cinder.catalog_info            = volumev3:cinderv3:internalURL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.231 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cinder.certfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.232 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cinder.collect_timing          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.232 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cinder.cross_az_attach         = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.232 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cinder.debug                   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.232 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cinder.endpoint_template       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.232 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cinder.http_retries            = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.232 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cinder.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.232 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cinder.keyfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.233 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cinder.os_region_name          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.233 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cinder.split_loggers           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.233 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cinder.timeout                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.233 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] compute.consecutive_build_service_disable_threshold = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.233 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] compute.cpu_dedicated_set      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.233 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] compute.cpu_shared_set         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.234 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] compute.image_type_exclude_list = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.234 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] compute.live_migration_wait_for_vif_plug = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.234 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] compute.max_concurrent_disk_ops = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.234 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] compute.max_disk_devices_to_attach = -1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.234 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] compute.packing_host_numa_cells_allocation_strategy = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.234 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] compute.provider_config_location = /etc/nova/provider_config/ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.234 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] compute.resource_provider_association_refresh = 300 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.235 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] compute.shutdown_retry_interval = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.235 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] compute.vmdk_allowed_types     = ['streamOptimized', 'monolithicSparse'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.235 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] conductor.workers              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.235 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] console.allowed_origins        = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.235 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] console.ssl_ciphers            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.235 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] console.ssl_minimum_version    = default log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.235 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] consoleauth.token_ttl          = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.235 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.cafile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.236 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.certfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.236 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.collect_timing          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.236 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.connect_retries         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.236 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.connect_retry_delay     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.236 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.endpoint_override       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.236 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.236 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.keyfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.237 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.max_version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.237 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.min_version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.237 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.region_name             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.237 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.service_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.237 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.service_type            = accelerator log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.237 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.split_loggers           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.238 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.status_code_retries     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.238 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.status_code_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.238 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.timeout                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.238 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.valid_interfaces        = ['internal', 'public'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.238 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] cyborg.version                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.238 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.backend               = sqlalchemy log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.239 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.connection            = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.239 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.connection_debug      = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.239 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.connection_parameters =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.239 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.connection_recycle_time = 3600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.239 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.connection_trace      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.239 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.db_inc_retry_interval = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.239 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.db_max_retries        = 20 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.240 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.db_max_retry_interval = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.240 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.db_retry_interval     = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.240 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.max_overflow          = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.240 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.max_pool_size         = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.240 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.max_retries           = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.240 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.mysql_enable_ndb      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.240 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.mysql_sql_mode        = TRADITIONAL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.241 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.mysql_wsrep_sync_wait = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.241 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.pool_timeout          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.241 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.retry_interval        = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.241 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.slave_connection      = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.241 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] database.sqlite_synchronous    = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.241 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.backend           = sqlalchemy log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.242 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.connection        = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.242 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.connection_debug  = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.242 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.connection_parameters =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.242 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.connection_recycle_time = 3600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.242 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.connection_trace  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.242 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.db_inc_retry_interval = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.242 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.db_max_retries    = 20 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.243 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.db_max_retry_interval = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.243 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.db_retry_interval = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.243 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.max_overflow      = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.243 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.max_pool_size     = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.243 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.max_retries       = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.243 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.mysql_enable_ndb  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.244 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.mysql_sql_mode    = TRADITIONAL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.244 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.mysql_wsrep_sync_wait = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.244 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.pool_timeout      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.244 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.retry_interval    = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.244 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.slave_connection  = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.244 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] api_database.sqlite_synchronous = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.245 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] devices.enabled_mdev_types     = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.245 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ephemeral_storage_encryption.cipher = aes-xts-plain64 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.245 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ephemeral_storage_encryption.enabled = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.245 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ephemeral_storage_encryption.key_size = 512 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.245 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.api_servers             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.245 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.cafile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.246 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.certfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.246 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.collect_timing          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.246 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.connect_retries         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.246 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.connect_retry_delay     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.246 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.debug                   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.246 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.default_trusted_certificate_ids = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.247 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.enable_certificate_validation = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.247 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.enable_rbd_download     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.247 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.endpoint_override       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.247 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.247 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.keyfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.247 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.max_version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.247 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.min_version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.248 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.num_retries             = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.248 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.rbd_ceph_conf           =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.248 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.rbd_connect_timeout     = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.248 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.rbd_pool                =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.248 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.rbd_user                =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.248 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.region_name             = regionOne log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.249 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.service_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.249 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.service_type            = image log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.249 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.split_loggers           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.249 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.status_code_retries     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.249 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.status_code_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.249 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.timeout                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.249 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.valid_interfaces        = ['internal'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.250 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.verify_glance_signatures = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.250 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] glance.version                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.250 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] guestfs.debug                  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.250 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.config_drive_cdrom      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.250 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.config_drive_inject_password = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.250 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.dynamic_memory_ratio    = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.251 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.enable_instance_metrics_collection = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.251 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.enable_remotefx         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.251 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.instances_path_share    =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.251 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.iscsi_initiator_list    = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.251 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.limit_cpu_features      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.251 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.mounted_disk_query_retry_count = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.251 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.mounted_disk_query_retry_interval = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.252 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.power_state_check_timeframe = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.252 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.power_state_event_polling_interval = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.252 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.qemu_img_cmd            = qemu-img.exe log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.252 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.use_multipath_io        = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.252 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.volume_attach_retry_count = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.252 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.volume_attach_retry_interval = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.253 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.vswitch_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.253 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] hyperv.wait_soft_reboot_seconds = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.253 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] mks.enabled                    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.253 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] mks.mksproxy_base_url          = http://127.0.0.1:6090/ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.253 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] image_cache.manager_interval   = 2400 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.254 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] image_cache.precache_concurrency = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.254 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] image_cache.remove_unused_base_images = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.254 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] image_cache.remove_unused_original_minimum_age_seconds = 86400 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.254 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] image_cache.remove_unused_resized_minimum_age_seconds = 3600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.255 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] image_cache.subdirectory_name  = _base log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.255 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.api_max_retries         = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.255 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.api_retry_interval      = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.255 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.auth_section            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.255 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.auth_type               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.256 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.cafile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.256 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.certfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.256 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.collect_timing          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.256 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.connect_retries         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.256 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.connect_retry_delay     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.256 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.endpoint_override       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.256 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.257 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.keyfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.257 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.max_version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.257 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.min_version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.257 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.partition_key           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.257 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.peer_list               = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.257 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.region_name             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.257 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.serial_console_state_timeout = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.258 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.service_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.258 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.service_type            = baremetal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.258 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.split_loggers           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.258 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.status_code_retries     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.258 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.status_code_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.258 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.timeout                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.259 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.valid_interfaces        = ['internal', 'public'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.259 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ironic.version                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.259 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] key_manager.backend            = barbican log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.259 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] key_manager.fixed_key          = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.259 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican.auth_endpoint         = http://localhost/identity/v3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.259 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican.barbican_api_version  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.260 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican.barbican_endpoint     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.260 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican.barbican_endpoint_type = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.260 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican.barbican_region_name  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.260 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican.cafile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.260 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican.certfile              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.261 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican.collect_timing        = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.261 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican.insecure              = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.261 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican.keyfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.261 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican.number_of_retries     = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.261 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican.retry_delay           = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.262 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican.send_service_user_token = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.262 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican.split_loggers         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.262 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican.timeout               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.262 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican.verify_ssl            = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.262 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican.verify_ssl_path       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.263 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican_service_user.auth_section = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.263 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican_service_user.auth_type = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.263 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican_service_user.cafile   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.263 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican_service_user.certfile = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.263 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican_service_user.collect_timing = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.264 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican_service_user.insecure = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.264 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican_service_user.keyfile  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.264 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican_service_user.split_loggers = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.264 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] barbican_service_user.timeout  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.264 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vault.approle_role_id          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.264 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vault.approle_secret_id        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.265 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vault.cafile                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.265 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vault.certfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.265 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vault.collect_timing           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.265 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vault.insecure                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.266 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vault.keyfile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.266 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vault.kv_mountpoint            = secret log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.266 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vault.kv_version               = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.266 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vault.namespace                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.266 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vault.root_token_id            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.267 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vault.split_loggers            = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.267 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vault.ssl_ca_crt_file          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.267 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vault.timeout                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.267 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vault.use_ssl                  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.267 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vault.vault_url                = http://127.0.0.1:8200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.267 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.cafile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.268 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.certfile              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.268 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.collect_timing        = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.268 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.connect_retries       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.268 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.connect_retry_delay   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.268 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.endpoint_override     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.268 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.insecure              = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.268 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.keyfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.269 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.max_version           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.269 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.min_version           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.269 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.region_name           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.269 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.service_name          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.269 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.service_type          = identity log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.269 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.split_loggers         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.270 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.status_code_retries   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.270 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.status_code_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.270 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.timeout               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.270 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.valid_interfaces      = ['internal', 'public'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.270 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] keystone.version               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
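[annotation] The nineteen keystone.* lines above (and every group that follows) are produced by oslo.config's ConfigOpts.log_opt_values(), which nova-compute calls once at startup to dump every registered option at DEBUG level; that is why each line ends with log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609. A minimal sketch of that mechanism, with illustrative option names:

    import logging

    from oslo_config import cfg

    logging.basicConfig(level=logging.DEBUG)
    LOG = logging.getLogger(__name__)

    CONF = cfg.ConfigOpts()
    CONF.register_opts(
        [
            cfg.StrOpt('cafile'),  # defaults to None, as logged above
            cfg.ListOpt('valid_interfaces', default=['internal', 'public']),
        ],
        group='keystone',
    )

    # Emits one "keystone.<option> = <value>" DEBUG line per registered
    # option -- exactly the shape of the lines above.
    CONF.log_opt_values(LOG, logging.DEBUG)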
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.270 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.connection_uri         =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.271 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.cpu_mode               = host-model log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.271 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.cpu_model_extra_flags  = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.271 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.cpu_models             = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.271 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.cpu_power_governor_high = performance log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.271 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.cpu_power_governor_low = powersave log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.271 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.cpu_power_management   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.272 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.cpu_power_management_strategy = cpu_state log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.272 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.device_detach_attempts = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.272 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.device_detach_timeout  = 20 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.272 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.disk_cachemodes        = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.272 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.disk_prefix            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.272 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.enabled_perf_events    = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.272 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.file_backed_memory     = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.273 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.gid_maps               = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.273 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.hw_disk_discard        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.273 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.hw_machine_type        = ['x86_64=q35'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.273 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.images_rbd_ceph_conf   = /etc/ceph/ceph.conf log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.273 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.images_rbd_glance_copy_poll_interval = 15 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.273 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.images_rbd_glance_copy_timeout = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.274 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.images_rbd_glance_store_name = default_backend log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.274 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.images_rbd_pool        = vms log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.274 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.images_type            = rbd log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.274 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.images_volume_group    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.274 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.inject_key             = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.274 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.inject_partition       = -2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.274 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.inject_password        = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.275 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.iscsi_iface            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.275 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.iser_use_multipath     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.275 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.live_migration_bandwidth = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.275 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.live_migration_completion_timeout = 800 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.275 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.live_migration_downtime = 500 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.275 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.live_migration_downtime_delay = 75 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.276 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.live_migration_downtime_steps = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.276 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.live_migration_inbound_addr = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.276 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.live_migration_permit_auto_converge = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.276 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.live_migration_permit_post_copy = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.276 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.live_migration_scheme  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.276 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.live_migration_timeout_action = force_complete log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.277 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.live_migration_tunnelled = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.277 2 WARNING oslo_config.cfg [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] Deprecated: Option "live_migration_uri" from group "libvirt" is deprecated for removal (
Oct 11 02:05:29 compute-0 nova_compute[355868]: live_migration_uri is deprecated for removal in favor of two other options that
Oct 11 02:05:29 compute-0 nova_compute[355868]: allow changing the live migration scheme and target URI: ``live_migration_scheme``
Oct 11 02:05:29 compute-0 nova_compute[355868]: and ``live_migration_inbound_addr`` respectively.
Oct 11 02:05:29 compute-0 nova_compute[355868]: ).  Its value may be silently ignored in the future.
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.277 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.live_migration_uri     = qemu+tls://%s/system log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
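[annotation] Per the deprecation warning above, the same target URI can be expressed with the two replacement options. A simplified, hypothetical helper (nova's real logic lives in its libvirt driver) showing how scheme and inbound address compose the qemu+tls://%s/system template:

    # Hypothetical sketch only: compose the migration URI from the two
    # replacement options named in the warning above.
    def migration_uri(dest_host, scheme='tls', inbound_addr=None):
        host = inbound_addr or dest_host               # live_migration_inbound_addr
        return 'qemu+%s://%s/system' % (scheme, host)  # live_migration_scheme

    assert migration_uri('compute-1') == 'qemu+tls://compute-1/system'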
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.277 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.live_migration_with_native_tls = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.277 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.max_queues             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.277 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.mem_stats_period_seconds = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.278 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.nfs_mount_options      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.278 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.nfs_mount_point_base   = /var/lib/nova/mnt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.278 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.num_aoe_discover_tries = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.278 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.num_iser_scan_tries    = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.278 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.num_memory_encrypted_guests = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.278 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.num_nvme_discover_tries = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.279 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.num_pcie_ports         = 24 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.279 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.num_volume_scan_tries  = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.279 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.pmem_namespaces        = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.279 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.quobyte_client_cfg     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.279 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.quobyte_mount_point_base = /var/lib/nova/mnt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.279 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.rbd_connect_timeout    = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.280 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.rbd_destroy_volume_retries = 12 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.280 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.rbd_destroy_volume_retry_interval = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.280 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.rbd_secret_uuid        = 3c7617c3-7a20-523e-a9de-20c0d6ba41da log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.280 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.rbd_user               = openstack log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.280 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.realtime_scheduler_priority = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.280 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.remote_filesystem_transport = ssh log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.280 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.rescue_image_id        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.281 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.rescue_kernel_id       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.281 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.rescue_ramdisk_id      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.281 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.rng_dev_path           = /dev/urandom log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.281 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.rx_queue_size          = 512 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.281 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.smbfs_mount_options    =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.281 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.smbfs_mount_point_base = /var/lib/nova/mnt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.281 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.snapshot_compression   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.282 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.snapshot_image_format  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.282 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.snapshots_directory    = /var/lib/nova/instances/snapshots log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.282 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.sparse_logical_volumes = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.282 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.swtpm_enabled          = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.282 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.swtpm_group            = tss log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.282 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.swtpm_user             = tss log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.283 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.sysinfo_serial         = unique log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.283 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.tx_queue_size          = 512 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.283 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.uid_maps               = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.283 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.use_virtio_for_bridges = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.283 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.virt_type              = kvm log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.283 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.volume_clear           = zero log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.284 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.volume_clear_size      = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.284 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.volume_use_multipath   = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.284 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.vzstorage_cache_path   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.284 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.vzstorage_log_path     = /var/log/vstorage/%(cluster_name)s/nova.log.gz log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.284 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.vzstorage_mount_group  = qemu log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.284 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.vzstorage_mount_opts   = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.284 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.vzstorage_mount_perms  = 0770 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.285 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.vzstorage_mount_point_base = /var/lib/nova/mnt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.285 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.vzstorage_mount_user   = stack log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.285 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] libvirt.wait_soft_reboot_seconds = 120 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
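[annotation] With libvirt.images_type = rbd, instance disks live in the Ceph pool named by images_rbd_pool. A quick connectivity check against the values dumped above, using the python-rados binding; 'client.openstack' is derived from rbd_user = openstack, and a reachable cluster is assumed:

    import rados

    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf',   # images_rbd_ceph_conf
                          name='client.openstack')          # rbd_user
    cluster.connect(timeout=5)                              # rbd_connect_timeout
    try:
        print('vms pool present:', cluster.pool_exists('vms'))  # images_rbd_pool
    finally:
        cluster.shutdown()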
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.285 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.auth_section           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.285 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.auth_type              = password log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.285 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.cafile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.286 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.certfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.286 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.collect_timing         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.286 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.connect_retries        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.286 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.connect_retry_delay    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.286 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.default_floating_pool  = nova log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.286 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.endpoint_override      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.286 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.extension_sync_interval = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.287 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.http_retries           = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.287 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.insecure               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.287 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.keyfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.287 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.max_version            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.287 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.metadata_proxy_shared_secret = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.287 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.min_version            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.288 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.ovs_bridge             = br-int log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.288 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.physnets               = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.288 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.region_name            = regionOne log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v819: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.288 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.service_metadata_proxy = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.288 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.service_name           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.288 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.service_type           = network log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.288 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.split_loggers          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.289 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.status_code_retries    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.289 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.status_code_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.289 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.timeout                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.289 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.valid_interfaces       = ['internal'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.289 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] neutron.version                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
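[annotation] neutron.service_metadata_proxy = True together with the masked metadata_proxy_shared_secret means nova's metadata API validates requests relayed by neutron's proxy: the proxy signs the instance UUID with the shared secret and nova recomputes the HMAC. A simplified sketch of that check (the secret is a placeholder, since the log masks it as ****):

    import hashlib
    import hmac

    def valid_signature(shared_secret, instance_id, signature):
        expected = hmac.new(shared_secret.encode(),
                            instance_id.encode(),
                            hashlib.sha256).hexdigest()
        # Constant-time comparison, as any HMAC check should use.
        return hmac.compare_digest(expected, signature)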
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.290 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] notifications.bdms_in_notifications = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.290 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] notifications.default_level    = INFO log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.290 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] notifications.notification_format = unversioned log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.290 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] notifications.notify_on_state_change = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.290 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] notifications.versioned_notifications_topics = ['versioned_notifications'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
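[annotation] notifications.notification_format = unversioned selects the legacy notification payloads; those are emitted through oslo.messaging roughly as sketched below (publisher id, topic and payload are assumptions for illustration, not values from this log):

    from oslo_config import cfg
    import oslo_messaging

    transport = oslo_messaging.get_notification_transport(cfg.CONF)
    notifier = oslo_messaging.Notifier(transport,
                                       publisher_id='compute.compute-0',
                                       driver='messagingv2',
                                       topics=['notifications'])
    notifier.info({}, 'compute.instance.create.end', {'state': 'active'})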
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.291 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] pci.alias                      = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.291 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] pci.device_spec                = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.291 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] pci.report_in_placement        = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.291 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.auth_section         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.291 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.auth_type            = password log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.291 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.auth_url             = https://keystone-internal.openstack.svc:5000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.292 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.cafile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.292 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.certfile             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.292 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.collect_timing       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.292 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.connect_retries      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.292 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.connect_retry_delay  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.292 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.default_domain_id    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.293 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.default_domain_name  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.293 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.domain_id            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.293 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.domain_name          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.293 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.endpoint_override    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.293 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.insecure             = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.293 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.keyfile              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.293 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.max_version          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.294 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.min_version          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.294 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.password             = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.294 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.project_domain_id    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.294 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.project_domain_name  = Default log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.294 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.project_id           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.294 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.project_name         = service log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.295 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.region_name          = regionOne log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.295 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.service_name         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.295 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.service_type         = placement log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.295 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.split_loggers        = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.295 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.status_code_retries  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.295 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.status_code_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.295 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.system_scope         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.296 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.timeout              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.296 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.trust_id             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.296 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.user_domain_id       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.296 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.user_domain_name     = Default log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.296 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.user_id              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.296 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.username             = nova log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.296 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.valid_interfaces     = ['internal'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.297 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] placement.version              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
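[annotation] The [placement] block above carries a complete keystoneauth configuration (auth_url, the service project, the internal interface, regionOne). A sketch of the equivalent client-side session, with a placeholder password since the log masks it:

    from keystoneauth1.identity import v3
    from keystoneauth1 import session

    auth = v3.Password(auth_url='https://keystone-internal.openstack.svc:5000',
                       username='nova', password='***',      # masked (****) above
                       project_name='service',
                       user_domain_name='Default',
                       project_domain_name='Default')
    sess = session.Session(auth=auth)
    resp = sess.get('/resource_providers',
                    endpoint_filter={'service_type': 'placement',
                                     'interface': 'internal',  # valid_interfaces
                                     'region_name': 'regionOne'})
    print(resp.status_code)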
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.297 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] quota.cores                    = 20 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.297 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] quota.count_usage_from_placement = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.297 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] quota.driver                   = nova.quota.DbQuotaDriver log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.297 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] quota.injected_file_content_bytes = 10240 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.297 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] quota.injected_file_path_length = 255 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.298 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] quota.injected_files           = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.298 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] quota.instances                = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.298 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] quota.key_pairs                = 100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.298 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] quota.metadata_items           = 128 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.298 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] quota.ram                      = 51200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.298 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] quota.recheck_quota            = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.298 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] quota.server_group_members     = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.299 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] quota.server_groups            = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
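[annotation] The quota.* values above cap each project at 10 instances, 20 cores and 51200 MiB of RAM. A toy admission check against those numbers, as a sketch:

    QUOTA = {'instances': 10, 'cores': 20, 'ram': 51200}   # values logged above

    def fits(used, request):
        # used/request: dicts of instances/cores/ram deltas
        return all(used[k] + request[k] <= QUOTA[k] for k in QUOTA)

    # 8 instances holding 16 cores / 40960 MiB; one more 6-core, 8 GiB VM:
    print(fits({'instances': 8, 'cores': 16, 'ram': 40960},
               {'instances': 1, 'cores': 6, 'ram': 8192}))  # False: 22 cores > 20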
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.299 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] rdp.enabled                    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.299 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] rdp.html5_proxy_base_url       = http://127.0.0.1:6083/ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.299 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] scheduler.discover_hosts_in_cells_interval = -1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.299 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] scheduler.enable_isolated_aggregate_filtering = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.299 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] scheduler.image_metadata_prefilter = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.300 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] scheduler.limit_tenants_to_placement_aggregate = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.300 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] scheduler.max_attempts         = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.300 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] scheduler.max_placement_results = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.300 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] scheduler.placement_aggregate_required_for_tenants = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.300 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] scheduler.query_placement_for_availability_zone = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.300 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] scheduler.query_placement_for_image_type_support = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.301 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] scheduler.query_placement_for_routed_network_aggregates = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.301 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] scheduler.workers              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.301 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.aggregate_image_properties_isolation_namespace = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.301 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.aggregate_image_properties_isolation_separator = . log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.301 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.available_filters = ['nova.scheduler.filters.all_filters'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.301 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.build_failure_weight_multiplier = 1000000.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.301 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.cpu_weight_multiplier = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.302 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.cross_cell_move_weight_multiplier = 1000000.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.302 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.disk_weight_multiplier = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.302 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.enabled_filters = ['ComputeFilter', 'ComputeCapabilitiesFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.302 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.host_subset_size = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.302 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.image_properties_default_architecture = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.302 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.io_ops_weight_multiplier = -1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.303 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.isolated_hosts = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.303 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.isolated_images = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.303 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.max_instances_per_host = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.303 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.max_io_ops_per_host = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.303 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.pci_in_placement = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.303 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.pci_weight_multiplier = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.303 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.ram_weight_multiplier = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.304 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.restrict_isolated_hosts_to_isolated_images = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.304 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.shuffle_best_same_weighed_hosts = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.304 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.soft_affinity_weight_multiplier = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.304 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.soft_anti_affinity_weight_multiplier = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.304 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.track_instance_changes = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.304 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] filter_scheduler.weight_classes = ['nova.scheduler.weights.all_weighers'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.304 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] metrics.required               = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.305 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] metrics.weight_multiplier      = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.305 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] metrics.weight_of_unavailable  = -10000.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.305 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] metrics.weight_setting         = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.305 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] serial_console.base_url        = ws://127.0.0.1:6083/ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.305 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] serial_console.enabled         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.306 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] serial_console.port_range      = 10000:20000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.306 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] serial_console.proxyclient_address = 127.0.0.1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.306 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] serial_console.serialproxy_host = 0.0.0.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.306 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] serial_console.serialproxy_port = 6083 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.306 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] service_user.auth_section      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.306 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] service_user.auth_type         = password log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.306 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] service_user.cafile            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.307 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] service_user.certfile          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.307 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] service_user.collect_timing    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.307 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] service_user.insecure          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.307 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] service_user.keyfile           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.307 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] service_user.send_service_user_token = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.307 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] service_user.split_loggers     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.307 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] service_user.timeout           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.308 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] spice.agent_enabled            = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.308 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] spice.enabled                  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.308 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] spice.html5proxy_base_url      = http://127.0.0.1:6082/spice_auto.html log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.308 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] spice.html5proxy_host          = 0.0.0.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.308 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] spice.html5proxy_port          = 6082 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.308 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] spice.image_compression        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.309 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] spice.jpeg_compression         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.309 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] spice.playback_compression     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.309 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] spice.server_listen            = 127.0.0.1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.309 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] spice.server_proxyclient_address = 127.0.0.1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.309 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] spice.streaming_mode           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.309 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] spice.zlib_compression         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.310 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] upgrade_levels.baseapi         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.310 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] upgrade_levels.cert            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.310 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] upgrade_levels.compute         = auto log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.310 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] upgrade_levels.conductor       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.310 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] upgrade_levels.scheduler       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.310 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vendordata_dynamic_auth.auth_section = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.311 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vendordata_dynamic_auth.auth_type = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.311 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vendordata_dynamic_auth.cafile = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.311 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vendordata_dynamic_auth.certfile = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.311 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vendordata_dynamic_auth.collect_timing = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.311 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vendordata_dynamic_auth.insecure = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.311 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vendordata_dynamic_auth.keyfile = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.311 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vendordata_dynamic_auth.split_loggers = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.312 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vendordata_dynamic_auth.timeout = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.312 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.api_retry_count         = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.312 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.ca_file                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.312 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.cache_prefix            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.312 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.cluster_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.313 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.connection_pool_size    = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.313 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.console_delay_seconds   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.313 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.datastore_regex         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.313 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.host_ip                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.313 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.host_password           = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.314 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.host_port               = 443 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.314 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.host_username           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.314 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.314 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.integration_bridge      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.314 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.maximum_objects         = 100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.314 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.pbm_default_policy      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.315 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.pbm_enabled             = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.315 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.pbm_wsdl_location       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.315 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.serial_log_dir          = /opt/vmware/vspc log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.315 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.serial_port_proxy_uri   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.315 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.serial_port_service_uri = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.316 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.task_poll_interval      = 0.5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.316 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.use_linked_clone        = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.316 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.vnc_keymap              = en-us log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.316 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.vnc_port                = 5900 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.316 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vmware.vnc_port_total          = 10000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.316 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vnc.auth_schemes               = ['none'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.317 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vnc.enabled                    = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.317 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vnc.novncproxy_base_url        = https://nova-novncproxy-cell1-public-openstack.apps-crc.testing/vnc_lite.html log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.317 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vnc.novncproxy_host            = 0.0.0.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.317 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vnc.novncproxy_port            = 6080 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.317 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vnc.server_listen              = ::0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.317 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vnc.server_proxyclient_address = 192.168.122.100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.318 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vnc.vencrypt_ca_certs          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.318 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vnc.vencrypt_client_cert       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.318 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vnc.vencrypt_client_key        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.318 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.disable_compute_service_check_for_ffu = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.318 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.disable_deep_image_inspection = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.318 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.disable_fallback_pcpu_query = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.319 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.disable_group_policy_check_upcall = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.319 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.disable_libvirt_livesnapshot = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.319 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.disable_rootwrap   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.319 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.enable_numa_live_migration = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.319 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.enable_qemu_monitor_announce_self = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.319 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.ensure_libvirt_rbd_instance_dir_cleanup = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.320 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.handle_virt_lifecycle_events = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.320 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.libvirt_disable_apic = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.320 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.never_download_image_if_on_rbd = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.320 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.qemu_monitor_announce_self_count = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.320 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.qemu_monitor_announce_self_interval = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.320 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.reserve_disk_resource_for_image_cache = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.320 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.skip_cpu_compare_at_startup = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.321 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.skip_cpu_compare_on_dest = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.321 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.skip_hypervisor_version_check_on_lm = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.321 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.skip_reserve_in_use_ironic_nodes = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.321 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.unified_limits_count_pcpu_as_vcpu = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.321 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] workarounds.wait_for_vif_plugged_event_during_hard_reboot = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.321 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] wsgi.api_paste_config          = api-paste.ini log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.322 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] wsgi.client_socket_timeout     = 900 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.322 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] wsgi.default_pool_size         = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.322 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] wsgi.keep_alive                = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.322 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] wsgi.max_header_line           = 16384 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.322 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] wsgi.secure_proxy_ssl_header   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.322 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] wsgi.ssl_ca_file               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.323 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] wsgi.ssl_cert_file             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.323 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] wsgi.ssl_key_file              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.323 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] wsgi.tcp_keepidle              = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.323 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] wsgi.wsgi_log_format           = %(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.323 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] zvm.ca_file                    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.323 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] zvm.cloud_connector_url        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.324 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] zvm.image_tmp_path             = /var/lib/nova/images log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.324 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] zvm.reachable_timeout          = 300 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.324 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_policy.enforce_new_defaults = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.324 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_policy.enforce_scope      = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.324 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_policy.policy_default_rule = default log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.324 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_policy.policy_dirs        = ['policy.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.325 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_policy.policy_file        = policy.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.325 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_policy.remote_content_type = application/x-www-form-urlencoded log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.325 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_policy.remote_ssl_ca_crt_file = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.325 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_policy.remote_ssl_client_crt_file = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.325 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_policy.remote_ssl_client_key_file = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.325 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_policy.remote_ssl_verify_server_crt = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.326 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_versionedobjects.fatal_exception_format_errors = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.326 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_middleware.http_basic_auth_user_file = /etc/htpasswd log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.326 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] remote_debug.host              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.326 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] remote_debug.port              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.326 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.amqp_auto_delete = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.326 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.amqp_durable_queues = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.327 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.conn_pool_min_size = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.327 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.conn_pool_ttl = 1200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.327 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.direct_mandatory_flag = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.327 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.enable_cancel_on_failover = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.327 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.heartbeat_in_pthread = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.327 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.heartbeat_rate = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.328 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.heartbeat_timeout_threshold = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.328 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.kombu_compression = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.328 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.kombu_failover_strategy = round-robin log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.328 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.kombu_missing_consumer_retry_timeout = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.328 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.kombu_reconnect_delay = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.328 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.rabbit_ha_queues = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.328 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.rabbit_interval_max = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.329 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.rabbit_login_method = AMQPLAIN log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.329 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.rabbit_qos_prefetch_count = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.329 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.rabbit_quorum_delivery_limit = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.329 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.rabbit_quorum_max_memory_bytes = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.329 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.rabbit_quorum_max_memory_length = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.329 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.rabbit_quorum_queue = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.330 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.rabbit_retry_backoff = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.330 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.rabbit_retry_interval = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.330 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.rabbit_transient_queues_ttl = 1800 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.330 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.rpc_conn_pool_size = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.330 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.ssl      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.330 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.ssl_ca_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.331 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.ssl_cert_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.331 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.ssl_enforce_fips_mode = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.331 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.ssl_key_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.331 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_rabbit.ssl_version =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.331 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_notifications.driver = ['noop'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.332 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_notifications.retry = -1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.332 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_notifications.topics = ['notifications'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.332 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_messaging_notifications.transport_url = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.332 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.auth_section        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.332 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.auth_type           = password log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.333 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.auth_url            = https://keystone-internal.openstack.svc:5000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.333 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.cafile              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.333 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.certfile            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.333 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.collect_timing      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.333 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.connect_retries     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.334 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.connect_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.334 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.default_domain_id   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.334 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.default_domain_name = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.334 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.domain_id           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.334 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.domain_name         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.335 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.endpoint_id         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.335 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.endpoint_override   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.335 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.insecure            = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.335 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.keyfile             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.335 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.max_version         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.335 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.min_version         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.335 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.password            = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.336 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.project_domain_id   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.336 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.project_domain_name = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.336 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.project_id          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.336 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.project_name        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.336 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.region_name         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.336 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.service_name        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.337 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.service_type        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.337 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.split_loggers       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.337 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.status_code_retries = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.337 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.status_code_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.337 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.system_scope        = all log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.337 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.timeout             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.337 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.trust_id            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.338 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.user_domain_id      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.338 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.user_domain_name    = Default log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.338 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.user_id             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.338 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.username            = nova log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.338 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.valid_interfaces    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.338 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_limit.version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.339 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_reports.file_event_handler = /var/lib/nova log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.339 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_reports.file_event_handler_interval = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.339 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] oslo_reports.log_dir           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.339 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vif_plug_linux_bridge_privileged.capabilities = [12] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.339 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vif_plug_linux_bridge_privileged.group = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.339 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vif_plug_linux_bridge_privileged.helper_command = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.340 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vif_plug_linux_bridge_privileged.logger_name = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.340 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vif_plug_linux_bridge_privileged.thread_pool_size = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.340 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vif_plug_linux_bridge_privileged.user = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.340 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vif_plug_ovs_privileged.capabilities = [12, 1] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.340 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vif_plug_ovs_privileged.group  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.340 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vif_plug_ovs_privileged.helper_command = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.341 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vif_plug_ovs_privileged.logger_name = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.341 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vif_plug_ovs_privileged.thread_pool_size = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.341 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] vif_plug_ovs_privileged.user   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.341 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] os_vif_linux_bridge.flat_interface = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.341 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] os_vif_linux_bridge.forward_bridge_interface = ['all'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.341 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] os_vif_linux_bridge.iptables_bottom_regex =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.342 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] os_vif_linux_bridge.iptables_drop_action = DROP log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.342 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] os_vif_linux_bridge.iptables_top_regex =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.342 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] os_vif_linux_bridge.network_device_mtu = 1500 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.342 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] os_vif_linux_bridge.use_ipv6   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.342 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] os_vif_linux_bridge.vlan_interface = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.342 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] os_vif_ovs.isolate_vif         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.343 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] os_vif_ovs.network_device_mtu  = 1500 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.343 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] os_vif_ovs.ovs_vsctl_timeout   = 120 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.343 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] os_vif_ovs.ovsdb_connection    = tcp:127.0.0.1:6640 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.343 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] os_vif_ovs.ovsdb_interface     = native log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.343 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] os_vif_ovs.per_port_bridge     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.343 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] os_brick.lock_path             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.344 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] os_brick.wait_mpath_device_attempts = 4 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.344 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] os_brick.wait_mpath_device_interval = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.344 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] privsep_osbrick.capabilities   = [21] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.344 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] privsep_osbrick.group          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.344 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] privsep_osbrick.helper_command = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.344 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] privsep_osbrick.logger_name    = os_brick.privileged log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.345 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] privsep_osbrick.thread_pool_size = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.345 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] privsep_osbrick.user           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.345 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] nova_sys_admin.capabilities    = [0, 1, 2, 3, 12, 21] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.345 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] nova_sys_admin.group           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.345 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] nova_sys_admin.helper_command  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.345 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] nova_sys_admin.logger_name     = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.346 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] nova_sys_admin.thread_pool_size = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.346 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] nova_sys_admin.user            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.346 2 DEBUG oslo_service.service [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2613
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.347 2 INFO nova.service [-] Starting compute node (version 27.5.2-0.20250829104910.6f8decf.el9)
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.361 2 DEBUG nova.virt.libvirt.host [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Starting native event thread _init_events /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:492
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.362 2 DEBUG nova.virt.libvirt.host [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Starting green dispatch thread _init_events /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:498
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.362 2 DEBUG nova.virt.libvirt.host [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Starting connection event dispatch thread initialize /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:620
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.363 2 DEBUG nova.virt.libvirt.host [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Connecting to libvirt: qemu:///system _get_new_connection /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:503
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.388 2 DEBUG nova.virt.libvirt.host [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Registering for lifecycle events <nova.virt.libvirt.host.Host object at 0x7f6d478b5be0> _get_new_connection /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:509
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.398 2 DEBUG nova.virt.libvirt.host [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Registering for connection events: <nova.virt.libvirt.host.Host object at 0x7f6d478b5be0> _get_new_connection /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:530
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.401 2 INFO nova.virt.libvirt.driver [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Connection event '1' reason 'None'
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.420 2 WARNING nova.virt.libvirt.driver [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Cannot update service status on host "compute-0.ctlplane.example.com" since it is not registered.: nova.exception_Remote.ComputeHostNotFound_Remote: Compute host compute-0.ctlplane.example.com could not be found.
Oct 11 02:05:29 compute-0 nova_compute[355868]: 2025-10-11 02:05:29.420 2 DEBUG nova.virt.libvirt.volume.mount [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Initialising _HostMountState generation 0 host_up /usr/lib/python3.9/site-packages/nova/virt/libvirt/volume/mount.py:130
Oct 11 02:05:29 compute-0 podman[157119]: time="2025-10-11T02:05:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:05:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:05:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45043 "" "Go-http-client/1.1"
Oct 11 02:05:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:05:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8527 "" "Go-http-client/1.1"
Oct 11 02:05:29 compute-0 python3.9[356458]: ansible-ansible.builtin.stat Invoked with path=/etc/systemd/system/edpm_nova_nvme_cleaner.service.requires follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:05:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:05:30 compute-0 ceph-mon[191930]: pgmap v819: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:30 compute-0 nova_compute[355868]: 2025-10-11 02:05:30.649 2 INFO nova.virt.libvirt.host [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Libvirt host capabilities <capabilities>
Oct 11 02:05:30 compute-0 nova_compute[355868]: 
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <host>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <uuid>c0909b4b-0860-4b28-ab6b-0ab32acb5a0f</uuid>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <cpu>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <arch>x86_64</arch>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model>EPYC-Rome-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <vendor>AMD</vendor>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <microcode version='16777317'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <signature family='23' model='49' stepping='0'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <topology sockets='8' dies='1' clusters='1' cores='1' threads='1'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <maxphysaddr mode='emulate' bits='40'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='x2apic'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='tsc-deadline'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='osxsave'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='hypervisor'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='tsc_adjust'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='spec-ctrl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='stibp'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='arch-capabilities'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='ssbd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='cmp_legacy'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='topoext'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='virt-ssbd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='lbrv'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='tsc-scale'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='vmcb-clean'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='pause-filter'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='pfthreshold'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='svme-addr-chk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='rdctl-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='skip-l1dfl-vmentry'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='mds-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature name='pschange-mc-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <pages unit='KiB' size='4'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <pages unit='KiB' size='2048'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <pages unit='KiB' size='1048576'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </cpu>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <power_management>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <suspend_mem/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </power_management>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <iommu support='no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <migration_features>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <live/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <uri_transports>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <uri_transport>tcp</uri_transport>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <uri_transport>rdma</uri_transport>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </uri_transports>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </migration_features>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <topology>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <cells num='1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <cell id='0'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:           <memory unit='KiB'>7864348</memory>
Oct 11 02:05:30 compute-0 nova_compute[355868]:           <pages unit='KiB' size='4'>1966087</pages>
Oct 11 02:05:30 compute-0 nova_compute[355868]:           <pages unit='KiB' size='2048'>0</pages>
Oct 11 02:05:30 compute-0 nova_compute[355868]:           <pages unit='KiB' size='1048576'>0</pages>
Oct 11 02:05:30 compute-0 nova_compute[355868]:           <distances>
Oct 11 02:05:30 compute-0 nova_compute[355868]:             <sibling id='0' value='10'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:           </distances>
Oct 11 02:05:30 compute-0 nova_compute[355868]:           <cpus num='8'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:             <cpu id='0' socket_id='0' die_id='0' cluster_id='65535' core_id='0' siblings='0'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:             <cpu id='1' socket_id='1' die_id='1' cluster_id='65535' core_id='0' siblings='1'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:             <cpu id='2' socket_id='2' die_id='2' cluster_id='65535' core_id='0' siblings='2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:             <cpu id='3' socket_id='3' die_id='3' cluster_id='65535' core_id='0' siblings='3'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:             <cpu id='4' socket_id='4' die_id='4' cluster_id='65535' core_id='0' siblings='4'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:             <cpu id='5' socket_id='5' die_id='5' cluster_id='65535' core_id='0' siblings='5'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:             <cpu id='6' socket_id='6' die_id='6' cluster_id='65535' core_id='0' siblings='6'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:             <cpu id='7' socket_id='7' die_id='7' cluster_id='65535' core_id='0' siblings='7'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:           </cpus>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         </cell>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </cells>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </topology>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <cache>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <bank id='0' level='2' type='both' size='512' unit='KiB' cpus='0'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <bank id='1' level='2' type='both' size='512' unit='KiB' cpus='1'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <bank id='2' level='2' type='both' size='512' unit='KiB' cpus='2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <bank id='3' level='2' type='both' size='512' unit='KiB' cpus='3'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <bank id='4' level='2' type='both' size='512' unit='KiB' cpus='4'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <bank id='5' level='2' type='both' size='512' unit='KiB' cpus='5'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <bank id='6' level='2' type='both' size='512' unit='KiB' cpus='6'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <bank id='7' level='2' type='both' size='512' unit='KiB' cpus='7'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <bank id='0' level='3' type='both' size='16' unit='MiB' cpus='0'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <bank id='1' level='3' type='both' size='16' unit='MiB' cpus='1'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <bank id='2' level='3' type='both' size='16' unit='MiB' cpus='2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <bank id='3' level='3' type='both' size='16' unit='MiB' cpus='3'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <bank id='4' level='3' type='both' size='16' unit='MiB' cpus='4'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <bank id='5' level='3' type='both' size='16' unit='MiB' cpus='5'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <bank id='6' level='3' type='both' size='16' unit='MiB' cpus='6'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <bank id='7' level='3' type='both' size='16' unit='MiB' cpus='7'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </cache>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <secmodel>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model>selinux</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <doi>0</doi>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <baselabel type='kvm'>system_u:system_r:svirt_t:s0</baselabel>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <baselabel type='qemu'>system_u:system_r:svirt_tcg_t:s0</baselabel>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </secmodel>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <secmodel>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model>dac</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <doi>0</doi>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <baselabel type='kvm'>+107:+107</baselabel>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <baselabel type='qemu'>+107:+107</baselabel>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </secmodel>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   </host>
Oct 11 02:05:30 compute-0 nova_compute[355868]: 
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <guest>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <os_type>hvm</os_type>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <arch name='i686'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <wordsize>32</wordsize>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <emulator>/usr/libexec/qemu-kvm</emulator>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='240' deprecated='yes'>pc-i440fx-rhel7.6.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine canonical='pc-i440fx-rhel7.6.0' maxCpus='240' deprecated='yes'>pc</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='4096'>pc-q35-rhel9.6.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine canonical='pc-q35-rhel9.6.0' maxCpus='4096'>q35</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.6.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710'>pc-q35-rhel9.4.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.5.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.3.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel7.6.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.4.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710'>pc-q35-rhel9.2.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.2.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710'>pc-q35-rhel9.0.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.0.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.1.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <domain type='qemu'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <domain type='kvm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </arch>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <features>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <pae/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <nonpae/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <acpi default='on' toggle='yes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <apic default='on' toggle='no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <cpuselection/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <deviceboot/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <disksnapshot default='on' toggle='no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <externalSnapshot/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </features>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   </guest>
Oct 11 02:05:30 compute-0 nova_compute[355868]: 
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <guest>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <os_type>hvm</os_type>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <arch name='x86_64'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <wordsize>64</wordsize>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <emulator>/usr/libexec/qemu-kvm</emulator>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='240' deprecated='yes'>pc-i440fx-rhel7.6.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine canonical='pc-i440fx-rhel7.6.0' maxCpus='240' deprecated='yes'>pc</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='4096'>pc-q35-rhel9.6.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine canonical='pc-q35-rhel9.6.0' maxCpus='4096'>q35</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.6.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710'>pc-q35-rhel9.4.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.5.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.3.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel7.6.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.4.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710'>pc-q35-rhel9.2.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.2.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710'>pc-q35-rhel9.0.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.0.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.1.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <domain type='qemu'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <domain type='kvm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </arch>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <features>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <acpi default='on' toggle='yes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <apic default='on' toggle='no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <cpuselection/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <deviceboot/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <disksnapshot default='on' toggle='no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <externalSnapshot/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </features>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   </guest>
Oct 11 02:05:30 compute-0 nova_compute[355868]: 
Oct 11 02:05:30 compute-0 nova_compute[355868]: </capabilities>
Oct 11 02:05:30 compute-0 nova_compute[355868]: 
Oct 11 02:05:30 compute-0 nova_compute[355868]: 2025-10-11 02:05:30.664 2 DEBUG nova.virt.libvirt.host [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Getting domain capabilities for i686 via machine types: {'pc', 'q35'} _get_machine_types /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:952
Oct 11 02:05:30 compute-0 sshd-session[356180]: Failed password for root from 121.227.153.123 port 55626 ssh2
Oct 11 02:05:30 compute-0 nova_compute[355868]: 2025-10-11 02:05:30.722 2 DEBUG nova.virt.libvirt.host [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Libvirt host hypervisor capabilities for arch=i686 and machine_type=pc:
Oct 11 02:05:30 compute-0 nova_compute[355868]: <domainCapabilities>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <path>/usr/libexec/qemu-kvm</path>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <domain>kvm</domain>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <machine>pc-i440fx-rhel7.6.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <arch>i686</arch>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <vcpu max='240'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <iothreads supported='yes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <os supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <enum name='firmware'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <loader supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <value>/usr/share/OVMF/OVMF_CODE.secboot.fd</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='type'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>rom</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>pflash</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='readonly'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>yes</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>no</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='secure'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>no</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </loader>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   </os>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <cpu>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <mode name='host-passthrough' supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='hostPassthroughMigratable'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>on</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>off</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </mode>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <mode name='maximum' supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='maximumMigratable'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>on</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>off</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </mode>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <mode name='host-model' supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model fallback='forbid'>EPYC-Rome</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <vendor>AMD</vendor>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <maxphysaddr mode='passthrough' limit='40'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='x2apic'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='tsc-deadline'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='hypervisor'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='tsc_adjust'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='spec-ctrl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='stibp'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='arch-capabilities'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='ssbd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='cmp_legacy'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='overflow-recov'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='succor'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='ibrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='amd-ssbd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='virt-ssbd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='lbrv'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='tsc-scale'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='vmcb-clean'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='flushbyasid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='pause-filter'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='pfthreshold'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='svme-addr-chk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='lfence-always-serializing'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='rdctl-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='skip-l1dfl-vmentry'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='mds-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='pschange-mc-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='gds-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='rfds-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='disable' name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </mode>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <mode name='custom' supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='486-v1'>486</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>486-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Broadwell-v1'>Broadwell</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Broadwell-v3'>Broadwell-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Broadwell-v2'>Broadwell-noTSX</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-noTSX'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Broadwell-v4'>Broadwell-noTSX-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-noTSX-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Broadwell-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Broadwell-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Broadwell-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Broadwell-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Cascadelake-Server-v1'>Cascadelake-Server</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Cascadelake-Server-v3'>Cascadelake-Server-noTSX</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-noTSX'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v5</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v5'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='Intel' canonical='Conroe-v1'>Conroe</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='Intel'>Conroe-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Cooperlake-v1'>Cooperlake</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cooperlake'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cooperlake-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cooperlake-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cooperlake-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cooperlake-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Denverton-v1'>Denverton</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Denverton'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mpx'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Denverton-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Denverton-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mpx'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Denverton-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Denverton-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Denverton-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Denverton-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Hygon' canonical='Dhyana-v1'>Dhyana</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Hygon'>Dhyana-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Hygon'>Dhyana-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Dhyana-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD' canonical='EPYC-v1'>EPYC</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='EPYC-Genoa-v1'>EPYC-Genoa</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Genoa'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amd-psfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='auto-ibrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='stibp-always-on'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Genoa-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Genoa-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amd-psfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='auto-ibrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='stibp-always-on'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD' canonical='EPYC-v2'>EPYC-IBPB</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='EPYC-Milan-v1'>EPYC-Milan</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Milan'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Milan-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Milan-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Milan-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Milan-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amd-psfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='stibp-always-on'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='EPYC-Rome-v1'>EPYC-Rome</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Rome'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Rome-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Rome-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Rome-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Rome-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Rome-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Rome-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD'>EPYC-Rome-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD'>EPYC-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD'>EPYC-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='GraniteRapids-v1'>GraniteRapids</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='GraniteRapids'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='prefetchiti'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>GraniteRapids-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='GraniteRapids-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='prefetchiti'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>GraniteRapids-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='GraniteRapids-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx10'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx10-128'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx10-256'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx10-512'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='prefetchiti'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Haswell-v1'>Haswell</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Haswell-v3'>Haswell-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Haswell-v2'>Haswell-noTSX</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-noTSX'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Haswell-v4'>Haswell-noTSX-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-noTSX-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Haswell-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Haswell-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Haswell-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Haswell-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Icelake-Server-v1'>Icelake-Server</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Icelake-Server-v2'>Icelake-Server-noTSX</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-noTSX'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v5</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v5'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v6</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v6'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v7</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v7'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='IvyBridge-v1'>IvyBridge</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='IvyBridge'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='IvyBridge-v2'>IvyBridge-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='IvyBridge-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>IvyBridge-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='IvyBridge-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>IvyBridge-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='IvyBridge-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='KnightsMill-v1'>KnightsMill</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='KnightsMill'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-4fmaps'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-4vnniw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512er'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512pf'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>KnightsMill-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='KnightsMill-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-4fmaps'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-4vnniw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512er'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512pf'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='Nehalem-v1'>Nehalem</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='Nehalem-v2'>Nehalem-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>Nehalem-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>Nehalem-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G1-v1'>Opteron_G1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G1-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G2-v1'>Opteron_G2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G2-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G3-v1'>Opteron_G3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G3-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='Opteron_G4-v1'>Opteron_G4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Opteron_G4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fma4'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xop'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>Opteron_G4-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Opteron_G4-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fma4'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xop'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='Opteron_G5-v1'>Opteron_G5</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Opteron_G5'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fma4'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tbm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xop'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>Opteron_G5-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Opteron_G5-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fma4'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tbm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xop'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='Intel' canonical='Penryn-v1'>Penryn</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='Intel'>Penryn-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='SandyBridge-v1'>SandyBridge</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='SandyBridge-v2'>SandyBridge-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>SandyBridge-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>SandyBridge-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='SapphireRapids-v1'>SapphireRapids</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SapphireRapids'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>SapphireRapids-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SapphireRapids-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>SapphireRapids-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SapphireRapids-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>SapphireRapids-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SapphireRapids-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='SierraForest-v1'>SierraForest</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SierraForest'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-ne-convert'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cmpccxadd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>SierraForest-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SierraForest-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-ne-convert'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cmpccxadd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v1'>Skylake-Client</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v2'>Skylake-Client-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v3'>Skylake-Client-noTSX-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-noTSX-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Client-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Client-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Client-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Client-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v1'>Skylake-Server</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v2'>Skylake-Server-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v3'>Skylake-Server-noTSX-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-noTSX-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v5</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v5'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Snowridge-v1'>Snowridge</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Snowridge'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='core-capability'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mpx'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='split-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Snowridge-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Snowridge-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='core-capability'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mpx'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='split-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Snowridge-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Snowridge-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='core-capability'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='split-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Snowridge-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Snowridge-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='core-capability'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='split-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Snowridge-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Snowridge-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='Westmere-v1'>Westmere</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='Westmere-v2'>Westmere-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>Westmere-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>Westmere-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='AMD' canonical='athlon-v1'>athlon</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='athlon'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='3dnow'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='3dnowext'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='AMD'>athlon-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='athlon-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='3dnow'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='3dnowext'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='core2duo-v1'>core2duo</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='core2duo'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel'>core2duo-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='core2duo-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='coreduo-v1'>coreduo</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='coreduo'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel'>coreduo-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='coreduo-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='kvm32-v1'>kvm32</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>kvm32-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='kvm64-v1'>kvm64</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>kvm64-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='n270-v1'>n270</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='n270'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel'>n270-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='n270-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium-v1'>pentium</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium2-v1'>pentium2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium2-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium3-v1'>pentium3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium3-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='AMD' canonical='phenom-v1'>phenom</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='phenom'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='3dnow'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='3dnowext'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='AMD'>phenom-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='phenom-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='3dnow'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='3dnowext'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='qemu32-v1'>qemu32</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>qemu32-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='qemu64-v1'>qemu64</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>qemu64-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </mode>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   </cpu>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <memoryBacking supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <enum name='sourceType'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <value>file</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <value>anonymous</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <value>memfd</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   </memoryBacking>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <devices>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <disk supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='diskDevice'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>disk</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>cdrom</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>floppy</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>lun</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='bus'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>ide</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>fdc</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>scsi</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtio</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>usb</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>sata</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='model'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtio</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtio-transitional</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtio-non-transitional</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </disk>
Oct 11 02:05:30 compute-0 sudo[356620]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qpxtcuhxzkfxwxpodqibbojkycxztlur ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148330.252407-1841-159585472474926/AnsiballZ_podman_container.py'
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <graphics supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='type'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>vnc</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>egl-headless</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>dbus</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </graphics>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <video supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='modelType'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>vga</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>cirrus</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtio</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>none</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>bochs</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>ramfb</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </video>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <hostdev supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='mode'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>subsystem</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='startupPolicy'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>default</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>mandatory</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>requisite</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>optional</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='subsysType'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>usb</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>pci</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>scsi</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='capsType'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='pciBackend'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </hostdev>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <rng supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='model'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtio</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtio-transitional</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtio-non-transitional</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='backendModel'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>random</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>egd</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>builtin</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </rng>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <filesystem supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='driverType'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>path</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>handle</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtiofs</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </filesystem>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <tpm supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='model'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>tpm-tis</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>tpm-crb</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='backendModel'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>emulator</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>external</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='backendVersion'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>2.0</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </tpm>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <redirdev supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='bus'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>usb</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </redirdev>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <channel supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='type'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>pty</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>unix</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </channel>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <crypto supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='model'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='type'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>qemu</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='backendModel'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>builtin</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </crypto>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <interface supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='backendType'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>default</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>passt</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </interface>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <panic supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='model'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>isa</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>hyperv</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </panic>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   </devices>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <features>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <gic supported='no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <vmcoreinfo supported='yes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <genid supported='yes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <backingStoreInput supported='yes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <backup supported='yes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <async-teardown supported='yes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <ps2 supported='yes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <sev supported='no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <sgx supported='no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <hyperv supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='features'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>relaxed</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>vapic</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>spinlocks</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>vpindex</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>runtime</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>synic</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>stimer</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>reset</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>vendor_id</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>frequencies</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>reenlightenment</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>tlbflush</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>ipi</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>avic</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>emsr_bitmap</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>xmm_input</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </hyperv>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <launchSecurity supported='no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   </features>
Oct 11 02:05:30 compute-0 nova_compute[355868]: </domainCapabilities>
Oct 11 02:05:30 compute-0 nova_compute[355868]:  _get_domain_capabilities /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1037
Oct 11 02:05:30 compute-0 sudo[356620]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:05:30 compute-0 nova_compute[355868]: 2025-10-11 02:05:30.737 2 DEBUG nova.virt.libvirt.host [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Libvirt host hypervisor capabilities for arch=i686 and machine_type=q35:
Oct 11 02:05:30 compute-0 nova_compute[355868]: <domainCapabilities>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <path>/usr/libexec/qemu-kvm</path>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <domain>kvm</domain>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <machine>pc-q35-rhel9.6.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <arch>i686</arch>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <vcpu max='4096'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <iothreads supported='yes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <os supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <enum name='firmware'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <loader supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <value>/usr/share/OVMF/OVMF_CODE.secboot.fd</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='type'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>rom</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>pflash</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='readonly'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>yes</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>no</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='secure'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>no</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </loader>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   </os>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <cpu>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <mode name='host-passthrough' supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='hostPassthroughMigratable'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>on</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>off</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </mode>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <mode name='maximum' supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='maximumMigratable'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>on</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>off</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </mode>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <mode name='host-model' supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model fallback='forbid'>EPYC-Rome</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <vendor>AMD</vendor>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <maxphysaddr mode='passthrough' limit='40'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='x2apic'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='tsc-deadline'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='hypervisor'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='tsc_adjust'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='spec-ctrl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='stibp'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='arch-capabilities'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='ssbd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='cmp_legacy'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='overflow-recov'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='succor'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='ibrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='amd-ssbd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='virt-ssbd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='lbrv'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='tsc-scale'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='vmcb-clean'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='flushbyasid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='pause-filter'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='pfthreshold'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='svme-addr-chk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='lfence-always-serializing'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='rdctl-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='skip-l1dfl-vmentry'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='mds-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='pschange-mc-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='gds-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='rfds-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='disable' name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </mode>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <mode name='custom' supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='486-v1'>486</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>486-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Broadwell-v1'>Broadwell</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Broadwell-v3'>Broadwell-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Broadwell-v2'>Broadwell-noTSX</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-noTSX'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Broadwell-v4'>Broadwell-noTSX-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-noTSX-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Broadwell-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Broadwell-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Broadwell-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Broadwell-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Cascadelake-Server-v1'>Cascadelake-Server</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Cascadelake-Server-v3'>Cascadelake-Server-noTSX</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-noTSX'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v5</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v5'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='Intel' canonical='Conroe-v1'>Conroe</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='Intel'>Conroe-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Cooperlake-v1'>Cooperlake</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cooperlake'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cooperlake-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cooperlake-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cooperlake-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cooperlake-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Denverton-v1'>Denverton</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Denverton'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mpx'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Denverton-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Denverton-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mpx'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Denverton-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Denverton-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Denverton-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Denverton-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Hygon' canonical='Dhyana-v1'>Dhyana</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Hygon'>Dhyana-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Hygon'>Dhyana-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Dhyana-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD' canonical='EPYC-v1'>EPYC</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='EPYC-Genoa-v1'>EPYC-Genoa</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Genoa'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amd-psfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='auto-ibrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='stibp-always-on'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Genoa-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Genoa-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amd-psfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='auto-ibrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='stibp-always-on'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD' canonical='EPYC-v2'>EPYC-IBPB</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='EPYC-Milan-v1'>EPYC-Milan</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Milan'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Milan-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Milan-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Milan-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Milan-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amd-psfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='stibp-always-on'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='EPYC-Rome-v1'>EPYC-Rome</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Rome'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Rome-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Rome-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Rome-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Rome-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Rome-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Rome-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD'>EPYC-Rome-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD'>EPYC-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD'>EPYC-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='GraniteRapids-v1'>GraniteRapids</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='GraniteRapids'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='prefetchiti'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>GraniteRapids-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='GraniteRapids-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='prefetchiti'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>GraniteRapids-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='GraniteRapids-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx10'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx10-128'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx10-256'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx10-512'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='prefetchiti'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Haswell-v1'>Haswell</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Haswell-v3'>Haswell-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Haswell-v2'>Haswell-noTSX</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-noTSX'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Haswell-v4'>Haswell-noTSX-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-noTSX-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Haswell-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Haswell-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Haswell-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Haswell-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Icelake-Server-v1'>Icelake-Server</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Icelake-Server-v2'>Icelake-Server-noTSX</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-noTSX'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v5</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v5'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v6</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v6'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v7</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v7'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='IvyBridge-v1'>IvyBridge</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='IvyBridge'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='IvyBridge-v2'>IvyBridge-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='IvyBridge-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>IvyBridge-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='IvyBridge-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>IvyBridge-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='IvyBridge-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='KnightsMill-v1'>KnightsMill</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='KnightsMill'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-4fmaps'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-4vnniw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512er'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512pf'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>KnightsMill-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='KnightsMill-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-4fmaps'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-4vnniw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512er'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512pf'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='Nehalem-v1'>Nehalem</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='Nehalem-v2'>Nehalem-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>Nehalem-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>Nehalem-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G1-v1'>Opteron_G1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G1-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G2-v1'>Opteron_G2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G2-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G3-v1'>Opteron_G3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G3-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='Opteron_G4-v1'>Opteron_G4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Opteron_G4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fma4'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xop'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>Opteron_G4-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Opteron_G4-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fma4'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xop'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='Opteron_G5-v1'>Opteron_G5</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Opteron_G5'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fma4'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tbm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xop'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>Opteron_G5-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Opteron_G5-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fma4'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tbm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xop'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='Intel' canonical='Penryn-v1'>Penryn</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='Intel'>Penryn-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='SandyBridge-v1'>SandyBridge</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='SandyBridge-v2'>SandyBridge-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>SandyBridge-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>SandyBridge-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='SapphireRapids-v1'>SapphireRapids</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SapphireRapids'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>SapphireRapids-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SapphireRapids-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>SapphireRapids-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SapphireRapids-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>SapphireRapids-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SapphireRapids-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='SierraForest-v1'>SierraForest</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SierraForest'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-ne-convert'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cmpccxadd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>SierraForest-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SierraForest-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-ne-convert'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cmpccxadd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v1'>Skylake-Client</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v2'>Skylake-Client-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v3'>Skylake-Client-noTSX-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-noTSX-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Client-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Client-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Client-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Client-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v1'>Skylake-Server</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v2'>Skylake-Server-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v3'>Skylake-Server-noTSX-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-noTSX-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v5</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v5'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Snowridge-v1'>Snowridge</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Snowridge'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='core-capability'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mpx'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='split-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Snowridge-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Snowridge-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='core-capability'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mpx'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='split-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Snowridge-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Snowridge-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='core-capability'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='split-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Snowridge-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Snowridge-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='core-capability'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='split-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Snowridge-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Snowridge-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='Westmere-v1'>Westmere</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='Westmere-v2'>Westmere-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>Westmere-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>Westmere-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='AMD' canonical='athlon-v1'>athlon</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='athlon'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='3dnow'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='3dnowext'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='AMD'>athlon-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='athlon-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='3dnow'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='3dnowext'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='core2duo-v1'>core2duo</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='core2duo'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel'>core2duo-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='core2duo-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='coreduo-v1'>coreduo</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='coreduo'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel'>coreduo-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='coreduo-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='kvm32-v1'>kvm32</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>kvm32-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='kvm64-v1'>kvm64</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>kvm64-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='n270-v1'>n270</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='n270'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel'>n270-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='n270-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium-v1'>pentium</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium2-v1'>pentium2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium2-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium3-v1'>pentium3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium3-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='AMD' canonical='phenom-v1'>phenom</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='phenom'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='3dnow'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='3dnowext'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='AMD'>phenom-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='phenom-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='3dnow'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='3dnowext'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='qemu32-v1'>qemu32</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>qemu32-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='qemu64-v1'>qemu64</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>qemu64-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </mode>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   </cpu>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <memoryBacking supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <enum name='sourceType'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <value>file</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <value>anonymous</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <value>memfd</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   </memoryBacking>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <devices>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <disk supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='diskDevice'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>disk</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>cdrom</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>floppy</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>lun</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='bus'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>fdc</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>scsi</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtio</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>usb</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>sata</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='model'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtio</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtio-transitional</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtio-non-transitional</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </disk>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <graphics supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='type'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>vnc</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>egl-headless</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>dbus</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </graphics>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <video supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='modelType'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>vga</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>cirrus</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtio</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>none</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>bochs</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>ramfb</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </video>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <hostdev supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='mode'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>subsystem</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='startupPolicy'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>default</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>mandatory</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>requisite</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>optional</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='subsysType'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>usb</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>pci</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>scsi</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='capsType'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='pciBackend'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </hostdev>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <rng supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='model'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtio</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtio-transitional</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtio-non-transitional</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='backendModel'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>random</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>egd</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>builtin</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </rng>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <filesystem supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='driverType'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>path</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>handle</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>virtiofs</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </filesystem>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <tpm supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='model'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>tpm-tis</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>tpm-crb</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='backendModel'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>emulator</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>external</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='backendVersion'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>2.0</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </tpm>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <redirdev supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='bus'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>usb</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </redirdev>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <channel supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='type'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>pty</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>unix</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </channel>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <crypto supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='model'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='type'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>qemu</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='backendModel'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>builtin</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </crypto>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <interface supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='backendType'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>default</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>passt</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </interface>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <panic supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='model'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>isa</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>hyperv</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </panic>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   </devices>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <features>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <gic supported='no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <vmcoreinfo supported='yes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <genid supported='yes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <backingStoreInput supported='yes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <backup supported='yes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <async-teardown supported='yes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <ps2 supported='yes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <sev supported='no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <sgx supported='no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <hyperv supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='features'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>relaxed</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>vapic</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>spinlocks</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>vpindex</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>runtime</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>synic</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>stimer</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>reset</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>vendor_id</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>frequencies</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>reenlightenment</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>tlbflush</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>ipi</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>avic</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>emsr_bitmap</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>xmm_input</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </hyperv>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <launchSecurity supported='no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   </features>
Oct 11 02:05:30 compute-0 nova_compute[355868]: </domainCapabilities>
Oct 11 02:05:30 compute-0 nova_compute[355868]:  _get_domain_capabilities /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1037
Oct 11 02:05:30 compute-0 nova_compute[355868]: 2025-10-11 02:05:30.788 2 DEBUG nova.virt.libvirt.host [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Getting domain capabilities for x86_64 via machine types: {'pc', 'q35'} _get_machine_types /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:952
Oct 11 02:05:30 compute-0 nova_compute[355868]: 2025-10-11 02:05:30.798 2 DEBUG nova.virt.libvirt.host [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Libvirt host hypervisor capabilities for arch=x86_64 and machine_type=pc:
Oct 11 02:05:30 compute-0 nova_compute[355868]: <domainCapabilities>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <path>/usr/libexec/qemu-kvm</path>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <domain>kvm</domain>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <machine>pc-i440fx-rhel7.6.0</machine>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <arch>x86_64</arch>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <vcpu max='240'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <iothreads supported='yes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <os supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <enum name='firmware'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <loader supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <value>/usr/share/OVMF/OVMF_CODE.secboot.fd</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='type'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>rom</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>pflash</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='readonly'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>yes</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>no</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='secure'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>no</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </loader>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   </os>
Oct 11 02:05:30 compute-0 nova_compute[355868]:   <cpu>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <mode name='host-passthrough' supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='hostPassthroughMigratable'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>on</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>off</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </mode>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <mode name='maximum' supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <enum name='maximumMigratable'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>on</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <value>off</value>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </mode>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <mode name='host-model' supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model fallback='forbid'>EPYC-Rome</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <vendor>AMD</vendor>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <maxphysaddr mode='passthrough' limit='40'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='x2apic'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='tsc-deadline'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='hypervisor'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='tsc_adjust'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='spec-ctrl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='stibp'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='arch-capabilities'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='ssbd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='cmp_legacy'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='overflow-recov'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='succor'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='ibrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='amd-ssbd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='virt-ssbd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='lbrv'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='tsc-scale'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='vmcb-clean'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='flushbyasid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='pause-filter'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='pfthreshold'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='svme-addr-chk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='lfence-always-serializing'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='rdctl-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='skip-l1dfl-vmentry'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='mds-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='pschange-mc-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='gds-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='require' name='rfds-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <feature policy='disable' name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     </mode>
Oct 11 02:05:30 compute-0 nova_compute[355868]:     <mode name='custom' supported='yes'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='486-v1'>486</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>486-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Broadwell-v1'>Broadwell</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Broadwell-v3'>Broadwell-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Broadwell-v2'>Broadwell-noTSX</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-noTSX'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Broadwell-v4'>Broadwell-noTSX-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-noTSX-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Broadwell-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Broadwell-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Broadwell-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Broadwell-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Broadwell-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Cascadelake-Server-v1'>Cascadelake-Server</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Cascadelake-Server-v3'>Cascadelake-Server-noTSX</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-noTSX'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v5</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v5'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='Intel' canonical='Conroe-v1'>Conroe</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='Intel'>Conroe-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Cooperlake-v1'>Cooperlake</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cooperlake'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cooperlake-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cooperlake-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cooperlake-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Cooperlake-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Denverton-v1'>Denverton</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Denverton'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mpx'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Denverton-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Denverton-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mpx'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Denverton-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Denverton-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Denverton-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Denverton-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Hygon' canonical='Dhyana-v1'>Dhyana</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Hygon'>Dhyana-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Hygon'>Dhyana-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Dhyana-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD' canonical='EPYC-v1'>EPYC</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='EPYC-Genoa-v1'>EPYC-Genoa</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Genoa'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amd-psfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='auto-ibrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='stibp-always-on'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Genoa-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Genoa-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amd-psfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='auto-ibrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='stibp-always-on'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD' canonical='EPYC-v2'>EPYC-IBPB</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='EPYC-Milan-v1'>EPYC-Milan</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Milan'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Milan-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Milan-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Milan-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Milan-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amd-psfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='stibp-always-on'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='EPYC-Rome-v1'>EPYC-Rome</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Rome'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Rome-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Rome-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Rome-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Rome-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Rome-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-Rome-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD'>EPYC-Rome-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD'>EPYC-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD'>EPYC-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='EPYC-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='GraniteRapids-v1'>GraniteRapids</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='GraniteRapids'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='prefetchiti'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>GraniteRapids-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='GraniteRapids-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='prefetchiti'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>GraniteRapids-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='GraniteRapids-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx10'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx10-128'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx10-256'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx10-512'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='prefetchiti'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Haswell-v1'>Haswell</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Haswell-v3'>Haswell-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Haswell-v2'>Haswell-noTSX</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-noTSX'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Haswell-v4'>Haswell-noTSX-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-noTSX-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Haswell-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Haswell-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Haswell-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Haswell-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Haswell-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Icelake-Server-v1'>Icelake-Server</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Icelake-Server-v2'>Icelake-Server-noTSX</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-noTSX'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v5</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v5'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v6</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v6'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v7</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v7'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='IvyBridge-v1'>IvyBridge</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='IvyBridge'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='IvyBridge-v2'>IvyBridge-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='IvyBridge-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>IvyBridge-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='IvyBridge-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>IvyBridge-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='IvyBridge-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='KnightsMill-v1'>KnightsMill</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='KnightsMill'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-4fmaps'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-4vnniw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512er'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512pf'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>KnightsMill-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='KnightsMill-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-4fmaps'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-4vnniw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512er'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512pf'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='Nehalem-v1'>Nehalem</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='Nehalem-v2'>Nehalem-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>Nehalem-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>Nehalem-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G1-v1'>Opteron_G1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G1-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G2-v1'>Opteron_G2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G2-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G3-v1'>Opteron_G3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G3-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='Opteron_G4-v1'>Opteron_G4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Opteron_G4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fma4'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xop'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>Opteron_G4-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Opteron_G4-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fma4'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xop'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='Opteron_G5-v1'>Opteron_G5</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Opteron_G5'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fma4'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tbm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xop'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>Opteron_G5-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Opteron_G5-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fma4'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tbm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xop'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='Intel' canonical='Penryn-v1'>Penryn</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='Intel'>Penryn-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='SandyBridge-v1'>SandyBridge</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='SandyBridge-v2'>SandyBridge-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>SandyBridge-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>SandyBridge-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='SapphireRapids-v1'>SapphireRapids</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SapphireRapids'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>SapphireRapids-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SapphireRapids-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>SapphireRapids-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SapphireRapids-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>SapphireRapids-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SapphireRapids-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='SierraForest-v1'>SierraForest</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SierraForest'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-ne-convert'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cmpccxadd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>SierraForest-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='SierraForest-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-ifma'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-ne-convert'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx-vnni-int8'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='cmpccxadd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v1'>Skylake-Client</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v2'>Skylake-Client-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v3'>Skylake-Client-noTSX-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-noTSX-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Client-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Client-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Client-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-v3'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Client-v4</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-v4'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v1'>Skylake-Server</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v2'>Skylake-Server-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v3'>Skylake-Server-noTSX-IBRS</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-noTSX-IBRS'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v1</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v1'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v2</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v2'>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v3</model>
Oct 11 02:05:30 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v3'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v4</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v4'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v5</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v5'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Snowridge-v1'>Snowridge</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Snowridge'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='core-capability'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='mpx'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='split-lock-detect'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Snowridge-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Snowridge-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='core-capability'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='mpx'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='split-lock-detect'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Snowridge-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Snowridge-v2'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='core-capability'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='split-lock-detect'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Snowridge-v3</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Snowridge-v3'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='core-capability'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='split-lock-detect'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Snowridge-v4</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Snowridge-v4'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='Westmere-v1'>Westmere</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='Westmere-v2'>Westmere-IBRS</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>Westmere-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>Westmere-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='AMD' canonical='athlon-v1'>athlon</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='athlon'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='3dnow'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='3dnowext'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='AMD'>athlon-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='athlon-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='3dnow'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='3dnowext'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='core2duo-v1'>core2duo</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='core2duo'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel'>core2duo-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='core2duo-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='coreduo-v1'>coreduo</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='coreduo'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel'>coreduo-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='coreduo-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='kvm32-v1'>kvm32</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>kvm32-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='kvm64-v1'>kvm64</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>kvm64-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='n270-v1'>n270</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='n270'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel'>n270-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='n270-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium-v1'>pentium</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium2-v1'>pentium2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium2-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium3-v1'>pentium3</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium3-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='AMD' canonical='phenom-v1'>phenom</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='phenom'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='3dnow'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='3dnowext'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='AMD'>phenom-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='phenom-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='3dnow'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='3dnowext'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='qemu32-v1'>qemu32</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>qemu32-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='qemu64-v1'>qemu64</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>qemu64-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </mode>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   </cpu>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   <memoryBacking supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <enum name='sourceType'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <value>file</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <value>anonymous</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <value>memfd</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   </memoryBacking>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   <devices>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <disk supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='diskDevice'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>disk</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>cdrom</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>floppy</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>lun</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='bus'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>ide</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>fdc</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>scsi</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtio</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>usb</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>sata</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='model'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtio</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtio-transitional</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtio-non-transitional</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </disk>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <graphics supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='type'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>vnc</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>egl-headless</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>dbus</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </graphics>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <video supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='modelType'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>vga</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>cirrus</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtio</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>none</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>bochs</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>ramfb</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </video>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <hostdev supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='mode'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>subsystem</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='startupPolicy'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>default</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>mandatory</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>requisite</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>optional</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='subsysType'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>usb</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>pci</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>scsi</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='capsType'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='pciBackend'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </hostdev>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <rng supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='model'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtio</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtio-transitional</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtio-non-transitional</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='backendModel'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>random</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>egd</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>builtin</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </rng>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <filesystem supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='driverType'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>path</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>handle</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtiofs</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </filesystem>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <tpm supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='model'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>tpm-tis</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>tpm-crb</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='backendModel'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>emulator</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>external</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='backendVersion'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>2.0</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </tpm>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <redirdev supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='bus'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>usb</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </redirdev>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <channel supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='type'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>pty</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>unix</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </channel>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <crypto supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='model'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='type'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>qemu</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='backendModel'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>builtin</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </crypto>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <interface supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='backendType'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>default</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>passt</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </interface>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <panic supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='model'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>isa</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>hyperv</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </panic>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   </devices>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   <features>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <gic supported='no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <vmcoreinfo supported='yes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <genid supported='yes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <backingStoreInput supported='yes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <backup supported='yes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <async-teardown supported='yes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <ps2 supported='yes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <sev supported='no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <sgx supported='no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <hyperv supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='features'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>relaxed</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>vapic</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>spinlocks</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>vpindex</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>runtime</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>synic</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>stimer</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>reset</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>vendor_id</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>frequencies</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>reenlightenment</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>tlbflush</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>ipi</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>avic</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>emsr_bitmap</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>xmm_input</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </hyperv>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <launchSecurity supported='no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   </features>
Oct 11 02:05:31 compute-0 nova_compute[355868]: </domainCapabilities>
Oct 11 02:05:31 compute-0 nova_compute[355868]:  _get_domain_capabilities /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1037
Oct 11 02:05:31 compute-0 nova_compute[355868]: 2025-10-11 02:05:30.905 2 DEBUG nova.virt.libvirt.host [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Libvirt host hypervisor capabilities for arch=x86_64 and machine_type=q35:
Oct 11 02:05:31 compute-0 nova_compute[355868]: <domainCapabilities>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   <path>/usr/libexec/qemu-kvm</path>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   <domain>kvm</domain>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   <machine>pc-q35-rhel9.6.0</machine>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   <arch>x86_64</arch>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   <vcpu max='4096'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   <iothreads supported='yes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   <os supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <enum name='firmware'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <value>efi</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <loader supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <value>/usr/share/edk2/ovmf/OVMF_CODE.secboot.fd</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <value>/usr/share/edk2/ovmf/OVMF_CODE.fd</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <value>/usr/share/edk2/ovmf/OVMF.amdsev.fd</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <value>/usr/share/edk2/ovmf/OVMF.inteltdx.secboot.fd</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='type'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>rom</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>pflash</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='readonly'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>yes</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>no</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='secure'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>yes</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>no</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </loader>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   </os>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   <cpu>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <mode name='host-passthrough' supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='hostPassthroughMigratable'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>on</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>off</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </mode>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <mode name='maximum' supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='maximumMigratable'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>on</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>off</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </mode>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <mode name='host-model' supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model fallback='forbid'>EPYC-Rome</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <vendor>AMD</vendor>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <maxphysaddr mode='passthrough' limit='40'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='x2apic'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='tsc-deadline'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='hypervisor'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='tsc_adjust'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='spec-ctrl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='stibp'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='arch-capabilities'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='ssbd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='cmp_legacy'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='overflow-recov'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='succor'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='ibrs'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='amd-ssbd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='virt-ssbd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='lbrv'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='tsc-scale'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='vmcb-clean'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='flushbyasid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='pause-filter'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='pfthreshold'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='svme-addr-chk'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='lfence-always-serializing'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='rdctl-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='skip-l1dfl-vmentry'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='mds-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='pschange-mc-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='gds-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='require' name='rfds-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <feature policy='disable' name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </mode>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <mode name='custom' supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='486-v1'>486</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>486-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Broadwell-v1'>Broadwell</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Broadwell'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Broadwell-v3'>Broadwell-IBRS</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Broadwell-IBRS'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Broadwell-v2'>Broadwell-noTSX</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Broadwell-noTSX'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Broadwell-v4'>Broadwell-noTSX-IBRS</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Broadwell-noTSX-IBRS'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Broadwell-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Broadwell-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Broadwell-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Broadwell-v2'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Broadwell-v3</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Broadwell-v3'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Broadwell-v4</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Broadwell-v4'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Cascadelake-Server-v1'>Cascadelake-Server</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Cascadelake-Server-v3'>Cascadelake-Server-noTSX</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-noTSX'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v2'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v3</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v3'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v4</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v4'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v5</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Cascadelake-Server-v5'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='Intel' canonical='Conroe-v1'>Conroe</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='Intel'>Conroe-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Cooperlake-v1'>Cooperlake</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Cooperlake'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cooperlake-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Cooperlake-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Cooperlake-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Cooperlake-v2'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Denverton-v1'>Denverton</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Denverton'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='mpx'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Denverton-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Denverton-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='mpx'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Denverton-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Denverton-v2'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Denverton-v3</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Denverton-v3'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Hygon' canonical='Dhyana-v1'>Dhyana</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Hygon'>Dhyana-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Hygon'>Dhyana-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Dhyana-v2'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD' canonical='EPYC-v1'>EPYC</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='EPYC-Genoa-v1'>EPYC-Genoa</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='EPYC-Genoa'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amd-psfd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='auto-ibrs'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='stibp-always-on'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Genoa-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='EPYC-Genoa-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amd-psfd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='auto-ibrs'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='stibp-always-on'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD' canonical='EPYC-v2'>EPYC-IBPB</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='EPYC-Milan-v1'>EPYC-Milan</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='EPYC-Milan'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Milan-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='EPYC-Milan-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Milan-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='EPYC-Milan-v2'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amd-psfd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='stibp-always-on'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='EPYC-Rome-v1'>EPYC-Rome</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='EPYC-Rome'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Rome-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='EPYC-Rome-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Rome-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='EPYC-Rome-v2'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-Rome-v3</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='EPYC-Rome-v3'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD'>EPYC-Rome-v4</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD'>EPYC-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='AMD'>EPYC-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-v3</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='EPYC-v3'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>EPYC-v4</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='EPYC-v4'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='GraniteRapids-v1'>GraniteRapids</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='GraniteRapids'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-fp16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='prefetchiti'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>GraniteRapids-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='GraniteRapids-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-fp16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='prefetchiti'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>GraniteRapids-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='GraniteRapids-v2'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-fp16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx10'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx10-128'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx10-256'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx10-512'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='prefetchiti'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Haswell-v1'>Haswell</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Haswell'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Haswell-v3'>Haswell-IBRS</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Haswell-IBRS'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Haswell-v2'>Haswell-noTSX</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Haswell-noTSX'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Haswell-v4'>Haswell-noTSX-IBRS</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Haswell-noTSX-IBRS'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Haswell-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Haswell-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Haswell-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Haswell-v2'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Haswell-v3</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Haswell-v3'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Haswell-v4</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Haswell-v4'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Icelake-Server-v1'>Icelake-Server</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Icelake-Server-v2'>Icelake-Server-noTSX</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-noTSX'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v2'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v3</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v3'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v4</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v4'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v5</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v5'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v6</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v6'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Icelake-Server-v7</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Icelake-Server-v7'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='IvyBridge-v1'>IvyBridge</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='IvyBridge'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='IvyBridge-v2'>IvyBridge-IBRS</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='IvyBridge-IBRS'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>IvyBridge-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='IvyBridge-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>IvyBridge-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='IvyBridge-v2'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='KnightsMill-v1'>KnightsMill</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='KnightsMill'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-4fmaps'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-4vnniw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512er'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512pf'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>KnightsMill-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='KnightsMill-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-4fmaps'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-4vnniw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512er'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512pf'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='Nehalem-v1'>Nehalem</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='Nehalem-v2'>Nehalem-IBRS</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>Nehalem-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>Nehalem-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G1-v1'>Opteron_G1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G1-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G2-v1'>Opteron_G2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G2-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G3-v1'>Opteron_G3</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G3-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='Opteron_G4-v1'>Opteron_G4</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Opteron_G4'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fma4'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xop'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>Opteron_G4-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Opteron_G4-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fma4'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xop'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD' canonical='Opteron_G5-v1'>Opteron_G5</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Opteron_G5'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fma4'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='tbm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xop'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='AMD'>Opteron_G5-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Opteron_G5-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fma4'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='tbm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xop'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='Intel' canonical='Penryn-v1'>Penryn</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='Intel'>Penryn-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='SandyBridge-v1'>SandyBridge</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='SandyBridge-v2'>SandyBridge-IBRS</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>SandyBridge-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>SandyBridge-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='SapphireRapids-v1'>SapphireRapids</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='SapphireRapids'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>SapphireRapids-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='SapphireRapids-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>SapphireRapids-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='SapphireRapids-v2'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>SapphireRapids-v3</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='SapphireRapids-v3'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-int8'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='amx-tile'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-bf16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-fp16'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bitalg'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512ifma'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrc'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fzrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='la57'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='taa-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xfd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='SierraForest-v1'>SierraForest</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='SierraForest'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx-ifma'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx-ne-convert'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx-vnni-int8'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='cmpccxadd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>SierraForest-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='SierraForest-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx-ifma'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx-ne-convert'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx-vnni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx-vnni-int8'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='cmpccxadd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fbsdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='fsrs'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ibrs-all'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='mcdt-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pbrsb-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='psdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='serialize'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vaes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v1'>Skylake-Client</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v2'>Skylake-Client-IBRS</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-IBRS'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v3'>Skylake-Client-noTSX-IBRS</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-noTSX-IBRS'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Client-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Client-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-v2'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Client-v3</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-v3'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Client-v4</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Skylake-Client-v4'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v1'>Skylake-Server</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v2'>Skylake-Server-IBRS</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-IBRS'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v3'>Skylake-Server-noTSX-IBRS</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-noTSX-IBRS'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v2'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='hle'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='rtm'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v3</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v3'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v4</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v4'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Skylake-Server-v5</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Skylake-Server-v5'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512bw'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512cd'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512dq'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512f'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='avx512vl'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='invpcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pcid'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='pku'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel' canonical='Snowridge-v1'>Snowridge</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Snowridge'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='core-capability'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='mpx'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='split-lock-detect'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Snowridge-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Snowridge-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='core-capability'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='mpx'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='split-lock-detect'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Snowridge-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Snowridge-v2'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='core-capability'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='split-lock-detect'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Snowridge-v3</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Snowridge-v3'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='core-capability'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='split-lock-detect'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' vendor='Intel'>Snowridge-v4</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='Snowridge-v4'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='cldemote'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='erms'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='gfni'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdir64b'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='movdiri'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='xsaves'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='Westmere-v1'>Westmere</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel' canonical='Westmere-v2'>Westmere-IBRS</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>Westmere-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' vendor='Intel'>Westmere-v2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='AMD' canonical='athlon-v1'>athlon</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='athlon'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='3dnow'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='3dnowext'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='AMD'>athlon-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='athlon-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='3dnow'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='3dnowext'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='core2duo-v1'>core2duo</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='core2duo'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel'>core2duo-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='core2duo-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='coreduo-v1'>coreduo</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='coreduo'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel'>coreduo-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='coreduo-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='kvm32-v1'>kvm32</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>kvm32-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='kvm64-v1'>kvm64</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>kvm64-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='n270-v1'>n270</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='n270'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='Intel'>n270-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='n270-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='ss'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium-v1'>pentium</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium2-v1'>pentium2</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium2-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium3-v1'>pentium3</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium3-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='AMD' canonical='phenom-v1'>phenom</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='phenom'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='3dnow'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='3dnowext'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='no' deprecated='yes' vendor='AMD'>phenom-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <blockers model='phenom-v1'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='3dnow'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <feature name='3dnowext'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </blockers>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='qemu32-v1'>qemu32</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>qemu32-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='qemu64-v1'>qemu64</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <model usable='yes' deprecated='yes' vendor='unknown'>qemu64-v1</model>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </mode>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   </cpu>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   <memoryBacking supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <enum name='sourceType'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <value>file</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <value>anonymous</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <value>memfd</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   </memoryBacking>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   <devices>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <disk supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='diskDevice'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>disk</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>cdrom</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>floppy</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>lun</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='bus'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>fdc</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>scsi</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtio</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>usb</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>sata</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='model'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtio</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtio-transitional</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtio-non-transitional</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </disk>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <graphics supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='type'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>vnc</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>egl-headless</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>dbus</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </graphics>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <video supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='modelType'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>vga</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>cirrus</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtio</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>none</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>bochs</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>ramfb</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </video>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <hostdev supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='mode'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>subsystem</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='startupPolicy'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>default</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>mandatory</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>requisite</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>optional</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='subsysType'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>usb</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>pci</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>scsi</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='capsType'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='pciBackend'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </hostdev>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <rng supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='model'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtio</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtio-transitional</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtio-non-transitional</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='backendModel'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>random</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>egd</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>builtin</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </rng>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <filesystem supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='driverType'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>path</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>handle</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>virtiofs</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </filesystem>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <tpm supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='model'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>tpm-tis</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>tpm-crb</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='backendModel'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>emulator</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>external</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='backendVersion'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>2.0</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </tpm>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <redirdev supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='bus'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>usb</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </redirdev>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <channel supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='type'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>pty</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>unix</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </channel>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <crypto supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='model'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='type'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>qemu</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='backendModel'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>builtin</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </crypto>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <interface supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='backendType'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>default</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>passt</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </interface>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <panic supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='model'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>isa</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>hyperv</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </panic>
Oct 11 02:05:31 compute-0 python3.9[356622]: ansible-containers.podman.podman_container Invoked with name=nova_nvme_cleaner state=absent executable=podman detach=True debug=False force_restart=False force_delete=True generate_systemd={} image_strict=False recreate=False image=None annotation=None arch=None attach=None authfile=None blkio_weight=None blkio_weight_device=None cap_add=None cap_drop=None cgroup_conf=None cgroup_parent=None cgroupns=None cgroups=None chrootdirs=None cidfile=None cmd_args=None conmon_pidfile=None command=None cpu_period=None cpu_quota=None cpu_rt_period=None cpu_rt_runtime=None cpu_shares=None cpus=None cpuset_cpus=None cpuset_mems=None decryption_key=None delete_depend=None delete_time=None delete_volumes=None detach_keys=None device=None device_cgroup_rule=None device_read_bps=None device_read_iops=None device_write_bps=None device_write_iops=None dns=None dns_option=None dns_search=None entrypoint=None env=None env_file=None env_host=None env_merge=None etc_hosts=None expose=None gidmap=None gpus=None group_add=None group_entry=None healthcheck=None healthcheck_interval=None healthcheck_retries=None healthcheck_start_period=None health_startup_cmd=None health_startup_interval=None health_startup_retries=None health_startup_success=None health_startup_timeout=None healthcheck_timeout=None healthcheck_failure_action=None hooks_dir=None hostname=None hostuser=None http_proxy=None image_volume=None init=None init_ctr=None init_path=None interactive=None ip=None ip6=None ipc=None kernel_memory=None label=None label_file=None log_driver=None log_level=None log_opt=None mac_address=None memory=None memory_reservation=None memory_swap=None memory_swappiness=None mount=None network=None network_aliases=None no_healthcheck=None no_hosts=None oom_kill_disable=None oom_score_adj=None os=None passwd=None passwd_entry=None personality=None pid=None pid_file=None pids_limit=None platform=None pod=None pod_id_file=None preserve_fd=None preserve_fds=None privileged=None publish=None publish_all=None pull=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None rdt_class=None read_only=None read_only_tmpfs=None requires=None restart_policy=None restart_time=None retry=None retry_delay=None rm=None rmi=None rootfs=None seccomp_policy=None secrets=NOT_LOGGING_PARAMETER sdnotify=None security_opt=None shm_size=None shm_size_systemd=None sig_proxy=None stop_signal=None stop_timeout=None stop_time=None subgidname=None subuidname=None sysctl=None systemd=None timeout=None timezone=None tls_verify=None tmpfs=None tty=None uidmap=None ulimit=None umask=None unsetenv=None unsetenv_all=None user=None userns=None uts=None variant=None volume=None volumes_from=None workdir=None
Oct 11 02:05:31 compute-0 nova_compute[355868]:   </devices>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   <features>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <gic supported='no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <vmcoreinfo supported='yes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <genid supported='yes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <backingStoreInput supported='yes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <backup supported='yes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <async-teardown supported='yes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <ps2 supported='yes'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <sev supported='no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <sgx supported='no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <hyperv supported='yes'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       <enum name='features'>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>relaxed</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>vapic</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>spinlocks</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>vpindex</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>runtime</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>synic</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>stimer</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>reset</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>vendor_id</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>frequencies</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>reenlightenment</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>tlbflush</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>ipi</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>avic</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>emsr_bitmap</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:         <value>xmm_input</value>
Oct 11 02:05:31 compute-0 nova_compute[355868]:       </enum>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     </hyperv>
Oct 11 02:05:31 compute-0 nova_compute[355868]:     <launchSecurity supported='no'/>
Oct 11 02:05:31 compute-0 nova_compute[355868]:   </features>
Oct 11 02:05:31 compute-0 nova_compute[355868]: </domainCapabilities>
Oct 11 02:05:31 compute-0 nova_compute[355868]:  _get_domain_capabilities /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1037
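The block above is the libvirt domainCapabilities XML that nova-compute fetches and logs at DEBUG while probing the hypervisor. A minimal sketch (not nova's code) of retrieving the same document with the libvirt-python bindings and picking out the deprecated-but-still-usable CPU models listed in the dump:

```python
# Minimal sketch (not nova's code) using the libvirt-python bindings to
# fetch the same domainCapabilities document nova-compute logs above.
import libvirt
import xml.etree.ElementTree as ET

conn = libvirt.open("qemu:///system")  # assumes a local libvirt daemon
# Signature: getDomainCapabilities(emulatorbin, arch, machine, virttype, flags)
xml_desc = conn.getDomainCapabilities(None, "x86_64", None, "kvm", 0)
conn.close()

root = ET.fromstring(xml_desc)
# Pick out the deprecated-but-still-usable CPU models seen in the dump.
for model in root.findall("./cpu/mode/model"):
    if model.get("usable") == "yes" and model.get("deprecated") == "yes":
        print(model.text)
```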
Oct 11 02:05:31 compute-0 nova_compute[355868]: 2025-10-11 02:05:30.999 2 DEBUG nova.virt.libvirt.host [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Checking secure boot support for host arch (x86_64) supports_secure_boot /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1782
Oct 11 02:05:31 compute-0 nova_compute[355868]: 2025-10-11 02:05:30.999 2 INFO nova.virt.libvirt.host [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Secure Boot support detected
Oct 11 02:05:31 compute-0 nova_compute[355868]: 2025-10-11 02:05:31.004 2 INFO nova.virt.libvirt.driver [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] The live_migration_permit_post_copy is set to True and post copy live migration is available so auto-converge will not be in use.
Oct 11 02:05:31 compute-0 nova_compute[355868]: 2025-10-11 02:05:31.022 2 DEBUG nova.virt.libvirt.driver [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Enabling emulated TPM support _check_vtpm_support /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:1097
Oct 11 02:05:31 compute-0 nova_compute[355868]: 2025-10-11 02:05:31.077 2 INFO nova.virt.node [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Determined node identity 256b11da-7f71-42c0-941c-ea1e909a35f8 from /var/lib/nova/compute_id
Oct 11 02:05:31 compute-0 nova_compute[355868]: 2025-10-11 02:05:31.108 2 WARNING nova.compute.manager [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Compute nodes ['256b11da-7f71-42c0-941c-ea1e909a35f8'] for host compute-0.ctlplane.example.com were not found in the database. If this is the first time this service is starting on this host, then you can ignore this warning.
Oct 11 02:05:31 compute-0 nova_compute[355868]: 2025-10-11 02:05:31.169 2 INFO nova.compute.manager [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Looking for unclaimed instances stuck in BUILDING status for nodes managed by this host
Oct 11 02:05:31 compute-0 nova_compute[355868]: 2025-10-11 02:05:31.236 2 WARNING nova.compute.manager [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] No compute node record found for host compute-0.ctlplane.example.com. If this is the first time this service is starting on this host, then you can ignore this warning.: nova.exception_Remote.ComputeHostNotFound_Remote: Compute host compute-0.ctlplane.example.com could not be found.
Oct 11 02:05:31 compute-0 nova_compute[355868]: 2025-10-11 02:05:31.236 2 DEBUG oslo_concurrency.lockutils [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:05:31 compute-0 nova_compute[355868]: 2025-10-11 02:05:31.237 2 DEBUG oslo_concurrency.lockutils [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:05:31 compute-0 nova_compute[355868]: 2025-10-11 02:05:31.237 2 DEBUG oslo_concurrency.lockutils [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
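The Acquiring/acquired/released triple above is the standard oslo.concurrency pattern; a hedged sketch of the decorator that produces exactly these DEBUG lines (the lock name matches the log, the function body is a placeholder):

```python
# Hedged sketch of the oslo.concurrency pattern behind the DEBUG lines
# above; the lock name matches the log, the function body is a placeholder.
from oslo_concurrency import lockutils

@lockutils.synchronized("compute_resources")
def clean_compute_node_cache():
    # Critical section: entered once the "acquired" line is emitted;
    # the "released" line follows when this function returns.
    pass
```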
Oct 11 02:05:31 compute-0 nova_compute[355868]: 2025-10-11 02:05:31.237 2 DEBUG nova.compute.resource_tracker [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:05:31 compute-0 nova_compute[355868]: 2025-10-11 02:05:31.238 2 DEBUG oslo_concurrency.processutils [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:05:31 compute-0 sudo[356620]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v820: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:31 compute-0 openstack_network_exporter[159265]: ERROR   02:05:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:05:31 compute-0 openstack_network_exporter[159265]: ERROR   02:05:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:05:31 compute-0 openstack_network_exporter[159265]: ERROR   02:05:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:05:31 compute-0 openstack_network_exporter[159265]: ERROR   02:05:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:05:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:05:31 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/49217017' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:05:31 compute-0 nova_compute[355868]: 2025-10-11 02:05:31.732 2 DEBUG oslo_concurrency.processutils [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.494s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
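Nova gathers Ceph capacity for the resource audit by shelling out to `ceph df`, as the two processutils lines above show. Illustrative only, not nova's code; the JSON field names assume a recent Ceph release:

```python
# Illustrative only: run the same "ceph df" command nova logs above and
# read the cluster totals. Field names assume a recent Ceph JSON layout.
import json
import subprocess

out = subprocess.run(
    ["ceph", "df", "--format=json", "--id", "openstack",
     "--conf", "/etc/ceph/ceph.conf"],
    capture_output=True, check=True, text=True,
).stdout

stats = json.loads(out)["stats"]
print("total bytes:", stats["total_bytes"])
print("avail bytes:", stats["total_avail_bytes"])
```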
Oct 11 02:05:31 compute-0 systemd[1]: Starting libvirt nodedev daemon...
Oct 11 02:05:31 compute-0 systemd[1]: Started libvirt nodedev daemon.
Oct 11 02:05:32 compute-0 sudo[356839]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vklnwhqadximaihtfnrotfzjuecjyxjt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148331.6201358-1849-52306339385833/AnsiballZ_systemd.py'
Oct 11 02:05:32 compute-0 sudo[356839]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:05:32 compute-0 rsyslogd[187706]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 02:05:32 compute-0 sshd-session[356180]: Connection closed by authenticating user root 121.227.153.123 port 55626 [preauth]
Oct 11 02:05:32 compute-0 nova_compute[355868]: 2025-10-11 02:05:32.454 2 WARNING nova.virt.libvirt.driver [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:05:32 compute-0 nova_compute[355868]: 2025-10-11 02:05:32.456 2 DEBUG nova.compute.resource_tracker [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=4578MB free_disk=59.98828125GB free_vcpus=8 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:05:32 compute-0 nova_compute[355868]: 2025-10-11 02:05:32.457 2 DEBUG oslo_concurrency.lockutils [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:05:32 compute-0 nova_compute[355868]: 2025-10-11 02:05:32.457 2 DEBUG oslo_concurrency.lockutils [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:05:32 compute-0 nova_compute[355868]: 2025-10-11 02:05:32.471 2 WARNING nova.compute.resource_tracker [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] No compute node record for compute-0.ctlplane.example.com:256b11da-7f71-42c0-941c-ea1e909a35f8: nova.exception_Remote.ComputeHostNotFound_Remote: Compute host 256b11da-7f71-42c0-941c-ea1e909a35f8 could not be found.
Oct 11 02:05:32 compute-0 nova_compute[355868]: 2025-10-11 02:05:32.493 2 INFO nova.compute.resource_tracker [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Compute node record created for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com with uuid: 256b11da-7f71-42c0-941c-ea1e909a35f8
Oct 11 02:05:32 compute-0 python3.9[356841]: ansible-ansible.builtin.systemd Invoked with name=edpm_nova_compute.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 02:05:32 compute-0 nova_compute[355868]: 2025-10-11 02:05:32.562 2 DEBUG nova.compute.resource_tracker [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 0 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:05:32 compute-0 nova_compute[355868]: 2025-10-11 02:05:32.562 2 DEBUG nova.compute.resource_tracker [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=512MB phys_disk=59GB used_disk=0GB total_vcpus=8 used_vcpus=0 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:05:32 compute-0 systemd[1]: Stopping nova_compute container...
Oct 11 02:05:32 compute-0 ceph-mon[191930]: pgmap v820: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:32 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/49217017' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:05:32 compute-0 nova_compute[355868]: 2025-10-11 02:05:32.671 2 DEBUG oslo_concurrency.lockutils [None req-97f28ffb-b63f-4250-9436-e289a0982ef2 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.214s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:05:32 compute-0 nova_compute[355868]: 2025-10-11 02:05:32.672 2 DEBUG oslo_concurrency.lockutils [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] Acquiring lock "singleton_lock" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:05:32 compute-0 nova_compute[355868]: 2025-10-11 02:05:32.672 2 DEBUG oslo_concurrency.lockutils [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] Acquired lock "singleton_lock" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:05:32 compute-0 nova_compute[355868]: 2025-10-11 02:05:32.672 2 DEBUG oslo_concurrency.lockutils [None req-bfed79cb-f596-4a98-a7b6-6e69b8bd2ff4 - - - - - -] Releasing lock "singleton_lock" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:05:33 compute-0 virtqemud[153560]: End of file while reading data: Input/output error
Oct 11 02:05:33 compute-0 systemd[1]: libpod-33206644db1c18e3480ef44964f3a6a1eb2aea2f1be2a206e1475b915a3d4955.scope: Deactivated successfully.
Oct 11 02:05:33 compute-0 systemd[1]: libpod-33206644db1c18e3480ef44964f3a6a1eb2aea2f1be2a206e1475b915a3d4955.scope: Consumed 4.460s CPU time.
Oct 11 02:05:33 compute-0 podman[356845]: 2025-10-11 02:05:33.245466319 +0000 UTC m=+0.654976182 container died 33206644db1c18e3480ef44964f3a6a1eb2aea2f1be2a206e1475b915a3d4955 (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': True, 'user': 'nova', 'restart': 'always', 'command': 'kolla_start', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'volumes': ['/var/lib/openstack/config/nova:/var/lib/kolla/config_files:ro', '/var/lib/openstack/cacerts/nova/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/etc/localtime:/etc/localtime:ro', '/lib/modules:/lib/modules:ro', '/dev:/dev', '/var/lib/libvirt:/var/lib/libvirt', '/run/libvirt:/run/libvirt:shared', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/etc/iscsi:/etc/iscsi:ro', '/etc/nvme:/etc/nvme', '/var/lib/openstack/config/ceph:/var/lib/kolla/config_files/ceph:ro', '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=nova_compute)
Oct 11 02:05:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v821: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:33 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-33206644db1c18e3480ef44964f3a6a1eb2aea2f1be2a206e1475b915a3d4955-userdata-shm.mount: Deactivated successfully.
Oct 11 02:05:33 compute-0 systemd[1]: var-lib-containers-storage-overlay-d44f800e10782acb3d4d088c5d8deb67cdce0dbf2953c4eeea2e1749a4c9150e-merged.mount: Deactivated successfully.
Oct 11 02:05:33 compute-0 unix_chkpwd[356872]: password check failed for user (root)
Oct 11 02:05:33 compute-0 sshd-session[356859]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:05:34 compute-0 podman[356845]: 2025-10-11 02:05:34.157870916 +0000 UTC m=+1.567380779 container cleanup 33206644db1c18e3480ef44964f3a6a1eb2aea2f1be2a206e1475b915a3d4955 (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, org.label-schema.license=GPLv2, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': True, 'user': 'nova', 'restart': 'always', 'command': 'kolla_start', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'volumes': ['/var/lib/openstack/config/nova:/var/lib/kolla/config_files:ro', '/var/lib/openstack/cacerts/nova/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/etc/localtime:/etc/localtime:ro', '/lib/modules:/lib/modules:ro', '/dev:/dev', '/var/lib/libvirt:/var/lib/libvirt', '/run/libvirt:/run/libvirt:shared', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/etc/iscsi:/etc/iscsi:ro', '/etc/nvme:/etc/nvme', '/var/lib/openstack/config/ceph:/var/lib/kolla/config_files/ceph:ro', '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro']}, org.label-schema.schema-version=1.0, tcib_managed=true, container_name=nova_compute)
Oct 11 02:05:34 compute-0 podman[356845]: nova_compute
Oct 11 02:05:34 compute-0 podman[356875]: nova_compute
Oct 11 02:05:34 compute-0 systemd[1]: edpm_nova_compute.service: Deactivated successfully.
Oct 11 02:05:34 compute-0 systemd[1]: Stopped nova_compute container.
Oct 11 02:05:34 compute-0 systemd[1]: edpm_nova_compute.service: Consumed 1.223s CPU time, 20.0M memory peak, read 0B from disk, written 127.5K to disk.
Oct 11 02:05:34 compute-0 systemd[1]: Starting nova_compute container...
Oct 11 02:05:34 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:05:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d44f800e10782acb3d4d088c5d8deb67cdce0dbf2953c4eeea2e1749a4c9150e/merged/etc/nvme supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d44f800e10782acb3d4d088c5d8deb67cdce0dbf2953c4eeea2e1749a4c9150e/merged/etc/multipath supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d44f800e10782acb3d4d088c5d8deb67cdce0dbf2953c4eeea2e1749a4c9150e/merged/var/lib/libvirt supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d44f800e10782acb3d4d088c5d8deb67cdce0dbf2953c4eeea2e1749a4c9150e/merged/var/lib/nova supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d44f800e10782acb3d4d088c5d8deb67cdce0dbf2953c4eeea2e1749a4c9150e/merged/var/lib/iscsi supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:34 compute-0 podman[356887]: 2025-10-11 02:05:34.492867829 +0000 UTC m=+0.176050118 container init 33206644db1c18e3480ef44964f3a6a1eb2aea2f1be2a206e1475b915a3d4955 (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute, container_name=nova_compute, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': True, 'user': 'nova', 'restart': 'always', 'command': 'kolla_start', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'volumes': ['/var/lib/openstack/config/nova:/var/lib/kolla/config_files:ro', '/var/lib/openstack/cacerts/nova/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/etc/localtime:/etc/localtime:ro', '/lib/modules:/lib/modules:ro', '/dev:/dev', '/var/lib/libvirt:/var/lib/libvirt', '/run/libvirt:/run/libvirt:shared', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/etc/iscsi:/etc/iscsi:ro', '/etc/nvme:/etc/nvme', '/var/lib/openstack/config/ceph:/var/lib/kolla/config_files/ceph:ro', '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro']}, config_id=edpm)
Oct 11 02:05:34 compute-0 podman[356887]: 2025-10-11 02:05:34.510836779 +0000 UTC m=+0.194019008 container start 33206644db1c18e3480ef44964f3a6a1eb2aea2f1be2a206e1475b915a3d4955 (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=edpm, container_name=nova_compute, managed_by=edpm_ansible, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': True, 'user': 'nova', 'restart': 'always', 'command': 'kolla_start', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'volumes': ['/var/lib/openstack/config/nova:/var/lib/kolla/config_files:ro', '/var/lib/openstack/cacerts/nova/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/etc/localtime:/etc/localtime:ro', '/lib/modules:/lib/modules:ro', '/dev:/dev', '/var/lib/libvirt:/var/lib/libvirt', '/run/libvirt:/run/libvirt:shared', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/etc/iscsi:/etc/iscsi:ro', '/etc/nvme:/etc/nvme', '/var/lib/openstack/config/ceph:/var/lib/kolla/config_files/ceph:ro', '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:05:34 compute-0 podman[356887]: nova_compute
Oct 11 02:05:34 compute-0 nova_compute[356901]: + sudo -E kolla_set_configs
Oct 11 02:05:34 compute-0 systemd[1]: Started nova_compute container.
Oct 11 02:05:34 compute-0 sudo[356839]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Loading config file at /var/lib/kolla/config_files/config.json
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Validating config file
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Kolla config strategy set to: COPY_ALWAYS
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Copying service configuration files
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Deleting /etc/nova/nova.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Copying /var/lib/kolla/config_files/nova-blank.conf to /etc/nova/nova.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Setting permission for /etc/nova/nova.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Deleting /etc/nova/nova.conf.d/01-nova.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Copying /var/lib/kolla/config_files/01-nova.conf to /etc/nova/nova.conf.d/01-nova.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Setting permission for /etc/nova/nova.conf.d/01-nova.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Deleting /etc/nova/nova.conf.d/03-ceph-nova.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Copying /var/lib/kolla/config_files/03-ceph-nova.conf to /etc/nova/nova.conf.d/03-ceph-nova.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Setting permission for /etc/nova/nova.conf.d/03-ceph-nova.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Deleting /etc/nova/nova.conf.d/25-nova-extra.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Copying /var/lib/kolla/config_files/25-nova-extra.conf to /etc/nova/nova.conf.d/25-nova-extra.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Setting permission for /etc/nova/nova.conf.d/25-nova-extra.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Deleting /etc/nova/nova.conf.d/nova-blank.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Copying /var/lib/kolla/config_files/nova-blank.conf to /etc/nova/nova.conf.d/nova-blank.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Setting permission for /etc/nova/nova.conf.d/nova-blank.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Deleting /etc/nova/nova.conf.d/02-nova-host-specific.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Copying /var/lib/kolla/config_files/02-nova-host-specific.conf to /etc/nova/nova.conf.d/02-nova-host-specific.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Setting permission for /etc/nova/nova.conf.d/02-nova-host-specific.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Deleting /etc/ceph
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Creating directory /etc/ceph
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Setting permission for /etc/ceph
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Copying /var/lib/kolla/config_files/ceph/ceph.conf to /etc/ceph/ceph.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Setting permission for /etc/ceph/ceph.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Copying /var/lib/kolla/config_files/ceph/ceph.client.openstack.keyring to /etc/ceph/ceph.client.openstack.keyring
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Setting permission for /etc/ceph/ceph.client.openstack.keyring
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Deleting /var/lib/nova/.ssh/ssh-privatekey
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Copying /var/lib/kolla/config_files/ssh-privatekey to /var/lib/nova/.ssh/ssh-privatekey
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Setting permission for /var/lib/nova/.ssh/ssh-privatekey
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Deleting /var/lib/nova/.ssh/config
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Copying /var/lib/kolla/config_files/ssh-config to /var/lib/nova/.ssh/config
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Setting permission for /var/lib/nova/.ssh/config
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Writing out command to execute
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Setting permission for /etc/ceph/ceph.conf
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Setting permission for /etc/ceph/ceph.client.openstack.keyring
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Setting permission for /var/lib/nova/.ssh/
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Setting permission for /var/lib/nova/.ssh/ssh-privatekey
Oct 11 02:05:34 compute-0 nova_compute[356901]: INFO:__main__:Setting permission for /var/lib/nova/.ssh/config
Oct 11 02:05:34 compute-0 nova_compute[356901]: ++ cat /run_command
Oct 11 02:05:34 compute-0 nova_compute[356901]: + CMD=nova-compute
Oct 11 02:05:34 compute-0 nova_compute[356901]: + ARGS=
Oct 11 02:05:34 compute-0 nova_compute[356901]: + sudo kolla_copy_cacerts
Oct 11 02:05:34 compute-0 nova_compute[356901]: + [[ ! -n '' ]]
Oct 11 02:05:34 compute-0 nova_compute[356901]: + . kolla_extend_start
Oct 11 02:05:34 compute-0 nova_compute[356901]: Running command: 'nova-compute'
Oct 11 02:05:34 compute-0 nova_compute[356901]: + echo 'Running command: '\''nova-compute'\'''
Oct 11 02:05:34 compute-0 nova_compute[356901]: + umask 0022
Oct 11 02:05:34 compute-0 nova_compute[356901]: + exec nova-compute
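The INFO lines from kolla_set_configs above trace the COPY_ALWAYS strategy: each entry in /var/lib/kolla/config_files/config.json is deleted at its destination, re-copied from the config-files mount, and re-permissioned, after which the command read from /run_command is exec'd. A toy re-implementation of that copy loop, simplified from kolla's actual tool (the `source`/`dest`/`perm` keys are kolla's documented config.json schema):

```python
# Toy re-implementation of the COPY_ALWAYS copy loop, simplified from
# kolla's actual tool; "source"/"dest"/"perm" are kolla's documented
# config.json keys. Run as root inside the container.
import json
import os
import shutil

with open("/var/lib/kolla/config_files/config.json") as f:
    config = json.load(f)

for item in config.get("config_files", []):
    src, dest = item["source"], item["dest"]
    if os.path.isdir(dest):
        print(f"Deleting {dest}")
        shutil.rmtree(dest)
    elif os.path.lexists(dest):
        print(f"Deleting {dest}")
        os.remove(dest)
    print(f"Copying {src} to {dest}")
    if os.path.isdir(src):
        shutil.copytree(src, dest)
    else:
        shutil.copy(src, dest)
    print(f"Setting permission for {dest}")
    os.chmod(dest, int(item.get("perm", "0600"), 8))
```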
Oct 11 02:05:34 compute-0 ceph-mon[191930]: pgmap v821: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:05:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v822: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:35 compute-0 sudo[357063]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-powuivomxtemsjlzcarkluapebjymhgb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148334.9385278-1858-158943915977935/AnsiballZ_podman_container.py'
Oct 11 02:05:35 compute-0 sudo[357063]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:05:35 compute-0 python3.9[357065]: ansible-containers.podman.podman_container Invoked with name=nova_compute_init state=started executable=podman detach=True debug=False force_restart=False force_delete=True generate_systemd={} image_strict=False recreate=False image=None annotation=None arch=None attach=None authfile=None blkio_weight=None blkio_weight_device=None cap_add=None cap_drop=None cgroup_conf=None cgroup_parent=None cgroupns=None cgroups=None chrootdirs=None cidfile=None cmd_args=None conmon_pidfile=None command=None cpu_period=None cpu_quota=None cpu_rt_period=None cpu_rt_runtime=None cpu_shares=None cpus=None cpuset_cpus=None cpuset_mems=None decryption_key=None delete_depend=None delete_time=None delete_volumes=None detach_keys=None device=None device_cgroup_rule=None device_read_bps=None device_read_iops=None device_write_bps=None device_write_iops=None dns=None dns_option=None dns_search=None entrypoint=None env=None env_file=None env_host=None env_merge=None etc_hosts=None expose=None gidmap=None gpus=None group_add=None group_entry=None healthcheck=None healthcheck_interval=None healthcheck_retries=None healthcheck_start_period=None health_startup_cmd=None health_startup_interval=None health_startup_retries=None health_startup_success=None health_startup_timeout=None healthcheck_timeout=None healthcheck_failure_action=None hooks_dir=None hostname=None hostuser=None http_proxy=None image_volume=None init=None init_ctr=None init_path=None interactive=None ip=None ip6=None ipc=None kernel_memory=None label=None label_file=None log_driver=None log_level=None log_opt=None mac_address=None memory=None memory_reservation=None memory_swap=None memory_swappiness=None mount=None network=None network_aliases=None no_healthcheck=None no_hosts=None oom_kill_disable=None oom_score_adj=None os=None passwd=None passwd_entry=None personality=None pid=None pid_file=None pids_limit=None platform=None pod=None pod_id_file=None preserve_fd=None preserve_fds=None privileged=None publish=None publish_all=None pull=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None rdt_class=None read_only=None read_only_tmpfs=None requires=None restart_policy=None restart_time=None retry=None retry_delay=None rm=None rmi=None rootfs=None seccomp_policy=None secrets=NOT_LOGGING_PARAMETER sdnotify=None security_opt=None shm_size=None shm_size_systemd=None sig_proxy=None stop_signal=None stop_timeout=None stop_time=None subgidname=None subuidname=None sysctl=None systemd=None timeout=None timezone=None tls_verify=None tmpfs=None tty=None uidmap=None ulimit=None umask=None unsetenv=None unsetenv_all=None user=None userns=None uts=None variant=None volume=None volumes_from=None workdir=None
Oct 11 02:05:35 compute-0 sshd-session[356859]: Failed password for root from 121.227.153.123 port 52998 ssh2
Oct 11 02:05:36 compute-0 systemd[1]: Started libpod-conmon-ebc3893876b006f2554389d6534fd757431488dafe40f7ef25b5026f5246c4b8.scope.
Oct 11 02:05:36 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:05:36 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f0b6676db5cddb5888111fc7c420395b9707fe2e6102b4e9ff5e3d3dd34f15dc/merged/usr/sbin/nova_statedir_ownership.py supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:36 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f0b6676db5cddb5888111fc7c420395b9707fe2e6102b4e9ff5e3d3dd34f15dc/merged/var/lib/_nova_secontext supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:36 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f0b6676db5cddb5888111fc7c420395b9707fe2e6102b4e9ff5e3d3dd34f15dc/merged/var/lib/nova supports timestamps until 2038 (0x7fffffff)
Oct 11 02:05:36 compute-0 podman[357089]: 2025-10-11 02:05:36.137097788 +0000 UTC m=+0.193301991 container init ebc3893876b006f2554389d6534fd757431488dafe40f7ef25b5026f5246c4b8 (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute_init, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=nova_compute_init, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': False, 'user': 'root', 'restart': 'never', 'command': 'bash -c $* -- eval python3 /sbin/nova_statedir_ownership.py | logger -t nova_compute_init', 'net': 'none', 'security_opt': ['label=disable'], 'detach': False, 'environment': {'NOVA_STATEDIR_OWNERSHIP_SKIP': '/var/lib/nova/compute_id', '__OS_DEBUG': False}, 'volumes': ['/dev/log:/dev/log', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/_nova_secontext:/var/lib/_nova_secontext:shared,z', '/var/lib/openstack/config/nova/nova_statedir_ownership.py:/sbin/nova_statedir_ownership.py:z']}, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 02:05:36 compute-0 podman[357089]: 2025-10-11 02:05:36.15337752 +0000 UTC m=+0.209581693 container start ebc3893876b006f2554389d6534fd757431488dafe40f7ef25b5026f5246c4b8 (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute_init, config_id=edpm, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=nova_compute_init, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': False, 'user': 'root', 'restart': 'never', 'command': 'bash -c $* -- eval python3 /sbin/nova_statedir_ownership.py | logger -t nova_compute_init', 'net': 'none', 'security_opt': ['label=disable'], 'detach': False, 'environment': {'NOVA_STATEDIR_OWNERSHIP_SKIP': '/var/lib/nova/compute_id', '__OS_DEBUG': False}, 'volumes': ['/dev/log:/dev/log', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/_nova_secontext:/var/lib/_nova_secontext:shared,z', '/var/lib/openstack/config/nova/nova_statedir_ownership.py:/sbin/nova_statedir_ownership.py:z']})
Oct 11 02:05:36 compute-0 python3.9[357065]: ansible-containers.podman.podman_container PODMAN-CONTAINER-DEBUG: podman start nova_compute_init
Oct 11 02:05:36 compute-0 nova_compute_init[357111]: INFO:nova_statedir:Applying nova statedir ownership
Oct 11 02:05:36 compute-0 nova_compute_init[357111]: INFO:nova_statedir:Target ownership for /var/lib/nova: 42436:42436
Oct 11 02:05:36 compute-0 nova_compute_init[357111]: INFO:nova_statedir:Checking uid: 1000 gid: 1000 path: /var/lib/nova/
Oct 11 02:05:36 compute-0 nova_compute_init[357111]: INFO:nova_statedir:Changing ownership of /var/lib/nova from 1000:1000 to 42436:42436
Oct 11 02:05:36 compute-0 nova_compute_init[357111]: INFO:nova_statedir:Setting selinux context of /var/lib/nova to system_u:object_r:container_file_t:s0
Oct 11 02:05:36 compute-0 nova_compute_init[357111]: INFO:nova_statedir:Checking uid: 1000 gid: 1000 path: /var/lib/nova/instances/
Oct 11 02:05:36 compute-0 nova_compute_init[357111]: INFO:nova_statedir:Changing ownership of /var/lib/nova/instances from 1000:1000 to 42436:42436
Oct 11 02:05:36 compute-0 nova_compute_init[357111]: INFO:nova_statedir:Setting selinux context of /var/lib/nova/instances to system_u:object_r:container_file_t:s0
Oct 11 02:05:36 compute-0 nova_compute_init[357111]: INFO:nova_statedir:Checking uid: 42436 gid: 42436 path: /var/lib/nova/.ssh/
Oct 11 02:05:36 compute-0 nova_compute_init[357111]: INFO:nova_statedir:Ownership of /var/lib/nova/.ssh already 42436:42436
Oct 11 02:05:36 compute-0 nova_compute_init[357111]: INFO:nova_statedir:Setting selinux context of /var/lib/nova/.ssh to system_u:object_r:container_file_t:s0
Oct 11 02:05:36 compute-0 nova_compute_init[357111]: INFO:nova_statedir:Checking uid: 42436 gid: 42436 path: /var/lib/nova/.ssh/ssh-privatekey
Oct 11 02:05:36 compute-0 nova_compute_init[357111]: INFO:nova_statedir:Checking uid: 42436 gid: 42436 path: /var/lib/nova/.ssh/config
Oct 11 02:05:36 compute-0 nova_compute_init[357111]: INFO:nova_statedir:Nova statedir ownership complete
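The nova_compute_init output above is a recursive ownership pass over /var/lib/nova that skips the path named in NOVA_STATEDIR_OWNERSHIP_SKIP. A hedged sketch of the same walk (uid/gid 42436 taken from the log; this is not the actual nova_statedir_ownership.py):

```python
# Hedged sketch of the recursive ownership pass nova_compute_init logs
# above (uid/gid 42436 taken from the log; this is not the actual
# nova_statedir_ownership.py). Requires root.
import os

TARGET_UID = TARGET_GID = 42436
SKIP = os.environ.get("NOVA_STATEDIR_OWNERSHIP_SKIP",
                      "/var/lib/nova/compute_id")

def ensure_ownership(path):
    st = os.lstat(path)
    if (st.st_uid, st.st_gid) != (TARGET_UID, TARGET_GID):
        os.lchown(path, TARGET_UID, TARGET_GID)

ensure_ownership("/var/lib/nova")
for dirpath, dirnames, filenames in os.walk("/var/lib/nova"):
    for name in dirnames + filenames:
        path = os.path.join(dirpath, name)
        if path != SKIP:
            ensure_ownership(path)
```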
Oct 11 02:05:36 compute-0 systemd[1]: libpod-ebc3893876b006f2554389d6534fd757431488dafe40f7ef25b5026f5246c4b8.scope: Deactivated successfully.
Oct 11 02:05:36 compute-0 podman[357112]: 2025-10-11 02:05:36.266517383 +0000 UTC m=+0.058726860 container died ebc3893876b006f2554389d6534fd757431488dafe40f7ef25b5026f5246c4b8 (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute_init, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': False, 'user': 'root', 'restart': 'never', 'command': 'bash -c $* -- eval python3 /sbin/nova_statedir_ownership.py | logger -t nova_compute_init', 'net': 'none', 'security_opt': ['label=disable'], 'detach': False, 'environment': {'NOVA_STATEDIR_OWNERSHIP_SKIP': '/var/lib/nova/compute_id', '__OS_DEBUG': False}, 'volumes': ['/dev/log:/dev/log', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/_nova_secontext:/var/lib/_nova_secontext:shared,z', '/var/lib/openstack/config/nova/nova_statedir_ownership.py:/sbin/nova_statedir_ownership.py:z']}, container_name=nova_compute_init, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_managed=true, config_id=edpm)
Oct 11 02:05:36 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-ebc3893876b006f2554389d6534fd757431488dafe40f7ef25b5026f5246c4b8-userdata-shm.mount: Deactivated successfully.
Oct 11 02:05:36 compute-0 systemd[1]: var-lib-containers-storage-overlay-f0b6676db5cddb5888111fc7c420395b9707fe2e6102b4e9ff5e3d3dd34f15dc-merged.mount: Deactivated successfully.
Oct 11 02:05:36 compute-0 podman[357122]: 2025-10-11 02:05:36.405346728 +0000 UTC m=+0.133415669 container cleanup ebc3893876b006f2554389d6534fd757431488dafe40f7ef25b5026f5246c4b8 (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute_init, config_id=edpm, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': False, 'user': 'root', 'restart': 'never', 'command': 'bash -c $* -- eval python3 /sbin/nova_statedir_ownership.py | logger -t nova_compute_init', 'net': 'none', 'security_opt': ['label=disable'], 'detach': False, 'environment': {'NOVA_STATEDIR_OWNERSHIP_SKIP': '/var/lib/nova/compute_id', '__OS_DEBUG': False}, 'volumes': ['/dev/log:/dev/log', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/_nova_secontext:/var/lib/_nova_secontext:shared,z', '/var/lib/openstack/config/nova/nova_statedir_ownership.py:/sbin/nova_statedir_ownership.py:z']}, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=nova_compute_init, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS)
Oct 11 02:05:36 compute-0 systemd[1]: libpod-conmon-ebc3893876b006f2554389d6534fd757431488dafe40f7ef25b5026f5246c4b8.scope: Deactivated successfully.
Oct 11 02:05:36 compute-0 sudo[357063]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:36 compute-0 nova_compute[356901]: 2025-10-11 02:05:36.680 2 DEBUG os_vif [-] Loaded VIF plugin class '<class 'vif_plug_linux_bridge.linux_bridge.LinuxBridgePlugin'>' with name 'linux_bridge' initialize /usr/lib/python3.9/site-packages/os_vif/__init__.py:44
Oct 11 02:05:36 compute-0 nova_compute[356901]: 2025-10-11 02:05:36.681 2 DEBUG os_vif [-] Loaded VIF plugin class '<class 'vif_plug_noop.noop.NoOpPlugin'>' with name 'noop' initialize /usr/lib/python3.9/site-packages/os_vif/__init__.py:44
Oct 11 02:05:36 compute-0 nova_compute[356901]: 2025-10-11 02:05:36.681 2 DEBUG os_vif [-] Loaded VIF plugin class '<class 'vif_plug_ovs.ovs.OvsPlugin'>' with name 'ovs' initialize /usr/lib/python3.9/site-packages/os_vif/__init__.py:44
Oct 11 02:05:36 compute-0 nova_compute[356901]: 2025-10-11 02:05:36.682 2 INFO os_vif [-] Loaded VIF plugins: linux_bridge, noop, ovs
Oct 11 02:05:36 compute-0 ceph-mon[191930]: pgmap v822: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:36 compute-0 nova_compute[356901]: 2025-10-11 02:05:36.850 2 DEBUG oslo_concurrency.processutils [-] Running cmd (subprocess): grep -F node.session.scan /sbin/iscsiadm execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:05:36 compute-0 nova_compute[356901]: 2025-10-11 02:05:36.886 2 DEBUG oslo_concurrency.processutils [-] CMD "grep -F node.session.scan /sbin/iscsiadm" returned: 0 in 0.036s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
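The grep above is a capability probe: the storage code checks whether the installed iscsiadm supports the node.session.scan option by searching the binary for that string. A minimal equivalent (the helper name is made up):

```python
# Minimal equivalent of the capability probe above: grep the iscsiadm
# binary for the node.session.scan option string (helper name made up).
import subprocess

def iscsiadm_supports_manual_scan():
    res = subprocess.run(
        ["grep", "-F", "node.session.scan", "/sbin/iscsiadm"],
        stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
    )
    return res.returncode == 0
```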
Oct 11 02:05:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v823: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:37 compute-0 sshd-session[356859]: Connection closed by authenticating user root 121.227.153.123 port 52998 [preauth]
Oct 11 02:05:37 compute-0 sshd-session[316320]: Connection closed by 192.168.122.30 port 43262
Oct 11 02:05:37 compute-0 sshd-session[316317]: pam_unix(sshd:session): session closed for user zuul
Oct 11 02:05:37 compute-0 systemd[1]: session-56.scope: Deactivated successfully.
Oct 11 02:05:37 compute-0 systemd[1]: session-56.scope: Consumed 5min 7.801s CPU time.
Oct 11 02:05:37 compute-0 systemd-logind[804]: Session 56 logged out. Waiting for processes to exit.
Oct 11 02:05:37 compute-0 systemd-logind[804]: Removed session 56.
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.427 2 INFO nova.virt.driver [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] Loading compute driver 'libvirt.LibvirtDriver'
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.561 2 INFO nova.compute.provider_config [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] No provider configs found in /etc/nova/provider_config/. If files are present, ensure the Nova process has access.
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.575 2 DEBUG oslo_concurrency.lockutils [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] Acquiring lock "singleton_lock" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.575 2 DEBUG oslo_concurrency.lockutils [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] Acquired lock "singleton_lock" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.576 2 DEBUG oslo_concurrency.lockutils [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] Releasing lock "singleton_lock" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.576 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] Full set of CONF: _wait_for_exit_or_signal /usr/lib/python3.9/site-packages/oslo_service/service.py:362
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.576 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2589
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.576 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] Configuration options gathered from: log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2590
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.576 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] command line args: [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2591
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.577 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] config files: ['/etc/nova/nova.conf', '/etc/nova/nova-compute.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2592
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.577 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ================================================================================ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2594
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.577 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] allow_resize_to_same_host      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.577 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] arq_binding_timeout            = 300 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.577 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] backdoor_port                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.577 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] backdoor_socket                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.577 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] block_device_allocate_retries  = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.578 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] block_device_allocate_retries_interval = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.578 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cert                           = self.pem log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.578 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] compute_driver                 = libvirt.LibvirtDriver log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.578 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] compute_monitors               = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.578 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] config_dir                     = ['/etc/nova/nova.conf.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.578 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] config_drive_format            = iso9660 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.578 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] config_file                    = ['/etc/nova/nova.conf', '/etc/nova/nova-compute.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.579 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] config_source                  = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.579 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] console_host                   = compute-0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.579 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] control_exchange               = nova log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.579 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cpu_allocation_ratio           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.579 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] daemon                         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.579 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] debug                          = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.579 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] default_access_ip_network_name = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.580 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] default_availability_zone      = nova log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.580 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] default_ephemeral_format       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.580 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'glanceclient=WARN', 'oslo.privsep.daemon=INFO'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.580 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] default_schedule_zone          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.580 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] disk_allocation_ratio          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.580 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] enable_new_services            = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.581 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] enabled_apis                   = ['osapi_compute', 'metadata'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.581 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] enabled_ssl_apis               = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.581 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] flat_injected                  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.581 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] force_config_drive             = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.581 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] force_raw_images               = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.581 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.581 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] heal_instance_info_cache_interval = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.582 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] host                           = compute-0.ctlplane.example.com log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.582 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] initial_cpu_allocation_ratio   = 4.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.582 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] initial_disk_allocation_ratio  = 0.9 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.582 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] initial_ram_allocation_ratio   = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.582 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] injected_network_template      = /usr/lib/python3.9/site-packages/nova/virt/interfaces.template log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.583 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] instance_build_timeout         = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.583 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] instance_delete_interval       = 300 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.583 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.583 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] instance_name_template         = instance-%08x log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.583 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] instance_usage_audit           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.583 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] instance_usage_audit_period    = month log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.584 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.584 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] instances_path                 = /var/lib/nova/instances log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.584 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] internal_service_availability_zone = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.584 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] key                            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.584 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] live_migration_retry_count     = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.584 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] log_config_append              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.584 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.585 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] log_dir                        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.585 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] log_file                       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.585 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] log_options                    = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.585 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.585 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.585 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] log_rotation_type              = size log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.585 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.586 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.586 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.586 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.586 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.586 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] long_rpc_timeout               = 1800 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.586 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] max_concurrent_builds          = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.586 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] max_concurrent_live_migrations = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.587 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] max_concurrent_snapshots       = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.587 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] max_local_block_devices        = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.587 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] max_logfile_count              = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.587 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] max_logfile_size_mb            = 20 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.587 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] maximum_instance_delete_attempts = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.587 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] metadata_listen                = 0.0.0.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.588 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] metadata_listen_port           = 8775 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.588 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] metadata_workers               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.588 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] migrate_max_retries            = -1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.588 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] mkisofs_cmd                    = /usr/bin/mkisofs log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.588 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] my_block_storage_ip            = 192.168.122.100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.588 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] my_ip                          = 192.168.122.100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.588 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] network_allocate_retries       = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.589 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] non_inheritable_image_properties = ['cache_in_nova', 'bittorrent'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.589 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] osapi_compute_listen           = 0.0.0.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.589 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] osapi_compute_listen_port      = 8774 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.589 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] osapi_compute_unique_server_name_scope =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.589 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] osapi_compute_workers          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.589 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] password_length                = 12 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.589 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] periodic_enable                = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.590 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] periodic_fuzzy_delay           = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.590 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] pointer_model                  = usbtablet log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.590 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] preallocate_images             = none log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.590 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] publish_errors                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.590 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] pybasedir                      = /usr/lib/python3.9/site-packages log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.590 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ram_allocation_ratio           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.590 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.591 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.591 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.591 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] reboot_timeout                 = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.591 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] reclaim_instance_interval      = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.591 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] record                         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.591 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] reimage_timeout_per_gb         = 20 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.592 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] report_interval                = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.592 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] rescue_timeout                 = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.592 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] reserved_host_cpus             = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.592 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] reserved_host_disk_mb          = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.592 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] reserved_host_memory_mb        = 512 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.592 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] reserved_huge_pages            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.592 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] resize_confirm_window          = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.592 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] resize_fs_using_block_device   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.593 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] resume_guests_state_on_host_boot = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.593 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] rootwrap_config                = /etc/nova/rootwrap.conf log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.593 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] rpc_response_timeout           = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.593 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] run_external_periodic_tasks    = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.593 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] running_deleted_instance_action = reap log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.593 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] running_deleted_instance_poll_interval = 1800 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.593 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] running_deleted_instance_timeout = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.594 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] scheduler_instance_sync_interval = 120 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.594 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] service_down_time              = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.594 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] servicegroup_driver            = db log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.594 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] shelved_offload_time           = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.594 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] shelved_poll_interval          = 3600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.594 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] shutdown_timeout               = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.594 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] source_is_ipv6                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.595 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ssl_only                       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.595 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] state_path                     = /var/lib/nova log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.595 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] sync_power_state_interval      = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.595 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] sync_power_state_pool_size     = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.595 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] syslog_log_facility            = LOG_USER log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.595 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] tempdir                        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.595 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] timeout_nbd                    = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.596 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] transport_url                  = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.596 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] update_resources_interval      = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.596 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] use_cow_images                 = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.596 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] use_eventlog                   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.596 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] use_journal                    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.596 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] use_json                       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.596 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] use_rootwrap_daemon            = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.597 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] use_stderr                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.597 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] use_syslog                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.597 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vcpu_pin_set                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.597 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vif_plugging_is_fatal          = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.597 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vif_plugging_timeout           = 300 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.597 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] virt_mkfs                      = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.597 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] volume_usage_poll_interval     = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.598 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] watch_log_file                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.598 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] web                            = /usr/share/spice-html5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.598 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_concurrency.disable_process_locking = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.598 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_concurrency.lock_path     = /var/lib/nova/tmp log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.598 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_metrics.metrics_buffer_size = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.598 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_metrics.metrics_enabled = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.599 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_metrics.metrics_process_name =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.599 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_metrics.metrics_socket_file = /var/tmp/metrics_collector.sock log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.599 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_metrics.metrics_thread_stop_timeout = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.599 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.auth_strategy              = keystone log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.600 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.compute_link_prefix        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.600 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.config_drive_skip_versions = 1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.600 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.dhcp_domain                =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.600 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.enable_instance_password   = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.601 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.glance_link_prefix         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.601 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.instance_list_cells_batch_fixed_size = 100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.601 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.instance_list_cells_batch_strategy = distributed log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.601 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.instance_list_per_project_cells = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.601 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.list_records_by_skipping_down_cells = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.601 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.local_metadata_per_cell    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.601 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.max_limit                  = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.602 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.metadata_cache_expiration  = 15 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.602 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.neutron_default_tenant_id  = default log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.602 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.use_forwarded_for          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.602 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.use_neutron_default_nets   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.602 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.vendordata_dynamic_connect_timeout = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.602 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.vendordata_dynamic_failure_fatal = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.602 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.vendordata_dynamic_read_timeout = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.603 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.vendordata_dynamic_ssl_certfile =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.603 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.vendordata_dynamic_targets = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.603 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.vendordata_jsonfile_path   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.603 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api.vendordata_providers       = ['StaticJSON'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.604 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.backend                  = oslo_cache.dict log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.604 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.backend_argument         = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.605 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.config_prefix            = cache.oslo log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.605 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.dead_timeout             = 60.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.605 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.debug_cache_backend      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.605 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.enable_retry_client      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.605 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.enable_socket_keepalive  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.605 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.enabled                  = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.605 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.expiration_time          = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.606 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.hashclient_retry_attempts = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.606 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.hashclient_retry_delay   = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.606 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.memcache_dead_retry      = 300 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.606 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.memcache_password        =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.606 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.memcache_pool_connection_get_timeout = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.606 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.memcache_pool_flush_on_reconnect = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.606 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.memcache_pool_maxsize    = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.607 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.memcache_pool_unused_timeout = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.607 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.memcache_sasl_enabled    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.607 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.memcache_servers         = ['localhost:11211'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.607 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.memcache_socket_timeout  = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.607 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.memcache_username        =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.607 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.proxies                  = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.608 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.retry_attempts           = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.608 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.retry_delay              = 0.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.608 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.socket_keepalive_count   = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.608 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.socket_keepalive_idle    = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.608 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.socket_keepalive_interval = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.608 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.tls_allowed_ciphers      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.608 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.tls_cafile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.609 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.tls_certfile             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.609 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.tls_enabled              = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.609 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cache.tls_keyfile              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.609 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cinder.auth_section            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.609 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cinder.auth_type               = password log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.609 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cinder.cafile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.610 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cinder.catalog_info            = volumev3:cinderv3:internalURL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.610 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cinder.certfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.610 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cinder.collect_timing          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.610 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cinder.cross_az_attach         = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.610 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cinder.debug                   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.610 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cinder.endpoint_template       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.610 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cinder.http_retries            = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.611 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cinder.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.611 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cinder.keyfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.611 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cinder.os_region_name          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.611 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cinder.split_loggers           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.611 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cinder.timeout                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.611 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] compute.consecutive_build_service_disable_threshold = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.612 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] compute.cpu_dedicated_set      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.612 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] compute.cpu_shared_set         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.612 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] compute.image_type_exclude_list = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.612 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] compute.live_migration_wait_for_vif_plug = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.612 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] compute.max_concurrent_disk_ops = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.612 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] compute.max_disk_devices_to_attach = -1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.612 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] compute.packing_host_numa_cells_allocation_strategy = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.613 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] compute.provider_config_location = /etc/nova/provider_config/ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.613 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] compute.resource_provider_association_refresh = 300 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.613 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] compute.shutdown_retry_interval = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.613 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] compute.vmdk_allowed_types     = ['streamOptimized', 'monolithicSparse'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.613 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] conductor.workers              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.613 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] console.allowed_origins        = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.613 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] console.ssl_ciphers            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.614 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] console.ssl_minimum_version    = default log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.614 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] consoleauth.token_ttl          = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.614 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.cafile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.614 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.certfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.614 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.collect_timing          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.614 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.connect_retries         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.614 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.connect_retry_delay     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.615 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.endpoint_override       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.615 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.615 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.keyfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.615 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.max_version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.615 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.min_version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.615 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.region_name             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.616 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.service_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.616 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.service_type            = accelerator log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.616 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.split_loggers           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.616 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.status_code_retries     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.616 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.status_code_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.616 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.timeout                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.616 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.valid_interfaces        = ['internal', 'public'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.617 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] cyborg.version                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.617 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.backend               = sqlalchemy log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.617 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.connection            = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.617 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.connection_debug      = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.617 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.connection_parameters =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.617 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.connection_recycle_time = 3600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.617 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.connection_trace      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.618 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.db_inc_retry_interval = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.618 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.db_max_retries        = 20 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.618 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.db_max_retry_interval = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.618 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.db_retry_interval     = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.618 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.max_overflow          = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.618 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.max_pool_size         = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.618 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.max_retries           = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.619 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.mysql_enable_ndb      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.619 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.mysql_sql_mode        = TRADITIONAL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.619 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.mysql_wsrep_sync_wait = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.619 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.pool_timeout          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.619 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.retry_interval        = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.619 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.slave_connection      = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.620 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] database.sqlite_synchronous    = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.620 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.backend           = sqlalchemy log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.620 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.connection        = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.620 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.connection_debug  = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.620 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.connection_parameters =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.620 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.connection_recycle_time = 3600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.620 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.connection_trace  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.621 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.db_inc_retry_interval = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.621 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.db_max_retries    = 20 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.621 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.db_max_retry_interval = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.621 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.db_retry_interval = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.621 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.max_overflow      = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.621 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.max_pool_size     = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.622 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.max_retries       = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.622 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.mysql_enable_ndb  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.622 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.mysql_sql_mode    = TRADITIONAL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.622 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.mysql_wsrep_sync_wait = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.622 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.pool_timeout      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.622 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.retry_interval    = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.623 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.slave_connection  = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.623 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] api_database.sqlite_synchronous = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.623 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] devices.enabled_mdev_types     = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.623 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ephemeral_storage_encryption.cipher = aes-xts-plain64 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.623 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ephemeral_storage_encryption.enabled = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.623 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ephemeral_storage_encryption.key_size = 512 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.624 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.api_servers             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.624 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.cafile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.624 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.certfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.624 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.collect_timing          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.624 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.connect_retries         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.624 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.connect_retry_delay     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.624 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.debug                   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.625 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.default_trusted_certificate_ids = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.625 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.enable_certificate_validation = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.625 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.enable_rbd_download     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.625 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.endpoint_override       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.625 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.625 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.keyfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.625 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.max_version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.626 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.min_version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.626 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.num_retries             = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.626 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.rbd_ceph_conf           =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.626 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.rbd_connect_timeout     = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.626 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.rbd_pool                =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.626 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.rbd_user                =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.626 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.region_name             = regionOne log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.627 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.service_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.627 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.service_type            = image log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.627 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.split_loggers           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.627 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.status_code_retries     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.627 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.status_code_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.627 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.timeout                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.628 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.valid_interfaces        = ['internal'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.628 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.verify_glance_signatures = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.628 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] glance.version                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.628 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] guestfs.debug                  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.628 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.config_drive_cdrom      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.628 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.config_drive_inject_password = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.628 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.dynamic_memory_ratio    = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.629 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.enable_instance_metrics_collection = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.629 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.enable_remotefx         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.629 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.instances_path_share    =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.629 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.iscsi_initiator_list    = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.629 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.limit_cpu_features      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.629 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.mounted_disk_query_retry_count = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.629 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.mounted_disk_query_retry_interval = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.630 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.power_state_check_timeframe = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.630 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.power_state_event_polling_interval = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.630 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.qemu_img_cmd            = qemu-img.exe log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.630 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.use_multipath_io        = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.630 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.volume_attach_retry_count = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.630 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.volume_attach_retry_interval = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.630 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.vswitch_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.631 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] hyperv.wait_soft_reboot_seconds = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.631 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] mks.enabled                    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.632 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] mks.mksproxy_base_url          = http://127.0.0.1:6090/ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.632 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] image_cache.manager_interval   = 2400 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.632 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] image_cache.precache_concurrency = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.632 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] image_cache.remove_unused_base_images = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.632 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] image_cache.remove_unused_original_minimum_age_seconds = 86400 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.633 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] image_cache.remove_unused_resized_minimum_age_seconds = 3600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.633 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] image_cache.subdirectory_name  = _base log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.633 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.api_max_retries         = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.633 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.api_retry_interval      = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.633 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.auth_section            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.633 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.auth_type               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.633 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.cafile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.634 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.certfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.634 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.collect_timing          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.634 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.connect_retries         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.634 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.connect_retry_delay     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.634 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.endpoint_override       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.634 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.634 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.keyfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.635 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.max_version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.635 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.min_version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.635 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.partition_key           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.635 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.peer_list               = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.635 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.region_name             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.635 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.serial_console_state_timeout = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.636 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.service_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.636 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.service_type            = baremetal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.636 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.split_loggers           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.636 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.status_code_retries     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.636 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.status_code_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.636 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.timeout                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.636 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.valid_interfaces        = ['internal', 'public'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.637 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ironic.version                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.637 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] key_manager.backend            = barbican log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.637 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] key_manager.fixed_key          = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.637 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican.auth_endpoint         = http://localhost/identity/v3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.637 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican.barbican_api_version  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.637 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican.barbican_endpoint     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.638 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican.barbican_endpoint_type = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.638 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican.barbican_region_name  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.638 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican.cafile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.638 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican.certfile              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.638 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican.collect_timing        = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.638 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican.insecure              = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.638 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican.keyfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.639 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican.number_of_retries     = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.639 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican.retry_delay           = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.639 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican.send_service_user_token = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.639 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican.split_loggers         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.639 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican.timeout               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.639 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican.verify_ssl            = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.640 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican.verify_ssl_path       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.640 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican_service_user.auth_section = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.640 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican_service_user.auth_type = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.640 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican_service_user.cafile   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.640 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican_service_user.certfile = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.640 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican_service_user.collect_timing = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.641 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican_service_user.insecure = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.641 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican_service_user.keyfile  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.641 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican_service_user.split_loggers = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.641 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] barbican_service_user.timeout  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.641 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vault.approle_role_id          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.641 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vault.approle_secret_id        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.641 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vault.cafile                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.642 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vault.certfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.642 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vault.collect_timing           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.642 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vault.insecure                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.642 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vault.keyfile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.642 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vault.kv_mountpoint            = secret log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.642 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vault.kv_version               = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.642 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vault.namespace                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.642 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vault.root_token_id            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.643 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vault.split_loggers            = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.643 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vault.ssl_ca_crt_file          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.643 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vault.timeout                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.643 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vault.use_ssl                  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.643 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vault.vault_url                = http://127.0.0.1:8200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.643 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.cafile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.644 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.certfile              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.644 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.collect_timing        = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.644 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.connect_retries       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.644 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.connect_retry_delay   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.644 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.endpoint_override     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.644 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.insecure              = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.644 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.keyfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.645 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.max_version           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.645 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.min_version           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.645 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.region_name           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.645 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.service_name          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.645 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.service_type          = identity log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.645 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.split_loggers         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.645 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.status_code_retries   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.646 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.status_code_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.646 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.timeout               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.646 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.valid_interfaces      = ['internal', 'public'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.646 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] keystone.version               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.646 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.connection_uri         =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.646 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.cpu_mode               = host-model log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.646 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.cpu_model_extra_flags  = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.647 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.cpu_models             = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.647 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.cpu_power_governor_high = performance log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.647 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.cpu_power_governor_low = powersave log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.647 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.cpu_power_management   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.648 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.cpu_power_management_strategy = cpu_state log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.648 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.device_detach_attempts = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.648 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.device_detach_timeout  = 20 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.648 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.disk_cachemodes        = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.648 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.disk_prefix            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.648 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.enabled_perf_events    = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.648 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.file_backed_memory     = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.649 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.gid_maps               = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.649 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.hw_disk_discard        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.649 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.hw_machine_type        = ['x86_64=q35'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.649 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.images_rbd_ceph_conf   = /etc/ceph/ceph.conf log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.649 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.images_rbd_glance_copy_poll_interval = 15 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.649 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.images_rbd_glance_copy_timeout = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.649 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.images_rbd_glance_store_name = default_backend log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.650 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.images_rbd_pool        = vms log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.650 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.images_type            = rbd log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.650 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.images_volume_group    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.650 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.inject_key             = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.650 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.inject_partition       = -2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.650 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.inject_password        = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.650 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.iscsi_iface            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.651 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.iser_use_multipath     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.651 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.live_migration_bandwidth = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.651 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.live_migration_completion_timeout = 800 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.651 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.live_migration_downtime = 500 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.651 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.live_migration_downtime_delay = 75 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.651 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.live_migration_downtime_steps = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.652 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.live_migration_inbound_addr = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.652 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.live_migration_permit_auto_converge = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.652 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.live_migration_permit_post_copy = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.652 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.live_migration_scheme  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.652 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.live_migration_timeout_action = force_complete log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.652 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.live_migration_tunnelled = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.653 2 WARNING oslo_config.cfg [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] Deprecated: Option "live_migration_uri" from group "libvirt" is deprecated for removal (
Oct 11 02:05:37 compute-0 nova_compute[356901]: live_migration_uri is deprecated for removal in favor of two other options that
Oct 11 02:05:37 compute-0 nova_compute[356901]: allow changing the live migration scheme and target URI: ``live_migration_scheme``
Oct 11 02:05:37 compute-0 nova_compute[356901]: and ``live_migration_inbound_addr`` respectively.
Oct 11 02:05:37 compute-0 nova_compute[356901]: ).  Its value may be silently ignored in the future.
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.653 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.live_migration_uri     = qemu+tls://%s/system log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
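The warning above means the single ``live_migration_uri`` template (here ``qemu+tls://%s/system``) should eventually be replaced by its two successor options. A minimal nova.conf sketch of that replacement, assuming a TLS-based migration setup like the one logged here; the inbound address is a placeholder, not a value taken from this host:

[libvirt]
# Successors to the deprecated live_migration_uri = qemu+tls://%s/system.
# A scheme of "tls" produces qemu+tls:// migration URIs; the address is
# a hypothetical example, substitute the host's migration network address.
live_migration_scheme = tls
live_migration_inbound_addr = compute-0.ctlplane.example.com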
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.653 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.live_migration_with_native_tls = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.653 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.max_queues             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.653 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.mem_stats_period_seconds = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.653 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.nfs_mount_options      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.654 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.nfs_mount_point_base   = /var/lib/nova/mnt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.654 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.num_aoe_discover_tries = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.654 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.num_iser_scan_tries    = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.654 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.num_memory_encrypted_guests = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.654 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.num_nvme_discover_tries = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.654 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.num_pcie_ports         = 24 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.654 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.num_volume_scan_tries  = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.655 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.pmem_namespaces        = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.655 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.quobyte_client_cfg     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.655 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.quobyte_mount_point_base = /var/lib/nova/mnt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.655 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.rbd_connect_timeout    = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.655 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.rbd_destroy_volume_retries = 12 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.655 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.rbd_destroy_volume_retry_interval = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.656 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.rbd_secret_uuid        = 3c7617c3-7a20-523e-a9de-20c0d6ba41da log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.656 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.rbd_user               = openstack log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.656 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.realtime_scheduler_priority = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.656 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.remote_filesystem_transport = ssh log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.656 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.rescue_image_id        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.656 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.rescue_kernel_id       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.656 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.rescue_ramdisk_id      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.657 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.rng_dev_path           = /dev/urandom log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.657 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.rx_queue_size          = 512 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.657 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.smbfs_mount_options    =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.657 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.smbfs_mount_point_base = /var/lib/nova/mnt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.657 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.snapshot_compression   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.657 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.snapshot_image_format  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.658 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.snapshots_directory    = /var/lib/nova/instances/snapshots log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.658 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.sparse_logical_volumes = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.658 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.swtpm_enabled          = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.658 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.swtpm_group            = tss log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.658 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.swtpm_user             = tss log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.658 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.sysinfo_serial         = unique log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.658 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.tx_queue_size          = 512 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.659 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.uid_maps               = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.659 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.use_virtio_for_bridges = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.659 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.virt_type              = kvm log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.659 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.volume_clear           = zero log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.659 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.volume_clear_size      = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.659 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.volume_use_multipath   = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.660 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.vzstorage_cache_path   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.660 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.vzstorage_log_path     = /var/log/vstorage/%(cluster_name)s/nova.log.gz log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.660 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.vzstorage_mount_group  = qemu log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.660 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.vzstorage_mount_opts   = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.660 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.vzstorage_mount_perms  = 0770 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.660 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.vzstorage_mount_point_base = /var/lib/nova/mnt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.660 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.vzstorage_mount_user   = stack log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.661 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] libvirt.wait_soft_reboot_seconds = 120 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.661 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.auth_section           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.661 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.auth_type              = password log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.661 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.cafile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.661 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.certfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.661 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.collect_timing         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.661 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.connect_retries        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.662 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.connect_retry_delay    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.662 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.default_floating_pool  = nova log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.662 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.endpoint_override      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.662 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.extension_sync_interval = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.662 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.http_retries           = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.662 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.insecure               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.662 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.keyfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.663 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.max_version            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.663 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.metadata_proxy_shared_secret = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.663 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.min_version            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.663 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.ovs_bridge             = br-int log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.663 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.physnets               = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.663 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.region_name            = regionOne log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.664 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.service_metadata_proxy = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.664 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.service_name           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.664 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.service_type           = network log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.664 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.split_loggers          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.664 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.status_code_retries    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.664 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.status_code_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.664 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.timeout                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.665 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.valid_interfaces       = ['internal'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.665 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] neutron.version                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.665 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] notifications.bdms_in_notifications = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.665 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] notifications.default_level    = INFO log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.665 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] notifications.notification_format = unversioned log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.665 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] notifications.notify_on_state_change = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.666 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] notifications.versioned_notifications_topics = ['versioned_notifications'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.666 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] pci.alias                      = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.666 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] pci.device_spec                = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.666 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] pci.report_in_placement        = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.666 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.auth_section         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.666 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.auth_type            = password log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.666 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.auth_url             = https://keystone-internal.openstack.svc:5000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.667 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.cafile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.667 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.certfile             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.667 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.collect_timing       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.667 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.connect_retries      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.667 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.connect_retry_delay  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.667 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.default_domain_id    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.668 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.default_domain_name  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.668 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.domain_id            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.668 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.domain_name          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.668 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.endpoint_override    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.668 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.insecure             = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.668 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.keyfile              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.668 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.max_version          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.669 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.min_version          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.669 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.password             = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.669 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.project_domain_id    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.669 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.project_domain_name  = Default log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.669 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.project_id           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.669 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.project_name         = service log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.670 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.region_name          = regionOne log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.670 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.service_name         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.670 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.service_type         = placement log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.670 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.split_loggers        = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.670 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.status_code_retries  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.670 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.status_code_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.670 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.system_scope         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.671 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.timeout              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.671 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.trust_id             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.671 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.user_domain_id       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.671 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.user_domain_name     = Default log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.671 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.user_id              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.672 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.username             = nova log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.672 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.valid_interfaces     = ['internal'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.672 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] placement.version              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
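
The placement.* block above is the Keystone client configuration nova-compute uses to reach the Placement API: project_name = service, username = nova, region_name = regionOne, and only the internal endpoint interface. The "log_opt_values" tag on every line names the oslo.config helper producing this dump. A minimal standalone sketch of that mechanism, with illustrative option definitions rather than Nova's real registration:

    from oslo_config import cfg
    import logging

    # Illustrative registration of a few [placement] options; Nova's real
    # option set is much larger.
    opts = [
        cfg.StrOpt('project_name', help='Service project for Placement auth'),
        cfg.StrOpt('region_name', help='Keystone region used for endpoint lookup'),
        cfg.ListOpt('valid_interfaces', default=['internal', 'public'],
                    help='Endpoint interfaces tried, in order'),
    ]

    conf = cfg.ConfigOpts()
    conf.register_opts(opts, group='placement')
    conf([])  # a real service would pass --config-file /etc/nova/nova.conf here

    # log_opt_values() emits exactly the "<group>.<option> = <value>" lines
    # seen throughout this log, at DEBUG level.
    logging.basicConfig(level=logging.DEBUG)
    conf.log_opt_values(logging.getLogger(__name__), logging.DEBUG)
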
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.672 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] quota.cores                    = 20 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.672 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] quota.count_usage_from_placement = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.672 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] quota.driver                   = nova.quota.DbQuotaDriver log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.673 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] quota.injected_file_content_bytes = 10240 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.673 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] quota.injected_file_path_length = 255 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.673 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] quota.injected_files           = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.673 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] quota.instances                = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.673 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] quota.key_pairs                = 100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.673 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] quota.metadata_items           = 128 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.674 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] quota.ram                      = 51200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.674 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] quota.recheck_quota            = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.674 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] quota.server_group_members     = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.674 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] quota.server_groups            = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
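
The quota.* defaults above bound each project: 10 instances, 20 cores, and 51200 MiB of RAM, counted from the database by nova.quota.DbQuotaDriver rather than from Placement (count_usage_from_placement = False). A toy check of whether a request fits those defaults, as an illustration only, not Nova code:

    # Default per-project limits from the log above (ram is in MiB).
    quota = {'instances': 10, 'cores': 20, 'ram': 51200}

    def fits(n_instances, vcpus_each, ram_mib_each, used=None):
        """Return True if a request fits the remaining quota."""
        used = used or {'instances': 0, 'cores': 0, 'ram': 0}
        want = {'instances': n_instances,
                'cores': n_instances * vcpus_each,
                'ram': n_instances * ram_mib_each}
        return all(used[k] + want[k] <= quota[k] for k in quota)

    print(fits(5, 4, 8192))  # 5 x 4 vCPU / 8 GiB = 20 cores, 40960 MiB: True
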
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.674 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] rdp.enabled                    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.674 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] rdp.html5_proxy_base_url       = http://127.0.0.1:6083/ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.675 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] scheduler.discover_hosts_in_cells_interval = -1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.675 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] scheduler.enable_isolated_aggregate_filtering = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.675 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] scheduler.image_metadata_prefilter = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.675 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] scheduler.limit_tenants_to_placement_aggregate = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.675 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] scheduler.max_attempts         = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.676 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] scheduler.max_placement_results = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.676 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] scheduler.placement_aggregate_required_for_tenants = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.676 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] scheduler.query_placement_for_availability_zone = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.676 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] scheduler.query_placement_for_image_type_support = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.676 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] scheduler.query_placement_for_routed_network_aggregates = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.676 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] scheduler.workers              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.676 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.aggregate_image_properties_isolation_namespace = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.677 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.aggregate_image_properties_isolation_separator = . log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.677 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.available_filters = ['nova.scheduler.filters.all_filters'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.677 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.build_failure_weight_multiplier = 1000000.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.677 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.cpu_weight_multiplier = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.677 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.cross_cell_move_weight_multiplier = 1000000.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.677 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.disk_weight_multiplier = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.678 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.enabled_filters = ['ComputeFilter', 'ComputeCapabilitiesFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.678 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.host_subset_size = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.678 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.image_properties_default_architecture = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.678 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.io_ops_weight_multiplier = -1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.678 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.isolated_hosts = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.678 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.isolated_images = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.679 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.max_instances_per_host = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.679 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.max_io_ops_per_host = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.679 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.pci_in_placement = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.679 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.pci_weight_multiplier = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.679 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.ram_weight_multiplier = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.679 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.restrict_isolated_hosts_to_isolated_images = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.680 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.shuffle_best_same_weighed_hosts = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.680 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.soft_affinity_weight_multiplier = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.680 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.soft_anti_affinity_weight_multiplier = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.680 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.track_instance_changes = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.680 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] filter_scheduler.weight_classes = ['nova.scheduler.weights.all_weighers'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
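
filter_scheduler.enabled_filters lists the five filters every candidate host must pass; weighers then rank the survivors, and host_subset_size = 1 makes the scheduler take the single best host rather than randomizing among a subset. A schematic sketch of that filter-then-weigh flow, with made-up hosts and a hypothetical RamFilter standing in for the real filter classes:

    # Illustrative sketch, not Nova's scheduler implementation.
    class ComputeFilter:
        def host_passes(self, host, spec):
            return host['enabled']                 # host service is up/enabled

    class RamFilter:
        def host_passes(self, host, spec):
            return host['free_ram_mib'] >= spec['ram_mib']

    def schedule(hosts, filters, spec, weigher, subset_size=1):
        for f in filters:                          # each filter prunes the list
            hosts = [h for h in hosts if f.host_passes(h, spec)]
        hosts.sort(key=weigher, reverse=True)      # weighers rank survivors
        return hosts[:subset_size]                 # host_subset_size = 1

    hosts = [{'name': 'compute-0', 'enabled': True, 'free_ram_mib': 8192},
             {'name': 'compute-1', 'enabled': True, 'free_ram_mib': 2048}]
    best = schedule(hosts, [ComputeFilter(), RamFilter()],
                    {'ram_mib': 4096}, weigher=lambda h: h['free_ram_mib'])
    print(best)  # compute-0 only; compute-1 fails the RAM check
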
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.680 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] metrics.required               = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.681 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] metrics.weight_multiplier      = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.681 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] metrics.weight_of_unavailable  = -10000.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.681 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] metrics.weight_setting         = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
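
The metrics.* weigher turns compute-monitor metrics into a host weight: each "name=ratio" pair in metrics.weight_setting contributes metric * ratio, the sum is scaled by weight_multiplier (1.0 here), and with required = True a host missing a metric is rejected outright instead of being given weight_of_unavailable. A sketch of the arithmetic with a hypothetical weight_setting, since the log shows an empty list:

    # Hypothetical setting; the deployment above has weight_setting = [].
    weight_setting = {'cpu.percent': -1.0}
    weight_of_unavailable = -10000.0

    def metric_weight(host_metrics, required=False):
        total = 0.0
        for name, ratio in weight_setting.items():
            if name in host_metrics:
                total += host_metrics[name] * ratio
            elif required:
                raise ValueError(f'host missing required metric {name}')
            else:
                total += weight_of_unavailable
        return total  # further scaled by metrics.weight_multiplier (1.0 here)

    print(metric_weight({'cpu.percent': 12.0}))  # -> -12.0 (busier = worse)
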
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.681 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] serial_console.base_url        = ws://127.0.0.1:6083/ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.681 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] serial_console.enabled         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.681 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] serial_console.port_range      = 10000:20000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.682 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] serial_console.proxyclient_address = 127.0.0.1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.682 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] serial_console.serialproxy_host = 0.0.0.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.682 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] serial_console.serialproxy_port = 6083 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.682 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] service_user.auth_section      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.682 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] service_user.auth_type         = password log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.682 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] service_user.cafile            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.682 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] service_user.certfile          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.683 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] service_user.collect_timing    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.683 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] service_user.insecure          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.683 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] service_user.keyfile           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.683 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] service_user.send_service_user_token = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.683 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] service_user.split_loggers     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.684 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] service_user.timeout           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
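
service_user.send_service_user_token = True makes Nova attach its own service credentials alongside the incoming user token, so Keystone middleware on the receiving service can distinguish a live service-to-service call (and tolerate a user token that expired mid-operation). In header terms the idea looks like this sketch, with tokens elided:

    # Both headers travel on requests Nova makes on behalf of a user:
    headers = {
        'X-Auth-Token': '<user token>',      # the end user's token
        'X-Service-Token': '<nova token>',   # token for the service_user above
    }
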
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.684 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] spice.agent_enabled            = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.684 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] spice.enabled                  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.684 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] spice.html5proxy_base_url      = http://127.0.0.1:6082/spice_auto.html log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.684 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] spice.html5proxy_host          = 0.0.0.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.684 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] spice.html5proxy_port          = 6082 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.685 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] spice.image_compression        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.685 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] spice.jpeg_compression         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.685 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] spice.playback_compression     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.685 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] spice.server_listen            = 127.0.0.1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.685 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] spice.server_proxyclient_address = 127.0.0.1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.685 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] spice.streaming_mode           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.685 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] spice.zlib_compression         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.686 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] upgrade_levels.baseapi         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.686 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] upgrade_levels.cert            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.686 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] upgrade_levels.compute         = auto log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.686 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] upgrade_levels.conductor       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.686 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] upgrade_levels.scheduler       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.686 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vendordata_dynamic_auth.auth_section = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.687 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vendordata_dynamic_auth.auth_type = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.687 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vendordata_dynamic_auth.cafile = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.687 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vendordata_dynamic_auth.certfile = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.687 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vendordata_dynamic_auth.collect_timing = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.687 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vendordata_dynamic_auth.insecure = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.687 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vendordata_dynamic_auth.keyfile = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.688 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vendordata_dynamic_auth.split_loggers = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.688 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vendordata_dynamic_auth.timeout = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.688 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.api_retry_count         = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.688 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.ca_file                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.688 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.cache_prefix            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.688 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.cluster_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.688 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.connection_pool_size    = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.689 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.console_delay_seconds   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.689 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.datastore_regex         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.689 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.host_ip                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.689 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.host_password           = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.689 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.host_port               = 443 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.689 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.host_username           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.690 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.690 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.integration_bridge      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.690 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.maximum_objects         = 100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.690 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.pbm_default_policy      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.690 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.pbm_enabled             = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.690 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.pbm_wsdl_location       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.691 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.serial_log_dir          = /opt/vmware/vspc log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.691 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.serial_port_proxy_uri   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.691 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.serial_port_service_uri = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.691 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.task_poll_interval      = 0.5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.691 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.use_linked_clone        = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.691 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.vnc_keymap              = en-us log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.692 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.vnc_port                = 5900 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.692 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vmware.vnc_port_total          = 10000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.692 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vnc.auth_schemes               = ['none'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.692 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vnc.enabled                    = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.692 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vnc.novncproxy_base_url        = https://nova-novncproxy-cell1-public-openstack.apps-crc.testing/vnc_lite.html log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.693 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vnc.novncproxy_host            = 0.0.0.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.693 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vnc.novncproxy_port            = 6080 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.693 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vnc.server_listen              = ::0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.693 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vnc.server_proxyclient_address = 192.168.122.100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.693 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vnc.vencrypt_ca_certs          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.693 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vnc.vencrypt_client_cert       = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.693 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vnc.vencrypt_client_key        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
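
The vnc.* block wires guest consoles: QEMU listens on every address (server_listen = ::0), the noVNC proxy reaches it at server_proxyclient_address = 192.168.122.100, and clients are handed a page under vnc.novncproxy_base_url. A rough sketch of how such a console URL is composed; the token is a placeholder and the exact query format depends on the proxy version:

    from urllib.parse import urlencode

    base_url = ('https://nova-novncproxy-cell1-public-openstack'
                '.apps-crc.testing/vnc_lite.html')
    token = '00000000-0000-0000-0000-000000000000'  # placeholder, issued per console
    print(base_url + '?' + urlencode({'path': f'?token={token}'}))
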
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.694 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.disable_compute_service_check_for_ffu = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.694 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.disable_deep_image_inspection = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.694 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.disable_fallback_pcpu_query = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.694 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.disable_group_policy_check_upcall = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.694 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.disable_libvirt_livesnapshot = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.694 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.disable_rootwrap   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.695 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.enable_numa_live_migration = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.695 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.enable_qemu_monitor_announce_self = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.695 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.ensure_libvirt_rbd_instance_dir_cleanup = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.695 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.handle_virt_lifecycle_events = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.695 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.libvirt_disable_apic = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.695 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.never_download_image_if_on_rbd = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.696 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.qemu_monitor_announce_self_count = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.696 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.qemu_monitor_announce_self_interval = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.696 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.reserve_disk_resource_for_image_cache = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.696 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.skip_cpu_compare_at_startup = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.696 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.skip_cpu_compare_on_dest = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.696 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.skip_hypervisor_version_check_on_lm = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.697 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.skip_reserve_in_use_ironic_nodes = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.697 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.unified_limits_count_pcpu_as_vcpu = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.697 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] workarounds.wait_for_vif_plugged_event_during_hard_reboot = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
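
Among the workarounds.* flags, enable_qemu_monitor_announce_self = True together with qemu_monitor_announce_self_count = 3 and ..._interval = 1 has the driver ask QEMU to re-announce the guest's MAC addresses after live migration, three times, one second apart, so switches relearn the port. A sketch of that loop; the sender callable is hypothetical, not Nova's libvirt driver API:

    import time

    def announce_self(send, count=3, interval=1):
        """Send the announce command 'count' times, 'interval' seconds apart."""
        for i in range(count):
            send('announce-self')          # hypothetical monitor-command sender
            if i < count - 1:
                time.sleep(interval)

    announce_self(lambda cmd: print('qemu-monitor:', cmd))
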
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.697 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] wsgi.api_paste_config          = api-paste.ini log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.697 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] wsgi.client_socket_timeout     = 900 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.697 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] wsgi.default_pool_size         = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.697 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] wsgi.keep_alive                = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.698 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] wsgi.max_header_line           = 16384 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.698 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] wsgi.secure_proxy_ssl_header   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.698 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] wsgi.ssl_ca_file               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.698 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] wsgi.ssl_cert_file             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.698 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] wsgi.ssl_key_file              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.698 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] wsgi.tcp_keepidle              = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.699 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] wsgi.wsgi_log_format           = %(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
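
wsgi.wsgi_log_format above is an ordinary Python %-style template. A quick demonstration with made-up request values shows the access-log line it yields:

    fmt = ('%(client_ip)s "%(request_line)s" status: %(status_code)s '
           'len: %(body_length)s time: %(wall_seconds).7f')
    print(fmt % {'client_ip': '192.168.122.1',
                 'request_line': 'GET /v2.1/servers HTTP/1.1',
                 'status_code': 200, 'body_length': 1842,
                 'wall_seconds': 0.0412345})
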
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.699 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] zvm.ca_file                    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.699 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] zvm.cloud_connector_url        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.699 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] zvm.image_tmp_path             = /var/lib/nova/images log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.699 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] zvm.reachable_timeout          = 300 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.699 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_policy.enforce_new_defaults = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.700 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_policy.enforce_scope      = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.700 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_policy.policy_default_rule = default log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.700 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_policy.policy_dirs        = ['policy.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.700 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_policy.policy_file        = policy.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.701 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_policy.remote_content_type = application/x-www-form-urlencoded log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.701 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_policy.remote_ssl_ca_crt_file = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.701 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_policy.remote_ssl_client_crt_file = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.701 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_policy.remote_ssl_client_key_file = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.701 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_policy.remote_ssl_verify_server_crt = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
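
oslo_policy.enforce_new_defaults = True and enforce_scope = True select the newer role- and scope-aware policy defaults, with operator overrides read from policy.yaml and the policy.d directories. A minimal standalone sketch of the enforcement call, using a hypothetical rule name and credentials rather than Nova's actual policy wiring:

    from oslo_config import cfg
    from oslo_policy import policy

    CONF = cfg.CONF
    CONF([])                              # no config file; library defaults apply
    enforcer = policy.Enforcer(CONF)      # honors oslo_policy.policy_file etc.

    # Hypothetical rule: readers may GET; overridable from policy.yaml.
    enforcer.register_default(policy.RuleDefault('compute:get', 'role:reader'))

    creds = {'project_id': 'p1', 'roles': ['reader']}
    print(enforcer.enforce('compute:get', {'project_id': 'p1'}, creds))  # True
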
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.701 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_versionedobjects.fatal_exception_format_errors = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.701 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_middleware.http_basic_auth_user_file = /etc/htpasswd log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.702 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] remote_debug.host              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.702 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] remote_debug.port              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.702 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.amqp_auto_delete = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.702 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.amqp_durable_queues = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.702 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.conn_pool_min_size = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.702 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.conn_pool_ttl = 1200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.702 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.direct_mandatory_flag = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.703 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.enable_cancel_on_failover = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.703 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.heartbeat_in_pthread = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.703 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.heartbeat_rate = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.703 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.heartbeat_timeout_threshold = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.703 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.kombu_compression = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.704 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.kombu_failover_strategy = round-robin log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.704 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.kombu_missing_consumer_retry_timeout = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.704 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.kombu_reconnect_delay = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.704 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.rabbit_ha_queues = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.704 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.rabbit_interval_max = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.704 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.rabbit_login_method = AMQPLAIN log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.704 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.rabbit_qos_prefetch_count = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.705 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.rabbit_quorum_delivery_limit = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.705 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.rabbit_quorum_max_memory_bytes = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.705 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.rabbit_quorum_max_memory_length = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.705 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.rabbit_quorum_queue = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.705 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.rabbit_retry_backoff = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.705 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.rabbit_retry_interval = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.705 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.rabbit_transient_queues_ttl = 1800 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.706 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.rpc_conn_pool_size = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.706 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.ssl      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.706 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.ssl_ca_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.706 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.ssl_cert_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.706 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.ssl_enforce_fips_mode = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.706 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.ssl_key_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.707 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_rabbit.ssl_version =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.707 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_notifications.driver = ['noop'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.707 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_notifications.retry = -1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.707 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_notifications.topics = ['notifications'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.707 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_messaging_notifications.transport_url = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.708 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.auth_section        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.708 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.auth_type           = password log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.708 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.auth_url            = https://keystone-internal.openstack.svc:5000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.708 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.cafile              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.708 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.certfile            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.708 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.collect_timing      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.708 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.connect_retries     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.709 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.connect_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.709 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.default_domain_id   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.709 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.default_domain_name = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.709 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.domain_id           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.709 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.domain_name         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.709 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.endpoint_id         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.709 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.endpoint_override   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.710 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.insecure            = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.710 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.keyfile             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.710 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.max_version         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.710 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.min_version         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.710 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.password            = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.710 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.project_domain_id   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.710 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.project_domain_name = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.711 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.project_id          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.711 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.project_name        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.711 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.region_name         = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.711 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.service_name        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.711 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.service_type        = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.711 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.split_loggers       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.712 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.status_code_retries = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.712 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.status_code_retry_delay = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.712 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.system_scope        = all log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.712 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.timeout             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.712 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.trust_id            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.712 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.user_domain_id      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.712 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.user_domain_name    = Default log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.713 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.user_id             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.713 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.username            = nova log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.713 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.valid_interfaces    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.713 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_limit.version             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.713 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_reports.file_event_handler = /var/lib/nova log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.713 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_reports.file_event_handler_interval = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.713 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] oslo_reports.log_dir           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.714 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vif_plug_linux_bridge_privileged.capabilities = [12] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.714 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vif_plug_linux_bridge_privileged.group = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.714 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vif_plug_linux_bridge_privileged.helper_command = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.714 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vif_plug_linux_bridge_privileged.logger_name = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.714 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vif_plug_linux_bridge_privileged.thread_pool_size = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.714 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vif_plug_linux_bridge_privileged.user = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.714 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vif_plug_ovs_privileged.capabilities = [12, 1] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.715 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vif_plug_ovs_privileged.group  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.715 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vif_plug_ovs_privileged.helper_command = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.715 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vif_plug_ovs_privileged.logger_name = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.715 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vif_plug_ovs_privileged.thread_pool_size = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.715 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] vif_plug_ovs_privileged.user   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.716 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] os_vif_linux_bridge.flat_interface = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.716 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] os_vif_linux_bridge.forward_bridge_interface = ['all'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.716 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] os_vif_linux_bridge.iptables_bottom_regex =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.716 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] os_vif_linux_bridge.iptables_drop_action = DROP log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.716 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] os_vif_linux_bridge.iptables_top_regex =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.716 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] os_vif_linux_bridge.network_device_mtu = 1500 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.716 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] os_vif_linux_bridge.use_ipv6   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.717 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] os_vif_linux_bridge.vlan_interface = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.717 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] os_vif_ovs.isolate_vif         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.717 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] os_vif_ovs.network_device_mtu  = 1500 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.717 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] os_vif_ovs.ovs_vsctl_timeout   = 120 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.717 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] os_vif_ovs.ovsdb_connection    = tcp:127.0.0.1:6640 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.717 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] os_vif_ovs.ovsdb_interface     = native log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.718 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] os_vif_ovs.per_port_bridge     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.718 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] os_brick.lock_path             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.718 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] os_brick.wait_mpath_device_attempts = 4 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.718 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] os_brick.wait_mpath_device_interval = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.718 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] privsep_osbrick.capabilities   = [21] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.718 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] privsep_osbrick.group          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.718 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] privsep_osbrick.helper_command = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.719 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] privsep_osbrick.logger_name    = os_brick.privileged log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.719 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] privsep_osbrick.thread_pool_size = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.719 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] privsep_osbrick.user           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.719 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] nova_sys_admin.capabilities    = [0, 1, 2, 3, 12, 21] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.719 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] nova_sys_admin.group           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.719 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] nova_sys_admin.helper_command  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.720 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] nova_sys_admin.logger_name     = oslo_privsep.daemon log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.720 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] nova_sys_admin.thread_pool_size = 8 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.720 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] nova_sys_admin.user            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.720 2 DEBUG oslo_service.service [None req-9363d6d7-37a3-4294-aa42-03803790b42f - - - - - -] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2613
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.721 2 INFO nova.service [-] Starting compute node (version 27.5.2-0.20250829104910.6f8decf.el9)
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.737 2 INFO nova.virt.node [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Determined node identity 256b11da-7f71-42c0-941c-ea1e909a35f8 from /var/lib/nova/compute_id
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.737 2 DEBUG nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Starting native event thread _init_events /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:492
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.738 2 DEBUG nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Starting green dispatch thread _init_events /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:498
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.738 2 DEBUG nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Starting connection event dispatch thread initialize /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:620
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.739 2 DEBUG nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Connecting to libvirt: qemu:///system _get_new_connection /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:503
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.765 2 DEBUG nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Registering for lifecycle events <nova.virt.libvirt.host.Host object at 0x7fcb3f4914c0> _get_new_connection /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:509
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.771 2 DEBUG nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Registering for connection events: <nova.virt.libvirt.host.Host object at 0x7fcb3f4914c0> _get_new_connection /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:530
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.773 2 INFO nova.virt.libvirt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Connection event '1' reason 'None'
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.785 2 INFO nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Libvirt host capabilities <capabilities>
Oct 11 02:05:37 compute-0 nova_compute[356901]: 
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <host>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <uuid>c0909b4b-0860-4b28-ab6b-0ab32acb5a0f</uuid>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <cpu>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <arch>x86_64</arch>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model>EPYC-Rome-v4</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <vendor>AMD</vendor>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <microcode version='16777317'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <signature family='23' model='49' stepping='0'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <topology sockets='8' dies='1' clusters='1' cores='1' threads='1'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <maxphysaddr mode='emulate' bits='40'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='x2apic'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='tsc-deadline'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='osxsave'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='hypervisor'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='tsc_adjust'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='spec-ctrl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='stibp'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='arch-capabilities'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='ssbd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='cmp_legacy'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='topoext'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='virt-ssbd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='lbrv'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='tsc-scale'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='vmcb-clean'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='pause-filter'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='pfthreshold'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='svme-addr-chk'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='rdctl-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='skip-l1dfl-vmentry'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='mds-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature name='pschange-mc-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <pages unit='KiB' size='4'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <pages unit='KiB' size='2048'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <pages unit='KiB' size='1048576'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </cpu>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <power_management>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <suspend_mem/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </power_management>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <iommu support='no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <migration_features>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <live/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <uri_transports>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <uri_transport>tcp</uri_transport>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <uri_transport>rdma</uri_transport>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </uri_transports>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </migration_features>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <topology>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <cells num='1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <cell id='0'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:           <memory unit='KiB'>7864348</memory>
Oct 11 02:05:37 compute-0 nova_compute[356901]:           <pages unit='KiB' size='4'>1966087</pages>
Oct 11 02:05:37 compute-0 nova_compute[356901]:           <pages unit='KiB' size='2048'>0</pages>
Oct 11 02:05:37 compute-0 nova_compute[356901]:           <pages unit='KiB' size='1048576'>0</pages>
Oct 11 02:05:37 compute-0 nova_compute[356901]:           <distances>
Oct 11 02:05:37 compute-0 nova_compute[356901]:             <sibling id='0' value='10'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:           </distances>
Oct 11 02:05:37 compute-0 nova_compute[356901]:           <cpus num='8'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:             <cpu id='0' socket_id='0' die_id='0' cluster_id='65535' core_id='0' siblings='0'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:             <cpu id='1' socket_id='1' die_id='1' cluster_id='65535' core_id='0' siblings='1'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:             <cpu id='2' socket_id='2' die_id='2' cluster_id='65535' core_id='0' siblings='2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:             <cpu id='3' socket_id='3' die_id='3' cluster_id='65535' core_id='0' siblings='3'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:             <cpu id='4' socket_id='4' die_id='4' cluster_id='65535' core_id='0' siblings='4'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:             <cpu id='5' socket_id='5' die_id='5' cluster_id='65535' core_id='0' siblings='5'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:             <cpu id='6' socket_id='6' die_id='6' cluster_id='65535' core_id='0' siblings='6'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:             <cpu id='7' socket_id='7' die_id='7' cluster_id='65535' core_id='0' siblings='7'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:           </cpus>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         </cell>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </cells>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </topology>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <cache>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <bank id='0' level='2' type='both' size='512' unit='KiB' cpus='0'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <bank id='1' level='2' type='both' size='512' unit='KiB' cpus='1'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <bank id='2' level='2' type='both' size='512' unit='KiB' cpus='2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <bank id='3' level='2' type='both' size='512' unit='KiB' cpus='3'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <bank id='4' level='2' type='both' size='512' unit='KiB' cpus='4'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <bank id='5' level='2' type='both' size='512' unit='KiB' cpus='5'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <bank id='6' level='2' type='both' size='512' unit='KiB' cpus='6'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <bank id='7' level='2' type='both' size='512' unit='KiB' cpus='7'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <bank id='0' level='3' type='both' size='16' unit='MiB' cpus='0'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <bank id='1' level='3' type='both' size='16' unit='MiB' cpus='1'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <bank id='2' level='3' type='both' size='16' unit='MiB' cpus='2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <bank id='3' level='3' type='both' size='16' unit='MiB' cpus='3'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <bank id='4' level='3' type='both' size='16' unit='MiB' cpus='4'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <bank id='5' level='3' type='both' size='16' unit='MiB' cpus='5'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <bank id='6' level='3' type='both' size='16' unit='MiB' cpus='6'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <bank id='7' level='3' type='both' size='16' unit='MiB' cpus='7'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </cache>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <secmodel>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model>selinux</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <doi>0</doi>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <baselabel type='kvm'>system_u:system_r:svirt_t:s0</baselabel>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <baselabel type='qemu'>system_u:system_r:svirt_tcg_t:s0</baselabel>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </secmodel>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <secmodel>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model>dac</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <doi>0</doi>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <baselabel type='kvm'>+107:+107</baselabel>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <baselabel type='qemu'>+107:+107</baselabel>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </secmodel>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   </host>
Oct 11 02:05:37 compute-0 nova_compute[356901]: 
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <guest>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <os_type>hvm</os_type>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <arch name='i686'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <wordsize>32</wordsize>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <emulator>/usr/libexec/qemu-kvm</emulator>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='240' deprecated='yes'>pc-i440fx-rhel7.6.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine canonical='pc-i440fx-rhel7.6.0' maxCpus='240' deprecated='yes'>pc</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='4096'>pc-q35-rhel9.6.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine canonical='pc-q35-rhel9.6.0' maxCpus='4096'>q35</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.6.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710'>pc-q35-rhel9.4.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.5.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.3.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel7.6.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.4.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710'>pc-q35-rhel9.2.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.2.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710'>pc-q35-rhel9.0.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.0.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.1.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <domain type='qemu'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <domain type='kvm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </arch>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <features>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <pae/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <nonpae/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <acpi default='on' toggle='yes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <apic default='on' toggle='no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <cpuselection/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <deviceboot/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <disksnapshot default='on' toggle='no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <externalSnapshot/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </features>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   </guest>
Oct 11 02:05:37 compute-0 nova_compute[356901]: 
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <guest>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <os_type>hvm</os_type>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <arch name='x86_64'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <wordsize>64</wordsize>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <emulator>/usr/libexec/qemu-kvm</emulator>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='240' deprecated='yes'>pc-i440fx-rhel7.6.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine canonical='pc-i440fx-rhel7.6.0' maxCpus='240' deprecated='yes'>pc</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='4096'>pc-q35-rhel9.6.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine canonical='pc-q35-rhel9.6.0' maxCpus='4096'>q35</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.6.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710'>pc-q35-rhel9.4.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.5.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.3.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel7.6.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.4.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710'>pc-q35-rhel9.2.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.2.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710'>pc-q35-rhel9.0.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.0.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <machine maxCpus='710' deprecated='yes'>pc-q35-rhel8.1.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <domain type='qemu'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <domain type='kvm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </arch>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <features>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <acpi default='on' toggle='yes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <apic default='on' toggle='no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <cpuselection/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <deviceboot/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <disksnapshot default='on' toggle='no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <externalSnapshot/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </features>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   </guest>
Oct 11 02:05:37 compute-0 nova_compute[356901]: 
Oct 11 02:05:37 compute-0 nova_compute[356901]: </capabilities>
Oct 11 02:05:37 compute-0 nova_compute[356901]: 
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.788 2 DEBUG nova.virt.libvirt.volume.mount [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Initialising _HostMountState generation 0 host_up /usr/lib/python3.9/site-packages/nova/virt/libvirt/volume/mount.py:130
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.795 2 DEBUG nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Getting domain capabilities for i686 via machine types: {'pc', 'q35'} _get_machine_types /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:952
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.803 2 DEBUG nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Libvirt host hypervisor capabilities for arch=i686 and machine_type=pc:
Oct 11 02:05:37 compute-0 nova_compute[356901]: <domainCapabilities>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <path>/usr/libexec/qemu-kvm</path>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <domain>kvm</domain>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <machine>pc-i440fx-rhel7.6.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <arch>i686</arch>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <vcpu max='240'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <iothreads supported='yes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <os supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <enum name='firmware'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <loader supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <value>/usr/share/OVMF/OVMF_CODE.secboot.fd</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='type'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>rom</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>pflash</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='readonly'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>yes</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>no</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='secure'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>no</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </loader>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   </os>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <cpu>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <mode name='host-passthrough' supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='hostPassthroughMigratable'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>on</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>off</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </mode>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <mode name='maximum' supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='maximumMigratable'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>on</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>off</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </mode>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <mode name='host-model' supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model fallback='forbid'>EPYC-Rome</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <vendor>AMD</vendor>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <maxphysaddr mode='passthrough' limit='40'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='x2apic'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='tsc-deadline'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='hypervisor'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='tsc_adjust'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='spec-ctrl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='stibp'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='arch-capabilities'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='ssbd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='cmp_legacy'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='overflow-recov'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='succor'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='ibrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='amd-ssbd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='virt-ssbd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='lbrv'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='tsc-scale'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='vmcb-clean'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='flushbyasid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='pause-filter'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='pfthreshold'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='svme-addr-chk'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='lfence-always-serializing'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='rdctl-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='skip-l1dfl-vmentry'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='mds-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='pschange-mc-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='gds-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='rfds-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='disable' name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </mode>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <mode name='custom' supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='486-v1'>486</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>486-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Broadwell-v1'>Broadwell</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Broadwell'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Broadwell-v3'>Broadwell-IBRS</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Broadwell-IBRS'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Broadwell-v2'>Broadwell-noTSX</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Broadwell-noTSX'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Broadwell-v4'>Broadwell-noTSX-IBRS</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Broadwell-noTSX-IBRS'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Broadwell-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Broadwell-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Broadwell-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Broadwell-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Broadwell-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Broadwell-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Broadwell-v4</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Broadwell-v4'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Cascadelake-Server-v1'>Cascadelake-Server</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Cascadelake-Server-v3'>Cascadelake-Server-noTSX</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-noTSX'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v4</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v4'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v5</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v5'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='Intel' canonical='Conroe-v1'>Conroe</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='Intel'>Conroe-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Cooperlake-v1'>Cooperlake</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cooperlake'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cooperlake-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cooperlake-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cooperlake-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cooperlake-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Denverton-v1'>Denverton</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Denverton'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='mpx'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Denverton-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Denverton-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='mpx'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Denverton-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Denverton-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Denverton-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Denverton-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Hygon' canonical='Dhyana-v1'>Dhyana</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Hygon'>Dhyana-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Hygon'>Dhyana-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Dhyana-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD' canonical='EPYC-v1'>EPYC</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='EPYC-Genoa-v1'>EPYC-Genoa</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Genoa'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amd-psfd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='auto-ibrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='stibp-always-on'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Genoa-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Genoa-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amd-psfd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='auto-ibrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='stibp-always-on'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD' canonical='EPYC-v2'>EPYC-IBPB</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='EPYC-Milan-v1'>EPYC-Milan</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Milan'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Milan-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Milan-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Milan-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Milan-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amd-psfd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='stibp-always-on'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='EPYC-Rome-v1'>EPYC-Rome</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Rome'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Rome-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Rome-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Rome-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Rome-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Rome-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Rome-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD'>EPYC-Rome-v4</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD'>EPYC-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD'>EPYC-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-v4</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-v4'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='GraniteRapids-v1'>GraniteRapids</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='GraniteRapids'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-fp16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='prefetchiti'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>GraniteRapids-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='GraniteRapids-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-fp16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='prefetchiti'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>GraniteRapids-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='GraniteRapids-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-fp16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx10'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx10-128'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx10-256'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx10-512'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='prefetchiti'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Haswell-v1'>Haswell</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Haswell'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Haswell-v3'>Haswell-IBRS</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Haswell-IBRS'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Haswell-v2'>Haswell-noTSX</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Haswell-noTSX'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Haswell-v4'>Haswell-noTSX-IBRS</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Haswell-noTSX-IBRS'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Haswell-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Haswell-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Haswell-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Haswell-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Haswell-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Haswell-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Haswell-v4</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Haswell-v4'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Icelake-Server-v1'>Icelake-Server</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Icelake-Server-v2'>Icelake-Server-noTSX</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-noTSX'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v4</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v4'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v5</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v5'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v6</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v6'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v7</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v7'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='IvyBridge-v1'>IvyBridge</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='IvyBridge'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='IvyBridge-v2'>IvyBridge-IBRS</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='IvyBridge-IBRS'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>IvyBridge-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='IvyBridge-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>IvyBridge-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='IvyBridge-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='KnightsMill-v1'>KnightsMill</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='KnightsMill'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-4fmaps'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-4vnniw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512er'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512pf'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>KnightsMill-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='KnightsMill-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-4fmaps'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-4vnniw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512er'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512pf'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='Nehalem-v1'>Nehalem</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='Nehalem-v2'>Nehalem-IBRS</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>Nehalem-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>Nehalem-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G1-v1'>Opteron_G1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G1-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G2-v1'>Opteron_G2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G2-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G3-v1'>Opteron_G3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G3-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='Opteron_G4-v1'>Opteron_G4</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Opteron_G4'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fma4'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xop'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>Opteron_G4-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Opteron_G4-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fma4'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xop'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='Opteron_G5-v1'>Opteron_G5</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Opteron_G5'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fma4'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='tbm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xop'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>Opteron_G5-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Opteron_G5-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fma4'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='tbm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xop'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='Intel' canonical='Penryn-v1'>Penryn</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='Intel'>Penryn-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='SandyBridge-v1'>SandyBridge</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='SandyBridge-v2'>SandyBridge-IBRS</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>SandyBridge-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>SandyBridge-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='SapphireRapids-v1'>SapphireRapids</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='SapphireRapids'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>SapphireRapids-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='SapphireRapids-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>SapphireRapids-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='SapphireRapids-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>SapphireRapids-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='SapphireRapids-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='SierraForest-v1'>SierraForest</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='SierraForest'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-ne-convert'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-vnni-int8'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='cmpccxadd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>SierraForest-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='SierraForest-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-ne-convert'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-vnni-int8'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='cmpccxadd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v1'>Skylake-Client</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v2'>Skylake-Client-IBRS</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-IBRS'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v3'>Skylake-Client-noTSX-IBRS</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-noTSX-IBRS'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Client-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Client-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Client-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Client-v4</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-v4'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v1'>Skylake-Server</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v2'>Skylake-Server-IBRS</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-IBRS'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v3'>Skylake-Server-noTSX-IBRS</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-noTSX-IBRS'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v4</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v4'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v5</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v5'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Snowridge-v1'>Snowridge</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Snowridge'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='core-capability'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='mpx'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='split-lock-detect'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Snowridge-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Snowridge-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='core-capability'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='mpx'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='split-lock-detect'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Snowridge-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Snowridge-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='core-capability'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='split-lock-detect'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Snowridge-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Snowridge-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='core-capability'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='split-lock-detect'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Snowridge-v4</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Snowridge-v4'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='Westmere-v1'>Westmere</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='Westmere-v2'>Westmere-IBRS</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>Westmere-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>Westmere-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='AMD' canonical='athlon-v1'>athlon</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='athlon'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='3dnow'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='3dnowext'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='AMD'>athlon-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='athlon-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='3dnow'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='3dnowext'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='core2duo-v1'>core2duo</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='core2duo'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel'>core2duo-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='core2duo-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='coreduo-v1'>coreduo</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='coreduo'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel'>coreduo-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='coreduo-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='kvm32-v1'>kvm32</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>kvm32-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='kvm64-v1'>kvm64</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>kvm64-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='n270-v1'>n270</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='n270'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel'>n270-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='n270-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium-v1'>pentium</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium2-v1'>pentium2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium2-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium3-v1'>pentium3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium3-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='AMD' canonical='phenom-v1'>phenom</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='phenom'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='3dnow'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='3dnowext'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='AMD'>phenom-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='phenom-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='3dnow'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='3dnowext'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='qemu32-v1'>qemu32</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>qemu32-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='qemu64-v1'>qemu64</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>qemu64-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </mode>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <memoryBacking supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <enum name='sourceType'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <value>file</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <value>anonymous</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <value>memfd</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   </memoryBacking>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <disk supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='diskDevice'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>disk</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>cdrom</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>floppy</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>lun</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='bus'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>ide</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>fdc</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>scsi</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>virtio</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>usb</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>sata</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='model'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>virtio</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>virtio-transitional</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>virtio-non-transitional</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <graphics supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='type'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>vnc</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>egl-headless</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>dbus</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </graphics>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <video supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='modelType'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>vga</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>cirrus</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>virtio</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>none</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>bochs</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>ramfb</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </video>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <hostdev supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='mode'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>subsystem</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='startupPolicy'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>default</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>mandatory</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>requisite</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>optional</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='subsysType'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>usb</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>pci</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>scsi</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='capsType'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='pciBackend'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </hostdev>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <rng supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='model'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>virtio</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>virtio-transitional</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>virtio-non-transitional</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='backendModel'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>random</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>egd</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>builtin</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <filesystem supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='driverType'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>path</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>handle</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>virtiofs</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </filesystem>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <tpm supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='model'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>tpm-tis</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>tpm-crb</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='backendModel'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>emulator</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>external</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='backendVersion'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>2.0</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </tpm>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <redirdev supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='bus'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>usb</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </redirdev>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <channel supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='type'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>pty</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>unix</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </channel>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <crypto supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='model'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='type'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>qemu</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='backendModel'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>builtin</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </crypto>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <interface supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='backendType'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>default</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>passt</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <panic supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='model'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>isa</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>hyperv</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </panic>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <features>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <gic supported='no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <vmcoreinfo supported='yes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <genid supported='yes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <backingStoreInput supported='yes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <backup supported='yes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <async-teardown supported='yes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <ps2 supported='yes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <sev supported='no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <sgx supported='no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <hyperv supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='features'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>relaxed</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>vapic</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>spinlocks</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>vpindex</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>runtime</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>synic</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>stimer</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>reset</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>vendor_id</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>frequencies</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>reenlightenment</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>tlbflush</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>ipi</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>avic</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>emsr_bitmap</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>xmm_input</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </hyperv>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <launchSecurity supported='no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   </features>
Oct 11 02:05:37 compute-0 nova_compute[356901]: </domainCapabilities>
Oct 11 02:05:37 compute-0 nova_compute[356901]:  _get_domain_capabilities /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1037
Oct 11 02:05:37 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.810 2 DEBUG nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Libvirt host hypervisor capabilities for arch=i686 and machine_type=q35:
Oct 11 02:05:37 compute-0 nova_compute[356901]: <domainCapabilities>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <path>/usr/libexec/qemu-kvm</path>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <domain>kvm</domain>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <machine>pc-q35-rhel9.6.0</machine>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <arch>i686</arch>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <vcpu max='4096'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <iothreads supported='yes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <os supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <enum name='firmware'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <loader supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <value>/usr/share/OVMF/OVMF_CODE.secboot.fd</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='type'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>rom</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>pflash</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='readonly'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>yes</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>no</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='secure'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>no</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </loader>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   </os>
Oct 11 02:05:37 compute-0 nova_compute[356901]:   <cpu>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <mode name='host-passthrough' supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='hostPassthroughMigratable'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>on</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>off</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </mode>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <mode name='maximum' supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <enum name='maximumMigratable'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>on</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <value>off</value>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </mode>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <mode name='host-model' supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model fallback='forbid'>EPYC-Rome</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <vendor>AMD</vendor>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <maxphysaddr mode='passthrough' limit='40'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='x2apic'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='tsc-deadline'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='hypervisor'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='tsc_adjust'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='spec-ctrl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='stibp'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='arch-capabilities'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='ssbd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='cmp_legacy'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='overflow-recov'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='succor'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='ibrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='amd-ssbd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='virt-ssbd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='lbrv'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='tsc-scale'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='vmcb-clean'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='flushbyasid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='pause-filter'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='pfthreshold'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='svme-addr-chk'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='lfence-always-serializing'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='rdctl-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='skip-l1dfl-vmentry'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='mds-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='pschange-mc-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='gds-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='require' name='rfds-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <feature policy='disable' name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     </mode>
Oct 11 02:05:37 compute-0 nova_compute[356901]:     <mode name='custom' supported='yes'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='486-v1'>486</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>486-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Broadwell-v1'>Broadwell</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Broadwell'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Broadwell-v3'>Broadwell-IBRS</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Broadwell-IBRS'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Broadwell-v2'>Broadwell-noTSX</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Broadwell-noTSX'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Broadwell-v4'>Broadwell-noTSX-IBRS</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Broadwell-noTSX-IBRS'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Broadwell-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Broadwell-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Broadwell-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Broadwell-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Broadwell-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Broadwell-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Broadwell-v4</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Broadwell-v4'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Cascadelake-Server-v1'>Cascadelake-Server</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Cascadelake-Server-v3'>Cascadelake-Server-noTSX</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-noTSX'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v4</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v4'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v5</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v5'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='Intel' canonical='Conroe-v1'>Conroe</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='Intel'>Conroe-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Cooperlake-v1'>Cooperlake</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cooperlake'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cooperlake-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cooperlake-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cooperlake-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Cooperlake-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Denverton-v1'>Denverton</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Denverton'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='mpx'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Denverton-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Denverton-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='mpx'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Denverton-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Denverton-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Denverton-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Denverton-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Hygon' canonical='Dhyana-v1'>Dhyana</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Hygon'>Dhyana-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Hygon'>Dhyana-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Dhyana-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD' canonical='EPYC-v1'>EPYC</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='EPYC-Genoa-v1'>EPYC-Genoa</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Genoa'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amd-psfd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='auto-ibrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='stibp-always-on'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Genoa-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Genoa-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amd-psfd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='auto-ibrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='stibp-always-on'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD' canonical='EPYC-v2'>EPYC-IBPB</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='EPYC-Milan-v1'>EPYC-Milan</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Milan'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Milan-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Milan-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Milan-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Milan-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amd-psfd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='stibp-always-on'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='EPYC-Rome-v1'>EPYC-Rome</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Rome'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Rome-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Rome-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Rome-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Rome-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Rome-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-Rome-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD'>EPYC-Rome-v4</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD'>EPYC-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD'>EPYC-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-v4</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='EPYC-v4'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='GraniteRapids-v1'>GraniteRapids</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='GraniteRapids'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-fp16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='prefetchiti'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>GraniteRapids-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='GraniteRapids-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-fp16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='prefetchiti'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>GraniteRapids-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='GraniteRapids-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-fp16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx10'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx10-128'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx10-256'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx10-512'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='prefetchiti'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Haswell-v1'>Haswell</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Haswell'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Haswell-v3'>Haswell-IBRS</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Haswell-IBRS'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Haswell-v2'>Haswell-noTSX</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Haswell-noTSX'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Haswell-v4'>Haswell-noTSX-IBRS</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Haswell-noTSX-IBRS'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Haswell-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Haswell-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Haswell-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Haswell-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Haswell-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Haswell-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Haswell-v4</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Haswell-v4'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Icelake-Server-v1'>Icelake-Server</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Icelake-Server-v2'>Icelake-Server-noTSX</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-noTSX'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v1</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v1'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v2</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v2'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v3</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v3'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v4</model>
Oct 11 02:05:37 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v4'>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:37 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v5</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v5'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v6</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v6'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v7</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v7'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='IvyBridge-v1'>IvyBridge</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='IvyBridge'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='IvyBridge-v2'>IvyBridge-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='IvyBridge-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>IvyBridge-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='IvyBridge-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>IvyBridge-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='IvyBridge-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='KnightsMill-v1'>KnightsMill</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='KnightsMill'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-4fmaps'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-4vnniw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512er'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512pf'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>KnightsMill-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='KnightsMill-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-4fmaps'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-4vnniw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512er'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512pf'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='Nehalem-v1'>Nehalem</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='Nehalem-v2'>Nehalem-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>Nehalem-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>Nehalem-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G1-v1'>Opteron_G1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G1-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G2-v1'>Opteron_G2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G2-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G3-v1'>Opteron_G3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G3-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='Opteron_G4-v1'>Opteron_G4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Opteron_G4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fma4'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xop'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>Opteron_G4-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Opteron_G4-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fma4'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xop'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='Opteron_G5-v1'>Opteron_G5</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Opteron_G5'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fma4'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tbm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xop'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>Opteron_G5-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Opteron_G5-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fma4'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tbm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xop'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='Intel' canonical='Penryn-v1'>Penryn</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='Intel'>Penryn-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='SandyBridge-v1'>SandyBridge</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='SandyBridge-v2'>SandyBridge-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>SandyBridge-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>SandyBridge-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='SapphireRapids-v1'>SapphireRapids</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SapphireRapids'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>SapphireRapids-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SapphireRapids-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>SapphireRapids-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SapphireRapids-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>SapphireRapids-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SapphireRapids-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='SierraForest-v1'>SierraForest</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SierraForest'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-ne-convert'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cmpccxadd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>SierraForest-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SierraForest-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-ne-convert'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cmpccxadd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v1'>Skylake-Client</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v2'>Skylake-Client-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v3'>Skylake-Client-noTSX-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-noTSX-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Client-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Client-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Client-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Client-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v1'>Skylake-Server</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v2'>Skylake-Server-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v3'>Skylake-Server-noTSX-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-noTSX-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v5</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v5'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Snowridge-v1'>Snowridge</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Snowridge'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='core-capability'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mpx'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='split-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Snowridge-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Snowridge-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='core-capability'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mpx'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='split-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Snowridge-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Snowridge-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='core-capability'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='split-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Snowridge-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Snowridge-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='core-capability'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='split-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Snowridge-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Snowridge-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='Westmere-v1'>Westmere</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='Westmere-v2'>Westmere-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>Westmere-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>Westmere-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='AMD' canonical='athlon-v1'>athlon</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='athlon'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnow'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnowext'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='AMD'>athlon-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='athlon-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnow'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnowext'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='core2duo-v1'>core2duo</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='core2duo'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel'>core2duo-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='core2duo-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='coreduo-v1'>coreduo</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='coreduo'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel'>coreduo-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='coreduo-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='kvm32-v1'>kvm32</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>kvm32-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='kvm64-v1'>kvm64</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>kvm64-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='n270-v1'>n270</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='n270'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel'>n270-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='n270-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium-v1'>pentium</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium2-v1'>pentium2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium2-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium3-v1'>pentium3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium3-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='AMD' canonical='phenom-v1'>phenom</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='phenom'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnow'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnowext'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='AMD'>phenom-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='phenom-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnow'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnowext'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='qemu32-v1'>qemu32</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>qemu32-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='qemu64-v1'>qemu64</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>qemu64-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </mode>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <memoryBacking supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <enum name='sourceType'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <value>file</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <value>anonymous</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <value>memfd</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   </memoryBacking>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <disk supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='diskDevice'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>disk</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>cdrom</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>floppy</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>lun</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='bus'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>fdc</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>scsi</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>usb</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>sata</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='model'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio-transitional</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio-non-transitional</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <graphics supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='type'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>vnc</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>egl-headless</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>dbus</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </graphics>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <video supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='modelType'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>vga</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>cirrus</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>none</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>bochs</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>ramfb</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </video>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <hostdev supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='mode'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>subsystem</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='startupPolicy'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>default</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>mandatory</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>requisite</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>optional</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='subsysType'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>usb</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>pci</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>scsi</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='capsType'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='pciBackend'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </hostdev>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <rng supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='model'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio-transitional</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio-non-transitional</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='backendModel'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>random</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>egd</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>builtin</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <filesystem supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='driverType'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>path</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>handle</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtiofs</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </filesystem>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <tpm supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='model'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>tpm-tis</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>tpm-crb</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='backendModel'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>emulator</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>external</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='backendVersion'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>2.0</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </tpm>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <redirdev supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='bus'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>usb</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </redirdev>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <channel supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='type'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>pty</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>unix</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </channel>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <crypto supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='model'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='type'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>qemu</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='backendModel'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>builtin</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </crypto>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <interface supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='backendType'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>default</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>passt</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <panic supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='model'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>isa</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>hyperv</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </panic>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <features>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <gic supported='no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <vmcoreinfo supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <genid supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <backingStoreInput supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <backup supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <async-teardown supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <ps2 supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <sev supported='no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <sgx supported='no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <hyperv supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='features'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>relaxed</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>vapic</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>spinlocks</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>vpindex</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>runtime</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>synic</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>stimer</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>reset</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>vendor_id</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>frequencies</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>reenlightenment</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>tlbflush</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>ipi</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>avic</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>emsr_bitmap</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>xmm_input</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </hyperv>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <launchSecurity supported='no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   </features>
Oct 11 02:05:38 compute-0 nova_compute[356901]: </domainCapabilities>
Oct 11 02:05:38 compute-0 nova_compute[356901]:  _get_domain_capabilities /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1037
Oct 11 02:05:38 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.913 2 DEBUG nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Getting domain capabilities for x86_64 via machine types: {'pc', 'q35'} _get_machine_types /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:952
Oct 11 02:05:38 compute-0 nova_compute[356901]: 2025-10-11 02:05:37.920 2 DEBUG nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Libvirt host hypervisor capabilities for arch=x86_64 and machine_type=pc:
Oct 11 02:05:38 compute-0 nova_compute[356901]: <domainCapabilities>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <path>/usr/libexec/qemu-kvm</path>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <domain>kvm</domain>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <machine>pc-i440fx-rhel7.6.0</machine>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <arch>x86_64</arch>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <vcpu max='240'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <iothreads supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <os supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <enum name='firmware'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <loader supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <value>/usr/share/OVMF/OVMF_CODE.secboot.fd</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='type'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>rom</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>pflash</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='readonly'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>yes</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>no</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='secure'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>no</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </loader>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   </os>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <cpu>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <mode name='host-passthrough' supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='hostPassthroughMigratable'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>on</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>off</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </mode>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <mode name='maximum' supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='maximumMigratable'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>on</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>off</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </mode>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <mode name='host-model' supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model fallback='forbid'>EPYC-Rome</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <vendor>AMD</vendor>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <maxphysaddr mode='passthrough' limit='40'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='x2apic'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='tsc-deadline'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='hypervisor'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='tsc_adjust'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='spec-ctrl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='stibp'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='arch-capabilities'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='ssbd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='cmp_legacy'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='overflow-recov'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='succor'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='ibrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='amd-ssbd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='virt-ssbd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='lbrv'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='tsc-scale'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='vmcb-clean'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='flushbyasid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='pause-filter'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='pfthreshold'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='svme-addr-chk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='lfence-always-serializing'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='rdctl-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='skip-l1dfl-vmentry'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='mds-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='pschange-mc-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='gds-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='rfds-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='disable' name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </mode>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <mode name='custom' supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='486-v1'>486</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>486-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Broadwell-v1'>Broadwell</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Broadwell'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Broadwell-v3'>Broadwell-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Broadwell-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Broadwell-v2'>Broadwell-noTSX</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Broadwell-noTSX'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Broadwell-v4'>Broadwell-noTSX-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Broadwell-noTSX-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Broadwell-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Broadwell-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Broadwell-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Broadwell-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Broadwell-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Broadwell-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Broadwell-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Broadwell-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Cascadelake-Server-v1'>Cascadelake-Server</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Cascadelake-Server-v3'>Cascadelake-Server-noTSX</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-noTSX'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v5</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v5'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='Intel' canonical='Conroe-v1'>Conroe</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='Intel'>Conroe-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Cooperlake-v1'>Cooperlake</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cooperlake'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cooperlake-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cooperlake-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cooperlake-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cooperlake-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Denverton-v1'>Denverton</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Denverton'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mpx'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Denverton-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Denverton-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mpx'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Denverton-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Denverton-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Denverton-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Denverton-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Hygon' canonical='Dhyana-v1'>Dhyana</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Hygon'>Dhyana-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Hygon'>Dhyana-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Dhyana-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD' canonical='EPYC-v1'>EPYC</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='EPYC-Genoa-v1'>EPYC-Genoa</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Genoa'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amd-psfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='auto-ibrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='stibp-always-on'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Genoa-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Genoa-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amd-psfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='auto-ibrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='stibp-always-on'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD' canonical='EPYC-v2'>EPYC-IBPB</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='EPYC-Milan-v1'>EPYC-Milan</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Milan'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Milan-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Milan-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Milan-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Milan-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amd-psfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='stibp-always-on'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='EPYC-Rome-v1'>EPYC-Rome</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Rome'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Rome-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Rome-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Rome-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Rome-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Rome-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Rome-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD'>EPYC-Rome-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD'>EPYC-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD'>EPYC-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='GraniteRapids-v1'>GraniteRapids</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='GraniteRapids'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='prefetchiti'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>GraniteRapids-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='GraniteRapids-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='prefetchiti'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>GraniteRapids-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='GraniteRapids-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx10'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx10-128'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx10-256'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx10-512'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='prefetchiti'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Haswell-v1'>Haswell</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Haswell'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Haswell-v3'>Haswell-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Haswell-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Haswell-v2'>Haswell-noTSX</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Haswell-noTSX'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Haswell-v4'>Haswell-noTSX-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Haswell-noTSX-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Haswell-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Haswell-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Haswell-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Haswell-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Haswell-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Haswell-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Haswell-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Haswell-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Icelake-Server-v1'>Icelake-Server</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Icelake-Server-v2'>Icelake-Server-noTSX</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-noTSX'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v5</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v5'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v6</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v6'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v7</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v7'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='IvyBridge-v1'>IvyBridge</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='IvyBridge'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='IvyBridge-v2'>IvyBridge-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='IvyBridge-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>IvyBridge-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='IvyBridge-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>IvyBridge-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='IvyBridge-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='KnightsMill-v1'>KnightsMill</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='KnightsMill'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-4fmaps'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-4vnniw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512er'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512pf'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>KnightsMill-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='KnightsMill-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-4fmaps'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-4vnniw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512er'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512pf'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='Nehalem-v1'>Nehalem</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='Nehalem-v2'>Nehalem-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>Nehalem-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>Nehalem-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G1-v1'>Opteron_G1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G1-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G2-v1'>Opteron_G2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G2-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G3-v1'>Opteron_G3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G3-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='Opteron_G4-v1'>Opteron_G4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Opteron_G4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fma4'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xop'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>Opteron_G4-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Opteron_G4-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fma4'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xop'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='Opteron_G5-v1'>Opteron_G5</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Opteron_G5'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fma4'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tbm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xop'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>Opteron_G5-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Opteron_G5-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fma4'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tbm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xop'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='Intel' canonical='Penryn-v1'>Penryn</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='Intel'>Penryn-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='SandyBridge-v1'>SandyBridge</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='SandyBridge-v2'>SandyBridge-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>SandyBridge-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>SandyBridge-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='SapphireRapids-v1'>SapphireRapids</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SapphireRapids'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>SapphireRapids-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SapphireRapids-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>SapphireRapids-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SapphireRapids-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>SapphireRapids-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SapphireRapids-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='SierraForest-v1'>SierraForest</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SierraForest'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-ne-convert'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cmpccxadd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>SierraForest-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SierraForest-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-ne-convert'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cmpccxadd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v1'>Skylake-Client</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v2'>Skylake-Client-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v3'>Skylake-Client-noTSX-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-noTSX-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Client-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Client-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Client-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Client-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v1'>Skylake-Server</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v2'>Skylake-Server-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v3'>Skylake-Server-noTSX-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-noTSX-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v5</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v5'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Snowridge-v1'>Snowridge</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Snowridge'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='core-capability'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mpx'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='split-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Snowridge-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Snowridge-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='core-capability'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mpx'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='split-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Snowridge-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Snowridge-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='core-capability'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='split-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Snowridge-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Snowridge-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='core-capability'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='split-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Snowridge-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Snowridge-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='Westmere-v1'>Westmere</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='Westmere-v2'>Westmere-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>Westmere-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>Westmere-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='AMD' canonical='athlon-v1'>athlon</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='athlon'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnow'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnowext'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='AMD'>athlon-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='athlon-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnow'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnowext'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='core2duo-v1'>core2duo</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='core2duo'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel'>core2duo-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='core2duo-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='coreduo-v1'>coreduo</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='coreduo'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel'>coreduo-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='coreduo-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='kvm32-v1'>kvm32</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>kvm32-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='kvm64-v1'>kvm64</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>kvm64-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='n270-v1'>n270</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='n270'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel'>n270-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='n270-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium-v1'>pentium</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium2-v1'>pentium2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium2-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium3-v1'>pentium3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium3-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='AMD' canonical='phenom-v1'>phenom</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='phenom'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnow'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnowext'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='AMD'>phenom-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='phenom-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnow'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnowext'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='qemu32-v1'>qemu32</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>qemu32-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='qemu64-v1'>qemu64</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>qemu64-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </mode>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <memoryBacking supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <enum name='sourceType'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <value>file</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <value>anonymous</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <value>memfd</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   </memoryBacking>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <disk supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='diskDevice'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>disk</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>cdrom</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>floppy</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>lun</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='bus'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>ide</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>fdc</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>scsi</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>usb</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>sata</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='model'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio-transitional</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio-non-transitional</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <graphics supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='type'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>vnc</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>egl-headless</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>dbus</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </graphics>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <video supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='modelType'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>vga</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>cirrus</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>none</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>bochs</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>ramfb</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </video>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <hostdev supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='mode'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>subsystem</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='startupPolicy'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>default</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>mandatory</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>requisite</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>optional</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='subsysType'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>usb</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>pci</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>scsi</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='capsType'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='pciBackend'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </hostdev>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <rng supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='model'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio-transitional</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio-non-transitional</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='backendModel'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>random</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>egd</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>builtin</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <filesystem supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='driverType'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>path</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>handle</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtiofs</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </filesystem>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <tpm supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='model'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>tpm-tis</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>tpm-crb</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='backendModel'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>emulator</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>external</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='backendVersion'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>2.0</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </tpm>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <redirdev supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='bus'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>usb</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </redirdev>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <channel supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='type'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>pty</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>unix</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </channel>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <crypto supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='model'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='type'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>qemu</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='backendModel'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>builtin</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </crypto>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <interface supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='backendType'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>default</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>passt</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <panic supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='model'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>isa</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>hyperv</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </panic>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <features>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <gic supported='no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <vmcoreinfo supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <genid supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <backingStoreInput supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <backup supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <async-teardown supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <ps2 supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <sev supported='no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <sgx supported='no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <hyperv supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='features'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>relaxed</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>vapic</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>spinlocks</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>vpindex</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>runtime</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>synic</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>stimer</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>reset</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>vendor_id</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>frequencies</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>reenlightenment</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>tlbflush</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>ipi</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>avic</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>emsr_bitmap</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>xmm_input</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </hyperv>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <launchSecurity supported='no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   </features>
Oct 11 02:05:38 compute-0 nova_compute[356901]: </domainCapabilities>
Oct 11 02:05:38 compute-0 nova_compute[356901]:  _get_domain_capabilities /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1037
Oct 11 02:05:38 compute-0 nova_compute[356901]: 2025-10-11 02:05:38.067 2 DEBUG nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Libvirt host hypervisor capabilities for arch=x86_64 and machine_type=q35:
Oct 11 02:05:38 compute-0 nova_compute[356901]: <domainCapabilities>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <path>/usr/libexec/qemu-kvm</path>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <domain>kvm</domain>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <machine>pc-q35-rhel9.6.0</machine>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <arch>x86_64</arch>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <vcpu max='4096'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <iothreads supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <os supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <enum name='firmware'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <value>efi</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <loader supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <value>/usr/share/edk2/ovmf/OVMF_CODE.secboot.fd</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <value>/usr/share/edk2/ovmf/OVMF_CODE.fd</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <value>/usr/share/edk2/ovmf/OVMF.amdsev.fd</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <value>/usr/share/edk2/ovmf/OVMF.inteltdx.secboot.fd</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='type'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>rom</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>pflash</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='readonly'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>yes</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>no</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='secure'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>yes</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>no</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </loader>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   </os>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <cpu>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <mode name='host-passthrough' supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='hostPassthroughMigratable'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>on</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>off</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </mode>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <mode name='maximum' supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='maximumMigratable'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>on</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>off</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </mode>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <mode name='host-model' supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model fallback='forbid'>EPYC-Rome</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <vendor>AMD</vendor>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <maxphysaddr mode='passthrough' limit='40'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='x2apic'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='tsc-deadline'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='hypervisor'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='tsc_adjust'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='spec-ctrl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='stibp'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='arch-capabilities'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='ssbd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='cmp_legacy'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='overflow-recov'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='succor'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='ibrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='amd-ssbd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='virt-ssbd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='lbrv'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='tsc-scale'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='vmcb-clean'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='flushbyasid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='pause-filter'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='pfthreshold'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='svme-addr-chk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='lfence-always-serializing'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='rdctl-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='skip-l1dfl-vmentry'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='mds-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='pschange-mc-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='gds-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='require' name='rfds-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <feature policy='disable' name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </mode>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <mode name='custom' supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='486-v1'>486</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>486-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Broadwell-v1'>Broadwell</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Broadwell'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Broadwell-v3'>Broadwell-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Broadwell-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Broadwell-v2'>Broadwell-noTSX</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Broadwell-noTSX'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Broadwell-v4'>Broadwell-noTSX-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Broadwell-noTSX-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Broadwell-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Broadwell-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Broadwell-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Broadwell-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Broadwell-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Broadwell-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Broadwell-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Broadwell-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Cascadelake-Server-v1'>Cascadelake-Server</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Cascadelake-Server-v3'>Cascadelake-Server-noTSX</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-noTSX'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cascadelake-Server-v5</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cascadelake-Server-v5'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='Intel' canonical='Conroe-v1'>Conroe</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='Intel'>Conroe-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Cooperlake-v1'>Cooperlake</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cooperlake'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cooperlake-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cooperlake-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Cooperlake-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Cooperlake-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Denverton-v1'>Denverton</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Denverton'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mpx'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Denverton-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Denverton-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mpx'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Denverton-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Denverton-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Denverton-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Denverton-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Hygon' canonical='Dhyana-v1'>Dhyana</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Hygon'>Dhyana-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Hygon'>Dhyana-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Dhyana-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD' canonical='EPYC-v1'>EPYC</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='EPYC-Genoa-v1'>EPYC-Genoa</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Genoa'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amd-psfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='auto-ibrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='stibp-always-on'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Genoa-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Genoa-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amd-psfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='auto-ibrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='stibp-always-on'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD' canonical='EPYC-v2'>EPYC-IBPB</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='EPYC-Milan-v1'>EPYC-Milan</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Milan'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Milan-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Milan-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Milan-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Milan-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amd-psfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='no-nested-data-bp'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='null-sel-clr-base'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='stibp-always-on'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='EPYC-Rome-v1'>EPYC-Rome</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Rome'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Rome-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Rome-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Rome-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Rome-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-Rome-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-Rome-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD'>EPYC-Rome-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD'>EPYC-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='AMD'>EPYC-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>EPYC-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='EPYC-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='GraniteRapids-v1'>GraniteRapids</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='GraniteRapids'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='prefetchiti'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>GraniteRapids-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='GraniteRapids-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='prefetchiti'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>GraniteRapids-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='GraniteRapids-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx10'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx10-128'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx10-256'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx10-512'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='prefetchiti'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Haswell-v1'>Haswell</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Haswell'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Haswell-v3'>Haswell-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Haswell-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Haswell-v2'>Haswell-noTSX</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Haswell-noTSX'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Haswell-v4'>Haswell-noTSX-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Haswell-noTSX-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Haswell-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Haswell-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Haswell-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Haswell-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Haswell-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Haswell-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Haswell-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Haswell-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Icelake-Server-v1'>Icelake-Server</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Icelake-Server-v2'>Icelake-Server-noTSX</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-noTSX'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v5</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v5'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v6</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v6'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Icelake-Server-v7</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Icelake-Server-v7'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='IvyBridge-v1'>IvyBridge</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='IvyBridge'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='IvyBridge-v2'>IvyBridge-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='IvyBridge-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>IvyBridge-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='IvyBridge-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>IvyBridge-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='IvyBridge-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='KnightsMill-v1'>KnightsMill</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='KnightsMill'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-4fmaps'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-4vnniw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512er'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512pf'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>KnightsMill-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='KnightsMill-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-4fmaps'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-4vnniw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512er'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512pf'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='Nehalem-v1'>Nehalem</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='Nehalem-v2'>Nehalem-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>Nehalem-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>Nehalem-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G1-v1'>Opteron_G1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G1-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G2-v1'>Opteron_G2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G2-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD' canonical='Opteron_G3-v1'>Opteron_G3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='AMD'>Opteron_G3-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='Opteron_G4-v1'>Opteron_G4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Opteron_G4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fma4'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xop'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>Opteron_G4-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Opteron_G4-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fma4'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xop'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD' canonical='Opteron_G5-v1'>Opteron_G5</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Opteron_G5'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fma4'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tbm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xop'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='AMD'>Opteron_G5-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Opteron_G5-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fma4'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tbm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xop'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='Intel' canonical='Penryn-v1'>Penryn</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='Intel'>Penryn-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='SandyBridge-v1'>SandyBridge</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='SandyBridge-v2'>SandyBridge-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>SandyBridge-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>SandyBridge-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='SapphireRapids-v1'>SapphireRapids</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SapphireRapids'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>SapphireRapids-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SapphireRapids-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>SapphireRapids-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SapphireRapids-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>SapphireRapids-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SapphireRapids-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='amx-tile'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-bf16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-fp16'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512-vpopcntdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bitalg'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vbmi2'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrc'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fzrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='la57'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='taa-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='tsx-ldtrk'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xfd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='SierraForest-v1'>SierraForest</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SierraForest'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-ne-convert'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cmpccxadd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>SierraForest-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='SierraForest-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-ifma'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-ne-convert'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx-vnni-int8'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='bus-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cmpccxadd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fbsdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='fsrs'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ibrs-all'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mcdt-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pbrsb-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='psdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='sbdr-ssdp-no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='serialize'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vaes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='vpclmulqdq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v1'>Skylake-Client</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v2'>Skylake-Client-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Client-v3'>Skylake-Client-noTSX-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-noTSX-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Client-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Client-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Client-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Client-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Client-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v1'>Skylake-Server</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v2'>Skylake-Server-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Skylake-Server-v3'>Skylake-Server-noTSX-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-noTSX-IBRS'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='hle'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='rtm'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Skylake-Server-v5</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Skylake-Server-v5'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512bw'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512cd'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512dq'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512f'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='avx512vl'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='invpcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pcid'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='pku'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel' canonical='Snowridge-v1'>Snowridge</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Snowridge'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='core-capability'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mpx'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='split-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Snowridge-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Snowridge-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='core-capability'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='mpx'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='split-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Snowridge-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Snowridge-v2'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='core-capability'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='split-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Snowridge-v3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Snowridge-v3'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='core-capability'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='split-lock-detect'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' vendor='Intel'>Snowridge-v4</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='Snowridge-v4'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='cldemote'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='erms'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='gfni'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdir64b'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='movdiri'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='xsaves'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='Westmere-v1'>Westmere</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel' canonical='Westmere-v2'>Westmere-IBRS</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>Westmere-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' vendor='Intel'>Westmere-v2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='AMD' canonical='athlon-v1'>athlon</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='athlon'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnow'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnowext'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='AMD'>athlon-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='athlon-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnow'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnowext'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='core2duo-v1'>core2duo</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='core2duo'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel'>core2duo-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='core2duo-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='coreduo-v1'>coreduo</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='coreduo'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel'>coreduo-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='coreduo-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='kvm32-v1'>kvm32</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>kvm32-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='kvm64-v1'>kvm64</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>kvm64-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel' canonical='n270-v1'>n270</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='n270'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='Intel'>n270-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='n270-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='ss'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium-v1'>pentium</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium2-v1'>pentium2</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium2-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='pentium3-v1'>pentium3</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>pentium3-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='AMD' canonical='phenom-v1'>phenom</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='phenom'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnow'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnowext'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='no' deprecated='yes' vendor='AMD'>phenom-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <blockers model='phenom-v1'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnow'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <feature name='3dnowext'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </blockers>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='qemu32-v1'>qemu32</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>qemu32-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown' canonical='qemu64-v1'>qemu64</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <model usable='yes' deprecated='yes' vendor='unknown'>qemu64-v1</model>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </mode>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <memoryBacking supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <enum name='sourceType'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <value>file</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <value>anonymous</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <value>memfd</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   </memoryBacking>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <disk supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='diskDevice'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>disk</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>cdrom</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>floppy</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>lun</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='bus'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>fdc</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>scsi</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>usb</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>sata</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='model'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio-transitional</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio-non-transitional</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <graphics supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='type'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>vnc</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>egl-headless</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>dbus</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </graphics>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <video supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='modelType'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>vga</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>cirrus</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>none</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>bochs</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>ramfb</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </video>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <hostdev supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='mode'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>subsystem</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='startupPolicy'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>default</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>mandatory</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>requisite</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>optional</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='subsysType'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>usb</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>pci</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>scsi</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='capsType'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='pciBackend'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </hostdev>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <rng supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='model'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio-transitional</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtio-non-transitional</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='backendModel'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>random</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>egd</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>builtin</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <filesystem supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='driverType'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>path</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>handle</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>virtiofs</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </filesystem>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <tpm supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='model'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>tpm-tis</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>tpm-crb</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='backendModel'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>emulator</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>external</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='backendVersion'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>2.0</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </tpm>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <redirdev supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='bus'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>usb</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </redirdev>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <channel supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='type'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>pty</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>unix</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </channel>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <crypto supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='model'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='type'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>qemu</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='backendModel'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>builtin</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </crypto>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <interface supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='backendType'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>default</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>passt</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <panic supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='model'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>isa</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>hyperv</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </panic>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   <features>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <gic supported='no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <vmcoreinfo supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <genid supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <backingStoreInput supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <backup supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <async-teardown supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <ps2 supported='yes'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <sev supported='no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <sgx supported='no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <hyperv supported='yes'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       <enum name='features'>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>relaxed</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>vapic</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>spinlocks</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>vpindex</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>runtime</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>synic</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>stimer</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>reset</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>vendor_id</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>frequencies</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>reenlightenment</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>tlbflush</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>ipi</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>avic</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>emsr_bitmap</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:         <value>xmm_input</value>
Oct 11 02:05:38 compute-0 nova_compute[356901]:       </enum>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     </hyperv>
Oct 11 02:05:38 compute-0 nova_compute[356901]:     <launchSecurity supported='no'/>
Oct 11 02:05:38 compute-0 nova_compute[356901]:   </features>
Oct 11 02:05:38 compute-0 nova_compute[356901]: </domainCapabilities>
Oct 11 02:05:38 compute-0 nova_compute[356901]:  _get_domain_capabilities /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1037
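The dump above is the libvirt domainCapabilities document that nova-compute retrieves at startup (via _get_domain_capabilities) to learn which CPU models, device types and features the hypervisor can expose; models flagged usable='no' carry a <blockers> list naming the host features they are missing. A minimal sketch of fetching the same document outside nova, assuming the libvirt-python bindings and a local qemu:///system connection:

    import libvirt
    import xml.etree.ElementTree as ET

    conn = libvirt.open("qemu:///system")
    # Arguments: emulator binary, arch, machine type, virt type; None lets
    # libvirt pick the host defaults. Returns the XML logged above.
    caps_xml = conn.getDomainCapabilities(None, "x86_64", None, "kvm", 0)
    root = ET.fromstring(caps_xml)
    # CPU models a guest can request under <mode name='custom'>, as in the dump.
    for model in root.findall(".//cpu/mode[@name='custom']/model"):
        if model.get("usable") == "yes":
            print(model.text)
    conn.close()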
Oct 11 02:05:38 compute-0 nova_compute[356901]: 2025-10-11 02:05:38.168 2 DEBUG nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Checking secure boot support for host arch (x86_64) supports_secure_boot /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1782
Oct 11 02:05:38 compute-0 nova_compute[356901]: 2025-10-11 02:05:38.169 2 INFO nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Secure Boot support detected
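The Secure Boot probe reads the same capabilities document: libvirt advertises firmware support under the <os> element (earlier in the dump than this excerpt), including a loader <enum name='secure'>. A hedged sketch, with the element path assumed from libvirt's domainCapabilities schema rather than shown above:

    import libvirt
    import xml.etree.ElementTree as ET

    conn = libvirt.open("qemu:///system")
    root = ET.fromstring(conn.getDomainCapabilities(None, "x86_64", None, "kvm", 0))
    # 'yes' in the secure enum means a Secure Boot capable loader is available.
    values = [v.text for v in root.findall(".//os/loader/enum[@name='secure']/value")]
    print("secure boot available:", "yes" in values)
    conn.close()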
Oct 11 02:05:38 compute-0 nova_compute[356901]: 2025-10-11 02:05:38.173 2 INFO nova.virt.libvirt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] The live_migration_permit_post_copy is set to True and post copy live migration is available so auto-converge will not be in use.
Oct 11 02:05:38 compute-0 nova_compute[356901]: 2025-10-11 02:05:38.195 2 DEBUG nova.virt.libvirt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Enabling emulated TPM support _check_vtpm_support /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:1097
Oct 11 02:05:38 compute-0 nova_compute[356901]: 2025-10-11 02:05:38.231 2 INFO nova.virt.node [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Determined node identity 256b11da-7f71-42c0-941c-ea1e909a35f8 from /var/lib/nova/compute_id
Oct 11 02:05:38 compute-0 nova_compute[356901]: 2025-10-11 02:05:38.256 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Verified node 256b11da-7f71-42c0-941c-ea1e909a35f8 matches my host compute-0.ctlplane.example.com _check_for_host_rename /usr/lib/python3.9/site-packages/nova/compute/manager.py:1568
Oct 11 02:05:38 compute-0 nova_compute[356901]: 2025-10-11 02:05:38.283 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Looking for unclaimed instances stuck in BUILDING status for nodes managed by this host
Oct 11 02:05:38 compute-0 unix_chkpwd[357203]: password check failed for user (root)
Oct 11 02:05:38 compute-0 sshd-session[357180]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:05:38 compute-0 ceph-mon[191930]: pgmap v823: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:38 compute-0 nova_compute[356901]: 2025-10-11 02:05:38.806 2 ERROR nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Could not retrieve compute node resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8 and therefore unable to error out any instances stuck in BUILDING state. Error: Failed to retrieve allocations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8: {"errors": [{"status": 404, "title": "Not Found", "detail": "The resource could not be found.\n\n Resource provider '256b11da-7f71-42c0-941c-ea1e909a35f8' not found: No resource provider with uuid 256b11da-7f71-42c0-941c-ea1e909a35f8 found  ", "request_id": "req-0a2b0c23-5276-4b38-b2cd-36a046d81336"}]}: nova.exception.ResourceProviderAllocationRetrievalFailed: Failed to retrieve allocations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8: {"errors": [{"status": 404, "title": "Not Found", "detail": "The resource could not be found.\n\n Resource provider '256b11da-7f71-42c0-941c-ea1e909a35f8' not found: No resource provider with uuid 256b11da-7f71-42c0-941c-ea1e909a35f8 found  ", "request_id": "req-0a2b0c23-5276-4b38-b2cd-36a046d81336"}]}
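This 404 is expected on a first start: nova asks Placement for the provider's allocations before the provider record exists (it is created moments later, at 02:05:40.380). The call is a plain REST GET against Placement's per-provider allocations endpoint; a sketch using requests, where the endpoint URL and token are hypothetical stand-ins:

    import requests

    PLACEMENT = "http://placement.example.com/placement"  # hypothetical endpoint
    TOKEN = "..."                                         # hypothetical Keystone token
    uuid = "256b11da-7f71-42c0-941c-ea1e909a35f8"

    resp = requests.get(
        f"{PLACEMENT}/resource_providers/{uuid}/allocations",
        headers={"X-Auth-Token": TOKEN,
                 "OpenStack-API-Version": "placement 1.28"},
    )
    # A 404 with the body shown in the log means the provider record is
    # missing, not that the request itself was malformed.
    print(resp.status_code, resp.json())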
Oct 11 02:05:38 compute-0 nova_compute[356901]: 2025-10-11 02:05:38.877 2 DEBUG oslo_concurrency.lockutils [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:05:38 compute-0 nova_compute[356901]: 2025-10-11 02:05:38.878 2 DEBUG oslo_concurrency.lockutils [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:05:38 compute-0 nova_compute[356901]: 2025-10-11 02:05:38.879 2 DEBUG oslo_concurrency.lockutils [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
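The acquire/release pair around "compute_resources" is oslo.concurrency's named-lock machinery; here clean_compute_node_cache waited 1 ms for the lock and held it for 1 ms. The same pattern, sketched with oslo_concurrency directly:

    from oslo_concurrency import lockutils

    # Serializes every mutation of the resource tracker's shared state behind
    # one named lock; entering and leaving produces exactly the DEBUG pairs
    # seen above.
    @lockutils.synchronized("compute_resources")
    def clean_compute_node_cache():
        ...  # inspect/prune cached compute node records here

    clean_compute_node_cache()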
Oct 11 02:05:38 compute-0 nova_compute[356901]: 2025-10-11 02:05:38.879 2 DEBUG nova.compute.resource_tracker [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:05:38 compute-0 nova_compute[356901]: 2025-10-11 02:05:38.880 2 DEBUG oslo_concurrency.processutils [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:05:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v824: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:05:39 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/414064105' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:05:39 compute-0 nova_compute[356901]: 2025-10-11 02:05:39.336 2 DEBUG oslo_concurrency.processutils [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.456s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
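With the RBD image backend, nova sizes shared storage by shelling out to ceph df, which is the subprocess logged above (0.456 s round trip). The equivalent probe, assuming the client id and conf path from the log:

    import json
    import subprocess

    out = subprocess.check_output(
        ["ceph", "df", "--format=json",
         "--id", "openstack", "--conf", "/etc/ceph/ceph.conf"]
    )
    stats = json.loads(out)["stats"]
    # These totals feed the free_disk figure the resource tracker reports
    # a moment later.
    print(stats["total_avail_bytes"], "of", stats["total_bytes"], "bytes free")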
Oct 11 02:05:39 compute-0 nova_compute[356901]: 2025-10-11 02:05:39.775 2 WARNING nova.virt.libvirt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:05:39 compute-0 nova_compute[356901]: 2025-10-11 02:05:39.776 2 DEBUG nova.compute.resource_tracker [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=4571MB free_disk=59.98828125GB free_vcpus=8 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:05:39 compute-0 nova_compute[356901]: 2025-10-11 02:05:39.776 2 DEBUG oslo_concurrency.lockutils [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:05:39 compute-0 nova_compute[356901]: 2025-10-11 02:05:39.777 2 DEBUG oslo_concurrency.lockutils [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:05:39 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/414064105' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:05:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:05:40 compute-0 nova_compute[356901]: 2025-10-11 02:05:40.245 2 ERROR nova.compute.resource_tracker [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Skipping removal of allocations for deleted instances: Failed to retrieve allocations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8: {"errors": [{"status": 404, "title": "Not Found", "detail": "The resource could not be found.\n\n Resource provider '256b11da-7f71-42c0-941c-ea1e909a35f8' not found: No resource provider with uuid 256b11da-7f71-42c0-941c-ea1e909a35f8 found  ", "request_id": "req-b3e0e2e1-4c76-4fb7-8833-3702c509342c"}]}: nova.exception.ResourceProviderAllocationRetrievalFailed: Failed to retrieve allocations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8: {"errors": [{"status": 404, "title": "Not Found", "detail": "The resource could not be found.\n\n Resource provider '256b11da-7f71-42c0-941c-ea1e909a35f8' not found: No resource provider with uuid 256b11da-7f71-42c0-941c-ea1e909a35f8 found  ", "request_id": "req-b3e0e2e1-4c76-4fb7-8833-3702c509342c"}]}
Oct 11 02:05:40 compute-0 nova_compute[356901]: 2025-10-11 02:05:40.247 2 DEBUG nova.compute.resource_tracker [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Total usable vcpus: 8, total allocated vcpus: 0 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:05:40 compute-0 nova_compute[356901]: 2025-10-11 02:05:40.247 2 DEBUG nova.compute.resource_tracker [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=512MB phys_disk=59GB used_disk=0GB total_vcpus=8 used_vcpus=0 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:05:40 compute-0 nova_compute[356901]: 2025-10-11 02:05:40.380 2 INFO nova.scheduler.client.report [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [req-b949bcdf-a0a0-422b-a66b-ea1fcaf568a2] Created resource provider record via placement API for resource provider with UUID 256b11da-7f71-42c0-941c-ea1e909a35f8 and name compute-0.ctlplane.example.com.
Oct 11 02:05:40 compute-0 nova_compute[356901]: 2025-10-11 02:05:40.403 2 DEBUG oslo_concurrency.processutils [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:05:40 compute-0 sshd-session[357180]: Failed password for root from 121.227.153.123 port 53008 ssh2
Oct 11 02:05:40 compute-0 ceph-mon[191930]: pgmap v824: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:05:40 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2847632310' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:05:40 compute-0 nova_compute[356901]: 2025-10-11 02:05:40.940 2 DEBUG oslo_concurrency.processutils [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.537s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:05:40 compute-0 nova_compute[356901]: 2025-10-11 02:05:40.952 2 DEBUG nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] /sys/module/kvm_amd/parameters/sev contains [N] _kernel_supports_amd_sev /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1803
Oct 11 02:05:40 compute-0 nova_compute[356901]: 2025-10-11 02:05:40.953 2 INFO nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] kernel doesn't support AMD SEV
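The AMD SEV probe is a one-line file read: nova logs the raw contents of /sys/module/kvm_amd/parameters/sev ("N" here, hence "kernel doesn't support AMD SEV" on this Intel host). The same check, sketched:

    from pathlib import Path

    param = Path("/sys/module/kvm_amd/parameters/sev")
    # A missing file (kvm_amd not loaded) and "N" both mean no SEV; "Y" or
    # "1" means the kernel side is enabled.
    enabled = param.exists() and param.read_text().strip() in ("Y", "1")
    print("AMD SEV supported by kernel:", enabled)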
Oct 11 02:05:40 compute-0 nova_compute[356901]: 2025-10-11 02:05:40.954 2 DEBUG nova.compute.provider_tree [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Updating inventory in ProviderTree for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with inventory: {'MEMORY_MB': {'total': 7680, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0, 'reserved': 512}, 'VCPU': {'total': 8, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0, 'reserved': 0}, 'DISK_GB': {'total': 59, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9, 'reserved': 0}} update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:176
Oct 11 02:05:40 compute-0 nova_compute[356901]: 2025-10-11 02:05:40.955 2 DEBUG nova.virt.libvirt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] CPU mode 'host-model' models '' was chosen, with extra flags: '' _get_guest_cpu_model_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:5396
Oct 11 02:05:41 compute-0 nova_compute[356901]: 2025-10-11 02:05:41.019 2 DEBUG nova.scheduler.client.report [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Updated inventory for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with generation 0 in Placement from set_inventory_for_provider using data: {'MEMORY_MB': {'total': 7680, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0, 'reserved': 512}, 'VCPU': {'total': 8, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0, 'reserved': 0}, 'DISK_GB': {'total': 59, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9, 'reserved': 0}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:957
Oct 11 02:05:41 compute-0 nova_compute[356901]: 2025-10-11 02:05:41.019 2 DEBUG nova.compute.provider_tree [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Updating resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8 generation from 0 to 1 during operation: update_inventory _update_generation /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:164
Oct 11 02:05:41 compute-0 nova_compute[356901]: 2025-10-11 02:05:41.020 2 DEBUG nova.compute.provider_tree [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Updating inventory in ProviderTree for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with inventory: {'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:176
Oct 11 02:05:41 compute-0 nova_compute[356901]: 2025-10-11 02:05:41.129 2 DEBUG nova.compute.provider_tree [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Updating resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8 generation from 1 to 2 during operation: update_traits _update_generation /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:164
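The inventory dict encodes Placement's capacity model: for each resource class, schedulable capacity is (total - reserved) * allocation_ratio, so this host offers (7680 - 512) * 1.0 = 7168 MB of RAM, (8 - 0) * 4.0 = 32 VCPUs and (59 - 0) * 0.9 = 53.1 GB of disk to the scheduler. A worked check:

    inventory = {
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "VCPU":      {"total": 8,    "reserved": 0,   "allocation_ratio": 4.0},
        "DISK_GB":   {"total": 59,   "reserved": 0,   "allocation_ratio": 0.9},
    }
    for rc, inv in inventory.items():
        capacity = (inv["total"] - inv["reserved"]) * inv["allocation_ratio"]
        print(rc, capacity)  # MEMORY_MB 7168.0, VCPU 32.0, DISK_GB 53.1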
Oct 11 02:05:41 compute-0 nova_compute[356901]: 2025-10-11 02:05:41.166 2 DEBUG nova.compute.resource_tracker [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:05:41 compute-0 nova_compute[356901]: 2025-10-11 02:05:41.166 2 DEBUG oslo_concurrency.lockutils [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 1.390s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:05:41 compute-0 nova_compute[356901]: 2025-10-11 02:05:41.167 2 DEBUG nova.service [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Creating RPC server for service compute start /usr/lib/python3.9/site-packages/nova/service.py:182
Oct 11 02:05:41 compute-0 nova_compute[356901]: 2025-10-11 02:05:41.275 2 DEBUG nova.service [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Join ServiceGroup membership for this service compute start /usr/lib/python3.9/site-packages/nova/service.py:199
Oct 11 02:05:41 compute-0 nova_compute[356901]: 2025-10-11 02:05:41.276 2 DEBUG nova.servicegroup.drivers.db [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] DB_Driver: join new ServiceGroup member compute-0.ctlplane.example.com to the compute group, service = <Service: host=compute-0.ctlplane.example.com, binary=nova-compute, manager_class_name=nova.compute.manager.ComputeManager> join /usr/lib/python3.9/site-packages/nova/servicegroup/drivers/db.py:44
Oct 11 02:05:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v825: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:41 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2847632310' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:05:42 compute-0 sshd-session[357180]: Connection closed by authenticating user root 121.227.153.123 port 53008 [preauth]
Oct 11 02:05:42 compute-0 podman[357248]: 2025-10-11 02:05:42.228588 +0000 UTC m=+0.106825608 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:05:42 compute-0 podman[357251]: 2025-10-11 02:05:42.237755606 +0000 UTC m=+0.103994528 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, managed_by=edpm_ansible, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:05:42 compute-0 podman[357250]: 2025-10-11 02:05:42.241300154 +0000 UTC m=+0.114771462 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image)
Oct 11 02:05:42 compute-0 podman[357252]: 2025-10-11 02:05:42.256943789 +0000 UTC m=+0.117301839 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, name=ubi9, container_name=kepler, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, config_id=edpm, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, distribution-scope=public, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., version=9.4, release-0.7.12=, architecture=x86_64, io.openshift.tags=base rhel9, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.buildah.version=1.29.0, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, build-date=2024-09-18T21:23:30, io.openshift.expose-services=, release=1214.1726694543, com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, summary=Provides the latest release of Red Hat Universal Base Image 9.)
Oct 11 02:05:42 compute-0 podman[357249]: 2025-10-11 02:05:42.27317349 +0000 UTC m=+0.150180196 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_controller, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, org.label-schema.build-date=20251009, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.license=GPLv2)
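The burst of health_status=healthy events is podman running each edpm container's configured healthcheck (the /openstack/healthcheck script mounted read-only into every container). The same check can be driven by hand; a sketch via the podman CLI, noting that the inspect field is .State.Healthcheck on older podman releases:

    import subprocess

    # Trigger one healthcheck run for a container named in the log, then read
    # back the stored status.
    subprocess.run(["podman", "healthcheck", "run", "ovn_controller"], check=True)
    status = subprocess.check_output(
        ["podman", "inspect", "--format",
         "{{.State.Health.Status}}", "ovn_controller"]
    )
    print(status.decode().strip())  # "healthy" on success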
Oct 11 02:05:42 compute-0 ceph-mon[191930]: pgmap v825: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:43 compute-0 sshd-session[357348]: Accepted publickey for zuul from 192.168.122.30 port 34916 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 02:05:43 compute-0 systemd-logind[804]: New session 59 of user zuul.
Oct 11 02:05:43 compute-0 systemd[1]: Started Session 59 of User zuul.
Oct 11 02:05:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v826: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:43 compute-0 sshd-session[357348]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 02:05:43 compute-0 unix_chkpwd[357376]: password check failed for user (root)
Oct 11 02:05:43 compute-0 sshd-session[357346]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:05:44 compute-0 python3.9[357502]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 02:05:44 compute-0 ceph-mon[191930]: pgmap v826: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:44 compute-0 rsyslogd[187706]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 02:05:44 compute-0 podman[357503]: 2025-10-11 02:05:44.865381141 +0000 UTC m=+0.136642902 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, container_name=multipathd, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_managed=true)
Oct 11 02:05:44 compute-0 podman[357504]: 2025-10-11 02:05:44.866582614 +0000 UTC m=+0.134790423 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, container_name=iscsid, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, tcib_managed=true, config_id=iscsid, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:05:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:05:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v827: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:45 compute-0 ceph-mon[191930]: pgmap v827: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:46 compute-0 sshd-session[357346]: Failed password for root from 121.227.153.123 port 54958 ssh2
Oct 11 02:05:46 compute-0 sudo[357691]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-htdasozbtchasshdclzcaygflacqfzxs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148345.446872-36-157998761058243/AnsiballZ_systemd_service.py'
Oct 11 02:05:46 compute-0 sudo[357691]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:05:46 compute-0 python3.9[357693]: ansible-ansible.builtin.systemd_service Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 02:05:46 compute-0 systemd[1]: Reloading.
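The ansible systemd_service task with daemon_reload=True is what produces systemd's "Reloading." and the generator chatter below; on the host it is equivalent to one command, sketched here:

    import subprocess

    # Re-runs the systemd generators and reloads unit files; needs root,
    # which the sudo session above provides.
    subprocess.run(["systemctl", "daemon-reload"], check=True)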
Oct 11 02:05:46 compute-0 sshd-session[357346]: Connection closed by authenticating user root 121.227.153.123 port 54958 [preauth]
Oct 11 02:05:46 compute-0 systemd-rc-local-generator[357723]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:05:46 compute-0 systemd-sysv-generator[357726]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:05:47 compute-0 sudo[357691]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v828: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:48 compute-0 ceph-mon[191930]: pgmap v828: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:48 compute-0 unix_chkpwd[357882]: password check failed for user (root)
Oct 11 02:05:48 compute-0 sshd-session[357730]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:05:48 compute-0 python3.9[357881]: ansible-ansible.builtin.service_facts Invoked
Oct 11 02:05:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v829: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:49 compute-0 network[357900]: You are using 'network' service provided by 'network-scripts', which are now deprecated.
Oct 11 02:05:49 compute-0 network[357901]: 'network-scripts' will be removed from distribution in near future.
Oct 11 02:05:49 compute-0 network[357902]: It is advised to switch to 'NetworkManager' instead for network management.
Oct 11 02:05:49 compute-0 sshd-session[357730]: Failed password for root from 121.227.153.123 port 54974 ssh2
Oct 11 02:05:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:05:50 compute-0 ceph-mon[191930]: pgmap v829: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v830: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:51 compute-0 sshd-session[357730]: Connection closed by authenticating user root 121.227.153.123 port 54974 [preauth]
Oct 11 02:05:52 compute-0 ceph-mon[191930]: pgmap v830: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:53 compute-0 unix_chkpwd[357972]: password check failed for user (root)
Oct 11 02:05:53 compute-0 sshd-session[357939]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:05:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v831: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:53 compute-0 podman[357982]: 2025-10-11 02:05:53.664833943 +0000 UTC m=+0.122029658 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:05:53 compute-0 podman[357983]: 2025-10-11 02:05:53.691674726 +0000 UTC m=+0.146170873 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, url=https://catalog.redhat.com/en/search?searchType=containers, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, name=ubi9-minimal, vcs-type=git, version=9.6, io.openshift.expose-services=, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, architecture=x86_64, managed_by=edpm_ansible, distribution-scope=public, maintainer=Red Hat, Inc., container_name=openstack_network_exporter, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, config_id=edpm, release=1755695350, vendor=Red Hat, Inc., build-date=2025-08-20T13:12:41, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.component=ubi9-minimal-container, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b)
Oct 11 02:05:54 compute-0 ceph-mon[191930]: pgmap v831: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:54 compute-0 sshd-session[357939]: Failed password for root from 121.227.153.123 port 35342 ssh2
Oct 11 02:05:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:05:54.821 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:05:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:05:54.821 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:05:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:05:54.822 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:05:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:05:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v832: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:56 compute-0 ceph-mon[191930]: pgmap v832: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:56 compute-0 sudo[358219]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zixyvcxukqchpmgxsldcusmodzqzbeqn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148355.8584995-55-250052682349646/AnsiballZ_systemd_service.py'
Oct 11 02:05:56 compute-0 sudo[358219]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:05:56
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.log', 'backups', 'default.rgw.meta', 'vms', 'cephfs.cephfs.meta', 'default.rgw.control', 'volumes', '.rgw.root', '.mgr', 'cephfs.cephfs.data', 'images']
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:05:56 compute-0 sshd-session[357939]: Connection closed by authenticating user root 121.227.153.123 port 35342 [preauth]
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:05:56 compute-0 python3.9[358221]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_ceilometer_agent_compute.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:05:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:05:56 compute-0 sudo[358219]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v833: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:58 compute-0 sudo[358374]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cubeuvysqgfthcpdighuhuyhlkshjwxt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148357.2986028-65-239392196280200/AnsiballZ_file.py'
Oct 11 02:05:58 compute-0 sudo[358374]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:05:58 compute-0 unix_chkpwd[358376]: password check failed for user (root)
Oct 11 02:05:58 compute-0 sshd-session[358222]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:05:58 compute-0 python3.9[358377]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_ceilometer_agent_compute.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:05:58 compute-0 sudo[358374]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:58 compute-0 ceph-mon[191930]: pgmap v833: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:59 compute-0 sudo[358545]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zylsbiorcpchqmjwhexwysewlsrnahum ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148358.5660002-73-197752679289663/AnsiballZ_file.py'
Oct 11 02:05:59 compute-0 podman[358501]: 2025-10-11 02:05:59.09528192 +0000 UTC m=+0.117398720 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, org.label-schema.vendor=CentOS)
Oct 11 02:05:59 compute-0 sudo[358545]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:05:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v834: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:05:59 compute-0 python3.9[358547]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_ceilometer_agent_compute.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:05:59 compute-0 sudo[358545]: pam_unix(sudo:session): session closed for user root
Oct 11 02:05:59 compute-0 podman[157119]: time="2025-10-11T02:05:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:05:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:05:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:05:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:05:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8536 "" "Go-http-client/1.1"
Oct 11 02:05:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:06:00 compute-0 sshd-session[358222]: Failed password for root from 121.227.153.123 port 35350 ssh2
Oct 11 02:06:00 compute-0 ceph-mon[191930]: pgmap v834: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:00 compute-0 sudo[358697]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-membkxcsvmxuvcsfpbqvldewcbqppywh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148359.6967812-82-104361436077030/AnsiballZ_command.py'
Oct 11 02:06:00 compute-0 sudo[358697]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:06:00 compute-0 python3.9[358699]: ansible-ansible.legacy.command Invoked with _raw_params=if systemctl is-active certmonger.service; then
                                               systemctl disable --now certmonger.service
                                               test -f /etc/systemd/system/certmonger.service || systemctl mask certmonger.service
                                             fi
                                              _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:06:00 compute-0 sudo[358697]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v835: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:01 compute-0 openstack_network_exporter[159265]: ERROR   02:06:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:06:01 compute-0 openstack_network_exporter[159265]: ERROR   02:06:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:06:01 compute-0 openstack_network_exporter[159265]: ERROR   02:06:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:06:01 compute-0 openstack_network_exporter[159265]: ERROR   02:06:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:06:01 compute-0 openstack_network_exporter[159265]: ERROR   02:06:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:06:01 compute-0 sshd-session[358222]: Connection closed by authenticating user root 121.227.153.123 port 35350 [preauth]
Oct 11 02:06:01 compute-0 anacron[26555]: Job `cron.weekly' started
Oct 11 02:06:01 compute-0 anacron[26555]: Job `cron.weekly' terminated
Oct 11 02:06:02 compute-0 ceph-mon[191930]: pgmap v835: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:02 compute-0 python3.9[358855]: ansible-ansible.builtin.find Invoked with file_type=any hidden=True paths=['/var/lib/certmonger/requests'] patterns=[] read_whole_file=False age_stamp=mtime recurse=False follow=False get_checksum=False checksum_algorithm=sha1 use_regex=False exact_mode=True excludes=None contains=None age=None size=None depth=None mode=None encoding=None limit=None
Oct 11 02:06:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v836: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:03 compute-0 unix_chkpwd[358955]: password check failed for user (root)
Oct 11 02:06:03 compute-0 sshd-session[358780]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:06:03 compute-0 sudo[359006]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xpdthqtfqorglsztiaglzrlcldfiqxug ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148363.1789167-100-132371752602290/AnsiballZ_systemd_service.py'
Oct 11 02:06:03 compute-0 sudo[359006]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:06:04 compute-0 python3.9[359008]: ansible-ansible.builtin.systemd_service Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 02:06:04 compute-0 systemd[1]: Reloading.
Oct 11 02:06:04 compute-0 systemd-rc-local-generator[359026]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:06:04 compute-0 systemd-sysv-generator[359035]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:06:04 compute-0 ceph-mon[191930]: pgmap v836: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:04 compute-0 sudo[359006]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:06:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v837: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:05 compute-0 sudo[359193]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-undjjubpalisexnevugperlznmfmyhdu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148364.9907944-108-28570881410132/AnsiballZ_command.py'
Oct 11 02:06:05 compute-0 sudo[359193]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:06:05 compute-0 python3.9[359195]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_ceilometer_agent_compute.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:06:05 compute-0 sudo[359193]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:05 compute-0 sshd-session[358780]: Failed password for root from 121.227.153.123 port 50376 ssh2
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:06:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:06:06 compute-0 ceph-mon[191930]: pgmap v837: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:06 compute-0 sudo[359346]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dywnfjsgiujtrbfhdzfrittzxyybfqix ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148366.1132987-117-117891993869132/AnsiballZ_file.py'
Oct 11 02:06:06 compute-0 sudo[359346]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:06:06 compute-0 sshd-session[358780]: Connection closed by authenticating user root 121.227.153.123 port 50376 [preauth]
Oct 11 02:06:06 compute-0 python3.9[359348]: ansible-ansible.builtin.file Invoked with group=zuul mode=0750 owner=zuul path=/var/lib/openstack/config/telemetry recurse=True setype=container_file_t state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:06:07 compute-0 sudo[359346]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v838: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:08 compute-0 python3.9[359500]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:06:08 compute-0 unix_chkpwd[359503]: password check failed for user (root)
Oct 11 02:06:08 compute-0 sshd-session[359353]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:06:08 compute-0 ceph-mon[191930]: pgmap v838: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v839: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:09 compute-0 python3.9[359653]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/ceilometer-host-specific.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:06:10 compute-0 python3.9[359729]: ansible-ansible.legacy.file Invoked with mode=0644 setype=container_file_t dest=/var/lib/openstack/config/telemetry/ceilometer-host-specific.conf _original_basename=ceilometer-host-specific.conf.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/ceilometer-host-specific.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:06:10 compute-0 sshd-session[359353]: Failed password for root from 121.227.153.123 port 50388 ssh2
Oct 11 02:06:10 compute-0 ceph-mon[191930]: pgmap v839: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:11 compute-0 sudo[359879]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wkofzchnbnrdsciysbbismvzrmrpvasl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148370.552031-145-30553550994692/AnsiballZ_group.py'
Oct 11 02:06:11 compute-0 sudo[359879]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:06:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v840: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:11 compute-0 python3.9[359881]: ansible-ansible.builtin.group Invoked with name=libvirt state=present force=False system=False local=False non_unique=False gid=None gid_min=None gid_max=None
Oct 11 02:06:11 compute-0 sudo[359879]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:11 compute-0 sshd-session[359353]: Connection closed by authenticating user root 121.227.153.123 port 50388 [preauth]
Oct 11 02:06:12 compute-0 ceph-mon[191930]: pgmap v840: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:12 compute-0 podman[360007]: 2025-10-11 02:06:12.712066382 +0000 UTC m=+0.117291299 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:06:12 compute-0 sudo[360110]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mepususlxldkohslabpzjqfknqkytakl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148371.9863818-156-227819323867559/AnsiballZ_getent.py'
Oct 11 02:06:12 compute-0 sudo[360110]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:06:12 compute-0 podman[360008]: 2025-10-11 02:06:12.754474909 +0000 UTC m=+0.157438612 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, container_name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.build-date=20251009, tcib_managed=true, config_id=ovn_controller, io.buildah.version=1.41.3)
Oct 11 02:06:12 compute-0 podman[360011]: 2025-10-11 02:06:12.755889474 +0000 UTC m=+0.140084839 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, build-date=2024-09-18T21:23:30, name=ubi9, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.4, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, com.redhat.component=ubi9-container, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.openshift.tags=base rhel9, vcs-type=git, container_name=kepler, io.buildah.version=1.29.0, vendor=Red Hat, Inc., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release-0.7.12=, io.k8s.display-name=Red Hat Universal Base Image 9, release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., maintainer=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, managed_by=edpm_ansible, io.openshift.expose-services=, distribution-scope=public, config_id=edpm, architecture=x86_64, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f)
Oct 11 02:06:12 compute-0 podman[360010]: 2025-10-11 02:06:12.758149848 +0000 UTC m=+0.149499668 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:06:12 compute-0 podman[360009]: 2025-10-11 02:06:12.778504643 +0000 UTC m=+0.169484929 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, container_name=ceilometer_agent_compute, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']})
Oct 11 02:06:12 compute-0 python3.9[360133]: ansible-ansible.builtin.getent Invoked with database=passwd key=ceilometer fail_key=True service=None split=None
Oct 11 02:06:12 compute-0 sudo[360110]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v841: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:13 compute-0 unix_chkpwd[360186]: password check failed for user (root)
Oct 11 02:06:13 compute-0 sshd-session[359929]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:06:14 compute-0 python3.9[360288]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/ceilometer.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:14 compute-0 ceph-mon[191930]: pgmap v841: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:06:15 compute-0 podman[360292]: 2025-10-11 02:06:15.260898354 +0000 UTC m=+0.140408653 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, tcib_managed=true, config_id=iscsid, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 02:06:15 compute-0 podman[360291]: 2025-10-11 02:06:15.289068611 +0000 UTC m=+0.172898695 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, container_name=multipathd, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']})
Oct 11 02:06:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v842: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:15 compute-0 sshd-session[359929]: Failed password for root from 121.227.153.123 port 50216 ssh2
Oct 11 02:06:16 compute-0 python3.9[360402]: ansible-ansible.legacy.file Invoked with mode=0640 dest=/var/lib/openstack/config/telemetry/ceilometer.conf _original_basename=ceilometer.conf recurse=False state=file path=/var/lib/openstack/config/telemetry/ceilometer.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:16 compute-0 ceph-mon[191930]: pgmap v842: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:17 compute-0 python3.9[360552]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/polling.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:17 compute-0 sshd-session[359929]: Connection closed by authenticating user root 121.227.153.123 port 50216 [preauth]
Oct 11 02:06:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v843: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:18 compute-0 python3.9[360630]: ansible-ansible.legacy.file Invoked with mode=0640 dest=/var/lib/openstack/config/telemetry/polling.yaml _original_basename=polling.yaml recurse=False state=file path=/var/lib/openstack/config/telemetry/polling.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:18 compute-0 ceph-mon[191930]: pgmap v843: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:18 compute-0 unix_chkpwd[360655]: password check failed for user (root)
Oct 11 02:06:18 compute-0 sshd-session[360555]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:06:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v844: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:19 compute-0 python3.9[360781]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/custom.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:06:20 compute-0 nova_compute[356901]: 2025-10-11 02:06:20.278 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_power_states run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:06:20 compute-0 nova_compute[356901]: 2025-10-11 02:06:20.308 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_running_deleted_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:06:20 compute-0 python3.9[360858]: ansible-ansible.legacy.file Invoked with mode=0640 dest=/var/lib/openstack/config/telemetry/custom.conf _original_basename=custom.conf recurse=False state=file path=/var/lib/openstack/config/telemetry/custom.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:20 compute-0 ceph-mon[191930]: pgmap v844: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:20 compute-0 sshd-session[360555]: Failed password for root from 121.227.153.123 port 50220 ssh2
Oct 11 02:06:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v845: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:21 compute-0 python3.9[361008]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/certs/telemetry/default/tls.crt follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:06:21 compute-0 sshd-session[360555]: Connection closed by authenticating user root 121.227.153.123 port 50220 [preauth]
Oct 11 02:06:22 compute-0 python3.9[361162]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/certs/telemetry/default/tls.key follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:06:22 compute-0 ceph-mon[191930]: pgmap v845: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v846: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:23 compute-0 unix_chkpwd[361315]: password check failed for user (root)
Oct 11 02:06:23 compute-0 sshd-session[361154]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:06:23 compute-0 python3.9[361314]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:24 compute-0 podman[361366]: 2025-10-11 02:06:24.178326441 +0000 UTC m=+0.124780463 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, io.buildah.version=1.33.7, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vcs-type=git, url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., managed_by=edpm_ansible, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, version=9.6, distribution-scope=public, architecture=x86_64, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, maintainer=Red Hat, Inc., container_name=openstack_network_exporter, com.redhat.component=ubi9-minimal-container, io.openshift.expose-services=, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.tags=minimal rhel9, release=1755695350)
Oct 11 02:06:24 compute-0 podman[361365]: 2025-10-11 02:06:24.199203373 +0000 UTC m=+0.155983246 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:06:24 compute-0 python3.9[361423]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json _original_basename=ceilometer-agent-compute.json.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:24 compute-0 ceph-mon[191930]: pgmap v846: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:24 compute-0 sshd-session[361154]: Failed password for root from 121.227.153.123 port 57576 ssh2
Oct 11 02:06:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:06:25 compute-0 sshd-session[361154]: Connection closed by authenticating user root 121.227.153.123 port 57576 [preauth]
Oct 11 02:06:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v847: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:25 compute-0 python3.9[361588]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/ceilometer-host-specific.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:26 compute-0 python3.9[361666]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry/ceilometer-host-specific.conf _original_basename=ceilometer-host-specific.conf.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/ceilometer-host-specific.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:26 compute-0 sudo[361667]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:06:26 compute-0 sudo[361667]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:26 compute-0 sudo[361667]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:26 compute-0 sudo[361693]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:06:26 compute-0 sudo[361693]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:26 compute-0 sudo[361693]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:26 compute-0 sudo[361745]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:06:26 compute-0 sudo[361745]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:26 compute-0 sudo[361745]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:26 compute-0 sudo[361801]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:06:26 compute-0 sudo[361801]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:06:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:06:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:06:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:06:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:06:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:06:26 compute-0 ceph-mon[191930]: pgmap v847: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:26 compute-0 unix_chkpwd[361903]: password check failed for user (root)
Oct 11 02:06:26 compute-0 sshd-session[361612]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
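[editor's note] The interleaved sshd-session, unix_chkpwd, and pam_unix lines record repeated failed root password attempts from 121.227.153.123 (ports 57576, 57582, 46244 in this section). A hedged sketch for tallying such attempts per source address from a saved journal export; the input path is hypothetical:

    # Sketch: count "Failed password" attempts per source IP in an exported
    # journal text file. The path below is illustrative, not from the log.
    import re
    from collections import Counter

    PAT = re.compile(r"Failed password for \S+ from (\d+\.\d+\.\d+\.\d+)")

    def count_failures(path="/tmp/journal-export.txt"):
        hits = Counter()
        with open(path, encoding="utf-8", errors="replace") as fh:
            for line in fh:
                m = PAT.search(line)
                if m:
                    hits[m.group(1)] += 1
        return hits

    print(count_failures().most_common(5))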
Oct 11 02:06:27 compute-0 python3.9[361930]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/ceilometer_agent_compute.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:27 compute-0 sudo[361801]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:06:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:06:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:06:27 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:06:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:06:27 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:06:27 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 07dd3c3c-d21a-4e9f-9f97-761a00dadb3c does not exist
Oct 11 02:06:27 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 4e4db5a5-7da7-4993-b8d7-09bac2ea4ee8 does not exist
Oct 11 02:06:27 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 9e1f6a4d-71c7-4e3e-b37f-920c79e0b550 does not exist
Oct 11 02:06:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:06:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:06:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:06:27 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:06:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:06:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:06:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v848: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:27 compute-0 sudo[361992]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:06:27 compute-0 sudo[361992]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:27 compute-0 sudo[361992]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:27 compute-0 sudo[362048]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:06:27 compute-0 sudo[362048]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:27 compute-0 sudo[362048]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:27 compute-0 sudo[362073]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:06:27 compute-0 python3.9[362047]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry/ceilometer_agent_compute.json _original_basename=ceilometer_agent_compute.json.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/ceilometer_agent_compute.json force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:27 compute-0 sudo[362073]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:27 compute-0 sudo[362073]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:06:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:06:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:06:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:06:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:06:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:06:27 compute-0 sudo[362098]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:06:27 compute-0 sudo[362098]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:28 compute-0 podman[362260]: 2025-10-11 02:06:28.333823983 +0000 UTC m=+0.086046884 container create ff2f1facee3ac86e72e1b9f86ff7dcc75cb035342a2b24b6217f49f0a6f0d6fa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_engelbart, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:06:28 compute-0 podman[362260]: 2025-10-11 02:06:28.296913475 +0000 UTC m=+0.049136456 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:06:28 compute-0 systemd[1]: Started libpod-conmon-ff2f1facee3ac86e72e1b9f86ff7dcc75cb035342a2b24b6217f49f0a6f0d6fa.scope.
Oct 11 02:06:28 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:06:28 compute-0 podman[362260]: 2025-10-11 02:06:28.464930391 +0000 UTC m=+0.217153312 container init ff2f1facee3ac86e72e1b9f86ff7dcc75cb035342a2b24b6217f49f0a6f0d6fa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_engelbart, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 02:06:28 compute-0 podman[362260]: 2025-10-11 02:06:28.483043725 +0000 UTC m=+0.235266656 container start ff2f1facee3ac86e72e1b9f86ff7dcc75cb035342a2b24b6217f49f0a6f0d6fa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_engelbart, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 02:06:28 compute-0 podman[362260]: 2025-10-11 02:06:28.490532686 +0000 UTC m=+0.242755607 container attach ff2f1facee3ac86e72e1b9f86ff7dcc75cb035342a2b24b6217f49f0a6f0d6fa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_engelbart, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=reef)
Oct 11 02:06:28 compute-0 zealous_engelbart[362314]: 167 167
Oct 11 02:06:28 compute-0 systemd[1]: libpod-ff2f1facee3ac86e72e1b9f86ff7dcc75cb035342a2b24b6217f49f0a6f0d6fa.scope: Deactivated successfully.
Oct 11 02:06:28 compute-0 podman[362260]: 2025-10-11 02:06:28.494057467 +0000 UTC m=+0.246280369 container died ff2f1facee3ac86e72e1b9f86ff7dcc75cb035342a2b24b6217f49f0a6f0d6fa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_engelbart, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:06:28 compute-0 systemd[1]: var-lib-containers-storage-overlay-9307f3b38c62ba2e15a148b985742156d7f9ccc91ddd08785a994be831cdfc9e-merged.mount: Deactivated successfully.
Oct 11 02:06:28 compute-0 podman[362260]: 2025-10-11 02:06:28.556582232 +0000 UTC m=+0.308805153 container remove ff2f1facee3ac86e72e1b9f86ff7dcc75cb035342a2b24b6217f49f0a6f0d6fa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_engelbart, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True)
Oct 11 02:06:28 compute-0 systemd[1]: libpod-conmon-ff2f1facee3ac86e72e1b9f86ff7dcc75cb035342a2b24b6217f49f0a6f0d6fa.scope: Deactivated successfully.
Oct 11 02:06:28 compute-0 python3.9[362328]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:28 compute-0 ceph-mon[191930]: pgmap v848: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:28 compute-0 podman[362356]: 2025-10-11 02:06:28.84738415 +0000 UTC m=+0.092468896 container create 0f693c8e095671df2e96fcf9ada585d73c545a582480b5350b96afaf7fa28f87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_ptolemy, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0)
Oct 11 02:06:28 compute-0 podman[362356]: 2025-10-11 02:06:28.815651785 +0000 UTC m=+0.060736611 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:06:28 compute-0 sshd-session[361612]: Failed password for root from 121.227.153.123 port 57582 ssh2
Oct 11 02:06:28 compute-0 systemd[1]: Started libpod-conmon-0f693c8e095671df2e96fcf9ada585d73c545a582480b5350b96afaf7fa28f87.scope.
Oct 11 02:06:28 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:06:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6023118e600a87f5657ea3eef6d6d381a0cf38b0a483c61f51d093772e32dde2/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:06:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6023118e600a87f5657ea3eef6d6d381a0cf38b0a483c61f51d093772e32dde2/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:06:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6023118e600a87f5657ea3eef6d6d381a0cf38b0a483c61f51d093772e32dde2/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:06:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6023118e600a87f5657ea3eef6d6d381a0cf38b0a483c61f51d093772e32dde2/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:06:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6023118e600a87f5657ea3eef6d6d381a0cf38b0a483c61f51d093772e32dde2/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:06:29 compute-0 podman[362356]: 2025-10-11 02:06:29.007804357 +0000 UTC m=+0.252889103 container init 0f693c8e095671df2e96fcf9ada585d73c545a582480b5350b96afaf7fa28f87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_ptolemy, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:06:29 compute-0 podman[362356]: 2025-10-11 02:06:29.028554016 +0000 UTC m=+0.273638742 container start 0f693c8e095671df2e96fcf9ada585d73c545a582480b5350b96afaf7fa28f87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_ptolemy, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 02:06:29 compute-0 podman[362356]: 2025-10-11 02:06:29.034063618 +0000 UTC m=+0.279148424 container attach 0f693c8e095671df2e96fcf9ada585d73c545a582480b5350b96afaf7fa28f87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_ptolemy, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:06:29 compute-0 python3.9[362445]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml _original_basename=ceilometer_prom_exporter.yaml.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v849: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:29 compute-0 podman[362446]: 2025-10-11 02:06:29.520004874 +0000 UTC m=+0.125124324 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.3, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:06:29 compute-0 podman[157119]: time="2025-10-11T02:06:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:06:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:06:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46741 "" "Go-http-client/1.1"
Oct 11 02:06:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:06:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8949 "" "Go-http-client/1.1"
Oct 11 02:06:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:06:30 compute-0 sshd-session[361612]: Connection closed by authenticating user root 121.227.153.123 port 57582 [preauth]
Oct 11 02:06:30 compute-0 thirsty_ptolemy[362413]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:06:30 compute-0 thirsty_ptolemy[362413]: --> relative data size: 1.0
Oct 11 02:06:30 compute-0 thirsty_ptolemy[362413]: --> All data devices are unavailable
Oct 11 02:06:30 compute-0 systemd[1]: libpod-0f693c8e095671df2e96fcf9ada585d73c545a582480b5350b96afaf7fa28f87.scope: Deactivated successfully.
Oct 11 02:06:30 compute-0 podman[362356]: 2025-10-11 02:06:30.426069102 +0000 UTC m=+1.671153898 container died 0f693c8e095671df2e96fcf9ada585d73c545a582480b5350b96afaf7fa28f87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_ptolemy, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 02:06:30 compute-0 systemd[1]: libpod-0f693c8e095671df2e96fcf9ada585d73c545a582480b5350b96afaf7fa28f87.scope: Consumed 1.332s CPU time.
Oct 11 02:06:30 compute-0 systemd[1]: var-lib-containers-storage-overlay-6023118e600a87f5657ea3eef6d6d381a0cf38b0a483c61f51d093772e32dde2-merged.mount: Deactivated successfully.
Oct 11 02:06:30 compute-0 podman[362356]: 2025-10-11 02:06:30.550069287 +0000 UTC m=+1.795154063 container remove 0f693c8e095671df2e96fcf9ada585d73c545a582480b5350b96afaf7fa28f87 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_ptolemy, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:06:30 compute-0 systemd[1]: libpod-conmon-0f693c8e095671df2e96fcf9ada585d73c545a582480b5350b96afaf7fa28f87.scope: Deactivated successfully.
Oct 11 02:06:30 compute-0 sudo[362098]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:30 compute-0 ceph-mon[191930]: pgmap v849: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:30 compute-0 sudo[362602]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:06:30 compute-0 sudo[362602]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:30 compute-0 sudo[362602]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:30 compute-0 sudo[362657]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:06:30 compute-0 sudo[362657]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:30 compute-0 sudo[362657]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:31 compute-0 sudo[362703]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:06:31 compute-0 sudo[362703]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:31 compute-0 sudo[362703]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:31 compute-0 python3.9[362700]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/firewall.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:31 compute-0 sudo[362728]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:06:31 compute-0 sudo[362728]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v850: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:31 compute-0 openstack_network_exporter[159265]: ERROR   02:06:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:06:31 compute-0 openstack_network_exporter[159265]: ERROR   02:06:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:06:31 compute-0 openstack_network_exporter[159265]: ERROR   02:06:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:06:31 compute-0 openstack_network_exporter[159265]: ERROR   02:06:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:06:31 compute-0 openstack_network_exporter[159265]: 
Oct 11 02:06:31 compute-0 openstack_network_exporter[159265]: ERROR   02:06:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:06:31 compute-0 openstack_network_exporter[159265]: 
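[editor's note] The appctl.go errors above mean the exporter found no control sockets for ovn-northd or ovsdb-server under its run directories, which correspond to the /var/run/openvswitch and /var/lib/openvswitch/ovn host paths mounted into the openstack_network_exporter container earlier in this section. A small sketch for checking whether any daemon control sockets exist there; exact socket names vary per daemon and PID, so this just globs for *.ctl:

    # Sketch: look for OVS/OVN daemon control sockets in the host directories
    # that the exporter container mounts as /run/openvswitch and /run/ovn.
    import glob

    for rundir in ("/var/run/openvswitch", "/var/lib/openvswitch/ovn"):
        ctl = glob.glob(f"{rundir}/*.ctl")
        print(rundir, "->", ctl or "no control sockets found")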
Oct 11 02:06:31 compute-0 podman[362870]: 2025-10-11 02:06:31.76831087 +0000 UTC m=+0.084518735 container create 5b65a312069fdb103ac8b2ca76efce5e8434c97ebef60df2ef6b3bd8b262c453 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_aryabhata, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:06:31 compute-0 python3.9[362862]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry/firewall.yaml _original_basename=firewall.yaml.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/firewall.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:31 compute-0 unix_chkpwd[362884]: password check failed for user (root)
Oct 11 02:06:31 compute-0 sshd-session[362557]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:06:31 compute-0 podman[362870]: 2025-10-11 02:06:31.736165501 +0000 UTC m=+0.052373426 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:06:31 compute-0 systemd[1]: Started libpod-conmon-5b65a312069fdb103ac8b2ca76efce5e8434c97ebef60df2ef6b3bd8b262c453.scope.
Oct 11 02:06:31 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:06:31 compute-0 podman[362870]: 2025-10-11 02:06:31.939148094 +0000 UTC m=+0.255355979 container init 5b65a312069fdb103ac8b2ca76efce5e8434c97ebef60df2ef6b3bd8b262c453 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_aryabhata, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, OSD_FLAVOR=default, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.license=GPLv2)
Oct 11 02:06:31 compute-0 podman[362870]: 2025-10-11 02:06:31.957942098 +0000 UTC m=+0.274149963 container start 5b65a312069fdb103ac8b2ca76efce5e8434c97ebef60df2ef6b3bd8b262c453 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_aryabhata, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3)
Oct 11 02:06:31 compute-0 podman[362870]: 2025-10-11 02:06:31.962820398 +0000 UTC m=+0.279028263 container attach 5b65a312069fdb103ac8b2ca76efce5e8434c97ebef60df2ef6b3bd8b262c453 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_aryabhata, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507)
Oct 11 02:06:31 compute-0 priceless_aryabhata[362887]: 167 167
Oct 11 02:06:31 compute-0 systemd[1]: libpod-5b65a312069fdb103ac8b2ca76efce5e8434c97ebef60df2ef6b3bd8b262c453.scope: Deactivated successfully.
Oct 11 02:06:31 compute-0 podman[362870]: 2025-10-11 02:06:31.970339874 +0000 UTC m=+0.286547739 container died 5b65a312069fdb103ac8b2ca76efce5e8434c97ebef60df2ef6b3bd8b262c453 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_aryabhata, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3)
Oct 11 02:06:32 compute-0 systemd[1]: var-lib-containers-storage-overlay-5fd48849de97f9f2b37697be42cd8f083844def1fdc1cbd2b625fb8ba11cbcea-merged.mount: Deactivated successfully.
Oct 11 02:06:32 compute-0 podman[362870]: 2025-10-11 02:06:32.023723397 +0000 UTC m=+0.339931262 container remove 5b65a312069fdb103ac8b2ca76efce5e8434c97ebef60df2ef6b3bd8b262c453 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_aryabhata, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS)
Oct 11 02:06:32 compute-0 systemd[1]: libpod-conmon-5b65a312069fdb103ac8b2ca76efce5e8434c97ebef60df2ef6b3bd8b262c453.scope: Deactivated successfully.
Oct 11 02:06:32 compute-0 podman[362934]: 2025-10-11 02:06:32.353739789 +0000 UTC m=+0.126640942 container create 7f73333e50fb59851ec126b8488da55e272f72bb279025580342de85ec1d565f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_moser, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:06:32 compute-0 podman[362934]: 2025-10-11 02:06:32.296003518 +0000 UTC m=+0.068904721 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:06:32 compute-0 systemd[1]: Started libpod-conmon-7f73333e50fb59851ec126b8488da55e272f72bb279025580342de85ec1d565f.scope.
Oct 11 02:06:32 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:06:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/aae16cfa1391346a9389a601f0c5b270704f66dd2f9b0c1d46f4e2edb98d4b05/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:06:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/aae16cfa1391346a9389a601f0c5b270704f66dd2f9b0c1d46f4e2edb98d4b05/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:06:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/aae16cfa1391346a9389a601f0c5b270704f66dd2f9b0c1d46f4e2edb98d4b05/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:06:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/aae16cfa1391346a9389a601f0c5b270704f66dd2f9b0c1d46f4e2edb98d4b05/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:06:32 compute-0 podman[362934]: 2025-10-11 02:06:32.514116078 +0000 UTC m=+0.287017281 container init 7f73333e50fb59851ec126b8488da55e272f72bb279025580342de85ec1d565f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_moser, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:06:32 compute-0 podman[362934]: 2025-10-11 02:06:32.545119265 +0000 UTC m=+0.318020388 container start 7f73333e50fb59851ec126b8488da55e272f72bb279025580342de85ec1d565f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_moser, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:06:32 compute-0 podman[362934]: 2025-10-11 02:06:32.550507195 +0000 UTC m=+0.323408398 container attach 7f73333e50fb59851ec126b8488da55e272f72bb279025580342de85ec1d565f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_moser, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:06:32 compute-0 ceph-mon[191930]: pgmap v850: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v851: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:33 compute-0 vigorous_moser[362951]: {
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:     "0": [
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:         {
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "devices": [
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "/dev/loop3"
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             ],
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "lv_name": "ceph_lv0",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "lv_size": "21470642176",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "name": "ceph_lv0",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "tags": {
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.cluster_name": "ceph",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.crush_device_class": "",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.encrypted": "0",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.osd_id": "0",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.type": "block",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.vdo": "0"
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             },
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "type": "block",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "vg_name": "ceph_vg0"
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:         }
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:     ],
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:     "1": [
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:         {
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "devices": [
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "/dev/loop4"
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             ],
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "lv_name": "ceph_lv1",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "lv_size": "21470642176",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "name": "ceph_lv1",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "tags": {
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.cluster_name": "ceph",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.crush_device_class": "",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.encrypted": "0",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.osd_id": "1",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.type": "block",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.vdo": "0"
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             },
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "type": "block",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "vg_name": "ceph_vg1"
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:         }
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:     ],
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:     "2": [
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:         {
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "devices": [
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "/dev/loop5"
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             ],
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "lv_name": "ceph_lv2",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "lv_size": "21470642176",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "name": "ceph_lv2",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "tags": {
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.cluster_name": "ceph",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.crush_device_class": "",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.encrypted": "0",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.osd_id": "2",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.type": "block",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:                 "ceph.vdo": "0"
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             },
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "type": "block",
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:             "vg_name": "ceph_vg2"
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:         }
Oct 11 02:06:33 compute-0 vigorous_moser[362951]:     ]
Oct 11 02:06:33 compute-0 vigorous_moser[362951]: }
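[editor's note] The vigorous_moser container above is the `ceph-volume lvm list --format json` run dispatched by cephadm at 02:06:31; its JSON maps OSD ids 0-2 to the LVs ceph_vg0/ceph_lv0 through ceph_vg2/ceph_lv2 on /dev/loop3-5, which is consistent with the earlier `lvm batch` run reporting "All data devices are unavailable" (the LVs already carry OSDs). A minimal sketch of summarizing that JSON; the file name is hypothetical:

    # Sketch: condense `ceph-volume lvm list --format json` output (as printed
    # by the container above) into one line per OSD.
    import json

    def summarize(raw: str):
        data = json.loads(raw)
        for osd_id, lvs in sorted(data.items(), key=lambda kv: int(kv[0])):
            for lv in lvs:
                print(f"osd.{osd_id}", lv["lv_path"],
                      ",".join(lv["devices"]), lv["tags"]["ceph.osd_fsid"])

    # summarize(open("lvm-list.json").read())  # illustrative input file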
Oct 11 02:06:33 compute-0 systemd[1]: libpod-7f73333e50fb59851ec126b8488da55e272f72bb279025580342de85ec1d565f.scope: Deactivated successfully.
Oct 11 02:06:33 compute-0 podman[362934]: 2025-10-11 02:06:33.44470765 +0000 UTC m=+1.217608803 container died 7f73333e50fb59851ec126b8488da55e272f72bb279025580342de85ec1d565f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_moser, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:06:33 compute-0 systemd[1]: var-lib-containers-storage-overlay-aae16cfa1391346a9389a601f0c5b270704f66dd2f9b0c1d46f4e2edb98d4b05-merged.mount: Deactivated successfully.
Oct 11 02:06:33 compute-0 podman[362934]: 2025-10-11 02:06:33.555200474 +0000 UTC m=+1.328101597 container remove 7f73333e50fb59851ec126b8488da55e272f72bb279025580342de85ec1d565f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_moser, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 02:06:33 compute-0 systemd[1]: libpod-conmon-7f73333e50fb59851ec126b8488da55e272f72bb279025580342de85ec1d565f.scope: Deactivated successfully.
Oct 11 02:06:33 compute-0 sudo[362728]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:33 compute-0 sshd-session[362557]: Failed password for root from 121.227.153.123 port 46244 ssh2
Oct 11 02:06:33 compute-0 python3.9[363086]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/node_exporter.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:33 compute-0 sudo[363098]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:06:33 compute-0 sudo[363098]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:33 compute-0 sudo[363098]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:33 compute-0 sudo[363125]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:06:33 compute-0 sudo[363125]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:33 compute-0 sudo[363125]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:34 compute-0 sudo[363173]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:06:34 compute-0 sudo[363173]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:34 compute-0 sudo[363173]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:34 compute-0 sudo[363220]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:06:34 compute-0 sudo[363220]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:34 compute-0 python3.9[363273]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry/node_exporter.json _original_basename=node_exporter.json.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/node_exporter.json force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:34 compute-0 podman[363342]: 2025-10-11 02:06:34.678329676 +0000 UTC m=+0.073265900 container create 3aecc2f7e5e1c3f46aef1d04fa8645bde5c0ee0fb48b29db299232bcdd679fec (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_feistel, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS)
Oct 11 02:06:34 compute-0 ceph-mon[191930]: pgmap v851: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:34 compute-0 systemd[1]: Started libpod-conmon-3aecc2f7e5e1c3f46aef1d04fa8645bde5c0ee0fb48b29db299232bcdd679fec.scope.
Oct 11 02:06:34 compute-0 podman[363342]: 2025-10-11 02:06:34.651959316 +0000 UTC m=+0.046895530 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:06:34 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:06:34 compute-0 podman[363342]: 2025-10-11 02:06:34.828726326 +0000 UTC m=+0.223662560 container init 3aecc2f7e5e1c3f46aef1d04fa8645bde5c0ee0fb48b29db299232bcdd679fec (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_feistel, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:06:34 compute-0 podman[363342]: 2025-10-11 02:06:34.841727788 +0000 UTC m=+0.236664022 container start 3aecc2f7e5e1c3f46aef1d04fa8645bde5c0ee0fb48b29db299232bcdd679fec (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_feistel, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True)
Oct 11 02:06:34 compute-0 podman[363342]: 2025-10-11 02:06:34.848665872 +0000 UTC m=+0.243602086 container attach 3aecc2f7e5e1c3f46aef1d04fa8645bde5c0ee0fb48b29db299232bcdd679fec (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_feistel, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.schema-version=1.0)
Oct 11 02:06:34 compute-0 compassionate_feistel[363396]: 167 167
Oct 11 02:06:34 compute-0 systemd[1]: libpod-3aecc2f7e5e1c3f46aef1d04fa8645bde5c0ee0fb48b29db299232bcdd679fec.scope: Deactivated successfully.
Oct 11 02:06:34 compute-0 podman[363342]: 2025-10-11 02:06:34.859530768 +0000 UTC m=+0.254466982 container died 3aecc2f7e5e1c3f46aef1d04fa8645bde5c0ee0fb48b29db299232bcdd679fec (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_feistel, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef)
Oct 11 02:06:34 compute-0 systemd[1]: var-lib-containers-storage-overlay-f01bcffffcc324795fc0440b5f5006bcec71fdafe0ffaa599d92fa0d237cb098-merged.mount: Deactivated successfully.
Oct 11 02:06:34 compute-0 podman[363342]: 2025-10-11 02:06:34.926082433 +0000 UTC m=+0.321018657 container remove 3aecc2f7e5e1c3f46aef1d04fa8645bde5c0ee0fb48b29db299232bcdd679fec (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_feistel, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0)
Oct 11 02:06:34 compute-0 systemd[1]: libpod-conmon-3aecc2f7e5e1c3f46aef1d04fa8645bde5c0ee0fb48b29db299232bcdd679fec.scope: Deactivated successfully.
Oct 11 02:06:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:06:35 compute-0 sshd-session[362557]: Connection closed by authenticating user root 121.227.153.123 port 46244 [preauth]
Oct 11 02:06:35 compute-0 podman[363480]: 2025-10-11 02:06:35.196635518 +0000 UTC m=+0.085269916 container create cd54d1f9503e5bf2eaf7903bec050474b540e7c60d06ba80c6e31421051f3ae6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_hofstadter, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:06:35 compute-0 podman[363480]: 2025-10-11 02:06:35.156979786 +0000 UTC m=+0.045614284 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:06:35 compute-0 systemd[1]: Started libpod-conmon-cd54d1f9503e5bf2eaf7903bec050474b540e7c60d06ba80c6e31421051f3ae6.scope.
Oct 11 02:06:35 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:06:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/98ece39c73d2f6c3d0525e9564e9e850a769085661387a14308313f4a3c88c20/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:06:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/98ece39c73d2f6c3d0525e9564e9e850a769085661387a14308313f4a3c88c20/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:06:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/98ece39c73d2f6c3d0525e9564e9e850a769085661387a14308313f4a3c88c20/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:06:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/98ece39c73d2f6c3d0525e9564e9e850a769085661387a14308313f4a3c88c20/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:06:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v852: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:35 compute-0 podman[363480]: 2025-10-11 02:06:35.341062145 +0000 UTC m=+0.229696563 container init cd54d1f9503e5bf2eaf7903bec050474b540e7c60d06ba80c6e31421051f3ae6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_hofstadter, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:06:35 compute-0 podman[363480]: 2025-10-11 02:06:35.364324907 +0000 UTC m=+0.252959315 container start cd54d1f9503e5bf2eaf7903bec050474b540e7c60d06ba80c6e31421051f3ae6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_hofstadter, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:06:35 compute-0 podman[363480]: 2025-10-11 02:06:35.369718218 +0000 UTC m=+0.258352656 container attach cd54d1f9503e5bf2eaf7903bec050474b540e7c60d06ba80c6e31421051f3ae6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_hofstadter, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2)
Oct 11 02:06:35 compute-0 python3.9[363513]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/node_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:36 compute-0 python3.9[363599]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry/node_exporter.yaml _original_basename=node_exporter.yaml.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/node_exporter.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]: {
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:         "osd_id": 1,
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:         "type": "bluestore"
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:     },
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:         "osd_id": 2,
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:         "type": "bluestore"
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:     },
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:         "osd_id": 0,
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:         "type": "bluestore"
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]:     }
Oct 11 02:06:36 compute-0 relaxed_hofstadter[363517]: }
Oct 11 02:06:36 compute-0 systemd[1]: libpod-cd54d1f9503e5bf2eaf7903bec050474b540e7c60d06ba80c6e31421051f3ae6.scope: Deactivated successfully.
Oct 11 02:06:36 compute-0 systemd[1]: libpod-cd54d1f9503e5bf2eaf7903bec050474b540e7c60d06ba80c6e31421051f3ae6.scope: Consumed 1.186s CPU time.
Oct 11 02:06:36 compute-0 podman[363480]: 2025-10-11 02:06:36.553967478 +0000 UTC m=+1.442601896 container died cd54d1f9503e5bf2eaf7903bec050474b540e7c60d06ba80c6e31421051f3ae6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_hofstadter, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Oct 11 02:06:36 compute-0 systemd[1]: var-lib-containers-storage-overlay-98ece39c73d2f6c3d0525e9564e9e850a769085661387a14308313f4a3c88c20-merged.mount: Deactivated successfully.
Oct 11 02:06:36 compute-0 podman[363480]: 2025-10-11 02:06:36.669921865 +0000 UTC m=+1.558556263 container remove cd54d1f9503e5bf2eaf7903bec050474b540e7c60d06ba80c6e31421051f3ae6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_hofstadter, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:06:36 compute-0 unix_chkpwd[363764]: password check failed for user (root)
Oct 11 02:06:36 compute-0 sshd-session[363522]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:06:36 compute-0 systemd[1]: libpod-conmon-cd54d1f9503e5bf2eaf7903bec050474b540e7c60d06ba80c6e31421051f3ae6.scope: Deactivated successfully.
Oct 11 02:06:36 compute-0 sudo[363220]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:06:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:06:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:06:36 compute-0 ceph-mon[191930]: pgmap v852: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:06:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:06:36 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 2fd96cd6-2274-442f-8e1c-2ddbbf55a81b does not exist
Oct 11 02:06:36 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev bac9f4fe-26a0-4c87-bb69-5c8699277ce1 does not exist
Oct 11 02:06:36 compute-0 sudo[363789]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:06:36 compute-0 sudo[363789]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:36 compute-0 sudo[363789]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.900 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.903 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.904 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.904 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.926 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.927 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.928 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.929 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.929 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.930 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.931 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.931 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.932 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.966 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.967 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.967 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.968 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:06:36 compute-0 nova_compute[356901]: 2025-10-11 02:06:36.968 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:06:36 compute-0 sudo[363815]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:06:36 compute-0 sudo[363815]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:06:37 compute-0 sudo[363815]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:37 compute-0 python3.9[363791]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/openstack_network_exporter.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v853: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:37 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:06:37 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3378154148' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:06:37 compute-0 nova_compute[356901]: 2025-10-11 02:06:37.503 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.534s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:06:37 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:06:37 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3378154148' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:06:37 compute-0 python3.9[363935]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry/openstack_network_exporter.json _original_basename=openstack_network_exporter.json.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/openstack_network_exporter.json force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:37 compute-0 nova_compute[356901]: 2025-10-11 02:06:37.998 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:06:38 compute-0 nova_compute[356901]: 2025-10-11 02:06:38.000 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=4534MB free_disk=59.98828125GB free_vcpus=8 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:06:38 compute-0 nova_compute[356901]: 2025-10-11 02:06:38.000 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:06:38 compute-0 nova_compute[356901]: 2025-10-11 02:06:38.001 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:06:38 compute-0 nova_compute[356901]: 2025-10-11 02:06:38.067 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 0 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:06:38 compute-0 nova_compute[356901]: 2025-10-11 02:06:38.068 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=512MB phys_disk=59GB used_disk=0GB total_vcpus=8 used_vcpus=0 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:06:38 compute-0 nova_compute[356901]: 2025-10-11 02:06:38.087 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:06:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:06:38 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1240580832' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:06:38 compute-0 nova_compute[356901]: 2025-10-11 02:06:38.561 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.474s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:06:38 compute-0 nova_compute[356901]: 2025-10-11 02:06:38.575 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:06:38 compute-0 nova_compute[356901]: 2025-10-11 02:06:38.598 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:06:38 compute-0 nova_compute[356901]: 2025-10-11 02:06:38.602 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:06:38 compute-0 nova_compute[356901]: 2025-10-11 02:06:38.604 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.603s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:06:38 compute-0 ceph-mon[191930]: pgmap v853: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:38 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1240580832' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:06:38 compute-0 python3.9[364109]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:38 compute-0 sshd-session[363522]: Failed password for root from 121.227.153.123 port 46246 ssh2
Oct 11 02:06:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v854: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:39 compute-0 python3.9[364185]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml _original_basename=openstack_network_exporter.yaml.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:06:40 compute-0 sshd-session[363522]: Connection closed by authenticating user root 121.227.153.123 port 46246 [preauth]
Oct 11 02:06:40 compute-0 python3.9[364337]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/podman_exporter.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:40 compute-0 ceph-mon[191930]: pgmap v854: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:41 compute-0 python3.9[364413]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry/podman_exporter.json _original_basename=podman_exporter.json.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/podman_exporter.json force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v855: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:41 compute-0 unix_chkpwd[364461]: password check failed for user (root)
Oct 11 02:06:41 compute-0 sshd-session[364309]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:06:42 compute-0 python3.9[364564]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/podman_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:42 compute-0 ceph-mon[191930]: pgmap v855: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:43 compute-0 python3.9[364640]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry/podman_exporter.yaml _original_basename=podman_exporter.yaml.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/podman_exporter.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:43 compute-0 podman[364641]: 2025-10-11 02:06:43.259276001 +0000 UTC m=+0.135219104 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:06:43 compute-0 podman[364643]: 2025-10-11 02:06:43.274059028 +0000 UTC m=+0.140514728 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.build-date=20251007)
Oct 11 02:06:43 compute-0 podman[364644]: 2025-10-11 02:06:43.28609517 +0000 UTC m=+0.142195124 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_managed=true, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.schema-version=1.0)
Oct 11 02:06:43 compute-0 podman[364645]: 2025-10-11 02:06:43.306204186 +0000 UTC m=+0.156257284 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, build-date=2024-09-18T21:23:30, io.openshift.tags=base rhel9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.k8s.display-name=Red Hat Universal Base Image 9, distribution-scope=public, release-0.7.12=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, architecture=x86_64, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, container_name=kepler, release=1214.1726694543, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, summary=Provides the latest release of Red Hat Universal Base Image 9., vendor=Red Hat, Inc., maintainer=Red Hat, Inc., com.redhat.component=ubi9-container, io.openshift.expose-services=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.4, io.buildah.version=1.29.0, managed_by=edpm_ansible, name=ubi9)
Oct 11 02:06:43 compute-0 podman[364642]: 2025-10-11 02:06:43.3071315 +0000 UTC m=+0.182756227 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, container_name=ovn_controller, tcib_managed=true)
Oct 11 02:06:43 compute-0 rsyslogd[187706]: imjournal from <compute-0:podman>: begin to drop messages due to rate-limiting
Oct 11 02:06:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v856: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:43 compute-0 sshd-session[364309]: Failed password for root from 121.227.153.123 port 49902 ssh2
Oct 11 02:06:44 compute-0 python3.9[364889]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/node_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:44 compute-0 ceph-mon[191930]: pgmap v856: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:44 compute-0 sshd-session[364309]: Connection closed by authenticating user root 121.227.153.123 port 49902 [preauth]
Oct 11 02:06:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:06:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v857: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:45 compute-0 python3.9[364967]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/openstack/config/telemetry/node_exporter.yaml _original_basename=node_exporter.yaml.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/node_exporter.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:45 compute-0 podman[364969]: 2025-10-11 02:06:45.753726651 +0000 UTC m=+0.153581133 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=iscsid, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, tcib_managed=true, config_id=iscsid, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:06:45 compute-0 podman[364968]: 2025-10-11 02:06:45.770527742 +0000 UTC m=+0.175821043 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.vendor=CentOS, config_id=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:06:46 compute-0 unix_chkpwd[365130]: password check failed for user (root)
Oct 11 02:06:46 compute-0 sshd-session[364939]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:06:46 compute-0 python3.9[365157]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/podman_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:46 compute-0 ceph-mon[191930]: pgmap v857: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v858: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:48 compute-0 python3.9[365233]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/openstack/config/telemetry/podman_exporter.yaml _original_basename=podman_exporter.yaml.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/podman_exporter.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:48 compute-0 sshd-session[364939]: Failed password for root from 121.227.153.123 port 49912 ssh2
Oct 11 02:06:48 compute-0 ceph-mon[191930]: pgmap v858: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:49 compute-0 python3.9[365383]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v859: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:49 compute-0 sshd-session[364939]: Connection closed by authenticating user root 121.227.153.123 port 49912 [preauth]
Oct 11 02:06:49 compute-0 python3.9[365460]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml _original_basename=ceilometer_prom_exporter.yaml.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:06:50 compute-0 sudo[365612]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xdlcoxwpssbbmkiipvkzrbnyyeddedje ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148410.1774497-393-179737111749442/AnsiballZ_file.py'
Oct 11 02:06:50 compute-0 sudo[365612]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:06:50 compute-0 ceph-mon[191930]: pgmap v859: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:51 compute-0 python3.9[365614]: ansible-ansible.builtin.file Invoked with group=ceilometer mode=0644 owner=ceilometer path=/var/lib/openstack/certs/telemetry/default/tls.crt recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False state=None _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:51 compute-0 sudo[365612]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:51 compute-0 unix_chkpwd[365619]: password check failed for user (root)
Oct 11 02:06:51 compute-0 sshd-session[365461]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:06:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v860: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:52 compute-0 sudo[365765]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tqiuyverakbtjbqldhaczhlkceuzbxcl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148411.3892345-401-171316293983320/AnsiballZ_file.py'
Oct 11 02:06:52 compute-0 sudo[365765]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:06:52 compute-0 python3.9[365767]: ansible-ansible.builtin.file Invoked with group=ceilometer mode=0644 owner=ceilometer path=/var/lib/openstack/certs/telemetry/default/tls.key recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False state=None _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:06:52 compute-0 sudo[365765]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:52 compute-0 ceph-mon[191930]: pgmap v860: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:53 compute-0 sudo[365917]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mcstqpydryakerhjhejvmvohqifnrjzo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148412.5351574-409-9354854913479/AnsiballZ_file.py'
Oct 11 02:06:53 compute-0 sudo[365917]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:06:53 compute-0 python3.9[365919]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/openstack/healthchecks setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:06:53 compute-0 sudo[365917]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v861: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:53 compute-0 sshd-session[365461]: Failed password for root from 121.227.153.123 port 34310 ssh2
Oct 11 02:06:53 compute-0 ceph-mon[191930]: pgmap v861: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:54 compute-0 sudo[366069]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-djhjzpobpntdimoaljiwyerzoczxrwra ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148413.6181338-417-68207387213936/AnsiballZ_systemd_service.py'
Oct 11 02:06:54 compute-0 sudo[366069]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:06:54 compute-0 podman[366071]: 2025-10-11 02:06:54.432373053 +0000 UTC m=+0.136910643 container health_status adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:06:54 compute-0 podman[366072]: 2025-10-11 02:06:54.432505286 +0000 UTC m=+0.136051951 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, distribution-scope=public, com.redhat.component=ubi9-minimal-container, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.tags=minimal rhel9, container_name=openstack_network_exporter, maintainer=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350, vendor=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, name=ubi9-minimal, version=9.6, architecture=x86_64, config_id=edpm, vcs-type=git, build-date=2025-08-20T13:12:41, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.expose-services=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.buildah.version=1.33.7)
Oct 11 02:06:54 compute-0 sshd-session[365461]: Connection closed by authenticating user root 121.227.153.123 port 34310 [preauth]
Oct 11 02:06:54 compute-0 python3.9[366073]: ansible-ansible.builtin.systemd_service Invoked with enabled=True name=podman.socket state=started daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:06:54 compute-0 sudo[366069]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:06:54.822 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:06:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:06:54.823 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:06:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:06:54.823 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:06:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:06:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v862: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:55 compute-0 sudo[366268]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ksdlnkyyhnwmltaqahmrygxijahgtggf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148415.1470816-426-211290040159572/AnsiballZ_stat.py'
Oct 11 02:06:55 compute-0 sudo[366268]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:06:55 compute-0 unix_chkpwd[366271]: password check failed for user (root)
Oct 11 02:06:55 compute-0 sshd-session[366115]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:06:55 compute-0 python3.9[366270]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/ceilometer_agent_compute/healthcheck follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:55 compute-0 sudo[366268]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:56 compute-0 sudo[366347]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jwmslbaybhafycudfzazhoblubcaspwi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148415.1470816-426-211290040159572/AnsiballZ_file.py'
Oct 11 02:06:56 compute-0 sudo[366347]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:06:56 compute-0 ceph-mon[191930]: pgmap v862: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:06:56
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['.rgw.root', 'default.rgw.control', 'default.rgw.meta', 'images', 'cephfs.cephfs.meta', 'vms', 'cephfs.cephfs.data', '.mgr', 'default.rgw.log', 'backups', 'volumes']
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:06:56 compute-0 python3.9[366349]: ansible-ansible.legacy.file Invoked with group=zuul mode=0700 owner=zuul setype=container_file_t dest=/var/lib/openstack/healthchecks/ceilometer_agent_compute/ _original_basename=healthcheck recurse=False state=file path=/var/lib/openstack/healthchecks/ceilometer_agent_compute/ force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:06:56 compute-0 sudo[366347]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:06:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:06:57 compute-0 sudo[366423]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wucqirorgfrkogjvpmkmqxeeoeqpxihn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148415.1470816-426-211290040159572/AnsiballZ_stat.py'
Oct 11 02:06:57 compute-0 sudo[366423]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:06:57 compute-0 python3.9[366425]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/ceilometer_agent_compute/healthcheck.future follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:06:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v863: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:57 compute-0 sudo[366423]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:57 compute-0 sudo[366501]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mhtrtmhpcilbcdtanojrublhpfccosyp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148415.1470816-426-211290040159572/AnsiballZ_file.py'
Oct 11 02:06:57 compute-0 sudo[366501]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:06:57 compute-0 sshd-session[366115]: Failed password for root from 121.227.153.123 port 34320 ssh2
Oct 11 02:06:58 compute-0 python3.9[366503]: ansible-ansible.legacy.file Invoked with group=zuul mode=0700 owner=zuul setype=container_file_t dest=/var/lib/openstack/healthchecks/ceilometer_agent_compute/ _original_basename=healthcheck.future recurse=False state=file path=/var/lib/openstack/healthchecks/ceilometer_agent_compute/ force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:06:58 compute-0 sudo[366501]: pam_unix(sudo:session): session closed for user root
Oct 11 02:06:58 compute-0 ceph-mon[191930]: pgmap v863: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:59 compute-0 sshd-session[366115]: Connection closed by authenticating user root 121.227.153.123 port 34320 [preauth]
Oct 11 02:06:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v864: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:06:59 compute-0 podman[157119]: time="2025-10-11T02:06:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:06:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:06:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:06:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:06:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8532 "" "Go-http-client/1.1"
Oct 11 02:07:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:07:00 compute-0 sudo[366669]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nfsfmnpovibivtqfnkgtcvgmxlpxuzgy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148419.3395452-448-49452404590474/AnsiballZ_container_config_data.py'
Oct 11 02:07:00 compute-0 podman[366629]: 2025-10-11 02:07:00.24013457 +0000 UTC m=+0.151499035 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.vendor=CentOS)
Oct 11 02:07:00 compute-0 sudo[366669]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:00 compute-0 ceph-mon[191930]: pgmap v864: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:00 compute-0 python3.9[366674]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/openstack/config/telemetry config_pattern=ceilometer_agent_compute.json debug=False
Oct 11 02:07:00 compute-0 sudo[366669]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:00 compute-0 unix_chkpwd[366675]: password check failed for user (root)
Oct 11 02:07:00 compute-0 sshd-session[366566]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:07:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v865: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:01 compute-0 openstack_network_exporter[159265]: ERROR   02:07:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:07:01 compute-0 openstack_network_exporter[159265]: ERROR   02:07:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:07:01 compute-0 openstack_network_exporter[159265]: ERROR   02:07:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:07:01 compute-0 openstack_network_exporter[159265]: ERROR   02:07:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:07:01 compute-0 openstack_network_exporter[159265]: ERROR   02:07:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:07:02 compute-0 sudo[366825]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dxszkzpndkwippfohyjlcyhnlnypulnf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148421.379165-457-255046163676257/AnsiballZ_container_config_hash.py'
Oct 11 02:07:02 compute-0 sudo[366825]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:02 compute-0 python3.9[366827]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
Oct 11 02:07:02 compute-0 sudo[366825]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:02 compute-0 ceph-mon[191930]: pgmap v865: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:02 compute-0 sshd-session[366566]: Failed password for root from 121.227.153.123 port 50488 ssh2
Oct 11 02:07:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v866: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:03 compute-0 sudo[366977]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fqrwryermszjexgytffrbadfqnyghvcp ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760148422.765373-467-157304380937724/AnsiballZ_edpm_container_manage.py'
Oct 11 02:07:03 compute-0 sudo[366977]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:03 compute-0 python3[366979]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/openstack/config/telemetry config_id=edpm config_overrides={} config_patterns=ceilometer_agent_compute.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 02:07:04 compute-0 sshd-session[366566]: Connection closed by authenticating user root 121.227.153.123 port 50488 [preauth]
Oct 11 02:07:04 compute-0 python3[366979]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: [
                                                {
                                                     "Id": "38f935dbe54986887092542d996d7929ffbcdc27e83d1ca11ffb47197c7c2f87",
                                                     "Digest": "sha256:0f700ff6d1cbbb8b1c30b2de201c8b2c9464b5075c18360fef41a605dbdfe7ac",
                                                     "RepoTags": [
                                                          "quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested"
                                                     ],
                                                     "RepoDigests": [
                                                          "quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute@sha256:0f700ff6d1cbbb8b1c30b2de201c8b2c9464b5075c18360fef41a605dbdfe7ac"
                                                     ],
                                                     "Parent": "",
                                                     "Comment": "",
                                                     "Created": "2025-10-10T05:11:42.145567128Z",
                                                     "Config": {
                                                          "User": "root",
                                                          "Env": [
                                                               "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
                                                               "LANG=en_US.UTF-8",
                                                               "TZ=UTC",
                                                               "container=oci"
                                                          ],
                                                          "Entrypoint": [
                                                               "dumb-init",
                                                               "--single-child",
                                                               "--"
                                                          ],
                                                          "Cmd": [
                                                               "kolla_start"
                                                          ],
                                                          "Labels": {
                                                               "io.buildah.version": "1.41.4",
                                                               "maintainer": "OpenStack Kubernetes Operator team",
                                                               "org.label-schema.build-date": "20251007",
                                                               "org.label-schema.license": "GPLv2",
                                                               "org.label-schema.name": "CentOS Stream 10 Base Image",
                                                               "org.label-schema.schema-version": "1.0",
                                                               "org.label-schema.vendor": "CentOS",
                                                               "tcib_build_tag": "d674bdc5502e72c153d04cef014162b0",
                                                               "tcib_managed": "true"
                                                          },
                                                          "StopSignal": "SIGTERM"
                                                     },
                                                     "Version": "",
                                                     "Author": "",
                                                     "Architecture": "amd64",
                                                     "Os": "linux",
                                                     "Size": 601320651,
                                                     "VirtualSize": 601320651,
                                                     "GraphDriver": {
                                                          "Name": "overlay",
                                                          "Data": {
                                                               "LowerDir": "/var/lib/containers/storage/overlay/27be3a4b0ff61b472a5540e48f946b7a0f53e0e45d15b993faeed2bc74109d2e/diff:/var/lib/containers/storage/overlay/f23d893bf4e5e232b4a10bccf286b96b2b896ac0f9615ec3d768a5b3d9cf124e/diff:/var/lib/containers/storage/overlay/b795fdb19284c493e0b40aa6731a57c198db20f515b82a3664d8b69e725f19e1/diff:/var/lib/containers/storage/overlay/0dbb0fa4e0c18f0a3915a60ff5470aadbb7165028a2c88aa7f1a15489a56b455/diff",
                                                               "UpperDir": "/var/lib/containers/storage/overlay/09221be55dba18a5ef51d4618adbfb94dc9991abad4708cb6ceff185a0d279e1/diff",
                                                               "WorkDir": "/var/lib/containers/storage/overlay/09221be55dba18a5ef51d4618adbfb94dc9991abad4708cb6ceff185a0d279e1/work"
                                                          }
                                                     },
                                                     "RootFS": {
                                                          "Type": "layers",
                                                          "Layers": [
                                                               "sha256:0dbb0fa4e0c18f0a3915a60ff5470aadbb7165028a2c88aa7f1a15489a56b455",
                                                               "sha256:f2d554268728f1a2b938ef34793691c9e7ab3b18bd6be71498d8f03825b6d93c",
                                                               "sha256:a25c2d35476bed41f3449a0dc406a780dc5b798712585bec71b5aaf7307aa5dc",
                                                               "sha256:e8df4b0d4c4e565371819e4ffa1f771a9dab6d01a4ddc43e8a8371b829e45bda",
                                                               "sha256:3dd2bb9f4707c7cea9fe7e8150eaf8549b158ade819ecd405a330fc52850f7f3"
                                                          ]
                                                     },
                                                     "Labels": {
                                                          "io.buildah.version": "1.41.4",
                                                          "maintainer": "OpenStack Kubernetes Operator team",
                                                          "org.label-schema.build-date": "20251007",
                                                          "org.label-schema.license": "GPLv2",
                                                          "org.label-schema.name": "CentOS Stream 10 Base Image",
                                                          "org.label-schema.schema-version": "1.0",
                                                          "org.label-schema.vendor": "CentOS",
                                                          "tcib_build_tag": "d674bdc5502e72c153d04cef014162b0",
                                                          "tcib_managed": "true"
                                                     },
                                                     "Annotations": {},
                                                     "ManifestType": "application/vnd.docker.distribution.manifest.v2+json",
                                                     "User": "root",
                                                     "History": [
                                                          {
                                                               "created": "2025-10-07T00:27:06.913958184Z",
                                                               "created_by": "/bin/sh -c #(nop) ADD file:cc6a915d9ce637d9d03ce0ad89c49ee69f3494e6aadd22234bffb016808a650c in / ",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-07T00:27:06.914046074Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL org.label-schema.schema-version=\"1.0\"     org.label-schema.name=\"CentOS Stream 10 Base Image\"     org.label-schema.vendor=\"CentOS\"     org.label-schema.license=\"GPLv2\"     org.label-schema.build-date=\"20251007\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-07T00:27:09.985738169Z",
                                                               "created_by": "/bin/sh -c #(nop) CMD [\"/bin/bash\"]"
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:08.99356085Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL maintainer=\"OpenStack Kubernetes Operator team\"",
                                                               "comment": "FROM quay.io/centos/centos:stream10",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:08.99359002Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL tcib_managed=true",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:08.993622981Z",
                                                               "created_by": "/bin/sh -c #(nop) ENV LANG=\"en_US.UTF-8\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:08.993670191Z",
                                                               "created_by": "/bin/sh -c #(nop) ENV TZ=\"UTC\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:08.993692921Z",
                                                               "created_by": "/bin/sh -c #(nop) ENV container=\"oci\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:08.993717541Z",
                                                               "created_by": "/bin/sh -c #(nop) USER root",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:09.399422656Z",
                                                               "created_by": "/bin/sh -c if [ -f \"/etc/yum.repos.d/ubi.repo\" ]; then rm -f /etc/yum.repos.d/ubi.repo && dnf clean all && rm -rf /var/cache/dnf; fi",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:10.048203541Z",
                                                               "created_by": "/bin/sh -c if [ -f \"/etc/yum.repos.d/centos.repo\" ]; then rm -f /etc/yum.repos.d/centos*.repo && dnf clean all && rm -rf /var/cache/dnf; fi",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:19.687012398Z",
                                                               "created_by": "/bin/sh -c dnf install -y crudini && crudini --del /etc/dnf/dnf.conf main override_install_langs && crudini --set /etc/dnf/dnf.conf main clean_requirements_on_remove True && crudini --set /etc/dnf/dnf.conf main exactarch 1 && crudini --set /etc/dnf/dnf.conf main gpgcheck 1 && crudini --set /etc/dnf/dnf.conf main install_weak_deps False && if [ 'centos' == 'centos' ];then crudini --set /etc/dnf/dnf.conf main best False; fi && crudini --set /etc/dnf/dnf.conf main installonly_limit 0 && crudini --set /etc/dnf/dnf.conf main keepcache 0 && crudini --set /etc/dnf/dnf.conf main obsoletes 1 && crudini --set /etc/dnf/dnf.conf main plugins 1 && crudini --set /etc/dnf/dnf.conf main skip_missing_names_on_install False && crudini --set /etc/dnf/dnf.conf main tsflags nodocs",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:22.497923369Z",
                                                               "created_by": "/bin/sh -c dnf install -y ca-certificates dumb-init glibc-langpack-en procps-ng python3 sudo util-linux-user which python-tcib-containers",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:22.850576388Z",
                                                               "created_by": "/bin/sh -c if [ ! -f \"/etc/pki/tls/cert.pem\" ]; then ln -s /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /etc/pki/tls/cert.pem; fi",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:23.194008878Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/uid_gid_manage.sh /usr/local/bin/uid_gid_manage",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:23.541660244Z",
                                                               "created_by": "/bin/sh -c chmod 755 /usr/local/bin/uid_gid_manage",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:24.162375786Z",
                                                               "created_by": "/bin/sh -c bash /usr/local/bin/uid_gid_manage kolla hugetlbfs libvirt qemu",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:24.506689434Z",
                                                               "created_by": "/bin/sh -c touch /usr/local/bin/kolla_extend_start && chmod 755 /usr/local/bin/kolla_extend_start",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:24.852276182Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/set_configs.py /usr/local/bin/kolla_set_configs",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:25.192835256Z",
                                                               "created_by": "/bin/sh -c chmod 755 /usr/local/bin/kolla_set_configs",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:25.528420697Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/start.sh /usr/local/bin/kolla_start",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:25.869753058Z",
                                                               "created_by": "/bin/sh -c chmod 755 /usr/local/bin/kolla_start",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:26.189360919Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/httpd_setup.sh /usr/local/bin/kolla_httpd_setup",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:26.535914737Z",
                                                               "created_by": "/bin/sh -c chmod 755 /usr/local/bin/kolla_httpd_setup",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:26.867621683Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/copy_cacerts.sh /usr/local/bin/kolla_copy_cacerts",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:27.210186386Z",
                                                               "created_by": "/bin/sh -c chmod 755 /usr/local/bin/kolla_copy_cacerts",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:27.556878713Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/sudoers /etc/sudoers",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:27.897277207Z",
                                                               "created_by": "/bin/sh -c chmod 440 /etc/sudoers",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:28.248346563Z",
                                                               "created_by": "/bin/sh -c sed -ri '/^(passwd:|group:)/ s/systemd//g' /etc/nsswitch.conf",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:30.680296423Z",
                                                               "created_by": "/bin/sh -c dnf -y reinstall which && rpm -e --nodeps tzdata && dnf -y install tzdata",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:31.023961935Z",
                                                               "created_by": "/bin/sh -c if [ ! -f \"/etc/localtime\" ]; then ln -s /usr/share/zoneinfo/Etc/UTC /etc/localtime; fi",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:31.343659897Z",
                                                               "created_by": "/bin/sh -c mkdir -p /openstack",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:32.379982399Z",
                                                               "created_by": "/bin/sh -c if [ 'centos' == 'centos' ];then if [ -n \"$(rpm -qa redhat-release)\" ];then rpm -e --nodeps redhat-release; fi ; dnf -y install centos-stream-release; fi",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:33.574608193Z",
                                                               "created_by": "/bin/sh -c dnf update --excludepkgs redhat-release -y && dnf clean all && rm -rf /var/cache/dnf",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:33.574664274Z",
                                                               "created_by": "/bin/sh -c #(nop) STOPSIGNAL SIGTERM",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:33.574676124Z",
                                                               "created_by": "/bin/sh -c #(nop) ENTRYPOINT [\"dumb-init\", \"--single-child\", \"--\"]",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:33.574685114Z",
                                                               "created_by": "/bin/sh -c #(nop) CMD [\"kolla_start\"]",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:04:35.127409275Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL \"tcib_build_tag\"=\"d674bdc5502e72c153d04cef014162b0\""
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:05:21.046305664Z",
                                                               "created_by": "/bin/sh -c #(nop) USER root",
                                                               "comment": "FROM quay.rdoproject.org/podified-master-centos10/openstack-base:d674bdc5502e72c153d04cef014162b0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:05:41.621987045Z",
                                                               "created_by": "/bin/sh -c dnf install -y python3-barbicanclient python3-cinderclient python3-designateclient python3-glanceclient python3-ironicclient python3-keystoneclient python3-manilaclient python3-neutronclient python3-novaclient python3-observabilityclient python3-octaviaclient python3-openstackclient python3-swiftclient python3-pymemcache && dnf clean all && rm -rf /var/cache/dnf",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:05:45.303143978Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL \"tcib_build_tag\"=\"d674bdc5502e72c153d04cef014162b0\""
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:07:31.943045829Z",
                                                               "created_by": "/bin/sh -c #(nop) USER root",
                                                               "comment": "FROM quay.rdoproject.org/podified-master-centos10/openstack-os:d674bdc5502e72c153d04cef014162b0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:07:39.13048444Z",
                                                               "created_by": "/bin/sh -c bash /usr/local/bin/uid_gid_manage ceilometer",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:07:51.501544794Z",
                                                               "created_by": "/bin/sh -c dnf -y install openstack-ceilometer-common && dnf clean all && rm -rf /var/cache/dnf",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:08:06.839440654Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL \"tcib_build_tag\"=\"d674bdc5502e72c153d04cef014162b0\""
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:11:26.932232287Z",
                                                               "created_by": "/bin/sh -c #(nop) USER root",
                                                               "comment": "FROM quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-base:d674bdc5502e72c153d04cef014162b0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:11:39.204804878Z",
                                                               "created_by": "/bin/sh -c dnf -y install openstack-ceilometer-compute && dnf clean all && rm -rf /var/cache/dnf",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T05:11:47.967862974Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL \"tcib_build_tag\"=\"d674bdc5502e72c153d04cef014162b0\""
                                                          }
                                                     ],
                                                     "NamesHistory": [
                                                          "quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested"
                                                     ]
                                                }
                                           ]
                                           : quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested
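[editor's note] The inspect blob above ends the image history for openstack-ceilometer-compute; its "comment" fields record the tcib build chain (openstack-base, then openstack-os, then openstack-ceilometer-base, then openstack-ceilometer-compute). A minimal sketch, assuming "podman image inspect" output of this shape, that recovers the chain; only the image name is taken from the log, the rest is illustrative:

    import json
    import subprocess

    IMAGE = ("quay.rdoproject.org/podified-master-centos10/"
             "openstack-ceilometer-compute:current-tested")

    # podman image inspect prints a JSON array with one object per image.
    out = subprocess.run(
        ["podman", "image", "inspect", IMAGE],
        check=True, capture_output=True, text=True,
    ).stdout
    info = json.loads(out)[0]

    # Each tcib stage boundary shows up as a history entry whose
    # comment starts with "FROM <base image>", as in the dump above.
    for entry in info.get("History", []):
        comment = entry.get("comment", "")
        if comment.startswith("FROM "):
            print(comment.removeprefix("FROM "))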
Oct 11 02:07:04 compute-0 sudo[366977]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:04 compute-0 ceph-mon[191930]: pgmap v866: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:07:05 compute-0 sudo[367188]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nlwskheggpqdhrnocnaypbbdgocvhyas ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148424.730539-475-49932398131255/AnsiballZ_stat.py'
Oct 11 02:07:05 compute-0 sudo[367188]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v867: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:05 compute-0 python3.9[367190]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:07:05 compute-0 sudo[367188]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:05 compute-0 unix_chkpwd[367220]: password check failed for user (root)
Oct 11 02:07:05 compute-0 sshd-session[367037]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:07:06 compute-0 sudo[367343]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ompzfteicjlqrhnfjmcswnmjcgmsblfw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148425.8689873-484-263584050200414/AnsiballZ_file.py'
Oct 11 02:07:06 compute-0 sudo[367343]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:07:06 compute-0 ceph-mon[191930]: pgmap v867: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:07:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
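[editor's note] The pg_autoscaler records above follow a simple formula: each pool's fraction of raw capacity, times its bias, times the root's PG budget, then quantized to a power of two subject to per-pool minimums (1 for .mgr, 16 for the CephFS metadata pool, 32 elsewhere), and by default the autoscaler only acts when the target is off from the current value by roughly a factor of three. A budget of 300 reproduces every logged target exactly; that is consistent with the default mon_target_pg_per_osd of 100 on a 3-OSD cluster, though the OSD count is an inference, not in the log. A sketch that reproduces the logged "pg target" values:

    # Reproduce the pg_autoscaler "pg target" values logged above.
    # PG_BUDGET = mon_target_pg_per_osd * num_osds; 100 * 3 = 300 is an
    # inference that reproduces the logged numbers exactly.
    PG_BUDGET = 300

    pools = {
        # name: (fraction of raw space used, bias), values from the log
        ".mgr":               (7.185749983720779e-06, 1.0),
        "cephfs.cephfs.meta": (5.087256625643029e-07, 4.0),
        ".rgw.root":          (2.5436283128215145e-07, 1.0),
        "default.rgw.log":    (2.1620840658982875e-06, 1.0),
        "default.rgw.meta":   (1.2718141564107572e-07, 4.0),
    }

    for name, (usage, bias) in pools.items():
        # e.g. cephfs.cephfs.meta: 5.087e-07 * 4.0 * 300 = 0.00061047...
        print(f"{name}: pg target {usage * bias * PG_BUDGET}")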
Oct 11 02:07:06 compute-0 python3.9[367345]: ansible-file Invoked with path=/etc/systemd/system/edpm_ceilometer_agent_compute.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:07:06 compute-0 sudo[367343]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v868: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:07 compute-0 sshd-session[367037]: Failed password for root from 121.227.153.123 port 50496 ssh2
Oct 11 02:07:07 compute-0 sudo[367494]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yxrqvlocylptcmeqokribapwwqijenoh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148426.8270593-484-54251395740314/AnsiballZ_copy.py'
Oct 11 02:07:07 compute-0 sudo[367494]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.943 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to be longer than the expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.944 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.945 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.946 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f8ed27f97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb8c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb0e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.947 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb1a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb200>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed2874260>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.948 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb2f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.949 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed3ab42f0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.949 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.949 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb350>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.949 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbb90>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.949 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fa390>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.950 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb3b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.952 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbbf0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.952 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbc80>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.953 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.953 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.952 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f8ed27fbad0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.953 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.955 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27f9610>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.955 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fb620>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.955 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbe30>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.955 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbec0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.956 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f8ed27fbf50>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f8ed26e2c60>] with cache [{}], pollster history [{'disk.device.capacity': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.954 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.956 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f8ed27faff0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.957 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f8ed27fb110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.957 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f8ed27fb170>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.958 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f8ed27fb1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.958 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f8ed27fb230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.959 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f8ed2874230>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f8ed27fb290>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f8ed5778d70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.960 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f8ed27fb650>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f8ed27fbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.961 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.961 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f8ed27fb320>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.962 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.962 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f8ed27fbb60>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.962 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.962 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f8ed27fa3f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.962 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.962 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f8ed27fb380>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.962 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.963 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f8ed27fbbc0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.963 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.963 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f8ed27fbc50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.963 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.963 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f8ed27fbce0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.963 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.964 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f8ed27fbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.964 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.964 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f8ed27fb590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.964 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.965 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f8ed27f95e0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.965 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.965 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f8ed27fb5f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.965 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.965 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f8ed27fbe00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.965 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.966 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f8ed27fbe90>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.966 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.966 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f8ed27fbf20>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f8ed399b920>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.966 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.967 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.968 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.969 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.970 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.970 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.970 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.970 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.970 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:07 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:07.970 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
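[editor's note] The DEBUG burst above is one complete polling cycle: the manager registers each pollster with a shared single-worker ThreadPoolExecutor ("Processing pollsters for [pollsters] with [1] threads"), runs the [local_instances] discovery once, caches the empty result, and then skips every pollster because no instances were found. A schematic sketch of that flow; the function names are illustrative, not ceilometer's actual internals beyond what the log shows:

    from concurrent.futures import ThreadPoolExecutor

    def discover(method, discovery_cache):
        # Discovery runs once per cycle per method; the cached result is
        # shared by every pollster using that method, as in the log's
        # discovery cache [{'local_instances': []}].
        if method not in discovery_cache:
            discovery_cache[method] = []  # libvirt lookup; empty here
        return discovery_cache[method]

    def run_pollster(name, discovery_cache, history):
        resources = discover("local_instances", discovery_cache)
        if not resources:
            print(f"Skip pollster {name}, no resources found this cycle")
            history.setdefault(name, [])  # pollster history entry
            return
        # ... otherwise collect and publish samples per resource ...

    pollsters = ["disk.device.capacity", "network.incoming.bytes.rate", "cpu"]
    discovery_cache, history = {}, {}
    # One worker thread, matching the logged executor configuration.
    with ThreadPoolExecutor(max_workers=1) as executor:
        for name in pollsters:
            executor.submit(run_pollster, name, discovery_cache, history)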
Oct 11 02:07:08 compute-0 python3.9[367496]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760148426.8270593-484-54251395740314/source dest=/etc/systemd/system/edpm_ceilometer_agent_compute.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:07:08 compute-0 sudo[367494]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:08 compute-0 ceph-mon[191930]: pgmap v868: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:08 compute-0 sudo[367571]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nqjvrsvoihhuguwkluxxgbagbmysndma ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148426.8270593-484-54251395740314/AnsiballZ_systemd.py'
Oct 11 02:07:08 compute-0 sudo[367571]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:09 compute-0 sshd-session[367037]: Connection closed by authenticating user root 121.227.153.123 port 50496 [preauth]
Oct 11 02:07:09 compute-0 python3.9[367573]: ansible-systemd Invoked with state=started name=edpm_ceilometer_agent_compute.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:07:09 compute-0 sudo[367571]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v869: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:07:10 compute-0 sudo[367727]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ygsdidbiznuzlljrozixdvuhanbcfcsb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148429.5898938-504-219606482512379/AnsiballZ_systemd.py'
Oct 11 02:07:10 compute-0 sudo[367727]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:10 compute-0 ceph-mon[191930]: pgmap v869: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:10 compute-0 python3.9[367729]: ansible-ansible.builtin.systemd Invoked with name=edpm_ceilometer_agent_compute.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 02:07:10 compute-0 systemd[1]: Stopping ceilometer_agent_compute container...
Oct 11 02:07:10 compute-0 unix_chkpwd[367739]: password check failed for user (root)
Oct 11 02:07:10 compute-0 sshd-session[367594]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:07:10 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:10.726 2 INFO cotyledon._service_manager [-] Caught SIGTERM signal, graceful exiting of master process
Oct 11 02:07:10 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:10.828 2 DEBUG cotyledon._service_manager [-] Killing services with signal SIGTERM _shutdown /usr/lib/python3.12/site-packages/cotyledon/_service_manager.py:319
Oct 11 02:07:10 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:10.828 2 DEBUG cotyledon._service_manager [-] Waiting services to terminate _shutdown /usr/lib/python3.12/site-packages/cotyledon/_service_manager.py:323
Oct 11 02:07:10 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:10.828 14 INFO cotyledon._service [-] Caught SIGTERM signal, graceful exiting of service AgentManager(0) [14]
Oct 11 02:07:10 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:10.829 12 INFO cotyledon._service [-] Caught SIGTERM signal, graceful exiting of service AgentHeartBeatManager(0) [12]
Oct 11 02:07:10 compute-0 ceilometer_agent_compute[153627]: 2025-10-11 02:07:10.851 2 DEBUG cotyledon._service_manager [-] Shutdown finish _shutdown /usr/lib/python3.12/site-packages/cotyledon/_service_manager.py:335
Oct 11 02:07:10 compute-0 virtqemud[153560]: End of file while reading data: Input/output error
Oct 11 02:07:10 compute-0 virtqemud[153560]: End of file while reading data: Input/output error
Oct 11 02:07:11 compute-0 systemd[1]: libpod-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope: Deactivated successfully.
Oct 11 02:07:11 compute-0 systemd[1]: libpod-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope: Consumed 4.917s CPU time.
Oct 11 02:07:11 compute-0 podman[367733]: 2025-10-11 02:07:11.118725552 +0000 UTC m=+0.491172630 container died c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, config_id=edpm, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, tcib_managed=true, io.buildah.version=1.41.4, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']})
Oct 11 02:07:11 compute-0 systemd[1]: c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6-2870149d5fcb13d2.timer: Deactivated successfully.
Oct 11 02:07:11 compute-0 systemd[1]: Stopped /usr/bin/podman healthcheck run c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.
Oct 11 02:07:11 compute-0 systemd[1]: c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6-2870149d5fcb13d2.service: Failed to open /run/systemd/transient/c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6-2870149d5fcb13d2.service: No such file or directory
Oct 11 02:07:11 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6-userdata-shm.mount: Deactivated successfully.
Oct 11 02:07:11 compute-0 systemd[1]: var-lib-containers-storage-overlay-b3d18363ce40705c64403cf6057216716d6bebcedd4dc52be32b80ac0420f1aa-merged.mount: Deactivated successfully.
Oct 11 02:07:11 compute-0 podman[367733]: 2025-10-11 02:07:11.232112602 +0000 UTC m=+0.604559700 container cleanup c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS)
Oct 11 02:07:11 compute-0 podman[367733]: ceilometer_agent_compute
Oct 11 02:07:11 compute-0 podman[367761]: ceilometer_agent_compute
Oct 11 02:07:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v870: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:11 compute-0 systemd[1]: edpm_ceilometer_agent_compute.service: Deactivated successfully.
Oct 11 02:07:11 compute-0 systemd[1]: Stopped ceilometer_agent_compute container.
Oct 11 02:07:11 compute-0 systemd[1]: edpm_ceilometer_agent_compute.service: Consumed 1.055s CPU time, 17.1M memory peak, read 0B from disk, written 107.5K to disk.
Oct 11 02:07:11 compute-0 systemd[1]: Starting ceilometer_agent_compute container...
Oct 11 02:07:11 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:07:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b3d18363ce40705c64403cf6057216716d6bebcedd4dc52be32b80ac0420f1aa/merged/etc/ceilometer/ceilometer_prom_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b3d18363ce40705c64403cf6057216716d6bebcedd4dc52be32b80ac0420f1aa/merged/etc/ceilometer/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b3d18363ce40705c64403cf6057216716d6bebcedd4dc52be32b80ac0420f1aa/merged/var/lib/openstack/config supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b3d18363ce40705c64403cf6057216716d6bebcedd4dc52be32b80ac0420f1aa/merged/var/lib/kolla/config_files/config.json supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:11 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.
Oct 11 02:07:11 compute-0 podman[367772]: 2025-10-11 02:07:11.657883368 +0000 UTC m=+0.252906930 container init c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4, tcib_managed=true, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, config_id=edpm, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image)
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: + sudo -E kolla_set_configs
Oct 11 02:07:11 compute-0 podman[367772]: 2025-10-11 02:07:11.711001062 +0000 UTC m=+0.306024574 container start c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, config_id=edpm, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_managed=true, container_name=ceilometer_agent_compute, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:07:11 compute-0 podman[367772]: ceilometer_agent_compute
Oct 11 02:07:11 compute-0 sudo[367794]: ceilometer : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_set_configs
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: sudo: unable to send audit message: Operation not permitted
Oct 11 02:07:11 compute-0 systemd[1]: Started ceilometer_agent_compute container.
Oct 11 02:07:11 compute-0 sudo[367794]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=42405)
Oct 11 02:07:11 compute-0 sudo[367727]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: INFO:__main__:Loading config file at /var/lib/kolla/config_files/config.json
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: INFO:__main__:Validating config file
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: INFO:__main__:Kolla config strategy set to: COPY_ALWAYS
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: INFO:__main__:Copying service configuration files
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: INFO:__main__:Deleting /etc/ceilometer/ceilometer.conf
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: INFO:__main__:Copying /var/lib/openstack/config/ceilometer.conf to /etc/ceilometer/ceilometer.conf
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: INFO:__main__:Deleting /etc/ceilometer/polling.yaml
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: INFO:__main__:Copying /var/lib/openstack/config/polling.yaml to /etc/ceilometer/polling.yaml
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: INFO:__main__:Setting permission for /etc/ceilometer/polling.yaml
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: INFO:__main__:Deleting /etc/ceilometer/ceilometer.conf.d/01-ceilometer-custom.conf
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: INFO:__main__:Copying /var/lib/openstack/config/custom.conf to /etc/ceilometer/ceilometer.conf.d/01-ceilometer-custom.conf
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf.d/01-ceilometer-custom.conf
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: INFO:__main__:Deleting /etc/ceilometer/ceilometer.conf.d/02-ceilometer-host-specific.conf
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: INFO:__main__:Copying /var/lib/openstack/config/ceilometer-host-specific.conf to /etc/ceilometer/ceilometer.conf.d/02-ceilometer-host-specific.conf
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf.d/02-ceilometer-host-specific.conf
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: INFO:__main__:Writing out command to execute
Oct 11 02:07:11 compute-0 sudo[367794]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: ++ cat /run_command
Oct 11 02:07:11 compute-0 podman[367795]: 2025-10-11 02:07:11.868204411 +0000 UTC m=+0.135495403 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=starting, health_failing_streak=1, health_log=, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, container_name=ceilometer_agent_compute)
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: + CMD='/usr/bin/ceilometer-polling --polling-namespaces compute --logfile /dev/stdout'
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: + ARGS=
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: + sudo kolla_copy_cacerts
Oct 11 02:07:11 compute-0 sudo[367830]: ceilometer : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_copy_cacerts
Oct 11 02:07:11 compute-0 systemd[1]: c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6-4388892ecd7a1bb1.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: sudo: unable to send audit message: Operation not permitted
Oct 11 02:07:11 compute-0 systemd[1]: c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6-4388892ecd7a1bb1.service: Failed with result 'exit-code'.
Oct 11 02:07:11 compute-0 sudo[367830]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=42405)
Oct 11 02:07:11 compute-0 sudo[367830]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: + [[ ! -n '' ]]
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: + . kolla_extend_start
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: Running command: '/usr/bin/ceilometer-polling --polling-namespaces compute --logfile /dev/stdout'
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: + echo 'Running command: '\''/usr/bin/ceilometer-polling --polling-namespaces compute --logfile /dev/stdout'\'''
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: + umask 0022
Oct 11 02:07:11 compute-0 ceilometer_agent_compute[367788]: + exec /usr/bin/ceilometer-polling --polling-namespaces compute --logfile /dev/stdout
Oct 11 02:07:12 compute-0 ceph-mon[191930]: pgmap v870: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:12 compute-0 sudo[367969]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uwaarwrimfuakzymcgqwcjqxlceddheq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148432.1075065-512-20544529074356/AnsiballZ_stat.py'
Oct 11 02:07:12 compute-0 sudo[367969]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:12 compute-0 python3.9[367971]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/node_exporter/healthcheck follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:07:12 compute-0 sudo[367969]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v871: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.422 2 DEBUG cotyledon.oslo_config_glue [-] Full set of CONF: _load_service_manager_options /usr/lib/python3.12/site-packages/cotyledon/oslo_config_glue.py:45
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.422 2 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2804
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.423 2 DEBUG cotyledon.oslo_config_glue [-] Configuration options gathered from: log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2805
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.423 2 DEBUG cotyledon.oslo_config_glue [-] command line args: ['--polling-namespaces', 'compute', '--logfile', '/dev/stdout'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2806
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.423 2 DEBUG cotyledon.oslo_config_glue [-] config files: ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2807
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.423 2 DEBUG cotyledon.oslo_config_glue [-] ================================================================================ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2809
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.423 2 DEBUG cotyledon.oslo_config_glue [-] batch_size                     = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.423 2 DEBUG cotyledon.oslo_config_glue [-] cfg_file                       = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.423 2 DEBUG cotyledon.oslo_config_glue [-] config_dir                     = ['/etc/ceilometer/ceilometer.conf.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.424 2 DEBUG cotyledon.oslo_config_glue [-] config_file                    = ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.424 2 DEBUG cotyledon.oslo_config_glue [-] config_source                  = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.424 2 DEBUG cotyledon.oslo_config_glue [-] debug                          = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.424 2 DEBUG cotyledon.oslo_config_glue [-] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'futurist=INFO', 'neutronclient=INFO', 'keystoneclient=INFO'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.424 2 DEBUG cotyledon.oslo_config_glue [-] enable_notifications           = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.424 2 DEBUG cotyledon.oslo_config_glue [-] enable_prometheus_exporter     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.424 2 DEBUG cotyledon.oslo_config_glue [-] event_pipeline_cfg_file        = event_pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.425 2 DEBUG cotyledon.oslo_config_glue [-] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.425 2 DEBUG cotyledon.oslo_config_glue [-] heartbeat_socket_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.425 2 DEBUG cotyledon.oslo_config_glue [-] host                           = compute-0.ctlplane.example.com log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.425 2 DEBUG cotyledon.oslo_config_glue [-] http_timeout                   = 600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.425 2 DEBUG cotyledon.oslo_config_glue [-] hypervisor_inspector           = libvirt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.425 2 WARNING oslo_config.cfg [-] Deprecated: Option "tenant_name_discovery" from group "DEFAULT" is deprecated. Use option "identity_name_discovery" from group "DEFAULT".
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.425 2 DEBUG cotyledon.oslo_config_glue [-] identity_name_discovery        = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.426 2 DEBUG cotyledon.oslo_config_glue [-] ignore_disabled_projects       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.426 2 DEBUG cotyledon.oslo_config_glue [-] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.426 2 DEBUG cotyledon.oslo_config_glue [-] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.426 2 DEBUG cotyledon.oslo_config_glue [-] libvirt_type                   = kvm log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.426 2 DEBUG cotyledon.oslo_config_glue [-] libvirt_uri                    =  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.426 2 DEBUG cotyledon.oslo_config_glue [-] log_color                      = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.426 2 DEBUG cotyledon.oslo_config_glue [-] log_config_append              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.426 2 DEBUG cotyledon.oslo_config_glue [-] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.426 2 DEBUG cotyledon.oslo_config_glue [-] log_dir                        = /var/log/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.427 2 DEBUG cotyledon.oslo_config_glue [-] log_file                       = /dev/stdout log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.427 2 DEBUG cotyledon.oslo_config_glue [-] log_options                    = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.427 2 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.427 2 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.427 2 DEBUG cotyledon.oslo_config_glue [-] log_rotation_type              = none log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.427 2 DEBUG cotyledon.oslo_config_glue [-] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.427 2 DEBUG cotyledon.oslo_config_glue [-] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.427 2 DEBUG cotyledon.oslo_config_glue [-] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.427 2 DEBUG cotyledon.oslo_config_glue [-] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.428 2 DEBUG cotyledon.oslo_config_glue [-] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.428 2 DEBUG cotyledon.oslo_config_glue [-] max_logfile_count              = 30 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.428 2 DEBUG cotyledon.oslo_config_glue [-] max_logfile_size_mb            = 200 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.428 2 DEBUG cotyledon.oslo_config_glue [-] max_parallel_requests          = 64 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.428 2 DEBUG cotyledon.oslo_config_glue [-] partitioning_group_prefix      = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.428 2 DEBUG cotyledon.oslo_config_glue [-] pipeline_cfg_file              = pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.428 2 DEBUG cotyledon.oslo_config_glue [-] polling_namespaces             = ['compute'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.428 2 DEBUG cotyledon.oslo_config_glue [-] pollsters_definitions_dirs     = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.428 2 DEBUG cotyledon.oslo_config_glue [-] prometheus_listen_addresses    = ['127.0.0.1:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.429 2 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_certfile        = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.429 2 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_enable          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.429 2 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_keyfile         = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.429 2 DEBUG cotyledon.oslo_config_glue [-] publish_errors                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.429 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.429 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.429 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.429 2 DEBUG cotyledon.oslo_config_glue [-] reseller_prefix                = AUTH_ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.429 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_keys         = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.429 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_length       = 256 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.430 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_namespace    = ['metering.'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.430 2 DEBUG cotyledon.oslo_config_glue [-] rootwrap_config                = /etc/ceilometer/rootwrap.conf log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.430 2 DEBUG cotyledon.oslo_config_glue [-] sample_source                  = openstack log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.430 2 DEBUG cotyledon.oslo_config_glue [-] shell_completion               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.430 2 DEBUG cotyledon.oslo_config_glue [-] syslog_log_facility            = LOG_USER log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.430 2 DEBUG cotyledon.oslo_config_glue [-] threads_to_process_pollsters   = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.430 2 DEBUG cotyledon.oslo_config_glue [-] use_journal                    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.430 2 DEBUG cotyledon.oslo_config_glue [-] use_json                       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.430 2 DEBUG cotyledon.oslo_config_glue [-] use_stderr                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.431 2 DEBUG cotyledon.oslo_config_glue [-] use_syslog                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.431 2 DEBUG cotyledon.oslo_config_glue [-] watch_log_file                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.431 2 DEBUG cotyledon.oslo_config_glue [-] compute.fetch_extra_metadata   = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.431 2 DEBUG cotyledon.oslo_config_glue [-] compute.instance_discovery_method = libvirt_metadata log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.431 2 DEBUG cotyledon.oslo_config_glue [-] compute.resource_cache_expiry  = 3600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.431 2 DEBUG cotyledon.oslo_config_glue [-] compute.resource_update_interval = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.431 2 DEBUG cotyledon.oslo_config_glue [-] coordination.backend_url       = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.431 2 DEBUG cotyledon.oslo_config_glue [-] event.definitions_cfg_file     = event_definitions.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.432 2 DEBUG cotyledon.oslo_config_glue [-] event.drop_unmatched_notifications = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.432 2 DEBUG cotyledon.oslo_config_glue [-] event.store_raw                = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.432 2 DEBUG cotyledon.oslo_config_glue [-] ipmi.polling_retry             = 3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.432 2 DEBUG cotyledon.oslo_config_glue [-] meter.meter_definitions_dirs   = ['/etc/ceilometer/meters.d', '/usr/lib/python3.12/site-packages/ceilometer/data/meters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.432 2 DEBUG cotyledon.oslo_config_glue [-] notification.ack_on_event_error = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.432 2 DEBUG cotyledon.oslo_config_glue [-] notification.batch_size        = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.432 2 DEBUG cotyledon.oslo_config_glue [-] notification.batch_timeout     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.432 2 DEBUG cotyledon.oslo_config_glue [-] notification.messaging_urls    = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.433 2 DEBUG cotyledon.oslo_config_glue [-] notification.notification_control_exchanges = ['nova', 'glance', 'neutron', 'cinder', 'heat', 'keystone', 'trove', 'zaqar', 'swift', 'ceilometer', 'magnum', 'dns', 'ironic', 'aodh'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.433 2 DEBUG cotyledon.oslo_config_glue [-] notification.pipelines         = ['meter', 'event'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.433 2 DEBUG cotyledon.oslo_config_glue [-] notification.workers           = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.433 2 DEBUG cotyledon.oslo_config_glue [-] polling.batch_size             = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.433 2 DEBUG cotyledon.oslo_config_glue [-] polling.cfg_file               = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.433 2 DEBUG cotyledon.oslo_config_glue [-] polling.enable_notifications   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.433 2 DEBUG cotyledon.oslo_config_glue [-] polling.enable_prometheus_exporter = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.433 2 DEBUG cotyledon.oslo_config_glue [-] polling.heartbeat_socket_dir   = /var/lib/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.433 2 DEBUG cotyledon.oslo_config_glue [-] polling.identity_name_discovery = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.434 2 DEBUG cotyledon.oslo_config_glue [-] polling.ignore_disabled_projects = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.434 2 DEBUG cotyledon.oslo_config_glue [-] polling.partitioning_group_prefix = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.434 2 DEBUG cotyledon.oslo_config_glue [-] polling.pollsters_definitions_dirs = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.434 2 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_listen_addresses = ['[::]:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.434 2 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_certfile = /etc/ceilometer/tls/tls.crt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.434 2 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_enable  = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.434 2 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_keyfile = /etc/ceilometer/tls/tls.key log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.434 2 DEBUG cotyledon.oslo_config_glue [-] polling.threads_to_process_pollsters = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.435 2 DEBUG cotyledon.oslo_config_glue [-] publisher.telemetry_secret     = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.435 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.event_topic = event log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.435 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.metering_topic = metering log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.435 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.telemetry_driver = messagingv2 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.435 2 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.access_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.435 2 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.secret_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.435 2 DEBUG cotyledon.oslo_config_glue [-] rgw_client.implicit_tenants    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.435 2 DEBUG cotyledon.oslo_config_glue [-] service_types.aodh             = alarming log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.435 2 DEBUG cotyledon.oslo_config_glue [-] service_types.cinder           = volumev3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.436 2 DEBUG cotyledon.oslo_config_glue [-] service_types.glance           = image log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.436 2 DEBUG cotyledon.oslo_config_glue [-] service_types.neutron          = network log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.436 2 DEBUG cotyledon.oslo_config_glue [-] service_types.nova             = compute log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.436 2 DEBUG cotyledon.oslo_config_glue [-] service_types.radosgw          = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.436 2 DEBUG cotyledon.oslo_config_glue [-] service_types.swift            = object-store log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.436 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_section = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.436 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_type  = password log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.436 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.cafile     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.436 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.certfile   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.437 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.collect_timing = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.437 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.insecure   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.437 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.interface  = internalURL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.437 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.keyfile    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.437 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.region_name = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.437 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.split_loggers = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.437 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.timeout    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.437 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_section           = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.437 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_type              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.438 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.cafile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.438 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.certfile               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.438 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.collect_timing         = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.438 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.insecure               = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.438 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.interface              = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.438 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.keyfile                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.438 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.region_name            = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.438 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.split_loggers          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.438 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.timeout                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.439 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_section             = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.439 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_type                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.439 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.cafile                   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.439 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.certfile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.439 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.collect_timing           = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.439 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.insecure                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.440 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.interface                = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.440 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.keyfile                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.440 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.region_name              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.440 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.split_loggers            = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.440 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.timeout                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.440 2 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.440 2 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler_interval = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.441 2 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.log_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.441 2 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2828
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.471 12 INFO ceilometer.polling.manager [-] Starting heartbeat child service. Listening on /var/lib/ceilometer/ceilometer-compute.socket
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.472 12 DEBUG cotyledon.oslo_config_glue [-] Full set of CONF: _load_service_options /usr/lib/python3.12/site-packages/cotyledon/oslo_config_glue.py:53
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.472 12 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2804
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.473 12 DEBUG cotyledon.oslo_config_glue [-] Configuration options gathered from: log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2805
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.473 12 DEBUG cotyledon.oslo_config_glue [-] command line args: ['--polling-namespaces', 'compute', '--logfile', '/dev/stdout'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2806
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.473 12 DEBUG cotyledon.oslo_config_glue [-] config files: ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2807
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.473 12 DEBUG cotyledon.oslo_config_glue [-] ================================================================================ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2809
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.473 12 DEBUG cotyledon.oslo_config_glue [-] batch_size                     = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.474 12 DEBUG cotyledon.oslo_config_glue [-] cfg_file                       = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.474 12 DEBUG cotyledon.oslo_config_glue [-] config_dir                     = ['/etc/ceilometer/ceilometer.conf.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.474 12 DEBUG cotyledon.oslo_config_glue [-] config_file                    = ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.474 12 DEBUG cotyledon.oslo_config_glue [-] config_source                  = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.474 12 DEBUG cotyledon.oslo_config_glue [-] debug                          = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.474 12 DEBUG cotyledon.oslo_config_glue [-] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'futurist=INFO', 'neutronclient=INFO', 'keystoneclient=INFO'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.475 12 DEBUG cotyledon.oslo_config_glue [-] enable_notifications           = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.475 12 DEBUG cotyledon.oslo_config_glue [-] enable_prometheus_exporter     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.475 12 DEBUG cotyledon.oslo_config_glue [-] event_pipeline_cfg_file        = event_pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.475 12 DEBUG cotyledon.oslo_config_glue [-] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.475 12 DEBUG cotyledon.oslo_config_glue [-] heartbeat_socket_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.476 12 DEBUG cotyledon.oslo_config_glue [-] host                           = compute-0.ctlplane.example.com log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.476 12 DEBUG cotyledon.oslo_config_glue [-] http_timeout                   = 600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.476 12 DEBUG cotyledon.oslo_config_glue [-] hypervisor_inspector           = libvirt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.476 12 DEBUG cotyledon.oslo_config_glue [-] identity_name_discovery        = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.476 12 DEBUG cotyledon.oslo_config_glue [-] ignore_disabled_projects       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.476 12 DEBUG cotyledon.oslo_config_glue [-] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.476 12 DEBUG cotyledon.oslo_config_glue [-] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.476 12 DEBUG cotyledon.oslo_config_glue [-] libvirt_type                   = kvm log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.477 12 DEBUG cotyledon.oslo_config_glue [-] libvirt_uri                    =  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.477 12 DEBUG cotyledon.oslo_config_glue [-] log_color                      = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.477 12 DEBUG cotyledon.oslo_config_glue [-] log_config_append              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.477 12 DEBUG cotyledon.oslo_config_glue [-] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.477 12 DEBUG cotyledon.oslo_config_glue [-] log_dir                        = /var/log/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.477 12 DEBUG cotyledon.oslo_config_glue [-] log_file                       = /dev/stdout log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.477 12 DEBUG cotyledon.oslo_config_glue [-] log_options                    = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.478 12 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.478 12 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.478 12 DEBUG cotyledon.oslo_config_glue [-] log_rotation_type              = none log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.478 12 DEBUG cotyledon.oslo_config_glue [-] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.478 12 DEBUG cotyledon.oslo_config_glue [-] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.478 12 DEBUG cotyledon.oslo_config_glue [-] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.478 12 DEBUG cotyledon.oslo_config_glue [-] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.479 12 DEBUG cotyledon.oslo_config_glue [-] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.479 12 DEBUG cotyledon.oslo_config_glue [-] max_logfile_count              = 30 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.479 12 DEBUG cotyledon.oslo_config_glue [-] max_logfile_size_mb            = 200 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.479 12 DEBUG cotyledon.oslo_config_glue [-] max_parallel_requests          = 64 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.479 12 DEBUG cotyledon.oslo_config_glue [-] partitioning_group_prefix      = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.479 12 DEBUG cotyledon.oslo_config_glue [-] pipeline_cfg_file              = pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.480 12 DEBUG cotyledon.oslo_config_glue [-] polling_namespaces             = ['compute'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.480 12 DEBUG cotyledon.oslo_config_glue [-] pollsters_definitions_dirs     = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.480 12 DEBUG cotyledon.oslo_config_glue [-] prometheus_listen_addresses    = ['127.0.0.1:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.480 12 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_certfile        = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.480 12 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_enable          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.480 12 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_keyfile         = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.481 12 DEBUG cotyledon.oslo_config_glue [-] publish_errors                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.481 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.481 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.481 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.481 12 DEBUG cotyledon.oslo_config_glue [-] reseller_prefix                = AUTH_ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.481 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_keys         = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.481 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_length       = 256 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.482 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_namespace    = ['metering.'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.482 12 DEBUG cotyledon.oslo_config_glue [-] rootwrap_config                = /etc/ceilometer/rootwrap.conf log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.482 12 DEBUG cotyledon.oslo_config_glue [-] sample_source                  = openstack log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.482 12 DEBUG cotyledon.oslo_config_glue [-] shell_completion               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.482 12 DEBUG cotyledon.oslo_config_glue [-] syslog_log_facility            = LOG_USER log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.482 12 DEBUG cotyledon.oslo_config_glue [-] threads_to_process_pollsters   = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.482 12 DEBUG cotyledon.oslo_config_glue [-] use_journal                    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.482 12 DEBUG cotyledon.oslo_config_glue [-] use_json                       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.483 12 DEBUG cotyledon.oslo_config_glue [-] use_stderr                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.483 12 DEBUG cotyledon.oslo_config_glue [-] use_syslog                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.483 12 DEBUG cotyledon.oslo_config_glue [-] watch_log_file                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.483 12 DEBUG cotyledon.oslo_config_glue [-] compute.fetch_extra_metadata   = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.483 12 DEBUG cotyledon.oslo_config_glue [-] compute.instance_discovery_method = libvirt_metadata log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.483 12 DEBUG cotyledon.oslo_config_glue [-] compute.resource_cache_expiry  = 3600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.484 12 DEBUG cotyledon.oslo_config_glue [-] compute.resource_update_interval = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.484 12 DEBUG cotyledon.oslo_config_glue [-] coordination.backend_url       = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
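
Values rendered as **** here (coordination.backend_url, and further down notification.messaging_urls, publisher.telemetry_secret, and the rgw_admin_credentials keys) are masked, not empty: log_opt_values() hides any option registered with secret=True. An illustrative registration of such an option, assuming nothing about ceilometer's own code:

    # Options declared secret=True are printed as "****" in config dumps.
    from oslo_config import cfg

    CONF = cfg.ConfigOpts()
    CONF.register_opts(
        [cfg.StrOpt('telemetry_secret', secret=True,
                    help='shared signing secret; masked in dumps')],
        group='publisher',
    )
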
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.484 12 DEBUG cotyledon.oslo_config_glue [-] event.definitions_cfg_file     = event_definitions.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.484 12 DEBUG cotyledon.oslo_config_glue [-] event.drop_unmatched_notifications = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.484 12 DEBUG cotyledon.oslo_config_glue [-] event.store_raw                = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.484 12 DEBUG cotyledon.oslo_config_glue [-] ipmi.polling_retry             = 3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.484 12 DEBUG cotyledon.oslo_config_glue [-] meter.meter_definitions_dirs   = ['/etc/ceilometer/meters.d', '/usr/lib/python3.12/site-packages/ceilometer/data/meters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.485 12 DEBUG cotyledon.oslo_config_glue [-] notification.ack_on_event_error = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.485 12 DEBUG cotyledon.oslo_config_glue [-] notification.batch_size        = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.485 12 DEBUG cotyledon.oslo_config_glue [-] notification.batch_timeout     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.485 12 DEBUG cotyledon.oslo_config_glue [-] notification.messaging_urls    = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.485 12 DEBUG cotyledon.oslo_config_glue [-] notification.notification_control_exchanges = ['nova', 'glance', 'neutron', 'cinder', 'heat', 'keystone', 'trove', 'zaqar', 'swift', 'ceilometer', 'magnum', 'dns', 'ironic', 'aodh'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.485 12 DEBUG cotyledon.oslo_config_glue [-] notification.pipelines         = ['meter', 'event'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.486 12 DEBUG cotyledon.oslo_config_glue [-] notification.workers           = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.486 12 DEBUG cotyledon.oslo_config_glue [-] polling.batch_size             = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.486 12 DEBUG cotyledon.oslo_config_glue [-] polling.cfg_file               = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.486 12 DEBUG cotyledon.oslo_config_glue [-] polling.enable_notifications   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.486 12 DEBUG cotyledon.oslo_config_glue [-] polling.enable_prometheus_exporter = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.486 12 DEBUG cotyledon.oslo_config_glue [-] polling.heartbeat_socket_dir   = /var/lib/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.486 12 DEBUG cotyledon.oslo_config_glue [-] polling.identity_name_discovery = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.487 12 DEBUG cotyledon.oslo_config_glue [-] polling.ignore_disabled_projects = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.487 12 DEBUG cotyledon.oslo_config_glue [-] polling.partitioning_group_prefix = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.487 12 DEBUG cotyledon.oslo_config_glue [-] polling.pollsters_definitions_dirs = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.487 12 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_listen_addresses = ['[::]:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.487 12 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_certfile = /etc/ceilometer/tls/tls.crt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.487 12 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_enable  = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.487 12 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_keyfile = /etc/ceilometer/tls/tls.key log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.488 12 DEBUG cotyledon.oslo_config_glue [-] polling.threads_to_process_pollsters = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
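
The [polling] group above appears to supersede the DEFAULT-level exporter settings: the Prometheus exporter is enabled, bound to [::]:9101 rather than 127.0.0.1:9101, and served over TLS with /etc/ceilometer/tls/tls.crt and tls.key. A hedged probe of that endpoint, assuming the conventional Prometheus /metrics path and a hypothetical CA bundle location:

    # Probe the polling exporter configured above; the /metrics path is the
    # Prometheus convention, and the CA bundle path is a placeholder.
    import requests  # third-party; pip install requests

    resp = requests.get(
        'https://compute-0.ctlplane.example.com:9101/metrics',
        verify='/etc/ceilometer/tls/ca.crt',  # hypothetical CA bundle path
        timeout=5,
    )
    resp.raise_for_status()
    print(resp.text.splitlines()[:5])  # first few exposition-format lines
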
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.488 12 DEBUG cotyledon.oslo_config_glue [-] publisher.telemetry_secret     = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.488 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.event_topic = event log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.488 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.metering_topic = metering log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.488 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.telemetry_driver = messagingv2 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.488 12 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.access_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.488 12 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.secret_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.489 12 DEBUG cotyledon.oslo_config_glue [-] rgw_client.implicit_tenants    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.489 12 DEBUG cotyledon.oslo_config_glue [-] service_types.aodh             = alarming log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.489 12 DEBUG cotyledon.oslo_config_glue [-] service_types.cinder           = volumev3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.489 12 DEBUG cotyledon.oslo_config_glue [-] service_types.glance           = image log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.489 12 DEBUG cotyledon.oslo_config_glue [-] service_types.neutron          = network log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.489 12 DEBUG cotyledon.oslo_config_glue [-] service_types.nova             = compute log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.489 12 DEBUG cotyledon.oslo_config_glue [-] service_types.radosgw          = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.490 12 DEBUG cotyledon.oslo_config_glue [-] service_types.swift            = object-store log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.490 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_section = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.490 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_type  = password log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.490 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.cafile     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.490 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.certfile   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.490 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.collect_timing = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.490 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.insecure   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.491 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.interface  = internalURL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.491 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.keyfile    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.491 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.region_name = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.491 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.split_loggers = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.491 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.timeout    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.491 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_section           = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.491 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_type              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.492 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.cafile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.492 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.certfile               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.492 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.collect_timing         = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.492 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.insecure               = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.492 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.interface              = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.492 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.keyfile                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.492 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.region_name            = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.492 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.split_loggers          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.492 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.timeout                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.493 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_section             = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.493 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_type                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.493 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.cafile                   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 sshd-session[367594]: Failed password for root from 121.227.153.123 port 44104 ssh2
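
Interleaved with the config dump, sshd reports a failed root login from 121.227.153.123; repeated entries of this shape are the usual signature of a brute-force attempt. A small sketch that tallies such attempts per source address from a saved log extract (the /var/log/secure path is distro-dependent):

    # Count "Failed password" attempts per source IP; the regex matches the
    # sshd line format seen above.
    import re
    from collections import Counter

    pattern = re.compile(r'Failed password for .* from (\S+) port \d+ ssh2')
    hits = Counter()
    with open('/var/log/secure') as fh:   # path varies by distro; an assumption
        for line in fh:
            m = pattern.search(line)
            if m:
                hits[m.group(1)] += 1
    for ip, n in hits.most_common(10):
        print(f'{ip}\t{n}')
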
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.493 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.certfile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.493 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.collect_timing           = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.493 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.insecure                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.493 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.interface                = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.493 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.keyfile                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.493 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.region_name              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.494 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.split_loggers            = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.494 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.timeout                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.494 12 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.494 12 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler_interval = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.494 12 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.log_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.494 12 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2828
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.494 12 DEBUG cotyledon._service [-] Run service AgentHeartBeatManager(0) [12] wait_forever /usr/lib/python3.12/site-packages/cotyledon/_service.py:263
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.498 12 DEBUG ceilometer.polling.manager [-] Started heartbeat child process. run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:519
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.501 12 DEBUG ceilometer.polling.manager [-] Started heartbeat update thread _read_queue /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:522
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.502 12 DEBUG ceilometer.polling.manager [-] Started heartbeat reporting thread _report_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:527
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.511 14 DEBUG ceilometer.compute.virt.libvirt.utils [-] Connecting to libvirt: qemu:///system new_libvirt_connection /usr/lib/python3.12/site-packages/ceilometer/compute/virt/libvirt/utils.py:96
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.574 14 INFO ceilometer.polling.manager [-] Looking for dynamic pollsters configurations at [['/etc/ceilometer/pollsters.d']].
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.575 14 INFO ceilometer.polling.manager [-] No dynamic pollsters found in folder [/etc/ceilometer/pollsters.d].
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.575 14 INFO ceilometer.polling.manager [-] No dynamic pollsters file found in dirs [['/etc/ceilometer/pollsters.d']].
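
The polling manager scanned the configured pollsters_definitions_dirs and found no dynamic pollster definitions. A sketch of an equivalent scan, assuming the conventional *.yaml naming for definition files:

    # Equivalent of the "looking for dynamic pollsters" scan logged above:
    # list YAML definition files under the configured directory.
    import glob
    import os

    dirs = ['/etc/ceilometer/pollsters.d']   # from pollsters_definitions_dirs
    for d in dirs:
        files = sorted(glob.glob(os.path.join(d, '*.yaml')))
        if files:
            print(f'{d}: {files}')
        else:
            print(f'No dynamic pollsters found in folder [{d}].')
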
Oct 11 02:07:13 compute-0 sudo[368110]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rqijkpblyazxtgiejwieyoirfifqayvy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148432.1075065-512-20544529074356/AnsiballZ_file.py'
Oct 11 02:07:13 compute-0 sudo[368110]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:13 compute-0 podman[368032]: 2025-10-11 02:07:13.696752232 +0000 UTC m=+0.110047794 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=kepler, release=1214.1726694543, com.redhat.component=ubi9-container, io.openshift.expose-services=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.buildah.version=1.29.0, maintainer=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, build-date=2024-09-18T21:23:30, managed_by=edpm_ansible, architecture=x86_64, io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., vcs-type=git, io.openshift.tags=base rhel9, summary=Provides the latest release of Red Hat Universal Base Image 9., name=ubi9, version=9.4, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543)
Oct 11 02:07:13 compute-0 podman[368031]: 2025-10-11 02:07:13.706110728 +0000 UTC m=+0.126461180 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:07:13 compute-0 podman[368029]: 2025-10-11 02:07:13.710424057 +0000 UTC m=+0.146945963 container health_status 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:07:13 compute-0 podman[368030]: 2025-10-11 02:07:13.775536228 +0000 UTC m=+0.200833076 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
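
The four podman health_status entries above come from each container's configured healthcheck (the 'test'/'mount' pair embedded in config_data). A hedged way to read the same status from Python via the podman CLI; the Go-template field path is an assumption and has differed across podman releases:

    # Query a container's last health result via the podman CLI; the template
    # path (.State.Health.Status) is an assumption and was
    # .State.Healthcheck.Status on older podman releases.
    import subprocess

    def health_status(name: str) -> str:
        out = subprocess.run(
            ['podman', 'inspect', '--format', '{{.State.Health.Status}}', name],
            capture_output=True, text=True, check=True,
        )
        return out.stdout.strip()

    for c in ('kepler', 'ovn_metadata_agent', 'podman_exporter', 'ovn_controller'):
        print(c, health_status(c))
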
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.799 14 DEBUG cotyledon.oslo_config_glue [-] Full set of CONF: _load_service_options /usr/lib/python3.12/site-packages/cotyledon/oslo_config_glue.py:53
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.800 14 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2804
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.800 14 DEBUG cotyledon.oslo_config_glue [-] Configuration options gathered from: log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2805
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.800 14 DEBUG cotyledon.oslo_config_glue [-] command line args: ['--polling-namespaces', 'compute', '--logfile', '/dev/stdout'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2806
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.800 14 DEBUG cotyledon.oslo_config_glue [-] config files: ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2807
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.800 14 DEBUG cotyledon.oslo_config_glue [-] ================================================================================ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2809
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.800 14 DEBUG cotyledon.oslo_config_glue [-] batch_size                     = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.800 14 DEBUG cotyledon.oslo_config_glue [-] cfg_file                       = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.800 14 DEBUG cotyledon.oslo_config_glue [-] config_dir                     = ['/etc/ceilometer/ceilometer.conf.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.800 14 DEBUG cotyledon.oslo_config_glue [-] config_file                    = ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.801 14 DEBUG cotyledon.oslo_config_glue [-] config_source                  = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.801 14 DEBUG cotyledon.oslo_config_glue [-] debug                          = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.801 14 DEBUG cotyledon.oslo_config_glue [-] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'futurist=INFO', 'neutronclient=INFO', 'keystoneclient=INFO'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.801 14 DEBUG cotyledon.oslo_config_glue [-] enable_notifications           = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.801 14 DEBUG cotyledon.oslo_config_glue [-] enable_prometheus_exporter     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.801 14 DEBUG cotyledon.oslo_config_glue [-] event_pipeline_cfg_file        = event_pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.801 14 DEBUG cotyledon.oslo_config_glue [-] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.801 14 DEBUG cotyledon.oslo_config_glue [-] heartbeat_socket_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.802 14 DEBUG cotyledon.oslo_config_glue [-] host                           = compute-0.ctlplane.example.com log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.802 14 DEBUG cotyledon.oslo_config_glue [-] http_timeout                   = 600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.802 14 DEBUG cotyledon.oslo_config_glue [-] hypervisor_inspector           = libvirt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.802 14 DEBUG cotyledon.oslo_config_glue [-] identity_name_discovery        = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.802 14 DEBUG cotyledon.oslo_config_glue [-] ignore_disabled_projects       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.802 14 DEBUG cotyledon.oslo_config_glue [-] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.802 14 DEBUG cotyledon.oslo_config_glue [-] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.802 14 DEBUG cotyledon.oslo_config_glue [-] libvirt_type                   = kvm log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.802 14 DEBUG cotyledon.oslo_config_glue [-] libvirt_uri                    =  log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.802 14 DEBUG cotyledon.oslo_config_glue [-] log_color                      = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.802 14 DEBUG cotyledon.oslo_config_glue [-] log_config_append              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.803 14 DEBUG cotyledon.oslo_config_glue [-] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.803 14 DEBUG cotyledon.oslo_config_glue [-] log_dir                        = /var/log/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.803 14 DEBUG cotyledon.oslo_config_glue [-] log_file                       = /dev/stdout log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.803 14 DEBUG cotyledon.oslo_config_glue [-] log_options                    = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.803 14 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.803 14 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.803 14 DEBUG cotyledon.oslo_config_glue [-] log_rotation_type              = none log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.803 14 DEBUG cotyledon.oslo_config_glue [-] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.803 14 DEBUG cotyledon.oslo_config_glue [-] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.803 14 DEBUG cotyledon.oslo_config_glue [-] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.803 14 DEBUG cotyledon.oslo_config_glue [-] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.803 14 DEBUG cotyledon.oslo_config_glue [-] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.804 14 DEBUG cotyledon.oslo_config_glue [-] max_logfile_count              = 30 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.804 14 DEBUG cotyledon.oslo_config_glue [-] max_logfile_size_mb            = 200 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.804 14 DEBUG cotyledon.oslo_config_glue [-] max_parallel_requests          = 64 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.804 14 DEBUG cotyledon.oslo_config_glue [-] partitioning_group_prefix      = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.804 14 DEBUG cotyledon.oslo_config_glue [-] pipeline_cfg_file              = pipeline.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.804 14 DEBUG cotyledon.oslo_config_glue [-] polling_namespaces             = ['compute'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.804 14 DEBUG cotyledon.oslo_config_glue [-] pollsters_definitions_dirs     = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.804 14 DEBUG cotyledon.oslo_config_glue [-] prometheus_listen_addresses    = ['127.0.0.1:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.804 14 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_certfile        = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.804 14 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_enable          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.804 14 DEBUG cotyledon.oslo_config_glue [-] prometheus_tls_keyfile         = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.805 14 DEBUG cotyledon.oslo_config_glue [-] publish_errors                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.805 14 DEBUG cotyledon.oslo_config_glue [-] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.805 14 DEBUG cotyledon.oslo_config_glue [-] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.805 14 DEBUG cotyledon.oslo_config_glue [-] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.805 14 DEBUG cotyledon.oslo_config_glue [-] reseller_prefix                = AUTH_ log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.805 14 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_keys         = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.805 14 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_length       = 256 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.805 14 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_namespace    = ['metering.'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.805 14 DEBUG cotyledon.oslo_config_glue [-] rootwrap_config                = /etc/ceilometer/rootwrap.conf log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.805 14 DEBUG cotyledon.oslo_config_glue [-] sample_source                  = openstack log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.805 14 DEBUG cotyledon.oslo_config_glue [-] shell_completion               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.805 14 DEBUG cotyledon.oslo_config_glue [-] syslog_log_facility            = LOG_USER log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.806 14 DEBUG cotyledon.oslo_config_glue [-] threads_to_process_pollsters   = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.806 14 DEBUG cotyledon.oslo_config_glue [-] use_journal                    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.806 14 DEBUG cotyledon.oslo_config_glue [-] use_json                       = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.806 14 DEBUG cotyledon.oslo_config_glue [-] use_stderr                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.806 14 DEBUG cotyledon.oslo_config_glue [-] use_syslog                     = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.806 14 DEBUG cotyledon.oslo_config_glue [-] watch_log_file                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2817
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.806 14 DEBUG cotyledon.oslo_config_glue [-] compute.fetch_extra_metadata   = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.806 14 DEBUG cotyledon.oslo_config_glue [-] compute.instance_discovery_method = libvirt_metadata log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.806 14 DEBUG cotyledon.oslo_config_glue [-] compute.resource_cache_expiry  = 3600 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.806 14 DEBUG cotyledon.oslo_config_glue [-] compute.resource_update_interval = 0 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.806 14 DEBUG cotyledon.oslo_config_glue [-] coordination.backend_url       = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.806 14 DEBUG cotyledon.oslo_config_glue [-] event.definitions_cfg_file     = event_definitions.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.806 14 DEBUG cotyledon.oslo_config_glue [-] event.drop_unmatched_notifications = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.807 14 DEBUG cotyledon.oslo_config_glue [-] event.store_raw                = [] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.807 14 DEBUG cotyledon.oslo_config_glue [-] ipmi.polling_retry             = 3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.807 14 DEBUG cotyledon.oslo_config_glue [-] meter.meter_definitions_dirs   = ['/etc/ceilometer/meters.d', '/usr/lib/python3.12/site-packages/ceilometer/data/meters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.807 14 DEBUG cotyledon.oslo_config_glue [-] notification.ack_on_event_error = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.807 14 DEBUG cotyledon.oslo_config_glue [-] notification.batch_size        = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.807 14 DEBUG cotyledon.oslo_config_glue [-] notification.batch_timeout     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.807 14 DEBUG cotyledon.oslo_config_glue [-] notification.messaging_urls    = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.807 14 DEBUG cotyledon.oslo_config_glue [-] notification.notification_control_exchanges = ['nova', 'glance', 'neutron', 'cinder', 'heat', 'keystone', 'trove', 'zaqar', 'swift', 'ceilometer', 'magnum', 'dns', 'ironic', 'aodh'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.807 14 DEBUG cotyledon.oslo_config_glue [-] notification.pipelines         = ['meter', 'event'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.808 14 DEBUG cotyledon.oslo_config_glue [-] notification.workers           = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.808 14 DEBUG cotyledon.oslo_config_glue [-] polling.batch_size             = 50 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.808 14 DEBUG cotyledon.oslo_config_glue [-] polling.cfg_file               = polling.yaml log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.808 14 DEBUG cotyledon.oslo_config_glue [-] polling.enable_notifications   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.808 14 DEBUG cotyledon.oslo_config_glue [-] polling.enable_prometheus_exporter = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.808 14 DEBUG cotyledon.oslo_config_glue [-] polling.heartbeat_socket_dir   = /var/lib/ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.808 14 DEBUG cotyledon.oslo_config_glue [-] polling.identity_name_discovery = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.808 14 DEBUG cotyledon.oslo_config_glue [-] polling.ignore_disabled_projects = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.808 14 DEBUG cotyledon.oslo_config_glue [-] polling.partitioning_group_prefix = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.808 14 DEBUG cotyledon.oslo_config_glue [-] polling.pollsters_definitions_dirs = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.808 14 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_listen_addresses = ['[::]:9101'] log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.809 14 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_certfile = /etc/ceilometer/tls/tls.crt log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.809 14 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_enable  = True log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.809 14 DEBUG cotyledon.oslo_config_glue [-] polling.prometheus_tls_keyfile = /etc/ceilometer/tls/tls.key log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.809 14 DEBUG cotyledon.oslo_config_glue [-] polling.threads_to_process_pollsters = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
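Note how the [polling] section overrides the agent-wide defaults dumped earlier: the exporter now binds to [::]:9101 with TLS enabled (tls.crt/tls.key) instead of plaintext 127.0.0.1:9101. A quick scrape to verify the endpoint might look like the sketch below; the hostname, metrics path, and CA bundle path are assumptions, not values taken from this log:

    import requests

    # Exporter bound to [::]:9101 with the cert/key shown above; the CA
    # bundle must be whatever signed /etc/ceilometer/tls/tls.crt.
    resp = requests.get('https://compute-0:9101/metrics',
                        verify='/etc/pki/tls/certs/ca-bundle.crt',
                        timeout=10)
    print(resp.status_code, resp.text[:200])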
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.809 14 DEBUG cotyledon.oslo_config_glue [-] publisher.telemetry_secret     = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.809 14 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.event_topic = event log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.809 14 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.metering_topic = metering log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.809 14 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.telemetry_driver = messagingv2 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.809 14 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.access_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.809 14 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.secret_key = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.809 14 DEBUG cotyledon.oslo_config_glue [-] rgw_client.implicit_tenants    = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.809 14 DEBUG cotyledon.oslo_config_glue [-] service_types.aodh             = alarming log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.810 14 DEBUG cotyledon.oslo_config_glue [-] service_types.cinder           = volumev3 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.810 14 DEBUG cotyledon.oslo_config_glue [-] service_types.glance           = image log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.810 14 DEBUG cotyledon.oslo_config_glue [-] service_types.neutron          = network log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.810 14 DEBUG cotyledon.oslo_config_glue [-] service_types.nova             = compute log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.810 14 DEBUG cotyledon.oslo_config_glue [-] service_types.radosgw          = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.810 14 DEBUG cotyledon.oslo_config_glue [-] service_types.swift            = object-store log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.810 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_section = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.810 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_type  = password log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.810 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_url   = https://keystone-internal.openstack.svc:5000 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.810 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.cafile     = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.810 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.certfile   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.810 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.collect_timing = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.810 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.default_domain_id = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.810 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.default_domain_name = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.810 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.domain_id  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.811 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.domain_name = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.811 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.insecure   = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.811 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.interface  = internalURL log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.811 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.keyfile    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.811 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.password   = **** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.811 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.project_domain_id = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.811 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.project_domain_name = Default log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.811 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.project_id = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.811 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.project_name = service log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.811 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.region_name = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.811 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.split_loggers = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.811 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.system_scope = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.811 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.timeout    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.811 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.trust_id   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.812 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.user_domain_id = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.812 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.user_domain_name = Default log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.812 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.user_id    = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.812 14 DEBUG cotyledon.oslo_config_glue [-] service_credentials.username   = ceilometer log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
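The service_credentials block maps one-to-one onto a keystoneauth1 password plugin. A minimal sketch of the equivalent session, using only the values visible above (the password is masked as **** in the dump, so a placeholder stands in):

    from keystoneauth1.identity import v3
    from keystoneauth1 import session

    auth = v3.Password(
        auth_url='https://keystone-internal.openstack.svc:5000',
        username='ceilometer',
        password='REDACTED',  # masked in the dump
        project_name='service',
        user_domain_name='Default',
        project_domain_name='Default',
    )
    sess = session.Session(auth=auth)
    print(sess.get_token())  # one Keystone round-trip to confirm the credentials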
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.812 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_section           = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.812 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_type              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.812 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.cafile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.812 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.certfile               = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.812 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.collect_timing         = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.812 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.insecure               = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.812 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.interface              = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.812 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.keyfile                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.812 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.region_name            = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.813 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.split_loggers          = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.813 14 DEBUG cotyledon.oslo_config_glue [-] gnocchi.timeout                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.813 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_section             = service_credentials log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.813 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_type                = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.813 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.cafile                   = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.813 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.certfile                 = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.813 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.collect_timing           = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.813 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.insecure                 = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.813 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.interface                = internal log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.813 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.keyfile                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.813 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.region_name              = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.813 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.split_loggers            = False log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.814 14 DEBUG cotyledon.oslo_config_glue [-] zaqar.timeout                  = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.814 14 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.814 14 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.file_event_handler_interval = 1 log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.814 14 DEBUG cotyledon.oslo_config_glue [-] oslo_reports.log_dir           = None log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2824
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.814 14 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.12/site-packages/oslo_config/cfg.py:2828
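The entire dump above, including the closing row of asterisks, comes from oslo.config's ConfigOpts.log_opt_values() (the cfg.py:2817/2824 call sites named in each line), which cotyledon's oslo_config_glue invokes in every child process because log_options = True. A minimal reproduction:

    import logging
    from oslo_config import cfg

    logging.basicConfig(level=logging.DEBUG)
    LOG = logging.getLogger(__name__)

    conf = cfg.ConfigOpts()
    conf.register_opts([cfg.StrOpt('pipeline_cfg_file', default='pipeline.yaml')])
    conf([])  # parse an empty command line
    # Logs every registered option (secret options masked as ****),
    # then a terminating row of asterisks, as seen above.
    conf.log_opt_values(LOG, logging.DEBUG)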
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.814 14 DEBUG cotyledon._service [-] Run service AgentManager(0) [14] wait_forever /usr/lib/python3.12/site-packages/cotyledon/_service.py:263
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.818 14 DEBUG ceilometer.agent [-] Config file: {'sources': [{'name': 'pollsters', 'interval': 120, 'meters': ['power.state', 'cpu', 'memory.usage', 'disk.*', 'network.*']}]} load_config /usr/lib/python3.12/site-packages/ceilometer/agent.py:64
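The dict logged above is the parsed polling definition, presumably loaded from polling.cfg_file (polling.yaml, per the dump). On disk the same content would read roughly:

    sources:
      - name: pollsters
        interval: 120
        meters:
          - power.state
          - cpu
          - memory.usage
          - disk.*
          - network.*

So every meter in this source is polled on a 120-second cycle.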
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.856 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is greater than the number of worker threads available to execute them; polling can therefore take longer than expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.857 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
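These two lines describe the scheduling model: polling.threads_to_process_pollsters = 1, so all pollsters registered for this source share one worker thread and execute sequentially. A stripped-down sketch of that pattern (not ceilometer's actual code):

    from concurrent.futures import ThreadPoolExecutor

    def run_pollster(name):
        # Placeholder for discovery plus sample collection for one pollster.
        print('polling', name)

    pollsters = ['power.state', 'cpu', 'memory.usage']  # names from the source above
    # With one worker and many pollsters the cycle serializes, hence the
    # warning that polling can take longer than expected.
    with ThreadPoolExecutor(max_workers=1) as executor:
        for name in pollsters:
            executor.submit(run_pollster, name)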
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.858 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.859 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.861 14 DEBUG ceilometer.compute.virt.libvirt.utils [-] Connecting to libvirt: qemu:///system new_libvirt_connection /usr/lib/python3.12/site-packages/ceilometer/compute/virt/libvirt/utils.py:96
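compute.instance_discovery_method = libvirt_metadata (see the dump above), so discovery queries the local libvirt daemon for domains instead of the Nova API. The connection logged here can be reproduced with libvirt-python; a sketch with error handling omitted:

    import libvirt

    conn = libvirt.open('qemu:///system')  # same URI as the log line above
    for dom in conn.listAllDomains():
        print(dom.UUIDString(), dom.name(), dom.isActive())
    conn.close()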
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69a30>] with cache [{}], pollster history [{'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.872 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.873 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.874 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.874 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.874 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.874 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.875 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.875 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.875 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.876 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.876 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.876 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.876 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.877 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.877 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.877 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.877 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.877 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.878 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.878 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.878 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.878 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.878 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.879 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.879 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.879 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.879 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.879 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.880 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.880 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.880 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.880 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.881 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.881 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.881 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.881 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.882 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.882 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.882 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.882 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.882 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.883 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.883 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.883 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.884 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.884 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.884 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.885 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.885 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.885 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.885 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.886 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.886 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.886 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.887 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.887 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.887 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.887 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.887 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.887 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.888 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.888 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.888 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.888 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.888 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.888 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.889 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.889 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.889 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.889 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.889 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.889 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.890 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.890 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.890 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.890 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:07:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:07:13.890 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
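The cycle above is the agent's standard discover-then-poll loop: each pollster runs its local_instances discovery and is skipped for the cycle when discovery returns nothing (no instances run on this host yet). A minimal Python sketch of that control flow; Pollster, discover_local_instances, and run_cycle are illustrative names, not the real ceilometer API.

    # Sketch of the discover-then-poll cycle seen in the debug lines above.
    from dataclasses import dataclass

    @dataclass
    class Pollster:
        name: str                      # e.g. "power.state", "cpu", "memory.usage"
        discovery: str = "local_instances"

    def discover_local_instances():
        # No instances are running on this host, so discovery returns nothing
        # and every compute pollster is skipped for the cycle.
        return []

    def run_cycle(pollsters):
        for p in pollsters:
            resources = discover_local_instances()
            if not resources:
                print(f"Skip pollster {p.name}, no resources found this cycle")
                continue
            # ... get_samples(resources) would run here ...
            print(f"Finished processing pollster [{p.name}].")

    run_cycle([Pollster("power.state"), Pollster("cpu"), Pollster("memory.usage")])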
Oct 11 02:07:13 compute-0 python3.9[368133]: ansible-ansible.legacy.file Invoked with group=zuul mode=0700 owner=zuul setype=container_file_t dest=/var/lib/openstack/healthchecks/node_exporter/ _original_basename=healthcheck recurse=False state=file path=/var/lib/openstack/healthchecks/node_exporter/ force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
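For reference, the file-module invocation above reduces to three operations on an existing path: set owner/group to zuul, set mode 0700, and set the SELinux type container_file_t. A rough Python equivalent (illustrative only; the real module also handles check mode, diffs, and recursion):

    import grp, os, pwd, subprocess

    path = "/var/lib/openstack/healthchecks/node_exporter/"
    os.chown(path, pwd.getpwnam("zuul").pw_uid, grp.getgrnam("zuul").gr_gid)
    os.chmod(path, 0o700)
    subprocess.run(["chcon", "-t", "container_file_t", path], check=True)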
Oct 11 02:07:13 compute-0 sudo[368110]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:13 compute-0 sshd-session[367594]: Connection closed by authenticating user root 121.227.153.123 port 44104 [preauth]
Oct 11 02:07:14 compute-0 ceph-mon[191930]: pgmap v871: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:14 compute-0 sudo[368301]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iynuaejugrnilsqamppknerfkvgkxsmj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148434.4179907-526-22748797074977/AnsiballZ_container_config_data.py'
Oct 11 02:07:14 compute-0 sudo[368301]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:07:15 compute-0 python3.9[368303]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/openstack/config/telemetry config_pattern=node_exporter.json debug=False
Oct 11 02:07:15 compute-0 sudo[368301]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v872: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:15 compute-0 unix_chkpwd[368341]: password check failed for user (root)
Oct 11 02:07:15 compute-0 sshd-session[368174]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:07:16 compute-0 sudo[368484]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zcmfegmbguireiwuztmmkpeoakbkgjaq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148435.6059704-535-211389775045787/AnsiballZ_container_config_hash.py'
Oct 11 02:07:16 compute-0 sudo[368484]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:16 compute-0 podman[368429]: 2025-10-11 02:07:16.249674064 +0000 UTC m=+0.135578234 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, container_name=iscsid, config_id=iscsid, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:07:16 compute-0 podman[368428]: 2025-10-11 02:07:16.249288499 +0000 UTC m=+0.142709174 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, container_name=multipathd, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=multipathd, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:07:16 compute-0 python3.9[368492]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
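The container_config_hash step is what lets a config change trigger a container restart: it digests the rendered config files so a later run can compare hashes. A sketch under the assumption that files below config_vol_prefix are hashed wholesale; the real module's exact hashing scheme may differ.

    # Illustrative only: one digest per config directory under config_vol_prefix.
    import hashlib, os

    def config_hash(config_dir):
        digest = hashlib.sha256()
        for root, _dirs, files in sorted(os.walk(config_dir)):
            for name in sorted(files):
                path = os.path.join(root, name)
                digest.update(path.encode())
                with open(path, "rb") as f:
                    digest.update(f.read())
        return digest.hexdigest()

    # print(config_hash("/var/lib/config-data/some-service"))  # path hypothetical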
Oct 11 02:07:16 compute-0 sudo[368484]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:16 compute-0 ceph-mon[191930]: pgmap v872: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v873: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:17 compute-0 sudo[368644]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eoqzwkzdilqqtpfyqubuikundfkajfzl ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760148436.9722433-545-79218353918490/AnsiballZ_edpm_container_manage.py'
Oct 11 02:07:17 compute-0 sudo[368644]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:17 compute-0 python3[368646]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/openstack/config/telemetry config_id=edpm config_overrides={} config_patterns=node_exporter.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 02:07:18 compute-0 sshd-session[368174]: Failed password for root from 121.227.153.123 port 44108 ssh2
Oct 11 02:07:18 compute-0 python3[368646]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: [
                                                {
                                                     "Id": "0da6a335fe1356545476b749c68f022c897de3a2139e8f0054f6937349ee2b83",
                                                     "Digest": "sha256:fa8e5700b7762fffe0674e944762f44bb787a7e44d97569fe55348260453bf80",
                                                     "RepoTags": [
                                                          "quay.io/prometheus/node-exporter:v1.5.0"
                                                     ],
                                                     "RepoDigests": [
                                                          "quay.io/prometheus/node-exporter@sha256:39c642b2b337e38c18e80266fb14383754178202f40103646337722a594d984c",
                                                          "quay.io/prometheus/node-exporter@sha256:fa8e5700b7762fffe0674e944762f44bb787a7e44d97569fe55348260453bf80"
                                                     ],
                                                     "Parent": "",
                                                     "Comment": "",
                                                     "Created": "2022-11-29T19:06:14.987394068Z",
                                                     "Config": {
                                                          "User": "nobody",
                                                          "ExposedPorts": {
                                                               "9100/tcp": {}
                                                          },
                                                          "Env": [
                                                               "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
                                                          ],
                                                          "Entrypoint": [
                                                               "/bin/node_exporter"
                                                          ],
                                                          "Labels": {
                                                               "maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>"
                                                          }
                                                     },
                                                     "Version": "19.03.8",
                                                     "Author": "",
                                                     "Architecture": "amd64",
                                                     "Os": "linux",
                                                     "Size": 23851788,
                                                     "VirtualSize": 23851788,
                                                     "GraphDriver": {
                                                          "Name": "overlay",
                                                          "Data": {
                                                               "LowerDir": "/var/lib/containers/storage/overlay/a1185e7325783fe8cba63270bc6e59299386d7c73e4bc34c560a1fbc9e6d7e2c/diff:/var/lib/containers/storage/overlay/0438ade5aeea533b00cd75095bec75fbc2b307bace4c89bb39b75d428637bcd8/diff",
                                                               "UpperDir": "/var/lib/containers/storage/overlay/2cd9444c84550fbd551e3826a8110fcc009757858b99e84f1119041f2325189b/diff",
                                                               "WorkDir": "/var/lib/containers/storage/overlay/2cd9444c84550fbd551e3826a8110fcc009757858b99e84f1119041f2325189b/work"
                                                          }
                                                     },
                                                     "RootFS": {
                                                          "Type": "layers",
                                                          "Layers": [
                                                               "sha256:0438ade5aeea533b00cd75095bec75fbc2b307bace4c89bb39b75d428637bcd8",
                                                               "sha256:9f2d25037e3e722ca7f4ca9c7a885f19a2ce11140592ee0acb323dec3b26640d",
                                                               "sha256:76857a93cd03e12817c36c667cc3263d58886232cad116327e55d79036e5977d"
                                                          ]
                                                     },
                                                     "Labels": {
                                                          "maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>"
                                                     },
                                                     "Annotations": {},
                                                     "ManifestType": "application/vnd.docker.distribution.manifest.v2+json",
                                                     "User": "nobody",
                                                     "History": [
                                                          {
                                                               "created": "2022-10-26T06:30:33.700079457Z",
                                                               "created_by": "/bin/sh -c #(nop) ADD file:5e991de3200129dc05c3130f7a64bebb5704486b4f773bfcaa6b13165d6c2416 in / "
                                                          },
                                                          {
                                                               "created": "2022-10-26T06:30:33.794221299Z",
                                                               "created_by": "/bin/sh -c #(nop)  CMD [\"sh\"]",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2022-11-15T10:54:54.845364304Z",
                                                               "created_by": "/bin/sh -c #(nop)  MAINTAINER The Prometheus Authors <prometheus-developers@googlegroups.com>",
                                                               "author": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2022-11-15T10:54:55.54866664Z",
                                                               "created_by": "/bin/sh -c #(nop) COPY dir:02c961e21531be78a67ed9bad67d03391cfedcead8b0a35cfb9171346636f11a in / ",
                                                               "author": "The Prometheus Authors <prometheus-developers@googlegroups.com>"
                                                          },
                                                          {
                                                               "created": "2022-11-29T19:06:13.622645057Z",
                                                               "created_by": "/bin/sh -c #(nop)  LABEL maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2022-11-29T19:06:13.810765105Z",
                                                               "created_by": "/bin/sh -c #(nop)  ARG ARCH=amd64",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2022-11-29T19:06:13.990897895Z",
                                                               "created_by": "/bin/sh -c #(nop)  ARG OS=linux",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2022-11-29T19:06:14.358293759Z",
                                                               "created_by": "/bin/sh -c #(nop) COPY file:3ef20dd145817033186947b860c3b6f7bb06d4c435257258c0e5df15f6e51eb7 in /bin/node_exporter "
                                                          },
                                                          {
                                                               "created": "2022-11-29T19:06:14.630644274Z",
                                                               "created_by": "/bin/sh -c #(nop)  EXPOSE 9100",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2022-11-29T19:06:14.79596292Z",
                                                               "created_by": "/bin/sh -c #(nop)  USER nobody",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2022-11-29T19:06:14.987394068Z",
                                                               "created_by": "/bin/sh -c #(nop)  ENTRYPOINT [\"/bin/node_exporter\"]",
                                                               "empty_layer": true
                                                          }
                                                     ],
                                                     "NamesHistory": [
                                                          "quay.io/prometheus/node-exporter:v1.5.0"
                                                     ]
                                                }
                                           ]
                                           : quay.io/prometheus/node-exporter:v1.5.0
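The indented dump above is the parsed output of a podman image inspect. The same fields can be read back directly; a short sketch assuming the podman CLI is on PATH:

    import json, subprocess

    out = subprocess.run(
        ["podman", "image", "inspect", "quay.io/prometheus/node-exporter:v1.5.0"],
        check=True, capture_output=True, text=True,
    ).stdout
    image = json.loads(out)[0]            # inspect returns a one-element list
    print(image["Id"][:12], image["Architecture"], image["Size"])
    print(image["Config"]["Entrypoint"])  # ["/bin/node_exporter"]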
Oct 11 02:07:18 compute-0 systemd[1]: libpod-adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae.scope: Deactivated successfully.
Oct 11 02:07:18 compute-0 systemd[1]: libpod-adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae.scope: Consumed 7.652s CPU time.
Oct 11 02:07:18 compute-0 podman[368693]: 2025-10-11 02:07:18.348947834 +0000 UTC m=+0.090750865 container died adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:07:18 compute-0 systemd[1]: adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae-288d69b1c3a3be9c.timer: Deactivated successfully.
Oct 11 02:07:18 compute-0 systemd[1]: Stopped /usr/bin/podman healthcheck run adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae.
Oct 11 02:07:18 compute-0 systemd[1]: adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae-288d69b1c3a3be9c.service: Failed to open /run/systemd/transient/adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae-288d69b1c3a3be9c.service: No such file or directory
Oct 11 02:07:18 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae-userdata-shm.mount: Deactivated successfully.
Oct 11 02:07:18 compute-0 systemd[1]: var-lib-containers-storage-overlay-231aaa5d674eb54e8581f7727e06857007d94f590816ef0295f4b42fe3deb791-merged.mount: Deactivated successfully.
Oct 11 02:07:18 compute-0 podman[368693]: 2025-10-11 02:07:18.433350634 +0000 UTC m=+0.175153605 container cleanup adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:07:18 compute-0 python3[368646]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman stop node_exporter
Oct 11 02:07:18 compute-0 systemd[1]: edpm_node_exporter.service: Main process exited, code=exited, status=2/INVALIDARGUMENT
Oct 11 02:07:18 compute-0 systemd[1]: adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae-288d69b1c3a3be9c.timer: Failed to open /run/systemd/transient/adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae-288d69b1c3a3be9c.timer: No such file or directory
Oct 11 02:07:18 compute-0 systemd[1]: adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae-288d69b1c3a3be9c.service: Failed to open /run/systemd/transient/adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae-288d69b1c3a3be9c.service: No such file or directory
Oct 11 02:07:18 compute-0 ceph-mon[191930]: pgmap v873: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:18 compute-0 podman[368720]: 2025-10-11 02:07:18.595195756 +0000 UTC m=+0.113914917 container remove adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:07:18 compute-0 podman[368721]: Error: no container with ID adb1e0c21befe8faa54dce0c470ee88cc55bb9eea76da8f5c7ade62c423a2aae found in database: no such container
Oct 11 02:07:18 compute-0 python3[368646]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman rm --force node_exporter
Oct 11 02:07:18 compute-0 systemd[1]: edpm_node_exporter.service: Control process exited, code=exited, status=125/n/a
Oct 11 02:07:18 compute-0 systemd[1]: edpm_node_exporter.service: Failed with result 'exit-code'.
Oct 11 02:07:18 compute-0 podman[368745]: 2025-10-11 02:07:18.749089438 +0000 UTC m=+0.099402343 container create 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, config_id=edpm, container_name=node_exporter)
Oct 11 02:07:18 compute-0 podman[368745]: 2025-10-11 02:07:18.698725305 +0000 UTC m=+0.049038260 image pull 0da6a335fe1356545476b749c68f022c897de3a2139e8f0054f6937349ee2b83 quay.io/prometheus/node-exporter:v1.5.0
Oct 11 02:07:18 compute-0 python3[368646]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman create --name node_exporter --conmon-pidfile /run/node_exporter.pid --env OS_ENDPOINT_TYPE=internal --healthcheck-command /openstack/healthcheck node_exporter --label config_id=edpm --label container_name=node_exporter --label managed_by=edpm_ansible --label config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']} --log-driver journald --log-level info --network host --privileged=True --publish 9100:9100 --user root --volume /var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z --volume /var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z --volume /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw --volume /var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z quay.io/prometheus/node-exporter:v1.5.0 --web.config.file=/etc/node_exporter/node_exporter.yaml --web.disable-exporter-metrics --collector.systemd --collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\.service --no-collector.dmi --no-collector.entropy --no-collector.thermal_zone --no-collector.time --no-collector.timex --no-collector.uname --no-collector.stat --no-collector.hwmon --no-collector.os --no-collector.selinux --no-collector.textfile --no-collector.powersupplyclass --no-collector.pressure --no-collector.rapl
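The sequence logged here (stop, rm --force, create, start) is the recreate cycle edpm_container_manage drives whenever the container definition changes. Reduced to its four podman calls (flags abbreviated; the full argument list is in the create line above):

    import subprocess

    def podman(*args, check=True):
        return subprocess.run(["podman", *args], check=check)

    name, image = "node_exporter", "quay.io/prometheus/node-exporter:v1.5.0"
    podman("stop", name, check=False)          # tolerate "no such container"
    podman("rm", "--force", name, check=False)
    podman("create", "--name", name, "--network", "host", "--user", "root",
           image, "--web.disable-exporter-metrics")
    podman("start", name)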
Oct 11 02:07:18 compute-0 systemd[1]: edpm_node_exporter.service: Scheduled restart job, restart counter is at 1.
Oct 11 02:07:18 compute-0 systemd[1]: Stopped node_exporter container.
Oct 11 02:07:18 compute-0 systemd[1]: Starting node_exporter container...
Oct 11 02:07:18 compute-0 sshd-session[368174]: Connection closed by authenticating user root 121.227.153.123 port 44108 [preauth]
Oct 11 02:07:18 compute-0 systemd[1]: Started libpod-conmon-7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce.scope.
Oct 11 02:07:18 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:07:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/273d67693d8ca8efc880c3fb4a42f063e5e5cd0c32ddf4dae2a4d4b38efa1053/merged/etc/node_exporter/node_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/273d67693d8ca8efc880c3fb4a42f063e5e5cd0c32ddf4dae2a4d4b38efa1053/merged/etc/node_exporter/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:19 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce.
Oct 11 02:07:19 compute-0 podman[368756]: 2025-10-11 02:07:19.08138287 +0000 UTC m=+0.287663395 container init 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.118Z caller=node_exporter.go:180 level=info msg="Starting node_exporter" version="(version=1.5.0, branch=HEAD, revision=1b48970ffcf5630534fb00bb0687d73c66d1c959)"
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.118Z caller=node_exporter.go:181 level=info msg="Build context" build_context="(go=go1.19.3, user=root@6e7732a7b81b, date=20221129-18:59:09)"
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.118Z caller=node_exporter.go:183 level=warn msg="Node Exporter is running as root user. This exporter is designed to run as unprivileged user, root is not required."
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.119Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/)
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.119Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=systemd_linux.go:152 level=info collector=systemd msg="Parsed flag --collector.systemd.unit-include" flag=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\.service
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=systemd_linux.go:154 level=info collector=systemd msg="Parsed flag --collector.systemd.unit-exclude" flag=.+\.(automount|device|mount|scope|slice)
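The two systemd-collector patterns above decide which units node_exporter will export. A quick check of what they match, using re.match as an approximation; the exporter's own matching semantics may differ slightly:

    import re

    include = re.compile(r"(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\.service")
    exclude = re.compile(r".+\.(automount|device|mount|scope|slice)")

    for unit in ["edpm_node_exporter.service", "ovs-vswitchd.service",
                 "virtqemud.service", "sshd.service", "tmp.mount"]:
        keep = bool(include.match(unit)) and not exclude.match(unit)
        print(unit, "->", "collected" if keep else "filtered out")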
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=diskstats_common.go:111 level=info collector=diskstats msg="Parsed flag --collector.diskstats.device-exclude" flag=^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\d+n\d+p)\d+$
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=diskstats_linux.go:264 level=error collector=diskstats msg="Failed to open directory, disabling udev device properties" path=/run/udev/data
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:110 level=info msg="Enabled collectors"
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=arp
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=bcache
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=bonding
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=btrfs
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=conntrack
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=cpu
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=cpufreq
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=diskstats
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=edac
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=fibrechannel
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=filefd
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=filesystem
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=infiniband
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=ipvs
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=loadavg
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=mdadm
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=meminfo
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=netclass
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=netdev
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.120Z caller=node_exporter.go:117 level=info collector=netstat
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.121Z caller=node_exporter.go:117 level=info collector=nfs
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.121Z caller=node_exporter.go:117 level=info collector=nfsd
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.121Z caller=node_exporter.go:117 level=info collector=nvme
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.121Z caller=node_exporter.go:117 level=info collector=schedstat
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.121Z caller=node_exporter.go:117 level=info collector=sockstat
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.121Z caller=node_exporter.go:117 level=info collector=softnet
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.121Z caller=node_exporter.go:117 level=info collector=systemd
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.121Z caller=node_exporter.go:117 level=info collector=tapestats
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.121Z caller=node_exporter.go:117 level=info collector=udp_queues
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.121Z caller=node_exporter.go:117 level=info collector=vmstat
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.121Z caller=node_exporter.go:117 level=info collector=xfs
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.121Z caller=node_exporter.go:117 level=info collector=zfs
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.122Z caller=tls_config.go:232 level=info msg="Listening on" address=[::]:9100
Oct 11 02:07:19 compute-0 node_exporter[368783]: ts=2025-10-11T02:07:19.123Z caller=tls_config.go:268 level=info msg="TLS is enabled." http2=true address=[::]:9100
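With TLS enabled on :9100, scrapes must speak HTTPS. A hedged probe of the endpoint; the CA bundle path is hypothetical and depends on where the telemetry certs were mounted:

    import ssl, urllib.request

    ctx = ssl.create_default_context(
        cafile="/var/lib/openstack/certs/telemetry/default/ca.crt")  # hypothetical path
    with urllib.request.urlopen("https://compute-0:9100/metrics", context=ctx) as resp:
        print(resp.status, resp.read(200))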
Oct 11 02:07:19 compute-0 podman[368756]: 2025-10-11 02:07:19.136988383 +0000 UTC m=+0.343268908 container start 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:07:19 compute-0 podman[368764]: node_exporter
Oct 11 02:07:19 compute-0 python3[368646]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman start node_exporter
Oct 11 02:07:19 compute-0 systemd[1]: Started node_exporter container.
Oct 11 02:07:19 compute-0 podman[368794]: 2025-10-11 02:07:19.279516295 +0000 UTC m=+0.115565177 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
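[annotation] The config_data payload podman echoes in these events is the full edpm_ansible container definition. As a rough illustration only (not the module's actual code, and with the volume list abridged), the keys map onto a podman run invocation roughly like this:

    # Rough mapping of the config_data keys above onto a `podman run`
    # argument vector. Illustration only -- not edpm_ansible's real code.
    config_data = {
        "image": "quay.io/prometheus/node-exporter:v1.5.0",
        "user": "root",
        "privileged": True,
        "net": "host",
        "ports": ["9100:9100"],
        "environment": {"OS_ENDPOINT_TYPE": "internal"},
        "volumes": [  # abridged from the logged dict
            "/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z",
        ],
        "command": ["--web.config.file=/etc/node_exporter/node_exporter.yaml"],
    }

    argv = ["podman", "run", "--detach", "--name", "node_exporter"]
    if config_data.get("privileged"):
        argv.append("--privileged")
    argv += ["--user", config_data["user"], "--network", config_data["net"]]
    for port in config_data["ports"]:
        argv += ["--publish", port]  # moot under host networking; podman ignores it
    for key, value in config_data["environment"].items():
        argv += ["--env", f"{key}={value}"]
    for volume in config_data["volumes"]:
        argv += ["--volume", volume]
    argv.append(config_data["image"])
    argv += config_data["command"]  # appended as arguments to the image entrypoint
    print(" ".join(argv))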
Oct 11 02:07:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v874: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:19 compute-0 sudo[368644]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:07:20 compute-0 unix_chkpwd[368990]: password check failed for user (root)
Oct 11 02:07:20 compute-0 sshd-session[368793]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:07:20 compute-0 sudo[368993]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xibrwobzqpnyujzsutgzowgwhjiiuvxp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148439.7691672-553-147562154154369/AnsiballZ_stat.py'
Oct 11 02:07:20 compute-0 sudo[368993]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:20 compute-0 python3.9[368995]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:07:20 compute-0 ceph-mon[191930]: pgmap v874: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:20 compute-0 sudo[368993]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v875: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:21 compute-0 sudo[369147]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-msfljnciwwykpchymrucvqxflsuafcen ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148441.010748-562-96565654716037/AnsiballZ_file.py'
Oct 11 02:07:21 compute-0 sudo[369147]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:21 compute-0 sshd-session[368793]: Failed password for root from 121.227.153.123 port 44110 ssh2
Oct 11 02:07:21 compute-0 python3.9[369149]: ansible-file Invoked with path=/etc/systemd/system/edpm_node_exporter.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:07:21 compute-0 sudo[369147]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:22 compute-0 ceph-mon[191930]: pgmap v875: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:22 compute-0 sudo[369298]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dnzngffpdidmfaqhafgkoawcqvzuxzez ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148442.0019379-562-248667635016677/AnsiballZ_copy.py'
Oct 11 02:07:22 compute-0 sudo[369298]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:23 compute-0 python3.9[369300]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760148442.0019379-562-248667635016677/source dest=/etc/systemd/system/edpm_node_exporter.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:07:23 compute-0 sudo[369298]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v876: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:23 compute-0 sshd-session[368793]: Connection closed by authenticating user root 121.227.153.123 port 44110 [preauth]
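[annotation] Interleaved with the deployment, sshd-session records a password-guessing run against root from 121.227.153.123: pam_unix auth failure, unix_chkpwd failure, "Failed password", then the preauth disconnect. A small tally over lines in exactly this format:

    # Tally failed SSH logins per (user, source) from journal lines in the
    # format seen above ("Failed password for <user> from <ip> port <p> ssh2").
    import re
    from collections import Counter

    FAILED = re.compile(r"Failed password for (\S+) from (\S+) port \d+ ssh2")

    def count_failures(lines):
        per_source = Counter()
        for line in lines:
            m = FAILED.search(line)
            if m:
                per_source[m.groups()] += 1
        return per_source

    sample = [
        "Oct 11 02:07:21 compute-0 sshd-session[368793]: "
        "Failed password for root from 121.227.153.123 port 44110 ssh2",
    ]
    print(count_failures(sample))  # Counter({('root', '121.227.153.123'): 1})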
Oct 11 02:07:23 compute-0 sudo[369374]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wlgqodrfpqukqomhhonueeibowmtjklk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148442.0019379-562-248667635016677/AnsiballZ_systemd.py'
Oct 11 02:07:23 compute-0 sudo[369374]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:24 compute-0 python3.9[369376]: ansible-systemd Invoked with state=started name=edpm_node_exporter.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:07:24 compute-0 sudo[369374]: pam_unix(sudo:session): session closed for user root
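[annotation] Taken together, the last three tasks are the standard unit rollout: remove the stale edpm_node_exporter.requires directory, install /etc/systemd/system/edpm_node_exporter.service (mode 0644, root:root), then enable and start it. Done by hand, the equivalent is roughly the sketch below; the source path is assumed, since the log does not show the rendered unit.

    # Manual equivalent of the ansible-copy + ansible-systemd tasks above.
    import os
    import shutil
    import subprocess

    src = "/tmp/edpm_node_exporter.service"  # assumed staging path
    dst = "/etc/systemd/system/edpm_node_exporter.service"
    shutil.copy(src, dst)
    os.chmod(dst, 0o644)  # mode=0644 as in the ansible-copy task

    # The logged task ran with daemon_reload=False, but a manual rollout of a
    # brand-new unit typically needs a reload before starting it.
    subprocess.run(["systemctl", "daemon-reload"], check=True)
    subprocess.run(["systemctl", "enable", "--now", "edpm_node_exporter.service"],
                   check=True)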
Oct 11 02:07:24 compute-0 ceph-mon[191930]: pgmap v876: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:07:25 compute-0 unix_chkpwd[369482]: password check failed for user (root)
Oct 11 02:07:25 compute-0 sshd-session[369377]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:07:25 compute-0 podman[369480]: 2025-10-11 02:07:25.267727124 +0000 UTC m=+0.145831264 container health_status ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.buildah.version=1.33.7, com.redhat.component=ubi9-minimal-container, container_name=openstack_network_exporter, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, version=9.6, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vcs-type=git, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, architecture=x86_64, managed_by=edpm_ansible, release=1755695350, name=ubi9-minimal, vendor=Red Hat, Inc., io.openshift.tags=minimal rhel9, distribution-scope=public, io.openshift.expose-services=, maintainer=Red Hat, Inc.)
Oct 11 02:07:25 compute-0 sudo[369552]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ukqsqvzkxsxldtkrebsbkipvjywxymbt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148444.750544-582-160628772088808/AnsiballZ_systemd.py'
Oct 11 02:07:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v877: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:25 compute-0 sudo[369552]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #39. Immutable memtables: 0.
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:25.649793) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 17] Flushing memtable with next log file: 39
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148445649853, "job": 17, "event": "flush_started", "num_memtables": 1, "num_entries": 2037, "num_deletes": 251, "total_data_size": 3472106, "memory_usage": 3541648, "flush_reason": "Manual Compaction"}
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 17] Level-0 flush table #40: started
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148445674576, "cf_name": "default", "job": 17, "event": "table_file_creation", "file_number": 40, "file_size": 3407183, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 16323, "largest_seqno": 18359, "table_properties": {"data_size": 3397918, "index_size": 5887, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 2309, "raw_key_size": 18055, "raw_average_key_size": 19, "raw_value_size": 3379559, "raw_average_value_size": 3693, "num_data_blocks": 267, "num_entries": 915, "num_filter_entries": 915, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760148212, "oldest_key_time": 1760148212, "file_creation_time": 1760148445, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 40, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 17] Flush lasted 24825 microseconds, and 14696 cpu microseconds.
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:25.674627) [db/flush_job.cc:967] [default] [JOB 17] Level-0 flush table #40: 3407183 bytes OK
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:25.674652) [db/memtable_list.cc:519] [default] Level-0 commit table #40 started
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:25.677592) [db/memtable_list.cc:722] [default] Level-0 commit table #40: memtable #1 done
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:25.677613) EVENT_LOG_v1 {"time_micros": 1760148445677606, "job": 17, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:25.677639) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 17] Try to delete WAL files size 3463620, prev total WAL file size 3463620, number of live WAL files 2.
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000036.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:25.679777) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730031323535' seq:72057594037927935, type:22 .. '7061786F730031353037' seq:0, type:0; will stop at (end)
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 18] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 17 Base level 0, inputs: [40(3327KB)], [38(7543KB)]
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148445679820, "job": 18, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [40], "files_L6": [38], "score": -1, "input_data_size": 11132212, "oldest_snapshot_seqno": -1}
Oct 11 02:07:25 compute-0 python3.9[369554]: ansible-ansible.builtin.systemd Invoked with name=edpm_node_exporter.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 18] Generated table #41: 4383 keys, 9358956 bytes, temperature: kUnknown
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148445741954, "cf_name": "default", "job": 18, "event": "table_file_creation", "file_number": 41, "file_size": 9358956, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 9325851, "index_size": 21027, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 11013, "raw_key_size": 105884, "raw_average_key_size": 24, "raw_value_size": 9242832, "raw_average_value_size": 2108, "num_data_blocks": 895, "num_entries": 4383, "num_filter_entries": 4383, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760148445, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 41, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:25.742448) [db/compaction/compaction_job.cc:1663] [default] [JOB 18] Compacted 1@0 + 1@6 files to L6 => 9358956 bytes
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:25.744993) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 178.8 rd, 150.3 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(3.2, 7.4 +0.0 blob) out(8.9 +0.0 blob), read-write-amplify(6.0) write-amplify(2.7) OK, records in: 4897, records dropped: 514 output_compression: NoCompression
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:25.745024) EVENT_LOG_v1 {"time_micros": 1760148445745008, "job": 18, "event": "compaction_finished", "compaction_time_micros": 62262, "compaction_time_cpu_micros": 44292, "output_level": 6, "num_output_files": 1, "total_output_size": 9358956, "num_input_records": 4897, "num_output_records": 4383, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000040.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148445746657, "job": 18, "event": "table_file_deletion", "file_number": 40}
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000038.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148445750030, "job": 18, "event": "table_file_deletion", "file_number": 38}
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:25.679506) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:25.750286) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:25.750292) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:25.750296) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:25.750299) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:25.750301) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
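[annotation] Each ceph-mon RocksDB cycle above follows the same shape: memtable flush to an L0 table (flush_started, table_file_creation, flush_finished), WAL deletion, then a manual L0-to-L6 compaction (compaction_started, compaction_finished, table_file_deletion). The EVENT_LOG_v1 payloads are plain JSON after the marker, so they can be pulled out directly:

    # Extract and summarize the RocksDB EVENT_LOG_v1 records embedded in
    # ceph-mon journal lines like those above.
    import json

    MARKER = "EVENT_LOG_v1 "

    def iter_events(lines):
        for line in lines:
            idx = line.find(MARKER)
            if idx != -1:
                yield json.loads(line[idx + len(MARKER):])

    sample = [
        'Oct 11 02:07:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 '
        '{"time_micros": 1760148445745008, "job": 18, "event": '
        '"compaction_finished", "compaction_time_micros": 62262, '
        '"lsm_state": [0, 0, 0, 0, 0, 0, 1]}',
    ]
    for ev in iter_events(sample):
        print(ev["event"], "job", ev["job"], "lsm_state", ev.get("lsm_state"))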
Oct 11 02:07:25 compute-0 systemd[1]: Stopping node_exporter container...
Oct 11 02:07:25 compute-0 systemd[1]: libpod-7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce.scope: Deactivated successfully.
Oct 11 02:07:25 compute-0 podman[369558]: 2025-10-11 02:07:25.907909645 +0000 UTC m=+0.120765520 container died 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:07:25 compute-0 systemd[1]: 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce-3f756d249b752f81.timer: Deactivated successfully.
Oct 11 02:07:25 compute-0 systemd[1]: Stopped /usr/bin/podman healthcheck run 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce.
Oct 11 02:07:25 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce-userdata-shm.mount: Deactivated successfully.
Oct 11 02:07:25 compute-0 systemd[1]: var-lib-containers-storage-overlay-273d67693d8ca8efc880c3fb4a42f063e5e5cd0c32ddf4dae2a4d4b38efa1053-merged.mount: Deactivated successfully.
Oct 11 02:07:26 compute-0 podman[369558]: 2025-10-11 02:07:26.006432822 +0000 UTC m=+0.219288697 container cleanup 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:07:26 compute-0 podman[369558]: node_exporter
Oct 11 02:07:26 compute-0 systemd[1]: edpm_node_exporter.service: Main process exited, code=exited, status=2/INVALIDARGUMENT
Oct 11 02:07:26 compute-0 systemd[1]: libpod-conmon-7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce.scope: Deactivated successfully.
Oct 11 02:07:26 compute-0 podman[369586]: node_exporter
Oct 11 02:07:26 compute-0 systemd[1]: edpm_node_exporter.service: Failed with result 'exit-code'.
Oct 11 02:07:26 compute-0 systemd[1]: Stopped node_exporter container.
Oct 11 02:07:26 compute-0 systemd[1]: Starting node_exporter container...
Oct 11 02:07:26 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:07:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/273d67693d8ca8efc880c3fb4a42f063e5e5cd0c32ddf4dae2a4d4b38efa1053/merged/etc/node_exporter/node_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/273d67693d8ca8efc880c3fb4a42f063e5e5cd0c32ddf4dae2a4d4b38efa1053/merged/etc/node_exporter/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:26 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce.
Oct 11 02:07:26 compute-0 podman[369599]: 2025-10-11 02:07:26.401406569 +0000 UTC m=+0.235429908 container init 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.432Z caller=node_exporter.go:180 level=info msg="Starting node_exporter" version="(version=1.5.0, branch=HEAD, revision=1b48970ffcf5630534fb00bb0687d73c66d1c959)"
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.433Z caller=node_exporter.go:181 level=info msg="Build context" build_context="(go=go1.19.3, user=root@6e7732a7b81b, date=20221129-18:59:09)"
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.433Z caller=node_exporter.go:183 level=warn msg="Node Exporter is running as root user. This exporter is designed to run as unprivileged user, root is not required."
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.434Z caller=systemd_linux.go:152 level=info collector=systemd msg="Parsed flag --collector.systemd.unit-include" flag=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\.service
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.434Z caller=systemd_linux.go:154 level=info collector=systemd msg="Parsed flag --collector.systemd.unit-exclude" flag=.+\.(automount|device|mount|scope|slice)
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=diskstats_common.go:111 level=info collector=diskstats msg="Parsed flag --collector.diskstats.device-exclude" flag=^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\d+n\d+p)\d+$
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=diskstats_linux.go:264 level=error collector=diskstats msg="Failed to open directory, disabling udev device properties" path=/run/udev/data
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/)
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=node_exporter.go:110 level=info msg="Enabled collectors"
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=node_exporter.go:117 level=info collector=arp
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=node_exporter.go:117 level=info collector=bcache
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=node_exporter.go:117 level=info collector=bonding
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=node_exporter.go:117 level=info collector=btrfs
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=node_exporter.go:117 level=info collector=conntrack
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=node_exporter.go:117 level=info collector=cpu
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=node_exporter.go:117 level=info collector=cpufreq
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=node_exporter.go:117 level=info collector=diskstats
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=node_exporter.go:117 level=info collector=edac
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=node_exporter.go:117 level=info collector=fibrechannel
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=node_exporter.go:117 level=info collector=filefd
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=node_exporter.go:117 level=info collector=filesystem
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.435Z caller=node_exporter.go:117 level=info collector=infiniband
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=ipvs
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=loadavg
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=mdadm
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=meminfo
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=netclass
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=netdev
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=netstat
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=nfs
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=nfsd
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=nvme
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=schedstat
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=sockstat
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=softnet
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=systemd
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=tapestats
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=udp_queues
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=vmstat
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=xfs
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.436Z caller=node_exporter.go:117 level=info collector=zfs
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.437Z caller=tls_config.go:232 level=info msg="Listening on" address=[::]:9100
Oct 11 02:07:26 compute-0 node_exporter[369614]: ts=2025-10-11T02:07:26.437Z caller=tls_config.go:268 level=info msg="TLS is enabled." http2=true address=[::]:9100
Oct 11 02:07:26 compute-0 podman[369599]: 2025-10-11 02:07:26.442954626 +0000 UTC m=+0.276977925 container start 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:07:26 compute-0 podman[369599]: node_exporter
Oct 11 02:07:26 compute-0 systemd[1]: Started node_exporter container.
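[annotation] The restart sequence ends with systemd re-arming the transient healthcheck timer, whose service runs "/usr/bin/podman healthcheck run <id>" as shown above. The same check can be driven by hand; exit code 0 corresponds to the health_status=healthy seen in the subsequent podman event.

    # Run the container healthcheck the same way the systemd timer does and
    # interpret the exit status (0 = healthy).
    import subprocess

    result = subprocess.run(
        ["podman", "healthcheck", "run", "node_exporter"],
        capture_output=True, text=True,
    )
    print("healthy" if result.returncode == 0
          else f"unhealthy (rc={result.returncode})")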
Oct 11 02:07:26 compute-0 sudo[369552]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:07:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:07:26 compute-0 podman[369623]: 2025-10-11 02:07:26.560685486 +0000 UTC m=+0.098047810 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:07:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:07:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:07:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:07:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:07:26 compute-0 ceph-mon[191930]: pgmap v877: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:27 compute-0 sshd-session[369377]: Failed password for root from 121.227.153.123 port 47488 ssh2
Oct 11 02:07:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v878: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:27 compute-0 sudo[369793]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cwsagcqliopcngjcwuulhluiyffwljgh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148447.0719767-590-142590680549656/AnsiballZ_stat.py'
Oct 11 02:07:27 compute-0 sudo[369793]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:27 compute-0 python3.9[369795]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/podman_exporter/healthcheck follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:07:28 compute-0 sshd-session[369377]: Connection closed by authenticating user root 121.227.153.123 port 47488 [preauth]
Oct 11 02:07:28 compute-0 ceph-mon[191930]: pgmap v878: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:28 compute-0 sudo[369793]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v879: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:29 compute-0 sudo[369873]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sggsgayixzwnrcafvtqnhdpemagflpxt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148447.0719767-590-142590680549656/AnsiballZ_file.py'
Oct 11 02:07:29 compute-0 sudo[369873]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:29 compute-0 python3.9[369875]: ansible-ansible.legacy.file Invoked with group=zuul mode=0700 owner=zuul setype=container_file_t dest=/var/lib/openstack/healthchecks/podman_exporter/ _original_basename=healthcheck recurse=False state=file path=/var/lib/openstack/healthchecks/podman_exporter/ force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:07:29 compute-0 sudo[369873]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:29 compute-0 podman[157119]: time="2025-10-11T02:07:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:07:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:07:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45036 "" "Go-http-client/1.1"
Oct 11 02:07:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:07:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8516 "" "Go-http-client/1.1"
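[annotation] The podman[157119] lines are the libpod REST API service's access log; the querystrings are the podman exporter polling container lists and stats. The first call can be reproduced with a raw HTTP/1.0 request over the service socket; the socket path below is an assumption (rootful default), while the request path is copied verbatim from the log.

    # Reproduce the logged libpod API call over the podman service socket.
    import socket

    SOCK = "/run/podman/podman.sock"  # assumed; rootless: $XDG_RUNTIME_DIR/podman/podman.sock
    PATH = "/v4.9.3/libpod/containers/json?all=true&external=false"

    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
        s.connect(SOCK)
        s.sendall(f"GET {PATH} HTTP/1.0\r\nHost: d\r\n\r\n".encode())
        chunks = []
        while chunk := s.recv(65536):
            chunks.append(chunk)

    raw = b"".join(chunks)
    headers, _, body = raw.partition(b"\r\n\r\n")
    print(headers.decode().splitlines()[0])  # e.g. "HTTP/1.0 200 OK"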
Oct 11 02:07:29 compute-0 unix_chkpwd[369900]: password check failed for user (root)
Oct 11 02:07:29 compute-0 sshd-session[369797]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:07:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #42. Immutable memtables: 0.
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:30.024520) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 19] Flushing memtable with next log file: 42
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148450024563, "job": 19, "event": "flush_started", "num_memtables": 1, "num_entries": 283, "num_deletes": 250, "total_data_size": 69210, "memory_usage": 74528, "flush_reason": "Manual Compaction"}
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 19] Level-0 flush table #43: started
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148450028312, "cf_name": "default", "job": 19, "event": "table_file_creation", "file_number": 43, "file_size": 68630, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 18360, "largest_seqno": 18642, "table_properties": {"data_size": 66703, "index_size": 155, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 709, "raw_key_size": 5153, "raw_average_key_size": 19, "raw_value_size": 62979, "raw_average_value_size": 234, "num_data_blocks": 7, "num_entries": 268, "num_filter_entries": 268, "num_deletions": 250, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760148446, "oldest_key_time": 1760148446, "file_creation_time": 1760148450, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 43, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 19] Flush lasted 3863 microseconds, and 1528 cpu microseconds.
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:30.028386) [db/flush_job.cc:967] [default] [JOB 19] Level-0 flush table #43: 68630 bytes OK
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:30.028407) [db/memtable_list.cc:519] [default] Level-0 commit table #43 started
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:30.030906) [db/memtable_list.cc:722] [default] Level-0 commit table #43: memtable #1 done
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:30.030928) EVENT_LOG_v1 {"time_micros": 1760148450030922, "job": 19, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:30.030946) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 19] Try to delete WAL files size 67099, prev total WAL file size 67099, number of live WAL files 2.
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000039.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:30.031844) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6D67727374617400353032' seq:72057594037927935, type:22 .. '6D67727374617400373533' seq:0, type:0; will stop at (end)
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 20] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 19 Base level 0, inputs: [43(67KB)], [41(9139KB)]
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148450031940, "job": 20, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [43], "files_L6": [41], "score": -1, "input_data_size": 9427586, "oldest_snapshot_seqno": -1}
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 20] Generated table #44: 4144 keys, 6140256 bytes, temperature: kUnknown
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148450080403, "cf_name": "default", "job": 20, "event": "table_file_creation", "file_number": 44, "file_size": 6140256, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 6113430, "index_size": 15389, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 10373, "raw_key_size": 101352, "raw_average_key_size": 24, "raw_value_size": 6039133, "raw_average_value_size": 1457, "num_data_blocks": 650, "num_entries": 4144, "num_filter_entries": 4144, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760148450, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 44, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:30.080717) [db/compaction/compaction_job.cc:1663] [default] [JOB 20] Compacted 1@0 + 1@6 files to L6 => 6140256 bytes
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:30.083189) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 194.2 rd, 126.5 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(0.1, 8.9 +0.0 blob) out(5.9 +0.0 blob), read-write-amplify(226.8) write-amplify(89.5) OK, records in: 4651, records dropped: 507 output_compression: NoCompression
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:30.083220) EVENT_LOG_v1 {"time_micros": 1760148450083205, "job": 20, "event": "compaction_finished", "compaction_time_micros": 48555, "compaction_time_cpu_micros": 37468, "output_level": 6, "num_output_files": 1, "total_output_size": 6140256, "num_input_records": 4651, "num_output_records": 4144, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000043.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148450083479, "job": 20, "event": "table_file_deletion", "file_number": 43}
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000041.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148450087069, "job": 20, "event": "table_file_deletion", "file_number": 41}
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:30.031572) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:30.087465) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:30.087472) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:30.087475) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:30.087478) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:07:30 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:07:30.087481) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:07:30 compute-0 sudo[370038]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cunvpzkumydirqeskwygefypeyirbbzs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148450.0736933-604-146287721385880/AnsiballZ_container_config_data.py'
Oct 11 02:07:30 compute-0 sudo[370038]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:30 compute-0 ceph-mon[191930]: pgmap v879: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:30 compute-0 podman[370000]: 2025-10-11 02:07:30.703895201 +0000 UTC m=+0.152856178 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 02:07:30 compute-0 python3.9[370045]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/openstack/config/telemetry config_pattern=podman_exporter.json debug=False
Oct 11 02:07:30 compute-0 sudo[370038]: pam_unix(sudo:session): session closed for user root
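[annotation] ansible-container_config_data is invoked with config_path=/var/lib/openstack/config/telemetry and config_pattern=podman_exporter.json. A plausible stand-in for what it gathers is sketched below; this is an assumption about the module's behavior, not its code.

    # Illustrative stand-in: find files matching config_pattern under
    # config_path and load them as JSON, as the logged parameters suggest.
    import glob
    import json
    import os

    config_path = "/var/lib/openstack/config/telemetry"
    config_pattern = "podman_exporter.json"

    for path in glob.glob(os.path.join(config_path, "**", config_pattern),
                          recursive=True):
        with open(path) as fh:
            print(path, "->", list(json.load(fh)))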
Oct 11 02:07:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v880: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:31 compute-0 openstack_network_exporter[159265]: ERROR   02:07:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:07:31 compute-0 openstack_network_exporter[159265]: ERROR   02:07:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:07:31 compute-0 sshd-session[369797]: Failed password for root from 121.227.153.123 port 47502 ssh2
Oct 11 02:07:31 compute-0 openstack_network_exporter[159265]: ERROR   02:07:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:07:31 compute-0 openstack_network_exporter[159265]: ERROR   02:07:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:07:31 compute-0 openstack_network_exporter[159265]: ERROR   02:07:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:07:31 compute-0 sshd-session[369797]: Connection closed by authenticating user root 121.227.153.123 port 47502 [preauth]
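The sshd-session lines from 121.227.153.123 are an unrelated root-password brute-force attempt interleaved with the deployment. A minimal sketch, assuming a plain-text journal export (e.g. journalctl > sshd.log), for tallying failed attempts per source address:

    import re
    from collections import Counter

    # Minimal sketch: count "Failed password" attempts per source IP in a
    # journal export. The regex matches the sshd line format seen above;
    # adjust it if your log format differs.
    FAILED = re.compile(r"Failed password for .* from (\S+) port \d+")

    def count_failures(path: str) -> Counter:
        hits = Counter()
        with open(path, encoding="utf-8", errors="replace") as fh:
            for line in fh:
                m = FAILED.search(line)
                if m:
                    hits[m.group(1)] += 1
        return hits

    if __name__ == "__main__":
        for ip, n in count_failures("sshd.log").most_common(10):
            print(f"{ip}\t{n}")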
Oct 11 02:07:31 compute-0 sudo[370196]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-abfdkxyptmbljhgucbiyifkyezpcuito ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148451.3284156-613-78319121909242/AnsiballZ_container_config_hash.py'
Oct 11 02:07:31 compute-0 sudo[370196]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:32 compute-0 python3.9[370199]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
Oct 11 02:07:32 compute-0 sudo[370196]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:32 compute-0 ceph-mon[191930]: pgmap v880: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:33 compute-0 unix_chkpwd[370300]: password check failed for user (root)
Oct 11 02:07:33 compute-0 sshd-session[370198]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:07:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v881: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:33 compute-0 sudo[370351]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qbuzzgciocyseggfjwocvtzgwazlfuwt ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760148452.7220104-623-119092523184079/AnsiballZ_edpm_container_manage.py'
Oct 11 02:07:33 compute-0 sudo[370351]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:33 compute-0 python3[370353]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/openstack/config/telemetry config_id=edpm config_overrides={} config_patterns=podman_exporter.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 02:07:34 compute-0 python3[370353]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: [
                                                {
                                                     "Id": "e56d40e393eb5ea8704d9af8cf0d74665df83747106713fda91530f201837815",
                                                     "Digest": "sha256:7b7f37816f4a78244e32f90a517fdec0c458a6d3cd132212bb6bc16a9dc4fade",
                                                     "RepoTags": [
                                                          "quay.io/navidys/prometheus-podman-exporter:v1.10.1"
                                                     ],
                                                     "RepoDigests": [
                                                          "quay.io/navidys/prometheus-podman-exporter@sha256:7b7f37816f4a78244e32f90a517fdec0c458a6d3cd132212bb6bc16a9dc4fade",
                                                          "quay.io/navidys/prometheus-podman-exporter@sha256:d339ba049bbd1adccb795962bf163f5b22fd84dea865d88b9eb525e46247d6bd"
                                                     ],
                                                     "Parent": "",
                                                     "Comment": "",
                                                     "Created": "2024-03-17T01:45:00.251170784Z",
                                                     "Config": {
                                                          "User": "nobody",
                                                          "ExposedPorts": {
                                                               "9882/tcp": {}
                                                          },
                                                          "Env": [
                                                               "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
                                                          ],
                                                          "Entrypoint": [
                                                               "/bin/podman_exporter"
                                                          ],
                                                          "Labels": {
                                                               "maintainer": "Navid Yaghoobi <navidys@fedoraproject.org>"
                                                          }
                                                     },
                                                     "Version": "",
                                                     "Author": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
                                                     "Architecture": "amd64",
                                                     "Os": "linux",
                                                     "Size": 33863535,
                                                     "VirtualSize": 33863535,
                                                     "GraphDriver": {
                                                          "Name": "overlay",
                                                          "Data": {
                                                               "LowerDir": "/var/lib/containers/storage/overlay/b4f761d90eeb5a4c1ea51e856783cf8398e02a6caf306b90498250a43e5bbae1/diff:/var/lib/containers/storage/overlay/1e604deea57dbda554a168861cff1238f93b8c6c69c863c43aed37d9d99c5fed/diff",
                                                               "UpperDir": "/var/lib/containers/storage/overlay/e1fac4507a16e359f79966290a44e975bb0ed717e8b6cc0e34b61e8c96e0a1a3/diff",
                                                               "WorkDir": "/var/lib/containers/storage/overlay/e1fac4507a16e359f79966290a44e975bb0ed717e8b6cc0e34b61e8c96e0a1a3/work"
                                                          }
                                                     },
                                                     "RootFS": {
                                                          "Type": "layers",
                                                          "Layers": [
                                                               "sha256:1e604deea57dbda554a168861cff1238f93b8c6c69c863c43aed37d9d99c5fed",
                                                               "sha256:6b83872188a9e8912bee1d43add5e9bc518601b02a14a364c0da43b0d59acf33",
                                                               "sha256:7a73cdcd46b4e3c3a632bae42ad152935f204b50dd02f0a46070f81446516318"
                                                          ]
                                                     },
                                                     "Labels": {
                                                          "maintainer": "Navid Yaghoobi <navidys@fedoraproject.org>"
                                                     },
                                                     "Annotations": {},
                                                     "ManifestType": "application/vnd.docker.distribution.manifest.v2+json",
                                                     "User": "nobody",
                                                     "History": [
                                                          {
                                                               "created": "2023-12-05T20:23:06.467739954Z",
                                                               "created_by": "/bin/sh -c #(nop) ADD file:ee9bb8755ccbdd689b434d9b4ac7518e972699604ecda33e4ddc2a15d2831443 in / "
                                                          },
                                                          {
                                                               "created": "2023-12-05T20:23:06.550971969Z",
                                                               "created_by": "/bin/sh -c #(nop)  CMD [\"sh\"]",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2023-12-15T10:54:58.99835989Z",
                                                               "created_by": "MAINTAINER The Prometheus Authors <prometheus-developers@googlegroups.com>",
                                                               "comment": "buildkit.dockerfile.v0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2023-12-15T10:54:58.99835989Z",
                                                               "created_by": "COPY /rootfs / # buildkit",
                                                               "comment": "buildkit.dockerfile.v0"
                                                          },
                                                          {
                                                               "created": "2024-03-17T01:45:00.251170784Z",
                                                               "created_by": "LABEL maintainer=Navid Yaghoobi <navidys@fedoraproject.org>",
                                                               "comment": "buildkit.dockerfile.v0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-03-17T01:45:00.251170784Z",
                                                               "created_by": "ARG TARGETPLATFORM",
                                                               "comment": "buildkit.dockerfile.v0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-03-17T01:45:00.251170784Z",
                                                               "created_by": "ARG TARGETOS",
                                                               "comment": "buildkit.dockerfile.v0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-03-17T01:45:00.251170784Z",
                                                               "created_by": "ARG TARGETARCH",
                                                               "comment": "buildkit.dockerfile.v0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-03-17T01:45:00.251170784Z",
                                                               "created_by": "COPY ./bin/remote/prometheus-podman-exporter-amd64 /bin/podman_exporter # buildkit",
                                                               "comment": "buildkit.dockerfile.v0"
                                                          },
                                                          {
                                                               "created": "2024-03-17T01:45:00.251170784Z",
                                                               "created_by": "EXPOSE map[9882/tcp:{}]",
                                                               "comment": "buildkit.dockerfile.v0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-03-17T01:45:00.251170784Z",
                                                               "created_by": "USER nobody",
                                                               "comment": "buildkit.dockerfile.v0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-03-17T01:45:00.251170784Z",
                                                               "created_by": "ENTRYPOINT [\"/bin/podman_exporter\"]",
                                                               "comment": "buildkit.dockerfile.v0",
                                                               "empty_layer": true
                                                          }
                                                     ],
                                                     "NamesHistory": [
                                                          "quay.io/navidys/prometheus-podman-exporter:v1.10.1"
                                                     ]
                                                }
                                           ]
                                           : quay.io/navidys/prometheus-podman-exporter:v1.10.1
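The indented JSON above is podman's image-inspect output for the exporter image, dumped by the edpm_container_manage debug hook. A minimal sketch (assuming the image is already in local storage) that pulls out the fields the log references: ID, digest, entrypoint, and layer count.

    import json
    import subprocess

    # Minimal sketch: reproduce the inspect data logged above and extract
    # a few fields of interest. Assumes the image is in local storage.
    IMAGE = "quay.io/navidys/prometheus-podman-exporter:v1.10.1"

    def inspect_image(image: str) -> dict:
        out = subprocess.run(
            ["podman", "image", "inspect", image],
            check=True, capture_output=True, text=True,
        )
        return json.loads(out.stdout)[0]  # inspect returns a one-element list

    if __name__ == "__main__":
        info = inspect_image(IMAGE)
        print(info["Id"][:12], info["Digest"])
        print("entrypoint:", info["Config"]["Entrypoint"])
        print("layers:", len(info["RootFS"]["Layers"]))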
Oct 11 02:07:34 compute-0 podman[157119]: @ - - [11/Oct/2025:01:35:34 +0000] "GET /v4.9.3/libpod/events?filters=%7B%7D&since=&stream=true&until= HTTP/1.1" 200 3285917 "" "Go-http-client/1.1"
Oct 11 02:07:34 compute-0 systemd[1]: libpod-2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899.scope: Deactivated successfully.
Oct 11 02:07:34 compute-0 systemd[1]: libpod-2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899.scope: Consumed 4.735s CPU time.
Oct 11 02:07:34 compute-0 podman[370399]: 2025-10-11 02:07:34.299032013 +0000 UTC m=+0.089833783 container died 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:07:34 compute-0 systemd[1]: 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899-14651892a7473ac6.timer: Deactivated successfully.
Oct 11 02:07:34 compute-0 systemd[1]: Stopped /usr/bin/podman healthcheck run 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899.
Oct 11 02:07:34 compute-0 systemd[1]: 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899-14651892a7473ac6.service: Failed to open /run/systemd/transient/2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899-14651892a7473ac6.service: No such file or directory
Oct 11 02:07:34 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899-userdata-shm.mount: Deactivated successfully.
Oct 11 02:07:34 compute-0 systemd[1]: var-lib-containers-storage-overlay-8890de7455df195df0bcfea2886507750b2c2505c8ec69ea3d86cad543bcbee1-merged.mount: Deactivated successfully.
Oct 11 02:07:34 compute-0 podman[370399]: 2025-10-11 02:07:34.385540751 +0000 UTC m=+0.176342491 container cleanup 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:07:34 compute-0 python3[370353]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman stop podman_exporter
Oct 11 02:07:34 compute-0 systemd[1]: edpm_podman_exporter.service: Main process exited, code=exited, status=2/INVALIDARGUMENT
Oct 11 02:07:34 compute-0 systemd[1]: 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899-14651892a7473ac6.timer: Failed to open /run/systemd/transient/2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899-14651892a7473ac6.timer: No such file or directory
Oct 11 02:07:34 compute-0 systemd[1]: 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899-14651892a7473ac6.service: Failed to open /run/systemd/transient/2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899-14651892a7473ac6.service: No such file or directory
Oct 11 02:07:34 compute-0 podman[370426]: 2025-10-11 02:07:34.523488577 +0000 UTC m=+0.100977397 container remove 2cb15c5902f5fb75ca413b58f8aaf8fbe5042d01efef17521b079fe14ff81899 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:07:34 compute-0 podman[370432]: Error: no container with name or ID "podman_exporter" found: no such container
Oct 11 02:07:34 compute-0 python3[370353]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman rm --force podman_exporter
Oct 11 02:07:34 compute-0 systemd[1]: edpm_podman_exporter.service: Control process exited, code=exited, status=125/n/a
Oct 11 02:07:34 compute-0 systemd[1]: edpm_podman_exporter.service: Failed with result 'exit-code'.
Oct 11 02:07:34 compute-0 podman[370447]: 2025-10-11 02:07:34.627807238 +0000 UTC m=+0.069594144 container create 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, config_id=edpm, container_name=podman_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:07:34 compute-0 podman[370447]: 2025-10-11 02:07:34.595812632 +0000 UTC m=+0.037599568 image pull e56d40e393eb5ea8704d9af8cf0d74665df83747106713fda91530f201837815 quay.io/navidys/prometheus-podman-exporter:v1.10.1
Oct 11 02:07:34 compute-0 python3[370353]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman create --name podman_exporter --conmon-pidfile /run/podman_exporter.pid --env OS_ENDPOINT_TYPE=internal --env CONTAINER_HOST=unix:///run/podman/podman.sock --healthcheck-command /openstack/healthcheck podman_exporter --label config_id=edpm --label container_name=podman_exporter --label managed_by=edpm_ansible --label config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']} --log-driver journald --log-level info --network host --privileged=True --publish 9882:9882 --user root --volume /var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z --volume /var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z --volume /run/podman/podman.sock:/run/podman/podman.sock:rw,z --volume /var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z quay.io/navidys/prometheus-podman-exporter:v1.10.1 --web.config.file=/etc/podman_exporter/podman_exporter.yaml
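The PODMAN-CONTAINER-DEBUG line above shows the create command the role assembled from the config_data label. A minimal sketch, not the role's actual implementation, of how such a dict maps onto podman create flags (values abridged from the logged config_data):

    # Minimal sketch (not the edpm_container_manage source): build a
    # `podman create` argv from a config_data dict like the one logged above.
    config_data = {
        "image": "quay.io/navidys/prometheus-podman-exporter:v1.10.1",
        "user": "root",
        "privileged": True,
        "net": "host",
        "ports": ["9882:9882"],
        "command": ["--web.config.file=/etc/podman_exporter/podman_exporter.yaml"],
        "environment": {"CONTAINER_HOST": "unix:///run/podman/podman.sock"},
        "volumes": ["/run/podman/podman.sock:/run/podman/podman.sock:rw,z"],
    }

    def build_create_argv(name: str, cfg: dict) -> list:
        argv = ["podman", "create", "--name", name,
                "--network", cfg["net"],
                f"--privileged={cfg['privileged']}",
                "--user", cfg["user"]]
        for key, val in cfg["environment"].items():
            argv += ["--env", f"{key}={val}"]
        for port in cfg["ports"]:
            argv += ["--publish", port]
        for vol in cfg["volumes"]:
            argv += ["--volume", vol]
        return argv + [cfg["image"]] + cfg["command"]

    print(" ".join(build_create_argv("podman_exporter", config_data)))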
Oct 11 02:07:34 compute-0 systemd[1]: edpm_podman_exporter.service: Scheduled restart job, restart counter is at 1.
Oct 11 02:07:34 compute-0 systemd[1]: Stopped podman_exporter container.
Oct 11 02:07:34 compute-0 ceph-mon[191930]: pgmap v881: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:34 compute-0 systemd[1]: Starting podman_exporter container...
Oct 11 02:07:34 compute-0 systemd[1]: Started libpod-conmon-31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028.scope.
Oct 11 02:07:34 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:07:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/502bfc72002f64ccbc12a48cc4693c04e463205a944f961736b644f289d4fdae/merged/etc/podman_exporter/podman_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/502bfc72002f64ccbc12a48cc4693c04e463205a944f961736b644f289d4fdae/merged/etc/podman_exporter/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:34 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028.
Oct 11 02:07:34 compute-0 podman[370460]: 2025-10-11 02:07:34.879623196 +0000 UTC m=+0.222342279 container init 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:07:34 compute-0 podman_exporter[370478]: ts=2025-10-11T02:07:34.905Z caller=exporter.go:68 level=info msg="Starting podman-prometheus-exporter" version="(version=1.10.1, branch=HEAD, revision=1)"
Oct 11 02:07:34 compute-0 podman_exporter[370478]: ts=2025-10-11T02:07:34.907Z caller=exporter.go:69 level=info msg=metrics enhanced=false
Oct 11 02:07:34 compute-0 podman_exporter[370478]: ts=2025-10-11T02:07:34.907Z caller=handler.go:94 level=info msg="enabled collectors"
Oct 11 02:07:34 compute-0 podman_exporter[370478]: ts=2025-10-11T02:07:34.907Z caller=handler.go:105 level=info collector=container
Oct 11 02:07:34 compute-0 podman[157119]: @ - - [11/Oct/2025:02:07:34 +0000] "GET /v4.9.3/libpod/_ping HTTP/1.1" 200 2 "" "Go-http-client/1.1"
Oct 11 02:07:34 compute-0 podman[157119]: time="2025-10-11T02:07:34Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:07:34 compute-0 podman[370460]: 2025-10-11 02:07:34.922452638 +0000 UTC m=+0.265171731 container start 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:07:34 compute-0 python3[370353]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman start podman_exporter
Oct 11 02:07:34 compute-0 podman[370470]: podman_exporter
Oct 11 02:07:34 compute-0 systemd[1]: Started podman_exporter container.
Oct 11 02:07:34 compute-0 podman[157119]: @ - - [11/Oct/2025:02:07:34 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=true&sync=false HTTP/1.1" 200 45759 "" "Go-http-client/1.1"
Oct 11 02:07:34 compute-0 podman_exporter[370478]: ts=2025-10-11T02:07:34.973Z caller=exporter.go:96 level=info msg="Listening on" address=:9882
Oct 11 02:07:34 compute-0 podman_exporter[370478]: ts=2025-10-11T02:07:34.974Z caller=tls_config.go:313 level=info msg="Listening on" address=[::]:9882
Oct 11 02:07:34 compute-0 podman_exporter[370478]: ts=2025-10-11T02:07:34.975Z caller=tls_config.go:349 level=info msg="TLS is enabled." http2=true address=[::]:9882
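At this point the exporter is serving metrics over TLS on port 9882. A minimal sketch for probing the endpoint; certificate verification is disabled here only because the CA behind the mounted /etc/podman_exporter/tls certs is deployment-specific, and the filtered metric prefix is an assumption about this exporter's output:

    import ssl
    import urllib.request

    # Minimal sketch: scrape the exporter that the log shows listening on
    # [::]:9882 with TLS enabled. Point a cafile at the deployment CA in
    # real use instead of disabling verification.
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE

    with urllib.request.urlopen("https://localhost:9882/metrics",
                                context=ctx, timeout=5) as resp:
        body = resp.read().decode("utf-8", errors="replace")

    # Metric name prefix is an assumption; adjust to taste.
    for line in body.splitlines():
        if line.startswith("podman_container"):
            print(line)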
Oct 11 02:07:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:07:35 compute-0 podman[370496]: 2025-10-11 02:07:35.063978939 +0000 UTC m=+0.127770331 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:07:35 compute-0 sudo[370351]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v882: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:35 compute-0 sshd-session[370198]: Failed password for root from 121.227.153.123 port 53182 ssh2
Oct 11 02:07:36 compute-0 sudo[370691]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tfbvlwjvicdvsdgtfmbfdkjjyxineenn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148455.4759002-631-46754389830127/AnsiballZ_stat.py'
Oct 11 02:07:36 compute-0 sudo[370691]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:36 compute-0 python3.9[370693]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:07:36 compute-0 sudo[370691]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:36 compute-0 sshd-session[370198]: Connection closed by authenticating user root 121.227.153.123 port 53182 [preauth]
Oct 11 02:07:36 compute-0 ceph-mon[191930]: pgmap v882: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:37 compute-0 sudo[370801]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:07:37 compute-0 sudo[370801]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:37 compute-0 sudo[370801]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:37 compute-0 sudo[370886]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rnktfoirqxnahdbkwghilflulkygfszv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148456.6763284-640-244805147295725/AnsiballZ_file.py'
Oct 11 02:07:37 compute-0 sudo[370886]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:37 compute-0 sudo[370857]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:07:37 compute-0 sudo[370857]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:37 compute-0 sudo[370857]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v883: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:37 compute-0 sudo[370900]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:07:37 compute-0 sudo[370900]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:37 compute-0 sudo[370900]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:37 compute-0 python3.9[370897]: ansible-file Invoked with path=/etc/systemd/system/edpm_podman_exporter.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:07:37 compute-0 sudo[370886]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:37 compute-0 sudo[370925]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ls
Oct 11 02:07:37 compute-0 sudo[370925]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:38 compute-0 unix_chkpwd[371072]: password check failed for user (root)
Oct 11 02:07:38 compute-0 sshd-session[370750]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:07:38 compute-0 podman[371118]: 2025-10-11 02:07:38.420138673 +0000 UTC m=+0.140144914 container exec ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, ceph=True, io.buildah.version=1.39.3)
Oct 11 02:07:38 compute-0 podman[371118]: 2025-10-11 02:07:38.552307007 +0000 UTC m=+0.272313218 container exec_died ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.593 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.595 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.629 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.631 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.632 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.633 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.633 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:07:38 compute-0 ceph-mon[191930]: pgmap v883: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.920 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.921 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.922 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.922 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.963 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.964 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.964 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.964 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:07:38 compute-0 nova_compute[356901]: 2025-10-11 02:07:38.965 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:07:39 compute-0 sudo[371290]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iktzkotcgzfoopwislkjybnrswxcekwa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148457.611165-640-238423335258698/AnsiballZ_copy.py'
Oct 11 02:07:39 compute-0 sudo[371290]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v884: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:39 compute-0 python3.9[371294]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760148457.611165-640-238423335258698/source dest=/etc/systemd/system/edpm_podman_exporter.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:07:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:07:39 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3589028454' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:07:39 compute-0 sudo[371290]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:39 compute-0 nova_compute[356901]: 2025-10-11 02:07:39.490 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.525s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
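nova_compute's resource audit shells out to ceph df (the dispatched mon_command audited above). A minimal sketch running the same probe standalone, assuming the client.openstack keyring and /etc/ceph/ceph.conf present on this host; field names follow recent Ceph JSON output:

    import json
    import subprocess

    # Minimal sketch: the same probe nova_compute logs above, run standalone.
    out = subprocess.run(
        ["ceph", "df", "--format=json", "--id", "openstack",
         "--conf", "/etc/ceph/ceph.conf"],
        check=True, capture_output=True, text=True,
    )
    df = json.loads(out.stdout)
    stats = df["stats"]
    print(f"total: {stats['total_bytes'] / 2**30:.1f} GiB, "
          f"avail: {stats['total_avail_bytes'] / 2**30:.1f} GiB")
    for pool in df["pools"]:
        print(pool["name"], pool["stats"]["bytes_used"])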
Oct 11 02:07:39 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3589028454' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:07:39 compute-0 sudo[370925]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:07:39 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:07:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:07:39 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:07:39 compute-0 sudo[371392]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:07:39 compute-0 sudo[371392]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:39 compute-0 sudo[371392]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:39 compute-0 sudo[371442]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xiojoilplahkvyenmyfraqrgnikywzji ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148457.611165-640-238423335258698/AnsiballZ_systemd.py'
Oct 11 02:07:39 compute-0 sudo[371442]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:39 compute-0 nova_compute[356901]: 2025-10-11 02:07:39.928 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:07:39 compute-0 nova_compute[356901]: 2025-10-11 02:07:39.931 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=4589MB free_disk=59.98828125GB free_vcpus=8 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:07:39 compute-0 nova_compute[356901]: 2025-10-11 02:07:39.932 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:07:39 compute-0 nova_compute[356901]: 2025-10-11 02:07:39.932 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:07:39 compute-0 sudo[371445]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:07:40 compute-0 sudo[371445]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:40 compute-0 sudo[371445]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:40 compute-0 nova_compute[356901]: 2025-10-11 02:07:40.012 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 0 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:07:40 compute-0 nova_compute[356901]: 2025-10-11 02:07:40.013 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=512MB phys_disk=59GB used_disk=0GB total_vcpus=8 used_vcpus=0 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:07:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:07:40 compute-0 nova_compute[356901]: 2025-10-11 02:07:40.035 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:07:40 compute-0 sudo[371471]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:07:40 compute-0 sudo[371471]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:40 compute-0 sudo[371471]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:40 compute-0 sudo[371497]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:07:40 compute-0 python3.9[371448]: ansible-systemd Invoked with state=started name=edpm_podman_exporter.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:07:40 compute-0 sudo[371497]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:40 compute-0 sudo[371442]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:40 compute-0 sshd-session[370750]: Failed password for root from 121.227.153.123 port 53186 ssh2
Oct 11 02:07:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:07:40 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3365397893' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:07:40 compute-0 nova_compute[356901]: 2025-10-11 02:07:40.548 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.513s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:07:40 compute-0 nova_compute[356901]: 2025-10-11 02:07:40.561 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:07:40 compute-0 nova_compute[356901]: 2025-10-11 02:07:40.587 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
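The inventory dict above fixes the capacity placement will enforce, (total - reserved) * allocation_ratio per resource class. A minimal worked sketch of those numbers:

    # Minimal sketch: schedulable capacity implied by the inventory logged
    # above; placement enforces (total - reserved) * allocation_ratio.
    inventory = {
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "VCPU":      {"total": 8,    "reserved": 0,   "allocation_ratio": 4.0},
        "DISK_GB":   {"total": 59,   "reserved": 0,   "allocation_ratio": 0.9},
    }

    for rc, inv in inventory.items():
        capacity = (inv["total"] - inv["reserved"]) * inv["allocation_ratio"]
        print(f"{rc}: {capacity:g}")
    # MEMORY_MB: 7168, VCPU: 32, DISK_GB: 53.1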
Oct 11 02:07:40 compute-0 nova_compute[356901]: 2025-10-11 02:07:40.592 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:07:40 compute-0 nova_compute[356901]: 2025-10-11 02:07:40.593 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.660s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:07:40 compute-0 ceph-mon[191930]: pgmap v884: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:07:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:07:40 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3365397893' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:07:40 compute-0 sudo[371497]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:07:40 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:07:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:07:40 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:07:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:07:40 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:07:40 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 6a428c49-0483-4cbb-982c-602bc243cf7c does not exist
Oct 11 02:07:40 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev c11a9dfd-f635-4bcf-89f6-6bf43637f93f does not exist
Oct 11 02:07:40 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev d7b6cdf5-2f71-44c6-9d33-52acb7ee388f does not exist
Oct 11 02:07:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:07:40 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:07:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:07:40 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:07:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:07:40 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:07:41 compute-0 sudo[371599]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:07:41 compute-0 sudo[371599]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:41 compute-0 sudo[371599]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:41 compute-0 sudo[371624]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:07:41 compute-0 sudo[371624]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:41 compute-0 sudo[371624]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:41 compute-0 sudo[371649]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:07:41 compute-0 sudo[371649]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:41 compute-0 sudo[371649]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:41 compute-0 sshd-session[370750]: Connection closed by authenticating user root 121.227.153.123 port 53186 [preauth]
Oct 11 02:07:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v885: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:41 compute-0 sudo[371699]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:07:41 compute-0 sudo[371699]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:07:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:07:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:07:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:07:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:07:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:07:41 compute-0 sudo[371851]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rwrmcynekqlnuenjbgikgcryyjnkonqb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148461.2653093-660-80437106781743/AnsiballZ_systemd.py'
Oct 11 02:07:41 compute-0 sudo[371851]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:42 compute-0 podman[371866]: 2025-10-11 02:07:42.058004305 +0000 UTC m=+0.085843679 container create 3dc0ca6e74897897c8832fa47ed77ebe9b9dd35de567bc3fd3af35e8f7ecdae1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_khorana, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507)
Oct 11 02:07:42 compute-0 podman[371866]: 2025-10-11 02:07:42.030495683 +0000 UTC m=+0.058335137 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:07:42 compute-0 systemd[1]: Started libpod-conmon-3dc0ca6e74897897c8832fa47ed77ebe9b9dd35de567bc3fd3af35e8f7ecdae1.scope.
Oct 11 02:07:42 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:07:42 compute-0 python3.9[371861]: ansible-ansible.builtin.systemd Invoked with name=edpm_podman_exporter.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 02:07:42 compute-0 podman[371866]: 2025-10-11 02:07:42.223330876 +0000 UTC m=+0.251170330 container init 3dc0ca6e74897897c8832fa47ed77ebe9b9dd35de567bc3fd3af35e8f7ecdae1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_khorana, ceph=True, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:07:42 compute-0 podman[371880]: 2025-10-11 02:07:42.238060596 +0000 UTC m=+0.127097990 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=starting, health_failing_streak=2, health_log=, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, io.buildah.version=1.41.4, managed_by=edpm_ansible, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']})
Oct 11 02:07:42 compute-0 systemd[1]: Stopping podman_exporter container...
Oct 11 02:07:42 compute-0 systemd[1]: c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6-4388892ecd7a1bb1.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 02:07:42 compute-0 podman[371866]: 2025-10-11 02:07:42.250095767 +0000 UTC m=+0.277935171 container start 3dc0ca6e74897897c8832fa47ed77ebe9b9dd35de567bc3fd3af35e8f7ecdae1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_khorana, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507)
Oct 11 02:07:42 compute-0 systemd[1]: c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6-4388892ecd7a1bb1.service: Failed with result 'exit-code'.
Oct 11 02:07:42 compute-0 podman[371866]: 2025-10-11 02:07:42.256893823 +0000 UTC m=+0.284733207 container attach 3dc0ca6e74897897c8832fa47ed77ebe9b9dd35de567bc3fd3af35e8f7ecdae1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_khorana, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0)
Oct 11 02:07:42 compute-0 optimistic_khorana[371891]: 167 167
Oct 11 02:07:42 compute-0 systemd[1]: libpod-3dc0ca6e74897897c8832fa47ed77ebe9b9dd35de567bc3fd3af35e8f7ecdae1.scope: Deactivated successfully.
Oct 11 02:07:42 compute-0 podman[371866]: 2025-10-11 02:07:42.261035426 +0000 UTC m=+0.288874800 container died 3dc0ca6e74897897c8832fa47ed77ebe9b9dd35de567bc3fd3af35e8f7ecdae1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_khorana, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:07:42 compute-0 systemd[1]: var-lib-containers-storage-overlay-239b3c5811fc2631cee5faafa7a95052359d8101d2ef8129cb8058c77540b3e3-merged.mount: Deactivated successfully.
Oct 11 02:07:42 compute-0 podman[371866]: 2025-10-11 02:07:42.339963653 +0000 UTC m=+0.367803017 container remove 3dc0ca6e74897897c8832fa47ed77ebe9b9dd35de567bc3fd3af35e8f7ecdae1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_khorana, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 02:07:42 compute-0 podman[157119]: @ - - [11/Oct/2025:02:07:34 +0000] "GET /v4.9.3/libpod/events?filters=%7B%7D&since=&stream=true&until= HTTP/1.1" 200 14113 "" "Go-http-client/1.1"
Oct 11 02:07:42 compute-0 systemd[1]: libpod-31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028.scope: Deactivated successfully.
Oct 11 02:07:42 compute-0 podman[371907]: 2025-10-11 02:07:42.374147116 +0000 UTC m=+0.110300019 container died 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:07:42 compute-0 systemd[1]: libpod-conmon-3dc0ca6e74897897c8832fa47ed77ebe9b9dd35de567bc3fd3af35e8f7ecdae1.scope: Deactivated successfully.
Oct 11 02:07:42 compute-0 systemd[1]: 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028-3b688d3c94b2b49d.timer: Deactivated successfully.
Oct 11 02:07:42 compute-0 systemd[1]: Stopped /usr/bin/podman healthcheck run 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028.
Oct 11 02:07:42 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028-userdata-shm.mount: Deactivated successfully.
Oct 11 02:07:42 compute-0 systemd[1]: var-lib-containers-storage-overlay-502bfc72002f64ccbc12a48cc4693c04e463205a944f961736b644f289d4fdae-merged.mount: Deactivated successfully.
Oct 11 02:07:42 compute-0 podman[371907]: 2025-10-11 02:07:42.454599048 +0000 UTC m=+0.190751951 container cleanup 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:07:42 compute-0 podman[371907]: podman_exporter
Oct 11 02:07:42 compute-0 systemd[1]: edpm_podman_exporter.service: Main process exited, code=exited, status=2/INVALIDARGUMENT
Oct 11 02:07:42 compute-0 systemd[1]: libpod-conmon-31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028.scope: Deactivated successfully.
Oct 11 02:07:42 compute-0 podman[371956]: podman_exporter
Oct 11 02:07:42 compute-0 systemd[1]: edpm_podman_exporter.service: Failed with result 'exit-code'.
Oct 11 02:07:42 compute-0 systemd[1]: Stopped podman_exporter container.
Oct 11 02:07:42 compute-0 podman[371954]: 2025-10-11 02:07:42.574357845 +0000 UTC m=+0.075734380 container create f2f7f5d758a746d77986916c7a6f6374ea2b828e162ef8fd28dc569c6e275d1e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nifty_cannon, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:07:42 compute-0 systemd[1]: Starting podman_exporter container...
Oct 11 02:07:42 compute-0 podman[371954]: 2025-10-11 02:07:42.541370418 +0000 UTC m=+0.042747013 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:07:42 compute-0 systemd[1]: Started libpod-conmon-f2f7f5d758a746d77986916c7a6f6374ea2b828e162ef8fd28dc569c6e275d1e.scope.
Oct 11 02:07:42 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:07:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4357d2af5b3cd62693a37e51bdd8648f146368283570d074dd92fc5e54a14803/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4357d2af5b3cd62693a37e51bdd8648f146368283570d074dd92fc5e54a14803/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4357d2af5b3cd62693a37e51bdd8648f146368283570d074dd92fc5e54a14803/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:42 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:07:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4357d2af5b3cd62693a37e51bdd8648f146368283570d074dd92fc5e54a14803/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4357d2af5b3cd62693a37e51bdd8648f146368283570d074dd92fc5e54a14803/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/502bfc72002f64ccbc12a48cc4693c04e463205a944f961736b644f289d4fdae/merged/etc/podman_exporter/podman_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/502bfc72002f64ccbc12a48cc4693c04e463205a944f961736b644f289d4fdae/merged/etc/podman_exporter/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:42 compute-0 podman[371954]: 2025-10-11 02:07:42.806082732 +0000 UTC m=+0.307459287 container init f2f7f5d758a746d77986916c7a6f6374ea2b828e162ef8fd28dc569c6e275d1e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nifty_cannon, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:07:42 compute-0 podman[371954]: 2025-10-11 02:07:42.830528411 +0000 UTC m=+0.331904956 container start f2f7f5d758a746d77986916c7a6f6374ea2b828e162ef8fd28dc569c6e275d1e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nifty_cannon, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 02:07:42 compute-0 podman[371954]: 2025-10-11 02:07:42.837264719 +0000 UTC m=+0.338641264 container attach f2f7f5d758a746d77986916c7a6f6374ea2b828e162ef8fd28dc569c6e275d1e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nifty_cannon, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
Oct 11 02:07:42 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028.
Oct 11 02:07:42 compute-0 ceph-mon[191930]: pgmap v885: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:42 compute-0 podman[371979]: 2025-10-11 02:07:42.877890234 +0000 UTC m=+0.281802381 container init 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:07:42 compute-0 podman_exporter[371997]: ts=2025-10-11T02:07:42.911Z caller=exporter.go:68 level=info msg="Starting podman-prometheus-exporter" version="(version=1.10.1, branch=HEAD, revision=1)"
Oct 11 02:07:42 compute-0 podman_exporter[371997]: ts=2025-10-11T02:07:42.911Z caller=exporter.go:69 level=info msg=metrics enhanced=false
Oct 11 02:07:42 compute-0 podman_exporter[371997]: ts=2025-10-11T02:07:42.911Z caller=handler.go:94 level=info msg="enabled collectors"
Oct 11 02:07:42 compute-0 podman_exporter[371997]: ts=2025-10-11T02:07:42.911Z caller=handler.go:105 level=info collector=container
Oct 11 02:07:42 compute-0 podman[157119]: @ - - [11/Oct/2025:02:07:42 +0000] "GET /v4.9.3/libpod/_ping HTTP/1.1" 200 2 "" "Go-http-client/1.1"
Oct 11 02:07:42 compute-0 podman[157119]: time="2025-10-11T02:07:42Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:07:42 compute-0 podman[371979]: 2025-10-11 02:07:42.938081615 +0000 UTC m=+0.341993792 container start 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:07:42 compute-0 podman[371979]: podman_exporter
Oct 11 02:07:42 compute-0 systemd[1]: Started podman_exporter container.
Oct 11 02:07:42 compute-0 podman[157119]: @ - - [11/Oct/2025:02:07:42 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=true&sync=false HTTP/1.1" 200 47494 "" "Go-http-client/1.1"
Oct 11 02:07:42 compute-0 podman_exporter[371997]: ts=2025-10-11T02:07:42.989Z caller=exporter.go:96 level=info msg="Listening on" address=:9882
Oct 11 02:07:42 compute-0 podman_exporter[371997]: ts=2025-10-11T02:07:42.990Z caller=tls_config.go:313 level=info msg="Listening on" address=[::]:9882
Oct 11 02:07:42 compute-0 sudo[371851]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:42 compute-0 podman_exporter[371997]: ts=2025-10-11T02:07:42.993Z caller=tls_config.go:349 level=info msg="TLS is enabled." http2=true address=[::]:9882
Oct 11 02:07:43 compute-0 unix_chkpwd[372020]: password check failed for user (root)
Oct 11 02:07:43 compute-0 sshd-session[371801]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:07:43 compute-0 podman[372009]: 2025-10-11 02:07:43.071767203 +0000 UTC m=+0.116212366 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:07:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v886: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:43 compute-0 sudo[372189]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tikeobsjgjxgjaguvtgyaaphfeuthmbc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148463.2364445-668-152999191287731/AnsiballZ_stat.py'
Oct 11 02:07:43 compute-0 sudo[372189]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:43 compute-0 podman[372192]: 2025-10-11 02:07:43.902669291 +0000 UTC m=+0.117690337 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:07:43 compute-0 podman[372194]: 2025-10-11 02:07:43.924873128 +0000 UTC m=+0.138093635 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vcs-type=git, maintainer=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vendor=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.buildah.version=1.29.0, config_id=edpm, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.tags=base rhel9, architecture=x86_64, container_name=kepler, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, name=ubi9, release=1214.1726694543, managed_by=edpm_ansible, distribution-scope=public, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, release-0.7.12=, version=9.4, com.redhat.component=ubi9-container, io.openshift.expose-services=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9.)
Oct 11 02:07:44 compute-0 python3.9[372196]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/openstack_network_exporter/healthcheck follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:07:44 compute-0 sudo[372189]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:44 compute-0 podman[372234]: 2025-10-11 02:07:44.090267048 +0000 UTC m=+0.145776828 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, tcib_managed=true, org.label-schema.schema-version=1.0)
Oct 11 02:07:44 compute-0 nifty_cannon[371992]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:07:44 compute-0 nifty_cannon[371992]: --> relative data size: 1.0
Oct 11 02:07:44 compute-0 nifty_cannon[371992]: --> All data devices are unavailable
Oct 11 02:07:44 compute-0 systemd[1]: libpod-f2f7f5d758a746d77986916c7a6f6374ea2b828e162ef8fd28dc569c6e275d1e.scope: Deactivated successfully.
Oct 11 02:07:44 compute-0 systemd[1]: libpod-f2f7f5d758a746d77986916c7a6f6374ea2b828e162ef8fd28dc569c6e275d1e.scope: Consumed 1.235s CPU time.
Oct 11 02:07:44 compute-0 podman[371954]: 2025-10-11 02:07:44.150838845 +0000 UTC m=+1.652215400 container died f2f7f5d758a746d77986916c7a6f6374ea2b828e162ef8fd28dc569c6e275d1e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nifty_cannon, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:07:44 compute-0 systemd[1]: var-lib-containers-storage-overlay-4357d2af5b3cd62693a37e51bdd8648f146368283570d074dd92fc5e54a14803-merged.mount: Deactivated successfully.
Oct 11 02:07:44 compute-0 podman[371954]: 2025-10-11 02:07:44.248332768 +0000 UTC m=+1.749709293 container remove f2f7f5d758a746d77986916c7a6f6374ea2b828e162ef8fd28dc569c6e275d1e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nifty_cannon, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:07:44 compute-0 systemd[1]: libpod-conmon-f2f7f5d758a746d77986916c7a6f6374ea2b828e162ef8fd28dc569c6e275d1e.scope: Deactivated successfully.
Oct 11 02:07:44 compute-0 sudo[371699]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:44 compute-0 sudo[372332]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:07:44 compute-0 sudo[372332]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:44 compute-0 sudo[372332]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:44 compute-0 sudo[372384]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hwlivmlimepggykdvtegmnetbemzyajg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148463.2364445-668-152999191287731/AnsiballZ_file.py'
Oct 11 02:07:44 compute-0 sudo[372384]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:44 compute-0 sudo[372383]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:07:44 compute-0 sudo[372383]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:44 compute-0 sudo[372383]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:44 compute-0 sudo[372411]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:07:44 compute-0 python3.9[372391]: ansible-ansible.legacy.file Invoked with group=zuul mode=0700 owner=zuul setype=container_file_t dest=/var/lib/openstack/healthchecks/openstack_network_exporter/ _original_basename=healthcheck recurse=False state=file path=/var/lib/openstack/healthchecks/openstack_network_exporter/ force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:07:44 compute-0 sudo[372411]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:44 compute-0 sudo[372411]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:44 compute-0 sshd-session[371801]: Failed password for root from 121.227.153.123 port 33134 ssh2
Oct 11 02:07:44 compute-0 sudo[372384]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:44 compute-0 sudo[372436]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:07:44 compute-0 sudo[372436]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:44 compute-0 ceph-mon[191930]: pgmap v886: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:07:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v887: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:45 compute-0 podman[372592]: 2025-10-11 02:07:45.443439785 +0000 UTC m=+0.066669990 container create b7651844ad5417b7fcac7f3c9fdfe5b7ba6d3185916fb7fc3a72197248c12b05 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_mendeleev, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:07:45 compute-0 systemd[1]: Started libpod-conmon-b7651844ad5417b7fcac7f3c9fdfe5b7ba6d3185916fb7fc3a72197248c12b05.scope.
Oct 11 02:07:45 compute-0 podman[372592]: 2025-10-11 02:07:45.416826692 +0000 UTC m=+0.040056927 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:07:45 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:07:45 compute-0 podman[372592]: 2025-10-11 02:07:45.583815526 +0000 UTC m=+0.207045771 container init b7651844ad5417b7fcac7f3c9fdfe5b7ba6d3185916fb7fc3a72197248c12b05 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_mendeleev, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:07:45 compute-0 podman[372592]: 2025-10-11 02:07:45.600878418 +0000 UTC m=+0.224108653 container start b7651844ad5417b7fcac7f3c9fdfe5b7ba6d3185916fb7fc3a72197248c12b05 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_mendeleev, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=reef)
Oct 11 02:07:45 compute-0 podman[372592]: 2025-10-11 02:07:45.608753615 +0000 UTC m=+0.231983860 container attach b7651844ad5417b7fcac7f3c9fdfe5b7ba6d3185916fb7fc3a72197248c12b05 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_mendeleev, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:07:45 compute-0 hopeful_mendeleev[372634]: 167 167
Oct 11 02:07:45 compute-0 systemd[1]: libpod-b7651844ad5417b7fcac7f3c9fdfe5b7ba6d3185916fb7fc3a72197248c12b05.scope: Deactivated successfully.
Oct 11 02:07:45 compute-0 podman[372592]: 2025-10-11 02:07:45.610746687 +0000 UTC m=+0.233976892 container died b7651844ad5417b7fcac7f3c9fdfe5b7ba6d3185916fb7fc3a72197248c12b05 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_mendeleev, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 02:07:45 compute-0 systemd[1]: var-lib-containers-storage-overlay-649029d60a91a18d01a9a664a75ac166acbde8195ed9e4dc3dcaf349e8f1f027-merged.mount: Deactivated successfully.
Oct 11 02:07:45 compute-0 podman[372592]: 2025-10-11 02:07:45.66535489 +0000 UTC m=+0.288585095 container remove b7651844ad5417b7fcac7f3c9fdfe5b7ba6d3185916fb7fc3a72197248c12b05 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_mendeleev, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 02:07:45 compute-0 systemd[1]: libpod-conmon-b7651844ad5417b7fcac7f3c9fdfe5b7ba6d3185916fb7fc3a72197248c12b05.scope: Deactivated successfully.
Oct 11 02:07:45 compute-0 sudo[372680]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-utzmzokerfquxlgdonbblqhgnjsrciqh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148465.1735716-682-177333691834236/AnsiballZ_container_config_data.py'
Oct 11 02:07:45 compute-0 sudo[372680]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:45 compute-0 python3.9[372684]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/openstack/config/telemetry config_pattern=openstack_network_exporter.json debug=False
Oct 11 02:07:45 compute-0 podman[372690]: 2025-10-11 02:07:45.925987929 +0000 UTC m=+0.096466849 container create bef6e9414a5a8e5fde9d46b8f5099a7d4cca8bba68953815bc0f2c68bb23669f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_vaughan, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:07:45 compute-0 sudo[372680]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:45 compute-0 podman[372690]: 2025-10-11 02:07:45.87257417 +0000 UTC m=+0.043053180 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:07:45 compute-0 systemd[1]: Started libpod-conmon-bef6e9414a5a8e5fde9d46b8f5099a7d4cca8bba68953815bc0f2c68bb23669f.scope.
Oct 11 02:07:46 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:07:46 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/91166518c0bbd5b69df89812b56885d00db54b7ed0887794331ceb6c5b37b1ee/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:46 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/91166518c0bbd5b69df89812b56885d00db54b7ed0887794331ceb6c5b37b1ee/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:46 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/91166518c0bbd5b69df89812b56885d00db54b7ed0887794331ceb6c5b37b1ee/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:46 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/91166518c0bbd5b69df89812b56885d00db54b7ed0887794331ceb6c5b37b1ee/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:46 compute-0 podman[372690]: 2025-10-11 02:07:46.072039139 +0000 UTC m=+0.242518089 container init bef6e9414a5a8e5fde9d46b8f5099a7d4cca8bba68953815bc0f2c68bb23669f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_vaughan, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:07:46 compute-0 podman[372690]: 2025-10-11 02:07:46.085516826 +0000 UTC m=+0.255995756 container start bef6e9414a5a8e5fde9d46b8f5099a7d4cca8bba68953815bc0f2c68bb23669f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_vaughan, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Oct 11 02:07:46 compute-0 podman[372690]: 2025-10-11 02:07:46.090549698 +0000 UTC m=+0.261028618 container attach bef6e9414a5a8e5fde9d46b8f5099a7d4cca8bba68953815bc0f2c68bb23669f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_vaughan, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0)
Oct 11 02:07:46 compute-0 sshd-session[371801]: Connection closed by authenticating user root 121.227.153.123 port 33134 [preauth]
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]: {
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:     "0": [
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:         {
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "devices": [
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "/dev/loop3"
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             ],
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "lv_name": "ceph_lv0",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "lv_size": "21470642176",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "name": "ceph_lv0",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "tags": {
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.cluster_name": "ceph",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.crush_device_class": "",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.encrypted": "0",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.osd_id": "0",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.type": "block",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.vdo": "0"
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             },
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "type": "block",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "vg_name": "ceph_vg0"
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:         }
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:     ],
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:     "1": [
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:         {
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "devices": [
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "/dev/loop4"
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             ],
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "lv_name": "ceph_lv1",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "lv_size": "21470642176",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "name": "ceph_lv1",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "tags": {
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.cluster_name": "ceph",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.crush_device_class": "",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.encrypted": "0",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.osd_id": "1",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.type": "block",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.vdo": "0"
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             },
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "type": "block",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "vg_name": "ceph_vg1"
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:         }
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:     ],
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:     "2": [
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:         {
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "devices": [
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "/dev/loop5"
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             ],
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "lv_name": "ceph_lv2",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "lv_size": "21470642176",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "name": "ceph_lv2",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "tags": {
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.cluster_name": "ceph",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.crush_device_class": "",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.encrypted": "0",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.osd_id": "2",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.type": "block",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:                 "ceph.vdo": "0"
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             },
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "type": "block",
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:             "vg_name": "ceph_vg2"
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:         }
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]:     ]
Oct 11 02:07:46 compute-0 vibrant_vaughan[372710]: }
Oct 11 02:07:46 compute-0 sudo[372898]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sfkgyfkfzxsuggmqmflnbksigvjodsyo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148466.2942042-691-38460893123133/AnsiballZ_container_config_hash.py'
Oct 11 02:07:46 compute-0 podman[372838]: 2025-10-11 02:07:46.864734885 +0000 UTC m=+0.109928464 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=iscsid, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 02:07:46 compute-0 sudo[372898]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:46 compute-0 podman[372836]: 2025-10-11 02:07:46.869807381 +0000 UTC m=+0.126241565 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, container_name=multipathd, io.buildah.version=1.41.3)
Oct 11 02:07:46 compute-0 systemd[1]: libpod-bef6e9414a5a8e5fde9d46b8f5099a7d4cca8bba68953815bc0f2c68bb23669f.scope: Deactivated successfully.
Oct 11 02:07:46 compute-0 podman[372690]: 2025-10-11 02:07:46.88568942 +0000 UTC m=+1.056168340 container died bef6e9414a5a8e5fde9d46b8f5099a7d4cca8bba68953815bc0f2c68bb23669f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_vaughan, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:07:46 compute-0 ceph-mon[191930]: pgmap v887: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:46 compute-0 systemd[1]: var-lib-containers-storage-overlay-91166518c0bbd5b69df89812b56885d00db54b7ed0887794331ceb6c5b37b1ee-merged.mount: Deactivated successfully.
Oct 11 02:07:46 compute-0 podman[372690]: 2025-10-11 02:07:46.969996731 +0000 UTC m=+1.140475641 container remove bef6e9414a5a8e5fde9d46b8f5099a7d4cca8bba68953815bc0f2c68bb23669f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_vaughan, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 02:07:46 compute-0 systemd[1]: libpod-conmon-bef6e9414a5a8e5fde9d46b8f5099a7d4cca8bba68953815bc0f2c68bb23669f.scope: Deactivated successfully.
Oct 11 02:07:47 compute-0 sudo[372436]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:47 compute-0 python3.9[372903]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
Oct 11 02:07:47 compute-0 sudo[372898]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:47 compute-0 sudo[372915]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:07:47 compute-0 sudo[372915]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:47 compute-0 sudo[372915]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:47 compute-0 sudo[372943]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:07:47 compute-0 sudo[372943]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:47 compute-0 sudo[372943]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:47 compute-0 sudo[372989]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:07:47 compute-0 sudo[372989]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:47 compute-0 sudo[372989]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v888: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:47 compute-0 sudo[373014]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:07:47 compute-0 sudo[373014]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:47 compute-0 unix_chkpwd[373151]: password check failed for user (root)
Oct 11 02:07:47 compute-0 sshd-session[372817]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:07:47 compute-0 ceph-mon[191930]: pgmap v888: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:48 compute-0 podman[373177]: 2025-10-11 02:07:48.037559166 +0000 UTC m=+0.086395516 container create 374aa9523f563e9030ee1b276d7dd5132f260f879e2db128a75d5389936c26d7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_ganguly, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 02:07:48 compute-0 podman[373177]: 2025-10-11 02:07:48.008479963 +0000 UTC m=+0.057316353 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:07:48 compute-0 systemd[1]: Started libpod-conmon-374aa9523f563e9030ee1b276d7dd5132f260f879e2db128a75d5389936c26d7.scope.
Oct 11 02:07:48 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:07:48 compute-0 sudo[373221]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hrnshbvlpobmmhggsktsfadrowqsiiqi ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760148467.583178-701-37001726642944/AnsiballZ_edpm_container_manage.py'
Oct 11 02:07:48 compute-0 sudo[373221]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:48 compute-0 podman[373177]: 2025-10-11 02:07:48.200583918 +0000 UTC m=+0.249420298 container init 374aa9523f563e9030ee1b276d7dd5132f260f879e2db128a75d5389936c26d7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_ganguly, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, ceph=True)
Oct 11 02:07:48 compute-0 podman[373177]: 2025-10-11 02:07:48.220880933 +0000 UTC m=+0.269717253 container start 374aa9523f563e9030ee1b276d7dd5132f260f879e2db128a75d5389936c26d7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_ganguly, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507)
Oct 11 02:07:48 compute-0 podman[373177]: 2025-10-11 02:07:48.227340718 +0000 UTC m=+0.276177098 container attach 374aa9523f563e9030ee1b276d7dd5132f260f879e2db128a75d5389936c26d7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_ganguly, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0)
Oct 11 02:07:48 compute-0 heuristic_ganguly[373222]: 167 167
Oct 11 02:07:48 compute-0 systemd[1]: libpod-374aa9523f563e9030ee1b276d7dd5132f260f879e2db128a75d5389936c26d7.scope: Deactivated successfully.
Oct 11 02:07:48 compute-0 podman[373177]: 2025-10-11 02:07:48.235967146 +0000 UTC m=+0.284803496 container died 374aa9523f563e9030ee1b276d7dd5132f260f879e2db128a75d5389936c26d7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_ganguly, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:07:48 compute-0 systemd[1]: var-lib-containers-storage-overlay-2086b457b7d0b3258d95aa1f433b33c6fe565bfc7ad66210a07b5fd802ed6bd1-merged.mount: Deactivated successfully.
Oct 11 02:07:48 compute-0 podman[373177]: 2025-10-11 02:07:48.322049402 +0000 UTC m=+0.370885742 container remove 374aa9523f563e9030ee1b276d7dd5132f260f879e2db128a75d5389936c26d7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_ganguly, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:07:48 compute-0 systemd[1]: libpod-conmon-374aa9523f563e9030ee1b276d7dd5132f260f879e2db128a75d5389936c26d7.scope: Deactivated successfully.
Oct 11 02:07:48 compute-0 python3[373226]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/openstack/config/telemetry config_id=edpm config_overrides={} config_patterns=openstack_network_exporter.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 02:07:48 compute-0 podman[373246]: 2025-10-11 02:07:48.583786064 +0000 UTC m=+0.062395070 container create d218be575d4b2c630e9022fc9e496d0a819383f321506869e869be8baada7970 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_einstein, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:07:48 compute-0 systemd[1]: Started libpod-conmon-d218be575d4b2c630e9022fc9e496d0a819383f321506869e869be8baada7970.scope.
Oct 11 02:07:48 compute-0 podman[373246]: 2025-10-11 02:07:48.5583969 +0000 UTC m=+0.037005936 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:07:48 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:07:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b4dc79ae4f61a353473ba3579450f0acf276b0d50820aaef6c7846ba01889883/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b4dc79ae4f61a353473ba3579450f0acf276b0d50820aaef6c7846ba01889883/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b4dc79ae4f61a353473ba3579450f0acf276b0d50820aaef6c7846ba01889883/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b4dc79ae4f61a353473ba3579450f0acf276b0d50820aaef6c7846ba01889883/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:48 compute-0 podman[373246]: 2025-10-11 02:07:48.715384879 +0000 UTC m=+0.193993925 container init d218be575d4b2c630e9022fc9e496d0a819383f321506869e869be8baada7970 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_einstein, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:07:48 compute-0 podman[373246]: 2025-10-11 02:07:48.727943615 +0000 UTC m=+0.206552611 container start d218be575d4b2c630e9022fc9e496d0a819383f321506869e869be8baada7970 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_einstein, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.vendor=CentOS)
Oct 11 02:07:48 compute-0 podman[373246]: 2025-10-11 02:07:48.732644726 +0000 UTC m=+0.211253732 container attach d218be575d4b2c630e9022fc9e496d0a819383f321506869e869be8baada7970 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_einstein, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default)
Oct 11 02:07:48 compute-0 python3[373226]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: [
                                                {
                                                     "Id": "186c5e97c6f6912533851a0044ea6da23938910e7bddfb4a6c0be9b48ab2a1d1",
                                                     "Digest": "sha256:ecd56e6733c475f2d441344fd98f288c3eac0261ba113695fec7520a954ccbc7",
                                                     "RepoTags": [
                                                          "quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified"
                                                     ],
                                                     "RepoDigests": [
                                                          "quay.io/openstack-k8s-operators/openstack-network-exporter@sha256:ecd56e6733c475f2d441344fd98f288c3eac0261ba113695fec7520a954ccbc7"
                                                     ],
                                                     "Parent": "",
                                                     "Comment": "",
                                                     "Created": "2025-08-26T15:52:54.446618393Z",
                                                     "Config": {
                                                          "ExposedPorts": {
                                                               "1981/tcp": {}
                                                          },
                                                          "Env": [
                                                               "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
                                                               "container=oci"
                                                          ],
                                                          "Cmd": [
                                                               "/app/openstack-network-exporter"
                                                          ],
                                                          "WorkingDir": "/",
                                                          "Labels": {
                                                               "architecture": "x86_64",
                                                               "build-date": "2025-08-20T13:12:41",
                                                               "com.redhat.component": "ubi9-minimal-container",
                                                               "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI",
                                                               "description": "The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.",
                                                               "distribution-scope": "public",
                                                               "io.buildah.version": "1.33.7",
                                                               "io.k8s.description": "The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.",
                                                               "io.k8s.display-name": "Red Hat Universal Base Image 9 Minimal",
                                                               "io.openshift.expose-services": "",
                                                               "io.openshift.tags": "minimal rhel9",
                                                               "maintainer": "Red Hat, Inc.",
                                                               "name": "ubi9-minimal",
                                                               "release": "1755695350",
                                                               "summary": "Provides the latest release of the minimal Red Hat Universal Base Image 9.",
                                                               "url": "https://catalog.redhat.com/en/search?searchType=containers",
                                                               "vcs-ref": "f4b088292653bbf5ca8188a5e59ffd06a8671d4b",
                                                               "vcs-type": "git",
                                                               "vendor": "Red Hat, Inc.",
                                                               "version": "9.6"
                                                          }
                                                     },
                                                     "Version": "",
                                                     "Author": "Red Hat",
                                                     "Architecture": "amd64",
                                                     "Os": "linux",
                                                     "Size": 142088877,
                                                     "VirtualSize": 142088877,
                                                     "GraphDriver": {
                                                          "Name": "overlay",
                                                          "Data": {
                                                               "LowerDir": "/var/lib/containers/storage/overlay/157961e3a1fe369d02893b19044a0e08e15689974ef810b235cb5ec194c7142c/diff:/var/lib/containers/storage/overlay/778d8c610941586099cac6c507cad2d1156b71b2bb54c42cebedf8808c68edb9/diff",
                                                               "UpperDir": "/var/lib/containers/storage/overlay/cd505d6f54e550fae708d1680b6b8d44753cf72fac8d36345974b92245bc660c/diff",
                                                               "WorkDir": "/var/lib/containers/storage/overlay/cd505d6f54e550fae708d1680b6b8d44753cf72fac8d36345974b92245bc660c/work"
                                                          }
                                                     },
                                                     "RootFS": {
                                                          "Type": "layers",
                                                          "Layers": [
                                                               "sha256:778d8c610941586099cac6c507cad2d1156b71b2bb54c42cebedf8808c68edb9",
                                                               "sha256:60984b2898b5b4ad1680d36433001b7e2bebb1073775d06b4c2ff80f985caccb",
                                                               "sha256:866ed9f0f685cc1d741f560227443a94926fc22494aa7808be751e7247cda421"
                                                          ]
                                                     },
                                                     "Labels": {
                                                          "architecture": "x86_64",
                                                          "build-date": "2025-08-20T13:12:41",
                                                          "com.redhat.component": "ubi9-minimal-container",
                                                          "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI",
                                                          "description": "The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.",
                                                          "distribution-scope": "public",
                                                          "io.buildah.version": "1.33.7",
                                                          "io.k8s.description": "The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.",
                                                          "io.k8s.display-name": "Red Hat Universal Base Image 9 Minimal",
                                                          "io.openshift.expose-services": "",
                                                          "io.openshift.tags": "minimal rhel9",
                                                          "maintainer": "Red Hat, Inc.",
                                                          "name": "ubi9-minimal",
                                                          "release": "1755695350",
                                                          "summary": "Provides the latest release of the minimal Red Hat Universal Base Image 9.",
                                                          "url": "https://catalog.redhat.com/en/search?searchType=containers",
                                                          "vcs-ref": "f4b088292653bbf5ca8188a5e59ffd06a8671d4b",
                                                          "vcs-type": "git",
                                                          "vendor": "Red Hat, Inc.",
                                                          "version": "9.6"
                                                     },
                                                     "Annotations": {},
                                                     "ManifestType": "application/vnd.docker.distribution.manifest.v2+json",
                                                     "User": "",
                                                     "History": [
                                                          {
                                                               "created": "2025-08-20T13:14:24.836114247Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL maintainer=\"Red Hat, Inc.\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-20T13:14:24.907067406Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL vendor=\"Red Hat, Inc.\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-20T13:14:24.953912498Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL url=\"https://catalog.redhat.com/en/search?searchType=containers\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-20T13:14:24.99202543Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL com.redhat.component=\"ubi9-minimal-container\"       name=\"ubi9-minimal\"       version=\"9.6\"       distribution-scope=\"public\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-20T13:14:25.033232759Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL com.redhat.license_terms=\"https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-20T13:14:25.116880439Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL summary=\"Provides the latest release of the minimal Red Hat Universal Base Image 9.\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-20T13:14:25.167988017Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL description=\"The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-20T13:14:25.205286235Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL io.k8s.description=\"The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-20T13:14:25.239930205Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL io.k8s.display-name=\"Red Hat Universal Base Image 9 Minimal\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-20T13:14:25.298417937Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL io.openshift.expose-services=\"\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-20T13:14:25.346108994Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL io.openshift.tags=\"minimal rhel9\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-20T13:14:25.381850293Z",
                                                               "created_by": "/bin/sh -c #(nop) ENV container oci",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-20T13:14:25.998561869Z",
                                                               "created_by": "/bin/sh -c #(nop) COPY dir:e1f22eafd6489859288910ef7585f9d694693aa84a31ba9d54dea9e7a451abe6 in / ",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-20T13:14:26.169088157Z",
                                                               "created_by": "/bin/sh -c #(nop) COPY file:b37d593713ee21ad52a4cd1424dc019a24f7966f85df0ac4b86d234302695328 in /etc/yum.repos.d/. ",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-20T13:14:26.222750062Z",
                                                               "created_by": "/bin/sh -c #(nop) CMD [\"/bin/bash\"]",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-20T13:14:26.44502305Z",
                                                               "created_by": "/bin/sh -c #(nop) COPY file:58cc94f5b3b2d60de2c77a6ed4b1797dcede502ccdb429a72e7a72d994235b3c in /usr/share/buildinfo/content-sets.json ",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-20T13:14:26.581849716Z",
                                                               "created_by": "/bin/sh -c #(nop) COPY file:58cc94f5b3b2d60de2c77a6ed4b1797dcede502ccdb429a72e7a72d994235b3c in /root/buildinfo/content_manifests/content-sets.json ",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-20T13:14:26.902035614Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL \"build-date\"=\"2025-08-20T13:12:41\" \"architecture\"=\"x86_64\" \"vcs-type\"=\"git\" \"vcs-ref\"=\"f4b088292653bbf5ca8188a5e59ffd06a8671d4b\" \"release\"=\"1755695350\""
                                                          },
                                                          {
                                                               "created": "2025-08-26T15:52:52.889456996Z",
                                                               "created_by": "/bin/sh -c microdnf update -y && rm -rf /var/cache/yum",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-26T15:52:54.116955892Z",
                                                               "created_by": "/bin/sh -c microdnf install -y iproute && microdnf clean all",
                                                               "comment": "FROM registry.access.redhat.com/ubi9/ubi-minimal:latest"
                                                          },
                                                          {
                                                               "created": "2025-08-26T15:52:54.314008349Z",
                                                               "created_by": "/bin/sh -c #(nop) COPY file:fab61bc60c39fae33dbfa4e382d473ceab94ebaf876018d5034ba62f04740767 in /etc/openstack-network-exporter.yaml ",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-26T15:52:54.407547534Z",
                                                               "created_by": "/bin/sh -c #(nop) COPY file:be836064c1a23a46d9411cf2aafe0d43f5d498cf2fd92e788160ae2e0f30bb86 in /app/openstack-network-exporter ",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-26T15:52:54.420490087Z",
                                                               "created_by": "/bin/sh -c #(nop) MAINTAINER Red Hat",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-26T15:52:54.432520013Z",
                                                               "created_by": "/bin/sh -c #(nop) EXPOSE 1981",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-08-26T15:52:54.48363818Z",
                                                               "created_by": "/bin/sh -c #(nop) CMD [\"/app/openstack-network-exporter\"]",
                                                               "author": "Red Hat",
                                                               "comment": "FROM 688666ea38a8"
                                                          }
                                                     ],
                                                     "NamesHistory": [
                                                          "quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified"
                                                     ]
                                                }
                                           ]
                                           : quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified
Oct 11 02:07:48 compute-0 systemd[1]: libpod-ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46.scope: Deactivated successfully.
Oct 11 02:07:48 compute-0 systemd[1]: libpod-ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46.scope: Consumed 6.313s CPU time.
Oct 11 02:07:48 compute-0 podman[373312]: 2025-10-11 02:07:48.977830818 +0000 UTC m=+0.071504146 container died ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, config_id=edpm, container_name=openstack_network_exporter, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., name=ubi9-minimal, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.component=ubi9-minimal-container, version=9.6, distribution-scope=public, managed_by=edpm_ansible, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, architecture=x86_64, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, io.openshift.expose-services=, vendor=Red Hat, Inc., io.buildah.version=1.33.7, release=1755695350)
Oct 11 02:07:49 compute-0 systemd[1]: ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46-3754f5839c2d0939.timer: Deactivated successfully.
Oct 11 02:07:49 compute-0 systemd[1]: Stopped /usr/bin/podman healthcheck run ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46.
Oct 11 02:07:49 compute-0 systemd[1]: ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46-3754f5839c2d0939.service: Failed to open /run/systemd/transient/ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46-3754f5839c2d0939.service: No such file or directory
Oct 11 02:07:49 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46-userdata-shm.mount: Deactivated successfully.
Oct 11 02:07:49 compute-0 systemd[1]: var-lib-containers-storage-overlay-71da1f264f6c84273ff7279842042e12ee9d363bb98b441e04efeeff07ab2585-merged.mount: Deactivated successfully.
Oct 11 02:07:49 compute-0 podman[373312]: 2025-10-11 02:07:49.091734934 +0000 UTC m=+0.185408252 container cleanup ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, com.redhat.component=ubi9-minimal-container, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm, io.openshift.tags=minimal rhel9, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, name=ubi9-minimal, version=9.6, build-date=2025-08-20T13:12:41, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=openstack_network_exporter, io.openshift.expose-services=, maintainer=Red Hat, Inc., io.buildah.version=1.33.7, release=1755695350, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vendor=Red Hat, Inc., architecture=x86_64, managed_by=edpm_ansible, url=https://catalog.redhat.com/en/search?searchType=containers)
Oct 11 02:07:49 compute-0 python3[373226]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman stop openstack_network_exporter
Oct 11 02:07:49 compute-0 systemd[1]: edpm_openstack_network_exporter.service: Main process exited, code=exited, status=2/INVALIDARGUMENT
Oct 11 02:07:49 compute-0 systemd[1]: ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46-3754f5839c2d0939.timer: Failed to open /run/systemd/transient/ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46-3754f5839c2d0939.timer: No such file or directory
Oct 11 02:07:49 compute-0 systemd[1]: ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46-3754f5839c2d0939.service: Failed to open /run/systemd/transient/ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46-3754f5839c2d0939.service: No such file or directory
Oct 11 02:07:49 compute-0 podman[373338]: 2025-10-11 02:07:49.23783003 +0000 UTC m=+0.108131656 container remove ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, url=https://catalog.redhat.com/en/search?searchType=containers, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-type=git, version=9.6, container_name=openstack_network_exporter, io.buildah.version=1.33.7, com.redhat.component=ubi9-minimal-container, io.openshift.expose-services=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, build-date=2025-08-20T13:12:41, release=1755695350, vendor=Red Hat, Inc., distribution-scope=public, maintainer=Red Hat, Inc., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, name=ubi9-minimal, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., architecture=x86_64, io.openshift.tags=minimal rhel9, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b)
Oct 11 02:07:49 compute-0 podman[373337]: Error: no container with ID ed7867659d9ef5047fd55711a1a9f0be88fe0745405dd1e1813865a6b8402a46 found in database: no such container
Oct 11 02:07:49 compute-0 python3[373226]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman rm --force openstack_network_exporter
Oct 11 02:07:49 compute-0 systemd[1]: edpm_openstack_network_exporter.service: Control process exited, code=exited, status=125/n/a
Oct 11 02:07:49 compute-0 systemd[1]: edpm_openstack_network_exporter.service: Failed with result 'exit-code'.
Oct 11 02:07:49 compute-0 podman[373360]: 2025-10-11 02:07:49.369849326 +0000 UTC m=+0.092449671 container create 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, version=9.6, container_name=openstack_network_exporter, io.openshift.expose-services=, com.redhat.component=ubi9-minimal-container, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.tags=minimal rhel9, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., distribution-scope=public, vendor=Red Hat, Inc., name=ubi9-minimal, vcs-type=git, io.buildah.version=1.33.7, managed_by=edpm_ansible, release=1755695350, build-date=2025-08-20T13:12:41, config_id=edpm, maintainer=Red Hat, Inc., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64)
Oct 11 02:07:49 compute-0 podman[373360]: 2025-10-11 02:07:49.330067854 +0000 UTC m=+0.052668209 image pull 186c5e97c6f6912533851a0044ea6da23938910e7bddfb4a6c0be9b48ab2a1d1 quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified
Oct 11 02:07:49 compute-0 python3[373226]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman create --name openstack_network_exporter --conmon-pidfile /run/openstack_network_exporter.pid --env OS_ENDPOINT_TYPE=internal --env OPENSTACK_NETWORK_EXPORTER_YAML=/etc/openstack_network_exporter/openstack_network_exporter.yaml --healthcheck-command /openstack/healthcheck openstack-netwo --label config_id=edpm --label container_name=openstack_network_exporter --label managed_by=edpm_ansible --label config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']} --log-driver journald --log-level info --network host --privileged=True --publish 9105:9105 --volume /var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z --volume /var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z --volume /var/run/openvswitch:/run/openvswitch:rw,z --volume /var/lib/openvswitch/ovn:/run/ovn:rw,z --volume /proc:/host/proc:ro --volume /var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified
Oct 11 02:07:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v889: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:49 compute-0 systemd[1]: edpm_openstack_network_exporter.service: Scheduled restart job, restart counter is at 1.
Oct 11 02:07:49 compute-0 systemd[1]: Stopped openstack_network_exporter container.
Oct 11 02:07:49 compute-0 systemd[1]: Starting openstack_network_exporter container...
Oct 11 02:07:49 compute-0 systemd[1]: Started libpod-conmon-6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c.scope.
Oct 11 02:07:49 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:07:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7679b1f33abc4c10bb215cc54ab8c38e13c80904a04d087fcd3dfc842548596a/merged/run/ovn supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7679b1f33abc4c10bb215cc54ab8c38e13c80904a04d087fcd3dfc842548596a/merged/etc/openstack_network_exporter/openstack_network_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7679b1f33abc4c10bb215cc54ab8c38e13c80904a04d087fcd3dfc842548596a/merged/etc/openstack_network_exporter/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:49 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c.
Oct 11 02:07:49 compute-0 podman[373374]: 2025-10-11 02:07:49.678136523 +0000 UTC m=+0.271833270 container init 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, managed_by=edpm_ansible, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vendor=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.buildah.version=1.33.7, url=https://catalog.redhat.com/en/search?searchType=containers, config_id=edpm, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., version=9.6, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.expose-services=, container_name=openstack_network_exporter, release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., distribution-scope=public, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, architecture=x86_64, build-date=2025-08-20T13:12:41)
Oct 11 02:07:49 compute-0 podman[373374]: 2025-10-11 02:07:49.719065264 +0000 UTC m=+0.312761971 container start 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, distribution-scope=public, managed_by=edpm_ansible, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, build-date=2025-08-20T13:12:41, version=9.6, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., architecture=x86_64, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.buildah.version=1.33.7, vcs-type=git, com.redhat.component=ubi9-minimal-container, maintainer=Red Hat, Inc., release=1755695350, io.openshift.expose-services=, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, container_name=openstack_network_exporter)
Oct 11 02:07:49 compute-0 openstack_network_exporter[373407]: INFO    02:07:49 main.go:48: registering *bridge.Collector
Oct 11 02:07:49 compute-0 openstack_network_exporter[373407]: INFO    02:07:49 main.go:48: registering *coverage.Collector
Oct 11 02:07:49 compute-0 podman[373388]: openstack_network_exporter
Oct 11 02:07:49 compute-0 openstack_network_exporter[373407]: INFO    02:07:49 main.go:48: registering *datapath.Collector
Oct 11 02:07:49 compute-0 openstack_network_exporter[373407]: INFO    02:07:49 main.go:48: registering *iface.Collector
Oct 11 02:07:49 compute-0 openstack_network_exporter[373407]: INFO    02:07:49 main.go:48: registering *memory.Collector
Oct 11 02:07:49 compute-0 openstack_network_exporter[373407]: INFO    02:07:49 main.go:48: registering *ovnnorthd.Collector
Oct 11 02:07:49 compute-0 openstack_network_exporter[373407]: INFO    02:07:49 main.go:48: registering *ovn.Collector
Oct 11 02:07:49 compute-0 openstack_network_exporter[373407]: INFO    02:07:49 main.go:48: registering *ovsdbserver.Collector
Oct 11 02:07:49 compute-0 openstack_network_exporter[373407]: INFO    02:07:49 main.go:48: registering *pmd_perf.Collector
Oct 11 02:07:49 compute-0 openstack_network_exporter[373407]: INFO    02:07:49 main.go:48: registering *pmd_rxq.Collector
Oct 11 02:07:49 compute-0 openstack_network_exporter[373407]: INFO    02:07:49 main.go:48: registering *vswitch.Collector
Oct 11 02:07:49 compute-0 openstack_network_exporter[373407]: NOTICE  02:07:49 main.go:76: listening on https://:9105/metrics
Oct 11 02:07:49 compute-0 python3[373226]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman start openstack_network_exporter
Oct 11 02:07:49 compute-0 systemd[1]: Started openstack_network_exporter container.
Oct 11 02:07:49 compute-0 loving_einstein[373274]: {
Oct 11 02:07:49 compute-0 loving_einstein[373274]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:07:49 compute-0 loving_einstein[373274]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:07:49 compute-0 loving_einstein[373274]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:07:49 compute-0 loving_einstein[373274]:         "osd_id": 1,
Oct 11 02:07:49 compute-0 loving_einstein[373274]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:07:49 compute-0 loving_einstein[373274]:         "type": "bluestore"
Oct 11 02:07:49 compute-0 loving_einstein[373274]:     },
Oct 11 02:07:49 compute-0 loving_einstein[373274]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:07:49 compute-0 loving_einstein[373274]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:07:49 compute-0 loving_einstein[373274]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:07:49 compute-0 loving_einstein[373274]:         "osd_id": 2,
Oct 11 02:07:49 compute-0 loving_einstein[373274]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:07:49 compute-0 loving_einstein[373274]:         "type": "bluestore"
Oct 11 02:07:49 compute-0 loving_einstein[373274]:     },
Oct 11 02:07:49 compute-0 loving_einstein[373274]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:07:49 compute-0 loving_einstein[373274]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:07:49 compute-0 loving_einstein[373274]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:07:49 compute-0 loving_einstein[373274]:         "osd_id": 0,
Oct 11 02:07:49 compute-0 loving_einstein[373274]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:07:49 compute-0 loving_einstein[373274]:         "type": "bluestore"
Oct 11 02:07:49 compute-0 loving_einstein[373274]:     }
Oct 11 02:07:49 compute-0 loving_einstein[373274]: }
Oct 11 02:07:49 compute-0 podman[373430]: 2025-10-11 02:07:49.883120902 +0000 UTC m=+0.146057963 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=openstack_network_exporter, io.openshift.tags=minimal rhel9, url=https://catalog.redhat.com/en/search?searchType=containers, build-date=2025-08-20T13:12:41, io.openshift.expose-services=, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.33.7, com.redhat.component=ubi9-minimal-container, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., distribution-scope=public, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, architecture=x86_64, version=9.6, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, managed_by=edpm_ansible, name=ubi9-minimal, release=1755695350, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:07:49 compute-0 podman[373246]: 2025-10-11 02:07:49.918420119 +0000 UTC m=+1.397029175 container died d218be575d4b2c630e9022fc9e496d0a819383f321506869e869be8baada7970 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_einstein, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:07:49 compute-0 systemd[1]: libpod-d218be575d4b2c630e9022fc9e496d0a819383f321506869e869be8baada7970.scope: Deactivated successfully.
Oct 11 02:07:49 compute-0 systemd[1]: libpod-d218be575d4b2c630e9022fc9e496d0a819383f321506869e869be8baada7970.scope: Consumed 1.168s CPU time.
Oct 11 02:07:49 compute-0 systemd[1]: var-lib-containers-storage-overlay-b4dc79ae4f61a353473ba3579450f0acf276b0d50820aaef6c7846ba01889883-merged.mount: Deactivated successfully.
Oct 11 02:07:50 compute-0 podman[373246]: 2025-10-11 02:07:50.004920556 +0000 UTC m=+1.483529552 container remove d218be575d4b2c630e9022fc9e496d0a819383f321506869e869be8baada7970 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_einstein, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507)
Oct 11 02:07:50 compute-0 sudo[373221]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:07:50 compute-0 systemd[1]: libpod-conmon-d218be575d4b2c630e9022fc9e496d0a819383f321506869e869be8baada7970.scope: Deactivated successfully.
Oct 11 02:07:50 compute-0 sudo[373014]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:07:50 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:07:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:07:50 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:07:50 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 750cd007-9f30-4630-a844-a5d46c209d1a does not exist
Oct 11 02:07:50 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev dd18016e-6f1f-45aa-8c77-5e3e5ee41743 does not exist
Oct 11 02:07:50 compute-0 sudo[373514]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:07:50 compute-0 sudo[373514]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:50 compute-0 sudo[373514]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:50 compute-0 sudo[373545]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:07:50 compute-0 sudo[373545]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:07:50 compute-0 sudo[373545]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:50 compute-0 ceph-mon[191930]: pgmap v889: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:50 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:07:50 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:07:50 compute-0 sshd-session[372817]: Failed password for root from 121.227.153.123 port 33144 ssh2
Oct 11 02:07:50 compute-0 sudo[373695]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fpiqphrmnkcsuraxewstnxoenptqkwty ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148470.3100665-709-143504813472003/AnsiballZ_stat.py'
Oct 11 02:07:50 compute-0 sudo[373695]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:51 compute-0 python3.9[373697]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:07:51 compute-0 sudo[373695]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:51 compute-0 sshd-session[372817]: Connection closed by authenticating user root 121.227.153.123 port 33144 [preauth]
Oct 11 02:07:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v890: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:52 compute-0 sudo[373851]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lmofbkzyfjiaenubdlgncxsckbrovcey ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148471.5295684-718-206144988851205/AnsiballZ_file.py'
Oct 11 02:07:52 compute-0 sudo[373851]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:52 compute-0 python3.9[373853]: ansible-file Invoked with path=/etc/systemd/system/edpm_openstack_network_exporter.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:07:52 compute-0 sudo[373851]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:52 compute-0 ceph-mon[191930]: pgmap v890: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:52 compute-0 unix_chkpwd[373854]: password check failed for user (root)
Oct 11 02:07:52 compute-0 sshd-session[373741]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:07:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v891: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:54 compute-0 sudo[374003]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ymuflxhteactwkhxsoroyshpeimkeihw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148473.4712198-718-254936635168896/AnsiballZ_copy.py'
Oct 11 02:07:54 compute-0 sudo[374003]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:54 compute-0 ceph-mon[191930]: pgmap v891: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:54 compute-0 python3.9[374005]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760148473.4712198-718-254936635168896/source dest=/etc/systemd/system/edpm_openstack_network_exporter.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:07:54 compute-0 sudo[374003]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:07:54.825 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:07:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:07:54.827 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:07:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:07:54.827 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:07:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:07:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v892: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:55 compute-0 sshd-session[373741]: Failed password for root from 121.227.153.123 port 54072 ssh2
Oct 11 02:07:55 compute-0 sudo[374079]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-swpwjyxuirygkdxmnazdhkwwhqztibiz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148473.4712198-718-254936635168896/AnsiballZ_systemd.py'
Oct 11 02:07:55 compute-0 sudo[374079]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:56 compute-0 python3.9[374081]: ansible-systemd Invoked with state=started name=edpm_openstack_network_exporter.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:07:56 compute-0 sudo[374079]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:56 compute-0 sshd-session[373741]: Connection closed by authenticating user root 121.227.153.123 port 54072 [preauth]
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:07:56
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['cephfs.cephfs.data', 'default.rgw.meta', 'cephfs.cephfs.meta', 'vms', '.mgr', 'default.rgw.log', 'default.rgw.control', 'volumes', 'images', '.rgw.root', 'backups']
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:07:56 compute-0 ceph-mon[191930]: pgmap v892: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:07:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:07:57 compute-0 sudo[374250]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-srwufzqtupmkxazyeweaegwwcxbdmevr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148476.5182285-738-280718718140973/AnsiballZ_systemd.py'
Oct 11 02:07:57 compute-0 podman[374209]: 2025-10-11 02:07:57.17790646 +0000 UTC m=+0.129483939 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:07:57 compute-0 sudo[374250]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v893: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:57 compute-0 python3.9[374260]: ansible-ansible.builtin.systemd Invoked with name=edpm_openstack_network_exporter.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 02:07:57 compute-0 systemd[1]: Stopping openstack_network_exporter container...
Oct 11 02:07:57 compute-0 unix_chkpwd[374274]: password check failed for user (root)
Oct 11 02:07:57 compute-0 sshd-session[374114]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:07:57 compute-0 systemd[1]: libpod-6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c.scope: Deactivated successfully.
Oct 11 02:07:57 compute-0 podman[374264]: 2025-10-11 02:07:57.730223468 +0000 UTC m=+0.111408004 container died 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, managed_by=edpm_ansible, name=ubi9-minimal, release=1755695350, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, container_name=openstack_network_exporter, com.redhat.component=ubi9-minimal-container, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.expose-services=, maintainer=Red Hat, Inc., version=9.6, io.openshift.tags=minimal rhel9, build-date=2025-08-20T13:12:41, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, config_id=edpm, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, url=https://catalog.redhat.com/en/search?searchType=containers, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.buildah.version=1.33.7, architecture=x86_64, distribution-scope=public)
Oct 11 02:07:57 compute-0 systemd[1]: 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c-2835966743282a02.timer: Deactivated successfully.
Oct 11 02:07:57 compute-0 systemd[1]: Stopped /usr/bin/podman healthcheck run 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c.
Oct 11 02:07:57 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c-userdata-shm.mount: Deactivated successfully.
Oct 11 02:07:57 compute-0 systemd[1]: var-lib-containers-storage-overlay-7679b1f33abc4c10bb215cc54ab8c38e13c80904a04d087fcd3dfc842548596a-merged.mount: Deactivated successfully.
Oct 11 02:07:57 compute-0 podman[374264]: 2025-10-11 02:07:57.825914411 +0000 UTC m=+0.207098917 container cleanup 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, maintainer=Red Hat, Inc., version=9.6, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., managed_by=edpm_ansible, name=ubi9-minimal, vendor=Red Hat, Inc., container_name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, io.buildah.version=1.33.7, io.openshift.tags=minimal rhel9, com.redhat.component=ubi9-minimal-container, io.openshift.expose-services=, vcs-type=git, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, architecture=x86_64, release=1755695350)
Oct 11 02:07:57 compute-0 podman[374264]: openstack_network_exporter
Oct 11 02:07:57 compute-0 systemd[1]: edpm_openstack_network_exporter.service: Main process exited, code=exited, status=2/INVALIDARGUMENT
Oct 11 02:07:57 compute-0 systemd[1]: libpod-conmon-6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c.scope: Deactivated successfully.
Oct 11 02:07:57 compute-0 podman[374293]: openstack_network_exporter
Oct 11 02:07:57 compute-0 systemd[1]: edpm_openstack_network_exporter.service: Failed with result 'exit-code'.
Oct 11 02:07:57 compute-0 systemd[1]: Stopped openstack_network_exporter container.
Oct 11 02:07:57 compute-0 systemd[1]: Starting openstack_network_exporter container...
Oct 11 02:07:58 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:07:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7679b1f33abc4c10bb215cc54ab8c38e13c80904a04d087fcd3dfc842548596a/merged/run/ovn supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7679b1f33abc4c10bb215cc54ab8c38e13c80904a04d087fcd3dfc842548596a/merged/etc/openstack_network_exporter/openstack_network_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7679b1f33abc4c10bb215cc54ab8c38e13c80904a04d087fcd3dfc842548596a/merged/etc/openstack_network_exporter/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 02:07:58 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c.
Oct 11 02:07:58 compute-0 podman[374304]: 2025-10-11 02:07:58.201938146 +0000 UTC m=+0.186886462 container init 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, io.buildah.version=1.33.7, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, build-date=2025-08-20T13:12:41, version=9.6, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, io.openshift.expose-services=, container_name=openstack_network_exporter, distribution-scope=public, url=https://catalog.redhat.com/en/search?searchType=containers, architecture=x86_64, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vendor=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, vcs-type=git, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, config_id=edpm, managed_by=edpm_ansible, name=ubi9-minimal)
Oct 11 02:07:58 compute-0 openstack_network_exporter[374316]: INFO    02:07:58 main.go:48: registering *bridge.Collector
Oct 11 02:07:58 compute-0 openstack_network_exporter[374316]: INFO    02:07:58 main.go:48: registering *coverage.Collector
Oct 11 02:07:58 compute-0 openstack_network_exporter[374316]: INFO    02:07:58 main.go:48: registering *datapath.Collector
Oct 11 02:07:58 compute-0 openstack_network_exporter[374316]: INFO    02:07:58 main.go:48: registering *iface.Collector
Oct 11 02:07:58 compute-0 openstack_network_exporter[374316]: INFO    02:07:58 main.go:48: registering *memory.Collector
Oct 11 02:07:58 compute-0 openstack_network_exporter[374316]: INFO    02:07:58 main.go:48: registering *ovnnorthd.Collector
Oct 11 02:07:58 compute-0 openstack_network_exporter[374316]: INFO    02:07:58 main.go:48: registering *ovn.Collector
Oct 11 02:07:58 compute-0 openstack_network_exporter[374316]: INFO    02:07:58 main.go:48: registering *ovsdbserver.Collector
Oct 11 02:07:58 compute-0 openstack_network_exporter[374316]: INFO    02:07:58 main.go:48: registering *pmd_perf.Collector
Oct 11 02:07:58 compute-0 openstack_network_exporter[374316]: INFO    02:07:58 main.go:48: registering *pmd_rxq.Collector
Oct 11 02:07:58 compute-0 openstack_network_exporter[374316]: INFO    02:07:58 main.go:48: registering *vswitch.Collector
Oct 11 02:07:58 compute-0 openstack_network_exporter[374316]: NOTICE  02:07:58 main.go:76: listening on https://:9105/metrics
Oct 11 02:07:58 compute-0 podman[374304]: 2025-10-11 02:07:58.236165553 +0000 UTC m=+0.221113849 container start 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, maintainer=Red Hat, Inc., container_name=openstack_network_exporter, managed_by=edpm_ansible, url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., build-date=2025-08-20T13:12:41, version=9.6, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.tags=minimal rhel9, config_id=edpm, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350, com.redhat.component=ubi9-minimal-container, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, distribution-scope=public, io.buildah.version=1.33.7, name=ubi9-minimal, architecture=x86_64, vendor=Red Hat, Inc., io.openshift.expose-services=)
Oct 11 02:07:58 compute-0 podman[374304]: openstack_network_exporter
Oct 11 02:07:58 compute-0 systemd[1]: Started openstack_network_exporter container.
Oct 11 02:07:58 compute-0 sudo[374250]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:58 compute-0 podman[374324]: 2025-10-11 02:07:58.344509073 +0000 UTC m=+0.101167069 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., managed_by=edpm_ansible, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_id=edpm, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, architecture=x86_64, container_name=openstack_network_exporter, name=ubi9-minimal, version=9.6, release=1755695350, io.openshift.expose-services=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, build-date=2025-08-20T13:12:41, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, com.redhat.component=ubi9-minimal-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., io.openshift.tags=minimal rhel9, vcs-type=git, distribution-scope=public, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:07:58 compute-0 ceph-mon[191930]: pgmap v893: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:59 compute-0 sudo[374498]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-obcdidvjhetgiuanzugvurthgckruokh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148478.5669951-746-247892426310470/AnsiballZ_find.py'
Oct 11 02:07:59 compute-0 sudo[374498]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:07:59 compute-0 python3.9[374500]: ansible-ansible.builtin.find Invoked with file_type=directory paths=['/var/lib/openstack/healthchecks/'] patterns=[] read_whole_file=False age_stamp=mtime recurse=False hidden=False follow=False get_checksum=False checksum_algorithm=sha1 use_regex=False exact_mode=True excludes=None contains=None age=None size=None depth=None mode=None encoding=None limit=None
Oct 11 02:07:59 compute-0 sudo[374498]: pam_unix(sudo:session): session closed for user root
Oct 11 02:07:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v894: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:07:59 compute-0 podman[157119]: time="2025-10-11T02:07:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:07:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:07:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45036 "" "Go-http-client/1.1"
Oct 11 02:07:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:07:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8509 "" "Go-http-client/1.1"
Oct 11 02:08:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:08:00 compute-0 sshd-session[374114]: Failed password for root from 121.227.153.123 port 54074 ssh2
Oct 11 02:08:00 compute-0 ceph-mon[191930]: pgmap v894: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:00 compute-0 sudo[374652]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wmscucdnirpnyoxwhapgtmigjrdsecnl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148479.813559-756-272205115012830/AnsiballZ_podman_container_info.py'
Oct 11 02:08:00 compute-0 sudo[374652]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:00 compute-0 python3.9[374654]: ansible-containers.podman.podman_container_info Invoked with name=['ovn_controller'] executable=podman
Oct 11 02:08:01 compute-0 sshd-session[374114]: Connection closed by authenticating user root 121.227.153.123 port 54074 [preauth]
Oct 11 02:08:01 compute-0 sudo[374652]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:01 compute-0 podman[374673]: 2025-10-11 02:08:01.244395185 +0000 UTC m=+0.124395791 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=ceilometer_agent_ipmi, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible)
Oct 11 02:08:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v895: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:01 compute-0 openstack_network_exporter[374316]: ERROR   02:08:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:08:01 compute-0 openstack_network_exporter[374316]: ERROR   02:08:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:08:01 compute-0 openstack_network_exporter[374316]: ERROR   02:08:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:08:01 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:08:01 compute-0 openstack_network_exporter[374316]: ERROR   02:08:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:08:01 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:08:01 compute-0 openstack_network_exporter[374316]: ERROR   02:08:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:08:02 compute-0 sudo[374842]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iognyizjlyzglgtivctmjviossmcvzly ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148481.3142333-764-44251603497022/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:02 compute-0 sudo[374842]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:02 compute-0 python3.9[374844]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=ovn_controller detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:02 compute-0 systemd[1]: Started libpod-conmon-861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112.scope.
Oct 11 02:08:02 compute-0 podman[374845]: 2025-10-11 02:08:02.450486967 +0000 UTC m=+0.141950654 container exec 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_id=ovn_controller, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']})
Oct 11 02:08:02 compute-0 podman[374845]: 2025-10-11 02:08:02.486442404 +0000 UTC m=+0.177906021 container exec_died 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_id=ovn_controller, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:08:02 compute-0 ceph-mon[191930]: pgmap v895: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:02 compute-0 sudo[374842]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:02 compute-0 systemd[1]: libpod-conmon-861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112.scope: Deactivated successfully.
Oct 11 02:08:02 compute-0 unix_chkpwd[374887]: password check failed for user (root)
Oct 11 02:08:02 compute-0 sshd-session[374719]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:08:03 compute-0 sudo[375025]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-doypissgebkbzjuhqbhixfdclxsjnxju ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148482.8147793-772-253544620541039/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:03 compute-0 sudo[375025]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v896: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:03 compute-0 python3.9[375027]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=ovn_controller detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:03 compute-0 systemd[1]: Started libpod-conmon-861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112.scope.
Oct 11 02:08:03 compute-0 podman[375028]: 2025-10-11 02:08:03.773698594 +0000 UTC m=+0.155158158 container exec 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_id=ovn_controller, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']})
Oct 11 02:08:03 compute-0 podman[375028]: 2025-10-11 02:08:03.784063353 +0000 UTC m=+0.165522827 container exec_died 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_controller, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 02:08:03 compute-0 sudo[375025]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:03 compute-0 systemd[1]: libpod-conmon-861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112.scope: Deactivated successfully.
Oct 11 02:08:04 compute-0 ceph-mon[191930]: pgmap v896: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:04 compute-0 sudo[375210]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xlbvzcsvispuxagjgzwrcnmdenxaytuk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148484.1407895-780-273514075112834/AnsiballZ_file.py'
Oct 11 02:08:04 compute-0 sudo[375210]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:04 compute-0 sshd-session[374719]: Failed password for root from 121.227.153.123 port 45884 ssh2
Oct 11 02:08:04 compute-0 python3.9[375212]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/ovn_controller recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:08:05 compute-0 sudo[375210]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:08:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v897: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:05 compute-0 sudo[375362]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xlcabnhmukcfkzfowcrxoyctyaedjqvq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148485.2983897-789-275177100688650/AnsiballZ_podman_container_info.py'
Oct 11 02:08:05 compute-0 sudo[375362]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:05 compute-0 sshd-session[374719]: Connection closed by authenticating user root 121.227.153.123 port 45884 [preauth]
Oct 11 02:08:06 compute-0 python3.9[375364]: ansible-containers.podman.podman_container_info Invoked with name=['ceilometer_agent_compute'] executable=podman
Oct 11 02:08:06 compute-0 sudo[375362]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:06 compute-0 PackageKit[338804]: daemon quit
Oct 11 02:08:06 compute-0 systemd[1]: packagekit.service: Deactivated successfully.
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:08:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:08:06 compute-0 ceph-mon[191930]: pgmap v897: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v898: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:07 compute-0 unix_chkpwd[375446]: password check failed for user (root)
Oct 11 02:08:07 compute-0 sshd-session[375377]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:08:08 compute-0 sudo[375529]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zrnwiizxgfljuppwzzcbkmqjmkeclrsx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148487.4708803-797-26519439305581/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:08 compute-0 sudo[375529]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:08 compute-0 python3.9[375531]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=ceilometer_agent_compute detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:08 compute-0 systemd[1]: Started libpod-conmon-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope.
Oct 11 02:08:08 compute-0 podman[375532]: 2025-10-11 02:08:08.388376487 +0000 UTC m=+0.108416480 container exec c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm)
Oct 11 02:08:08 compute-0 podman[375532]: 2025-10-11 02:08:08.424011695 +0000 UTC m=+0.144051718 container exec_died c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, tcib_managed=true, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:08:08 compute-0 systemd[1]: libpod-conmon-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope: Deactivated successfully.
Oct 11 02:08:08 compute-0 sudo[375529]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:08 compute-0 ceph-mon[191930]: pgmap v898: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:08 compute-0 sshd-session[375377]: Failed password for root from 121.227.153.123 port 45888 ssh2
Oct 11 02:08:09 compute-0 sshd-session[375377]: Connection closed by authenticating user root 121.227.153.123 port 45888 [preauth]
Oct 11 02:08:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v899: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:08:10 compute-0 sudo[375714]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-frxowrdqxaqezqxexdzlggfxsdapvciy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148488.7380912-805-234568939746754/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:10 compute-0 sudo[375714]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:10 compute-0 python3.9[375716]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=ceilometer_agent_compute detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:10 compute-0 systemd[1]: Started libpod-conmon-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope.
Oct 11 02:08:10 compute-0 podman[375717]: 2025-10-11 02:08:10.523080115 +0000 UTC m=+0.173523289 container exec c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_compute, org.label-schema.schema-version=1.0, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 02:08:10 compute-0 podman[375717]: 2025-10-11 02:08:10.559442141 +0000 UTC m=+0.209885305 container exec_died c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_compute, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:08:10 compute-0 ceph-mon[191930]: pgmap v899: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:10 compute-0 systemd[1]: libpod-conmon-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope: Deactivated successfully.
Oct 11 02:08:10 compute-0 sudo[375714]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:10 compute-0 unix_chkpwd[375772]: password check failed for user (root)
Oct 11 02:08:10 compute-0 sshd-session[375639]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:08:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v900: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:11 compute-0 sudo[375898]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qbytavjqjndprtgdleuukdijrpddkesg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148490.9692478-813-123657769727237/AnsiballZ_file.py'
Oct 11 02:08:11 compute-0 sudo[375898]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:11 compute-0 python3.9[375900]: ansible-ansible.builtin.file Invoked with group=42405 mode=0700 owner=42405 path=/var/lib/openstack/healthchecks/ceilometer_agent_compute recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:08:11 compute-0 sudo[375898]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:12 compute-0 ceph-mon[191930]: pgmap v900: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:12 compute-0 sudo[376061]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kzypbbltyyadmteydbqfavrribsocehy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148492.1273296-822-4653134577966/AnsiballZ_podman_container_info.py'
Oct 11 02:08:12 compute-0 sudo[376061]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:12 compute-0 podman[376024]: 2025-10-11 02:08:12.750744503 +0000 UTC m=+0.130580312 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, config_id=edpm, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true)
Oct 11 02:08:12 compute-0 python3.9[376070]: ansible-containers.podman.podman_container_info Invoked with name=['node_exporter'] executable=podman
Oct 11 02:08:13 compute-0 sudo[376061]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:13 compute-0 sshd-session[375639]: Failed password for root from 121.227.153.123 port 53602 ssh2
Oct 11 02:08:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v901: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:14 compute-0 podman[376208]: 2025-10-11 02:08:14.091089231 +0000 UTC m=+0.117916054 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_metadata_agent, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:08:14 compute-0 podman[376207]: 2025-10-11 02:08:14.095564625 +0000 UTC m=+0.124411083 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:08:14 compute-0 sudo[376276]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pvkosambsqwvpssuutoagamjyxgipvbw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148493.4452128-830-209923442370868/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:14 compute-0 sudo[376276]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:14 compute-0 podman[376209]: 2025-10-11 02:08:14.153466108 +0000 UTC m=+0.167723174 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vendor=Red Hat, Inc., io.openshift.expose-services=, build-date=2024-09-18T21:23:30, config_id=edpm, io.openshift.tags=base rhel9, managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, vcs-type=git, maintainer=Red Hat, Inc., name=ubi9, container_name=kepler, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, version=9.4, com.redhat.component=ubi9-container, io.buildah.version=1.29.0, architecture=x86_64, distribution-scope=public, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1214.1726694543, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release-0.7.12=, io.k8s.display-name=Red Hat Universal Base Image 9)
Oct 11 02:08:14 compute-0 sshd-session[375639]: Connection closed by authenticating user root 121.227.153.123 port 53602 [preauth]
Oct 11 02:08:14 compute-0 podman[376293]: 2025-10-11 02:08:14.307687941 +0000 UTC m=+0.163401139 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, config_id=ovn_controller, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009)
Oct 11 02:08:14 compute-0 python3.9[376292]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=node_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:14 compute-0 systemd[1]: Started libpod-conmon-7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce.scope.
Oct 11 02:08:14 compute-0 podman[376316]: 2025-10-11 02:08:14.47307224 +0000 UTC m=+0.144921964 container exec 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:08:14 compute-0 podman[376316]: 2025-10-11 02:08:14.511013459 +0000 UTC m=+0.182863133 container exec_died 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:08:14 compute-0 systemd[1]: libpod-conmon-7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce.scope: Deactivated successfully.
Oct 11 02:08:14 compute-0 sudo[376276]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:14 compute-0 ceph-mon[191930]: pgmap v901: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:08:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v902: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:15 compute-0 sudo[376499]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nakfzfgcweemwdqfujhoqffatvsdauuf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148494.925645-838-118751325509573/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:15 compute-0 sudo[376499]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:15 compute-0 unix_chkpwd[376502]: password check failed for user (root)
Oct 11 02:08:15 compute-0 sshd-session[376332]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:08:15 compute-0 python3.9[376501]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=node_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:15 compute-0 systemd[1]: Started libpod-conmon-7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce.scope.
Oct 11 02:08:15 compute-0 podman[376503]: 2025-10-11 02:08:15.981929143 +0000 UTC m=+0.168408004 container exec 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:08:16 compute-0 podman[376503]: 2025-10-11 02:08:16.018128689 +0000 UTC m=+0.204607550 container exec_died 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:08:16 compute-0 systemd[1]: libpod-conmon-7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce.scope: Deactivated successfully.
Oct 11 02:08:16 compute-0 sudo[376499]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:16 compute-0 ceph-mon[191930]: pgmap v902: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:17 compute-0 sudo[376685]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-monccioisqfwmygwenzoghbrmidgrlmw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148496.4237552-846-215267677667967/AnsiballZ_file.py'
Oct 11 02:08:17 compute-0 sudo[376685]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:17 compute-0 podman[376688]: 2025-10-11 02:08:17.180706136 +0000 UTC m=+0.123021340 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_id=iscsid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, org.label-schema.schema-version=1.0, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:08:17 compute-0 podman[376687]: 2025-10-11 02:08:17.226958433 +0000 UTC m=+0.170378125 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, container_name=multipathd, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=multipathd)
Oct 11 02:08:17 compute-0 python3.9[376689]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/node_exporter recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:08:17 compute-0 sudo[376685]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v903: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:17 compute-0 sshd-session[376332]: Failed password for root from 121.227.153.123 port 53612 ssh2
Oct 11 02:08:18 compute-0 sudo[376875]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-klormzzmugtxjqhhskticbtznuaksqlo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148497.6241455-855-4041085683306/AnsiballZ_podman_container_info.py'
Oct 11 02:08:18 compute-0 sudo[376875]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:18 compute-0 python3.9[376877]: ansible-containers.podman.podman_container_info Invoked with name=['podman_exporter'] executable=podman
Oct 11 02:08:18 compute-0 sudo[376875]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:18 compute-0 ceph-mon[191930]: pgmap v903: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:19 compute-0 sshd-session[376332]: Connection closed by authenticating user root 121.227.153.123 port 53612 [preauth]
Oct 11 02:08:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v904: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:19 compute-0 sudo[377043]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gpquedhkrpgseycnkiliyvjkrnbzxksc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148498.9941416-863-242772831472559/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:19 compute-0 sudo[377043]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:19 compute-0 python3.9[377045]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=podman_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:20 compute-0 systemd[1]: Started libpod-conmon-31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028.scope.
Oct 11 02:08:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:08:20 compute-0 podman[377046]: 2025-10-11 02:08:20.086026839 +0000 UTC m=+0.196349598 container exec 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:08:20 compute-0 podman[377046]: 2025-10-11 02:08:20.124184407 +0000 UTC m=+0.234507156 container exec_died 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:08:20 compute-0 sudo[377043]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:20 compute-0 systemd[1]: libpod-conmon-31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028.scope: Deactivated successfully.
Oct 11 02:08:20 compute-0 ceph-mon[191930]: pgmap v904: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:20 compute-0 unix_chkpwd[377171]: password check failed for user (root)
Oct 11 02:08:20 compute-0 sshd-session[377004]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:08:21 compute-0 sudo[377225]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-buwyjxiayowqbevcopnjnqjcdqhfcfqw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148500.5005486-871-113048039976378/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:21 compute-0 sudo[377225]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:21 compute-0 python3.9[377227]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=podman_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v905: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:21 compute-0 systemd[1]: Started libpod-conmon-31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028.scope.
Oct 11 02:08:21 compute-0 podman[377228]: 2025-10-11 02:08:21.625373757 +0000 UTC m=+0.183635466 container exec 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:08:21 compute-0 podman[377228]: 2025-10-11 02:08:21.661803057 +0000 UTC m=+0.220064756 container exec_died 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:08:21 compute-0 sudo[377225]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:21 compute-0 systemd[1]: libpod-conmon-31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028.scope: Deactivated successfully.
Oct 11 02:08:22 compute-0 ceph-mon[191930]: pgmap v905: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:23 compute-0 sudo[377408]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lofuymdfpiaprxnxbckpydcnvvpsievp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148502.4381042-879-170582914782540/AnsiballZ_file.py'
Oct 11 02:08:23 compute-0 sudo[377408]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:23 compute-0 sshd-session[377004]: Failed password for root from 121.227.153.123 port 38800 ssh2
Oct 11 02:08:23 compute-0 python3.9[377410]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/podman_exporter recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:08:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v906: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:23 compute-0 sudo[377408]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:24 compute-0 sshd-session[377004]: Connection closed by authenticating user root 121.227.153.123 port 38800 [preauth]
Oct 11 02:08:24 compute-0 sudo[377562]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-egizpiphrgooxnwtvktrtldnxdbtmgqw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148503.7700217-888-121886364621100/AnsiballZ_podman_container_info.py'
Oct 11 02:08:24 compute-0 sudo[377562]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:24 compute-0 python3.9[377564]: ansible-containers.podman.podman_container_info Invoked with name=['openstack_network_exporter'] executable=podman
Oct 11 02:08:24 compute-0 ceph-mon[191930]: pgmap v906: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:24 compute-0 sudo[377562]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:08:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v907: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:25 compute-0 unix_chkpwd[377676]: password check failed for user (root)
Oct 11 02:08:25 compute-0 sshd-session[377560]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:08:25 compute-0 sudo[377727]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-towmmqbofsqjckprjandmniyednackpl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148505.244267-896-13520293085019/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:25 compute-0 sudo[377727]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:26 compute-0 python3.9[377729]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=openstack_network_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:26 compute-0 systemd[1]: Started libpod-conmon-6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c.scope.
Oct 11 02:08:26 compute-0 podman[377730]: 2025-10-11 02:08:26.364629648 +0000 UTC m=+0.143053829 container exec 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, build-date=2025-08-20T13:12:41, managed_by=edpm_ansible, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=1755695350, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, architecture=x86_64, name=ubi9-minimal, version=9.6, com.redhat.component=ubi9-minimal-container, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, container_name=openstack_network_exporter, distribution-scope=public, io.openshift.expose-services=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, vendor=Red Hat, Inc., vcs-type=git, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc.)
Oct 11 02:08:26 compute-0 podman[377730]: 2025-10-11 02:08:26.404402022 +0000 UTC m=+0.182826203 container exec_died 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, architecture=x86_64, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, config_id=edpm, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vcs-type=git, build-date=2025-08-20T13:12:41, name=ubi9-minimal, version=9.6, com.redhat.component=ubi9-minimal-container, io.buildah.version=1.33.7, io.openshift.tags=minimal rhel9, managed_by=edpm_ansible, container_name=openstack_network_exporter, io.openshift.expose-services=, maintainer=Red Hat, Inc.)
Oct 11 02:08:26 compute-0 sudo[377727]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:26 compute-0 systemd[1]: libpod-conmon-6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c.scope: Deactivated successfully.
Oct 11 02:08:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:08:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:08:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:08:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:08:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:08:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:08:26 compute-0 ceph-mon[191930]: pgmap v907: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v908: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:27 compute-0 sudo[377926]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hbclnlpowutnwoleeewedrgodcalsrfp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148506.804487-904-104787050147692/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:27 compute-0 sudo[377926]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:27 compute-0 podman[377884]: 2025-10-11 02:08:27.444919235 +0000 UTC m=+0.141716409 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:08:27 compute-0 python3.9[377935]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=openstack_network_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:27 compute-0 systemd[1]: Started libpod-conmon-6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c.scope.
Oct 11 02:08:27 compute-0 sshd-session[377560]: Failed password for root from 121.227.153.123 port 38814 ssh2
Oct 11 02:08:27 compute-0 podman[377936]: 2025-10-11 02:08:27.835516216 +0000 UTC m=+0.154886508 container exec 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.33.7, version=9.6, io.openshift.tags=minimal rhel9, distribution-scope=public, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, config_id=edpm, name=ubi9-minimal, architecture=x86_64, url=https://catalog.redhat.com/en/search?searchType=containers, build-date=2025-08-20T13:12:41, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, maintainer=Red Hat, Inc., managed_by=edpm_ansible, com.redhat.component=ubi9-minimal-container, container_name=openstack_network_exporter, vendor=Red Hat, Inc.)
Oct 11 02:08:27 compute-0 podman[377936]: 2025-10-11 02:08:27.872034658 +0000 UTC m=+0.191404910 container exec_died 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, io.openshift.tags=minimal rhel9, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.openshift.expose-services=, maintainer=Red Hat, Inc., managed_by=edpm_ansible, vcs-type=git, release=1755695350, version=9.6, build-date=2025-08-20T13:12:41, architecture=x86_64, container_name=openstack_network_exporter, url=https://catalog.redhat.com/en/search?searchType=containers, io.buildah.version=1.33.7, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, com.redhat.component=ubi9-minimal-container, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']})
Oct 11 02:08:27 compute-0 sudo[377926]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:27 compute-0 systemd[1]: libpod-conmon-6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c.scope: Deactivated successfully.
Oct 11 02:08:28 compute-0 ceph-mon[191930]: pgmap v908: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:28 compute-0 sudo[378133]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fehptftczadanyfljleitpvhftxzufcz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148508.2539277-912-37186340526503/AnsiballZ_file.py'
Oct 11 02:08:28 compute-0 sudo[378133]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:28 compute-0 podman[378091]: 2025-10-11 02:08:28.858748319 +0000 UTC m=+0.140397648 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, name=ubi9-minimal, managed_by=edpm_ansible, container_name=openstack_network_exporter, vendor=Red Hat, Inc., io.openshift.tags=minimal rhel9, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.openshift.expose-services=, maintainer=Red Hat, Inc., vcs-type=git, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350, version=9.6, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, architecture=x86_64, com.redhat.component=ubi9-minimal-container, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.buildah.version=1.33.7, url=https://catalog.redhat.com/en/search?searchType=containers, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., build-date=2025-08-20T13:12:41)
Oct 11 02:08:29 compute-0 python3.9[378140]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/openstack_network_exporter recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:08:29 compute-0 sshd-session[377560]: Connection closed by authenticating user root 121.227.153.123 port 38814 [preauth]
Oct 11 02:08:29 compute-0 sudo[378133]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v909: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:29 compute-0 podman[157119]: time="2025-10-11T02:08:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:08:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:08:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:08:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:08:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8514 "" "Go-http-client/1.1"
Oct 11 02:08:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:08:30 compute-0 sudo[378293]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dgcewnwdyfmohnbfpkfzwiebabxarpul ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148509.5242312-921-52770307272748/AnsiballZ_podman_container_info.py'
Oct 11 02:08:30 compute-0 sudo[378293]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:30 compute-0 python3.9[378295]: ansible-containers.podman.podman_container_info Invoked with name=['ceilometer_agent_ipmi'] executable=podman
Oct 11 02:08:30 compute-0 sudo[378293]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:30 compute-0 unix_chkpwd[378328]: password check failed for user (root)
Oct 11 02:08:30 compute-0 sshd-session[378165]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:08:30 compute-0 ceph-mon[191930]: pgmap v909: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v910: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:31 compute-0 openstack_network_exporter[374316]: ERROR   02:08:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:08:31 compute-0 openstack_network_exporter[374316]: ERROR   02:08:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:08:31 compute-0 openstack_network_exporter[374316]: ERROR   02:08:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:08:31 compute-0 openstack_network_exporter[374316]: ERROR   02:08:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:08:31 compute-0 openstack_network_exporter[374316]: ERROR   02:08:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:08:31 compute-0 sshd-session[378165]: Failed password for root from 121.227.153.123 port 56232 ssh2
Oct 11 02:08:32 compute-0 podman[378410]: 2025-10-11 02:08:32.249224569 +0000 UTC m=+0.141303270 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, config_id=edpm)
Oct 11 02:08:32 compute-0 sshd-session[378165]: Connection closed by authenticating user root 121.227.153.123 port 56232 [preauth]
Oct 11 02:08:32 compute-0 sudo[378480]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sbvlyavvqccpqbvhgkpmmnviryiloeoj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148510.8382013-929-257913550014945/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:32 compute-0 sudo[378480]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:32 compute-0 python3.9[378482]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=ceilometer_agent_ipmi detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:32 compute-0 ceph-mon[191930]: pgmap v910: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:32 compute-0 systemd[1]: Started libpod-conmon-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.scope.
Oct 11 02:08:32 compute-0 podman[378485]: 2025-10-11 02:08:32.910014275 +0000 UTC m=+0.138470826 container exec 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.build-date=20251009, container_name=ceilometer_agent_ipmi, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=edpm, org.label-schema.license=GPLv2)
Oct 11 02:08:32 compute-0 podman[378485]: 2025-10-11 02:08:32.946431741 +0000 UTC m=+0.174888242 container exec_died 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, org.label-schema.license=GPLv2, container_name=ceilometer_agent_ipmi, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']})
Oct 11 02:08:33 compute-0 systemd[1]: libpod-conmon-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.scope: Deactivated successfully.
Oct 11 02:08:33 compute-0 sudo[378480]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v911: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:33 compute-0 unix_chkpwd[378645]: password check failed for user (root)
Oct 11 02:08:33 compute-0 sshd-session[378483]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:08:33 compute-0 sudo[378663]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vjjomigtnxsaitkvivbtwaqctftlmvwa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148513.301538-937-224175200643142/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:33 compute-0 sudo[378663]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:34 compute-0 python3.9[378665]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=ceilometer_agent_ipmi detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:34 compute-0 systemd[1]: Started libpod-conmon-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.scope.
Oct 11 02:08:34 compute-0 podman[378666]: 2025-10-11 02:08:34.326809588 +0000 UTC m=+0.168476437 container exec 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible, config_id=edpm, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:08:34 compute-0 podman[378666]: 2025-10-11 02:08:34.362858188 +0000 UTC m=+0.204524937 container exec_died 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, org.label-schema.vendor=CentOS, config_id=edpm, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible)
Oct 11 02:08:34 compute-0 sudo[378663]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:34 compute-0 systemd[1]: libpod-conmon-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.scope: Deactivated successfully.
Oct 11 02:08:34 compute-0 ceph-mon[191930]: pgmap v911: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:08:35 compute-0 sudo[378846]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dtvtraijupiatabagmwtawbklqgilvui ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148514.691644-945-202786308554474/AnsiballZ_file.py'
Oct 11 02:08:35 compute-0 sudo[378846]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v912: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:35 compute-0 python3.9[378848]: ansible-ansible.builtin.file Invoked with group=42405 mode=0700 owner=42405 path=/var/lib/openstack/healthchecks/ceilometer_agent_ipmi recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:08:35 compute-0 sudo[378846]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:36 compute-0 sshd-session[378483]: Failed password for root from 121.227.153.123 port 56236 ssh2
Oct 11 02:08:36 compute-0 sudo[378998]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-egcqigclffgtbtfchbbfsmibbjhuqupz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148516.031886-954-280212410489628/AnsiballZ_podman_container_info.py'
Oct 11 02:08:36 compute-0 sudo[378998]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:36 compute-0 ceph-mon[191930]: pgmap v912: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:36 compute-0 python3.9[379000]: ansible-containers.podman.podman_container_info Invoked with name=['kepler'] executable=podman
Oct 11 02:08:37 compute-0 sudo[378998]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:37 compute-0 sshd-session[378483]: Connection closed by authenticating user root 121.227.153.123 port 56236 [preauth]
Oct 11 02:08:37 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:56250 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v913: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:37 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:56266 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:37 compute-0 sudo[379162]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fgqdehykaulwxvipwwynzihsntzalksn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148517.3693712-962-257962354984186/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:37 compute-0 sudo[379162]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:38 compute-0 python3.9[379164]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=kepler detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:38 compute-0 systemd[1]: Started libpod-conmon-e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304.scope.
Oct 11 02:08:38 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:56272 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:38 compute-0 podman[379165]: 2025-10-11 02:08:38.416467193 +0000 UTC m=+0.164495362 container exec e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, build-date=2024-09-18T21:23:30, managed_by=edpm_ansible, container_name=kepler, io.k8s.display-name=Red Hat Universal Base Image 9, version=9.4, io.openshift.expose-services=, maintainer=Red Hat, Inc., io.openshift.tags=base rhel9, name=ubi9, release=1214.1726694543, config_id=edpm, summary=Provides the latest release of Red Hat Universal Base Image 9., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.buildah.version=1.29.0, release-0.7.12=, architecture=x86_64, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, vendor=Red Hat, Inc., com.redhat.component=ubi9-container)
Oct 11 02:08:38 compute-0 podman[379165]: 2025-10-11 02:08:38.433192688 +0000 UTC m=+0.181220857 container exec_died e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, container_name=kepler, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.tags=base rhel9, version=9.4, io.buildah.version=1.29.0, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., name=ubi9, maintainer=Red Hat, Inc., release-0.7.12=, architecture=x86_64, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, com.redhat.component=ubi9-container, distribution-scope=public, io.openshift.expose-services=, build-date=2024-09-18T21:23:30, vendor=Red Hat, Inc., release=1214.1726694543, config_id=edpm, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543)
Oct 11 02:08:38 compute-0 sudo[379162]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:38 compute-0 systemd[1]: libpod-conmon-e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304.scope: Deactivated successfully.
Oct 11 02:08:38 compute-0 nova_compute[356901]: 2025-10-11 02:08:38.589 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:08:38 compute-0 nova_compute[356901]: 2025-10-11 02:08:38.591 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:08:38 compute-0 nova_compute[356901]: 2025-10-11 02:08:38.592 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:08:38 compute-0 ceph-mon[191930]: pgmap v913: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:38 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:56282 on [38.102.83.82]:22 penalty: failed authentication
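
These recurring `sshd ... penalty: failed authentication` drops are sshd refusing a source address that has accumulated an authentication-failure penalty; the message shape matches OpenSSH's per-source penalty mechanism (`PerSourcePenalties`, enabled by default in recent OpenSSH releases). A quick tally of offenders from this journal, as a sketch assuming journal read access:

    # Sketch: count sshd penalty drops per source IP from the journal.
    import re
    import subprocess
    from collections import Counter

    out = subprocess.run(
        ["journalctl", "-t", "sshd", "--no-pager", "-o", "cat"],
        capture_output=True, text=True,
    ).stdout
    drops = Counter(
        m.group(1) for m in re.finditer(
            r"drop connection #\d+ from \[([0-9a-fA-F.:]+)\]:\d+ .*penalty", out)
    )
    for ip, count in drops.most_common():
        print(ip, count)
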
Oct 11 02:08:38 compute-0 nova_compute[356901]: 2025-10-11 02:08:38.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:08:38 compute-0 nova_compute[356901]: 2025-10-11 02:08:38.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:08:39 compute-0 nova_compute[356901]: 2025-10-11 02:08:39.062 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:08:39 compute-0 nova_compute[356901]: 2025-10-11 02:08:39.063 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:08:39 compute-0 nova_compute[356901]: 2025-10-11 02:08:39.064 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
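
The Acquiring/acquired/released trio above is oslo.concurrency's standard DEBUG trace around an internal lock; nova's resource tracker serializes its audit passes on the `compute_resources` lock this way. A minimal sketch of the same pattern, assuming oslo.concurrency is installed:

    # Sketch of the locking pattern behind the three DEBUG lines above.
    # lockutils logs "Acquiring lock", "Lock ... acquired" (with wait time)
    # and "Lock ... released" (with held time) around the decorated body.
    from oslo_concurrency import lockutils

    @lockutils.synchronized("compute_resources")
    def clean_compute_node_cache():
        pass  # critical section

    clean_compute_node_cache()
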
Oct 11 02:08:39 compute-0 nova_compute[356901]: 2025-10-11 02:08:39.064 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:08:39 compute-0 nova_compute[356901]: 2025-10-11 02:08:39.065 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:08:39 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44614 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v914: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:39 compute-0 sudo[379366]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jbltidquqjqkfohrjamfwyetmncmdfhl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148518.7430744-970-220787178251609/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:39 compute-0 sudo[379366]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:08:39 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/649412189' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:08:39 compute-0 nova_compute[356901]: 2025-10-11 02:08:39.546 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.480s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
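
Each resource-audit pass shells out to `ceph df --format=json` (about 0.48 s here) to size the RBD-backed disk pool. The same probe as a standalone sketch, assuming /etc/ceph/ceph.conf and the client.openstack keyring from the log are in place:

    # Sketch: the "ceph df" probe from the log, parsed as JSON.
    import json
    import subprocess

    raw = subprocess.run(
        ["ceph", "df", "--format=json", "--id", "openstack",
         "--conf", "/etc/ceph/ceph.conf"],
        capture_output=True, text=True, check=True,
    ).stdout
    stats = json.loads(raw)["stats"]
    print("total avail bytes:", stats["total_avail_bytes"])
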
Oct 11 02:08:39 compute-0 python3.9[379368]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=kepler detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:39 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/649412189' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:08:39 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44628 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:39 compute-0 systemd[1]: Started libpod-conmon-e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304.scope.
Oct 11 02:08:39 compute-0 podman[379371]: 2025-10-11 02:08:39.949366364 +0000 UTC m=+0.158542186 container exec e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.openshift.tags=base rhel9, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.expose-services=, container_name=kepler, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, release-0.7.12=, distribution-scope=public, maintainer=Red Hat, Inc., managed_by=edpm_ansible, release=1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, vendor=Red Hat, Inc., summary=Provides the latest release of Red Hat Universal Base Image 9., architecture=x86_64, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, version=9.4, io.buildah.version=1.29.0, com.redhat.component=ubi9-container, build-date=2024-09-18T21:23:30)
Oct 11 02:08:39 compute-0 podman[379371]: 2025-10-11 02:08:39.984029978 +0000 UTC m=+0.193205770 container exec_died e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, io.k8s.display-name=Red Hat Universal Base Image 9, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, distribution-scope=public, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, release-0.7.12=, architecture=x86_64, io.openshift.tags=base rhel9, container_name=kepler, managed_by=edpm_ansible, name=ubi9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.buildah.version=1.29.0, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, version=9.4, maintainer=Red Hat, Inc., release=1214.1726694543, vendor=Red Hat, Inc., com.redhat.component=ubi9-container, summary=Provides the latest release of Red Hat Universal Base Image 9.)
Oct 11 02:08:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:08:40 compute-0 sudo[379366]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:40 compute-0 systemd[1]: libpod-conmon-e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304.scope: Deactivated successfully.
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #45. Immutable memtables: 0.
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:08:40.052061) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 21] Flushing memtable with next log file: 45
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148520052101, "job": 21, "event": "flush_started", "num_memtables": 1, "num_entries": 820, "num_deletes": 257, "total_data_size": 1087040, "memory_usage": 1110784, "flush_reason": "Manual Compaction"}
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 21] Level-0 flush table #46: started
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148520060361, "cf_name": "default", "job": 21, "event": "table_file_creation", "file_number": 46, "file_size": 1066698, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 18643, "largest_seqno": 19462, "table_properties": {"data_size": 1062568, "index_size": 1846, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1221, "raw_key_size": 8684, "raw_average_key_size": 18, "raw_value_size": 1054246, "raw_average_value_size": 2219, "num_data_blocks": 84, "num_entries": 475, "num_filter_entries": 475, "num_deletions": 257, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760148450, "oldest_key_time": 1760148450, "file_creation_time": 1760148520, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 46, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 21] Flush lasted 8347 microseconds, and 3571 cpu microseconds.
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:08:40.060409) [db/flush_job.cc:967] [default] [JOB 21] Level-0 flush table #46: 1066698 bytes OK
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:08:40.060427) [db/memtable_list.cc:519] [default] Level-0 commit table #46 started
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:08:40.062124) [db/memtable_list.cc:722] [default] Level-0 commit table #46: memtable #1 done
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:08:40.062141) EVENT_LOG_v1 {"time_micros": 1760148520062135, "job": 21, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:08:40.062162) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 21] Try to delete WAL files size 1082939, prev total WAL file size 1082939, number of live WAL files 2.
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000042.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:08:40.062807) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6C6F676D00323531' seq:72057594037927935, type:22 .. '6C6F676D00353034' seq:0, type:0; will stop at (end)
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 22] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 21 Base level 0, inputs: [46(1041KB)], [44(5996KB)]
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148520062857, "job": 22, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [46], "files_L6": [44], "score": -1, "input_data_size": 7206954, "oldest_snapshot_seqno": -1}
Oct 11 02:08:40 compute-0 nova_compute[356901]: 2025-10-11 02:08:40.066 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:08:40 compute-0 nova_compute[356901]: 2025-10-11 02:08:40.069 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=4580MB free_disk=59.98828125GB free_vcpus=8 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:08:40 compute-0 nova_compute[356901]: 2025-10-11 02:08:40.069 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:08:40 compute-0 nova_compute[356901]: 2025-10-11 02:08:40.070 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 22] Generated table #47: 4093 keys, 7068192 bytes, temperature: kUnknown
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148520109410, "cf_name": "default", "job": 22, "event": "table_file_creation", "file_number": 47, "file_size": 7068192, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 7040195, "index_size": 16687, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 10245, "raw_key_size": 101372, "raw_average_key_size": 24, "raw_value_size": 6965307, "raw_average_value_size": 1701, "num_data_blocks": 702, "num_entries": 4093, "num_filter_entries": 4093, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760148520, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 47, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:08:40.109691) [db/compaction/compaction_job.cc:1663] [default] [JOB 22] Compacted 1@0 + 1@6 files to L6 => 7068192 bytes
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:08:40.111547) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 154.4 rd, 151.4 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(1.0, 5.9 +0.0 blob) out(6.7 +0.0 blob), read-write-amplify(13.4) write-amplify(6.6) OK, records in: 4619, records dropped: 526 output_compression: NoCompression
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:08:40.111568) EVENT_LOG_v1 {"time_micros": 1760148520111557, "job": 22, "event": "compaction_finished", "compaction_time_micros": 46674, "compaction_time_cpu_micros": 29334, "output_level": 6, "num_output_files": 1, "total_output_size": 7068192, "num_input_records": 4619, "num_output_records": 4093, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
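
The amplification figures in the summary above follow directly from the EVENT_LOG numbers: job 22 read table #46 (1,066,698 bytes, L0) plus table #44 (input_data_size 7,206,954 in total) and wrote table #47 (7,068,192 bytes) in 46,674 microseconds. Cross-checked:

    # Cross-check of the rocksdb compaction summary from the logged figures.
    l0_in    = 1_066_698   # table #46, the L0 input
    total_in = 7_206_954   # input_data_size (L0 + L6 inputs)
    out      = 7_068_192   # total_output_size (table #47)
    micros   = 46_674      # compaction_time_micros

    print(round(out / l0_in, 1))               # 6.6  -> write-amplify
    print(round((total_in + out) / l0_in, 1))  # 13.4 -> read-write-amplify
    print(round(total_in / micros, 1))         # 154.4 -> rd MB/sec
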
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000046.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148520113113, "job": 22, "event": "table_file_deletion", "file_number": 46}
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000044.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148520115040, "job": 22, "event": "table_file_deletion", "file_number": 44}
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:08:40.062712) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:08:40.115664) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:08:40.115673) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:08:40.115675) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:08:40.115676) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:08:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:08:40.115678) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:08:40 compute-0 nova_compute[356901]: 2025-10-11 02:08:40.156 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 0 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:08:40 compute-0 nova_compute[356901]: 2025-10-11 02:08:40.157 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=512MB phys_disk=59GB used_disk=0GB total_vcpus=8 used_vcpus=0 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:08:40 compute-0 nova_compute[356901]: 2025-10-11 02:08:40.179 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:08:40 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44632 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:08:40 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2840695597' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:08:40 compute-0 nova_compute[356901]: 2025-10-11 02:08:40.663 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.484s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:08:40 compute-0 nova_compute[356901]: 2025-10-11 02:08:40.674 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:08:40 compute-0 nova_compute[356901]: 2025-10-11 02:08:40.694 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
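
The inventory dict above is what placement turns into schedulable capacity per resource class, as capacity = (total - reserved) * allocation_ratio: 32 VCPU, 7168 MB of RAM and about 53 GB of disk for this node. Worked out:

    # Capacity math behind the reported inventory:
    # capacity = (total - reserved) * allocation_ratio, per resource class.
    inventory = {
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "VCPU":      {"total": 8,    "reserved": 0,   "allocation_ratio": 4.0},
        "DISK_GB":   {"total": 59,   "reserved": 0,   "allocation_ratio": 0.9},
    }
    for rc, inv in inventory.items():
        print(rc, (inv["total"] - inv["reserved"]) * inv["allocation_ratio"])
    # MEMORY_MB 7168.0, VCPU 32.0, DISK_GB 53.1
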
Oct 11 02:08:40 compute-0 nova_compute[356901]: 2025-10-11 02:08:40.697 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:08:40 compute-0 nova_compute[356901]: 2025-10-11 02:08:40.698 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.628s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:08:40 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44648 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:40 compute-0 sudo[379573]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vbhfpekapigcikiouaklbmicqodhpzia ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148520.3058064-978-267612393855710/AnsiballZ_file.py'
Oct 11 02:08:40 compute-0 sudo[379573]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:40 compute-0 ceph-mon[191930]: pgmap v914: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:40 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2840695597' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:08:41 compute-0 python3.9[379575]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/kepler recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
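
The ansible.builtin.file task above forces owner/group 0 and mode 0700 down the kepler healthcheck directory (state=directory, recurse=True). Roughly equivalent standalone code, as a sketch rather than the module's actual implementation (requires root):

    # Rough standalone equivalent of the ansible.builtin.file task above;
    # path, owner, group and mode come from the logged invocation.
    import os

    path = "/var/lib/openstack/healthchecks/kepler"
    os.makedirs(path, exist_ok=True)                 # state=directory
    for root, dirs, files in os.walk(path):          # recurse=True
        for p in [root] + [os.path.join(root, f) for f in files]:
            os.chown(p, 0, 0)                        # owner=0, group=0
            os.chmod(p, 0o700)                       # mode=0700
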
Oct 11 02:08:41 compute-0 sudo[379573]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:41 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44662 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v915: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:41 compute-0 nova_compute[356901]: 2025-10-11 02:08:41.698 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:08:41 compute-0 nova_compute[356901]: 2025-10-11 02:08:41.699 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:08:41 compute-0 nova_compute[356901]: 2025-10-11 02:08:41.699 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:08:41 compute-0 nova_compute[356901]: 2025-10-11 02:08:41.717 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944
Oct 11 02:08:41 compute-0 nova_compute[356901]: 2025-10-11 02:08:41.718 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:08:41 compute-0 nova_compute[356901]: 2025-10-11 02:08:41.719 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:08:41 compute-0 nova_compute[356901]: 2025-10-11 02:08:41.719 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:08:41 compute-0 nova_compute[356901]: 2025-10-11 02:08:41.720 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:08:41 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44668 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:41 compute-0 sudo[379725]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eizaeaztpseugekzqxlvvarqjnbzanlh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148521.4289854-987-88565936436988/AnsiballZ_podman_container_info.py'
Oct 11 02:08:41 compute-0 sudo[379725]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:42 compute-0 python3.9[379727]: ansible-containers.podman.podman_container_info Invoked with name=['ovn_metadata_agent'] executable=podman
Oct 11 02:08:42 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44684 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:42 compute-0 sudo[379725]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:42 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44692 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:42 compute-0 ceph-mon[191930]: pgmap v915: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:43 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44696 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:43 compute-0 podman[379860]: 2025-10-11 02:08:43.273221341 +0000 UTC m=+0.153895316 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, managed_by=edpm_ansible, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, org.label-schema.name=CentOS Stream 10 Base Image, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, tcib_managed=true, container_name=ceilometer_agent_compute)
Oct 11 02:08:43 compute-0 sudo[379908]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-prgchupradntccwhpijsaivkmsimbvqh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148522.6735394-995-257128283389660/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:43 compute-0 sudo[379908]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v916: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:43 compute-0 python3.9[379911]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=ovn_metadata_agent detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:43 compute-0 systemd[1]: Started libpod-conmon-c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3.scope.
Oct 11 02:08:43 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44706 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:43 compute-0 podman[379912]: 2025-10-11 02:08:43.708617814 +0000 UTC m=+0.164652370 container exec c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.schema-version=1.0, container_name=ovn_metadata_agent, tcib_managed=true, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:08:43 compute-0 podman[379912]: 2025-10-11 02:08:43.74456967 +0000 UTC m=+0.200604226 container exec_died c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, org.label-schema.schema-version=1.0, container_name=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:08:43 compute-0 sudo[379908]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:43 compute-0 systemd[1]: libpod-conmon-c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3.scope: Deactivated successfully.
Oct 11 02:08:44 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44714 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:44 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44718 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:44 compute-0 podman[379993]: 2025-10-11 02:08:44.846207237 +0000 UTC m=+0.104239903 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=ovn_metadata_agent, org.label-schema.build-date=20251009, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:08:44 compute-0 podman[379991]: 2025-10-11 02:08:44.853921577 +0000 UTC m=+0.125786869 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:08:44 compute-0 podman[379994]: 2025-10-11 02:08:44.887184983 +0000 UTC m=+0.140363260 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9., io.buildah.version=1.29.0, io.openshift.expose-services=, container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, io.k8s.display-name=Red Hat Universal Base Image 9, release=1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, architecture=x86_64, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, com.redhat.component=ubi9-container, config_id=edpm, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vendor=Red Hat, Inc., version=9.4, io.openshift.tags=base rhel9, build-date=2024-09-18T21:23:30, name=ubi9, release-0.7.12=, vcs-type=git, maintainer=Red Hat, Inc.)
Oct 11 02:08:44 compute-0 ceph-mon[191930]: pgmap v916: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:44 compute-0 podman[379992]: 2025-10-11 02:08:44.928389292 +0000 UTC m=+0.197083780 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, container_name=ovn_controller, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.vendor=CentOS)
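
The burst of health_status=healthy events above (ceilometer_agent_compute, podman_exporter, kepler, ovn_metadata_agent, ovn_controller; multipathd and iscsid follow below) is podman firing each container's configured healthcheck on its timer. A sketch that scrapes the latest status per container from the journal, assuming the label ordering seen in these messages (name= before health_status=):

    # Sketch: latest health_status per container, scraped from the journal.
    import re
    import subprocess

    out = subprocess.run(
        ["journalctl", "-t", "podman", "--no-pager", "-o", "cat"],
        capture_output=True, text=True,
    ).stdout
    status = {}
    for m in re.finditer(
            r"container health_status .*?\bname=([\w.-]+), .*?health_status=(\w+)",
            out):
        status[m.group(1)] = m.group(2)  # later events win
    print(status)
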
Oct 11 02:08:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:08:45 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44732 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:45 compute-0 sudo[380180]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-frugesbsizxievyftcocpwohitvnfnlm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148524.2504032-1003-184127051518496/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v917: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:45 compute-0 sudo[380180]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:45 compute-0 python3.9[380182]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=ovn_metadata_agent detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:45 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44740 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:45 compute-0 systemd[1]: Started libpod-conmon-c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3.scope.
Oct 11 02:08:45 compute-0 podman[380183]: 2025-10-11 02:08:45.947348238 +0000 UTC m=+0.206202460 container exec c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:08:45 compute-0 podman[380183]: 2025-10-11 02:08:45.987053065 +0000 UTC m=+0.245907317 container exec_died c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, container_name=ovn_metadata_agent, managed_by=edpm_ansible)
Oct 11 02:08:46 compute-0 sudo[380180]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:46 compute-0 systemd[1]: libpod-conmon-c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3.scope: Deactivated successfully.
Oct 11 02:08:46 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44746 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:46 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44756 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:46 compute-0 ceph-mon[191930]: pgmap v917: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:47 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44758 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v918: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:47 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44768 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:47 compute-0 sudo[380394]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kwvgsiyxytwzeemrlyiossbxdtohtonj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148526.4203274-1011-88904254418960/AnsiballZ_file.py'
Oct 11 02:08:47 compute-0 sudo[380394]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:47 compute-0 podman[380337]: 2025-10-11 02:08:47.821544938 +0000 UTC m=+0.147627064 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, config_id=multipathd, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd)
Oct 11 02:08:47 compute-0 podman[380338]: 2025-10-11 02:08:47.836577516 +0000 UTC m=+0.156286196 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, config_id=iscsid, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:08:48 compute-0 python3.9[380402]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/ovn_metadata_agent recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:08:48 compute-0 sudo[380394]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:48 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44774 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:48 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:44784 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:48 compute-0 ceph-mon[191930]: pgmap v918: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:49 compute-0 sudo[380552]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-llirjjpgkduyeqatncdqqbxztimafwsq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148528.3744376-1020-93015629270547/AnsiballZ_podman_container_info.py'
Oct 11 02:08:49 compute-0 sudo[380552]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:49 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:52742 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:49 compute-0 python3.9[380554]: ansible-containers.podman.podman_container_info Invoked with name=['iscsid'] executable=podman
Oct 11 02:08:49 compute-0 sudo[380552]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v919: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:49 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:52756 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:49 compute-0 ceph-mon[191930]: pgmap v919: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:08:50 compute-0 sudo[380716]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-glhnxznkmlmhzlewgpwhmjrkxghrotia ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148529.7417283-1028-189496049943133/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:50 compute-0 sudo[380716]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:50 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:52770 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:50 compute-0 sudo[380719]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:08:50 compute-0 sudo[380719]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:08:50 compute-0 sudo[380719]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:50 compute-0 python3.9[380718]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=iscsid detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:50 compute-0 sudo[380744]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:08:50 compute-0 sudo[380744]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:08:50 compute-0 sudo[380744]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:50 compute-0 systemd[1]: Started libpod-conmon-b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e.scope.
Oct 11 02:08:50 compute-0 podman[380747]: 2025-10-11 02:08:50.699887539 +0000 UTC m=+0.162396259 container exec b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=iscsid, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=iscsid, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20251009)
Oct 11 02:08:50 compute-0 podman[380747]: 2025-10-11 02:08:50.709021433 +0000 UTC m=+0.171530123 container exec_died b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, container_name=iscsid, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=iscsid, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:08:50 compute-0 sudo[380716]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:50 compute-0 sudo[380783]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:08:50 compute-0 sudo[380783]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:08:50 compute-0 sudo[380783]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:50 compute-0 systemd[1]: libpod-conmon-b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e.scope: Deactivated successfully.
Oct 11 02:08:50 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:52772 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:50 compute-0 sudo[380825]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:08:50 compute-0 sudo[380825]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:08:51 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:52786 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v920: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:51 compute-0 sudo[380825]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:08:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:08:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:08:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:08:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:08:51 compute-0 sudo[381029]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wekcjnqnpvhyeuddttgknxcvkkqurswn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148531.0577981-1036-41131246643940/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:51 compute-0 sudo[381029]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:08:51 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 6246ba04-46d0-4578-9f5d-27049ce509de does not exist
Oct 11 02:08:51 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 7355d554-e480-47f5-a425-94947ca02e43 does not exist
Oct 11 02:08:51 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 88d79477-2d51-4779-9210-0970824fdfc7 does not exist
Oct 11 02:08:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:08:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:08:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:08:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:08:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:08:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:08:51 compute-0 sudo[381032]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:08:51 compute-0 sudo[381032]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:08:51 compute-0 sudo[381032]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:51 compute-0 python3.9[381031]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=iscsid detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:51 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:52788 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:51 compute-0 sudo[381057]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:08:51 compute-0 sudo[381057]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:08:51 compute-0 sudo[381057]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:51 compute-0 systemd[1]: Started libpod-conmon-b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e.scope.
Oct 11 02:08:51 compute-0 podman[381061]: 2025-10-11 02:08:51.951088384 +0000 UTC m=+0.131073160 container exec b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:08:51 compute-0 podman[381061]: 2025-10-11 02:08:51.989148575 +0000 UTC m=+0.169133351 container exec_died b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, container_name=iscsid)
Oct 11 02:08:52 compute-0 sudo[381093]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:08:52 compute-0 sudo[381093]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:08:52 compute-0 sudo[381093]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:52 compute-0 systemd[1]: libpod-conmon-b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e.scope: Deactivated successfully.
Oct 11 02:08:52 compute-0 sudo[381029]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:52 compute-0 sudo[381136]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:08:52 compute-0 sudo[381136]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:08:52 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:52800 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:52 compute-0 ceph-mon[191930]: pgmap v920: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:08:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:08:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:08:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:08:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:08:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:08:52 compute-0 podman[381284]: 2025-10-11 02:08:52.682341954 +0000 UTC m=+0.091848204 container create d84bb597190ecf1f069e62356ed1ed33014d449bfbfb9efda566555eb38c083d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_mendeleev, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:08:52 compute-0 podman[381284]: 2025-10-11 02:08:52.6503612 +0000 UTC m=+0.059867540 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:08:52 compute-0 systemd[1]: Started libpod-conmon-d84bb597190ecf1f069e62356ed1ed33014d449bfbfb9efda566555eb38c083d.scope.
Oct 11 02:08:52 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:52816 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:08:52 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:08:52 compute-0 podman[381284]: 2025-10-11 02:08:52.823553182 +0000 UTC m=+0.233059462 container init d84bb597190ecf1f069e62356ed1ed33014d449bfbfb9efda566555eb38c083d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_mendeleev, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_REF=reef)
Oct 11 02:08:52 compute-0 podman[381284]: 2025-10-11 02:08:52.842751887 +0000 UTC m=+0.252258167 container start d84bb597190ecf1f069e62356ed1ed33014d449bfbfb9efda566555eb38c083d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_mendeleev, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_REF=reef)
Oct 11 02:08:52 compute-0 podman[381284]: 2025-10-11 02:08:52.851433975 +0000 UTC m=+0.260940305 container attach d84bb597190ecf1f069e62356ed1ed33014d449bfbfb9efda566555eb38c083d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_mendeleev, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.vendor=CentOS)
Oct 11 02:08:52 compute-0 objective_mendeleev[381336]: 167 167
Oct 11 02:08:52 compute-0 systemd[1]: libpod-d84bb597190ecf1f069e62356ed1ed33014d449bfbfb9efda566555eb38c083d.scope: Deactivated successfully.
Oct 11 02:08:52 compute-0 podman[381284]: 2025-10-11 02:08:52.858410472 +0000 UTC m=+0.267916782 container died d84bb597190ecf1f069e62356ed1ed33014d449bfbfb9efda566555eb38c083d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_mendeleev, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 02:08:52 compute-0 systemd[1]: var-lib-containers-storage-overlay-c65672fedf7769e88a55ac09cd37ce1b5dffeefe707e44e0a40a7de14c2d140d-merged.mount: Deactivated successfully.
Oct 11 02:08:52 compute-0 podman[381284]: 2025-10-11 02:08:52.966474071 +0000 UTC m=+0.375980351 container remove d84bb597190ecf1f069e62356ed1ed33014d449bfbfb9efda566555eb38c083d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_mendeleev, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True)
Oct 11 02:08:53 compute-0 systemd[1]: libpod-conmon-d84bb597190ecf1f069e62356ed1ed33014d449bfbfb9efda566555eb38c083d.scope: Deactivated successfully.
Oct 11 02:08:53 compute-0 sudo[381384]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zlkjotzbqzadmnohmbglhufiqlvormxo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148532.3553817-1044-152506527663871/AnsiballZ_file.py'
Oct 11 02:08:53 compute-0 sudo[381384]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:53 compute-0 podman[381392]: 2025-10-11 02:08:53.241709659 +0000 UTC m=+0.088504290 container create 81cbd6af62cd2e04850dcb265e6681176714ac48d1520b5a69fe37079ea203b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hamilton, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:08:53 compute-0 podman[381392]: 2025-10-11 02:08:53.208841466 +0000 UTC m=+0.055636117 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:08:53 compute-0 python3.9[381386]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/iscsid recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:08:53 compute-0 sudo[381384]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:53 compute-0 systemd[1]: Started libpod-conmon-81cbd6af62cd2e04850dcb265e6681176714ac48d1520b5a69fe37079ea203b7.scope.
Oct 11 02:08:53 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:08:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d3b5ea4505a7d6132cc8e5326a6d739557ee3cc9cb2e9b1c01544213376b52eb/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:08:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d3b5ea4505a7d6132cc8e5326a6d739557ee3cc9cb2e9b1c01544213376b52eb/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:08:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d3b5ea4505a7d6132cc8e5326a6d739557ee3cc9cb2e9b1c01544213376b52eb/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:08:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d3b5ea4505a7d6132cc8e5326a6d739557ee3cc9cb2e9b1c01544213376b52eb/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:08:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d3b5ea4505a7d6132cc8e5326a6d739557ee3cc9cb2e9b1c01544213376b52eb/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:08:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v921: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:53 compute-0 podman[381392]: 2025-10-11 02:08:53.442839968 +0000 UTC m=+0.289634649 container init 81cbd6af62cd2e04850dcb265e6681176714ac48d1520b5a69fe37079ea203b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hamilton, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 02:08:53 compute-0 podman[381392]: 2025-10-11 02:08:53.47660055 +0000 UTC m=+0.323395171 container start 81cbd6af62cd2e04850dcb265e6681176714ac48d1520b5a69fe37079ea203b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hamilton, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:08:53 compute-0 podman[381392]: 2025-10-11 02:08:53.486736579 +0000 UTC m=+0.333531240 container attach 81cbd6af62cd2e04850dcb265e6681176714ac48d1520b5a69fe37079ea203b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hamilton, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:08:54 compute-0 sudo[381565]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jaxxcjugtbripdnfvmqcmwlryeeqrsuq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148533.712532-1053-82484467941340/AnsiballZ_podman_container_info.py'
Oct 11 02:08:54 compute-0 sudo[381565]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:54 compute-0 python3.9[381570]: ansible-containers.podman.podman_container_info Invoked with name=['multipathd'] executable=podman
Oct 11 02:08:54 compute-0 ceph-mon[191930]: pgmap v921: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:54 compute-0 sudo[381565]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:54 compute-0 funny_hamilton[381407]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:08:54 compute-0 funny_hamilton[381407]: --> relative data size: 1.0
Oct 11 02:08:54 compute-0 funny_hamilton[381407]: --> All data devices are unavailable
Oct 11 02:08:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:08:54.827 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:08:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:08:54.829 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:08:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:08:54.830 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:08:54 compute-0 systemd[1]: libpod-81cbd6af62cd2e04850dcb265e6681176714ac48d1520b5a69fe37079ea203b7.scope: Deactivated successfully.
Oct 11 02:08:54 compute-0 systemd[1]: libpod-81cbd6af62cd2e04850dcb265e6681176714ac48d1520b5a69fe37079ea203b7.scope: Consumed 1.338s CPU time.
Oct 11 02:08:54 compute-0 podman[381392]: 2025-10-11 02:08:54.897372427 +0000 UTC m=+1.744167058 container died 81cbd6af62cd2e04850dcb265e6681176714ac48d1520b5a69fe37079ea203b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hamilton, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:08:54 compute-0 systemd[1]: var-lib-containers-storage-overlay-d3b5ea4505a7d6132cc8e5326a6d739557ee3cc9cb2e9b1c01544213376b52eb-merged.mount: Deactivated successfully.
Oct 11 02:08:55 compute-0 podman[381392]: 2025-10-11 02:08:55.004207628 +0000 UTC m=+1.851002229 container remove 81cbd6af62cd2e04850dcb265e6681176714ac48d1520b5a69fe37079ea203b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hamilton, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 02:08:55 compute-0 systemd[1]: libpod-conmon-81cbd6af62cd2e04850dcb265e6681176714ac48d1520b5a69fe37079ea203b7.scope: Deactivated successfully.
Oct 11 02:08:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:08:55 compute-0 sudo[381136]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:55 compute-0 sudo[381667]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:08:55 compute-0 sudo[381667]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:08:55 compute-0 sudo[381667]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:55 compute-0 sudo[381718]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:08:55 compute-0 sudo[381718]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:08:55 compute-0 sudo[381718]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v922: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:55 compute-0 sudo[381767]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:08:55 compute-0 sudo[381767]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:08:55 compute-0 sudo[381767]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:55 compute-0 sudo[381816]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:08:55 compute-0 sudo[381861]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bemmtjvmhdmhkghqgcrerhvekqhpkeeg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148535.0265243-1061-186358280373999/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:55 compute-0 sudo[381816]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:08:55 compute-0 sudo[381861]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:55 compute-0 unix_chkpwd[381866]: password check failed for user (root)
Oct 11 02:08:55 compute-0 sshd-session[381569]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:08:55 compute-0 python3.9[381865]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=multipathd detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:56 compute-0 systemd[1]: Started libpod-conmon-1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c.scope.
Oct 11 02:08:56 compute-0 podman[381887]: 2025-10-11 02:08:56.043404473 +0000 UTC m=+0.156387540 container exec 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, container_name=multipathd, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, config_id=multipathd, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:08:56 compute-0 podman[381887]: 2025-10-11 02:08:56.07773886 +0000 UTC m=+0.190721827 container exec_died 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, config_id=multipathd, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:08:56 compute-0 sudo[381861]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:56 compute-0 systemd[1]: libpod-conmon-1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c.scope: Deactivated successfully.
Oct 11 02:08:56 compute-0 podman[381933]: 2025-10-11 02:08:56.226200699 +0000 UTC m=+0.068281464 container create cf8bc5a038f4b0c547d120efe6d0dde698cc833fff16ab913f464abcb66d54c9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_euler, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:08:56 compute-0 systemd[1]: Started libpod-conmon-cf8bc5a038f4b0c547d120efe6d0dde698cc833fff16ab913f464abcb66d54c9.scope.
Oct 11 02:08:56 compute-0 podman[381933]: 2025-10-11 02:08:56.199925663 +0000 UTC m=+0.042006528 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:08:56 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:08:56 compute-0 podman[381933]: 2025-10-11 02:08:56.360035264 +0000 UTC m=+0.202116079 container init cf8bc5a038f4b0c547d120efe6d0dde698cc833fff16ab913f464abcb66d54c9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_euler, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_REF=reef)
Oct 11 02:08:56 compute-0 podman[381933]: 2025-10-11 02:08:56.377665714 +0000 UTC m=+0.219746489 container start cf8bc5a038f4b0c547d120efe6d0dde698cc833fff16ab913f464abcb66d54c9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_euler, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:08:56 compute-0 podman[381933]: 2025-10-11 02:08:56.382628213 +0000 UTC m=+0.224709028 container attach cf8bc5a038f4b0c547d120efe6d0dde698cc833fff16ab913f464abcb66d54c9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_euler, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 02:08:56 compute-0 keen_euler[381973]: 167 167
Oct 11 02:08:56 compute-0 systemd[1]: libpod-cf8bc5a038f4b0c547d120efe6d0dde698cc833fff16ab913f464abcb66d54c9.scope: Deactivated successfully.
Oct 11 02:08:56 compute-0 podman[381933]: 2025-10-11 02:08:56.394869957 +0000 UTC m=+0.236950762 container died cf8bc5a038f4b0c547d120efe6d0dde698cc833fff16ab913f464abcb66d54c9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_euler, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:08:56 compute-0 systemd[1]: var-lib-containers-storage-overlay-0a866441d2a5513c1fa33cb91a85d676d0ac210dae132801021830ebbaa6471b-merged.mount: Deactivated successfully.
Oct 11 02:08:56 compute-0 podman[381933]: 2025-10-11 02:08:56.469519193 +0000 UTC m=+0.311599958 container remove cf8bc5a038f4b0c547d120efe6d0dde698cc833fff16ab913f464abcb66d54c9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_euler, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True)
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:08:56
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['volumes', 'default.rgw.control', 'backups', 'images', '.mgr', '.rgw.root', 'default.rgw.log', 'default.rgw.meta', 'cephfs.cephfs.data', 'vms', 'cephfs.cephfs.meta']
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:08:56 compute-0 systemd[1]: libpod-conmon-cf8bc5a038f4b0c547d120efe6d0dde698cc833fff16ab913f464abcb66d54c9.scope: Deactivated successfully.
Oct 11 02:08:56 compute-0 ceph-mon[191930]: pgmap v922: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:08:56 compute-0 podman[381998]: 2025-10-11 02:08:56.704986394 +0000 UTC m=+0.081994606 container create 267dd9e5d3c88e749b1c9a4264ee8c8bd0ddbe114c4963ef02fd7f85ab18b18e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hertz, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 02:08:56 compute-0 podman[381998]: 2025-10-11 02:08:56.668879367 +0000 UTC m=+0.045887629 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:08:56 compute-0 systemd[1]: Started libpod-conmon-267dd9e5d3c88e749b1c9a4264ee8c8bd0ddbe114c4963ef02fd7f85ab18b18e.scope.
Oct 11 02:08:56 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:08:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/05f011803badb28a6015e5e610cb475e888963f2e3b8d0fe2124bf75b48196d1/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:08:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/05f011803badb28a6015e5e610cb475e888963f2e3b8d0fe2124bf75b48196d1/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:08:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/05f011803badb28a6015e5e610cb475e888963f2e3b8d0fe2124bf75b48196d1/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:08:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/05f011803badb28a6015e5e610cb475e888963f2e3b8d0fe2124bf75b48196d1/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:08:56 compute-0 podman[381998]: 2025-10-11 02:08:56.873648768 +0000 UTC m=+0.250657020 container init 267dd9e5d3c88e749b1c9a4264ee8c8bd0ddbe114c4963ef02fd7f85ab18b18e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hertz, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 02:08:56 compute-0 podman[381998]: 2025-10-11 02:08:56.907805653 +0000 UTC m=+0.284813855 container start 267dd9e5d3c88e749b1c9a4264ee8c8bd0ddbe114c4963ef02fd7f85ab18b18e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hertz, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:08:56 compute-0 podman[381998]: 2025-10-11 02:08:56.914861139 +0000 UTC m=+0.291869341 container attach 267dd9e5d3c88e749b1c9a4264ee8c8bd0ddbe114c4963ef02fd7f85ab18b18e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hertz, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True)
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:08:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:08:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v923: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:57 compute-0 sudo[382144]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ujdhmtrrbyhotoftagfeoaswmwhjigxk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148536.9544804-1069-31836222091315/AnsiballZ_podman_container_exec.py'
Oct 11 02:08:57 compute-0 sudo[382144]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:57 compute-0 podman[382146]: 2025-10-11 02:08:57.735057142 +0000 UTC m=+0.129899558 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:08:57 compute-0 funny_hertz[382016]: {
Oct 11 02:08:57 compute-0 funny_hertz[382016]:     "0": [
Oct 11 02:08:57 compute-0 funny_hertz[382016]:         {
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "devices": [
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "/dev/loop3"
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             ],
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "lv_name": "ceph_lv0",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "lv_size": "21470642176",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "name": "ceph_lv0",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "tags": {
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.cluster_name": "ceph",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.crush_device_class": "",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.encrypted": "0",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.osd_id": "0",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.type": "block",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.vdo": "0"
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             },
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "type": "block",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "vg_name": "ceph_vg0"
Oct 11 02:08:57 compute-0 funny_hertz[382016]:         }
Oct 11 02:08:57 compute-0 funny_hertz[382016]:     ],
Oct 11 02:08:57 compute-0 funny_hertz[382016]:     "1": [
Oct 11 02:08:57 compute-0 funny_hertz[382016]:         {
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "devices": [
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "/dev/loop4"
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             ],
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "lv_name": "ceph_lv1",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "lv_size": "21470642176",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "name": "ceph_lv1",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "tags": {
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.cluster_name": "ceph",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.crush_device_class": "",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.encrypted": "0",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.osd_id": "1",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.type": "block",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.vdo": "0"
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             },
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "type": "block",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "vg_name": "ceph_vg1"
Oct 11 02:08:57 compute-0 funny_hertz[382016]:         }
Oct 11 02:08:57 compute-0 funny_hertz[382016]:     ],
Oct 11 02:08:57 compute-0 funny_hertz[382016]:     "2": [
Oct 11 02:08:57 compute-0 funny_hertz[382016]:         {
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "devices": [
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "/dev/loop5"
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             ],
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "lv_name": "ceph_lv2",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "lv_size": "21470642176",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "name": "ceph_lv2",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "tags": {
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.cluster_name": "ceph",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.crush_device_class": "",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.encrypted": "0",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.osd_id": "2",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.type": "block",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:                 "ceph.vdo": "0"
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             },
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "type": "block",
Oct 11 02:08:57 compute-0 funny_hertz[382016]:             "vg_name": "ceph_vg2"
Oct 11 02:08:57 compute-0 funny_hertz[382016]:         }
Oct 11 02:08:57 compute-0 funny_hertz[382016]:     ]
Oct 11 02:08:57 compute-0 funny_hertz[382016]: }
Oct 11 02:08:57 compute-0 systemd[1]: libpod-267dd9e5d3c88e749b1c9a4264ee8c8bd0ddbe114c4963ef02fd7f85ab18b18e.scope: Deactivated successfully.
Oct 11 02:08:57 compute-0 python3.9[382148]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=multipathd detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:08:57 compute-0 sshd-session[381569]: Failed password for root from 121.227.153.123 port 52818 ssh2
Oct 11 02:08:57 compute-0 podman[382175]: 2025-10-11 02:08:57.906573617 +0000 UTC m=+0.066248774 container died 267dd9e5d3c88e749b1c9a4264ee8c8bd0ddbe114c4963ef02fd7f85ab18b18e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hertz, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 02:08:57 compute-0 systemd[1]: var-lib-containers-storage-overlay-05f011803badb28a6015e5e610cb475e888963f2e3b8d0fe2124bf75b48196d1-merged.mount: Deactivated successfully.
Oct 11 02:08:57 compute-0 systemd[1]: Started libpod-conmon-1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c.scope.
Oct 11 02:08:58 compute-0 podman[382175]: 2025-10-11 02:08:58.007637512 +0000 UTC m=+0.167312669 container remove 267dd9e5d3c88e749b1c9a4264ee8c8bd0ddbe114c4963ef02fd7f85ab18b18e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_hertz, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, ceph=True)
Oct 11 02:08:58 compute-0 systemd[1]: libpod-conmon-267dd9e5d3c88e749b1c9a4264ee8c8bd0ddbe114c4963ef02fd7f85ab18b18e.scope: Deactivated successfully.
Oct 11 02:08:58 compute-0 podman[382176]: 2025-10-11 02:08:58.027879352 +0000 UTC m=+0.178418046 container exec 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, org.label-schema.license=GPLv2, tcib_managed=true, container_name=multipathd, managed_by=edpm_ansible)
Oct 11 02:08:58 compute-0 podman[382176]: 2025-10-11 02:08:58.036840538 +0000 UTC m=+0.187379202 container exec_died 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, org.label-schema.schema-version=1.0, config_id=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:08:58 compute-0 sudo[381816]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:58 compute-0 sudo[382144]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:58 compute-0 systemd[1]: libpod-conmon-1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c.scope: Deactivated successfully.
Oct 11 02:08:58 compute-0 sudo[382218]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:08:58 compute-0 sudo[382218]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:08:58 compute-0 sudo[382218]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:58 compute-0 sudo[382243]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:08:58 compute-0 sudo[382243]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:08:58 compute-0 sudo[382243]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:58 compute-0 sudo[382268]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:08:58 compute-0 sudo[382268]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:08:58 compute-0 sudo[382268]: pam_unix(sudo:session): session closed for user root
Oct 11 02:08:58 compute-0 sudo[382293]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:08:58 compute-0 sudo[382293]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:08:58 compute-0 ceph-mon[191930]: pgmap v923: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:58 compute-0 sshd-session[381569]: Connection closed by authenticating user root 121.227.153.123 port 52818 [preauth]
Oct 11 02:08:59 compute-0 podman[382358]: 2025-10-11 02:08:59.100870682 +0000 UTC m=+0.098466107 container create bf60630899adb686f22a00403fa7ae0cf08c55f5b8e704b67d72855b15d90654 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_proskuriakova, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:08:59 compute-0 podman[382358]: 2025-10-11 02:08:59.065625112 +0000 UTC m=+0.063220617 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:08:59 compute-0 systemd[1]: Started libpod-conmon-bf60630899adb686f22a00403fa7ae0cf08c55f5b8e704b67d72855b15d90654.scope.
Oct 11 02:08:59 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:08:59 compute-0 podman[382358]: 2025-10-11 02:08:59.238802056 +0000 UTC m=+0.236397571 container init bf60630899adb686f22a00403fa7ae0cf08c55f5b8e704b67d72855b15d90654 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_proskuriakova, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:08:59 compute-0 podman[382358]: 2025-10-11 02:08:59.255635333 +0000 UTC m=+0.253230758 container start bf60630899adb686f22a00403fa7ae0cf08c55f5b8e704b67d72855b15d90654 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_proskuriakova, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:08:59 compute-0 podman[382393]: 2025-10-11 02:08:59.251613624 +0000 UTC m=+0.137284238 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, build-date=2025-08-20T13:12:41, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.tags=minimal rhel9, managed_by=edpm_ansible, io.openshift.expose-services=, maintainer=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.6, vendor=Red Hat, Inc., container_name=openstack_network_exporter, name=ubi9-minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, architecture=x86_64, vcs-type=git, url=https://catalog.redhat.com/en/search?searchType=containers, distribution-scope=public, config_id=edpm, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-minimal-container, io.buildah.version=1.33.7, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350)
Oct 11 02:08:59 compute-0 podman[382358]: 2025-10-11 02:08:59.265112231 +0000 UTC m=+0.262707766 container attach bf60630899adb686f22a00403fa7ae0cf08c55f5b8e704b67d72855b15d90654 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_proskuriakova, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:08:59 compute-0 infallible_proskuriakova[382409]: 167 167
Oct 11 02:08:59 compute-0 systemd[1]: libpod-bf60630899adb686f22a00403fa7ae0cf08c55f5b8e704b67d72855b15d90654.scope: Deactivated successfully.
Oct 11 02:08:59 compute-0 podman[382358]: 2025-10-11 02:08:59.268282964 +0000 UTC m=+0.265878369 container died bf60630899adb686f22a00403fa7ae0cf08c55f5b8e704b67d72855b15d90654 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_proskuriakova, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:08:59 compute-0 systemd[1]: var-lib-containers-storage-overlay-afe86296479747584057397e59fa7ceb8e9f8ab0040ac69bc97549d4cf889936-merged.mount: Deactivated successfully.
Oct 11 02:08:59 compute-0 podman[382358]: 2025-10-11 02:08:59.32508264 +0000 UTC m=+0.322678065 container remove bf60630899adb686f22a00403fa7ae0cf08c55f5b8e704b67d72855b15d90654 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_proskuriakova, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, OSD_FLAVOR=default)
Oct 11 02:08:59 compute-0 systemd[1]: libpod-conmon-bf60630899adb686f22a00403fa7ae0cf08c55f5b8e704b67d72855b15d90654.scope: Deactivated successfully.
Oct 11 02:08:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v924: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:08:59 compute-0 podman[382512]: 2025-10-11 02:08:59.552807246 +0000 UTC m=+0.082009446 container create 2cf2588c2fa9fe5facee453c828ea3e19393a942b193bd8807dc1b463e13fd73 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_jepsen, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:08:59 compute-0 podman[382512]: 2025-10-11 02:08:59.511461559 +0000 UTC m=+0.040663769 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:08:59 compute-0 systemd[1]: Started libpod-conmon-2cf2588c2fa9fe5facee453c828ea3e19393a942b193bd8807dc1b463e13fd73.scope.
Oct 11 02:08:59 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:08:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dedba1d34387e102775c83ea3b042b33d9a2a1878cd423fa87149398593dc508/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:08:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dedba1d34387e102775c83ea3b042b33d9a2a1878cd423fa87149398593dc508/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:08:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dedba1d34387e102775c83ea3b042b33d9a2a1878cd423fa87149398593dc508/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:08:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dedba1d34387e102775c83ea3b042b33d9a2a1878cd423fa87149398593dc508/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:08:59 compute-0 podman[382512]: 2025-10-11 02:08:59.751834566 +0000 UTC m=+0.281036816 container init 2cf2588c2fa9fe5facee453c828ea3e19393a942b193bd8807dc1b463e13fd73 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_jepsen, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 02:08:59 compute-0 podman[157119]: time="2025-10-11T02:08:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:08:59 compute-0 podman[382512]: 2025-10-11 02:08:59.779016578 +0000 UTC m=+0.308218748 container start 2cf2588c2fa9fe5facee453c828ea3e19393a942b193bd8807dc1b463e13fd73 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_jepsen, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 02:08:59 compute-0 podman[382512]: 2025-10-11 02:08:59.785510321 +0000 UTC m=+0.314712511 container attach 2cf2588c2fa9fe5facee453c828ea3e19393a942b193bd8807dc1b463e13fd73 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_jepsen, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_REF=reef)
Oct 11 02:08:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:08:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46611 "" "Go-http-client/1.1"
Oct 11 02:08:59 compute-0 sudo[382587]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-naallyqmmpwhzafndwsijsbhspymyilv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148539.2727385-1077-171390840193585/AnsiballZ_file.py'
Oct 11 02:08:59 compute-0 sudo[382587]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:08:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:08:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8925 "" "Go-http-client/1.1"
Oct 11 02:09:00 compute-0 python3.9[382590]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/multipathd recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:09:00 compute-0 sudo[382587]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:09:00 compute-0 unix_chkpwd[382671]: password check failed for user (root)
Oct 11 02:09:00 compute-0 sshd-session[382433]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:09:00 compute-0 ceph-mon[191930]: pgmap v924: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]: {
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:         "osd_id": 1,
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:         "type": "bluestore"
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:     },
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:         "osd_id": 2,
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:         "type": "bluestore"
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:     },
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:         "osd_id": 0,
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:         "type": "bluestore"
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]:     }
Oct 11 02:09:00 compute-0 laughing_jepsen[382557]: }
Oct 11 02:09:00 compute-0 sudo[382767]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tbzsouwmfiheihkmwwinvqbwiygnvtcc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148540.3187075-1086-235886170030766/AnsiballZ_file.py'
Oct 11 02:09:00 compute-0 sudo[382767]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:00 compute-0 systemd[1]: libpod-2cf2588c2fa9fe5facee453c828ea3e19393a942b193bd8807dc1b463e13fd73.scope: Deactivated successfully.
Oct 11 02:09:00 compute-0 systemd[1]: libpod-2cf2588c2fa9fe5facee453c828ea3e19393a942b193bd8807dc1b463e13fd73.scope: Consumed 1.152s CPU time.
Oct 11 02:09:00 compute-0 podman[382512]: 2025-10-11 02:09:00.948473621 +0000 UTC m=+1.477675821 container died 2cf2588c2fa9fe5facee453c828ea3e19393a942b193bd8807dc1b463e13fd73 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_jepsen, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 02:09:00 compute-0 systemd[1]: var-lib-containers-storage-overlay-dedba1d34387e102775c83ea3b042b33d9a2a1878cd423fa87149398593dc508-merged.mount: Deactivated successfully.
Oct 11 02:09:01 compute-0 podman[382512]: 2025-10-11 02:09:01.054689012 +0000 UTC m=+1.583891212 container remove 2cf2588c2fa9fe5facee453c828ea3e19393a942b193bd8807dc1b463e13fd73 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_jepsen, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:09:01 compute-0 systemd[1]: libpod-conmon-2cf2588c2fa9fe5facee453c828ea3e19393a942b193bd8807dc1b463e13fd73.scope: Deactivated successfully.
Oct 11 02:09:01 compute-0 sudo[382293]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:09:01 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:09:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:09:01 compute-0 python3.9[382771]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall/ state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:09:01 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:09:01 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 108d9674-22df-462e-a025-7fb894045fb1 does not exist
Oct 11 02:09:01 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 1c95d4e3-c2e8-49ef-972f-dab0d6234138 does not exist
Oct 11 02:09:01 compute-0 sudo[382767]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:01 compute-0 sudo[382783]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:09:01 compute-0 sudo[382783]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:09:01 compute-0 sudo[382783]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:01 compute-0 sudo[382832]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:09:01 compute-0 sudo[382832]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:09:01 compute-0 sudo[382832]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:01 compute-0 openstack_network_exporter[374316]: ERROR   02:09:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:09:01 compute-0 openstack_network_exporter[374316]: ERROR   02:09:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:09:01 compute-0 openstack_network_exporter[374316]: ERROR   02:09:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:09:01 compute-0 openstack_network_exporter[374316]: ERROR   02:09:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:09:01 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:09:01 compute-0 openstack_network_exporter[374316]: ERROR   02:09:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:09:01 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:09:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v925: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:02 compute-0 sudo[382982]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kmxdhhxohsdrevbhsvpdhddyjbyewbno ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148541.4007905-1094-198031820141677/AnsiballZ_stat.py'
Oct 11 02:09:02 compute-0 sudo[382982]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:02 compute-0 sshd-session[382433]: Failed password for root from 121.227.153.123 port 42726 ssh2
Oct 11 02:09:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:09:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:09:02 compute-0 ceph-mon[191930]: pgmap v925: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:02 compute-0 python3.9[382984]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/telemetry.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:09:02 compute-0 sudo[382982]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:02 compute-0 sudo[383071]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bleywoymnsnbcugvybwtjapndvhjlttd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148541.4007905-1094-198031820141677/AnsiballZ_file.py'
Oct 11 02:09:02 compute-0 sudo[383071]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:02 compute-0 podman[383034]: 2025-10-11 02:09:02.811890923 +0000 UTC m=+0.137182553 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, config_id=edpm, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_managed=true, container_name=ceilometer_agent_ipmi, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']})
Oct 11 02:09:03 compute-0 python3.9[383079]: ansible-ansible.legacy.file Invoked with mode=0640 dest=/var/lib/edpm-config/firewall/telemetry.yaml _original_basename=firewall.yaml.j2 recurse=False state=file path=/var/lib/edpm-config/firewall/telemetry.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:09:03 compute-0 sudo[383071]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v926: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:03 compute-0 sshd-session[382433]: Connection closed by authenticating user root 121.227.153.123 port 42726 [preauth]
Oct 11 02:09:04 compute-0 sudo[383234]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vcvvasseaqgisltruibwbyelpcdxpsxa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148543.516522-1107-170993749002875/AnsiballZ_file.py'
Oct 11 02:09:04 compute-0 sudo[383234]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:04 compute-0 python3.9[383236]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:09:04 compute-0 sudo[383234]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:04 compute-0 ceph-mon[191930]: pgmap v926: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:09:05 compute-0 sudo[383386]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zqwiqyioeydppfsbjhrgnsbqfugxcpre ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148544.7054179-1115-262004845818562/AnsiballZ_stat.py'
Oct 11 02:09:05 compute-0 sudo[383386]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:05 compute-0 unix_chkpwd[383389]: password check failed for user (root)
Oct 11 02:09:05 compute-0 sshd-session[383226]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:09:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v927: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:05 compute-0 python3.9[383388]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:09:05 compute-0 sudo[383386]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:06 compute-0 sudo[383465]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dmurfnaojkrwzdwnikfudmodrzybbbch ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148544.7054179-1115-262004845818562/AnsiballZ_file.py'
Oct 11 02:09:06 compute-0 sudo[383465]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:06 compute-0 python3.9[383467]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml _original_basename=base-rules.yaml.j2 recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:09:06 compute-0 sudo[383465]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:06 compute-0 ceph-mon[191930]: pgmap v927: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:09:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
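The _maybe_adjust pass above is arithmetic that can be checked directly from the logged numbers: with this cluster's 3 OSDs and the default mon_target_pg_per_osd of 100, every "pg target" equals usage_ratio * bias * 300, and the result is then quantized to a power of two. A sketch of that calculation, reproducing two of the logged values; note the real pg_autoscaler also applies pool min/max clamps and only proposes a change when the value is off by a threshold (3x by default), which this sketch omits:

    def pg_target(usage_ratio: float, bias: float,
                  osds: int = 3, target_pg_per_osd: int = 100) -> float:
        # "pg target" as logged = share of raw space * bias * (OSDs * PGs/OSD)
        return usage_ratio * bias * osds * target_pg_per_osd

    def quantize(raw: float, minimum: int = 1) -> int:
        # Round up to the next power of two, with a floor.
        n = minimum
        while n < raw:
            n *= 2
        return n

    # Matches "Pool '.mgr' ... pg target 0.0021557249951162337 quantized to 1"
    print(pg_target(7.185749983720779e-06, 1.0), quantize(0.0021557249951162337))
    # Matches "Pool 'cephfs.cephfs.meta' ... pg target 0.0006104707950771635"
    print(pg_target(5.087256625643029e-07, 4.0))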
Oct 11 02:09:07 compute-0 sudo[383617]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-irnqnbbnolwnwipntwgcgmlusjzrufwa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148546.6853046-1127-82182417226064/AnsiballZ_stat.py'
Oct 11 02:09:07 compute-0 sudo[383617]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v928: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:07 compute-0 python3.9[383619]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:09:07 compute-0 sudo[383617]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:07 compute-0 sshd-session[383226]: Failed password for root from 121.227.153.123 port 42732 ssh2
Oct 11 02:09:07 compute-0 sudo[383695]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-itsuamppvmrxwryodqotkhujdyxqbavt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148546.6853046-1127-82182417226064/AnsiballZ_file.py'
Oct 11 02:09:07 compute-0 sudo[383695]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:08 compute-0 python3.9[383697]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml _original_basename=.5a1cw2m5 recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:09:08 compute-0 sudo[383695]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:08 compute-0 ceph-mon[191930]: pgmap v928: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:08 compute-0 sshd-session[383226]: Connection closed by authenticating user root 121.227.153.123 port 42732 [preauth]
Oct 11 02:09:08 compute-0 sudo[383847]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ghtpjfylffdafrvxdmgdquyafovvcvnr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148548.4187975-1139-190903295897356/AnsiballZ_stat.py'
Oct 11 02:09:08 compute-0 sudo[383847]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:09 compute-0 python3.9[383849]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/iptables.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:09:09 compute-0 sudo[383847]: pam_unix(sudo:session): session closed for user root
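The stat calls with get_checksum=True are the first half of Ansible's copy pattern: hash the destination, compare against the rendered source, and skip the write when they match, which is why the follow-up tasks in this run are bare ansible.legacy.file metadata touches rather than content copies. A sketch of the checksum half, matching checksum_algorithm=sha1 from the invocation above (the helper name is ours):

    import hashlib
    import os
    from typing import Optional

    def sha1_checksum(path: str) -> Optional[str]:
        # stat with get_checksum=True reports a SHA-1 of the file's content;
        # a missing file yields no checksum, which forces the subsequent copy.
        if not os.path.isfile(path):
            return None
        h = hashlib.sha1()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(65536), b""):
                h.update(chunk)
        return h.hexdigest()

    print(sha1_checksum("/etc/nftables/iptables.nft"))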
Oct 11 02:09:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v929: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:09 compute-0 sudo[383927]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fzncihzziymdsrcagvgjmgrapckwotfc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148548.4187975-1139-190903295897356/AnsiballZ_file.py'
Oct 11 02:09:09 compute-0 sudo[383927]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:09 compute-0 python3.9[383929]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/iptables.nft _original_basename=iptables.nft recurse=False state=file path=/etc/nftables/iptables.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:09:09 compute-0 sudo[383927]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:09:10 compute-0 unix_chkpwd[383930]: password check failed for user (root)
Oct 11 02:09:10 compute-0 sshd-session[383850]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:09:10 compute-0 ceph-mon[191930]: pgmap v929: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v930: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:12 compute-0 sudo[384080]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gnwabytrguagnvvwoiazvmtofbbelkbq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148551.5588114-1152-135578998635817/AnsiballZ_command.py'
Oct 11 02:09:12 compute-0 sudo[384080]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:12 compute-0 python3.9[384082]: ansible-ansible.legacy.command Invoked with _raw_params=nft -j list ruleset _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:09:12 compute-0 sudo[384080]: pam_unix(sudo:session): session closed for user root
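nft -j list ruleset emits the entire ruleset as one JSON document of the form {"nftables": [...]}, which is why the play captures it with a command task instead of scraping text output. A minimal consumer of that same command, summarizing how many objects of each kind (metainfo, table, chain, rule, set, ...) are present; the summary logic is ours:

    import json
    import subprocess

    raw = subprocess.run(["nft", "-j", "list", "ruleset"],
                         capture_output=True, text=True, check=True).stdout
    ruleset = json.loads(raw)["nftables"]

    # Each list entry is a one-key object whose key names the object type.
    counts = {}
    for obj in ruleset:
        for kind in obj:
            counts[kind] = counts.get(kind, 0) + 1
    print(counts)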
Oct 11 02:09:12 compute-0 ceph-mon[191930]: pgmap v930: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:12 compute-0 sshd-session[383850]: Failed password for root from 121.227.153.123 port 42740 ssh2
Oct 11 02:09:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v931: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:13 compute-0 sshd-session[383850]: Connection closed by authenticating user root 121.227.153.123 port 42740 [preauth]
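Interleaved with the deployment, the sshd-session lines record a brute-force attempt: three root password failures from 121.227.153.123 (ports 42726, 42732, 42740) inside roughly ten seconds, each followed by a preauth disconnect, with matching unix_chkpwd failures. A fail2ban-style counter over the journal that would surface this source; the threshold and the plain journalctl invocation are assumptions, not site policy:

    import re
    import subprocess
    from collections import Counter

    PAT = re.compile(r"Failed password for (?:invalid user )?(\S+) from (\S+) port")

    log = subprocess.run(["journalctl", "--no-pager", "-o", "short"],
                         capture_output=True, text=True, check=True).stdout
    hits = Counter(m.group(2) for m in PAT.finditer(log))

    for host, n in hits.most_common():
        if n >= 3:  # assumed alerting threshold
            print(f"{host}: {n} failed password attempts")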
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.857 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is larger than the number of worker threads available to execute them; processing can therefore be expected to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.858 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.858 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.859 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.859 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.860 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.862 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.862 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.862 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.863 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.864 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.864 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.864 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.865 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.865 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.866 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.866 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.866 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.866 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.867 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.867 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.870 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.871 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.871 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.871 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.871 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.871 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.871 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': [], 'power.state': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.871 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.872 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.872 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.873 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.873 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': [], 'power.state': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.bytes.rate': [], 'disk.ephemeral.size': [], 'network.incoming.packets': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.873 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.874 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.874 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.874 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.874 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.874 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.874 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.874 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.874 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.874 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': [], 'power.state': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.bytes.rate': [], 'disk.ephemeral.size': [], 'network.incoming.packets': [], 'network.outgoing.bytes.delta': [], 'disk.root.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': [], 'network.incoming.packets.error': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': [], 'power.state': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.bytes.rate': [], 'disk.ephemeral.size': [], 'network.incoming.packets': [], 'network.outgoing.bytes.delta': [], 'disk.root.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': [], 'network.incoming.packets.error': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': [], 'power.state': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.bytes.rate': [], 'disk.ephemeral.size': [], 'network.incoming.packets': [], 'network.outgoing.bytes.delta': [], 'disk.root.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': [], 'network.incoming.packets.error': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': [], 'power.state': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.bytes.rate': [], 'disk.ephemeral.size': [], 'network.incoming.packets': [], 'network.outgoing.bytes.delta': [], 'disk.root.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': [], 'network.incoming.packets.error': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.877 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.877 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.877 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.877 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.877 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.877 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.877 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.877 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.878 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.878 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.878 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.878 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.878 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.879 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.879 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.879 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.879 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.879 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.880 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.880 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.880 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.880 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.880 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.881 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.881 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.881 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.881 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.881 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.881 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.882 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.882 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.882 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.882 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:09:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:09:13.882 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
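The burst of "Finished processing pollster" DEBUG lines above is one polling interval: the compute agent walks every configured meter once and emits one completion line per pollster from execute_polling_task_processing. A minimal sketch of that loop shape in Python (hypothetical Pollster class and names, not ceilometer's actual implementation):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    LOG = logging.getLogger("ceilometer.polling.manager")

    class Pollster:
        # Hypothetical stand-in: real pollsters query libvirt/the hypervisor.
        def __init__(self, name):
            self.name = name

        def get_samples(self, resources):
            return []

    def execute_polling_task(pollsters, resources):
        # One polling pass: each meter is processed, then a DEBUG line is logged.
        for pollster in pollsters:
            pollster.get_samples(resources)
            LOG.debug("Finished processing pollster [%s].", pollster.name)

    execute_polling_task([Pollster("cpu"), Pollster("memory.usage")], resources=[])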
Oct 11 02:09:14 compute-0 podman[384210]: 2025-10-11 02:09:14.245122255 +0000 UTC m=+0.123270939 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.4, org.label-schema.schema-version=1.0, config_id=edpm, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image)
Oct 11 02:09:14 compute-0 sudo[384254]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ytkvokibufcsjnmysdvxhcwvbxmrpdcr ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760148552.6601114-1160-244509459766949/AnsiballZ_edpm_nftables_from_files.py'
Oct 11 02:09:14 compute-0 sudo[384254]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:14 compute-0 ceph-mon[191930]: pgmap v931: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:14 compute-0 python3[384256]: ansible-edpm_nftables_from_files Invoked with src=/var/lib/edpm-config/firewall
Oct 11 02:09:14 compute-0 sudo[384254]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:09:15 compute-0 podman[384333]: 2025-10-11 02:09:15.226104321 +0000 UTC m=+0.110328855 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:09:15 compute-0 podman[384335]: 2025-10-11 02:09:15.248020642 +0000 UTC m=+0.120282764 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, org.label-schema.license=GPLv2, tcib_managed=true)
Oct 11 02:09:15 compute-0 podman[384336]: 2025-10-11 02:09:15.248666739 +0000 UTC m=+0.121110258 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, managed_by=edpm_ansible, container_name=kepler, release-0.7.12=, io.openshift.expose-services=, maintainer=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, build-date=2024-09-18T21:23:30, io.buildah.version=1.29.0, com.redhat.component=ubi9-container, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, release=1214.1726694543, io.openshift.tags=base rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, name=ubi9, distribution-scope=public, summary=Provides the latest release of Red Hat Universal Base Image 9., version=9.4, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-type=git, vendor=Red Hat, Inc.)
Oct 11 02:09:15 compute-0 podman[384334]: 2025-10-11 02:09:15.282432457 +0000 UTC m=+0.161619890 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.license=GPLv2)
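Each podman health_status line above records one run of a container's configured healthcheck: podman executes the 'test' command from config_data inside the container and logs health_status along with health_failing_streak, the count of consecutive failures. The same probe can be run by hand; a sketch via subprocess, assuming podman is on PATH and the containers exist as named in the log:

    import subprocess

    # 'podman healthcheck run' executes the container's configured test command;
    # exit status 0 means the container reports healthy.
    for name in ("ceilometer_agent_compute", "ovn_metadata_agent", "ovn_controller"):
        subprocess.run(["podman", "healthcheck", "run", name], check=True)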
Oct 11 02:09:15 compute-0 unix_chkpwd[384462]: password check failed for user (root)
Oct 11 02:09:15 compute-0 sshd-session[384208]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:09:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v932: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:15 compute-0 sudo[384489]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nvsovtwwkcxvnawwknctqmscappikefx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148554.9329774-1168-133176309528858/AnsiballZ_stat.py'
Oct 11 02:09:15 compute-0 sudo[384489]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:15 compute-0 python3.9[384491]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:09:15 compute-0 sudo[384489]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:16 compute-0 sudo[384567]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-noogqtrgpvsgyqitsuuacrzpppcpxlfg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148554.9329774-1168-133176309528858/AnsiballZ_file.py'
Oct 11 02:09:16 compute-0 sudo[384567]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:16 compute-0 python3.9[384569]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-jumps.nft _original_basename=jump-chain.j2 recurse=False state=file path=/etc/nftables/edpm-jumps.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:09:16 compute-0 sudo[384567]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:16 compute-0 ceph-mon[191930]: pgmap v932: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:17 compute-0 sshd-session[384208]: Failed password for root from 121.227.153.123 port 47920 ssh2
Oct 11 02:09:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v933: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:17 compute-0 sudo[384719]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aupjmopwutfcjtuyshgguiepdrpwjbhz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148556.81768-1180-173344652651906/AnsiballZ_stat.py'
Oct 11 02:09:17 compute-0 sudo[384719]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:17 compute-0 python3.9[384721]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-update-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:09:17 compute-0 sudo[384719]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:18 compute-0 sudo[384822]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nlpkcjkmzvporzjbjeraypggrojeqeqm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148556.81768-1180-173344652651906/AnsiballZ_file.py'
Oct 11 02:09:18 compute-0 sudo[384822]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:18 compute-0 podman[384772]: 2025-10-11 02:09:18.249062631 +0000 UTC m=+0.130375067 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, container_name=iscsid, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3)
Oct 11 02:09:18 compute-0 podman[384771]: 2025-10-11 02:09:18.284862385 +0000 UTC m=+0.168837083 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=multipathd, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:09:18 compute-0 python3.9[384837]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-update-jumps.nft _original_basename=jump-chain.j2 recurse=False state=file path=/etc/nftables/edpm-update-jumps.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:09:18 compute-0 sudo[384822]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:18 compute-0 ceph-mon[191930]: pgmap v933: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:18 compute-0 sshd-session[384208]: Connection closed by authenticating user root 121.227.153.123 port 47920 [preauth]
Oct 11 02:09:19 compute-0 sudo[384990]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nljqpwjrgvereywrnceheorhpffxvqmd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148558.7522752-1192-167390579787862/AnsiballZ_stat.py'
Oct 11 02:09:19 compute-0 sudo[384990]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v934: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:19 compute-0 python3.9[384992]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-flushes.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:09:19 compute-0 sudo[384990]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:09:20 compute-0 sudo[385069]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pxfrzemabwaspeocxhiaorstndbrxxoz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148558.7522752-1192-167390579787862/AnsiballZ_file.py'
Oct 11 02:09:20 compute-0 sudo[385069]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:20 compute-0 python3.9[385071]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-flushes.nft _original_basename=flush-chain.j2 recurse=False state=file path=/etc/nftables/edpm-flushes.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:09:20 compute-0 sudo[385069]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:20 compute-0 ceph-mon[191930]: pgmap v934: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:21 compute-0 unix_chkpwd[385192]: password check failed for user (root)
Oct 11 02:09:21 compute-0 sshd-session[384921]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:09:21 compute-0 sudo[385222]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ctcgubyfuyplduxkqmsvqrdnqxnalagj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148560.6847966-1204-196848355269823/AnsiballZ_stat.py'
Oct 11 02:09:21 compute-0 sudo[385222]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:21 compute-0 python3.9[385224]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-chains.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:09:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v935: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:21 compute-0 sudo[385222]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:21 compute-0 sudo[385300]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aasklmfpxtyinyiyiheywyzfvlotwumy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148560.6847966-1204-196848355269823/AnsiballZ_file.py'
Oct 11 02:09:21 compute-0 sudo[385300]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:22 compute-0 python3.9[385302]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-chains.nft _original_basename=chains.j2 recurse=False state=file path=/etc/nftables/edpm-chains.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:09:22 compute-0 sudo[385300]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:22 compute-0 ceph-mon[191930]: pgmap v935: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:23 compute-0 sshd-session[384921]: Failed password for root from 121.227.153.123 port 47926 ssh2
Oct 11 02:09:23 compute-0 sudo[385452]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rfxdietcbsioegvragqzcehcbdwrtxzl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148562.5182443-1216-269412533728525/AnsiballZ_stat.py'
Oct 11 02:09:23 compute-0 sudo[385452]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v936: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:23 compute-0 python3.9[385454]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-rules.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:09:23 compute-0 sudo[385452]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:24 compute-0 sshd-session[384921]: Connection closed by authenticating user root 121.227.153.123 port 47926 [preauth]
Oct 11 02:09:24 compute-0 ceph-mon[191930]: pgmap v936: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:09:25 compute-0 sudo[385532]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-krdvaamimjhrjbedtzkblguhoqqliykr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148562.5182443-1216-269412533728525/AnsiballZ_file.py'
Oct 11 02:09:25 compute-0 sudo[385532]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:25 compute-0 python3.9[385534]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-rules.nft _original_basename=ruleset.j2 recurse=False state=file path=/etc/nftables/edpm-rules.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:09:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v937: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:25 compute-0 sudo[385532]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:26 compute-0 unix_chkpwd[385634]: password check failed for user (root)
Oct 11 02:09:26 compute-0 sshd-session[385504]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:09:26 compute-0 sudo[385685]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dphwaplcaswyhrmexqchwfngrxetbeqa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148565.7403598-1229-268104303526386/AnsiballZ_command.py'
Oct 11 02:09:26 compute-0 sudo[385685]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:26 compute-0 python3.9[385687]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail; cat /etc/nftables/edpm-chains.nft /etc/nftables/edpm-flushes.nft /etc/nftables/edpm-rules.nft /etc/nftables/edpm-update-jumps.nft /etc/nftables/edpm-jumps.nft | nft -c -f - _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
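The command above is the dry-run step of the firewall rollout: it concatenates the five EDPM nftables fragments in their load order and pipes them through nft -c -f -, which parses and validates the combined ruleset without committing anything; only after this check succeeds are the chains applied with nft -f (visible further down). The same check, sketched in Python on the assumption that nft and the fragment files are present:

    import pathlib
    import subprocess

    fragments = [
        "/etc/nftables/edpm-chains.nft",
        "/etc/nftables/edpm-flushes.nft",
        "/etc/nftables/edpm-rules.nft",
        "/etc/nftables/edpm-update-jumps.nft",
        "/etc/nftables/edpm-jumps.nft",
    ]
    ruleset = "".join(pathlib.Path(f).read_text() for f in fragments)
    # nft -c ("check") parses and validates the ruleset but does not apply it.
    subprocess.run(["nft", "-c", "-f", "-"], input=ruleset, text=True, check=True)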
Oct 11 02:09:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:09:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:09:26 compute-0 sudo[385685]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:09:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:09:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:09:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:09:26 compute-0 ceph-mon[191930]: pgmap v937: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v938: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:27 compute-0 sshd-session[385504]: Failed password for root from 121.227.153.123 port 52238 ssh2
Oct 11 02:09:28 compute-0 podman[385814]: 2025-10-11 02:09:28.087679711 +0000 UTC m=+0.111124869 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:09:28 compute-0 sudo[385863]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fsovrkrtiwqxodpuwpsiustogdacrkvf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148567.3186853-1237-9641133236564/AnsiballZ_blockinfile.py'
Oct 11 02:09:28 compute-0 sudo[385863]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:28 compute-0 python3.9[385865]: ansible-ansible.builtin.blockinfile Invoked with backup=False block=include "/etc/nftables/iptables.nft"
                                             include "/etc/nftables/edpm-chains.nft"
                                             include "/etc/nftables/edpm-rules.nft"
                                             include "/etc/nftables/edpm-jumps.nft"
                                              path=/etc/sysconfig/nftables.conf validate=nft -c -f %s state=present marker=# {mark} ANSIBLE MANAGED BLOCK create=False marker_begin=BEGIN marker_end=END append_newline=False prepend_newline=False unsafe_writes=False insertafter=None insertbefore=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:09:28 compute-0 sudo[385863]: pam_unix(sudo:session): session closed for user root
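Reconstructed from the blockinfile parameters logged above (marker '# {mark} ANSIBLE MANAGED BLOCK' with BEGIN/END marks, and the four include lines as the block), the managed section kept in /etc/sysconfig/nftables.conf reads:

    # BEGIN ANSIBLE MANAGED BLOCK
    include "/etc/nftables/iptables.nft"
    include "/etc/nftables/edpm-chains.nft"
    include "/etc/nftables/edpm-rules.nft"
    include "/etc/nftables/edpm-jumps.nft"
    # END ANSIBLE MANAGED BLOCK

The validate='nft -c -f %s' parameter means the rewritten file must pass the same syntax check before it is allowed to replace the original.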
Oct 11 02:09:28 compute-0 ceph-mon[191930]: pgmap v938: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:29 compute-0 sudo[386015]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mtomhvmpoumdhxkiaavfhfcpnirtrjof ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148568.7245114-1246-228200253785392/AnsiballZ_command.py'
Oct 11 02:09:29 compute-0 sudo[386015]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:29 compute-0 sshd-session[385504]: Connection closed by authenticating user root 121.227.153.123 port 52238 [preauth]
Oct 11 02:09:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v939: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:29 compute-0 podman[386017]: 2025-10-11 02:09:29.478062034 +0000 UTC m=+0.127926834 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.tags=minimal rhel9, url=https://catalog.redhat.com/en/search?searchType=containers, io.buildah.version=1.33.7, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., name=ubi9-minimal, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, architecture=x86_64, container_name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.6, release=1755695350, maintainer=Red Hat, Inc., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, com.redhat.component=ubi9-minimal-container, vendor=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, config_id=edpm, build-date=2025-08-20T13:12:41, distribution-scope=public, managed_by=edpm_ansible)
Oct 11 02:09:29 compute-0 python3.9[386018]: ansible-ansible.legacy.command Invoked with _raw_params=nft -f /etc/nftables/edpm-chains.nft _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:09:29 compute-0 sudo[386015]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:29 compute-0 podman[157119]: time="2025-10-11T02:09:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:09:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:09:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:09:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:09:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8516 "" "Go-http-client/1.1"
Oct 11 02:09:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:09:30 compute-0 sudo[386190]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ilahgnfdztrvnsqkuufjudvzvhuxtpag ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148569.942993-1254-96011524309074/AnsiballZ_stat.py'
Oct 11 02:09:30 compute-0 sudo[386190]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:30 compute-0 ceph-mon[191930]: pgmap v939: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:30 compute-0 python3.9[386192]: ansible-ansible.builtin.stat Invoked with path=/etc/nftables/edpm-rules.nft.changed follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:09:30 compute-0 sudo[386190]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:31 compute-0 openstack_network_exporter[374316]: ERROR   02:09:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:09:31 compute-0 openstack_network_exporter[374316]: ERROR   02:09:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:09:31 compute-0 openstack_network_exporter[374316]: ERROR   02:09:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:09:31 compute-0 openstack_network_exporter[374316]: ERROR   02:09:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:09:31 compute-0 openstack_network_exporter[374316]: ERROR   02:09:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:09:31 compute-0 unix_chkpwd[386292]: password check failed for user (root)
Oct 11 02:09:31 compute-0 sshd-session[386039]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:09:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v940: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:31 compute-0 sudo[386343]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ncuefzfnqclofxdgjqfreevelupjywrg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148571.1803713-1263-137921181236669/AnsiballZ_file.py'
Oct 11 02:09:31 compute-0 sudo[386343]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:32 compute-0 python3.9[386345]: ansible-ansible.builtin.file Invoked with path=/etc/nftables/edpm-rules.nft.changed state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:09:32 compute-0 sudo[386343]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:32 compute-0 sshd-session[357351]: Connection closed by 192.168.122.30 port 34916
Oct 11 02:09:32 compute-0 sshd-session[357348]: pam_unix(sshd:session): session closed for user zuul
Oct 11 02:09:32 compute-0 systemd[1]: session-59.scope: Deactivated successfully.
Oct 11 02:09:32 compute-0 systemd[1]: session-59.scope: Consumed 2min 58.356s CPU time.
Oct 11 02:09:32 compute-0 systemd-logind[804]: Session 59 logged out. Waiting for processes to exit.
Oct 11 02:09:32 compute-0 systemd-logind[804]: Removed session 59.
Oct 11 02:09:32 compute-0 ceph-mon[191930]: pgmap v940: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:33 compute-0 podman[386370]: 2025-10-11 02:09:33.243893371 +0000 UTC m=+0.132349118 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi)
Oct 11 02:09:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v941: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:33 compute-0 sshd-session[386039]: Failed password for root from 121.227.153.123 port 53450 ssh2
Oct 11 02:09:34 compute-0 ceph-mon[191930]: pgmap v941: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:34 compute-0 sshd-session[386039]: Connection closed by authenticating user root 121.227.153.123 port 53450 [preauth]
Oct 11 02:09:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:09:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v942: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:36 compute-0 unix_chkpwd[386390]: password check failed for user (root)
Oct 11 02:09:36 compute-0 sshd-session[386388]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:09:36 compute-0 ceph-mon[191930]: pgmap v942: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v943: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:37 compute-0 nova_compute[356901]: 2025-10-11 02:09:37.914 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:09:38 compute-0 sshd-session[386388]: Failed password for root from 121.227.153.123 port 53466 ssh2
Oct 11 02:09:38 compute-0 sshd-session[386391]: Accepted publickey for zuul from 192.168.122.30 port 35688 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 02:09:38 compute-0 systemd-logind[804]: New session 60 of user zuul.
Oct 11 02:09:38 compute-0 systemd[1]: Started Session 60 of User zuul.
Oct 11 02:09:38 compute-0 sshd-session[386391]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 02:09:38 compute-0 ceph-mon[191930]: pgmap v943: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v944: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:39 compute-0 sshd-session[386388]: Connection closed by authenticating user root 121.227.153.123 port 53466 [preauth]
Oct 11 02:09:39 compute-0 nova_compute[356901]: 2025-10-11 02:09:39.891 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:09:39 compute-0 nova_compute[356901]: 2025-10-11 02:09:39.918 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:09:39 compute-0 nova_compute[356901]: 2025-10-11 02:09:39.918 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:09:39 compute-0 nova_compute[356901]: 2025-10-11 02:09:39.919 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:09:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:09:40 compute-0 sudo[386546]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hctwjqmlnmkiabuqxycxzgjeiczvezzw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148578.881447-24-277364050768299/AnsiballZ_systemd_service.py'
Oct 11 02:09:40 compute-0 sudo[386546]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:40 compute-0 ceph-mon[191930]: pgmap v944: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:40 compute-0 nova_compute[356901]: 2025-10-11 02:09:40.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:09:40 compute-0 nova_compute[356901]: 2025-10-11 02:09:40.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:09:40 compute-0 nova_compute[356901]: 2025-10-11 02:09:40.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:09:40 compute-0 nova_compute[356901]: 2025-10-11 02:09:40.918 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944
Oct 11 02:09:40 compute-0 nova_compute[356901]: 2025-10-11 02:09:40.918 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:09:40 compute-0 nova_compute[356901]: 2025-10-11 02:09:40.919 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:09:40 compute-0 nova_compute[356901]: 2025-10-11 02:09:40.919 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:09:40 compute-0 nova_compute[356901]: 2025-10-11 02:09:40.957 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:09:40 compute-0 nova_compute[356901]: 2025-10-11 02:09:40.958 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:09:40 compute-0 nova_compute[356901]: 2025-10-11 02:09:40.959 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:09:40 compute-0 nova_compute[356901]: 2025-10-11 02:09:40.959 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:09:40 compute-0 nova_compute[356901]: 2025-10-11 02:09:40.960 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:09:41 compute-0 python3.9[386548]: ansible-ansible.builtin.systemd_service Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 02:09:41 compute-0 systemd[1]: Reloading.
Oct 11 02:09:41 compute-0 systemd-rc-local-generator[386593]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:09:41 compute-0 systemd-sysv-generator[386599]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update package to include a native systemd unit file, in order to make it more safe and robust.
Oct 11 02:09:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v945: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:09:41 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3279753637' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:09:41 compute-0 nova_compute[356901]: 2025-10-11 02:09:41.561 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.601s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
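The ceph df round trip above is how the resource tracker measures RBD-backed disk capacity: nova shells out with the client.openstack identity (the matching mon-side dispatch shows up in the ceph-mon audit lines) and reads the JSON output. A minimal reproduction of the probe, assuming the same conf file and keyring are readable:

    import json
    import subprocess

    out = subprocess.run(
        ["ceph", "df", "--format=json", "--id", "openstack",
         "--conf", "/etc/ceph/ceph.conf"],
        capture_output=True, text=True, check=True,
    ).stdout
    stats = json.loads(out)["stats"]
    # Cluster-wide totals in bytes; ceph df also reports per-pool stats.
    print(stats["total_bytes"], stats["total_avail_bytes"])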
Oct 11 02:09:41 compute-0 sudo[386546]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:41 compute-0 unix_chkpwd[386612]: password check failed for user (root)
Oct 11 02:09:41 compute-0 sshd-session[386518]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=root
Oct 11 02:09:41 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3279753637' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:09:42 compute-0 nova_compute[356901]: 2025-10-11 02:09:42.065 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:09:42 compute-0 rsyslogd[187706]: imjournal: 3213 messages lost due to rate-limiting (20000 allowed within 600 seconds)
Oct 11 02:09:42 compute-0 nova_compute[356901]: 2025-10-11 02:09:42.070 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=4578MB free_disk=59.98828125GB free_vcpus=8 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:09:42 compute-0 nova_compute[356901]: 2025-10-11 02:09:42.070 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:09:42 compute-0 nova_compute[356901]: 2025-10-11 02:09:42.071 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:09:42 compute-0 nova_compute[356901]: 2025-10-11 02:09:42.182 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 0 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:09:42 compute-0 nova_compute[356901]: 2025-10-11 02:09:42.182 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=512MB phys_disk=59GB used_disk=0GB total_vcpus=8 used_vcpus=0 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:09:42 compute-0 nova_compute[356901]: 2025-10-11 02:09:42.218 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:09:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:09:42 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2960319084' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:09:42 compute-0 nova_compute[356901]: 2025-10-11 02:09:42.688 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.471s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
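[note] The pairs of processutils/ceph-mon lines above show how nova measures Ceph capacity: it shells out to the ceph CLI through oslo_concurrency.processutils, and each call surfaces on the mon as a client.openstack "df" dispatch. A minimal sketch of the same call, assuming only the ceph CLI and the client.openstack keyring visible in the log:

    import json
    import subprocess

    def ceph_df(conf="/etc/ceph/ceph.conf", client="openstack"):
        # Same command the resource tracker runs above; --format=json
        # makes the per-pool stats machine-readable.
        out = subprocess.check_output(
            ["ceph", "df", "--format=json", "--id", client, "--conf", conf])
        return json.loads(out)

    pools = ceph_df()["pools"]  # nova derives free disk capacity from here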
Oct 11 02:09:42 compute-0 nova_compute[356901]: 2025-10-11 02:09:42.698 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:09:42 compute-0 nova_compute[356901]: 2025-10-11 02:09:42.716 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
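[note] For reference, placement normally turns each inventory row above into schedulable capacity as int((total - reserved) * allocation_ratio); a quick check against the reported numbers (inventory copied from the log line, formula an assumption about placement's internals):

    inv = {
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "VCPU":      {"total": 8,    "reserved": 0,   "allocation_ratio": 4.0},
        "DISK_GB":   {"total": 59,   "reserved": 0,   "allocation_ratio": 0.9},
    }
    for rc, v in inv.items():
        print(rc, int((v["total"] - v["reserved"]) * v["allocation_ratio"]))
    # MEMORY_MB 7168, VCPU 32, DISK_GB 53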
Oct 11 02:09:42 compute-0 nova_compute[356901]: 2025-10-11 02:09:42.717 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:09:42 compute-0 nova_compute[356901]: 2025-10-11 02:09:42.717 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.646s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
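[note] The acquire/release pair bracketing the update (waited 0.001s, held 0.646s) is oslo_concurrency's in-process named lock. A sketch of the pattern, assuming only the public lockutils API rather than nova's exact wrapper:

    from oslo_concurrency import lockutils

    @lockutils.synchronized("compute_resources")
    def update_available_resource():
        # Runs under the same named lock shown in the log; the wrapper
        # emits the "acquired ... waited" and "released ... held"
        # DEBUG lines automatically.
        pass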
Oct 11 02:09:42 compute-0 ceph-mon[191930]: pgmap v945: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:42 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2960319084' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:09:43 compute-0 python3.9[386778]: ansible-ansible.builtin.service_facts Invoked
Oct 11 02:09:43 compute-0 network[386795]: You are using the 'network' service provided by 'network-scripts', which is now deprecated.
Oct 11 02:09:43 compute-0 network[386796]: 'network-scripts' will be removed from the distribution in the near future.
Oct 11 02:09:43 compute-0 network[386797]: It is advised to switch to 'NetworkManager' for network management.
Oct 11 02:09:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v946: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:43 compute-0 nova_compute[356901]: 2025-10-11 02:09:43.695 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:09:43 compute-0 nova_compute[356901]: 2025-10-11 02:09:43.695 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:09:44 compute-0 sshd-session[386518]: Failed password for root from 121.227.153.123 port 37220 ssh2
Oct 11 02:09:44 compute-0 podman[386804]: 2025-10-11 02:09:44.525369461 +0000 UTC m=+0.130740138 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, config_id=edpm, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.build-date=20251007)
Oct 11 02:09:44 compute-0 ceph-mon[191930]: pgmap v946: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:09:45 compute-0 sshd-session[386518]: Connection closed by authenticating user root 121.227.153.123 port 37220 [preauth]
Oct 11 02:09:45 compute-0 podman[386851]: 2025-10-11 02:09:45.404717486 +0000 UTC m=+0.105986857 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, container_name=ovn_metadata_agent, org.label-schema.build-date=20251009, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_id=ovn_metadata_agent, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:09:45 compute-0 podman[386849]: 2025-10-11 02:09:45.409899054 +0000 UTC m=+0.130199815 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:09:45 compute-0 podman[386852]: 2025-10-11 02:09:45.428076867 +0000 UTC m=+0.126684995 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, release=1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, container_name=kepler, io.buildah.version=1.29.0, io.openshift.tags=base rhel9, architecture=x86_64, io.openshift.expose-services=, release-0.7.12=, vcs-type=git, managed_by=edpm_ansible, build-date=2024-09-18T21:23:30, maintainer=Red Hat, Inc., distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.k8s.display-name=Red Hat Universal Base Image 9, summary=Provides the latest release of Red Hat Universal Base Image 9., version=9.4, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9)
Oct 11 02:09:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v947: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:45 compute-0 podman[386882]: 2025-10-11 02:09:45.502575902 +0000 UTC m=+0.136598358 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
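[note] Each health_status event above is podman's healthcheck timer running the configured '/openstack/healthcheck' test; all report healthy with a zero failing streak. To spot regressions the same state can be filtered from the CLI, a sketch assuming podman's health filter and JSON output:

    import json
    import subprocess

    # List containers whose last healthcheck failed (empty at this
    # point in the log: every event above is health_status=healthy).
    out = subprocess.check_output(
        ["podman", "ps", "--filter", "health=unhealthy", "--format", "json"])
    for c in json.loads(out):
        print(c["Names"], c["State"])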
Oct 11 02:09:46 compute-0 ceph-mon[191930]: pgmap v947: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v948: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:47 compute-0 sshd-session[386969]: Invalid user user from 121.227.153.123 port 37234
Oct 11 02:09:47 compute-0 sshd-session[386969]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:09:47 compute-0 sshd-session[386969]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:09:48 compute-0 podman[387015]: 2025-10-11 02:09:48.479138466 +0000 UTC m=+0.143361202 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_id=iscsid, org.label-schema.build-date=20251009, tcib_managed=true, container_name=iscsid)
Oct 11 02:09:48 compute-0 podman[387017]: 2025-10-11 02:09:48.488193943 +0000 UTC m=+0.152053140 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, tcib_managed=true, config_id=multipathd, container_name=multipathd, io.buildah.version=1.41.3)
Oct 11 02:09:48 compute-0 ceph-mon[191930]: pgmap v948: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v949: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:49 compute-0 sudo[387211]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rwywdhvwjxwjesolivusyeotggttgmrv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148589.1960862-47-225182017032939/AnsiballZ_systemd_service.py'
Oct 11 02:09:49 compute-0 sudo[387211]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:50 compute-0 sshd-session[386969]: Failed password for invalid user user from 121.227.153.123 port 37234 ssh2
Oct 11 02:09:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:09:50 compute-0 python3.9[387213]: ansible-ansible.builtin.systemd_service Invoked with enabled=False name=tripleo_ceilometer_agent_ipmi.service state=stopped daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:09:50 compute-0 sudo[387211]: pam_unix(sudo:session): session closed for user root
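[note] The systemd_service task above (enabled=False, state=stopped) is the adoption step that retires the old tripleo unit. Its plain-systemctl equivalent, as a hedged sketch:

    import subprocess

    # stop + disable in one shot; check=False because the unit may
    # already be stopped or absent on re-runs.
    subprocess.run(
        ["systemctl", "disable", "--now",
         "tripleo_ceilometer_agent_ipmi.service"],
        check=False)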
Oct 11 02:09:50 compute-0 ceph-mon[191930]: pgmap v949: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v950: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:51 compute-0 sshd-session[386969]: Connection closed by invalid user user 121.227.153.123 port 37234 [preauth]
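[note] Interleaved with the deployment, 121.227.153.123 is cycling through root and "user" logins; each attempt leaves the same three-line pam_unix / Failed password / Connection closed signature. A minimal counter for triaging this noise, pure text processing over lines like those above:

    import re
    from collections import Counter

    FAIL = re.compile(r"Failed password for (?:invalid user )?(\S+) from (\S+)")

    def failed_logins(lines):
        # Tally (user, source IP) pairs from sshd "Failed password" records.
        hits = Counter()
        for line in lines:
            m = FAIL.search(line)
            if m:
                hits[m.groups()] += 1
        return hits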
Oct 11 02:09:51 compute-0 sudo[387364]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dreveunibpbvitsqxbmcrumxfeihaevc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148590.9976606-57-50109815881587/AnsiballZ_file.py'
Oct 11 02:09:51 compute-0 sudo[387364]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:52 compute-0 python3.9[387366]: ansible-ansible.builtin.file Invoked with path=/usr/lib/systemd/system/tripleo_ceilometer_agent_ipmi.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:09:52 compute-0 sudo[387364]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:52 compute-0 sshd-session[387367]: Invalid user user from 121.227.153.123 port 52864
Oct 11 02:09:52 compute-0 ceph-mon[191930]: pgmap v950: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:52 compute-0 sshd-session[387367]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:09:52 compute-0 sshd-session[387367]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:09:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v951: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:53 compute-0 sudo[387518]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nliwsghtsrdvhbnwkndtrgfooavdwznr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148592.3631186-65-205760757396853/AnsiballZ_file.py'
Oct 11 02:09:53 compute-0 sudo[387518]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:53 compute-0 python3.9[387520]: ansible-ansible.builtin.file Invoked with path=/etc/systemd/system/tripleo_ceilometer_agent_ipmi.service state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:09:53 compute-0 sudo[387518]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:09:54.829 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:09:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:09:54.829 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:09:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:09:54.829 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:09:54 compute-0 ceph-mon[191930]: pgmap v951: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:55 compute-0 sudo[387670]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lgmtjlzllesazggwgsfisomtyvqwgpzi ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148594.2833285-74-58370523570872/AnsiballZ_command.py'
Oct 11 02:09:55 compute-0 sudo[387670]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:55 compute-0 sshd-session[387367]: Failed password for invalid user user from 121.227.153.123 port 52864 ssh2
Oct 11 02:09:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:09:55 compute-0 python3.9[387672]: ansible-ansible.legacy.command Invoked with _raw_params=if systemctl is-active certmonger.service; then
                                               systemctl disable --now certmonger.service
                                               test -f /etc/systemd/system/certmonger.service || systemctl mask certmonger.service
                                             fi
                                              _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
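[note] The guard in that raw shell is subtle: masking creates the /etc/systemd/system/certmonger.service symlink itself, so the task masks only when no genuine override unit already sits at that path. The same logic in Python, as a non-authoritative rendering of the snippet above:

    import os.path
    import subprocess

    def retire_certmonger():
        # Mirror of the shell: act only if the unit is currently active.
        if subprocess.run(
                ["systemctl", "is-active", "certmonger.service"]).returncode != 0:
            return
        subprocess.run(["systemctl", "disable", "--now", "certmonger.service"])
        # Mask only when /etc holds no real unit file, since masking
        # would plant a /dev/null symlink at exactly that path.
        if not os.path.exists("/etc/systemd/system/certmonger.service"):
            subprocess.run(["systemctl", "mask", "certmonger.service"])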
Oct 11 02:09:55 compute-0 sudo[387670]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v952: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:09:56
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['images', 'cephfs.cephfs.meta', '.rgw.root', 'default.rgw.control', '.mgr', 'vms', 'cephfs.cephfs.data', 'volumes', 'default.rgw.meta', 'backups', 'default.rgw.log']
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
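[note] "prepared 0/10 changes" means the upmap optimizer evaluated its per-round budget of 10 moves and found the PGs already balanced within the 5% misplaced ceiling logged above. The same state is queryable; a sketch assuming the mgr's JSON formatting:

    import json
    import subprocess

    status = json.loads(subprocess.check_output(
        ["ceph", "balancer", "status", "--format", "json"]))
    print(status.get("mode"), status.get("active"))  # e.g. upmap True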
Oct 11 02:09:56 compute-0 python3.9[387824]: ansible-ansible.builtin.find Invoked with file_type=any hidden=True paths=['/var/lib/certmonger/requests'] patterns=[] read_whole_file=False age_stamp=mtime recurse=False follow=False get_checksum=False checksum_algorithm=sha1 use_regex=False exact_mode=True excludes=None contains=None age=None size=None depth=None mode=None encoding=None limit=None
Oct 11 02:09:56 compute-0 sshd-session[387367]: Connection closed by invalid user user 121.227.153.123 port 52864 [preauth]
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:09:56 compute-0 ceph-mon[191930]: pgmap v952: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:09:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:09:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v953: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:57 compute-0 sudo[387976]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-atpockrdyostirimzetuncspllcrcfyt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148596.9423006-92-38465978082193/AnsiballZ_systemd_service.py'
Oct 11 02:09:57 compute-0 sudo[387976]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:57 compute-0 sshd-session[387849]: Invalid user user from 121.227.153.123 port 52872
Oct 11 02:09:57 compute-0 python3.9[387978]: ansible-ansible.builtin.systemd_service Invoked with daemon_reload=True daemon_reexec=False scope=system no_block=False name=None state=None enabled=None force=None masked=None
Oct 11 02:09:57 compute-0 systemd[1]: Reloading.
Oct 11 02:09:58 compute-0 systemd-sysv-generator[388007]: SysV service '/etc/rc.d/init.d/network' lacks a native systemd unit file. Automatically generating a unit file for compatibility. Please update the package to include a native systemd unit file in order to make it safer and more robust.
Oct 11 02:09:58 compute-0 systemd-rc-local-generator[388000]: /etc/rc.d/rc.local is not marked executable, skipping.
Oct 11 02:09:58 compute-0 sshd-session[387849]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:09:58 compute-0 sshd-session[387849]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:09:58 compute-0 sudo[387976]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:58 compute-0 podman[388014]: 2025-10-11 02:09:58.535354945 +0000 UTC m=+0.094493617 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:09:58 compute-0 ceph-mon[191930]: pgmap v953: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:59 compute-0 sudo[388184]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xqkgnlaveobdgqfkhckxwwwodyqftwvp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148598.7150505-100-167671432768179/AnsiballZ_command.py'
Oct 11 02:09:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v954: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:09:59 compute-0 sudo[388184]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:09:59 compute-0 python3.9[388186]: ansible-ansible.legacy.command Invoked with cmd=/usr/bin/systemctl reset-failed tripleo_ceilometer_agent_ipmi.service _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True _raw_params=None argv=None chdir=None executable=None creates=None removes=None stdin=None
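[note] Together with the earlier stop/disable and the two unit-file removals, this reset-failed completes the tripleo unit's teardown. The whole sequence condensed as a sketch (unit name and paths taken from the ansible tasks above):

    import os
    import subprocess

    UNIT = "tripleo_ceilometer_agent_ipmi.service"

    for path in (f"/usr/lib/systemd/system/{UNIT}",
                 f"/etc/systemd/system/{UNIT}"):
        if os.path.exists(path):
            os.remove(path)                      # the two file tasks
    subprocess.run(["systemctl", "daemon-reload"], check=True)   # Reloading.
    subprocess.run(["systemctl", "reset-failed", UNIT], check=False)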
Oct 11 02:09:59 compute-0 podman[157119]: time="2025-10-11T02:09:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:09:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:09:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:09:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:09:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8518 "" "Go-http-client/1.1"
Oct 11 02:09:59 compute-0 sudo[388184]: pam_unix(sudo:session): session closed for user root
Oct 11 02:09:59 compute-0 podman[388188]: 2025-10-11 02:09:59.914773702 +0000 UTC m=+0.122559691 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, release=1755695350, architecture=x86_64, container_name=openstack_network_exporter, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-type=git, version=9.6, distribution-scope=public, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, managed_by=edpm_ansible, com.redhat.component=ubi9-minimal-container, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, config_id=edpm, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, name=ubi9-minimal, vendor=Red Hat, Inc., build-date=2025-08-20T13:12:41, io.openshift.tags=minimal rhel9, io.buildah.version=1.33.7, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://catalog.redhat.com/en/search?searchType=containers)
Oct 11 02:10:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:10:00 compute-0 sshd-session[387849]: Failed password for invalid user user from 121.227.153.123 port 52872 ssh2
Oct 11 02:10:00 compute-0 sudo[388358]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zichwhhlpuphskjjuytoaspwrbgefpiu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148600.1917129-109-950992837628/AnsiballZ_file.py'
Oct 11 02:10:00 compute-0 sudo[388358]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:00 compute-0 ceph-mon[191930]: pgmap v954: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:01 compute-0 python3.9[388360]: ansible-ansible.builtin.file Invoked with group=zuul mode=0750 owner=zuul path=/var/lib/openstack/config/telemetry-power-monitoring recurse=True setype=container_file_t state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:10:01 compute-0 sudo[388358]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:01 compute-0 openstack_network_exporter[374316]: ERROR   02:10:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:10:01 compute-0 openstack_network_exporter[374316]: ERROR   02:10:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:10:01 compute-0 openstack_network_exporter[374316]: ERROR   02:10:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:10:01 compute-0 openstack_network_exporter[374316]: ERROR   02:10:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:10:01 compute-0 openstack_network_exporter[374316]: ERROR   02:10:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
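[note] These exporter errors are benign here: ovn-northd does not run on a compute node at all, and the ovsdb-server control socket is likely just not mounted where the containerized exporter looks, so every appctl call fails to find its target. A trivial probe for what is discoverable (socket locations below are typical defaults, not confirmed from this host):

    import glob

    for pattern in ("/var/run/ovn/*.ctl", "/var/run/openvswitch/*.ctl"):
        print(pattern, "->", glob.glob(pattern))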
Oct 11 02:10:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v955: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:01 compute-0 sudo[388403]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:10:01 compute-0 sudo[388403]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:10:01 compute-0 sudo[388403]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:01 compute-0 sshd-session[387849]: Connection closed by invalid user user 121.227.153.123 port 52872 [preauth]
Oct 11 02:10:01 compute-0 sudo[388456]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:10:01 compute-0 sudo[388456]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:10:01 compute-0 sudo[388456]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:01 compute-0 sudo[388487]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:10:01 compute-0 sudo[388487]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:10:01 compute-0 sudo[388487]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:02 compute-0 sudo[388525]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:10:02 compute-0 sudo[388525]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:10:02 compute-0 ceph-mon[191930]: pgmap v955: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:02 compute-0 python3.9[388623]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:10:02 compute-0 sudo[388525]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:10:02 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:10:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:10:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:10:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:10:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:10:02 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 1bf51cd3-aaf8-4ee2-bead-6b325a9feb37 does not exist
Oct 11 02:10:02 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 5753ee19-1134-4454-b110-8fa4f8f6535f does not exist
Oct 11 02:10:02 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 6afc2d54-938a-4759-aa92-9ac3ebaf5182 does not exist
Oct 11 02:10:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:10:02 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:10:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:10:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:10:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:10:02 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:10:02 compute-0 sudo[388669]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:10:02 compute-0 sudo[388669]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:10:02 compute-0 sudo[388669]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:02 compute-0 sshd-session[388548]: Invalid user user from 121.227.153.123 port 58928
Oct 11 02:10:02 compute-0 sudo[388717]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:10:03 compute-0 sudo[388717]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:10:03 compute-0 sudo[388717]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:03 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:10:03 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:10:03 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:10:03 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:10:03 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:10:03 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:10:03 compute-0 sudo[388770]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:10:03 compute-0 sudo[388770]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:10:03 compute-0 sudo[388770]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:03 compute-0 sshd-session[388548]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:10:03 compute-0 sshd-session[388548]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:10:03 compute-0 sudo[388809]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:10:03 compute-0 sudo[388809]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
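[note] cephadm is preparing three pre-created LVs as OSDs. The wrapped command from the sudo line above boils down to the following; in reality cephadm executes it inside the ceph container and feeds credentials via --config-json on stdin, so this host-side rendering is a sketch only:

    import subprocess

    subprocess.run([
        "ceph-volume", "lvm", "batch", "--no-auto",
        "/dev/ceph_vg0/ceph_lv0", "/dev/ceph_vg1/ceph_lv1",
        "/dev/ceph_vg2/ceph_lv2",
        "--yes", "--no-systemd",   # cephadm, not systemd, owns the daemons
    ], check=True)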
Oct 11 02:10:03 compute-0 podman[388850]: 2025-10-11 02:10:03.477630425 +0000 UTC m=+0.156309731 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, config_id=edpm, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ceilometer_agent_ipmi)
Oct 11 02:10:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v956: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:03 compute-0 python3.9[388922]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-host-specific.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:10:03 compute-0 podman[388953]: 2025-10-11 02:10:03.904638949 +0000 UTC m=+0.087420162 container create 76c79fc7951ac8e84c1fa8fb0c20a155c7b71ca26174fcb466dd94adccc4e5a6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_babbage, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3)
Oct 11 02:10:03 compute-0 podman[388953]: 2025-10-11 02:10:03.868707781 +0000 UTC m=+0.051488994 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:10:03 compute-0 systemd[1]: Started libpod-conmon-76c79fc7951ac8e84c1fa8fb0c20a155c7b71ca26174fcb466dd94adccc4e5a6.scope.
Oct 11 02:10:04 compute-0 ceph-mon[191930]: pgmap v956: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:04 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:10:04 compute-0 podman[388953]: 2025-10-11 02:10:04.072744329 +0000 UTC m=+0.255525622 container init 76c79fc7951ac8e84c1fa8fb0c20a155c7b71ca26174fcb466dd94adccc4e5a6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_babbage, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=reef, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2)
Oct 11 02:10:04 compute-0 podman[388953]: 2025-10-11 02:10:04.092597181 +0000 UTC m=+0.275378414 container start 76c79fc7951ac8e84c1fa8fb0c20a155c7b71ca26174fcb466dd94adccc4e5a6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_babbage, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 02:10:04 compute-0 podman[388953]: 2025-10-11 02:10:04.100120748 +0000 UTC m=+0.282902051 container attach 76c79fc7951ac8e84c1fa8fb0c20a155c7b71ca26174fcb466dd94adccc4e5a6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_babbage, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:10:04 compute-0 admiring_babbage[388968]: 167 167
Oct 11 02:10:04 compute-0 podman[388953]: 2025-10-11 02:10:04.107840803 +0000 UTC m=+0.290622046 container died 76c79fc7951ac8e84c1fa8fb0c20a155c7b71ca26174fcb466dd94adccc4e5a6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_babbage, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 02:10:04 compute-0 systemd[1]: libpod-76c79fc7951ac8e84c1fa8fb0c20a155c7b71ca26174fcb466dd94adccc4e5a6.scope: Deactivated successfully.
Oct 11 02:10:04 compute-0 systemd[1]: var-lib-containers-storage-overlay-08a0914185b071567c44c7c6c7ff9e6a1d5f90cad5132c092a0c2840cacd9f97-merged.mount: Deactivated successfully.
Oct 11 02:10:04 compute-0 podman[388953]: 2025-10-11 02:10:04.186372109 +0000 UTC m=+0.369153322 container remove 76c79fc7951ac8e84c1fa8fb0c20a155c7b71ca26174fcb466dd94adccc4e5a6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_babbage, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:10:04 compute-0 systemd[1]: libpod-conmon-76c79fc7951ac8e84c1fa8fb0c20a155c7b71ca26174fcb466dd94adccc4e5a6.scope: Deactivated successfully.
Oct 11 02:10:04 compute-0 podman[388992]: 2025-10-11 02:10:04.484575839 +0000 UTC m=+0.096376874 container create 30369ce320372ae598bfecaf5550eb4d3ed9a6094e0969d1583ee16d073840d4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_booth, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:10:04 compute-0 podman[388992]: 2025-10-11 02:10:04.448095007 +0000 UTC m=+0.059896102 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:10:04 compute-0 systemd[1]: Started libpod-conmon-30369ce320372ae598bfecaf5550eb4d3ed9a6094e0969d1583ee16d073840d4.scope.
Oct 11 02:10:04 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:10:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/643fa5a0083fc67d22d5ae11491a2b09bbc662ee0df4c755c81eb5bf7679648e/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:10:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/643fa5a0083fc67d22d5ae11491a2b09bbc662ee0df4c755c81eb5bf7679648e/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:10:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/643fa5a0083fc67d22d5ae11491a2b09bbc662ee0df4c755c81eb5bf7679648e/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:10:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/643fa5a0083fc67d22d5ae11491a2b09bbc662ee0df4c755c81eb5bf7679648e/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:10:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/643fa5a0083fc67d22d5ae11491a2b09bbc662ee0df4c755c81eb5bf7679648e/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:10:04 compute-0 podman[388992]: 2025-10-11 02:10:04.674348509 +0000 UTC m=+0.286149544 container init 30369ce320372ae598bfecaf5550eb4d3ed9a6094e0969d1583ee16d073840d4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_booth, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 02:10:04 compute-0 podman[388992]: 2025-10-11 02:10:04.690966419 +0000 UTC m=+0.302767434 container start 30369ce320372ae598bfecaf5550eb4d3ed9a6094e0969d1583ee16d073840d4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_booth, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:10:04 compute-0 podman[388992]: 2025-10-11 02:10:04.696093449 +0000 UTC m=+0.307894494 container attach 30369ce320372ae598bfecaf5550eb4d3ed9a6094e0969d1583ee16d073840d4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_booth, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 02:10:04 compute-0 sshd-session[388548]: Failed password for invalid user user from 121.227.153.123 port 58928 ssh2
Oct 11 02:10:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:10:05 compute-0 sshd-session[388548]: Connection closed by invalid user user 121.227.153.123 port 58928 [preauth]
Oct 11 02:10:05 compute-0 python3.9[389086]: ansible-ansible.legacy.file Invoked with mode=0644 setype=container_file_t dest=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-host-specific.conf _original_basename=ceilometer-host-specific.conf.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-host-specific.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:10:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v957: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:06 compute-0 funny_booth[389008]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:10:06 compute-0 funny_booth[389008]: --> relative data size: 1.0
Oct 11 02:10:06 compute-0 funny_booth[389008]: --> All data devices are unavailable
Oct 11 02:10:06 compute-0 systemd[1]: libpod-30369ce320372ae598bfecaf5550eb4d3ed9a6094e0969d1583ee16d073840d4.scope: Deactivated successfully.
Oct 11 02:10:06 compute-0 systemd[1]: libpod-30369ce320372ae598bfecaf5550eb4d3ed9a6094e0969d1583ee16d073840d4.scope: Consumed 1.391s CPU time.
Oct 11 02:10:06 compute-0 podman[388992]: 2025-10-11 02:10:06.155302996 +0000 UTC m=+1.767104031 container died 30369ce320372ae598bfecaf5550eb4d3ed9a6094e0969d1583ee16d073840d4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_booth, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:10:06 compute-0 systemd[1]: var-lib-containers-storage-overlay-643fa5a0083fc67d22d5ae11491a2b09bbc662ee0df4c755c81eb5bf7679648e-merged.mount: Deactivated successfully.
Oct 11 02:10:06 compute-0 podman[388992]: 2025-10-11 02:10:06.251483142 +0000 UTC m=+1.863284177 container remove 30369ce320372ae598bfecaf5550eb4d3ed9a6094e0969d1583ee16d073840d4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_booth, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:10:06 compute-0 systemd[1]: libpod-conmon-30369ce320372ae598bfecaf5550eb4d3ed9a6094e0969d1583ee16d073840d4.scope: Deactivated successfully.
Oct 11 02:10:06 compute-0 sudo[388809]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:06 compute-0 sshd-session[389087]: Invalid user user from 121.227.153.123 port 58940
Oct 11 02:10:06 compute-0 sudo[389223]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:10:06 compute-0 sudo[389223]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:10:06 compute-0 sudo[389223]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:10:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:10:06 compute-0 ceph-mon[191930]: pgmap v957: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:06 compute-0 sudo[389282]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:10:06 compute-0 sudo[389319]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lmfzvaqwthbsgabkyqaqjcgmfpwtqpjw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148605.8531144-140-18540397480106/AnsiballZ_getent.py'
Oct 11 02:10:06 compute-0 sudo[389282]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:10:06 compute-0 sudo[389319]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:06 compute-0 sudo[389282]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:06 compute-0 sshd-session[389087]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:10:06 compute-0 sshd-session[389087]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:10:06 compute-0 sudo[389326]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:10:06 compute-0 sudo[389326]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:10:06 compute-0 sudo[389326]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:06 compute-0 python3.9[389325]: ansible-ansible.builtin.getent Invoked with database=passwd key=ceilometer fail_key=True service=None split=None
Oct 11 02:10:06 compute-0 sudo[389319]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:06 compute-0 sudo[389351]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:10:06 compute-0 sudo[389351]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:10:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v958: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:07 compute-0 podman[389440]: 2025-10-11 02:10:07.57818716 +0000 UTC m=+0.097171343 container create 42aa6e839b2bab4e88ae5d56ecea4afe8551d40f3b54ce852899a939e11548cb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_heisenberg, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 02:10:07 compute-0 podman[389440]: 2025-10-11 02:10:07.541009342 +0000 UTC m=+0.059993575 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:10:07 compute-0 systemd[1]: Started libpod-conmon-42aa6e839b2bab4e88ae5d56ecea4afe8551d40f3b54ce852899a939e11548cb.scope.
Oct 11 02:10:07 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:10:07 compute-0 podman[389440]: 2025-10-11 02:10:07.727758259 +0000 UTC m=+0.246742492 container init 42aa6e839b2bab4e88ae5d56ecea4afe8551d40f3b54ce852899a939e11548cb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_heisenberg, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS)
Oct 11 02:10:07 compute-0 podman[389440]: 2025-10-11 02:10:07.748156776 +0000 UTC m=+0.267140959 container start 42aa6e839b2bab4e88ae5d56ecea4afe8551d40f3b54ce852899a939e11548cb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_heisenberg, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2)
Oct 11 02:10:07 compute-0 podman[389440]: 2025-10-11 02:10:07.755851296 +0000 UTC m=+0.274835529 container attach 42aa6e839b2bab4e88ae5d56ecea4afe8551d40f3b54ce852899a939e11548cb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_heisenberg, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0)
Oct 11 02:10:07 compute-0 objective_heisenberg[389456]: 167 167
Oct 11 02:10:07 compute-0 systemd[1]: libpod-42aa6e839b2bab4e88ae5d56ecea4afe8551d40f3b54ce852899a939e11548cb.scope: Deactivated successfully.
Oct 11 02:10:07 compute-0 podman[389440]: 2025-10-11 02:10:07.763817505 +0000 UTC m=+0.282801688 container died 42aa6e839b2bab4e88ae5d56ecea4afe8551d40f3b54ce852899a939e11548cb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_heisenberg, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:10:07 compute-0 systemd[1]: var-lib-containers-storage-overlay-564c41b22dffa70fff5a15ba08b5059ce7d6bf06fa4f3fc62133486ebff60435-merged.mount: Deactivated successfully.
Oct 11 02:10:07 compute-0 podman[389440]: 2025-10-11 02:10:07.840155691 +0000 UTC m=+0.359139874 container remove 42aa6e839b2bab4e88ae5d56ecea4afe8551d40f3b54ce852899a939e11548cb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_heisenberg, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:10:07 compute-0 systemd[1]: libpod-conmon-42aa6e839b2bab4e88ae5d56ecea4afe8551d40f3b54ce852899a939e11548cb.scope: Deactivated successfully.
Oct 11 02:10:08 compute-0 podman[389479]: 2025-10-11 02:10:08.097980636 +0000 UTC m=+0.085054968 container create a6b90b1e9904caf61131598a33b1fbe51b91e111549b17c6991f51b30d21ba38 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_cohen, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:10:08 compute-0 podman[389479]: 2025-10-11 02:10:08.060505868 +0000 UTC m=+0.047580270 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:10:08 compute-0 systemd[1]: Started libpod-conmon-a6b90b1e9904caf61131598a33b1fbe51b91e111549b17c6991f51b30d21ba38.scope.
Oct 11 02:10:08 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:10:08 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/489b169f10b45f6e79a345126037b928171bab8e72f5e206514514db52484429/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:10:08 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/489b169f10b45f6e79a345126037b928171bab8e72f5e206514514db52484429/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:10:08 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/489b169f10b45f6e79a345126037b928171bab8e72f5e206514514db52484429/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:10:08 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/489b169f10b45f6e79a345126037b928171bab8e72f5e206514514db52484429/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:10:08 compute-0 podman[389479]: 2025-10-11 02:10:08.271949538 +0000 UTC m=+0.259023900 container init a6b90b1e9904caf61131598a33b1fbe51b91e111549b17c6991f51b30d21ba38 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_cohen, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0)
Oct 11 02:10:08 compute-0 podman[389479]: 2025-10-11 02:10:08.296289962 +0000 UTC m=+0.283364284 container start a6b90b1e9904caf61131598a33b1fbe51b91e111549b17c6991f51b30d21ba38 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_cohen, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:10:08 compute-0 podman[389479]: 2025-10-11 02:10:08.300690393 +0000 UTC m=+0.287764755 container attach a6b90b1e9904caf61131598a33b1fbe51b91e111549b17c6991f51b30d21ba38 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_cohen, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:10:08 compute-0 sshd-session[389087]: Failed password for invalid user user from 121.227.153.123 port 58940 ssh2
Oct 11 02:10:08 compute-0 ceph-mon[191930]: pgmap v958: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:08 compute-0 sshd-session[389087]: Connection closed by invalid user user 121.227.153.123 port 58940 [preauth]
Oct 11 02:10:08 compute-0 python3.9[389625]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:10:09 compute-0 elastic_cohen[389521]: {
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:     "0": [
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:         {
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "devices": [
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "/dev/loop3"
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             ],
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "lv_name": "ceph_lv0",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "lv_size": "21470642176",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "name": "ceph_lv0",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "tags": {
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.cluster_name": "ceph",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.crush_device_class": "",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.encrypted": "0",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.osd_id": "0",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.type": "block",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.vdo": "0"
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             },
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "type": "block",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "vg_name": "ceph_vg0"
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:         }
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:     ],
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:     "1": [
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:         {
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "devices": [
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "/dev/loop4"
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             ],
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "lv_name": "ceph_lv1",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "lv_size": "21470642176",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "name": "ceph_lv1",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "tags": {
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.cluster_name": "ceph",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.crush_device_class": "",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.encrypted": "0",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.osd_id": "1",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.type": "block",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.vdo": "0"
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             },
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "type": "block",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "vg_name": "ceph_vg1"
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:         }
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:     ],
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:     "2": [
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:         {
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "devices": [
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "/dev/loop5"
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             ],
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "lv_name": "ceph_lv2",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "lv_size": "21470642176",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "name": "ceph_lv2",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "tags": {
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.cluster_name": "ceph",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.crush_device_class": "",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.encrypted": "0",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.osd_id": "2",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.type": "block",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:                 "ceph.vdo": "0"
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             },
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "type": "block",
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:             "vg_name": "ceph_vg2"
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:         }
Oct 11 02:10:09 compute-0 elastic_cohen[389521]:     ]
Oct 11 02:10:09 compute-0 elastic_cohen[389521]: }
Oct 11 02:10:09 compute-0 systemd[1]: libpod-a6b90b1e9904caf61131598a33b1fbe51b91e111549b17c6991f51b30d21ba38.scope: Deactivated successfully.
Oct 11 02:10:09 compute-0 podman[389479]: 2025-10-11 02:10:09.140642159 +0000 UTC m=+1.127716481 container died a6b90b1e9904caf61131598a33b1fbe51b91e111549b17c6991f51b30d21ba38 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_cohen, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507)
Oct 11 02:10:09 compute-0 systemd[1]: var-lib-containers-storage-overlay-489b169f10b45f6e79a345126037b928171bab8e72f5e206514514db52484429-merged.mount: Deactivated successfully.
Oct 11 02:10:09 compute-0 podman[389479]: 2025-10-11 02:10:09.235021169 +0000 UTC m=+1.222095531 container remove a6b90b1e9904caf61131598a33b1fbe51b91e111549b17c6991f51b30d21ba38 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_cohen, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 02:10:09 compute-0 systemd[1]: libpod-conmon-a6b90b1e9904caf61131598a33b1fbe51b91e111549b17c6991f51b30d21ba38.scope: Deactivated successfully.
Oct 11 02:10:09 compute-0 sudo[389351]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:09 compute-0 sudo[389708]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:10:09 compute-0 sudo[389708]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:10:09 compute-0 sudo[389708]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v959: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:09 compute-0 sudo[389744]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:10:09 compute-0 sudo[389744]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:10:09 compute-0 python3.9[389729]: ansible-ansible.legacy.file Invoked with mode=0640 dest=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer.conf _original_basename=ceilometer.conf recurse=False state=file path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:10:09 compute-0 sudo[389744]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:09 compute-0 sudo[389769]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:10:09 compute-0 sudo[389769]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:10:09 compute-0 sudo[389769]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:09 compute-0 sudo[389817]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:10:09 compute-0 sudo[389817]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:10:09 compute-0 sshd-session[389628]: Invalid user user from 121.227.153.123 port 58956
Oct 11 02:10:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:10:10 compute-0 sshd-session[389628]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:10:10 compute-0 sshd-session[389628]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:10:10 compute-0 podman[389991]: 2025-10-11 02:10:10.303224849 +0000 UTC m=+0.069102759 container create cdda84fa8abe034ae6ddc40c6ceb84d7ebaa19d95cee43e58c720fd4e2f8ccf5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_knuth, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2)
Oct 11 02:10:10 compute-0 systemd[1]: Started libpod-conmon-cdda84fa8abe034ae6ddc40c6ceb84d7ebaa19d95cee43e58c720fd4e2f8ccf5.scope.
Oct 11 02:10:10 compute-0 podman[389991]: 2025-10-11 02:10:10.283059265 +0000 UTC m=+0.048937205 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:10:10 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:10:10 compute-0 podman[389991]: 2025-10-11 02:10:10.438300849 +0000 UTC m=+0.204178849 container init cdda84fa8abe034ae6ddc40c6ceb84d7ebaa19d95cee43e58c720fd4e2f8ccf5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_knuth, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:10:10 compute-0 podman[389991]: 2025-10-11 02:10:10.456554102 +0000 UTC m=+0.222432052 container start cdda84fa8abe034ae6ddc40c6ceb84d7ebaa19d95cee43e58c720fd4e2f8ccf5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_knuth, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, OSD_FLAVOR=default)
Oct 11 02:10:10 compute-0 sweet_knuth[390025]: 167 167
Oct 11 02:10:10 compute-0 systemd[1]: libpod-cdda84fa8abe034ae6ddc40c6ceb84d7ebaa19d95cee43e58c720fd4e2f8ccf5.scope: Deactivated successfully.
Oct 11 02:10:10 compute-0 podman[389991]: 2025-10-11 02:10:10.463145182 +0000 UTC m=+0.229023152 container attach cdda84fa8abe034ae6ddc40c6ceb84d7ebaa19d95cee43e58c720fd4e2f8ccf5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_knuth, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:10:10 compute-0 python3.9[390019]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/polling.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:10:10 compute-0 podman[389991]: 2025-10-11 02:10:10.473659898 +0000 UTC m=+0.239537918 container died cdda84fa8abe034ae6ddc40c6ceb84d7ebaa19d95cee43e58c720fd4e2f8ccf5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_knuth, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 02:10:10 compute-0 systemd[1]: var-lib-containers-storage-overlay-c2833f6471f5d0000f5cff8b5f6a3cda62ab6652affc1e27dd64529395f02645-merged.mount: Deactivated successfully.
Oct 11 02:10:10 compute-0 podman[389991]: 2025-10-11 02:10:10.545575961 +0000 UTC m=+0.311453861 container remove cdda84fa8abe034ae6ddc40c6ceb84d7ebaa19d95cee43e58c720fd4e2f8ccf5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_knuth, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:10:10 compute-0 systemd[1]: libpod-conmon-cdda84fa8abe034ae6ddc40c6ceb84d7ebaa19d95cee43e58c720fd4e2f8ccf5.scope: Deactivated successfully.
Oct 11 02:10:10 compute-0 ceph-mon[191930]: pgmap v959: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:10 compute-0 podman[390080]: 2025-10-11 02:10:10.767684148 +0000 UTC m=+0.061043559 container create 801edad14afc09c36526dc06952c79a84d0f283e2855748b2ac62fc1bf1b30b4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_dhawan, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 02:10:10 compute-0 systemd[1]: Started libpod-conmon-801edad14afc09c36526dc06952c79a84d0f283e2855748b2ac62fc1bf1b30b4.scope.
Oct 11 02:10:10 compute-0 podman[390080]: 2025-10-11 02:10:10.746095879 +0000 UTC m=+0.039455300 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:10:10 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:10:10 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/006c27193db4361a7d0fb6a0584131741ef10f645bd5b7fa938bea51635d1e56/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:10:10 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/006c27193db4361a7d0fb6a0584131741ef10f645bd5b7fa938bea51635d1e56/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:10:10 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/006c27193db4361a7d0fb6a0584131741ef10f645bd5b7fa938bea51635d1e56/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:10:10 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/006c27193db4361a7d0fb6a0584131741ef10f645bd5b7fa938bea51635d1e56/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:10:10 compute-0 podman[390080]: 2025-10-11 02:10:10.884493172 +0000 UTC m=+0.177852603 container init 801edad14afc09c36526dc06952c79a84d0f283e2855748b2ac62fc1bf1b30b4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_dhawan, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 02:10:10 compute-0 podman[390080]: 2025-10-11 02:10:10.908277401 +0000 UTC m=+0.201636802 container start 801edad14afc09c36526dc06952c79a84d0f283e2855748b2ac62fc1bf1b30b4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_dhawan, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 02:10:10 compute-0 podman[390080]: 2025-10-11 02:10:10.912333805 +0000 UTC m=+0.205693206 container attach 801edad14afc09c36526dc06952c79a84d0f283e2855748b2ac62fc1bf1b30b4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_dhawan, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:10:11 compute-0 python3.9[390141]: ansible-ansible.legacy.file Invoked with mode=0640 dest=/var/lib/openstack/config/telemetry-power-monitoring/polling.yaml _original_basename=polling.yaml recurse=False state=file path=/var/lib/openstack/config/telemetry-power-monitoring/polling.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:10:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v960: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:12 compute-0 python3.9[390304]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/custom.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:10:12 compute-0 funny_dhawan[390136]: {
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:         "osd_id": 1,
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:         "type": "bluestore"
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:     },
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:         "osd_id": 2,
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:         "type": "bluestore"
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:     },
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:         "osd_id": 0,
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:         "type": "bluestore"
Oct 11 02:10:12 compute-0 funny_dhawan[390136]:     }
Oct 11 02:10:12 compute-0 funny_dhawan[390136]: }
Oct 11 02:10:12 compute-0 sshd-session[389628]: Failed password for invalid user user from 121.227.153.123 port 58956 ssh2
Oct 11 02:10:12 compute-0 podman[390080]: 2025-10-11 02:10:12.158542368 +0000 UTC m=+1.451901799 container died 801edad14afc09c36526dc06952c79a84d0f283e2855748b2ac62fc1bf1b30b4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_dhawan, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 02:10:12 compute-0 systemd[1]: libpod-801edad14afc09c36526dc06952c79a84d0f283e2855748b2ac62fc1bf1b30b4.scope: Deactivated successfully.
Oct 11 02:10:12 compute-0 systemd[1]: libpod-801edad14afc09c36526dc06952c79a84d0f283e2855748b2ac62fc1bf1b30b4.scope: Consumed 1.235s CPU time.
Oct 11 02:10:12 compute-0 systemd[1]: var-lib-containers-storage-overlay-006c27193db4361a7d0fb6a0584131741ef10f645bd5b7fa938bea51635d1e56-merged.mount: Deactivated successfully.
Oct 11 02:10:12 compute-0 podman[390080]: 2025-10-11 02:10:12.25680899 +0000 UTC m=+1.550168401 container remove 801edad14afc09c36526dc06952c79a84d0f283e2855748b2ac62fc1bf1b30b4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_dhawan, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 02:10:12 compute-0 systemd[1]: libpod-conmon-801edad14afc09c36526dc06952c79a84d0f283e2855748b2ac62fc1bf1b30b4.scope: Deactivated successfully.
Oct 11 02:10:12 compute-0 sudo[389817]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:10:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:10:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:10:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:10:12 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 216fd76e-fc36-43f6-89d1-b65a309aed67 does not exist
Oct 11 02:10:12 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 6bf95ffd-f58c-4901-9fcb-248d0c85da34 does not exist
Oct 11 02:10:12 compute-0 sudo[390389]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:10:12 compute-0 sudo[390389]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:10:12 compute-0 sudo[390389]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:12 compute-0 sudo[390434]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:10:12 compute-0 sudo[390434]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:10:12 compute-0 sudo[390434]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:12 compute-0 python3.9[390429]: ansible-ansible.legacy.file Invoked with mode=0640 dest=/var/lib/openstack/config/telemetry-power-monitoring/custom.conf _original_basename=custom.conf recurse=False state=file path=/var/lib/openstack/config/telemetry-power-monitoring/custom.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:10:12 compute-0 ceph-mon[191930]: pgmap v960: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:10:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:10:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v961: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:13 compute-0 python3.9[390608]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.crt follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:10:13 compute-0 sshd-session[389628]: Connection closed by invalid user user 121.227.153.123 port 58956 [preauth]
Oct 11 02:10:14 compute-0 ceph-mon[191930]: pgmap v961: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:14 compute-0 python3.9[390762]: ansible-ansible.builtin.stat Invoked with path=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.key follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:10:14 compute-0 podman[390764]: 2025-10-11 02:10:14.819640954 +0000 UTC m=+0.114874271 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.4, org.label-schema.build-date=20251007)
Oct 11 02:10:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:10:15 compute-0 sshd-session[390687]: Invalid user user from 121.227.153.123 port 42292
Oct 11 02:10:15 compute-0 sshd-session[390687]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:10:15 compute-0 sshd-session[390687]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:10:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v962: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:15 compute-0 podman[390909]: 2025-10-11 02:10:15.599017096 +0000 UTC m=+0.109630435 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:10:15 compute-0 podman[390908]: 2025-10-11 02:10:15.633697082 +0000 UTC m=+0.145136464 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:10:15 compute-0 podman[390910]: 2025-10-11 02:10:15.647431488 +0000 UTC m=+0.150888960 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.29.0, vendor=Red Hat, Inc., architecture=x86_64, name=ubi9, build-date=2024-09-18T21:23:30, io.openshift.expose-services=, maintainer=Red Hat, Inc., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, release-0.7.12=, summary=Provides the latest release of Red Hat Universal Base Image 9., io.k8s.display-name=Red Hat Universal Base Image 9, vcs-type=git, release=1214.1726694543, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, version=9.4, com.redhat.component=ubi9-container, container_name=kepler, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, managed_by=edpm_ansible, config_id=edpm)
Oct 11 02:10:15 compute-0 python3.9[390965]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:10:15 compute-0 podman[390987]: 2025-10-11 02:10:15.787182456 +0000 UTC m=+0.160747717 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']})
Oct 11 02:10:16 compute-0 python3.9[391091]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json _original_basename=ceilometer-agent-ipmi.json.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:10:16 compute-0 ceph-mon[191930]: pgmap v962: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:17 compute-0 sshd-session[390687]: Failed password for invalid user user from 121.227.153.123 port 42292 ssh2
Oct 11 02:10:17 compute-0 python3.9[391241]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-host-specific.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:10:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v963: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:18 compute-0 python3.9[391317]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-host-specific.conf _original_basename=ceilometer-host-specific.conf.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-host-specific.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:10:18 compute-0 ceph-mon[191930]: pgmap v963: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:18 compute-0 sshd-session[390687]: Connection closed by invalid user user 121.227.153.123 port 42292 [preauth]
Oct 11 02:10:19 compute-0 podman[391400]: 2025-10-11 02:10:19.237993446 +0000 UTC m=+0.120432120 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, tcib_managed=true, container_name=multipathd, org.label-schema.vendor=CentOS, config_id=multipathd, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']})
Oct 11 02:10:19 compute-0 podman[391407]: 2025-10-11 02:10:19.241823279 +0000 UTC m=+0.116683158 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid, container_name=iscsid, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009)
Oct 11 02:10:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v964: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:19 compute-0 python3.9[391503]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_agent_ipmi.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:10:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:10:20 compute-0 sshd-session[391446]: Invalid user user from 121.227.153.123 port 45182
Oct 11 02:10:20 compute-0 python3.9[391580]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_agent_ipmi.json _original_basename=ceilometer_agent_ipmi.json.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_agent_ipmi.json force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:10:20 compute-0 sshd-session[391446]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:10:20 compute-0 sshd-session[391446]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:10:20 compute-0 ceph-mon[191930]: pgmap v964: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v965: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:22 compute-0 python3.9[391730]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:10:22 compute-0 sshd-session[391446]: Failed password for invalid user user from 121.227.153.123 port 45182 ssh2
Oct 11 02:10:22 compute-0 ceph-mon[191930]: pgmap v965: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:22 compute-0 python3.9[391806]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml _original_basename=ceilometer_prom_exporter.yaml.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:10:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v966: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:23 compute-0 sshd-session[391446]: Connection closed by invalid user user 121.227.153.123 port 45182 [preauth]
Oct 11 02:10:24 compute-0 python3.9[391956]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/firewall.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:10:24 compute-0 ceph-mon[191930]: pgmap v966: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:24 compute-0 python3.9[392034]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry-power-monitoring/firewall.yaml _original_basename=firewall.yaml.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry-power-monitoring/firewall.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:10:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:10:25 compute-0 sshd-session[391982]: Invalid user user from 121.227.153.123 port 45188
Oct 11 02:10:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v967: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:25 compute-0 sshd-session[391982]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:10:25 compute-0 sshd-session[391982]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:10:25 compute-0 python3.9[392184]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/kepler.json follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:10:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:10:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:10:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:10:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:10:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:10:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:10:26 compute-0 python3.9[392260]: ansible-ansible.legacy.file Invoked with mode=420 dest=/var/lib/openstack/config/telemetry-power-monitoring/kepler.json _original_basename=kepler.json.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry-power-monitoring/kepler.json force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:10:26 compute-0 ceph-mon[191930]: pgmap v967: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:27 compute-0 sshd-session[391982]: Failed password for invalid user user from 121.227.153.123 port 45188 ssh2
Oct 11 02:10:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v968: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:27 compute-0 sshd-session[391982]: Connection closed by invalid user user 121.227.153.123 port 45188 [preauth]
Oct 11 02:10:27 compute-0 python3.9[392410]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:10:28 compute-0 python3.9[392488]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml _original_basename=ceilometer_prom_exporter.yaml.j2 recurse=False state=file path=/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:10:28 compute-0 ceph-mon[191930]: pgmap v968: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:29 compute-0 podman[392588]: 2025-10-11 02:10:29.220279886 +0000 UTC m=+0.110205243 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:10:29 compute-0 sudo[392661]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gyirsdvvvqhhdevhfjgrnuwgocjmqumy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148628.7545054-298-94961436845511/AnsiballZ_file.py'
Oct 11 02:10:29 compute-0 sudo[392661]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:29 compute-0 sshd-session[392419]: Invalid user user from 121.227.153.123 port 45198
Oct 11 02:10:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v969: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:29 compute-0 python3.9[392663]: ansible-ansible.builtin.file Invoked with group=ceilometer mode=0644 owner=ceilometer path=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.crt recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False state=None _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:10:29 compute-0 sshd-session[392419]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:10:29 compute-0 sshd-session[392419]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:10:29 compute-0 sudo[392661]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:29 compute-0 podman[157119]: time="2025-10-11T02:10:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:10:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:10:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:10:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:10:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8529 "" "Go-http-client/1.1"
Oct 11 02:10:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:10:30 compute-0 podman[392712]: 2025-10-11 02:10:30.244951783 +0000 UTC m=+0.133030575 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.expose-services=, build-date=2025-08-20T13:12:41, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=minimal rhel9, version=9.6, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, distribution-scope=public, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-minimal-container, io.buildah.version=1.33.7, name=ubi9-minimal, vendor=Red Hat, Inc., managed_by=edpm_ansible, release=1755695350, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., architecture=x86_64, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vcs-type=git, maintainer=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, container_name=openstack_network_exporter)
Oct 11 02:10:30 compute-0 sudo[392833]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ddofzkafavdgmaeslebbsfqnhftmlhad ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148630.0437582-306-235115232207085/AnsiballZ_file.py'
Oct 11 02:10:30 compute-0 sudo[392833]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:30 compute-0 ceph-mon[191930]: pgmap v969: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:30 compute-0 python3.9[392835]: ansible-ansible.builtin.file Invoked with group=ceilometer mode=0644 owner=ceilometer path=/var/lib/openstack/certs/telemetry-power-monitoring/default/tls.key recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False state=None _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:10:30 compute-0 sudo[392833]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:31 compute-0 openstack_network_exporter[374316]: ERROR   02:10:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:10:31 compute-0 openstack_network_exporter[374316]: ERROR   02:10:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:10:31 compute-0 openstack_network_exporter[374316]: ERROR   02:10:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:10:31 compute-0 openstack_network_exporter[374316]: ERROR   02:10:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:10:31 compute-0 openstack_network_exporter[374316]: ERROR   02:10:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:10:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v970: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:31 compute-0 sshd-session[392419]: Failed password for invalid user user from 121.227.153.123 port 45198 ssh2
Oct 11 02:10:32 compute-0 ceph-mon[191930]: pgmap v970: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:32 compute-0 sudo[392985]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zcybbagdkbhyuwbmimeyjeizvzahfdqa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148631.2280428-314-243964012893525/AnsiballZ_file.py'
Oct 11 02:10:32 compute-0 sudo[392985]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:33 compute-0 python3.9[392987]: ansible-ansible.builtin.file Invoked with group=zuul mode=0755 owner=zuul path=/var/lib/openstack/healthchecks setype=container_file_t state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:10:33 compute-0 sudo[392985]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:33 compute-0 sshd-session[392419]: Connection closed by invalid user user 121.227.153.123 port 45198 [preauth]
Oct 11 02:10:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v971: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:34 compute-0 sudo[393154]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bbluricwdgoexiulelfkhubgpannkmww ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148633.3844056-322-194281277108270/AnsiballZ_stat.py'
Oct 11 02:10:34 compute-0 podman[393113]: 2025-10-11 02:10:34.042213033 +0000 UTC m=+0.138181954 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, config_id=edpm, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 02:10:34 compute-0 sudo[393154]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:34 compute-0 python3.9[393159]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/ceilometer_agent_ipmi/healthcheck follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:10:34 compute-0 sudo[393154]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:34 compute-0 ceph-mon[191930]: pgmap v971: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:10:35 compute-0 sudo[393235]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fuqmjmirwpfborwepzehqkzkxdfpnqqu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148633.3844056-322-194281277108270/AnsiballZ_file.py'
Oct 11 02:10:35 compute-0 sudo[393235]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:35 compute-0 sshd-session[393064]: Invalid user user from 121.227.153.123 port 46012
Oct 11 02:10:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v972: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:35 compute-0 python3.9[393237]: ansible-ansible.legacy.file Invoked with group=zuul mode=0700 owner=zuul setype=container_file_t dest=/var/lib/openstack/healthchecks/ceilometer_agent_ipmi/ _original_basename=healthcheck recurse=False state=file path=/var/lib/openstack/healthchecks/ceilometer_agent_ipmi/ force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:10:35 compute-0 sudo[393235]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:35 compute-0 sshd-session[393064]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:10:35 compute-0 sshd-session[393064]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:10:36 compute-0 sudo[393311]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ketxjlymfmiljvcwvkvycjhsyokgqmpc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148633.3844056-322-194281277108270/AnsiballZ_stat.py'
Oct 11 02:10:36 compute-0 sudo[393311]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:36 compute-0 python3.9[393313]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/ceilometer_agent_ipmi/healthcheck.future follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:10:36 compute-0 sudo[393311]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:36 compute-0 ceph-mon[191930]: pgmap v972: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:36 compute-0 sudo[393389]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pmmmxudhfhwrphviftqasezonyvpwkuq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148633.3844056-322-194281277108270/AnsiballZ_file.py'
Oct 11 02:10:36 compute-0 sudo[393389]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:36 compute-0 nova_compute[356901]: 2025-10-11 02:10:36.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._run_pending_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:10:36 compute-0 nova_compute[356901]: 2025-10-11 02:10:36.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11145
Oct 11 02:10:36 compute-0 nova_compute[356901]: 2025-10-11 02:10:36.927 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] There are 0 instances to clean _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11154
Oct 11 02:10:36 compute-0 nova_compute[356901]: 2025-10-11 02:10:36.930 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_incomplete_migrations run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:10:36 compute-0 nova_compute[356901]: 2025-10-11 02:10:36.931 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances with incomplete migration  _cleanup_incomplete_migrations /usr/lib/python3.9/site-packages/nova/compute/manager.py:11183
Oct 11 02:10:36 compute-0 nova_compute[356901]: 2025-10-11 02:10:36.948 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_expired_console_auth_tokens run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:10:37 compute-0 python3.9[393391]: ansible-ansible.legacy.file Invoked with group=zuul mode=0700 owner=zuul setype=container_file_t dest=/var/lib/openstack/healthchecks/ceilometer_agent_ipmi/ _original_basename=healthcheck.future recurse=False state=file path=/var/lib/openstack/healthchecks/ceilometer_agent_ipmi/ force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:10:37 compute-0 sudo[393389]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v973: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:37 compute-0 sudo[393541]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zmkyrepjmdcqtysykmiywpztvggotgnj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148637.3152983-322-177348380277354/AnsiballZ_stat.py'
Oct 11 02:10:37 compute-0 sudo[393541]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:38 compute-0 sshd-session[393064]: Failed password for invalid user user from 121.227.153.123 port 46012 ssh2
Oct 11 02:10:38 compute-0 python3.9[393543]: ansible-ansible.legacy.stat Invoked with path=/var/lib/openstack/healthchecks/kepler/healthcheck follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:10:38 compute-0 sudo[393541]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:38 compute-0 sudo[393619]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ycwvoyfpzdigwgdqfcuxroxreornnebr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148637.3152983-322-177348380277354/AnsiballZ_file.py'
Oct 11 02:10:38 compute-0 sudo[393619]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:38 compute-0 ceph-mon[191930]: pgmap v973: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:38 compute-0 python3.9[393621]: ansible-ansible.legacy.file Invoked with group=zuul mode=0700 owner=zuul setype=container_file_t dest=/var/lib/openstack/healthchecks/kepler/ _original_basename=healthcheck recurse=False state=file path=/var/lib/openstack/healthchecks/kepler/ force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None attributes=None
Oct 11 02:10:38 compute-0 sudo[393619]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:38 compute-0 nova_compute[356901]: 2025-10-11 02:10:38.960 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:10:39 compute-0 sshd-session[393064]: Connection closed by invalid user user 121.227.153.123 port 46012 [preauth]
Oct 11 02:10:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v974: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:39 compute-0 nova_compute[356901]: 2025-10-11 02:10:39.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:10:39 compute-0 nova_compute[356901]: 2025-10-11 02:10:39.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:10:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:10:40 compute-0 sudo[393773]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mcuxmjqrfaqekkaaihtvakbtdmylaedj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148639.2994044-355-80322984836150/AnsiballZ_container_config_data.py'
Oct 11 02:10:40 compute-0 sudo[393773]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:40 compute-0 python3.9[393775]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/openstack/config/telemetry-power-monitoring config_pattern=ceilometer_agent_ipmi.json debug=False
Oct 11 02:10:40 compute-0 sudo[393773]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:40 compute-0 sshd-session[393698]: Invalid user user from 121.227.153.123 port 47810
Oct 11 02:10:40 compute-0 ceph-mon[191930]: pgmap v974: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:40 compute-0 sshd-session[393698]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:10:40 compute-0 sshd-session[393698]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:10:40 compute-0 nova_compute[356901]: 2025-10-11 02:10:40.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:10:40 compute-0 nova_compute[356901]: 2025-10-11 02:10:40.929 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:10:40 compute-0 nova_compute[356901]: 2025-10-11 02:10:40.931 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:10:40 compute-0 nova_compute[356901]: 2025-10-11 02:10:40.932 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:10:40 compute-0 nova_compute[356901]: 2025-10-11 02:10:40.933 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:10:40 compute-0 nova_compute[356901]: 2025-10-11 02:10:40.934 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:10:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:10:41 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2341910583' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:10:41 compute-0 nova_compute[356901]: 2025-10-11 02:10:41.465 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.530s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:10:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v975: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:41 compute-0 sudo[393947]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lqddfavqbudsrzujrrsrjlxkgtxeyikt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148640.765455-364-128729203406496/AnsiballZ_container_config_hash.py'
Oct 11 02:10:41 compute-0 sudo[393947]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:41 compute-0 python3.9[393949]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
Oct 11 02:10:41 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2341910583' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:10:41 compute-0 sudo[393947]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:41 compute-0 nova_compute[356901]: 2025-10-11 02:10:41.958 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:10:41 compute-0 nova_compute[356901]: 2025-10-11 02:10:41.960 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=4551MB free_disk=59.98828125GB free_vcpus=8 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:10:41 compute-0 nova_compute[356901]: 2025-10-11 02:10:41.960 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:10:41 compute-0 nova_compute[356901]: 2025-10-11 02:10:41.961 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:10:42 compute-0 nova_compute[356901]: 2025-10-11 02:10:42.173 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 0 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:10:42 compute-0 nova_compute[356901]: 2025-10-11 02:10:42.174 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=512MB phys_disk=59GB used_disk=0GB total_vcpus=8 used_vcpus=0 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:10:42 compute-0 nova_compute[356901]: 2025-10-11 02:10:42.309 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing inventories for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:804
Oct 11 02:10:42 compute-0 nova_compute[356901]: 2025-10-11 02:10:42.467 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating ProviderTree inventory for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 from _refresh_and_get_inventory using data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} _refresh_and_get_inventory /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:768
Oct 11 02:10:42 compute-0 nova_compute[356901]: 2025-10-11 02:10:42.468 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating inventory in ProviderTree for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with inventory: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:176
Oct 11 02:10:42 compute-0 nova_compute[356901]: 2025-10-11 02:10:42.499 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing aggregate associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, aggregates: None _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:813
Oct 11 02:10:42 compute-0 nova_compute[356901]: 2025-10-11 02:10:42.543 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing trait associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, traits: COMPUTE_VOLUME_EXTEND,COMPUTE_NET_VIF_MODEL_VMXNET3,HW_CPU_X86_SSSE3,COMPUTE_RESCUE_BFV,COMPUTE_SOCKET_PCI_NUMA_AFFINITY,COMPUTE_NODE,HW_CPU_X86_SVM,COMPUTE_STORAGE_BUS_SCSI,HW_CPU_X86_FMA3,COMPUTE_GRAPHICS_MODEL_NONE,COMPUTE_NET_VIF_MODEL_RTL8139,HW_CPU_X86_SSE4A,COMPUTE_IMAGE_TYPE_QCOW2,HW_CPU_X86_BMI2,HW_CPU_X86_SSE42,HW_CPU_X86_AVX2,COMPUTE_IMAGE_TYPE_RAW,COMPUTE_VIOMMU_MODEL_VIRTIO,HW_CPU_X86_AESNI,COMPUTE_STORAGE_BUS_FDC,COMPUTE_GRAPHICS_MODEL_VIRTIO,HW_CPU_X86_AMD_SVM,COMPUTE_NET_VIF_MODEL_NE2K_PCI,COMPUTE_ACCELERATORS,HW_CPU_X86_SSE2,COMPUTE_GRAPHICS_MODEL_VGA,HW_CPU_X86_ABM,HW_CPU_X86_AVX,COMPUTE_NET_VIF_MODEL_E1000,COMPUTE_STORAGE_BUS_USB,COMPUTE_NET_ATTACH_INTERFACE,HW_CPU_X86_MMX,COMPUTE_SECURITY_TPM_2_0,COMPUTE_IMAGE_TYPE_ISO,HW_CPU_X86_SSE41,COMPUTE_IMAGE_TYPE_AKI,COMPUTE_IMAGE_TYPE_AMI,COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG,COMPUTE_DEVICE_TAGGING,COMPUTE_SECURITY_UEFI_SECURE_BOOT,COMPUTE_TRUSTED_CERTS,COMPUTE_NET_VIF_MODEL_VIRTIO,COMPUTE_VIOMMU_MODEL_INTEL,COMPUTE_STORAGE_BUS_SATA,HW_CPU_X86_SSE,COMPUTE_STORAGE_BUS_VIRTIO,COMPUTE_NET_VIF_MODEL_PCNET,COMPUTE_GRAPHICS_MODEL_CIRRUS,HW_CPU_X86_SHA,HW_CPU_X86_BMI,COMPUTE_NET_VIF_MODEL_E1000E,COMPUTE_NET_VIF_MODEL_SPAPR_VLAN,COMPUTE_VOLUME_ATTACH_WITH_TAG,COMPUTE_GRAPHICS_MODEL_BOCHS,COMPUTE_VIOMMU_MODEL_AUTO,COMPUTE_IMAGE_TYPE_ARI,HW_CPU_X86_CLMUL,COMPUTE_STORAGE_BUS_IDE,COMPUTE_VOLUME_MULTI_ATTACH,HW_CPU_X86_F16C,COMPUTE_SECURITY_TPM_1_2 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:825
Oct 11 02:10:42 compute-0 nova_compute[356901]: 2025-10-11 02:10:42.567 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:10:42 compute-0 ceph-mon[191930]: pgmap v975: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:42 compute-0 sshd-session[393698]: Failed password for invalid user user from 121.227.153.123 port 47810 ssh2
Oct 11 02:10:43 compute-0 sudo[394119]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uzlyuxwucueuhyyjfcnmbrgjpccbhmmt ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760148642.2179828-374-49200598719173/AnsiballZ_edpm_container_manage.py'
Oct 11 02:10:43 compute-0 sudo[394119]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:10:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/951128542' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:10:43 compute-0 nova_compute[356901]: 2025-10-11 02:10:43.146 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.579s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:10:43 compute-0 nova_compute[356901]: 2025-10-11 02:10:43.157 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:10:43 compute-0 nova_compute[356901]: 2025-10-11 02:10:43.179 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:10:43 compute-0 nova_compute[356901]: 2025-10-11 02:10:43.182 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:10:43 compute-0 nova_compute[356901]: 2025-10-11 02:10:43.183 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 1.222s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:10:43 compute-0 python3[394121]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/openstack/config/telemetry-power-monitoring config_id=edpm config_overrides={} config_patterns=ceilometer_agent_ipmi.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 02:10:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v976: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:43 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/951128542' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:10:43 compute-0 python3[394121]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: [
                                                {
                                                     "Id": "3fd38304666e26ceda31e631b76b1276c0e32061a70084c62e30140f9f182bd7",
                                                     "Digest": "sha256:8b755bcae75f21718f07c7740080b034f4f289b859072ec2020fa0fde3f8c4f0",
                                                     "RepoTags": [
                                                          "quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified"
                                                     ],
                                                     "RepoDigests": [
                                                          "quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi@sha256:8b755bcae75f21718f07c7740080b034f4f289b859072ec2020fa0fde3f8c4f0"
                                                     ],
                                                     "Parent": "",
                                                     "Comment": "",
                                                     "Created": "2025-10-10T06:28:23.466005696Z",
                                                     "Config": {
                                                          "User": "ceilometer",
                                                          "Env": [
                                                               "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
                                                               "LANG=en_US.UTF-8",
                                                               "TZ=UTC",
                                                               "container=oci"
                                                          ],
                                                          "Entrypoint": [
                                                               "dumb-init",
                                                               "--single-child",
                                                               "--"
                                                          ],
                                                          "Cmd": [
                                                               "kolla_start"
                                                          ],
                                                          "Labels": {
                                                               "io.buildah.version": "1.41.3",
                                                               "maintainer": "OpenStack Kubernetes Operator team",
                                                               "org.label-schema.build-date": "20251009",
                                                               "org.label-schema.license": "GPLv2",
                                                               "org.label-schema.name": "CentOS Stream 9 Base Image",
                                                               "org.label-schema.schema-version": "1.0",
                                                               "org.label-schema.vendor": "CentOS",
                                                               "tcib_build_tag": "c4b77291aeca5591ac860bd4127cec2f",
                                                               "tcib_managed": "true"
                                                          },
                                                          "StopSignal": "SIGTERM"
                                                     },
                                                     "Version": "",
                                                     "Author": "",
                                                     "Architecture": "amd64",
                                                     "Os": "linux",
                                                     "Size": 506007928,
                                                     "VirtualSize": 506007928,
                                                     "GraphDriver": {
                                                          "Name": "overlay",
                                                          "Data": {
                                                               "LowerDir": "/var/lib/containers/storage/overlay/70eff67805c4992da6105fa696fd597ada818a9817158b2fc198a4ae3d1f7fde/diff:/var/lib/containers/storage/overlay/5070ecd2481479158dcb8390f4cb264778f1dda9b8f7f081ea77918068602bf7/diff:/var/lib/containers/storage/overlay/c2ad6f8b1a6091551e22adfb2e6ce479ea8bed05ed630c600bde534ea0820278/diff:/var/lib/containers/storage/overlay/f3f40f6483bf6d587286da9e86e40878c2aaaf723da5aa2364fff24f5ea28424/diff",
                                                               "UpperDir": "/var/lib/containers/storage/overlay/3bcb3d3b3a068c1f977d86462012ed6c7809547c40ac84fca47977c754f6f293/diff",
                                                               "WorkDir": "/var/lib/containers/storage/overlay/3bcb3d3b3a068c1f977d86462012ed6c7809547c40ac84fca47977c754f6f293/work"
                                                          }
                                                     },
                                                     "RootFS": {
                                                          "Type": "layers",
                                                          "Layers": [
                                                               "sha256:f3f40f6483bf6d587286da9e86e40878c2aaaf723da5aa2364fff24f5ea28424",
                                                               "sha256:3a9d73afb8795f4b13d74c2653e4fc76293cf6011ed9e4a2a730031f9b5a587e",
                                                               "sha256:a17e0bb381c222c58af32c704c00a9a70c73d5bad082858e10060d00f64461d6",
                                                               "sha256:7567c56c59163540b2a1f8228acfe72a14a2295fa943dfc5b2eabcdd17d2cac1",
                                                               "sha256:62d2762187e9963c0bbe8a0af637b5f72ada1f5e7b0cf30d9eaab83863d6f866"
                                                          ]
                                                     },
                                                     "Labels": {
                                                          "io.buildah.version": "1.41.3",
                                                          "maintainer": "OpenStack Kubernetes Operator team",
                                                          "org.label-schema.build-date": "20251009",
                                                          "org.label-schema.license": "GPLv2",
                                                          "org.label-schema.name": "CentOS Stream 9 Base Image",
                                                          "org.label-schema.schema-version": "1.0",
                                                          "org.label-schema.vendor": "CentOS",
                                                          "tcib_build_tag": "c4b77291aeca5591ac860bd4127cec2f",
                                                          "tcib_managed": "true"
                                                     },
                                                     "Annotations": {},
                                                     "ManifestType": "application/vnd.docker.distribution.manifest.v2+json",
                                                     "User": "ceilometer",
                                                     "History": [
                                                          {
                                                               "created": "2025-10-09T00:18:03.867908726Z",
                                                               "created_by": "/bin/sh -c #(nop) ADD file:b2e608b9da8e087a764c2aebbd9c2cc9181047f5b301f1dab77fdf098a28268b in / ",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-09T00:18:03.868015697Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL org.label-schema.schema-version=\"1.0\"     org.label-schema.name=\"CentOS Stream 9 Base Image\"     org.label-schema.vendor=\"CentOS\"     org.label-schema.license=\"GPLv2\"     org.label-schema.build-date=\"20251009\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-09T00:18:07.890794359Z",
                                                               "created_by": "/bin/sh -c #(nop) CMD [\"/bin/bash\"]"
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:06.074259055Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL maintainer=\"OpenStack Kubernetes Operator team\"",
                                                               "comment": "FROM quay.io/centos/centos:stream9",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:06.074278165Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL tcib_managed=true",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:06.074291595Z",
                                                               "created_by": "/bin/sh -c #(nop) ENV LANG=\"en_US.UTF-8\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:06.074304206Z",
                                                               "created_by": "/bin/sh -c #(nop) ENV TZ=\"UTC\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:06.074315366Z",
                                                               "created_by": "/bin/sh -c #(nop) ENV container=\"oci\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:06.074330277Z",
                                                               "created_by": "/bin/sh -c #(nop) USER root",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:06.426277446Z",
                                                               "created_by": "/bin/sh -c if [ -f \"/etc/yum.repos.d/ubi.repo\" ]; then rm -f /etc/yum.repos.d/ubi.repo && dnf clean all && rm -rf /var/cache/dnf; fi",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:45.068043089Z",
                                                               "created_by": "/bin/sh -c dnf install -y crudini && crudini --del /etc/dnf/dnf.conf main override_install_langs && crudini --set /etc/dnf/dnf.conf main clean_requirements_on_remove True && crudini --set /etc/dnf/dnf.conf main exactarch 1 && crudini --set /etc/dnf/dnf.conf main gpgcheck 1 && crudini --set /etc/dnf/dnf.conf main install_weak_deps False && if [ 'centos' == 'centos' ];then crudini --set /etc/dnf/dnf.conf main best False; fi && crudini --set /etc/dnf/dnf.conf main installonly_limit 0 && crudini --set /etc/dnf/dnf.conf main keepcache 0 && crudini --set /etc/dnf/dnf.conf main obsoletes 1 && crudini --set /etc/dnf/dnf.conf main plugins 1 && crudini --set /etc/dnf/dnf.conf main skip_missing_names_on_install False && crudini --set /etc/dnf/dnf.conf main tsflags nodocs",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:48.840534792Z",
                                                               "created_by": "/bin/sh -c dnf install -y ca-certificates dumb-init glibc-langpack-en procps-ng python3 sudo util-linux-user which python-tcib-containers",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:49.254426116Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/uid_gid_manage.sh /usr/local/bin/uid_gid_manage",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:49.615669407Z",
                                                               "created_by": "/bin/sh -c chmod 755 /usr/local/bin/uid_gid_manage",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:50.32351585Z",
                                                               "created_by": "/bin/sh -c bash /usr/local/bin/uid_gid_manage kolla hugetlbfs libvirt qemu",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:50.95150669Z",
                                                               "created_by": "/bin/sh -c touch /usr/local/bin/kolla_extend_start && chmod 755 /usr/local/bin/kolla_extend_start",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:51.275453253Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/set_configs.py /usr/local/bin/kolla_set_configs",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:51.585556245Z",
                                                               "created_by": "/bin/sh -c chmod 755 /usr/local/bin/kolla_set_configs",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:51.900910818Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/start.sh /usr/local/bin/kolla_start",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:52.204754907Z",
                                                               "created_by": "/bin/sh -c chmod 755 /usr/local/bin/kolla_start",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:52.491429185Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/httpd_setup.sh /usr/local/bin/kolla_httpd_setup",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:52.847171086Z",
                                                               "created_by": "/bin/sh -c chmod 755 /usr/local/bin/kolla_httpd_setup",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:53.137059418Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/copy_cacerts.sh /usr/local/bin/kolla_copy_cacerts",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:53.44892538Z",
                                                               "created_by": "/bin/sh -c chmod 755 /usr/local/bin/kolla_copy_cacerts",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:53.826190187Z",
                                                               "created_by": "/bin/sh -c cp /usr/share/tcib/container-images/kolla/base/sudoers /etc/sudoers",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:54.222427047Z",
                                                               "created_by": "/bin/sh -c chmod 440 /etc/sudoers",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:54.586798968Z",
                                                               "created_by": "/bin/sh -c sed -ri '/^(passwd:|group:)/ s/systemd//g' /etc/nsswitch.conf",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:58.557228847Z",
                                                               "created_by": "/bin/sh -c dnf -y reinstall which && rpm -e --nodeps tzdata && dnf -y install tzdata",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:59.14899644Z",
                                                               "created_by": "/bin/sh -c if [ ! -f \"/etc/localtime\" ]; then ln -s /usr/share/zoneinfo/Etc/UTC /etc/localtime; fi",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:12:59.683368642Z",
                                                               "created_by": "/bin/sh -c mkdir -p /openstack",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:13:01.385446652Z",
                                                               "created_by": "/bin/sh -c if [ 'centos' == 'centos' ];then if [ -n \"$(rpm -qa redhat-release)\" ];then rpm -e --nodeps redhat-release; fi ; dnf -y install centos-stream-release; fi",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:13:03.734832392Z",
                                                               "created_by": "/bin/sh -c dnf update --excludepkgs redhat-release -y && dnf clean all && rm -rf /var/cache/dnf",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:13:03.734976356Z",
                                                               "created_by": "/bin/sh -c #(nop) STOPSIGNAL SIGTERM",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:13:03.734988536Z",
                                                               "created_by": "/bin/sh -c #(nop) ENTRYPOINT [\"dumb-init\", \"--single-child\", \"--\"]",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:13:03.734997136Z",
                                                               "created_by": "/bin/sh -c #(nop) CMD [\"kolla_start\"]",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:13:04.949823794Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL \"tcib_build_tag\"=\"c4b77291aeca5591ac860bd4127cec2f\""
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:15:14.766575448Z",
                                                               "created_by": "/bin/sh -c #(nop) USER root",
                                                               "comment": "FROM quay.rdoproject.org/podified-antelope-centos9/openstack-base:c4b77291aeca5591ac860bd4127cec2f",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:16:07.362238743Z",
                                                               "created_by": "/bin/sh -c dnf install -y python3-barbicanclient python3-cinderclient python3-designateclient python3-glanceclient python3-ironicclient python3-keystoneclient python3-manilaclient python3-neutronclient python3-novaclient python3-observabilityclient python3-octaviaclient python3-openstackclient python3-swiftclient python3-pymemcache && dnf clean all && rm -rf /var/cache/dnf",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:16:10.396726217Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL \"tcib_build_tag\"=\"c4b77291aeca5591ac860bd4127cec2f\""
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:19:26.4160234Z",
                                                               "created_by": "/bin/sh -c #(nop) USER root",
                                                               "comment": "FROM quay.rdoproject.org/podified-antelope-centos9/openstack-os:c4b77291aeca5591ac860bd4127cec2f",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:20:27.965571574Z",
                                                               "created_by": "/bin/sh -c bash /usr/local/bin/uid_gid_manage ceilometer",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:21:40.232443685Z",
                                                               "created_by": "/bin/sh -c dnf -y install openstack-ceilometer-common && dnf clean all && rm -rf /var/cache/dnf",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:21:44.507904176Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL \"tcib_build_tag\"=\"c4b77291aeca5591ac860bd4127cec2f\""
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:27:46.444748385Z",
                                                               "created_by": "/bin/sh -c #(nop) USER root",
                                                               "comment": "FROM quay.rdoproject.org/podified-antelope-centos9/openstack-ceilometer-base:c4b77291aeca5591ac860bd4127cec2f",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:28:23.464608569Z",
                                                               "created_by": "/bin/sh -c dnf -y install openstack-ceilometer-ipmi && dnf clean all && rm -rf /var/cache/dnf",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:28:23.464654101Z",
                                                               "created_by": "/bin/sh -c #(nop) USER ceilometer",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2025-10-10T06:28:25.821836206Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL \"tcib_build_tag\"=\"c4b77291aeca5591ac860bd4127cec2f\""
                                                          }
                                                     ],
                                                     "NamesHistory": [
                                                          "quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified"
                                                     ]
                                                }
                                           ]
                                           : quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified
Oct 11 02:10:44 compute-0 sudo[394119]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:44 compute-0 nova_compute[356901]: 2025-10-11 02:10:44.183 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:10:44 compute-0 nova_compute[356901]: 2025-10-11 02:10:44.185 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:10:44 compute-0 nova_compute[356901]: 2025-10-11 02:10:44.185 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:10:44 compute-0 nova_compute[356901]: 2025-10-11 02:10:44.206 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944
Oct 11 02:10:44 compute-0 nova_compute[356901]: 2025-10-11 02:10:44.207 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:10:44 compute-0 nova_compute[356901]: 2025-10-11 02:10:44.208 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:10:44 compute-0 nova_compute[356901]: 2025-10-11 02:10:44.208 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:10:44 compute-0 nova_compute[356901]: 2025-10-11 02:10:44.209 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:10:44 compute-0 nova_compute[356901]: 2025-10-11 02:10:44.209 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:10:44 compute-0 sshd-session[393698]: Connection closed by invalid user user 121.227.153.123 port 47810 [preauth]
Oct 11 02:10:44 compute-0 ceph-mon[191930]: pgmap v976: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:44 compute-0 sudo[394333]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-blymokjtwhuscbskwnbsuphgrhrfylgx ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148644.346644-382-241060229086409/AnsiballZ_stat.py'
Oct 11 02:10:44 compute-0 sudo[394333]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:45 compute-0 podman[394335]: 2025-10-11 02:10:45.051374252 +0000 UTC m=+0.100386428 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, managed_by=edpm_ansible, tcib_managed=true, org.label-schema.build-date=20251007, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, io.buildah.version=1.41.4, org.label-schema.schema-version=1.0)
Oct 11 02:10:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:10:45 compute-0 python3.9[394336]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:10:45 compute-0 sudo[394333]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v977: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:45 compute-0 sshd-session[394281]: Invalid user user from 121.227.153.123 port 47818
Oct 11 02:10:45 compute-0 podman[394410]: 2025-10-11 02:10:45.799714787 +0000 UTC m=+0.120370552 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible)
Oct 11 02:10:45 compute-0 podman[394419]: 2025-10-11 02:10:45.826878918 +0000 UTC m=+0.129184897 container health_status e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc., version=9.4, config_id=edpm, container_name=kepler, io.buildah.version=1.29.0, maintainer=Red Hat, Inc., managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., io.openshift.expose-services=, com.redhat.component=ubi9-container, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.k8s.display-name=Red Hat Universal Base Image 9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1214.1726694543, release-0.7.12=, distribution-scope=public, vcs-type=git, architecture=x86_64, build-date=2024-09-18T21:23:30, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, name=ubi9)
Oct 11 02:10:45 compute-0 podman[394417]: 2025-10-11 02:10:45.834314054 +0000 UTC m=+0.146649080 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:10:45 compute-0 sshd-session[394281]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:10:45 compute-0 sshd-session[394281]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:10:46 compute-0 podman[394502]: 2025-10-11 02:10:46.013570425 +0000 UTC m=+0.173565734 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:10:46 compute-0 sudo[394587]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vertnccvmsqrwcknhaveaqnaavckrnkk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148645.57507-391-187243380947943/AnsiballZ_file.py'
Oct 11 02:10:46 compute-0 sudo[394587]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:46 compute-0 python3.9[394589]: ansible-file Invoked with path=/etc/systemd/system/edpm_ceilometer_agent_ipmi.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:10:46 compute-0 sudo[394587]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:46 compute-0 ceph-mon[191930]: pgmap v977: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v978: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:47 compute-0 sshd-session[394281]: Failed password for invalid user user from 121.227.153.123 port 47818 ssh2
Oct 11 02:10:48 compute-0 sudo[394738]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uiutxecbvpbwhtamjoogdrnuevdigyse ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148646.4551759-391-1668257157848/AnsiballZ_copy.py'
Oct 11 02:10:48 compute-0 sudo[394738]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:48 compute-0 python3.9[394740]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760148646.4551759-391-1668257157848/source dest=/etc/systemd/system/edpm_ceilometer_agent_ipmi.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:10:48 compute-0 sudo[394738]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:48 compute-0 ceph-mon[191930]: pgmap v978: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:49 compute-0 sudo[394814]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-haahjorbzovtzxiekdgokmhzfvtdsjgt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148646.4551759-391-1668257157848/AnsiballZ_systemd.py'
Oct 11 02:10:49 compute-0 sudo[394814]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:49 compute-0 sshd-session[394281]: Connection closed by invalid user user 121.227.153.123 port 47818 [preauth]
Oct 11 02:10:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v979: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:49 compute-0 python3.9[394816]: ansible-systemd Invoked with state=started name=edpm_ceilometer_agent_ipmi.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:10:49 compute-0 sudo[394814]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:49 compute-0 podman[394820]: 2025-10-11 02:10:49.765436824 +0000 UTC m=+0.135330622 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, container_name=iscsid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:10:49 compute-0 podman[394819]: 2025-10-11 02:10:49.786456241 +0000 UTC m=+0.162480905 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, config_id=multipathd, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:10:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:10:50 compute-0 sshd-session[394849]: Invalid user user from 121.227.153.123 port 41830
Oct 11 02:10:50 compute-0 ceph-mon[191930]: pgmap v979: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:50 compute-0 sshd-session[394849]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:10:50 compute-0 sshd-session[394849]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:10:51 compute-0 sudo[395008]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-untxgzpvehgikfsudzwpwmuhxxhzpahb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148650.826426-413-100457597562137/AnsiballZ_container_config_data.py'
Oct 11 02:10:51 compute-0 sudo[395008]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v980: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:51 compute-0 python3.9[395010]: ansible-container_config_data Invoked with config_overrides={} config_path=/var/lib/openstack/config/telemetry-power-monitoring config_pattern=kepler.json debug=False
Oct 11 02:10:51 compute-0 sudo[395008]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:52 compute-0 sudo[395160]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eadgwjlcdctqemniwlqjxwsmnswvdxfz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148652.1336675-422-215137168386299/AnsiballZ_container_config_hash.py'
Oct 11 02:10:52 compute-0 sudo[395160]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:52 compute-0 ceph-mon[191930]: pgmap v980: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:53 compute-0 sshd-session[394849]: Failed password for invalid user user from 121.227.153.123 port 41830 ssh2
Oct 11 02:10:53 compute-0 python3.9[395162]: ansible-container_config_hash Invoked with check_mode=False config_vol_prefix=/var/lib/config-data
Oct 11 02:10:53 compute-0 sudo[395160]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v981: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:54 compute-0 sudo[395312]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-avbeblstxrfkmqrzrxcnlfsikusfvdvr ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760148653.5242782-432-249890305626967/AnsiballZ_edpm_container_manage.py'
Oct 11 02:10:54 compute-0 sudo[395312]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:54 compute-0 python3[395314]: ansible-edpm_container_manage Invoked with concurrency=1 config_dir=/var/lib/openstack/config/telemetry-power-monitoring config_id=edpm config_overrides={} config_patterns=kepler.json log_base_path=/var/log/containers/stdouts debug=False
Oct 11 02:10:54 compute-0 sshd-session[394849]: Connection closed by invalid user user 121.227.153.123 port 41830 [preauth]
Oct 11 02:10:54 compute-0 python3[395314]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: [
                                                {
                                                     "Id": "ed61e3ea3188391c18595d8ceada2a5a01f0ece915c62fde355798735b5208d7",
                                                     "Digest": "sha256:c74e63cd5740586d4c62182467bb463ef5e3dd809027aedc92c05ac19e93b086",
                                                     "RepoTags": [
                                                          "quay.io/sustainable_computing_io/kepler:release-0.7.12"
                                                     ],
                                                     "RepoDigests": [
                                                          "quay.io/sustainable_computing_io/kepler@sha256:581b65b646301e0fcb07582150ba63438f1353a85bf9acf1eb2acb4ce71c58bd",
                                                          "quay.io/sustainable_computing_io/kepler@sha256:c74e63cd5740586d4c62182467bb463ef5e3dd809027aedc92c05ac19e93b086"
                                                     ],
                                                     "Parent": "",
                                                     "Comment": "",
                                                     "Created": "2024-10-15T06:30:56.315982344Z",
                                                     "Config": {
                                                          "Env": [
                                                               "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
                                                               "container=oci",
                                                               "NVIDIA_VISIBLE_DEVICES=all",
                                                               "NVIDIA_DRIVER_CAPABILITIES=utility",
                                                               "NVIDIA_MIG_MONITOR_DEVICES=all",
                                                               "NVIDIA_MIG_CONFIG_DEVICES=all"
                                                          ],
                                                          "Entrypoint": [
                                                               "/usr/bin/kepler"
                                                          ],
                                                          "Labels": {
                                                               "architecture": "x86_64",
                                                               "build-date": "2024-09-18T21:23:30",
                                                               "com.redhat.component": "ubi9-container",
                                                               "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI",
                                                               "description": "The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.",
                                                               "distribution-scope": "public",
                                                               "io.buildah.version": "1.29.0",
                                                               "io.k8s.description": "The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.",
                                                               "io.k8s.display-name": "Red Hat Universal Base Image 9",
                                                               "io.openshift.expose-services": "",
                                                               "io.openshift.tags": "base rhel9",
                                                               "maintainer": "Red Hat, Inc.",
                                                               "name": "ubi9",
                                                               "release": "1214.1726694543",
                                                               "release-0.7.12": "",
                                                               "summary": "Provides the latest release of Red Hat Universal Base Image 9.",
                                                               "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543",
                                                               "vcs-ref": "e309397d02fc53f7fa99db1371b8700eb49f268f",
                                                               "vcs-type": "git",
                                                               "vendor": "Red Hat, Inc.",
                                                               "version": "9.4"
                                                          }
                                                     },
                                                     "Version": "",
                                                     "Author": "",
                                                     "Architecture": "amd64",
                                                     "Os": "linux",
                                                     "Size": 331545571,
                                                     "VirtualSize": 331545571,
                                                     "GraphDriver": {
                                                          "Name": "overlay",
                                                          "Data": {
                                                               "LowerDir": "/var/lib/containers/storage/overlay/de1557109facda5eb038045e25371b06ad2baf5cf32c60a7fe84a603bee1e079/diff:/var/lib/containers/storage/overlay/725f7e4e3b8edde36f0bdcd313bbaf872dbe55b162264f8008ee3c09a0b89b66/diff:/var/lib/containers/storage/overlay/573769ea2305456dffa2f0674424aa020c1494387d36bcccb339788fd220d39b/diff:/var/lib/containers/storage/overlay/56a7d751d1997fb4e9fb31bd07356a0c9a7699a9bb524feeb3c7fe2b433b8223/diff:/var/lib/containers/storage/overlay/0560e6233aa93f1e1ac7bed53255811f32dc680869ef7f31dd630efc1203b853/diff:/var/lib/containers/storage/overlay/8d984035cdde48f32944ddaa464ac42d376faabc98415168800b2b8c9aec0930/diff:/var/lib/containers/storage/overlay/e7328e803158cca63d8efdbe1caefb1b51654de77e5fa8691079ad06db1abf75/diff",
                                                               "UpperDir": "/var/lib/containers/storage/overlay/ed698de2bb3f7ef46422d45edf0654a1764e700cec794f481dab0a1f34f51932/diff",
                                                               "WorkDir": "/var/lib/containers/storage/overlay/ed698de2bb3f7ef46422d45edf0654a1764e700cec794f481dab0a1f34f51932/work"
                                                          }
                                                     },
                                                     "RootFS": {
                                                          "Type": "layers",
                                                          "Layers": [
                                                               "sha256:e7328e803158cca63d8efdbe1caefb1b51654de77e5fa8691079ad06db1abf75",
                                                               "sha256:f947b23b2d0723eac9b608b79e6d48e59d90f74958e05f2762295489e0088e86",
                                                               "sha256:3bf6ab40cc16a103a087232c2c6a1a093dcb6141e70397de57907f5d00741429",
                                                               "sha256:2f5269f1ade14b3b0806305a0b2d3efffe65a187b302789a50ac00bcb815b960",
                                                               "sha256:413f5abb84bd1c03bdfd9c1e0dec8f4be92159c9c6116c4e44247efcdcc6b518",
                                                               "sha256:60c06a2423851502fc43aec0680b91181b0d62b52812c019d3fc66f1546c4529",
                                                               "sha256:323ce4bcad35618db6032dd5bfbd6c8ebb0cde882f730b19296d0ceaf5e39427",
                                                               "sha256:270b3386a8e4a2127a32b007abfea7cb394ae1dee577ee7fefdbb79cd2bea856"
                                                          ]
                                                     },
                                                     "Labels": {
                                                          "architecture": "x86_64",
                                                          "build-date": "2024-09-18T21:23:30",
                                                          "com.redhat.component": "ubi9-container",
                                                          "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI",
                                                          "description": "The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.",
                                                          "distribution-scope": "public",
                                                          "io.buildah.version": "1.29.0",
                                                          "io.k8s.description": "The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.",
                                                          "io.k8s.display-name": "Red Hat Universal Base Image 9",
                                                          "io.openshift.expose-services": "",
                                                          "io.openshift.tags": "base rhel9",
                                                          "maintainer": "Red Hat, Inc.",
                                                          "name": "ubi9",
                                                          "release": "1214.1726694543",
                                                          "release-0.7.12": "",
                                                          "summary": "Provides the latest release of Red Hat Universal Base Image 9.",
                                                          "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543",
                                                          "vcs-ref": "e309397d02fc53f7fa99db1371b8700eb49f268f",
                                                          "vcs-type": "git",
                                                          "vendor": "Red Hat, Inc.",
                                                          "version": "9.4"
                                                     },
                                                     "Annotations": {},
                                                     "ManifestType": "application/vnd.oci.image.manifest.v1+json",
                                                     "User": "",
                                                     "History": [
                                                          {
                                                               "created": "2024-09-18T21:36:31.099323493Z",
                                                               "created_by": "/bin/sh -c #(nop) ADD file:0067eb9f2ee25ab2d666a7639a85fe707b582902a09242761abf30c53664069b in / ",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:32.031010231Z",
                                                               "created_by": "/bin/sh -c mv -f /etc/yum.repos.d/ubi.repo /tmp || :",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:32.418413433Z",
                                                               "created_by": "/bin/sh -c #(nop) ADD file:5b1f650e1376d79fa3a65df4a154ea5166def95154b52c1c1097dfd8fc7d58eb in /tmp/tls-ca-bundle.pem ",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:32.91238548Z",
                                                               "created_by": "/bin/sh -c #(nop) ADD multi:7a67822d03b1a3ddb205cc3fcf7acd9d3180aef5988a5d25887bc0753a7a493b in /etc/yum.repos.d/ ",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:32.912448474Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL maintainer=\"Red Hat, Inc.\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:32.912573716Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL com.redhat.component=\"ubi9-container\"       name=\"ubi9\"       version=\"9.4\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:32.912652474Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL com.redhat.license_terms=\"https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:32.912740628Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL summary=\"Provides the latest release of Red Hat Universal Base Image 9.\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:32.912866673Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL description=\"The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:32.912921304Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL io.k8s.display-name=\"Red Hat Universal Base Image 9\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:32.912962586Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL io.openshift.expose-services=\"\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:32.913001888Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL io.openshift.tags=\"base rhel9\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:32.913021599Z",
                                                               "created_by": "/bin/sh -c #(nop) ENV container oci",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:32.913081151Z",
                                                               "created_by": "/bin/sh -c #(nop) ENV PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:32.913091001Z",
                                                               "created_by": "/bin/sh -c #(nop) CMD [\"/bin/bash\"]",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:33.824802353Z",
                                                               "created_by": "/bin/sh -c rm -rf /var/log/*",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:34.766737128Z",
                                                               "created_by": "/bin/sh -c mkdir -p /var/log/rhsm",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:35.121320055Z",
                                                               "created_by": "/bin/sh -c #(nop) ADD file:ed34e436a5c2cc729eecd8b15b94c75028aea1cb18b739cafbb293b5e4ad5dae in /root/buildinfo/content_manifests/ubi9-container-9.4-1214.1726694543.json ",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:35.525712655Z",
                                                               "created_by": "/bin/sh -c #(nop) ADD file:d56bb1961538221b52d7e292418978f186bf67b9906771f38530fc3996a9d0d4 in /root/buildinfo/Dockerfile-ubi9-9.4-1214.1726694543 ",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:35.526152969Z",
                                                               "created_by": "/bin/sh -c #(nop) LABEL \"release\"=\"1214.1726694543\" \"distribution-scope\"=\"public\" \"vendor\"=\"Red Hat, Inc.\" \"build-date\"=\"2024-09-18T21:23:30\" \"architecture\"=\"x86_64\" \"vcs-type\"=\"git\" \"vcs-ref\"=\"e309397d02fc53f7fa99db1371b8700eb49f268f\" \"io.k8s.description\"=\"The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.\" \"url\"=\"https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543\"",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:36.481014095Z",
                                                               "created_by": "/bin/sh -c rm -f '/etc/yum.repos.d/odcs-3496925-3b364.repo' '/etc/yum.repos.d/rhel-9.4-compose-34ae9.repo'",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:37.364179091Z",
                                                               "created_by": "/bin/sh -c rm -f /tmp/tls-ca-bundle.pem",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-09-18T21:36:41.423178117Z",
                                                               "created_by": "/bin/sh -c mv -fZ /tmp/ubi.repo /etc/yum.repos.d/ubi.repo || :"
                                                          },
                                                          {
                                                               "created": "2024-10-15T06:28:14.211190228Z",
                                                               "created_by": "SHELL [/bin/bash -c]",
                                                               "comment": "buildkit.dockerfile.v0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-10-15T06:28:14.211190228Z",
                                                               "created_by": "ARG INSTALL_DCGM=false",
                                                               "comment": "buildkit.dockerfile.v0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-10-15T06:28:14.211190228Z",
                                                               "created_by": "ARG INSTALL_HABANA=false",
                                                               "comment": "buildkit.dockerfile.v0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-10-15T06:28:14.211190228Z",
                                                               "created_by": "ARG TARGETARCH=amd64",
                                                               "comment": "buildkit.dockerfile.v0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-10-15T06:28:14.211190228Z",
                                                               "created_by": "ENV NVIDIA_VISIBLE_DEVICES=all",
                                                               "comment": "buildkit.dockerfile.v0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-10-15T06:28:14.211190228Z",
                                                               "created_by": "ENV NVIDIA_DRIVER_CAPABILITIES=utility",
                                                               "comment": "buildkit.dockerfile.v0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-10-15T06:28:14.211190228Z",
                                                               "created_by": "ENV NVIDIA_MIG_MONITOR_DEVICES=all",
                                                               "comment": "buildkit.dockerfile.v0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-10-15T06:28:14.211190228Z",
                                                               "created_by": "ENV NVIDIA_MIG_CONFIG_DEVICES=all",
                                                               "comment": "buildkit.dockerfile.v0",
                                                               "empty_layer": true
                                                          },
                                                          {
                                                               "created": "2024-10-15T06:28:14.211190228Z",
                                                               "created_by": "RUN |3 INSTALL_DCGM=false INSTALL_HABANA=false TARGETARCH=amd64 /bin/bash -c yum -y update-minimal --security --sec-severity=Important --sec-severity=Critical && yum clean all # buildkit",
                                                               "comment": "buildkit.dockerfile.v0"
                                                          },
                                                          {
                                                               "created": "2024-10-15T06:28:38.991358946Z",
                                                               "created_by": "RUN |3 INSTALL_DCGM=false INSTALL_HABANA=false TARGETARCH=amd64 /bin/bash -c set -e -x ;\t\tINSTALL_PKGS=\" \t\t\tlibbpf  \t\t\" ;\t\tyum install -y $INSTALL_PKGS ;\t\t\t\tif [[ \"$TARGETARCH\" == \"amd64\" ]]; then \t\t\tyum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm; \t\t\tyum install -y cpuid; \t\t\tif [[ \"$INSTALL_DCGM\" == \"true\" ]]; then \t\t\t\tdnf config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/cuda-rhel9.repo; \t\t\t\tyum install -y datacenter-gpu-manager libnvidia-ml; \t\t\tfi; \t\t\tif [[ \"$INSTALL_HABANA\" == \"true\" ]]; then \t\t\t\trpm -Uvh https://vault.habana.ai/artifactory/rhel/9/9.2/habanalabs-firmware-tools-1.15.1-15.el9.x86_64.rpm --nodeps; \t\t\t\techo /usr/lib/habanalabs > /etc/ld.so.conf.d/habanalabs.conf; \t\t\t\tldconfig; \t\t\tfi; \t\tfi;\t\tyum clean all # buildkit",
                                                               "comment": "buildkit.dockerfile.v0"
                                                          },
                                                          {
                                                               "created": "2024-10-15T06:30:56.146511902Z",
                                                               "created_by": "COPY /workspace/_output/bin/kepler /usr/bin/kepler # buildkit",
                                                               "comment": "buildkit.dockerfile.v0"
                                                          },
                                                          {
                                                               "created": "2024-10-15T06:30:56.168608119Z",
                                                               "created_by": "COPY /libbpf-source/linux-5.14.0-424.el9/tools/bpf/bpftool/bpftool /usr/bin/bpftool # buildkit",
                                                               "comment": "buildkit.dockerfile.v0"
                                                          },
                                                          {
                                                               "created": "2024-10-15T06:30:56.24706386Z",
                                                               "created_by": "RUN |3 INSTALL_DCGM=false INSTALL_HABANA=false TARGETARCH=amd64 /bin/bash -c mkdir -p /var/lib/kepler/data # buildkit",
                                                               "comment": "buildkit.dockerfile.v0"
                                                          },
                                                          {
                                                               "created": "2024-10-15T06:30:56.299132132Z",
                                                               "created_by": "COPY /workspace/data/cpus.yaml /var/lib/kepler/data/cpus.yaml # buildkit",
                                                               "comment": "buildkit.dockerfile.v0"
                                                          },
                                                          {
                                                               "created": "2024-10-15T06:30:56.315982344Z",
                                                               "created_by": "COPY /workspace/data/model_weight /var/lib/kepler/data/model_weight # buildkit",
                                                               "comment": "buildkit.dockerfile.v0"
                                                          },
                                                          {
                                                               "created": "2024-10-15T06:30:56.315982344Z",
                                                               "created_by": "ENTRYPOINT [\"/usr/bin/kepler\"]",
                                                               "comment": "buildkit.dockerfile.v0",
                                                               "empty_layer": true
                                                          }
                                                     ],
                                                     "NamesHistory": [
                                                          "quay.io/sustainable_computing_io/kepler:release-0.7.12"
                                                     ]
                                                }
                                           ]
                                           : quay.io/sustainable_computing_io/kepler:release-0.7.12
Oct 11 02:10:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:10:54.830 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:10:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:10:54.832 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:10:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:10:54.832 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:10:54 compute-0 ceph-mon[191930]: pgmap v981: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:54 compute-0 kepler[176959]: I1011 02:10:54.926475       1 exporter.go:218] Received shutdown signal
Oct 11 02:10:54 compute-0 kepler[176959]: I1011 02:10:54.928861       1 exporter.go:226] Exiting...
Oct 11 02:10:55 compute-0 systemd[1]: libpod-e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304.scope: Deactivated successfully.
Oct 11 02:10:55 compute-0 systemd[1]: libpod-e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304.scope: Consumed 46.981s CPU time.
Oct 11 02:10:55 compute-0 podman[395363]: 2025-10-11 02:10:55.024629376 +0000 UTC m=+0.168891207 container died e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=base rhel9, managed_by=edpm_ansible, vcs-type=git, maintainer=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, name=ubi9, io.buildah.version=1.29.0, com.redhat.component=ubi9-container, config_id=edpm, architecture=x86_64, build-date=2024-09-18T21:23:30, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, distribution-scope=public, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9., release-0.7.12=, release=1214.1726694543, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.expose-services=, version=9.4)
Oct 11 02:10:55 compute-0 systemd[1]: e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304-5ec27e20c4a956d.timer: Deactivated successfully.
Oct 11 02:10:55 compute-0 systemd[1]: Stopped /usr/bin/podman healthcheck run e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304.
Oct 11 02:10:55 compute-0 systemd[1]: e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304-5ec27e20c4a956d.service: Failed to open /run/systemd/transient/e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304-5ec27e20c4a956d.service: No such file or directory
Oct 11 02:10:55 compute-0 systemd[1]: var-lib-containers-storage-overlay-07c333d45365e5e1beffd98c126bb9e4df6c2eef205c9a1a789247673061f9f9-merged.mount: Deactivated successfully.
Oct 11 02:10:55 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304-userdata-shm.mount: Deactivated successfully.
Oct 11 02:10:55 compute-0 podman[395363]: 2025-10-11 02:10:55.081397822 +0000 UTC m=+0.225659633 container cleanup e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, maintainer=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release-0.7.12=, distribution-scope=public, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, container_name=kepler, name=ubi9, io.openshift.tags=base rhel9, com.redhat.component=ubi9-container, config_id=edpm, release=1214.1726694543, version=9.4, architecture=x86_64, io.buildah.version=1.29.0, managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., io.openshift.expose-services=, build-date=2024-09-18T21:23:30, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-type=git)
Oct 11 02:10:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:10:55 compute-0 python3[395314]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman stop kepler
Oct 11 02:10:55 compute-0 systemd[1]: e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304-5ec27e20c4a956d.timer: Failed to open /run/systemd/transient/e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304-5ec27e20c4a956d.timer: No such file or directory
Oct 11 02:10:55 compute-0 systemd[1]: e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304-5ec27e20c4a956d.service: Failed to open /run/systemd/transient/e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304-5ec27e20c4a956d.service: No such file or directory
Oct 11 02:10:55 compute-0 podman[395390]: 2025-10-11 02:10:55.226549122 +0000 UTC m=+0.103870833 container remove e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, distribution-scope=public, name=ubi9, release=1214.1726694543, version=9.4, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, release-0.7.12=, io.openshift.expose-services=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9, summary=Provides the latest release of Red Hat Universal Base Image 9., managed_by=edpm_ansible, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.buildah.version=1.29.0, io.openshift.tags=base rhel9, vendor=Red Hat, Inc., config_id=edpm, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, container_name=kepler, vcs-type=git, architecture=x86_64, build-date=2024-09-18T21:23:30, com.redhat.component=ubi9-container)
Oct 11 02:10:55 compute-0 podman[395391]: Error: no container with ID e5116f05228e2f87471695f7128663e99833cb9043b805bd4f7b08b837814304 found in database: no such container
Oct 11 02:10:55 compute-0 systemd[1]: edpm_kepler.service: Control process exited, code=exited, status=125/n/a
Oct 11 02:10:55 compute-0 python3[395314]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman rm --force kepler
Oct 11 02:10:55 compute-0 podman[395416]: Error: no container with name or ID "kepler" found: no such container
Oct 11 02:10:55 compute-0 systemd[1]: edpm_kepler.service: Control process exited, code=exited, status=125/n/a
Oct 11 02:10:55 compute-0 systemd[1]: edpm_kepler.service: Failed with result 'exit-code'.
Oct 11 02:10:55 compute-0 podman[395417]: 2025-10-11 02:10:55.342530275 +0000 UTC m=+0.079950927 container create ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, version=9.4, distribution-scope=public, maintainer=Red Hat, Inc., container_name=kepler, io.openshift.tags=base rhel9, io.openshift.expose-services=, managed_by=edpm_ansible, config_id=edpm, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2024-09-18T21:23:30, com.redhat.component=ubi9-container, architecture=x86_64, io.buildah.version=1.29.0, name=ubi9, release-0.7.12=, summary=Provides the latest release of Red Hat Universal Base Image 9., io.k8s.display-name=Red Hat Universal Base Image 9, release=1214.1726694543, vendor=Red Hat, Inc., vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 02:10:55 compute-0 podman[395417]: 2025-10-11 02:10:55.30483057 +0000 UTC m=+0.042251222 image pull ed61e3ea3188391c18595d8ceada2a5a01f0ece915c62fde355798735b5208d7 quay.io/sustainable_computing_io/kepler:release-0.7.12
Oct 11 02:10:55 compute-0 python3[395314]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman create --name kepler --conmon-pidfile /run/kepler.pid --env ENABLE_GPU=true --env EXPOSE_CONTAINER_METRICS=true --env ENABLE_PROCESS_METRICS=true --env EXPOSE_VM_METRICS=true --env EXPOSE_ESTIMATED_IDLE_POWER_METRICS=false --env LIBVIRT_METADATA_URI=http://openstack.org/xmlns/libvirt/nova/1.1 --healthcheck-command /openstack/healthcheck kepler --label config_id=edpm --label container_name=kepler --label managed_by=edpm_ansible --label config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']} --log-driver journald --log-level info --network host --privileged=True --publish 8888:8888 --volume /lib/modules:/lib/modules:ro --volume /run/libvirt:/run/libvirt:shared,ro --volume /sys:/sys --volume /proc:/proc --volume /var/lib/openstack/healthchecks/kepler:/openstack:ro,z quay.io/sustainable_computing_io/kepler:release-0.7.12 -v=2
Oct 11 02:10:55 compute-0 systemd[1]: edpm_kepler.service: Scheduled restart job, restart counter is at 1.
Oct 11 02:10:55 compute-0 systemd[1]: Stopped kepler container.
Oct 11 02:10:55 compute-0 systemd[1]: Starting kepler container...
Oct 11 02:10:55 compute-0 systemd[1]: Started libpod-conmon-ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603.scope.
Oct 11 02:10:55 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:10:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v982: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:55 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603.
Oct 11 02:10:55 compute-0 podman[395440]: 2025-10-11 02:10:55.589575131 +0000 UTC m=+0.208212611 container init ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, config_id=edpm, release-0.7.12=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=kepler, version=9.4, io.buildah.version=1.29.0, io.openshift.tags=base rhel9, name=ubi9, architecture=x86_64, io.openshift.expose-services=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9, com.redhat.component=ubi9-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, maintainer=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, distribution-scope=public, release=1214.1726694543, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, build-date=2024-09-18T21:23:30, vendor=Red Hat, Inc.)
Oct 11 02:10:55 compute-0 podman[395440]: 2025-10-11 02:10:55.626801329 +0000 UTC m=+0.245438839 container start ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, io.buildah.version=1.29.0, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9, name=ubi9, io.openshift.expose-services=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, build-date=2024-09-18T21:23:30, io.openshift.tags=base rhel9, architecture=x86_64, config_id=edpm, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release-0.7.12=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=kepler, com.redhat.component=ubi9-container, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., release=1214.1726694543, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, version=9.4)
Oct 11 02:10:55 compute-0 kepler[395456]: WARNING: failed to read int from file: open /sys/devices/system/cpu/cpu0/online: no such file or directory
Oct 11 02:10:55 compute-0 podman[395453]: kepler
Oct 11 02:10:55 compute-0 kepler[395456]: I1011 02:10:55.642546       1 exporter.go:103] Kepler running on version: v0.7.12-dirty
Oct 11 02:10:55 compute-0 kepler[395456]: I1011 02:10:55.643806       1 config.go:293] using gCgroup ID in the BPF program: true
Oct 11 02:10:55 compute-0 kepler[395456]: I1011 02:10:55.643837       1 config.go:295] kernel version: 5.14
Oct 11 02:10:55 compute-0 python3[395314]: ansible-edpm_container_manage PODMAN-CONTAINER-DEBUG: podman start kepler
Oct 11 02:10:55 compute-0 kepler[395456]: I1011 02:10:55.645204       1 power.go:78] Unable to obtain power, use estimate method
Oct 11 02:10:55 compute-0 kepler[395456]: I1011 02:10:55.645283       1 redfish.go:169] failed to get redfish credential file path
Oct 11 02:10:55 compute-0 kepler[395456]: I1011 02:10:55.646694       1 acpi.go:71] Could not find any ACPI power meter path. Is it a VM?
Oct 11 02:10:55 compute-0 kepler[395456]: I1011 02:10:55.646732       1 power.go:79] using none to obtain power
Oct 11 02:10:55 compute-0 kepler[395456]: E1011 02:10:55.646765       1 accelerator.go:154] [DUMMY] doesn't contain GPU
Oct 11 02:10:55 compute-0 kepler[395456]: E1011 02:10:55.646824       1 exporter.go:154] failed to init GPU accelerators: no devices found
Oct 11 02:10:55 compute-0 kepler[395456]: WARNING: failed to read int from file: open /sys/devices/system/cpu/cpu0/online: no such file or directory
Oct 11 02:10:55 compute-0 kepler[395456]: I1011 02:10:55.650388       1 exporter.go:84] Number of CPUs: 8
Oct 11 02:10:55 compute-0 systemd[1]: Started kepler container.
Oct 11 02:10:55 compute-0 sshd-session[395361]: Invalid user user from 121.227.153.123 port 41838
Oct 11 02:10:55 compute-0 podman[395478]: 2025-10-11 02:10:55.798801553 +0000 UTC m=+0.150026956 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=starting, health_failing_streak=1, health_log=, release=1214.1726694543, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, version=9.4, container_name=kepler, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, managed_by=edpm_ansible, architecture=x86_64, com.redhat.component=ubi9-container, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, summary=Provides the latest release of Red Hat Universal Base Image 9., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, build-date=2024-09-18T21:23:30, io.openshift.expose-services=, release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.openshift.tags=base rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc., maintainer=Red Hat, Inc., distribution-scope=public, io.buildah.version=1.29.0, name=ubi9)
Oct 11 02:10:55 compute-0 systemd[1]: ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603-6b6d6d8a1c93aefc.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 02:10:55 compute-0 systemd[1]: ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603-6b6d6d8a1c93aefc.service: Failed with result 'exit-code'.
Oct 11 02:10:55 compute-0 sudo[395312]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:55 compute-0 sshd-session[395361]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:10:55 compute-0 sshd-session[395361]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.289322       1 watcher.go:83] Using in cluster k8s config
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.289915       1 watcher.go:90] failed to get config: unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined
Oct 11 02:10:56 compute-0 kepler[395456]: E1011 02:10:56.290031       1 manager.go:59] could not run the watcher k8s APIserver watcher was not enabled
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.298514       1 process_energy.go:129] Using the Ratio Power Model to estimate PROCESS_TOTAL Power
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.298573       1 process_energy.go:130] Feature names: [bpf_cpu_time_ms]
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.307127       1 process_energy.go:129] Using the Ratio Power Model to estimate PROCESS_COMPONENTS Power
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.307180       1 process_energy.go:130] Feature names: [bpf_cpu_time_ms bpf_cpu_time_ms bpf_cpu_time_ms   gpu_compute_util]
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.322025       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.322100       1 model.go:125] Requesting for Machine Spec: &{authenticamd amd_epyc_rome 8 8 7 2800 1}
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.322131       1 node_platform_energy.go:53] Using the Regressor/AbsPower Power Model to estimate Node Platform Power
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.335476       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.335532       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.335541       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.335549       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.335559       1 model.go:125] Requesting for Machine Spec: &{authenticamd amd_epyc_rome 8 8 7 2800 1}
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.335578       1 node_component_energy.go:57] Using the Regressor/AbsPower Power Model to estimate Node Component Power
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.335715       1 prometheus_collector.go:90] Registered Process Prometheus metrics
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.335766       1 prometheus_collector.go:95] Registered Container Prometheus metrics
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.335838       1 prometheus_collector.go:100] Registered VM Prometheus metrics
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.335872       1 prometheus_collector.go:104] Registered Node Prometheus metrics
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.336099       1 exporter.go:194] starting to listen on 0.0.0.0:8888
Oct 11 02:10:56 compute-0 kepler[395456]: I1011 02:10:56.337618       1 exporter.go:208] Started Kepler in 695.316891ms
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:10:56
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['vms', 'volumes', 'default.rgw.meta', '.mgr', 'cephfs.cephfs.data', 'default.rgw.log', 'cephfs.cephfs.meta', 'images', 'default.rgw.control', 'backups', '.rgw.root']
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:10:56 compute-0 sudo[395689]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dwoznjocygcaeslkatrxgjdzebrpsfkq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148656.1070583-440-128583145170436/AnsiballZ_stat.py'
Oct 11 02:10:56 compute-0 sudo[395689]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:56 compute-0 python3.9[395691]: ansible-ansible.builtin.stat Invoked with path=/etc/sysconfig/podman_drop_in follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:10:56 compute-0 ceph-mon[191930]: pgmap v982: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:10:56 compute-0 sudo[395689]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:10:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:10:57 compute-0 unix_chkpwd[395718]: password check failed for user (root)
Oct 11 02:10:57 compute-0 sshd-session[395637]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 02:10:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v983: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:57 compute-0 sshd-session[395361]: Failed password for invalid user user from 121.227.153.123 port 41838 ssh2
Oct 11 02:10:57 compute-0 sudo[395844]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-aianatkbkycftbxlnuszbibvbmclxorr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148657.2700138-449-9479919749158/AnsiballZ_file.py'
Oct 11 02:10:57 compute-0 sudo[395844]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:58 compute-0 python3.9[395846]: ansible-file Invoked with path=/etc/systemd/system/edpm_kepler.requires state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:10:58 compute-0 sudo[395844]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:58 compute-0 sshd-session[395637]: Failed password for root from 193.46.255.217 port 48364 ssh2
Oct 11 02:10:58 compute-0 sudo[395995]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ajcxkbcytqocvvesjwtypbxfvhcjeqyu ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148658.1849048-449-17315657904069/AnsiballZ_copy.py'
Oct 11 02:10:58 compute-0 sudo[395995]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:58 compute-0 ceph-mon[191930]: pgmap v983: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:59 compute-0 python3.9[395997]: ansible-copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1760148658.1849048-449-17315657904069/source dest=/etc/systemd/system/edpm_kepler.service mode=0644 owner=root group=root backup=False force=True remote_src=False follow=False unsafe_writes=False _original_basename=None content=NOT_LOGGING_PARAMETER validate=None directory_mode=None local_follow=None checksum=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:10:59 compute-0 sudo[395995]: pam_unix(sudo:session): session closed for user root
Oct 11 02:10:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v984: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:10:59 compute-0 sshd-session[395361]: Connection closed by invalid user user 121.227.153.123 port 41838 [preauth]
Oct 11 02:10:59 compute-0 sudo[396085]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yubovsmcffnilsbvivybcpekeonqmpnc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148658.1849048-449-17315657904069/AnsiballZ_systemd.py'
Oct 11 02:10:59 compute-0 sudo[396085]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:10:59 compute-0 podman[396045]: 2025-10-11 02:10:59.702306624 +0000 UTC m=+0.142102883 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:10:59 compute-0 podman[157119]: time="2025-10-11T02:10:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:10:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:10:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45038 "" "Go-http-client/1.1"
Oct 11 02:10:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:10:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8510 "" "Go-http-client/1.1"
Oct 11 02:11:00 compute-0 python3.9[396096]: ansible-systemd Invoked with state=started name=edpm_kepler.service enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Oct 11 02:11:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:11:00 compute-0 sudo[396085]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:00 compute-0 unix_chkpwd[396125]: password check failed for user (root)
Oct 11 02:11:00 compute-0 ceph-mon[191930]: pgmap v984: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:00 compute-0 sshd-session[396097]: Invalid user user from 121.227.153.123 port 47566
Oct 11 02:11:01 compute-0 podman[396222]: 2025-10-11 02:11:01.096140804 +0000 UTC m=+0.100255549 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, name=ubi9-minimal, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, io.openshift.tags=minimal rhel9, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, vcs-type=git, com.redhat.component=ubi9-minimal-container, managed_by=edpm_ansible, io.openshift.expose-services=, architecture=x86_64, config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.buildah.version=1.33.7, maintainer=Red Hat, Inc., version=9.6, vendor=Red Hat, Inc., container_name=openstack_network_exporter, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350, build-date=2025-08-20T13:12:41, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 02:11:01 compute-0 sudo[396269]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-opmpwcazozbjvkuzhbenufdlbbauhlsm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148660.4821794-469-240665459485022/AnsiballZ_systemd.py'
Oct 11 02:11:01 compute-0 sudo[396269]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:01 compute-0 sshd-session[396097]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:11:01 compute-0 sshd-session[396097]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:11:01 compute-0 python3.9[396271]: ansible-ansible.builtin.systemd Invoked with name=edpm_ceilometer_agent_ipmi.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 02:11:01 compute-0 openstack_network_exporter[374316]: ERROR   02:11:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:11:01 compute-0 openstack_network_exporter[374316]: ERROR   02:11:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:11:01 compute-0 openstack_network_exporter[374316]: ERROR   02:11:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:11:01 compute-0 openstack_network_exporter[374316]: ERROR   02:11:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:11:01 compute-0 openstack_network_exporter[374316]: ERROR   02:11:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:11:01 compute-0 systemd[1]: Stopping ceilometer_agent_ipmi container...
Oct 11 02:11:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v985: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 02:11:01.575 2 INFO cotyledon._service_manager [-] Caught SIGTERM signal, graceful exiting of master process
Oct 11 02:11:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 02:11:01.678 2 DEBUG cotyledon._service_manager [-] Killing services with signal SIGTERM _shutdown /usr/lib/python3.9/site-packages/cotyledon/_service_manager.py:304
Oct 11 02:11:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 02:11:01.678 2 DEBUG cotyledon._service_manager [-] Waiting services to terminate _shutdown /usr/lib/python3.9/site-packages/cotyledon/_service_manager.py:308
Oct 11 02:11:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 02:11:01.679 12 INFO cotyledon._service [-] Caught SIGTERM signal, graceful exiting of service AgentManager(0) [12]
Oct 11 02:11:01 compute-0 ceilometer_agent_ipmi[176703]: 2025-10-11 02:11:01.689 2 DEBUG cotyledon._service_manager [-] Shutdown finish _shutdown /usr/lib/python3.9/site-packages/cotyledon/_service_manager.py:320
Oct 11 02:11:01 compute-0 sshd-session[395637]: Failed password for root from 193.46.255.217 port 48364 ssh2
Oct 11 02:11:01 compute-0 systemd[1]: libpod-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.scope: Deactivated successfully.
Oct 11 02:11:01 compute-0 systemd[1]: libpod-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.scope: Consumed 3.957s CPU time.
Oct 11 02:11:01 compute-0 podman[396275]: 2025-10-11 02:11:01.949577292 +0000 UTC m=+0.453435250 container died 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, config_id=edpm, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:11:01 compute-0 systemd[1]: 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c-39a327549edd6123.timer: Deactivated successfully.
Oct 11 02:11:01 compute-0 systemd[1]: Stopped /usr/bin/podman healthcheck run 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.
Oct 11 02:11:01 compute-0 systemd[1]: 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c-39a327549edd6123.service: Failed to open /run/systemd/transient/47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c-39a327549edd6123.service: No such file or directory
Oct 11 02:11:01 compute-0 unix_chkpwd[396300]: password check failed for user (root)
Oct 11 02:11:02 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c-userdata-shm.mount: Deactivated successfully.
Oct 11 02:11:02 compute-0 systemd[1]: var-lib-containers-storage-overlay-88244608e55640e82c5cb6a8a67a46420230ae4798b24d993c337ff432ee2d45-merged.mount: Deactivated successfully.
Oct 11 02:11:02 compute-0 podman[396275]: 2025-10-11 02:11:02.034109195 +0000 UTC m=+0.537967123 container cleanup 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 02:11:02 compute-0 podman[396275]: ceilometer_agent_ipmi
Oct 11 02:11:02 compute-0 podman[396305]: ceilometer_agent_ipmi
Oct 11 02:11:02 compute-0 systemd[1]: edpm_ceilometer_agent_ipmi.service: Deactivated successfully.
Oct 11 02:11:02 compute-0 systemd[1]: Stopped ceilometer_agent_ipmi container.
Oct 11 02:11:02 compute-0 systemd[1]: Starting ceilometer_agent_ipmi container...
Oct 11 02:11:02 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:11:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/88244608e55640e82c5cb6a8a67a46420230ae4798b24d993c337ff432ee2d45/merged/etc/ceilometer/tls supports timestamps until 2038 (0x7fffffff)
Oct 11 02:11:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/88244608e55640e82c5cb6a8a67a46420230ae4798b24d993c337ff432ee2d45/merged/etc/ceilometer/ceilometer_prom_exporter.yaml supports timestamps until 2038 (0x7fffffff)
Oct 11 02:11:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/88244608e55640e82c5cb6a8a67a46420230ae4798b24d993c337ff432ee2d45/merged/var/lib/openstack/config supports timestamps until 2038 (0x7fffffff)
Oct 11 02:11:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/88244608e55640e82c5cb6a8a67a46420230ae4798b24d993c337ff432ee2d45/merged/var/lib/kolla/config_files/config.json supports timestamps until 2038 (0x7fffffff)
Oct 11 02:11:02 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.
Oct 11 02:11:02 compute-0 podman[396317]: 2025-10-11 02:11:02.375577011 +0000 UTC m=+0.212459710 container init 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_ipmi, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']})
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: + sudo -E kolla_set_configs
Oct 11 02:11:02 compute-0 sudo[396337]: ceilometer : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_set_configs
Oct 11 02:11:02 compute-0 podman[396317]: 2025-10-11 02:11:02.412497551 +0000 UTC m=+0.249380240 container start 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, container_name=ceilometer_agent_ipmi, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009)
Oct 11 02:11:02 compute-0 sudo[396337]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Oct 11 02:11:02 compute-0 sudo[396337]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=42405)
Oct 11 02:11:02 compute-0 podman[396317]: ceilometer_agent_ipmi
Oct 11 02:11:02 compute-0 systemd[1]: Started ceilometer_agent_ipmi container.
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: INFO:__main__:Loading config file at /var/lib/kolla/config_files/config.json
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: INFO:__main__:Validating config file
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: INFO:__main__:Kolla config strategy set to: COPY_ALWAYS
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: INFO:__main__:Copying service configuration files
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: INFO:__main__:Deleting /etc/ceilometer/ceilometer.conf
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: INFO:__main__:Copying /var/lib/openstack/config/ceilometer.conf to /etc/ceilometer/ceilometer.conf
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: INFO:__main__:Deleting /etc/ceilometer/polling.yaml
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: INFO:__main__:Copying /var/lib/openstack/config/polling.yaml to /etc/ceilometer/polling.yaml
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: INFO:__main__:Setting permission for /etc/ceilometer/polling.yaml
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: INFO:__main__:Deleting /etc/ceilometer/ceilometer.conf.d/01-ceilometer-custom.conf
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: INFO:__main__:Copying /var/lib/openstack/config/custom.conf to /etc/ceilometer/ceilometer.conf.d/01-ceilometer-custom.conf
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf.d/01-ceilometer-custom.conf
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: INFO:__main__:Deleting /etc/ceilometer/ceilometer.conf.d/02-ceilometer-host-specific.conf
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: INFO:__main__:Copying /var/lib/openstack/config/ceilometer-host-specific.conf to /etc/ceilometer/ceilometer.conf.d/02-ceilometer-host-specific.conf
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: INFO:__main__:Setting permission for /etc/ceilometer/ceilometer.conf.d/02-ceilometer-host-specific.conf
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: INFO:__main__:Writing out command to execute
Oct 11 02:11:02 compute-0 sudo[396269]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:02 compute-0 sudo[396337]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: ++ cat /run_command
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: + CMD='/usr/bin/ceilometer-polling --polling-namespaces ipmi --logfile /dev/stdout'
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: + ARGS=
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: + sudo kolla_copy_cacerts
Oct 11 02:11:02 compute-0 sudo[396352]: ceilometer : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_copy_cacerts
Oct 11 02:11:02 compute-0 sudo[396352]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Oct 11 02:11:02 compute-0 sudo[396352]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=42405)
Oct 11 02:11:02 compute-0 sudo[396352]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: + [[ ! -n '' ]]
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: + . kolla_extend_start
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: Running command: '/usr/bin/ceilometer-polling --polling-namespaces ipmi --logfile /dev/stdout'
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: + echo 'Running command: '\''/usr/bin/ceilometer-polling --polling-namespaces ipmi --logfile /dev/stdout'\'''
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: + umask 0022
Oct 11 02:11:02 compute-0 ceilometer_agent_ipmi[396331]: + exec /usr/bin/ceilometer-polling --polling-namespaces ipmi --logfile /dev/stdout
Oct 11 02:11:02 compute-0 podman[396338]: 2025-10-11 02:11:02.511376929 +0000 UTC m=+0.083975028 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=starting, health_failing_streak=1, health_log=, org.label-schema.license=GPLv2, config_id=edpm, container_name=ceilometer_agent_ipmi, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:11:02 compute-0 systemd[1]: 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c-728cc5f9610cec62.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 02:11:02 compute-0 systemd[1]: 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c-728cc5f9610cec62.service: Failed with result 'exit-code'.
Oct 11 02:11:02 compute-0 ceph-mon[191930]: pgmap v985: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:03 compute-0 sudo[396510]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vrttkhawsqjpisevdaxpwnnitkdmufda ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148662.7789917-477-197023462249763/AnsiballZ_systemd.py'
Oct 11 02:11:03 compute-0 sudo[396510]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:03 compute-0 sshd-session[396097]: Failed password for invalid user user from 121.227.153.123 port 47566 ssh2
Oct 11 02:11:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v986: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:03 compute-0 python3.9[396512]: ansible-ansible.builtin.systemd Invoked with name=edpm_kepler.service state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Oct 11 02:11:03 compute-0 systemd[1]: Stopping kepler container...
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.738 2 DEBUG cotyledon.oslo_config_glue [-] Full set of CONF: _load_service_manager_options /usr/lib/python3.9/site-packages/cotyledon/oslo_config_glue.py:40
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.739 2 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2589
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.739 2 DEBUG cotyledon.oslo_config_glue [-] Configuration options gathered from: log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2590
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.739 2 DEBUG cotyledon.oslo_config_glue [-] command line args: ['--polling-namespaces', 'ipmi', '--logfile', '/dev/stdout'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2591
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.739 2 DEBUG cotyledon.oslo_config_glue [-] config files: ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2592
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.739 2 DEBUG cotyledon.oslo_config_glue [-] ================================================================================ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2594
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.739 2 DEBUG cotyledon.oslo_config_glue [-] batch_size                     = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.739 2 DEBUG cotyledon.oslo_config_glue [-] cfg_file                       = polling.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.739 2 DEBUG cotyledon.oslo_config_glue [-] config_dir                     = ['/etc/ceilometer/ceilometer.conf.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.740 2 DEBUG cotyledon.oslo_config_glue [-] config_file                    = ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.740 2 DEBUG cotyledon.oslo_config_glue [-] config_source                  = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.740 2 DEBUG cotyledon.oslo_config_glue [-] debug                          = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.740 2 DEBUG cotyledon.oslo_config_glue [-] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'futurist=INFO', 'neutronclient=INFO', 'keystoneclient=INFO'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.740 2 DEBUG cotyledon.oslo_config_glue [-] event_pipeline_cfg_file        = event_pipeline.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.740 2 DEBUG cotyledon.oslo_config_glue [-] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.740 2 DEBUG cotyledon.oslo_config_glue [-] host                           = compute-0.ctlplane.example.com log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.740 2 DEBUG cotyledon.oslo_config_glue [-] http_timeout                   = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.740 2 DEBUG cotyledon.oslo_config_glue [-] hypervisor_inspector           = libvirt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.741 2 DEBUG cotyledon.oslo_config_glue [-] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.741 2 DEBUG cotyledon.oslo_config_glue [-] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.741 2 DEBUG cotyledon.oslo_config_glue [-] libvirt_type                   = kvm log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.741 2 DEBUG cotyledon.oslo_config_glue [-] libvirt_uri                    =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.741 2 DEBUG cotyledon.oslo_config_glue [-] log_config_append              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.741 2 DEBUG cotyledon.oslo_config_glue [-] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.741 2 DEBUG cotyledon.oslo_config_glue [-] log_dir                        = /var/log/ceilometer log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.741 2 DEBUG cotyledon.oslo_config_glue [-] log_file                       = /dev/stdout log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.741 2 DEBUG cotyledon.oslo_config_glue [-] log_options                    = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.741 2 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.742 2 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.742 2 DEBUG cotyledon.oslo_config_glue [-] log_rotation_type              = none log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.742 2 DEBUG cotyledon.oslo_config_glue [-] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.742 2 DEBUG cotyledon.oslo_config_glue [-] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.742 2 DEBUG cotyledon.oslo_config_glue [-] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.742 2 DEBUG cotyledon.oslo_config_glue [-] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.742 2 DEBUG cotyledon.oslo_config_glue [-] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.742 2 DEBUG cotyledon.oslo_config_glue [-] max_logfile_count              = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.742 2 DEBUG cotyledon.oslo_config_glue [-] max_logfile_size_mb            = 200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.742 2 DEBUG cotyledon.oslo_config_glue [-] max_parallel_requests          = 64 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.742 2 DEBUG cotyledon.oslo_config_glue [-] partitioning_group_prefix      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.743 2 DEBUG cotyledon.oslo_config_glue [-] pipeline_cfg_file              = pipeline.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.743 2 DEBUG cotyledon.oslo_config_glue [-] polling_namespaces             = ['ipmi'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.743 2 DEBUG cotyledon.oslo_config_glue [-] pollsters_definitions_dirs     = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.743 2 DEBUG cotyledon.oslo_config_glue [-] publish_errors                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.743 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.743 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.743 2 DEBUG cotyledon.oslo_config_glue [-] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.743 2 DEBUG cotyledon.oslo_config_glue [-] reseller_prefix                = AUTH_ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.743 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_keys         = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.743 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_length       = 256 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.744 2 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_namespace    = ['metering.'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.744 2 DEBUG cotyledon.oslo_config_glue [-] rootwrap_config                = /etc/ceilometer/rootwrap.conf log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.744 2 DEBUG cotyledon.oslo_config_glue [-] sample_source                  = openstack log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.744 2 DEBUG cotyledon.oslo_config_glue [-] syslog_log_facility            = LOG_USER log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.744 2 DEBUG cotyledon.oslo_config_glue [-] tenant_name_discovery          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.744 2 DEBUG cotyledon.oslo_config_glue [-] use_eventlog                   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.744 2 DEBUG cotyledon.oslo_config_glue [-] use_journal                    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.744 2 DEBUG cotyledon.oslo_config_glue [-] use_json                       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.744 2 DEBUG cotyledon.oslo_config_glue [-] use_stderr                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.744 2 DEBUG cotyledon.oslo_config_glue [-] use_syslog                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.745 2 DEBUG cotyledon.oslo_config_glue [-] watch_log_file                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.745 2 DEBUG cotyledon.oslo_config_glue [-] compute.instance_discovery_method = libvirt_metadata log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.745 2 DEBUG cotyledon.oslo_config_glue [-] compute.resource_cache_expiry  = 3600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.745 2 DEBUG cotyledon.oslo_config_glue [-] compute.resource_update_interval = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.745 2 DEBUG cotyledon.oslo_config_glue [-] coordination.backend_url       = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.745 2 DEBUG cotyledon.oslo_config_glue [-] event.definitions_cfg_file     = event_definitions.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.745 2 DEBUG cotyledon.oslo_config_glue [-] event.drop_unmatched_notifications = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.745 2 DEBUG cotyledon.oslo_config_glue [-] event.store_raw                = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.745 2 DEBUG cotyledon.oslo_config_glue [-] ipmi.node_manager_init_retry   = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.746 2 DEBUG cotyledon.oslo_config_glue [-] ipmi.polling_retry             = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.746 2 DEBUG cotyledon.oslo_config_glue [-] meter.meter_definitions_dirs   = ['/etc/ceilometer/meters.d', '/usr/lib/python3.9/site-packages/ceilometer/data/meters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.746 2 DEBUG cotyledon.oslo_config_glue [-] monasca.archive_on_failure     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.746 2 DEBUG cotyledon.oslo_config_glue [-] monasca.archive_path           = mon_pub_failures.txt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.746 2 DEBUG cotyledon.oslo_config_glue [-] monasca.auth_section           = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.746 2 DEBUG cotyledon.oslo_config_glue [-] monasca.auth_type              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.746 2 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_count            = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.746 2 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_max_retries      = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.746 2 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_mode             = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.746 2 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_polling_interval = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.747 2 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_timeout          = 15 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.747 2 DEBUG cotyledon.oslo_config_glue [-] monasca.cafile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.747 2 DEBUG cotyledon.oslo_config_glue [-] monasca.certfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.747 2 DEBUG cotyledon.oslo_config_glue [-] monasca.client_max_retries     = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.747 2 DEBUG cotyledon.oslo_config_glue [-] monasca.client_retry_interval  = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.747 2 DEBUG cotyledon.oslo_config_glue [-] monasca.clientapi_version      = 2_0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.747 2 DEBUG cotyledon.oslo_config_glue [-] monasca.cloud_name             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.747 2 DEBUG cotyledon.oslo_config_glue [-] monasca.cluster                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.747 2 DEBUG cotyledon.oslo_config_glue [-] monasca.collect_timing         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.748 2 DEBUG cotyledon.oslo_config_glue [-] monasca.control_plane          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.748 2 DEBUG cotyledon.oslo_config_glue [-] monasca.enable_api_pagination  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.748 2 DEBUG cotyledon.oslo_config_glue [-] monasca.insecure               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.748 2 DEBUG cotyledon.oslo_config_glue [-] monasca.interface              = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.748 2 DEBUG cotyledon.oslo_config_glue [-] monasca.keyfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.748 2 DEBUG cotyledon.oslo_config_glue [-] monasca.monasca_mappings       = /etc/ceilometer/monasca_field_definitions.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.748 2 DEBUG cotyledon.oslo_config_glue [-] monasca.region_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.748 2 DEBUG cotyledon.oslo_config_glue [-] monasca.retry_on_failure       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.748 2 DEBUG cotyledon.oslo_config_glue [-] monasca.split_loggers          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.749 2 DEBUG cotyledon.oslo_config_glue [-] monasca.timeout                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.749 2 DEBUG cotyledon.oslo_config_glue [-] notification.ack_on_event_error = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.749 2 DEBUG cotyledon.oslo_config_glue [-] notification.batch_size        = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.749 2 DEBUG cotyledon.oslo_config_glue [-] notification.batch_timeout     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.749 2 DEBUG cotyledon.oslo_config_glue [-] notification.messaging_urls    = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.749 2 DEBUG cotyledon.oslo_config_glue [-] notification.notification_control_exchanges = ['nova', 'glance', 'neutron', 'cinder', 'heat', 'keystone', 'sahara', 'trove', 'zaqar', 'swift', 'ceilometer', 'magnum', 'dns', 'ironic', 'aodh'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.749 2 DEBUG cotyledon.oslo_config_glue [-] notification.pipelines         = ['meter', 'event'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.750 2 DEBUG cotyledon.oslo_config_glue [-] notification.workers           = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.750 2 DEBUG cotyledon.oslo_config_glue [-] polling.batch_size             = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.750 2 DEBUG cotyledon.oslo_config_glue [-] polling.cfg_file               = polling.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.750 2 DEBUG cotyledon.oslo_config_glue [-] polling.partitioning_group_prefix = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.750 2 DEBUG cotyledon.oslo_config_glue [-] polling.pollsters_definitions_dirs = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.750 2 DEBUG cotyledon.oslo_config_glue [-] polling.tenant_name_discovery  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.750 2 DEBUG cotyledon.oslo_config_glue [-] publisher.telemetry_secret     = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.751 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.event_topic = event log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.751 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.metering_topic = metering log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.751 2 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.telemetry_driver = messagingv2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.751 2 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.access_key = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.751 2 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.secret_key = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.751 2 DEBUG cotyledon.oslo_config_glue [-] rgw_client.implicit_tenants    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.751 2 DEBUG cotyledon.oslo_config_glue [-] service_types.cinder           = volumev3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.751 2 DEBUG cotyledon.oslo_config_glue [-] service_types.glance           = image log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.752 2 DEBUG cotyledon.oslo_config_glue [-] service_types.neutron          = network log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.752 2 DEBUG cotyledon.oslo_config_glue [-] service_types.nova             = compute log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.752 2 DEBUG cotyledon.oslo_config_glue [-] service_types.radosgw          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.752 2 DEBUG cotyledon.oslo_config_glue [-] service_types.swift            = object-store log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.752 2 DEBUG cotyledon.oslo_config_glue [-] vmware.api_retry_count         = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.752 2 DEBUG cotyledon.oslo_config_glue [-] vmware.ca_file                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.752 2 DEBUG cotyledon.oslo_config_glue [-] vmware.host_ip                 = 127.0.0.1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.752 2 DEBUG cotyledon.oslo_config_glue [-] vmware.host_password           = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.752 2 DEBUG cotyledon.oslo_config_glue [-] vmware.host_port               = 443 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.752 2 DEBUG cotyledon.oslo_config_glue [-] vmware.host_username           =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.753 2 DEBUG cotyledon.oslo_config_glue [-] vmware.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.753 2 DEBUG cotyledon.oslo_config_glue [-] vmware.task_poll_interval      = 0.5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.753 2 DEBUG cotyledon.oslo_config_glue [-] vmware.wsdl_location           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.753 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_section = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.753 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_type  = password log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.753 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.cafile     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.753 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.certfile   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.753 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.collect_timing = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.753 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.insecure   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.754 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.interface  = internalURL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.754 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.keyfile    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.754 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.region_name = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.754 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.split_loggers = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.754 2 DEBUG cotyledon.oslo_config_glue [-] service_credentials.timeout    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.754 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_section           = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.754 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_type              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.754 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.cafile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.754 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.certfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.755 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.collect_timing         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.755 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.insecure               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.755 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.interface              = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.755 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.keyfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.755 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.region_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.755 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.split_loggers          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.755 2 DEBUG cotyledon.oslo_config_glue [-] gnocchi.timeout                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.755 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_section             = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.755 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_type                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.755 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.cafile                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.756 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.certfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.756 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.collect_timing           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.756 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.insecure                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.756 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.interface                = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.756 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.keyfile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.756 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.region_name              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.756 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.split_loggers            = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.756 2 DEBUG cotyledon.oslo_config_glue [-] zaqar.timeout                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.756 2 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2613
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.783 12 INFO ceilometer.polling.manager [-] Looking for dynamic pollsters configurations at [['/etc/ceilometer/pollsters.d']].
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.785 12 INFO ceilometer.polling.manager [-] No dynamic pollsters found in folder [/etc/ceilometer/pollsters.d].
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.786 12 INFO ceilometer.polling.manager [-] No dynamic pollsters file found in dirs [['/etc/ceilometer/pollsters.d']].
Oct 11 02:11:03 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:03.809 12 INFO oslo.privsep.daemon [-] Running privsep helper: ['sudo', 'ceilometer-rootwrap', '/etc/ceilometer/rootwrap.conf', 'privsep-helper', '--privsep_context', 'ceilometer.privsep.sys_admin_pctxt', '--privsep_sock_path', '/tmp/tmpr6yzruiy/privsep.sock']
Oct 11 02:11:03 compute-0 kepler[395456]: I1011 02:11:03.813722       1 exporter.go:218] Received shutdown signal
Oct 11 02:11:03 compute-0 kepler[395456]: I1011 02:11:03.815120       1 exporter.go:226] Exiting...
Oct 11 02:11:03 compute-0 sudo[396534]: ceilometer : PWD=/ ; USER=root ; COMMAND=/usr/bin/ceilometer-rootwrap /etc/ceilometer/rootwrap.conf privsep-helper --privsep_context ceilometer.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmpr6yzruiy/privsep.sock
Oct 11 02:11:03 compute-0 sudo[396534]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Oct 11 02:11:03 compute-0 sudo[396534]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=42405)
Oct 11 02:11:04 compute-0 systemd[1]: libpod-ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603.scope: Deactivated successfully.
Oct 11 02:11:04 compute-0 systemd[1]: libpod-ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603.scope: Consumed 1.088s CPU time.
Oct 11 02:11:04 compute-0 conmon[395456]: conmon ffd3ae2f504a16fb2d0c <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603.scope/container/memory.events
Oct 11 02:11:04 compute-0 podman[396516]: 2025-10-11 02:11:04.027862133 +0000 UTC m=+0.303257176 container died ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, managed_by=edpm_ansible, container_name=kepler, name=ubi9, summary=Provides the latest release of Red Hat Universal Base Image 9., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, build-date=2024-09-18T21:23:30, version=9.4, io.buildah.version=1.29.0, vcs-type=git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-container, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9, distribution-scope=public, release-0.7.12=, config_id=edpm, maintainer=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, release=1214.1726694543, vendor=Red Hat, Inc., io.openshift.tags=base rhel9, architecture=x86_64)
Oct 11 02:11:04 compute-0 systemd[1]: ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603-6b6d6d8a1c93aefc.timer: Deactivated successfully.
Oct 11 02:11:04 compute-0 systemd[1]: Stopped /usr/bin/podman healthcheck run ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603.
Oct 11 02:11:04 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603-userdata-shm.mount: Deactivated successfully.
Oct 11 02:11:04 compute-0 systemd[1]: var-lib-containers-storage-overlay-8276ec39d2c1cbaca661abb80fa8ebe3e53556b89498c6824aafa088e9910012-merged.mount: Deactivated successfully.
Oct 11 02:11:04 compute-0 podman[396516]: 2025-10-11 02:11:04.077080264 +0000 UTC m=+0.352475287 container cleanup ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, build-date=2024-09-18T21:23:30, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, name=ubi9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release-0.7.12=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, managed_by=edpm_ansible, container_name=kepler, vendor=Red Hat, Inc., version=9.4, io.openshift.tags=base rhel9, io.openshift.expose-services=, com.redhat.component=ubi9-container, maintainer=Red Hat, Inc., distribution-scope=public, release=1214.1726694543, architecture=x86_64, vcs-type=git, config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9, summary=Provides the latest release of Red Hat Universal Base Image 9., io.buildah.version=1.29.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:11:04 compute-0 podman[396516]: kepler
Oct 11 02:11:04 compute-0 systemd[1]: libpod-conmon-ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603.scope: Deactivated successfully.
Oct 11 02:11:04 compute-0 podman[396551]: kepler
Oct 11 02:11:04 compute-0 systemd[1]: edpm_kepler.service: Deactivated successfully.
Oct 11 02:11:04 compute-0 systemd[1]: Stopped kepler container.
Oct 11 02:11:04 compute-0 systemd[1]: Starting kepler container...
Oct 11 02:11:04 compute-0 sshd-session[395637]: Failed password for root from 193.46.255.217 port 48364 ssh2
Oct 11 02:11:04 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:11:04 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603.
Oct 11 02:11:04 compute-0 podman[396566]: 2025-10-11 02:11:04.382878761 +0000 UTC m=+0.163793005 container init ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, vcs-type=git, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, managed_by=edpm_ansible, vendor=Red Hat, Inc., io.buildah.version=1.29.0, summary=Provides the latest release of Red Hat Universal Base Image 9., io.openshift.expose-services=, com.redhat.component=ubi9-container, architecture=x86_64, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, maintainer=Red Hat, Inc., release=1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, version=9.4, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, config_id=edpm, io.openshift.tags=base rhel9, container_name=kepler, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2024-09-18T21:23:30, name=ubi9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, release-0.7.12=)
Oct 11 02:11:04 compute-0 kepler[396582]: WARNING: failed to read int from file: open /sys/devices/system/cpu/cpu0/online: no such file or directory
Oct 11 02:11:04 compute-0 podman[396566]: 2025-10-11 02:11:04.41580826 +0000 UTC m=+0.196722484 container start ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, managed_by=edpm_ansible, architecture=x86_64, release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9, config_id=edpm, io.openshift.tags=base rhel9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, container_name=kepler, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, vendor=Red Hat, Inc., distribution-scope=public, io.buildah.version=1.29.0, version=9.4, io.openshift.expose-services=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, build-date=2024-09-18T21:23:30, name=ubi9, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., release-0.7.12=)
Oct 11 02:11:04 compute-0 podman[396566]: kepler
Oct 11 02:11:04 compute-0 kepler[396582]: I1011 02:11:04.430597       1 exporter.go:103] Kepler running on version: v0.7.12-dirty
Oct 11 02:11:04 compute-0 kepler[396582]: I1011 02:11:04.430807       1 config.go:293] using gCgroup ID in the BPF program: true
Oct 11 02:11:04 compute-0 kepler[396582]: I1011 02:11:04.430833       1 config.go:295] kernel version: 5.14
Oct 11 02:11:04 compute-0 kepler[396582]: I1011 02:11:04.431903       1 power.go:78] Unable to obtain power, use estimate method
Oct 11 02:11:04 compute-0 kepler[396582]: I1011 02:11:04.431940       1 redfish.go:169] failed to get redfish credential file path
Oct 11 02:11:04 compute-0 kepler[396582]: I1011 02:11:04.432617       1 acpi.go:71] Could not find any ACPI power meter path. Is it a VM?
Oct 11 02:11:04 compute-0 kepler[396582]: I1011 02:11:04.432636       1 power.go:79] using none to obtain power
Oct 11 02:11:04 compute-0 kepler[396582]: E1011 02:11:04.432664       1 accelerator.go:154] [DUMMY] doesn't contain GPU
Oct 11 02:11:04 compute-0 kepler[396582]: E1011 02:11:04.432700       1 exporter.go:154] failed to init GPU accelerators: no devices found
Oct 11 02:11:04 compute-0 systemd[1]: Started kepler container.
Oct 11 02:11:04 compute-0 kepler[396582]: WARNING: failed to read int from file: open /sys/devices/system/cpu/cpu0/online: no such file or directory
Oct 11 02:11:04 compute-0 kepler[396582]: I1011 02:11:04.436469       1 exporter.go:84] Number of CPUs: 8
Oct 11 02:11:04 compute-0 sudo[396510]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:04 compute-0 podman[396592]: 2025-10-11 02:11:04.513918244 +0000 UTC m=+0.079272542 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=starting, health_failing_streak=1, health_log=, config_id=edpm, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, architecture=x86_64, distribution-scope=public, release-0.7.12=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.buildah.version=1.29.0, com.redhat.component=ubi9-container, name=ubi9, summary=Provides the latest release of Red Hat Universal Base Image 9., io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.tags=base rhel9, io.openshift.expose-services=, maintainer=Red Hat, Inc., managed_by=edpm_ansible, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-type=git, release=1214.1726694543, vendor=Red Hat, Inc., container_name=kepler, version=9.4, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:11:04 compute-0 systemd[1]: ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603-26fcb5de8124f39d.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 02:11:04 compute-0 systemd[1]: ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603-26fcb5de8124f39d.service: Failed with result 'exit-code'.
Oct 11 02:11:04 compute-0 sudo[396534]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.581 12 INFO oslo.privsep.daemon [-] Spawned new privsep daemon via rootwrap
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.582 12 DEBUG oslo.privsep.daemon [-] Accepted privsep connection to /tmp/tmpr6yzruiy/privsep.sock __init__ /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:362
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.432 19 INFO oslo.privsep.daemon [-] privsep daemon starting
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.442 19 INFO oslo.privsep.daemon [-] privsep process running with uid/gid: 0/0
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.445 19 INFO oslo.privsep.daemon [-] privsep process running with capabilities (eff/prm/inh): CAP_CHOWN|CAP_DAC_OVERRIDE|CAP_DAC_READ_SEARCH|CAP_FOWNER|CAP_NET_ADMIN|CAP_SYS_ADMIN/CAP_CHOWN|CAP_DAC_OVERRIDE|CAP_DAC_READ_SEARCH|CAP_FOWNER|CAP_NET_ADMIN|CAP_SYS_ADMIN/none
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.446 19 INFO oslo.privsep.daemon [-] privsep daemon running as pid 19
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.718 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.current: IPMITool not supported on host _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.719 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.fan: IPMITool not supported on host _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.722 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.airflow: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.722 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.cpu_util: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.722 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.cups: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.722 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.io_util: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.723 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.mem_util: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.723 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.outlet_temperature: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.723 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.power: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.723 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.node.temperature: object.__new__() takes exactly one argument (the type to instantiate) _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.724 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.temperature: IPMITool not supported on host _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.724 12 DEBUG ceilometer.polling.manager [-] Skip loading extension for hardware.ipmi.voltage: IPMITool not supported on host _catch_extension_load_error /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:421
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.724 12 WARNING ceilometer.polling.manager [-] No valid pollsters can be loaded from ['ipmi'] namespaces
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.732 12 DEBUG cotyledon.oslo_config_glue [-] Full set of CONF: _load_service_options /usr/lib/python3.9/site-packages/cotyledon/oslo_config_glue.py:48
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.732 12 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2589
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.732 12 DEBUG cotyledon.oslo_config_glue [-] Configuration options gathered from: log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2590
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.733 12 DEBUG cotyledon.oslo_config_glue [-] command line args: ['--polling-namespaces', 'ipmi', '--logfile', '/dev/stdout'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2591
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.733 12 DEBUG cotyledon.oslo_config_glue [-] config files: ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2592
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.733 12 DEBUG cotyledon.oslo_config_glue [-] ================================================================================ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2594
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.734 12 DEBUG cotyledon.oslo_config_glue [-] batch_size                     = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.734 12 DEBUG cotyledon.oslo_config_glue [-] cfg_file                       = polling.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.734 12 DEBUG cotyledon.oslo_config_glue [-] config_dir                     = ['/etc/ceilometer/ceilometer.conf.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.734 12 DEBUG cotyledon.oslo_config_glue [-] config_file                    = ['/etc/ceilometer/ceilometer.conf'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.734 12 DEBUG cotyledon.oslo_config_glue [-] config_source                  = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.735 12 DEBUG cotyledon.oslo_config_glue [-] control_exchange               = ceilometer log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.735 12 DEBUG cotyledon.oslo_config_glue [-] debug                          = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.735 12 DEBUG cotyledon.oslo_config_glue [-] default_log_levels             = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'oslo_messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'requests.packages.urllib3.util.retry=WARN', 'urllib3.util.retry=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'taskflow=WARN', 'keystoneauth=WARN', 'oslo.cache=INFO', 'oslo_policy=INFO', 'dogpile.core.dogpile=INFO', 'futurist=INFO', 'neutronclient=INFO', 'keystoneclient=INFO'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.736 12 DEBUG cotyledon.oslo_config_glue [-] event_pipeline_cfg_file        = event_pipeline.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.736 12 DEBUG cotyledon.oslo_config_glue [-] graceful_shutdown_timeout      = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.736 12 DEBUG cotyledon.oslo_config_glue [-] host                           = compute-0.ctlplane.example.com log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.736 12 DEBUG cotyledon.oslo_config_glue [-] http_timeout                   = 600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.737 12 DEBUG cotyledon.oslo_config_glue [-] hypervisor_inspector           = libvirt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.737 12 DEBUG cotyledon.oslo_config_glue [-] instance_format                = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.737 12 DEBUG cotyledon.oslo_config_glue [-] instance_uuid_format           = [instance: %(uuid)s]  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.737 12 DEBUG cotyledon.oslo_config_glue [-] libvirt_type                   = kvm log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.737 12 DEBUG cotyledon.oslo_config_glue [-] libvirt_uri                    =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.738 12 DEBUG cotyledon.oslo_config_glue [-] log_config_append              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.738 12 DEBUG cotyledon.oslo_config_glue [-] log_date_format                = %Y-%m-%d %H:%M:%S log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.738 12 DEBUG cotyledon.oslo_config_glue [-] log_dir                        = /var/log/ceilometer log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.738 12 DEBUG cotyledon.oslo_config_glue [-] log_file                       = /dev/stdout log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.739 12 DEBUG cotyledon.oslo_config_glue [-] log_options                    = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.739 12 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval            = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.739 12 DEBUG cotyledon.oslo_config_glue [-] log_rotate_interval_type       = days log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.739 12 DEBUG cotyledon.oslo_config_glue [-] log_rotation_type              = none log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.739 12 DEBUG cotyledon.oslo_config_glue [-] logging_context_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.740 12 DEBUG cotyledon.oslo_config_glue [-] logging_debug_format_suffix    = %(funcName)s %(pathname)s:%(lineno)d log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.740 12 DEBUG cotyledon.oslo_config_glue [-] logging_default_format_string  = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.740 12 DEBUG cotyledon.oslo_config_glue [-] logging_exception_prefix       = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.740 12 DEBUG cotyledon.oslo_config_glue [-] logging_user_identity_format   = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.740 12 DEBUG cotyledon.oslo_config_glue [-] max_logfile_count              = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.740 12 DEBUG cotyledon.oslo_config_glue [-] max_logfile_size_mb            = 200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.741 12 DEBUG cotyledon.oslo_config_glue [-] max_parallel_requests          = 64 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.741 12 DEBUG cotyledon.oslo_config_glue [-] partitioning_group_prefix      = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.741 12 DEBUG cotyledon.oslo_config_glue [-] pipeline_cfg_file              = pipeline.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.741 12 DEBUG cotyledon.oslo_config_glue [-] polling_namespaces             = ['ipmi'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.741 12 DEBUG cotyledon.oslo_config_glue [-] pollsters_definitions_dirs     = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.742 12 DEBUG cotyledon.oslo_config_glue [-] publish_errors                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.742 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_burst               = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.742 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_except_level        = CRITICAL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.742 12 DEBUG cotyledon.oslo_config_glue [-] rate_limit_interval            = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.742 12 DEBUG cotyledon.oslo_config_glue [-] reseller_prefix                = AUTH_ log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.743 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_keys         = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.743 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_length       = 256 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.743 12 DEBUG cotyledon.oslo_config_glue [-] reserved_metadata_namespace    = ['metering.'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.743 12 DEBUG cotyledon.oslo_config_glue [-] rootwrap_config                = /etc/ceilometer/rootwrap.conf log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.744 12 DEBUG cotyledon.oslo_config_glue [-] sample_source                  = openstack log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.744 12 DEBUG cotyledon.oslo_config_glue [-] syslog_log_facility            = LOG_USER log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.744 12 DEBUG cotyledon.oslo_config_glue [-] tenant_name_discovery          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.744 12 DEBUG cotyledon.oslo_config_glue [-] transport_url                  = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.745 12 DEBUG cotyledon.oslo_config_glue [-] use_eventlog                   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.745 12 DEBUG cotyledon.oslo_config_glue [-] use_journal                    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.745 12 DEBUG cotyledon.oslo_config_glue [-] use_json                       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.745 12 DEBUG cotyledon.oslo_config_glue [-] use_stderr                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.745 12 DEBUG cotyledon.oslo_config_glue [-] use_syslog                     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.745 12 DEBUG cotyledon.oslo_config_glue [-] watch_log_file                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2602
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.746 12 DEBUG cotyledon.oslo_config_glue [-] compute.instance_discovery_method = libvirt_metadata log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.746 12 DEBUG cotyledon.oslo_config_glue [-] compute.resource_cache_expiry  = 3600 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.746 12 DEBUG cotyledon.oslo_config_glue [-] compute.resource_update_interval = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.746 12 DEBUG cotyledon.oslo_config_glue [-] coordination.backend_url       = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.747 12 DEBUG cotyledon.oslo_config_glue [-] event.definitions_cfg_file     = event_definitions.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.747 12 DEBUG cotyledon.oslo_config_glue [-] event.drop_unmatched_notifications = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.747 12 DEBUG cotyledon.oslo_config_glue [-] event.store_raw                = [] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.747 12 DEBUG cotyledon.oslo_config_glue [-] ipmi.node_manager_init_retry   = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.747 12 DEBUG cotyledon.oslo_config_glue [-] ipmi.polling_retry             = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.748 12 DEBUG cotyledon.oslo_config_glue [-] meter.meter_definitions_dirs   = ['/etc/ceilometer/meters.d', '/usr/lib/python3.9/site-packages/ceilometer/data/meters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.748 12 DEBUG cotyledon.oslo_config_glue [-] monasca.archive_on_failure     = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.748 12 DEBUG cotyledon.oslo_config_glue [-] monasca.archive_path           = mon_pub_failures.txt log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.748 12 DEBUG cotyledon.oslo_config_glue [-] monasca.auth_section           = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.749 12 DEBUG cotyledon.oslo_config_glue [-] monasca.auth_type              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.749 12 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_count            = 1000 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.749 12 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_max_retries      = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.749 12 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_mode             = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.749 12 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_polling_interval = 5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.750 12 DEBUG cotyledon.oslo_config_glue [-] monasca.batch_timeout          = 15 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.750 12 DEBUG cotyledon.oslo_config_glue [-] monasca.cafile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.750 12 DEBUG cotyledon.oslo_config_glue [-] monasca.certfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.750 12 DEBUG cotyledon.oslo_config_glue [-] monasca.client_max_retries     = 3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.750 12 DEBUG cotyledon.oslo_config_glue [-] monasca.client_retry_interval  = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.751 12 DEBUG cotyledon.oslo_config_glue [-] monasca.clientapi_version      = 2_0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.751 12 DEBUG cotyledon.oslo_config_glue [-] monasca.cloud_name             = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.751 12 DEBUG cotyledon.oslo_config_glue [-] monasca.cluster                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.752 12 DEBUG cotyledon.oslo_config_glue [-] monasca.collect_timing         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.752 12 DEBUG cotyledon.oslo_config_glue [-] monasca.control_plane          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.752 12 DEBUG cotyledon.oslo_config_glue [-] monasca.enable_api_pagination  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.753 12 DEBUG cotyledon.oslo_config_glue [-] monasca.insecure               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.753 12 DEBUG cotyledon.oslo_config_glue [-] monasca.interface              = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.753 12 DEBUG cotyledon.oslo_config_glue [-] monasca.keyfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.754 12 DEBUG cotyledon.oslo_config_glue [-] monasca.monasca_mappings       = /etc/ceilometer/monasca_field_definitions.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.754 12 DEBUG cotyledon.oslo_config_glue [-] monasca.region_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.754 12 DEBUG cotyledon.oslo_config_glue [-] monasca.retry_on_failure       = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.754 12 DEBUG cotyledon.oslo_config_glue [-] monasca.split_loggers          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.755 12 DEBUG cotyledon.oslo_config_glue [-] monasca.timeout                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.755 12 DEBUG cotyledon.oslo_config_glue [-] notification.ack_on_event_error = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.755 12 DEBUG cotyledon.oslo_config_glue [-] notification.batch_size        = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.756 12 DEBUG cotyledon.oslo_config_glue [-] notification.batch_timeout     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.756 12 DEBUG cotyledon.oslo_config_glue [-] notification.messaging_urls    = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.756 12 DEBUG cotyledon.oslo_config_glue [-] notification.notification_control_exchanges = ['nova', 'glance', 'neutron', 'cinder', 'heat', 'keystone', 'sahara', 'trove', 'zaqar', 'swift', 'ceilometer', 'magnum', 'dns', 'ironic', 'aodh'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.757 12 DEBUG cotyledon.oslo_config_glue [-] notification.pipelines         = ['meter', 'event'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.757 12 DEBUG cotyledon.oslo_config_glue [-] notification.workers           = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.757 12 DEBUG cotyledon.oslo_config_glue [-] polling.batch_size             = 50 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.758 12 DEBUG cotyledon.oslo_config_glue [-] polling.cfg_file               = polling.yaml log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.758 12 DEBUG cotyledon.oslo_config_glue [-] polling.partitioning_group_prefix = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.758 12 DEBUG cotyledon.oslo_config_glue [-] polling.pollsters_definitions_dirs = ['/etc/ceilometer/pollsters.d'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.758 12 DEBUG cotyledon.oslo_config_glue [-] polling.tenant_name_discovery  = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.759 12 DEBUG cotyledon.oslo_config_glue [-] publisher.telemetry_secret     = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.759 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.event_topic = event log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.759 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.metering_topic = metering log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.760 12 DEBUG cotyledon.oslo_config_glue [-] publisher_notifier.telemetry_driver = messagingv2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.760 12 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.access_key = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.760 12 DEBUG cotyledon.oslo_config_glue [-] rgw_admin_credentials.secret_key = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.761 12 DEBUG cotyledon.oslo_config_glue [-] rgw_client.implicit_tenants    = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.761 12 DEBUG cotyledon.oslo_config_glue [-] service_types.cinder           = volumev3 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.761 12 DEBUG cotyledon.oslo_config_glue [-] service_types.glance           = image log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.761 12 DEBUG cotyledon.oslo_config_glue [-] service_types.neutron          = network log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.762 12 DEBUG cotyledon.oslo_config_glue [-] service_types.nova             = compute log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.762 12 DEBUG cotyledon.oslo_config_glue [-] service_types.radosgw          = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.763 12 DEBUG cotyledon.oslo_config_glue [-] service_types.swift            = object-store log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.763 12 DEBUG cotyledon.oslo_config_glue [-] vmware.api_retry_count         = 10 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.764 12 DEBUG cotyledon.oslo_config_glue [-] vmware.ca_file                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.764 12 DEBUG cotyledon.oslo_config_glue [-] vmware.host_ip                 = 127.0.0.1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.764 12 DEBUG cotyledon.oslo_config_glue [-] vmware.host_password           = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.765 12 DEBUG cotyledon.oslo_config_glue [-] vmware.host_port               = 443 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.765 12 DEBUG cotyledon.oslo_config_glue [-] vmware.host_username           =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.765 12 DEBUG cotyledon.oslo_config_glue [-] vmware.insecure                = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.765 12 DEBUG cotyledon.oslo_config_glue [-] vmware.task_poll_interval      = 0.5 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.766 12 DEBUG cotyledon.oslo_config_glue [-] vmware.wsdl_location           = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.766 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_section = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.766 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.auth_type  = password log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.767 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.cafile     = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.767 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.certfile   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.767 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.collect_timing = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.767 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.insecure   = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.768 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.interface  = internalURL log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.768 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.keyfile    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.768 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.region_name = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.768 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.split_loggers = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.768 12 DEBUG cotyledon.oslo_config_glue [-] service_credentials.timeout    = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.768 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_section           = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.769 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.auth_type              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.769 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.cafile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.769 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.certfile               = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.769 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.collect_timing         = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.769 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.insecure               = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.769 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.interface              = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.769 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.keyfile                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.770 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.region_name            = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.770 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.split_loggers          = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.770 12 DEBUG cotyledon.oslo_config_glue [-] gnocchi.timeout                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.770 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_section             = service_credentials log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.770 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.auth_type                = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.770 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.cafile                   = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.771 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.certfile                 = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.771 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.collect_timing           = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.771 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.insecure                 = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.771 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.interface                = internal log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.771 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.keyfile                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.771 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.region_name              = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.771 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.split_loggers            = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.772 12 DEBUG cotyledon.oslo_config_glue [-] zaqar.timeout                  = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.772 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_notifications.driver = ['noop'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.772 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_notifications.retry = -1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.772 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_notifications.topics = ['notifications'] log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.772 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_notifications.transport_url = **** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.772 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.amqp_auto_delete = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.773 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.amqp_durable_queues = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.773 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.conn_pool_min_size = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.773 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.conn_pool_ttl = 1200 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.773 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.direct_mandatory_flag = True log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.773 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.enable_cancel_on_failover = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.773 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.heartbeat_in_pthread = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.773 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.heartbeat_rate = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.774 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.heartbeat_timeout_threshold = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.774 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.kombu_compression = None log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.774 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.kombu_failover_strategy = round-robin log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.774 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.kombu_missing_consumer_retry_timeout = 60 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.774 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.kombu_reconnect_delay = 1.0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.774 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_ha_queues = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.775 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_interval_max = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.775 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_login_method = AMQPLAIN log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.775 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_qos_prefetch_count = 100 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.775 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_quorum_delivery_limit = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.775 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_quorum_max_memory_bytes = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.775 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_quorum_max_memory_length = 0 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.776 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_quorum_queue = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.776 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_retry_backoff = 2 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.776 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_retry_interval = 1 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.776 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rabbit_transient_queues_ttl = 1800 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.776 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.rpc_conn_pool_size = 30 log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.776 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl      = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.777 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl_ca_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.777 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl_cert_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.777 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl_enforce_fips_mode = False log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.777 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl_key_file =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.777 12 DEBUG cotyledon.oslo_config_glue [-] oslo_messaging_rabbit.ssl_version =  log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2609
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.777 12 DEBUG cotyledon.oslo_config_glue [-] ******************************************************************************** log_opt_values /usr/lib/python3.9/site-packages/oslo_config/cfg.py:2613
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.778 12 DEBUG cotyledon._service [-] Run service AgentManager(0) [12] wait_forever /usr/lib/python3.9/site-packages/cotyledon/_service.py:241
Oct 11 02:11:04 compute-0 ceilometer_agent_ipmi[396331]: 2025-10-11 02:11:04.783 12 DEBUG ceilometer.agent [-] Config file: {'sources': [{'name': 'pollsters', 'interval': 120, 'meters': ['hardware.*']}]} load_config /usr/lib/python3.9/site-packages/ceilometer/agent.py:64
Oct 11 02:11:04 compute-0 sshd-session[396097]: Connection closed by invalid user user 121.227.153.123 port 47566 [preauth]
Oct 11 02:11:04 compute-0 kepler[396582]: I1011 02:11:04.987204       1 watcher.go:83] Using in cluster k8s config
Oct 11 02:11:04 compute-0 kepler[396582]: I1011 02:11:04.987278       1 watcher.go:90] failed to get config: unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined
Oct 11 02:11:04 compute-0 kepler[396582]: E1011 02:11:04.987339       1 manager.go:59] could not run the watcher k8s APIserver watcher was not enabled
Oct 11 02:11:04 compute-0 ceph-mon[191930]: pgmap v986: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:04 compute-0 kepler[396582]: I1011 02:11:04.997986       1 process_energy.go:129] Using the Ratio Power Model to estimate PROCESS_TOTAL Power
Oct 11 02:11:04 compute-0 kepler[396582]: I1011 02:11:04.998025       1 process_energy.go:130] Feature names: [bpf_cpu_time_ms]
Oct 11 02:11:05 compute-0 kepler[396582]: I1011 02:11:05.006047       1 process_energy.go:129] Using the Ratio Power Model to estimate PROCESS_COMPONENTS Power
Oct 11 02:11:05 compute-0 kepler[396582]: I1011 02:11:05.006074       1 process_energy.go:130] Feature names: [bpf_cpu_time_ms bpf_cpu_time_ms bpf_cpu_time_ms   gpu_compute_util]
Oct 11 02:11:05 compute-0 kepler[396582]: I1011 02:11:05.016122       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 02:11:05 compute-0 kepler[396582]: I1011 02:11:05.016156       1 model.go:125] Requesting for Machine Spec: &{authenticamd amd_epyc_rome 8 8 7 2800 1}
Oct 11 02:11:05 compute-0 kepler[396582]: I1011 02:11:05.016169       1 node_platform_energy.go:53] Using the Regressor/AbsPower Power Model to estimate Node Platform Power
Oct 11 02:11:05 compute-0 kepler[396582]: I1011 02:11:05.022809       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 02:11:05 compute-0 kepler[396582]: I1011 02:11:05.022846       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 02:11:05 compute-0 kepler[396582]: I1011 02:11:05.022850       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 02:11:05 compute-0 kepler[396582]: I1011 02:11:05.022855       1 regressor.go:276] Created predictor linear for trainer: "SGDRegressorTrainer"
Oct 11 02:11:05 compute-0 kepler[396582]: I1011 02:11:05.022863       1 model.go:125] Requesting for Machine Spec: &{authenticamd amd_epyc_rome 8 8 7 2800 1}
Oct 11 02:11:05 compute-0 kepler[396582]: I1011 02:11:05.022879       1 node_component_energy.go:57] Using the Regressor/AbsPower Power Model to estimate Node Component Power
Oct 11 02:11:05 compute-0 kepler[396582]: I1011 02:11:05.022960       1 prometheus_collector.go:90] Registered Process Prometheus metrics
Oct 11 02:11:05 compute-0 kepler[396582]: I1011 02:11:05.022984       1 prometheus_collector.go:95] Registered Container Prometheus metrics
Oct 11 02:11:05 compute-0 kepler[396582]: I1011 02:11:05.023001       1 prometheus_collector.go:100] Registered VM Prometheus metrics
Oct 11 02:11:05 compute-0 kepler[396582]: I1011 02:11:05.023017       1 prometheus_collector.go:104] Registered Node Prometheus metrics
Oct 11 02:11:05 compute-0 kepler[396582]: I1011 02:11:05.023122       1 exporter.go:194] starting to listen on 0.0.0.0:8888
Oct 11 02:11:05 compute-0 kepler[396582]: I1011 02:11:05.023921       1 exporter.go:208] Started Kepler in 593.821753ms
Oct 11 02:11:05 compute-0 sudo[396780]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qsxoozttpdsneaelpvvmvlebqdupibqk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148664.6667664-485-81428838404747/AnsiballZ_find.py'
Oct 11 02:11:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:11:05 compute-0 sudo[396780]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:05 compute-0 sshd-session[395637]: Received disconnect from 193.46.255.217 port 48364:11:  [preauth]
Oct 11 02:11:05 compute-0 sshd-session[395637]: Disconnected from authenticating user root 193.46.255.217 port 48364 [preauth]
Oct 11 02:11:05 compute-0 sshd-session[395637]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 02:11:05 compute-0 python3.9[396782]: ansible-ansible.builtin.find Invoked with file_type=directory paths=['/var/lib/openstack/healthchecks/'] patterns=[] read_whole_file=False age_stamp=mtime recurse=False hidden=False follow=False get_checksum=False checksum_algorithm=sha1 use_regex=False exact_mode=True excludes=None contains=None age=None size=None depth=None mode=None encoding=None limit=None
Oct 11 02:11:05 compute-0 sudo[396780]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v987: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:06 compute-0 unix_chkpwd[396863]: password check failed for user (root)
Oct 11 02:11:06 compute-0 sshd-session[396785]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 02:11:06 compute-0 sshd-session[396783]: Invalid user user from 121.227.153.123 port 47570
Oct 11 02:11:06 compute-0 sshd-session[396783]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:11:06 compute-0 sshd-session[396783]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:11:06 compute-0 sudo[396937]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-faphohzqafifzzdpanaemzrcadggvhio ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148665.7911544-495-140789515051330/AnsiballZ_podman_container_info.py'
Oct 11 02:11:06 compute-0 sudo[396937]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:11:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:11:06 compute-0 python3.9[396939]: ansible-containers.podman.podman_container_info Invoked with name=['ovn_controller'] executable=podman
Oct 11 02:11:06 compute-0 sudo[396937]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:07 compute-0 ceph-mon[191930]: pgmap v987: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v988: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:07 compute-0 sudo[397102]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ebesksbpfpavzcgcjacsesndsdyyvbuf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148667.1085932-503-135439835829304/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:07 compute-0 sudo[397102]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:07 compute-0 sshd-session[396785]: Failed password for root from 193.46.255.217 port 55948 ssh2
Oct 11 02:11:08 compute-0 ceph-mon[191930]: pgmap v988: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:08 compute-0 python3.9[397104]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=ovn_controller detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:08 compute-0 systemd[1]: Started libpod-conmon-861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112.scope.
Oct 11 02:11:08 compute-0 podman[397105]: 2025-10-11 02:11:08.233175041 +0000 UTC m=+0.149965126 container exec 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, config_id=ovn_controller, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
Oct 11 02:11:08 compute-0 podman[397105]: 2025-10-11 02:11:08.244804436 +0000 UTC m=+0.161594451 container exec_died 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:11:08 compute-0 sudo[397102]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:08 compute-0 systemd[1]: libpod-conmon-861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112.scope: Deactivated successfully.
Oct 11 02:11:08 compute-0 sshd-session[396783]: Failed password for invalid user user from 121.227.153.123 port 47570 ssh2
Oct 11 02:11:09 compute-0 sudo[397286]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vbntxqdvdvlzkpdsxdlyrfuuoczxhpxw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148668.5453115-511-58573200144295/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:09 compute-0 sudo[397286]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:09 compute-0 python3.9[397288]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=ovn_controller detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:09 compute-0 unix_chkpwd[397289]: password check failed for user (root)
Oct 11 02:11:09 compute-0 systemd[1]: Started libpod-conmon-861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112.scope.
Oct 11 02:11:09 compute-0 podman[397290]: 2025-10-11 02:11:09.464966696 +0000 UTC m=+0.143923870 container exec 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:11:09 compute-0 podman[397290]: 2025-10-11 02:11:09.499660092 +0000 UTC m=+0.178617246 container exec_died 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS)
Oct 11 02:11:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:11:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 1800.0 total, 600.0 interval
                                            Cumulative writes: 4609 writes, 20K keys, 4609 commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.02 MB/s
                                            Cumulative WAL: 4609 writes, 4609 syncs, 1.00 writes per sync, written: 0.03 GB, 0.02 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 1288 writes, 5590 keys, 1288 commit groups, 1.0 writes per commit group, ingest: 8.45 MB, 0.01 MB/s
                                            Interval WAL: 1288 writes, 1288 syncs, 1.00 writes per sync, written: 0.01 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0    127.6      0.17              0.09        11    0.016       0      0       0.0       0.0
                                              L6      1/0    6.74 MB   0.0      0.1     0.0      0.1       0.1      0.0       0.0   3.2    174.1    142.9      0.48              0.32        10    0.048     43K   5262       0.0       0.0
                                             Sum      1/0    6.74 MB   0.0      0.1     0.0      0.1       0.1      0.0       0.0   4.2    128.6    138.9      0.66              0.41        21    0.031     43K   5262       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   5.2    137.1    137.7      0.26              0.17         8    0.032     18K   2061       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Low      0/0    0.00 KB   0.0      0.1     0.0      0.1       0.1      0.0       0.0   0.0    174.1    142.9      0.48              0.32        10    0.048     43K   5262       0.0       0.0
                                            High      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0    130.6      0.17              0.09        10    0.017       0      0       0.0       0.0
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0     12.0      0.00              0.00         1    0.004       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 1800.0 total, 600.0 interval
                                            Flush(GB): cumulative 0.021, interval 0.007
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.09 GB write, 0.05 MB/s write, 0.08 GB read, 0.05 MB/s read, 0.7 seconds
                                            Interval compaction: 0.03 GB write, 0.06 MB/s write, 0.03 GB read, 0.06 MB/s read, 0.3 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x55816e47f1f0#2 capacity: 308.00 MB usage: 6.61 MB table_size: 0 occupancy: 18446744073709551615 collections: 4 last_copies: 0 last_secs: 0.000132 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(414,6.25 MB,2.02926%) FilterBlock(22,127.42 KB,0.0404011%) IndexBlock(22,241.83 KB,0.0766754%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
Oct 11 02:11:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v989: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:09 compute-0 sudo[397286]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:09 compute-0 systemd[1]: libpod-conmon-861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112.scope: Deactivated successfully.
Oct 11 02:11:10 compute-0 sshd-session[396783]: Connection closed by invalid user user 121.227.153.123 port 47570 [preauth]
Oct 11 02:11:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:11:10 compute-0 sudo[397468]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ealyndsjyavbkpphuhshbfazakxocdja ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148669.7646194-519-51547099483830/AnsiballZ_file.py'
Oct 11 02:11:10 compute-0 sudo[397468]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:10 compute-0 python3.9[397470]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/ovn_controller recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:11:10 compute-0 sudo[397468]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:10 compute-0 ceph-mon[191930]: pgmap v989: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:11 compute-0 sudo[397625]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xmrilqnaixbxzcyxbfqlsyvkzmbllfzw ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148670.7464414-528-204704407187521/AnsiballZ_podman_container_info.py'
Oct 11 02:11:11 compute-0 sudo[397625]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:11 compute-0 sshd-session[396785]: Failed password for root from 193.46.255.217 port 55948 ssh2
Oct 11 02:11:11 compute-0 sshd-session[397471]: Invalid user user from 121.227.153.123 port 58132
Oct 11 02:11:11 compute-0 python3.9[397627]: ansible-containers.podman.podman_container_info Invoked with name=['ceilometer_agent_compute'] executable=podman
Oct 11 02:11:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v990: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:11 compute-0 sudo[397625]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:11 compute-0 sshd-session[397471]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:11:11 compute-0 sshd-session[397471]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:11:12 compute-0 sudo[397791]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mkzeddhrntuevjxdljqxzdnhjokekrdr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148671.877432-536-238996375361370/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:12 compute-0 sudo[397791]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:12 compute-0 unix_chkpwd[397794]: password check failed for user (root)
Oct 11 02:11:12 compute-0 python3.9[397793]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=ceilometer_agent_compute detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:12 compute-0 ceph-mon[191930]: pgmap v990: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:12 compute-0 sudo[397795]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:11:12 compute-0 sudo[397795]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:11:12 compute-0 sudo[397795]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:12 compute-0 systemd[1]: Started libpod-conmon-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope.
Oct 11 02:11:12 compute-0 podman[397803]: 2025-10-11 02:11:12.719573906 +0000 UTC m=+0.113506382 container exec c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.vendor=CentOS, config_id=edpm, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4)
Oct 11 02:11:12 compute-0 sudo[397830]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:11:12 compute-0 sudo[397830]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:11:12 compute-0 sudo[397830]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:12 compute-0 podman[397803]: 2025-10-11 02:11:12.756744625 +0000 UTC m=+0.150677061 container exec_died c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=edpm)
Oct 11 02:11:12 compute-0 systemd[1]: libpod-conmon-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope: Deactivated successfully.
Oct 11 02:11:12 compute-0 sudo[397791]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:12 compute-0 sudo[397870]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:11:12 compute-0 sudo[397870]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:11:12 compute-0 sudo[397870]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:12 compute-0 sudo[397901]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:11:12 compute-0 sudo[397901]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:11:13 compute-0 sudo[398101]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wtzblfgxeeudfcqqqqeraigjmusvxwvj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148673.058643-544-199325410226655/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:13 compute-0 sudo[398101]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v991: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:13 compute-0 sudo[397901]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:13 compute-0 sshd-session[397471]: Failed password for invalid user user from 121.227.153.123 port 58132 ssh2
Oct 11 02:11:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:11:13 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:11:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:11:13 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:11:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:11:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:11:13 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:11:13 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev e6dd9d17-2da9-4e80-98a3-4b281b452fba does not exist
Oct 11 02:11:13 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev d4213c45-dc21-45e9-a41c-4389f97290e4 does not exist
Oct 11 02:11:13 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 1abcf3ee-e7cc-4f7f-b561-560b1860f36c does not exist
Oct 11 02:11:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:11:13 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #48. Immutable memtables: 0.
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:11:13.644895) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 23] Flushing memtable with next log file: 48
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148673644937, "job": 23, "event": "flush_started", "num_memtables": 1, "num_entries": 1439, "num_deletes": 251, "total_data_size": 2265558, "memory_usage": 2313136, "flush_reason": "Manual Compaction"}
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 23] Level-0 flush table #49: started
Oct 11 02:11:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:11:13 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148673663302, "cf_name": "default", "job": 23, "event": "table_file_creation", "file_number": 49, "file_size": 2232993, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 19463, "largest_seqno": 20901, "table_properties": {"data_size": 2226271, "index_size": 3857, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1797, "raw_key_size": 13741, "raw_average_key_size": 19, "raw_value_size": 2212849, "raw_average_value_size": 3174, "num_data_blocks": 176, "num_entries": 697, "num_filter_entries": 697, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760148520, "oldest_key_time": 1760148520, "file_creation_time": 1760148673, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 49, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 23] Flush lasted 18438 microseconds, and 7377 cpu microseconds.
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:11:13.663335) [db/flush_job.cc:967] [default] [JOB 23] Level-0 flush table #49: 2232993 bytes OK
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:11:13.663359) [db/memtable_list.cc:519] [default] Level-0 commit table #49 started
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:11:13.666131) [db/memtable_list.cc:722] [default] Level-0 commit table #49: memtable #1 done
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:11:13.666151) EVENT_LOG_v1 {"time_micros": 1760148673666144, "job": 23, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:11:13.666173) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 23] Try to delete WAL files size 2259242, prev total WAL file size 2259242, number of live WAL files 2.
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000045.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:11:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:11:13.667573) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730031353036' seq:72057594037927935, type:22 .. '7061786F730031373538' seq:0, type:0; will stop at (end)
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 24] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 23 Base level 0, inputs: [49(2180KB)], [47(6902KB)]
Oct 11 02:11:13 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148673667610, "job": 24, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [49], "files_L6": [47], "score": -1, "input_data_size": 9301185, "oldest_snapshot_seqno": -1}
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 24] Generated table #50: 4276 keys, 7538446 bytes, temperature: kUnknown
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148673712464, "cf_name": "default", "job": 24, "event": "table_file_creation", "file_number": 50, "file_size": 7538446, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 7508819, "index_size": 17805, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 10757, "raw_key_size": 105686, "raw_average_key_size": 24, "raw_value_size": 7430214, "raw_average_value_size": 1737, "num_data_blocks": 748, "num_entries": 4276, "num_filter_entries": 4276, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760148673, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 50, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:11:13.712816) [db/compaction/compaction_job.cc:1663] [default] [JOB 24] Compacted 1@0 + 1@6 files to L6 => 7538446 bytes
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:11:13.714884) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 206.9 rd, 167.7 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(2.1, 6.7 +0.0 blob) out(7.2 +0.0 blob), read-write-amplify(7.5) write-amplify(3.4) OK, records in: 4790, records dropped: 514 output_compression: NoCompression
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:11:13.714912) EVENT_LOG_v1 {"time_micros": 1760148673714899, "job": 24, "event": "compaction_finished", "compaction_time_micros": 44950, "compaction_time_cpu_micros": 24062, "output_level": 6, "num_output_files": 1, "total_output_size": 7538446, "num_input_records": 4790, "num_output_records": 4276, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000049.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148673715557, "job": 24, "event": "table_file_deletion", "file_number": 49}
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000047.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148673717203, "job": 24, "event": "table_file_deletion", "file_number": 47}
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:11:13.667391) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:11:13.717509) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:11:13.717519) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:11:13.717522) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:11:13.717525) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:11:13 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:11:13.717529) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:11:13 compute-0 sudo[398104]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:11:13 compute-0 python3.9[398103]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=ceilometer_agent_compute detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:13 compute-0 sudo[398104]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:11:13 compute-0 sudo[398104]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.857 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to be longer than the expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.858 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.858 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.859 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.860 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.860 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.860 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.860 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.860 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.862 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.864 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.865 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.866 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.867 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.867 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.868 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.868 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.869 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.869 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.869 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.869 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.870 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.870 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.870 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.870 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.870 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.871 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.871 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.871 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.871 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.871 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.872 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.872 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.872 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.872 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.873 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.873 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.873 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.873 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.873 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.874 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.874 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.874 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.874 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.874 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.874 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.875 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.875 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.875 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.875 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.876 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.876 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.876 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.876 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.876 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.877 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.877 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.877 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.877 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.877 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.877 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.877 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.877 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.878 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.878 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.878 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.878 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.878 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.878 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.878 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.879 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.879 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.879 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.879 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.879 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.879 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.879 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:11:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:11:13.879 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
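The block above is one complete ceilometer polling cycle: each pollster is registered against a shared ThreadPoolExecutor, discovery for the local_instances method runs once and is cached, every pollster is skipped because discovery returned no instances, and each task is then marked finished. A minimal sketch of that register/discover/skip flow, assuming illustrative names (discover_local_instances and run_pollster are not ceilometer APIs):

    from concurrent.futures import ThreadPoolExecutor

    def discover_local_instances():
        # This cycle found no instances on the compute node, hence the
        # empty discovery cache and the "Skip pollster ..." lines above.
        return []

    def run_pollster(name, resources):
        if not resources:
            return f"Skip pollster {name}, no resources found this cycle"
        return f"Polled {name} on {len(resources)} resource(s)"

    # Discovery runs once per method and is shared by every pollster.
    discovery_cache = {"local_instances": discover_local_instances()}
    pollsters = ["network.incoming.bytes", "cpu", "memory.usage", "disk.root.size"]
    with ThreadPoolExecutor() as executor:
        futures = [executor.submit(run_pollster, name, discovery_cache["local_instances"])
                   for name in pollsters]
        for future in futures:
            print(future.result())  # one "Finished processing" per pollster
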
Oct 11 02:11:13 compute-0 systemd[1]: Started libpod-conmon-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope.
Oct 11 02:11:13 compute-0 podman[398128]: 2025-10-11 02:11:13.93216333 +0000 UTC m=+0.112977915 container exec c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, org.label-schema.license=GPLv2, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, tcib_managed=true, config_id=edpm, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:11:13 compute-0 sudo[398135]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:11:13 compute-0 sudo[398135]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:11:13 compute-0 sudo[398135]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:13 compute-0 podman[398128]: 2025-10-11 02:11:13.966662239 +0000 UTC m=+0.147476804 container exec_died c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, org.label-schema.name=CentOS Stream 10 Base Image, io.buildah.version=1.41.4, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute)
Oct 11 02:11:14 compute-0 sudo[398101]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:14 compute-0 systemd[1]: libpod-conmon-c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6.scope: Deactivated successfully.
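The scope above brackets a podman healthcheck exec against ceilometer_agent_compute: conmon starts, the configured test (/openstack/healthcheck compute, per the logged config_data) runs inside the container, the exec dies, and the scope is released. The same check could be replayed by hand; a sketch, assuming podman access on this host:

    import subprocess

    # Replays the healthcheck test from the logged config_data by hand;
    # container name and command are taken verbatim from the log entry above.
    result = subprocess.run(
        ["podman", "exec", "ceilometer_agent_compute",
         "/openstack/healthcheck", "compute"],
        capture_output=True, text=True,
    )
    print("healthy" if result.returncode == 0 else f"unhealthy: {result.stderr.strip()}")
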
Oct 11 02:11:14 compute-0 sudo[398180]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:11:14 compute-0 sudo[398180]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:11:14 compute-0 sudo[398180]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:14 compute-0 sudo[398212]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:11:14 compute-0 sudo[398212]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
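The sudo line above is cephadm driving ceph-volume inside the ceph container: three pre-created logical volumes are batched into OSDs, with the drive-group affinity passed through the environment. A readability-only reconstruction of that command line in Python ("cephadm" here stands in for the digest-named copy under /var/lib/ceph/<fsid>/; the JSON config piped on stdin is not shown in the log):

    import shlex

    fsid = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"
    image = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")
    lvs = ["/dev/ceph_vg0/ceph_lv0", "/dev/ceph_vg1/ceph_lv1", "/dev/ceph_vg2/ceph_lv2"]

    cmd = ["cephadm",
           "--env", "CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group",
           "--image", image, "--timeout", "895",
           "ceph-volume", "--fsid", fsid, "--config-json", "-",
           "--", "lvm", "batch", "--no-auto", *lvs, "--yes", "--no-systemd"]
    print(shlex.join(cmd))  # running it for real needs root plus the config JSON on stdin
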
Oct 11 02:11:14 compute-0 sshd-session[396785]: Failed password for root from 193.46.255.217 port 55948 ssh2
Oct 11 02:11:14 compute-0 ceph-mon[191930]: pgmap v991: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:11:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:11:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:11:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:11:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:11:14 compute-0 podman[398283]: 2025-10-11 02:11:14.713014597 +0000 UTC m=+0.079818708 container create f1a5c75377d0553606632109b607a7fd816812cbd86c32d5a3765c6fae045d31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_kapitsa, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2)
Oct 11 02:11:14 compute-0 systemd[1]: Started libpod-conmon-f1a5c75377d0553606632109b607a7fd816812cbd86c32d5a3765c6fae045d31.scope.
Oct 11 02:11:14 compute-0 podman[398283]: 2025-10-11 02:11:14.684680514 +0000 UTC m=+0.051484625 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:11:14 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:11:14 compute-0 podman[398283]: 2025-10-11 02:11:14.833912644 +0000 UTC m=+0.200716845 container init f1a5c75377d0553606632109b607a7fd816812cbd86c32d5a3765c6fae045d31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_kapitsa, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:11:14 compute-0 podman[398283]: 2025-10-11 02:11:14.845767288 +0000 UTC m=+0.212571409 container start f1a5c75377d0553606632109b607a7fd816812cbd86c32d5a3765c6fae045d31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_kapitsa, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:11:14 compute-0 podman[398283]: 2025-10-11 02:11:14.854651853 +0000 UTC m=+0.221455984 container attach f1a5c75377d0553606632109b607a7fd816812cbd86c32d5a3765c6fae045d31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_kapitsa, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:11:14 compute-0 relaxed_kapitsa[398334]: 167 167
Oct 11 02:11:14 compute-0 systemd[1]: libpod-f1a5c75377d0553606632109b607a7fd816812cbd86c32d5a3765c6fae045d31.scope: Deactivated successfully.
Oct 11 02:11:14 compute-0 podman[398283]: 2025-10-11 02:11:14.856974256 +0000 UTC m=+0.223778377 container died f1a5c75377d0553606632109b607a7fd816812cbd86c32d5a3765c6fae045d31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_kapitsa, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:11:14 compute-0 systemd[1]: var-lib-containers-storage-overlay-6d84d67add16f84f5e82ac509094bee32a3f0eee47be7b17f8b8d91d94d5e6a2-merged.mount: Deactivated successfully.
Oct 11 02:11:14 compute-0 podman[398283]: 2025-10-11 02:11:14.914673244 +0000 UTC m=+0.281477355 container remove f1a5c75377d0553606632109b607a7fd816812cbd86c32d5a3765c6fae045d31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_kapitsa, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:11:14 compute-0 systemd[1]: libpod-conmon-f1a5c75377d0553606632109b607a7fd816812cbd86c32d5a3765c6fae045d31.scope: Deactivated successfully.
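The entries above trace the full, very short life of the cephadm helper container relaxed_kapitsa: create, init, start, attach, died, remove, all within one second. The same event stream can be followed live; a sketch using podman events (JSON field names can vary slightly between podman versions):

    import json, subprocess

    # Streams container lifecycle events like the create/init/start/attach/
    # died/remove sequence logged above. Ctrl-C to stop.
    proc = subprocess.Popen(
        ["podman", "events", "--format", "json", "--filter", "type=container"],
        stdout=subprocess.PIPE, text=True,
    )
    for line in proc.stdout:
        event = json.loads(line)
        print(event.get("Status"), event.get("Name"))
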
Oct 11 02:11:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:11:15 compute-0 podman[398413]: 2025-10-11 02:11:15.13294614 +0000 UTC m=+0.073802512 container create b000e25cbecba912b6e98ad692e279219fb64e105a211fe913d22b3fe0b917a0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_sammet, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:11:15 compute-0 podman[398413]: 2025-10-11 02:11:15.089903955 +0000 UTC m=+0.030760367 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:11:15 compute-0 systemd[1]: Started libpod-conmon-b000e25cbecba912b6e98ad692e279219fb64e105a211fe913d22b3fe0b917a0.scope.
Oct 11 02:11:15 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:11:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/43ed004f956a4ef7664f854ec37e0d1ec0dc743e17856b6846990bbf2f218541/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:11:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/43ed004f956a4ef7664f854ec37e0d1ec0dc743e17856b6846990bbf2f218541/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:11:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/43ed004f956a4ef7664f854ec37e0d1ec0dc743e17856b6846990bbf2f218541/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:11:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/43ed004f956a4ef7664f854ec37e0d1ec0dc743e17856b6846990bbf2f218541/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:11:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/43ed004f956a4ef7664f854ec37e0d1ec0dc743e17856b6846990bbf2f218541/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
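The kernel warnings above fire because these overlay mounts sit on XFS apparently without the bigtime feature, so inode timestamps are capped at 0x7fffffff seconds after the epoch. That cap is the classic Y2038 limit:

    from datetime import datetime, timezone

    # 0x7fffffff is 2**31 - 1 seconds after the Unix epoch.
    print(datetime.fromtimestamp(0x7FFFFFFF, tz=timezone.utc))
    # -> 2038-01-19 03:14:07+00:00
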
Oct 11 02:11:15 compute-0 sshd-session[397471]: Connection closed by invalid user user 121.227.153.123 port 58132 [preauth]
Oct 11 02:11:15 compute-0 podman[398413]: 2025-10-11 02:11:15.270007949 +0000 UTC m=+0.210864351 container init b000e25cbecba912b6e98ad692e279219fb64e105a211fe913d22b3fe0b917a0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_sammet, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:11:15 compute-0 podman[398413]: 2025-10-11 02:11:15.28642091 +0000 UTC m=+0.227277272 container start b000e25cbecba912b6e98ad692e279219fb64e105a211fe913d22b3fe0b917a0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_sammet, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:11:15 compute-0 podman[398413]: 2025-10-11 02:11:15.292721804 +0000 UTC m=+0.233578396 container attach b000e25cbecba912b6e98ad692e279219fb64e105a211fe913d22b3fe0b917a0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_sammet, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:11:15 compute-0 podman[398450]: 2025-10-11 02:11:15.306409514 +0000 UTC m=+0.126563497 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, managed_by=edpm_ansible, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:11:15 compute-0 sudo[398499]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oidvvvublpcgjwuzepewwzrndfnvhvka ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148674.7936206-552-60200818515074/AnsiballZ_file.py'
Oct 11 02:11:15 compute-0 sudo[398499]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:15 compute-0 python3.9[398505]: ansible-ansible.builtin.file Invoked with group=42405 mode=0700 owner=42405 path=/var/lib/openstack/healthchecks/ceilometer_agent_compute recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:11:15 compute-0 sudo[398499]: pam_unix(sudo:session): session closed for user root
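The Ansible task above recursively enforces owner/group 42405 and mode 0700 on the ceilometer healthcheck directory. Roughly what that does, as plain Python (an illustrative sketch, not Ansible's implementation):

    import os

    path = "/var/lib/openstack/healthchecks/ceilometer_agent_compute"
    uid = gid = 42405  # values from the logged module arguments

    os.makedirs(path, mode=0o700, exist_ok=True)  # state=directory
    os.chmod(path, 0o700)                         # mode=0700 (makedirs is umask-limited)
    for dirpath, dirnames, filenames in os.walk(path):  # recurse=True
        os.chown(dirpath, uid, gid)
        for name in filenames:
            os.chown(os.path.join(dirpath, name), uid, gid)
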
Oct 11 02:11:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v992: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:15 compute-0 sshd-session[396785]: Received disconnect from 193.46.255.217 port 55948:11:  [preauth]
Oct 11 02:11:15 compute-0 sshd-session[396785]: Disconnected from authenticating user root 193.46.255.217 port 55948 [preauth]
Oct 11 02:11:15 compute-0 sshd-session[396785]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
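Interleaved with the deployment, sshd is rejecting password-guessing attempts against root from 193.46.255.217 and an invalid user from 121.227.153.123. A sketch that tallies such failures per source address (the log path is an assumption; journalctl output works the same way):

    import re
    from collections import Counter

    pattern = re.compile(r"Failed password for (?:invalid user )?\S+ from (\S+) port \d+")
    failures = Counter()
    with open("/var/log/messages") as log:  # path is an assumption
        for line in log:
            match = pattern.search(line)
            if match:
                failures[match.group(1)] += 1
    for address, count in failures.most_common(10):
        print(f"{address}: {count} failed attempts")
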
Oct 11 02:11:16 compute-0 podman[398606]: 2025-10-11 02:11:16.241357446 +0000 UTC m=+0.106435644 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3)
Oct 11 02:11:16 compute-0 podman[398595]: 2025-10-11 02:11:16.245900232 +0000 UTC m=+0.124758479 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:11:16 compute-0 podman[398604]: 2025-10-11 02:11:16.286876923 +0000 UTC m=+0.161334302 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_controller, org.label-schema.license=GPLv2, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller)
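Three EDPM containers report health_status=healthy within the same second. Their configured checks can also be re-run on demand; a sketch, assuming podman access (the exit code of "podman healthcheck run" reflects the check result):

    import subprocess

    # 'podman healthcheck run' executes the container's configured check
    # and reports the result through its exit code.
    for name in ["ovn_metadata_agent", "podman_exporter", "ovn_controller"]:
        rc = subprocess.run(["podman", "healthcheck", "run", name]).returncode
        print(name, "healthy" if rc == 0 else "unhealthy")
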
Oct 11 02:11:16 compute-0 sudo[398745]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-trccrtccwkcfmnmbpljztkfbvxygofvl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148675.8837888-561-209177835882697/AnsiballZ_podman_container_info.py'
Oct 11 02:11:16 compute-0 sudo[398745]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:16 compute-0 python3.9[398749]: ansible-containers.podman.podman_container_info Invoked with name=['node_exporter'] executable=podman
Oct 11 02:11:16 compute-0 quirky_sammet[398467]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:11:16 compute-0 quirky_sammet[398467]: --> relative data size: 1.0
Oct 11 02:11:16 compute-0 quirky_sammet[398467]: --> All data devices are unavailable
Oct 11 02:11:16 compute-0 sshd-session[398506]: Invalid user user from 121.227.153.123 port 58134
Oct 11 02:11:16 compute-0 systemd[1]: libpod-b000e25cbecba912b6e98ad692e279219fb64e105a211fe913d22b3fe0b917a0.scope: Deactivated successfully.
Oct 11 02:11:16 compute-0 systemd[1]: libpod-b000e25cbecba912b6e98ad692e279219fb64e105a211fe913d22b3fe0b917a0.scope: Consumed 1.198s CPU time.
Oct 11 02:11:16 compute-0 podman[398413]: 2025-10-11 02:11:16.592177304 +0000 UTC m=+1.533033706 container died b000e25cbecba912b6e98ad692e279219fb64e105a211fe913d22b3fe0b917a0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_sammet, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0)
Oct 11 02:11:16 compute-0 unix_chkpwd[398763]: password check failed for user (root)
Oct 11 02:11:16 compute-0 sshd-session[398541]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 02:11:16 compute-0 ceph-mon[191930]: pgmap v992: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:16 compute-0 systemd[1]: var-lib-containers-storage-overlay-43ed004f956a4ef7664f854ec37e0d1ec0dc743e17856b6846990bbf2f218541-merged.mount: Deactivated successfully.
Oct 11 02:11:16 compute-0 podman[398413]: 2025-10-11 02:11:16.704498463 +0000 UTC m=+1.645354865 container remove b000e25cbecba912b6e98ad692e279219fb64e105a211fe913d22b3fe0b917a0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_sammet, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 02:11:16 compute-0 systemd[1]: libpod-conmon-b000e25cbecba912b6e98ad692e279219fb64e105a211fe913d22b3fe0b917a0.scope: Deactivated successfully.
Oct 11 02:11:16 compute-0 sudo[398212]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:16 compute-0 sudo[398745]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:16 compute-0 sshd-session[398506]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:11:16 compute-0 sshd-session[398506]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:11:16 compute-0 sudo[398778]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:11:16 compute-0 sudo[398778]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:11:16 compute-0 sudo[398778]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:17 compute-0 sudo[398803]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:11:17 compute-0 sudo[398803]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:11:17 compute-0 sudo[398803]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:17 compute-0 sudo[398828]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:11:17 compute-0 sudo[398828]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:11:17 compute-0 sudo[398828]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:17 compute-0 sudo[398877]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:11:17 compute-0 sudo[398877]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:11:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v993: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:17 compute-0 podman[399039]: 2025-10-11 02:11:17.847565355 +0000 UTC m=+0.091947079 container create 777a544ac5b525756a76fa5b50fcbff4c6df803ff73ab3e70298f51b751d23fd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_buck, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507)
Oct 11 02:11:17 compute-0 podman[399039]: 2025-10-11 02:11:17.808821958 +0000 UTC m=+0.053203652 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:11:17 compute-0 systemd[1]: Started libpod-conmon-777a544ac5b525756a76fa5b50fcbff4c6df803ff73ab3e70298f51b751d23fd.scope.
Oct 11 02:11:17 compute-0 sudo[399082]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fmnfygaeipeakuenmfjwbrpqgvfsjlpc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148677.350234-569-236194942263928/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:17 compute-0 sudo[399082]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:17 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:11:18 compute-0 podman[399039]: 2025-10-11 02:11:18.001727379 +0000 UTC m=+0.246109173 container init 777a544ac5b525756a76fa5b50fcbff4c6df803ff73ab3e70298f51b751d23fd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_buck, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:11:18 compute-0 podman[399039]: 2025-10-11 02:11:18.021515214 +0000 UTC m=+0.265896948 container start 777a544ac5b525756a76fa5b50fcbff4c6df803ff73ab3e70298f51b751d23fd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_buck, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:11:18 compute-0 podman[399039]: 2025-10-11 02:11:18.027832198 +0000 UTC m=+0.272213952 container attach 777a544ac5b525756a76fa5b50fcbff4c6df803ff73ab3e70298f51b751d23fd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_buck, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:11:18 compute-0 goofy_buck[399083]: 167 167
Oct 11 02:11:18 compute-0 systemd[1]: libpod-777a544ac5b525756a76fa5b50fcbff4c6df803ff73ab3e70298f51b751d23fd.scope: Deactivated successfully.
Oct 11 02:11:18 compute-0 podman[399039]: 2025-10-11 02:11:18.036441675 +0000 UTC m=+0.280823409 container died 777a544ac5b525756a76fa5b50fcbff4c6df803ff73ab3e70298f51b751d23fd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_buck, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 02:11:18 compute-0 systemd[1]: var-lib-containers-storage-overlay-9d127de66ae2ec7b485a46c2bca3fbb384ae992d2de58f634f4aeca040c97a84-merged.mount: Deactivated successfully.
Oct 11 02:11:18 compute-0 podman[399039]: 2025-10-11 02:11:18.110659133 +0000 UTC m=+0.355040867 container remove 777a544ac5b525756a76fa5b50fcbff4c6df803ff73ab3e70298f51b751d23fd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=goofy_buck, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:11:18 compute-0 systemd[1]: libpod-conmon-777a544ac5b525756a76fa5b50fcbff4c6df803ff73ab3e70298f51b751d23fd.scope: Deactivated successfully.
Oct 11 02:11:18 compute-0 python3.9[399087]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=node_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:18 compute-0 systemd[1]: Started libpod-conmon-7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce.scope.
Oct 11 02:11:18 compute-0 podman[399103]: 2025-10-11 02:11:18.33971631 +0000 UTC m=+0.154184405 container exec 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:11:18 compute-0 podman[399103]: 2025-10-11 02:11:18.375940746 +0000 UTC m=+0.190408841 container exec_died 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:11:18 compute-0 podman[399123]: 2025-10-11 02:11:18.401000343 +0000 UTC m=+0.098076415 container create 460dbd1841fa21c76587f79953a69eed217057b45ba963c0fa82b948cbcb6d6b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_saha, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 02:11:18 compute-0 sudo[399082]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:18 compute-0 systemd[1]: libpod-conmon-7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce.scope: Deactivated successfully.
Oct 11 02:11:18 compute-0 podman[399123]: 2025-10-11 02:11:18.365206304 +0000 UTC m=+0.062282436 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:11:18 compute-0 systemd[1]: Started libpod-conmon-460dbd1841fa21c76587f79953a69eed217057b45ba963c0fa82b948cbcb6d6b.scope.
Oct 11 02:11:18 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:11:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9cc27d1250bb7193bbd7a1320188a94e52e71622960ced73b426a79fe5864bc0/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:11:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9cc27d1250bb7193bbd7a1320188a94e52e71622960ced73b426a79fe5864bc0/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:11:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9cc27d1250bb7193bbd7a1320188a94e52e71622960ced73b426a79fe5864bc0/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:11:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9cc27d1250bb7193bbd7a1320188a94e52e71622960ced73b426a79fe5864bc0/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:11:18 compute-0 podman[399123]: 2025-10-11 02:11:18.532989849 +0000 UTC m=+0.230065931 container init 460dbd1841fa21c76587f79953a69eed217057b45ba963c0fa82b948cbcb6d6b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_saha, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:11:18 compute-0 podman[399123]: 2025-10-11 02:11:18.551415414 +0000 UTC m=+0.248491476 container start 460dbd1841fa21c76587f79953a69eed217057b45ba963c0fa82b948cbcb6d6b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_saha, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:11:18 compute-0 podman[399123]: 2025-10-11 02:11:18.557210432 +0000 UTC m=+0.254286484 container attach 460dbd1841fa21c76587f79953a69eed217057b45ba963c0fa82b948cbcb6d6b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_saha, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=reef)
Oct 11 02:11:18 compute-0 sshd-session[398541]: Failed password for root from 193.46.255.217 port 14856 ssh2
Oct 11 02:11:18 compute-0 ceph-mon[191930]: pgmap v993: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:18 compute-0 sshd-session[398506]: Failed password for invalid user user from 121.227.153.123 port 58134 ssh2
Oct 11 02:11:19 compute-0 sudo[399310]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-oihixhchtbvkyjjztjevcdugkzezqsil ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148678.700053-577-219900903057075/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:19 compute-0 sudo[399310]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:19 compute-0 gallant_saha[399162]: {
Oct 11 02:11:19 compute-0 gallant_saha[399162]:     "0": [
Oct 11 02:11:19 compute-0 gallant_saha[399162]:         {
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "devices": [
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "/dev/loop3"
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             ],
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "lv_name": "ceph_lv0",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "lv_size": "21470642176",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "name": "ceph_lv0",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "tags": {
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.cluster_name": "ceph",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.crush_device_class": "",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.encrypted": "0",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.osd_id": "0",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.type": "block",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.vdo": "0"
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             },
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "type": "block",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "vg_name": "ceph_vg0"
Oct 11 02:11:19 compute-0 gallant_saha[399162]:         }
Oct 11 02:11:19 compute-0 gallant_saha[399162]:     ],
Oct 11 02:11:19 compute-0 gallant_saha[399162]:     "1": [
Oct 11 02:11:19 compute-0 gallant_saha[399162]:         {
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "devices": [
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "/dev/loop4"
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             ],
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "lv_name": "ceph_lv1",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "lv_size": "21470642176",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "name": "ceph_lv1",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "tags": {
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.cluster_name": "ceph",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.crush_device_class": "",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.encrypted": "0",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.osd_id": "1",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.type": "block",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.vdo": "0"
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             },
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "type": "block",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "vg_name": "ceph_vg1"
Oct 11 02:11:19 compute-0 gallant_saha[399162]:         }
Oct 11 02:11:19 compute-0 gallant_saha[399162]:     ],
Oct 11 02:11:19 compute-0 gallant_saha[399162]:     "2": [
Oct 11 02:11:19 compute-0 gallant_saha[399162]:         {
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "devices": [
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "/dev/loop5"
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             ],
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "lv_name": "ceph_lv2",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "lv_size": "21470642176",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "name": "ceph_lv2",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "tags": {
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.cluster_name": "ceph",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.crush_device_class": "",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.encrypted": "0",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.osd_id": "2",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.type": "block",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:                 "ceph.vdo": "0"
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             },
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "type": "block",
Oct 11 02:11:19 compute-0 gallant_saha[399162]:             "vg_name": "ceph_vg2"
Oct 11 02:11:19 compute-0 gallant_saha[399162]:         }
Oct 11 02:11:19 compute-0 gallant_saha[399162]:     ]
Oct 11 02:11:19 compute-0 gallant_saha[399162]: }
Oct 11 02:11:19 compute-0 python3.9[399312]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=node_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:19 compute-0 systemd[1]: libpod-460dbd1841fa21c76587f79953a69eed217057b45ba963c0fa82b948cbcb6d6b.scope: Deactivated successfully.
Oct 11 02:11:19 compute-0 podman[399123]: 2025-10-11 02:11:19.426017257 +0000 UTC m=+1.123093339 container died 460dbd1841fa21c76587f79953a69eed217057b45ba963c0fa82b948cbcb6d6b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_saha, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:11:19 compute-0 systemd[1]: var-lib-containers-storage-overlay-9cc27d1250bb7193bbd7a1320188a94e52e71622960ced73b426a79fe5864bc0-merged.mount: Deactivated successfully.
Oct 11 02:11:19 compute-0 systemd[1]: Started libpod-conmon-7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce.scope.
Oct 11 02:11:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v994: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:19 compute-0 podman[399123]: 2025-10-11 02:11:19.548601962 +0000 UTC m=+1.245678004 container remove 460dbd1841fa21c76587f79953a69eed217057b45ba963c0fa82b948cbcb6d6b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_saha, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:11:19 compute-0 podman[399317]: 2025-10-11 02:11:19.560048579 +0000 UTC m=+0.117747582 container exec 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:11:19 compute-0 systemd[1]: libpod-conmon-460dbd1841fa21c76587f79953a69eed217057b45ba963c0fa82b948cbcb6d6b.scope: Deactivated successfully.
Oct 11 02:11:19 compute-0 podman[399317]: 2025-10-11 02:11:19.59677383 +0000 UTC m=+0.154472843 container exec_died 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:11:19 compute-0 sudo[398877]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:19 compute-0 sudo[399310]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:19 compute-0 systemd[1]: libpod-conmon-7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce.scope: Deactivated successfully.
Oct 11 02:11:19 compute-0 sudo[399360]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:11:19 compute-0 sudo[399360]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:11:19 compute-0 sudo[399360]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:19 compute-0 unix_chkpwd[399436]: password check failed for user (root)
Oct 11 02:11:19 compute-0 sudo[399408]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:11:19 compute-0 sudo[399408]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:11:19 compute-0 sudo[399408]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:19 compute-0 podman[399438]: 2025-10-11 02:11:19.959117444 +0000 UTC m=+0.105957787 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, org.label-schema.build-date=20251009)
Oct 11 02:11:19 compute-0 sudo[399456]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:11:19 compute-0 sudo[399456]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:11:19 compute-0 sudo[399456]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:19 compute-0 podman[399434]: 2025-10-11 02:11:19.993033816 +0000 UTC m=+0.137873574 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, container_name=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible)
Oct 11 02:11:20 compute-0 sudo[399535]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:11:20 compute-0 sudo[399535]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:11:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:11:20 compute-0 sudo[399670]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pmktoyvmudzrveeiskukkjycpvyqwtkl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148679.8958855-585-91640549768169/AnsiballZ_file.py'
Oct 11 02:11:20 compute-0 sudo[399670]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:20 compute-0 sshd-session[398506]: Connection closed by invalid user user 121.227.153.123 port 58134 [preauth]
Oct 11 02:11:20 compute-0 python3.9[399674]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/node_exporter recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:11:20 compute-0 podman[399686]: 2025-10-11 02:11:20.595423187 +0000 UTC m=+0.081827603 container create 51f0f2f294849cc833e47527439473ee26c9784b307b3d1da20b694df1ddf31b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=clever_meitner, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507)
Oct 11 02:11:20 compute-0 sudo[399670]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:20 compute-0 podman[399686]: 2025-10-11 02:11:20.554560906 +0000 UTC m=+0.040965392 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:11:20 compute-0 ceph-mon[191930]: pgmap v994: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:20 compute-0 systemd[1]: Started libpod-conmon-51f0f2f294849cc833e47527439473ee26c9784b307b3d1da20b694df1ddf31b.scope.
Oct 11 02:11:20 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:11:20 compute-0 podman[399686]: 2025-10-11 02:11:20.735538164 +0000 UTC m=+0.221942620 container init 51f0f2f294849cc833e47527439473ee26c9784b307b3d1da20b694df1ddf31b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=clever_meitner, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, ceph=True)
Oct 11 02:11:20 compute-0 podman[399686]: 2025-10-11 02:11:20.746496334 +0000 UTC m=+0.232900760 container start 51f0f2f294849cc833e47527439473ee26c9784b307b3d1da20b694df1ddf31b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=clever_meitner, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:11:20 compute-0 podman[399686]: 2025-10-11 02:11:20.752843397 +0000 UTC m=+0.239247853 container attach 51f0f2f294849cc833e47527439473ee26c9784b307b3d1da20b694df1ddf31b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=clever_meitner, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef)
Oct 11 02:11:20 compute-0 clever_meitner[399708]: 167 167
Oct 11 02:11:20 compute-0 systemd[1]: libpod-51f0f2f294849cc833e47527439473ee26c9784b307b3d1da20b694df1ddf31b.scope: Deactivated successfully.
Oct 11 02:11:20 compute-0 conmon[399708]: conmon 51f0f2f294849cc833e4 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-51f0f2f294849cc833e47527439473ee26c9784b307b3d1da20b694df1ddf31b.scope/container/memory.events
Oct 11 02:11:20 compute-0 podman[399686]: 2025-10-11 02:11:20.759042382 +0000 UTC m=+0.245446768 container died 51f0f2f294849cc833e47527439473ee26c9784b307b3d1da20b694df1ddf31b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=clever_meitner, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.vendor=CentOS)
Oct 11 02:11:20 compute-0 systemd[1]: var-lib-containers-storage-overlay-0921f0d73f28d89b21b4014d1ec494717e67f795e7a735fe6c24dc61b55e0e21-merged.mount: Deactivated successfully.
Oct 11 02:11:20 compute-0 podman[399686]: 2025-10-11 02:11:20.81958674 +0000 UTC m=+0.305991166 container remove 51f0f2f294849cc833e47527439473ee26c9784b307b3d1da20b694df1ddf31b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=clever_meitner, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS)
Oct 11 02:11:20 compute-0 systemd[1]: libpod-conmon-51f0f2f294849cc833e47527439473ee26c9784b307b3d1da20b694df1ddf31b.scope: Deactivated successfully.
Oct 11 02:11:21 compute-0 podman[399773]: 2025-10-11 02:11:21.066686265 +0000 UTC m=+0.075757247 container create 7a005608bbb29df32612bc1f3578cfe023c0fbef98bf1fcce485d3343673a0c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_hamilton, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True)
Oct 11 02:11:21 compute-0 podman[399773]: 2025-10-11 02:11:21.031726761 +0000 UTC m=+0.040797743 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:11:21 compute-0 systemd[1]: Started libpod-conmon-7a005608bbb29df32612bc1f3578cfe023c0fbef98bf1fcce485d3343673a0c1.scope.
Oct 11 02:11:21 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:11:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b01b2390df52565284260a43871fa985a21d7a863c9533e1ee6ebe467f13e7a2/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:11:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b01b2390df52565284260a43871fa985a21d7a863c9533e1ee6ebe467f13e7a2/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:11:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b01b2390df52565284260a43871fa985a21d7a863c9533e1ee6ebe467f13e7a2/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:11:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b01b2390df52565284260a43871fa985a21d7a863c9533e1ee6ebe467f13e7a2/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:11:21 compute-0 podman[399773]: 2025-10-11 02:11:21.23728016 +0000 UTC m=+0.246351152 container init 7a005608bbb29df32612bc1f3578cfe023c0fbef98bf1fcce485d3343673a0c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_hamilton, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 02:11:21 compute-0 podman[399773]: 2025-10-11 02:11:21.254692353 +0000 UTC m=+0.263763305 container start 7a005608bbb29df32612bc1f3578cfe023c0fbef98bf1fcce485d3343673a0c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_hamilton, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:11:21 compute-0 podman[399773]: 2025-10-11 02:11:21.260771228 +0000 UTC m=+0.269842180 container attach 7a005608bbb29df32612bc1f3578cfe023c0fbef98bf1fcce485d3343673a0c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_hamilton, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:11:21 compute-0 sudo[399900]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vwyapkpfuwdfdeftnlpcovfjwnquwvir ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148680.9747064-594-3532649096494/AnsiballZ_podman_container_info.py'
Oct 11 02:11:21 compute-0 sshd-session[398541]: Failed password for root from 193.46.255.217 port 14856 ssh2
Oct 11 02:11:21 compute-0 sudo[399900]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v995: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:21 compute-0 sshd-session[399714]: Invalid user user from 121.227.153.123 port 48782
Oct 11 02:11:21 compute-0 python3.9[399902]: ansible-containers.podman.podman_container_info Invoked with name=['podman_exporter'] executable=podman
Oct 11 02:11:21 compute-0 sudo[399900]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:21 compute-0 sshd-session[399714]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:11:21 compute-0 sshd-session[399714]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:11:22 compute-0 angry_hamilton[399823]: {
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:         "osd_id": 1,
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:         "type": "bluestore"
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:     },
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:         "osd_id": 2,
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:         "type": "bluestore"
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:     },
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:         "osd_id": 0,
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:         "type": "bluestore"
Oct 11 02:11:22 compute-0 angry_hamilton[399823]:     }
Oct 11 02:11:22 compute-0 angry_hamilton[399823]: }
Oct 11 02:11:22 compute-0 systemd[1]: libpod-7a005608bbb29df32612bc1f3578cfe023c0fbef98bf1fcce485d3343673a0c1.scope: Deactivated successfully.
Oct 11 02:11:22 compute-0 systemd[1]: libpod-7a005608bbb29df32612bc1f3578cfe023c0fbef98bf1fcce485d3343673a0c1.scope: Consumed 1.299s CPU time.
Oct 11 02:11:22 compute-0 podman[399773]: 2025-10-11 02:11:22.570037586 +0000 UTC m=+1.579108538 container died 7a005608bbb29df32612bc1f3578cfe023c0fbef98bf1fcce485d3343673a0c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_hamilton, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, OSD_FLAVOR=default)
Oct 11 02:11:22 compute-0 systemd[1]: var-lib-containers-storage-overlay-b01b2390df52565284260a43871fa985a21d7a863c9533e1ee6ebe467f13e7a2-merged.mount: Deactivated successfully.
Oct 11 02:11:22 compute-0 podman[399773]: 2025-10-11 02:11:22.668704145 +0000 UTC m=+1.677775097 container remove 7a005608bbb29df32612bc1f3578cfe023c0fbef98bf1fcce485d3343673a0c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_hamilton, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, io.buildah.version=1.39.3)
Oct 11 02:11:22 compute-0 ceph-mon[191930]: pgmap v995: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:22 compute-0 systemd[1]: libpod-conmon-7a005608bbb29df32612bc1f3578cfe023c0fbef98bf1fcce485d3343673a0c1.scope: Deactivated successfully.
Oct 11 02:11:22 compute-0 sudo[399535]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:22 compute-0 sudo[400104]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xdelztvvifkkmnxvammkqlelwhkwavlj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148682.1900477-602-108370704987744/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:22 compute-0 sudo[400104]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:11:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:11:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:11:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:11:22 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 79bf645d-dd8e-4c70-8aa9-6e04df5c6765 does not exist
Oct 11 02:11:22 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev fb8db5c4-5cd7-465e-8c5d-76462bc7951d does not exist
Oct 11 02:11:22 compute-0 sudo[400107]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:11:22 compute-0 sudo[400107]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:11:22 compute-0 sudo[400107]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:22 compute-0 python3.9[400106]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=podman_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:23 compute-0 sudo[400132]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:11:23 compute-0 sudo[400132]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:11:23 compute-0 sudo[400132]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:23 compute-0 unix_chkpwd[400171]: password check failed for user (root)
Oct 11 02:11:23 compute-0 systemd[1]: Started libpod-conmon-31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028.scope.
Oct 11 02:11:23 compute-0 podman[400145]: 2025-10-11 02:11:23.082652122 +0000 UTC m=+0.112659738 container exec 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:11:23 compute-0 podman[400145]: 2025-10-11 02:11:23.121518748 +0000 UTC m=+0.151526374 container exec_died 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:11:23 compute-0 systemd[1]: libpod-conmon-31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028.scope: Deactivated successfully.
Oct 11 02:11:23 compute-0 sudo[400104]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v996: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:11:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:11:23 compute-0 sshd-session[399714]: Failed password for invalid user user from 121.227.153.123 port 48782 ssh2
Oct 11 02:11:24 compute-0 sudo[400335]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xkvxfefqtimalvuraqeanfnyfrnohzve ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148683.452343-610-207645071749623/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:24 compute-0 sudo[400335]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:24 compute-0 python3.9[400337]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=podman_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:24 compute-0 systemd[1]: Started libpod-conmon-31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028.scope.
Oct 11 02:11:24 compute-0 podman[400338]: 2025-10-11 02:11:24.48376063 +0000 UTC m=+0.145110201 container exec 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:11:24 compute-0 podman[400338]: 2025-10-11 02:11:24.519614658 +0000 UTC m=+0.180964219 container exec_died 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:11:24 compute-0 systemd[1]: libpod-conmon-31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028.scope: Deactivated successfully.
Oct 11 02:11:24 compute-0 sudo[400335]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:24 compute-0 ceph-mon[191930]: pgmap v996: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:11:25 compute-0 sshd-session[398541]: Failed password for root from 193.46.255.217 port 14856 ssh2
Oct 11 02:11:25 compute-0 sudo[400518]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qhzqzcfdjceejncxvlbqwtligvvuyzfk ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148684.8934472-618-182483245357167/AnsiballZ_file.py'
Oct 11 02:11:25 compute-0 sudo[400518]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:25 compute-0 sshd-session[399714]: Connection closed by invalid user user 121.227.153.123 port 48782 [preauth]
Oct 11 02:11:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v997: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:25 compute-0 python3.9[400520]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/podman_exporter recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:11:25 compute-0 sudo[400518]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:26 compute-0 sshd-session[398541]: Received disconnect from 193.46.255.217 port 14856:11:  [preauth]
Oct 11 02:11:26 compute-0 sshd-session[398541]: Disconnected from authenticating user root 193.46.255.217 port 14856 [preauth]
Oct 11 02:11:26 compute-0 sshd-session[398541]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.217  user=root
Oct 11 02:11:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:11:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:11:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:11:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:11:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:11:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:11:26 compute-0 sshd-session[400521]: Invalid user user from 121.227.153.123 port 48788
Oct 11 02:11:26 compute-0 ceph-mon[191930]: pgmap v997: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:26 compute-0 sudo[400672]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ervsldxbclaosfarddqifsttyywzlxwq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148686.1872072-627-203543836266887/AnsiballZ_podman_container_info.py'
Oct 11 02:11:26 compute-0 sudo[400672]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:26 compute-0 sshd-session[400521]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:11:26 compute-0 sshd-session[400521]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:11:27 compute-0 python3.9[400674]: ansible-containers.podman.podman_container_info Invoked with name=['openstack_network_exporter'] executable=podman
Oct 11 02:11:27 compute-0 sudo[400672]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v998: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:28 compute-0 sudo[400836]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ybydgsjpurglitfplzzomairpksktyxb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148687.5751016-635-37860037132476/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:28 compute-0 sudo[400836]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:28 compute-0 python3.9[400838]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=openstack_network_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:28 compute-0 systemd[1]: Started libpod-conmon-6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c.scope.
Oct 11 02:11:28 compute-0 podman[400839]: 2025-10-11 02:11:28.575709924 +0000 UTC m=+0.151936660 container exec 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, build-date=2025-08-20T13:12:41, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, architecture=x86_64, version=9.6, io.buildah.version=1.33.7, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, url=https://catalog.redhat.com/en/search?searchType=containers, container_name=openstack_network_exporter, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, name=ubi9-minimal, vcs-type=git, io.openshift.expose-services=, com.redhat.component=ubi9-minimal-container, distribution-scope=public, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, managed_by=edpm_ansible, release=1755695350, config_id=edpm, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']})
Oct 11 02:11:28 compute-0 podman[400839]: 2025-10-11 02:11:28.611090456 +0000 UTC m=+0.187317162 container exec_died 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, config_id=edpm, architecture=x86_64, url=https://catalog.redhat.com/en/search?searchType=containers, container_name=openstack_network_exporter, managed_by=edpm_ansible, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.expose-services=, version=9.6, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.buildah.version=1.33.7, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vendor=Red Hat, Inc., io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., build-date=2025-08-20T13:12:41, release=1755695350, name=ubi9-minimal, vcs-type=git, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:11:28 compute-0 sudo[400836]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:28 compute-0 systemd[1]: libpod-conmon-6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c.scope: Deactivated successfully.
Oct 11 02:11:28 compute-0 ceph-mon[191930]: pgmap v998: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:28 compute-0 sshd-session[400521]: Failed password for invalid user user from 121.227.153.123 port 48788 ssh2
Oct 11 02:11:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v999: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:29 compute-0 sudo[401019]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gvrctfjspyknbrsyxsehhcmyomcfoaxc ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148688.9774675-643-18485103152588/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:29 compute-0 sudo[401019]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:29 compute-0 podman[157119]: time="2025-10-11T02:11:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:11:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:11:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45037 "" "Go-http-client/1.1"
Oct 11 02:11:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:11:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8510 "" "Go-http-client/1.1"
Oct 11 02:11:29 compute-0 python3.9[401021]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=openstack_network_exporter detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:29 compute-0 systemd[1]: Started libpod-conmon-6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c.scope.
Oct 11 02:11:29 compute-0 podman[401022]: 2025-10-11 02:11:29.985886566 +0000 UTC m=+0.139868640 container exec 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, release=1755695350, url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.component=ubi9-minimal-container, version=9.6, architecture=x86_64, build-date=2025-08-20T13:12:41, io.buildah.version=1.33.7, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.tags=minimal rhel9, managed_by=edpm_ansible, container_name=openstack_network_exporter, vcs-type=git, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, name=ubi9-minimal, config_id=edpm, maintainer=Red Hat, Inc.)
Oct 11 02:11:30 compute-0 podman[401022]: 2025-10-11 02:11:30.031787601 +0000 UTC m=+0.185769645 container exec_died 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, com.redhat.component=ubi9-minimal-container, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, io.buildah.version=1.33.7, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, managed_by=edpm_ansible, name=ubi9-minimal, container_name=openstack_network_exporter, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vendor=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.6, architecture=x86_64, config_id=edpm, release=1755695350, vcs-type=git, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.openshift.expose-services=, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc.)
Oct 11 02:11:30 compute-0 sudo[401019]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:30 compute-0 systemd[1]: libpod-conmon-6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c.scope: Deactivated successfully.
Oct 11 02:11:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:11:30 compute-0 podman[401036]: 2025-10-11 02:11:30.105593822 +0000 UTC m=+0.117166986 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:11:30 compute-0 sshd-session[400521]: Connection closed by invalid user user 121.227.153.123 port 48788 [preauth]
Oct 11 02:11:30 compute-0 ceph-mon[191930]: pgmap v999: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:30 compute-0 sudo[401224]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gxnpgbgzldxhlpiittdzetsoptfxqdyf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148690.3649852-651-56281023084120/AnsiballZ_file.py'
Oct 11 02:11:30 compute-0 sudo[401224]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:31 compute-0 python3.9[401226]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/openstack_network_exporter recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:11:31 compute-0 sudo[401224]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:31 compute-0 openstack_network_exporter[374316]: ERROR   02:11:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:11:31 compute-0 openstack_network_exporter[374316]: ERROR   02:11:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:11:31 compute-0 openstack_network_exporter[374316]: ERROR   02:11:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:11:31 compute-0 openstack_network_exporter[374316]: ERROR   02:11:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:11:31 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:11:31 compute-0 openstack_network_exporter[374316]: ERROR   02:11:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:11:31 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:11:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1000: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:31 compute-0 sshd-session[401196]: Invalid user user from 121.227.153.123 port 59020
Oct 11 02:11:31 compute-0 podman[401303]: 2025-10-11 02:11:31.916809464 +0000 UTC m=+0.139744200 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, com.redhat.component=ubi9-minimal-container, config_id=edpm, name=ubi9-minimal, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, vendor=Red Hat, Inc., version=9.6, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.33.7, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, container_name=openstack_network_exporter, release=1755695350, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, maintainer=Red Hat, Inc., vcs-type=git, io.openshift.expose-services=, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, distribution-scope=public, io.openshift.tags=minimal rhel9, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal)
Oct 11 02:11:31 compute-0 sshd-session[401196]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:11:31 compute-0 sshd-session[401196]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:11:32 compute-0 sudo[401394]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bllxftywlpfsasgqqbzyjyslpsgnmnjm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148691.5710588-660-151625756013741/AnsiballZ_podman_container_info.py'
Oct 11 02:11:32 compute-0 sudo[401394]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:32 compute-0 python3.9[401396]: ansible-containers.podman.podman_container_info Invoked with name=['ceilometer_agent_ipmi'] executable=podman
Oct 11 02:11:32 compute-0 sudo[401394]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:32 compute-0 ceph-mon[191930]: pgmap v1000: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:33 compute-0 podman[401485]: 2025-10-11 02:11:33.259634277 +0000 UTC m=+0.138149752 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=starting, health_failing_streak=2, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009)
Oct 11 02:11:33 compute-0 systemd[1]: 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c-728cc5f9610cec62.service: Main process exited, code=exited, status=1/FAILURE
Oct 11 02:11:33 compute-0 systemd[1]: 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c-728cc5f9610cec62.service: Failed with result 'exit-code'.
Oct 11 02:11:33 compute-0 sudo[401576]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xixryockzlxuigdiabeeclxoodqivsrm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148692.9057238-668-92226172805029/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:33 compute-0 sudo[401576]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1001: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:33 compute-0 python3.9[401578]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=ceilometer_agent_ipmi detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:33 compute-0 systemd[1]: Started libpod-conmon-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.scope.
Oct 11 02:11:33 compute-0 podman[401579]: 2025-10-11 02:11:33.96441172 +0000 UTC m=+0.166028909 container exec 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=edpm, org.label-schema.license=GPLv2, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:11:33 compute-0 sshd-session[401196]: Failed password for invalid user user from 121.227.153.123 port 59020 ssh2
Oct 11 02:11:34 compute-0 podman[401579]: 2025-10-11 02:11:34.001295291 +0000 UTC m=+0.202912430 container exec_died 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_managed=true)
Oct 11 02:11:34 compute-0 systemd[1]: libpod-conmon-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.scope: Deactivated successfully.
Oct 11 02:11:34 compute-0 sudo[401576]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:34 compute-0 ceph-mon[191930]: pgmap v1001: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:34 compute-0 sudo[401772]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-crqckrtogbcqadngupbytzfhbjgheale ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148694.342767-676-204295464585333/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:34 compute-0 sudo[401772]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:34 compute-0 podman[401731]: 2025-10-11 02:11:34.910182693 +0000 UTC m=+0.137010860 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.openshift.expose-services=, name=ubi9, config_id=edpm, vcs-type=git, distribution-scope=public, architecture=x86_64, io.buildah.version=1.29.0, summary=Provides the latest release of Red Hat Universal Base Image 9., version=9.4, container_name=kepler, io.openshift.tags=base rhel9, vendor=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., com.redhat.component=ubi9-container, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, release=1214.1726694543, release-0.7.12=, build-date=2024-09-18T21:23:30)
Oct 11 02:11:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:11:35 compute-0 python3.9[401778]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=ceilometer_agent_ipmi detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:35 compute-0 systemd[1]: Started libpod-conmon-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.scope.
Oct 11 02:11:35 compute-0 podman[401779]: 2025-10-11 02:11:35.321772507 +0000 UTC m=+0.167320089 container exec 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_managed=true, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible)
Oct 11 02:11:35 compute-0 podman[401779]: 2025-10-11 02:11:35.355395622 +0000 UTC m=+0.200943104 container exec_died 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, config_id=edpm, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:11:35 compute-0 systemd[1]: libpod-conmon-47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c.scope: Deactivated successfully.
Oct 11 02:11:35 compute-0 sudo[401772]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1002: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:35 compute-0 sshd-session[401196]: Connection closed by invalid user user 121.227.153.123 port 59020 [preauth]
Oct 11 02:11:36 compute-0 sudo[401960]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hvfumnpzqbtsznawpyzoieilsekfbgqy ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148695.6835268-684-6320697563673/AnsiballZ_file.py'
Oct 11 02:11:36 compute-0 sudo[401960]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:36 compute-0 python3.9[401962]: ansible-ansible.builtin.file Invoked with group=42405 mode=0700 owner=42405 path=/var/lib/openstack/healthchecks/ceilometer_agent_ipmi recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:11:36 compute-0 sudo[401960]: pam_unix(sudo:session): session closed for user root
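
The ansible.builtin.file invocation above (path=/var/lib/openstack/healthchecks/ceilometer_agent_ipmi, owner/group 42405, mode 0700, recurse=True, state=directory) reduces to a recursive chown/chmod. A rough Python equivalent of what the module enforces, with error handling omitted:

    import os

    # Recursive owner/mode enforcement, as the file task above requests.
    path = "/var/lib/openstack/healthchecks/ceilometer_agent_ipmi"
    uid = gid = 42405                       # owner=42405 group=42405
    os.makedirs(path, exist_ok=True)        # state=directory
    for root, dirs, files in os.walk(path):             # recurse=True
        for entry in [root] + [os.path.join(root, n) for n in dirs + files]:
            os.chown(entry, uid, gid)
            os.chmod(entry, 0o700)          # mode=0700

Unlike this sketch, the module is idempotent and reports "changed" only when something actually had to move.
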
Oct 11 02:11:36 compute-0 sshd-session[401885]: Invalid user user from 121.227.153.123 port 59024
Oct 11 02:11:36 compute-0 ceph-mon[191930]: pgmap v1002: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:37 compute-0 sshd-session[401885]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:11:37 compute-0 sshd-session[401885]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:11:37 compute-0 sudo[402112]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-kpryqyvbvmdschluvrqtohlifivzjwod ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148696.8552465-693-237074431606681/AnsiballZ_podman_container_info.py'
Oct 11 02:11:37 compute-0 sudo[402112]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1003: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:37 compute-0 python3.9[402114]: ansible-containers.podman.podman_container_info Invoked with name=['kepler'] executable=podman
Oct 11 02:11:37 compute-0 sudo[402112]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:38 compute-0 sudo[402277]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-expwlvjdazcfbwwcqdfvnadeiwjkutec ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148698.1439936-701-234083120774858/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:38 compute-0 sudo[402277]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:38 compute-0 ceph-mon[191930]: pgmap v1003: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:38 compute-0 sshd-session[401885]: Failed password for invalid user user from 121.227.153.123 port 59024 ssh2
Oct 11 02:11:38 compute-0 python3.9[402279]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=kepler detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:39 compute-0 systemd[1]: Started libpod-conmon-ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603.scope.
Oct 11 02:11:39 compute-0 podman[402280]: 2025-10-11 02:11:39.155763726 +0000 UTC m=+0.152852564 container exec ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, com.redhat.component=ubi9-container, release-0.7.12=, architecture=x86_64, container_name=kepler, release=1214.1726694543, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, build-date=2024-09-18T21:23:30, io.openshift.tags=base rhel9, vcs-type=git, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, maintainer=Red Hat, Inc., managed_by=edpm_ansible, version=9.4, config_id=edpm, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, vendor=Red Hat, Inc., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.29.0, io.k8s.display-name=Red Hat Universal Base Image 9, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f)
Oct 11 02:11:39 compute-0 podman[402280]: 2025-10-11 02:11:39.193386772 +0000 UTC m=+0.190475630 container exec_died ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., architecture=x86_64, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, distribution-scope=public, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, container_name=kepler, maintainer=Red Hat, Inc., version=9.4, config_id=edpm, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.openshift.tags=base rhel9, vcs-type=git, release=1214.1726694543, name=ubi9, build-date=2024-09-18T21:23:30, io.buildah.version=1.29.0, com.redhat.component=ubi9-container, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=)
Oct 11 02:11:39 compute-0 systemd[1]: libpod-conmon-ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603.scope: Deactivated successfully.
Oct 11 02:11:39 compute-0 sudo[402277]: pam_unix(sudo:session): session closed for user root
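
The podman_container_exec tasks here (command=id -u, detach=False, tty=False) amount to a plain podman exec whose stdout the module captures. Roughly equivalent by hand:

    import subprocess

    # Non-detached, non-tty exec; stdout is what the module reports back.
    result = subprocess.run(
        ["podman", "exec", "kepler", "id", "-u"],
        capture_output=True, text=True, check=True,
    )
    print(result.stdout.strip())    # uid the container runs commands as

The exec and exec_died journal entries bracketing each task are the visible trace of exactly this call.
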
Oct 11 02:11:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1004: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:39 compute-0 nova_compute[356901]: 2025-10-11 02:11:39.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:11:39 compute-0 nova_compute[356901]: 2025-10-11 02:11:39.899 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:11:39 compute-0 nova_compute[356901]: 2025-10-11 02:11:39.919 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:11:39 compute-0 nova_compute[356901]: 2025-10-11 02:11:39.920 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:11:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:11:40 compute-0 sudo[402456]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gdsasoppkxpzvznopgsoiariapyyvykq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148700.0496504-709-3461075304739/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:40 compute-0 sudo[402456]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:40 compute-0 sshd-session[401885]: Connection closed by invalid user user 121.227.153.123 port 59024 [preauth]
Oct 11 02:11:40 compute-0 python3.9[402458]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=kepler detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:40 compute-0 ceph-mon[191930]: pgmap v1004: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:41 compute-0 systemd[1]: Started libpod-conmon-ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603.scope.
Oct 11 02:11:41 compute-0 podman[402459]: 2025-10-11 02:11:41.040884148 +0000 UTC m=+0.181870403 container exec ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, release-0.7.12=, io.buildah.version=1.29.0, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, managed_by=edpm_ansible, version=9.4, config_id=edpm, summary=Provides the latest release of Red Hat Universal Base Image 9., vendor=Red Hat, Inc., io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, maintainer=Red Hat, Inc., name=ubi9, build-date=2024-09-18T21:23:30, com.redhat.component=ubi9-container, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=kepler, release=1214.1726694543, distribution-scope=public, io.openshift.tags=base rhel9)
Oct 11 02:11:41 compute-0 podman[402459]: 2025-10-11 02:11:41.051905828 +0000 UTC m=+0.192892083 container exec_died ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, io.openshift.expose-services=, name=ubi9, release=1214.1726694543, release-0.7.12=, build-date=2024-09-18T21:23:30, config_id=edpm, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=base rhel9, managed_by=edpm_ansible, architecture=x86_64, com.redhat.component=ubi9-container, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=kepler, summary=Provides the latest release of Red Hat Universal Base Image 9., vendor=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, maintainer=Red Hat, Inc., distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9, io.buildah.version=1.29.0, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, version=9.4)
Oct 11 02:11:41 compute-0 sudo[402456]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:41 compute-0 systemd[1]: libpod-conmon-ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603.scope: Deactivated successfully.
Oct 11 02:11:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1005: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:41 compute-0 nova_compute[356901]: 2025-10-11 02:11:41.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:11:41 compute-0 nova_compute[356901]: 2025-10-11 02:11:41.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:11:41 compute-0 sshd-session[402472]: Invalid user user from 121.227.153.123 port 33416
Oct 11 02:11:41 compute-0 nova_compute[356901]: 2025-10-11 02:11:41.926 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:11:41 compute-0 nova_compute[356901]: 2025-10-11 02:11:41.926 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:11:41 compute-0 nova_compute[356901]: 2025-10-11 02:11:41.927 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:11:41 compute-0 nova_compute[356901]: 2025-10-11 02:11:41.927 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:11:41 compute-0 nova_compute[356901]: 2025-10-11 02:11:41.928 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:11:42 compute-0 sshd-session[402472]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:11:42 compute-0 sshd-session[402472]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:11:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:11:42 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2242434299' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:11:42 compute-0 nova_compute[356901]: 2025-10-11 02:11:42.472 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.544s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
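
The resource tracker's disk probe above shells out to ceph df as client.openstack and parses the JSON reply (the mon-side dispatch shows up in the ceph-mon lines). A sketch of that round trip; the field names ("stats", "total_avail_bytes") are assumptions about the ceph df JSON schema, not something this log shows:

    import json, subprocess

    # Same probe nova runs above; schema keys are assumed, not logged.
    out = subprocess.run(
        ["ceph", "df", "--format=json", "--id", "openstack",
         "--conf", "/etc/ceph/ceph.conf"],
        capture_output=True, text=True, check=True,
    ).stdout
    stats = json.loads(out)["stats"]
    print(stats["total_avail_bytes"] / 1024**3, "GiB free")
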
Oct 11 02:11:42 compute-0 sudo[402662]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qieypqwvdiphmuecxkfixtfeefcecdrz ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148702.071392-717-43805635884510/AnsiballZ_file.py'
Oct 11 02:11:42 compute-0 sudo[402662]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:42 compute-0 ceph-mon[191930]: pgmap v1005: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:42 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2242434299' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:11:43 compute-0 python3.9[402664]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/kepler recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:11:43 compute-0 sudo[402662]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:43 compute-0 nova_compute[356901]: 2025-10-11 02:11:43.063 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:11:43 compute-0 nova_compute[356901]: 2025-10-11 02:11:43.064 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=4539MB free_disk=59.98828125GB free_vcpus=8 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:11:43 compute-0 nova_compute[356901]: 2025-10-11 02:11:43.065 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:11:43 compute-0 nova_compute[356901]: 2025-10-11 02:11:43.065 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:11:43 compute-0 nova_compute[356901]: 2025-10-11 02:11:43.172 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 0 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:11:43 compute-0 nova_compute[356901]: 2025-10-11 02:11:43.173 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=512MB phys_disk=59GB used_disk=0GB total_vcpus=8 used_vcpus=0 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:11:43 compute-0 nova_compute[356901]: 2025-10-11 02:11:43.199 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:11:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1006: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:11:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/4191220354' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:11:43 compute-0 nova_compute[356901]: 2025-10-11 02:11:43.672 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.473s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:11:43 compute-0 sshd-session[402472]: Failed password for invalid user user from 121.227.153.123 port 33416 ssh2
Oct 11 02:11:43 compute-0 nova_compute[356901]: 2025-10-11 02:11:43.681 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:11:43 compute-0 nova_compute[356901]: 2025-10-11 02:11:43.696 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:11:43 compute-0 nova_compute[356901]: 2025-10-11 02:11:43.698 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:11:43 compute-0 nova_compute[356901]: 2025-10-11 02:11:43.698 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.633s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
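
The inventory record a few lines up fixes how much of this host placement may hand out: per resource class, capacity is (total - reserved) * allocation_ratio. Worked through with the logged values:

    # capacity = (total - reserved) * allocation_ratio, per resource class
    vcpus     = (8    - 0)   * 4.0   # 32.0 schedulable VCPUs
    memory_mb = (7680 - 512) * 1.0   # 7168.0 schedulable MB
    disk_gb   = (59   - 0)   * 0.9   # 53.1 schedulable GB
    print(vcpus, memory_mb, disk_gb)

This is why the "Final resource view" above can report 8 physical vcpus while the scheduler still overcommits CPU 4:1.
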
Oct 11 02:11:43 compute-0 sudo[402836]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-xcysqprzdfzyzijjywnxvvcdupxkmndl ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148703.3567595-726-13989056704942/AnsiballZ_podman_container_info.py'
Oct 11 02:11:43 compute-0 sudo[402836]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:43 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/4191220354' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:11:44 compute-0 sshd-session[402472]: Connection closed by invalid user user 121.227.153.123 port 33416 [preauth]
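
The interleaved sshd lines (Invalid user -> authentication failure -> Failed password -> Connection closed, all from 121.227.153.123) are a routine brute-force probe against a nonexistent "user" account. A quick tally of such attempts per source address, assuming the journal has been saved to /tmp/journal.txt (a hypothetical path):

    import re
    from collections import Counter

    # Count failed-password attempts per source IP in a saved journal.
    pat = re.compile(r"Failed password for invalid user \S+ from (\S+)")
    with open("/tmp/journal.txt") as fh:            # hypothetical path
        hits = Counter(m.group(1) for line in fh if (m := pat.search(line)))
    print(hits.most_common(5))     # e.g. [('121.227.153.123', 3)]
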
Oct 11 02:11:44 compute-0 python3.9[402838]: ansible-containers.podman.podman_container_info Invoked with name=['ovn_metadata_agent'] executable=podman
Oct 11 02:11:44 compute-0 sudo[402836]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:44 compute-0 nova_compute[356901]: 2025-10-11 02:11:44.698 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:11:44 compute-0 nova_compute[356901]: 2025-10-11 02:11:44.699 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:11:44 compute-0 nova_compute[356901]: 2025-10-11 02:11:44.700 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:11:44 compute-0 nova_compute[356901]: 2025-10-11 02:11:44.714 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944
Oct 11 02:11:44 compute-0 nova_compute[356901]: 2025-10-11 02:11:44.715 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:11:44 compute-0 nova_compute[356901]: 2025-10-11 02:11:44.716 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:11:44 compute-0 nova_compute[356901]: 2025-10-11 02:11:44.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:11:44 compute-0 nova_compute[356901]: 2025-10-11 02:11:44.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:11:44 compute-0 ceph-mon[191930]: pgmap v1006: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:11:45 compute-0 sudo[403003]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-vbwybswafyxgrbettgwecezwlkgwhwnp ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148704.5113513-734-30495161973754/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:45 compute-0 sudo[403003]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:45 compute-0 python3.9[403005]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=ovn_metadata_agent detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:45 compute-0 sshd-session[402876]: Invalid user user from 121.227.153.123 port 33432
Oct 11 02:11:45 compute-0 systemd[1]: Started libpod-conmon-c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3.scope.
Oct 11 02:11:45 compute-0 podman[403006]: 2025-10-11 02:11:45.536066337 +0000 UTC m=+0.135762862 container exec c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_metadata_agent, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009, io.buildah.version=1.41.3)
Oct 11 02:11:45 compute-0 podman[403006]: 2025-10-11 02:11:45.546628952 +0000 UTC m=+0.146325467 container exec_died c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_id=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 02:11:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1007: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:45 compute-0 systemd[1]: libpod-conmon-c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3.scope: Deactivated successfully.
Oct 11 02:11:45 compute-0 sudo[403003]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:45 compute-0 podman[403016]: 2025-10-11 02:11:45.621926363 +0000 UTC m=+0.143107681 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251007, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.4, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm, container_name=ceilometer_agent_compute)
Oct 11 02:11:45 compute-0 sshd-session[402876]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:11:45 compute-0 sshd-session[402876]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:11:46 compute-0 sudo[403204]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ukkxmtfskzoqvzbuteyrkldruljglmuh ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148705.837457-742-238598546165305/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:46 compute-0 sudo[403204]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:46 compute-0 podman[403207]: 2025-10-11 02:11:46.416853634 +0000 UTC m=+0.103927837 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:11:46 compute-0 podman[403206]: 2025-10-11 02:11:46.417425006 +0000 UTC m=+0.113774068 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:11:46 compute-0 podman[403213]: 2025-10-11 02:11:46.483821108 +0000 UTC m=+0.147950977 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true)
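
The health_status=healthy events above come from podman periodically running each container's configured healthcheck test ('/openstack/healthcheck ...'). The same check can be fired manually; exit status 0 corresponds to healthy:

    import subprocess

    # Run the configured healthcheck once; 0 means healthy.
    rc = subprocess.run(
        ["podman", "healthcheck", "run", "ovn_metadata_agent"],
    ).returncode
    print("healthy" if rc == 0 else "unhealthy (rc=%d)" % rc)
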
Oct 11 02:11:46 compute-0 python3.9[403219]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=ovn_metadata_agent detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:46 compute-0 systemd[1]: Started libpod-conmon-c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3.scope.
Oct 11 02:11:46 compute-0 podman[403271]: 2025-10-11 02:11:46.732860175 +0000 UTC m=+0.146533874 container exec c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_managed=true, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, config_id=ovn_metadata_agent, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:11:46 compute-0 podman[403271]: 2025-10-11 02:11:46.767669044 +0000 UTC m=+0.181342773 container exec_died c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_id=ovn_metadata_agent, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:11:46 compute-0 systemd[1]: libpod-conmon-c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3.scope: Deactivated successfully.
Oct 11 02:11:46 compute-0 sudo[403204]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:46 compute-0 ceph-mon[191930]: pgmap v1007: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1008: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:47 compute-0 sudo[403452]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fopwwrcygkalntjxioraxkjgyryedeun ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148707.0952427-750-55451861194458/AnsiballZ_file.py'
Oct 11 02:11:47 compute-0 sudo[403452]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:47 compute-0 python3.9[403454]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/ovn_metadata_agent recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:11:47 compute-0 sudo[403452]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:47 compute-0 sshd-session[402876]: Failed password for invalid user user from 121.227.153.123 port 33432 ssh2
Oct 11 02:11:48 compute-0 sudo[403604]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-abhjygewswgabrbigcefmylvyrzjaccr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148708.2167883-759-165932846209152/AnsiballZ_podman_container_info.py'
Oct 11 02:11:48 compute-0 sudo[403604]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:48 compute-0 ceph-mon[191930]: pgmap v1008: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:48 compute-0 python3.9[403606]: ansible-containers.podman.podman_container_info Invoked with name=['iscsid'] executable=podman
Oct 11 02:11:49 compute-0 sudo[403604]: pam_unix(sudo:session): session closed for user root
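
podman_container_info, invoked above for kepler, ovn_metadata_agent and iscsid, is essentially a container inspect returned as structured data. By hand:

    import json, subprocess

    # Inspect returns a JSON array with one object per named container.
    info = json.loads(subprocess.run(
        ["podman", "container", "inspect", "iscsid"],
        capture_output=True, text=True, check=True,
    ).stdout)
    print(info[0]["State"]["Status"])   # e.g. "running"
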
Oct 11 02:11:49 compute-0 sshd-session[402876]: Connection closed by invalid user user 121.227.153.123 port 33432 [preauth]
Oct 11 02:11:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1009: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:50 compute-0 sudo[403772]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jxynsdufthdeflmcbcleiklyakuygyhb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148709.4232066-767-238238428237258/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:50 compute-0 sudo[403772]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:11:50 compute-0 podman[403774]: 2025-10-11 02:11:50.223701474 +0000 UTC m=+0.154030563 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, config_id=multipathd, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:11:50 compute-0 podman[403775]: 2025-10-11 02:11:50.227153776 +0000 UTC m=+0.153235180 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, org.label-schema.build-date=20251009, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, tcib_managed=true)
Oct 11 02:11:50 compute-0 python3.9[403776]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=iscsid detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:50 compute-0 systemd[1]: Started libpod-conmon-b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e.scope.
Oct 11 02:11:50 compute-0 podman[403815]: 2025-10-11 02:11:50.436969526 +0000 UTC m=+0.124665981 container exec b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, container_name=iscsid, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:11:50 compute-0 podman[403815]: 2025-10-11 02:11:50.473497848 +0000 UTC m=+0.161194273 container exec_died b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, container_name=iscsid, io.buildah.version=1.41.3)
Oct 11 02:11:50 compute-0 sudo[403772]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:50 compute-0 systemd[1]: libpod-conmon-b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e.scope: Deactivated successfully.
Oct 11 02:11:50 compute-0 sshd-session[403701]: Invalid user user from 121.227.153.123 port 35526
Oct 11 02:11:50 compute-0 ceph-mon[191930]: pgmap v1009: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:50 compute-0 sshd-session[403701]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:11:50 compute-0 sshd-session[403701]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:11:51 compute-0 sudo[403993]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uiifrerylvoulkcxwpeubyjwbojnacze ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148710.8983176-775-197228080544323/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:51 compute-0 sudo[403993]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1010: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:51 compute-0 python3.9[403995]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=iscsid detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:51 compute-0 systemd[1]: Started libpod-conmon-b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e.scope.
Oct 11 02:11:51 compute-0 podman[403996]: 2025-10-11 02:11:51.92703162 +0000 UTC m=+0.146840601 container exec b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, container_name=iscsid, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:11:51 compute-0 podman[403996]: 2025-10-11 02:11:51.960982932 +0000 UTC m=+0.180791833 container exec_died b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, managed_by=edpm_ansible)
Oct 11 02:11:52 compute-0 sudo[403993]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:52 compute-0 systemd[1]: libpod-conmon-b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e.scope: Deactivated successfully.
Oct 11 02:11:52 compute-0 sudo[404176]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rurmtfwyepifxbbaxuftlpciogcwygha ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148712.259912-783-244618059627352/AnsiballZ_file.py'
Oct 11 02:11:52 compute-0 sudo[404176]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:52 compute-0 ceph-mon[191930]: pgmap v1010: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:53 compute-0 python3.9[404178]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/iscsid recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:11:53 compute-0 sudo[404176]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:53 compute-0 sshd-session[403701]: Failed password for invalid user user from 121.227.153.123 port 35526 ssh2
Oct 11 02:11:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1011: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:53 compute-0 sudo[404328]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rlvfycofxnebrownslklsudzcvobbdwa ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148713.372507-792-72933992422427/AnsiballZ_podman_container_info.py'
Oct 11 02:11:53 compute-0 sudo[404328]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:54 compute-0 python3.9[404330]: ansible-containers.podman.podman_container_info Invoked with name=['multipathd'] executable=podman
Oct 11 02:11:54 compute-0 sudo[404328]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:54 compute-0 sshd-session[403701]: Connection closed by invalid user user 121.227.153.123 port 35526 [preauth]
Oct 11 02:11:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:11:54.832 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:11:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:11:54.834 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:11:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:11:54.834 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:11:54 compute-0 ceph-mon[191930]: pgmap v1011: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:11:55 compute-0 sudo[404494]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dkcwypufwoywaitddqdbcqltrtlwuxai ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148714.674803-800-259805536384156/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:55 compute-0 sudo[404494]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:55 compute-0 python3.9[404496]: ansible-containers.podman.podman_container_exec Invoked with command=id -u name=multipathd detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1012: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:55 compute-0 systemd[1]: Started libpod-conmon-1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c.scope.
Oct 11 02:11:55 compute-0 podman[404497]: 2025-10-11 02:11:55.63299863 +0000 UTC m=+0.126739154 container exec 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, config_id=multipathd, org.label-schema.vendor=CentOS, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd)
Oct 11 02:11:55 compute-0 podman[404497]: 2025-10-11 02:11:55.67013488 +0000 UTC m=+0.163875404 container exec_died 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_id=multipathd, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:11:55 compute-0 systemd[1]: libpod-conmon-1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c.scope: Deactivated successfully.
Oct 11 02:11:55 compute-0 sudo[404494]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:55 compute-0 sshd-session[404421]: Invalid user user from 121.227.153.123 port 35536
Oct 11 02:11:56 compute-0 sshd-session[404421]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:11:56 compute-0 sshd-session[404421]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:11:56
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['.rgw.root', 'cephfs.cephfs.meta', 'default.rgw.control', 'backups', 'images', '.mgr', 'default.rgw.meta', 'volumes', 'default.rgw.log', 'cephfs.cephfs.data', 'vms']
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:11:56 compute-0 sudo[404677]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rhejxuzghuuasihyepyvrrbkpkpzeori ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148716.0262127-808-139749393246551/AnsiballZ_podman_container_exec.py'
Oct 11 02:11:56 compute-0 sudo[404677]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:11:56 compute-0 python3.9[404679]: ansible-containers.podman.podman_container_exec Invoked with command=id -g name=multipathd detach=False executable=podman privileged=False tty=False argv=None env=None user=None workdir=None
Oct 11 02:11:56 compute-0 systemd[1]: Started libpod-conmon-1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c.scope.
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:11:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:11:56 compute-0 podman[404680]: 2025-10-11 02:11:56.983536864 +0000 UTC m=+0.157986544 container exec 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_managed=true, config_id=multipathd, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, container_name=multipathd, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.build-date=20251009)
Oct 11 02:11:57 compute-0 ceph-mon[191930]: pgmap v1012: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:57 compute-0 podman[404680]: 2025-10-11 02:11:57.017192223 +0000 UTC m=+0.191641923 container exec_died 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=multipathd, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, config_id=multipathd, org.label-schema.build-date=20251009)
Oct 11 02:11:57 compute-0 sudo[404677]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:57 compute-0 systemd[1]: libpod-conmon-1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c.scope: Deactivated successfully.
Oct 11 02:11:57 compute-0 ceph-mgr[192233]: client.0 ms_handle_reset on v2:192.168.122.100:6800/1088804496
Oct 11 02:11:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1013: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:58 compute-0 sshd-session[404421]: Failed password for invalid user user from 121.227.153.123 port 35536 ssh2
Oct 11 02:11:58 compute-0 sudo[404859]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sruuxsctufyeznwfmwcnjrgqplypbuii ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148718.3518155-816-113046421695432/AnsiballZ_file.py'
Oct 11 02:11:58 compute-0 sudo[404859]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:11:59 compute-0 ceph-mon[191930]: pgmap v1013: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:59 compute-0 python3.9[404861]: ansible-ansible.builtin.file Invoked with group=0 mode=0700 owner=0 path=/var/lib/openstack/healthchecks/multipathd recurse=True state=directory force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:11:59 compute-0 sudo[404859]: pam_unix(sudo:session): session closed for user root
Oct 11 02:11:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1014: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:11:59 compute-0 podman[157119]: time="2025-10-11T02:11:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:11:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:11:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45036 "" "Go-http-client/1.1"
Oct 11 02:11:59 compute-0 sshd-session[404421]: Connection closed by invalid user user 121.227.153.123 port 35536 [preauth]
Oct 11 02:11:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:11:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8511 "" "Go-http-client/1.1"
Oct 11 02:12:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:12:00 compute-0 podman[404987]: 2025-10-11 02:12:00.517627695 +0000 UTC m=+0.102231053 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:12:00 compute-0 sudo[405038]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cgnccxqcftkvvhhflbfcuagxrwvslvzv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148719.491596-825-115006350941579/AnsiballZ_file.py'
Oct 11 02:12:00 compute-0 sudo[405038]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:00 compute-0 python3.9[405040]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall/ state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:12:00 compute-0 sudo[405038]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:00 compute-0 sshd-session[404961]: Invalid user user from 121.227.153.123 port 49648
Oct 11 02:12:01 compute-0 ceph-mon[191930]: pgmap v1014: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:01 compute-0 sshd-session[404961]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:12:01 compute-0 sshd-session[404961]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:12:01 compute-0 openstack_network_exporter[374316]: ERROR   02:12:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:12:01 compute-0 openstack_network_exporter[374316]: ERROR   02:12:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:12:01 compute-0 openstack_network_exporter[374316]: ERROR   02:12:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:12:01 compute-0 openstack_network_exporter[374316]: ERROR   02:12:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:12:01 compute-0 openstack_network_exporter[374316]: ERROR   02:12:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:12:01 compute-0 sudo[405190]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dtpacdphbnzvfqijhocevkslqtcnbmyr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148721.030815-833-156382982905399/AnsiballZ_stat.py'
Oct 11 02:12:01 compute-0 sudo[405190]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1015: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:01 compute-0 python3.9[405192]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/kepler.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:12:01 compute-0 sudo[405190]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:02 compute-0 ceph-mon[191930]: pgmap v1015: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:02 compute-0 podman[405218]: 2025-10-11 02:12:02.227965784 +0000 UTC m=+0.117031278 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, com.redhat.component=ubi9-minimal-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, version=9.6, io.openshift.expose-services=, build-date=2025-08-20T13:12:41, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, name=ubi9-minimal, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, io.openshift.tags=minimal rhel9, managed_by=edpm_ansible, container_name=openstack_network_exporter, distribution-scope=public, release=1755695350, url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, architecture=x86_64)
Oct 11 02:12:02 compute-0 sudo[405288]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-sheoqjybqtnhpcobksbznbosvabeqhhe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148721.030815-833-156382982905399/AnsiballZ_file.py'
Oct 11 02:12:02 compute-0 sudo[405288]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:02 compute-0 python3.9[405290]: ansible-ansible.legacy.file Invoked with mode=0640 dest=/var/lib/edpm-config/firewall/kepler.yaml _original_basename=firewall.yaml.j2 recurse=False state=file path=/var/lib/edpm-config/firewall/kepler.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:12:02 compute-0 sudo[405288]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:03 compute-0 sshd-session[404961]: Failed password for invalid user user from 121.227.153.123 port 49648 ssh2
Oct 11 02:12:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1016: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:03 compute-0 sudo[405458]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-irrxqqzvazquvlqryyuqbbczyuyxwyic ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148723.0953834-846-131995022357817/AnsiballZ_file.py'
Oct 11 02:12:03 compute-0 sudo[405458]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:03 compute-0 podman[405414]: 2025-10-11 02:12:03.684855087 +0000 UTC m=+0.143980853 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 02:12:03 compute-0 python3.9[405463]: ansible-ansible.builtin.file Invoked with group=root mode=0750 owner=root path=/var/lib/edpm-config/firewall state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:12:03 compute-0 sudo[405458]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:04 compute-0 ceph-mon[191930]: pgmap v1016: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:04 compute-0 sudo[405613]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nnrasclygsijkyylqelfgnoodizlfont ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148724.1772676-854-14835373510541/AnsiballZ_stat.py'
Oct 11 02:12:04 compute-0 sudo[405613]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:04 compute-0 sshd-session[404961]: Connection closed by invalid user user 121.227.153.123 port 49648 [preauth]
Oct 11 02:12:05 compute-0 python3.9[405615]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:12:05 compute-0 sudo[405613]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:12:05 compute-0 podman[405618]: 2025-10-11 02:12:05.26726106 +0000 UTC m=+0.150762517 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, maintainer=Red Hat, Inc., architecture=x86_64, vcs-type=git, build-date=2024-09-18T21:23:30, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, com.redhat.component=ubi9-container, version=9.4, io.buildah.version=1.29.0, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9, managed_by=edpm_ansible, release=1214.1726694543, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, summary=Provides the latest release of Red Hat Universal Base Image 9., config_id=edpm, container_name=kepler, io.openshift.tags=base rhel9, release-0.7.12=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public)
Oct 11 02:12:05 compute-0 sudo[405712]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-itupivunzyaebtjapkhuyepvbcylpbnj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148724.1772676-854-14835373510541/AnsiballZ_file.py'
Oct 11 02:12:05 compute-0 sudo[405712]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1017: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:05 compute-0 python3.9[405714]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml _original_basename=base-rules.yaml.j2 recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-base.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:12:05 compute-0 sudo[405712]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:06 compute-0 sshd-session[405619]: Invalid user user from 121.227.153.123 port 49656
Oct 11 02:12:06 compute-0 sshd-session[405619]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:12:06 compute-0 sshd-session[405619]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:12:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:12:06 compute-0 sudo[405864]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-pcikqyrhxtubatjhinqdbmrbsxsghfve ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148726.0209134-866-22876389749815/AnsiballZ_stat.py'
Oct 11 02:12:06 compute-0 sudo[405864]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:06 compute-0 ceph-mon[191930]: pgmap v1017: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:06 compute-0 python3.9[405866]: ansible-ansible.legacy.stat Invoked with path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:12:06 compute-0 sudo[405864]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:07 compute-0 sudo[405942]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yjamhfbcnptbwlegfdtxmgyynyacgazb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148726.0209134-866-22876389749815/AnsiballZ_file.py'
Oct 11 02:12:07 compute-0 sudo[405942]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:07 compute-0 python3.9[405944]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml _original_basename=.5i5ehdh_ recurse=False state=file path=/var/lib/edpm-config/firewall/edpm-nftables-user-rules.yaml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:12:07 compute-0 sudo[405942]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1018: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:08 compute-0 sshd-session[405619]: Failed password for invalid user user from 121.227.153.123 port 49656 ssh2
Oct 11 02:12:08 compute-0 sudo[406094]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wvwtszqxrbxnqbncavtnwkezquukklwt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148727.885464-878-95908460055022/AnsiballZ_stat.py'
Oct 11 02:12:08 compute-0 sudo[406094]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:08 compute-0 python3.9[406096]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/iptables.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:12:08 compute-0 ceph-mon[191930]: pgmap v1018: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:08 compute-0 sudo[406094]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:09 compute-0 sudo[406172]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mtozfrdktzvegafjwwkjbtiniuirteoe ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148727.885464-878-95908460055022/AnsiballZ_file.py'
Oct 11 02:12:09 compute-0 sudo[406172]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:09 compute-0 python3.9[406174]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/iptables.nft _original_basename=iptables.nft recurse=False state=file path=/etc/nftables/iptables.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:12:09 compute-0 sudo[406172]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1019: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:10 compute-0 sshd-session[405619]: Connection closed by invalid user user 121.227.153.123 port 49656 [preauth]
Oct 11 02:12:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:12:10 compute-0 sudo[406324]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zlngyyvvxzeybgjmxwjgglnwcknpiabs ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148729.7608685-891-36959688970339/AnsiballZ_command.py'
Oct 11 02:12:10 compute-0 sudo[406324]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:10 compute-0 python3.9[406326]: ansible-ansible.legacy.command Invoked with _raw_params=nft -j list ruleset _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:12:10 compute-0 sudo[406324]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:10 compute-0 ceph-mon[191930]: pgmap v1019: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:11 compute-0 sshd-session[406327]: Invalid user user from 121.227.153.123 port 36266
Oct 11 02:12:11 compute-0 sshd-session[406327]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:12:11 compute-0 sshd-session[406327]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:12:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1020: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:12 compute-0 sudo[406479]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yapmmbeidmwkqdvrnnzwcqfjdxxcpzmv ; /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760148731.56259-899-192305744005290/AnsiballZ_edpm_nftables_from_files.py'
Oct 11 02:12:12 compute-0 sudo[406479]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:12 compute-0 python3[406481]: ansible-edpm_nftables_from_files Invoked with src=/var/lib/edpm-config/firewall
Oct 11 02:12:12 compute-0 sudo[406479]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:12 compute-0 ceph-mon[191930]: pgmap v1020: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:13 compute-0 sudo[406631]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jjkkeibditzbsrmczvnjitgntifnxlos ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148732.8515413-907-99965885901343/AnsiballZ_stat.py'
Oct 11 02:12:13 compute-0 sudo[406631]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1021: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:13 compute-0 python3.9[406633]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:12:13 compute-0 sudo[406631]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:14 compute-0 sshd-session[406327]: Failed password for invalid user user from 121.227.153.123 port 36266 ssh2
Oct 11 02:12:14 compute-0 ceph-mon[191930]: pgmap v1021: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:14 compute-0 sudo[406709]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jnhnvxgfiiciiacpqdjahlvhidmhxdit ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148732.8515413-907-99965885901343/AnsiballZ_file.py'
Oct 11 02:12:14 compute-0 sudo[406709]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:15 compute-0 python3.9[406711]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-jumps.nft _original_basename=jump-chain.j2 recurse=False state=file path=/etc/nftables/edpm-jumps.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:12:15 compute-0 sudo[406709]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:12:15 compute-0 sshd-session[406327]: Connection closed by invalid user user 121.227.153.123 port 36266 [preauth]
Oct 11 02:12:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1022: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:15 compute-0 sudo[406875]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eqkaldrivjblhqdaxegyfaedxgjiynte ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148735.3163998-919-96429394426785/AnsiballZ_stat.py'
Oct 11 02:12:15 compute-0 sudo[406875]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:16 compute-0 podman[406837]: 2025-10-11 02:12:16.013792096 +0000 UTC m=+0.176425818 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true)
Oct 11 02:12:16 compute-0 python3.9[406881]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-update-jumps.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:12:16 compute-0 sudo[406875]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:16 compute-0 sshd-session[406779]: Invalid user user from 121.227.153.123 port 36282
Oct 11 02:12:16 compute-0 sudo[406991]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tqznwnkkqqzdbifikhlbqjdmuljqhoqj ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148735.3163998-919-96429394426785/AnsiballZ_file.py'
Oct 11 02:12:16 compute-0 sudo[406991]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:16 compute-0 podman[406935]: 2025-10-11 02:12:16.601716051 +0000 UTC m=+0.106804968 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:12:16 compute-0 podman[406934]: 2025-10-11 02:12:16.614260557 +0000 UTC m=+0.115260221 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:12:16 compute-0 ceph-mon[191930]: pgmap v1022: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:16 compute-0 podman[406996]: 2025-10-11 02:12:16.746651996 +0000 UTC m=+0.145582109 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, io.buildah.version=1.41.3)
Oct 11 02:12:16 compute-0 sshd-session[406779]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:12:16 compute-0 sshd-session[406779]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:12:16 compute-0 python3.9[407003]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-update-jumps.nft _original_basename=jump-chain.j2 recurse=False state=file path=/etc/nftables/edpm-update-jumps.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:12:16 compute-0 sudo[406991]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1023: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:17 compute-0 sudo[407177]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-raoitaroxiadadrwnumrzylruxdhpcra ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148737.0835772-931-68423029857804/AnsiballZ_stat.py'
Oct 11 02:12:17 compute-0 sudo[407177]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:17 compute-0 python3.9[407179]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-flushes.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:12:17 compute-0 sudo[407177]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:18 compute-0 sudo[407255]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-idkctiiyrnfvuwkspifhlyynijamgnxr ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148737.0835772-931-68423029857804/AnsiballZ_file.py'
Oct 11 02:12:18 compute-0 sudo[407255]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:18 compute-0 python3.9[407257]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-flushes.nft _original_basename=flush-chain.j2 recurse=False state=file path=/etc/nftables/edpm-flushes.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:12:18 compute-0 sudo[407255]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:18 compute-0 ceph-mon[191930]: pgmap v1023: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:18 compute-0 sshd-session[406779]: Failed password for invalid user user from 121.227.153.123 port 36282 ssh2
Oct 11 02:12:19 compute-0 sudo[407408]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mubnbxjeaalwinbeokdmtqgchpcvpztg ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148738.958421-943-242881095188245/AnsiballZ_stat.py'
Oct 11 02:12:19 compute-0 sudo[407408]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1024: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:19 compute-0 python3.9[407410]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-chains.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:12:19 compute-0 sudo[407408]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:12:20 compute-0 sudo[407486]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jgcxlixsoifwbcojfttpudxjygomcpuv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148738.958421-943-242881095188245/AnsiballZ_file.py'
Oct 11 02:12:20 compute-0 sudo[407486]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:20 compute-0 sshd-session[406779]: Connection closed by invalid user user 121.227.153.123 port 36282 [preauth]
Oct 11 02:12:20 compute-0 podman[407488]: 2025-10-11 02:12:20.490206778 +0000 UTC m=+0.128273566 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=multipathd, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:12:20 compute-0 podman[407489]: 2025-10-11 02:12:20.496510536 +0000 UTC m=+0.125348126 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_id=iscsid, container_name=iscsid)
Oct 11 02:12:20 compute-0 python3.9[407490]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-chains.nft _original_basename=chains.j2 recurse=False state=file path=/etc/nftables/edpm-chains.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:12:20 compute-0 sudo[407486]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:20 compute-0 ceph-mon[191930]: pgmap v1024: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:21 compute-0 sudo[407674]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hqwuqplbrtpbsjnryjoacwyphubxnfig ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148740.901006-955-270260010549312/AnsiballZ_stat.py'
Oct 11 02:12:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1025: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:21 compute-0 sudo[407674]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:21 compute-0 python3.9[407676]: ansible-ansible.legacy.stat Invoked with path=/etc/nftables/edpm-rules.nft follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:12:21 compute-0 sshd-session[407541]: Invalid user user from 121.227.153.123 port 44196
Oct 11 02:12:21 compute-0 sudo[407674]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:22 compute-0 sshd-session[407541]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:12:22 compute-0 sshd-session[407541]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:12:22 compute-0 sudo[407752]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-stwbnslqtzjvtxmhayposdiurpjdaxtt ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148740.901006-955-270260010549312/AnsiballZ_file.py'
Oct 11 02:12:22 compute-0 sudo[407752]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:22 compute-0 python3.9[407754]: ansible-ansible.legacy.file Invoked with group=root mode=0600 owner=root dest=/etc/nftables/edpm-rules.nft _original_basename=ruleset.j2 recurse=False state=file path=/etc/nftables/edpm-rules.nft force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:12:22 compute-0 sudo[407752]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:22 compute-0 ceph-mon[191930]: pgmap v1025: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:23 compute-0 sudo[407812]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:12:23 compute-0 sudo[407812]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:23 compute-0 sudo[407812]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:23 compute-0 sudo[407856]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:12:23 compute-0 sudo[407856]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:23 compute-0 sudo[407856]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:23 compute-0 sudo[407908]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:12:23 compute-0 sudo[407908]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:23 compute-0 sudo[407908]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:23 compute-0 sudo[408002]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-istxdluukndzhxugjgeerrfvnnrhdqtf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148742.982409-968-63428826150320/AnsiballZ_command.py'
Oct 11 02:12:23 compute-0 sudo[407960]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:12:23 compute-0 sudo[408002]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:23 compute-0 sudo[407960]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1026: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:23 compute-0 python3.9[408005]: ansible-ansible.legacy.command Invoked with _raw_params=set -o pipefail; cat /etc/nftables/edpm-chains.nft /etc/nftables/edpm-flushes.nft /etc/nftables/edpm-rules.nft /etc/nftables/edpm-update-jumps.nft /etc/nftables/edpm-jumps.nft | nft -c -f - _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:12:23 compute-0 sudo[408002]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:24 compute-0 sudo[407960]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:24 compute-0 sudo[408116]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:12:24 compute-0 sudo[408116]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:24 compute-0 sudo[408116]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:24 compute-0 sudo[408141]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:12:24 compute-0 sudo[408141]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:24 compute-0 sudo[408141]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:24 compute-0 sshd-session[407541]: Failed password for invalid user user from 121.227.153.123 port 44196 ssh2
Oct 11 02:12:24 compute-0 sudo[408189]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:12:24 compute-0 sudo[408189]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:24 compute-0 sudo[408189]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:24 compute-0 sudo[408237]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 list-networks
Oct 11 02:12:24 compute-0 sudo[408237]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:24 compute-0 sudo[408289]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-fkplyknptcfsgaajrclzqumfrvlvdmij ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148744.0363328-976-119727452711159/AnsiballZ_blockinfile.py'
Oct 11 02:12:24 compute-0 sudo[408289]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:24 compute-0 ceph-mon[191930]: pgmap v1026: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:24 compute-0 python3.9[408291]: ansible-ansible.builtin.blockinfile Invoked with backup=False block=include "/etc/nftables/iptables.nft"
                                             include "/etc/nftables/edpm-chains.nft"
                                             include "/etc/nftables/edpm-rules.nft"
                                             include "/etc/nftables/edpm-jumps.nft"
                                              path=/etc/sysconfig/nftables.conf validate=nft -c -f %s state=present marker=# {mark} ANSIBLE MANAGED BLOCK create=False marker_begin=BEGIN marker_end=END append_newline=False prepend_newline=False unsafe_writes=False insertafter=None insertbefore=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:12:24 compute-0 sudo[408237]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:12:25 compute-0 sudo[408289]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:12:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:12:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:12:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:12:25 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:12:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:12:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:12:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:12:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:12:25 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 4da5548b-9c94-4186-a69d-c575fd443446 does not exist
Oct 11 02:12:25 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev f5606e4d-91a9-4323-82e6-c1427292fb77 does not exist
Oct 11 02:12:25 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev febc22c7-bf78-424f-b0bb-7d989dd95cd2 does not exist
Oct 11 02:12:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:12:25 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:12:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:12:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:12:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:12:25 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:12:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:12:25 compute-0 sudo[408319]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:12:25 compute-0 sudo[408319]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:25 compute-0 sudo[408319]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:25 compute-0 sudo[408360]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:12:25 compute-0 sudo[408360]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:25 compute-0 sudo[408360]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:25 compute-0 sudo[408408]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:12:25 compute-0 sudo[408408]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:25 compute-0 sudo[408408]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1027: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:25 compute-0 sudo[408460]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:12:25 compute-0 sudo[408460]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:25 compute-0 sshd-session[407541]: Connection closed by invalid user user 121.227.153.123 port 44196 [preauth]
Oct 11 02:12:25 compute-0 sudo[408583]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tlxfgxrhpiwakidbeswwbhptjqgzzjmm ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148745.3680465-985-151175725023568/AnsiballZ_command.py'
Oct 11 02:12:25 compute-0 sudo[408583]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:12:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:12:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:12:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:12:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:12:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:12:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:12:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:12:26 compute-0 python3.9[408587]: ansible-ansible.legacy.command Invoked with _raw_params=nft -f /etc/nftables/edpm-chains.nft _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True cmd=None argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:12:26 compute-0 podman[408603]: 2025-10-11 02:12:26.201376579 +0000 UTC m=+0.079934470 container create cafb1bf6d465f115e801a22dec23fa04cd581f1aa00b5e863f84acf7df591c34 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_chaum, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:12:26 compute-0 sudo[408583]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:26 compute-0 systemd[1]: Started libpod-conmon-cafb1bf6d465f115e801a22dec23fa04cd581f1aa00b5e863f84acf7df591c34.scope.
Oct 11 02:12:26 compute-0 podman[408603]: 2025-10-11 02:12:26.16754327 +0000 UTC m=+0.046101171 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:12:26 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:12:26 compute-0 podman[408603]: 2025-10-11 02:12:26.337900292 +0000 UTC m=+0.216458223 container init cafb1bf6d465f115e801a22dec23fa04cd581f1aa00b5e863f84acf7df591c34 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_chaum, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 02:12:26 compute-0 podman[408603]: 2025-10-11 02:12:26.353220255 +0000 UTC m=+0.231778136 container start cafb1bf6d465f115e801a22dec23fa04cd581f1aa00b5e863f84acf7df591c34 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_chaum, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:12:26 compute-0 reverent_chaum[408626]: 167 167
Oct 11 02:12:26 compute-0 podman[408603]: 2025-10-11 02:12:26.359746528 +0000 UTC m=+0.238304479 container attach cafb1bf6d465f115e801a22dec23fa04cd581f1aa00b5e863f84acf7df591c34 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_chaum, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:12:26 compute-0 systemd[1]: libpod-cafb1bf6d465f115e801a22dec23fa04cd581f1aa00b5e863f84acf7df591c34.scope: Deactivated successfully.
Oct 11 02:12:26 compute-0 podman[408603]: 2025-10-11 02:12:26.363412323 +0000 UTC m=+0.241970174 container died cafb1bf6d465f115e801a22dec23fa04cd581f1aa00b5e863f84acf7df591c34 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_chaum, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True)
Oct 11 02:12:26 compute-0 systemd[1]: var-lib-containers-storage-overlay-4b5060bdf8f8ee13cbb39400d091682b43bb329ca4c612d72343fcfc3bfd40ad-merged.mount: Deactivated successfully.
Oct 11 02:12:26 compute-0 podman[408603]: 2025-10-11 02:12:26.434108624 +0000 UTC m=+0.312666475 container remove cafb1bf6d465f115e801a22dec23fa04cd581f1aa00b5e863f84acf7df591c34 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_chaum, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:12:26 compute-0 systemd[1]: libpod-conmon-cafb1bf6d465f115e801a22dec23fa04cd581f1aa00b5e863f84acf7df591c34.scope: Deactivated successfully.
Oct 11 02:12:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:12:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:12:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:12:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:12:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:12:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:12:26 compute-0 podman[408721]: 2025-10-11 02:12:26.699710498 +0000 UTC m=+0.082311619 container create 167edc4c0fcdfdce01f7721f64ee4076ef8ea5ebdf13a01ef09bf0af4c4a4d4a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_jemison, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 02:12:26 compute-0 podman[408721]: 2025-10-11 02:12:26.670954902 +0000 UTC m=+0.053556063 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:12:26 compute-0 systemd[1]: Started libpod-conmon-167edc4c0fcdfdce01f7721f64ee4076ef8ea5ebdf13a01ef09bf0af4c4a4d4a.scope.
Oct 11 02:12:26 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:12:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/308ab04ee6b9dc50b61b89ef99e402c9d42bacf326433159100e7c95aeb444d6/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:12:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/308ab04ee6b9dc50b61b89ef99e402c9d42bacf326433159100e7c95aeb444d6/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:12:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/308ab04ee6b9dc50b61b89ef99e402c9d42bacf326433159100e7c95aeb444d6/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:12:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/308ab04ee6b9dc50b61b89ef99e402c9d42bacf326433159100e7c95aeb444d6/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:12:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/308ab04ee6b9dc50b61b89ef99e402c9d42bacf326433159100e7c95aeb444d6/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:12:26 compute-0 podman[408721]: 2025-10-11 02:12:26.861404244 +0000 UTC m=+0.244005445 container init 167edc4c0fcdfdce01f7721f64ee4076ef8ea5ebdf13a01ef09bf0af4c4a4d4a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_jemison, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 02:12:26 compute-0 podman[408721]: 2025-10-11 02:12:26.884329912 +0000 UTC m=+0.266931073 container start 167edc4c0fcdfdce01f7721f64ee4076ef8ea5ebdf13a01ef09bf0af4c4a4d4a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_jemison, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 02:12:26 compute-0 podman[408721]: 2025-10-11 02:12:26.891530728 +0000 UTC m=+0.274131949 container attach 167edc4c0fcdfdce01f7721f64ee4076ef8ea5ebdf13a01ef09bf0af4c4a4d4a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_jemison, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:12:27 compute-0 ceph-mon[191930]: pgmap v1027: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:27 compute-0 sudo[408815]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-htolugnmcdvlusxzconknguydrpqfesf ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148746.4792538-993-117042360783636/AnsiballZ_stat.py'
Oct 11 02:12:27 compute-0 sudo[408815]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:27 compute-0 sshd-session[408601]: Invalid user user from 121.227.153.123 port 44208
Oct 11 02:12:27 compute-0 python3.9[408817]: ansible-ansible.builtin.stat Invoked with path=/etc/nftables/edpm-rules.nft.changed follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:12:27 compute-0 sudo[408815]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:27 compute-0 sshd-session[408601]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:12:27 compute-0 sshd-session[408601]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:12:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1028: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:28 compute-0 ceph-mon[191930]: pgmap v1028: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:28 compute-0 cranky_jemison[408763]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:12:28 compute-0 cranky_jemison[408763]: --> relative data size: 1.0
Oct 11 02:12:28 compute-0 cranky_jemison[408763]: --> All data devices are unavailable
Oct 11 02:12:28 compute-0 systemd[1]: libpod-167edc4c0fcdfdce01f7721f64ee4076ef8ea5ebdf13a01ef09bf0af4c4a4d4a.scope: Deactivated successfully.
Oct 11 02:12:28 compute-0 systemd[1]: libpod-167edc4c0fcdfdce01f7721f64ee4076ef8ea5ebdf13a01ef09bf0af4c4a4d4a.scope: Consumed 1.256s CPU time.
Oct 11 02:12:28 compute-0 podman[408721]: 2025-10-11 02:12:28.215872005 +0000 UTC m=+1.598473166 container died 167edc4c0fcdfdce01f7721f64ee4076ef8ea5ebdf13a01ef09bf0af4c4a4d4a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_jemison, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:12:28 compute-0 sudo[408991]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-knpraqepgqcuyhkvmrrktmzchluylnza ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148747.660518-1002-88588171713712/AnsiballZ_file.py'
Oct 11 02:12:28 compute-0 sudo[408991]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:28 compute-0 systemd[1]: var-lib-containers-storage-overlay-308ab04ee6b9dc50b61b89ef99e402c9d42bacf326433159100e7c95aeb444d6-merged.mount: Deactivated successfully.
Oct 11 02:12:28 compute-0 podman[408721]: 2025-10-11 02:12:28.314992606 +0000 UTC m=+1.697593727 container remove 167edc4c0fcdfdce01f7721f64ee4076ef8ea5ebdf13a01ef09bf0af4c4a4d4a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_jemison, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:12:28 compute-0 systemd[1]: libpod-conmon-167edc4c0fcdfdce01f7721f64ee4076ef8ea5ebdf13a01ef09bf0af4c4a4d4a.scope: Deactivated successfully.
Oct 11 02:12:28 compute-0 sudo[408460]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:28 compute-0 sudo[409005]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:12:28 compute-0 sudo[409005]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:28 compute-0 sudo[409005]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:28 compute-0 python3.9[409000]: ansible-ansible.builtin.file Invoked with path=/etc/nftables/edpm-rules.nft.changed state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:12:28 compute-0 sudo[408991]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:28 compute-0 sudo[409030]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:12:28 compute-0 sudo[409030]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:28 compute-0 sudo[409030]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:28 compute-0 sudo[409075]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:12:28 compute-0 sudo[409075]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:28 compute-0 sudo[409075]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:28 compute-0 sudo[409104]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:12:28 compute-0 sudo[409104]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:29 compute-0 sshd-session[386394]: Connection closed by 192.168.122.30 port 35688
Oct 11 02:12:29 compute-0 sshd-session[386391]: pam_unix(sshd:session): session closed for user zuul
Oct 11 02:12:29 compute-0 systemd[1]: session-60.scope: Deactivated successfully.
Oct 11 02:12:29 compute-0 systemd[1]: session-60.scope: Consumed 2min 15.601s CPU time.
Oct 11 02:12:29 compute-0 systemd-logind[804]: Session 60 logged out. Waiting for processes to exit.
Oct 11 02:12:29 compute-0 systemd-logind[804]: Removed session 60.
Oct 11 02:12:29 compute-0 podman[409166]: 2025-10-11 02:12:29.31835468 +0000 UTC m=+0.068675721 container create 25fd41d9da1feff7f444a319a37f0843a4fcef4d44ad0c2926f1fc5d2f557d91 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_nightingale, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.license=GPLv2)
Oct 11 02:12:29 compute-0 podman[409166]: 2025-10-11 02:12:29.287371378 +0000 UTC m=+0.037692509 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:12:29 compute-0 systemd[1]: Started libpod-conmon-25fd41d9da1feff7f444a319a37f0843a4fcef4d44ad0c2926f1fc5d2f557d91.scope.
Oct 11 02:12:29 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:12:29 compute-0 sshd-session[408601]: Failed password for invalid user user from 121.227.153.123 port 44208 ssh2
Oct 11 02:12:29 compute-0 podman[409166]: 2025-10-11 02:12:29.465741153 +0000 UTC m=+0.216062204 container init 25fd41d9da1feff7f444a319a37f0843a4fcef4d44ad0c2926f1fc5d2f557d91 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_nightingale, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:12:29 compute-0 podman[409166]: 2025-10-11 02:12:29.490844985 +0000 UTC m=+0.241166066 container start 25fd41d9da1feff7f444a319a37f0843a4fcef4d44ad0c2926f1fc5d2f557d91 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_nightingale, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 02:12:29 compute-0 podman[409166]: 2025-10-11 02:12:29.498616383 +0000 UTC m=+0.248937484 container attach 25fd41d9da1feff7f444a319a37f0843a4fcef4d44ad0c2926f1fc5d2f557d91 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_nightingale, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:12:29 compute-0 loving_nightingale[409182]: 167 167
Oct 11 02:12:29 compute-0 systemd[1]: libpod-25fd41d9da1feff7f444a319a37f0843a4fcef4d44ad0c2926f1fc5d2f557d91.scope: Deactivated successfully.
Oct 11 02:12:29 compute-0 podman[409166]: 2025-10-11 02:12:29.5062988 +0000 UTC m=+0.256619871 container died 25fd41d9da1feff7f444a319a37f0843a4fcef4d44ad0c2926f1fc5d2f557d91 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_nightingale, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:12:29 compute-0 systemd[1]: var-lib-containers-storage-overlay-714ee68b781637d2d707095b343645414b4b655785c9eb467b85a592511e46fa-merged.mount: Deactivated successfully.
Oct 11 02:12:29 compute-0 podman[409166]: 2025-10-11 02:12:29.581476963 +0000 UTC m=+0.331798044 container remove 25fd41d9da1feff7f444a319a37f0843a4fcef4d44ad0c2926f1fc5d2f557d91 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_nightingale, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507)
Oct 11 02:12:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1029: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:29 compute-0 systemd[1]: libpod-conmon-25fd41d9da1feff7f444a319a37f0843a4fcef4d44ad0c2926f1fc5d2f557d91.scope: Deactivated successfully.
Oct 11 02:12:29 compute-0 podman[157119]: time="2025-10-11T02:12:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:12:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:12:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:12:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:12:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8498 "" "Go-http-client/1.1"
Oct 11 02:12:29 compute-0 podman[409206]: 2025-10-11 02:12:29.907023679 +0000 UTC m=+0.085482614 container create ab9d84b27074573d607cd90751cd7035122473c178a25e32ce72e50aa79c7a35 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=flamboyant_shaw, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:12:29 compute-0 podman[409206]: 2025-10-11 02:12:29.868894632 +0000 UTC m=+0.047353607 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:12:30 compute-0 systemd[1]: Started libpod-conmon-ab9d84b27074573d607cd90751cd7035122473c178a25e32ce72e50aa79c7a35.scope.
Oct 11 02:12:30 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:12:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/562756b567b70aa177f0398e29c034b0058efda09354603da43799af5eab6ce3/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:12:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/562756b567b70aa177f0398e29c034b0058efda09354603da43799af5eab6ce3/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:12:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/562756b567b70aa177f0398e29c034b0058efda09354603da43799af5eab6ce3/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:12:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/562756b567b70aa177f0398e29c034b0058efda09354603da43799af5eab6ce3/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:12:30 compute-0 podman[409206]: 2025-10-11 02:12:30.069120734 +0000 UTC m=+0.247579659 container init ab9d84b27074573d607cd90751cd7035122473c178a25e32ce72e50aa79c7a35 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=flamboyant_shaw, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:12:30 compute-0 podman[409206]: 2025-10-11 02:12:30.087937407 +0000 UTC m=+0.266396282 container start ab9d84b27074573d607cd90751cd7035122473c178a25e32ce72e50aa79c7a35 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=flamboyant_shaw, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:12:30 compute-0 podman[409206]: 2025-10-11 02:12:30.092678874 +0000 UTC m=+0.271137799 container attach ab9d84b27074573d607cd90751cd7035122473c178a25e32ce72e50aa79c7a35 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=flamboyant_shaw, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:12:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:12:30 compute-0 ceph-mon[191930]: pgmap v1029: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]: {
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:     "0": [
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:         {
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "devices": [
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "/dev/loop3"
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             ],
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "lv_name": "ceph_lv0",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "lv_size": "21470642176",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "name": "ceph_lv0",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "tags": {
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.cluster_name": "ceph",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.crush_device_class": "",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.encrypted": "0",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.osd_id": "0",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.type": "block",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.vdo": "0"
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             },
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "type": "block",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "vg_name": "ceph_vg0"
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:         }
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:     ],
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:     "1": [
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:         {
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "devices": [
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "/dev/loop4"
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             ],
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "lv_name": "ceph_lv1",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "lv_size": "21470642176",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "name": "ceph_lv1",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "tags": {
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.cluster_name": "ceph",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.crush_device_class": "",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.encrypted": "0",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.osd_id": "1",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.type": "block",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.vdo": "0"
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             },
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "type": "block",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "vg_name": "ceph_vg1"
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:         }
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:     ],
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:     "2": [
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:         {
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "devices": [
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "/dev/loop5"
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             ],
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "lv_name": "ceph_lv2",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "lv_size": "21470642176",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "name": "ceph_lv2",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "tags": {
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.cluster_name": "ceph",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.crush_device_class": "",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.encrypted": "0",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.osd_id": "2",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.type": "block",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:                 "ceph.vdo": "0"
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             },
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "type": "block",
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:             "vg_name": "ceph_vg2"
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:         }
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]:     ]
Oct 11 02:12:30 compute-0 flamboyant_shaw[409223]: }
Oct 11 02:12:31 compute-0 systemd[1]: libpod-ab9d84b27074573d607cd90751cd7035122473c178a25e32ce72e50aa79c7a35.scope: Deactivated successfully.
Oct 11 02:12:31 compute-0 podman[409206]: 2025-10-11 02:12:31.020068859 +0000 UTC m=+1.198527774 container died ab9d84b27074573d607cd90751cd7035122473c178a25e32ce72e50aa79c7a35 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=flamboyant_shaw, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:12:31 compute-0 sshd-session[408601]: Connection closed by invalid user user 121.227.153.123 port 44208 [preauth]
Oct 11 02:12:31 compute-0 systemd[1]: var-lib-containers-storage-overlay-562756b567b70aa177f0398e29c034b0058efda09354603da43799af5eab6ce3-merged.mount: Deactivated successfully.
Oct 11 02:12:31 compute-0 podman[409206]: 2025-10-11 02:12:31.111553154 +0000 UTC m=+1.290012049 container remove ab9d84b27074573d607cd90751cd7035122473c178a25e32ce72e50aa79c7a35 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=flamboyant_shaw, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_REF=reef)
Oct 11 02:12:31 compute-0 systemd[1]: libpod-conmon-ab9d84b27074573d607cd90751cd7035122473c178a25e32ce72e50aa79c7a35.scope: Deactivated successfully.
Oct 11 02:12:31 compute-0 sudo[409104]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:31 compute-0 podman[409233]: 2025-10-11 02:12:31.183828287 +0000 UTC m=+0.108279628 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:12:31 compute-0 sudo[409264]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:12:31 compute-0 sudo[409264]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:31 compute-0 sudo[409264]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:31 compute-0 openstack_network_exporter[374316]: ERROR   02:12:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:12:31 compute-0 openstack_network_exporter[374316]: ERROR   02:12:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:12:31 compute-0 openstack_network_exporter[374316]: ERROR   02:12:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:12:31 compute-0 openstack_network_exporter[374316]: ERROR   02:12:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:12:31 compute-0 openstack_network_exporter[374316]: ERROR   02:12:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:12:31 compute-0 sudo[409294]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:12:31 compute-0 sudo[409294]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:31 compute-0 sudo[409294]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:31 compute-0 sudo[409319]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:12:31 compute-0 sudo[409319]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:31 compute-0 sudo[409319]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1030: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:31 compute-0 sudo[409344]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:12:31 compute-0 sudo[409344]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:32 compute-0 sshd-session[409290]: Invalid user user from 121.227.153.123 port 48068
Oct 11 02:12:32 compute-0 podman[409408]: 2025-10-11 02:12:32.36374048 +0000 UTC m=+0.094224272 container create eceb1307e8d8e5c76287bcb6eb6d9a2b7d08f1dbc999f3b68dafbb49092f5f2a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=charming_cerf, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:12:32 compute-0 podman[409408]: 2025-10-11 02:12:32.325316196 +0000 UTC m=+0.055800048 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:12:32 compute-0 systemd[1]: Started libpod-conmon-eceb1307e8d8e5c76287bcb6eb6d9a2b7d08f1dbc999f3b68dafbb49092f5f2a.scope.
Oct 11 02:12:32 compute-0 sshd-session[409290]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:12:32 compute-0 sshd-session[409290]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:12:32 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:12:32 compute-0 podman[409408]: 2025-10-11 02:12:32.530905297 +0000 UTC m=+0.261389069 container init eceb1307e8d8e5c76287bcb6eb6d9a2b7d08f1dbc999f3b68dafbb49092f5f2a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=charming_cerf, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_REF=reef)
Oct 11 02:12:32 compute-0 podman[409408]: 2025-10-11 02:12:32.551016727 +0000 UTC m=+0.281500519 container start eceb1307e8d8e5c76287bcb6eb6d9a2b7d08f1dbc999f3b68dafbb49092f5f2a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=charming_cerf, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:12:32 compute-0 podman[409408]: 2025-10-11 02:12:32.561553932 +0000 UTC m=+0.292037734 container attach eceb1307e8d8e5c76287bcb6eb6d9a2b7d08f1dbc999f3b68dafbb49092f5f2a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=charming_cerf, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:12:32 compute-0 charming_cerf[409430]: 167 167
Oct 11 02:12:32 compute-0 systemd[1]: libpod-eceb1307e8d8e5c76287bcb6eb6d9a2b7d08f1dbc999f3b68dafbb49092f5f2a.scope: Deactivated successfully.
Oct 11 02:12:32 compute-0 podman[409408]: 2025-10-11 02:12:32.56783732 +0000 UTC m=+0.298321112 container died eceb1307e8d8e5c76287bcb6eb6d9a2b7d08f1dbc999f3b68dafbb49092f5f2a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=charming_cerf, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:12:32 compute-0 podman[409422]: 2025-10-11 02:12:32.609274565 +0000 UTC m=+0.159535943 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, distribution-scope=public, vcs-type=git, io.openshift.expose-services=, managed_by=edpm_ansible, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.buildah.version=1.33.7, vendor=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, version=9.6, name=ubi9-minimal, url=https://catalog.redhat.com/en/search?searchType=containers, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, maintainer=Red Hat, Inc., release=1755695350, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm, architecture=x86_64, container_name=openstack_network_exporter, build-date=2025-08-20T13:12:41, io.openshift.tags=minimal rhel9, com.redhat.component=ubi9-minimal-container)
Oct 11 02:12:32 compute-0 systemd[1]: var-lib-containers-storage-overlay-c7fb68b2614be263b2125b4098357335b7b9d396efeae8bb50dbdb38a77a4007-merged.mount: Deactivated successfully.
Oct 11 02:12:32 compute-0 podman[409408]: 2025-10-11 02:12:32.655524598 +0000 UTC m=+0.386008380 container remove eceb1307e8d8e5c76287bcb6eb6d9a2b7d08f1dbc999f3b68dafbb49092f5f2a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=charming_cerf, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3)
Oct 11 02:12:32 compute-0 systemd[1]: libpod-conmon-eceb1307e8d8e5c76287bcb6eb6d9a2b7d08f1dbc999f3b68dafbb49092f5f2a.scope: Deactivated successfully.
Oct 11 02:12:32 compute-0 ceph-mon[191930]: pgmap v1030: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:32 compute-0 podman[409465]: 2025-10-11 02:12:32.932611706 +0000 UTC m=+0.105750636 container create 59be007c0a5407c25e2032bcd3d42dce32bf364c2b527d7ad134f4d85e0c0c68 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_banach, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:12:32 compute-0 podman[409465]: 2025-10-11 02:12:32.897363638 +0000 UTC m=+0.070502618 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:12:33 compute-0 systemd[1]: Started libpod-conmon-59be007c0a5407c25e2032bcd3d42dce32bf364c2b527d7ad134f4d85e0c0c68.scope.
Oct 11 02:12:33 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:12:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/169f94f0801384f203af8cb13a24d5e34189cbdd54ea5889b5752a9d2d9020d4/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:12:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/169f94f0801384f203af8cb13a24d5e34189cbdd54ea5889b5752a9d2d9020d4/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:12:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/169f94f0801384f203af8cb13a24d5e34189cbdd54ea5889b5752a9d2d9020d4/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:12:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/169f94f0801384f203af8cb13a24d5e34189cbdd54ea5889b5752a9d2d9020d4/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:12:33 compute-0 podman[409465]: 2025-10-11 02:12:33.120342662 +0000 UTC m=+0.293481622 container init 59be007c0a5407c25e2032bcd3d42dce32bf364c2b527d7ad134f4d85e0c0c68 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_banach, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 02:12:33 compute-0 podman[409465]: 2025-10-11 02:12:33.141223558 +0000 UTC m=+0.314362458 container start 59be007c0a5407c25e2032bcd3d42dce32bf364c2b527d7ad134f4d85e0c0c68 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_banach, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:12:33 compute-0 podman[409465]: 2025-10-11 02:12:33.147045987 +0000 UTC m=+0.320184967 container attach 59be007c0a5407c25e2032bcd3d42dce32bf364c2b527d7ad134f4d85e0c0c68 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_banach, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3)
Oct 11 02:12:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1031: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:34 compute-0 podman[409498]: 2025-10-11 02:12:34.181677338 +0000 UTC m=+0.076388648 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:12:34 compute-0 happy_banach[409482]: {
Oct 11 02:12:34 compute-0 happy_banach[409482]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:12:34 compute-0 happy_banach[409482]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:12:34 compute-0 happy_banach[409482]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:12:34 compute-0 happy_banach[409482]:         "osd_id": 1,
Oct 11 02:12:34 compute-0 happy_banach[409482]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:12:34 compute-0 happy_banach[409482]:         "type": "bluestore"
Oct 11 02:12:34 compute-0 happy_banach[409482]:     },
Oct 11 02:12:34 compute-0 happy_banach[409482]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:12:34 compute-0 happy_banach[409482]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:12:34 compute-0 happy_banach[409482]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:12:34 compute-0 happy_banach[409482]:         "osd_id": 2,
Oct 11 02:12:34 compute-0 happy_banach[409482]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:12:34 compute-0 happy_banach[409482]:         "type": "bluestore"
Oct 11 02:12:34 compute-0 happy_banach[409482]:     },
Oct 11 02:12:34 compute-0 happy_banach[409482]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:12:34 compute-0 happy_banach[409482]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:12:34 compute-0 happy_banach[409482]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:12:34 compute-0 happy_banach[409482]:         "osd_id": 0,
Oct 11 02:12:34 compute-0 happy_banach[409482]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:12:34 compute-0 happy_banach[409482]:         "type": "bluestore"
Oct 11 02:12:34 compute-0 happy_banach[409482]:     }
Oct 11 02:12:34 compute-0 happy_banach[409482]: }
Oct 11 02:12:34 compute-0 systemd[1]: libpod-59be007c0a5407c25e2032bcd3d42dce32bf364c2b527d7ad134f4d85e0c0c68.scope: Deactivated successfully.
Oct 11 02:12:34 compute-0 systemd[1]: libpod-59be007c0a5407c25e2032bcd3d42dce32bf364c2b527d7ad134f4d85e0c0c68.scope: Consumed 1.200s CPU time.
Oct 11 02:12:34 compute-0 podman[409533]: 2025-10-11 02:12:34.411632085 +0000 UTC m=+0.043606119 container died 59be007c0a5407c25e2032bcd3d42dce32bf364c2b527d7ad134f4d85e0c0c68 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_banach, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:12:34 compute-0 systemd[1]: var-lib-containers-storage-overlay-169f94f0801384f203af8cb13a24d5e34189cbdd54ea5889b5752a9d2d9020d4-merged.mount: Deactivated successfully.
Oct 11 02:12:34 compute-0 podman[409533]: 2025-10-11 02:12:34.494405963 +0000 UTC m=+0.126379977 container remove 59be007c0a5407c25e2032bcd3d42dce32bf364c2b527d7ad134f4d85e0c0c68 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_banach, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3)
Oct 11 02:12:34 compute-0 systemd[1]: libpod-conmon-59be007c0a5407c25e2032bcd3d42dce32bf364c2b527d7ad134f4d85e0c0c68.scope: Deactivated successfully.
Oct 11 02:12:34 compute-0 sshd-session[409290]: Failed password for invalid user user from 121.227.153.123 port 48068 ssh2
Oct 11 02:12:34 compute-0 sudo[409344]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:12:34 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:12:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:12:34 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:12:34 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 80920922-5cb6-45af-93aa-8af541130447 does not exist
Oct 11 02:12:34 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 894cfebe-95e9-4ac5-9bb0-c50926251f4b does not exist
Oct 11 02:12:34 compute-0 sudo[409547]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:12:34 compute-0 ceph-mon[191930]: pgmap v1031: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:34 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:12:34 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:12:34 compute-0 sudo[409547]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:34 compute-0 sudo[409547]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:34 compute-0 sudo[409572]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:12:34 compute-0 sudo[409572]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:12:34 compute-0 sudo[409572]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:34 compute-0 sshd-session[409590]: Accepted publickey for zuul from 192.168.122.30 port 60010 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 02:12:34 compute-0 systemd-logind[804]: New session 61 of user zuul.
Oct 11 02:12:34 compute-0 systemd[1]: Started Session 61 of User zuul.
Oct 11 02:12:34 compute-0 sshd-session[409590]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 02:12:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:12:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1032: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:36 compute-0 sshd-session[409290]: Connection closed by invalid user user 121.227.153.123 port 48068 [preauth]
Oct 11 02:12:36 compute-0 podman[409724]: 2025-10-11 02:12:36.231942553 +0000 UTC m=+0.128781976 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, release=1214.1726694543, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release-0.7.12=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9., vendor=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9, build-date=2024-09-18T21:23:30, config_id=edpm, version=9.4, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, maintainer=Red Hat, Inc., vcs-type=git, io.openshift.tags=base rhel9, architecture=x86_64, container_name=kepler, io.buildah.version=1.29.0, com.redhat.component=ubi9-container, distribution-scope=public, io.openshift.expose-services=, name=ubi9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f)
Oct 11 02:12:36 compute-0 python3.9[409767]: ansible-ansible.builtin.setup Invoked with gather_subset=['!all', '!min', 'local'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 02:12:36 compute-0 ceph-mon[191930]: pgmap v1032: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:37 compute-0 sshd-session[409771]: Invalid user user from 121.227.153.123 port 48074
Oct 11 02:12:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1033: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:37 compute-0 sshd-session[409771]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:12:37 compute-0 sshd-session[409771]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:12:38 compute-0 sudo[409926]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-elcmzblguibwczawthzulwdxkqjxrrdv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148757.1912837-34-25961117937677/AnsiballZ_systemd.py'
Oct 11 02:12:38 compute-0 sudo[409926]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:38 compute-0 python3.9[409928]: ansible-ansible.builtin.systemd Invoked with name=rsyslog daemon_reload=False daemon_reexec=False scope=system no_block=False state=None enabled=None force=None masked=None
Oct 11 02:12:38 compute-0 sudo[409926]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:38 compute-0 ceph-mon[191930]: pgmap v1033: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:39 compute-0 sudo[410079]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-hgtyloxdhmakfenjrabhjnwotcmxnzus ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148758.8471408-42-269611719988106/AnsiballZ_setup.py'
Oct 11 02:12:39 compute-0 sudo[410079]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1034: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:39 compute-0 sshd-session[409771]: Failed password for invalid user user from 121.227.153.123 port 48074 ssh2
Oct 11 02:12:39 compute-0 nova_compute[356901]: 2025-10-11 02:12:39.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:12:39 compute-0 nova_compute[356901]: 2025-10-11 02:12:39.898 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:12:39 compute-0 python3.9[410081]: ansible-ansible.legacy.setup Invoked with filter=['ansible_pkg_mgr'] gather_subset=['!all'] gather_timeout=10 fact_path=/etc/ansible/facts.d
Oct 11 02:12:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:12:40 compute-0 sudo[410079]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:40 compute-0 ceph-mon[191930]: pgmap v1034: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:40 compute-0 nova_compute[356901]: 2025-10-11 02:12:40.893 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:12:41 compute-0 sudo[410163]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-jixywpegylfevppazhxunvstamwhpqbb ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148758.8471408-42-269611719988106/AnsiballZ_dnf.py'
Oct 11 02:12:41 compute-0 sudo[410163]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:41 compute-0 python3.9[410165]: ansible-ansible.legacy.dnf Invoked with name=['rsyslog-openssl'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Oct 11 02:12:41 compute-0 sshd-session[409771]: Connection closed by invalid user user 121.227.153.123 port 48074 [preauth]
Oct 11 02:12:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1035: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:42 compute-0 sshd-session[410170]: error: kex_exchange_identification: read: Connection reset by peer
Oct 11 02:12:42 compute-0 sshd-session[410170]: Connection reset by 45.140.17.97 port 30936
Oct 11 02:12:42 compute-0 sshd-session[410167]: Invalid user user from 121.227.153.123 port 47650
Oct 11 02:12:42 compute-0 sudo[410163]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:42 compute-0 ceph-mon[191930]: pgmap v1035: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:42 compute-0 sshd-session[410167]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:12:42 compute-0 sshd-session[410167]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:12:42 compute-0 nova_compute[356901]: 2025-10-11 02:12:42.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:12:42 compute-0 nova_compute[356901]: 2025-10-11 02:12:42.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:12:42 compute-0 nova_compute[356901]: 2025-10-11 02:12:42.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:12:42 compute-0 nova_compute[356901]: 2025-10-11 02:12:42.921 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944
Oct 11 02:12:42 compute-0 nova_compute[356901]: 2025-10-11 02:12:42.921 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:12:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1036: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:43 compute-0 sudo[410320]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-svwyrkldlxtasbwyoqpppswrxljqjvyq ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148762.937493-54-265378122913575/AnsiballZ_stat.py'
Oct 11 02:12:43 compute-0 sudo[410320]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:43 compute-0 nova_compute[356901]: 2025-10-11 02:12:43.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:12:43 compute-0 nova_compute[356901]: 2025-10-11 02:12:43.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:12:43 compute-0 nova_compute[356901]: 2025-10-11 02:12:43.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:12:43 compute-0 nova_compute[356901]: 2025-10-11 02:12:43.922 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:12:43 compute-0 nova_compute[356901]: 2025-10-11 02:12:43.922 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:12:43 compute-0 nova_compute[356901]: 2025-10-11 02:12:43.922 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
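[Editor's note] The three lockutils lines above show the standard oslo.concurrency pattern: the wrapper logs the acquire attempt, the wait time once the lock is granted, and the hold time on release. A rough stand-in using only the standard library (names here are illustrative, not oslo's actual implementation):

    import threading
    import time
    from contextlib import contextmanager

    _locks = {}

    @contextmanager
    def timed_lock(name):
        """Log wait and hold times around a named lock, lockutils-style."""
        lock = _locks.setdefault(name, threading.Lock())
        t0 = time.monotonic()
        lock.acquire()
        waited = time.monotonic() - t0
        print(f'Lock "{name}" acquired :: waited {waited:.3f}s')
        try:
            yield
        finally:
            held = time.monotonic() - t0 - waited
            lock.release()
            print(f'Lock "{name}" released :: held {held:.3f}s')

    with timed_lock("compute_resources"):
        pass  # critical section, e.g. resource-tracker bookkeeping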
Oct 11 02:12:43 compute-0 nova_compute[356901]: 2025-10-11 02:12:43.922 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:12:43 compute-0 nova_compute[356901]: 2025-10-11 02:12:43.922 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:12:44 compute-0 python3.9[410322]: ansible-ansible.legacy.stat Invoked with path=/etc/pki/rsyslog/ca-openshift.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:12:44 compute-0 sudo[410320]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:12:44 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2253061199' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:12:44 compute-0 nova_compute[356901]: 2025-10-11 02:12:44.489 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.567s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
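[Editor's note] Here the resource tracker shells out to `ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf` to learn storage capacity, which is why the monitor logs a matching `df` dispatch from client.openstack just before. A condensed sketch of the same call; the field names under "stats" match what recent ceph releases emit, but treat them as an assumption:

    import json
    import subprocess

    CMD = ["ceph", "df", "--format=json",
           "--id", "openstack", "--conf", "/etc/ceph/ceph.conf"]

    def ceph_capacity():
        """Run `ceph df` and return (total, avail) bytes for the cluster."""
        out = subprocess.run(CMD, capture_output=True, check=True).stdout
        stats = json.loads(out)["stats"]
        return stats["total_bytes"], stats["total_avail_bytes"]

    # total, avail = ceph_capacity()  # needs a reachable cluster and keyring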
Oct 11 02:12:44 compute-0 ceph-mon[191930]: pgmap v1036: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:44 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2253061199' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:12:44 compute-0 sshd-session[410167]: Failed password for invalid user user from 121.227.153.123 port 47650 ssh2
Oct 11 02:12:45 compute-0 nova_compute[356901]: 2025-10-11 02:12:45.031 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:12:45 compute-0 nova_compute[356901]: 2025-10-11 02:12:45.033 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=4524MB free_disk=59.98828125GB free_vcpus=8 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:12:45 compute-0 nova_compute[356901]: 2025-10-11 02:12:45.033 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:12:45 compute-0 nova_compute[356901]: 2025-10-11 02:12:45.034 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:12:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:12:45 compute-0 nova_compute[356901]: 2025-10-11 02:12:45.169 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 0 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:12:45 compute-0 nova_compute[356901]: 2025-10-11 02:12:45.170 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=512MB phys_disk=59GB used_disk=0GB total_vcpus=8 used_vcpus=0 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:12:45 compute-0 nova_compute[356901]: 2025-10-11 02:12:45.213 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:12:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1037: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:45 compute-0 sudo[410440]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-adzzybwnkgxmanqiaublnnzzpsifyjtd ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148762.937493-54-265378122913575/AnsiballZ_file.py'
Oct 11 02:12:45 compute-0 sudo[410440]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:12:45 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2026076575' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:12:45 compute-0 nova_compute[356901]: 2025-10-11 02:12:45.802 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.589s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:12:45 compute-0 python3.9[410442]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/etc/pki/rsyslog/ca-openshift.crt _original_basename=ca-openshift.crt recurse=False state=file path=/etc/pki/rsyslog/ca-openshift.crt force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:12:45 compute-0 nova_compute[356901]: 2025-10-11 02:12:45.816 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:12:45 compute-0 sudo[410440]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:45 compute-0 nova_compute[356901]: 2025-10-11 02:12:45.842 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
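[Editor's note] The inventory line above carries everything placement needs: schedulable capacity per resource class is (total - reserved) × allocation_ratio. Plugging in the values reported here gives 32 schedulable VCPUs, 7168 MB of RAM, and about 53 GB of disk; a quick check:

    # Inventory exactly as reported by the resource tracker above.
    inventory = {
        "VCPU":      {"total": 8,    "reserved": 0,   "allocation_ratio": 4.0},
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "DISK_GB":   {"total": 59,   "reserved": 0,   "allocation_ratio": 0.9},
    }

    for rc, inv in inventory.items():
        capacity = (inv["total"] - inv["reserved"]) * inv["allocation_ratio"]
        print(rc, capacity)
    # VCPU 32.0, MEMORY_MB 7168.0, DISK_GB 53.1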
Oct 11 02:12:45 compute-0 nova_compute[356901]: 2025-10-11 02:12:45.843 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:12:45 compute-0 nova_compute[356901]: 2025-10-11 02:12:45.843 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.810s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:12:46 compute-0 podman[410477]: 2025-10-11 02:12:46.265513967 +0000 UTC m=+0.146783143 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 10 Base Image, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=edpm, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4)
Oct 11 02:12:46 compute-0 sshd-session[410167]: Connection closed by invalid user user 121.227.153.123 port 47650 [preauth]
Oct 11 02:12:46 compute-0 sudo[410614]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-lzfuqlhusemzlsblrttnqusohpxhvcgo ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148766.1448255-66-269131421376641/AnsiballZ_file.py'
Oct 11 02:12:46 compute-0 sudo[410614]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:46 compute-0 ceph-mon[191930]: pgmap v1037: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:46 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2026076575' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:12:46 compute-0 podman[410618]: 2025-10-11 02:12:46.900987721 +0000 UTC m=+0.155090902 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true)
Oct 11 02:12:46 compute-0 podman[410617]: 2025-10-11 02:12:46.903700027 +0000 UTC m=+0.161155857 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:12:46 compute-0 python3.9[410619]: ansible-ansible.builtin.file Invoked with mode=0755 path=/etc/rsyslog.d state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:12:46 compute-0 sudo[410614]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:47 compute-0 podman[410660]: 2025-10-11 02:12:47.096046722 +0000 UTC m=+0.187155161 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:12:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1038: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:47 compute-0 sshd-session[410611]: Invalid user user from 121.227.153.123 port 47666
Oct 11 02:12:47 compute-0 nova_compute[356901]: 2025-10-11 02:12:47.844 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:12:47 compute-0 nova_compute[356901]: 2025-10-11 02:12:47.844 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:12:47 compute-0 sshd-session[410611]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:12:47 compute-0 sshd-session[410611]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:12:48 compute-0 sudo[410835]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zrsxuqjdyvajhxowoicmjauuxouotfqn ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148767.680026-74-95237007936056/AnsiballZ_stat.py'
Oct 11 02:12:48 compute-0 sudo[410835]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:48 compute-0 python3.9[410837]: ansible-ansible.legacy.stat Invoked with path=/etc/rsyslog.d/10-telemetry.conf follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Oct 11 02:12:48 compute-0 sudo[410835]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:48 compute-0 ceph-mon[191930]: pgmap v1038: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:49 compute-0 sudo[410913]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tcjzrwojqlyaaepprrgmpweiodutthwv ; /usr/bin/python3.9 /home/zuul/.ansible/tmp/ansible-tmp-1760148767.680026-74-95237007936056/AnsiballZ_file.py'
Oct 11 02:12:49 compute-0 sudo[410913]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:12:49 compute-0 python3.9[410915]: ansible-ansible.legacy.file Invoked with mode=0644 dest=/etc/rsyslog.d/10-telemetry.conf _original_basename=10-telemetry.conf recurse=False state=file path=/etc/rsyslog.d/10-telemetry.conf force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:12:49 compute-0 sudo[410913]: pam_unix(sudo:session): session closed for user root
Oct 11 02:12:49 compute-0 sshd-session[409600]: Connection closed by 192.168.122.30 port 60010
Oct 11 02:12:49 compute-0 sshd-session[409590]: pam_unix(sshd:session): session closed for user zuul
Oct 11 02:12:49 compute-0 systemd[1]: session-61.scope: Deactivated successfully.
Oct 11 02:12:49 compute-0 systemd[1]: session-61.scope: Consumed 10.997s CPU time.
Oct 11 02:12:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1039: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:49 compute-0 systemd-logind[804]: Session 61 logged out. Waiting for processes to exit.
Oct 11 02:12:49 compute-0 systemd-logind[804]: Removed session 61.
Oct 11 02:12:50 compute-0 sshd-session[410611]: Failed password for invalid user user from 121.227.153.123 port 47666 ssh2
Oct 11 02:12:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:12:50 compute-0 ceph-mon[191930]: pgmap v1039: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:51 compute-0 podman[410942]: 2025-10-11 02:12:51.264844542 +0000 UTC m=+0.135858951 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=iscsid, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:12:51 compute-0 podman[410941]: 2025-10-11 02:12:51.269536534 +0000 UTC m=+0.145763569 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=multipathd, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, managed_by=edpm_ansible)
Oct 11 02:12:51 compute-0 sshd-session[410611]: Connection closed by invalid user user 121.227.153.123 port 47666 [preauth]
Oct 11 02:12:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1040: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:52 compute-0 sshd-session[410981]: Invalid user user from 121.227.153.123 port 49706
Oct 11 02:12:52 compute-0 ceph-mon[191930]: pgmap v1040: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:53 compute-0 sshd-session[410981]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:12:53 compute-0 sshd-session[410981]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:12:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1041: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:12:54.833 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:12:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:12:54.834 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:12:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:12:54.834 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:12:54 compute-0 ceph-mon[191930]: pgmap v1041: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:12:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1042: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:56 compute-0 sshd-session[410981]: Failed password for invalid user user from 121.227.153.123 port 49706 ssh2
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:12:56
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['images', 'volumes', '.mgr', 'default.rgw.log', '.rgw.root', 'vms', 'default.rgw.meta', 'cephfs.cephfs.data', 'backups', 'default.rgw.control', 'cephfs.cephfs.meta']
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:12:56 compute-0 ceph-mon[191930]: pgmap v1042: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:12:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:12:57 compute-0 sshd-session[410981]: Connection closed by invalid user user 121.227.153.123 port 49706 [preauth]
Oct 11 02:12:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1043: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:58 compute-0 sshd-session[410983]: Invalid user user from 121.227.153.123 port 49714
Oct 11 02:12:58 compute-0 sshd-session[410983]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:12:58 compute-0 sshd-session[410983]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:12:58 compute-0 ceph-mon[191930]: pgmap v1043: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1044: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:12:59 compute-0 podman[157119]: time="2025-10-11T02:12:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:12:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:12:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:12:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:12:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8520 "" "Go-http-client/1.1"
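[Editor's note] Both GET lines are a client polling podman's libpod REST API over its UNIX socket (the podman_exporter config earlier in this log points at unix:///run/podman/podman.sock). The same query can be reproduced from the Python standard library alone; subclassing HTTPConnection to swap in a UNIX socket is a common trick, sketched here under those assumptions:

    import http.client
    import json
    import socket

    class PodmanConnection(http.client.HTTPConnection):
        """HTTP over podman's UNIX socket instead of TCP."""
        def __init__(self, path="/run/podman/podman.sock"):
            super().__init__("localhost")
            self.socket_path = path

        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self.socket_path)

    conn = PodmanConnection()
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    for c in json.loads(conn.getresponse().read()):
        print(c.get("Names"), c.get("State"))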
Oct 11 02:13:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:13:00 compute-0 ceph-mon[191930]: pgmap v1044: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:01 compute-0 sshd-session[410983]: Failed password for invalid user user from 121.227.153.123 port 49714 ssh2
Oct 11 02:13:01 compute-0 openstack_network_exporter[374316]: ERROR   02:13:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:13:01 compute-0 openstack_network_exporter[374316]: ERROR   02:13:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:13:01 compute-0 openstack_network_exporter[374316]: ERROR   02:13:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:13:01 compute-0 openstack_network_exporter[374316]: ERROR   02:13:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:13:01 compute-0 openstack_network_exporter[374316]: ERROR   02:13:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:13:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1045: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:02 compute-0 podman[410985]: 2025-10-11 02:13:02.230548931 +0000 UTC m=+0.118441297 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:13:02 compute-0 sshd-session[410983]: Connection closed by invalid user user 121.227.153.123 port 49714 [preauth]
Oct 11 02:13:02 compute-0 ceph-mon[191930]: pgmap v1045: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:03 compute-0 podman[411012]: 2025-10-11 02:13:03.223579385 +0000 UTC m=+0.120180758 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-type=git, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://catalog.redhat.com/en/search?searchType=containers, config_id=edpm, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.tags=minimal rhel9, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, managed_by=edpm_ansible, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vendor=Red Hat, Inc., build-date=2025-08-20T13:12:41, com.redhat.component=ubi9-minimal-container, version=9.6, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, name=ubi9-minimal, container_name=openstack_network_exporter, io.openshift.expose-services=, maintainer=Red Hat, Inc., io.buildah.version=1.33.7, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350)
Oct 11 02:13:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1046: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:03 compute-0 sshd-session[411010]: Invalid user user from 121.227.153.123 port 41762
Oct 11 02:13:04 compute-0 sshd-session[411010]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:13:04 compute-0 sshd-session[411010]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:13:04 compute-0 ceph-mon[191930]: pgmap v1046: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:13:05 compute-0 podman[411033]: 2025-10-11 02:13:05.271896457 +0000 UTC m=+0.160216834 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 02:13:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1047: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:06 compute-0 sshd-session[411010]: Failed password for invalid user user from 121.227.153.123 port 41762 ssh2
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:13:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
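[Editor's note] The autoscaler's arithmetic is visible in these lines: each pool's pg target is its share of raw space times its bias times the cluster-wide PG budget, then quantized toward a power of two. With three OSDs and the default mon_target_pg_per_osd of 100 the budget is 300, which reproduces the numbers logged above (the budget derivation is an inference from this log, not stated in it):

    # usage ratio and bias taken straight from the pg_autoscaler lines above
    pools = {
        ".mgr":               (7.185749983720779e-06, 1.0),
        "cephfs.cephfs.meta": (5.087256625643029e-07, 4.0),
    }
    PG_BUDGET = 100 * 3  # mon_target_pg_per_osd * assumed 3 OSDs

    for name, (usage, bias) in pools.items():
        print(name, usage * bias * PG_BUDGET)
    # .mgr               0.0021557249951162337 -> quantized to 1
    # cephfs.cephfs.meta 0.0006104707950771635 -> quantized to 16 (stepping down from 32)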
Oct 11 02:13:06 compute-0 ceph-mon[191930]: pgmap v1047: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:07 compute-0 podman[411054]: 2025-10-11 02:13:07.259018461 +0000 UTC m=+0.146521864 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vendor=Red Hat, Inc., managed_by=edpm_ansible, config_id=edpm, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9, vcs-type=git, com.redhat.component=ubi9-container, name=ubi9, build-date=2024-09-18T21:23:30, summary=Provides the latest release of Red Hat Universal Base Image 9., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.4, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, architecture=x86_64, io.openshift.expose-services=, container_name=kepler, maintainer=Red Hat, Inc., release=1214.1726694543, release-0.7.12=, distribution-scope=public, io.buildah.version=1.29.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9)
Oct 11 02:13:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1048: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:07 compute-0 sshd-session[411010]: Connection closed by invalid user user 121.227.153.123 port 41762 [preauth]
Oct 11 02:13:08 compute-0 sshd-session[411075]: Invalid user user from 121.227.153.123 port 41778
Oct 11 02:13:08 compute-0 ceph-mon[191930]: pgmap v1048: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:09 compute-0 sshd-session[411075]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:13:09 compute-0 sshd-session[411075]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:13:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1049: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:13:10 compute-0 ceph-mon[191930]: pgmap v1049: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:11 compute-0 sshd-session[411075]: Failed password for invalid user user from 121.227.153.123 port 41778 ssh2
Oct 11 02:13:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:13:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 1800.1 total, 600.0 interval
                                            Cumulative writes: 5851 writes, 24K keys, 5851 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.01 MB/s
                                            Cumulative WAL: 5851 writes, 991 syncs, 5.90 writes per sync, written: 0.02 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 212 writes, 318 keys, 212 commit groups, 1.0 writes per commit group, ingest: 0.10 MB, 0.00 MB/s
                                            Interval WAL: 212 writes, 106 syncs, 2.00 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:13:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1050: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:12 compute-0 sshd-session[411075]: Connection closed by invalid user user 121.227.153.123 port 41778 [preauth]
Oct 11 02:13:12 compute-0 ceph-mon[191930]: pgmap v1050: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1051: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:13 compute-0 sshd-session[411077]: Invalid user user from 121.227.153.123 port 33124
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.858 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to be longer than the expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.859 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.859 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.860 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.860 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.860 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.860 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.860 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.861 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.862 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.862 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.863 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.863 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.863 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.863 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.863 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.863 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.863 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.863 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.863 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.864 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.864 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.864 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.864 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.864 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.865 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.865 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.865 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.865 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.865 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.865 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': [], 'power.state': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.865 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.866 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.866 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.866 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': [], 'power.state': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.bytes.rate': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': [], 'power.state': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.bytes.rate': [], 'disk.ephemeral.size': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.867 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.868 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': [], 'power.state': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.bytes.rate': [], 'disk.ephemeral.size': [], 'network.incoming.packets': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.868 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': [], 'power.state': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.bytes.rate': [], 'disk.ephemeral.size': [], 'network.incoming.packets': [], 'network.outgoing.bytes.delta': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.869 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.869 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.869 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.869 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.869 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.869 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': [], 'power.state': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.bytes.rate': [], 'disk.ephemeral.size': [], 'network.incoming.packets': [], 'network.outgoing.bytes.delta': [], 'disk.root.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': [], 'network.incoming.packets.error': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.869 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.870 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': [], 'power.state': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.bytes.rate': [], 'disk.ephemeral.size': [], 'network.incoming.packets': [], 'network.outgoing.bytes.delta': [], 'disk.root.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': [], 'network.incoming.packets.error': [], 'cpu': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc8bec0>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': [], 'network.outgoing.packets.drop': [], 'network.outgoing.packets.error': [], 'disk.device.capacity': [], 'disk.device.read.bytes': [], 'disk.device.read.latency': [], 'disk.device.read.requests': [], 'disk.device.usage': [], 'disk.device.write.bytes': [], 'disk.device.write.latency': [], 'power.state': [], 'disk.device.write.requests': [], 'network.incoming.bytes.delta': [], 'network.incoming.bytes.rate': [], 'disk.ephemeral.size': [], 'network.incoming.packets': [], 'network.outgoing.bytes.delta': [], 'disk.root.size': [], 'network.incoming.packets.drop': [], 'disk.device.allocation': [], 'network.incoming.packets.error': [], 'cpu': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.870 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.870 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.871 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.871 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.871 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.871 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.871 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:13:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:13:14 compute-0 sshd-session[411077]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:13:14 compute-0 sshd-session[411077]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:13:14 compute-0 ceph-mon[191930]: pgmap v1051: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:13:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1052: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:16 compute-0 sshd-session[411077]: Failed password for invalid user user from 121.227.153.123 port 33124 ssh2
Oct 11 02:13:16 compute-0 ceph-mon[191930]: pgmap v1052: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:17 compute-0 podman[411080]: 2025-10-11 02:13:17.240664539 +0000 UTC m=+0.127268633 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:13:17 compute-0 podman[411081]: 2025-10-11 02:13:17.272150402 +0000 UTC m=+0.148215473 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251007, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 02:13:17 compute-0 podman[411082]: 2025-10-11 02:13:17.27295258 +0000 UTC m=+0.146566957 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, config_id=ovn_metadata_agent, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:13:17 compute-0 podman[411138]: 2025-10-11 02:13:17.377572355 +0000 UTC m=+0.137686408 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 02:13:17 compute-0 sshd-session[411077]: Connection closed by invalid user user 121.227.153.123 port 33124 [preauth]
Oct 11 02:13:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1053: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:13:17 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1151438423' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:13:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:13:17 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1151438423' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:13:18 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1151438423' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:13:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:13:18 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1848622628' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:13:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:13:18 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1848622628' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:13:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:13:18 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1015891064' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:13:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:13:18 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1015891064' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:13:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:13:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 1800.1 total, 600.0 interval
                                            Cumulative writes: 7127 writes, 29K keys, 7127 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.01 MB/s
                                            Cumulative WAL: 7127 writes, 1339 syncs, 5.32 writes per sync, written: 0.02 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 180 writes, 270 keys, 180 commit groups, 1.0 writes per commit group, ingest: 0.09 MB, 0.00 MB/s
                                            Interval WAL: 180 writes, 90 syncs, 2.00 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:13:19 compute-0 ceph-mon[191930]: pgmap v1053: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:19 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1151438423' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:13:19 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1848622628' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:13:19 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1848622628' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:13:19 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1015891064' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:13:19 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1015891064' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:13:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1054: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:19 compute-0 sshd-session[411166]: Invalid user user from 121.227.153.123 port 33140
Oct 11 02:13:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:13:20 compute-0 sshd-session[411166]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:13:20 compute-0 sshd-session[411166]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:13:21 compute-0 ceph-mon[191930]: pgmap v1054: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1055: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:22 compute-0 podman[411169]: 2025-10-11 02:13:22.248200377 +0000 UTC m=+0.137791886 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009)
Oct 11 02:13:22 compute-0 podman[411170]: 2025-10-11 02:13:22.27015034 +0000 UTC m=+0.158643661 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=iscsid, org.label-schema.name=CentOS Stream 9 Base Image, config_id=iscsid, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 02:13:22 compute-0 sshd-session[411166]: Failed password for invalid user user from 121.227.153.123 port 33140 ssh2
Oct 11 02:13:23 compute-0 ceph-mon[191930]: pgmap v1055: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1056: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:23 compute-0 sshd-session[411166]: Connection closed by invalid user user 121.227.153.123 port 33140 [preauth]
Oct 11 02:13:24 compute-0 ceph-mon[191930]: pgmap v1056: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:13:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1057: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:13:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 1800.2 total, 600.0 interval
                                            Cumulative writes: 5908 writes, 24K keys, 5908 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.01 MB/s
                                            Cumulative WAL: 5908 writes, 1010 syncs, 5.85 writes per sync, written: 0.02 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 180 writes, 270 keys, 180 commit groups, 1.0 writes per commit group, ingest: 0.09 MB, 0.00 MB/s
                                            Interval WAL: 180 writes, 90 syncs, 2.00 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:13:26 compute-0 ceph-mgr[192233]: [devicehealth INFO root] Check health
Oct 11 02:13:26 compute-0 sshd-session[411206]: Invalid user user from 121.227.153.123 port 42640
Oct 11 02:13:26 compute-0 sshd-session[411206]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:13:26 compute-0 sshd-session[411206]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:13:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:13:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:13:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:13:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:13:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:13:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:13:26 compute-0 ceph-mon[191930]: pgmap v1057: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1058: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:28 compute-0 sshd-session[411206]: Failed password for invalid user user from 121.227.153.123 port 42640 ssh2
Oct 11 02:13:28 compute-0 ceph-mon[191930]: pgmap v1058: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1059: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:29 compute-0 podman[157119]: time="2025-10-11T02:13:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:13:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:13:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:13:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:13:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8520 "" "Go-http-client/1.1"
Oct 11 02:13:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:13:30 compute-0 sshd-session[411206]: Connection closed by invalid user user 121.227.153.123 port 42640 [preauth]
Oct 11 02:13:30 compute-0 ceph-mon[191930]: pgmap v1059: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:31 compute-0 sshd-session[411208]: Invalid user user from 121.227.153.123 port 38976
Oct 11 02:13:31 compute-0 openstack_network_exporter[374316]: ERROR   02:13:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:13:31 compute-0 openstack_network_exporter[374316]: ERROR   02:13:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:13:31 compute-0 openstack_network_exporter[374316]: ERROR   02:13:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:13:31 compute-0 openstack_network_exporter[374316]: ERROR   02:13:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:13:31 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:13:31 compute-0 openstack_network_exporter[374316]: ERROR   02:13:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:13:31 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:13:31 compute-0 sshd-session[411208]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:13:31 compute-0 sshd-session[411208]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:13:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1060: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:32 compute-0 ceph-mon[191930]: pgmap v1060: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:33 compute-0 podman[411210]: 2025-10-11 02:13:33.208403181 +0000 UTC m=+0.106542787 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:13:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1061: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:33 compute-0 sshd-session[411208]: Failed password for invalid user user from 121.227.153.123 port 38976 ssh2
Oct 11 02:13:34 compute-0 podman[411234]: 2025-10-11 02:13:34.196284907 +0000 UTC m=+0.097946692 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., distribution-scope=public, name=ubi9-minimal, io.openshift.expose-services=, vcs-type=git, maintainer=Red Hat, Inc., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, managed_by=edpm_ansible, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, container_name=openstack_network_exporter, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, url=https://catalog.redhat.com/en/search?searchType=containers, build-date=2025-08-20T13:12:41, io.openshift.tags=minimal rhel9, architecture=x86_64, com.redhat.component=ubi9-minimal-container, version=9.6, vendor=Red Hat, Inc.)
Oct 11 02:13:34 compute-0 ceph-mon[191930]: pgmap v1061: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:34 compute-0 sudo[411254]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:13:34 compute-0 sudo[411254]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:35 compute-0 sudo[411254]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:35 compute-0 sudo[411279]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:13:35 compute-0 sshd-session[411208]: Connection closed by invalid user user 121.227.153.123 port 38976 [preauth]
Oct 11 02:13:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:13:35 compute-0 sudo[411279]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:35 compute-0 sudo[411279]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:35 compute-0 sudo[411304]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:13:35 compute-0 sudo[411304]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:35 compute-0 sudo[411304]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:35 compute-0 sudo[411329]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:13:35 compute-0 sudo[411329]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:35 compute-0 podman[411355]: 2025-10-11 02:13:35.594169674 +0000 UTC m=+0.149015779 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=edpm, managed_by=edpm_ansible, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 02:13:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1062: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:36 compute-0 sudo[411329]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:36 compute-0 sudo[411407]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:13:36 compute-0 sudo[411407]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:36 compute-0 sudo[411407]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:36 compute-0 sudo[411432]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:13:36 compute-0 sudo[411432]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:36 compute-0 sudo[411432]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:36 compute-0 sudo[411457]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:13:36 compute-0 sshd-session[411352]: Invalid user user from 121.227.153.123 port 38988
Oct 11 02:13:36 compute-0 sudo[411457]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:36 compute-0 sudo[411457]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:36 compute-0 sudo[411482]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- inventory --format=json-pretty --filter-for-batch
Oct 11 02:13:36 compute-0 sudo[411482]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:36 compute-0 sshd-session[411352]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:13:36 compute-0 sshd-session[411352]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:13:36 compute-0 ceph-mon[191930]: pgmap v1062: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:37 compute-0 podman[411545]: 2025-10-11 02:13:37.101742061 +0000 UTC m=+0.090858322 container create 4bc53c0779abd107c141d49a9dcff04500f1b81bf4543dd46d7b8ff9f7a035dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_moore, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2)
Oct 11 02:13:37 compute-0 podman[411545]: 2025-10-11 02:13:37.069316524 +0000 UTC m=+0.058432895 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:13:37 compute-0 systemd[1]: Started libpod-conmon-4bc53c0779abd107c141d49a9dcff04500f1b81bf4543dd46d7b8ff9f7a035dc.scope.
Oct 11 02:13:37 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:13:37 compute-0 podman[411545]: 2025-10-11 02:13:37.248219187 +0000 UTC m=+0.237335478 container init 4bc53c0779abd107c141d49a9dcff04500f1b81bf4543dd46d7b8ff9f7a035dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_moore, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 02:13:37 compute-0 podman[411545]: 2025-10-11 02:13:37.261201722 +0000 UTC m=+0.250317983 container start 4bc53c0779abd107c141d49a9dcff04500f1b81bf4543dd46d7b8ff9f7a035dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_moore, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3)
Oct 11 02:13:37 compute-0 podman[411545]: 2025-10-11 02:13:37.266302854 +0000 UTC m=+0.255419115 container attach 4bc53c0779abd107c141d49a9dcff04500f1b81bf4543dd46d7b8ff9f7a035dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_moore, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.license=GPLv2)
Oct 11 02:13:37 compute-0 cool_moore[411561]: 167 167
Oct 11 02:13:37 compute-0 systemd[1]: libpod-4bc53c0779abd107c141d49a9dcff04500f1b81bf4543dd46d7b8ff9f7a035dc.scope: Deactivated successfully.
Oct 11 02:13:37 compute-0 podman[411545]: 2025-10-11 02:13:37.276857576 +0000 UTC m=+0.265973877 container died 4bc53c0779abd107c141d49a9dcff04500f1b81bf4543dd46d7b8ff9f7a035dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_moore, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS)
Oct 11 02:13:37 compute-0 systemd[1]: var-lib-containers-storage-overlay-24a0590bb29b5730e73c96288057aed44b7511883fef15c21b2b0235ad79501f-merged.mount: Deactivated successfully.
Oct 11 02:13:37 compute-0 podman[411545]: 2025-10-11 02:13:37.360424389 +0000 UTC m=+0.349540660 container remove 4bc53c0779abd107c141d49a9dcff04500f1b81bf4543dd46d7b8ff9f7a035dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_moore, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 02:13:37 compute-0 systemd[1]: libpod-conmon-4bc53c0779abd107c141d49a9dcff04500f1b81bf4543dd46d7b8ff9f7a035dc.scope: Deactivated successfully.
Oct 11 02:13:37 compute-0 podman[411573]: 2025-10-11 02:13:37.476157049 +0000 UTC m=+0.118371642 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, io.buildah.version=1.29.0, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.openshift.expose-services=, architecture=x86_64, release-0.7.12=, io.openshift.tags=base rhel9, managed_by=edpm_ansible, vendor=Red Hat, Inc., com.redhat.component=ubi9-container, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, summary=Provides the latest release of Red Hat Universal Base Image 9., version=9.4, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1214.1726694543, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, vcs-type=git, config_id=edpm, container_name=kepler, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, build-date=2024-09-18T21:23:30, maintainer=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9)
Oct 11 02:13:37 compute-0 podman[411603]: 2025-10-11 02:13:37.641425065 +0000 UTC m=+0.098089051 container create 0a724d771410cc421a382409b5c326ad9e05a046c2583a6a0d85c112a683beff (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_poincare, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:13:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1063: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:37 compute-0 podman[411603]: 2025-10-11 02:13:37.597794104 +0000 UTC m=+0.054458130 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:13:37 compute-0 systemd[1]: Started libpod-conmon-0a724d771410cc421a382409b5c326ad9e05a046c2583a6a0d85c112a683beff.scope.
Oct 11 02:13:37 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:13:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6f8388c99c9a8c7588f8b44f8d46febe1bb5f94c6012e60413904835e2970dbf/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:13:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6f8388c99c9a8c7588f8b44f8d46febe1bb5f94c6012e60413904835e2970dbf/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:13:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6f8388c99c9a8c7588f8b44f8d46febe1bb5f94c6012e60413904835e2970dbf/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:13:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6f8388c99c9a8c7588f8b44f8d46febe1bb5f94c6012e60413904835e2970dbf/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:13:37 compute-0 podman[411603]: 2025-10-11 02:13:37.828698674 +0000 UTC m=+0.285362650 container init 0a724d771410cc421a382409b5c326ad9e05a046c2583a6a0d85c112a683beff (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_poincare, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2)
Oct 11 02:13:37 compute-0 podman[411603]: 2025-10-11 02:13:37.860647406 +0000 UTC m=+0.317311372 container start 0a724d771410cc421a382409b5c326ad9e05a046c2583a6a0d85c112a683beff (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_poincare, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:13:37 compute-0 podman[411603]: 2025-10-11 02:13:37.867818455 +0000 UTC m=+0.324482471 container attach 0a724d771410cc421a382409b5c326ad9e05a046c2583a6a0d85c112a683beff (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_poincare, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True)
Oct 11 02:13:38 compute-0 ceph-mon[191930]: pgmap v1063: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:38 compute-0 sshd-session[411352]: Failed password for invalid user user from 121.227.153.123 port 38988 ssh2
Oct 11 02:13:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1064: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:13:40 compute-0 sshd-session[411352]: Connection closed by invalid user user 121.227.153.123 port 38988 [preauth]
Oct 11 02:13:40 compute-0 zealous_poincare[411620]: [
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:     {
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:         "available": false,
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:         "ceph_device": false,
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:         "device_id": "QEMU_DVD-ROM_QM00001",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:         "lsm_data": {},
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:         "lvs": [],
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:         "path": "/dev/sr0",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:         "rejected_reasons": [
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "Has a FileSystem",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "Insufficient space (<5GB)"
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:         ],
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:         "sys_api": {
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "actuators": null,
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "device_nodes": "sr0",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "devname": "sr0",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "human_readable_size": "482.00 KB",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "id_bus": "ata",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "model": "QEMU DVD-ROM",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "nr_requests": "2",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "parent": "/dev/sr0",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "partitions": {},
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "path": "/dev/sr0",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "removable": "1",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "rev": "2.5+",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "ro": "0",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "rotational": "0",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "sas_address": "",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "sas_device_handle": "",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "scheduler_mode": "mq-deadline",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "sectors": 0,
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "sectorsize": "2048",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "size": 493568.0,
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "support_discard": "2048",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "type": "disk",
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:             "vendor": "QEMU"
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:         }
Oct 11 02:13:40 compute-0 zealous_poincare[411620]:     }
Oct 11 02:13:40 compute-0 zealous_poincare[411620]: ]
Oct 11 02:13:40 compute-0 systemd[1]: libpod-0a724d771410cc421a382409b5c326ad9e05a046c2583a6a0d85c112a683beff.scope: Deactivated successfully.
Oct 11 02:13:40 compute-0 podman[411603]: 2025-10-11 02:13:40.637183845 +0000 UTC m=+3.093847821 container died 0a724d771410cc421a382409b5c326ad9e05a046c2583a6a0d85c112a683beff (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_poincare, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default)
Oct 11 02:13:40 compute-0 systemd[1]: libpod-0a724d771410cc421a382409b5c326ad9e05a046c2583a6a0d85c112a683beff.scope: Consumed 2.889s CPU time.
Oct 11 02:13:40 compute-0 systemd[1]: var-lib-containers-storage-overlay-6f8388c99c9a8c7588f8b44f8d46febe1bb5f94c6012e60413904835e2970dbf-merged.mount: Deactivated successfully.
Oct 11 02:13:40 compute-0 podman[411603]: 2025-10-11 02:13:40.763481329 +0000 UTC m=+3.220145255 container remove 0a724d771410cc421a382409b5c326ad9e05a046c2583a6a0d85c112a683beff (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_poincare, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2)
Oct 11 02:13:40 compute-0 systemd[1]: libpod-conmon-0a724d771410cc421a382409b5c326ad9e05a046c2583a6a0d85c112a683beff.scope: Deactivated successfully.
Oct 11 02:13:40 compute-0 ceph-mon[191930]: pgmap v1064: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:40 compute-0 sudo[411482]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:13:40 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:13:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:13:40 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:13:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:13:40 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:13:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:13:40 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:13:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:13:40 compute-0 nova_compute[356901]: 2025-10-11 02:13:40.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:13:40 compute-0 nova_compute[356901]: 2025-10-11 02:13:40.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:13:40 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:13:40 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 29bd0a57-f074-4b05-ba90-0faf981d08d4 does not exist
Oct 11 02:13:40 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 27daf349-9b46-46ea-bdd8-677640a951a1 does not exist
Oct 11 02:13:40 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 0db2a684-102b-4d65-871f-71c3ad2b47ea does not exist
Oct 11 02:13:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:13:40 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:13:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:13:40 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:13:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:13:40 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:13:41 compute-0 sudo[413881]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:13:41 compute-0 sudo[413881]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:41 compute-0 sudo[413881]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:41 compute-0 sudo[413906]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:13:41 compute-0 sudo[413906]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:41 compute-0 sudo[413906]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:41 compute-0 sudo[413931]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:13:41 compute-0 sudo[413931]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:41 compute-0 sudo[413931]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:41 compute-0 sshd-session[413659]: Invalid user user from 121.227.153.123 port 58888
Oct 11 02:13:41 compute-0 sudo[413956]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:13:41 compute-0 sudo[413956]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1065: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:41 compute-0 sshd-session[413659]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:13:41 compute-0 sshd-session[413659]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:13:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:13:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:13:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:13:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:13:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:13:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:13:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:13:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:13:41 compute-0 nova_compute[356901]: 2025-10-11 02:13:41.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:13:42 compute-0 podman[414019]: 2025-10-11 02:13:42.072754229 +0000 UTC m=+0.084390454 container create a090606235daec0dbeeebd2204868bab8ba93bf9a2c60264921c895503554600 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_keldysh, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:13:42 compute-0 podman[414019]: 2025-10-11 02:13:42.03880312 +0000 UTC m=+0.050439415 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:13:42 compute-0 systemd[1]: Started libpod-conmon-a090606235daec0dbeeebd2204868bab8ba93bf9a2c60264921c895503554600.scope.
Oct 11 02:13:42 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:13:42 compute-0 podman[414019]: 2025-10-11 02:13:42.224645905 +0000 UTC m=+0.236282170 container init a090606235daec0dbeeebd2204868bab8ba93bf9a2c60264921c895503554600 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_keldysh, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:13:42 compute-0 podman[414019]: 2025-10-11 02:13:42.248958612 +0000 UTC m=+0.260594837 container start a090606235daec0dbeeebd2204868bab8ba93bf9a2c60264921c895503554600 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_keldysh, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:13:42 compute-0 podman[414019]: 2025-10-11 02:13:42.255873974 +0000 UTC m=+0.267510249 container attach a090606235daec0dbeeebd2204868bab8ba93bf9a2c60264921c895503554600 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_keldysh, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2)
Oct 11 02:13:42 compute-0 affectionate_keldysh[414034]: 167 167
Oct 11 02:13:42 compute-0 systemd[1]: libpod-a090606235daec0dbeeebd2204868bab8ba93bf9a2c60264921c895503554600.scope: Deactivated successfully.
Oct 11 02:13:42 compute-0 podman[414019]: 2025-10-11 02:13:42.264356509 +0000 UTC m=+0.275992744 container died a090606235daec0dbeeebd2204868bab8ba93bf9a2c60264921c895503554600 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_keldysh, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:13:42 compute-0 systemd[1]: var-lib-containers-storage-overlay-f861365e2cf691d06e2528f43d04ac2fa6e1d6e4c96453ce78eb8800939f4b97-merged.mount: Deactivated successfully.
Oct 11 02:13:42 compute-0 podman[414019]: 2025-10-11 02:13:42.342961457 +0000 UTC m=+0.354597682 container remove a090606235daec0dbeeebd2204868bab8ba93bf9a2c60264921c895503554600 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_keldysh, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:13:42 compute-0 systemd[1]: libpod-conmon-a090606235daec0dbeeebd2204868bab8ba93bf9a2c60264921c895503554600.scope: Deactivated successfully.
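The block above is one complete podman lifecycle (create, init, start, attach, died, remove) for a throwaway container whose only output was "167 167", which matches the ceph user/group id in Red Hat Ceph images. This looks like cephadm probing the image for the ceph uid/gid before running ceph-volume; the exact cephadm internals are an assumption, but a minimal stand-alone sketch of such a probe, reusing the image digest from the log, would be:

    # Hypothetical sketch of a uid/gid probe like the one logged above:
    # run a disposable container and stat /var/lib/ceph inside the image.
    import subprocess

    IMAGE = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")

    out = subprocess.run(
        ["podman", "run", "--rm", "--entrypoint", "stat", IMAGE,
         "-c", "%u %g", "/var/lib/ceph"],
        check=True, capture_output=True, text=True,
    ).stdout.strip()
    uid, gid = (int(x) for x in out.split())  # expect 167 167, as printed above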
Oct 11 02:13:42 compute-0 podman[414057]: 2025-10-11 02:13:42.666890543 +0000 UTC m=+0.102052896 container create 196499098f916aad1c4a789f29f5f264cf633a20b5dc7b90c9bc104807cb397c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_edison, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 02:13:42 compute-0 podman[414057]: 2025-10-11 02:13:42.630166045 +0000 UTC m=+0.065328488 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:13:42 compute-0 systemd[1]: Started libpod-conmon-196499098f916aad1c4a789f29f5f264cf633a20b5dc7b90c9bc104807cb397c.scope.
Oct 11 02:13:42 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:13:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cb3edbbf21ec311702299e6c5317480c4712560d1e8232ade757430ab6885bec/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:13:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cb3edbbf21ec311702299e6c5317480c4712560d1e8232ade757430ab6885bec/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:13:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cb3edbbf21ec311702299e6c5317480c4712560d1e8232ade757430ab6885bec/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:13:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cb3edbbf21ec311702299e6c5317480c4712560d1e8232ade757430ab6885bec/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:13:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cb3edbbf21ec311702299e6c5317480c4712560d1e8232ade757430ab6885bec/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:13:42 compute-0 podman[414057]: 2025-10-11 02:13:42.814062282 +0000 UTC m=+0.249224675 container init 196499098f916aad1c4a789f29f5f264cf633a20b5dc7b90c9bc104807cb397c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_edison, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:13:42 compute-0 podman[414057]: 2025-10-11 02:13:42.829625127 +0000 UTC m=+0.264787480 container start 196499098f916aad1c4a789f29f5f264cf633a20b5dc7b90c9bc104807cb397c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_edison, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:13:42 compute-0 podman[414057]: 2025-10-11 02:13:42.83473174 +0000 UTC m=+0.269894173 container attach 196499098f916aad1c4a789f29f5f264cf633a20b5dc7b90c9bc104807cb397c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_edison, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, OSD_FLAVOR=default)
Oct 11 02:13:42 compute-0 nova_compute[356901]: 2025-10-11 02:13:42.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:13:42 compute-0 ceph-mon[191930]: pgmap v1065: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:42 compute-0 nova_compute[356901]: 2025-10-11 02:13:42.910 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:13:42 compute-0 nova_compute[356901]: 2025-10-11 02:13:42.910 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:13:42 compute-0 nova_compute[356901]: 2025-10-11 02:13:42.911 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:13:42 compute-0 nova_compute[356901]: 2025-10-11 02:13:42.969 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944
Oct 11 02:13:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1066: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:43 compute-0 sshd-session[413659]: Failed password for invalid user user from 121.227.153.123 port 58888 ssh2
Oct 11 02:13:43 compute-0 condescending_edison[414074]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:13:43 compute-0 condescending_edison[414074]: --> relative data size: 1.0
Oct 11 02:13:43 compute-0 condescending_edison[414074]: --> All data devices are unavailable
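The condescending_edison output above reads like a ceph-volume "lvm batch" dry-run report: three LVM data devices were offered and all were rejected as unavailable, consistent with the LVs already carrying the OSDs listed a few seconds later. A sketch of reproducing the same report by hand, mirroring the cephadm ceph-volume invocation shape visible elsewhere in this log (the fsid and LV paths are copied from the log; treating this as a batch report is an inference, not something the log states):

    # Re-run the dry-run report manually via cephadm's ceph-volume wrapper.
    import subprocess

    FSID = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"
    LVS = ["/dev/ceph_vg0/ceph_lv0", "/dev/ceph_vg1/ceph_lv1",
           "/dev/ceph_vg2/ceph_lv2"]

    subprocess.run(
        ["cephadm", "ceph-volume", "--fsid", FSID, "--",
         "lvm", "batch", "--report", "--format", "json", *LVS],
        check=True,
    )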
Oct 11 02:13:43 compute-0 podman[414057]: 2025-10-11 02:13:43.979513847 +0000 UTC m=+1.414676190 container died 196499098f916aad1c4a789f29f5f264cf633a20b5dc7b90c9bc104807cb397c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_edison, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2)
Oct 11 02:13:43 compute-0 systemd[1]: libpod-196499098f916aad1c4a789f29f5f264cf633a20b5dc7b90c9bc104807cb397c.scope: Deactivated successfully.
Oct 11 02:13:43 compute-0 systemd[1]: libpod-196499098f916aad1c4a789f29f5f264cf633a20b5dc7b90c9bc104807cb397c.scope: Consumed 1.094s CPU time.
Oct 11 02:13:44 compute-0 systemd[1]: var-lib-containers-storage-overlay-cb3edbbf21ec311702299e6c5317480c4712560d1e8232ade757430ab6885bec-merged.mount: Deactivated successfully.
Oct 11 02:13:44 compute-0 podman[414057]: 2025-10-11 02:13:44.072168867 +0000 UTC m=+1.507331210 container remove 196499098f916aad1c4a789f29f5f264cf633a20b5dc7b90c9bc104807cb397c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_edison, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 02:13:44 compute-0 systemd[1]: libpod-conmon-196499098f916aad1c4a789f29f5f264cf633a20b5dc7b90c9bc104807cb397c.scope: Deactivated successfully.
Oct 11 02:13:44 compute-0 sudo[413956]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:44 compute-0 sudo[414116]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:13:44 compute-0 sudo[414116]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:44 compute-0 sudo[414116]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:44 compute-0 sudo[414141]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:13:44 compute-0 sudo[414141]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:44 compute-0 sudo[414141]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:44 compute-0 sudo[414166]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:13:44 compute-0 sudo[414166]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:44 compute-0 sudo[414166]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:44 compute-0 sudo[414191]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:13:44 compute-0 sudo[414191]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:44 compute-0 nova_compute[356901]: 2025-10-11 02:13:44.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:13:44 compute-0 nova_compute[356901]: 2025-10-11 02:13:44.898 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:13:44 compute-0 nova_compute[356901]: 2025-10-11 02:13:44.899 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:13:44 compute-0 ceph-mon[191930]: pgmap v1066: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:44 compute-0 nova_compute[356901]: 2025-10-11 02:13:44.928 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:13:44 compute-0 nova_compute[356901]: 2025-10-11 02:13:44.930 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:13:44 compute-0 nova_compute[356901]: 2025-10-11 02:13:44.931 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:13:44 compute-0 nova_compute[356901]: 2025-10-11 02:13:44.932 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:13:44 compute-0 nova_compute[356901]: 2025-10-11 02:13:44.933 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:13:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:13:45 compute-0 podman[414256]: 2025-10-11 02:13:45.172095936 +0000 UTC m=+0.089389508 container create 84cf64021e2182588ff0cb525f1a1b1e890fb605a5e511b3b35114027936c0af (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_galois, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:13:45 compute-0 podman[414256]: 2025-10-11 02:13:45.137759831 +0000 UTC m=+0.055053403 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:13:45 compute-0 systemd[1]: Started libpod-conmon-84cf64021e2182588ff0cb525f1a1b1e890fb605a5e511b3b35114027936c0af.scope.
Oct 11 02:13:45 compute-0 sshd-session[413659]: Connection closed by invalid user user 121.227.153.123 port 58888 [preauth]
Oct 11 02:13:45 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:13:45 compute-0 podman[414256]: 2025-10-11 02:13:45.356899143 +0000 UTC m=+0.274192725 container init 84cf64021e2182588ff0cb525f1a1b1e890fb605a5e511b3b35114027936c0af (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_galois, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0)
Oct 11 02:13:45 compute-0 podman[414256]: 2025-10-11 02:13:45.369836638 +0000 UTC m=+0.287130210 container start 84cf64021e2182588ff0cb525f1a1b1e890fb605a5e511b3b35114027936c0af (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_galois, org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:13:45 compute-0 podman[414256]: 2025-10-11 02:13:45.376186186 +0000 UTC m=+0.293479738 container attach 84cf64021e2182588ff0cb525f1a1b1e890fb605a5e511b3b35114027936c0af (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_galois, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 02:13:45 compute-0 elastic_galois[414291]: 167 167
Oct 11 02:13:45 compute-0 podman[414256]: 2025-10-11 02:13:45.381312329 +0000 UTC m=+0.298605911 container died 84cf64021e2182588ff0cb525f1a1b1e890fb605a5e511b3b35114027936c0af (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_galois, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:13:45 compute-0 systemd[1]: libpod-84cf64021e2182588ff0cb525f1a1b1e890fb605a5e511b3b35114027936c0af.scope: Deactivated successfully.
Oct 11 02:13:45 compute-0 systemd[1]: var-lib-containers-storage-overlay-e9d80c3f6bf0ed906d847b075afd562624d445b325b6841a5c761bf9c0cb244b-merged.mount: Deactivated successfully.
Oct 11 02:13:45 compute-0 podman[414256]: 2025-10-11 02:13:45.453827835 +0000 UTC m=+0.371121387 container remove 84cf64021e2182588ff0cb525f1a1b1e890fb605a5e511b3b35114027936c0af (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_galois, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.license=GPLv2)
Oct 11 02:13:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:13:45 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2896894267' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:13:45 compute-0 systemd[1]: libpod-conmon-84cf64021e2182588ff0cb525f1a1b1e890fb605a5e511b3b35114027936c0af.scope: Deactivated successfully.
Oct 11 02:13:45 compute-0 nova_compute[356901]: 2025-10-11 02:13:45.515 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.581s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
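nova-compute's resource audit shells out to "ceph df --format=json" (started at 02:13:44.933, returned 0 here in 0.581s) to size its RBD-backed disk. A minimal sketch of that round trip, using the exact command line from the log; the JSON field names assume the standard "stats" totals that ceph df emits, which this log does not itself show:

    # Same command nova runs via oslo processutils, parsed for cluster totals.
    import json
    import subprocess

    cmd = ["ceph", "df", "--format=json", "--id", "openstack",
           "--conf", "/etc/ceph/ceph.conf"]
    df = json.loads(subprocess.check_output(cmd, text=True))

    total = df["stats"]["total_bytes"]        # assumed standard layout
    avail = df["stats"]["total_avail_bytes"]
    print(f"free: {avail / 2**30:.2f} GiB of {total / 2**30:.2f} GiB")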
Oct 11 02:13:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1067: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:45 compute-0 podman[414318]: 2025-10-11 02:13:45.73683625 +0000 UTC m=+0.096628337 container create cf3bd8e2cd4b12b913f159ff9426228e6212240a709ec1faf2356f7390feadec (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stupefied_matsumoto, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=reef)
Oct 11 02:13:45 compute-0 podman[414318]: 2025-10-11 02:13:45.701079701 +0000 UTC m=+0.060871858 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:13:45 compute-0 systemd[1]: Started libpod-conmon-cf3bd8e2cd4b12b913f159ff9426228e6212240a709ec1faf2356f7390feadec.scope.
Oct 11 02:13:45 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:13:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c92bdcd170cecf4cd6d4c4f57a43b9a53e2c0fe7357efafb0faf2c674d3629b0/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:13:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c92bdcd170cecf4cd6d4c4f57a43b9a53e2c0fe7357efafb0faf2c674d3629b0/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:13:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c92bdcd170cecf4cd6d4c4f57a43b9a53e2c0fe7357efafb0faf2c674d3629b0/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:13:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c92bdcd170cecf4cd6d4c4f57a43b9a53e2c0fe7357efafb0faf2c674d3629b0/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:13:45 compute-0 podman[414318]: 2025-10-11 02:13:45.882657694 +0000 UTC m=+0.242449791 container init cf3bd8e2cd4b12b913f159ff9426228e6212240a709ec1faf2356f7390feadec (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stupefied_matsumoto, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS)
Oct 11 02:13:45 compute-0 podman[414318]: 2025-10-11 02:13:45.917567173 +0000 UTC m=+0.277359260 container start cf3bd8e2cd4b12b913f159ff9426228e6212240a709ec1faf2356f7390feadec (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stupefied_matsumoto, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:13:45 compute-0 podman[414318]: 2025-10-11 02:13:45.922681785 +0000 UTC m=+0.282473882 container attach cf3bd8e2cd4b12b913f159ff9426228e6212240a709ec1faf2356f7390feadec (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stupefied_matsumoto, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:13:45 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2896894267' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:13:45 compute-0 nova_compute[356901]: 2025-10-11 02:13:45.943 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:13:45 compute-0 nova_compute[356901]: 2025-10-11 02:13:45.946 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=4516MB free_disk=59.98828125GB free_vcpus=8 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:13:45 compute-0 nova_compute[356901]: 2025-10-11 02:13:45.947 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:13:45 compute-0 nova_compute[356901]: 2025-10-11 02:13:45.947 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:13:46 compute-0 nova_compute[356901]: 2025-10-11 02:13:46.034 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 0 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:13:46 compute-0 nova_compute[356901]: 2025-10-11 02:13:46.034 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=512MB phys_disk=59GB used_disk=0GB total_vcpus=8 used_vcpus=0 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:13:46 compute-0 nova_compute[356901]: 2025-10-11 02:13:46.064 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:13:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:13:46 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1034711906' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:13:46 compute-0 nova_compute[356901]: 2025-10-11 02:13:46.604 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.540s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:13:46 compute-0 nova_compute[356901]: 2025-10-11 02:13:46.614 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:13:46 compute-0 nova_compute[356901]: 2025-10-11 02:13:46.631 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
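The inventory dict in the line above pairs each resource class with a total, a reservation, and an allocation ratio. Under Placement's usual capacity rule, capacity = (total - reserved) * allocation_ratio (an assumption about the defaults in play here, not something the log states), this host schedules as 32 VCPUs, 7168 MB of RAM, and about 53 GB of disk:

    # Worked example of the capacity implied by the logged inventory.
    inventory = {
        "VCPU":      {"total": 8,    "reserved": 0,   "allocation_ratio": 4.0},
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "DISK_GB":   {"total": 59,   "reserved": 0,   "allocation_ratio": 0.9},
    }
    for rc, inv in inventory.items():
        cap = (inv["total"] - inv["reserved"]) * inv["allocation_ratio"]
        print(rc, cap)  # VCPU 32.0, MEMORY_MB 7168.0, DISK_GB 53.1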
Oct 11 02:13:46 compute-0 nova_compute[356901]: 2025-10-11 02:13:46.633 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:13:46 compute-0 nova_compute[356901]: 2025-10-11 02:13:46.634 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.686s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:13:46 compute-0 sshd-session[414311]: Invalid user user from 121.227.153.123 port 58904
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]: {
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:     "0": [
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:         {
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "devices": [
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "/dev/loop3"
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             ],
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "lv_name": "ceph_lv0",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "lv_size": "21470642176",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "name": "ceph_lv0",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "tags": {
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.cluster_name": "ceph",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.crush_device_class": "",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.encrypted": "0",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.osd_id": "0",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.type": "block",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.vdo": "0"
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             },
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "type": "block",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "vg_name": "ceph_vg0"
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:         }
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:     ],
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:     "1": [
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:         {
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "devices": [
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "/dev/loop4"
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             ],
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "lv_name": "ceph_lv1",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "lv_size": "21470642176",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "name": "ceph_lv1",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "tags": {
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.cluster_name": "ceph",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.crush_device_class": "",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.encrypted": "0",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.osd_id": "1",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.type": "block",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.vdo": "0"
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             },
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "type": "block",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "vg_name": "ceph_vg1"
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:         }
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:     ],
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:     "2": [
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:         {
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "devices": [
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "/dev/loop5"
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             ],
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "lv_name": "ceph_lv2",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "lv_size": "21470642176",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "name": "ceph_lv2",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "tags": {
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.cluster_name": "ceph",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.crush_device_class": "",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.encrypted": "0",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.osd_id": "2",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.type": "block",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:                 "ceph.vdo": "0"
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             },
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "type": "block",
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:             "vg_name": "ceph_vg2"
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:         }
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]:     ]
Oct 11 02:13:46 compute-0 stupefied_matsumoto[414335]: }
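The JSON the stupefied_matsumoto container just printed is the output of the "lvm list --format json" call logged at 02:13:44: a dict keyed by OSD id, each value a list of LV records. A small sketch that flattens it into an OSD-id to device map, using only fields visible in the output above (the raw string variable is hypothetical):

    # Flatten ceph-volume's lvm-list JSON into osd_id -> (lv_path, devices, fsid).
    import json

    def osd_devices(lvm_list: dict) -> dict:
        out = {}
        for osd_id, lvs in lvm_list.items():
            for lv in lvs:
                if lv.get("type") == "block":  # block LV carries the OSD data
                    out[int(osd_id)] = (lv["lv_path"], lv["devices"],
                                        lv["tags"]["ceph.osd_fsid"])
        return out

    # e.g. osd_devices(json.loads(raw))[0]
    # -> ('/dev/ceph_vg0/ceph_lv0', ['/dev/loop3'], 'a9c7940d-...')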
Oct 11 02:13:46 compute-0 systemd[1]: libpod-cf3bd8e2cd4b12b913f159ff9426228e6212240a709ec1faf2356f7390feadec.scope: Deactivated successfully.
Oct 11 02:13:46 compute-0 podman[414318]: 2025-10-11 02:13:46.823074954 +0000 UTC m=+1.182867071 container died cf3bd8e2cd4b12b913f159ff9426228e6212240a709ec1faf2356f7390feadec (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stupefied_matsumoto, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0)
Oct 11 02:13:46 compute-0 systemd[1]: var-lib-containers-storage-overlay-c92bdcd170cecf4cd6d4c4f57a43b9a53e2c0fe7357efafb0faf2c674d3629b0-merged.mount: Deactivated successfully.
Oct 11 02:13:46 compute-0 podman[414318]: 2025-10-11 02:13:46.930671216 +0000 UTC m=+1.290463313 container remove cf3bd8e2cd4b12b913f159ff9426228e6212240a709ec1faf2356f7390feadec (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stupefied_matsumoto, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:13:46 compute-0 sshd-session[414311]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:13:46 compute-0 sshd-session[414311]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:13:46 compute-0 ceph-mon[191930]: pgmap v1067: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:46 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1034711906' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:13:46 compute-0 systemd[1]: libpod-conmon-cf3bd8e2cd4b12b913f159ff9426228e6212240a709ec1faf2356f7390feadec.scope: Deactivated successfully.
Oct 11 02:13:46 compute-0 sudo[414191]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:47 compute-0 sudo[414378]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:13:47 compute-0 sudo[414378]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:47 compute-0 sudo[414378]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:47 compute-0 sudo[414403]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:13:47 compute-0 sudo[414403]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:47 compute-0 sudo[414403]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:47 compute-0 podman[414429]: 2025-10-11 02:13:47.462091014 +0000 UTC m=+0.112005854 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, container_name=ovn_metadata_agent, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, managed_by=edpm_ansible)
Oct 11 02:13:47 compute-0 podman[414428]: 2025-10-11 02:13:47.466147999 +0000 UTC m=+0.125086498 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm, container_name=ceilometer_agent_compute)
Oct 11 02:13:47 compute-0 sudo[414446]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:13:47 compute-0 sudo[414446]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:47 compute-0 podman[414427]: 2025-10-11 02:13:47.484422514 +0000 UTC m=+0.143219574 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:13:47 compute-0 sudo[414446]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:47 compute-0 sudo[414516]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:13:47 compute-0 sudo[414516]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:47 compute-0 podman[414509]: 2025-10-11 02:13:47.61403042 +0000 UTC m=+0.127641849 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, container_name=ovn_controller)
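
The podman "container health_status ... health_status=healthy" events above are podman periodically executing each container's configured healthcheck (the 'test': '/openstack/healthcheck' entry in config_data) and recording the result. A minimal sketch of triggering the same check by hand, assuming local podman access and that a container named ovn_controller (as in the log) exists with a healthcheck configured:

    # Minimal sketch: run a container's configured healthcheck on demand.
    # Assumes podman is installed and a container named "ovn_controller"
    # (as in the log above) exists with a healthcheck defined.
    import subprocess

    result = subprocess.run(
        ["podman", "healthcheck", "run", "ovn_controller"],
        capture_output=True, text=True,
    )
    # podman exits 0 when the check passes ("healthy"), non-zero otherwise.
    print("healthy" if result.returncode == 0
          else f"unhealthy: {result.stdout or result.stderr}")
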
Oct 11 02:13:47 compute-0 nova_compute[356901]: 2025-10-11 02:13:47.635 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:13:47 compute-0 nova_compute[356901]: 2025-10-11 02:13:47.635 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:13:47 compute-0 nova_compute[356901]: 2025-10-11 02:13:47.635 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
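
The three nova_compute lines above come from oslo.service's periodic task runner; _reclaim_queued_deletes returns early because reclaim_instance_interval is not positive. A minimal sketch of the same pattern, assuming oslo.service and oslo.config are installed; DemoManager and _poll are illustrative names, not nova's actual classes:

    # Minimal sketch of the oslo.service periodic-task pattern seen above.
    from oslo_config import cfg
    from oslo_service import periodic_task

    CONF = cfg.CONF

    class DemoManager(periodic_task.PeriodicTasks):
        def __init__(self):
            super().__init__(CONF)

        @periodic_task.periodic_task(spacing=10, run_immediately=True)
        def _poll(self, context):
            # Guard clauses like nova's "CONF.reclaim_instance_interval <= 0,
            # skipping..." simply return before doing any work.
            print("periodic task ran")

    mgr = DemoManager()
    mgr.run_periodic_tasks(None)  # normally driven by a looping call
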
Oct 11 02:13:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1068: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:48 compute-0 podman[414599]: 2025-10-11 02:13:48.110577948 +0000 UTC m=+0.077689258 container create 135b429468b474810f5f4211ab146a15455f185f92945fa198131f221ec06e3a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_dhawan, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507)
Oct 11 02:13:48 compute-0 podman[414599]: 2025-10-11 02:13:48.084321124 +0000 UTC m=+0.051432424 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:13:48 compute-0 systemd[1]: Started libpod-conmon-135b429468b474810f5f4211ab146a15455f185f92945fa198131f221ec06e3a.scope.
Oct 11 02:13:48 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:13:48 compute-0 podman[414599]: 2025-10-11 02:13:48.256399042 +0000 UTC m=+0.223510412 container init 135b429468b474810f5f4211ab146a15455f185f92945fa198131f221ec06e3a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_dhawan, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:13:48 compute-0 podman[414599]: 2025-10-11 02:13:48.276567606 +0000 UTC m=+0.243678926 container start 135b429468b474810f5f4211ab146a15455f185f92945fa198131f221ec06e3a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_dhawan, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:13:48 compute-0 podman[414599]: 2025-10-11 02:13:48.283382029 +0000 UTC m=+0.250493399 container attach 135b429468b474810f5f4211ab146a15455f185f92945fa198131f221ec06e3a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_dhawan, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:13:48 compute-0 brave_dhawan[414615]: 167 167
Oct 11 02:13:48 compute-0 systemd[1]: libpod-135b429468b474810f5f4211ab146a15455f185f92945fa198131f221ec06e3a.scope: Deactivated successfully.
Oct 11 02:13:48 compute-0 podman[414599]: 2025-10-11 02:13:48.289368962 +0000 UTC m=+0.256480252 container died 135b429468b474810f5f4211ab146a15455f185f92945fa198131f221ec06e3a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_dhawan, CEPH_REF=reef, io.buildah.version=1.39.3, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:13:48 compute-0 systemd[1]: var-lib-containers-storage-overlay-4baa93a7a19298e45a0dddfca64ac60c8f6e53aecb799aef8aada6fee9caefcd-merged.mount: Deactivated successfully.
Oct 11 02:13:48 compute-0 podman[414599]: 2025-10-11 02:13:48.364831156 +0000 UTC m=+0.331942466 container remove 135b429468b474810f5f4211ab146a15455f185f92945fa198131f221ec06e3a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_dhawan, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 02:13:48 compute-0 systemd[1]: libpod-conmon-135b429468b474810f5f4211ab146a15455f185f92945fa198131f221ec06e3a.scope: Deactivated successfully.
Oct 11 02:13:48 compute-0 sshd-session[414311]: Failed password for invalid user user from 121.227.153.123 port 58904 ssh2
Oct 11 02:13:48 compute-0 podman[414638]: 2025-10-11 02:13:48.631754681 +0000 UTC m=+0.063110313 container create d598067de693ce3d022f6894a27b0760b8148bea3d7defc27c9fc443b53a1f25 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_pasteur, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True)
Oct 11 02:13:48 compute-0 podman[414638]: 2025-10-11 02:13:48.607371964 +0000 UTC m=+0.038727686 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:13:48 compute-0 systemd[1]: Started libpod-conmon-d598067de693ce3d022f6894a27b0760b8148bea3d7defc27c9fc443b53a1f25.scope.
Oct 11 02:13:48 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:13:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/61602190a57e5be6c9d638b428e83fbacc12eaf50478dbd9ddbcd2aea7ce5975/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:13:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/61602190a57e5be6c9d638b428e83fbacc12eaf50478dbd9ddbcd2aea7ce5975/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:13:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/61602190a57e5be6c9d638b428e83fbacc12eaf50478dbd9ddbcd2aea7ce5975/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:13:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/61602190a57e5be6c9d638b428e83fbacc12eaf50478dbd9ddbcd2aea7ce5975/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:13:48 compute-0 podman[414638]: 2025-10-11 02:13:48.803022849 +0000 UTC m=+0.234378521 container init d598067de693ce3d022f6894a27b0760b8148bea3d7defc27c9fc443b53a1f25 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_pasteur, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:13:48 compute-0 podman[414638]: 2025-10-11 02:13:48.826443287 +0000 UTC m=+0.257798949 container start d598067de693ce3d022f6894a27b0760b8148bea3d7defc27c9fc443b53a1f25 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_pasteur, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 02:13:48 compute-0 podman[414638]: 2025-10-11 02:13:48.833067602 +0000 UTC m=+0.264423274 container attach d598067de693ce3d022f6894a27b0760b8148bea3d7defc27c9fc443b53a1f25 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_pasteur, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_REF=reef)
Oct 11 02:13:48 compute-0 sshd-session[414311]: Connection closed by invalid user user 121.227.153.123 port 58904 [preauth]
Oct 11 02:13:48 compute-0 ceph-mon[191930]: pgmap v1068: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1069: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]: {
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:         "osd_id": 1,
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:         "type": "bluestore"
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:     },
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:         "osd_id": 2,
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:         "type": "bluestore"
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:     },
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:         "osd_id": 0,
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:         "type": "bluestore"
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]:     }
Oct 11 02:13:50 compute-0 affectionate_pasteur[414654]: }
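
The affectionate_pasteur output above is the JSON produced by the `ceph-volume ... raw list --format json` invocation logged at the sudo line earlier, keyed by OSD UUID. A small sketch of consuming it, assuming the JSON has been captured to raw_list.json; the field names are taken directly from the output shown:

    # Minimal sketch: map OSD ids to devices from the `ceph-volume raw list
    # --format json` output above (assumed saved to raw_list.json).
    import json

    with open("raw_list.json") as f:
        osds = json.load(f)

    for osd_uuid, info in sorted(osds.items(), key=lambda kv: kv[1]["osd_id"]):
        print(f"osd.{info['osd_id']} -> {info['device']} "
              f"(type={info['type']}, fsid={info['ceph_fsid']})")
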
Oct 11 02:13:50 compute-0 systemd[1]: libpod-d598067de693ce3d022f6894a27b0760b8148bea3d7defc27c9fc443b53a1f25.scope: Deactivated successfully.
Oct 11 02:13:50 compute-0 podman[414638]: 2025-10-11 02:13:50.19436002 +0000 UTC m=+1.625715692 container died d598067de693ce3d022f6894a27b0760b8148bea3d7defc27c9fc443b53a1f25 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_pasteur, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 02:13:50 compute-0 systemd[1]: libpod-d598067de693ce3d022f6894a27b0760b8148bea3d7defc27c9fc443b53a1f25.scope: Consumed 1.351s CPU time.
Oct 11 02:13:50 compute-0 systemd[1]: var-lib-containers-storage-overlay-61602190a57e5be6c9d638b428e83fbacc12eaf50478dbd9ddbcd2aea7ce5975-merged.mount: Deactivated successfully.
Oct 11 02:13:50 compute-0 sshd-session[414659]: Invalid user user from 121.227.153.123 port 58920
Oct 11 02:13:50 compute-0 podman[414638]: 2025-10-11 02:13:50.317092463 +0000 UTC m=+1.748448135 container remove d598067de693ce3d022f6894a27b0760b8148bea3d7defc27c9fc443b53a1f25 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_pasteur, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:13:50 compute-0 systemd[1]: libpod-conmon-d598067de693ce3d022f6894a27b0760b8148bea3d7defc27c9fc443b53a1f25.scope: Deactivated successfully.
Oct 11 02:13:50 compute-0 sudo[414516]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:13:50 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:13:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:13:50 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:13:50 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev a0dcf717-b34b-4268-9620-b39d098aa1ca does not exist
Oct 11 02:13:50 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev b5019fe4-332d-42e5-9dcd-39ce8211fa4a does not exist
Oct 11 02:13:50 compute-0 sshd-session[414659]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:13:50 compute-0 sshd-session[414659]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:13:50 compute-0 sudo[414704]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:13:50 compute-0 sudo[414704]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:50 compute-0 sudo[414704]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:50 compute-0 sudo[414729]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:13:50 compute-0 sudo[414729]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:13:50 compute-0 sudo[414729]: pam_unix(sudo:session): session closed for user root
Oct 11 02:13:50 compute-0 ceph-mon[191930]: pgmap v1069: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:50 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:13:50 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:13:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1070: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:52 compute-0 sshd-session[414659]: Failed password for invalid user user from 121.227.153.123 port 58920 ssh2
Oct 11 02:13:52 compute-0 ceph-mon[191930]: pgmap v1070: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:53 compute-0 podman[414754]: 2025-10-11 02:13:53.27770305 +0000 UTC m=+0.159909425 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:13:53 compute-0 podman[414755]: 2025-10-11 02:13:53.283145291 +0000 UTC m=+0.164715676 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=iscsid, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, container_name=iscsid, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS)
Oct 11 02:13:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1071: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:54 compute-0 sshd-session[414659]: Connection closed by invalid user user 121.227.153.123 port 58920 [preauth]
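
The sshd-session lines from 121.227.153.123 repeat one probe cycle: "Invalid user user", a PAM authentication failure, "Failed password", then a "[preauth]" disconnect, every few seconds from a fresh source port. A small sketch of summarizing such attempts from a captured journal, assuming lines in the format above were saved to auth.log; the regex only targets the "Failed password for invalid user" form shown here:

    # Minimal sketch: count failed SSH probes per source IP from captured
    # journal lines (assumed saved to auth.log in the format shown above).
    import re
    from collections import Counter

    pattern = re.compile(
        r"Failed password for invalid user (\S+) from (\S+) port (\d+)"
    )
    hits = Counter()
    with open("auth.log") as f:
        for line in f:
            m = pattern.search(line)
            if m:
                user, ip, _port = m.groups()
                hits[(ip, user)] += 1

    for (ip, user), n in hits.most_common():
        print(f"{ip} tried user '{user}' {n} times")
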
Oct 11 02:13:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:13:54.835 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:13:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:13:54.835 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:13:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:13:54.835 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
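
The Acquiring/acquired/released trio above is the standard DEBUG trace emitted by oslo.concurrency's lockutils when neutron's ProcessMonitor serializes _check_child_processes. A minimal sketch of the decorator that produces these messages, assuming oslo.concurrency is installed; the function name here is illustrative:

    # Minimal sketch of the oslo.concurrency lock whose acquire/release
    # trace appears above. With DEBUG logging enabled, lockutils emits the
    # same three messages around the decorated call.
    import logging

    from oslo_concurrency import lockutils

    logging.basicConfig(level=logging.DEBUG)

    @lockutils.synchronized("_check_child_processes")
    def check_child_processes():
        # Runs with the named in-process lock held, one caller at a time.
        pass

    check_child_processes()
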
Oct 11 02:13:55 compute-0 ceph-mon[191930]: pgmap v1071: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:13:55 compute-0 sshd-session[414792]: Invalid user user from 121.227.153.123 port 44450
Oct 11 02:13:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1072: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:55 compute-0 sshd-session[414792]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:13:55 compute-0 sshd-session[414792]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:13:56
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.meta', 'default.rgw.control', 'images', 'default.rgw.log', 'cephfs.cephfs.data', 'vms', '.rgw.root', 'volumes', 'backups', '.mgr', 'cephfs.cephfs.meta']
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:13:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:13:57 compute-0 ceph-mon[191930]: pgmap v1072: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1073: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:57 compute-0 sshd-session[414792]: Failed password for invalid user user from 121.227.153.123 port 44450 ssh2
Oct 11 02:13:59 compute-0 ceph-mon[191930]: pgmap v1073: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:59 compute-0 sshd-session[414792]: Connection closed by invalid user user 121.227.153.123 port 44450 [preauth]
Oct 11 02:13:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1074: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:13:59 compute-0 podman[157119]: time="2025-10-11T02:13:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:13:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:13:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:13:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:13:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8527 "" "Go-http-client/1.1"
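
The two GET lines above are the podman service answering libpod REST calls from the prometheus-podman-exporter, which reaches it through the socket mounted into that container (CONTAINER_HOST=unix:///run/podman/podman.sock in the podman_exporter config_data earlier). A stdlib-only sketch of the same containers/json call, assuming read access to /run/podman/podman.sock; the endpoint path and query are copied from the log:

    # Minimal sketch: call the libpod REST endpoint from the log over the
    # podman unix socket (assumes read access to /run/podman/podman.sock).
    import http.client
    import json
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        def __init__(self, path):
            super().__init__("localhost")
            self._path = path

        def connect(self):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(self._path)
            self.sock = sock

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    body = conn.getresponse().read()
    for ctr in json.loads(body):
        print(ctr.get("Names"), ctr.get("State"))
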
Oct 11 02:14:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:14:00 compute-0 sshd-session[414794]: Invalid user user from 121.227.153.123 port 51692
Oct 11 02:14:01 compute-0 sshd-session[414794]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:14:01 compute-0 sshd-session[414794]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:14:01 compute-0 ceph-mon[191930]: pgmap v1074: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:01 compute-0 openstack_network_exporter[374316]: ERROR   02:14:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:14:01 compute-0 openstack_network_exporter[374316]: ERROR   02:14:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:14:01 compute-0 openstack_network_exporter[374316]: ERROR   02:14:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:14:01 compute-0 openstack_network_exporter[374316]: ERROR   02:14:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:14:01 compute-0 openstack_network_exporter[374316]: ERROR   02:14:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:14:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1075: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:02 compute-0 sshd-session[414794]: Failed password for invalid user user from 121.227.153.123 port 51692 ssh2
Oct 11 02:14:03 compute-0 ceph-mon[191930]: pgmap v1075: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1076: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:04 compute-0 podman[414796]: 2025-10-11 02:14:04.233842752 +0000 UTC m=+0.117760888 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:14:04 compute-0 podman[414818]: 2025-10-11 02:14:04.403985647 +0000 UTC m=+0.125775861 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, version=9.6, io.openshift.tags=minimal rhel9, url=https://catalog.redhat.com/en/search?searchType=containers, io.buildah.version=1.33.7, name=ubi9-minimal, build-date=2025-08-20T13:12:41, architecture=x86_64, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, com.redhat.component=ubi9-minimal-container, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, managed_by=edpm_ansible, distribution-scope=public, maintainer=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=1755695350, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vendor=Red Hat, Inc.)
Oct 11 02:14:04 compute-0 sshd-session[414794]: Connection closed by invalid user user 121.227.153.123 port 51692 [preauth]
Oct 11 02:14:05 compute-0 ceph-mon[191930]: pgmap v1076: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:14:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1077: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:05 compute-0 sshd-session[414838]: Invalid user user from 121.227.153.123 port 51696
Oct 11 02:14:05 compute-0 podman[414840]: 2025-10-11 02:14:05.959842528 +0000 UTC m=+0.131622727 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
Oct 11 02:14:06 compute-0 sshd-session[414838]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:14:06 compute-0 sshd-session[414838]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:14:06 compute-0 ceph-mon[191930]: pgmap v1077: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:14:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
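
The pg_autoscaler lines above follow a simple proportion: each pool's raw pg target is its used-space fraction times its bias times a PG budget, and the logged numbers are consistent with a budget of 300, i.e. this cluster's 3 OSDs times the default mon_target_pg_per_osd of 100 (for example 7.185749983720779e-06 x 1.0 x 300 = 0.0021557249951162337, exactly as logged for '.mgr'). The "quantized to" step then snaps that target to a power of two with per-pool floors, which this sketch does not model. A quick check under those assumptions:

    # Minimal check of the proportions in the pg_autoscaler lines above,
    # assuming PG budget = 3 OSDs x mon_target_pg_per_osd (default 100).
    BUDGET = 3 * 100

    pools = {
        ".mgr": (7.185749983720779e-06, 1.0),
        "cephfs.cephfs.meta": (5.087256625643029e-07, 4.0),
    }
    for name, (used_fraction, bias) in pools.items():
        target = used_fraction * bias * BUDGET
        # Matches the logged "pg target" before power-of-two quantization.
        print(f"{name}: pg target {target:.16g}")
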
Oct 11 02:14:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1078: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:07 compute-0 sshd-session[414838]: Failed password for invalid user user from 121.227.153.123 port 51696 ssh2
Oct 11 02:14:08 compute-0 podman[414860]: 2025-10-11 02:14:08.263954754 +0000 UTC m=+0.147163761 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.tags=base rhel9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, release=1214.1726694543, architecture=x86_64, maintainer=Red Hat, Inc., vcs-type=git, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, version=9.4, com.redhat.component=ubi9-container, managed_by=edpm_ansible, name=ubi9, summary=Provides the latest release of Red Hat Universal Base Image 9., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm, io.openshift.expose-services=, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., release-0.7.12=, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=kepler, io.buildah.version=1.29.0)
Oct 11 02:14:08 compute-0 ceph-mon[191930]: pgmap v1078: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "version", "format": "json"} v 0) v1
Oct 11 02:14:09 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3428005334' entity='client.openstack' cmd=[{"prefix": "version", "format": "json"}]: dispatch
Oct 11 02:14:09 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14381 -' entity='client.openstack' cmd=[{"prefix": "fs volume ls", "format": "json"}]: dispatch
Oct 11 02:14:09 compute-0 ceph-mgr[192233]: [volumes INFO volumes.module] Starting _cmd_fs_volume_ls(format:json, prefix:fs volume ls) < ""
Oct 11 02:14:09 compute-0 ceph-mgr[192233]: [volumes INFO volumes.module] Finishing _cmd_fs_volume_ls(format:json, prefix:fs volume ls) < ""
Oct 11 02:14:09 compute-0 sshd-session[414838]: Connection closed by invalid user user 121.227.153.123 port 51696 [preauth]
Oct 11 02:14:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1079: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:09 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3428005334' entity='client.openstack' cmd=[{"prefix": "version", "format": "json"}]: dispatch
Oct 11 02:14:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:14:10 compute-0 ceph-mon[191930]: from='client.14381 -' entity='client.openstack' cmd=[{"prefix": "fs volume ls", "format": "json"}]: dispatch
Oct 11 02:14:10 compute-0 ceph-mon[191930]: pgmap v1079: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:10 compute-0 sshd-session[414879]: Invalid user user from 121.227.153.123 port 36498
Oct 11 02:14:11 compute-0 sshd-session[414879]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:14:11 compute-0 sshd-session[414879]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:14:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1080: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:12 compute-0 ceph-mon[191930]: pgmap v1080: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1081: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:13 compute-0 sshd-session[414879]: Failed password for invalid user user from 121.227.153.123 port 36498 ssh2
Oct 11 02:14:14 compute-0 ceph-mon[191930]: pgmap v1081: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:14 compute-0 sshd-session[414879]: Connection closed by invalid user user 121.227.153.123 port 36498 [preauth]
Oct 11 02:14:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:14:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1082: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:16 compute-0 sshd-session[414881]: Invalid user user from 121.227.153.123 port 36514
Oct 11 02:14:16 compute-0 sshd-session[414881]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:14:16 compute-0 sshd-session[414881]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:14:16 compute-0 ceph-mon[191930]: pgmap v1082: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1083: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:18 compute-0 podman[414883]: 2025-10-11 02:14:18.238122978 +0000 UTC m=+0.124507010 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:14:18 compute-0 podman[414885]: 2025-10-11 02:14:18.254924053 +0000 UTC m=+0.119614801 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, io.buildah.version=1.41.4, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 02:14:18 compute-0 podman[414889]: 2025-10-11 02:14:18.264298637 +0000 UTC m=+0.122381631 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_managed=true, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:14:18 compute-0 podman[414884]: 2025-10-11 02:14:18.286938208 +0000 UTC m=+0.163039940 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_managed=true, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:14:18 compute-0 ceph-mon[191930]: pgmap v1083: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:19 compute-0 sshd-session[414881]: Failed password for invalid user user from 121.227.153.123 port 36514 ssh2
Oct 11 02:14:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1084: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:19 compute-0 sshd-session[414881]: Connection closed by invalid user user 121.227.153.123 port 36514 [preauth]
Oct 11 02:14:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:14:20 compute-0 ceph-mon[191930]: pgmap v1084: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:21 compute-0 sshd-session[414967]: Invalid user user from 121.227.153.123 port 52508
Oct 11 02:14:21 compute-0 sshd-session[414967]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:14:21 compute-0 sshd-session[414967]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:14:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1085: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:22 compute-0 ceph-mon[191930]: pgmap v1085: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:23 compute-0 sshd-session[414967]: Failed password for invalid user user from 121.227.153.123 port 52508 ssh2
Oct 11 02:14:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1086: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:24 compute-0 podman[414969]: 2025-10-11 02:14:24.249774797 +0000 UTC m=+0.140289081 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, container_name=multipathd, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd)
Oct 11 02:14:24 compute-0 podman[414970]: 2025-10-11 02:14:24.296644005 +0000 UTC m=+0.180032392 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_id=iscsid, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS)
Oct 11 02:14:24 compute-0 ceph-mon[191930]: pgmap v1086: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:24 compute-0 sshd-session[414967]: Connection closed by invalid user user 121.227.153.123 port 52508 [preauth]
Oct 11 02:14:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:14:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1087: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:26 compute-0 sshd-session[415008]: Invalid user user from 121.227.153.123 port 52520
Oct 11 02:14:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:14:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:14:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:14:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:14:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:14:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:14:26 compute-0 sshd-session[415008]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:14:26 compute-0 sshd-session[415008]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:14:26 compute-0 ceph-mon[191930]: pgmap v1087: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:14:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2243588010' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:14:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:14:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2243588010' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:14:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1088: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2243588010' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:14:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2243588010' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:14:28 compute-0 ceph-mon[191930]: pgmap v1088: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:29 compute-0 sshd-session[415008]: Failed password for invalid user user from 121.227.153.123 port 52520 ssh2
Oct 11 02:14:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1089: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:29 compute-0 podman[157119]: time="2025-10-11T02:14:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:14:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:14:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:14:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:14:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8527 "" "Go-http-client/1.1"
Oct 11 02:14:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:14:30 compute-0 sshd-session[415008]: Connection closed by invalid user user 121.227.153.123 port 52520 [preauth]
Oct 11 02:14:30 compute-0 ceph-mon[191930]: pgmap v1089: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:31 compute-0 openstack_network_exporter[374316]: ERROR   02:14:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:14:31 compute-0 openstack_network_exporter[374316]: ERROR   02:14:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:14:31 compute-0 openstack_network_exporter[374316]: ERROR   02:14:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:14:31 compute-0 openstack_network_exporter[374316]: ERROR   02:14:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:14:31 compute-0 openstack_network_exporter[374316]: ERROR   02:14:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:14:31 compute-0 sshd-session[415010]: Invalid user user from 121.227.153.123 port 45310
Oct 11 02:14:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1090: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:31 compute-0 sshd-session[415010]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:14:31 compute-0 sshd-session[415010]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:14:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "version", "format": "json"} v 0) v1
Oct 11 02:14:31 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/226678772' entity='client.openstack' cmd=[{"prefix": "version", "format": "json"}]: dispatch
Oct 11 02:14:31 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.14387 -' entity='client.openstack' cmd=[{"prefix": "fs volume ls", "format": "json"}]: dispatch
Oct 11 02:14:31 compute-0 ceph-mgr[192233]: [volumes INFO volumes.module] Starting _cmd_fs_volume_ls(format:json, prefix:fs volume ls) < ""
Oct 11 02:14:31 compute-0 ceph-mgr[192233]: [volumes INFO volumes.module] Finishing _cmd_fs_volume_ls(format:json, prefix:fs volume ls) < ""
Oct 11 02:14:31 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/226678772' entity='client.openstack' cmd=[{"prefix": "version", "format": "json"}]: dispatch
Oct 11 02:14:32 compute-0 ceph-mon[191930]: pgmap v1090: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:32 compute-0 ceph-mon[191930]: from='client.14387 -' entity='client.openstack' cmd=[{"prefix": "fs volume ls", "format": "json"}]: dispatch
Oct 11 02:14:33 compute-0 sshd-session[415010]: Failed password for invalid user user from 121.227.153.123 port 45310 ssh2
Oct 11 02:14:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1091: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:33 compute-0 sshd-session[415010]: Connection closed by invalid user user 121.227.153.123 port 45310 [preauth]
Oct 11 02:14:34 compute-0 ceph-mon[191930]: pgmap v1091: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:14:35 compute-0 podman[415015]: 2025-10-11 02:14:35.188784659 +0000 UTC m=+0.081130481 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:14:35 compute-0 podman[415014]: 2025-10-11 02:14:35.230379814 +0000 UTC m=+0.116357729 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-type=git, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, url=https://catalog.redhat.com/en/search?searchType=containers, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_id=edpm, vendor=Red Hat, Inc., io.openshift.tags=minimal rhel9, distribution-scope=public, managed_by=edpm_ansible, name=ubi9-minimal, architecture=x86_64, com.redhat.component=ubi9-minimal-container, container_name=openstack_network_exporter, io.buildah.version=1.33.7, build-date=2025-08-20T13:12:41, version=9.6, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=)
Oct 11 02:14:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1092: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:35 compute-0 sshd-session[415012]: Invalid user user from 121.227.153.123 port 45318
Oct 11 02:14:36 compute-0 sshd-session[415012]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:14:36 compute-0 sshd-session[415012]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:14:36 compute-0 podman[415053]: 2025-10-11 02:14:36.143072439 +0000 UTC m=+0.106693409 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, tcib_managed=true, config_id=edpm, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:14:36 compute-0 ceph-mon[191930]: pgmap v1092: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1093: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:37 compute-0 sshd-session[415012]: Failed password for invalid user user from 121.227.153.123 port 45318 ssh2
Oct 11 02:14:39 compute-0 ceph-mon[191930]: pgmap v1093: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:39 compute-0 podman[415072]: 2025-10-11 02:14:39.238494036 +0000 UTC m=+0.123096201 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vendor=Red Hat, Inc., io.buildah.version=1.29.0, name=ubi9, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=1214.1726694543, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, release-0.7.12=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, version=9.4, io.openshift.tags=base rhel9, maintainer=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_id=edpm, io.openshift.expose-services=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, architecture=x86_64, container_name=kepler, distribution-scope=public, managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., com.redhat.component=ubi9-container, vcs-type=git)
Oct 11 02:14:39 compute-0 sshd-session[415012]: Connection closed by invalid user user 121.227.153.123 port 45318 [preauth]
Oct 11 02:14:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1094: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:14:40 compute-0 sshd-session[415093]: Invalid user user from 121.227.153.123 port 55134
Oct 11 02:14:41 compute-0 ceph-mon[191930]: pgmap v1094: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:41 compute-0 sshd-session[415093]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:14:41 compute-0 sshd-session[415093]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:14:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1095: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:41 compute-0 nova_compute[356901]: 2025-10-11 02:14:41.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:14:41 compute-0 nova_compute[356901]: 2025-10-11 02:14:41.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:14:41 compute-0 nova_compute[356901]: 2025-10-11 02:14:41.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:14:43 compute-0 ceph-mon[191930]: pgmap v1095: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:43 compute-0 sshd-session[415093]: Failed password for invalid user user from 121.227.153.123 port 55134 ssh2
Oct 11 02:14:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1096: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:44 compute-0 sshd-session[415093]: Connection closed by invalid user user 121.227.153.123 port 55134 [preauth]
Oct 11 02:14:44 compute-0 nova_compute[356901]: 2025-10-11 02:14:44.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:14:44 compute-0 nova_compute[356901]: 2025-10-11 02:14:44.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:14:44 compute-0 nova_compute[356901]: 2025-10-11 02:14:44.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:14:44 compute-0 nova_compute[356901]: 2025-10-11 02:14:44.916 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944
Oct 11 02:14:45 compute-0 ceph-mon[191930]: pgmap v1096: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:14:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1097: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:45 compute-0 nova_compute[356901]: 2025-10-11 02:14:45.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:14:45 compute-0 nova_compute[356901]: 2025-10-11 02:14:45.923 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:14:45 compute-0 nova_compute[356901]: 2025-10-11 02:14:45.923 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:14:45 compute-0 nova_compute[356901]: 2025-10-11 02:14:45.923 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:14:45 compute-0 nova_compute[356901]: 2025-10-11 02:14:45.924 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:14:45 compute-0 nova_compute[356901]: 2025-10-11 02:14:45.924 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:14:46 compute-0 sshd-session[415095]: Invalid user user from 121.227.153.123 port 55142
Oct 11 02:14:46 compute-0 sshd-session[415095]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:14:46 compute-0 sshd-session[415095]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:14:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:14:46 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1126788941' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:14:46 compute-0 nova_compute[356901]: 2025-10-11 02:14:46.423 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.498s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:14:46 compute-0 nova_compute[356901]: 2025-10-11 02:14:46.943 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:14:46 compute-0 nova_compute[356901]: 2025-10-11 02:14:46.945 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=4561MB free_disk=59.98828125GB free_vcpus=8 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:14:46 compute-0 nova_compute[356901]: 2025-10-11 02:14:46.945 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:14:46 compute-0 nova_compute[356901]: 2025-10-11 02:14:46.946 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:14:47 compute-0 nova_compute[356901]: 2025-10-11 02:14:47.029 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 0 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:14:47 compute-0 nova_compute[356901]: 2025-10-11 02:14:47.030 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=512MB phys_disk=59GB used_disk=0GB total_vcpus=8 used_vcpus=0 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:14:47 compute-0 nova_compute[356901]: 2025-10-11 02:14:47.046 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:14:47 compute-0 ceph-mon[191930]: pgmap v1097: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:47 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1126788941' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:14:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:14:47 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3181632574' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:14:47 compute-0 nova_compute[356901]: 2025-10-11 02:14:47.550 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.504s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:14:47 compute-0 nova_compute[356901]: 2025-10-11 02:14:47.563 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:14:47 compute-0 nova_compute[356901]: 2025-10-11 02:14:47.586 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:14:47 compute-0 nova_compute[356901]: 2025-10-11 02:14:47.589 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:14:47 compute-0 nova_compute[356901]: 2025-10-11 02:14:47.590 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.644s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:14:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1098: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:48 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3181632574' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:14:48 compute-0 nova_compute[356901]: 2025-10-11 02:14:48.592 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:14:48 compute-0 nova_compute[356901]: 2025-10-11 02:14:48.592 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:14:48 compute-0 nova_compute[356901]: 2025-10-11 02:14:48.593 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:14:48 compute-0 nova_compute[356901]: 2025-10-11 02:14:48.593 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:14:48 compute-0 nova_compute[356901]: 2025-10-11 02:14:48.594 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:14:48 compute-0 sshd-session[415095]: Failed password for invalid user user from 121.227.153.123 port 55142 ssh2
Oct 11 02:14:49 compute-0 ceph-mon[191930]: pgmap v1098: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:49 compute-0 podman[415141]: 2025-10-11 02:14:49.220123421 +0000 UTC m=+0.103254979 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:14:49 compute-0 podman[415143]: 2025-10-11 02:14:49.238430095 +0000 UTC m=+0.109359970 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.4)
Oct 11 02:14:49 compute-0 podman[415144]: 2025-10-11 02:14:49.25114817 +0000 UTC m=+0.110729740 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, org.label-schema.license=GPLv2)
Oct 11 02:14:49 compute-0 podman[415142]: 2025-10-11 02:14:49.282015071 +0000 UTC m=+0.158715862 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, managed_by=edpm_ansible, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true)
Oct 11 02:14:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1099: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:49 compute-0 sshd-session[415095]: Connection closed by invalid user user 121.227.153.123 port 55142 [preauth]
Oct 11 02:14:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:14:50 compute-0 sudo[415229]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:14:50 compute-0 sudo[415229]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:50 compute-0 sudo[415229]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:50 compute-0 sudo[415254]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:14:50 compute-0 sudo[415254]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:50 compute-0 sudo[415254]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:51 compute-0 sudo[415279]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:14:51 compute-0 sudo[415279]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:51 compute-0 sudo[415279]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:51 compute-0 ceph-mon[191930]: pgmap v1099: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:14:51 compute-0 sudo[415304]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host
Oct 11 02:14:51 compute-0 sudo[415304]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:51 compute-0 sshd-session[415227]: Invalid user user from 121.227.153.123 port 45270
Oct 11 02:14:51 compute-0 sshd-session[415227]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:14:51 compute-0 sshd-session[415227]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:14:51 compute-0 sudo[415304]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:14:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:14:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:14:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:14:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1100: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 1.2 KiB/s rd, 0 B/s wr, 2 op/s
Oct 11 02:14:51 compute-0 sudo[415348]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:14:51 compute-0 sudo[415348]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:51 compute-0 sudo[415348]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:51 compute-0 sudo[415373]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:14:51 compute-0 sudo[415373]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:51 compute-0 sudo[415373]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:52 compute-0 sudo[415398]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:14:52 compute-0 sudo[415398]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:52 compute-0 sudo[415398]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:52 compute-0 sudo[415423]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:14:52 compute-0 sudo[415423]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:14:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:14:52 compute-0 ceph-mon[191930]: pgmap v1100: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 1.2 KiB/s rd, 0 B/s wr, 2 op/s
Oct 11 02:14:52 compute-0 sudo[415423]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:14:52 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:14:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:14:52 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:14:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:14:52 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:14:52 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 4e40cc6a-6370-4e24-b3d0-720a479aa71a does not exist
Oct 11 02:14:52 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 2d5984ae-6ef9-4ad5-8c03-6b8f24662797 does not exist
Oct 11 02:14:52 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 55bb4f3c-9dc1-4967-b565-63ac9d27082c does not exist
Oct 11 02:14:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:14:52 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:14:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:14:52 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:14:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:14:52 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:14:52 compute-0 sudo[415477]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:14:52 compute-0 sudo[415477]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:52 compute-0 sudo[415477]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:53 compute-0 sudo[415502]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:14:53 compute-0 sudo[415502]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:53 compute-0 sudo[415502]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:53 compute-0 sudo[415527]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:14:53 compute-0 sudo[415527]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:53 compute-0 sudo[415527]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:53 compute-0 sudo[415552]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:14:53 compute-0 sudo[415552]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:53 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:14:53 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:14:53 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:14:53 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:14:53 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:14:53 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:14:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1101: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 25 KiB/s rd, 0 B/s wr, 42 op/s
Oct 11 02:14:54 compute-0 sshd-session[415227]: Failed password for invalid user user from 121.227.153.123 port 45270 ssh2
Oct 11 02:14:54 compute-0 podman[415616]: 2025-10-11 02:14:54.03907983 +0000 UTC m=+0.087481209 container create a541f214f9f47caa9b8e3a7d87157319692b8c5e3e8ce073b8ce90f526efa193 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_murdock, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=reef, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:14:54 compute-0 podman[415616]: 2025-10-11 02:14:54.006179668 +0000 UTC m=+0.054581107 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:14:54 compute-0 systemd[1]: Started libpod-conmon-a541f214f9f47caa9b8e3a7d87157319692b8c5e3e8ce073b8ce90f526efa193.scope.
Oct 11 02:14:54 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:14:54 compute-0 podman[415616]: 2025-10-11 02:14:54.189447213 +0000 UTC m=+0.237848642 container init a541f214f9f47caa9b8e3a7d87157319692b8c5e3e8ce073b8ce90f526efa193 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_murdock, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:14:54 compute-0 podman[415616]: 2025-10-11 02:14:54.20129508 +0000 UTC m=+0.249696469 container start a541f214f9f47caa9b8e3a7d87157319692b8c5e3e8ce073b8ce90f526efa193 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_murdock, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:14:54 compute-0 podman[415616]: 2025-10-11 02:14:54.207180655 +0000 UTC m=+0.255582084 container attach a541f214f9f47caa9b8e3a7d87157319692b8c5e3e8ce073b8ce90f526efa193 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_murdock, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 02:14:54 compute-0 dazzling_murdock[415632]: 167 167
Oct 11 02:14:54 compute-0 systemd[1]: libpod-a541f214f9f47caa9b8e3a7d87157319692b8c5e3e8ce073b8ce90f526efa193.scope: Deactivated successfully.
Oct 11 02:14:54 compute-0 podman[415616]: 2025-10-11 02:14:54.214823093 +0000 UTC m=+0.263224482 container died a541f214f9f47caa9b8e3a7d87157319692b8c5e3e8ce073b8ce90f526efa193 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_murdock, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 02:14:54 compute-0 systemd[1]: var-lib-containers-storage-overlay-3294ad0a93491b32ceb60e58fa45aea0101852109be4e068a1897c02586f1a1e-merged.mount: Deactivated successfully.
Oct 11 02:14:54 compute-0 podman[415616]: 2025-10-11 02:14:54.316615913 +0000 UTC m=+0.365017302 container remove a541f214f9f47caa9b8e3a7d87157319692b8c5e3e8ce073b8ce90f526efa193 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_murdock, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:14:54 compute-0 systemd[1]: libpod-conmon-a541f214f9f47caa9b8e3a7d87157319692b8c5e3e8ce073b8ce90f526efa193.scope: Deactivated successfully.
Oct 11 02:14:54 compute-0 podman[415650]: 2025-10-11 02:14:54.512149659 +0000 UTC m=+0.107272321 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=multipathd, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_id=multipathd, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:14:54 compute-0 podman[415651]: 2025-10-11 02:14:54.517652439 +0000 UTC m=+0.100863564 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, container_name=iscsid, io.buildah.version=1.41.3, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=iscsid, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:14:54 compute-0 podman[415691]: 2025-10-11 02:14:54.564276131 +0000 UTC m=+0.056183464 container create 4025b05efe9a53596293491b7047a19f093029161d52098e3dbb2a6dd8f76ed7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_mestorf, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:14:54 compute-0 systemd[1]: Started libpod-conmon-4025b05efe9a53596293491b7047a19f093029161d52098e3dbb2a6dd8f76ed7.scope.
Oct 11 02:14:54 compute-0 podman[415691]: 2025-10-11 02:14:54.545267807 +0000 UTC m=+0.037175160 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:14:54 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:14:54 compute-0 ceph-mon[191930]: pgmap v1101: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 25 KiB/s rd, 0 B/s wr, 42 op/s
Oct 11 02:14:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e699249f5d69063687f7f410364d89fc93e581487009b0be3cd37d52c6221539/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:14:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e699249f5d69063687f7f410364d89fc93e581487009b0be3cd37d52c6221539/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:14:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e699249f5d69063687f7f410364d89fc93e581487009b0be3cd37d52c6221539/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:14:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e699249f5d69063687f7f410364d89fc93e581487009b0be3cd37d52c6221539/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:14:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e699249f5d69063687f7f410364d89fc93e581487009b0be3cd37d52c6221539/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:14:54 compute-0 podman[415691]: 2025-10-11 02:14:54.703616414 +0000 UTC m=+0.195523807 container init 4025b05efe9a53596293491b7047a19f093029161d52098e3dbb2a6dd8f76ed7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_mestorf, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 02:14:54 compute-0 podman[415691]: 2025-10-11 02:14:54.728514692 +0000 UTC m=+0.220422035 container start 4025b05efe9a53596293491b7047a19f093029161d52098e3dbb2a6dd8f76ed7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_mestorf, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:14:54 compute-0 podman[415691]: 2025-10-11 02:14:54.734056901 +0000 UTC m=+0.225964244 container attach 4025b05efe9a53596293491b7047a19f093029161d52098e3dbb2a6dd8f76ed7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_mestorf, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS)
Oct 11 02:14:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:14:54.836 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:14:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:14:54.837 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:14:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:14:54.837 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:14:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:14:55 compute-0 sshd-session[415227]: Connection closed by invalid user user 121.227.153.123 port 45270 [preauth]
Oct 11 02:14:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1102: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 0 B/s wr, 58 op/s
Oct 11 02:14:56 compute-0 modest_mestorf[415713]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:14:56 compute-0 modest_mestorf[415713]: --> relative data size: 1.0
Oct 11 02:14:56 compute-0 modest_mestorf[415713]: --> All data devices are unavailable
Oct 11 02:14:56 compute-0 systemd[1]: libpod-4025b05efe9a53596293491b7047a19f093029161d52098e3dbb2a6dd8f76ed7.scope: Deactivated successfully.
Oct 11 02:14:56 compute-0 systemd[1]: libpod-4025b05efe9a53596293491b7047a19f093029161d52098e3dbb2a6dd8f76ed7.scope: Consumed 1.340s CPU time.
Oct 11 02:14:56 compute-0 podman[415744]: 2025-10-11 02:14:56.225941841 +0000 UTC m=+0.057590623 container died 4025b05efe9a53596293491b7047a19f093029161d52098e3dbb2a6dd8f76ed7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_mestorf, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 02:14:56 compute-0 systemd[1]: var-lib-containers-storage-overlay-e699249f5d69063687f7f410364d89fc93e581487009b0be3cd37d52c6221539-merged.mount: Deactivated successfully.
Oct 11 02:14:56 compute-0 podman[415744]: 2025-10-11 02:14:56.325647311 +0000 UTC m=+0.157296043 container remove 4025b05efe9a53596293491b7047a19f093029161d52098e3dbb2a6dd8f76ed7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=modest_mestorf, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 02:14:56 compute-0 systemd[1]: libpod-conmon-4025b05efe9a53596293491b7047a19f093029161d52098e3dbb2a6dd8f76ed7.scope: Deactivated successfully.
Oct 11 02:14:56 compute-0 sudo[415552]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:14:56
Oct 11 02:14:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:14:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:14:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.meta', '.mgr', 'cephfs.cephfs.data', 'default.rgw.log', 'default.rgw.control', 'cephfs.cephfs.meta', 'backups', 'vms', '.rgw.root', 'images', 'volumes']
Oct 11 02:14:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:14:56 compute-0 sudo[415759]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:14:56 compute-0 sudo[415759]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:56 compute-0 sudo[415759]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:14:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:14:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:14:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:14:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:14:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:14:56 compute-0 sudo[415784]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:14:56 compute-0 sudo[415784]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:56 compute-0 sudo[415784]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:56 compute-0 ceph-mon[191930]: pgmap v1102: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 0 B/s wr, 58 op/s
Oct 11 02:14:56 compute-0 sudo[415809]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:14:56 compute-0 sudo[415809]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:56 compute-0 sudo[415809]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:56 compute-0 sudo[415834]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:14:56 compute-0 sudo[415834]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:14:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:14:56 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:14:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:14:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:14:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:14:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:14:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:14:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:14:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:14:57 compute-0 sshd-session[415718]: Invalid user user from 121.227.153.123 port 45276
Oct 11 02:14:57 compute-0 sshd-session[415718]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:14:57 compute-0 sshd-session[415718]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:14:57 compute-0 podman[415897]: 2025-10-11 02:14:57.557522541 +0000 UTC m=+0.091299591 container create a072789a4edd266e0a83cbccae45d881a8b7c2edd062eed3519b54e989d52a08 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_feistel, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 02:14:57 compute-0 podman[415897]: 2025-10-11 02:14:57.524745841 +0000 UTC m=+0.058522971 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:14:57 compute-0 systemd[1]: Started libpod-conmon-a072789a4edd266e0a83cbccae45d881a8b7c2edd062eed3519b54e989d52a08.scope.
Oct 11 02:14:57 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:14:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1103: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:14:57 compute-0 podman[415897]: 2025-10-11 02:14:57.720477927 +0000 UTC m=+0.254255037 container init a072789a4edd266e0a83cbccae45d881a8b7c2edd062eed3519b54e989d52a08 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_feistel, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:14:57 compute-0 podman[415897]: 2025-10-11 02:14:57.740483426 +0000 UTC m=+0.274260486 container start a072789a4edd266e0a83cbccae45d881a8b7c2edd062eed3519b54e989d52a08 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_feistel, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 02:14:57 compute-0 podman[415897]: 2025-10-11 02:14:57.747064679 +0000 UTC m=+0.280841799 container attach a072789a4edd266e0a83cbccae45d881a8b7c2edd062eed3519b54e989d52a08 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_feistel, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:14:57 compute-0 xenodochial_feistel[415913]: 167 167
Oct 11 02:14:57 compute-0 systemd[1]: libpod-a072789a4edd266e0a83cbccae45d881a8b7c2edd062eed3519b54e989d52a08.scope: Deactivated successfully.
Oct 11 02:14:57 compute-0 podman[415897]: 2025-10-11 02:14:57.755983328 +0000 UTC m=+0.289760388 container died a072789a4edd266e0a83cbccae45d881a8b7c2edd062eed3519b54e989d52a08 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_feistel, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #51. Immutable memtables: 0.
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:14:57.792215) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 25] Flushing memtable with next log file: 51
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148897792366, "job": 25, "event": "flush_started", "num_memtables": 1, "num_entries": 2054, "num_deletes": 251, "total_data_size": 3453744, "memory_usage": 3515424, "flush_reason": "Manual Compaction"}
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 25] Level-0 flush table #52: started
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148897819200, "cf_name": "default", "job": 25, "event": "table_file_creation", "file_number": 52, "file_size": 3366074, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 20902, "largest_seqno": 22955, "table_properties": {"data_size": 3356821, "index_size": 5811, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 2373, "raw_key_size": 18633, "raw_average_key_size": 19, "raw_value_size": 3338336, "raw_average_value_size": 3578, "num_data_blocks": 263, "num_entries": 933, "num_filter_entries": 933, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760148674, "oldest_key_time": 1760148674, "file_creation_time": 1760148897, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 52, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 25] Flush lasted 27121 microseconds, and 10118 cpu microseconds.
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:14:57 compute-0 systemd[1]: var-lib-containers-storage-overlay-f5e3c97d5e3756f83696b24dcb551315dcf040c45bf2198b91e2d5424f109b84-merged.mount: Deactivated successfully.
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:14:57.819389) [db/flush_job.cc:967] [default] [JOB 25] Level-0 flush table #52: 3366074 bytes OK
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:14:57.819430) [db/memtable_list.cc:519] [default] Level-0 commit table #52 started
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:14:57.822938) [db/memtable_list.cc:722] [default] Level-0 commit table #52: memtable #1 done
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:14:57.822970) EVENT_LOG_v1 {"time_micros": 1760148897822959, "job": 25, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:14:57.823002) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 25] Try to delete WAL files size 3445139, prev total WAL file size 3445139, number of live WAL files 2.
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000048.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:14:57.825444) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730031373537' seq:72057594037927935, type:22 .. '7061786F730032303039' seq:0, type:0; will stop at (end)
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 26] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 25 Base level 0, inputs: [52(3287KB)], [50(7361KB)]
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148897825560, "job": 26, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [52], "files_L6": [50], "score": -1, "input_data_size": 10904520, "oldest_snapshot_seqno": -1}
Oct 11 02:14:57 compute-0 podman[415897]: 2025-10-11 02:14:57.837713855 +0000 UTC m=+0.371490885 container remove a072789a4edd266e0a83cbccae45d881a8b7c2edd062eed3519b54e989d52a08 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_feistel, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef)
Oct 11 02:14:57 compute-0 systemd[1]: libpod-conmon-a072789a4edd266e0a83cbccae45d881a8b7c2edd062eed3519b54e989d52a08.scope: Deactivated successfully.
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 26] Generated table #53: 4695 keys, 9149423 bytes, temperature: kUnknown
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148897888805, "cf_name": "default", "job": 26, "event": "table_file_creation", "file_number": 53, "file_size": 9149423, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 9115649, "index_size": 20928, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 11781, "raw_key_size": 114892, "raw_average_key_size": 24, "raw_value_size": 9028340, "raw_average_value_size": 1922, "num_data_blocks": 882, "num_entries": 4695, "num_filter_entries": 4695, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760148897, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 53, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:14:57.889103) [db/compaction/compaction_job.cc:1663] [default] [JOB 26] Compacted 1@0 + 1@6 files to L6 => 9149423 bytes
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:14:57.892455) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 172.2 rd, 144.5 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(3.2, 7.2 +0.0 blob) out(8.7 +0.0 blob), read-write-amplify(6.0) write-amplify(2.7) OK, records in: 5209, records dropped: 514 output_compression: NoCompression
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:14:57.892513) EVENT_LOG_v1 {"time_micros": 1760148897892490, "job": 26, "event": "compaction_finished", "compaction_time_micros": 63310, "compaction_time_cpu_micros": 31469, "output_level": 6, "num_output_files": 1, "total_output_size": 9149423, "num_input_records": 5209, "num_output_records": 4695, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000052.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148897893547, "job": 26, "event": "table_file_deletion", "file_number": 52}
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000050.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148897895035, "job": 26, "event": "table_file_deletion", "file_number": 50}
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:14:57.825055) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:14:57.895283) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:14:57.895288) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:14:57.895291) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:14:57.895293) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:14:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:14:57.895295) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:14:58 compute-0 podman[415936]: 2025-10-11 02:14:58.14014414 +0000 UTC m=+0.122354948 container create 69b0d57effa6783e2dc1a3c4c7053b9215a325ae5d4c9d9c24eb16aa556d5c19 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_johnson, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:14:58 compute-0 podman[415936]: 2025-10-11 02:14:58.086709387 +0000 UTC m=+0.068920265 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:14:58 compute-0 systemd[1]: Started libpod-conmon-69b0d57effa6783e2dc1a3c4c7053b9215a325ae5d4c9d9c24eb16aa556d5c19.scope.
Oct 11 02:14:58 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:14:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ce49cc19ab74712f42e12f9de0cc3dbee641f1f23168cf072499adb85f841ac8/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:14:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ce49cc19ab74712f42e12f9de0cc3dbee641f1f23168cf072499adb85f841ac8/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:14:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ce49cc19ab74712f42e12f9de0cc3dbee641f1f23168cf072499adb85f841ac8/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:14:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ce49cc19ab74712f42e12f9de0cc3dbee641f1f23168cf072499adb85f841ac8/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:14:58 compute-0 podman[415936]: 2025-10-11 02:14:58.314452525 +0000 UTC m=+0.296663393 container init 69b0d57effa6783e2dc1a3c4c7053b9215a325ae5d4c9d9c24eb16aa556d5c19 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_johnson, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 02:14:58 compute-0 podman[415936]: 2025-10-11 02:14:58.325199042 +0000 UTC m=+0.307409820 container start 69b0d57effa6783e2dc1a3c4c7053b9215a325ae5d4c9d9c24eb16aa556d5c19 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_johnson, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True)
Oct 11 02:14:58 compute-0 podman[415936]: 2025-10-11 02:14:58.335595066 +0000 UTC m=+0.317805934 container attach 69b0d57effa6783e2dc1a3c4c7053b9215a325ae5d4c9d9c24eb16aa556d5c19 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_johnson, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:14:58 compute-0 ceph-mon[191930]: pgmap v1103: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:14:58 compute-0 sshd-session[415718]: Failed password for invalid user user from 121.227.153.123 port 45276 ssh2
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]: {
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:     "0": [
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:         {
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "devices": [
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "/dev/loop3"
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             ],
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "lv_name": "ceph_lv0",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "lv_size": "21470642176",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "name": "ceph_lv0",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "tags": {
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.cluster_name": "ceph",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.crush_device_class": "",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.encrypted": "0",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.osd_id": "0",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.type": "block",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.vdo": "0"
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             },
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "type": "block",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "vg_name": "ceph_vg0"
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:         }
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:     ],
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:     "1": [
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:         {
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "devices": [
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "/dev/loop4"
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             ],
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "lv_name": "ceph_lv1",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "lv_size": "21470642176",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "name": "ceph_lv1",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "tags": {
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.cluster_name": "ceph",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.crush_device_class": "",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.encrypted": "0",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.osd_id": "1",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.type": "block",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.vdo": "0"
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             },
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "type": "block",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "vg_name": "ceph_vg1"
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:         }
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:     ],
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:     "2": [
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:         {
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "devices": [
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "/dev/loop5"
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             ],
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "lv_name": "ceph_lv2",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "lv_size": "21470642176",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "name": "ceph_lv2",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "tags": {
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.cluster_name": "ceph",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.crush_device_class": "",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.encrypted": "0",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.osd_id": "2",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.type": "block",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:                 "ceph.vdo": "0"
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             },
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "type": "block",
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:             "vg_name": "ceph_vg2"
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:         }
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]:     ]
Oct 11 02:14:59 compute-0 vigilant_johnson[415953]: }
Oct 11 02:14:59 compute-0 sshd-session[415718]: Connection closed by invalid user user 121.227.153.123 port 45276 [preauth]
Oct 11 02:14:59 compute-0 systemd[1]: libpod-69b0d57effa6783e2dc1a3c4c7053b9215a325ae5d4c9d9c24eb16aa556d5c19.scope: Deactivated successfully.
Oct 11 02:14:59 compute-0 podman[415936]: 2025-10-11 02:14:59.241633024 +0000 UTC m=+1.223843822 container died 69b0d57effa6783e2dc1a3c4c7053b9215a325ae5d4c9d9c24eb16aa556d5c19 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_johnson, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:14:59 compute-0 systemd[1]: var-lib-containers-storage-overlay-ce49cc19ab74712f42e12f9de0cc3dbee641f1f23168cf072499adb85f841ac8-merged.mount: Deactivated successfully.
Oct 11 02:14:59 compute-0 podman[415936]: 2025-10-11 02:14:59.344513412 +0000 UTC m=+1.326724200 container remove 69b0d57effa6783e2dc1a3c4c7053b9215a325ae5d4c9d9c24eb16aa556d5c19 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_johnson, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:14:59 compute-0 systemd[1]: libpod-conmon-69b0d57effa6783e2dc1a3c4c7053b9215a325ae5d4c9d9c24eb16aa556d5c19.scope: Deactivated successfully.
Oct 11 02:14:59 compute-0 sudo[415834]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:59 compute-0 sudo[415972]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:14:59 compute-0 sudo[415972]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:59 compute-0 sudo[415972]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:59 compute-0 sudo[415999]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:14:59 compute-0 sudo[415999]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1104: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:14:59 compute-0 sudo[415999]: pam_unix(sudo:session): session closed for user root
Oct 11 02:14:59 compute-0 podman[157119]: time="2025-10-11T02:14:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:14:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:14:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:14:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:14:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8523 "" "Go-http-client/1.1"
Oct 11 02:14:59 compute-0 sudo[416024]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:14:59 compute-0 sudo[416024]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:14:59 compute-0 sudo[416024]: pam_unix(sudo:session): session closed for user root
Oct 11 02:15:00 compute-0 sudo[416049]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:15:00 compute-0 sudo[416049]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:15:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:15:00 compute-0 sshd-session[415978]: Invalid user user from 121.227.153.123 port 42132
Oct 11 02:15:00 compute-0 sshd-session[415978]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:15:00 compute-0 sshd-session[415978]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:15:00 compute-0 podman[416114]: 2025-10-11 02:15:00.680064997 +0000 UTC m=+0.099313518 container create 54f41e6c4c73acc79d735b95fb1076aea371fcb0e0d1e67591ccd354d555eaa4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_euler, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:15:00 compute-0 podman[416114]: 2025-10-11 02:15:00.641930879 +0000 UTC m=+0.061179470 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:15:00 compute-0 systemd[1]: Started libpod-conmon-54f41e6c4c73acc79d735b95fb1076aea371fcb0e0d1e67591ccd354d555eaa4.scope.
Oct 11 02:15:00 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:15:00 compute-0 podman[416114]: 2025-10-11 02:15:00.845793105 +0000 UTC m=+0.265041666 container init 54f41e6c4c73acc79d735b95fb1076aea371fcb0e0d1e67591ccd354d555eaa4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_euler, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 02:15:00 compute-0 podman[416114]: 2025-10-11 02:15:00.864424593 +0000 UTC m=+0.283673114 container start 54f41e6c4c73acc79d735b95fb1076aea371fcb0e0d1e67591ccd354d555eaa4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_euler, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:15:00 compute-0 ceph-mon[191930]: pgmap v1104: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:15:00 compute-0 podman[416114]: 2025-10-11 02:15:00.87546885 +0000 UTC m=+0.294717381 container attach 54f41e6c4c73acc79d735b95fb1076aea371fcb0e0d1e67591ccd354d555eaa4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_euler, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0)
Oct 11 02:15:00 compute-0 gracious_euler[416130]: 167 167
Oct 11 02:15:00 compute-0 systemd[1]: libpod-54f41e6c4c73acc79d735b95fb1076aea371fcb0e0d1e67591ccd354d555eaa4.scope: Deactivated successfully.
Oct 11 02:15:00 compute-0 podman[416114]: 2025-10-11 02:15:00.882195719 +0000 UTC m=+0.301444250 container died 54f41e6c4c73acc79d735b95fb1076aea371fcb0e0d1e67591ccd354d555eaa4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_euler, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:15:00 compute-0 systemd[1]: var-lib-containers-storage-overlay-31ef30f1fd9d918f09a24731020d2a5da34d98015985af07e70fd4b0f622b41d-merged.mount: Deactivated successfully.
Oct 11 02:15:00 compute-0 podman[416114]: 2025-10-11 02:15:00.96132443 +0000 UTC m=+0.380572931 container remove 54f41e6c4c73acc79d735b95fb1076aea371fcb0e0d1e67591ccd354d555eaa4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gracious_euler, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3)
Oct 11 02:15:00 compute-0 systemd[1]: libpod-conmon-54f41e6c4c73acc79d735b95fb1076aea371fcb0e0d1e67591ccd354d555eaa4.scope: Deactivated successfully.
Oct 11 02:15:01 compute-0 podman[416154]: 2025-10-11 02:15:01.252723068 +0000 UTC m=+0.093398640 container create 769721380e8b625968f9efa961817c3b601216e3cf82201bcdfd9941770363bb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_leavitt, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 02:15:01 compute-0 podman[416154]: 2025-10-11 02:15:01.203872034 +0000 UTC m=+0.044547646 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:15:01 compute-0 systemd[1]: Started libpod-conmon-769721380e8b625968f9efa961817c3b601216e3cf82201bcdfd9941770363bb.scope.
Oct 11 02:15:01 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:15:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/668f0cb95c67c2137125a4cd1a3fd9ff6413506495a1597ea43409bcccd8347a/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:15:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/668f0cb95c67c2137125a4cd1a3fd9ff6413506495a1597ea43409bcccd8347a/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:15:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/668f0cb95c67c2137125a4cd1a3fd9ff6413506495a1597ea43409bcccd8347a/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:15:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/668f0cb95c67c2137125a4cd1a3fd9ff6413506495a1597ea43409bcccd8347a/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:15:01 compute-0 openstack_network_exporter[374316]: ERROR   02:15:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:15:01 compute-0 openstack_network_exporter[374316]: ERROR   02:15:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:15:01 compute-0 openstack_network_exporter[374316]: ERROR   02:15:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:15:01 compute-0 openstack_network_exporter[374316]: ERROR   02:15:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:15:01 compute-0 openstack_network_exporter[374316]: ERROR   02:15:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:15:01 compute-0 podman[416154]: 2025-10-11 02:15:01.440045813 +0000 UTC m=+0.280721395 container init 769721380e8b625968f9efa961817c3b601216e3cf82201bcdfd9941770363bb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_leavitt, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:15:01 compute-0 podman[416154]: 2025-10-11 02:15:01.46298468 +0000 UTC m=+0.303660212 container start 769721380e8b625968f9efa961817c3b601216e3cf82201bcdfd9941770363bb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_leavitt, CEPH_REF=reef, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, OSD_FLAVOR=default)
Oct 11 02:15:01 compute-0 podman[416154]: 2025-10-11 02:15:01.471479044 +0000 UTC m=+0.312154576 container attach 769721380e8b625968f9efa961817c3b601216e3cf82201bcdfd9941770363bb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_leavitt, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 02:15:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1105: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:15:02 compute-0 sshd-session[415978]: Failed password for invalid user user from 121.227.153.123 port 42132 ssh2
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]: {
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:         "osd_id": 1,
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:         "type": "bluestore"
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:     },
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:         "osd_id": 2,
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:         "type": "bluestore"
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:     },
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:         "osd_id": 0,
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:         "type": "bluestore"
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]:     }
Oct 11 02:15:02 compute-0 priceless_leavitt[416171]: }
Oct 11 02:15:02 compute-0 systemd[1]: libpod-769721380e8b625968f9efa961817c3b601216e3cf82201bcdfd9941770363bb.scope: Deactivated successfully.
Oct 11 02:15:02 compute-0 systemd[1]: libpod-769721380e8b625968f9efa961817c3b601216e3cf82201bcdfd9941770363bb.scope: Consumed 1.245s CPU time.
Oct 11 02:15:02 compute-0 podman[416154]: 2025-10-11 02:15:02.713935462 +0000 UTC m=+1.554611034 container died 769721380e8b625968f9efa961817c3b601216e3cf82201bcdfd9941770363bb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_leavitt, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:15:02 compute-0 systemd[1]: var-lib-containers-storage-overlay-668f0cb95c67c2137125a4cd1a3fd9ff6413506495a1597ea43409bcccd8347a-merged.mount: Deactivated successfully.
Oct 11 02:15:02 compute-0 podman[416154]: 2025-10-11 02:15:02.826897892 +0000 UTC m=+1.667573414 container remove 769721380e8b625968f9efa961817c3b601216e3cf82201bcdfd9941770363bb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_leavitt, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:15:02 compute-0 systemd[1]: libpod-conmon-769721380e8b625968f9efa961817c3b601216e3cf82201bcdfd9941770363bb.scope: Deactivated successfully.
Oct 11 02:15:02 compute-0 sudo[416049]: pam_unix(sudo:session): session closed for user root
Oct 11 02:15:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:15:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:15:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:15:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:15:02 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev b6450e21-7628-4203-8429-90be123af629 does not exist
Oct 11 02:15:02 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev f2ca924c-18c2-4e48-a3bf-10d6c1140be4 does not exist
Oct 11 02:15:02 compute-0 ceph-mon[191930]: pgmap v1105: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:15:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:15:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:15:03 compute-0 sudo[416215]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:15:03 compute-0 sudo[416215]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:15:03 compute-0 sudo[416215]: pam_unix(sudo:session): session closed for user root
Oct 11 02:15:03 compute-0 sudo[416240]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:15:03 compute-0 sudo[416240]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:15:03 compute-0 sudo[416240]: pam_unix(sudo:session): session closed for user root
Oct 11 02:15:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1106: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 34 KiB/s rd, 0 B/s wr, 57 op/s
Oct 11 02:15:04 compute-0 sshd-session[415978]: Connection closed by invalid user user 121.227.153.123 port 42132 [preauth]
Oct 11 02:15:04 compute-0 ceph-mon[191930]: pgmap v1106: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 34 KiB/s rd, 0 B/s wr, 57 op/s
Oct 11 02:15:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:15:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1107: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 10 KiB/s rd, 0 B/s wr, 17 op/s
Oct 11 02:15:06 compute-0 podman[416268]: 2025-10-11 02:15:06.267177178 +0000 UTC m=+0.147755445 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:15:06 compute-0 podman[416267]: 2025-10-11 02:15:06.302386398 +0000 UTC m=+0.180733913 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, url=https://catalog.redhat.com/en/search?searchType=containers, io.openshift.tags=minimal rhel9, config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, managed_by=edpm_ansible, version=9.6, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, distribution-scope=public, release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, architecture=x86_64, container_name=openstack_network_exporter, name=ubi9-minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, build-date=2025-08-20T13:12:41, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., vendor=Red Hat, Inc., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, com.redhat.component=ubi9-minimal-container, io.openshift.expose-services=)
Oct 11 02:15:06 compute-0 sshd-session[416265]: Invalid user user from 121.227.153.123 port 42142
Oct 11 02:15:06 compute-0 podman[416309]: 2025-10-11 02:15:06.42594804 +0000 UTC m=+0.114618953 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2)
Oct 11 02:15:06 compute-0 sshd-session[416265]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:15:06 compute-0 sshd-session[416265]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:15:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:15:06 compute-0 ceph-mon[191930]: pgmap v1107: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 10 KiB/s rd, 0 B/s wr, 17 op/s
Oct 11 02:15:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1108: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 511 B/s rd, 0 B/s wr, 0 op/s
Oct 11 02:15:07 compute-0 sshd-session[416265]: Failed password for invalid user user from 121.227.153.123 port 42142 ssh2
Oct 11 02:15:08 compute-0 sshd-session[416265]: Connection closed by invalid user user 121.227.153.123 port 42142 [preauth]
Oct 11 02:15:08 compute-0 ceph-mon[191930]: pgmap v1108: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 511 B/s rd, 0 B/s wr, 0 op/s
Oct 11 02:15:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1109: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:09 compute-0 sshd-session[416329]: Invalid user user from 121.227.153.123 port 42154
Oct 11 02:15:10 compute-0 podman[416331]: 2025-10-11 02:15:10.030511772 +0000 UTC m=+0.159753238 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.expose-services=, managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., config_id=edpm, name=ubi9, version=9.4, architecture=x86_64, build-date=2024-09-18T21:23:30, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9, com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=kepler, vcs-type=git, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.openshift.tags=base rhel9, io.buildah.version=1.29.0, vendor=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, maintainer=Red Hat, Inc., release=1214.1726694543, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, release-0.7.12=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:15:10 compute-0 sshd-session[416329]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:15:10 compute-0 sshd-session[416329]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:15:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:15:11 compute-0 ceph-mon[191930]: pgmap v1109: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1110: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:12 compute-0 sshd-session[416329]: Failed password for invalid user user from 121.227.153.123 port 42154 ssh2
Oct 11 02:15:13 compute-0 ceph-mon[191930]: pgmap v1110: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:13 compute-0 sshd-session[416329]: Connection closed by invalid user user 121.227.153.123 port 42154 [preauth]
Oct 11 02:15:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1111: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.859 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is greater than the number of worker threads available to execute them; therefore, the polling process can be expected to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.860 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.860 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.860 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.861 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.863 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{'network.incoming.bytes': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.863 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.864 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dcce780>] with cache [{}], pollster history [{'network.incoming.bytes': [], 'network.outgoing.packets': []}], and discovery cache [{'local_instances': []}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.864 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.865 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.865 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.865 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.865 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.865 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.865 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.866 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.866 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.866 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.866 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.867 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.867 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.867 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.867 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.868 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.868 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.868 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.868 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.869 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.869 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.869 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.869 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.869 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.869 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.869 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.870 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.870 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.870 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.870 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.870 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.870 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.870 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.870 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.871 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.871 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.871 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:15:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:15:14 compute-0 sshd-session[416351]: Invalid user user from 121.227.153.123 port 43078
Oct 11 02:15:15 compute-0 ceph-mon[191930]: pgmap v1111: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:15 compute-0 sshd-session[416351]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:15:15 compute-0 sshd-session[416351]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:15:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:15:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1112: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:17 compute-0 ceph-mon[191930]: pgmap v1112: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:17 compute-0 sshd-session[416351]: Failed password for invalid user user from 121.227.153.123 port 43078 ssh2
Oct 11 02:15:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1113: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:18 compute-0 sshd-session[416351]: Connection closed by invalid user user 121.227.153.123 port 43078 [preauth]
Oct 11 02:15:19 compute-0 ceph-mon[191930]: pgmap v1113: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1114: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:15:20 compute-0 podman[416356]: 2025-10-11 02:15:20.241764619 +0000 UTC m=+0.133034423 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:15:20 compute-0 podman[416363]: 2025-10-11 02:15:20.241991787 +0000 UTC m=+0.102671941 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, org.label-schema.build-date=20251007, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, managed_by=edpm_ansible, tcib_managed=true)
Oct 11 02:15:20 compute-0 podman[416357]: 2025-10-11 02:15:20.277221088 +0000 UTC m=+0.144844919 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=ovn_controller, container_name=ovn_controller, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 02:15:20 compute-0 podman[416364]: 2025-10-11 02:15:20.279410649 +0000 UTC m=+0.130202808 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_metadata_agent, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 02:15:20 compute-0 sshd-session[416353]: Invalid user user from 121.227.153.123 port 43082
Oct 11 02:15:20 compute-0 sshd-session[416353]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:15:20 compute-0 sshd-session[416353]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:15:21 compute-0 ceph-mon[191930]: pgmap v1114: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:21 compute-0 sshd-session[416440]: Accepted publickey for zuul from 38.102.83.70 port 43706 ssh2: RSA SHA256:sxgyqRujXfGvMV2Eq7ZlGcFGCGFr/dtz6dk2ZJwy3W4
Oct 11 02:15:21 compute-0 systemd-logind[804]: New session 62 of user zuul.
Oct 11 02:15:21 compute-0 systemd[1]: Started Session 62 of User zuul.
Oct 11 02:15:21 compute-0 sshd-session[416440]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 02:15:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1115: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:22 compute-0 sshd-session[416353]: Failed password for invalid user user from 121.227.153.123 port 43082 ssh2
Oct 11 02:15:22 compute-0 python3[416617]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 02:15:23 compute-0 ceph-mon[191930]: pgmap v1115: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1116: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:24 compute-0 sshd-session[416353]: Connection closed by invalid user user 121.227.153.123 port 43082 [preauth]
Oct 11 02:15:24 compute-0 sudo[416878]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tygxmegigowmuposhloszdccgvldtpwz ; KUBECONFIG=/home/zuul/.crc/machines/crc/kubeconfig PATH=/home/zuul/.crc/bin:/home/zuul/.crc/bin/oc:/home/zuul/bin:/home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760148924.376851-35231-86111806419717/AnsiballZ_command.py'
Oct 11 02:15:24 compute-0 podman[416826]: 2025-10-11 02:15:24.978700822 +0000 UTC m=+0.110049054 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=iscsid, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_id=iscsid)
Oct 11 02:15:24 compute-0 sudo[416878]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:15:25 compute-0 podman[416825]: 2025-10-11 02:15:25.003895222 +0000 UTC m=+0.134819438 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_id=multipathd, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, container_name=multipathd, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true)
Oct 11 02:15:25 compute-0 ceph-mon[191930]: pgmap v1116: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:25 compute-0 python3[416886]: ansible-ansible.legacy.command Invoked with _raw_params=tstamp=$(date -d '30 minute ago' "+%Y-%m-%d %H:%M:%S")
                                           journalctl -t "ceilometer_agent_compute" --no-pager -S "${tstamp}"
                                            _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:15:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:15:25 compute-0 sudo[416878]: pam_unix(sudo:session): session closed for user root
Oct 11 02:15:25 compute-0 sshd-session[416799]: Invalid user user from 121.227.153.123 port 41106
Oct 11 02:15:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1117: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:25 compute-0 sshd-session[416799]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:15:25 compute-0 sshd-session[416799]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:15:26 compute-0 ceph-mon[191930]: pgmap v1117: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:26 compute-0 sudo[417037]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gtlwlvayxklrfsirrolfvximxhzuycwc ; KUBECONFIG=/home/zuul/.crc/machines/crc/kubeconfig PATH=/home/zuul/.crc/bin:/home/zuul/.crc/bin/oc:/home/zuul/bin:/home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760148925.84611-35242-116729010136736/AnsiballZ_command.py'
Oct 11 02:15:26 compute-0 sudo[417037]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:15:26 compute-0 python3[417039]: ansible-ansible.legacy.command Invoked with _raw_params=tstamp=$(date -d '30 minute ago' "+%Y-%m-%d %H:%M:%S")
                                           journalctl -t "nova_compute" --no-pager -S "${tstamp}"
                                            _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:15:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:15:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:15:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:15:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:15:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:15:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:15:27 compute-0 sshd-session[416799]: Failed password for invalid user user from 121.227.153.123 port 41106 ssh2
Oct 11 02:15:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:15:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/4195219458' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:15:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:15:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/4195219458' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:15:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/4195219458' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:15:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/4195219458' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:15:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1118: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:27 compute-0 sshd-session[416799]: Connection closed by invalid user user 121.227.153.123 port 41106 [preauth]
Oct 11 02:15:27 compute-0 sudo[417037]: pam_unix(sudo:session): session closed for user root
Oct 11 02:15:28 compute-0 ceph-mon[191930]: pgmap v1118: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:29 compute-0 sshd-session[417042]: Invalid user user from 121.227.153.123 port 41122
Oct 11 02:15:29 compute-0 python3[417192]: ansible-ansible.builtin.stat Invoked with path=/etc/rsyslog.d/10-telemetry.conf follow=False get_md5=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Oct 11 02:15:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1119: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:29 compute-0 podman[157119]: time="2025-10-11T02:15:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:15:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:15:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:15:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:15:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8527 "" "Go-http-client/1.1"
Oct 11 02:15:29 compute-0 sshd-session[417042]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:15:29 compute-0 sshd-session[417042]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:15:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:15:30 compute-0 sudo[417343]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dupadezjztxddjcywohdgzvfxrjydxot ; KUBECONFIG=/home/zuul/.crc/machines/crc/kubeconfig PATH=/home/zuul/.crc/bin:/home/zuul/.crc/bin/oc:/home/zuul/bin:/home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760148930.0520833-35286-27327596379553/AnsiballZ_setup.py'
Oct 11 02:15:30 compute-0 sudo[417343]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:15:30 compute-0 python3[417345]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Oct 11 02:15:30 compute-0 ceph-mon[191930]: pgmap v1119: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:31 compute-0 openstack_network_exporter[374316]: ERROR   02:15:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:15:31 compute-0 openstack_network_exporter[374316]: ERROR   02:15:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:15:31 compute-0 openstack_network_exporter[374316]: ERROR   02:15:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:15:31 compute-0 openstack_network_exporter[374316]: ERROR   02:15:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:15:31 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:15:31 compute-0 openstack_network_exporter[374316]: ERROR   02:15:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:15:31 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:15:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1120: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:32 compute-0 sshd-session[417042]: Failed password for invalid user user from 121.227.153.123 port 41122 ssh2
Oct 11 02:15:32 compute-0 sudo[417343]: pam_unix(sudo:session): session closed for user root
Oct 11 02:15:32 compute-0 ceph-mon[191930]: pgmap v1120: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:33 compute-0 sshd-session[417042]: Connection closed by invalid user user 121.227.153.123 port 41122 [preauth]
Oct 11 02:15:33 compute-0 sudo[417578]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-zkczursttyyajkikmntdwaqvhfhxskte ; KUBECONFIG=/home/zuul/.crc/machines/crc/kubeconfig PATH=/home/zuul/.crc/bin:/home/zuul/.crc/bin/oc:/home/zuul/bin:/home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760148933.0081067-35315-140517863568963/AnsiballZ_command.py'
Oct 11 02:15:33 compute-0 sudo[417578]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:15:33 compute-0 python3[417580]: ansible-ansible.legacy.command Invoked with _raw_params=podman ps -a --format "{{.Names}} {{.Status}}" | grep ceilometer_agent_compute
                                            _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:15:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1121: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:33 compute-0 sudo[417578]: pam_unix(sudo:session): session closed for user root
Oct 11 02:15:34 compute-0 sudo[417744]:     zuul : TTY=pts/0 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wpeayqswdjkmcquxrtlqqoqzaxjxvikg ; KUBECONFIG=/home/zuul/.crc/machines/crc/kubeconfig PATH=/home/zuul/.crc/bin:/home/zuul/.crc/bin/oc:/home/zuul/bin:/home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760148934.3156638-35332-95464839543720/AnsiballZ_command.py'
Oct 11 02:15:34 compute-0 sudo[417744]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:15:34 compute-0 sshd-session[417584]: Invalid user user from 121.227.153.123 port 41100
Oct 11 02:15:34 compute-0 python3[417746]: ansible-ansible.legacy.command Invoked with _raw_params=podman ps -a --format "{{.Names}} {{.Status}}" | grep node_exporter _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:15:34 compute-0 ceph-mon[191930]: pgmap v1121: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:34 compute-0 sudo[417744]: pam_unix(sudo:session): session closed for user root
Oct 11 02:15:35 compute-0 sshd-session[417584]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:15:35 compute-0 sshd-session[417584]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:15:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:15:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1122: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:36 compute-0 ceph-mon[191930]: pgmap v1122: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:37 compute-0 podman[417786]: 2025-10-11 02:15:37.198357267 +0000 UTC m=+0.097323334 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, config_id=edpm, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:15:37 compute-0 podman[417788]: 2025-10-11 02:15:37.228746159 +0000 UTC m=+0.109575877 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:15:37 compute-0 podman[417787]: 2025-10-11 02:15:37.237754501 +0000 UTC m=+0.126532352 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, com.redhat.component=ubi9-minimal-container, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=minimal rhel9, url=https://catalog.redhat.com/en/search?searchType=containers, io.openshift.expose-services=, build-date=2025-08-20T13:12:41, distribution-scope=public, name=ubi9-minimal, architecture=x86_64, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.buildah.version=1.33.7, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., version=9.6, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, container_name=openstack_network_exporter, maintainer=Red Hat, Inc., managed_by=edpm_ansible, release=1755695350, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b)
Oct 11 02:15:37 compute-0 sshd-session[417584]: Failed password for invalid user user from 121.227.153.123 port 41100 ssh2
Oct 11 02:15:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1123: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:38 compute-0 sshd-session[417584]: Connection closed by invalid user user 121.227.153.123 port 41100 [preauth]
Oct 11 02:15:38 compute-0 ceph-mon[191930]: pgmap v1123: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1124: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:39 compute-0 nova_compute[356901]: 2025-10-11 02:15:39.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_incomplete_migrations run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:15:39 compute-0 nova_compute[356901]: 2025-10-11 02:15:39.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances with incomplete migration  _cleanup_incomplete_migrations /usr/lib/python3.9/site-packages/nova/compute/manager.py:11183
Oct 11 02:15:39 compute-0 sshd-session[417847]: Invalid user user from 121.227.153.123 port 41114
Oct 11 02:15:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:15:40 compute-0 podman[417849]: 2025-10-11 02:15:40.260630655 +0000 UTC m=+0.144292688 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, container_name=kepler, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, name=ubi9, version=9.4, io.buildah.version=1.29.0, vcs-type=git, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, build-date=2024-09-18T21:23:30, io.openshift.expose-services=, summary=Provides the latest release of Red Hat Universal Base Image 9., managed_by=edpm_ansible, distribution-scope=public, io.openshift.tags=base rhel9, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1214.1726694543, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., com.redhat.component=ubi9-container, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, architecture=x86_64, config_id=edpm)
Oct 11 02:15:40 compute-0 sshd-session[417847]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:15:40 compute-0 sshd-session[417847]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:15:40 compute-0 nova_compute[356901]: 2025-10-11 02:15:40.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_expired_console_auth_tokens run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:15:40 compute-0 ceph-mon[191930]: pgmap v1124: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1125: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:41 compute-0 nova_compute[356901]: 2025-10-11 02:15:41.914 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:15:41 compute-0 nova_compute[356901]: 2025-10-11 02:15:41.915 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:15:42 compute-0 sshd-session[417847]: Failed password for invalid user user from 121.227.153.123 port 41114 ssh2
Oct 11 02:15:42 compute-0 ceph-mon[191930]: pgmap v1125: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1126: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:43 compute-0 sshd-session[417847]: Connection closed by invalid user user 121.227.153.123 port 41114 [preauth]
Oct 11 02:15:43 compute-0 nova_compute[356901]: 2025-10-11 02:15:43.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:15:44 compute-0 nova_compute[356901]: 2025-10-11 02:15:44.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:15:44 compute-0 nova_compute[356901]: 2025-10-11 02:15:44.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:15:44 compute-0 nova_compute[356901]: 2025-10-11 02:15:44.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:15:44 compute-0 nova_compute[356901]: 2025-10-11 02:15:44.911 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944
Oct 11 02:15:44 compute-0 ceph-mon[191930]: pgmap v1126: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:45 compute-0 sshd-session[417869]: Invalid user  from 60.188.249.64 port 53314
Oct 11 02:15:45 compute-0 sshd-session[417871]: Invalid user user from 121.227.153.123 port 54354
Oct 11 02:15:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:15:45 compute-0 sshd-session[417871]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:15:45 compute-0 sshd-session[417871]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:15:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1127: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:45 compute-0 nova_compute[356901]: 2025-10-11 02:15:45.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:15:45 compute-0 nova_compute[356901]: 2025-10-11 02:15:45.918 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._run_pending_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:15:45 compute-0 nova_compute[356901]: 2025-10-11 02:15:45.919 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11145
Oct 11 02:15:45 compute-0 nova_compute[356901]: 2025-10-11 02:15:45.932 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] There are 0 instances to clean _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11154
Oct 11 02:15:46 compute-0 nova_compute[356901]: 2025-10-11 02:15:46.911 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:15:46 compute-0 nova_compute[356901]: 2025-10-11 02:15:46.911 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:15:46 compute-0 nova_compute[356901]: 2025-10-11 02:15:46.911 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:15:46 compute-0 nova_compute[356901]: 2025-10-11 02:15:46.940 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:15:46 compute-0 nova_compute[356901]: 2025-10-11 02:15:46.941 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:15:46 compute-0 nova_compute[356901]: 2025-10-11 02:15:46.941 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:15:46 compute-0 nova_compute[356901]: 2025-10-11 02:15:46.942 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:15:46 compute-0 nova_compute[356901]: 2025-10-11 02:15:46.942 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:15:46 compute-0 ceph-mon[191930]: pgmap v1127: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:15:47 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2810828184' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:15:47 compute-0 nova_compute[356901]: 2025-10-11 02:15:47.376 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.434s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:15:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1128: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:47 compute-0 nova_compute[356901]: 2025-10-11 02:15:47.769 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:15:47 compute-0 nova_compute[356901]: 2025-10-11 02:15:47.771 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=4535MB free_disk=59.98828125GB free_vcpus=8 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:15:47 compute-0 nova_compute[356901]: 2025-10-11 02:15:47.771 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:15:47 compute-0 nova_compute[356901]: 2025-10-11 02:15:47.771 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:15:47 compute-0 sshd-session[417871]: Failed password for invalid user user from 121.227.153.123 port 54354 ssh2
Oct 11 02:15:47 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2810828184' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:15:48 compute-0 nova_compute[356901]: 2025-10-11 02:15:48.138 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 0 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:15:48 compute-0 nova_compute[356901]: 2025-10-11 02:15:48.139 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=512MB phys_disk=59GB used_disk=0GB total_vcpus=8 used_vcpus=0 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:15:48 compute-0 nova_compute[356901]: 2025-10-11 02:15:48.204 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing inventories for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:804
Oct 11 02:15:48 compute-0 nova_compute[356901]: 2025-10-11 02:15:48.267 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating ProviderTree inventory for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 from _refresh_and_get_inventory using data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} _refresh_and_get_inventory /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:768
Oct 11 02:15:48 compute-0 nova_compute[356901]: 2025-10-11 02:15:48.268 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating inventory in ProviderTree for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with inventory: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:176
Oct 11 02:15:48 compute-0 nova_compute[356901]: 2025-10-11 02:15:48.293 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing aggregate associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, aggregates: None _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:813
Oct 11 02:15:48 compute-0 nova_compute[356901]: 2025-10-11 02:15:48.320 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing trait associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, traits: COMPUTE_VOLUME_EXTEND,COMPUTE_NET_VIF_MODEL_VMXNET3,HW_CPU_X86_SSSE3,COMPUTE_RESCUE_BFV,COMPUTE_SOCKET_PCI_NUMA_AFFINITY,COMPUTE_NODE,HW_CPU_X86_SVM,COMPUTE_STORAGE_BUS_SCSI,HW_CPU_X86_FMA3,COMPUTE_GRAPHICS_MODEL_NONE,COMPUTE_NET_VIF_MODEL_RTL8139,HW_CPU_X86_SSE4A,COMPUTE_IMAGE_TYPE_QCOW2,HW_CPU_X86_BMI2,HW_CPU_X86_SSE42,HW_CPU_X86_AVX2,COMPUTE_IMAGE_TYPE_RAW,COMPUTE_VIOMMU_MODEL_VIRTIO,HW_CPU_X86_AESNI,COMPUTE_STORAGE_BUS_FDC,COMPUTE_GRAPHICS_MODEL_VIRTIO,HW_CPU_X86_AMD_SVM,COMPUTE_NET_VIF_MODEL_NE2K_PCI,COMPUTE_ACCELERATORS,HW_CPU_X86_SSE2,COMPUTE_GRAPHICS_MODEL_VGA,HW_CPU_X86_ABM,HW_CPU_X86_AVX,COMPUTE_NET_VIF_MODEL_E1000,COMPUTE_STORAGE_BUS_USB,COMPUTE_NET_ATTACH_INTERFACE,HW_CPU_X86_MMX,COMPUTE_SECURITY_TPM_2_0,COMPUTE_IMAGE_TYPE_ISO,HW_CPU_X86_SSE41,COMPUTE_IMAGE_TYPE_AKI,COMPUTE_IMAGE_TYPE_AMI,COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG,COMPUTE_DEVICE_TAGGING,COMPUTE_SECURITY_UEFI_SECURE_BOOT,COMPUTE_TRUSTED_CERTS,COMPUTE_NET_VIF_MODEL_VIRTIO,COMPUTE_VIOMMU_MODEL_INTEL,COMPUTE_STORAGE_BUS_SATA,HW_CPU_X86_SSE,COMPUTE_STORAGE_BUS_VIRTIO,COMPUTE_NET_VIF_MODEL_PCNET,COMPUTE_GRAPHICS_MODEL_CIRRUS,HW_CPU_X86_SHA,HW_CPU_X86_BMI,COMPUTE_NET_VIF_MODEL_E1000E,COMPUTE_NET_VIF_MODEL_SPAPR_VLAN,COMPUTE_VOLUME_ATTACH_WITH_TAG,COMPUTE_GRAPHICS_MODEL_BOCHS,COMPUTE_VIOMMU_MODEL_AUTO,COMPUTE_IMAGE_TYPE_ARI,HW_CPU_X86_CLMUL,COMPUTE_STORAGE_BUS_IDE,COMPUTE_VOLUME_MULTI_ATTACH,HW_CPU_X86_F16C,COMPUTE_SECURITY_TPM_1_2 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:825
Oct 11 02:15:48 compute-0 nova_compute[356901]: 2025-10-11 02:15:48.339 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:15:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:15:48 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/4164449349' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:15:48 compute-0 nova_compute[356901]: 2025-10-11 02:15:48.784 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.444s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:15:48 compute-0 nova_compute[356901]: 2025-10-11 02:15:48.794 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:15:48 compute-0 nova_compute[356901]: 2025-10-11 02:15:48.812 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:15:48 compute-0 nova_compute[356901]: 2025-10-11 02:15:48.814 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:15:48 compute-0 nova_compute[356901]: 2025-10-11 02:15:48.814 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 1.043s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:15:48 compute-0 sshd-session[417871]: Connection closed by invalid user user 121.227.153.123 port 54354 [preauth]
Oct 11 02:15:49 compute-0 ceph-mon[191930]: pgmap v1128: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:49 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/4164449349' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:15:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1129: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:49 compute-0 nova_compute[356901]: 2025-10-11 02:15:49.799 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:15:49 compute-0 nova_compute[356901]: 2025-10-11 02:15:49.800 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:15:49 compute-0 nova_compute[356901]: 2025-10-11 02:15:49.800 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:15:49 compute-0 sshd-session[417869]: Connection closed by invalid user  60.188.249.64 port 53314 [preauth]
Oct 11 02:15:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:15:50 compute-0 sshd-session[417917]: Invalid user user from 121.227.153.123 port 47574
Oct 11 02:15:50 compute-0 podman[417920]: 2025-10-11 02:15:50.469120902 +0000 UTC m=+0.108188595 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:15:50 compute-0 podman[417922]: 2025-10-11 02:15:50.487188109 +0000 UTC m=+0.101765468 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=edpm, org.label-schema.name=CentOS Stream 10 Base Image)
Oct 11 02:15:50 compute-0 podman[417928]: 2025-10-11 02:15:50.507901054 +0000 UTC m=+0.108132413 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, config_id=ovn_metadata_agent, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, tcib_managed=true, container_name=ovn_metadata_agent, managed_by=edpm_ansible)
Oct 11 02:15:50 compute-0 podman[417921]: 2025-10-11 02:15:50.519472441 +0000 UTC m=+0.140579091 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.schema-version=1.0, container_name=ovn_controller, org.label-schema.license=GPLv2)
Oct 11 02:15:50 compute-0 sshd-session[417917]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:15:50 compute-0 sshd-session[417917]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:15:51 compute-0 ceph-mon[191930]: pgmap v1129: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1130: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:52 compute-0 sshd-session[417917]: Failed password for invalid user user from 121.227.153.123 port 47574 ssh2
Oct 11 02:15:53 compute-0 ceph-mon[191930]: pgmap v1130: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1131: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:54 compute-0 sshd-session[417917]: Connection closed by invalid user user 121.227.153.123 port 47574 [preauth]
Oct 11 02:15:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:15:54.839 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:15:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:15:54.839 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:15:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:15:54.839 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:15:55 compute-0 ceph-mon[191930]: pgmap v1131: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:15:55 compute-0 podman[418005]: 2025-10-11 02:15:55.212325778 +0000 UTC m=+0.108393273 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, container_name=multipathd, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:15:55 compute-0 podman[418006]: 2025-10-11 02:15:55.238953171 +0000 UTC m=+0.120491519 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, config_id=iscsid, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=iscsid, io.buildah.version=1.41.3)
Oct 11 02:15:55 compute-0 sshd-session[418003]: Invalid user user from 121.227.153.123 port 47582
Oct 11 02:15:55 compute-0 sshd-session[418003]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:15:55 compute-0 sshd-session[418003]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:15:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1132: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:15:56
Oct 11 02:15:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:15:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:15:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['cephfs.cephfs.meta', 'default.rgw.meta', 'backups', 'volumes', '.mgr', 'default.rgw.control', 'vms', '.rgw.root', 'cephfs.cephfs.data', 'images', 'default.rgw.log']
Oct 11 02:15:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:15:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:15:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:15:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:15:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:15:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:15:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:15:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:15:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:15:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:15:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:15:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:15:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:15:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:15:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:15:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:15:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:15:57 compute-0 ceph-mon[191930]: pgmap v1132: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:57 compute-0 sshd-session[418003]: Failed password for invalid user user from 121.227.153.123 port 47582 ssh2
Oct 11 02:15:57 compute-0 sshd-session[418003]: Connection closed by invalid user user 121.227.153.123 port 47582 [preauth]
Oct 11 02:15:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1133: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:58 compute-0 sshd-session[418044]: Invalid user user from 121.227.153.123 port 47588
Oct 11 02:15:59 compute-0 ceph-mon[191930]: pgmap v1133: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:59 compute-0 sshd-session[418044]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:15:59 compute-0 sshd-session[418044]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:15:59 compute-0 podman[157119]: time="2025-10-11T02:15:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:15:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1134: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:15:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:15:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:15:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:15:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8530 "" "Go-http-client/1.1"
Oct 11 02:16:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:16:01 compute-0 ceph-mon[191930]: pgmap v1134: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:01 compute-0 sshd-session[418044]: Failed password for invalid user user from 121.227.153.123 port 47588 ssh2
Oct 11 02:16:01 compute-0 openstack_network_exporter[374316]: ERROR   02:16:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:16:01 compute-0 openstack_network_exporter[374316]: ERROR   02:16:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:16:01 compute-0 openstack_network_exporter[374316]: ERROR   02:16:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:16:01 compute-0 openstack_network_exporter[374316]: ERROR   02:16:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:16:01 compute-0 openstack_network_exporter[374316]: ERROR   02:16:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:16:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1135: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:02 compute-0 sshd-session[418044]: Connection closed by invalid user user 121.227.153.123 port 47588 [preauth]
Oct 11 02:16:03 compute-0 ceph-mon[191930]: pgmap v1135: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:03 compute-0 sudo[418048]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:16:03 compute-0 sudo[418048]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:16:03 compute-0 sudo[418048]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:03 compute-0 sudo[418073]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:16:03 compute-0 sudo[418073]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:16:03 compute-0 sudo[418073]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:03 compute-0 sudo[418098]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:16:03 compute-0 sudo[418098]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:16:03 compute-0 sudo[418098]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1136: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:03 compute-0 sudo[418123]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:16:03 compute-0 sudo[418123]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:16:04 compute-0 sshd-session[418046]: Invalid user user from 121.227.153.123 port 43156
Oct 11 02:16:04 compute-0 sshd-session[418046]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:16:04 compute-0 sshd-session[418046]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:16:04 compute-0 sudo[418123]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"} v 0) v1
Oct 11 02:16:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"}]: dispatch
Oct 11 02:16:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:16:04 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:16:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:16:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:16:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:16:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:16:04 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev e51e4198-d43f-4008-b78b-1937792aae05 does not exist
Oct 11 02:16:04 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 37fa26d0-c82a-4a4a-a261-4215e3888cf7 does not exist
Oct 11 02:16:04 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 3c28eb6b-817c-482a-890b-587eeeac26da does not exist
Oct 11 02:16:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:16:04 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:16:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:16:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:16:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:16:04 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
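Each handle_command/audit pair above is the mgr's cephadm module driving the mon through the ordinary mon-command interface. The same requests can be issued from Python with the librados binding; a sketch, assuming /etc/ceph/ceph.conf and a readable admin keyring on this host:

    import json

    import rados  # python3-rados binding

    cluster = rados.Rados(conffile="/etc/ceph/ceph.conf")
    cluster.connect()
    # the same command the mgr dispatches twice at 02:16:04
    ret, outbuf, outs = cluster.mon_command(
        json.dumps({"prefix": "config generate-minimal-conf"}), b"")
    print(ret, outbuf.decode())
    cluster.shutdown()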
Oct 11 02:16:04 compute-0 sudo[418177]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:16:04 compute-0 sudo[418177]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:16:04 compute-0 sudo[418177]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:04 compute-0 sudo[418202]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:16:04 compute-0 sudo[418202]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:16:04 compute-0 sudo[418202]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:05 compute-0 sudo[418227]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:16:05 compute-0 sudo[418227]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:16:05 compute-0 sudo[418227]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:05 compute-0 ceph-mon[191930]: pgmap v1136: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"}]: dispatch
Oct 11 02:16:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:16:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:16:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:16:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:16:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:16:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:16:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:16:05 compute-0 sudo[418252]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:16:05 compute-0 sudo[418252]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
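The cephadm shim started here does not run ceph-volume on the host: it launches it inside a one-shot container from the pinned ceph image, which is the podman create/start/died/remove sequence that follows. A rough, simplified equivalent of that launch (the flag set is an assumption; the real shim adds many bind mounts and feeds the keyring via --config-json on stdin, elided as '-' in the command above):

    import subprocess

    IMAGE = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")

    subprocess.run(
        ["podman", "run", "--rm", "--privileged", "--net=host",
         "-e", "CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group",
         IMAGE, "ceph-volume",
         "lvm", "batch", "--no-auto",
         "/dev/ceph_vg0/ceph_lv0", "/dev/ceph_vg1/ceph_lv1",
         "/dev/ceph_vg2/ceph_lv2",
         "--yes", "--no-systemd"],
        check=False)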
Oct 11 02:16:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1137: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:05 compute-0 podman[418316]: 2025-10-11 02:16:05.80134916 +0000 UTC m=+0.116142064 container create 7070b4496a49055e5fb33359f6b0427d6d9022effefeb0117c7ff4bd46f1b943 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_bassi, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3)
Oct 11 02:16:05 compute-0 podman[418316]: 2025-10-11 02:16:05.747469322 +0000 UTC m=+0.062262276 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:16:05 compute-0 systemd[1]: Started libpod-conmon-7070b4496a49055e5fb33359f6b0427d6d9022effefeb0117c7ff4bd46f1b943.scope.
Oct 11 02:16:05 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:16:05 compute-0 podman[418316]: 2025-10-11 02:16:05.96321613 +0000 UTC m=+0.278009014 container init 7070b4496a49055e5fb33359f6b0427d6d9022effefeb0117c7ff4bd46f1b943 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_bassi, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:16:05 compute-0 podman[418316]: 2025-10-11 02:16:05.985805937 +0000 UTC m=+0.300598841 container start 7070b4496a49055e5fb33359f6b0427d6d9022effefeb0117c7ff4bd46f1b943 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_bassi, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 02:16:05 compute-0 podman[418316]: 2025-10-11 02:16:05.992514929 +0000 UTC m=+0.307307833 container attach 7070b4496a49055e5fb33359f6b0427d6d9022effefeb0117c7ff4bd46f1b943 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_bassi, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:16:05 compute-0 bold_bassi[418332]: 167 167
Oct 11 02:16:05 compute-0 systemd[1]: libpod-7070b4496a49055e5fb33359f6b0427d6d9022effefeb0117c7ff4bd46f1b943.scope: Deactivated successfully.
Oct 11 02:16:06 compute-0 conmon[418332]: conmon 7070b4496a49055e5fb3 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-7070b4496a49055e5fb33359f6b0427d6d9022effefeb0117c7ff4bd46f1b943.scope/container/memory.events
Oct 11 02:16:06 compute-0 podman[418316]: 2025-10-11 02:16:06.002078873 +0000 UTC m=+0.316871777 container died 7070b4496a49055e5fb33359f6b0427d6d9022effefeb0117c7ff4bd46f1b943 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_bassi, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:16:06 compute-0 systemd[1]: var-lib-containers-storage-overlay-ad00c3569c568c838d5800df66d5810d7367f819dfcf7e3602272e4845bb0d45-merged.mount: Deactivated successfully.
Oct 11 02:16:06 compute-0 podman[418316]: 2025-10-11 02:16:06.093117565 +0000 UTC m=+0.407910469 container remove 7070b4496a49055e5fb33359f6b0427d6d9022effefeb0117c7ff4bd46f1b943 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=bold_bassi, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:16:06 compute-0 systemd[1]: libpod-conmon-7070b4496a49055e5fb33359f6b0427d6d9022effefeb0117c7ff4bd46f1b943.scope: Deactivated successfully.
Oct 11 02:16:06 compute-0 ceph-mon[191930]: pgmap v1137: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:06 compute-0 podman[418355]: 2025-10-11 02:16:06.394223786 +0000 UTC m=+0.076735157 container create 4918e74b9831326f3b6afd8ec3620883198230dcf2d423aa69e66c303e26be31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_raman, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:16:06 compute-0 podman[418355]: 2025-10-11 02:16:06.360821384 +0000 UTC m=+0.043332795 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:16:06 compute-0 systemd[1]: Started libpod-conmon-4918e74b9831326f3b6afd8ec3620883198230dcf2d423aa69e66c303e26be31.scope.
Oct 11 02:16:06 compute-0 sshd-session[418046]: Failed password for invalid user user from 121.227.153.123 port 43156 ssh2
Oct 11 02:16:06 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:16:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/33382a801aed533389b0f473660f5292390f77f43a0cb33923d966dbfc516e1a/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:16:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/33382a801aed533389b0f473660f5292390f77f43a0cb33923d966dbfc516e1a/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:16:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/33382a801aed533389b0f473660f5292390f77f43a0cb33923d966dbfc516e1a/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:16:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/33382a801aed533389b0f473660f5292390f77f43a0cb33923d966dbfc516e1a/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:16:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/33382a801aed533389b0f473660f5292390f77f43a0cb33923d966dbfc516e1a/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
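These xfs messages are informational: the remounted overlay paths carry 32-bit inode timestamps, and 0x7fffffff seconds after the Unix epoch is the classic year-2038 limit:

    from datetime import datetime, timezone

    # 0x7fffffff is the largest 32-bit signed time_t value
    print(datetime.fromtimestamp(0x7FFFFFFF, tz=timezone.utc))
    # -> 2038-01-19 03:14:07+00:00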
Oct 11 02:16:06 compute-0 podman[418355]: 2025-10-11 02:16:06.582162455 +0000 UTC m=+0.264673846 container init 4918e74b9831326f3b6afd8ec3620883198230dcf2d423aa69e66c303e26be31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_raman, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2)
Oct 11 02:16:06 compute-0 podman[418355]: 2025-10-11 02:16:06.602428373 +0000 UTC m=+0.284939714 container start 4918e74b9831326f3b6afd8ec3620883198230dcf2d423aa69e66c303e26be31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_raman, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3)
Oct 11 02:16:06 compute-0 podman[418355]: 2025-10-11 02:16:06.607422947 +0000 UTC m=+0.289934278 container attach 4918e74b9831326f3b6afd8ec3620883198230dcf2d423aa69e66c303e26be31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_raman, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:16:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
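The pg_autoscaler figures above are internally consistent: with 3 OSDs and the default mon_target_pg_per_osd of 100, the raw target is capacity_ratio x bias x 300, which is then quantized to a power of two no lower than the pool's pg_num_min. A worked sketch (the quantize() below is a simplifying assumption; the real module also applies a 3x hysteresis threshold against the current pg_num before recommending a change):

    import math

    def raw_pg_target(capacity_ratio, bias, osds=3, target_pg_per_osd=100):
        # matches the 'using ... of space, bias ..., pg target ...' lines
        return capacity_ratio * bias * osds * target_pg_per_osd

    def quantize(raw, pg_num_min=1):
        # nearest power of two, floored at pg_num_min (simplified)
        return max(pg_num_min, 2 ** round(math.log2(max(raw, 1e-9))))

    print(raw_pg_target(7.185749983720779e-06, 1.0))  # 0.0021557249951... ('.mgr')
    print(raw_pg_target(5.087256625643029e-07, 4.0))  # 0.0006104707950... (cephfs meta)
    print(quantize(0.0006104707950771635, pg_num_min=16))  # 16, as logged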
Oct 11 02:16:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1138: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:07 compute-0 romantic_raman[418371]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:16:07 compute-0 romantic_raman[418371]: --> relative data size: 1.0
Oct 11 02:16:07 compute-0 romantic_raman[418371]: --> All data devices are unavailable
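"All data devices are unavailable" is expected here rather than a failure: the three LVs already carry ceph.* LV tags from an earlier prepare (the lvm list output below shows them bound to osd.0 through osd.2), so the batch run has nothing left to create. One way to see that claim directly with the stock lvs tool:

    import subprocess

    def lv_tags(lv_path):
        out = subprocess.run(
            ["lvs", "--noheadings", "-o", "lv_tags", lv_path],
            capture_output=True, text=True, check=True)
        return out.stdout.strip()

    for lv in ("/dev/ceph_vg0/ceph_lv0", "/dev/ceph_vg1/ceph_lv1",
               "/dev/ceph_vg2/ceph_lv2"):
        claimed = "ceph.osd_id=" in lv_tags(lv)
        print(lv, "already prepared as an OSD" if claimed else "unclaimed")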
Oct 11 02:16:07 compute-0 sshd-session[418046]: Connection closed by invalid user user 121.227.153.123 port 43156 [preauth]
Oct 11 02:16:07 compute-0 podman[418355]: 2025-10-11 02:16:07.946221396 +0000 UTC m=+1.628732777 container died 4918e74b9831326f3b6afd8ec3620883198230dcf2d423aa69e66c303e26be31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_raman, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default)
Oct 11 02:16:07 compute-0 systemd[1]: libpod-4918e74b9831326f3b6afd8ec3620883198230dcf2d423aa69e66c303e26be31.scope: Deactivated successfully.
Oct 11 02:16:07 compute-0 systemd[1]: libpod-4918e74b9831326f3b6afd8ec3620883198230dcf2d423aa69e66c303e26be31.scope: Consumed 1.293s CPU time.
Oct 11 02:16:08 compute-0 systemd[1]: var-lib-containers-storage-overlay-33382a801aed533389b0f473660f5292390f77f43a0cb33923d966dbfc516e1a-merged.mount: Deactivated successfully.
Oct 11 02:16:08 compute-0 podman[418355]: 2025-10-11 02:16:08.05657137 +0000 UTC m=+1.739082721 container remove 4918e74b9831326f3b6afd8ec3620883198230dcf2d423aa69e66c303e26be31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_raman, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 02:16:08 compute-0 systemd[1]: libpod-conmon-4918e74b9831326f3b6afd8ec3620883198230dcf2d423aa69e66c303e26be31.scope: Deactivated successfully.
Oct 11 02:16:08 compute-0 sudo[418252]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:08 compute-0 podman[418404]: 2025-10-11 02:16:08.122415945 +0000 UTC m=+0.116094779 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., release=1755695350, vcs-type=git, container_name=openstack_network_exporter, io.openshift.expose-services=, architecture=x86_64, com.redhat.component=ubi9-minimal-container, io.buildah.version=1.33.7, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, distribution-scope=public, vendor=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, build-date=2025-08-20T13:12:41, managed_by=edpm_ansible, version=9.6, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm, name=ubi9-minimal)
Oct 11 02:16:08 compute-0 podman[418405]: 2025-10-11 02:16:08.12532329 +0000 UTC m=+0.115721109 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:16:08 compute-0 podman[418401]: 2025-10-11 02:16:08.159961112 +0000 UTC m=+0.148281033 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, container_name=ceilometer_agent_ipmi)
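The three health_status=healthy events are podman's scheduled healthchecks for the edpm-managed telemetry containers. The same probe can be run on demand; exit status 0 means healthy:

    import subprocess

    for name in ("openstack_network_exporter", "node_exporter",
                 "ceilometer_agent_ipmi"):
        rc = subprocess.run(["podman", "healthcheck", "run", name],
                            capture_output=True).returncode
        print(name, "healthy" if rc == 0 else "unhealthy (rc=%d)" % rc)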
Oct 11 02:16:08 compute-0 sudo[418470]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:16:08 compute-0 sudo[418470]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:16:08 compute-0 sudo[418470]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:08 compute-0 sudo[418498]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:16:08 compute-0 sudo[418498]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:16:08 compute-0 sudo[418498]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:08 compute-0 sudo[418523]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:16:08 compute-0 sudo[418523]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:16:08 compute-0 sudo[418523]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:08 compute-0 sudo[418548]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:16:08 compute-0 sudo[418548]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:16:08 compute-0 ceph-mon[191930]: pgmap v1138: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:09 compute-0 sshd-session[418492]: Invalid user user from 121.227.153.123 port 43164
Oct 11 02:16:09 compute-0 podman[418615]: 2025-10-11 02:16:09.210487449 +0000 UTC m=+0.077050912 container create ecda45cb3af30896cb41d75b8c280bab53032eb78f24905c0057a6f245156128 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_sammet, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 02:16:09 compute-0 podman[418615]: 2025-10-11 02:16:09.182592333 +0000 UTC m=+0.049155776 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:16:09 compute-0 systemd[1]: Started libpod-conmon-ecda45cb3af30896cb41d75b8c280bab53032eb78f24905c0057a6f245156128.scope.
Oct 11 02:16:09 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:16:09 compute-0 podman[418615]: 2025-10-11 02:16:09.357816403 +0000 UTC m=+0.224379866 container init ecda45cb3af30896cb41d75b8c280bab53032eb78f24905c0057a6f245156128 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_sammet, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:16:09 compute-0 podman[418615]: 2025-10-11 02:16:09.379746127 +0000 UTC m=+0.246309560 container start ecda45cb3af30896cb41d75b8c280bab53032eb78f24905c0057a6f245156128 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_sammet, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 02:16:09 compute-0 podman[418615]: 2025-10-11 02:16:09.384843289 +0000 UTC m=+0.251406762 container attach ecda45cb3af30896cb41d75b8c280bab53032eb78f24905c0057a6f245156128 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_sammet, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:16:09 compute-0 strange_sammet[418631]: 167 167
Oct 11 02:16:09 compute-0 systemd[1]: libpod-ecda45cb3af30896cb41d75b8c280bab53032eb78f24905c0057a6f245156128.scope: Deactivated successfully.
Oct 11 02:16:09 compute-0 podman[418615]: 2025-10-11 02:16:09.394565745 +0000 UTC m=+0.261129178 container died ecda45cb3af30896cb41d75b8c280bab53032eb78f24905c0057a6f245156128 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_sammet, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:16:09 compute-0 sshd-session[418492]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:16:09 compute-0 sshd-session[418492]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
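Interleaved with the deployment is a slow SSH brute-force from 121.227.153.123 (always the invalid user 'user', rotating source ports). A quick tally of such attempts from the journal, assuming the sshd-session syslog identifier seen in these lines:

    import collections
    import re
    import subprocess

    out = subprocess.run(
        ["journalctl", "-t", "sshd-session", "-o", "cat"],
        capture_output=True, text=True, check=True).stdout
    attempts = collections.Counter(
        m.group(1) for m in re.finditer(r"Invalid user \S+ from (\S+)", out))
    for host, count in attempts.most_common(10):
        print(host, count)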
Oct 11 02:16:09 compute-0 systemd[1]: var-lib-containers-storage-overlay-b37fa441be2a9cbc66c3afea56e3af010c6b55ecdd0afdd63ba23992596064fa-merged.mount: Deactivated successfully.
Oct 11 02:16:09 compute-0 podman[418615]: 2025-10-11 02:16:09.456847512 +0000 UTC m=+0.323410975 container remove ecda45cb3af30896cb41d75b8c280bab53032eb78f24905c0057a6f245156128 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_sammet, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
Oct 11 02:16:09 compute-0 systemd[1]: libpod-conmon-ecda45cb3af30896cb41d75b8c280bab53032eb78f24905c0057a6f245156128.scope: Deactivated successfully.
Oct 11 02:16:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1139: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:09 compute-0 podman[418654]: 2025-10-11 02:16:09.787650385 +0000 UTC m=+0.138506563 container create 365a812c8026b5edb2afe7ef826d3281acfee908fd67d9c2773ff09bff27df01 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_golick, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0)
Oct 11 02:16:09 compute-0 podman[418654]: 2025-10-11 02:16:09.717826808 +0000 UTC m=+0.068683056 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:16:09 compute-0 systemd[1]: Started libpod-conmon-365a812c8026b5edb2afe7ef826d3281acfee908fd67d9c2773ff09bff27df01.scope.
Oct 11 02:16:09 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:16:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/70a22195e49435a71a8ec74c8395c8d1590b984adf208a4bb5b3cd408d10bffe/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:16:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/70a22195e49435a71a8ec74c8395c8d1590b984adf208a4bb5b3cd408d10bffe/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:16:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/70a22195e49435a71a8ec74c8395c8d1590b984adf208a4bb5b3cd408d10bffe/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:16:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/70a22195e49435a71a8ec74c8395c8d1590b984adf208a4bb5b3cd408d10bffe/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:16:09 compute-0 podman[418654]: 2025-10-11 02:16:09.950963082 +0000 UTC m=+0.301819290 container init 365a812c8026b5edb2afe7ef826d3281acfee908fd67d9c2773ff09bff27df01 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_golick, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:16:09 compute-0 podman[418654]: 2025-10-11 02:16:09.98345846 +0000 UTC m=+0.334314648 container start 365a812c8026b5edb2afe7ef826d3281acfee908fd67d9c2773ff09bff27df01 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_golick, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:16:09 compute-0 podman[418654]: 2025-10-11 02:16:09.990593167 +0000 UTC m=+0.341449345 container attach 365a812c8026b5edb2afe7ef826d3281acfee908fd67d9c2773ff09bff27df01 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_golick, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:16:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:16:10 compute-0 silly_golick[418667]: {
Oct 11 02:16:10 compute-0 silly_golick[418667]:     "0": [
Oct 11 02:16:10 compute-0 silly_golick[418667]:         {
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "devices": [
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "/dev/loop3"
Oct 11 02:16:10 compute-0 silly_golick[418667]:             ],
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "lv_name": "ceph_lv0",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "lv_size": "21470642176",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "name": "ceph_lv0",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "tags": {
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.cluster_name": "ceph",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.crush_device_class": "",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.encrypted": "0",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.osd_id": "0",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.type": "block",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.vdo": "0"
Oct 11 02:16:10 compute-0 silly_golick[418667]:             },
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "type": "block",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "vg_name": "ceph_vg0"
Oct 11 02:16:10 compute-0 silly_golick[418667]:         }
Oct 11 02:16:10 compute-0 silly_golick[418667]:     ],
Oct 11 02:16:10 compute-0 silly_golick[418667]:     "1": [
Oct 11 02:16:10 compute-0 silly_golick[418667]:         {
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "devices": [
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "/dev/loop4"
Oct 11 02:16:10 compute-0 silly_golick[418667]:             ],
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "lv_name": "ceph_lv1",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "lv_size": "21470642176",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "name": "ceph_lv1",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "tags": {
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.cluster_name": "ceph",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.crush_device_class": "",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.encrypted": "0",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.osd_id": "1",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.type": "block",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.vdo": "0"
Oct 11 02:16:10 compute-0 silly_golick[418667]:             },
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "type": "block",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "vg_name": "ceph_vg1"
Oct 11 02:16:10 compute-0 silly_golick[418667]:         }
Oct 11 02:16:10 compute-0 silly_golick[418667]:     ],
Oct 11 02:16:10 compute-0 silly_golick[418667]:     "2": [
Oct 11 02:16:10 compute-0 silly_golick[418667]:         {
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "devices": [
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "/dev/loop5"
Oct 11 02:16:10 compute-0 silly_golick[418667]:             ],
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "lv_name": "ceph_lv2",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "lv_size": "21470642176",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "name": "ceph_lv2",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "tags": {
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.cluster_name": "ceph",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.crush_device_class": "",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.encrypted": "0",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.osd_id": "2",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.type": "block",
Oct 11 02:16:10 compute-0 silly_golick[418667]:                 "ceph.vdo": "0"
Oct 11 02:16:10 compute-0 silly_golick[418667]:             },
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "type": "block",
Oct 11 02:16:10 compute-0 silly_golick[418667]:             "vg_name": "ceph_vg2"
Oct 11 02:16:10 compute-0 silly_golick[418667]:         }
Oct 11 02:16:10 compute-0 silly_golick[418667]:     ]
Oct 11 02:16:10 compute-0 silly_golick[418667]: }
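The per-OSD inventory from ceph-volume lvm list --format json arrives split across journal lines; dropping the syslog prefix yields a parseable document. A sketch that recovers it from the journal (assuming the one-shot container's identifier, silly_golick, as tagged above, and that its output is the only thing logged under that name):

    import json
    import subprocess

    raw = subprocess.run(
        ["journalctl", "-t", "silly_golick", "-o", "cat"],
        capture_output=True, text=True, check=True).stdout
    osds = json.loads(raw)
    for osd_id, lvs in sorted(osds.items()):
        for lv in lvs:
            print("osd." + osd_id, lv["lv_path"], lv["tags"]["ceph.osd_fsid"])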
Oct 11 02:16:10 compute-0 systemd[1]: libpod-365a812c8026b5edb2afe7ef826d3281acfee908fd67d9c2773ff09bff27df01.scope: Deactivated successfully.
Oct 11 02:16:10 compute-0 ceph-mon[191930]: pgmap v1139: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:10 compute-0 podman[418654]: 2025-10-11 02:16:10.851765181 +0000 UTC m=+1.202621369 container died 365a812c8026b5edb2afe7ef826d3281acfee908fd67d9c2773ff09bff27df01 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_golick, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Oct 11 02:16:10 compute-0 systemd[1]: var-lib-containers-storage-overlay-70a22195e49435a71a8ec74c8395c8d1590b984adf208a4bb5b3cd408d10bffe-merged.mount: Deactivated successfully.
Oct 11 02:16:10 compute-0 podman[418654]: 2025-10-11 02:16:10.964354256 +0000 UTC m=+1.315210414 container remove 365a812c8026b5edb2afe7ef826d3281acfee908fd67d9c2773ff09bff27df01 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_golick, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 02:16:10 compute-0 systemd[1]: libpod-conmon-365a812c8026b5edb2afe7ef826d3281acfee908fd67d9c2773ff09bff27df01.scope: Deactivated successfully.
Oct 11 02:16:11 compute-0 sudo[418548]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:11 compute-0 podman[418678]: 2025-10-11 02:16:11.066939282 +0000 UTC m=+0.163110572 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=kepler, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, build-date=2024-09-18T21:23:30, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1214.1726694543, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, managed_by=edpm_ansible, com.redhat.component=ubi9-container, io.buildah.version=1.29.0, summary=Provides the latest release of Red Hat Universal Base Image 9., io.openshift.expose-services=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vendor=Red Hat, Inc., architecture=x86_64, name=ubi9, vcs-type=git, maintainer=Red Hat, Inc., distribution-scope=public, version=9.4)
Oct 11 02:16:11 compute-0 sudo[418706]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:16:11 compute-0 sudo[418706]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:16:11 compute-0 sudo[418706]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:11 compute-0 sudo[418733]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:16:11 compute-0 sudo[418733]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:16:11 compute-0 sudo[418733]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:11 compute-0 sudo[418758]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:16:11 compute-0 sudo[418758]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:16:11 compute-0 sudo[418758]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:11 compute-0 sudo[418783]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
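
This sudo line is the cephadm mgr module at work: the orchestrator keeps a copy of cephadm at /var/lib/ceph/<fsid>/cephadm.<digest> on each host and runs it as root, and `ceph-volume ... -- raw list --format json` launches the one-shot ceph container whose output appears below. The same inventory call can be reproduced by hand; a sketch, with the fsid, digest, and image pin copied verbatim from the audit line above (requires root):

    import json
    import subprocess

    FSID = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"
    CEPHADM = (f"/var/lib/ceph/{FSID}/cephadm."
               "31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d")

    # Re-issue the exact inventory command from the audit line; stdout should be
    # the raw-list JSON shown further down in the log.
    out = subprocess.run(
        ["sudo", "/bin/python3", CEPHADM,
         "--image", "quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0",
         "--timeout", "895",
         "ceph-volume", "--fsid", FSID, "--", "raw", "list", "--format", "json"],
        check=True, capture_output=True, text=True).stdout
    print(sorted(json.loads(out)))
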
Oct 11 02:16:11 compute-0 sudo[418783]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:16:11 compute-0 sshd-session[418492]: Failed password for invalid user user from 121.227.153.123 port 43164 ssh2
Oct 11 02:16:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1140: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:12 compute-0 podman[418847]: 2025-10-11 02:16:12.162186565 +0000 UTC m=+0.081615692 container create a0b3055bad9f9257781b86cd422e9cd256c30dd07320a551f6d6e078d7e4e069 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_bose, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2)
Oct 11 02:16:12 compute-0 podman[418847]: 2025-10-11 02:16:12.131727212 +0000 UTC m=+0.051156409 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:16:12 compute-0 systemd[1]: Started libpod-conmon-a0b3055bad9f9257781b86cd422e9cd256c30dd07320a551f6d6e078d7e4e069.scope.
Oct 11 02:16:12 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:16:12 compute-0 podman[418847]: 2025-10-11 02:16:12.326175037 +0000 UTC m=+0.245604254 container init a0b3055bad9f9257781b86cd422e9cd256c30dd07320a551f6d6e078d7e4e069 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_bose, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Oct 11 02:16:12 compute-0 podman[418847]: 2025-10-11 02:16:12.344877289 +0000 UTC m=+0.264306416 container start a0b3055bad9f9257781b86cd422e9cd256c30dd07320a551f6d6e078d7e4e069 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_bose, org.label-schema.build-date=20250507, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:16:12 compute-0 podman[418847]: 2025-10-11 02:16:12.350278056 +0000 UTC m=+0.269707183 container attach a0b3055bad9f9257781b86cd422e9cd256c30dd07320a551f6d6e078d7e4e069 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_bose, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:16:12 compute-0 priceless_bose[418862]: 167 167
Oct 11 02:16:12 compute-0 systemd[1]: libpod-a0b3055bad9f9257781b86cd422e9cd256c30dd07320a551f6d6e078d7e4e069.scope: Deactivated successfully.
Oct 11 02:16:12 compute-0 podman[418847]: 2025-10-11 02:16:12.35936068 +0000 UTC m=+0.278789827 container died a0b3055bad9f9257781b86cd422e9cd256c30dd07320a551f6d6e078d7e4e069 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_bose, OSD_FLAVOR=default, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:16:12 compute-0 systemd[1]: var-lib-containers-storage-overlay-cb86cf8bd2effb6e40106a4006206ee9244f0d97dfa25919c0a32face0cbc1c2-merged.mount: Deactivated successfully.
Oct 11 02:16:12 compute-0 podman[418847]: 2025-10-11 02:16:12.43997394 +0000 UTC m=+0.359403067 container remove a0b3055bad9f9257781b86cd422e9cd256c30dd07320a551f6d6e078d7e4e069 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_bose, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:16:12 compute-0 systemd[1]: libpod-conmon-a0b3055bad9f9257781b86cd422e9cd256c30dd07320a551f6d6e078d7e4e069.scope: Deactivated successfully.
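
The priceless_bose container above lives for a fraction of a second and prints only "167 167", which matches the ceph user and group ids inside the Reef image; this is consistent with cephadm probing the image's uid/gid before running ceph-volume. A hypothetical reproduction (the stat command is an assumption, not necessarily the literal command cephadm ran):

    import subprocess

    IMAGE = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")

    # Ask the image who owns /var/lib/ceph; in the Reef image that is the
    # ceph user and group, uid 167 / gid 167, matching the output above.
    out = subprocess.run(
        ["podman", "run", "--rm", IMAGE, "stat", "-c", "%u %g", "/var/lib/ceph"],
        check=True, capture_output=True, text=True).stdout
    print(out.strip())   # expected: 167 167
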
Oct 11 02:16:12 compute-0 podman[418887]: 2025-10-11 02:16:12.68418995 +0000 UTC m=+0.084808910 container create 2399ec976be7014739fdb1ff34636275c906af17006f7e1afe3c7c8c0c89d6f8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_franklin, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:16:12 compute-0 podman[418887]: 2025-10-11 02:16:12.654670852 +0000 UTC m=+0.055289812 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:16:12 compute-0 systemd[1]: Started libpod-conmon-2399ec976be7014739fdb1ff34636275c906af17006f7e1afe3c7c8c0c89d6f8.scope.
Oct 11 02:16:12 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:16:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd5d7faa23f1300b7bb010ccec3254bf0108fa07c5b13b4031a4501a97e773f5/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:16:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd5d7faa23f1300b7bb010ccec3254bf0108fa07c5b13b4031a4501a97e773f5/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:16:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd5d7faa23f1300b7bb010ccec3254bf0108fa07c5b13b4031a4501a97e773f5/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:16:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fd5d7faa23f1300b7bb010ccec3254bf0108fa07c5b13b4031a4501a97e773f5/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:16:12 compute-0 podman[418887]: 2025-10-11 02:16:12.845947241 +0000 UTC m=+0.246566191 container init 2399ec976be7014739fdb1ff34636275c906af17006f7e1afe3c7c8c0c89d6f8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_franklin, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 02:16:12 compute-0 ceph-mon[191930]: pgmap v1140: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:12 compute-0 podman[418887]: 2025-10-11 02:16:12.864157334 +0000 UTC m=+0.264776274 container start 2399ec976be7014739fdb1ff34636275c906af17006f7e1afe3c7c8c0c89d6f8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_franklin, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS)
Oct 11 02:16:12 compute-0 podman[418887]: 2025-10-11 02:16:12.872061893 +0000 UTC m=+0.272680823 container attach 2399ec976be7014739fdb1ff34636275c906af17006f7e1afe3c7c8c0c89d6f8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_franklin, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:16:13 compute-0 sshd-session[418492]: Connection closed by invalid user user 121.227.153.123 port 43164 [preauth]
Oct 11 02:16:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1141: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:14 compute-0 priceless_franklin[418903]: {
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:         "osd_id": 1,
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:         "type": "bluestore"
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:     },
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:         "osd_id": 2,
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:         "type": "bluestore"
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:     },
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:         "osd_id": 0,
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:         "type": "bluestore"
Oct 11 02:16:14 compute-0 priceless_franklin[418903]:     }
Oct 11 02:16:14 compute-0 priceless_franklin[418903]: }
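
Unlike `lvm list`, `raw list` keys its report by OSD uuid: osd.0, osd.1, and osd.2 sit on the device-mapper paths of ceph_vg0/1/2, all bluestore, all in cluster 3c7617c3-..., and together they back the "60 GiB / 60 GiB avail" reported in the surrounding pgmap lines. A sketch that inverts the report into an id-ordered device map (raw_list.json is a hypothetical capture of the output above):

    import json

    with open("raw_list.json") as fh:   # hypothetical capture of the output above
        raw = json.load(fh)             # keyed by osd_uuid

    # Order by OSD id and print one line per device, mirroring the fields above.
    for uuid, meta in sorted(raw.items(), key=lambda kv: kv[1]["osd_id"]):
        print(f"osd.{meta['osd_id']}: {meta['device']} "
              f"({meta['type']}, cluster {meta['ceph_fsid']})")
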
Oct 11 02:16:14 compute-0 systemd[1]: libpod-2399ec976be7014739fdb1ff34636275c906af17006f7e1afe3c7c8c0c89d6f8.scope: Deactivated successfully.
Oct 11 02:16:14 compute-0 podman[418887]: 2025-10-11 02:16:14.215122498 +0000 UTC m=+1.615741468 container died 2399ec976be7014739fdb1ff34636275c906af17006f7e1afe3c7c8c0c89d6f8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_franklin, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0)
Oct 11 02:16:14 compute-0 systemd[1]: libpod-2399ec976be7014739fdb1ff34636275c906af17006f7e1afe3c7c8c0c89d6f8.scope: Consumed 1.348s CPU time.
Oct 11 02:16:14 compute-0 systemd[1]: var-lib-containers-storage-overlay-fd5d7faa23f1300b7bb010ccec3254bf0108fa07c5b13b4031a4501a97e773f5-merged.mount: Deactivated successfully.
Oct 11 02:16:14 compute-0 podman[418887]: 2025-10-11 02:16:14.330105156 +0000 UTC m=+1.730724076 container remove 2399ec976be7014739fdb1ff34636275c906af17006f7e1afe3c7c8c0c89d6f8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_franklin, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 02:16:14 compute-0 systemd[1]: libpod-conmon-2399ec976be7014739fdb1ff34636275c906af17006f7e1afe3c7c8c0c89d6f8.scope: Deactivated successfully.
Oct 11 02:16:14 compute-0 sshd-session[418908]: Invalid user user from 121.227.153.123 port 39552
Oct 11 02:16:14 compute-0 sudo[418783]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:16:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:16:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:16:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
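
The two mon_command calls show where the inventory ends up: the cephadm mgr caches each host's device scan under mgr/cephadm/host.<hostname>... keys in the monitor's config-key store. With an admin keyring the stored blob can be read back; a sketch (assuming, as cephadm does for these keys, that the value is JSON):

    import json
    import subprocess

    # Key name copied from the audit line above; `ceph config-key get` prints
    # the raw stored value on stdout.
    blob = subprocess.run(
        ["ceph", "config-key", "get", "mgr/cephadm/host.compute-0.devices.0"],
        check=True, capture_output=True, text=True).stdout
    print(json.dumps(json.loads(blob), indent=2)[:400])
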
Oct 11 02:16:14 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 8812549c-c160-4af3-b148-4f46eb72d26d does not exist
Oct 11 02:16:14 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev efa0ae03-bc76-41f7-bb10-e34e1f703e9a does not exist
Oct 11 02:16:14 compute-0 sudo[418951]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:16:14 compute-0 sudo[418951]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:16:14 compute-0 sudo[418951]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:14 compute-0 sshd-session[418908]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:16:14 compute-0 sshd-session[418908]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:16:14 compute-0 sudo[418976]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:16:14 compute-0 sudo[418976]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:16:14 compute-0 sudo[418976]: pam_unix(sudo:session): session closed for user root
Oct 11 02:16:14 compute-0 ceph-mon[191930]: pgmap v1141: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:16:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:16:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:16:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1142: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:16 compute-0 sshd-session[418908]: Failed password for invalid user user from 121.227.153.123 port 39552 ssh2
Oct 11 02:16:16 compute-0 ceph-mon[191930]: pgmap v1142: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1143: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:18 compute-0 sshd-session[418908]: Connection closed by invalid user user 121.227.153.123 port 39552 [preauth]
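
Interleaved with the storage work, sshd is rejecting a slow password-guessing loop from 121.227.153.123, which retries invalid users ("user", later "ubuntu") from a fresh source port every few seconds. A quick way to size up such noise from the auth log (the /var/log/secure path is the RHEL default; any journal export works):

    import re
    from collections import Counter

    # Count failed password attempts per source address.
    pat = re.compile(r"Failed password for invalid user \S+ from (\S+) port")
    by_addr = Counter()
    with open("/var/log/secure") as fh:
        for line in fh:
            m = pat.search(line)
            if m:
                by_addr[m.group(1)] += 1
    for addr, hits in by_addr.most_common(5):
        print(addr, hits)
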
Oct 11 02:16:18 compute-0 ceph-mon[191930]: pgmap v1143: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:19 compute-0 sshd-session[419001]: Invalid user user from 121.227.153.123 port 39562
Oct 11 02:16:19 compute-0 sshd-session[419001]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:16:19 compute-0 sshd-session[419001]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:16:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1144: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:16:20 compute-0 nova_compute[356901]: 2025-10-11 02:16:20.281 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_power_states run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:16:20 compute-0 sshd-session[419001]: Failed password for invalid user user from 121.227.153.123 port 39562 ssh2
Oct 11 02:16:20 compute-0 ceph-mon[191930]: pgmap v1144: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:21 compute-0 podman[419007]: 2025-10-11 02:16:21.233542233 +0000 UTC m=+0.097109224 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.license=GPLv2)
Oct 11 02:16:21 compute-0 podman[419006]: 2025-10-11 02:16:21.263567772 +0000 UTC m=+0.133351366 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, tcib_managed=true, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:16:21 compute-0 podman[419004]: 2025-10-11 02:16:21.263731715 +0000 UTC m=+0.142923099 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:16:21 compute-0 podman[419005]: 2025-10-11 02:16:21.305655975 +0000 UTC m=+0.184651794 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, org.label-schema.build-date=20251009, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
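
These health_status events are podman's healthcheck timers firing for the edpm_ansible-managed containers; each config_data blob embeds a healthcheck of the form {'test': '/openstack/healthcheck ...', 'mount': '/var/lib/openstack/healthchecks/<name>'}, and podman records the probe result as health_status plus a failing streak. The same state can be read back with podman inspect; a sketch using one container name from the events above:

    import json
    import subprocess

    # "Health" appears under "State" once a container defines a healthcheck.
    info = json.loads(subprocess.run(
        ["podman", "inspect", "ovn_controller"],
        check=True, capture_output=True, text=True).stdout)[0]
    health = info["State"].get("Health", {})
    print(health.get("Status"), health.get("FailingStreak"))
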
Oct 11 02:16:21 compute-0 sshd-session[419001]: Connection closed by invalid user user 121.227.153.123 port 39562 [preauth]
Oct 11 02:16:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1145: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:22 compute-0 ceph-mon[191930]: pgmap v1145: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:22 compute-0 sshd-session[419090]: Invalid user user from 121.227.153.123 port 58382
Oct 11 02:16:23 compute-0 sshd-session[419090]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:16:23 compute-0 sshd-session[419090]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:16:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1146: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:24 compute-0 ceph-mon[191930]: pgmap v1146: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #54. Immutable memtables: 0.
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:16:25.195824) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 27] Flushing memtable with next log file: 54
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148985196003, "job": 27, "event": "flush_started", "num_memtables": 1, "num_entries": 1189, "num_deletes": 506, "total_data_size": 1328733, "memory_usage": 1352672, "flush_reason": "Manual Compaction"}
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 27] Level-0 flush table #55: started
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148985210508, "cf_name": "default", "job": 27, "event": "table_file_creation", "file_number": 55, "file_size": 1042082, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 22956, "largest_seqno": 24144, "table_properties": {"data_size": 1037237, "index_size": 1857, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1925, "raw_key_size": 14038, "raw_average_key_size": 18, "raw_value_size": 1025341, "raw_average_value_size": 1368, "num_data_blocks": 84, "num_entries": 749, "num_filter_entries": 749, "num_deletions": 506, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760148898, "oldest_key_time": 1760148898, "file_creation_time": 1760148985, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 55, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 27] Flush lasted 14763 microseconds, and 8850 cpu microseconds.
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:16:25.210604) [db/flush_job.cc:967] [default] [JOB 27] Level-0 flush table #55: 1042082 bytes OK
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:16:25.210633) [db/memtable_list.cc:519] [default] Level-0 commit table #55 started
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:16:25.213421) [db/memtable_list.cc:722] [default] Level-0 commit table #55: memtable #1 done
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:16:25.213443) EVENT_LOG_v1 {"time_micros": 1760148985213436, "job": 27, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:16:25.213470) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 27] Try to delete WAL files size 1322208, prev total WAL file size 1322208, number of live WAL files 2.
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000051.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:16:25.214702) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6C6F676D00353033' seq:72057594037927935, type:22 .. '6C6F676D00373535' seq:0, type:0; will stop at (end)
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 28] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 27 Base level 0, inputs: [55(1017KB)], [53(8934KB)]
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148985214793, "job": 28, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [55], "files_L6": [53], "score": -1, "input_data_size": 10191505, "oldest_snapshot_seqno": -1}
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 28] Generated table #56: 4439 keys, 7072366 bytes, temperature: kUnknown
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148985266463, "cf_name": "default", "job": 28, "event": "table_file_creation", "file_number": 56, "file_size": 7072366, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 7043025, "index_size": 17135, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 11141, "raw_key_size": 111144, "raw_average_key_size": 25, "raw_value_size": 6962906, "raw_average_value_size": 1568, "num_data_blocks": 714, "num_entries": 4439, "num_filter_entries": 4439, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760148985, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 56, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:16:25.266779) [db/compaction/compaction_job.cc:1663] [default] [JOB 28] Compacted 1@0 + 1@6 files to L6 => 7072366 bytes
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:16:25.270459) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 196.9 rd, 136.6 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(1.0, 8.7 +0.0 blob) out(6.7 +0.0 blob), read-write-amplify(16.6) write-amplify(6.8) OK, records in: 5444, records dropped: 1005 output_compression: NoCompression
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:16:25.270527) EVENT_LOG_v1 {"time_micros": 1760148985270502, "job": 28, "event": "compaction_finished", "compaction_time_micros": 51758, "compaction_time_cpu_micros": 36014, "output_level": 6, "num_output_files": 1, "total_output_size": 7072366, "num_input_records": 5444, "num_output_records": 4439, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000055.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148985274149, "job": 28, "event": "table_file_deletion", "file_number": 55}
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000053.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760148985279179, "job": 28, "event": "table_file_deletion", "file_number": 53}
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:16:25.214523) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:16:25.279386) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:16:25.279395) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:16:25.279397) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:16:25.279399) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:16:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:16:25.279401) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
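
This rocksdb burst is one complete maintenance cycle on the mon store: JOB 27 flushes a 1189-entry memtable to L0 table #55 (1042082 bytes), then JOB 28 manually compacts #55 with the existing L6 table #53 into #56 (7072366 bytes), dropping 1005 of 5444 records and deleting both inputs plus the old WAL. The amplification figures it reports follow directly from those byte counts:

    # Check JOB 28's reported factors against the byte counts in the log:
    #   write-amplify      = bytes written / newly flushed bytes
    #   read-write-amplify = (bytes read + bytes written) / newly flushed bytes
    l0_in = 1042082              # table #55, the flushed memtable
    l6_in = 10191505 - l0_in     # table #53, via input_data_size
    out = 7072366                # table #56
    print(round(out / l0_in, 1))                     # 6.8  (matches write-amplify)
    print(round((l0_in + l6_in + out) / l0_in, 1))   # 16.6 (matches read-write-amplify)
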
Oct 11 02:16:25 compute-0 sshd-session[419090]: Failed password for invalid user user from 121.227.153.123 port 58382 ssh2
Oct 11 02:16:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1147: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:26 compute-0 ceph-mon[191930]: pgmap v1147: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:26 compute-0 podman[419092]: 2025-10-11 02:16:26.232631202 +0000 UTC m=+0.122856456 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, config_id=multipathd, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_managed=true, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:16:26 compute-0 podman[419093]: 2025-10-11 02:16:26.270991864 +0000 UTC m=+0.157087264 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_managed=true, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
Oct 11 02:16:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:16:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:16:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:16:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:16:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:16:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:16:26 compute-0 sshd-session[419090]: Connection closed by invalid user user 121.227.153.123 port 58382 [preauth]
Oct 11 02:16:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:16:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1606145353' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:16:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:16:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1606145353' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:16:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1606145353' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:16:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1606145353' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:16:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1148: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:28 compute-0 sshd-session[419128]: Invalid user user from 121.227.153.123 port 58398
Oct 11 02:16:28 compute-0 sshd-session[419128]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:16:28 compute-0 sshd-session[419128]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:16:28 compute-0 ceph-mon[191930]: pgmap v1148: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:29 compute-0 podman[157119]: time="2025-10-11T02:16:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:16:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:16:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:16:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1149: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:16:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8529 "" "Go-http-client/1.1"
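
The podman[157119] lines are the podman system service answering libpod REST calls over its Unix socket; the podman_exporter container above mounts /run/podman/podman.sock for exactly this. The Python stdlib is enough to issue the same request; a sketch, with the socket path and endpoint taken from the log:

    import http.client
    import json
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        """HTTP client over a Unix socket (no TCP port involved)."""
        def __init__(self, path):
            super().__init__("localhost")
            self.unix_path = path

        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self.unix_path)

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    for c in json.loads(conn.getresponse().read()):
        print(c["Names"], c["State"])
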
Oct 11 02:16:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:16:30 compute-0 sshd-session[419128]: Failed password for invalid user user from 121.227.153.123 port 58398 ssh2
Oct 11 02:16:30 compute-0 ceph-mon[191930]: pgmap v1149: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:31 compute-0 openstack_network_exporter[374316]: ERROR   02:16:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:16:31 compute-0 openstack_network_exporter[374316]: ERROR   02:16:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:16:31 compute-0 openstack_network_exporter[374316]: ERROR   02:16:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:16:31 compute-0 openstack_network_exporter[374316]: ERROR   02:16:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:16:31 compute-0 openstack_network_exporter[374316]: ERROR   02:16:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:16:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1150: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:31 compute-0 sshd-session[419128]: Connection closed by invalid user user 121.227.153.123 port 58398 [preauth]
Oct 11 02:16:32 compute-0 ceph-mon[191930]: pgmap v1150: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:33 compute-0 sshd-session[419130]: Invalid user user from 121.227.153.123 port 42526
Oct 11 02:16:33 compute-0 sshd-session[419130]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:16:33 compute-0 sshd-session[419130]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:16:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1151: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:34 compute-0 sshd-session[416443]: Received disconnect from 38.102.83.70 port 43706:11: disconnected by user
Oct 11 02:16:34 compute-0 sshd-session[416443]: Disconnected from user zuul 38.102.83.70 port 43706
Oct 11 02:16:34 compute-0 sshd-session[416440]: pam_unix(sshd:session): session closed for user zuul
Oct 11 02:16:34 compute-0 systemd[1]: session-62.scope: Deactivated successfully.
Oct 11 02:16:34 compute-0 systemd[1]: session-62.scope: Consumed 11.171s CPU time.
Oct 11 02:16:34 compute-0 systemd-logind[804]: Session 62 logged out. Waiting for processes to exit.
Oct 11 02:16:34 compute-0 systemd-logind[804]: Removed session 62.
Oct 11 02:16:34 compute-0 ceph-mon[191930]: pgmap v1151: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:16:35 compute-0 sshd-session[419130]: Failed password for invalid user user from 121.227.153.123 port 42526 ssh2
Oct 11 02:16:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1152: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:36 compute-0 ceph-mon[191930]: pgmap v1152: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:37 compute-0 sshd-session[419130]: Connection closed by invalid user user 121.227.153.123 port 42526 [preauth]
Oct 11 02:16:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1153: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:38 compute-0 sshd-session[419132]: Invalid user ubuntu from 121.227.153.123 port 42530
Oct 11 02:16:38 compute-0 podman[419135]: 2025-10-11 02:16:38.538552432 +0000 UTC m=+0.139009313 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, build-date=2025-08-20T13:12:41, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_id=edpm, url=https://catalog.redhat.com/en/search?searchType=containers, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, version=9.6, architecture=x86_64, distribution-scope=public, io.buildah.version=1.33.7, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.expose-services=, release=1755695350, container_name=openstack_network_exporter, io.openshift.tags=minimal rhel9, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, com.redhat.component=ubi9-minimal-container, maintainer=Red Hat, Inc., vcs-type=git, vendor=Red Hat, Inc., name=ubi9-minimal, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible)
Oct 11 02:16:38 compute-0 podman[419136]: 2025-10-11 02:16:38.562315094 +0000 UTC m=+0.153594042 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:16:38 compute-0 podman[419134]: 2025-10-11 02:16:38.56288561 +0000 UTC m=+0.168688763 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_ipmi, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']})
Oct 11 02:16:38 compute-0 sshd-session[419132]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:16:38 compute-0 sshd-session[419132]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:16:38 compute-0 ceph-mon[191930]: pgmap v1153: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1154: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:40 compute-0 sshd-session[419132]: Failed password for invalid user ubuntu from 121.227.153.123 port 42530 ssh2
Oct 11 02:16:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:16:40 compute-0 ceph-mon[191930]: pgmap v1154: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:41 compute-0 sshd-session[419132]: Connection closed by invalid user ubuntu 121.227.153.123 port 42530 [preauth]
Oct 11 02:16:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1155: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:42 compute-0 podman[419196]: 2025-10-11 02:16:42.27819621 +0000 UTC m=+0.167121976 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, name=ubi9, io.openshift.tags=base rhel9, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, version=9.4, io.openshift.expose-services=, config_id=edpm, vendor=Red Hat, Inc., release-0.7.12=, build-date=2024-09-18T21:23:30, architecture=x86_64, container_name=kepler, managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., com.redhat.component=ubi9-container, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=1214.1726694543, io.k8s.display-name=Red Hat Universal Base Image 9, io.buildah.version=1.29.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public)
Oct 11 02:16:42 compute-0 ceph-mon[191930]: pgmap v1155: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:43 compute-0 sshd-session[419194]: Invalid user ubuntu from 121.227.153.123 port 36306
Oct 11 02:16:43 compute-0 sshd-session[419194]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:16:43 compute-0 sshd-session[419194]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:16:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1156: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:43 compute-0 nova_compute[356901]: 2025-10-11 02:16:43.924 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:16:43 compute-0 nova_compute[356901]: 2025-10-11 02:16:43.925 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:16:43 compute-0 nova_compute[356901]: 2025-10-11 02:16:43.925 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:16:44 compute-0 sshd-session[419194]: Failed password for invalid user ubuntu from 121.227.153.123 port 36306 ssh2
Oct 11 02:16:44 compute-0 ceph-mon[191930]: pgmap v1156: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:16:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1157: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:46 compute-0 sshd-session[419194]: Connection closed by invalid user ubuntu 121.227.153.123 port 36306 [preauth]
Oct 11 02:16:46 compute-0 nova_compute[356901]: 2025-10-11 02:16:46.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:16:46 compute-0 nova_compute[356901]: 2025-10-11 02:16:46.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:16:46 compute-0 nova_compute[356901]: 2025-10-11 02:16:46.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:16:46 compute-0 nova_compute[356901]: 2025-10-11 02:16:46.917 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944
Oct 11 02:16:46 compute-0 ceph-mon[191930]: pgmap v1157: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:47 compute-0 sshd-session[419216]: Invalid user ubuntu from 121.227.153.123 port 36322
Oct 11 02:16:47 compute-0 sshd-session[419216]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:16:47 compute-0 sshd-session[419216]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:16:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1158: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:48 compute-0 nova_compute[356901]: 2025-10-11 02:16:48.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:16:48 compute-0 nova_compute[356901]: 2025-10-11 02:16:48.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:16:48 compute-0 nova_compute[356901]: 2025-10-11 02:16:48.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:16:48 compute-0 nova_compute[356901]: 2025-10-11 02:16:48.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:16:48 compute-0 nova_compute[356901]: 2025-10-11 02:16:48.943 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:16:48 compute-0 nova_compute[356901]: 2025-10-11 02:16:48.944 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:16:48 compute-0 nova_compute[356901]: 2025-10-11 02:16:48.944 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:16:48 compute-0 nova_compute[356901]: 2025-10-11 02:16:48.944 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:16:48 compute-0 nova_compute[356901]: 2025-10-11 02:16:48.944 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:16:48 compute-0 ceph-mon[191930]: pgmap v1158: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:16:49 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/784521159' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:16:49 compute-0 nova_compute[356901]: 2025-10-11 02:16:49.421 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.477s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:16:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1159: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:49 compute-0 nova_compute[356901]: 2025-10-11 02:16:49.918 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:16:49 compute-0 nova_compute[356901]: 2025-10-11 02:16:49.920 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=4558MB free_disk=59.98828125GB free_vcpus=8 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:16:49 compute-0 nova_compute[356901]: 2025-10-11 02:16:49.920 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:16:49 compute-0 nova_compute[356901]: 2025-10-11 02:16:49.921 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:16:49 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/784521159' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:16:50 compute-0 nova_compute[356901]: 2025-10-11 02:16:50.026 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 0 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:16:50 compute-0 nova_compute[356901]: 2025-10-11 02:16:50.027 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=512MB phys_disk=59GB used_disk=0GB total_vcpus=8 used_vcpus=0 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:16:50 compute-0 nova_compute[356901]: 2025-10-11 02:16:50.059 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:16:50 compute-0 sshd-session[419216]: Failed password for invalid user ubuntu from 121.227.153.123 port 36322 ssh2
Oct 11 02:16:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:16:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:16:50 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1598777757' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:16:50 compute-0 nova_compute[356901]: 2025-10-11 02:16:50.587 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.528s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:16:50 compute-0 nova_compute[356901]: 2025-10-11 02:16:50.599 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:16:50 compute-0 nova_compute[356901]: 2025-10-11 02:16:50.762 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:16:50 compute-0 nova_compute[356901]: 2025-10-11 02:16:50.765 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:16:50 compute-0 nova_compute[356901]: 2025-10-11 02:16:50.765 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.844s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:16:50 compute-0 sshd-session[419216]: Connection closed by invalid user ubuntu 121.227.153.123 port 36322 [preauth]
Oct 11 02:16:50 compute-0 ceph-mon[191930]: pgmap v1159: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:50 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1598777757' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:16:51 compute-0 nova_compute[356901]: 2025-10-11 02:16:51.766 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:16:51 compute-0 nova_compute[356901]: 2025-10-11 02:16:51.766 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:16:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1160: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:52 compute-0 sshd-session[419263]: Invalid user ubuntu from 121.227.153.123 port 37314
Oct 11 02:16:52 compute-0 podman[419268]: 2025-10-11 02:16:52.244784526 +0000 UTC m=+0.114053193 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009)
Oct 11 02:16:52 compute-0 podman[419265]: 2025-10-11 02:16:52.254685947 +0000 UTC m=+0.137672164 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:16:52 compute-0 podman[419267]: 2025-10-11 02:16:52.255951103 +0000 UTC m=+0.123477401 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 02:16:52 compute-0 podman[419266]: 2025-10-11 02:16:52.316270321 +0000 UTC m=+0.192142482 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, container_name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:16:52 compute-0 sshd-session[419263]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:16:52 compute-0 sshd-session[419263]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:16:53 compute-0 ceph-mon[191930]: pgmap v1160: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1161: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:54 compute-0 sshd-session[419263]: Failed password for invalid user ubuntu from 121.227.153.123 port 37314 ssh2
Oct 11 02:16:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:16:54.840 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:16:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:16:54.841 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:16:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:16:54.841 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:16:55 compute-0 ceph-mon[191930]: pgmap v1161: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:16:55 compute-0 sshd-session[419263]: Connection closed by invalid user ubuntu 121.227.153.123 port 37314 [preauth]
Oct 11 02:16:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1162: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:16:56
Oct 11 02:16:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:16:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:16:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['cephfs.cephfs.data', '.mgr', 'default.rgw.control', 'default.rgw.log', 'backups', '.rgw.root', 'volumes', 'images', 'default.rgw.meta', 'cephfs.cephfs.meta', 'vms']
Oct 11 02:16:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:16:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:16:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:16:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:16:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:16:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:16:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:16:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:16:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:16:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:16:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:16:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:16:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:16:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:16:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:16:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:16:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:16:57 compute-0 ceph-mon[191930]: pgmap v1162: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:57 compute-0 podman[419350]: 2025-10-11 02:16:57.231565753 +0000 UTC m=+0.117389711 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.schema-version=1.0, tcib_managed=true)
Oct 11 02:16:57 compute-0 podman[419349]: 2025-10-11 02:16:57.246209057 +0000 UTC m=+0.136170096 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_id=multipathd, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:16:57 compute-0 sshd-session[419347]: Invalid user ubuntu from 121.227.153.123 port 37326
Oct 11 02:16:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1163: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:57 compute-0 sshd-session[419347]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:16:57 compute-0 sshd-session[419347]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:16:59 compute-0 ceph-mon[191930]: pgmap v1163: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:16:59 compute-0 podman[157119]: time="2025-10-11T02:16:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:16:59 compute-0 sshd-session[419347]: Failed password for invalid user ubuntu from 121.227.153.123 port 37326 ssh2
Oct 11 02:16:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:16:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:16:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:16:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8535 "" "Go-http-client/1.1"
Oct 11 02:16:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1164: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:17:01 compute-0 sshd-session[419347]: Connection closed by invalid user ubuntu 121.227.153.123 port 37326 [preauth]
Oct 11 02:17:01 compute-0 ceph-mon[191930]: pgmap v1164: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:01 compute-0 openstack_network_exporter[374316]: ERROR   02:17:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:17:01 compute-0 openstack_network_exporter[374316]: ERROR   02:17:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:17:01 compute-0 openstack_network_exporter[374316]: ERROR   02:17:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:17:01 compute-0 openstack_network_exporter[374316]: ERROR   02:17:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:17:01 compute-0 openstack_network_exporter[374316]: ERROR   02:17:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:17:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1165: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:02 compute-0 sshd-session[419388]: Invalid user ubuntu from 121.227.153.123 port 44392
Oct 11 02:17:02 compute-0 sshd-session[419388]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:17:02 compute-0 sshd-session[419388]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:17:03 compute-0 ceph-mon[191930]: pgmap v1165: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1166: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:04 compute-0 sshd-session[419388]: Failed password for invalid user ubuntu from 121.227.153.123 port 44392 ssh2
Oct 11 02:17:05 compute-0 ceph-mon[191930]: pgmap v1166: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:17:05 compute-0 sshd-session[419388]: Connection closed by invalid user ubuntu 121.227.153.123 port 44392 [preauth]
Oct 11 02:17:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1167: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:17:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:17:06 compute-0 sshd-session[419390]: Invalid user ubuntu from 121.227.153.123 port 44402
Oct 11 02:17:07 compute-0 ceph-mon[191930]: pgmap v1167: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:07 compute-0 sshd-session[419390]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:17:07 compute-0 sshd-session[419390]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:17:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1168: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:08 compute-0 sshd-session[419390]: Failed password for invalid user ubuntu from 121.227.153.123 port 44402 ssh2
Oct 11 02:17:09 compute-0 ceph-mon[191930]: pgmap v1168: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:09 compute-0 podman[419393]: 2025-10-11 02:17:09.236393634 +0000 UTC m=+0.119655735 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, version=9.6, release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-type=git, distribution-scope=public, name=ubi9-minimal, build-date=2025-08-20T13:12:41, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, io.openshift.tags=minimal rhel9, managed_by=edpm_ansible, container_name=openstack_network_exporter, io.buildah.version=1.33.7, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, vendor=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64)
Oct 11 02:17:09 compute-0 podman[419394]: 2025-10-11 02:17:09.246876618 +0000 UTC m=+0.125456973 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:17:09 compute-0 podman[419392]: 2025-10-11 02:17:09.261082928 +0000 UTC m=+0.148604223 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0)
Oct 11 02:17:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1169: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:17:10 compute-0 sshd-session[419390]: Connection closed by invalid user ubuntu 121.227.153.123 port 44402 [preauth]
Oct 11 02:17:11 compute-0 ceph-mon[191930]: pgmap v1169: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:11 compute-0 sshd-session[419453]: Invalid user ubuntu from 121.227.153.123 port 59860
Oct 11 02:17:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1170: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:12 compute-0 sshd-session[419453]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:17:12 compute-0 sshd-session[419453]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:17:13 compute-0 ceph-mon[191930]: pgmap v1170: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:13 compute-0 podman[419455]: 2025-10-11 02:17:13.253041518 +0000 UTC m=+0.136491308 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9, config_id=edpm, container_name=kepler, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, release-0.7.12=, vcs-type=git, version=9.4, maintainer=Red Hat, Inc., io.buildah.version=1.29.0, io.openshift.tags=base rhel9, managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., release=1214.1726694543, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, architecture=x86_64, build-date=2024-09-18T21:23:30, name=ubi9, vendor=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, com.redhat.component=ubi9-container)
Oct 11 02:17:13 compute-0 sshd-session[419453]: Failed password for invalid user ubuntu from 121.227.153.123 port 59860 ssh2
Oct 11 02:17:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1171: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.859 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to be longer than the expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.860 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.860 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.861 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.866 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.866 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.866 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.867 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.capacity, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.867 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.867 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.867 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.867 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.867 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.867 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.867 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.868 14 DEBUG ceilometer.polling.manager [-] Skip pollster power.state, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.868 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.requests, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.868 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.868 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.868 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.ephemeral.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.868 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.869 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.delta, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.869 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.869 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.root.size, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.869 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.869 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.drop, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.869 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.869 14 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.allocation, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.869 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.869 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.packets.error, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.869 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.869 14 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.869 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.869 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.870 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.870 14 DEBUG ceilometer.polling.manager [-] Skip pollster memory.usage, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.870 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.870 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no  resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.870 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.870 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.871 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.872 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:17:13.873 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:17:14 compute-0 sudo[419476]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:17:14 compute-0 sudo[419476]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:14 compute-0 sudo[419476]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:15 compute-0 sudo[419501]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:17:15 compute-0 sudo[419501]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:15 compute-0 sudo[419501]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:15 compute-0 sudo[419526]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:17:15 compute-0 sudo[419526]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:15 compute-0 sshd-session[419453]: Connection closed by invalid user ubuntu 121.227.153.123 port 59860 [preauth]
Oct 11 02:17:15 compute-0 sudo[419526]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:15 compute-0 ceph-mon[191930]: pgmap v1171: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:17:15 compute-0 sudo[419551]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:17:15 compute-0 sudo[419551]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1172: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:15 compute-0 sudo[419551]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:17:16 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:17:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:17:16 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:17:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:17:16 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:17:16 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 8e9f150a-946b-448b-ab1a-adde0d6e78dd does not exist
Oct 11 02:17:16 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev fcbe3988-8609-4470-bf01-a4234ee61f1e does not exist
Oct 11 02:17:16 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 324a5dd1-4aaf-4516-9647-2839823bec52 does not exist
Oct 11 02:17:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:17:16 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:17:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:17:16 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:17:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:17:16 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:17:16 compute-0 sudo[419607]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:17:16 compute-0 sudo[419607]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:16 compute-0 sudo[419607]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:17:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:17:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:17:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:17:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:17:16 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:17:16 compute-0 sudo[419632]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:17:16 compute-0 sudo[419632]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:16 compute-0 sudo[419632]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:16 compute-0 sshd-session[419576]: Invalid user ubuntu from 121.227.153.123 port 59870
Oct 11 02:17:16 compute-0 sudo[419657]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:17:16 compute-0 sudo[419657]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:16 compute-0 sudo[419657]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:16 compute-0 sudo[419682]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:17:16 compute-0 sudo[419682]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:16 compute-0 sshd-session[419576]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:17:16 compute-0 sshd-session[419576]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:17:17 compute-0 podman[419744]: 2025-10-11 02:17:17.15587486 +0000 UTC m=+0.099524824 container create 7d990a01dc5f424ae23d7fdf321ec275061ff273b55ba39879ce579de2fdcb2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=exciting_gates, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 02:17:17 compute-0 ceph-mon[191930]: pgmap v1172: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:17 compute-0 podman[419744]: 2025-10-11 02:17:17.118787246 +0000 UTC m=+0.062437270 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:17:17 compute-0 systemd[1]: Started libpod-conmon-7d990a01dc5f424ae23d7fdf321ec275061ff273b55ba39879ce579de2fdcb2f.scope.
Oct 11 02:17:17 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:17:17 compute-0 podman[419744]: 2025-10-11 02:17:17.306052988 +0000 UTC m=+0.249702962 container init 7d990a01dc5f424ae23d7fdf321ec275061ff273b55ba39879ce579de2fdcb2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=exciting_gates, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 02:17:17 compute-0 podman[419744]: 2025-10-11 02:17:17.326880793 +0000 UTC m=+0.270530767 container start 7d990a01dc5f424ae23d7fdf321ec275061ff273b55ba39879ce579de2fdcb2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=exciting_gates, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:17:17 compute-0 podman[419744]: 2025-10-11 02:17:17.332848857 +0000 UTC m=+0.276498851 container attach 7d990a01dc5f424ae23d7fdf321ec275061ff273b55ba39879ce579de2fdcb2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=exciting_gates, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:17:17 compute-0 exciting_gates[419759]: 167 167
Oct 11 02:17:17 compute-0 systemd[1]: libpod-7d990a01dc5f424ae23d7fdf321ec275061ff273b55ba39879ce579de2fdcb2f.scope: Deactivated successfully.
Oct 11 02:17:17 compute-0 podman[419744]: 2025-10-11 02:17:17.34192768 +0000 UTC m=+0.285577644 container died 7d990a01dc5f424ae23d7fdf321ec275061ff273b55ba39879ce579de2fdcb2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=exciting_gates, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:17:17 compute-0 systemd[1]: var-lib-containers-storage-overlay-37363edf87a9469dc6e5efbafb0f0ebf0ccf61dfe02f9574a210aa2e873def43-merged.mount: Deactivated successfully.
Oct 11 02:17:17 compute-0 podman[419744]: 2025-10-11 02:17:17.434066208 +0000 UTC m=+0.377716172 container remove 7d990a01dc5f424ae23d7fdf321ec275061ff273b55ba39879ce579de2fdcb2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=exciting_gates, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:17:17 compute-0 systemd[1]: libpod-conmon-7d990a01dc5f424ae23d7fdf321ec275061ff273b55ba39879ce579de2fdcb2f.scope: Deactivated successfully.
Oct 11 02:17:17 compute-0 podman[419783]: 2025-10-11 02:17:17.700405234 +0000 UTC m=+0.078526834 container create 9d92b6ba5cc706902d6385fd053a577aaf6eeb1fdeb1533d7d452af508120408 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_banzai, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 02:17:17 compute-0 podman[419783]: 2025-10-11 02:17:17.670848509 +0000 UTC m=+0.048970109 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:17:17 compute-0 systemd[1]: Started libpod-conmon-9d92b6ba5cc706902d6385fd053a577aaf6eeb1fdeb1533d7d452af508120408.scope.
Oct 11 02:17:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1173: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:17 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:17:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f7076076d11ffff6ede65caea2b15fdef0abd5c20994f66a8556a60686fb6662/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:17:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f7076076d11ffff6ede65caea2b15fdef0abd5c20994f66a8556a60686fb6662/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:17:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f7076076d11ffff6ede65caea2b15fdef0abd5c20994f66a8556a60686fb6662/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:17:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f7076076d11ffff6ede65caea2b15fdef0abd5c20994f66a8556a60686fb6662/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:17:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f7076076d11ffff6ede65caea2b15fdef0abd5c20994f66a8556a60686fb6662/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:17:17 compute-0 podman[419783]: 2025-10-11 02:17:17.878695463 +0000 UTC m=+0.256817123 container init 9d92b6ba5cc706902d6385fd053a577aaf6eeb1fdeb1533d7d452af508120408 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_banzai, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef)
Oct 11 02:17:17 compute-0 podman[419783]: 2025-10-11 02:17:17.909282154 +0000 UTC m=+0.287403754 container start 9d92b6ba5cc706902d6385fd053a577aaf6eeb1fdeb1533d7d452af508120408 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_banzai, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Oct 11 02:17:17 compute-0 podman[419783]: 2025-10-11 02:17:17.917017283 +0000 UTC m=+0.295138943 container attach 9d92b6ba5cc706902d6385fd053a577aaf6eeb1fdeb1533d7d452af508120408 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_banzai, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 02:17:18 compute-0 ceph-mon[191930]: pgmap v1173: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:18 compute-0 sshd-session[419576]: Failed password for invalid user ubuntu from 121.227.153.123 port 59870 ssh2
Oct 11 02:17:18 compute-0 upbeat_banzai[419798]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:17:18 compute-0 upbeat_banzai[419798]: --> relative data size: 1.0
Oct 11 02:17:18 compute-0 upbeat_banzai[419798]: --> All data devices are unavailable
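
"passed data devices: 0 physical, 3 LVM" followed by "All data devices are unavailable" is ceph-volume's device report: cephadm handed it the three ceph_vg*/ceph_lv* logical volumes, and each is rejected as a fresh data device because it already carries a BlueStore OSD (the lvm list dump logged at 02:17:22 below shows ceph.osd_id tags 0, 1 and 2 on exactly these LVs). A hedged way to verify that locally, assuming LVM2's JSON report output (this lvs invocation is illustrative, not taken from the log):

    import json
    import subprocess

    # List LVs with their tags; an LV tagged with ceph.osd_id already hosts an
    # OSD, which is why ceph-volume reports it as an unavailable data device.
    report = json.loads(subprocess.check_output(
        ["sudo", "lvs", "--reportformat", "json", "-o", "lv_name,lv_tags"]))
    for lv in report["report"][0]["lv"]:
        if "ceph.osd_id=" in lv["lv_tags"]:
            print(lv["lv_name"], "already holds an OSD")
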
Oct 11 02:17:19 compute-0 systemd[1]: libpod-9d92b6ba5cc706902d6385fd053a577aaf6eeb1fdeb1533d7d452af508120408.scope: Deactivated successfully.
Oct 11 02:17:19 compute-0 systemd[1]: libpod-9d92b6ba5cc706902d6385fd053a577aaf6eeb1fdeb1533d7d452af508120408.scope: Consumed 1.064s CPU time.
Oct 11 02:17:19 compute-0 podman[419827]: 2025-10-11 02:17:19.09058929 +0000 UTC m=+0.050954183 container died 9d92b6ba5cc706902d6385fd053a577aaf6eeb1fdeb1533d7d452af508120408 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_banzai, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:17:19 compute-0 systemd[1]: var-lib-containers-storage-overlay-f7076076d11ffff6ede65caea2b15fdef0abd5c20994f66a8556a60686fb6662-merged.mount: Deactivated successfully.
Oct 11 02:17:19 compute-0 podman[419827]: 2025-10-11 02:17:19.204947861 +0000 UTC m=+0.165312704 container remove 9d92b6ba5cc706902d6385fd053a577aaf6eeb1fdeb1533d7d452af508120408 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_banzai, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:17:19 compute-0 systemd[1]: libpod-conmon-9d92b6ba5cc706902d6385fd053a577aaf6eeb1fdeb1533d7d452af508120408.scope: Deactivated successfully.
Oct 11 02:17:19 compute-0 sudo[419682]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:19 compute-0 sudo[419841]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:17:19 compute-0 sudo[419841]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:19 compute-0 sudo[419841]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:19 compute-0 sudo[419866]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:17:19 compute-0 sudo[419866]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:19 compute-0 sudo[419866]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:19 compute-0 sshd-session[419576]: Connection closed by invalid user ubuntu 121.227.153.123 port 59870 [preauth]
Oct 11 02:17:19 compute-0 sudo[419892]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:17:19 compute-0 sudo[419892]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:19 compute-0 sudo[419892]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1174: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:19 compute-0 sudo[419917]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:17:19 compute-0 sudo[419917]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:17:20 compute-0 podman[419982]: 2025-10-11 02:17:20.491034748 +0000 UTC m=+0.091740731 container create 8bfa1eb4fbda8689f42ea356021b3936fc9371a279e891ef74cfbab402e9ab1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_shaw, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:17:20 compute-0 podman[419982]: 2025-10-11 02:17:20.451723297 +0000 UTC m=+0.052429330 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:17:20 compute-0 systemd[1]: Started libpod-conmon-8bfa1eb4fbda8689f42ea356021b3936fc9371a279e891ef74cfbab402e9ab1b.scope.
Oct 11 02:17:20 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:17:20 compute-0 podman[419982]: 2025-10-11 02:17:20.651977726 +0000 UTC m=+0.252683769 container init 8bfa1eb4fbda8689f42ea356021b3936fc9371a279e891ef74cfbab402e9ab1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_shaw, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, ceph=True, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 02:17:20 compute-0 podman[419982]: 2025-10-11 02:17:20.665786849 +0000 UTC m=+0.266492832 container start 8bfa1eb4fbda8689f42ea356021b3936fc9371a279e891ef74cfbab402e9ab1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_shaw, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_REF=reef)
Oct 11 02:17:20 compute-0 podman[419982]: 2025-10-11 02:17:20.67268815 +0000 UTC m=+0.273394213 container attach 8bfa1eb4fbda8689f42ea356021b3936fc9371a279e891ef74cfbab402e9ab1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_shaw, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 02:17:20 compute-0 sweet_shaw[419998]: 167 167
Oct 11 02:17:20 compute-0 systemd[1]: libpod-8bfa1eb4fbda8689f42ea356021b3936fc9371a279e891ef74cfbab402e9ab1b.scope: Deactivated successfully.
Oct 11 02:17:20 compute-0 podman[419982]: 2025-10-11 02:17:20.678425303 +0000 UTC m=+0.279131286 container died 8bfa1eb4fbda8689f42ea356021b3936fc9371a279e891ef74cfbab402e9ab1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_shaw, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:17:20 compute-0 systemd[1]: var-lib-containers-storage-overlay-c48d17ab49c1f68549a62985f2780de4a46ca68e0f86645cbf6ec70d44f3e33b-merged.mount: Deactivated successfully.
Oct 11 02:17:20 compute-0 podman[419982]: 2025-10-11 02:17:20.763437698 +0000 UTC m=+0.364143651 container remove 8bfa1eb4fbda8689f42ea356021b3936fc9371a279e891ef74cfbab402e9ab1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_shaw, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:17:20 compute-0 systemd[1]: libpod-conmon-8bfa1eb4fbda8689f42ea356021b3936fc9371a279e891ef74cfbab402e9ab1b.scope: Deactivated successfully.
Oct 11 02:17:20 compute-0 sshd-session[419942]: Invalid user ubuntu from 121.227.153.123 port 37050
Oct 11 02:17:20 compute-0 ceph-mon[191930]: pgmap v1174: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:21 compute-0 podman[420020]: 2025-10-11 02:17:21.047959344 +0000 UTC m=+0.092897469 container create 25d340697b7c417f369b77dfac6c756537f6d7394b44cf343aadbf1aed205a69 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_jepsen, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:17:21 compute-0 sshd-session[419942]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:17:21 compute-0 sshd-session[419942]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:17:21 compute-0 podman[420020]: 2025-10-11 02:17:21.015569046 +0000 UTC m=+0.060507221 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:17:21 compute-0 systemd[1]: Started libpod-conmon-25d340697b7c417f369b77dfac6c756537f6d7394b44cf343aadbf1aed205a69.scope.
Oct 11 02:17:21 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:17:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d5d9d6f872c5bfb1826e5cc3d2aacd880032a0441f46c602aa8ab46fb3a44da0/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:17:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d5d9d6f872c5bfb1826e5cc3d2aacd880032a0441f46c602aa8ab46fb3a44da0/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:17:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d5d9d6f872c5bfb1826e5cc3d2aacd880032a0441f46c602aa8ab46fb3a44da0/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:17:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d5d9d6f872c5bfb1826e5cc3d2aacd880032a0441f46c602aa8ab46fb3a44da0/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:17:21 compute-0 podman[420020]: 2025-10-11 02:17:21.231082512 +0000 UTC m=+0.276020677 container init 25d340697b7c417f369b77dfac6c756537f6d7394b44cf343aadbf1aed205a69 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_jepsen, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:17:21 compute-0 podman[420020]: 2025-10-11 02:17:21.248763584 +0000 UTC m=+0.293701679 container start 25d340697b7c417f369b77dfac6c756537f6d7394b44cf343aadbf1aed205a69 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_jepsen, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 02:17:21 compute-0 podman[420020]: 2025-10-11 02:17:21.254649391 +0000 UTC m=+0.299587556 container attach 25d340697b7c417f369b77dfac6c756537f6d7394b44cf343aadbf1aed205a69 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_jepsen, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0)
Oct 11 02:17:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1175: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]: {
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:     "0": [
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:         {
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "devices": [
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "/dev/loop3"
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             ],
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "lv_name": "ceph_lv0",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "lv_size": "21470642176",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "name": "ceph_lv0",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "tags": {
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.cluster_name": "ceph",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.crush_device_class": "",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.encrypted": "0",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.osd_id": "0",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.type": "block",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.vdo": "0"
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             },
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "type": "block",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "vg_name": "ceph_vg0"
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:         }
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:     ],
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:     "1": [
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:         {
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "devices": [
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "/dev/loop4"
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             ],
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "lv_name": "ceph_lv1",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "lv_size": "21470642176",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "name": "ceph_lv1",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "tags": {
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.cluster_name": "ceph",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.crush_device_class": "",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.encrypted": "0",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.osd_id": "1",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.type": "block",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.vdo": "0"
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             },
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "type": "block",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "vg_name": "ceph_vg1"
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:         }
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:     ],
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:     "2": [
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:         {
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "devices": [
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "/dev/loop5"
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             ],
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "lv_name": "ceph_lv2",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "lv_size": "21470642176",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "name": "ceph_lv2",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "tags": {
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.cluster_name": "ceph",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.crush_device_class": "",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.encrypted": "0",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.osd_id": "2",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.type": "block",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:                 "ceph.vdo": "0"
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             },
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "type": "block",
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:             "vg_name": "ceph_vg2"
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:         }
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]:     ]
Oct 11 02:17:22 compute-0 optimistic_jepsen[420037]: }
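
Once the journal prefix is stripped, the optimistic_jepsen output above is valid JSON: the payload of the "ceph-volume ... lvm list --format json" call requested by the sudo command at 02:17:19, keyed by OSD id with one LV entry per OSD. A small sketch that recovers it from a saved journal excerpt (the prefix regex is written for the field layout shown here):

    import json
    import re
    import sys

    # Usage sketch: feed this journal excerpt on stdin.
    payload = "".join(
        re.sub(r"^.*optimistic_jepsen\[\d+\]: ", "", line)
        for line in sys.stdin if "optimistic_jepsen[" in line)
    for osd_id, entries in json.loads(payload).items():
        print(osd_id, entries[0]["lv_path"], entries[0]["devices"])
    # expected: 0 /dev/ceph_vg0/ceph_lv0 ['/dev/loop3'], and so on
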
Oct 11 02:17:22 compute-0 systemd[1]: libpod-25d340697b7c417f369b77dfac6c756537f6d7394b44cf343aadbf1aed205a69.scope: Deactivated successfully.
Oct 11 02:17:22 compute-0 podman[420020]: 2025-10-11 02:17:22.056080025 +0000 UTC m=+1.101018140 container died 25d340697b7c417f369b77dfac6c756537f6d7394b44cf343aadbf1aed205a69 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_jepsen, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:17:22 compute-0 systemd[1]: var-lib-containers-storage-overlay-d5d9d6f872c5bfb1826e5cc3d2aacd880032a0441f46c602aa8ab46fb3a44da0-merged.mount: Deactivated successfully.
Oct 11 02:17:22 compute-0 podman[420020]: 2025-10-11 02:17:22.162111193 +0000 UTC m=+1.207049308 container remove 25d340697b7c417f369b77dfac6c756537f6d7394b44cf343aadbf1aed205a69 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=optimistic_jepsen, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:17:22 compute-0 systemd[1]: libpod-conmon-25d340697b7c417f369b77dfac6c756537f6d7394b44cf343aadbf1aed205a69.scope: Deactivated successfully.
Oct 11 02:17:22 compute-0 sudo[419917]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:22 compute-0 sudo[420059]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:17:22 compute-0 sudo[420059]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:22 compute-0 sudo[420059]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:22 compute-0 podman[420086]: 2025-10-11 02:17:22.527472047 +0000 UTC m=+0.099997769 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, container_name=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_id=ovn_metadata_agent, io.buildah.version=1.41.3)
Oct 11 02:17:22 compute-0 podman[420083]: 2025-10-11 02:17:22.530271757 +0000 UTC m=+0.113803681 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:17:22 compute-0 sudo[420109]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:17:22 compute-0 sudo[420109]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:22 compute-0 sudo[420109]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:22 compute-0 podman[420085]: 2025-10-11 02:17:22.578638069 +0000 UTC m=+0.153768843 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, org.label-schema.build-date=20251007, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:17:22 compute-0 podman[420084]: 2025-10-11 02:17:22.600208162 +0000 UTC m=+0.180873650 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_id=ovn_controller, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller)
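
The four health_status events in this burst are podman's periodic healthchecks for the edpm-managed containers; the config_data blob is edpm_ansible's own record of how each container was created, with its 'healthcheck' entry naming the probe script mounted at /openstack/healthcheck. The same probe can be triggered on demand with podman's healthcheck subcommand; a minimal sketch, with container names taken from the events above:

    import subprocess

    # 'podman healthcheck run' exits 0 when the container's probe passes.
    for name in ("ovn_metadata_agent", "podman_exporter",
                 "ceilometer_agent_compute", "ovn_controller"):
        rc = subprocess.run(
            ["sudo", "podman", "healthcheck", "run", name]).returncode
        print(name, "healthy" if rc == 0 else "unhealthy (rc=%d)" % rc)
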
Oct 11 02:17:22 compute-0 sudo[420188]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:17:22 compute-0 sudo[420188]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:22 compute-0 sudo[420188]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:22 compute-0 sudo[420216]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:17:22 compute-0 sudo[420216]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:22 compute-0 sshd-session[419942]: Failed password for invalid user ubuntu from 121.227.153.123 port 37050 ssh2
Oct 11 02:17:22 compute-0 ceph-mon[191930]: pgmap v1175: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:23 compute-0 podman[420279]: 2025-10-11 02:17:23.478472282 +0000 UTC m=+0.078655937 container create be3d19f35866620c6100ccdfa1b7246c16974ec5f2e41e52a6fd4cb88316b5d3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_matsumoto, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS)
Oct 11 02:17:23 compute-0 podman[420279]: 2025-10-11 02:17:23.448168967 +0000 UTC m=+0.048352692 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:17:23 compute-0 systemd[1]: Started libpod-conmon-be3d19f35866620c6100ccdfa1b7246c16974ec5f2e41e52a6fd4cb88316b5d3.scope.
Oct 11 02:17:23 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:17:23 compute-0 podman[420279]: 2025-10-11 02:17:23.668983536 +0000 UTC m=+0.269167231 container init be3d19f35866620c6100ccdfa1b7246c16974ec5f2e41e52a6fd4cb88316b5d3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_matsumoto, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 02:17:23 compute-0 podman[420279]: 2025-10-11 02:17:23.691148824 +0000 UTC m=+0.291332519 container start be3d19f35866620c6100ccdfa1b7246c16974ec5f2e41e52a6fd4cb88316b5d3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_matsumoto, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:17:23 compute-0 podman[420279]: 2025-10-11 02:17:23.698502207 +0000 UTC m=+0.298685872 container attach be3d19f35866620c6100ccdfa1b7246c16974ec5f2e41e52a6fd4cb88316b5d3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_matsumoto, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 02:17:23 compute-0 cool_matsumoto[420295]: 167 167
Oct 11 02:17:23 compute-0 systemd[1]: libpod-be3d19f35866620c6100ccdfa1b7246c16974ec5f2e41e52a6fd4cb88316b5d3.scope: Deactivated successfully.
Oct 11 02:17:23 compute-0 podman[420279]: 2025-10-11 02:17:23.706665725 +0000 UTC m=+0.306849451 container died be3d19f35866620c6100ccdfa1b7246c16974ec5f2e41e52a6fd4cb88316b5d3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_matsumoto, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:17:23 compute-0 systemd[1]: var-lib-containers-storage-overlay-4fc6beb90437b8ea11c65fcdb0365d6c50e53bfa53c0b0de5a393f4671edce42-merged.mount: Deactivated successfully.
Oct 11 02:17:23 compute-0 podman[420279]: 2025-10-11 02:17:23.789462165 +0000 UTC m=+0.389645860 container remove be3d19f35866620c6100ccdfa1b7246c16974ec5f2e41e52a6fd4cb88316b5d3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_matsumoto, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:17:23 compute-0 systemd[1]: libpod-conmon-be3d19f35866620c6100ccdfa1b7246c16974ec5f2e41e52a6fd4cb88316b5d3.scope: Deactivated successfully.
Oct 11 02:17:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1176: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:24 compute-0 podman[420318]: 2025-10-11 02:17:24.087101449 +0000 UTC m=+0.096218047 container create c1e603224d93245fb57b28f8573714873e7f930a7544e6bd76a51ba7c74078f0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_meitner, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 02:17:24 compute-0 sshd-session[419942]: Connection closed by invalid user ubuntu 121.227.153.123 port 37050 [preauth]
Oct 11 02:17:24 compute-0 systemd[1]: Started libpod-conmon-c1e603224d93245fb57b28f8573714873e7f930a7544e6bd76a51ba7c74078f0.scope.
Oct 11 02:17:24 compute-0 podman[420318]: 2025-10-11 02:17:24.058877208 +0000 UTC m=+0.067993886 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:17:24 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:17:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f8f73d1e669388565b5449d9b7a5e8a1ddaaca8557c6b369ce9a2172bf2993cb/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:17:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f8f73d1e669388565b5449d9b7a5e8a1ddaaca8557c6b369ce9a2172bf2993cb/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:17:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f8f73d1e669388565b5449d9b7a5e8a1ddaaca8557c6b369ce9a2172bf2993cb/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:17:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f8f73d1e669388565b5449d9b7a5e8a1ddaaca8557c6b369ce9a2172bf2993cb/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:17:24 compute-0 podman[420318]: 2025-10-11 02:17:24.225657168 +0000 UTC m=+0.234773806 container init c1e603224d93245fb57b28f8573714873e7f930a7544e6bd76a51ba7c74078f0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_meitner, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:17:24 compute-0 podman[420318]: 2025-10-11 02:17:24.243371133 +0000 UTC m=+0.252487741 container start c1e603224d93245fb57b28f8573714873e7f930a7544e6bd76a51ba7c74078f0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_meitner, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0)
Oct 11 02:17:24 compute-0 podman[420318]: 2025-10-11 02:17:24.249595351 +0000 UTC m=+0.258711949 container attach c1e603224d93245fb57b28f8573714873e7f930a7544e6bd76a51ba7c74078f0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_meitner, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=reef)
Oct 11 02:17:24 compute-0 ceph-mon[191930]: pgmap v1176: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:17:25 compute-0 magical_meitner[420334]: {
Oct 11 02:17:25 compute-0 magical_meitner[420334]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:17:25 compute-0 magical_meitner[420334]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:17:25 compute-0 magical_meitner[420334]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:17:25 compute-0 magical_meitner[420334]:         "osd_id": 1,
Oct 11 02:17:25 compute-0 magical_meitner[420334]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:17:25 compute-0 magical_meitner[420334]:         "type": "bluestore"
Oct 11 02:17:25 compute-0 magical_meitner[420334]:     },
Oct 11 02:17:25 compute-0 magical_meitner[420334]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:17:25 compute-0 magical_meitner[420334]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:17:25 compute-0 magical_meitner[420334]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:17:25 compute-0 magical_meitner[420334]:         "osd_id": 2,
Oct 11 02:17:25 compute-0 magical_meitner[420334]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:17:25 compute-0 magical_meitner[420334]:         "type": "bluestore"
Oct 11 02:17:25 compute-0 magical_meitner[420334]:     },
Oct 11 02:17:25 compute-0 magical_meitner[420334]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:17:25 compute-0 magical_meitner[420334]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:17:25 compute-0 magical_meitner[420334]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:17:25 compute-0 magical_meitner[420334]:         "osd_id": 0,
Oct 11 02:17:25 compute-0 magical_meitner[420334]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:17:25 compute-0 magical_meitner[420334]:         "type": "bluestore"
Oct 11 02:17:25 compute-0 magical_meitner[420334]:     }
Oct 11 02:17:25 compute-0 magical_meitner[420334]: }
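The JSON block above appears to be `ceph-volume lvm list`-style inventory emitted by the short-lived cephadm container (`magical_meitner`), keyed by osd_uuid; the `mgr/cephadm/host.compute-0.devices.0` config-key write that follows suggests the cephadm mgr module caches this per-host device scan. A minimal parsing sketch, with the literal trimmed to one entry from the output above:

```python
import json

# Minimal sketch: map OSD ids to backing devices from ceph-volume style
# output like the blob above. The literal is a trimmed copy of one entry
# from the log; a real caller would feed the captured container stdout.
raw = json.dumps({
    "6af45214-b1a1-4565-9175-30c80d9ec207": {
        "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
        "device": "/dev/mapper/ceph_vg1-ceph_lv1",
        "osd_id": 1,
        "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
        "type": "bluestore",
    }
})

def osd_devices(blob: str) -> dict[int, str]:
    """Return {osd_id: device} from a ceph-volume inventory blob."""
    return {e["osd_id"]: e["device"] for e in json.loads(blob).values()}

print(osd_devices(raw))  # {1: '/dev/mapper/ceph_vg1-ceph_lv1'}
```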
Oct 11 02:17:25 compute-0 systemd[1]: libpod-c1e603224d93245fb57b28f8573714873e7f930a7544e6bd76a51ba7c74078f0.scope: Deactivated successfully.
Oct 11 02:17:25 compute-0 systemd[1]: libpod-c1e603224d93245fb57b28f8573714873e7f930a7544e6bd76a51ba7c74078f0.scope: Consumed 1.171s CPU time.
Oct 11 02:17:25 compute-0 podman[420318]: 2025-10-11 02:17:25.414627856 +0000 UTC m=+1.423744484 container died c1e603224d93245fb57b28f8573714873e7f930a7544e6bd76a51ba7c74078f0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_meitner, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True)
Oct 11 02:17:25 compute-0 sshd-session[420339]: Invalid user ubuntu from 121.227.153.123 port 37064
Oct 11 02:17:25 compute-0 systemd[1]: var-lib-containers-storage-overlay-f8f73d1e669388565b5449d9b7a5e8a1ddaaca8557c6b369ce9a2172bf2993cb-merged.mount: Deactivated successfully.
Oct 11 02:17:25 compute-0 podman[420318]: 2025-10-11 02:17:25.521224336 +0000 UTC m=+1.530340954 container remove c1e603224d93245fb57b28f8573714873e7f930a7544e6bd76a51ba7c74078f0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_meitner, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507)
Oct 11 02:17:25 compute-0 systemd[1]: libpod-conmon-c1e603224d93245fb57b28f8573714873e7f930a7544e6bd76a51ba7c74078f0.scope: Deactivated successfully.
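The init → start → attach → died → remove sequence, together with the conmon scope teardown, is the normal lifecycle of a one-shot `podman run --rm` container. A sketch that watches for such lifecycles via `podman events` (assuming a local podman CLI; field names follow its JSON event stream):

```python
import json
import subprocess

# Sketch: follow one-shot container lifecycles (init/start/died/remove)
# as they appear in the journal above. Assumes a local `podman` CLI;
# Status/ID/Name are the usual keys in its JSON event stream.
proc = subprocess.Popen(
    ["podman", "events", "--format", "json"],
    stdout=subprocess.PIPE, text=True,
)
for line in proc.stdout:
    ev = json.loads(line)
    if ev.get("Status") in {"init", "start", "died", "remove"}:
        print(ev.get("Status"), ev.get("ID", "")[:12], ev.get("Name"))
```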
Oct 11 02:17:25 compute-0 sudo[420216]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:17:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:17:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:17:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:17:25 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 689ee8e1-b392-4035-b797-f4540bacf431 does not exist
Oct 11 02:17:25 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev c71e1e49-e127-4837-b645-ca5551781468 does not exist
Oct 11 02:17:25 compute-0 sshd-session[420339]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:17:25 compute-0 sshd-session[420339]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:17:25 compute-0 sudo[420380]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:17:25 compute-0 sudo[420380]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:25 compute-0 sudo[420380]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1177: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:25 compute-0 sudo[420405]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:17:25 compute-0 sudo[420405]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:17:25 compute-0 sudo[420405]: pam_unix(sudo:session): session closed for user root
Oct 11 02:17:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:17:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:17:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:17:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:17:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:17:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:17:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:17:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:17:26 compute-0 ceph-mon[191930]: pgmap v1177: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:27 compute-0 sshd-session[420339]: Failed password for invalid user ubuntu from 121.227.153.123 port 37064 ssh2
Oct 11 02:17:27 compute-0 sshd-session[420339]: Connection closed by invalid user ubuntu 121.227.153.123 port 37064 [preauth]
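Interleaved with the cluster traffic, 121.227.153.123 is running a password-guessing loop against SSH: Invalid user → pam_unix auth failure → Failed password → preauth disconnect, repeating every few seconds from a fresh source port. A sketch that tallies such failures per source host from saved journal text (the input path is hypothetical):

```python
import re
from collections import Counter

# Sketch: count failed SSH password attempts per source host from
# journal text like the lines above. "journal.txt" is a placeholder.
FAIL = re.compile(r"Failed password for (?:invalid user )?(\S+) from (\S+) port")

def failed_by_host(lines):
    hits = Counter()
    for line in lines:
        m = FAIL.search(line)
        if m:
            hits[m.group(2)] += 1  # group(2) is the rhost
    return hits

with open("journal.txt") as fh:
    for host, n in failed_by_host(fh).most_common(5):
        print(f"{host}: {n} failures")
```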
Oct 11 02:17:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:17:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2085361667' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:17:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:17:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2085361667' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:17:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2085361667' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:17:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2085361667' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:17:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1178: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:28 compute-0 podman[420432]: 2025-10-11 02:17:28.211622111 +0000 UTC m=+0.100689682 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, container_name=multipathd, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:17:28 compute-0 podman[420433]: 2025-10-11 02:17:28.236219186 +0000 UTC m=+0.123919070 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, container_name=iscsid, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
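These `health_status=healthy` events come from podman's healthcheck timers executing the `/openstack/healthcheck` script that edpm_ansible mounts into each container. A sketch reading the same status on demand; the State.Health layout is assumed to be Docker-compatible (older podman releases nest it as State.Healthcheck instead):

```python
import json
import subprocess

# Sketch: read a container's current health the way these journal events
# report it. Assumes podman inspect output with a Docker-compatible
# State.Health block; falls back to the older State.Healthcheck key.
def health(name: str) -> str:
    out = subprocess.check_output(["podman", "inspect", name], text=True)
    state = json.loads(out)[0]["State"]
    block = state.get("Health") or state.get("Healthcheck") or {}
    return block.get("Status", "no healthcheck")

for ctr in ("multipathd", "iscsid"):
    print(ctr, health(ctr))
```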
Oct 11 02:17:28 compute-0 sshd-session[420430]: Invalid user ubuntu from 121.227.153.123 port 37090
Oct 11 02:17:28 compute-0 ceph-mon[191930]: pgmap v1178: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:28 compute-0 sshd-session[420430]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:17:28 compute-0 sshd-session[420430]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:17:29 compute-0 podman[157119]: time="2025-10-11T02:17:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:17:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:17:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:17:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:17:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8527 "" "Go-http-client/1.1"
Oct 11 02:17:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1179: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:17:30 compute-0 ceph-mon[191930]: pgmap v1179: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:30 compute-0 sshd-session[420430]: Failed password for invalid user ubuntu from 121.227.153.123 port 37090 ssh2
Oct 11 02:17:31 compute-0 openstack_network_exporter[374316]: ERROR   02:17:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:17:31 compute-0 openstack_network_exporter[374316]: ERROR   02:17:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:17:31 compute-0 openstack_network_exporter[374316]: ERROR   02:17:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:17:31 compute-0 openstack_network_exporter[374316]: ERROR   02:17:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:17:31 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:17:31 compute-0 openstack_network_exporter[374316]: ERROR   02:17:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:17:31 compute-0 openstack_network_exporter[374316]: 
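openstack_network_exporter cannot find control sockets for ovn-northd or the OVS DB server; on a compute node that runs only ovn-controller and ovs-vswitchd, these errors (and the empty-datapath complaints) are expected noise rather than a fault. A sketch of the same existence check, with the run directories assumed from OVS/OVN defaults:

```python
import glob

# Sketch: reproduce the exporter's "no control socket files found" probe.
# The run directories are the usual OVS/OVN defaults and are assumptions.
def control_sockets(daemon: str) -> list[str]:
    return (glob.glob(f"/var/run/ovn/{daemon}.*.ctl")
            + glob.glob(f"/var/run/openvswitch/{daemon}.*.ctl"))

for daemon in ("ovn-northd", "ovsdb-server", "ovs-vswitchd"):
    socks = control_sockets(daemon)
    print(daemon, "->", socks or "no control socket files found")
```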
Oct 11 02:17:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1180: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:31 compute-0 sshd-session[420430]: Connection closed by invalid user ubuntu 121.227.153.123 port 37090 [preauth]
Oct 11 02:17:32 compute-0 ceph-mon[191930]: pgmap v1180: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:33 compute-0 sshd-session[420469]: Invalid user ubuntu from 121.227.153.123 port 55502
Oct 11 02:17:33 compute-0 sshd-session[420469]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:17:33 compute-0 sshd-session[420469]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:17:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1181: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e122 do_prune osdmap full prune enabled
Oct 11 02:17:34 compute-0 ceph-mon[191930]: pgmap v1181: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e123 e123: 3 total, 3 up, 3 in
Oct 11 02:17:34 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e123: 3 total, 3 up, 3 in
Oct 11 02:17:35 compute-0 sshd-session[420469]: Failed password for invalid user ubuntu from 121.227.153.123 port 55502 ssh2
Oct 11 02:17:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e123 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:17:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1183: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 102 B/s wr, 0 op/s
Oct 11 02:17:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e123 do_prune osdmap full prune enabled
Oct 11 02:17:35 compute-0 ceph-mon[191930]: osdmap e123: 3 total, 3 up, 3 in
Oct 11 02:17:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e124 e124: 3 total, 3 up, 3 in
Oct 11 02:17:35 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e124: 3 total, 3 up, 3 in
Oct 11 02:17:36 compute-0 sshd-session[420469]: Connection closed by invalid user ubuntu 121.227.153.123 port 55502 [preauth]
Oct 11 02:17:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e124 do_prune osdmap full prune enabled
Oct 11 02:17:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e125 e125: 3 total, 3 up, 3 in
Oct 11 02:17:36 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e125: 3 total, 3 up, 3 in
Oct 11 02:17:36 compute-0 ceph-mon[191930]: pgmap v1183: 321 pgs: 321 active+clean; 455 KiB data, 148 MiB used, 60 GiB / 60 GiB avail; 102 B/s wr, 0 op/s
Oct 11 02:17:36 compute-0 ceph-mon[191930]: osdmap e124: 3 total, 3 up, 3 in
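The monitor is pruning and bumping osdmap epochs (e122 through e126) while membership stays steady at 3 total, 3 up, 3 in. A sketch polling those same counters; the field names are assumed from `ceph osd stat -f json` output, and the client/conf pair mirrors the one nova uses later in this log:

```python
import json
import subprocess

# Sketch: read the "N total, N up, N in" counters the monitor logs above.
# Field names assumed from `ceph osd stat -f json`.
out = subprocess.check_output(
    ["ceph", "osd", "stat", "-f", "json",
     "--id", "openstack", "--conf", "/etc/ceph/ceph.conf"],
    text=True,
)
s = json.loads(out)
print(f"e{s['epoch']}: {s['num_osds']} total, "
      f"{s['num_up_osds']} up, {s['num_in_osds']} in")
```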
Oct 11 02:17:37 compute-0 sshd-session[420471]: Invalid user ubuntu from 121.227.153.123 port 55512
Oct 11 02:17:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1186: 321 pgs: 321 active+clean; 8.4 MiB data, 156 MiB used, 60 GiB / 60 GiB avail; 11 KiB/s rd, 1.3 MiB/s wr, 16 op/s
Oct 11 02:17:37 compute-0 sshd-session[420471]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:17:37 compute-0 sshd-session[420471]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:17:37 compute-0 ceph-mon[191930]: osdmap e125: 3 total, 3 up, 3 in
Oct 11 02:17:39 compute-0 ceph-mon[191930]: pgmap v1186: 321 pgs: 321 active+clean; 8.4 MiB data, 156 MiB used, 60 GiB / 60 GiB avail; 11 KiB/s rd, 1.3 MiB/s wr, 16 op/s
Oct 11 02:17:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1187: 321 pgs: 321 active+clean; 8.4 MiB data, 156 MiB used, 60 GiB / 60 GiB avail; 11 KiB/s rd, 1.3 MiB/s wr, 16 op/s
Oct 11 02:17:39 compute-0 sshd-session[420471]: Failed password for invalid user ubuntu from 121.227.153.123 port 55512 ssh2
Oct 11 02:17:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e125 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:17:40 compute-0 podman[420475]: 2025-10-11 02:17:40.254600603 +0000 UTC m=+0.128389426 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:17:40 compute-0 podman[420473]: 2025-10-11 02:17:40.260143548 +0000 UTC m=+0.148137600 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ceilometer_agent_ipmi, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:17:40 compute-0 podman[420474]: 2025-10-11 02:17:40.266659183 +0000 UTC m=+0.150651433 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, release=1755695350, architecture=x86_64, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vendor=Red Hat, Inc., name=ubi9-minimal, maintainer=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=openstack_network_exporter, distribution-scope=public, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, io.openshift.expose-services=, managed_by=edpm_ansible, io.buildah.version=1.33.7, io.openshift.tags=minimal rhel9, vcs-type=git, com.redhat.component=ubi9-minimal-container, build-date=2025-08-20T13:12:41, config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, version=9.6)
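Per its config_data, node_exporter is published on host port 9100 with a curated collector list and a TLS web config. A sketch scraping one gauge from it; plain HTTP is an assumption, since the mounted node_exporter.yaml may enforce TLS or basic auth, in which case this needs a proper SSL context:

```python
import urllib.request

# Sketch: pull one gauge from the node_exporter published on :9100 in the
# config above. Plain HTTP is an assumption; the web.config.file mounted
# into the container may require TLS/auth.
with urllib.request.urlopen("http://localhost:9100/metrics", timeout=5) as r:
    for line in r.read().decode().splitlines():
        if line.startswith("node_load1 "):
            print(line)
```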
Oct 11 02:17:41 compute-0 ceph-mon[191930]: pgmap v1187: 321 pgs: 321 active+clean; 8.4 MiB data, 156 MiB used, 60 GiB / 60 GiB avail; 11 KiB/s rd, 1.3 MiB/s wr, 16 op/s
Oct 11 02:17:41 compute-0 sshd-session[420471]: Connection closed by invalid user ubuntu 121.227.153.123 port 55512 [preauth]
Oct 11 02:17:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1188: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail; 10 KiB/s rd, 2.3 MiB/s wr, 16 op/s
Oct 11 02:17:42 compute-0 sshd-session[420532]: Invalid user ubuntu from 121.227.153.123 port 54998
Oct 11 02:17:42 compute-0 sshd-session[420532]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:17:42 compute-0 sshd-session[420532]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:17:43 compute-0 ceph-mon[191930]: pgmap v1188: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail; 10 KiB/s rd, 2.3 MiB/s wr, 16 op/s
Oct 11 02:17:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1189: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail; 8.7 KiB/s rd, 1.9 MiB/s wr, 13 op/s
Oct 11 02:17:44 compute-0 sshd-session[420532]: Failed password for invalid user ubuntu from 121.227.153.123 port 54998 ssh2
Oct 11 02:17:44 compute-0 podman[420534]: 2025-10-11 02:17:44.249053025 +0000 UTC m=+0.135076756 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, release=1214.1726694543, distribution-scope=public, io.buildah.version=1.29.0, maintainer=Red Hat, Inc., com.redhat.component=ubi9-container, config_id=edpm, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, version=9.4, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.expose-services=, build-date=2024-09-18T21:23:30, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=kepler, vcs-type=git, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9, summary=Provides the latest release of Red Hat Universal Base Image 9., vendor=Red Hat, Inc., architecture=x86_64, name=ubi9, io.openshift.tags=base rhel9, release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f)
Oct 11 02:17:44 compute-0 nova_compute[356901]: 2025-10-11 02:17:44.893 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:17:44 compute-0 nova_compute[356901]: 2025-10-11 02:17:44.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:17:44 compute-0 nova_compute[356901]: 2025-10-11 02:17:44.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:17:45 compute-0 ceph-mon[191930]: pgmap v1189: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail; 8.7 KiB/s rd, 1.9 MiB/s wr, 13 op/s
Oct 11 02:17:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e125 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:17:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e125 do_prune osdmap full prune enabled
Oct 11 02:17:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e126 e126: 3 total, 3 up, 3 in
Oct 11 02:17:45 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e126: 3 total, 3 up, 3 in
Oct 11 02:17:45 compute-0 sshd-session[420532]: Connection closed by invalid user ubuntu 121.227.153.123 port 54998 [preauth]
Oct 11 02:17:45 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:17:45.763 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=2, options={'arp_ns_explicit_output': 'true', 'mac_prefix': 'fe:55:97', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'ce:9c:4f:b4:85:9b'}, ipsec=False) old=SB_Global(nb_cfg=1) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:17:45 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:17:45.765 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 0 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274
Oct 11 02:17:45 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:17:45.766 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '2'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
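The transaction above acknowledges the new southbound nb_cfg by writing neutron:ovn-metadata-sb-cfg into Chassis_Private's external_ids. A sketch of the same write using ovsdbapp's generic db_set command; `api` stands for an already-connected ovsdbapp OVN southbound API object, whose IDL setup is elided here as an assumption:

```python
# Sketch: the external_ids update from the DbSetCommand above, expressed
# with ovsdbapp's generic db_set. `api` is assumed to be an initialized
# ovsdbapp OVN southbound API instance; building the IDL connection is
# deliberately elided.
def ack_nb_cfg(api, chassis_uuid: str, nb_cfg: int) -> None:
    with api.transaction(check_error=True) as txn:
        txn.add(api.db_set(
            "Chassis_Private", chassis_uuid,
            ("external_ids", {"neutron:ovn-metadata-sb-cfg": str(nb_cfg)}),
        ))
```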
Oct 11 02:17:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1191: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail; 7.9 KiB/s rd, 1.8 MiB/s wr, 12 op/s
Oct 11 02:17:46 compute-0 ceph-mon[191930]: osdmap e126: 3 total, 3 up, 3 in
Oct 11 02:17:46 compute-0 ceph-mon[191930]: pgmap v1191: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail; 7.9 KiB/s rd, 1.8 MiB/s wr, 12 op/s
Oct 11 02:17:46 compute-0 sshd-session[420554]: Invalid user ubuntu from 121.227.153.123 port 55012
Oct 11 02:17:47 compute-0 sshd-session[420554]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:17:47 compute-0 sshd-session[420554]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:17:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1192: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail; 306 B/s rd, 772 KiB/s wr, 0 op/s
Oct 11 02:17:47 compute-0 nova_compute[356901]: 2025-10-11 02:17:47.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:17:47 compute-0 nova_compute[356901]: 2025-10-11 02:17:47.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:17:47 compute-0 nova_compute[356901]: 2025-10-11 02:17:47.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:17:47 compute-0 nova_compute[356901]: 2025-10-11 02:17:47.918 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944
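The "Running periodic task" lines are oslo.service's PeriodicTasks dispatcher walking nova's decorated ComputeManager methods; each DEBUG entry is run_periodic_tasks invoking one of them. A minimal sketch of that pattern (the manager and task names here are hypothetical; instantiation takes an oslo.config CONF and a service loop calls run_periodic_tasks):

```python
from oslo_service import periodic_task

# Sketch of the oslo.service pattern behind the "Running periodic task"
# lines above. Class and method names are hypothetical.
class DemoManager(periodic_task.PeriodicTasks):
    @periodic_task.periodic_task(spacing=60)
    def _heal_cache(self, context):
        # Real nova tasks rebuild the instance info cache, poll volume
        # usage, reclaim queued deletes, and so on.
        print("healing instance info cache")
```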
Oct 11 02:17:48 compute-0 ceph-mon[191930]: pgmap v1192: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail; 306 B/s rd, 772 KiB/s wr, 0 op/s
Oct 11 02:17:49 compute-0 sshd-session[420554]: Failed password for invalid user ubuntu from 121.227.153.123 port 55012 ssh2
Oct 11 02:17:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1193: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail; 306 B/s rd, 772 KiB/s wr, 0 op/s
Oct 11 02:17:49 compute-0 nova_compute[356901]: 2025-10-11 02:17:49.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:17:49 compute-0 nova_compute[356901]: 2025-10-11 02:17:49.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:17:49 compute-0 nova_compute[356901]: 2025-10-11 02:17:49.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:17:49 compute-0 nova_compute[356901]: 2025-10-11 02:17:49.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:17:49 compute-0 nova_compute[356901]: 2025-10-11 02:17:49.934 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:17:49 compute-0 nova_compute[356901]: 2025-10-11 02:17:49.936 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:17:49 compute-0 nova_compute[356901]: 2025-10-11 02:17:49.936 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:17:49 compute-0 nova_compute[356901]: 2025-10-11 02:17:49.937 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:17:49 compute-0 nova_compute[356901]: 2025-10-11 02:17:49.937 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:17:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e126 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:17:50 compute-0 sshd-session[420554]: Connection closed by invalid user ubuntu 121.227.153.123 port 55012 [preauth]
Oct 11 02:17:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:17:50 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/4068230553' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:17:50 compute-0 nova_compute[356901]: 2025-10-11 02:17:50.465 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.527s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:17:50 compute-0 ceph-mon[191930]: pgmap v1193: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail; 306 B/s rd, 772 KiB/s wr, 0 op/s
Oct 11 02:17:50 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/4068230553' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
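update_available_resource shells out to `ceph df --format=json` as client.openstack to size the RBD-backed disk backend; the monitor's audit dispatch above is the same request seen server-side. A sketch of the call reduced to cluster-wide free space (the stats field names are assumptions about ceph df's JSON layout):

```python
import json
import subprocess

# Sketch: the same `ceph df` nova runs above, reduced to cluster-wide
# free bytes. The stats field names are assumed from ceph df's JSON.
out = subprocess.check_output(
    ["ceph", "df", "--format=json",
     "--id", "openstack", "--conf", "/etc/ceph/ceph.conf"],
    text=True,
)
stats = json.loads(out)["stats"]
print("avail GiB:", stats["total_avail_bytes"] / 2**30)
```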
Oct 11 02:17:50 compute-0 nova_compute[356901]: 2025-10-11 02:17:50.956 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:17:50 compute-0 nova_compute[356901]: 2025-10-11 02:17:50.958 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=4561MB free_disk=59.98828125GB free_vcpus=8 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:17:50 compute-0 nova_compute[356901]: 2025-10-11 02:17:50.958 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:17:50 compute-0 nova_compute[356901]: 2025-10-11 02:17:50.959 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:17:51 compute-0 nova_compute[356901]: 2025-10-11 02:17:51.063 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 0 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:17:51 compute-0 nova_compute[356901]: 2025-10-11 02:17:51.064 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=512MB phys_disk=59GB used_disk=0GB total_vcpus=8 used_vcpus=0 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:17:51 compute-0 nova_compute[356901]: 2025-10-11 02:17:51.095 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:17:51 compute-0 sshd-session[420579]: Invalid user ubuntu from 121.227.153.123 port 51192
Oct 11 02:17:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:17:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2561173695' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:17:51 compute-0 nova_compute[356901]: 2025-10-11 02:17:51.617 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.522s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:17:51 compute-0 nova_compute[356901]: 2025-10-11 02:17:51.629 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:17:51 compute-0 nova_compute[356901]: 2025-10-11 02:17:51.648 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:17:51 compute-0 nova_compute[356901]: 2025-10-11 02:17:51.651 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:17:51 compute-0 nova_compute[356901]: 2025-10-11 02:17:51.651 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.692s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
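The inventory nova reports to placement applies allocation ratios and reservations to the raw hypervisor view: 8 vCPUs at ratio 4.0 schedule as 32 VCPU units, and 59 GB of disk at ratio 0.9 as roughly 53. A worked sketch of that arithmetic using the numbers logged above (placement computes capacity as (total - reserved) * allocation_ratio):

```python
# Sketch: capacity math behind the inventory nova reports above.
# capacity = (total - reserved) * allocation_ratio, per resource class.
inventory = {
    "VCPU": {"total": 8, "reserved": 0, "allocation_ratio": 4.0},
    "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
    "DISK_GB": {"total": 59, "reserved": 0, "allocation_ratio": 0.9},
}
for rc, inv in inventory.items():
    cap = (inv["total"] - inv["reserved"]) * inv["allocation_ratio"]
    print(f"{rc}: {cap:g} schedulable units")
```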
Oct 11 02:17:51 compute-0 sshd-session[420579]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:17:51 compute-0 sshd-session[420579]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:17:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1194: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:51 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2561173695' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:17:52 compute-0 nova_compute[356901]: 2025-10-11 02:17:52.648 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:17:52 compute-0 nova_compute[356901]: 2025-10-11 02:17:52.672 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:17:52 compute-0 nova_compute[356901]: 2025-10-11 02:17:52.673 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:17:52 compute-0 ceph-mon[191930]: pgmap v1194: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:53 compute-0 podman[420603]: 2025-10-11 02:17:53.233062785 +0000 UTC m=+0.115660251 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:17:53 compute-0 podman[420605]: 2025-10-11 02:17:53.245736529 +0000 UTC m=+0.113550454 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:17:53 compute-0 podman[420606]: 2025-10-11 02:17:53.257344424 +0000 UTC m=+0.122913578 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.schema-version=1.0, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.license=GPLv2)
Oct 11 02:17:53 compute-0 podman[420604]: 2025-10-11 02:17:53.284798399 +0000 UTC m=+0.161156225 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, tcib_managed=true, config_id=ovn_controller, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:17:53 compute-0 sshd-session[420579]: Failed password for invalid user ubuntu from 121.227.153.123 port 51192 ssh2
Oct 11 02:17:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1195: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:54 compute-0 sshd-session[420579]: Connection closed by invalid user ubuntu 121.227.153.123 port 51192 [preauth]
Oct 11 02:17:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:17:54.841 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:17:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:17:54.842 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:17:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:17:54.842 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:17:54 compute-0 ceph-mon[191930]: pgmap v1195: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:55 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:51202 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:17:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e126 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:17:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1196: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:55 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:51218 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:17:56 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:51226 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:17:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:17:56
Oct 11 02:17:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:17:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:17:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.control', 'vms', 'cephfs.cephfs.data', '.mgr', '.rgw.root', 'default.rgw.log', 'backups', 'volumes', 'images', 'default.rgw.meta', 'cephfs.cephfs.meta']
Oct 11 02:17:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:17:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:17:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:17:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:17:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:17:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:17:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:17:56 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:51238 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:17:56 compute-0 ceph-mon[191930]: pgmap v1196: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:17:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:17:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:17:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:17:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:17:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:17:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:17:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:17:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:17:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:17:57 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:51244 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:17:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1197: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:57 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:51250 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:17:58 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:51254 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:17:58 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:51262 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:17:58 compute-0 ceph-mon[191930]: pgmap v1197: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:59 compute-0 podman[420686]: 2025-10-11 02:17:59.230052978 +0000 UTC m=+0.124167162 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.schema-version=1.0, config_id=multipathd, container_name=multipathd)
Oct 11 02:17:59 compute-0 podman[420687]: 2025-10-11 02:17:59.237433718 +0000 UTC m=+0.125806686 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, config_id=iscsid, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=iscsid, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 02:17:59 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54634 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:17:59 compute-0 podman[157119]: time="2025-10-11T02:17:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:17:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:17:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:17:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:17:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8532 "" "Go-http-client/1.1"
Oct 11 02:17:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1198: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:17:59 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54646 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e126 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:18:00 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54656 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:00 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54660 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:01 compute-0 ceph-mon[191930]: pgmap v1198: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:01 compute-0 openstack_network_exporter[374316]: ERROR   02:18:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:18:01 compute-0 openstack_network_exporter[374316]: ERROR   02:18:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:18:01 compute-0 openstack_network_exporter[374316]: ERROR   02:18:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:18:01 compute-0 openstack_network_exporter[374316]: ERROR   02:18:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:18:01 compute-0 openstack_network_exporter[374316]: ERROR   02:18:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:18:01 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54672 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1199: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:02 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54682 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:02 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54692 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:02 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54706 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:03 compute-0 ceph-mon[191930]: pgmap v1199: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:03 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54722 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1200: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:03 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54728 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:04 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54744 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:04 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54754 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:05 compute-0 ceph-mon[191930]: pgmap v1200: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e126 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:18:05 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54768 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1201: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:06 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54772 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:06 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54786 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:18:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:18:07 compute-0 ceph-mon[191930]: pgmap v1201: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:07 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54798 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:07 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54806 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1202: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:08 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54808 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:08 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54822 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:09 compute-0 ceph-mon[191930]: pgmap v1202: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:09 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:54828 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:09 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:52832 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1203: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:10 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:52838 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e126 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:18:10 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:52842 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:11 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:52846 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:18:11 compute-0 ceph-mon[191930]: pgmap v1203: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:11 compute-0 podman[420724]: 2025-10-11 02:18:11.245516311 +0000 UTC m=+0.118072150 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:18:11 compute-0 podman[420723]: 2025-10-11 02:18:11.255736053 +0000 UTC m=+0.132314489 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=openstack_network_exporter, distribution-scope=public, release=1755695350, vcs-type=git, com.redhat.component=ubi9-minimal-container, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.tags=minimal rhel9, io.buildah.version=1.33.7, managed_by=edpm_ansible, name=ubi9-minimal, build-date=2025-08-20T13:12:41, vendor=Red Hat, Inc., version=9.6, architecture=x86_64, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm, io.openshift.expose-services=, maintainer=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:18:11 compute-0 podman[420722]: 2025-10-11 02:18:11.258347406 +0000 UTC m=+0.142771850 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:18:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1204: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:12 compute-0 sshd-session[420786]: Invalid user ubuntu from 121.227.153.123 port 52848
Oct 11 02:18:13 compute-0 sshd-session[420786]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:18:13 compute-0 sshd-session[420786]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:18:13 compute-0 ceph-mon[191930]: pgmap v1204: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1205: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:14 compute-0 podman[420788]: 2025-10-11 02:18:14.86235671 +0000 UTC m=+0.147775638 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, container_name=kepler, distribution-scope=public, vcs-type=git, maintainer=Red Hat, Inc., name=ubi9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.expose-services=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, managed_by=edpm_ansible, config_id=edpm, summary=Provides the latest release of Red Hat Universal Base Image 9., io.openshift.tags=base rhel9, release=1214.1726694543, release-0.7.12=, build-date=2024-09-18T21:23:30, com.redhat.component=ubi9-container, io.buildah.version=1.29.0, version=9.4, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:18:14 compute-0 sshd-session[420786]: Failed password for invalid user ubuntu from 121.227.153.123 port 52848 ssh2
Oct 11 02:18:15 compute-0 ceph-mon[191930]: pgmap v1205: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e126 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:18:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1206: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:16 compute-0 sshd-session[420786]: Connection closed by invalid user ubuntu 121.227.153.123 port 52848 [preauth]
Oct 11 02:18:17 compute-0 ceph-mon[191930]: pgmap v1206: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:17 compute-0 sshd-session[420807]: Invalid user ubuntu from 121.227.153.123 port 52852
Oct 11 02:18:17 compute-0 sshd-session[420807]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:18:17 compute-0 sshd-session[420807]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:18:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1207: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:19 compute-0 ceph-mon[191930]: pgmap v1207: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:19 compute-0 sshd-session[420807]: Failed password for invalid user ubuntu from 121.227.153.123 port 52852 ssh2
Oct 11 02:18:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1208: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e126 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:18:20 compute-0 sshd-session[420807]: Connection closed by invalid user ubuntu 121.227.153.123 port 52852 [preauth]
Oct 11 02:18:21 compute-0 ceph-mon[191930]: pgmap v1208: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1209: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:22 compute-0 sshd-session[420810]: Invalid user ubuntu from 121.227.153.123 port 51246
Oct 11 02:18:23 compute-0 ceph-mon[191930]: pgmap v1209: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:23 compute-0 sshd-session[420810]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:18:23 compute-0 sshd-session[420810]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:18:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1210: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:24 compute-0 podman[420815]: 2025-10-11 02:18:24.216541931 +0000 UTC m=+0.089847619 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, container_name=ovn_metadata_agent)
Oct 11 02:18:24 compute-0 podman[420814]: 2025-10-11 02:18:24.226700592 +0000 UTC m=+0.104833142 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, managed_by=edpm_ansible, tcib_managed=true, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image)
Oct 11 02:18:24 compute-0 podman[420812]: 2025-10-11 02:18:24.249590635 +0000 UTC m=+0.130927597 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:18:24 compute-0 podman[420813]: 2025-10-11 02:18:24.262280355 +0000 UTC m=+0.141056856 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']})
Oct 11 02:18:25 compute-0 ceph-mon[191930]: pgmap v1210: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e126 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:18:25 compute-0 sshd-session[420810]: Failed password for invalid user ubuntu from 121.227.153.123 port 51246 ssh2
Oct 11 02:18:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1211: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:26 compute-0 sudo[420891]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:18:26 compute-0 sudo[420891]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:26 compute-0 sudo[420891]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:26 compute-0 sudo[420916]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:18:26 compute-0 sudo[420916]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:26 compute-0 sudo[420916]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:26 compute-0 sshd-session[420810]: Connection closed by invalid user ubuntu 121.227.153.123 port 51246 [preauth]
Oct 11 02:18:26 compute-0 sudo[420941]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:18:26 compute-0 sudo[420941]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:26 compute-0 sudo[420941]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:26 compute-0 sudo[420966]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ls
Oct 11 02:18:26 compute-0 sudo[420966]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:18:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:18:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:18:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:18:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:18:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:18:27 compute-0 ceph-mon[191930]: pgmap v1211: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:27 compute-0 podman[421063]: 2025-10-11 02:18:27.396478662 +0000 UTC m=+0.153774256 container exec ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:18:27 compute-0 sshd-session[420991]: Invalid user ubuntu from 121.227.153.123 port 51260
Oct 11 02:18:27 compute-0 podman[421063]: 2025-10-11 02:18:27.506952741 +0000 UTC m=+0.264248315 container exec_died ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:18:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:18:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/4051517741' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:18:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:18:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/4051517741' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:18:27 compute-0 sshd-session[420991]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:18:27 compute-0 sshd-session[420991]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:18:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1212: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/4051517741' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:18:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/4051517741' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:18:28 compute-0 sudo[420966]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:28 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:18:28 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:18:28 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:18:28 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:18:28 compute-0 sudo[421217]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:18:28 compute-0 sudo[421217]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:28 compute-0 sudo[421217]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:29 compute-0 sudo[421242]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:18:29 compute-0 sudo[421242]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:29 compute-0 sudo[421242]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:29 compute-0 sudo[421267]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:18:29 compute-0 sudo[421267]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:29 compute-0 sudo[421267]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:29 compute-0 ceph-mon[191930]: pgmap v1212: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:29 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:18:29 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:18:29 compute-0 sudo[421292]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:18:29 compute-0 sudo[421292]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:29 compute-0 sshd-session[420991]: Failed password for invalid user ubuntu from 121.227.153.123 port 51260 ssh2
Oct 11 02:18:29 compute-0 podman[421316]: 2025-10-11 02:18:29.495764104 +0000 UTC m=+0.132145184 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=multipathd, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, container_name=multipathd, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:18:29 compute-0 podman[421317]: 2025-10-11 02:18:29.495814846 +0000 UTC m=+0.130201543 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, container_name=iscsid, tcib_managed=true, config_id=iscsid, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:18:29 compute-0 podman[157119]: time="2025-10-11T02:18:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:18:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:18:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:18:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:18:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8531 "" "Go-http-client/1.1"
Oct 11 02:18:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1213: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:30 compute-0 sudo[421292]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:18:30 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:18:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:18:30 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:18:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:18:30 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:18:30 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 51897f0b-5104-45e9-849f-e8985d2f6fbf does not exist
Oct 11 02:18:30 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 6f764812-8ebd-4af0-8c3a-4e53e89b38a7 does not exist
Oct 11 02:18:30 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 4a999e59-e9da-4541-ba94-dc1208fc601e does not exist
Oct 11 02:18:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:18:30 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:18:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:18:30 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:18:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:18:30 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:18:30 compute-0 sudo[421381]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:18:30 compute-0 sudo[421381]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:30 compute-0 sudo[421381]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:30 compute-0 ceph-mon[191930]: pgmap v1213: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:30 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:18:30 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:18:30 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:18:30 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:18:30 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:18:30 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:18:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e126 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:18:30 compute-0 sudo[421406]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:18:30 compute-0 sudo[421406]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:30 compute-0 sudo[421406]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:30 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:30.483 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=3, options={'arp_ns_explicit_output': 'true', 'mac_prefix': 'fe:55:97', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'ce:9c:4f:b4:85:9b'}, ipsec=False) old=SB_Global(nb_cfg=2) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:18:30 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:30.485 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 1 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274
Oct 11 02:18:30 compute-0 sudo[421431]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:18:30 compute-0 sudo[421431]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:30 compute-0 sudo[421431]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:30 compute-0 sudo[421456]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:18:30 compute-0 sudo[421456]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
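[note] This is the actual OSD-creation attempt: cephadm, running as ceph-admin and escalating through sudo (the surrounding /bin/true and which python3 entries are its connectivity probes), wraps a ceph-volume lvm batch over the three pre-built LVs with --no-auto --yes --no-systemd. A sketch, assuming sudo journal lines shaped like the COMMAND= entry above (helper name illustrative), that pulls out the wrapped ceph-volume arguments:

    import re

    SUDO_CMD_RE = re.compile(r"COMMAND=(?P<cmd>.+)$")

    def ceph_volume_args(sudo_line):
        """Arguments after 'ceph-volume' in a cephadm sudo COMMAND line, else None."""
        m = SUDO_CMD_RE.search(sudo_line)
        if not m:
            return None
        argv = m.group("cmd").split()
        if "ceph-volume" not in argv:
            return None
        return argv[argv.index("ceph-volume") + 1:]

    # For the COMMAND= line above this begins:
    # ['--fsid', '3c7617c3-...', '--config-json', '-', '--', 'lvm', 'batch', '--no-auto', ...]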
Oct 11 02:18:30 compute-0 sshd-session[420991]: Connection closed by invalid user ubuntu 121.227.153.123 port 51260 [preauth]
Oct 11 02:18:31 compute-0 podman[421522]: 2025-10-11 02:18:31.308572457 +0000 UTC m=+0.103945724 container create b061129ff50271dd37ed31f36e076641a207846698b7b6fcec98923f87420663 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_edison, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0)
Oct 11 02:18:31 compute-0 podman[421522]: 2025-10-11 02:18:31.264191715 +0000 UTC m=+0.059565002 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:18:31 compute-0 systemd[1]: Started libpod-conmon-b061129ff50271dd37ed31f36e076641a207846698b7b6fcec98923f87420663.scope.
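[note] The podman events can land in the journal out of order relative to their own stamps: the image pull above carries m=+0.059 but prints after the container create at m=+0.103. Sorting by the embedded monotonic offset restores event order; a sketch:

    import re

    M_OFF_RE = re.compile(r"m=\+(\d+\.\d+)")

    def by_monotonic(lines):
        """Sort podman event lines by their embedded m=+<seconds> offset."""
        def key(line):
            m = M_OFF_RE.search(line)
            return float(m.group(1)) if m else float("inf")
        return sorted(lines, key=key)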
Oct 11 02:18:31 compute-0 openstack_network_exporter[374316]: ERROR   02:18:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:18:31 compute-0 openstack_network_exporter[374316]: ERROR   02:18:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:18:31 compute-0 openstack_network_exporter[374316]: ERROR   02:18:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:18:31 compute-0 openstack_network_exporter[374316]: ERROR   02:18:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:18:31 compute-0 openstack_network_exporter[374316]: ERROR   02:18:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:18:31 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:18:31 compute-0 podman[421522]: 2025-10-11 02:18:31.466878517 +0000 UTC m=+0.262251794 container init b061129ff50271dd37ed31f36e076641a207846698b7b6fcec98923f87420663 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_edison, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507)
Oct 11 02:18:31 compute-0 podman[421522]: 2025-10-11 02:18:31.481520669 +0000 UTC m=+0.276893936 container start b061129ff50271dd37ed31f36e076641a207846698b7b6fcec98923f87420663 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_edison, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 02:18:31 compute-0 podman[421522]: 2025-10-11 02:18:31.488433677 +0000 UTC m=+0.283806954 container attach b061129ff50271dd37ed31f36e076641a207846698b7b6fcec98923f87420663 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_edison, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef)
Oct 11 02:18:31 compute-0 suspicious_edison[421538]: 167 167
Oct 11 02:18:31 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:31.491 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '3'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
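[note] This closes the loop on the SbGlobalUpdateEvent above: northd bumped SB_Global.nb_cfg from 2 to 3, the agent waited its 1-second delay, then acknowledged by writing neutron:ovn-metadata-sb-cfg=3 into its Chassis_Private external_ids. Pulling the nb_cfg values off such a line (new row first, then old):

    import re

    NB_CFG_RE = re.compile(r"nb_cfg=(\d+)")

    def nb_cfg_values(line):
        """All nb_cfg values on an ovsdbapp event line, new row first, then old."""
        return [int(v) for v in NB_CFG_RE.findall(line)]

    # On the SbGlobalUpdateEvent line above: [3, 2]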
Oct 11 02:18:31 compute-0 systemd[1]: libpod-b061129ff50271dd37ed31f36e076641a207846698b7b6fcec98923f87420663.scope: Deactivated successfully.
Oct 11 02:18:31 compute-0 podman[421522]: 2025-10-11 02:18:31.493757345 +0000 UTC m=+0.289130622 container died b061129ff50271dd37ed31f36e076641a207846698b7b6fcec98923f87420663 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_edison, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, OSD_FLAVOR=default)
Oct 11 02:18:31 compute-0 systemd[1]: var-lib-containers-storage-overlay-c3313e77d4c9c2442a61da983b92c7d17493fc398de823461f3784ea927e4dd1-merged.mount: Deactivated successfully.
Oct 11 02:18:31 compute-0 podman[421522]: 2025-10-11 02:18:31.58066944 +0000 UTC m=+0.376042717 container remove b061129ff50271dd37ed31f36e076641a207846698b7b6fcec98923f87420663 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_edison, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS)
Oct 11 02:18:31 compute-0 systemd[1]: libpod-conmon-b061129ff50271dd37ed31f36e076641a207846698b7b6fcec98923f87420663.scope: Deactivated successfully.
Oct 11 02:18:31 compute-0 podman[421560]: 2025-10-11 02:18:31.871680301 +0000 UTC m=+0.091138219 container create b15c3c3a64196bd1cabb5bba2cce2df80f4cba5762d9e61ab4826fad75ec075a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=epic_austin, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3)
Oct 11 02:18:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1214: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:31 compute-0 podman[421560]: 2025-10-11 02:18:31.838817233 +0000 UTC m=+0.058275241 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:18:31 compute-0 systemd[1]: Started libpod-conmon-b15c3c3a64196bd1cabb5bba2cce2df80f4cba5762d9e61ab4826fad75ec075a.scope.
Oct 11 02:18:32 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:18:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f5e6c1db9702f25508b459d7c1e92f56b9a3c029173f0ad93728ab2f793ecfd4/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:18:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f5e6c1db9702f25508b459d7c1e92f56b9a3c029173f0ad93728ab2f793ecfd4/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:18:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f5e6c1db9702f25508b459d7c1e92f56b9a3c029173f0ad93728ab2f793ecfd4/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:18:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f5e6c1db9702f25508b459d7c1e92f56b9a3c029173f0ad93728ab2f793ecfd4/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:18:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f5e6c1db9702f25508b459d7c1e92f56b9a3c029173f0ad93728ab2f793ecfd4/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
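[note] The repeated xfs notices fire once per bind-mount as the container filesystem is assembled; 0x7fffffff is the signed 32-bit time_t limit, i.e. 2038-01-19 03:14:07 UTC:

    from datetime import datetime, timezone

    limit = 0x7FFFFFFF  # 2**31 - 1 = 2147483647 seconds since the epoch
    print(datetime.fromtimestamp(limit, tz=timezone.utc))
    # 2038-01-19 03:14:07+00:00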
Oct 11 02:18:32 compute-0 podman[421560]: 2025-10-11 02:18:32.074025962 +0000 UTC m=+0.293483960 container init b15c3c3a64196bd1cabb5bba2cce2df80f4cba5762d9e61ab4826fad75ec075a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=epic_austin, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS)
Oct 11 02:18:32 compute-0 podman[421560]: 2025-10-11 02:18:32.096194922 +0000 UTC m=+0.315652870 container start b15c3c3a64196bd1cabb5bba2cce2df80f4cba5762d9e61ab4826fad75ec075a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=epic_austin, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:18:32 compute-0 podman[421560]: 2025-10-11 02:18:32.103493953 +0000 UTC m=+0.322951951 container attach b15c3c3a64196bd1cabb5bba2cce2df80f4cba5762d9e61ab4826fad75ec075a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=epic_austin, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True)
Oct 11 02:18:32 compute-0 sshd-session[421506]: Invalid user ubuntu from 121.227.153.123 port 52524
Oct 11 02:18:32 compute-0 sshd-session[421506]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:18:32 compute-0 sshd-session[421506]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:18:32 compute-0 ceph-mon[191930]: pgmap v1214: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:33 compute-0 epic_austin[421577]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:18:33 compute-0 epic_austin[421577]: --> relative data size: 1.0
Oct 11 02:18:33 compute-0 epic_austin[421577]: --> All data devices are unavailable
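[note] The '--> ' lines are ceph-volume's pre-flight report for the batch: 3 LVM data devices passed in, relative size 1.0, all reported unavailable. That is consistent with the LVs already carrying OSDs 0-2 (see the lvm list output further down), so the batch ends as a no-op rather than an error. A trivial filter for these progress lines:

    def ceph_volume_progress(lines):
        """Yield ceph-volume's '--> ' status messages from journal lines."""
        for line in lines:
            _, sep, msg = line.partition("--> ")
            if sep:
                yield msg.strip()

    # Yields: 'passed data devices: 0 physical, 3 LVM', 'relative data size: 1.0',
    #         'All data devices are unavailable'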
Oct 11 02:18:33 compute-0 systemd[1]: libpod-b15c3c3a64196bd1cabb5bba2cce2df80f4cba5762d9e61ab4826fad75ec075a.scope: Deactivated successfully.
Oct 11 02:18:33 compute-0 systemd[1]: libpod-b15c3c3a64196bd1cabb5bba2cce2df80f4cba5762d9e61ab4826fad75ec075a.scope: Consumed 1.322s CPU time.
Oct 11 02:18:33 compute-0 podman[421560]: 2025-10-11 02:18:33.484640503 +0000 UTC m=+1.704098451 container died b15c3c3a64196bd1cabb5bba2cce2df80f4cba5762d9e61ab4826fad75ec075a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=epic_austin, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:18:33 compute-0 systemd[1]: var-lib-containers-storage-overlay-f5e6c1db9702f25508b459d7c1e92f56b9a3c029173f0ad93728ab2f793ecfd4-merged.mount: Deactivated successfully.
Oct 11 02:18:33 compute-0 podman[421560]: 2025-10-11 02:18:33.603625311 +0000 UTC m=+1.823083229 container remove b15c3c3a64196bd1cabb5bba2cce2df80f4cba5762d9e61ab4826fad75ec075a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=epic_austin, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0)
Oct 11 02:18:33 compute-0 systemd[1]: libpod-conmon-b15c3c3a64196bd1cabb5bba2cce2df80f4cba5762d9e61ab4826fad75ec075a.scope: Deactivated successfully.
Oct 11 02:18:33 compute-0 sudo[421456]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:33 compute-0 sudo[421618]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:18:33 compute-0 sudo[421618]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:33 compute-0 sudo[421618]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1215: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:33 compute-0 sudo[421643]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:18:33 compute-0 sudo[421643]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:34 compute-0 sudo[421643]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:34 compute-0 sudo[421668]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:18:34 compute-0 sudo[421668]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:34 compute-0 sudo[421668]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:34 compute-0 sshd-session[421506]: Failed password for invalid user ubuntu from 121.227.153.123 port 52524 ssh2
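[note] Interleaved with the deployment, 121.227.153.123 keeps probing SSH as the invalid user ubuntu (ports 51260, 52524, 52534 so far), failing password auth each time. A minimal counter over journal text:

    import re
    from collections import Counter

    FAIL_RE = re.compile(
        r"Failed password for invalid user (?P<user>\S+) "
        r"from (?P<ip>\S+) port (?P<port>\d+)"
    )

    def failed_attempts(lines):
        """Count sshd failed-password attempts per (source ip, username)."""
        c = Counter()
        for line in lines:
            m = FAIL_RE.search(line)
            if m:
                c[(m.group("ip"), m.group("user"))] += 1
        return c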
Oct 11 02:18:34 compute-0 sudo[421693]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:18:34 compute-0 sudo[421693]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:34 compute-0 podman[421756]: 2025-10-11 02:18:34.867170467 +0000 UTC m=+0.081793524 container create 42c7e3021e58aba59327c1e5873ead7444a0834d694038cc8134bc68cb4a4099 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_chebyshev, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 02:18:34 compute-0 podman[421756]: 2025-10-11 02:18:34.842346873 +0000 UTC m=+0.056969910 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:18:34 compute-0 systemd[1]: Started libpod-conmon-42c7e3021e58aba59327c1e5873ead7444a0834d694038cc8134bc68cb4a4099.scope.
Oct 11 02:18:34 compute-0 ceph-mon[191930]: pgmap v1215: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:34 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:18:35 compute-0 podman[421756]: 2025-10-11 02:18:35.009968267 +0000 UTC m=+0.224591304 container init 42c7e3021e58aba59327c1e5873ead7444a0834d694038cc8134bc68cb4a4099 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_chebyshev, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:18:35 compute-0 podman[421756]: 2025-10-11 02:18:35.02683692 +0000 UTC m=+0.241459927 container start 42c7e3021e58aba59327c1e5873ead7444a0834d694038cc8134bc68cb4a4099 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_chebyshev, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 02:18:35 compute-0 podman[421756]: 2025-10-11 02:18:35.032346994 +0000 UTC m=+0.246970011 container attach 42c7e3021e58aba59327c1e5873ead7444a0834d694038cc8134bc68cb4a4099 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_chebyshev, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:18:35 compute-0 inspiring_chebyshev[421772]: 167 167
Oct 11 02:18:35 compute-0 systemd[1]: libpod-42c7e3021e58aba59327c1e5873ead7444a0834d694038cc8134bc68cb4a4099.scope: Deactivated successfully.
Oct 11 02:18:35 compute-0 podman[421756]: 2025-10-11 02:18:35.045669595 +0000 UTC m=+0.260292642 container died 42c7e3021e58aba59327c1e5873ead7444a0834d694038cc8134bc68cb4a4099 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_chebyshev, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:18:35 compute-0 systemd[1]: var-lib-containers-storage-overlay-72675d4839ba8c6e571075df0302cb78a8b05369a5a3a96f32d620d15098bdf7-merged.mount: Deactivated successfully.
Oct 11 02:18:35 compute-0 podman[421756]: 2025-10-11 02:18:35.128143009 +0000 UTC m=+0.342766056 container remove 42c7e3021e58aba59327c1e5873ead7444a0834d694038cc8134bc68cb4a4099 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_chebyshev, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 02:18:35 compute-0 systemd[1]: libpod-conmon-42c7e3021e58aba59327c1e5873ead7444a0834d694038cc8134bc68cb4a4099.scope: Deactivated successfully.
Oct 11 02:18:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e126 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:18:35 compute-0 podman[421795]: 2025-10-11 02:18:35.408611877 +0000 UTC m=+0.082825227 container create 6271ef99860826c2fc0361a7354f40f0166e0a210628e7b03886cde4b1fd5813 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_wescoff, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:18:35 compute-0 podman[421795]: 2025-10-11 02:18:35.380305503 +0000 UTC m=+0.054518863 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:18:35 compute-0 sshd-session[421506]: Connection closed by invalid user ubuntu 121.227.153.123 port 52524 [preauth]
Oct 11 02:18:35 compute-0 systemd[1]: Started libpod-conmon-6271ef99860826c2fc0361a7354f40f0166e0a210628e7b03886cde4b1fd5813.scope.
Oct 11 02:18:35 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:18:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ab67f0daad19181f71c98ed2c437f384105c150ee13c4d4b27062baefe614667/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:18:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ab67f0daad19181f71c98ed2c437f384105c150ee13c4d4b27062baefe614667/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:18:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ab67f0daad19181f71c98ed2c437f384105c150ee13c4d4b27062baefe614667/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:18:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ab67f0daad19181f71c98ed2c437f384105c150ee13c4d4b27062baefe614667/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:18:35 compute-0 podman[421795]: 2025-10-11 02:18:35.594950002 +0000 UTC m=+0.269163372 container init 6271ef99860826c2fc0361a7354f40f0166e0a210628e7b03886cde4b1fd5813 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_wescoff, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:18:35 compute-0 podman[421795]: 2025-10-11 02:18:35.626947553 +0000 UTC m=+0.301160903 container start 6271ef99860826c2fc0361a7354f40f0166e0a210628e7b03886cde4b1fd5813 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_wescoff, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 02:18:35 compute-0 podman[421795]: 2025-10-11 02:18:35.633903293 +0000 UTC m=+0.308116623 container attach 6271ef99860826c2fc0361a7354f40f0166e0a210628e7b03886cde4b1fd5813 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_wescoff, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2)
Oct 11 02:18:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1216: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]: {
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:     "0": [
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:         {
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "devices": [
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "/dev/loop3"
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             ],
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "lv_name": "ceph_lv0",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "lv_size": "21470642176",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "name": "ceph_lv0",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "tags": {
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.cluster_name": "ceph",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.crush_device_class": "",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.encrypted": "0",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.osd_id": "0",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.type": "block",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.vdo": "0"
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             },
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "type": "block",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "vg_name": "ceph_vg0"
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:         }
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:     ],
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:     "1": [
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:         {
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "devices": [
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "/dev/loop4"
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             ],
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "lv_name": "ceph_lv1",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "lv_size": "21470642176",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "name": "ceph_lv1",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "tags": {
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.cluster_name": "ceph",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.crush_device_class": "",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.encrypted": "0",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.osd_id": "1",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.type": "block",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.vdo": "0"
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             },
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "type": "block",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "vg_name": "ceph_vg1"
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:         }
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:     ],
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:     "2": [
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:         {
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "devices": [
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "/dev/loop5"
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             ],
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "lv_name": "ceph_lv2",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "lv_size": "21470642176",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "name": "ceph_lv2",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "tags": {
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.cluster_name": "ceph",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.crush_device_class": "",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.encrypted": "0",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.osd_id": "2",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.type": "block",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:                 "ceph.vdo": "0"
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             },
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "type": "block",
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:             "vg_name": "ceph_vg2"
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:         }
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]:     ]
Oct 11 02:18:36 compute-0 condescending_wescoff[421811]: }
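[note] The lvm list payload above maps each osd_id to a single block LV (no separate db/wal devices), with identity pinned in the lv_tags (cluster_fsid, osd_fsid, osdspec_affinity=default_drive_group). A sketch, assuming the JSON has been captured to a string, that flattens it to osd_id -> device info:

    import json

    def osd_map(lvm_list_json):
        """osd_id -> (lv_path, osd_fsid, backing devices) from 'ceph-volume lvm list --format json'."""
        out = {}
        for osd_id, entries in json.loads(lvm_list_json).items():
            for e in entries:
                if e.get("type") == "block":
                    out[int(osd_id)] = (e["lv_path"], e["tags"]["ceph.osd_fsid"], e["devices"])
        return out

    # Here: {0: ('/dev/ceph_vg0/ceph_lv0', 'a9c7940d-...', ['/dev/loop3']), 1: (...), 2: (...)}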
Oct 11 02:18:36 compute-0 systemd[1]: libpod-6271ef99860826c2fc0361a7354f40f0166e0a210628e7b03886cde4b1fd5813.scope: Deactivated successfully.
Oct 11 02:18:36 compute-0 podman[421795]: 2025-10-11 02:18:36.538080429 +0000 UTC m=+1.212293789 container died 6271ef99860826c2fc0361a7354f40f0166e0a210628e7b03886cde4b1fd5813 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_wescoff, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:18:36 compute-0 systemd[1]: var-lib-containers-storage-overlay-ab67f0daad19181f71c98ed2c437f384105c150ee13c4d4b27062baefe614667-merged.mount: Deactivated successfully.
Oct 11 02:18:36 compute-0 sshd-session[421816]: Invalid user ubuntu from 121.227.153.123 port 52534
Oct 11 02:18:36 compute-0 podman[421795]: 2025-10-11 02:18:36.650465409 +0000 UTC m=+1.324678729 container remove 6271ef99860826c2fc0361a7354f40f0166e0a210628e7b03886cde4b1fd5813 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_wescoff, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 02:18:36 compute-0 systemd[1]: libpod-conmon-6271ef99860826c2fc0361a7354f40f0166e0a210628e7b03886cde4b1fd5813.scope: Deactivated successfully.
Oct 11 02:18:36 compute-0 sudo[421693]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:36 compute-0 sshd-session[421816]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:18:36 compute-0 sshd-session[421816]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:18:36 compute-0 sudo[421833]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:18:36 compute-0 sudo[421833]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:36 compute-0 sudo[421833]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:36 compute-0 sudo[421858]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:18:36 compute-0 sudo[421858]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:36 compute-0 ceph-mon[191930]: pgmap v1216: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:36 compute-0 sudo[421858]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:37 compute-0 sudo[421883]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:18:37 compute-0 sudo[421883]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:37 compute-0 sudo[421883]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:37 compute-0 sudo[421908]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:18:37 compute-0 sudo[421908]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
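[note] After the LVM view, cephadm takes the device-level view with ceph-volume raw list; its JSON (which follows below) is keyed by osd_uuid rather than osd_id and reports the device-mapper path plus the bluestore type. A matching sketch under the same capture assumption:

    import json

    def raw_devices(raw_list_json):
        """osd_id -> device from 'ceph-volume raw list --format json' (keyed by osd_uuid)."""
        return {e["osd_id"]: e["device"] for e in json.loads(raw_list_json).values()}

    # e.g. {1: '/dev/mapper/ceph_vg1-ceph_lv1', 2: '/dev/mapper/ceph_vg2-ceph_lv2', ...}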
Oct 11 02:18:37 compute-0 podman[421972]: 2025-10-11 02:18:37.873495006 +0000 UTC m=+0.089880450 container create 0272d38f64e7bca4faaef41d62fad1127653a912444932674ab2815e18535ff5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eloquent_sammet, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 02:18:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1217: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:37 compute-0 podman[421972]: 2025-10-11 02:18:37.837066085 +0000 UTC m=+0.053451629 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:18:37 compute-0 systemd[1]: Started libpod-conmon-0272d38f64e7bca4faaef41d62fad1127653a912444932674ab2815e18535ff5.scope.
Oct 11 02:18:37 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:18:38 compute-0 podman[421972]: 2025-10-11 02:18:38.008068506 +0000 UTC m=+0.224453980 container init 0272d38f64e7bca4faaef41d62fad1127653a912444932674ab2815e18535ff5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eloquent_sammet, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 02:18:38 compute-0 podman[421972]: 2025-10-11 02:18:38.020073405 +0000 UTC m=+0.236458889 container start 0272d38f64e7bca4faaef41d62fad1127653a912444932674ab2815e18535ff5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eloquent_sammet, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:18:38 compute-0 podman[421972]: 2025-10-11 02:18:38.026708804 +0000 UTC m=+0.243094288 container attach 0272d38f64e7bca4faaef41d62fad1127653a912444932674ab2815e18535ff5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eloquent_sammet, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:18:38 compute-0 eloquent_sammet[421988]: 167 167
Oct 11 02:18:38 compute-0 systemd[1]: libpod-0272d38f64e7bca4faaef41d62fad1127653a912444932674ab2815e18535ff5.scope: Deactivated successfully.
Oct 11 02:18:38 compute-0 podman[421972]: 2025-10-11 02:18:38.031494825 +0000 UTC m=+0.247880269 container died 0272d38f64e7bca4faaef41d62fad1127653a912444932674ab2815e18535ff5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eloquent_sammet, OSD_FLAVOR=default, CEPH_REF=reef, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:18:38 compute-0 systemd[1]: var-lib-containers-storage-overlay-358df8335c9da55148ecd8ed9374c10e13cd4c7e19f6c289900ec7e951caa106-merged.mount: Deactivated successfully.
Oct 11 02:18:38 compute-0 podman[421972]: 2025-10-11 02:18:38.098526602 +0000 UTC m=+0.314912046 container remove 0272d38f64e7bca4faaef41d62fad1127653a912444932674ab2815e18535ff5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eloquent_sammet, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:18:38 compute-0 systemd[1]: libpod-conmon-0272d38f64e7bca4faaef41d62fad1127653a912444932674ab2815e18535ff5.scope: Deactivated successfully.
Oct 11 02:18:38 compute-0 podman[422011]: 2025-10-11 02:18:38.414062978 +0000 UTC m=+0.096430997 container create 4341fb2fc6c7e8c9af7778212dd1d7126c4b4e03fba0eeec2837f54b29707011 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_chebyshev, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:18:38 compute-0 podman[422011]: 2025-10-11 02:18:38.378950459 +0000 UTC m=+0.061318538 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:18:38 compute-0 systemd[1]: Started libpod-conmon-4341fb2fc6c7e8c9af7778212dd1d7126c4b4e03fba0eeec2837f54b29707011.scope.
Oct 11 02:18:38 compute-0 sshd-session[421816]: Failed password for invalid user ubuntu from 121.227.153.123 port 52534 ssh2
Oct 11 02:18:38 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:18:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/785613a81c92f09c6166106cd2ef178b45ab4f2a33072be5e5097098691581c2/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:18:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/785613a81c92f09c6166106cd2ef178b45ab4f2a33072be5e5097098691581c2/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:18:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/785613a81c92f09c6166106cd2ef178b45ab4f2a33072be5e5097098691581c2/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:18:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/785613a81c92f09c6166106cd2ef178b45ab4f2a33072be5e5097098691581c2/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:18:38 compute-0 podman[422011]: 2025-10-11 02:18:38.583418636 +0000 UTC m=+0.265786715 container init 4341fb2fc6c7e8c9af7778212dd1d7126c4b4e03fba0eeec2837f54b29707011 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_chebyshev, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507)
Oct 11 02:18:38 compute-0 podman[422011]: 2025-10-11 02:18:38.613863297 +0000 UTC m=+0.296231316 container start 4341fb2fc6c7e8c9af7778212dd1d7126c4b4e03fba0eeec2837f54b29707011 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_chebyshev, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:18:38 compute-0 podman[422011]: 2025-10-11 02:18:38.620517868 +0000 UTC m=+0.302886007 container attach 4341fb2fc6c7e8c9af7778212dd1d7126c4b4e03fba0eeec2837f54b29707011 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_chebyshev, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True)
Oct 11 02:18:38 compute-0 ceph-mon[191930]: pgmap v1217: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]: {
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:         "osd_id": 1,
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:         "type": "bluestore"
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:     },
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:         "osd_id": 2,
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:         "type": "bluestore"
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:     },
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:         "osd_id": 0,
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:         "type": "bluestore"
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]:     }
Oct 11 02:18:39 compute-0 friendly_chebyshev[422027]: }
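
The JSON block above is the inventory the one-shot ceph container printed: a map of OSD UUIDs to their BlueStore devices on this host. A minimal Python sketch for consuming it, assuming the block has been captured to a file named osd_inventory.json (a hypothetical path):

    import json

    # Hypothetical capture of the JSON block logged above.
    with open("osd_inventory.json") as f:
        osds = json.load(f)

    # Print one line per OSD, ordered by osd_id.
    for osd_uuid, meta in sorted(osds.items(), key=lambda kv: kv[1]["osd_id"]):
        print(f"osd.{meta['osd_id']}: {meta['device']} "
              f"(type={meta['type']}, fsid={meta['ceph_fsid']})")
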
Oct 11 02:18:39 compute-0 systemd[1]: libpod-4341fb2fc6c7e8c9af7778212dd1d7126c4b4e03fba0eeec2837f54b29707011.scope: Deactivated successfully.
Oct 11 02:18:39 compute-0 podman[422011]: 2025-10-11 02:18:39.832184226 +0000 UTC m=+1.514552255 container died 4341fb2fc6c7e8c9af7778212dd1d7126c4b4e03fba0eeec2837f54b29707011 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_chebyshev, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:18:39 compute-0 systemd[1]: libpod-4341fb2fc6c7e8c9af7778212dd1d7126c4b4e03fba0eeec2837f54b29707011.scope: Consumed 1.227s CPU time.
Oct 11 02:18:39 compute-0 sshd-session[421816]: Connection closed by invalid user ubuntu 121.227.153.123 port 52534 [preauth]
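
The sshd-session lines from 121.227.153.123 are a classic brute-force pattern: invalid user, pam authentication failure, failed password, connection closed, then the cycle repeats from a new source port. A small sketch, assuming journal text is piped on stdin (e.g. journalctl -u sshd | python3 count_failures.py), that tallies failures per source IP:

    import re
    import sys
    from collections import Counter

    # Matches lines like: "Failed password for invalid user ubuntu from 1.2.3.4 port 52534 ssh2"
    FAIL_RE = re.compile(r"Failed password for (?:invalid user )?(\S+) from (\S+) port")

    hits = Counter()
    for line in sys.stdin:
        m = FAIL_RE.search(line)
        if m:
            user, ip = m.groups()
            hits[(ip, user)] += 1

    for (ip, user), n in hits.most_common():
        print(f"{ip} user={user} failures={n}")
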
Oct 11 02:18:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1218: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:39 compute-0 systemd[1]: var-lib-containers-storage-overlay-785613a81c92f09c6166106cd2ef178b45ab4f2a33072be5e5097098691581c2-merged.mount: Deactivated successfully.
Oct 11 02:18:39 compute-0 podman[422011]: 2025-10-11 02:18:39.964183885 +0000 UTC m=+1.646551884 container remove 4341fb2fc6c7e8c9af7778212dd1d7126c4b4e03fba0eeec2837f54b29707011 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_chebyshev, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 02:18:39 compute-0 systemd[1]: libpod-conmon-4341fb2fc6c7e8c9af7778212dd1d7126c4b4e03fba0eeec2837f54b29707011.scope: Deactivated successfully.
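
The sequence above (container create, init, start, attach, died, remove, then the conmon scope deactivating) is the footprint of a one-shot podman run driven by cephadm. A sketch of the same pattern, using the image digest from the log; the ceph-volume command shown is an assumption, since cephadm passes its own arguments:

    import subprocess

    # One-shot run: podman creates, starts, attaches, and removes the
    # container on exit, matching the create/start/attach/died/remove events.
    image = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")
    result = subprocess.run(
        ["podman", "run", "--rm", image, "ceph-volume", "raw", "list"],
        capture_output=True, text=True, check=False)
    print(result.stdout)
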
Oct 11 02:18:40 compute-0 sudo[421908]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:18:40 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:18:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:18:40 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:18:40 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev fa0b4c94-caed-49d7-91d5-0dd93d1978ca does not exist
Oct 11 02:18:40 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 4c2a9234-1e9f-4d44-82c5-afc2b9865f5b does not exist
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #57. Immutable memtables: 0.
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:18:40.078001) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 29] Flushing memtable with next log file: 57
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149120078079, "job": 29, "event": "flush_started", "num_memtables": 1, "num_entries": 1378, "num_deletes": 251, "total_data_size": 2130500, "memory_usage": 2161456, "flush_reason": "Manual Compaction"}
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 29] Level-0 flush table #58: started
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149120096712, "cf_name": "default", "job": 29, "event": "table_file_creation", "file_number": 58, "file_size": 2087816, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 24145, "largest_seqno": 25522, "table_properties": {"data_size": 2081287, "index_size": 3732, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1733, "raw_key_size": 13608, "raw_average_key_size": 19, "raw_value_size": 2068140, "raw_average_value_size": 3032, "num_data_blocks": 167, "num_entries": 682, "num_filter_entries": 682, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760148986, "oldest_key_time": 1760148986, "file_creation_time": 1760149120, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 58, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 29] Flush lasted 18826 microseconds, and 10863 cpu microseconds.
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:18:40.096826) [db/flush_job.cc:967] [default] [JOB 29] Level-0 flush table #58: 2087816 bytes OK
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:18:40.096861) [db/memtable_list.cc:519] [default] Level-0 commit table #58 started
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:18:40.100465) [db/memtable_list.cc:722] [default] Level-0 commit table #58: memtable #1 done
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:18:40.100489) EVENT_LOG_v1 {"time_micros": 1760149120100481, "job": 29, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:18:40.100516) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 29] Try to delete WAL files size 2124371, prev total WAL file size 2124371, number of live WAL files 2.
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000054.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:18:40.102511) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730032303038' seq:72057594037927935, type:22 .. '7061786F730032323630' seq:0, type:0; will stop at (end)
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 30] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 29 Base level 0, inputs: [58(2038KB)], [56(6906KB)]
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149120102636, "job": 30, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [58], "files_L6": [56], "score": -1, "input_data_size": 9160182, "oldest_snapshot_seqno": -1}
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 30] Generated table #59: 4603 keys, 7426873 bytes, temperature: kUnknown
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149120156662, "cf_name": "default", "job": 30, "event": "table_file_creation", "file_number": 59, "file_size": 7426873, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 7396070, "index_size": 18213, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 11525, "raw_key_size": 115217, "raw_average_key_size": 25, "raw_value_size": 7312501, "raw_average_value_size": 1588, "num_data_blocks": 755, "num_entries": 4603, "num_filter_entries": 4603, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760149120, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 59, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:18:40.157526) [db/compaction/compaction_job.cc:1663] [default] [JOB 30] Compacted 1@0 + 1@6 files to L6 => 7426873 bytes
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:18:40.160814) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 168.2 rd, 136.3 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(2.0, 6.7 +0.0 blob) out(7.1 +0.0 blob), read-write-amplify(7.9) write-amplify(3.6) OK, records in: 5121, records dropped: 518 output_compression: NoCompression
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:18:40.160855) EVENT_LOG_v1 {"time_micros": 1760149120160836, "job": 30, "event": "compaction_finished", "compaction_time_micros": 54470, "compaction_time_cpu_micros": 42705, "output_level": 6, "num_output_files": 1, "total_output_size": 7426873, "num_input_records": 5121, "num_output_records": 4603, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000058.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149120161846, "job": 30, "event": "table_file_deletion", "file_number": 58}
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000056.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149120165563, "job": 30, "event": "table_file_deletion", "file_number": 56}
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:18:40.102195) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:18:40.165748) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:18:40.165757) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:18:40.165759) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:18:40.165761) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:18:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:18:40.165763) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
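
The ceph-mon's embedded RocksDB emits machine-readable EVENT_LOG_v1 records (flush_started, table_file_creation, compaction_finished, and so on) as JSON after a fixed prefix, as in the flush/compaction run above. A sketch that extracts and summarizes those events from journal text on stdin:

    import json
    import re
    import sys

    # The JSON payload follows the literal "EVENT_LOG_v1 " marker.
    EVENT_RE = re.compile(r"EVENT_LOG_v1 (\{.*\})\s*$")

    for line in sys.stdin:
        m = EVENT_RE.search(line)
        if not m:
            continue
        ev = json.loads(m.group(1))
        if ev.get("event") in ("flush_finished", "compaction_finished"):
            # lsm_state lists the file count per LSM level after the job.
            print(ev["event"], "job", ev["job"], "lsm_state", ev["lsm_state"])
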
Oct 11 02:18:40 compute-0 sudo[422075]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:18:40 compute-0 sudo[422075]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:40 compute-0 sudo[422075]: pam_unix(sudo:session): session closed for user root
Oct 11 02:18:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e126 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:18:40 compute-0 sudo[422102]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:18:40 compute-0 sudo[422102]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:18:40 compute-0 sudo[422102]: pam_unix(sudo:session): session closed for user root
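
The paired sudo entries for ceph-admin (/bin/true, then /bin/ls /etc/sysctl.d) look like cephadm's host checks: a passwordless-sudo probe followed by a filesystem inspection. A sketch of the probe half, using sudo's non-interactive flag:

    import subprocess

    # "sudo -n" fails instead of prompting, so a nonzero return code means
    # passwordless sudo is not configured for the current user.
    ok = subprocess.run(["sudo", "-n", "/bin/true"],
                        capture_output=True).returncode == 0
    print("passwordless sudo:", "ok" if ok else "missing")
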
Oct 11 02:18:41 compute-0 ceph-mon[191930]: pgmap v1218: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:18:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:18:41 compute-0 sshd-session[422080]: Invalid user ubuntu from 121.227.153.123 port 35816
Oct 11 02:18:41 compute-0 sshd-session[422080]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:18:41 compute-0 sshd-session[422080]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:18:41 compute-0 podman[422129]: 2025-10-11 02:18:41.578035605 +0000 UTC m=+0.144053471 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:18:41 compute-0 podman[422127]: 2025-10-11 02:18:41.583383354 +0000 UTC m=+0.154753399 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_managed=true)
Oct 11 02:18:41 compute-0 podman[422128]: 2025-10-11 02:18:41.607127553 +0000 UTC m=+0.181053859 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, vendor=Red Hat, Inc., container_name=openstack_network_exporter, version=9.6, release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., managed_by=edpm_ansible, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, name=ubi9-minimal, io.openshift.expose-services=, architecture=x86_64, distribution-scope=public, build-date=2025-08-20T13:12:41, com.redhat.component=ubi9-minimal-container, config_id=edpm, io.buildah.version=1.33.7, maintainer=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vcs-type=git)
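
The three health_status events above come from podman periodically running the healthcheck test defined in each container's config_data (the '/openstack/healthcheck ...' entries). A sketch that reads the same state on demand for one of the containers named in the log, assuming it defines a healthcheck (otherwise .State.Health is null):

    import json
    import subprocess

    # Query podman for the container's health state; container name taken
    # from the log above.
    out = subprocess.run(
        ["podman", "inspect", "--format", "{{json .State.Health}}",
         "node_exporter"],
        capture_output=True, text=True, check=True).stdout
    health = json.loads(out)
    print(health["Status"], "failing streak:", health["FailingStreak"])
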
Oct 11 02:18:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1219: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:42 compute-0 nova_compute[356901]: 2025-10-11 02:18:42.875 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "0cc56d17-ec3a-4408-bccb-91b29427379e" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:18:42 compute-0 nova_compute[356901]: 2025-10-11 02:18:42.876 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "0cc56d17-ec3a-4408-bccb-91b29427379e" acquired by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:18:42 compute-0 nova_compute[356901]: 2025-10-11 02:18:42.907 2 DEBUG nova.compute.manager [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Starting instance... _do_build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2402
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.022 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.025 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.040 2 DEBUG nova.virt.hardware [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Require both a host and instance NUMA topology to fit instance on host. numa_fit_instance_to_host /usr/lib/python3.9/site-packages/nova/virt/hardware.py:2368
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.041 2 INFO nova.compute.claims [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Claim successful on node compute-0.ctlplane.example.com
Oct 11 02:18:43 compute-0 ceph-mon[191930]: pgmap v1219: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.140 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:18:43 compute-0 sshd-session[422080]: Failed password for invalid user ubuntu from 121.227.153.123 port 35816 ssh2
Oct 11 02:18:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:18:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2162766710' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.640 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.500s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
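
During the resource claim, nova shells out to `ceph df --format=json` (the mon audit lines confirm the dispatch) to learn cluster capacity for the RBD image backend. A sketch of the same call with the --id and --conf values from the log, assuming the client.openstack keyring is available locally:

    import json
    import subprocess

    raw = subprocess.run(
        ["ceph", "df", "--format=json", "--id", "openstack",
         "--conf", "/etc/ceph/ceph.conf"],
        capture_output=True, text=True, check=True).stdout
    df = json.loads(raw)

    # Cluster-wide totals live under "stats"; per-pool usage under "pools".
    total = df["stats"]["total_bytes"]
    avail = df["stats"]["total_avail_bytes"]
    print(f"cluster: {avail / total:.0%} free of {total / 2**30:.0f} GiB")
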
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.653 2 DEBUG nova.compute.provider_tree [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.675 2 DEBUG nova.scheduler.client.report [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.701 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: held 0.676s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
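
The Acquiring/acquired/released triple around "compute_resources" is oslo.concurrency's lock decorator serializing the resource tracker's instance_claim. A minimal sketch of that pattern (nova wraps it in its own helper, so this is the underlying mechanism, not nova's exact code):

    from oslo_concurrency import lockutils

    # Calls are serialized on an in-process lock named "compute_resources",
    # producing acquire/release log lines like the ones above.
    @lockutils.synchronized("compute_resources")
    def instance_claim():
        pass  # resource-tracker work happens while the lock is held

    instance_claim()
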
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.702 2 DEBUG nova.compute.manager [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Start building networks asynchronously for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2799
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.759 2 DEBUG nova.compute.manager [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Allocating IP information in the background. _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1952
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.760 2 DEBUG nova.network.neutron [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] allocate_for_instance() allocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1156
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.792 2 INFO nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Ignoring supplied device name: /dev/vda. Libvirt can't honour user-supplied dev names
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.829 2 DEBUG nova.compute.manager [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Start building block device mappings for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2834
Oct 11 02:18:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1220: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.931 2 DEBUG nova.compute.manager [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Start spawning the instance on the hypervisor. _build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2608
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.935 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Creating instance directory _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4723
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.936 2 INFO nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Creating image(s)
Oct 11 02:18:43 compute-0 nova_compute[356901]: 2025-10-11 02:18:43.992 2 DEBUG nova.storage.rbd_utils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 0cc56d17-ec3a-4408-bccb-91b29427379e_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:18:44 compute-0 nova_compute[356901]: 2025-10-11 02:18:44.059 2 DEBUG nova.storage.rbd_utils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 0cc56d17-ec3a-4408-bccb-91b29427379e_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:18:44 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2162766710' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:18:44 compute-0 nova_compute[356901]: 2025-10-11 02:18:44.121 2 DEBUG nova.storage.rbd_utils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 0cc56d17-ec3a-4408-bccb-91b29427379e_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:18:44 compute-0 nova_compute[356901]: 2025-10-11 02:18:44.133 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "c2a4b3f256e07592b38b9a83d173b78feaa2ba6d" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:18:44 compute-0 nova_compute[356901]: 2025-10-11 02:18:44.135 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "c2a4b3f256e07592b38b9a83d173b78feaa2ba6d" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:18:44 compute-0 sshd-session[422080]: Connection closed by invalid user ubuntu 121.227.153.123 port 35816 [preauth]
Oct 11 02:18:44 compute-0 nova_compute[356901]: 2025-10-11 02:18:44.618 2 WARNING oslo_policy.policy [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] JSON formatted policy_file support is deprecated since Victoria release. You need to use YAML format which will be default in future. You can use ``oslopolicy-convert-json-to-yaml`` tool to convert existing JSON-formatted policy file to YAML-formatted in backward compatible way: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html.
Oct 11 02:18:44 compute-0 nova_compute[356901]: 2025-10-11 02:18:44.620 2 WARNING oslo_policy.policy [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] JSON formatted policy_file support is deprecated since Victoria release. You need to use YAML format which will be default in future. You can use ``oslopolicy-convert-json-to-yaml`` tool to convert existing JSON-formatted policy file to YAML-formatted in backward compatible way: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html.
Oct 11 02:18:44 compute-0 nova_compute[356901]: 2025-10-11 02:18:44.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:18:44 compute-0 nova_compute[356901]: 2025-10-11 02:18:44.995 2 DEBUG nova.virt.libvirt.imagebackend [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Image locations are: [{'url': 'rbd://3c7617c3-7a20-523e-a9de-20c0d6ba41da/images/a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7/snap', 'metadata': {'store': 'default_backend'}}, {'url': 'rbd://3c7617c3-7a20-523e-a9de-20c0d6ba41da/images/a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7/snap', 'metadata': {}}] clone /usr/lib/python3.9/site-packages/nova/virt/libvirt/imagebackend.py:1085
Oct 11 02:18:45 compute-0 ceph-mon[191930]: pgmap v1220: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:45 compute-0 podman[422264]: 2025-10-11 02:18:45.21279604 +0000 UTC m=+0.103388746 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., architecture=x86_64, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.openshift.expose-services=, name=ubi9, release-0.7.12=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.tags=base rhel9, managed_by=edpm_ansible, config_id=edpm, summary=Provides the latest release of Red Hat Universal Base Image 9., vendor=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, version=9.4, distribution-scope=public, io.buildah.version=1.29.0, release=1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, container_name=kepler, build-date=2024-09-18T21:23:30, com.redhat.component=ubi9-container)
Oct 11 02:18:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e126 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:18:45 compute-0 nova_compute[356901]: 2025-10-11 02:18:45.345 2 DEBUG nova.network.neutron [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Successfully created port: 64dfc81b-528a-4adc-9787-66719d2f9f93 _create_port_minimal /usr/lib/python3.9/site-packages/nova/network/neutron.py:548
Oct 11 02:18:45 compute-0 sshd-session[422262]: Invalid user ubuntu from 121.227.153.123 port 35826
Oct 11 02:18:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1221: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:45 compute-0 nova_compute[356901]: 2025-10-11 02:18:45.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:18:45 compute-0 sshd-session[422262]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:18:45 compute-0 sshd-session[422262]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:18:46 compute-0 nova_compute[356901]: 2025-10-11 02:18:46.667 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d.part --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:18:46 compute-0 nova_compute[356901]: 2025-10-11 02:18:46.767 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d.part --force-share --output=json" returned: 0 in 0.099s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:18:46 compute-0 nova_compute[356901]: 2025-10-11 02:18:46.768 2 DEBUG nova.virt.images [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7 was qcow2, converting to raw fetch_to_raw /usr/lib/python3.9/site-packages/nova/virt/images.py:242
Oct 11 02:18:46 compute-0 nova_compute[356901]: 2025-10-11 02:18:46.770 2 DEBUG nova.privsep.utils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Path '/var/lib/nova/instances' supports direct I/O supports_direct_io /usr/lib/python3.9/site-packages/nova/privsep/utils.py:63
Oct 11 02:18:46 compute-0 nova_compute[356901]: 2025-10-11 02:18:46.771 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): qemu-img convert -t none -O raw -f qcow2 /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d.part /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d.converted execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:18:46 compute-0 nova_compute[356901]: 2025-10-11 02:18:46.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:18:47 compute-0 ceph-mon[191930]: pgmap v1221: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:18:47 compute-0 nova_compute[356901]: 2025-10-11 02:18:47.119 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "qemu-img convert -t none -O raw -f qcow2 /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d.part /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d.converted" returned: 0 in 0.348s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:18:47 compute-0 nova_compute[356901]: 2025-10-11 02:18:47.126 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d.converted --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:18:47 compute-0 nova_compute[356901]: 2025-10-11 02:18:47.222 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d.converted --force-share --output=json" returned: 0 in 0.096s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:18:47 compute-0 nova_compute[356901]: 2025-10-11 02:18:47.225 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "c2a4b3f256e07592b38b9a83d173b78feaa2ba6d" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 3.089s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
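
The image-cache step above, guarded by the lock on the image checksum, probes the downloaded file with qemu-img info, converts qcow2 to raw (required before an RBD import), and verifies the result. A sketch of that flow with the flags from the log; the paths here are placeholders for the _base cache files:

    import json
    import subprocess

    src = "/var/lib/nova/instances/_base/image.part"       # hypothetical
    dst = "/var/lib/nova/instances/_base/image.converted"  # hypothetical

    # Probe the source format without taking an exclusive lock on the file.
    info = json.loads(subprocess.run(
        ["qemu-img", "info", "--force-share", "--output=json", src],
        capture_output=True, text=True, check=True).stdout)

    if info["format"] == "qcow2":
        # -t none: bypass the host page cache for the destination writes.
        subprocess.run(["qemu-img", "convert", "-t", "none",
                        "-f", "qcow2", "-O", "raw", src, dst], check=True)
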
Oct 11 02:18:47 compute-0 nova_compute[356901]: 2025-10-11 02:18:47.277 2 DEBUG nova.storage.rbd_utils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 0cc56d17-ec3a-4408-bccb-91b29427379e_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:18:47 compute-0 nova_compute[356901]: 2025-10-11 02:18:47.290 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d 0cc56d17-ec3a-4408-bccb-91b29427379e_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:18:47 compute-0 nova_compute[356901]: 2025-10-11 02:18:47.323 2 DEBUG nova.network.neutron [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Successfully updated port: 64dfc81b-528a-4adc-9787-66719d2f9f93 _update_port /usr/lib/python3.9/site-packages/nova/network/neutron.py:586
Oct 11 02:18:47 compute-0 nova_compute[356901]: 2025-10-11 02:18:47.347 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:18:47 compute-0 nova_compute[356901]: 2025-10-11 02:18:47.348 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:18:47 compute-0 nova_compute[356901]: 2025-10-11 02:18:47.348 2 DEBUG nova.network.neutron [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:18:47 compute-0 nova_compute[356901]: 2025-10-11 02:18:47.666 2 DEBUG nova.network.neutron [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Instance cache missing network info. _get_preexisting_port_ids /usr/lib/python3.9/site-packages/nova/network/neutron.py:3323
Oct 11 02:18:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1222: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail; 687 KiB/s rd, 85 B/s wr, 6 op/s
Oct 11 02:18:47 compute-0 nova_compute[356901]: 2025-10-11 02:18:47.892 2 DEBUG nova.compute.manager [req-8d9c0417-ba70-48a4-abe3-991b3d7b61db req-a1e6a749-c8c5-4b84-942b-38acafcf835c 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Received event network-changed-64dfc81b-528a-4adc-9787-66719d2f9f93 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:18:47 compute-0 nova_compute[356901]: 2025-10-11 02:18:47.893 2 DEBUG nova.compute.manager [req-8d9c0417-ba70-48a4-abe3-991b3d7b61db req-a1e6a749-c8c5-4b84-942b-38acafcf835c 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Refreshing instance network info cache due to event network-changed-64dfc81b-528a-4adc-9787-66719d2f9f93. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:18:47 compute-0 nova_compute[356901]: 2025-10-11 02:18:47.893 2 DEBUG oslo_concurrency.lockutils [req-8d9c0417-ba70-48a4-abe3-991b3d7b61db req-a1e6a749-c8c5-4b84-942b-38acafcf835c 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:18:47 compute-0 sshd-session[422262]: Failed password for invalid user ubuntu from 121.227.153.123 port 35826 ssh2
Oct 11 02:18:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e126 do_prune osdmap full prune enabled
Oct 11 02:18:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e127 e127: 3 total, 3 up, 3 in
Oct 11 02:18:48 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e127: 3 total, 3 up, 3 in
Oct 11 02:18:48 compute-0 nova_compute[356901]: 2025-10-11 02:18:48.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:18:48 compute-0 nova_compute[356901]: 2025-10-11 02:18:48.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:18:48 compute-0 nova_compute[356901]: 2025-10-11 02:18:48.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:18:48 compute-0 nova_compute[356901]: 2025-10-11 02:18:48.917 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Skipping network cache update for instance because it is Building. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9871
Oct 11 02:18:48 compute-0 nova_compute[356901]: 2025-10-11 02:18:48.917 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944
Oct 11 02:18:49 compute-0 sshd-session[422262]: Connection closed by invalid user ubuntu 121.227.153.123 port 35826 [preauth]
Oct 11 02:18:49 compute-0 ceph-mon[191930]: pgmap v1222: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail; 687 KiB/s rd, 85 B/s wr, 6 op/s
Oct 11 02:18:49 compute-0 ceph-mon[191930]: osdmap e127: 3 total, 3 up, 3 in
Oct 11 02:18:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e127 do_prune osdmap full prune enabled
Oct 11 02:18:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e128 e128: 3 total, 3 up, 3 in
Oct 11 02:18:49 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e128: 3 total, 3 up, 3 in
Oct 11 02:18:49 compute-0 nova_compute[356901]: 2025-10-11 02:18:49.546 2 DEBUG nova.network.neutron [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
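
The network_info payload logged above is a JSON list of VIFs, each carrying its port ID, MAC, and subnet/IP details. A sketch that pulls the addressing out of it, assuming the list has been captured to a file named network_info.json (a hypothetical path):

    import json

    with open("network_info.json") as f:
        vifs = json.load(f)

    # One line per VIF: port ID, MAC, and all fixed IPs across its subnets.
    for vif in vifs:
        ips = [ip["address"]
               for subnet in vif["network"]["subnets"]
               for ip in subnet["ips"]]
        print(vif["id"], vif["address"], ips)
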
Oct 11 02:18:49 compute-0 rsyslogd[187706]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 02:18:49 compute-0 nova_compute[356901]: 2025-10-11 02:18:49.577 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:18:49 compute-0 nova_compute[356901]: 2025-10-11 02:18:49.577 2 DEBUG nova.compute.manager [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Instance network_info: |[{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}]| _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1967
Oct 11 02:18:49 compute-0 nova_compute[356901]: 2025-10-11 02:18:49.578 2 DEBUG oslo_concurrency.lockutils [req-8d9c0417-ba70-48a4-abe3-991b3d7b61db req-a1e6a749-c8c5-4b84-942b-38acafcf835c 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:18:49 compute-0 nova_compute[356901]: 2025-10-11 02:18:49.578 2 DEBUG nova.network.neutron [req-8d9c0417-ba70-48a4-abe3-991b3d7b61db req-a1e6a749-c8c5-4b84-942b-38acafcf835c 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Refreshing network info cache for port 64dfc81b-528a-4adc-9787-66719d2f9f93 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:18:49 compute-0 nova_compute[356901]: 2025-10-11 02:18:49.595 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d 0cc56d17-ec3a-4408-bccb-91b29427379e_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 2.305s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:18:49 compute-0 nova_compute[356901]: 2025-10-11 02:18:49.760 2 DEBUG nova.storage.rbd_utils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] resizing rbd image 0cc56d17-ec3a-4408-bccb-91b29427379e_disk to 1073741824 resize /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:288
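The two rbd operations above (import of the cached base image, then a resize to the flavor's 1 GiB root disk) can be reproduced with the rbd CLI. A hedged sketch via subprocess, reusing the exact paths and image name from the log; error handling is omitted:

    # Import the base image into the "vms" pool, then grow it to 1 GiB
    # (rbd sizes default to MiB, so 1024 == 1073741824 bytes).
    import subprocess

    base = "/var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d"
    image = "0cc56d17-ec3a-4408-bccb-91b29427379e_disk"
    common = ["--id", "openstack", "--conf", "/etc/ceph/ceph.conf"]

    subprocess.run(["rbd", "import", "--pool", "vms", base, image,
                    "--image-format=2", *common], check=True)
    subprocess.run(["rbd", "resize", "--pool", "vms", image,
                    "--size", "1024", *common], check=True)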
Oct 11 02:18:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1225: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail; 1.0 MiB/s rd, 127 B/s wr, 9 op/s
Oct 11 02:18:50 compute-0 nova_compute[356901]: 2025-10-11 02:18:50.070 2 DEBUG nova.objects.instance [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lazy-loading 'migration_context' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:18:50 compute-0 nova_compute[356901]: 2025-10-11 02:18:50.136 2 DEBUG nova.storage.rbd_utils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 0cc56d17-ec3a-4408-bccb-91b29427379e_disk.eph0 does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:18:50 compute-0 ceph-mon[191930]: osdmap e128: 3 total, 3 up, 3 in
Oct 11 02:18:50 compute-0 nova_compute[356901]: 2025-10-11 02:18:50.210 2 DEBUG nova.storage.rbd_utils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 0cc56d17-ec3a-4408-bccb-91b29427379e_disk.eph0 does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:18:50 compute-0 nova_compute[356901]: 2025-10-11 02:18:50.219 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "ephemeral_1_0706d66" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:18:50 compute-0 nova_compute[356901]: 2025-10-11 02:18:50.221 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "ephemeral_1_0706d66" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:18:50 compute-0 nova_compute[356901]: 2025-10-11 02:18:50.222 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): env LC_ALL=C LANG=C qemu-img create -f raw /var/lib/nova/instances/_base/ephemeral_1_0706d66 1G execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:18:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e128 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:18:50 compute-0 nova_compute[356901]: 2025-10-11 02:18:50.269 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "env LC_ALL=C LANG=C qemu-img create -f raw /var/lib/nova/instances/_base/ephemeral_1_0706d66 1G" returned: 0 in 0.047s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:18:50 compute-0 nova_compute[356901]: 2025-10-11 02:18:50.271 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): mkfs -t vfat -n ephemeral0 /var/lib/nova/instances/_base/ephemeral_1_0706d66 execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:18:50 compute-0 nova_compute[356901]: 2025-10-11 02:18:50.340 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "mkfs -t vfat -n ephemeral0 /var/lib/nova/instances/_base/ephemeral_1_0706d66" returned: 0 in 0.070s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:18:50 compute-0 nova_compute[356901]: 2025-10-11 02:18:50.341 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "ephemeral_1_0706d66" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 0.121s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
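The acquire/create/format/release sequence above is the standard image-cache pattern: the first builder holds a named external lock while producing the base file, so concurrent boots do not race. A sketch under that assumption, with the lock_path and the existence check chosen here for illustration:

    # Serialize creation of the cached ephemeral base image.
    import os
    import subprocess
    from oslo_concurrency import lockutils

    PATH = "/var/lib/nova/instances/_base/ephemeral_1_0706d66"

    @lockutils.synchronized("ephemeral_1_0706d66", external=True,
                            lock_path="/var/lock/nova")  # assumed path
    def fetch_func_sync():
        if os.path.exists(PATH):
            return  # another request already built the base image
        env = dict(os.environ, LC_ALL="C", LANG="C")
        subprocess.run(["qemu-img", "create", "-f", "raw", PATH, "1G"],
                       check=True, env=env)
        subprocess.run(["mkfs", "-t", "vfat", "-n", "ephemeral0", PATH],
                       check=True)

    fetch_func_sync()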
Oct 11 02:18:50 compute-0 sshd-session[422335]: Invalid user ubuntu from 121.227.153.123 port 53156
Oct 11 02:18:50 compute-0 nova_compute[356901]: 2025-10-11 02:18:50.383 2 DEBUG nova.storage.rbd_utils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 0cc56d17-ec3a-4408-bccb-91b29427379e_disk.eph0 does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:18:50 compute-0 nova_compute[356901]: 2025-10-11 02:18:50.394 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/ephemeral_1_0706d66 0cc56d17-ec3a-4408-bccb-91b29427379e_disk.eph0 --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:18:50 compute-0 sshd-session[422335]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:18:50 compute-0 sshd-session[422335]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.053 2 DEBUG nova.network.neutron [req-8d9c0417-ba70-48a4-abe3-991b3d7b61db req-a1e6a749-c8c5-4b84-942b-38acafcf835c 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated VIF entry in instance network info cache for port 64dfc81b-528a-4adc-9787-66719d2f9f93. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.054 2 DEBUG nova.network.neutron [req-8d9c0417-ba70-48a4-abe3-991b3d7b61db req-a1e6a749-c8c5-4b84-942b-38acafcf835c 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.071 2 DEBUG oslo_concurrency.lockutils [req-8d9c0417-ba70-48a4-abe3-991b3d7b61db req-a1e6a749-c8c5-4b84-942b-38acafcf835c 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:18:51 compute-0 ceph-mon[191930]: pgmap v1225: 321 pgs: 321 active+clean; 16 MiB data, 164 MiB used, 60 GiB / 60 GiB avail; 1.0 MiB/s rd, 127 B/s wr, 9 op/s
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.559 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/ephemeral_1_0706d66 0cc56d17-ec3a-4408-bccb-91b29427379e_disk.eph0 --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 1.165s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.802 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Created local disks _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4857
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.803 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Ensure instance console log exists: /var/lib/nova/instances/0cc56d17-ec3a-4408-bccb-91b29427379e/console.log _ensure_console_log_for_instance /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4609
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.804 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "vgpu_resources" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.805 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "vgpu_resources" acquired by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.805 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "vgpu_resources" "released" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.809 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Start _get_guest_xml network_info=[{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] disk_info={'disk_bus': 'virtio', 'cdrom_bus': 'sata', 'mapping': {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, 'disk.config': {'bus': 'sata', 'dev': 'sda', 'type': 'cdrom'}}} image_meta=ImageMeta(checksum='b874c39491a2377b8490f5f1e89761a4',container_format='bare',created_at=2025-10-11T02:17:33Z,direct_url=<?>,disk_format='qcow2',id=a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7,min_disk=0,min_ram=0,name='cirros',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=16300544,status='active',tags=<?>,updated_at=2025-10-11T02:17:37Z,virtual_size=<?>,visibility=<?>) rescue=None block_device_info={'root_device_name': '/dev/vda', 'image': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'boot_index': 0, 'device_name': '/dev/vda', 'size': 0, 'encryption_format': None, 'image_id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}], 'ephemerals': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'device_name': '/dev/vdb', 'size': 1, 'encryption_format': None}], 'block_device_mapping': [], 'swap': None} _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7549
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.819 2 WARNING nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.829 2 DEBUG nova.virt.libvirt.host [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V1... _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1653
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.830 2 DEBUG nova.virt.libvirt.host [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CPU controller missing on host. _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1663
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.836 2 DEBUG nova.virt.libvirt.host [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V2... _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1672
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.837 2 DEBUG nova.virt.libvirt.host [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CPU controller found on host. _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1679
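The two probes above first look for a cgroup-v1 cpu controller, then fall back to cgroup v2. On a v2-only host such as this one, the check reduces to reading a single file; a minimal sketch:

    # cgroup v2 exposes the enabled controllers in one file at the root.
    with open("/sys/fs/cgroup/cgroup.controllers") as f:
        controllers = f.read().split()
    print("cpu controller present:", "cpu" in controllers)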
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.838 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CPU mode 'host-model' models '' was chosen, with extra flags: '' _get_guest_cpu_model_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:5396
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.838 2 DEBUG nova.virt.hardware [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Getting desirable topologies for flavor Flavor(created_at=2025-10-11T02:17:41Z,deleted=False,deleted_at=None,description=None,disabled=False,ephemeral_gb=1,extra_specs={},flavorid='486e1451-345c-45d6-b075-f4717e759025',id=1,is_public=True,memory_mb=512,name='m1.small',projects=<?>,root_gb=1,rxtx_factor=1.0,swap=0,updated_at=None,vcpu_weight=0,vcpus=1) and image_meta ImageMeta(checksum='b874c39491a2377b8490f5f1e89761a4',container_format='bare',created_at=2025-10-11T02:17:33Z,direct_url=<?>,disk_format='qcow2',id=a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7,min_disk=0,min_ram=0,name='cirros',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=16300544,status='active',tags=<?>,updated_at=2025-10-11T02:17:37Z,virtual_size=<?>,visibility=<?>), allow threads: True _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:563
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.840 2 DEBUG nova.virt.hardware [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Flavor limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:348
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.840 2 DEBUG nova.virt.hardware [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Image limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:352
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.841 2 DEBUG nova.virt.hardware [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Flavor pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:388
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.841 2 DEBUG nova.virt.hardware [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Image pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:392
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.842 2 DEBUG nova.virt.hardware [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Chose sockets=0, cores=0, threads=0; limits were sockets=65536, cores=65536, threads=65536 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:430
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.842 2 DEBUG nova.virt.hardware [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Topology preferred VirtCPUTopology(cores=0,sockets=0,threads=0), maximum VirtCPUTopology(cores=65536,sockets=65536,threads=65536) _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:569
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.842 2 DEBUG nova.virt.hardware [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Build topologies for 1 vcpu(s) 1:1:1 _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:471
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.843 2 DEBUG nova.virt.hardware [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Got 1 possible topologies _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:501
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.843 2 DEBUG nova.virt.hardware [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Possible topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:575
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.844 2 DEBUG nova.virt.hardware [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Sorted desired topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:577
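The topology walk above enumerates (sockets, cores, threads) factorizations of the vCPU count within the 65536:65536:65536 limits and then orders them by preference. A sketch of that enumeration, written fresh here rather than copied from nova.virt.hardware:

    # Enumerate CPU topologies whose product equals the vCPU count.
    def possible_topologies(vcpus, max_sockets, max_cores, max_threads):
        found = []
        for s in range(1, min(max_sockets, vcpus) + 1):
            if vcpus % s:
                continue
            for c in range(1, min(max_cores, vcpus // s) + 1):
                if (vcpus // s) % c:
                    continue
                t = vcpus // (s * c)
                if t <= max_threads:
                    found.append((s, c, t))
        return found

    # One vCPU admits exactly one topology, matching the log:
    print(possible_topologies(1, 65536, 65536, 65536))  # [(1, 1, 1)]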
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.848 2 DEBUG nova.privsep.utils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Path '/var/lib/nova/instances' supports direct I/O supports_direct_io /usr/lib/python3.9/site-packages/nova/privsep/utils.py:63
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.849 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:18:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1226: 321 pgs: 321 active+clean; 20 MiB data, 165 MiB used, 60 GiB / 60 GiB avail; 2.0 MiB/s rd, 143 KiB/s wr, 27 op/s
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.898 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.931 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.931 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.932 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.932 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:18:51 compute-0 nova_compute[356901]: 2025-10-11 02:18:51.933 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:18:52 compute-0 sshd-session[422335]: Failed password for invalid user ubuntu from 121.227.153.123 port 53156 ssh2
Oct 11 02:18:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:18:52 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3451861494' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:18:52 compute-0 nova_compute[356901]: 2025-10-11 02:18:52.353 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.503s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:18:52 compute-0 nova_compute[356901]: 2025-10-11 02:18:52.354 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:18:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:18:52 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2353894882' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:18:52 compute-0 nova_compute[356901]: 2025-10-11 02:18:52.452 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.519s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:18:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:18:52 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2818591901' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:18:52 compute-0 nova_compute[356901]: 2025-10-11 02:18:52.863 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.509s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
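Nova shells out to "ceph mon dump --format=json" to learn the monitor addresses that end up in the <host name=... port=.../> elements of the domain XML further down. A hedged sketch of that call and the fields it reads; the "addr" format varies across Ceph releases:

    import json
    import subprocess

    out = subprocess.run(
        ["ceph", "mon", "dump", "--format=json",
         "--id", "openstack", "--conf", "/etc/ceph/ceph.conf"],
        check=True, capture_output=True, text=True).stdout
    for mon in json.loads(out).get("mons", []):
        print(mon["name"], mon["addr"])  # e.g. 192.168.122.100:6789/0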
Oct 11 02:18:52 compute-0 nova_compute[356901]: 2025-10-11 02:18:52.906 2 DEBUG nova.storage.rbd_utils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 0cc56d17-ec3a-4408-bccb-91b29427379e_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:18:52 compute-0 nova_compute[356901]: 2025-10-11 02:18:52.915 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.038 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.041 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=4547MB free_disk=59.98719024658203GB free_vcpus=8 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.041 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.041 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.126 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.127 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.127 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
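The arithmetic behind "used_ram=1024MB" with a single 512 MB instance: Nova adds a reserved host memory margin (reserved_host_memory_mb, which defaults to 512) on top of instance memory. A worked check, assuming the default is unchanged on this host:

    instance_memory_mb = 512       # m1.small flavor above
    reserved_host_memory_mb = 512  # Nova default, assumed here
    assert instance_memory_mb + reserved_host_memory_mb == 1024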
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.160 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:18:53 compute-0 ceph-mon[191930]: pgmap v1226: 321 pgs: 321 active+clean; 20 MiB data, 165 MiB used, 60 GiB / 60 GiB avail; 2.0 MiB/s rd, 143 KiB/s wr, 27 op/s
Oct 11 02:18:53 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3451861494' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:18:53 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2353894882' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:18:53 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2818591901' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:18:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:18:53 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/33816056' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.436 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.521s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.440 2 DEBUG nova.virt.libvirt.vif [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:18:39Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='test_0',display_name='test_0',ec2_ids=EC2Ids,ephemeral_gb=1,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(1),hidden=False,host='compute-0.ctlplane.example.com',hostname='test-0',id=1,image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',info_cache=InstanceInfoCache,instance_type_id=1,kernel_id='',key_data=None,key_name=None,keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=512,metadata={},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='97026531b3404a11869cb85a059c4a0d',ramdisk_id='',reservation_id='r-yofl639s',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,admin,reader',image_base_image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_min_disk='1',image_min_ram='0',image_owner_specified.openstack.md5='',image_owner_specified.openstack.object='images/cirros',image_owner_specified.openstack.sha256='',network_allocated='True',owner_project_name='admin',owner_user_name='admin'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:18:43Z,user_data=None,user_id='d215f3ebbc07435493ccd666fc80109d',uuid=0cc56d17-ec3a-4408-bccb-91b29427379e,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} virt_type=kvm get_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:563
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.440 2 DEBUG nova.network.os_vif_util [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converting VIF {"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.443 2 DEBUG nova.network.os_vif_util [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:cc:aa:96,bridge_name='br-int',has_traffic_filtering=True,id=64dfc81b-528a-4adc-9787-66719d2f9f93,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap64dfc81b-52') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.448 2 DEBUG nova.objects.instance [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lazy-loading 'pci_devices' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.467 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] End _get_guest_xml xml=<domain type="kvm">
Oct 11 02:18:53 compute-0 nova_compute[356901]:   <uuid>0cc56d17-ec3a-4408-bccb-91b29427379e</uuid>
Oct 11 02:18:53 compute-0 nova_compute[356901]:   <name>instance-00000001</name>
Oct 11 02:18:53 compute-0 nova_compute[356901]:   <memory>524288</memory>
Oct 11 02:18:53 compute-0 nova_compute[356901]:   <vcpu>1</vcpu>
Oct 11 02:18:53 compute-0 nova_compute[356901]:   <metadata>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.1">
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <nova:package version="27.5.2-0.20250829104910.6f8decf.el9"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <nova:name>test_0</nova:name>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <nova:creationTime>2025-10-11 02:18:51</nova:creationTime>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <nova:flavor name="m1.small">
Oct 11 02:18:53 compute-0 nova_compute[356901]:         <nova:memory>512</nova:memory>
Oct 11 02:18:53 compute-0 nova_compute[356901]:         <nova:disk>1</nova:disk>
Oct 11 02:18:53 compute-0 nova_compute[356901]:         <nova:swap>0</nova:swap>
Oct 11 02:18:53 compute-0 nova_compute[356901]:         <nova:ephemeral>1</nova:ephemeral>
Oct 11 02:18:53 compute-0 nova_compute[356901]:         <nova:vcpus>1</nova:vcpus>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       </nova:flavor>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <nova:owner>
Oct 11 02:18:53 compute-0 nova_compute[356901]:         <nova:user uuid="d215f3ebbc07435493ccd666fc80109d">admin</nova:user>
Oct 11 02:18:53 compute-0 nova_compute[356901]:         <nova:project uuid="97026531b3404a11869cb85a059c4a0d">admin</nova:project>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       </nova:owner>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <nova:root type="image" uuid="a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <nova:ports>
Oct 11 02:18:53 compute-0 nova_compute[356901]:         <nova:port uuid="64dfc81b-528a-4adc-9787-66719d2f9f93">
Oct 11 02:18:53 compute-0 nova_compute[356901]:           <nova:ip type="fixed" address="192.168.0.236" ipVersion="4"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:         </nova:port>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       </nova:ports>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     </nova:instance>
Oct 11 02:18:53 compute-0 nova_compute[356901]:   </metadata>
Oct 11 02:18:53 compute-0 nova_compute[356901]:   <sysinfo type="smbios">
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <system>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <entry name="manufacturer">RDO</entry>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <entry name="product">OpenStack Compute</entry>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <entry name="version">27.5.2-0.20250829104910.6f8decf.el9</entry>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <entry name="serial">0cc56d17-ec3a-4408-bccb-91b29427379e</entry>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <entry name="uuid">0cc56d17-ec3a-4408-bccb-91b29427379e</entry>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <entry name="family">Virtual Machine</entry>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     </system>
Oct 11 02:18:53 compute-0 nova_compute[356901]:   </sysinfo>
Oct 11 02:18:53 compute-0 nova_compute[356901]:   <os>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <type arch="x86_64" machine="q35">hvm</type>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <boot dev="hd"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <smbios mode="sysinfo"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:   </os>
Oct 11 02:18:53 compute-0 nova_compute[356901]:   <features>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <acpi/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <apic/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <vmcoreinfo/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:   </features>
Oct 11 02:18:53 compute-0 nova_compute[356901]:   <clock offset="utc">
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <timer name="pit" tickpolicy="delay"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <timer name="rtc" tickpolicy="catchup"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <timer name="hpet" present="no"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:   </clock>
Oct 11 02:18:53 compute-0 nova_compute[356901]:   <cpu mode="host-model" match="exact">
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <topology sockets="1" cores="1" threads="1"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:18:53 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/0cc56d17-ec3a-4408-bccb-91b29427379e_disk">
Oct 11 02:18:53 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       </source>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:18:53 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <target dev="vda" bus="virtio"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/0cc56d17-ec3a-4408-bccb-91b29427379e_disk.eph0">
Oct 11 02:18:53 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       </source>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:18:53 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <target dev="vdb" bus="virtio"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <disk type="network" device="cdrom">
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/0cc56d17-ec3a-4408-bccb-91b29427379e_disk.config">
Oct 11 02:18:53 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       </source>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:18:53 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <target dev="sda" bus="sata"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <interface type="ethernet">
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <mac address="fa:16:3e:cc:aa:96"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <driver name="vhost" rx_queue_size="512"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <mtu size="1442"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <target dev="tap64dfc81b-52"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <serial type="pty">
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <log file="/var/lib/nova/instances/0cc56d17-ec3a-4408-bccb-91b29427379e/console.log" append="off"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     </serial>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <graphics type="vnc" autoport="yes" listen="::0"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <video>
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     </video>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <input type="tablet" bus="usb"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <rng model="virtio">
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <backend model="random">/dev/urandom</backend>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <controller type="usb" index="0"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     <memballoon model="virtio">
Oct 11 02:18:53 compute-0 nova_compute[356901]:       <stats period="10"/>
Oct 11 02:18:53 compute-0 nova_compute[356901]:     </memballoon>
Oct 11 02:18:53 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:18:53 compute-0 nova_compute[356901]: </domain>
Oct 11 02:18:53 compute-0 nova_compute[356901]:  _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7555
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.487 2 DEBUG nova.compute.manager [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Preparing to wait for external event network-vif-plugged-64dfc81b-528a-4adc-9787-66719d2f9f93 prepare_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:283
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.487 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "0cc56d17-ec3a-4408-bccb-91b29427379e-events" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.488 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "0cc56d17-ec3a-4408-bccb-91b29427379e-events" acquired by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.488 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "0cc56d17-ec3a-4408-bccb-91b29427379e-events" "released" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
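The three lockutils lines above are oslo.concurrency's named-lock pattern: acquire "<uuid>-events", touch the shared event table, release. A sketch of the same pattern; the decorator is real oslo.concurrency API, but the function body is illustrative, not Nova's actual code:

    # Sketch of the named-lock pattern in the three lines above. The lock
    # name matches the log; the body is illustrative.
    from oslo_concurrency import lockutils

    _events = {}

    @lockutils.synchronized("0cc56d17-ec3a-4408-bccb-91b29427379e-events")
    def _create_or_get_event(name):
        # Runs with the instance-scoped lock held, so concurrent event
        # registrations for the same instance cannot race.
        return _events.setdefault(name, object())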
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.489 2 DEBUG nova.virt.libvirt.vif [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:18:39Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='test_0',display_name='test_0',ec2_ids=EC2Ids,ephemeral_gb=1,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(1),hidden=False,host='compute-0.ctlplane.example.com',hostname='test-0',id=1,image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',info_cache=InstanceInfoCache,instance_type_id=1,kernel_id='',key_data=None,key_name=None,keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=512,metadata={},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=PciDeviceList,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='97026531b3404a11869cb85a059c4a0d',ramdisk_id='',reservation_id='r-yofl639s',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,admin,reader',image_base_image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_min_disk='1',image_min_ram='0',image_owner_specified.openstack.md5='',image_owner_specified.openstack.object='images/cirros',image_owner_specified.openstack.sha256='',network_allocated='True',owner_project_name='admin',owner_user_name='admin'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:18:43Z,user_data=None,user_id='d215f3ebbc07435493ccd666fc80109d',uuid=0cc56d17-ec3a-4408-bccb-91b29427379e,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} plug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:710
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.490 2 DEBUG nova.network.os_vif_util [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converting VIF {"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.491 2 DEBUG nova.network.os_vif_util [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:cc:aa:96,bridge_name='br-int',has_traffic_filtering=True,id=64dfc81b-528a-4adc-9787-66719d2f9f93,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap64dfc81b-52') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.491 2 DEBUG os_vif [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Plugging vif VIFOpenVSwitch(active=False,address=fa:16:3e:cc:aa:96,bridge_name='br-int',has_traffic_filtering=True,id=64dfc81b-528a-4adc-9787-66719d2f9f93,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap64dfc81b-52') plug /usr/lib/python3.9/site-packages/os_vif/__init__.py:76
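The two lines above show the hand-off from Nova's VIF dict to os-vif: nova_to_osvif_vif() builds a VIFOpenVSwitch object, then os_vif.plug() dispatches to the 'ovs' plugin. A rough sketch of driving os-vif directly with values copied from the log; the object here is deliberately minimal, and a real plug() call needs a fuller object (network, port profile at least), so treat this as illustrative only:

    # Rough sketch of the os-vif hand-off logged above. Values come from
    # the log; a production plug() needs more fields, so this is not
    # runnable as-is against a live host.
    import os_vif
    from os_vif.objects import instance_info
    from os_vif.objects import vif as vif_obj

    os_vif.initialize()   # registers the installed plugins, e.g. 'ovs'

    vif = vif_obj.VIFOpenVSwitch(
        id="64dfc81b-528a-4adc-9787-66719d2f9f93",
        address="fa:16:3e:cc:aa:96",
        bridge_name="br-int",
        vif_name="tap64dfc81b-52")
    inst = instance_info.InstanceInfo(
        uuid="0cc56d17-ec3a-4408-bccb-91b29427379e", name="test_0")

    os_vif.plug(vif, inst)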
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.547 2 DEBUG ovsdbapp.backend.ovs_idl [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Created schema index Interface.name autocreate_indices /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/__init__.py:106
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.548 2 DEBUG ovsdbapp.backend.ovs_idl [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Created schema index Port.name autocreate_indices /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/__init__.py:106
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.548 2 DEBUG ovsdbapp.backend.ovs_idl [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Created schema index Bridge.name autocreate_indices /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/__init__.py:106
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.549 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] tcp:127.0.0.1:6640: entering CONNECTING _transition /usr/lib64/python3.9/site-packages/ovs/reconnect.py:519
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.550 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [POLLOUT] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.550 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] tcp:127.0.0.1:6640: entering ACTIVE _transition /usr/lib64/python3.9/site-packages/ovs/reconnect.py:519
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.552 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.554 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.557 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.567 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.568 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddBridgeCommand(_result=None, name=br-int, may_exist=True, datapath_type=system) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.568 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.570 2 INFO oslo.privsep.daemon [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running privsep helper: ['sudo', 'nova-rootwrap', '/etc/nova/rootwrap.conf', 'privsep-helper', '--config-file', '/etc/nova/nova.conf', '--config-file', '/etc/nova/nova-compute.conf', '--config-dir', '/etc/nova/nova.conf.d', '--privsep_context', 'vif_plug_ovs.privsep.vif_plug', '--privsep_sock_path', '/tmp/tmpdxn9_pua/privsep.sock']
Oct 11 02:18:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:18:53 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2528986065' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.664 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.504s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
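Nova's RBD image backend sizes its DISK_GB inventory from the "ceph df" call logged above. A sketch that runs the same command and reads the cluster totals from its JSON, assuming ceph's standard "stats" block; the reserved/allocation-ratio arithmetic Nova applies afterwards is configuration-driven and not reproduced here:

    # Sketch: run the same command as the log line above and read the
    # cluster totals. Field names follow ceph's "df --format=json" output.
    import json
    import subprocess

    out = subprocess.check_output(
        ["ceph", "df", "--format=json",
         "--id", "openstack", "--conf", "/etc/ceph/ceph.conf"])
    stats = json.loads(out)["stats"]

    total_gb = stats["total_bytes"] // (1024 ** 3)
    avail_gb = stats["total_avail_bytes"] // (1024 ** 3)
    print(f"cluster: {total_gb} GiB total, {avail_gb} GiB available")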
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.678 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating inventory in ProviderTree for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with inventory: {'MEMORY_MB': {'total': 7680, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0, 'reserved': 512}, 'VCPU': {'total': 8, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0, 'reserved': 0}, 'DISK_GB': {'total': 59, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9, 'reserved': 1}} update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:176
Oct 11 02:18:53 compute-0 sshd-session[422335]: Connection closed by invalid user ubuntu 121.227.153.123 port 53156 [preauth]
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.759 2 ERROR nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [req-de77501b-0ab7-4eb8-87ce-df008661e460] Failed to update inventory to [{'MEMORY_MB': {'total': 7680, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0, 'reserved': 512}, 'VCPU': {'total': 8, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0, 'reserved': 0}, 'DISK_GB': {'total': 59, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9, 'reserved': 1}}] for resource provider with UUID 256b11da-7f71-42c0-941c-ea1e909a35f8.  Got 409: {"errors": [{"status": 409, "title": "Conflict", "detail": "There was a conflict when trying to complete your request.\n\n resource provider generation conflict  ", "code": "placement.concurrent_update", "request_id": "req-de77501b-0ab7-4eb8-87ce-df008661e460"}]}
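The 409 above is Placement's optimistic concurrency control: every inventory write carries the resource provider generation, and a write against a stale generation is rejected with placement.concurrent_update, after which the client refreshes and retries (visible in the lines that follow, succeeding at generation 3 shortly after). A sketch of that read-modify-retry loop against the Placement REST API; the endpoint and token are placeholders:

    # Sketch of the generation-guarded retry loop behind the 409 above.
    # PLACEMENT and the token are placeholders; request/response shapes
    # follow the Placement API.
    import requests

    PLACEMENT = "http://placement.example.com"   # placeholder endpoint
    HEADERS = {"X-Auth-Token": "...",            # placeholder token
               "OpenStack-API-Version": "placement 1.26"}
    RP = "256b11da-7f71-42c0-941c-ea1e909a35f8"

    def set_inventory(inventories):
        while True:
            # Learn the provider's current generation.
            rp = requests.get(f"{PLACEMENT}/resource_providers/{RP}",
                              headers=HEADERS).json()
            resp = requests.put(
                f"{PLACEMENT}/resource_providers/{RP}/inventories",
                headers=HEADERS,
                json={"resource_provider_generation": rp["generation"],
                      "inventories": inventories})
            if resp.status_code != 409:
                return resp
            # Concurrent update: someone else wrote first. Refresh and
            # retry, as the report client does in the lines that follow.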
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.780 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing inventories for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:804
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.800 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating ProviderTree inventory for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 from _refresh_and_get_inventory using data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} _refresh_and_get_inventory /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:768
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.800 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating inventory in ProviderTree for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with inventory: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 0, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:176
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.822 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing aggregate associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, aggregates: None _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:813
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.862 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing trait associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, traits: COMPUTE_VOLUME_EXTEND,COMPUTE_NET_VIF_MODEL_VMXNET3,HW_CPU_X86_SSSE3,COMPUTE_RESCUE_BFV,COMPUTE_SOCKET_PCI_NUMA_AFFINITY,COMPUTE_NODE,HW_CPU_X86_SVM,COMPUTE_STORAGE_BUS_SCSI,HW_CPU_X86_FMA3,COMPUTE_GRAPHICS_MODEL_NONE,COMPUTE_NET_VIF_MODEL_RTL8139,HW_CPU_X86_SSE4A,COMPUTE_IMAGE_TYPE_QCOW2,HW_CPU_X86_BMI2,HW_CPU_X86_SSE42,HW_CPU_X86_AVX2,COMPUTE_IMAGE_TYPE_RAW,COMPUTE_VIOMMU_MODEL_VIRTIO,HW_CPU_X86_AESNI,COMPUTE_STORAGE_BUS_FDC,COMPUTE_GRAPHICS_MODEL_VIRTIO,HW_CPU_X86_AMD_SVM,COMPUTE_NET_VIF_MODEL_NE2K_PCI,COMPUTE_ACCELERATORS,HW_CPU_X86_SSE2,COMPUTE_GRAPHICS_MODEL_VGA,HW_CPU_X86_ABM,HW_CPU_X86_AVX,COMPUTE_NET_VIF_MODEL_E1000,COMPUTE_STORAGE_BUS_USB,COMPUTE_NET_ATTACH_INTERFACE,HW_CPU_X86_MMX,COMPUTE_SECURITY_TPM_2_0,COMPUTE_IMAGE_TYPE_ISO,HW_CPU_X86_SSE41,COMPUTE_IMAGE_TYPE_AKI,COMPUTE_IMAGE_TYPE_AMI,COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG,COMPUTE_DEVICE_TAGGING,COMPUTE_SECURITY_UEFI_SECURE_BOOT,COMPUTE_TRUSTED_CERTS,COMPUTE_NET_VIF_MODEL_VIRTIO,COMPUTE_VIOMMU_MODEL_INTEL,COMPUTE_STORAGE_BUS_SATA,HW_CPU_X86_SSE,COMPUTE_STORAGE_BUS_VIRTIO,COMPUTE_NET_VIF_MODEL_PCNET,COMPUTE_GRAPHICS_MODEL_CIRRUS,HW_CPU_X86_SHA,HW_CPU_X86_BMI,COMPUTE_NET_VIF_MODEL_E1000E,COMPUTE_NET_VIF_MODEL_SPAPR_VLAN,COMPUTE_VOLUME_ATTACH_WITH_TAG,COMPUTE_GRAPHICS_MODEL_BOCHS,COMPUTE_VIOMMU_MODEL_AUTO,COMPUTE_IMAGE_TYPE_ARI,HW_CPU_X86_CLMUL,COMPUTE_STORAGE_BUS_IDE,COMPUTE_VOLUME_MULTI_ATTACH,HW_CPU_X86_F16C,COMPUTE_SECURITY_TPM_1_2 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:825
Oct 11 02:18:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1227: 321 pgs: 321 active+clean; 49 MiB data, 180 MiB used, 60 GiB / 60 GiB avail; 2.0 MiB/s rd, 2.0 MiB/s wr, 64 op/s
Oct 11 02:18:53 compute-0 nova_compute[356901]: 2025-10-11 02:18:53.919 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.187 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:18:54 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/33816056' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:18:54 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2528986065' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.363 2 INFO oslo.privsep.daemon [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Spawned new privsep daemon via rootwrap
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.215 1091 INFO oslo.privsep.daemon [-] privsep daemon starting
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.224 1091 INFO oslo.privsep.daemon [-] privsep process running with uid/gid: 0/0
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.228 1091 INFO oslo.privsep.daemon [-] privsep process running with capabilities (eff/prm/inh): CAP_DAC_OVERRIDE|CAP_NET_ADMIN/CAP_DAC_OVERRIDE|CAP_NET_ADMIN/none
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.229 1091 INFO oslo.privsep.daemon [-] privsep daemon running as pid 1091
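The privsep messages from 02:18:53.570 through 02:18:54.229 show oslo.privsep bootstrapping: the unprivileged service execs a rootwrap helper, which forks a daemon that keeps only CAP_DAC_OVERRIDE and CAP_NET_ADMIN and serves calls over a unix socket. A sketch of how such a context is declared, assuming an illustrative module path and entrypoint; the capability set matches the one reported above:

    # Sketch of an oslo.privsep context like the one spawned above. The
    # module path and entrypoint are illustrative; the capabilities match
    # the log (CAP_DAC_OVERRIDE | CAP_NET_ADMIN, inheritable: none).
    from oslo_privsep import capabilities as caps
    from oslo_privsep import priv_context

    vif_plug = priv_context.PrivContext(
        "vif_plug_ovs",
        cfg_section="vif_plug_ovs_privileged",
        pypath=__name__ + ".vif_plug",
        capabilities=[caps.CAP_DAC_OVERRIDE, caps.CAP_NET_ADMIN])

    @vif_plug.entrypoint
    def set_link_up(device):
        # This body executes inside the root privsep daemon; the caller
        # only ships arguments over the privsep socket.
        ...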
Oct 11 02:18:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:18:54 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3970001872' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.446 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.527s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.459 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating inventory in ProviderTree for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with inventory: {'MEMORY_MB': {'total': 7680, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0, 'reserved': 512}, 'VCPU': {'total': 8, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0, 'reserved': 0}, 'DISK_GB': {'total': 59, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9, 'reserved': 1}} update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:176
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.515 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updated inventory for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with generation 3 in Placement from set_inventory_for_provider using data: {'MEMORY_MB': {'total': 7680, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0, 'reserved': 512}, 'VCPU': {'total': 8, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0, 'reserved': 0}, 'DISK_GB': {'total': 59, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9, 'reserved': 1}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:957
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.516 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8 generation from 3 to 4 during operation: update_inventory _update_generation /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:164
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.516 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating inventory in ProviderTree for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with inventory: {'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:176
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.543 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.543 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 1.502s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.778 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.779 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tap64dfc81b-52, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.780 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Interface, record=tap64dfc81b-52, col_values=(('external_ids', {'iface-id': '64dfc81b-528a-4adc-9787-66719d2f9f93', 'iface-status': 'active', 'attached-mac': 'fa:16:3e:cc:aa:96', 'vm-uuid': '0cc56d17-ec3a-4408-bccb-91b29427379e'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
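The transaction above batches two ovsdbapp commands: AddPortCommand attaches tap64dfc81b-52 to br-int, and DbSetCommand stamps the Neutron port identity into the interface's external_ids so ovn-controller can claim it (which it does at 02:18:56). A sketch of the same transaction through ovsdbapp's Open vSwitch API, assuming the same tcp:127.0.0.1:6640 session seen earlier in the log:

    # Sketch of the two-command ovsdbapp transaction logged above
    # (AddPortCommand + DbSetCommand). Values are copied from the log.
    from ovsdbapp.backend.ovs_idl import connection
    from ovsdbapp.schema.open_vswitch import impl_idl

    conn = connection.Connection(
        idl=connection.OvsdbIdl.from_server("tcp:127.0.0.1:6640",
                                            "Open_vSwitch"),
        timeout=10)
    api = impl_idl.OvsdbIdl(conn)

    with api.transaction(check_error=True) as txn:
        txn.add(api.add_port("br-int", "tap64dfc81b-52", may_exist=True))
        txn.add(api.db_set(
            "Interface", "tap64dfc81b-52",
            ("external_ids", {
                "iface-id": "64dfc81b-528a-4adc-9787-66719d2f9f93",
                "iface-status": "active",
                "attached-mac": "fa:16:3e:cc:aa:96",
                "vm-uuid": "0cc56d17-ec3a-4408-bccb-91b29427379e"})))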
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.784 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:18:54 compute-0 NetworkManager[44908]: <info>  [1760149134.7879] manager: (tap64dfc81b-52): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/23)
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.788 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.799 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.801 2 INFO os_vif [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Successfully plugged vif VIFOpenVSwitch(active=False,address=fa:16:3e:cc:aa:96,bridge_name='br-int',has_traffic_filtering=True,id=64dfc81b-528a-4adc-9787-66719d2f9f93,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap64dfc81b-52')
Oct 11 02:18:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:54.842 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:18:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:54.843 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:18:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:54.843 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:18:54 compute-0 sshd-session[422675]: Invalid user ubuntu from 121.227.153.123 port 53164
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.891 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No BDM found with device name vda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.892 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No BDM found with device name vdb, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.892 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No BDM found with device name sda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.893 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No VIF found with MAC fa:16:3e:cc:aa:96, not building metadata _build_interface_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12092
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.894 2 INFO nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Using config drive
Oct 11 02:18:54 compute-0 nova_compute[356901]: 2025-10-11 02:18:54.950 2 DEBUG nova.storage.rbd_utils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 0cc56d17-ec3a-4408-bccb-91b29427379e_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:18:54 compute-0 podman[422705]: 2025-10-11 02:18:54.988936748 +0000 UTC m=+0.102028553 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:18:55 compute-0 podman[422707]: 2025-10-11 02:18:55.025816353 +0000 UTC m=+0.127296392 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_compute, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251007)
Oct 11 02:18:55 compute-0 podman[422708]: 2025-10-11 02:18:55.057644908 +0000 UTC m=+0.143980998 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, org.label-schema.license=GPLv2, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_managed=true, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:18:55 compute-0 podman[422706]: 2025-10-11 02:18:55.079614692 +0000 UTC m=+0.183439185 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 02:18:55 compute-0 sshd-session[422675]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:18:55 compute-0 sshd-session[422675]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:18:55 compute-0 ceph-mon[191930]: pgmap v1227: 321 pgs: 321 active+clean; 49 MiB data, 180 MiB used, 60 GiB / 60 GiB avail; 2.0 MiB/s rd, 2.0 MiB/s wr, 64 op/s
Oct 11 02:18:55 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3970001872' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:18:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e128 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:18:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e128 do_prune osdmap full prune enabled
Oct 11 02:18:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 e129: 3 total, 3 up, 3 in
Oct 11 02:18:55 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e129: 3 total, 3 up, 3 in
Oct 11 02:18:55 compute-0 nova_compute[356901]: 2025-10-11 02:18:55.546 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:18:55 compute-0 nova_compute[356901]: 2025-10-11 02:18:55.857 2 INFO nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Creating config drive at /var/lib/nova/instances/0cc56d17-ec3a-4408-bccb-91b29427379e/disk.config
Oct 11 02:18:55 compute-0 nova_compute[356901]: 2025-10-11 02:18:55.866 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): /usr/bin/mkisofs -o /var/lib/nova/instances/0cc56d17-ec3a-4408-bccb-91b29427379e/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmp2s1wekyr execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:18:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1229: 321 pgs: 321 active+clean; 49 MiB data, 180 MiB used, 60 GiB / 60 GiB avail; 1.0 MiB/s rd, 2.1 MiB/s wr, 58 op/s
Oct 11 02:18:56 compute-0 nova_compute[356901]: 2025-10-11 02:18:56.032 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "/usr/bin/mkisofs -o /var/lib/nova/instances/0cc56d17-ec3a-4408-bccb-91b29427379e/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmp2s1wekyr" returned: 0 in 0.166s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
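The config drive built above is a plain ISO 9660 image with Joliet (-J) and Rock Ridge (-r) extensions and the volume label config-2, which is how cloud-init locates it inside the guest. A sketch reproducing the invocation; the metadata directory and output path are hypothetical, and the publisher string is abbreviated from the log:

    # Sketch: rebuild the mkisofs invocation logged above for a config
    # drive. Flags and the "config-2" volume label are taken from the log;
    # paths are hypothetical.
    import subprocess

    def make_config_drive(output_iso, metadata_dir):
        subprocess.check_call([
            "/usr/bin/mkisofs", "-o", output_iso,
            "-ldots", "-allow-lowercase", "-allow-multidot", "-l",
            "-publisher", "OpenStack Compute",
            "-quiet", "-J", "-r",
            "-V", "config-2",      # the label cloud-init probes for
            metadata_dir,
        ])

    make_config_drive("disk.config", "/tmp/metadata")   # hypothetical paths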
Oct 11 02:18:56 compute-0 nova_compute[356901]: 2025-10-11 02:18:56.102 2 DEBUG nova.storage.rbd_utils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 0cc56d17-ec3a-4408-bccb-91b29427379e_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:18:56 compute-0 nova_compute[356901]: 2025-10-11 02:18:56.116 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/0cc56d17-ec3a-4408-bccb-91b29427379e/disk.config 0cc56d17-ec3a-4408-bccb-91b29427379e_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:18:56 compute-0 ceph-mon[191930]: osdmap e129: 3 total, 3 up, 3 in
Oct 11 02:18:56 compute-0 ceph-mon[191930]: pgmap v1229: 321 pgs: 321 active+clean; 49 MiB data, 180 MiB used, 60 GiB / 60 GiB avail; 1.0 MiB/s rd, 2.1 MiB/s wr, 58 op/s
Oct 11 02:18:56 compute-0 nova_compute[356901]: 2025-10-11 02:18:56.436 2 DEBUG oslo_concurrency.processutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/0cc56d17-ec3a-4408-bccb-91b29427379e/disk.config 0cc56d17-ec3a-4408-bccb-91b29427379e_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.320s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:18:56 compute-0 nova_compute[356901]: 2025-10-11 02:18:56.437 2 INFO nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Deleting local config drive /var/lib/nova/instances/0cc56d17-ec3a-4408-bccb-91b29427379e/disk.config because it was imported into RBD.
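Once "rbd import" returns, the local file is deleted and the config drive lives only in the vms pool. A quick sketch to confirm the image exists there, using the same CLI identity as the log:

    # Sketch: confirm the config drive landed in the "vms" pool after the
    # "rbd import" logged above.
    import json
    import subprocess

    out = subprocess.check_output(
        ["rbd", "ls", "--pool", "vms", "--format=json",
         "--id", "openstack", "--conf", "/etc/ceph/ceph.conf"])
    images = json.loads(out)
    assert "0cc56d17-ec3a-4408-bccb-91b29427379e_disk.config" in images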
Oct 11 02:18:56 compute-0 systemd[1]: Starting libvirt secret daemon...
Oct 11 02:18:56 compute-0 systemd[1]: Started libvirt secret daemon.
Oct 11 02:18:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:18:56
Oct 11 02:18:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:18:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:18:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['volumes', 'default.rgw.meta', 'default.rgw.log', 'default.rgw.control', 'images', '.rgw.root', 'cephfs.cephfs.meta', 'cephfs.cephfs.data', 'vms', 'backups', '.mgr']
Oct 11 02:18:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:18:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:18:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:18:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:18:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:18:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:18:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:18:56 compute-0 kernel: tun: Universal TUN/TAP device driver, 1.6
Oct 11 02:18:56 compute-0 kernel: tap64dfc81b-52: entered promiscuous mode
Oct 11 02:18:56 compute-0 NetworkManager[44908]: <info>  [1760149136.6423] manager: (tap64dfc81b-52): new Tun device (/org/freedesktop/NetworkManager/Devices/24)
Oct 11 02:18:56 compute-0 nova_compute[356901]: 2025-10-11 02:18:56.644 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:18:56 compute-0 ovn_controller[88370]: 2025-10-11T02:18:56Z|00027|binding|INFO|Claiming lport 64dfc81b-528a-4adc-9787-66719d2f9f93 for this chassis.
Oct 11 02:18:56 compute-0 ovn_controller[88370]: 2025-10-11T02:18:56Z|00028|binding|INFO|64dfc81b-528a-4adc-9787-66719d2f9f93: Claiming fa:16:3e:cc:aa:96 192.168.0.236
Oct 11 02:18:56 compute-0 nova_compute[356901]: 2025-10-11 02:18:56.662 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:18:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:56.673 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:cc:aa:96 192.168.0.236'], port_security=['fa:16:3e:cc:aa:96 192.168.0.236'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '192.168.0.236/24', 'neutron:device_id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': '97026531b3404a11869cb85a059c4a0d', 'neutron:revision_number': '2', 'neutron:security_group_ids': 'c0c90d87-d29f-4e96-98a1-ffb301424ea4', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=d19b0dd1-1656-436b-911a-8f2dcc98f6bf, chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=64dfc81b-528a-4adc-9787-66719d2f9f93) old=Port_Binding(chassis=[]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:18:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:56.677 286362 INFO neutron.agent.ovn.metadata.agent [-] Port 64dfc81b-528a-4adc-9787-66719d2f9f93 in datapath d4dded16-3268-4cf9-bb6b-aa5200d5e4ec bound to our chassis
Oct 11 02:18:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:56.682 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network d4dded16-3268-4cf9-bb6b-aa5200d5e4ec
Oct 11 02:18:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:56.687 286362 INFO oslo.privsep.daemon [-] Running privsep helper: ['sudo', 'neutron-rootwrap', '/etc/neutron/rootwrap.conf', 'privsep-helper', '--config-file', '/etc/neutron/neutron.conf', '--config-dir', '/etc/neutron.conf.d', '--privsep_context', 'neutron.privileged.default', '--privsep_sock_path', '/tmp/tmpodrzhc3m/privsep.sock']
Oct 11 02:18:56 compute-0 systemd-udevd[422878]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:18:56 compute-0 NetworkManager[44908]: <info>  [1760149136.7491] device (tap64dfc81b-52): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 02:18:56 compute-0 NetworkManager[44908]: <info>  [1760149136.7576] device (tap64dfc81b-52): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Oct 11 02:18:56 compute-0 systemd-machined[137586]: New machine qemu-1-instance-00000001.
Oct 11 02:18:56 compute-0 nova_compute[356901]: 2025-10-11 02:18:56.775 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:18:56 compute-0 systemd[1]: Started Virtual Machine qemu-1-instance-00000001.
Oct 11 02:18:56 compute-0 ovn_controller[88370]: 2025-10-11T02:18:56Z|00029|binding|INFO|Setting lport 64dfc81b-528a-4adc-9787-66719d2f9f93 ovn-installed in OVS
Oct 11 02:18:56 compute-0 ovn_controller[88370]: 2025-10-11T02:18:56Z|00030|binding|INFO|Setting lport 64dfc81b-528a-4adc-9787-66719d2f9f93 up in Southbound
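At this point ovn-controller has claimed the logical port for this chassis, marked it ovn-installed in OVS, and set it up in the OVN southbound DB. A sketch for checking that binding from the chassis with ovn-sbctl; the find syntax is standard OVSDB ctl usage, though exact output formatting varies by OVN version:

    # Sketch: verify the southbound Port_Binding that the two
    # ovn_controller messages above describe.
    import subprocess

    lport = "64dfc81b-528a-4adc-9787-66719d2f9f93"
    out = subprocess.check_output(
        ["ovn-sbctl", "--columns=chassis,up", "find", "Port_Binding",
         f"logical_port={lport}"], text=True)
    print(out)   # expect up=true and the compute-0 chassis once bound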
Oct 11 02:18:56 compute-0 nova_compute[356901]: 2025-10-11 02:18:56.789 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:18:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:18:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:18:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:18:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:18:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:18:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:18:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:18:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:18:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:18:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:18:57 compute-0 sshd-session[422675]: Failed password for invalid user ubuntu from 121.227.153.123 port 53164 ssh2
Oct 11 02:18:57 compute-0 nova_compute[356901]: 2025-10-11 02:18:57.219 2 DEBUG nova.compute.manager [req-2bec1c20-5061-469c-9edc-cbfeac85f521 req-199b1a2b-e9f7-4d35-8f55-83329ca3a823 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Received event network-vif-plugged-64dfc81b-528a-4adc-9787-66719d2f9f93 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:18:57 compute-0 nova_compute[356901]: 2025-10-11 02:18:57.220 2 DEBUG oslo_concurrency.lockutils [req-2bec1c20-5061-469c-9edc-cbfeac85f521 req-199b1a2b-e9f7-4d35-8f55-83329ca3a823 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "0cc56d17-ec3a-4408-bccb-91b29427379e-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:18:57 compute-0 nova_compute[356901]: 2025-10-11 02:18:57.221 2 DEBUG oslo_concurrency.lockutils [req-2bec1c20-5061-469c-9edc-cbfeac85f521 req-199b1a2b-e9f7-4d35-8f55-83329ca3a823 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "0cc56d17-ec3a-4408-bccb-91b29427379e-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:18:57 compute-0 nova_compute[356901]: 2025-10-11 02:18:57.221 2 DEBUG oslo_concurrency.lockutils [req-2bec1c20-5061-469c-9edc-cbfeac85f521 req-199b1a2b-e9f7-4d35-8f55-83329ca3a823 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "0cc56d17-ec3a-4408-bccb-91b29427379e-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:18:57 compute-0 nova_compute[356901]: 2025-10-11 02:18:57.222 2 DEBUG nova.compute.manager [req-2bec1c20-5061-469c-9edc-cbfeac85f521 req-199b1a2b-e9f7-4d35-8f55-83329ca3a823 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Processing event network-vif-plugged-64dfc81b-528a-4adc-9787-66719d2f9f93 _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10808
Oct 11 02:18:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:57.495 286362 INFO oslo.privsep.daemon [-] Spawned new privsep daemon via rootwrap
Oct 11 02:18:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:57.496 286362 DEBUG oslo.privsep.daemon [-] Accepted privsep connection to /tmp/tmpodrzhc3m/privsep.sock __init__ /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:362
Oct 11 02:18:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:57.362 422955 INFO oslo.privsep.daemon [-] privsep daemon starting
Oct 11 02:18:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:57.372 422955 INFO oslo.privsep.daemon [-] privsep process running with uid/gid: 0/0
Oct 11 02:18:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:57.377 422955 INFO oslo.privsep.daemon [-] privsep process running with capabilities (eff/prm/inh): CAP_DAC_OVERRIDE|CAP_DAC_READ_SEARCH|CAP_NET_ADMIN|CAP_SYS_ADMIN|CAP_SYS_PTRACE/CAP_DAC_OVERRIDE|CAP_DAC_READ_SEARCH|CAP_NET_ADMIN|CAP_SYS_ADMIN|CAP_SYS_PTRACE/none
Oct 11 02:18:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:57.378 422955 INFO oslo.privsep.daemon [-] privsep daemon running as pid 422955
Oct 11 02:18:57 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:57.500 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[2e2319d7-8b77-4919-ab4c-4698eecfd009]: (2,) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:18:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1230: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 920 KiB/s rd, 1.9 MiB/s wr, 54 op/s
Oct 11 02:18:58 compute-0 sshd-session[422675]: Connection closed by invalid user ubuntu 121.227.153.123 port 53164 [preauth]
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.174 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760149138.1734743, 0cc56d17-ec3a-4408-bccb-91b29427379e => Started> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.175 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] VM Started (Lifecycle Event)
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.179 2 DEBUG nova.compute.manager [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Instance event wait completed in 0 seconds for network-vif-plugged wait_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:577
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.195 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Guest created on hypervisor spawn /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4417
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.204 2 INFO nova.virt.libvirt.driver [-] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Instance spawned successfully.
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.204 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Attempting to register defaults for the following image properties: ['hw_cdrom_bus', 'hw_disk_bus', 'hw_input_bus', 'hw_pointer_model', 'hw_video_model', 'hw_vif_model'] _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:917
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.214 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.226 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Synchronizing instance power state after lifecycle event "Started"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.234 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Found default for hw_cdrom_bus of sata _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.234 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Found default for hw_disk_bus of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.234 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Found default for hw_input_bus of usb _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.235 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Found default for hw_pointer_model of usbtablet _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.235 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Found default for hw_video_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.236 2 DEBUG nova.virt.libvirt.driver [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Found default for hw_vif_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.240 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.240 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760149138.1736283, 0cc56d17-ec3a-4408-bccb-91b29427379e => Paused> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.241 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] VM Paused (Lifecycle Event)
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.283 2 INFO nova.compute.manager [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Took 14.35 seconds to spawn the instance on the hypervisor.
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.284 2 DEBUG nova.compute.manager [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.293 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.298 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760149138.1941807, 0cc56d17-ec3a-4408-bccb-91b29427379e => Resumed> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.298 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] VM Resumed (Lifecycle Event)
Oct 11 02:18:58 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:58.314 422955 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "context-manager" by "neutron_lib.db.api._create_context_manager" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:18:58 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:58.314 422955 DEBUG oslo_concurrency.lockutils [-] Lock "context-manager" acquired by "neutron_lib.db.api._create_context_manager" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:18:58 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:58.314 422955 DEBUG oslo_concurrency.lockutils [-] Lock "context-manager" "released" by "neutron_lib.db.api._create_context_manager" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.369 2 INFO nova.compute.manager [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Took 15.39 seconds to build instance.
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.389 2 DEBUG oslo_concurrency.lockutils [None req-8b3a141b-b34b-4f45-a488-f506ce716dfe d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "0cc56d17-ec3a-4408-bccb-91b29427379e" "released" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: held 15.513s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.397 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:18:58 compute-0 nova_compute[356901]: 2025-10-11 02:18:58.402 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Synchronizing instance power state after lifecycle event "Resumed"; current vm_state: active, current task_state: None, current DB power_state: 1, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
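The Started/Paused/Resumed churn above is normal during a spawn: libvirt momentarily pauses the guest before resuming it, and each lifecycle event makes the compute manager re-check power state. The 02:18:58.240 line shows the guard that keeps these races harmless: while the instance has a pending task, the sync is skipped. A stripped-down sketch of that rule (nova's real handler weighs much more state; names here are illustrative):

    # Power-state codes as they appear above (DB power_state: 0, VM power_state: 1).
    NOSTATE, RUNNING, PAUSED = 0, 1, 3

    def sync_power_state(instance, vm_power_state):
        if instance['task_state'] is not None:
            # e.g. task_state 'spawning' -> "has a pending task (spawning). Skip."
            return
        if instance['power_state'] != vm_power_state:
            instance['power_state'] = vm_power_state  # would be a DB update in nova

    inst = {'task_state': 'spawning', 'power_state': NOSTATE}
    sync_power_state(inst, RUNNING)
    assert inst['power_state'] == NOSTATE  # skipped while the spawn task is pending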
Oct 11 02:18:58 compute-0 ceph-mon[191930]: pgmap v1230: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 920 KiB/s rd, 1.9 MiB/s wr, 54 op/s
Oct 11 02:18:59 compute-0 systemd[1]: Starting libvirt proxy daemon...
Oct 11 02:18:59 compute-0 systemd[1]: Started libvirt proxy daemon.
Oct 11 02:18:59 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:59.186 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[27f85a4f-6b15-48cb-be90-4a832a62cb31]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:18:59 compute-0 nova_compute[356901]: 2025-10-11 02:18:59.188 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:18:59 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:59.190 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Creating VETH tapd4dded16-31 in ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec namespace provision_datapath /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:665
Oct 11 02:18:59 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:59.192 422955 DEBUG neutron.privileged.agent.linux.ip_lib [-] Interface tapd4dded16-30 not found in namespace None get_link_id /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:204
Oct 11 02:18:59 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:59.193 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[20cb457e-27e3-4f5f-afe8-57d817b9bfbe]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:18:59 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:59.198 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[07aac07f-50e1-4e52-9193-065dc81957bf]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:18:59 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:59.235 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[c780ef0f-fec7-47cd-8a93-0e834826eab2]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:18:59 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:59.278 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[5560a054-10f9-4b9d-8707-0671ba0e484b]: (4, ('net.ipv4.conf.all.promote_secondaries = 1\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:18:59 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:59.281 286362 INFO oslo.privsep.daemon [-] Running privsep helper: ['sudo', 'neutron-rootwrap', '/etc/neutron/rootwrap.conf', 'privsep-helper', '--config-file', '/etc/neutron/neutron.conf', '--config-dir', '/etc/neutron.conf.d', '--privsep_context', 'neutron.privileged.link_cmd', '--privsep_sock_path', '/tmp/tmpo7kd018u/privsep.sock']
Oct 11 02:18:59 compute-0 nova_compute[356901]: 2025-10-11 02:18:59.340 2 DEBUG nova.compute.manager [req-ec1f7e17-8c6e-444c-a8d4-29e90acf35c3 req-06ca4c88-9d6c-4346-81ba-62c3b5fbb9ae 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Received event network-vif-plugged-64dfc81b-528a-4adc-9787-66719d2f9f93 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:18:59 compute-0 nova_compute[356901]: 2025-10-11 02:18:59.341 2 DEBUG oslo_concurrency.lockutils [req-ec1f7e17-8c6e-444c-a8d4-29e90acf35c3 req-06ca4c88-9d6c-4346-81ba-62c3b5fbb9ae 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "0cc56d17-ec3a-4408-bccb-91b29427379e-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:18:59 compute-0 nova_compute[356901]: 2025-10-11 02:18:59.341 2 DEBUG oslo_concurrency.lockutils [req-ec1f7e17-8c6e-444c-a8d4-29e90acf35c3 req-06ca4c88-9d6c-4346-81ba-62c3b5fbb9ae 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "0cc56d17-ec3a-4408-bccb-91b29427379e-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:18:59 compute-0 nova_compute[356901]: 2025-10-11 02:18:59.342 2 DEBUG oslo_concurrency.lockutils [req-ec1f7e17-8c6e-444c-a8d4-29e90acf35c3 req-06ca4c88-9d6c-4346-81ba-62c3b5fbb9ae 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "0cc56d17-ec3a-4408-bccb-91b29427379e-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:18:59 compute-0 nova_compute[356901]: 2025-10-11 02:18:59.342 2 DEBUG nova.compute.manager [req-ec1f7e17-8c6e-444c-a8d4-29e90acf35c3 req-06ca4c88-9d6c-4346-81ba-62c3b5fbb9ae 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] No waiting events found dispatching network-vif-plugged-64dfc81b-528a-4adc-9787-66719d2f9f93 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:18:59 compute-0 nova_compute[356901]: 2025-10-11 02:18:59.342 2 WARNING nova.compute.manager [req-ec1f7e17-8c6e-444c-a8d4-29e90acf35c3 req-06ca4c88-9d6c-4346-81ba-62c3b5fbb9ae 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Received unexpected event network-vif-plugged-64dfc81b-528a-4adc-9787-66719d2f9f93 for instance with vm_state active and task_state None.
Oct 11 02:18:59 compute-0 podman[157119]: time="2025-10-11T02:18:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:18:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:18:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 45035 "" "Go-http-client/1.1"
Oct 11 02:18:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:18:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 8557 "" "Go-http-client/1.1"
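The two GETs above are a collector polling the podman system service over its local UNIX socket (the API answers plain HTTP; there is no TCP port). A stdlib-only sketch of issuing the same containers/json query; the socket path is an assumption, substitute whatever the service listens on:

    import http.client
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        """http.client over an AF_UNIX socket instead of TCP."""
        def __init__(self, socket_path):
            super().__init__('localhost')
            self.socket_path = socket_path

        def connect(self):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(self.socket_path)
            self.sock = sock

    conn = UnixHTTPConnection('/run/podman/podman.sock')  # assumed path
    conn.request('GET', '/v4.9.3/libpod/containers/json?all=true&external=false')
    resp = conn.getresponse()
    print(resp.status, resp.read()[:120])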
Oct 11 02:18:59 compute-0 nova_compute[356901]: 2025-10-11 02:18:59.786 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:18:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1231: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 811 KiB/s rd, 1.6 MiB/s wr, 58 op/s
Oct 11 02:19:00 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:00.110 286362 INFO oslo.privsep.daemon [-] Spawned new privsep daemon via rootwrap
Oct 11 02:19:00 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:00.110 286362 DEBUG oslo.privsep.daemon [-] Accepted privsep connection to /tmp/tmpo7kd018u/privsep.sock __init__ /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:362
Oct 11 02:19:00 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:59.941 422988 INFO oslo.privsep.daemon [-] privsep daemon starting
Oct 11 02:19:00 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:59.948 422988 INFO oslo.privsep.daemon [-] privsep process running with uid/gid: 0/0
Oct 11 02:19:00 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:59.950 422988 INFO oslo.privsep.daemon [-] privsep process running with capabilities (eff/prm/inh): CAP_NET_ADMIN|CAP_SYS_ADMIN/CAP_NET_ADMIN|CAP_SYS_ADMIN/none
Oct 11 02:19:00 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:18:59.951 422988 INFO oslo.privsep.daemon [-] privsep daemon running as pid 422988
Oct 11 02:19:00 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:00.113 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[8d77b4bf-55d9-462d-86be-68a8dc2087a4]: (2,) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:19:00 compute-0 sshd-session[422960]: Invalid user ubuntu from 121.227.153.123 port 53170
Oct 11 02:19:00 compute-0 podman[422989]: 2025-10-11 02:19:00.246855207 +0000 UTC m=+0.130021578 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3, tcib_managed=true, org.label-schema.build-date=20251009)
Oct 11 02:19:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:19:00 compute-0 podman[422990]: 2025-10-11 02:19:00.281555143 +0000 UTC m=+0.165636673 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, container_name=iscsid, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, config_id=iscsid, org.label-schema.build-date=20251009, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:19:00 compute-0 sshd-session[422960]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:19:00 compute-0 sshd-session[422960]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:19:00 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:00.678 422988 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "context-manager" by "neutron_lib.db.api._create_context_manager" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:19:00 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:00.678 422988 DEBUG oslo_concurrency.lockutils [-] Lock "context-manager" acquired by "neutron_lib.db.api._create_context_manager" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:19:00 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:00.678 422988 DEBUG oslo_concurrency.lockutils [-] Lock "context-manager" "released" by "neutron_lib.db.api._create_context_manager" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:19:00 compute-0 ceph-mon[191930]: pgmap v1231: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 811 KiB/s rd, 1.6 MiB/s wr, 58 op/s
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.277 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[90e33e25-40a7-48ad-afce-64c6b8834bb7]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.284 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[e974a4ce-f4dd-4a64-9955-0957f86f6392]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:19:01 compute-0 NetworkManager[44908]: <info>  [1760149141.2854] manager: (tapd4dded16-30): new Veth device (/org/freedesktop/NetworkManager/Devices/25)
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.327 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[ae6271ab-3d99-4237-98fd-676c06d0e3e9]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.331 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[f61a942e-2c78-41d9-9a82-681454906233]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:19:01 compute-0 systemd-udevd[423037]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:19:01 compute-0 NetworkManager[44908]: <info>  [1760149141.3653] device (tapd4dded16-30): carrier: link connected
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.372 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[b70e6b92-1c99-47fd-bc2e-e7d3f4047284]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.396 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[4ac6e2af-c2d3-4711-a93e-7c8ba581aa60]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapd4dded16-31'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:11:50:48'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 15], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 548909, 'reachable_time': 26127, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 423056, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
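That reply is a raw RTM_NEWLINK dump in pyroute2's message format for tapd4dded16-31 inside the metadata namespace. The same attributes can be read directly with pyroute2, the library neutron's privileged ip_lib wraps; the namespace and interface names below are taken from the log:

    from pyroute2 import NetNS

    # Names from the provisioning messages at 02:18:59 above.
    with NetNS('ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec') as ns:
        idx = ns.link_lookup(ifname='tapd4dded16-31')[0]
        (link,) = ns.get_links(idx)
        print(link.get_attr('IFLA_ADDRESS'),    # fa:16:3e:11:50:48 in the dump
              link.get_attr('IFLA_OPERSTATE'))  # 'UP'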
Oct 11 02:19:01 compute-0 openstack_network_exporter[374316]: ERROR   02:19:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:19:01 compute-0 openstack_network_exporter[374316]: ERROR   02:19:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:19:01 compute-0 openstack_network_exporter[374316]: ERROR   02:19:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:19:01 compute-0 openstack_network_exporter[374316]: ERROR   02:19:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:19:01 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.421 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[1ad37d11-5b40-4d20-a068-a34603f40537]: (4, ({'family': 10, 'prefixlen': 64, 'flags': 192, 'scope': 253, 'index': 2, 'attrs': [['IFA_ADDRESS', 'fe80::f816:3eff:fe11:5048'], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 548909, 'tstamp': 548909}], ['IFA_FLAGS', 192]], 'header': {'length': 72, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 423057, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'},)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:19:01 compute-0 openstack_network_exporter[374316]: ERROR   02:19:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:19:01 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.445 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[57eb9a91-4799-46fc-83fc-d17e619ef4a0]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapd4dded16-31'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:11:50:48'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 15], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 548909, 'reachable_time': 26127, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 0, 'sequence_number': 255, 'pid': 423058, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.499 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[ad6b73ea-4b5d-47a6-a0c3-35b48a7a34b4]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.590 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[38707562-e2e4-4119-870e-23f10fb45032]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.594 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapd4dded16-30, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.594 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.595 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapd4dded16-30, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:19:01 compute-0 kernel: tapd4dded16-30: entered promiscuous mode
Oct 11 02:19:01 compute-0 nova_compute[356901]: 2025-10-11 02:19:01.599 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:01 compute-0 NetworkManager[44908]: <info>  [1760149141.6023] manager: (tapd4dded16-30): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/26)
Oct 11 02:19:01 compute-0 nova_compute[356901]: 2025-10-11 02:19:01.608 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.611 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tapd4dded16-30, col_values=(('external_ids', {'iface-id': 'f0f8488b-423f-46a5-8a6a-984c2ae3438e'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
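The three one-command transactions above detach the tap from br-ex, attach it to br-int, and stamp the interface with the Neutron port ID so ovn-controller can bind it. A sketch of the same sequence through ovsdbapp's Open_vSwitch schema API; the ovsdb socket path is an assumption:

    from ovsdbapp.backend.ovs_idl import connection
    from ovsdbapp.schema.open_vswitch import impl_idl

    idl = connection.OvsdbIdl.from_server(
        'unix:/run/openvswitch/db.sock', 'Open_vSwitch')  # assumed socket path
    api = impl_idl.OvsdbIdl(connection.Connection(idl, timeout=10))

    # Mirror the DelPortCommand / AddPortCommand / DbSetCommand trio above,
    # but batched into one transaction.
    with api.transaction(check_error=True) as txn:
        txn.add(api.del_port('tapd4dded16-30', bridge='br-ex', if_exists=True))
        txn.add(api.add_port('br-int', 'tapd4dded16-30', may_exist=True))
        txn.add(api.db_set('Interface', 'tapd4dded16-30',
                           ('external_ids',
                            {'iface-id': 'f0f8488b-423f-46a5-8a6a-984c2ae3438e'})))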
Oct 11 02:19:01 compute-0 nova_compute[356901]: 2025-10-11 02:19:01.613 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:01 compute-0 ovn_controller[88370]: 2025-10-11T02:19:01Z|00031|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:19:01 compute-0 nova_compute[356901]: 2025-10-11 02:19:01.617 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.619 286362 DEBUG neutron.agent.linux.utils [-] Unable to access /var/lib/neutron/external/pids/d4dded16-3268-4cf9-bb6b-aa5200d5e4ec.pid.haproxy; Error: [Errno 2] No such file or directory: '/var/lib/neutron/external/pids/d4dded16-3268-4cf9-bb6b-aa5200d5e4ec.pid.haproxy' get_value_from_file /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:252
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.620 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[491f69ef-f89e-4486-ae72-a1371441a254]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.622 286362 DEBUG neutron.agent.ovn.metadata.driver [-] haproxy_cfg = 
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: global
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     log         /dev/log local0 debug
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     log-tag     haproxy-metadata-proxy-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     user        root
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     group       root
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     maxconn     1024
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     pidfile     /var/lib/neutron/external/pids/d4dded16-3268-4cf9-bb6b-aa5200d5e4ec.pid.haproxy
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     daemon
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: defaults
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     log global
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     mode http
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     option httplog
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     option dontlognull
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     option http-server-close
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     option forwardfor
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     retries                 3
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     timeout http-request    30s
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     timeout connect         30s
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     timeout client          32s
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     timeout server          32s
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     timeout http-keep-alive 30s
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: listen listener
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     bind 169.254.169.254:80
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     server metadata /var/lib/neutron/metadata_proxy
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:     http-request add-header X-OVN-Network-ID d4dded16-3268-4cf9-bb6b-aa5200d5e4ec
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]:  create_config_file /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/driver.py:107
Oct 11 02:19:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:01.623 286362 DEBUG neutron.agent.linux.utils [-] Running command: ['sudo', 'neutron-rootwrap', '/etc/neutron/rootwrap.conf', 'ip', 'netns', 'exec', 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'env', 'PROCESS_TAG=haproxy-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'haproxy', '-f', '/var/lib/neutron/ovn-metadata-proxy/d4dded16-3268-4cf9-bb6b-aa5200d5e4ec.conf'] create_process /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:84
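That command launches haproxy inside the ovnmeta namespace via rootwrap, reading the configuration rendered just above. When a proxy refuses to load, haproxy's check mode validates the rendered file without starting anything; a sketch, with the path copied from the command line above:

    import subprocess

    CONF = ('/var/lib/neutron/ovn-metadata-proxy/'
            'd4dded16-3268-4cf9-bb6b-aa5200d5e4ec.conf')

    # 'haproxy -c' only parses the configuration; a non-zero exit code
    # means the file the agent rendered would fail to load.
    check = subprocess.run(['haproxy', '-c', '-f', CONF],
                           capture_output=True, text=True)
    print(check.returncode, check.stdout or check.stderr)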
Oct 11 02:19:01 compute-0 nova_compute[356901]: 2025-10-11 02:19:01.631 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1232: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 136 KiB/s rd, 1.5 MiB/s wr, 48 op/s
Oct 11 02:19:02 compute-0 sshd-session[422960]: Failed password for invalid user ubuntu from 121.227.153.123 port 53170 ssh2
Oct 11 02:19:02 compute-0 podman[423091]: 2025-10-11 02:19:02.15304429 +0000 UTC m=+0.095863659 container create 93fa354f164212807bed3321af3daedb607f27ec6d142af7575923307c260f9a (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:19:02 compute-0 podman[423091]: 2025-10-11 02:19:02.103640429 +0000 UTC m=+0.046459868 image pull 1061e4fafe13e0b9aa1ef2c904ba4ad70c44f3e87b1d831f16c6db34937f4022 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Oct 11 02:19:02 compute-0 systemd[1]: Started libpod-conmon-93fa354f164212807bed3321af3daedb607f27ec6d142af7575923307c260f9a.scope.
Oct 11 02:19:02 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:19:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8b28b3f1a89afdda3c1f80d9c45f87b71c51b085efa1a1680e02d92518ede197/merged/var/lib/neutron supports timestamps until 2038 (0x7fffffff)
Oct 11 02:19:02 compute-0 podman[423091]: 2025-10-11 02:19:02.303796041 +0000 UTC m=+0.246615440 container init 93fa354f164212807bed3321af3daedb607f27ec6d142af7575923307c260f9a (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:19:02 compute-0 podman[423091]: 2025-10-11 02:19:02.314024404 +0000 UTC m=+0.256843773 container start 93fa354f164212807bed3321af3daedb607f27ec6d142af7575923307c260f9a (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:19:02 compute-0 neutron-haproxy-ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec[423106]: [NOTICE]   (423110) : New worker (423112) forked
Oct 11 02:19:02 compute-0 neutron-haproxy-ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec[423106]: [NOTICE]   (423110) : Loading success.
Oct 11 02:19:02 compute-0 ceph-mon[191930]: pgmap v1232: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 136 KiB/s rd, 1.5 MiB/s wr, 48 op/s
Oct 11 02:19:03 compute-0 sshd-session[422960]: Connection closed by invalid user ubuntu 121.227.153.123 port 53170 [preauth]
Oct 11 02:19:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1233: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 1.8 MiB/s rd, 15 KiB/s wr, 73 op/s
Oct 11 02:19:04 compute-0 nova_compute[356901]: 2025-10-11 02:19:04.190 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:04 compute-0 nova_compute[356901]: 2025-10-11 02:19:04.789 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:04 compute-0 sshd-session[423121]: Invalid user ubuntu from 121.227.153.123 port 43182
Oct 11 02:19:04 compute-0 ceph-mon[191930]: pgmap v1233: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 1.8 MiB/s rd, 15 KiB/s wr, 73 op/s
Oct 11 02:19:05 compute-0 sshd-session[423121]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:19:05 compute-0 sshd-session[423121]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:19:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:19:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1234: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 14 KiB/s wr, 69 op/s
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0002673989263853617 of space, bias 1.0, pg target 0.08021967791560852 quantized to 32 (current 32)
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:19:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
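Each pool line above follows the same arithmetic: the pool's share of the 64411926528-byte cluster, times its bias, scaled by a factor that works out to exactly 300 for every entry here (plausibly 100 target PGs per OSD across 3 OSDs, though that decomposition is an inference from this log alone). A quick check in Python:

    # used_ratio and bias copied verbatim from the pg_autoscaler lines above.
    K = 300  # empirical scale factor in this log; the 100 x 3 split is assumed
    pools = [
        ('.mgr',               7.185749983720779e-06,  1.0),
        ('vms',                0.0002673989263853617,  1.0),
        ('images',             0.00025334537995702286, 1.0),
        ('cephfs.cephfs.meta', 5.087256625643029e-07,  4.0),
    ]
    for name, used_ratio, bias in pools:
        print(f"{name}: pg target {used_ratio * bias * K}")
    # '.mgr' prints 0.0021557249951162337, matching the target logged above.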
Oct 11 02:19:06 compute-0 ceph-mon[191930]: pgmap v1234: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 14 KiB/s wr, 69 op/s
Oct 11 02:19:07 compute-0 sshd-session[423121]: Failed password for invalid user ubuntu from 121.227.153.123 port 43182 ssh2
Oct 11 02:19:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1235: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 12 KiB/s wr, 60 op/s
Oct 11 02:19:08 compute-0 sshd-session[423121]: Connection closed by invalid user ubuntu 121.227.153.123 port 43182 [preauth]
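Threaded through this whole window, sshd is turning away a brute-force run against the nonexistent user "ubuntu" from 121.227.153.123 (ports 53164, 53170, 43182, and 43190 so far). A small sketch for tallying such attempts from an exported journal; the file name is hypothetical:

    import re
    from collections import Counter

    PAT = re.compile(r'Invalid user (\S+) from (\S+) port (\d+)')

    hits = Counter()
    with open('compute-0.log') as fh:  # hypothetical export of this journal
        for line in fh:
            m = PAT.search(line)
            if m:
                user, rhost, _port = m.groups()
                hits[(rhost, user)] += 1

    for (rhost, user), n in hits.most_common():
        print(f'{rhost} -> {user}: {n} attempts')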
Oct 11 02:19:08 compute-0 ovn_controller[88370]: 2025-10-11T02:19:08Z|00032|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:19:08 compute-0 NetworkManager[44908]: <info>  [1760149148.4217] manager: (patch-br-int-to-provnet-2b86b28c-3842-4c1a-86db-34afe38ac747): new Open vSwitch Interface device (/org/freedesktop/NetworkManager/Devices/27)
Oct 11 02:19:08 compute-0 nova_compute[356901]: 2025-10-11 02:19:08.421 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:08 compute-0 NetworkManager[44908]: <info>  [1760149148.4229] device (patch-br-int-to-provnet-2b86b28c-3842-4c1a-86db-34afe38ac747)[Open vSwitch Interface]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Oct 11 02:19:08 compute-0 NetworkManager[44908]: <info>  [1760149148.4250] manager: (patch-provnet-2b86b28c-3842-4c1a-86db-34afe38ac747-to-br-int): new Open vSwitch Interface device (/org/freedesktop/NetworkManager/Devices/28)
Oct 11 02:19:08 compute-0 NetworkManager[44908]: <info>  [1760149148.4255] device (patch-provnet-2b86b28c-3842-4c1a-86db-34afe38ac747-to-br-int)[Open vSwitch Interface]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Oct 11 02:19:08 compute-0 NetworkManager[44908]: <info>  [1760149148.4269] manager: (patch-provnet-2b86b28c-3842-4c1a-86db-34afe38ac747-to-br-int): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/29)
Oct 11 02:19:08 compute-0 NetworkManager[44908]: <info>  [1760149148.4279] manager: (patch-br-int-to-provnet-2b86b28c-3842-4c1a-86db-34afe38ac747): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/30)
Oct 11 02:19:08 compute-0 NetworkManager[44908]: <info>  [1760149148.4287] device (patch-br-int-to-provnet-2b86b28c-3842-4c1a-86db-34afe38ac747)[Open vSwitch Interface]: state change: unavailable -> disconnected (reason 'none', managed-type: 'full')
Oct 11 02:19:08 compute-0 NetworkManager[44908]: <info>  [1760149148.4291] device (patch-provnet-2b86b28c-3842-4c1a-86db-34afe38ac747-to-br-int)[Open vSwitch Interface]: state change: unavailable -> disconnected (reason 'none', managed-type: 'full')
Oct 11 02:19:08 compute-0 ovn_controller[88370]: 2025-10-11T02:19:08Z|00033|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:19:08 compute-0 nova_compute[356901]: 2025-10-11 02:19:08.471 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:08 compute-0 nova_compute[356901]: 2025-10-11 02:19:08.481 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:08 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:08.599 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=4, options={'arp_ns_explicit_output': 'true', 'mac_prefix': 'fe:55:97', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'ce:9c:4f:b4:85:9b'}, ipsec=False) old=SB_Global(nb_cfg=3) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:19:08 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:08.601 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 0 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274
Oct 11 02:19:08 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:08.602 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '4'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:19:08 compute-0 nova_compute[356901]: 2025-10-11 02:19:08.604 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:08 compute-0 nova_compute[356901]: 2025-10-11 02:19:08.763 2 DEBUG nova.compute.manager [req-0bf8c05f-9431-472a-bb1c-fabef8955c31 req-35dab72b-4cbe-4ad5-ad2e-044af1a8e6a5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Received event network-changed-64dfc81b-528a-4adc-9787-66719d2f9f93 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:19:08 compute-0 nova_compute[356901]: 2025-10-11 02:19:08.763 2 DEBUG nova.compute.manager [req-0bf8c05f-9431-472a-bb1c-fabef8955c31 req-35dab72b-4cbe-4ad5-ad2e-044af1a8e6a5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Refreshing instance network info cache due to event network-changed-64dfc81b-528a-4adc-9787-66719d2f9f93. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:19:08 compute-0 nova_compute[356901]: 2025-10-11 02:19:08.763 2 DEBUG oslo_concurrency.lockutils [req-0bf8c05f-9431-472a-bb1c-fabef8955c31 req-35dab72b-4cbe-4ad5-ad2e-044af1a8e6a5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:19:08 compute-0 nova_compute[356901]: 2025-10-11 02:19:08.764 2 DEBUG oslo_concurrency.lockutils [req-0bf8c05f-9431-472a-bb1c-fabef8955c31 req-35dab72b-4cbe-4ad5-ad2e-044af1a8e6a5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:19:08 compute-0 nova_compute[356901]: 2025-10-11 02:19:08.764 2 DEBUG nova.network.neutron [req-0bf8c05f-9431-472a-bb1c-fabef8955c31 req-35dab72b-4cbe-4ad5-ad2e-044af1a8e6a5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Refreshing network info cache for port 64dfc81b-528a-4adc-9787-66719d2f9f93 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:19:09 compute-0 ceph-mon[191930]: pgmap v1235: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 12 KiB/s wr, 60 op/s
Oct 11 02:19:09 compute-0 nova_compute[356901]: 2025-10-11 02:19:09.196 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:09 compute-0 nova_compute[356901]: 2025-10-11 02:19:09.792 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1236: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 255 B/s wr, 58 op/s
Oct 11 02:19:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:19:10 compute-0 sshd-session[423124]: Invalid user ubuntu from 121.227.153.123 port 43190
Oct 11 02:19:10 compute-0 sshd-session[423124]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:19:10 compute-0 sshd-session[423124]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:19:11 compute-0 ceph-mon[191930]: pgmap v1236: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 255 B/s wr, 58 op/s
Oct 11 02:19:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1237: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 50 op/s
Oct 11 02:19:12 compute-0 podman[423128]: 2025-10-11 02:19:12.256973169 +0000 UTC m=+0.137089411 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://catalog.redhat.com/en/search?searchType=containers, release=1755695350, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., distribution-scope=public, io.buildah.version=1.33.7, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, name=ubi9-minimal, container_name=openstack_network_exporter, version=9.6, maintainer=Red Hat, Inc., io.openshift.tags=minimal rhel9, vendor=Red Hat, Inc., build-date=2025-08-20T13:12:41, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, architecture=x86_64, managed_by=edpm_ansible, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.expose-services=, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-type=git, com.redhat.component=ubi9-minimal-container)
Oct 11 02:19:12 compute-0 podman[423129]: 2025-10-11 02:19:12.262037889 +0000 UTC m=+0.131647729 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:19:12 compute-0 podman[423127]: 2025-10-11 02:19:12.281437021 +0000 UTC m=+0.159849709 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
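The three podman health_status lines above are emitted by the per-container healthcheck commands configured under 'healthcheck' in each config_data. The same status can be queried out of band; a sketch in Python, assuming podman is on PATH and the container name matches the log:

    # Sketch: read the health status podman logs above via `podman inspect`.
    import json
    import subprocess

    def health_status(name='openstack_network_exporter'):
        out = subprocess.run(
            ['podman', 'inspect', name],
            check=True, capture_output=True, text=True).stdout
        # podman inspect returns a JSON array with one object per container.
        return json.loads(out)[0]['State']['Health']['Status']

    print(health_status())  # e.g. 'healthy', matching health_status=healthy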
Oct 11 02:19:12 compute-0 nova_compute[356901]: 2025-10-11 02:19:12.496 2 DEBUG nova.network.neutron [req-0bf8c05f-9431-472a-bb1c-fabef8955c31 req-35dab72b-4cbe-4ad5-ad2e-044af1a8e6a5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated VIF entry in instance network info cache for port 64dfc81b-528a-4adc-9787-66719d2f9f93. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:19:12 compute-0 nova_compute[356901]: 2025-10-11 02:19:12.497 2 DEBUG nova.network.neutron [req-0bf8c05f-9431-472a-bb1c-fabef8955c31 req-35dab72b-4cbe-4ad5-ad2e-044af1a8e6a5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:19:12 compute-0 nova_compute[356901]: 2025-10-11 02:19:12.517 2 DEBUG oslo_concurrency.lockutils [req-0bf8c05f-9431-472a-bb1c-fabef8955c31 req-35dab72b-4cbe-4ad5-ad2e-044af1a8e6a5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
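The Acquiring/Acquired/Releasing trio bracketing the cache refresh is oslo.concurrency's standard lock logging. The same pattern, sketched directly with lockutils (the lock name mirrors nova's refresh_cache-<instance uuid> convention; the body is a placeholder, not nova's actual refresh code):

    # Sketch of the lock pattern logged above, using oslo.concurrency.
    from oslo_concurrency import lockutils

    INSTANCE_UUID = '0cc56d17-ec3a-4408-bccb-91b29427379e'  # from the log

    def refresh_network_cache(instance_uuid):
        # lockutils.lock() emits the Acquiring/Acquired/Releasing DEBUG
        # lines seen above; by default this is an in-process lock.
        with lockutils.lock('refresh_cache-%s' % instance_uuid):
            pass  # placeholder: nova rebuilds instance_info_cache here

    refresh_network_cache(INSTANCE_UUID)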
Oct 11 02:19:13 compute-0 sshd-session[423124]: Failed password for invalid user ubuntu from 121.227.153.123 port 43190 ssh2
Oct 11 02:19:13 compute-0 ceph-mon[191930]: pgmap v1237: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 50 op/s
Oct 11 02:19:13 compute-0 sshd-session[423124]: Connection closed by invalid user ubuntu 121.227.153.123 port 43190 [preauth]
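The sshd-session sequence above (invalid user, pam check, authentication failure, failed password, preauth disconnect) is a routine brute-force signature, and the same client returns again below on port 59108. A small sketch for tallying such attempts per source address from an exported log file (the path is illustrative):

    # Sketch: tally 'Invalid user' attempts per (source IP, username)
    # from a syslog export; the file path is an illustrative placeholder.
    import re
    from collections import Counter

    PATTERN = re.compile(r'Invalid user (\S+) from (\S+) port (\d+)')

    def count_invalid_users(path='/var/log/messages'):
        hits = Counter()
        with open(path, errors='replace') as fh:
            for line in fh:
                m = PATTERN.search(line)
                if m:
                    user, rhost, _port = m.groups()
                    hits[(rhost, user)] += 1
        return hits

    # e.g. {('121.227.153.123', 'ubuntu'): 2} for the two attempts here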
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.860 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is larger than the number of worker threads available to execute them; the polling process can therefore be expected to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.861 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
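The warning at 02:19:13.860 simply notes that with [1] worker thread every pollster in the source runs serially, so one slow pollster delays the whole cycle. A toy illustration of that serialization with concurrent.futures (the sleep durations are made up):

    # Toy illustration: max_workers=1 serializes submitted tasks, so total
    # wall time is the sum of the individual task times.
    import time
    from concurrent.futures import ThreadPoolExecutor

    def fake_pollster(name, seconds):
        time.sleep(seconds)   # stands in for one pollster's work
        return name

    start = time.monotonic()
    with ThreadPoolExecutor(max_workers=1) as pool:
        futures = [pool.submit(fake_pollster, n, 0.2) for n in 'abc']
        results = [f.result() for f in futures]
    print(results, round(time.monotonic() - start, 1))  # ~0.6s, not ~0.2s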
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.862 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.873 14 DEBUG ceilometer.compute.discovery [-] Querying metadata for instance 0cc56d17-ec3a-4408-bccb-91b29427379e from Nova API get_server /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:176
Oct 11 02:19:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:19:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1238: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 1.4 MiB/s rd, 45 op/s
Oct 11 02:19:14 compute-0 nova_compute[356901]: 2025-10-11 02:19:14.200 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:14.276 14 DEBUG novaclient.v2.client [-] REQ: curl -g -i -X GET https://nova-internal.openstack.svc:8774/v2.1/servers/0cc56d17-ec3a-4408-bccb-91b29427379e -H "Accept: application/json" -H "User-Agent: python-novaclient" -H "X-Auth-Token: {SHA256}d674387017edb5d8543811c363b3a2965950a94ddf4462840fede0e79ac258e9" -H "X-OpenStack-Nova-API-Version: 2.1" _http_log_request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:572
Oct 11 02:19:14 compute-0 nova_compute[356901]: 2025-10-11 02:19:14.797 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:15 compute-0 sshd-session[423188]: Invalid user ubuntu from 121.227.153.123 port 59108
Oct 11 02:19:15 compute-0 ceph-mon[191930]: pgmap v1238: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 1.4 MiB/s rd, 45 op/s
Oct 11 02:19:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:19:15 compute-0 sshd-session[423188]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:19:15 compute-0 sshd-session[423188]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:19:15 compute-0 podman[423190]: 2025-10-11 02:19:15.434471563 +0000 UTC m=+0.134489259 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.display-name=Red Hat Universal Base Image 9, managed_by=edpm_ansible, architecture=x86_64, build-date=2024-09-18T21:23:30, config_id=edpm, distribution-scope=public, com.redhat.component=ubi9-container, release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., version=9.4, io.openshift.expose-services=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, maintainer=Red Hat, Inc., io.openshift.tags=base rhel9, name=ubi9, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-type=git, container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release-0.7.12=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.buildah.version=1.29.0)
Oct 11 02:19:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1239: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 21 KiB/s rd, 0 op/s
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.553 14 DEBUG novaclient.v2.client [-] RESP: [200] Connection: Keep-Alive Content-Length: 1850 Content-Type: application/json Date: Sat, 11 Oct 2025 02:19:14 GMT Keep-Alive: timeout=5, max=100 OpenStack-API-Version: compute 2.1 Server: Apache Vary: OpenStack-API-Version,X-OpenStack-Nova-API-Version X-OpenStack-Nova-API-Version: 2.1 x-compute-request-id: req-6b38e6e5-e5d0-43c7-81e5-eaa1b75f3c3b x-openstack-request-id: req-6b38e6e5-e5d0-43c7-81e5-eaa1b75f3c3b _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:613
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.553 14 DEBUG novaclient.v2.client [-] RESP BODY: {"server": {"id": "0cc56d17-ec3a-4408-bccb-91b29427379e", "name": "test_0", "status": "ACTIVE", "tenant_id": "97026531b3404a11869cb85a059c4a0d", "user_id": "d215f3ebbc07435493ccd666fc80109d", "metadata": {}, "hostId": "2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736", "image": {"id": "a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/images/a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7"}]}, "flavor": {"id": "486e1451-345c-45d6-b075-f4717e759025", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/flavors/486e1451-345c-45d6-b075-f4717e759025"}]}, "created": "2025-10-11T02:18:39Z", "updated": "2025-10-11T02:18:58Z", "addresses": {"private": [{"version": 4, "addr": "192.168.0.236", "OS-EXT-IPS:type": "fixed", "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:cc:aa:96"}, {"version": 4, "addr": "192.168.122.201", "OS-EXT-IPS:type": "floating", "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:cc:aa:96"}]}, "accessIPv4": "", "accessIPv6": "", "links": [{"rel": "self", "href": "https://nova-internal.openstack.svc:8774/v2.1/servers/0cc56d17-ec3a-4408-bccb-91b29427379e"}, {"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/servers/0cc56d17-ec3a-4408-bccb-91b29427379e"}], "OS-DCF:diskConfig": "MANUAL", "progress": 0, "OS-EXT-AZ:availability_zone": "nova", "config_drive": "True", "key_name": null, "OS-SRV-USG:launched_at": "2025-10-11T02:18:58.000000", "OS-SRV-USG:terminated_at": null, "security_groups": [{"name": "basic"}], "OS-EXT-SRV-ATTR:host": "compute-0.ctlplane.example.com", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:hypervisor_hostname": "compute-0.ctlplane.example.com", "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-EXT-STS:power_state": 1, "os-extended-volumes:volumes_attached": []}} _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:648
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.553 14 DEBUG novaclient.v2.client [-] GET call to compute for https://nova-internal.openstack.svc:8774/v2.1/servers/0cc56d17-ec3a-4408-bccb-91b29427379e used request id req-6b38e6e5-e5d0-43c7-81e5-eaa1b75f3c3b request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:1073
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.555 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
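The REQ/RESP pair above is keystoneauth1's HTTP logging for a plain GET /v2.1/servers/<uuid> issued through python-novaclient (the X-Auth-Token is masked as a SHA256 digest). The equivalent client-side call, sketched with placeholder credentials and auth URL (only the server UUID and endpoint type come from the log):

    # Sketch of the GET logged above, via python-novaclient + keystoneauth1.
    # Credentials and auth_url below are illustrative placeholders.
    from keystoneauth1 import session
    from keystoneauth1.identity import v3
    from novaclient import client

    auth = v3.Password(
        auth_url='https://keystone-internal.openstack.svc:5000/v3',
        username='ceilometer', password='secret', project_name='service',
        user_domain_name='Default', project_domain_name='Default')
    sess = session.Session(auth=auth)
    nova = client.Client('2.1', session=sess, endpoint_type='internal')

    server = nova.servers.get('0cc56d17-ec3a-4408-bccb-91b29427379e')
    print(server.name, server.status)  # 'test_0' 'ACTIVE' per the RESP BODY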
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.555 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.555 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.556 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.557 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:19:16.556197) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.556 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.564 14 DEBUG ceilometer.compute.virt.libvirt.inspector [-] No delta meter predecessor for 0cc56d17-ec3a-4408-bccb-91b29427379e / tap64dfc81b-52 inspect_vnics /usr/lib/python3.12/site-packages/ceilometer/compute/virt/libvirt/inspector.py:143
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.565 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 110 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.566 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
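"No delta meter predecessor" at 02:19:16.564 means this is the first poll of tap64dfc81b-52, so there is no cached previous reading to diff against and the raw cumulative counter (110 bytes) is reported as-is. A generic sketch of the cache-and-diff idea (the cache shape and reset handling are assumptions, not ceilometer's exact code):

    # Sketch of delta-meter behaviour: diff cumulative counters between
    # polls, falling back to the raw value when no predecessor exists.
    _previous = {}  # (instance_id, iface) -> last cumulative reading

    def delta_sample(instance_id, iface, cumulative):
        key = (instance_id, iface)
        prev = _previous.get(key)
        _previous[key] = cumulative
        if prev is None:
            return cumulative        # the "no predecessor" first-poll case
        return max(cumulative - prev, 0)  # guard against counter resets

    print(delta_sample('0cc56d17', 'tap64dfc81b-52', 110))  # 110 (first poll)
    print(delta_sample('0cc56d17', 'tap64dfc81b-52', 150))  # 40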
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.566 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.566 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.567 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.567 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.567 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:19:16.567190) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.567 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.567 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.568 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.568 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.568 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.568 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.568 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.569 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.569 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.569 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.569 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.570 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:19:16.569030) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.570 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.570 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.570 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.570 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.571 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.571 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.571 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.571 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:19:16.570825) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.572 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.572 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.572 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.572 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.573 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:19:16.572520) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.596 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.597 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.597 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.598 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
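The three disk.device.capacity samples line up with the instance's storage from the discovery data above: flavor m1.small has disk=1 and ephemeral=1 (sizes in GiB), and config_drive is "True", which plausibly accounts for the small third device; that last mapping is an inference, not something the log states. The arithmetic, for the record:

    # The two large samples are exactly 1 GiB, matching disk=1 and
    # ephemeral=1 from the flavor; attributing the 485376-byte device to
    # the config drive is an inference from config_drive='True' above.
    GIB = 1024 ** 3
    assert GIB == 1073741824
    samples = [1073741824, 1073741824, 485376]
    print([round(s / GIB, 6) for s in samples])  # [1.0, 1.0, 0.000452]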
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.598 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.598 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.599 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.599 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.599 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.600 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:19:16.599441) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.647 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 18348032 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.648 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.648 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 2048 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.649 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.649 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.649 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.650 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.650 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.650 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.650 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:19:16.650219) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.651 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1335584531 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.651 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.651 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 2928103 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.652 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.652 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.652 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.652 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.652 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.652 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.652 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 573 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.653 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.653 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.653 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:19:16.652694) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.654 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.654 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.654 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.655 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.655 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.655 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.655 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:19:16.655317) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.655 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.656 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.656 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.657 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.657 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.657 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.657 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.657 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.657 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.657 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.658 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.658 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:19:16.657660) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.658 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.659 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.659 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.659 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.659 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.659 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.660 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.660 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.660 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:19:16.660018) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.661 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.661 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.661 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.661 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.662 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.662 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.662 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.662 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.663 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:19:16.662557) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.690 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.691 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.691 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.691 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.691 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.691 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.691 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.692 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:19:16.691729) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.692 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.692 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.692 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.693 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.693 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.693 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.693 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.693 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.693 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.693 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.694 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.694 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:19:16.693671) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.694 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.694 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.rate in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.694 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.694 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.694 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.rate heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.695 14 DEBUG ceilometer.compute.pollsters [-] LibvirtInspector does not provide data for IncomingBytesRatePollster get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:162
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.695 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.rate (2025-10-11T02:19:16.694878) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.695 14 ERROR ceilometer.polling.manager [-] Preventing pollster network.incoming.bytes.rate from polling [<NovaLikeServer: test_0>] on source pollsters from now on: ceilometer.polling.plugin_base.PollsterPermanentError: [<NovaLikeServer: test_0>]
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.697 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.697 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.697 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.697 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.697 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.697 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.697 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.698 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.698 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:19:16.697443) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.698 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.698 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.698 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.698 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:19:16.698676) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.699 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.699 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.699 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.699 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.699 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.699 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.700 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.700 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:19:16.699970) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.700 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.700 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.700 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.701 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.701 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.701 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.701 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.701 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:19:16.701350) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.702 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.702 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.702 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.702 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.702 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.702 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.702 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:19:16.702610) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.703 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.703 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.703 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.703 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.703 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.703 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.704 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.704 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:19:16.703991) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.704 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.704 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.705 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.705 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.705 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.705 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.705 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.705 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.705 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.706 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:19:16.705936) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.706 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.706 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.707 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.707 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.707 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.707 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.707 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.707 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:19:16.707493) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.707 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 17950000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.708 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.708 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.708 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.708 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.708 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.708 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.709 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:19:16.708778) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.709 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.709 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.709 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.709 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.709 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.709 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.710 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.710 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:19:16.710055) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.710 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: Unavailable _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.710 14 WARNING ceilometer.compute.pollsters [-] memory.usage statistic is not available for instance 0cc56d17-ec3a-4408-bccb-91b29427379e: ceilometer.compute.pollsters.NoVolumeException
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.710 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.710 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.711 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.rate in the context of pollsters
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.711 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.711 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.711 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.rate heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.711 14 DEBUG ceilometer.compute.pollsters [-] LibvirtInspector does not provide data for OutgoingBytesRatePollster get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:162
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.711 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.rate (2025-10-11T02:19:16.711356) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.712 14 ERROR ceilometer.polling.manager [-] Preventing pollster network.outgoing.bytes.rate from polling [<NovaLikeServer: test_0>] on source pollsters from now on: ceilometer.polling.plugin_base.PollsterPermanentError: [<NovaLikeServer: test_0>]
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.712 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.713 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.713 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.713 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.714 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.714 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.714 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.714 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.715 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.715 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.715 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.715 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.715 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.716 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.716 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.716 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.716 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.716 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.717 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.717 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.717 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.718 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.718 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.718 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.718 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:19:16.718 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:19:17 compute-0 ceph-mon[191930]: pgmap v1239: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail; 21 KiB/s rd, 0 op/s
Oct 11 02:19:17 compute-0 sshd-session[423188]: Failed password for invalid user ubuntu from 121.227.153.123 port 59108 ssh2
Oct 11 02:19:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1240: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:18 compute-0 sshd-session[423188]: Connection closed by invalid user ubuntu 121.227.153.123 port 59108 [preauth]
Oct 11 02:19:19 compute-0 ceph-mon[191930]: pgmap v1240: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:19 compute-0 nova_compute[356901]: 2025-10-11 02:19:19.204 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:19 compute-0 sshd-session[423210]: Invalid user ubuntu from 121.227.153.123 port 59124
Oct 11 02:19:19 compute-0 sshd-session[423210]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:19:19 compute-0 sshd-session[423210]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:19:19 compute-0 nova_compute[356901]: 2025-10-11 02:19:19.801 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1241: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:19:21 compute-0 ceph-mon[191930]: pgmap v1241: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:21 compute-0 sshd-session[423210]: Failed password for invalid user ubuntu from 121.227.153.123 port 59124 ssh2
Oct 11 02:19:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1242: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:22 compute-0 sshd-session[423210]: Connection closed by invalid user ubuntu 121.227.153.123 port 59124 [preauth]
Oct 11 02:19:23 compute-0 ceph-mon[191930]: pgmap v1242: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1243: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:23 compute-0 sshd-session[423213]: Invalid user ubuntu from 121.227.153.123 port 43154
Oct 11 02:19:24 compute-0 sshd-session[423213]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:19:24 compute-0 sshd-session[423213]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:19:24 compute-0 nova_compute[356901]: 2025-10-11 02:19:24.207 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:24 compute-0 nova_compute[356901]: 2025-10-11 02:19:24.805 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:25 compute-0 ceph-mon[191930]: pgmap v1243: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:25 compute-0 podman[423215]: 2025-10-11 02:19:25.244642563 +0000 UTC m=+0.129553281 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:19:25 compute-0 podman[423216]: 2025-10-11 02:19:25.249647523 +0000 UTC m=+0.122478979 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251007, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm, org.label-schema.schema-version=1.0)
Oct 11 02:19:25 compute-0 podman[423217]: 2025-10-11 02:19:25.250841679 +0000 UTC m=+0.122947657 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']})
Oct 11 02:19:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:19:25 compute-0 podman[423218]: 2025-10-11 02:19:25.272453113 +0000 UTC m=+0.150365308 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_managed=true, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 02:19:25 compute-0 sshd-session[423213]: Failed password for invalid user ubuntu from 121.227.153.123 port 43154 ssh2
Oct 11 02:19:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1244: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:19:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:19:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:19:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:19:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:19:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:19:27 compute-0 ceph-mon[191930]: pgmap v1244: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:27 compute-0 sshd-session[423213]: Connection closed by invalid user ubuntu 121.227.153.123 port 43154 [preauth]
Oct 11 02:19:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:19:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/4088418337' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:19:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:19:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/4088418337' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:19:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1245: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/4088418337' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:19:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/4088418337' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:19:28 compute-0 sshd-session[423296]: Invalid user ubuntu from 121.227.153.123 port 43156
Oct 11 02:19:28 compute-0 sshd-session[423296]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:19:28 compute-0 sshd-session[423296]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:19:29 compute-0 ceph-mon[191930]: pgmap v1245: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:29 compute-0 nova_compute[356901]: 2025-10-11 02:19:29.212 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:29 compute-0 podman[157119]: time="2025-10-11T02:19:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:19:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:19:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:19:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:19:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9035 "" "Go-http-client/1.1"
Oct 11 02:19:29 compute-0 nova_compute[356901]: 2025-10-11 02:19:29.809 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1246: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:19:30 compute-0 sshd-session[423296]: Failed password for invalid user ubuntu from 121.227.153.123 port 43156 ssh2
Oct 11 02:19:31 compute-0 ceph-mon[191930]: pgmap v1246: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:31 compute-0 podman[423298]: 2025-10-11 02:19:31.244023501 +0000 UTC m=+0.127945016 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, container_name=multipathd, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_managed=true)
Oct 11 02:19:31 compute-0 podman[423299]: 2025-10-11 02:19:31.259847229 +0000 UTC m=+0.140934568 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, io.buildah.version=1.41.3)
Oct 11 02:19:31 compute-0 openstack_network_exporter[374316]: ERROR   02:19:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:19:31 compute-0 openstack_network_exporter[374316]: ERROR   02:19:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:19:31 compute-0 openstack_network_exporter[374316]: ERROR   02:19:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:19:31 compute-0 openstack_network_exporter[374316]: ERROR   02:19:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:19:31 compute-0 openstack_network_exporter[374316]: ERROR   02:19:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:19:31 compute-0 sshd-session[423296]: Connection closed by invalid user ubuntu 121.227.153.123 port 43156 [preauth]
Oct 11 02:19:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1247: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:32 compute-0 ovn_controller[88370]: 2025-10-11T02:19:32Z|00004|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:cc:aa:96 192.168.0.236
Oct 11 02:19:32 compute-0 ovn_controller[88370]: 2025-10-11T02:19:32Z|00005|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:cc:aa:96 192.168.0.236
Oct 11 02:19:32 compute-0 sshd-session[423335]: Invalid user ubuntu from 121.227.153.123 port 44590
Oct 11 02:19:33 compute-0 ceph-mon[191930]: pgmap v1247: 321 pgs: 321 active+clean; 49 MiB data, 181 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:33 compute-0 sshd-session[423335]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:19:33 compute-0 sshd-session[423335]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:19:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1248: 321 pgs: 321 active+clean; 61 MiB data, 192 MiB used, 60 GiB / 60 GiB avail; 109 KiB/s rd, 954 KiB/s wr, 34 op/s
Oct 11 02:19:34 compute-0 nova_compute[356901]: 2025-10-11 02:19:34.212 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:34 compute-0 nova_compute[356901]: 2025-10-11 02:19:34.813 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:35 compute-0 ceph-mon[191930]: pgmap v1248: 321 pgs: 321 active+clean; 61 MiB data, 192 MiB used, 60 GiB / 60 GiB avail; 109 KiB/s rd, 954 KiB/s wr, 34 op/s
Oct 11 02:19:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:19:35 compute-0 sshd-session[423335]: Failed password for invalid user ubuntu from 121.227.153.123 port 44590 ssh2
Oct 11 02:19:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1249: 321 pgs: 321 active+clean; 69 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 113 KiB/s rd, 1.4 MiB/s wr, 40 op/s
Oct 11 02:19:36 compute-0 sshd-session[423335]: Connection closed by invalid user ubuntu 121.227.153.123 port 44590 [preauth]
Oct 11 02:19:37 compute-0 ceph-mon[191930]: pgmap v1249: 321 pgs: 321 active+clean; 69 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 113 KiB/s rd, 1.4 MiB/s wr, 40 op/s
Oct 11 02:19:37 compute-0 sshd-session[423338]: Invalid user ubuntu from 121.227.153.123 port 44596
Oct 11 02:19:37 compute-0 sshd-session[423338]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:19:37 compute-0 sshd-session[423338]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:19:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1250: 321 pgs: 321 active+clean; 75 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 140 KiB/s rd, 1.5 MiB/s wr, 53 op/s
Oct 11 02:19:38 compute-0 ovn_controller[88370]: 2025-10-11T02:19:38Z|00034|memory_trim|INFO|Detected inactivity (last active 30005 ms ago): trimming memory
Oct 11 02:19:39 compute-0 nova_compute[356901]: 2025-10-11 02:19:39.214 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:39 compute-0 ceph-mon[191930]: pgmap v1250: 321 pgs: 321 active+clean; 75 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 140 KiB/s rd, 1.5 MiB/s wr, 53 op/s
Oct 11 02:19:39 compute-0 sshd-session[423338]: Failed password for invalid user ubuntu from 121.227.153.123 port 44596 ssh2
Oct 11 02:19:39 compute-0 nova_compute[356901]: 2025-10-11 02:19:39.817 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1251: 321 pgs: 321 active+clean; 77 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 157 KiB/s rd, 1.5 MiB/s wr, 56 op/s
Oct 11 02:19:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:19:40 compute-0 ceph-mon[191930]: pgmap v1251: 321 pgs: 321 active+clean; 77 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 157 KiB/s rd, 1.5 MiB/s wr, 56 op/s
Oct 11 02:19:40 compute-0 sudo[423340]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:19:40 compute-0 sudo[423340]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:40 compute-0 sudo[423340]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:40 compute-0 sudo[423365]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:19:40 compute-0 sudo[423365]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:40 compute-0 sudo[423365]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:40 compute-0 sshd-session[423338]: Connection closed by invalid user ubuntu 121.227.153.123 port 44596 [preauth]
Oct 11 02:19:40 compute-0 sudo[423390]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:19:40 compute-0 sudo[423390]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:40 compute-0 sudo[423390]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:40 compute-0 sudo[423415]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:19:40 compute-0 sudo[423415]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:41 compute-0 sudo[423415]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:19:41 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:19:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:19:41 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:19:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:19:41 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:19:41 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 6fa24797-df21-439d-ac30-19b5ab40a58a does not exist
Oct 11 02:19:41 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 218101df-cb13-4bb9-abd3-b4f87095f098 does not exist
Oct 11 02:19:41 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 915bc28f-de59-4b07-bcf6-39a86d2bfbe9 does not exist
Oct 11 02:19:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:19:41 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:19:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:19:41 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:19:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:19:41 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:19:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:19:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:19:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:19:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:19:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:19:41 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:19:41 compute-0 sudo[423474]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:19:41 compute-0 sudo[423474]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:41 compute-0 sudo[423474]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1252: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 157 KiB/s rd, 1.5 MiB/s wr, 57 op/s
Oct 11 02:19:42 compute-0 sudo[423499]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:19:42 compute-0 sudo[423499]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:42 compute-0 sudo[423499]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:42 compute-0 sshd-session[423440]: Invalid user ubuntu from 121.227.153.123 port 46360
Oct 11 02:19:42 compute-0 sudo[423524]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:19:42 compute-0 sudo[423524]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:42 compute-0 sudo[423524]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:42 compute-0 sudo[423549]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:19:42 compute-0 sshd-session[423440]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:19:42 compute-0 sshd-session[423440]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:19:42 compute-0 sudo[423549]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:42 compute-0 podman[423574]: 2025-10-11 02:19:42.506922834 +0000 UTC m=+0.130001515 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, container_name=openstack_network_exporter, io.openshift.tags=minimal rhel9, build-date=2025-08-20T13:12:41, distribution-scope=public, vendor=Red Hat, Inc., architecture=x86_64, config_id=edpm, maintainer=Red Hat, Inc., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., version=9.6, com.redhat.component=ubi9-minimal-container, io.openshift.expose-services=, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.buildah.version=1.33.7, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350, name=ubi9-minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:19:42 compute-0 podman[423578]: 2025-10-11 02:19:42.537613545 +0000 UTC m=+0.143689683 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:19:42 compute-0 podman[423573]: 2025-10-11 02:19:42.544992044 +0000 UTC m=+0.185187791 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_managed=true, config_id=edpm, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.3)
Oct 11 02:19:42 compute-0 ceph-mon[191930]: pgmap v1252: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 157 KiB/s rd, 1.5 MiB/s wr, 57 op/s
Oct 11 02:19:42 compute-0 podman[423672]: 2025-10-11 02:19:42.996596468 +0000 UTC m=+0.101754843 container create 29fad08daa5ea31dc98a4fc13067a5626ea9a0ad0715b0a76d9e1dbac60aefac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_pike, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:19:43 compute-0 podman[423672]: 2025-10-11 02:19:42.957365106 +0000 UTC m=+0.062523531 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:19:43 compute-0 systemd[1]: Started libpod-conmon-29fad08daa5ea31dc98a4fc13067a5626ea9a0ad0715b0a76d9e1dbac60aefac.scope.
Oct 11 02:19:43 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:19:43 compute-0 podman[423672]: 2025-10-11 02:19:43.172003307 +0000 UTC m=+0.277161692 container init 29fad08daa5ea31dc98a4fc13067a5626ea9a0ad0715b0a76d9e1dbac60aefac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_pike, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:19:43 compute-0 podman[423672]: 2025-10-11 02:19:43.189385376 +0000 UTC m=+0.294543751 container start 29fad08daa5ea31dc98a4fc13067a5626ea9a0ad0715b0a76d9e1dbac60aefac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_pike, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 02:19:43 compute-0 podman[423672]: 2025-10-11 02:19:43.195648619 +0000 UTC m=+0.300807054 container attach 29fad08daa5ea31dc98a4fc13067a5626ea9a0ad0715b0a76d9e1dbac60aefac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_pike, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 02:19:43 compute-0 hopeful_pike[423687]: 167 167
Oct 11 02:19:43 compute-0 systemd[1]: libpod-29fad08daa5ea31dc98a4fc13067a5626ea9a0ad0715b0a76d9e1dbac60aefac.scope: Deactivated successfully.
Oct 11 02:19:43 compute-0 podman[423672]: 2025-10-11 02:19:43.200880037 +0000 UTC m=+0.306038432 container died 29fad08daa5ea31dc98a4fc13067a5626ea9a0ad0715b0a76d9e1dbac60aefac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_pike, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 02:19:43 compute-0 systemd[1]: var-lib-containers-storage-overlay-220cafd8628abc92d11b28d1461c888743f1bda0e751fb08208e1b3a051a4bca-merged.mount: Deactivated successfully.
Oct 11 02:19:43 compute-0 podman[423672]: 2025-10-11 02:19:43.283183518 +0000 UTC m=+0.388341903 container remove 29fad08daa5ea31dc98a4fc13067a5626ea9a0ad0715b0a76d9e1dbac60aefac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_pike, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:19:43 compute-0 systemd[1]: libpod-conmon-29fad08daa5ea31dc98a4fc13067a5626ea9a0ad0715b0a76d9e1dbac60aefac.scope: Deactivated successfully.
Oct 11 02:19:43 compute-0 podman[423710]: 2025-10-11 02:19:43.614143787 +0000 UTC m=+0.101701887 container create 0da345a6c2ce83a2edb59c7506eadeab115fc6763033caba34c95d26e017841a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_almeida, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True)
Oct 11 02:19:43 compute-0 podman[423710]: 2025-10-11 02:19:43.581536973 +0000 UTC m=+0.069095123 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:19:43 compute-0 systemd[1]: Started libpod-conmon-0da345a6c2ce83a2edb59c7506eadeab115fc6763033caba34c95d26e017841a.scope.
Oct 11 02:19:43 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:19:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b20f83cc90c3a5be6f8b4e5045c857832f9be90e0728f84f2285f14f4b982cc5/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:19:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b20f83cc90c3a5be6f8b4e5045c857832f9be90e0728f84f2285f14f4b982cc5/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:19:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b20f83cc90c3a5be6f8b4e5045c857832f9be90e0728f84f2285f14f4b982cc5/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:19:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b20f83cc90c3a5be6f8b4e5045c857832f9be90e0728f84f2285f14f4b982cc5/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:19:43 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b20f83cc90c3a5be6f8b4e5045c857832f9be90e0728f84f2285f14f4b982cc5/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:19:43 compute-0 podman[423710]: 2025-10-11 02:19:43.758363625 +0000 UTC m=+0.245921705 container init 0da345a6c2ce83a2edb59c7506eadeab115fc6763033caba34c95d26e017841a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_almeida, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0)
Oct 11 02:19:43 compute-0 podman[423710]: 2025-10-11 02:19:43.774930304 +0000 UTC m=+0.262488404 container start 0da345a6c2ce83a2edb59c7506eadeab115fc6763033caba34c95d26e017841a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_almeida, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:19:43 compute-0 podman[423710]: 2025-10-11 02:19:43.783249538 +0000 UTC m=+0.270807618 container attach 0da345a6c2ce83a2edb59c7506eadeab115fc6763033caba34c95d26e017841a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_almeida, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2)
Oct 11 02:19:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1253: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 157 KiB/s rd, 1.5 MiB/s wr, 57 op/s
Oct 11 02:19:44 compute-0 sshd-session[423440]: Failed password for invalid user ubuntu from 121.227.153.123 port 46360 ssh2
Oct 11 02:19:44 compute-0 nova_compute[356901]: 2025-10-11 02:19:44.216 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:44 compute-0 nova_compute[356901]: 2025-10-11 02:19:44.821 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:44 compute-0 ceph-mon[191930]: pgmap v1253: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 157 KiB/s rd, 1.5 MiB/s wr, 57 op/s
Oct 11 02:19:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:19:45 compute-0 heuristic_almeida[423727]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:19:45 compute-0 heuristic_almeida[423727]: --> relative data size: 1.0
Oct 11 02:19:45 compute-0 heuristic_almeida[423727]: --> All data devices are unavailable
Oct 11 02:19:45 compute-0 systemd[1]: libpod-0da345a6c2ce83a2edb59c7506eadeab115fc6763033caba34c95d26e017841a.scope: Deactivated successfully.
Oct 11 02:19:45 compute-0 systemd[1]: libpod-0da345a6c2ce83a2edb59c7506eadeab115fc6763033caba34c95d26e017841a.scope: Consumed 1.467s CPU time.
Oct 11 02:19:45 compute-0 podman[423710]: 2025-10-11 02:19:45.34080585 +0000 UTC m=+1.828363940 container died 0da345a6c2ce83a2edb59c7506eadeab115fc6763033caba34c95d26e017841a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_almeida, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 02:19:45 compute-0 systemd[1]: var-lib-containers-storage-overlay-b20f83cc90c3a5be6f8b4e5045c857832f9be90e0728f84f2285f14f4b982cc5-merged.mount: Deactivated successfully.
Oct 11 02:19:45 compute-0 sshd-session[423440]: Connection closed by invalid user ubuntu 121.227.153.123 port 46360 [preauth]
Oct 11 02:19:45 compute-0 podman[423710]: 2025-10-11 02:19:45.447640561 +0000 UTC m=+1.935198641 container remove 0da345a6c2ce83a2edb59c7506eadeab115fc6763033caba34c95d26e017841a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_almeida, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:19:45 compute-0 systemd[1]: libpod-conmon-0da345a6c2ce83a2edb59c7506eadeab115fc6763033caba34c95d26e017841a.scope: Deactivated successfully.
Oct 11 02:19:45 compute-0 sudo[423549]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:45 compute-0 podman[423767]: 2025-10-11 02:19:45.618881003 +0000 UTC m=+0.115452123 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.29.0, name=ubi9, version=9.4, build-date=2024-09-18T21:23:30, managed_by=edpm_ansible, vcs-type=git, io.openshift.tags=base rhel9, com.redhat.component=ubi9-container, config_id=edpm, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release-0.7.12=, vendor=Red Hat, Inc., architecture=x86_64, container_name=kepler, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.k8s.display-name=Red Hat Universal Base Image 9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.openshift.expose-services=)
Oct 11 02:19:45 compute-0 sudo[423773]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:19:45 compute-0 sudo[423773]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:45 compute-0 sudo[423773]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:45 compute-0 sudo[423812]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:19:45 compute-0 sudo[423812]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:45 compute-0 sudo[423812]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:45 compute-0 sudo[423839]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:19:45 compute-0 sudo[423839]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:45 compute-0 sudo[423839]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:45 compute-0 nova_compute[356901]: 2025-10-11 02:19:45.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:19:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1254: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 48 KiB/s rd, 566 KiB/s wr, 22 op/s
Oct 11 02:19:45 compute-0 sudo[423864]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:19:45 compute-0 sudo[423864]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:46 compute-0 podman[423928]: 2025-10-11 02:19:46.557662294 +0000 UTC m=+0.083256288 container create 0d68054e53c0930b631b6e71ac6ce6a41d38efad9aca508e7b8362fac2b7eefa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_mendel, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, ceph=True, OSD_FLAVOR=default)
Oct 11 02:19:46 compute-0 systemd[1]: Started libpod-conmon-0d68054e53c0930b631b6e71ac6ce6a41d38efad9aca508e7b8362fac2b7eefa.scope.
Oct 11 02:19:46 compute-0 podman[423928]: 2025-10-11 02:19:46.521890154 +0000 UTC m=+0.047484128 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:19:46 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:19:46 compute-0 sshd-session[423813]: Invalid user ubuntu from 121.227.153.123 port 46372
Oct 11 02:19:46 compute-0 podman[423928]: 2025-10-11 02:19:46.685519118 +0000 UTC m=+0.211113162 container init 0d68054e53c0930b631b6e71ac6ce6a41d38efad9aca508e7b8362fac2b7eefa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_mendel, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:19:46 compute-0 podman[423928]: 2025-10-11 02:19:46.701381471 +0000 UTC m=+0.226975425 container start 0d68054e53c0930b631b6e71ac6ce6a41d38efad9aca508e7b8362fac2b7eefa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_mendel, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default)
Oct 11 02:19:46 compute-0 podman[423928]: 2025-10-11 02:19:46.709383967 +0000 UTC m=+0.234978011 container attach 0d68054e53c0930b631b6e71ac6ce6a41d38efad9aca508e7b8362fac2b7eefa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_mendel, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:19:46 compute-0 confident_mendel[423945]: 167 167
Oct 11 02:19:46 compute-0 systemd[1]: libpod-0d68054e53c0930b631b6e71ac6ce6a41d38efad9aca508e7b8362fac2b7eefa.scope: Deactivated successfully.
Oct 11 02:19:46 compute-0 podman[423928]: 2025-10-11 02:19:46.714209465 +0000 UTC m=+0.239803429 container died 0d68054e53c0930b631b6e71ac6ce6a41d38efad9aca508e7b8362fac2b7eefa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_mendel, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:19:46 compute-0 systemd[1]: var-lib-containers-storage-overlay-ed2e71f8aeb3718427b00057c95966606806651a28d3c6ed9f262b474872395d-merged.mount: Deactivated successfully.
Oct 11 02:19:46 compute-0 podman[423928]: 2025-10-11 02:19:46.786468942 +0000 UTC m=+0.312062896 container remove 0d68054e53c0930b631b6e71ac6ce6a41d38efad9aca508e7b8362fac2b7eefa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_mendel, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 02:19:46 compute-0 systemd[1]: libpod-conmon-0d68054e53c0930b631b6e71ac6ce6a41d38efad9aca508e7b8362fac2b7eefa.scope: Deactivated successfully.
Oct 11 02:19:46 compute-0 nova_compute[356901]: 2025-10-11 02:19:46.893 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:19:46 compute-0 nova_compute[356901]: 2025-10-11 02:19:46.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:19:46 compute-0 sshd-session[423813]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:19:46 compute-0 sshd-session[423813]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:19:47 compute-0 ceph-mon[191930]: pgmap v1254: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 48 KiB/s rd, 566 KiB/s wr, 22 op/s
Oct 11 02:19:47 compute-0 podman[423967]: 2025-10-11 02:19:47.103761635 +0000 UTC m=+0.097962111 container create b33edbec6a19c981bb223c3af8a2b2dd04ed0cfbb5498eab081c9fafad2cc20e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_archimedes, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:19:47 compute-0 podman[423967]: 2025-10-11 02:19:47.062799952 +0000 UTC m=+0.057000508 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:19:47 compute-0 systemd[1]: Started libpod-conmon-b33edbec6a19c981bb223c3af8a2b2dd04ed0cfbb5498eab081c9fafad2cc20e.scope.
Oct 11 02:19:47 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:19:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e369a6e6ca58982aa10fa55a2837d2e392ada76916a3e4dd6d28a63622f3e627/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:19:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e369a6e6ca58982aa10fa55a2837d2e392ada76916a3e4dd6d28a63622f3e627/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:19:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e369a6e6ca58982aa10fa55a2837d2e392ada76916a3e4dd6d28a63622f3e627/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:19:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e369a6e6ca58982aa10fa55a2837d2e392ada76916a3e4dd6d28a63622f3e627/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:19:47 compute-0 podman[423967]: 2025-10-11 02:19:47.252935527 +0000 UTC m=+0.247135983 container init b33edbec6a19c981bb223c3af8a2b2dd04ed0cfbb5498eab081c9fafad2cc20e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_archimedes, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:19:47 compute-0 podman[423967]: 2025-10-11 02:19:47.282562888 +0000 UTC m=+0.276763314 container start b33edbec6a19c981bb223c3af8a2b2dd04ed0cfbb5498eab081c9fafad2cc20e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_archimedes, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 02:19:47 compute-0 podman[423967]: 2025-10-11 02:19:47.287096491 +0000 UTC m=+0.281296927 container attach b33edbec6a19c981bb223c3af8a2b2dd04ed0cfbb5498eab081c9fafad2cc20e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_archimedes, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 02:19:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1255: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 44 KiB/s rd, 35 KiB/s wr, 16 op/s
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]: {
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:     "0": [
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:         {
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "devices": [
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "/dev/loop3"
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             ],
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "lv_name": "ceph_lv0",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "lv_size": "21470642176",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "name": "ceph_lv0",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "tags": {
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.cluster_name": "ceph",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.crush_device_class": "",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.encrypted": "0",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.osd_id": "0",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.type": "block",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.vdo": "0"
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             },
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "type": "block",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "vg_name": "ceph_vg0"
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:         }
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:     ],
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:     "1": [
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:         {
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "devices": [
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "/dev/loop4"
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             ],
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "lv_name": "ceph_lv1",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "lv_size": "21470642176",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "name": "ceph_lv1",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "tags": {
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.cluster_name": "ceph",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.crush_device_class": "",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.encrypted": "0",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.osd_id": "1",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.type": "block",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.vdo": "0"
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             },
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "type": "block",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "vg_name": "ceph_vg1"
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:         }
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:     ],
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:     "2": [
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:         {
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "devices": [
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "/dev/loop5"
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             ],
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "lv_name": "ceph_lv2",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "lv_size": "21470642176",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "name": "ceph_lv2",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "tags": {
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.cluster_name": "ceph",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.crush_device_class": "",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.encrypted": "0",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.osd_id": "2",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.type": "block",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:                 "ceph.vdo": "0"
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             },
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "type": "block",
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:             "vg_name": "ceph_vg2"
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:         }
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]:     ]
Oct 11 02:19:48 compute-0 naughty_archimedes[423983]: }
Oct 11 02:19:48 compute-0 systemd[1]: libpod-b33edbec6a19c981bb223c3af8a2b2dd04ed0cfbb5498eab081c9fafad2cc20e.scope: Deactivated successfully.
Oct 11 02:19:48 compute-0 podman[423967]: 2025-10-11 02:19:48.192196244 +0000 UTC m=+1.186396690 container died b33edbec6a19c981bb223c3af8a2b2dd04ed0cfbb5498eab081c9fafad2cc20e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_archimedes, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:19:48 compute-0 systemd[1]: var-lib-containers-storage-overlay-e369a6e6ca58982aa10fa55a2837d2e392ada76916a3e4dd6d28a63622f3e627-merged.mount: Deactivated successfully.
Oct 11 02:19:48 compute-0 podman[423967]: 2025-10-11 02:19:48.308425909 +0000 UTC m=+1.302626385 container remove b33edbec6a19c981bb223c3af8a2b2dd04ed0cfbb5498eab081c9fafad2cc20e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_archimedes, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:19:48 compute-0 systemd[1]: libpod-conmon-b33edbec6a19c981bb223c3af8a2b2dd04ed0cfbb5498eab081c9fafad2cc20e.scope: Deactivated successfully.
Oct 11 02:19:48 compute-0 sudo[423864]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:48 compute-0 sudo[424005]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:19:48 compute-0 sudo[424005]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:48 compute-0 sudo[424005]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:48 compute-0 sudo[424030]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:19:48 compute-0 sudo[424030]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:48 compute-0 sudo[424030]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:48 compute-0 sudo[424055]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:19:48 compute-0 sudo[424055]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:48 compute-0 sudo[424055]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:48 compute-0 sshd-session[423813]: Failed password for invalid user ubuntu from 121.227.153.123 port 46372 ssh2
Oct 11 02:19:48 compute-0 sudo[424080]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:19:48 compute-0 sudo[424080]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:49 compute-0 ceph-mon[191930]: pgmap v1255: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 44 KiB/s rd, 35 KiB/s wr, 16 op/s
Oct 11 02:19:49 compute-0 nova_compute[356901]: 2025-10-11 02:19:49.220 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:49 compute-0 podman[424145]: 2025-10-11 02:19:49.611066671 +0000 UTC m=+0.091656616 container create 931590a72d3bddbf296019d490a046389050aa6c5905729d3846c5928409b998 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_hofstadter, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 02:19:49 compute-0 systemd[1]: Started libpod-conmon-931590a72d3bddbf296019d490a046389050aa6c5905729d3846c5928409b998.scope.
Oct 11 02:19:49 compute-0 podman[424145]: 2025-10-11 02:19:49.57716732 +0000 UTC m=+0.057757275 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:19:49 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:19:49 compute-0 podman[424145]: 2025-10-11 02:19:49.757390718 +0000 UTC m=+0.237980733 container init 931590a72d3bddbf296019d490a046389050aa6c5905729d3846c5928409b998 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_hofstadter, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:19:49 compute-0 podman[424145]: 2025-10-11 02:19:49.777493395 +0000 UTC m=+0.258083340 container start 931590a72d3bddbf296019d490a046389050aa6c5905729d3846c5928409b998 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_hofstadter, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 02:19:49 compute-0 podman[424145]: 2025-10-11 02:19:49.784467175 +0000 UTC m=+0.265057100 container attach 931590a72d3bddbf296019d490a046389050aa6c5905729d3846c5928409b998 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_hofstadter, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:19:49 compute-0 competent_hofstadter[424161]: 167 167
Oct 11 02:19:49 compute-0 systemd[1]: libpod-931590a72d3bddbf296019d490a046389050aa6c5905729d3846c5928409b998.scope: Deactivated successfully.
Oct 11 02:19:49 compute-0 podman[424145]: 2025-10-11 02:19:49.790624183 +0000 UTC m=+0.271214168 container died 931590a72d3bddbf296019d490a046389050aa6c5905729d3846c5928409b998 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_hofstadter, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:19:49 compute-0 nova_compute[356901]: 2025-10-11 02:19:49.824 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:49 compute-0 systemd[1]: var-lib-containers-storage-overlay-e9162fee977df57b412e8457b3a9cc3727c710fa388de183e39eb526e448fa59-merged.mount: Deactivated successfully.
Oct 11 02:19:49 compute-0 podman[424145]: 2025-10-11 02:19:49.871613925 +0000 UTC m=+0.352203870 container remove 931590a72d3bddbf296019d490a046389050aa6c5905729d3846c5928409b998 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_hofstadter, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 02:19:49 compute-0 systemd[1]: libpod-conmon-931590a72d3bddbf296019d490a046389050aa6c5905729d3846c5928409b998.scope: Deactivated successfully.
Oct 11 02:19:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1256: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 17 KiB/s rd, 12 KiB/s wr, 3 op/s
Oct 11 02:19:49 compute-0 sshd-session[423813]: Connection closed by invalid user ubuntu 121.227.153.123 port 46372 [preauth]
Oct 11 02:19:50 compute-0 podman[424184]: 2025-10-11 02:19:50.222704441 +0000 UTC m=+0.101311828 container create 098234b1910adc6d3e815a4cb88d9263bf47083e5dd887bdb11018d123360118 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pedantic_pasteur, ceph=True, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:19:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:19:50 compute-0 podman[424184]: 2025-10-11 02:19:50.192397069 +0000 UTC m=+0.071004496 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:19:50 compute-0 systemd[1]: Started libpod-conmon-098234b1910adc6d3e815a4cb88d9263bf47083e5dd887bdb11018d123360118.scope.
Oct 11 02:19:50 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:19:50 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/975e684641753ad5f708ee410ba0c49daa639eb64c02461e4432fc18607b9d26/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:19:50 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/975e684641753ad5f708ee410ba0c49daa639eb64c02461e4432fc18607b9d26/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:19:50 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/975e684641753ad5f708ee410ba0c49daa639eb64c02461e4432fc18607b9d26/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:19:50 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/975e684641753ad5f708ee410ba0c49daa639eb64c02461e4432fc18607b9d26/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:19:50 compute-0 podman[424184]: 2025-10-11 02:19:50.411891097 +0000 UTC m=+0.290498514 container init 098234b1910adc6d3e815a4cb88d9263bf47083e5dd887bdb11018d123360118 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pedantic_pasteur, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, OSD_FLAVOR=default)
Oct 11 02:19:50 compute-0 podman[424184]: 2025-10-11 02:19:50.440907199 +0000 UTC m=+0.319514576 container start 098234b1910adc6d3e815a4cb88d9263bf47083e5dd887bdb11018d123360118 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pedantic_pasteur, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0)
Oct 11 02:19:50 compute-0 podman[424184]: 2025-10-11 02:19:50.447796435 +0000 UTC m=+0.326403862 container attach 098234b1910adc6d3e815a4cb88d9263bf47083e5dd887bdb11018d123360118 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pedantic_pasteur, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:19:50 compute-0 nova_compute[356901]: 2025-10-11 02:19:50.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:19:50 compute-0 nova_compute[356901]: 2025-10-11 02:19:50.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:19:50 compute-0 nova_compute[356901]: 2025-10-11 02:19:50.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:19:51 compute-0 ceph-mon[191930]: pgmap v1256: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 17 KiB/s rd, 12 KiB/s wr, 3 op/s
Oct 11 02:19:51 compute-0 sshd-session[424195]: Invalid user ubuntu from 121.227.153.123 port 35622
Oct 11 02:19:51 compute-0 sshd-session[424195]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:19:51 compute-0 sshd-session[424195]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:19:51 compute-0 nova_compute[356901]: 2025-10-11 02:19:51.552 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:19:51 compute-0 nova_compute[356901]: 2025-10-11 02:19:51.554 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:19:51 compute-0 nova_compute[356901]: 2025-10-11 02:19:51.555 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:19:51 compute-0 nova_compute[356901]: 2025-10-11 02:19:51.555 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]: {
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:         "osd_id": 1,
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:         "type": "bluestore"
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:     },
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:         "osd_id": 2,
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:         "type": "bluestore"
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:     },
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:         "osd_id": 0,
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:         "type": "bluestore"
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]:     }
Oct 11 02:19:51 compute-0 pedantic_pasteur[424201]: }
Oct 11 02:19:51 compute-0 systemd[1]: libpod-098234b1910adc6d3e815a4cb88d9263bf47083e5dd887bdb11018d123360118.scope: Deactivated successfully.
Oct 11 02:19:51 compute-0 systemd[1]: libpod-098234b1910adc6d3e815a4cb88d9263bf47083e5dd887bdb11018d123360118.scope: Consumed 1.248s CPU time.
Oct 11 02:19:51 compute-0 podman[424184]: 2025-10-11 02:19:51.699713296 +0000 UTC m=+1.578320653 container died 098234b1910adc6d3e815a4cb88d9263bf47083e5dd887bdb11018d123360118 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pedantic_pasteur, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:19:51 compute-0 systemd[1]: var-lib-containers-storage-overlay-975e684641753ad5f708ee410ba0c49daa639eb64c02461e4432fc18607b9d26-merged.mount: Deactivated successfully.
Oct 11 02:19:51 compute-0 podman[424184]: 2025-10-11 02:19:51.780427077 +0000 UTC m=+1.659034424 container remove 098234b1910adc6d3e815a4cb88d9263bf47083e5dd887bdb11018d123360118 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pedantic_pasteur, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
Oct 11 02:19:51 compute-0 systemd[1]: libpod-conmon-098234b1910adc6d3e815a4cb88d9263bf47083e5dd887bdb11018d123360118.scope: Deactivated successfully.
Oct 11 02:19:51 compute-0 sudo[424080]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:19:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:19:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:19:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:19:51 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 401775b7-49d2-471d-b797-266ad78ed306 does not exist
Oct 11 02:19:51 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev ee73387f-3acd-45bf-bac8-52794dbc1cb1 does not exist
Oct 11 02:19:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1257: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 8.0 KiB/s wr, 0 op/s
Oct 11 02:19:51 compute-0 sudo[424246]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:19:51 compute-0 sudo[424246]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:51 compute-0 sudo[424246]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:52 compute-0 sudo[424271]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:19:52 compute-0 sudo[424271]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:19:52 compute-0 sudo[424271]: pam_unix(sudo:session): session closed for user root
Oct 11 02:19:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:19:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:19:52 compute-0 ceph-mon[191930]: pgmap v1257: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 8.0 KiB/s wr, 0 op/s
Oct 11 02:19:53 compute-0 sshd-session[424195]: Failed password for invalid user ubuntu from 121.227.153.123 port 35622 ssh2
Oct 11 02:19:53 compute-0 nova_compute[356901]: 2025-10-11 02:19:53.438 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:19:53 compute-0 nova_compute[356901]: 2025-10-11 02:19:53.476 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:19:53 compute-0 nova_compute[356901]: 2025-10-11 02:19:53.477 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:19:53 compute-0 nova_compute[356901]: 2025-10-11 02:19:53.478 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:19:53 compute-0 nova_compute[356901]: 2025-10-11 02:19:53.479 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:19:53 compute-0 nova_compute[356901]: 2025-10-11 02:19:53.480 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:19:53 compute-0 nova_compute[356901]: 2025-10-11 02:19:53.481 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:19:53 compute-0 nova_compute[356901]: 2025-10-11 02:19:53.506 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:19:53 compute-0 nova_compute[356901]: 2025-10-11 02:19:53.507 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:19:53 compute-0 nova_compute[356901]: 2025-10-11 02:19:53.508 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:19:53 compute-0 nova_compute[356901]: 2025-10-11 02:19:53.508 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:19:53 compute-0 nova_compute[356901]: 2025-10-11 02:19:53.509 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:19:53 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:53.634 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=5, options={'arp_ns_explicit_output': 'true', 'mac_prefix': 'fe:55:97', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'ce:9c:4f:b4:85:9b'}, ipsec=False) old=SB_Global(nb_cfg=4) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:19:53 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:53.635 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 6 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274
Oct 11 02:19:53 compute-0 nova_compute[356901]: 2025-10-11 02:19:53.644 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1258: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:19:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:19:53 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3241016106' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:19:54 compute-0 nova_compute[356901]: 2025-10-11 02:19:54.024 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.515s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:19:54 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3241016106' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:19:54 compute-0 nova_compute[356901]: 2025-10-11 02:19:54.169 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:19:54 compute-0 nova_compute[356901]: 2025-10-11 02:19:54.169 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:19:54 compute-0 nova_compute[356901]: 2025-10-11 02:19:54.169 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:19:54 compute-0 nova_compute[356901]: 2025-10-11 02:19:54.221 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:54 compute-0 nova_compute[356901]: 2025-10-11 02:19:54.775 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:19:54 compute-0 nova_compute[356901]: 2025-10-11 02:19:54.779 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3941MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:19:54 compute-0 nova_compute[356901]: 2025-10-11 02:19:54.780 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:19:54 compute-0 nova_compute[356901]: 2025-10-11 02:19:54.781 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:19:54 compute-0 nova_compute[356901]: 2025-10-11 02:19:54.835 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:54.844 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:19:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:54.845 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:19:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:54.846 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:19:54 compute-0 nova_compute[356901]: 2025-10-11 02:19:54.895 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:19:54 compute-0 nova_compute[356901]: 2025-10-11 02:19:54.895 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:19:54 compute-0 nova_compute[356901]: 2025-10-11 02:19:54.896 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:19:54 compute-0 nova_compute[356901]: 2025-10-11 02:19:54.937 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:19:54 compute-0 sshd-session[424195]: Connection closed by invalid user ubuntu 121.227.153.123 port 35622 [preauth]
Oct 11 02:19:55 compute-0 ceph-mon[191930]: pgmap v1258: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:19:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:19:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:19:55 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1304199544' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:19:55 compute-0 nova_compute[356901]: 2025-10-11 02:19:55.507 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.570s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:19:55 compute-0 nova_compute[356901]: 2025-10-11 02:19:55.521 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:19:55 compute-0 nova_compute[356901]: 2025-10-11 02:19:55.548 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:19:55 compute-0 nova_compute[356901]: 2025-10-11 02:19:55.586 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:19:55 compute-0 nova_compute[356901]: 2025-10-11 02:19:55.586 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.806s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:19:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1259: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:56 compute-0 nova_compute[356901]: 2025-10-11 02:19:56.006 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:19:56 compute-0 nova_compute[356901]: 2025-10-11 02:19:56.028 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:19:56 compute-0 nova_compute[356901]: 2025-10-11 02:19:56.029 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:19:56 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1304199544' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:19:56 compute-0 podman[424346]: 2025-10-11 02:19:56.254358487 +0000 UTC m=+0.127729240 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_managed=true, container_name=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:19:56 compute-0 podman[424345]: 2025-10-11 02:19:56.258715406 +0000 UTC m=+0.128699858 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true)
Oct 11 02:19:56 compute-0 podman[424343]: 2025-10-11 02:19:56.267644691 +0000 UTC m=+0.151846663 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:19:56 compute-0 sshd-session[424339]: Invalid user ubuntu from 121.227.153.123 port 35634
Oct 11 02:19:56 compute-0 podman[424344]: 2025-10-11 02:19:56.290912871 +0000 UTC m=+0.163691421 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, container_name=ovn_controller, io.buildah.version=1.41.3)
Oct 11 02:19:56 compute-0 sshd-session[424339]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:19:56 compute-0 sshd-session[424339]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:19:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:19:56
Oct 11 02:19:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:19:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:19:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['.rgw.root', 'default.rgw.log', '.mgr', 'default.rgw.meta', 'images', 'backups', 'default.rgw.control', 'cephfs.cephfs.data', 'cephfs.cephfs.meta', 'vms', 'volumes']
Oct 11 02:19:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:19:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:19:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:19:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:19:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:19:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:19:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:19:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:19:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:19:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:19:57 compute-0 ceph-mon[191930]: pgmap v1259: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:19:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:19:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:19:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:19:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:19:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:19:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:19:57 compute-0 nova_compute[356901]: 2025-10-11 02:19:57.844 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:19:57 compute-0 nova_compute[356901]: 2025-10-11 02:19:57.845 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78" acquired by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:19:57 compute-0 nova_compute[356901]: 2025-10-11 02:19:57.865 2 DEBUG nova.compute.manager [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Starting instance... _do_build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2402
Oct 11 02:19:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1260: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:57 compute-0 nova_compute[356901]: 2025-10-11 02:19:57.954 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:19:57 compute-0 nova_compute[356901]: 2025-10-11 02:19:57.954 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:19:57 compute-0 nova_compute[356901]: 2025-10-11 02:19:57.966 2 DEBUG nova.virt.hardware [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Require both a host and instance NUMA topology to fit instance on host. numa_fit_instance_to_host /usr/lib/python3.9/site-packages/nova/virt/hardware.py:2368
Oct 11 02:19:57 compute-0 nova_compute[356901]: 2025-10-11 02:19:57.967 2 INFO nova.compute.claims [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Claim successful on node compute-0.ctlplane.example.com
Oct 11 02:19:58 compute-0 nova_compute[356901]: 2025-10-11 02:19:58.275 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:19:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:19:58 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/4190213887' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:19:58 compute-0 nova_compute[356901]: 2025-10-11 02:19:58.787 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.513s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:19:58 compute-0 nova_compute[356901]: 2025-10-11 02:19:58.799 2 DEBUG nova.compute.provider_tree [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:19:58 compute-0 nova_compute[356901]: 2025-10-11 02:19:58.816 2 DEBUG nova.scheduler.client.report [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:19:58 compute-0 nova_compute[356901]: 2025-10-11 02:19:58.839 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: held 0.884s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:19:58 compute-0 nova_compute[356901]: 2025-10-11 02:19:58.840 2 DEBUG nova.compute.manager [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Start building networks asynchronously for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2799
Oct 11 02:19:58 compute-0 sshd-session[424339]: Failed password for invalid user ubuntu from 121.227.153.123 port 35634 ssh2
Oct 11 02:19:58 compute-0 nova_compute[356901]: 2025-10-11 02:19:58.884 2 DEBUG nova.compute.manager [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Allocating IP information in the background. _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1952
Oct 11 02:19:58 compute-0 nova_compute[356901]: 2025-10-11 02:19:58.885 2 DEBUG nova.network.neutron [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] allocate_for_instance() allocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1156
Oct 11 02:19:58 compute-0 nova_compute[356901]: 2025-10-11 02:19:58.902 2 INFO nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Ignoring supplied device name: /dev/vda. Libvirt can't honour user-supplied dev names
Oct 11 02:19:58 compute-0 nova_compute[356901]: 2025-10-11 02:19:58.941 2 DEBUG nova.compute.manager [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Start building block device mappings for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2834
Oct 11 02:19:59 compute-0 nova_compute[356901]: 2025-10-11 02:19:59.035 2 DEBUG nova.compute.manager [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Start spawning the instance on the hypervisor. _build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2608
Oct 11 02:19:59 compute-0 nova_compute[356901]: 2025-10-11 02:19:59.037 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Creating instance directory _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4723
Oct 11 02:19:59 compute-0 nova_compute[356901]: 2025-10-11 02:19:59.038 2 INFO nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Creating image(s)
Oct 11 02:19:59 compute-0 ceph-mon[191930]: pgmap v1260: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:59 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/4190213887' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:19:59 compute-0 nova_compute[356901]: 2025-10-11 02:19:59.092 2 DEBUG nova.storage.rbd_utils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:19:59 compute-0 nova_compute[356901]: 2025-10-11 02:19:59.145 2 DEBUG nova.storage.rbd_utils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:19:59 compute-0 nova_compute[356901]: 2025-10-11 02:19:59.199 2 DEBUG nova.storage.rbd_utils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:19:59 compute-0 nova_compute[356901]: 2025-10-11 02:19:59.211 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:19:59 compute-0 nova_compute[356901]: 2025-10-11 02:19:59.248 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:59 compute-0 nova_compute[356901]: 2025-10-11 02:19:59.301 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d --force-share --output=json" returned: 0 in 0.090s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:19:59 compute-0 nova_compute[356901]: 2025-10-11 02:19:59.302 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "c2a4b3f256e07592b38b9a83d173b78feaa2ba6d" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:19:59 compute-0 nova_compute[356901]: 2025-10-11 02:19:59.304 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "c2a4b3f256e07592b38b9a83d173b78feaa2ba6d" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:19:59 compute-0 nova_compute[356901]: 2025-10-11 02:19:59.305 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "c2a4b3f256e07592b38b9a83d173b78feaa2ba6d" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:19:59 compute-0 nova_compute[356901]: 2025-10-11 02:19:59.350 2 DEBUG nova.storage.rbd_utils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:19:59 compute-0 nova_compute[356901]: 2025-10-11 02:19:59.361 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:19:59 compute-0 sshd-session[424339]: Connection closed by invalid user ubuntu 121.227.153.123 port 35634 [preauth]
Oct 11 02:19:59 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:19:59.637 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '5'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:19:59 compute-0 podman[157119]: time="2025-10-11T02:19:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:19:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:19:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:19:59 compute-0 nova_compute[356901]: 2025-10-11 02:19:59.785 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.424s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:19:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:19:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9031 "" "Go-http-client/1.1"
Oct 11 02:19:59 compute-0 nova_compute[356901]: 2025-10-11 02:19:59.867 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:19:59 compute-0 unix_chkpwd[424564]: password check failed for user (root)
Oct 11 02:19:59 compute-0 sshd-session[424481]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.33  user=root
Oct 11 02:19:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1261: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:19:59 compute-0 nova_compute[356901]: 2025-10-11 02:19:59.953 2 DEBUG nova.storage.rbd_utils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] resizing rbd image d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk to 1073741824 resize /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:288
Oct 11 02:20:00 compute-0 nova_compute[356901]: 2025-10-11 02:20:00.190 2 DEBUG nova.objects.instance [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lazy-loading 'migration_context' on Instance uuid d60d7ea1-5d00-4902-90e6-3ae67eb09a78 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:20:00 compute-0 nova_compute[356901]: 2025-10-11 02:20:00.261 2 DEBUG nova.storage.rbd_utils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk.eph0 does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:20:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:20:00 compute-0 nova_compute[356901]: 2025-10-11 02:20:00.372 2 DEBUG nova.storage.rbd_utils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk.eph0 does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:20:00 compute-0 nova_compute[356901]: 2025-10-11 02:20:00.401 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/ephemeral_1_0706d66 --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:20:00 compute-0 nova_compute[356901]: 2025-10-11 02:20:00.489 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/ephemeral_1_0706d66 --force-share --output=json" returned: 0 in 0.089s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:20:00 compute-0 nova_compute[356901]: 2025-10-11 02:20:00.490 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "ephemeral_1_0706d66" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:20:00 compute-0 nova_compute[356901]: 2025-10-11 02:20:00.492 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "ephemeral_1_0706d66" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:20:00 compute-0 nova_compute[356901]: 2025-10-11 02:20:00.492 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "ephemeral_1_0706d66" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:20:00 compute-0 nova_compute[356901]: 2025-10-11 02:20:00.535 2 DEBUG nova.storage.rbd_utils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk.eph0 does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:20:00 compute-0 nova_compute[356901]: 2025-10-11 02:20:00.543 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/ephemeral_1_0706d66 d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk.eph0 --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:20:00 compute-0 nova_compute[356901]: 2025-10-11 02:20:00.808 2 DEBUG nova.network.neutron [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Successfully updated port: a7108c4c-c96c-4354-a4bf-99b1d2160514 _update_port /usr/lib/python3.9/site-packages/nova/network/neutron.py:586
Oct 11 02:20:00 compute-0 nova_compute[356901]: 2025-10-11 02:20:00.827 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:20:00 compute-0 nova_compute[356901]: 2025-10-11 02:20:00.828 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquired lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:20:00 compute-0 nova_compute[356901]: 2025-10-11 02:20:00.828 2 DEBUG nova.network.neutron [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.038 2 DEBUG nova.network.neutron [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Instance cache missing network info. _get_preexisting_port_ids /usr/lib/python3.9/site-packages/nova/network/neutron.py:3323
Oct 11 02:20:01 compute-0 ceph-mon[191930]: pgmap v1261: 321 pgs: 321 active+clean; 78 MiB data, 198 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.114 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/ephemeral_1_0706d66 d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk.eph0 --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.571s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.263 2 DEBUG nova.compute.manager [req-83f1e2d4-63b8-4779-bf7b-7cd5be4fa799 req-a40b2dd0-9ddb-49f9-b803-457df6847f65 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Received event network-changed-a7108c4c-c96c-4354-a4bf-99b1d2160514 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.263 2 DEBUG nova.compute.manager [req-83f1e2d4-63b8-4779-bf7b-7cd5be4fa799 req-a40b2dd0-9ddb-49f9-b803-457df6847f65 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Refreshing instance network info cache due to event network-changed-a7108c4c-c96c-4354-a4bf-99b1d2160514. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.263 2 DEBUG oslo_concurrency.lockutils [req-83f1e2d4-63b8-4779-bf7b-7cd5be4fa799 req-a40b2dd0-9ddb-49f9-b803-457df6847f65 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.353 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Created local disks _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4857
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.354 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Ensure instance console log exists: /var/lib/nova/instances/d60d7ea1-5d00-4902-90e6-3ae67eb09a78/console.log _ensure_console_log_for_instance /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4609
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.355 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "vgpu_resources" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.356 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "vgpu_resources" acquired by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.357 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "vgpu_resources" "released" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:20:01 compute-0 openstack_network_exporter[374316]: ERROR   02:20:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:20:01 compute-0 openstack_network_exporter[374316]: ERROR   02:20:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:20:01 compute-0 openstack_network_exporter[374316]: ERROR   02:20:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:20:01 compute-0 openstack_network_exporter[374316]: ERROR   02:20:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:20:01 compute-0 openstack_network_exporter[374316]: ERROR   02:20:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:20:01 compute-0 sshd-session[424559]: Invalid user ubuntu from 121.227.153.123 port 57104
Oct 11 02:20:01 compute-0 podman[424749]: 2025-10-11 02:20:01.811520674 +0000 UTC m=+0.130618092 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_id=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, container_name=multipathd, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']})
Oct 11 02:20:01 compute-0 podman[424750]: 2025-10-11 02:20:01.821577543 +0000 UTC m=+0.133134859 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, config_id=iscsid, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=iscsid)
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.907 2 DEBUG nova.network.neutron [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Updating instance_info_cache with network_info: [{"id": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "address": "fa:16:3e:c2:ee:14", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.80", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa7108c4c-c9", "ovs_interfaceid": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:20:01 compute-0 sshd-session[424559]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:20:01 compute-0 sshd-session[424559]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.928 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Releasing lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.929 2 DEBUG nova.compute.manager [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Instance network_info: |[{"id": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "address": "fa:16:3e:c2:ee:14", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.80", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa7108c4c-c9", "ovs_interfaceid": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}]| _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1967
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.929 2 DEBUG oslo_concurrency.lockutils [req-83f1e2d4-63b8-4779-bf7b-7cd5be4fa799 req-a40b2dd0-9ddb-49f9-b803-457df6847f65 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.930 2 DEBUG nova.network.neutron [req-83f1e2d4-63b8-4779-bf7b-7cd5be4fa799 req-a40b2dd0-9ddb-49f9-b803-457df6847f65 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Refreshing network info cache for port a7108c4c-c96c-4354-a4bf-99b1d2160514 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:20:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1262: 321 pgs: 321 active+clean; 92 MiB data, 203 MiB used, 60 GiB / 60 GiB avail; 5.2 KiB/s rd, 460 KiB/s wr, 9 op/s
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.936 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Start _get_guest_xml network_info=[{"id": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "address": "fa:16:3e:c2:ee:14", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.80", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa7108c4c-c9", "ovs_interfaceid": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}] disk_info={'disk_bus': 'virtio', 'cdrom_bus': 'sata', 'mapping': {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, 'disk.config': {'bus': 'sata', 'dev': 'sda', 'type': 'cdrom'}}} image_meta=ImageMeta(checksum='b874c39491a2377b8490f5f1e89761a4',container_format='bare',created_at=2025-10-11T02:17:33Z,direct_url=<?>,disk_format='qcow2',id=a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7,min_disk=0,min_ram=0,name='cirros',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=16300544,status='active',tags=<?>,updated_at=2025-10-11T02:17:37Z,virtual_size=<?>,visibility=<?>) rescue=None block_device_info={'root_device_name': '/dev/vda', 'image': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'boot_index': 0, 'device_name': '/dev/vda', 'size': 0, 'encryption_format': None, 'image_id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}], 'ephemerals': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'device_name': '/dev/vdb', 'size': 1, 'encryption_format': None}], 'block_device_mapping': [], 'swap': None} _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7549
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.945 2 WARNING nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.957 2 DEBUG nova.virt.libvirt.host [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V1... _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1653
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.958 2 DEBUG nova.virt.libvirt.host [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CPU controller missing on host. _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1663
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.965 2 DEBUG nova.virt.libvirt.host [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V2... _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1672
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.965 2 DEBUG nova.virt.libvirt.host [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CPU controller found on host. _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1679
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.966 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CPU mode 'host-model' models '' was chosen, with extra flags: '' _get_guest_cpu_model_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:5396
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.967 2 DEBUG nova.virt.hardware [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Getting desirable topologies for flavor Flavor(created_at=2025-10-11T02:17:41Z,deleted=False,deleted_at=None,description=None,disabled=False,ephemeral_gb=1,extra_specs={},flavorid='486e1451-345c-45d6-b075-f4717e759025',id=1,is_public=True,memory_mb=512,name='m1.small',projects=<?>,root_gb=1,rxtx_factor=1.0,swap=0,updated_at=None,vcpu_weight=0,vcpus=1) and image_meta ImageMeta(checksum='b874c39491a2377b8490f5f1e89761a4',container_format='bare',created_at=2025-10-11T02:17:33Z,direct_url=<?>,disk_format='qcow2',id=a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7,min_disk=0,min_ram=0,name='cirros',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=16300544,status='active',tags=<?>,updated_at=2025-10-11T02:17:37Z,virtual_size=<?>,visibility=<?>), allow threads: True _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:563
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.968 2 DEBUG nova.virt.hardware [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Flavor limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:348
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.968 2 DEBUG nova.virt.hardware [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Image limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:352
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.969 2 DEBUG nova.virt.hardware [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Flavor pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:388
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.969 2 DEBUG nova.virt.hardware [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Image pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:392
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.969 2 DEBUG nova.virt.hardware [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Chose sockets=0, cores=0, threads=0; limits were sockets=65536, cores=65536, threads=65536 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:430
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.970 2 DEBUG nova.virt.hardware [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Topology preferred VirtCPUTopology(cores=0,sockets=0,threads=0), maximum VirtCPUTopology(cores=65536,sockets=65536,threads=65536) _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:569
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.970 2 DEBUG nova.virt.hardware [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Build topologies for 1 vcpu(s) 1:1:1 _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:471
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.970 2 DEBUG nova.virt.hardware [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Got 1 possible topologies _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:501
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.971 2 DEBUG nova.virt.hardware [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Possible topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:575
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.971 2 DEBUG nova.virt.hardware [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Sorted desired topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:577
Oct 11 02:20:01 compute-0 nova_compute[356901]: 2025-10-11 02:20:01.976 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:20:01 compute-0 sshd-session[424481]: Failed password for root from 193.46.255.33 port 10308 ssh2
Oct 11 02:20:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:20:02 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2705497987' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:20:02 compute-0 nova_compute[356901]: 2025-10-11 02:20:02.480 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.504s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:20:02 compute-0 nova_compute[356901]: 2025-10-11 02:20:02.483 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:20:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:20:02 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2775440822' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:20:02 compute-0 nova_compute[356901]: 2025-10-11 02:20:02.941 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.458s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:20:02 compute-0 nova_compute[356901]: 2025-10-11 02:20:02.980 2 DEBUG nova.storage.rbd_utils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:20:02 compute-0 nova_compute[356901]: 2025-10-11 02:20:02.990 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:20:03 compute-0 ceph-mon[191930]: pgmap v1262: 321 pgs: 321 active+clean; 92 MiB data, 203 MiB used, 60 GiB / 60 GiB avail; 5.2 KiB/s rd, 460 KiB/s wr, 9 op/s
Oct 11 02:20:03 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2705497987' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:20:03 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2775440822' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:20:03 compute-0 unix_chkpwd[424849]: password check failed for user (root)
Oct 11 02:20:03 compute-0 sshd-session[424559]: Failed password for invalid user ubuntu from 121.227.153.123 port 57104 ssh2
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.363 2 DEBUG nova.network.neutron [req-83f1e2d4-63b8-4779-bf7b-7cd5be4fa799 req-a40b2dd0-9ddb-49f9-b803-457df6847f65 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Updated VIF entry in instance network info cache for port a7108c4c-c96c-4354-a4bf-99b1d2160514. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.364 2 DEBUG nova.network.neutron [req-83f1e2d4-63b8-4779-bf7b-7cd5be4fa799 req-a40b2dd0-9ddb-49f9-b803-457df6847f65 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Updating instance_info_cache with network_info: [{"id": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "address": "fa:16:3e:c2:ee:14", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.80", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa7108c4c-c9", "ovs_interfaceid": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.382 2 DEBUG oslo_concurrency.lockutils [req-83f1e2d4-63b8-4779-bf7b-7cd5be4fa799 req-a40b2dd0-9ddb-49f9-b803-457df6847f65 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:20:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:20:03 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3299734709' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.550 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.560s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.554 2 DEBUG nova.virt.libvirt.vif [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:19:56Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description=None,display_name='vn-vgckve2-ittzoa6m3dmq-egfg3ceao3k4-vnf-rvnztbwt2zgh',ec2_ids=EC2Ids,ephemeral_gb=1,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(1),hidden=False,host='compute-0.ctlplane.example.com',hostname='vn-vgckve2-ittzoa6m3dmq-egfg3ceao3k4-vnf-rvnztbwt2zgh',id=2,image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',info_cache=InstanceInfoCache,instance_type_id=1,kernel_id='',key_data=None,key_name=None,keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=512,metadata={metering.server_group='3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='97026531b3404a11869cb85a059c4a0d',ramdisk_id='',reservation_id='r-hkja6gj9',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='reader,member,admin',image_base_image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_min_disk='1',image_min_ram='0',image_owner_specified.openstack.md5='',image_owner_specified.openstack.object='images/cirros',image_owner_specified.openstack.sha256='',network_allocated='True',owner_project_name='admin',owner_user_name='admin'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:19:58Z,user_data='Q29udGVudC1UeXBlOiBtdWx0aXBhcnQvbWl4ZWQ7IGJvdW5kYXJ5PSI9PT09PT09PT09PT09PT0zMTQ2MTUzMDI2MzE5OTEyMDEyPT0iCk1JTUUtVmVyc2lvbjogMS4wCgotLT09PT09PT09PT09PT09PTMxNDYxNTMwMjYzMTk5MTIwMTI9PQpDb250ZW50LVR5cGU6IHRleHQvY2xvdWQtY29uZmlnOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2xvdWQtY29uZmlnIgoKCgojIENhcHR1cmUgYWxsIHN1YnByb2Nlc3Mgb3V0cHV0IGludG8gYSBsb2dmaWxlCiMgVXNlZnVsIGZvciB0cm91Ymxlc2hvb3RpbmcgY2xvdWQtaW5pdCBpc3N1ZXMKb3V0cHV0OiB7YWxsOiAnfCB0ZWUgLWEgL3Zhci9sb2cvY2xvdWQtaW5pdC1vdXRwdXQubG9nJ30KCi0tPT09PT09PT09PT09PT09MzE0NjE1MzAyNjMxOTkxMjAxMj09CkNvbnRlbnQtVHlwZTogdGV4dC9jbG91ZC1ib290aG9vazsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImJvb3Rob29rLnNoIgoKIyEvdXNyL2Jpbi9iYXNoCgojIEZJWE1FKHNoYWRvd2VyKSB0aGlzIGlzIGEgd29ya2Fyb3VuZCBmb3IgY2xvdWQtaW5pdCAwLjYuMyBwcmVzZW50IGluIFVidW50dQojIDEyLjA0IExUUzoKIyBodHRwczovL2J1Z3MubGF1bmNocGFkLm5ldC9oZWF0LytidWcvMTI1NzQxMAojCiMgVGhlIG9sZCBjbG91ZC1pbml0IGRvZXNuJ3QgY3JlYXRlIHRoZSB1c2VycyBkaXJlY3RseSBzbyB0aGUgY29tbWFuZHMgdG8gZG8KIyB0aGlzIGFyZSBpbmplY3RlZCB0aG91Z2ggbm92YV91dGlscy5weS4KIwojIE9uY2Ugd2UgZHJvcCBzdXBwb3J0IGZvciAwLjYuMywgd2UgY2FuIHNhZmVseSByZW1vdmUgdGhpcy4KCgojIGluIGNhc2UgaGVhdC1jZm50b29scyBoYXMgY
mVlbiBpbnN0YWxsZWQgZnJvbSBwYWNrYWdlIGJ1dCBubyBzeW1saW5rcwojIGFyZSB5ZXQgaW4gL29wdC9hd3MvYmluLwpjZm4tY3JlYXRlLWF3cy1zeW1saW5rcwoKIyBEbyBub3QgcmVtb3ZlIC0gdGhlIGNsb3VkIGJvb3Rob29rIHNob3VsZCBhbHdheXMgcmV0dXJuIHN1Y2Nlc3MKZXhpdCAwCgotLT09PT09PT09PT09PT09PTMxNDYxNTMwMjYzMTk5MTIwMTI9PQpDb250ZW50LVR5cGU6IHRleHQvcGFydC1oYW5kbGVyOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0icGFydC1oYW5kbGVyLnB5IgoKIyBwYXJ0LWhhbmRsZXIKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wCiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZWQgdW5kZXIgdGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBvcwppbXBvcnQgc3lzCgoKZGVmIGxpc3RfdHlwZXMoKToKICAgIHJldHVybiBbInRleHQveC1jZm5pbml0ZGF0YSJdCgoKZGVmIGhhbmRsZV9wYXJ0KGRhdGEsIGN0eXBlLCBmaWxlbmFtZSwgcGF5bG9hZCk6CiAgICBpZiBjdHlwZSA9PSAiX19iZWdpbl9fIjoKICAgICAgICB0cnk6CiAgICAgICAgICAgIG9zLm1ha2VkaXJzKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzJywgaW50KCI3MDAiLCA4KSkKICAgICAgICBleGNlcHQgT1NFcnJvcjoKICAgICAgICAgICAgZXhfdHlwZSwgZSwgdGIgPSBzeXMuZXhjX2luZm8oKQogICAgICAgICAgICBpZiBlLmVycm5vICE9IGVycm5vLkVFWElTVDoKICAgICAgICAgICAgICAgIHJhaXNlCiAgICAgICAgcmV0dXJuCgogICAgaWYgY3R5cGUgPT0gIl9fZW5kX18iOgogICAgICAgIHJldHVybgoKICAgIHRpbWVzdGFtcCA9IGRhdGV0aW1lLmRhdGV0aW1lLm5vdygpCiAgICB3aXRoIG9wZW4oJy92YXIvbG9nL3BhcnQtaGFuZGxlci5sb2cnLCAnYScpIGFzIGxvZzoKICAgICAgICBsb2cud3JpdGUoJyVzIGZpbGVuYW1lOiVzLCBjdHlwZTolc1xuJyAlICh0aW1lc3RhbXAsIGZpbGVuYW1lLCBjdHlwZSkpCgogICAgaWYgY3R5cGUgPT0gJ3RleHQveC1jZm5pbml0ZGF0YSc6CiAgICAgICAgd2l0aCBvcGVuKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzLyVzJyAlIGZpbGVuYW1lLCAndycpIGFzIGY6CiAgICAgICAgICAgIGYud3JpdGUocGF5bG9hZCkKCiAgICAgICAgIyBUT0RPKHNkYWtlKSBob3BlZnVsbHkgdGVtcG9yYXJ5IHVudGlsIHVzZXJzIG1vdmUgdG8gaGVhdC1jZm50b29scy0xLjMKICAgICAgICB3aXRoIG9wZW4oJy92YXIvbGliL2Nsb3VkL2RhdGEvJXMnICUgZmlsZW5hbWUsICd3JykgYXMgZjoKICAgICAgICAgICAgZi53cml0ZShwYXlsb2FkKQoKLS09PT09PT09PT09PT09PT0zMTQ2MTUzMDI2MzE5OTEyMDEyPT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtY2ZuaW5pdGRhdGE7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJjZm4tdXNlcmRhdGEiCgoKLS09PT09PT09PT09PT09PT0zMTQ2MTUzMDI2MzE5OTEyMDEyPT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtc2hlbGxzY3JpcHQ7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJsb2d1c2VyZGF0YS5weSIKCiMhL3Vzci9iaW4vZW52IHB5dGhvbjMKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wCiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZWQgdW5kZXIg
dGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBsb2dnaW5nCmltcG9ydCBvcwppbXBvcnQgc3VicHJvY2VzcwppbXBvcnQgc3lzCgoKVkFSX1BBVEggPSAnL3Zhci9saWIvaGVhdC1jZm50b29scycKTE9HID0gbG9nZ2luZy5nZXRMb2dnZXIoJ2hlYXQtcHJvdmlzaW9uJykKCgpkZWYgaW5pdF9sb2dnaW5nKCk6CiAgICBMT0cuc2V0TGV2ZWwobG9nZ2luZy5JTkZPKQogICAgTE9HLmFkZEhhbmRsZXIobG9nZ2luZy5TdHJlYW1IYW5kbGVyKCkpCiAgICBmaCA9IGxvZ2dpbmcuRmlsZUhhbmRsZXIoIi92YXIvbG9nL2hlYXQtcHJvdmlzaW9uLmxvZyIpCiAgICBvcy5jaG1vZChmaC5iYXNlRmlsZW5hbWUsIGludCgiNjAwIiwgOCkpCiAgICBMT0cuYWRkSGFuZGxlcihmaCkKCgpkZWYgY2FsbChhcmdzKToKCiAgICBjbGFzcyBMb2dTdHJlYW0ob2JqZWN0KToKCiAgICAgICAgZGVmIHdyaXRlKHNlbGYsIGRhdGEpOgogICAgICAgICAgICBMT0cuaW5mbyhkYXRhKQoKICAgIExPRy5pbmZvKCclc1xuJywgJyAnLmpvaW4oYXJncykpICAjIG5vcWEKICAgIHRyeToKICAgICAgICBscyA9IExvZ1N0cmVhbSgpCiAgICAgICAgcCA9IHN1YnByb2Nlc3MuUG9wZW4oYXJncywgc3
Oct 11 02:20:03 compute-0 nova_compute[356901]: Rkb3V0PXN1YnByb2Nlc3MuUElQRSwKICAgICAgICAgICAgICAgICAgICAgICAgICAgICBzdGRlcnI9c3VicHJvY2Vzcy5QSVBFKQogICAgICAgIGRhdGEgPSBwLmNvbW11bmljYXRlKCkKICAgICAgICBpZiBkYXRhOgogICAgICAgICAgICBmb3IgeCBpbiBkYXRhOgogICAgICAgICAgICAgICAgbHMud3JpdGUoeCkKICAgIGV4Y2VwdCBPU0Vycm9yOgogICAgICAgIGV4X3R5cGUsIGV4LCB0YiA9IHN5cy5leGNfaW5mbygpCiAgICAgICAgaWYgZXguZXJybm8gPT0gZXJybm8uRU5PRVhFQzoKICAgICAgICAgICAgTE9HLmVycm9yKCdVc2VyZGF0YSBlbXB0eSBvciBub3QgZXhlY3V0YWJsZTogJXMnLCBleCkKICAgICAgICAgICAgcmV0dXJuIG9zLkVYX09LCiAgICAgICAgZWxzZToKICAgICAgICAgICAgTE9HLmVycm9yKCdPUyBlcnJvciBydW5uaW5nIHVzZXJkYXRhOiAlcycsIGV4KQogICAgICAgICAgICByZXR1cm4gb3MuRVhfT1NFUlIKICAgIGV4Y2VwdCBFeGNlcHRpb246CiAgICAgICAgZXhfdHlwZSwgZXgsIHRiID0gc3lzLmV4Y19pbmZvKCkKICAgICAgICBMT0cuZXJyb3IoJ1Vua25vd24gZXJyb3IgcnVubmluZyB1c2VyZGF0YTogJXMnLCBleCkKICAgICAgICByZXR1cm4gb3MuRVhfU09GVFdBUkUKICAgIHJldHVybiBwLnJldHVybmNvZGUKCgpkZWYgbWFpbigpOgogICAgdXNlcmRhdGFfcGF0aCA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ2Nmbi11c2VyZGF0YScpCiAgICBvcy5jaG1vZCh1c2VyZGF0YV9wYXRoLCBpbnQoIjcwMCIsIDgpKQoKICAgIExPRy5pbmZvKCdQcm92aXNpb24gYmVnYW46ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICByZXR1cm5jb2RlID0gY2FsbChbdXNlcmRhdGFfcGF0aF0pCiAgICBMT0cuaW5mbygnUHJvdmlzaW9uIGRvbmU6ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICBpZiByZXR1cm5jb2RlOgogICAgICAgIHJldHVybiByZXR1cm5jb2RlCgoKaWYgX19uYW1lX18gPT0gJ19fbWFpbl9fJzoKICAgIGluaXRfbG9nZ2luZygpCgogICAgY29kZSA9IG1haW4oKQogICAgaWYgY29kZToKICAgICAgICBMT0cuZXJyb3IoJ1Byb3Zpc2lvbiBmYWlsZWQgd2l0aCBleGl0IGNvZGUgJXMnLCBjb2RlKQogICAgICAgIHN5cy5leGl0KGNvZGUpCgogICAgcHJvdmlzaW9uX2xvZyA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ3Byb3Zpc2lvbi1maW5pc2hlZCcpCiAgICAjIHRvdWNoIHRoZSBmaWxlIHNvIGl0IGlzIHRpbWVzdGFtcGVkIHdpdGggd2hlbiBmaW5pc2hlZAogICAgd2l0aCBvcGVuKHByb3Zpc2lvbl9sb2csICdhJyk6CiAgICAgICAgb3MudXRpbWUocHJvdmlzaW9uX2xvZywgTm9uZSkKCi0tPT09PT09PT09PT09PT09MzE0NjE1MzAyNjMxOTkxMjAxMj09CkNvbnRlbnQtVHlwZTogdGV4dC94LWNmbmluaXRkYXRhOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2ZuLW1ldGFkYXRhLXNlcnZlciIKCmh0dHBzOi8vaGVhdC1jZm5hcGktaW50ZXJuYWwub3BlbnN0YWNrLnN2Yzo4MDAwL3YxLwotLT09PT09PT09PT09PT09PTMxNDYxNTMwMjYzMTk5MTIwMTI9PQpDb250ZW50LVR5cGU6IHRleHQveC1jZm5pbml0ZGF0YTsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImNmbi1ib3RvLWNmZyIKCltCb3RvXQpkZWJ1ZyA9IDAKaXNfc2VjdXJlID0gMApodHRwc192YWxpZGF0ZV9jZXJ0aWZpY2F0ZXMgPSAxCmNmbl9yZWdpb25fbmFtZSA9IGhlYXQKY2ZuX3JlZ2lvbl9lbmRwb2ludCA9IGhlYXQtY2ZuYXBpLWludGVybmFsLm9wZW5zdGFjay5zdmMKLS09PT09PT09PT09PT09PT0zMTQ2MTUzMDI2MzE5OTEyMDEyPT0tLQo=',user_id='d215f3ebbc07435493ccd666fc80109d',uuid=d60d7ea1-5d00-4902-90e6-3ae67eb09a78,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "address": "fa:16:3e:c2:ee:14", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.80", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", 
"details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa7108c4c-c9", "ovs_interfaceid": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} virt_type=kvm get_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:563
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.556 2 DEBUG nova.network.os_vif_util [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converting VIF {"id": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "address": "fa:16:3e:c2:ee:14", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.80", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa7108c4c-c9", "ovs_interfaceid": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:20:03 compute-0 rsyslogd[187706]: message too long (8192) with configured size 8096, begin of message is: 2025-10-11 02:20:03.554 2 DEBUG nova.virt.libvirt.vif [None req-bcab2cd3-6c42-48 [v8.2506.0-2.el9 try https://www.rsyslog.com/e/2445 ]
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.558 2 DEBUG nova.network.os_vif_util [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:c2:ee:14,bridge_name='br-int',has_traffic_filtering=True,id=a7108c4c-c96c-4354-a4bf-99b1d2160514,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tapa7108c4c-c9') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.561 2 DEBUG nova.objects.instance [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lazy-loading 'pci_devices' on Instance uuid d60d7ea1-5d00-4902-90e6-3ae67eb09a78 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.578 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] End _get_guest_xml xml=<domain type="kvm">
Oct 11 02:20:03 compute-0 nova_compute[356901]:   <uuid>d60d7ea1-5d00-4902-90e6-3ae67eb09a78</uuid>
Oct 11 02:20:03 compute-0 nova_compute[356901]:   <name>instance-00000002</name>
Oct 11 02:20:03 compute-0 nova_compute[356901]:   <memory>524288</memory>
Oct 11 02:20:03 compute-0 nova_compute[356901]:   <vcpu>1</vcpu>
Oct 11 02:20:03 compute-0 nova_compute[356901]:   <metadata>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.1">
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <nova:package version="27.5.2-0.20250829104910.6f8decf.el9"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <nova:name>vn-vgckve2-ittzoa6m3dmq-egfg3ceao3k4-vnf-rvnztbwt2zgh</nova:name>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <nova:creationTime>2025-10-11 02:20:01</nova:creationTime>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <nova:flavor name="m1.small">
Oct 11 02:20:03 compute-0 nova_compute[356901]:         <nova:memory>512</nova:memory>
Oct 11 02:20:03 compute-0 nova_compute[356901]:         <nova:disk>1</nova:disk>
Oct 11 02:20:03 compute-0 nova_compute[356901]:         <nova:swap>0</nova:swap>
Oct 11 02:20:03 compute-0 nova_compute[356901]:         <nova:ephemeral>1</nova:ephemeral>
Oct 11 02:20:03 compute-0 nova_compute[356901]:         <nova:vcpus>1</nova:vcpus>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       </nova:flavor>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <nova:owner>
Oct 11 02:20:03 compute-0 nova_compute[356901]:         <nova:user uuid="d215f3ebbc07435493ccd666fc80109d">admin</nova:user>
Oct 11 02:20:03 compute-0 nova_compute[356901]:         <nova:project uuid="97026531b3404a11869cb85a059c4a0d">admin</nova:project>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       </nova:owner>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <nova:root type="image" uuid="a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <nova:ports>
Oct 11 02:20:03 compute-0 nova_compute[356901]:         <nova:port uuid="a7108c4c-c96c-4354-a4bf-99b1d2160514">
Oct 11 02:20:03 compute-0 nova_compute[356901]:           <nova:ip type="fixed" address="192.168.0.80" ipVersion="4"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:         </nova:port>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       </nova:ports>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     </nova:instance>
Oct 11 02:20:03 compute-0 nova_compute[356901]:   </metadata>
Oct 11 02:20:03 compute-0 nova_compute[356901]:   <sysinfo type="smbios">
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <system>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <entry name="manufacturer">RDO</entry>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <entry name="product">OpenStack Compute</entry>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <entry name="version">27.5.2-0.20250829104910.6f8decf.el9</entry>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <entry name="serial">d60d7ea1-5d00-4902-90e6-3ae67eb09a78</entry>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <entry name="uuid">d60d7ea1-5d00-4902-90e6-3ae67eb09a78</entry>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <entry name="family">Virtual Machine</entry>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     </system>
Oct 11 02:20:03 compute-0 nova_compute[356901]:   </sysinfo>
Oct 11 02:20:03 compute-0 nova_compute[356901]:   <os>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <type arch="x86_64" machine="q35">hvm</type>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <boot dev="hd"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <smbios mode="sysinfo"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:   </os>
Oct 11 02:20:03 compute-0 nova_compute[356901]:   <features>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <acpi/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <apic/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <vmcoreinfo/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:   </features>
Oct 11 02:20:03 compute-0 nova_compute[356901]:   <clock offset="utc">
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <timer name="pit" tickpolicy="delay"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <timer name="rtc" tickpolicy="catchup"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <timer name="hpet" present="no"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:   </clock>
Oct 11 02:20:03 compute-0 nova_compute[356901]:   <cpu mode="host-model" match="exact">
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <topology sockets="1" cores="1" threads="1"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:20:03 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk">
Oct 11 02:20:03 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:20:03 compute-0 sshd-session[424559]: Connection closed by invalid user ubuntu 121.227.153.123 port 57104 [preauth]
Oct 11 02:20:03 compute-0 nova_compute[356901]:       </source>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:20:03 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <target dev="vda" bus="virtio"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk.eph0">
Oct 11 02:20:03 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       </source>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:20:03 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <target dev="vdb" bus="virtio"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <disk type="network" device="cdrom">
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk.config">
Oct 11 02:20:03 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       </source>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:20:03 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <target dev="sda" bus="sata"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <interface type="ethernet">
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <mac address="fa:16:3e:c2:ee:14"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <driver name="vhost" rx_queue_size="512"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <mtu size="1442"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <target dev="tapa7108c4c-c9"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <serial type="pty">
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <log file="/var/lib/nova/instances/d60d7ea1-5d00-4902-90e6-3ae67eb09a78/console.log" append="off"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     </serial>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <graphics type="vnc" autoport="yes" listen="::0"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <video>
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     </video>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <input type="tablet" bus="usb"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <rng model="virtio">
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <backend model="random">/dev/urandom</backend>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <controller type="usb" index="0"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     <memballoon model="virtio">
Oct 11 02:20:03 compute-0 nova_compute[356901]:       <stats period="10"/>
Oct 11 02:20:03 compute-0 nova_compute[356901]:     </memballoon>
Oct 11 02:20:03 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:20:03 compute-0 nova_compute[356901]: </domain>
Oct 11 02:20:03 compute-0 nova_compute[356901]:  _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7555
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.581 2 DEBUG nova.compute.manager [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Preparing to wait for external event network-vif-plugged-a7108c4c-c96c-4354-a4bf-99b1d2160514 prepare_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:283
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.581 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.582 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" acquired by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.582 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" "released" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.584 2 DEBUG nova.virt.libvirt.vif [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:19:56Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description=None,display_name='vn-vgckve2-ittzoa6m3dmq-egfg3ceao3k4-vnf-rvnztbwt2zgh',ec2_ids=EC2Ids,ephemeral_gb=1,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(1),hidden=False,host='compute-0.ctlplane.example.com',hostname='vn-vgckve2-ittzoa6m3dmq-egfg3ceao3k4-vnf-rvnztbwt2zgh',id=2,image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',info_cache=InstanceInfoCache,instance_type_id=1,kernel_id='',key_data=None,key_name=None,keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=512,metadata={metering.server_group='3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=PciDeviceList,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='97026531b3404a11869cb85a059c4a0d',ramdisk_id='',reservation_id='r-hkja6gj9',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='reader,member,admin',image_base_image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_min_disk='1',image_min_ram='0',image_owner_specified.openstack.md5='',image_owner_specified.openstack.object='images/cirros',image_owner_specified.openstack.sha256='',network_allocated='True',owner_project_name='admin',owner_user_name='admin'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:19:58Z,user_data='Q29udGVudC1UeXBlOiBtdWx0aXBhcnQvbWl4ZWQ7IGJvdW5kYXJ5PSI9PT09PT09PT09PT09PT0zMTQ2MTUzMDI2MzE5OTEyMDEyPT0iCk1JTUUtVmVyc2lvbjogMS4wCgotLT09PT09PT09PT09PT09PTMxNDYxNTMwMjYzMTk5MTIwMTI9PQpDb250ZW50LVR5cGU6IHRleHQvY2xvdWQtY29uZmlnOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2xvdWQtY29uZmlnIgoKCgojIENhcHR1cmUgYWxsIHN1YnByb2Nlc3Mgb3V0cHV0IGludG8gYSBsb2dmaWxlCiMgVXNlZnVsIGZvciB0cm91Ymxlc2hvb3RpbmcgY2xvdWQtaW5pdCBpc3N1ZXMKb3V0cHV0OiB7YWxsOiAnfCB0ZWUgLWEgL3Zhci9sb2cvY2xvdWQtaW5pdC1vdXRwdXQubG9nJ30KCi0tPT09PT09PT09PT09PT09MzE0NjE1MzAyNjMxOTkxMjAxMj09CkNvbnRlbnQtVHlwZTogdGV4dC9jbG91ZC1ib290aG9vazsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImJvb3Rob29rLnNoIgoKIyEvdXNyL2Jpbi9iYXNoCgojIEZJWE1FKHNoYWRvd2VyKSB0aGlzIGlzIGEgd29ya2Fyb3VuZCBmb3IgY2xvdWQtaW5pdCAwLjYuMyBwcmVzZW50IGluIFVidW50dQojIDEyLjA0IExUUzoKIyBodHRwczovL2J1Z3MubGF1bmNocGFkLm5ldC9oZWF0LytidWcvMTI1NzQxMAojCiMgVGhlIG9sZCBjbG91ZC1pbml0IGRvZXNuJ3QgY3JlYXRlIHRoZSB1c2VycyBkaXJlY3RseSBzbyB0aGUgY29tbWFuZHMgdG8gZG8KIyB0aGlzIGFyZSBpbmplY3RlZCB0aG91Z2ggbm92YV91dGlscy5weS4KIwojIE9uY2Ugd2UgZHJvcCBzdXBwb3J0IGZvciAwLjYuMywgd2UgY2FuIHNhZmVseSByZW1vdmUgdGhpcy4KCgojIGluIGNhc2UgaGVhdC1jZm50b29
scyBoYXMgYmVlbiBpbnN0YWxsZWQgZnJvbSBwYWNrYWdlIGJ1dCBubyBzeW1saW5rcwojIGFyZSB5ZXQgaW4gL29wdC9hd3MvYmluLwpjZm4tY3JlYXRlLWF3cy1zeW1saW5rcwoKIyBEbyBub3QgcmVtb3ZlIC0gdGhlIGNsb3VkIGJvb3Rob29rIHNob3VsZCBhbHdheXMgcmV0dXJuIHN1Y2Nlc3MKZXhpdCAwCgotLT09PT09PT09PT09PT09PTMxNDYxNTMwMjYzMTk5MTIwMTI9PQpDb250ZW50LVR5cGU6IHRleHQvcGFydC1oYW5kbGVyOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0icGFydC1oYW5kbGVyLnB5IgoKIyBwYXJ0LWhhbmRsZXIKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wCiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZWQgdW5kZXIgdGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBvcwppbXBvcnQgc3lzCgoKZGVmIGxpc3RfdHlwZXMoKToKICAgIHJldHVybiBbInRleHQveC1jZm5pbml0ZGF0YSJdCgoKZGVmIGhhbmRsZV9wYXJ0KGRhdGEsIGN0eXBlLCBmaWxlbmFtZSwgcGF5bG9hZCk6CiAgICBpZiBjdHlwZSA9PSAiX19iZWdpbl9fIjoKICAgICAgICB0cnk6CiAgICAgICAgICAgIG9zLm1ha2VkaXJzKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzJywgaW50KCI3MDAiLCA4KSkKICAgICAgICBleGNlcHQgT1NFcnJvcjoKICAgICAgICAgICAgZXhfdHlwZSwgZSwgdGIgPSBzeXMuZXhjX2luZm8oKQogICAgICAgICAgICBpZiBlLmVycm5vICE9IGVycm5vLkVFWElTVDoKICAgICAgICAgICAgICAgIHJhaXNlCiAgICAgICAgcmV0dXJuCgogICAgaWYgY3R5cGUgPT0gIl9fZW5kX18iOgogICAgICAgIHJldHVybgoKICAgIHRpbWVzdGFtcCA9IGRhdGV0aW1lLmRhdGV0aW1lLm5vdygpCiAgICB3aXRoIG9wZW4oJy92YXIvbG9nL3BhcnQtaGFuZGxlci5sb2cnLCAnYScpIGFzIGxvZzoKICAgICAgICBsb2cud3JpdGUoJyVzIGZpbGVuYW1lOiVzLCBjdHlwZTolc1xuJyAlICh0aW1lc3RhbXAsIGZpbGVuYW1lLCBjdHlwZSkpCgogICAgaWYgY3R5cGUgPT0gJ3RleHQveC1jZm5pbml0ZGF0YSc6CiAgICAgICAgd2l0aCBvcGVuKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzLyVzJyAlIGZpbGVuYW1lLCAndycpIGFzIGY6CiAgICAgICAgICAgIGYud3JpdGUocGF5bG9hZCkKCiAgICAgICAgIyBUT0RPKHNkYWtlKSBob3BlZnVsbHkgdGVtcG9yYXJ5IHVudGlsIHVzZXJzIG1vdmUgdG8gaGVhdC1jZm50b29scy0xLjMKICAgICAgICB3aXRoIG9wZW4oJy92YXIvbGliL2Nsb3VkL2RhdGEvJXMnICUgZmlsZW5hbWUsICd3JykgYXMgZjoKICAgICAgICAgICAgZi53cml0ZShwYXlsb2FkKQoKLS09PT09PT09PT09PT09PT0zMTQ2MTUzMDI2MzE5OTEyMDEyPT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtY2ZuaW5pdGRhdGE7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJjZm4tdXNlcmRhdGEiCgoKLS09PT09PT09PT09PT09PT0zMTQ2MTUzMDI2MzE5OTEyMDEyPT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtc2hlbGxzY3JpcHQ7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJsb2d1c2VyZGF0YS5weSIKCiMhL3Vzci9iaW4vZW52IHB5dGhvbjMKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wCiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZW
QgdW5kZXIgdGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBsb2dnaW5nCmltcG9ydCBvcwppbXBvcnQgc3VicHJvY2VzcwppbXBvcnQgc3lzCgoKVkFSX1BBVEggPSAnL3Zhci9saWIvaGVhdC1jZm50b29scycKTE9HID0gbG9nZ2luZy5nZXRMb2dnZXIoJ2hlYXQtcHJvdmlzaW9uJykKCgpkZWYgaW5pdF9sb2dnaW5nKCk6CiAgICBMT0cuc2V0TGV2ZWwobG9nZ2luZy5JTkZPKQogICAgTE9HLmFkZEhhbmRsZXIobG9nZ2luZy5TdHJlYW1IYW5kbGVyKCkpCiAgICBmaCA9IGxvZ2dpbmcuRmlsZUhhbmRsZXIoIi92YXIvbG9nL2hlYXQtcHJvdmlzaW9uLmxvZyIpCiAgICBvcy5jaG1vZChmaC5iYXNlRmlsZW5hbWUsIGludCgiNjAwIiwgOCkpCiAgICBMT0cuYWRkSGFuZGxlcihmaCkKCgpkZWYgY2FsbChhcmdzKToKCiAgICBjbGFzcyBMb2dTdHJlYW0ob2JqZWN0KToKCiAgICAgICAgZGVmIHdyaXRlKHNlbGYsIGRhdGEpOgogICAgICAgICAgICBMT0cuaW5mbyhkYXRhKQoKICAgIExPRy5pbmZvKCclc1xuJywgJyAnLmpvaW4oYXJncykpICAjIG5vcWEKICAgIHRyeToKICAgICAgICBscyA9IExvZ1N0cmVhbSgpCiAgICAgICAgcCA9IHN1YnByb2Nlc3MuUG9wZW4o
Oct 11 02:20:03 compute-0 nova_compute[356901]: YXJncywgc3Rkb3V0PXN1YnByb2Nlc3MuUElQRSwKICAgICAgICAgICAgICAgICAgICAgICAgICAgICBzdGRlcnI9c3VicHJvY2Vzcy5QSVBFKQogICAgICAgIGRhdGEgPSBwLmNvbW11bmljYXRlKCkKICAgICAgICBpZiBkYXRhOgogICAgICAgICAgICBmb3IgeCBpbiBkYXRhOgogICAgICAgICAgICAgICAgbHMud3JpdGUoeCkKICAgIGV4Y2VwdCBPU0Vycm9yOgogICAgICAgIGV4X3R5cGUsIGV4LCB0YiA9IHN5cy5leGNfaW5mbygpCiAgICAgICAgaWYgZXguZXJybm8gPT0gZXJybm8uRU5PRVhFQzoKICAgICAgICAgICAgTE9HLmVycm9yKCdVc2VyZGF0YSBlbXB0eSBvciBub3QgZXhlY3V0YWJsZTogJXMnLCBleCkKICAgICAgICAgICAgcmV0dXJuIG9zLkVYX09LCiAgICAgICAgZWxzZToKICAgICAgICAgICAgTE9HLmVycm9yKCdPUyBlcnJvciBydW5uaW5nIHVzZXJkYXRhOiAlcycsIGV4KQogICAgICAgICAgICByZXR1cm4gb3MuRVhfT1NFUlIKICAgIGV4Y2VwdCBFeGNlcHRpb246CiAgICAgICAgZXhfdHlwZSwgZXgsIHRiID0gc3lzLmV4Y19pbmZvKCkKICAgICAgICBMT0cuZXJyb3IoJ1Vua25vd24gZXJyb3IgcnVubmluZyB1c2VyZGF0YTogJXMnLCBleCkKICAgICAgICByZXR1cm4gb3MuRVhfU09GVFdBUkUKICAgIHJldHVybiBwLnJldHVybmNvZGUKCgpkZWYgbWFpbigpOgogICAgdXNlcmRhdGFfcGF0aCA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ2Nmbi11c2VyZGF0YScpCiAgICBvcy5jaG1vZCh1c2VyZGF0YV9wYXRoLCBpbnQoIjcwMCIsIDgpKQoKICAgIExPRy5pbmZvKCdQcm92aXNpb24gYmVnYW46ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICByZXR1cm5jb2RlID0gY2FsbChbdXNlcmRhdGFfcGF0aF0pCiAgICBMT0cuaW5mbygnUHJvdmlzaW9uIGRvbmU6ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICBpZiByZXR1cm5jb2RlOgogICAgICAgIHJldHVybiByZXR1cm5jb2RlCgoKaWYgX19uYW1lX18gPT0gJ19fbWFpbl9fJzoKICAgIGluaXRfbG9nZ2luZygpCgogICAgY29kZSA9IG1haW4oKQogICAgaWYgY29kZToKICAgICAgICBMT0cuZXJyb3IoJ1Byb3Zpc2lvbiBmYWlsZWQgd2l0aCBleGl0IGNvZGUgJXMnLCBjb2RlKQogICAgICAgIHN5cy5leGl0KGNvZGUpCgogICAgcHJvdmlzaW9uX2xvZyA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ3Byb3Zpc2lvbi1maW5pc2hlZCcpCiAgICAjIHRvdWNoIHRoZSBmaWxlIHNvIGl0IGlzIHRpbWVzdGFtcGVkIHdpdGggd2hlbiBmaW5pc2hlZAogICAgd2l0aCBvcGVuKHByb3Zpc2lvbl9sb2csICdhJyk6CiAgICAgICAgb3MudXRpbWUocHJvdmlzaW9uX2xvZywgTm9uZSkKCi0tPT09PT09PT09PT09PT09MzE0NjE1MzAyNjMxOTkxMjAxMj09CkNvbnRlbnQtVHlwZTogdGV4dC94LWNmbmluaXRkYXRhOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2ZuLW1ldGFkYXRhLXNlcnZlciIKCmh0dHBzOi8vaGVhdC1jZm5hcGktaW50ZXJuYWwub3BlbnN0YWNrLnN2Yzo4MDAwL3YxLwotLT09PT09PT09PT09PT09PTMxNDYxNTMwMjYzMTk5MTIwMTI9PQpDb250ZW50LVR5cGU6IHRleHQveC1jZm5pbml0ZGF0YTsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImNmbi1ib3RvLWNmZyIKCltCb3RvXQpkZWJ1ZyA9IDAKaXNfc2VjdXJlID0gMApodHRwc192YWxpZGF0ZV9jZXJ0aWZpY2F0ZXMgPSAxCmNmbl9yZWdpb25fbmFtZSA9IGhlYXQKY2ZuX3JlZ2lvbl9lbmRwb2ludCA9IGhlYXQtY2ZuYXBpLWludGVybmFsLm9wZW5zdGFjay5zdmMKLS09PT09PT09PT09PT09PT0zMTQ2MTUzMDI2MzE5OTEyMDEyPT0tLQo=',user_id='d215f3ebbc07435493ccd666fc80109d',uuid=d60d7ea1-5d00-4902-90e6-3ae67eb09a78,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "address": "fa:16:3e:c2:ee:14", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.80", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": 
"ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa7108c4c-c9", "ovs_interfaceid": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} plug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:710
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.584 2 DEBUG nova.network.os_vif_util [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converting VIF {"id": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "address": "fa:16:3e:c2:ee:14", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.80", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa7108c4c-c9", "ovs_interfaceid": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.585 2 DEBUG nova.network.os_vif_util [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:c2:ee:14,bridge_name='br-int',has_traffic_filtering=True,id=a7108c4c-c96c-4354-a4bf-99b1d2160514,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tapa7108c4c-c9') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.586 2 DEBUG os_vif [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Plugging vif VIFOpenVSwitch(active=False,address=fa:16:3e:c2:ee:14,bridge_name='br-int',has_traffic_filtering=True,id=a7108c4c-c96c-4354-a4bf-99b1d2160514,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tapa7108c4c-c9') plug /usr/lib/python3.9/site-packages/os_vif/__init__.py:76
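Note: these three lines are the os-vif handoff: Nova converts its internal network model into an os-vif VIFOpenVSwitch (nova_to_osvif_vif), then calls os_vif.plug(). A minimal standalone sketch of that call, with field values copied from the converted object logged above; the InstanceInfo values and the plugin/privsep environment are assumptions for illustration, not how nova-compute wires it internally:

    # Sketch of the os-vif plug call logged above. Field values come from
    # the VIFOpenVSwitch repr in the log; running this for real needs root,
    # a local Open vSwitch, and os-vif's privsep helper available.
    import os_vif
    from os_vif.objects import instance_info, network, vif

    os_vif.initialize()  # loads the registered plugins, including 'ovs'

    net = network.Network(id="d4dded16-3268-4cf9-bb6b-aa5200d5e4ec",
                          bridge="br-int")
    my_vif = vif.VIFOpenVSwitch(
        id="a7108c4c-c96c-4354-a4bf-99b1d2160514",
        address="fa:16:3e:c2:ee:14",
        bridge_name="br-int",
        vif_name="tapa7108c4c-c9",
        has_traffic_filtering=True,
        preserve_on_delete=True,
        plugin="ovs",
        network=net,
        port_profile=vif.VIFPortProfileOpenVSwitch(
            interface_id="a7108c4c-c96c-4354-a4bf-99b1d2160514"),
    )
    info = instance_info.InstanceInfo(
        uuid="d60d7ea1-5d00-4902-90e6-3ae67eb09a78",
        name="instance-00000002")  # assumed values for the sketch
    os_vif.plug(my_vif, info)  # emits the 'Plugging vif ...' line above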
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.587 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.588 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddBridgeCommand(_result=None, name=br-int, may_exist=True, datapath_type=system) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.588 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.595 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.596 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapa7108c4c-c9, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.597 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Interface, record=tapa7108c4c-c9, col_values=(('external_ids', {'iface-id': 'a7108c4c-c96c-4354-a4bf-99b1d2160514', 'iface-status': 'active', 'attached-mac': 'fa:16:3e:c2:ee:14', 'vm-uuid': 'd60d7ea1-5d00-4902-90e6-3ae67eb09a78'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
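Note: the plug lands as a single OVSDB transaction with the two commands logged above: AddPortCommand attaches tapa7108c4c-c9 to br-int, and DbSetCommand stamps the Interface row with the external_ids (iface-id, attached-mac, vm-uuid) that let ovn-controller recognize and claim the port a moment later. A sketch of the same transaction issued directly with ovsdbapp; the socket path is an assumption about a local ovsdb-server:

    # Sketch: replay the two-command transaction above via ovsdbapp.
    # The unix socket path is assumed; Nova uses its own connection.
    from ovsdbapp.backend.ovs_idl import connection
    from ovsdbapp.schema.open_vswitch import impl_idl

    OVSDB = "unix:/run/openvswitch/db.sock"  # assumed local ovsdb-server
    idl = connection.OvsdbIdl.from_server(OVSDB, "Open_vSwitch")
    api = impl_idl.OvsdbIdl(connection.Connection(idl=idl, timeout=10))

    external_ids = {
        "iface-id": "a7108c4c-c96c-4354-a4bf-99b1d2160514",
        "iface-status": "active",
        "attached-mac": "fa:16:3e:c2:ee:14",
        "vm-uuid": "d60d7ea1-5d00-4902-90e6-3ae67eb09a78",
    }
    with api.transaction(check_error=True) as txn:
        txn.add(api.add_port("br-int", "tapa7108c4c-c9", may_exist=True))
        txn.add(api.db_set("Interface", "tapa7108c4c-c9",
                           ("external_ids", external_ids)))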
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.601 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:03 compute-0 NetworkManager[44908]: <info>  [1760149203.6044] manager: (tapa7108c4c-c9): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/31)
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.606 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.619 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.621 2 INFO os_vif [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Successfully plugged vif VIFOpenVSwitch(active=False,address=fa:16:3e:c2:ee:14,bridge_name='br-int',has_traffic_filtering=True,id=a7108c4c-c96c-4354-a4bf-99b1d2160514,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tapa7108c4c-c9')
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.696 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No BDM found with device name vda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.697 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No BDM found with device name vdb, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.698 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No BDM found with device name sda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.698 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No VIF found with MAC fa:16:3e:c2:ee:14, not building metadata _build_interface_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12092
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.699 2 INFO nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Using config drive
Oct 11 02:20:03 compute-0 nova_compute[356901]: 2025-10-11 02:20:03.762 2 DEBUG nova.storage.rbd_utils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:20:03 compute-0 rsyslogd[187706]: message too long (8192) with configured size 8096, begin of message is: 2025-10-11 02:20:03.584 2 DEBUG nova.virt.libvirt.vif [None req-bcab2cd3-6c42-48 [v8.2506.0-2.el9 try https://www.rsyslog.com/e/2445 ]
Oct 11 02:20:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1263: 321 pgs: 321 active+clean; 110 MiB data, 214 MiB used, 60 GiB / 60 GiB avail; 17 KiB/s rd, 1.4 MiB/s wr, 26 op/s
Oct 11 02:20:04 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3299734709' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:20:04 compute-0 nova_compute[356901]: 2025-10-11 02:20:04.228 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:04 compute-0 nova_compute[356901]: 2025-10-11 02:20:04.685 2 INFO nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Creating config drive at /var/lib/nova/instances/d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.config
Oct 11 02:20:04 compute-0 nova_compute[356901]: 2025-10-11 02:20:04.697 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): /usr/bin/mkisofs -o /var/lib/nova/instances/d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpjv3lavse execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:20:04 compute-0 sshd-session[424891]: Invalid user ubuntu from 121.227.153.123 port 57116
Oct 11 02:20:04 compute-0 nova_compute[356901]: 2025-10-11 02:20:04.855 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "/usr/bin/mkisofs -o /var/lib/nova/instances/d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpjv3lavse" returned: 0 in 0.159s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
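Note: the config drive announced at 02:20:03.699 is a plain ISO 9660 image: mkisofs packs a staging directory (/tmp/tmpjv3lavse here) into disk.config with Joliet and Rock Ridge extensions and the volume label config-2, which is the label cloud-init probes for. A sketch reproducing the logged invocation through oslo.concurrency; the staging and output paths are placeholders:

    # Rebuild the mkisofs call logged above. processutils.execute raises
    # ProcessExecutionError on a non-zero exit, mirroring Nova's handling.
    from oslo_concurrency import processutils

    def make_config_drive(staging_dir, out_path):
        processutils.execute(
            "/usr/bin/mkisofs", "-o", out_path,
            "-ldots", "-allow-lowercase", "-allow-multidot", "-l",
            "-publisher",
            "OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9",
            "-quiet", "-J", "-r",
            "-V", "config-2",  # the volume label cloud-init looks for
            staging_dir)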
Oct 11 02:20:04 compute-0 nova_compute[356901]: 2025-10-11 02:20:04.907 2 DEBUG nova.storage.rbd_utils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:20:04 compute-0 nova_compute[356901]: 2025-10-11 02:20:04.921 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.config d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:20:05 compute-0 sshd-session[424481]: Failed password for root from 193.46.255.33 port 10308 ssh2
Oct 11 02:20:05 compute-0 sshd-session[424891]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:20:05 compute-0 sshd-session[424891]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:20:05 compute-0 ceph-mon[191930]: pgmap v1263: 321 pgs: 321 active+clean; 110 MiB data, 214 MiB used, 60 GiB / 60 GiB avail; 17 KiB/s rd, 1.4 MiB/s wr, 26 op/s
Oct 11 02:20:05 compute-0 nova_compute[356901]: 2025-10-11 02:20:05.242 2 DEBUG oslo_concurrency.processutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.config d60d7ea1-5d00-4902-90e6-3ae67eb09a78_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.322s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:20:05 compute-0 nova_compute[356901]: 2025-10-11 02:20:05.244 2 INFO nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Deleting local config drive /var/lib/nova/instances/d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.config because it was imported into RBD.
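Note: because this deployment backs instances with Ceph, the ISO does not stay on local disk: it is imported into the vms pool as <uuid>_disk.config (the rbd_utils probe at 02:20:04.907 confirmed the image did not exist yet) and the local copy is deleted afterwards, as the line above says. The same import step scripted, with the CLI arguments taken from the log:

    # Sketch of the logged 'rbd import' step, driving the plain CLI.
    # Nova runs the identical command through oslo.concurrency.
    import subprocess

    uuid = "d60d7ea1-5d00-4902-90e6-3ae67eb09a78"
    subprocess.run(
        ["rbd", "import", "--pool", "vms",
         f"/var/lib/nova/instances/{uuid}/disk.config",
         f"{uuid}_disk.config",
         "--image-format=2", "--id", "openstack",
         "--conf", "/etc/ceph/ceph.conf"],
        check=True)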
Oct 11 02:20:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:20:05 compute-0 kernel: tapa7108c4c-c9: entered promiscuous mode
Oct 11 02:20:05 compute-0 NetworkManager[44908]: <info>  [1760149205.3470] manager: (tapa7108c4c-c9): new Tun device (/org/freedesktop/NetworkManager/Devices/32)
Oct 11 02:20:05 compute-0 nova_compute[356901]: 2025-10-11 02:20:05.349 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:05 compute-0 ovn_controller[88370]: 2025-10-11T02:20:05Z|00035|binding|INFO|Claiming lport a7108c4c-c96c-4354-a4bf-99b1d2160514 for this chassis.
Oct 11 02:20:05 compute-0 ovn_controller[88370]: 2025-10-11T02:20:05Z|00036|binding|INFO|a7108c4c-c96c-4354-a4bf-99b1d2160514: Claiming fa:16:3e:c2:ee:14 192.168.0.80
Oct 11 02:20:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:20:05.362 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:c2:ee:14 192.168.0.80'], port_security=['fa:16:3e:c2:ee:14 192.168.0.80'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'name': 'vnf-scaleup_group-wzkjkvgckve2-ittzoa6m3dmq-egfg3ceao3k4-port-r4lbf7nhvsnm', 'neutron:cidrs': '192.168.0.80/24', 'neutron:device_id': 'd60d7ea1-5d00-4902-90e6-3ae67eb09a78', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'neutron:port_capabilities': '', 'neutron:port_name': 'vnf-scaleup_group-wzkjkvgckve2-ittzoa6m3dmq-egfg3ceao3k4-port-r4lbf7nhvsnm', 'neutron:project_id': '97026531b3404a11869cb85a059c4a0d', 'neutron:revision_number': '2', 'neutron:security_group_ids': 'c0c90d87-d29f-4e96-98a1-ffb301424ea4', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:port_fip': '192.168.122.245'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=d19b0dd1-1656-436b-911a-8f2dcc98f6bf, chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], tunnel_key=4, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=a7108c4c-c96c-4354-a4bf-99b1d2160514) old=Port_Binding(chassis=[]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:20:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:20:05.365 286362 INFO neutron.agent.ovn.metadata.agent [-] Port a7108c4c-c96c-4354-a4bf-99b1d2160514 in datapath d4dded16-3268-4cf9-bb6b-aa5200d5e4ec bound to our chassis
Oct 11 02:20:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:20:05.367 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network d4dded16-3268-4cf9-bb6b-aa5200d5e4ec
Oct 11 02:20:05 compute-0 ovn_controller[88370]: 2025-10-11T02:20:05Z|00037|binding|INFO|Setting lport a7108c4c-c96c-4354-a4bf-99b1d2160514 ovn-installed in OVS
Oct 11 02:20:05 compute-0 ovn_controller[88370]: 2025-10-11T02:20:05Z|00038|binding|INFO|Setting lport a7108c4c-c96c-4354-a4bf-99b1d2160514 up in Southbound
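Note: this is the OVN side of the plug: ovn-controller sees the external_ids written at 02:20:03, claims the logical port for this chassis, marks it ovn-installed in OVS, and flips Port_Binding.up in the Southbound database. That flip is what ultimately surfaces as the network-vif-plugged event Nova receives at 02:20:05.762 below. To inspect such a binding by hand (assumes ovn-sbctl is installed and its default connection reaches the SB DB):

    # Sketch: read back the Southbound Port_Binding row for the lport
    # claimed above.
    import json
    import subprocess

    lport = "a7108c4c-c96c-4354-a4bf-99b1d2160514"
    raw = subprocess.run(
        ["ovn-sbctl", "--format=json", "find", "Port_Binding",
         f"logical_port={lport}"],
        check=True, capture_output=True, text=True).stdout
    print(json.dumps(json.loads(raw), indent=2))  # chassis, up, mac, ...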
Oct 11 02:20:05 compute-0 nova_compute[356901]: 2025-10-11 02:20:05.384 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:20:05.404 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[de93a616-dca7-405e-8815-0f93ffb02b69]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:20:05 compute-0 systemd-udevd[424948]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:20:05 compute-0 systemd-machined[137586]: New machine qemu-2-instance-00000002.
Oct 11 02:20:05 compute-0 systemd[1]: Started Virtual Machine qemu-2-instance-00000002.
Oct 11 02:20:05 compute-0 NetworkManager[44908]: <info>  [1760149205.4433] device (tapa7108c4c-c9): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 02:20:05 compute-0 NetworkManager[44908]: <info>  [1760149205.4458] device (tapa7108c4c-c9): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Oct 11 02:20:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:20:05.449 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[d4886aa2-e4af-4fc8-890c-0d0e0d1fc9d2]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:20:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:20:05.454 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[c97de70d-000d-4073-aa28-4782c0442abc]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:20:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:20:05.499 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[8ac3a05b-cf5d-453c-8e77-6c63e0e9f2da]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:20:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:20:05.528 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[9844c52f-0f44-4fcc-a289-c3f2828477e5]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapd4dded16-31'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:11:50:48'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 8, 'tx_packets': 5, 'rx_bytes': 832, 'tx_bytes': 354, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 8, 'tx_packets': 5, 'rx_bytes': 832, 'tx_bytes': 354, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 15], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 548909, 'reachable_time': 26127, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 
'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 8, 'inoctets': 720, 'indelivers': 1, 'outforwdatagrams': 0, 'outpkts': 3, 'outoctets': 228, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 8, 'outmcastpkts': 3, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 720, 'outmcastoctets': 228, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 8, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 1, 'inerrors': 0, 'outmsgs': 3, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 424958, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:20:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:20:05.557 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[7c05df1a-9a0d-450c-8ab0-da7616725576]: (4, ({'family': 2, 'prefixlen': 32, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '169.254.169.254'], ['IFA_LOCAL', '169.254.169.254'], ['IFA_BROADCAST', '169.254.169.254'], ['IFA_LABEL', 'tapd4dded16-31'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 548926, 'tstamp': 548926}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 424960, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'}, {'family': 2, 'prefixlen': 24, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '192.168.0.2'], ['IFA_LOCAL', '192.168.0.2'], ['IFA_BROADCAST', '192.168.0.255'], ['IFA_LABEL', 'tapd4dded16-31'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 548931, 'tstamp': 548931}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 424960, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'})) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
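Note: the two privsep replies above are pyroute2 netlink dumps taken inside the ovnmeta-d4dded16-... namespace: the metadata tap (tapd4dded16-31) carries both 169.254.169.254/32, the metadata service address, and 192.168.0.2/24 on the tenant subnet. A sketch of the same inspection, with the namespace and interface names copied from the log; needs root:

    # Sketch: list addresses on the metadata tap inside the OVN metadata
    # namespace, as the privsep daemon did above.
    from pyroute2 import NetNS

    ns_name = "ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec"
    with NetNS(ns_name) as ns:
        idx = ns.link_lookup(ifname="tapd4dded16-31")[0]
        for addr in ns.get_addr(index=idx):
            attrs = dict(addr["attrs"])
            print(f"{attrs['IFA_ADDRESS']}/{addr['prefixlen']}")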
Oct 11 02:20:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:20:05.560 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapd4dded16-30, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:20:05 compute-0 nova_compute[356901]: 2025-10-11 02:20:05.564 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:05 compute-0 nova_compute[356901]: 2025-10-11 02:20:05.566 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:20:05.566 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapd4dded16-30, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:20:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:20:05.567 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:20:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:20:05.568 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tapd4dded16-30, col_values=(('external_ids', {'iface-id': 'f0f8488b-423f-46a5-8a6a-984c2ae3438e'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:20:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:20:05.568 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:20:05 compute-0 nova_compute[356901]: 2025-10-11 02:20:05.762 2 DEBUG nova.compute.manager [req-a4fe96ca-8fcf-416c-8c37-6c8d000eb478 req-ec870dbb-feb4-4cd3-9df3-1ecbcf580b45 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Received event network-vif-plugged-a7108c4c-c96c-4354-a4bf-99b1d2160514 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:20:05 compute-0 nova_compute[356901]: 2025-10-11 02:20:05.763 2 DEBUG oslo_concurrency.lockutils [req-a4fe96ca-8fcf-416c-8c37-6c8d000eb478 req-ec870dbb-feb4-4cd3-9df3-1ecbcf580b45 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:20:05 compute-0 nova_compute[356901]: 2025-10-11 02:20:05.764 2 DEBUG oslo_concurrency.lockutils [req-a4fe96ca-8fcf-416c-8c37-6c8d000eb478 req-ec870dbb-feb4-4cd3-9df3-1ecbcf580b45 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:20:05 compute-0 nova_compute[356901]: 2025-10-11 02:20:05.764 2 DEBUG oslo_concurrency.lockutils [req-a4fe96ca-8fcf-416c-8c37-6c8d000eb478 req-ec870dbb-feb4-4cd3-9df3-1ecbcf580b45 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:20:05 compute-0 nova_compute[356901]: 2025-10-11 02:20:05.764 2 DEBUG nova.compute.manager [req-a4fe96ca-8fcf-416c-8c37-6c8d000eb478 req-ec870dbb-feb4-4cd3-9df3-1ecbcf580b45 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Processing event network-vif-plugged-a7108c4c-c96c-4354-a4bf-99b1d2160514 _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10808
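Note: the Acquiring/acquired/released triple around _pop_event is oslo.concurrency's lockutils at work: event delivery is serialized per instance under a lock named <uuid>-events, so external events such as network-vif-plugged cannot race the waiter registered by the build path. The same primitive standalone, with the lock name copied from the log:

    # Sketch of the lock pattern behind the three lines above; the
    # decorator emits the same Acquiring/acquired/released DEBUG lines.
    from oslo_concurrency import lockutils

    uuid = "d60d7ea1-5d00-4902-90e6-3ae67eb09a78"

    @lockutils.synchronized(f"{uuid}-events")
    def _pop_event():
        # pop or record the instance event while holding the lock
        pass

    _pop_event()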
Oct 11 02:20:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1264: 321 pgs: 321 active+clean; 111 MiB data, 214 MiB used, 60 GiB / 60 GiB avail; 27 KiB/s rd, 1.4 MiB/s wr, 40 op/s
Oct 11 02:20:06 compute-0 unix_chkpwd[424978]: password check failed for user (root)
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0008201929494692974 of space, bias 1.0, pg target 0.24605788484078922 quantized to 32 (current 32)
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:20:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
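Note: the pg_autoscaler arithmetic above is reproducible from the logged numbers: pg target = (pool's share of raw space) × bias × (mon_target_pg_per_osd × number of OSDs), then quantized to a power of two and floored at the pool's minimum (hence 1 for .mgr, 16 for cephfs.cephfs.meta with its bias of 4.0, 32 elsewhere). The factor that makes every line match is 300, which with the default mon_target_pg_per_osd = 100 implies 3 OSDs, consistent with the 60 GiB cluster in the pgmap lines. A quick check, assuming exactly that:

    # Verify the pg_autoscaler targets from the log lines above. Assumes
    # the default mon_target_pg_per_osd = 100 and 3 OSDs (factor 300).
    import math

    pools = {
        ".mgr":               (7.185749983720779e-06, 1.0, 0.0021557249951162337),
        "vms":                (0.0008201929494692974, 1.0, 0.24605788484078922),
        "images":             (0.00025334537995702286, 1.0, 0.07600361398710685),
        "cephfs.cephfs.meta": (5.087256625643029e-07, 4.0, 0.0006104707950771635),
        "default.rgw.meta":   (1.2718141564107572e-07, 4.0, 0.00015261769876929088),
    }
    for name, (ratio, bias, logged) in pools.items():
        assert math.isclose(ratio * bias * 300, logged, rel_tol=1e-12), name
    print("pg targets reproduce the logged values")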
Oct 11 02:20:07 compute-0 ceph-mon[191930]: pgmap v1264: 321 pgs: 321 active+clean; 111 MiB data, 214 MiB used, 60 GiB / 60 GiB avail; 27 KiB/s rd, 1.4 MiB/s wr, 40 op/s
Oct 11 02:20:07 compute-0 sshd-session[424891]: Failed password for invalid user ubuntu from 121.227.153.123 port 57116 ssh2
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.758 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760149207.7568502, d60d7ea1-5d00-4902-90e6-3ae67eb09a78 => Started> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.759 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] VM Started (Lifecycle Event)
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.764 2 DEBUG nova.compute.manager [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Instance event wait completed in 0 seconds for network-vif-plugged wait_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:577
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.773 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Guest created on hypervisor spawn /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4417
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.778 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.787 2 INFO nova.virt.libvirt.driver [-] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Instance spawned successfully.
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.788 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Attempting to register defaults for the following image properties: ['hw_cdrom_bus', 'hw_disk_bus', 'hw_input_bus', 'hw_pointer_model', 'hw_video_model', 'hw_vif_model'] _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:917
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.792 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Synchronizing instance power state after lifecycle event "Started"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.814 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] During sync_power_state the instance has a pending task (spawning). Skip.
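Note: the power-state pair in the sync message decodes via nova.compute.power_state: the database still holds NOSTATE (0) from before the spawn, while libvirt already reports RUNNING (1); because task_state is still spawning, the sync at 02:20:07.814 is skipped rather than fought over. The constants, for reference:

    # nova.compute.power_state constants behind "DB power_state: 0,
    # VM power_state: 1" in the sync message above.
    NOSTATE = 0x00    # not started yet (the DB value here)
    RUNNING = 0x01    # libvirt domain running (the VM value here)
    PAUSED = 0x03
    SHUTDOWN = 0x04
    CRASHED = 0x06
    SUSPENDED = 0x07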
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.815 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760149207.7570703, d60d7ea1-5d00-4902-90e6-3ae67eb09a78 => Paused> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.816 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] VM Paused (Lifecycle Event)
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.829 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Found default for hw_cdrom_bus of sata _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.830 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Found default for hw_disk_bus of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.831 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Found default for hw_input_bus of usb _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.832 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Found default for hw_pointer_model of usbtablet _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.833 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Found default for hw_video_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.834 2 DEBUG nova.virt.libvirt.driver [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Found default for hw_vif_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.841 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.851 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760149207.771084, d60d7ea1-5d00-4902-90e6-3ae67eb09a78 => Resumed> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.851 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] VM Resumed (Lifecycle Event)
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.873 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.880 2 DEBUG nova.compute.manager [req-3d8a0da6-32e9-43e2-a880-69186fa2000e req-2a4191b2-dddd-43be-aa06-9ebd79059f0f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Received event network-vif-plugged-a7108c4c-c96c-4354-a4bf-99b1d2160514 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.880 2 DEBUG oslo_concurrency.lockutils [req-3d8a0da6-32e9-43e2-a880-69186fa2000e req-2a4191b2-dddd-43be-aa06-9ebd79059f0f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.881 2 DEBUG oslo_concurrency.lockutils [req-3d8a0da6-32e9-43e2-a880-69186fa2000e req-2a4191b2-dddd-43be-aa06-9ebd79059f0f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.881 2 DEBUG oslo_concurrency.lockutils [req-3d8a0da6-32e9-43e2-a880-69186fa2000e req-2a4191b2-dddd-43be-aa06-9ebd79059f0f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.882 2 DEBUG nova.compute.manager [req-3d8a0da6-32e9-43e2-a880-69186fa2000e req-2a4191b2-dddd-43be-aa06-9ebd79059f0f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] No waiting events found dispatching network-vif-plugged-a7108c4c-c96c-4354-a4bf-99b1d2160514 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.882 2 WARNING nova.compute.manager [req-3d8a0da6-32e9-43e2-a880-69186fa2000e req-2a4191b2-dddd-43be-aa06-9ebd79059f0f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Received unexpected event network-vif-plugged-a7108c4c-c96c-4354-a4bf-99b1d2160514 for instance with vm_state building and task_state spawning.
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.886 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Synchronizing instance power state after lifecycle event "Resumed"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.890 2 INFO nova.compute.manager [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Took 8.85 seconds to spawn the instance on the hypervisor.
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.890 2 DEBUG nova.compute.manager [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.902 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:20:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1265: 321 pgs: 321 active+clean; 111 MiB data, 214 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.4 MiB/s wr, 41 op/s
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.951 2 INFO nova.compute.manager [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Took 10.02 seconds to build instance.
Oct 11 02:20:07 compute-0 nova_compute[356901]: 2025-10-11 02:20:07.967 2 DEBUG oslo_concurrency.lockutils [None req-bcab2cd3-6c42-4898-8edb-f230e1da811e d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78" "released" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: held 10.122s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:20:08 compute-0 sshd-session[424481]: Failed password for root from 193.46.255.33 port 10308 ssh2
Oct 11 02:20:08 compute-0 sshd-session[424891]: Connection closed by invalid user ubuntu 121.227.153.123 port 57116 [preauth]
Oct 11 02:20:08 compute-0 nova_compute[356901]: 2025-10-11 02:20:08.603 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:09 compute-0 ceph-mon[191930]: pgmap v1265: 321 pgs: 321 active+clean; 111 MiB data, 214 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.4 MiB/s wr, 41 op/s
Oct 11 02:20:09 compute-0 nova_compute[356901]: 2025-10-11 02:20:09.231 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:09 compute-0 sshd-session[424481]: Received disconnect from 193.46.255.33 port 10308:11:  [preauth]
Oct 11 02:20:09 compute-0 sshd-session[424481]: Disconnected from authenticating user root 193.46.255.33 port 10308 [preauth]
Oct 11 02:20:09 compute-0 sshd-session[424481]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.33  user=root
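Note: interleaved with the instance build, the node is taking SSH brute-force traffic from two sources: 193.46.255.33 cycling root passwords and 121.227.153.123 probing the invalid user ubuntu; the unix_chkpwd and pam_unix failures above and below belong to those attempts. A small triage sketch that tallies failures per source from a saved journal; the input path is a placeholder:

    # Sketch: tally SSH auth failures per (source IP, user) from a saved
    # journal capture. "journal.txt" is a placeholder path.
    import re
    from collections import Counter

    pat = re.compile(r"Failed password for (?:invalid user )?(\S+) "
                     r"from (\d+\.\d+\.\d+\.\d+) port \d+")
    hits = Counter()
    with open("journal.txt") as fh:
        for line in fh:
            m = pat.search(line)
            if m:
                hits[(m.group(2), m.group(1))] += 1
    for (ip, user), n in hits.most_common():
        print(f"{n:4d}  {ip}  user={user}")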
Oct 11 02:20:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1266: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 1.4 MiB/s wr, 45 op/s
Oct 11 02:20:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:20:10 compute-0 unix_chkpwd[425028]: password check failed for user (root)
Oct 11 02:20:10 compute-0 sshd-session[425026]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.33  user=root
Oct 11 02:20:10 compute-0 sshd-session[425024]: Invalid user ubuntu from 121.227.153.123 port 57122
Oct 11 02:20:10 compute-0 sshd-session[425024]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:20:10 compute-0 sshd-session[425024]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:20:11 compute-0 ceph-mon[191930]: pgmap v1266: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 1.4 MiB/s wr, 45 op/s
Oct 11 02:20:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1267: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 249 KiB/s rd, 1.4 MiB/s wr, 57 op/s
Oct 11 02:20:12 compute-0 sshd-session[425026]: Failed password for root from 193.46.255.33 port 22756 ssh2
Oct 11 02:20:12 compute-0 sshd-session[425024]: Failed password for invalid user ubuntu from 121.227.153.123 port 57122 ssh2
Oct 11 02:20:13 compute-0 ceph-mon[191930]: pgmap v1267: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 249 KiB/s rd, 1.4 MiB/s wr, 57 op/s
Oct 11 02:20:13 compute-0 podman[425029]: 2025-10-11 02:20:13.237734192 +0000 UTC m=+0.127643897 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS)
Oct 11 02:20:13 compute-0 podman[425031]: 2025-10-11 02:20:13.272002807 +0000 UTC m=+0.139323258 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:20:13 compute-0 podman[425030]: 2025-10-11 02:20:13.296961202 +0000 UTC m=+0.173269571 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, maintainer=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, url=https://catalog.redhat.com/en/search?searchType=containers, io.openshift.expose-services=, release=1755695350, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, version=9.6, io.openshift.tags=minimal rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.33.7, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, name=ubi9-minimal, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., distribution-scope=public, config_id=edpm, architecture=x86_64, container_name=openstack_network_exporter, managed_by=edpm_ansible, vendor=Red Hat, Inc., com.redhat.component=ubi9-minimal-container)
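Note: the three podman events above (and the kepler one at 02:20:16 below) are scheduled container healthchecks: podman runs the 'test' command from each container's healthcheck stanza against the mounted /openstack/healthcheck script and records health_status=healthy. Checking one by hand; the inspect field is .State.Health on recent podman but was .State.Healthcheck on older releases, so treat the format string as an assumption:

    # Sketch: trigger and read back a podman healthcheck from Python.
    import subprocess

    name = "ceilometer_agent_ipmi"
    subprocess.run(["podman", "healthcheck", "run", name], check=True)
    status = subprocess.run(
        ["podman", "inspect", "--format",
         "{{.State.Health.Status}}", name],  # .State.Healthcheck on old podman
        check=True, capture_output=True, text=True).stdout.strip()
    print(name, status)  # expect 'healthy', matching the events above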
Oct 11 02:20:13 compute-0 nova_compute[356901]: 2025-10-11 02:20:13.609 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:13 compute-0 unix_chkpwd[425091]: password check failed for user (root)
Oct 11 02:20:13 compute-0 sshd-session[425024]: Connection closed by invalid user ubuntu 121.227.153.123 port 57122 [preauth]
Oct 11 02:20:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1268: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 1.2 MiB/s rd, 950 KiB/s wr, 79 op/s
Oct 11 02:20:14 compute-0 nova_compute[356901]: 2025-10-11 02:20:14.234 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:14 compute-0 sshd-session[425026]: Failed password for root from 193.46.255.33 port 22756 ssh2
Oct 11 02:20:15 compute-0 sshd-session[425092]: Invalid user ubuntu from 121.227.153.123 port 38704
Oct 11 02:20:15 compute-0 ceph-mon[191930]: pgmap v1268: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 1.2 MiB/s rd, 950 KiB/s wr, 79 op/s
Oct 11 02:20:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:20:15 compute-0 unix_chkpwd[425094]: password check failed for user (root)
Oct 11 02:20:15 compute-0 sshd-session[425092]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:20:15 compute-0 sshd-session[425092]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:20:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1269: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 22 KiB/s wr, 72 op/s
Oct 11 02:20:16 compute-0 podman[425095]: 2025-10-11 02:20:16.245950636 +0000 UTC m=+0.126656399 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vendor=Red Hat, Inc., container_name=kepler, distribution-scope=public, config_id=edpm, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-type=git, io.openshift.tags=base rhel9, name=ubi9, architecture=x86_64, build-date=2024-09-18T21:23:30, io.k8s.display-name=Red Hat Universal Base Image 9, release=1214.1726694543, release-0.7.12=, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.component=ubi9-container, managed_by=edpm_ansible, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.buildah.version=1.29.0, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, maintainer=Red Hat, Inc., io.openshift.expose-services=, version=9.4, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:20:17 compute-0 ceph-mon[191930]: pgmap v1269: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 22 KiB/s wr, 72 op/s
Oct 11 02:20:17 compute-0 sshd-session[425026]: Failed password for root from 193.46.255.33 port 22756 ssh2
Oct 11 02:20:17 compute-0 sshd-session[425092]: Failed password for invalid user ubuntu from 121.227.153.123 port 38704 ssh2
Oct 11 02:20:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1270: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 1.3 KiB/s wr, 57 op/s
Oct 11 02:20:18 compute-0 sshd-session[425092]: Connection closed by invalid user ubuntu 121.227.153.123 port 38704 [preauth]
Oct 11 02:20:18 compute-0 sshd-session[425026]: Received disconnect from 193.46.255.33 port 22756:11:  [preauth]
Oct 11 02:20:18 compute-0 sshd-session[425026]: Disconnected from authenticating user root 193.46.255.33 port 22756 [preauth]
Oct 11 02:20:18 compute-0 sshd-session[425026]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.33  user=root
Oct 11 02:20:18 compute-0 nova_compute[356901]: 2025-10-11 02:20:18.613 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:19 compute-0 ceph-mon[191930]: pgmap v1270: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 1.3 KiB/s wr, 57 op/s
Oct 11 02:20:19 compute-0 nova_compute[356901]: 2025-10-11 02:20:19.241 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:19 compute-0 unix_chkpwd[425116]: password check failed for user (root)
Oct 11 02:20:19 compute-0 sshd-session[425114]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.33  user=root
Oct 11 02:20:19 compute-0 sshd-session[425112]: Invalid user ubuntu from 121.227.153.123 port 38710
Oct 11 02:20:19 compute-0 sshd-session[425112]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:20:19 compute-0 sshd-session[425112]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:20:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1271: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 341 B/s wr, 56 op/s
Oct 11 02:20:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:20:21 compute-0 ceph-mon[191930]: pgmap v1271: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 341 B/s wr, 56 op/s
Oct 11 02:20:21 compute-0 sshd-session[425114]: Failed password for root from 193.46.255.33 port 55266 ssh2
Oct 11 02:20:21 compute-0 sshd-session[425112]: Failed password for invalid user ubuntu from 121.227.153.123 port 38710 ssh2
Oct 11 02:20:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1272: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 52 op/s
Oct 11 02:20:22 compute-0 ceph-mon[191930]: pgmap v1272: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 52 op/s
Oct 11 02:20:22 compute-0 unix_chkpwd[425118]: password check failed for user (root)
Oct 11 02:20:22 compute-0 sshd-session[425112]: Connection closed by invalid user ubuntu 121.227.153.123 port 38710 [preauth]
Oct 11 02:20:23 compute-0 nova_compute[356901]: 2025-10-11 02:20:23.619 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:23 compute-0 sshd-session[425119]: Invalid user ubuntu from 121.227.153.123 port 53466
Oct 11 02:20:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1273: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 1.3 MiB/s rd, 41 op/s
Oct 11 02:20:24 compute-0 sshd-session[425119]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:20:24 compute-0 sshd-session[425119]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:20:24 compute-0 nova_compute[356901]: 2025-10-11 02:20:24.243 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:24 compute-0 sshd-session[425114]: Failed password for root from 193.46.255.33 port 55266 ssh2
Oct 11 02:20:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:20:25 compute-0 ceph-mon[191930]: pgmap v1273: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 1.3 MiB/s rd, 41 op/s
Oct 11 02:20:25 compute-0 sshd-session[425119]: Failed password for invalid user ubuntu from 121.227.153.123 port 53466 ssh2
Oct 11 02:20:25 compute-0 sshd-session[425119]: Connection closed by invalid user ubuntu 121.227.153.123 port 53466 [preauth]
Oct 11 02:20:25 compute-0 unix_chkpwd[425121]: password check failed for user (root)
Oct 11 02:20:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1274: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 286 KiB/s rd, 8 op/s
Oct 11 02:20:26 compute-0 sshd[113062]: drop connection #1 from [121.227.153.123]:53468 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:26 compute-0 ceph-mon[191930]: pgmap v1274: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 286 KiB/s rd, 8 op/s
Oct 11 02:20:26 compute-0 sshd[113062]: drop connection #1 from [121.227.153.123]:53470 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:20:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:20:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:20:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:20:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:20:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:20:27 compute-0 sshd[113062]: drop connection #1 from [121.227.153.123]:53480 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:27 compute-0 podman[425122]: 2025-10-11 02:20:27.237633269 +0000 UTC m=+0.125197772 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:20:27 compute-0 podman[425124]: 2025-10-11 02:20:27.243870125 +0000 UTC m=+0.107067902 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm, io.buildah.version=1.41.4)
Oct 11 02:20:27 compute-0 podman[425125]: 2025-10-11 02:20:27.270883417 +0000 UTC m=+0.132474748 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_metadata_agent, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:20:27 compute-0 podman[425123]: 2025-10-11 02:20:27.30173025 +0000 UTC m=+0.181247341 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_managed=true)
Oct 11 02:20:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:20:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3448236464' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:20:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:20:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3448236464' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:20:27 compute-0 sshd-session[425114]: Failed password for root from 193.46.255.33 port 55266 ssh2
Oct 11 02:20:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1275: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:20:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3448236464' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:20:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3448236464' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:20:28 compute-0 sshd[113062]: drop connection #1 from [121.227.153.123]:53488 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:28 compute-0 nova_compute[356901]: 2025-10-11 02:20:28.624 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:29 compute-0 sshd-session[425114]: Received disconnect from 193.46.255.33 port 55266:11:  [preauth]
Oct 11 02:20:29 compute-0 sshd-session[425114]: Disconnected from authenticating user root 193.46.255.33 port 55266 [preauth]
Oct 11 02:20:29 compute-0 sshd-session[425114]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.33  user=root
Oct 11 02:20:29 compute-0 nova_compute[356901]: 2025-10-11 02:20:29.247 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:29 compute-0 ceph-mon[191930]: pgmap v1275: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:20:29 compute-0 podman[157119]: time="2025-10-11T02:20:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:20:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:20:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:20:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:20:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9048 "" "Go-http-client/1.1"
Oct 11 02:20:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1276: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:20:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:20:30 compute-0 ceph-mon[191930]: pgmap v1276: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:20:31 compute-0 openstack_network_exporter[374316]: ERROR   02:20:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:20:31 compute-0 openstack_network_exporter[374316]: ERROR   02:20:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:20:31 compute-0 openstack_network_exporter[374316]: ERROR   02:20:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:20:31 compute-0 openstack_network_exporter[374316]: ERROR   02:20:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:20:31 compute-0 openstack_network_exporter[374316]: ERROR   02:20:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:20:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1277: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:20:32 compute-0 podman[425201]: 2025-10-11 02:20:32.253960456 +0000 UTC m=+0.136261617 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_id=multipathd, container_name=multipathd, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:20:32 compute-0 podman[425202]: 2025-10-11 02:20:32.285727823 +0000 UTC m=+0.161626800 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, config_id=iscsid, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, managed_by=edpm_ansible, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true)
Oct 11 02:20:33 compute-0 ceph-mon[191930]: pgmap v1277: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:20:33 compute-0 nova_compute[356901]: 2025-10-11 02:20:33.630 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1278: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:20:34 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:39442 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:34 compute-0 nova_compute[356901]: 2025-10-11 02:20:34.254 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:34 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:39446 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:34 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:39462 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:20:35 compute-0 ceph-mon[191930]: pgmap v1278: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:20:35 compute-0 ovn_controller[88370]: 2025-10-11T02:20:35Z|00039|memory_trim|INFO|Detected inactivity (last active 30005 ms ago): trimming memory
Oct 11 02:20:35 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:39470 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:35 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:39474 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1279: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:20:36 compute-0 ceph-mon[191930]: pgmap v1279: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:20:36 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:39486 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:36 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:39488 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:37 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:39496 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:37 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:39502 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1280: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:20:38 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:39508 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:38 compute-0 nova_compute[356901]: 2025-10-11 02:20:38.634 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:38 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:39510 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:39 compute-0 ceph-mon[191930]: pgmap v1280: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:20:39 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:38744 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:39 compute-0 nova_compute[356901]: 2025-10-11 02:20:39.261 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:39 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:38748 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1281: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:20:40 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:38762 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:20:40 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:38770 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:41 compute-0 ceph-mon[191930]: pgmap v1281: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:20:41 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:38782 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1282: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:20:42 compute-0 sshd[113062]: drop connection #0 from [121.227.153.123]:38796 on [38.102.83.82]:22 penalty: failed authentication
Oct 11 02:20:42 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [L] New memtable created with log file: #43. Immutable memtables: 0.
Oct 11 02:20:42 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [L] New memtable created with log file: #43. Immutable memtables: 0.
Oct 11 02:20:43 compute-0 ceph-mon[191930]: pgmap v1282: 321 pgs: 321 active+clean; 111 MiB data, 215 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:20:43 compute-0 sshd-session[425238]: Invalid user debian from 121.227.153.123 port 38806
Oct 11 02:20:43 compute-0 nova_compute[356901]: 2025-10-11 02:20:43.642 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:43 compute-0 podman[425242]: 2025-10-11 02:20:43.752346941 +0000 UTC m=+0.096550333 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:20:43 compute-0 podman[425241]: 2025-10-11 02:20:43.763940981 +0000 UTC m=+0.130837940 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, release=1755695350, managed_by=edpm_ansible, name=ubi9-minimal, config_id=edpm, io.openshift.expose-services=, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., build-date=2025-08-20T13:12:41, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, com.redhat.component=ubi9-minimal-container, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, container_name=openstack_network_exporter, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, io.openshift.tags=minimal rhel9, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, version=9.6, io.buildah.version=1.33.7, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, distribution-scope=public, vcs-type=git)
Oct 11 02:20:43 compute-0 podman[425240]: 2025-10-11 02:20:43.76644694 +0000 UTC m=+0.128729960 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, container_name=ceilometer_agent_ipmi, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:20:43 compute-0 ovn_controller[88370]: 2025-10-11T02:20:43Z|00006|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:c2:ee:14 192.168.0.80
Oct 11 02:20:43 compute-0 ovn_controller[88370]: 2025-10-11T02:20:43Z|00007|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:c2:ee:14 192.168.0.80
Oct 11 02:20:43 compute-0 sshd-session[425238]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:20:43 compute-0 sshd-session[425238]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:20:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1283: 321 pgs: 321 active+clean; 125 MiB data, 223 MiB used, 60 GiB / 60 GiB avail; 48 KiB/s rd, 917 KiB/s wr, 15 op/s
Oct 11 02:20:44 compute-0 nova_compute[356901]: 2025-10-11 02:20:44.266 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:45 compute-0 ceph-mon[191930]: pgmap v1283: 321 pgs: 321 active+clean; 125 MiB data, 223 MiB used, 60 GiB / 60 GiB avail; 48 KiB/s rd, 917 KiB/s wr, 15 op/s
Oct 11 02:20:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:20:45 compute-0 sshd-session[425238]: Failed password for invalid user debian from 121.227.153.123 port 38806 ssh2
Oct 11 02:20:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1284: 321 pgs: 321 active+clean; 132 MiB data, 247 MiB used, 60 GiB / 60 GiB avail; 102 KiB/s rd, 1.4 MiB/s wr, 44 op/s
Oct 11 02:20:46 compute-0 sshd-session[425238]: Connection closed by invalid user debian 121.227.153.123 port 38806 [preauth]
Oct 11 02:20:46 compute-0 nova_compute[356901]: 2025-10-11 02:20:46.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:20:46 compute-0 nova_compute[356901]: 2025-10-11 02:20:46.899 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:20:47 compute-0 ceph-mon[191930]: pgmap v1284: 321 pgs: 321 active+clean; 132 MiB data, 247 MiB used, 60 GiB / 60 GiB avail; 102 KiB/s rd, 1.4 MiB/s wr, 44 op/s
Oct 11 02:20:47 compute-0 podman[425302]: 2025-10-11 02:20:47.244609267 +0000 UTC m=+0.124050847 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., summary=Provides the latest release of Red Hat Universal Base Image 9., io.openshift.expose-services=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=kepler, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, io.buildah.version=1.29.0, com.redhat.component=ubi9-container, release-0.7.12=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, release=1214.1726694543, vcs-type=git, name=ubi9, version=9.4, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, managed_by=edpm_ansible, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, config_id=edpm, distribution-scope=public, io.openshift.tags=base rhel9, architecture=x86_64, build-date=2024-09-18T21:23:30, vendor=Red Hat, Inc.)
Oct 11 02:20:47 compute-0 nova_compute[356901]: 2025-10-11 02:20:47.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:20:47 compute-0 nova_compute[356901]: 2025-10-11 02:20:47.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_incomplete_migrations run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:20:47 compute-0 nova_compute[356901]: 2025-10-11 02:20:47.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances with incomplete migration  _cleanup_incomplete_migrations /usr/lib/python3.9/site-packages/nova/compute/manager.py:11183
Oct 11 02:20:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1285: 321 pgs: 321 active+clean; 133 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 128 KiB/s rd, 1.5 MiB/s wr, 48 op/s
Oct 11 02:20:48 compute-0 sshd-session[425301]: Invalid user debian from 121.227.153.123 port 38820
Oct 11 02:20:48 compute-0 sshd-session[425301]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:20:48 compute-0 sshd-session[425301]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:20:48 compute-0 nova_compute[356901]: 2025-10-11 02:20:48.647 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:49 compute-0 ceph-mon[191930]: pgmap v1285: 321 pgs: 321 active+clean; 133 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 128 KiB/s rd, 1.5 MiB/s wr, 48 op/s
Oct 11 02:20:49 compute-0 nova_compute[356901]: 2025-10-11 02:20:49.270 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1286: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 57 op/s
Oct 11 02:20:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:20:50 compute-0 sshd-session[425301]: Failed password for invalid user debian from 121.227.153.123 port 38820 ssh2
Oct 11 02:20:50 compute-0 sshd-session[425301]: Connection closed by invalid user debian 121.227.153.123 port 38820 [preauth]
Oct 11 02:20:51 compute-0 ceph-mon[191930]: pgmap v1286: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 57 op/s
Oct 11 02:20:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1287: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 58 op/s
Oct 11 02:20:52 compute-0 sshd-session[425324]: Invalid user debian from 121.227.153.123 port 34106
Oct 11 02:20:52 compute-0 sudo[425326]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:20:52 compute-0 sudo[425326]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:20:52 compute-0 sudo[425326]: pam_unix(sudo:session): session closed for user root
Oct 11 02:20:52 compute-0 sshd-session[425324]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:20:52 compute-0 sshd-session[425324]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:20:52 compute-0 sudo[425351]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:20:52 compute-0 sudo[425351]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:20:52 compute-0 sudo[425351]: pam_unix(sudo:session): session closed for user root
Oct 11 02:20:52 compute-0 sudo[425376]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:20:52 compute-0 sudo[425376]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:20:52 compute-0 sudo[425376]: pam_unix(sudo:session): session closed for user root
Oct 11 02:20:52 compute-0 sudo[425401]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:20:52 compute-0 sudo[425401]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:20:52 compute-0 nova_compute[356901]: 2025-10-11 02:20:52.921 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:20:52 compute-0 nova_compute[356901]: 2025-10-11 02:20:52.922 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:20:52 compute-0 nova_compute[356901]: 2025-10-11 02:20:52.922 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:20:53 compute-0 ceph-mon[191930]: pgmap v1287: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 58 op/s
Oct 11 02:20:53 compute-0 sudo[425401]: pam_unix(sudo:session): session closed for user root
Oct 11 02:20:53 compute-0 nova_compute[356901]: 2025-10-11 02:20:53.411 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:20:53 compute-0 nova_compute[356901]: 2025-10-11 02:20:53.412 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:20:53 compute-0 nova_compute[356901]: 2025-10-11 02:20:53.412 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:20:53 compute-0 nova_compute[356901]: 2025-10-11 02:20:53.413 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:20:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:20:53 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:20:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:20:53 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:20:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:20:53 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:20:53 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev f81d3007-dff6-4a05-8ed4-d284c1d0e4e1 does not exist
Oct 11 02:20:53 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev f7542176-c017-434a-92c9-1728b82da1ea does not exist
Oct 11 02:20:53 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 9308f12f-90bb-4dbe-a0c5-fff2f10b18da does not exist
Oct 11 02:20:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:20:53 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:20:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:20:53 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:20:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:20:53 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:20:53 compute-0 sudo[425457]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:20:53 compute-0 sudo[425457]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:20:53 compute-0 sudo[425457]: pam_unix(sudo:session): session closed for user root
Oct 11 02:20:53 compute-0 nova_compute[356901]: 2025-10-11 02:20:53.651 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:53 compute-0 sudo[425482]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:20:53 compute-0 sudo[425482]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:20:53 compute-0 sudo[425482]: pam_unix(sudo:session): session closed for user root
Oct 11 02:20:53 compute-0 sudo[425507]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:20:53 compute-0 sudo[425507]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:20:53 compute-0 sudo[425507]: pam_unix(sudo:session): session closed for user root
Oct 11 02:20:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1288: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 58 op/s
Oct 11 02:20:54 compute-0 sudo[425532]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:20:54 compute-0 sudo[425532]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:20:54 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:20:54 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:20:54 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:20:54 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:20:54 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:20:54 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:20:54 compute-0 nova_compute[356901]: 2025-10-11 02:20:54.274 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:54 compute-0 sshd-session[425324]: Failed password for invalid user debian from 121.227.153.123 port 34106 ssh2
Oct 11 02:20:54 compute-0 podman[425595]: 2025-10-11 02:20:54.66939242 +0000 UTC m=+0.076079971 container create da3b426b240f9d745cbe31672626796d029bec71d1b1ce2588a998886dddbd2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_clarke, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:20:54 compute-0 podman[425595]: 2025-10-11 02:20:54.637616273 +0000 UTC m=+0.044303834 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:20:54 compute-0 systemd[1]: Started libpod-conmon-da3b426b240f9d745cbe31672626796d029bec71d1b1ce2588a998886dddbd2f.scope.
Oct 11 02:20:54 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:20:54 compute-0 podman[425595]: 2025-10-11 02:20:54.825560641 +0000 UTC m=+0.232248182 container init da3b426b240f9d745cbe31672626796d029bec71d1b1ce2588a998886dddbd2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_clarke, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:20:54 compute-0 podman[425595]: 2025-10-11 02:20:54.843512313 +0000 UTC m=+0.250199824 container start da3b426b240f9d745cbe31672626796d029bec71d1b1ce2588a998886dddbd2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_clarke, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:20:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:20:54.845 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:20:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:20:54.848 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.003s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:20:54 compute-0 podman[425595]: 2025-10-11 02:20:54.84976874 +0000 UTC m=+0.256456351 container attach da3b426b240f9d745cbe31672626796d029bec71d1b1ce2588a998886dddbd2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_clarke, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 02:20:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:20:54.850 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:20:54 compute-0 stoic_clarke[425610]: 167 167
Oct 11 02:20:54 compute-0 systemd[1]: libpod-da3b426b240f9d745cbe31672626796d029bec71d1b1ce2588a998886dddbd2f.scope: Deactivated successfully.
Oct 11 02:20:54 compute-0 podman[425615]: 2025-10-11 02:20:54.935497708 +0000 UTC m=+0.051344528 container died da3b426b240f9d745cbe31672626796d029bec71d1b1ce2588a998886dddbd2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_clarke, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, OSD_FLAVOR=default)
Oct 11 02:20:54 compute-0 systemd[1]: var-lib-containers-storage-overlay-daf83d38254a5a9b0bb77f3c46f3689fe9843b946fbea17e054ba778cfe8051d-merged.mount: Deactivated successfully.
Oct 11 02:20:54 compute-0 podman[425615]: 2025-10-11 02:20:54.998930618 +0000 UTC m=+0.114777398 container remove da3b426b240f9d745cbe31672626796d029bec71d1b1ce2588a998886dddbd2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_clarke, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef, io.buildah.version=1.39.3)
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.012 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:20:55 compute-0 systemd[1]: libpod-conmon-da3b426b240f9d745cbe31672626796d029bec71d1b1ce2588a998886dddbd2f.scope: Deactivated successfully.
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.029 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.030 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.031 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.031 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.032 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.032 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.033 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.036 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.070 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.070 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.071 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.071 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.072 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:20:55 compute-0 ceph-mon[191930]: pgmap v1288: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 58 op/s
Oct 11 02:20:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:20:55 compute-0 podman[425641]: 2025-10-11 02:20:55.310726234 +0000 UTC m=+0.072677370 container create a2709f2a1c7916b636285a8a8682939b5527299cd3f562a198d7a20bff4b1424 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_murdock, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:20:55 compute-0 podman[425641]: 2025-10-11 02:20:55.288632845 +0000 UTC m=+0.050584001 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:20:55 compute-0 systemd[1]: Started libpod-conmon-a2709f2a1c7916b636285a8a8682939b5527299cd3f562a198d7a20bff4b1424.scope.
Oct 11 02:20:55 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:20:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cda16cc27aa577c04cb892a3f2362158459fb437478f92c6b102d24ef2275815/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:20:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cda16cc27aa577c04cb892a3f2362158459fb437478f92c6b102d24ef2275815/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:20:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cda16cc27aa577c04cb892a3f2362158459fb437478f92c6b102d24ef2275815/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:20:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cda16cc27aa577c04cb892a3f2362158459fb437478f92c6b102d24ef2275815/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:20:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cda16cc27aa577c04cb892a3f2362158459fb437478f92c6b102d24ef2275815/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:20:55 compute-0 podman[425641]: 2025-10-11 02:20:55.474441452 +0000 UTC m=+0.236392628 container init a2709f2a1c7916b636285a8a8682939b5527299cd3f562a198d7a20bff4b1424 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_murdock, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:20:55 compute-0 podman[425641]: 2025-10-11 02:20:55.496150212 +0000 UTC m=+0.258101358 container start a2709f2a1c7916b636285a8a8682939b5527299cd3f562a198d7a20bff4b1424 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_murdock, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507)
Oct 11 02:20:55 compute-0 podman[425641]: 2025-10-11 02:20:55.502768066 +0000 UTC m=+0.264719222 container attach a2709f2a1c7916b636285a8a8682939b5527299cd3f562a198d7a20bff4b1424 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_murdock, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:20:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:20:55 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/4229017382' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.553 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.482s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.677 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.678 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.678 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.688 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.689 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:20:55 compute-0 nova_compute[356901]: 2025-10-11 02:20:55.689 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:20:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1289: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 118 KiB/s rd, 607 KiB/s wr, 43 op/s
Oct 11 02:20:56 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/4229017382' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:20:56 compute-0 nova_compute[356901]: 2025-10-11 02:20:56.302 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:20:56 compute-0 nova_compute[356901]: 2025-10-11 02:20:56.304 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3721MB free_disk=59.922035217285156GB free_vcpus=6 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:20:56 compute-0 nova_compute[356901]: 2025-10-11 02:20:56.304 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:20:56 compute-0 nova_compute[356901]: 2025-10-11 02:20:56.305 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:20:56 compute-0 nova_compute[356901]: 2025-10-11 02:20:56.519 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:20:56 compute-0 nova_compute[356901]: 2025-10-11 02:20:56.520 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance d60d7ea1-5d00-4902-90e6-3ae67eb09a78 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:20:56 compute-0 nova_compute[356901]: 2025-10-11 02:20:56.520 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 2 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:20:56 compute-0 nova_compute[356901]: 2025-10-11 02:20:56.520 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1536MB phys_disk=59GB used_disk=4GB total_vcpus=8 used_vcpus=2 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:20:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:20:56
Oct 11 02:20:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:20:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:20:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['.rgw.root', 'backups', 'default.rgw.control', 'default.rgw.log', 'default.rgw.meta', '.mgr', 'volumes', 'cephfs.cephfs.meta', 'vms', 'cephfs.cephfs.data', 'images']
Oct 11 02:20:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:20:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:20:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:20:56 compute-0 sshd-session[425324]: Connection closed by invalid user debian 121.227.153.123 port 34106 [preauth]
Oct 11 02:20:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:20:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:20:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:20:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:20:56 compute-0 nova_compute[356901]: 2025-10-11 02:20:56.712 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:20:56 compute-0 upbeat_murdock[425672]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:20:56 compute-0 upbeat_murdock[425672]: --> relative data size: 1.0
Oct 11 02:20:56 compute-0 upbeat_murdock[425672]: --> All data devices are unavailable
Oct 11 02:20:56 compute-0 systemd[1]: libpod-a2709f2a1c7916b636285a8a8682939b5527299cd3f562a198d7a20bff4b1424.scope: Deactivated successfully.
Oct 11 02:20:56 compute-0 podman[425641]: 2025-10-11 02:20:56.908549255 +0000 UTC m=+1.670500431 container died a2709f2a1c7916b636285a8a8682939b5527299cd3f562a198d7a20bff4b1424 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_murdock, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:20:56 compute-0 systemd[1]: libpod-a2709f2a1c7916b636285a8a8682939b5527299cd3f562a198d7a20bff4b1424.scope: Consumed 1.275s CPU time.
Oct 11 02:20:56 compute-0 systemd[1]: var-lib-containers-storage-overlay-cda16cc27aa577c04cb892a3f2362158459fb437478f92c6b102d24ef2275815-merged.mount: Deactivated successfully.
Oct 11 02:20:56 compute-0 podman[425641]: 2025-10-11 02:20:56.998584577 +0000 UTC m=+1.760535713 container remove a2709f2a1c7916b636285a8a8682939b5527299cd3f562a198d7a20bff4b1424 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_murdock, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=reef)
Oct 11 02:20:57 compute-0 sudo[425532]: pam_unix(sudo:session): session closed for user root
Oct 11 02:20:57 compute-0 systemd[1]: libpod-conmon-a2709f2a1c7916b636285a8a8682939b5527299cd3f562a198d7a20bff4b1424.scope: Deactivated successfully.
Oct 11 02:20:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:20:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:20:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:20:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:20:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:20:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:20:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:20:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:20:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:20:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:20:57 compute-0 sudo[425737]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:20:57 compute-0 sudo[425737]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:20:57 compute-0 sudo[425737]: pam_unix(sudo:session): session closed for user root
Oct 11 02:20:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:20:57 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3982520526' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:20:57 compute-0 ceph-mon[191930]: pgmap v1289: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 118 KiB/s rd, 607 KiB/s wr, 43 op/s
Oct 11 02:20:57 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3982520526' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:20:57 compute-0 nova_compute[356901]: 2025-10-11 02:20:57.239 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.527s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:20:57 compute-0 nova_compute[356901]: 2025-10-11 02:20:57.248 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:20:57 compute-0 sudo[425762]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:20:57 compute-0 sudo[425762]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:20:57 compute-0 sudo[425762]: pam_unix(sudo:session): session closed for user root
Oct 11 02:20:57 compute-0 nova_compute[356901]: 2025-10-11 02:20:57.441 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:20:57 compute-0 podman[425788]: 2025-10-11 02:20:57.446802886 +0000 UTC m=+0.114779348 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:20:57 compute-0 sudo[425815]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:20:57 compute-0 sudo[425815]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:20:57 compute-0 sudo[425815]: pam_unix(sudo:session): session closed for user root
Oct 11 02:20:57 compute-0 podman[425791]: 2025-10-11 02:20:57.471556401 +0000 UTC m=+0.128041807 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']})
Oct 11 02:20:57 compute-0 podman[425789]: 2025-10-11 02:20:57.481511073 +0000 UTC m=+0.142776816 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.vendor=CentOS, config_id=edpm, io.buildah.version=1.41.4, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 02:20:57 compute-0 nova_compute[356901]: 2025-10-11 02:20:57.482 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:20:57 compute-0 nova_compute[356901]: 2025-10-11 02:20:57.482 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 1.177s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:20:57 compute-0 nova_compute[356901]: 2025-10-11 02:20:57.483 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_expired_console_auth_tokens run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:20:57 compute-0 podman[425790]: 2025-10-11 02:20:57.491441774 +0000 UTC m=+0.143516221 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_controller, org.label-schema.vendor=CentOS, config_id=ovn_controller, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:20:57 compute-0 sudo[425895]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:20:57 compute-0 sudo[425895]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:20:57 compute-0 sshd-session[425703]: Invalid user debian from 121.227.153.123 port 34120
Oct 11 02:20:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1290: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 63 KiB/s rd, 41 KiB/s wr, 14 op/s
Oct 11 02:20:57 compute-0 sshd-session[425703]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:20:57 compute-0 sshd-session[425703]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:20:58 compute-0 podman[425960]: 2025-10-11 02:20:58.007411137 +0000 UTC m=+0.063063583 container create 88975d6dc00c9606b14b4d8547f3d920f0e66b5c21b3fc15c4146e8ffb7f70a3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_benz, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:20:58 compute-0 systemd[1]: Started libpod-conmon-88975d6dc00c9606b14b4d8547f3d920f0e66b5c21b3fc15c4146e8ffb7f70a3.scope.
Oct 11 02:20:58 compute-0 podman[425960]: 2025-10-11 02:20:57.976548253 +0000 UTC m=+0.032200719 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:20:58 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:20:58 compute-0 podman[425960]: 2025-10-11 02:20:58.126403904 +0000 UTC m=+0.182056390 container init 88975d6dc00c9606b14b4d8547f3d920f0e66b5c21b3fc15c4146e8ffb7f70a3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_benz, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:20:58 compute-0 podman[425960]: 2025-10-11 02:20:58.138875016 +0000 UTC m=+0.194527472 container start 88975d6dc00c9606b14b4d8547f3d920f0e66b5c21b3fc15c4146e8ffb7f70a3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_benz, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 02:20:58 compute-0 podman[425960]: 2025-10-11 02:20:58.146074057 +0000 UTC m=+0.201726553 container attach 88975d6dc00c9606b14b4d8547f3d920f0e66b5c21b3fc15c4146e8ffb7f70a3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_benz, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:20:58 compute-0 happy_benz[425974]: 167 167
Oct 11 02:20:58 compute-0 systemd[1]: libpod-88975d6dc00c9606b14b4d8547f3d920f0e66b5c21b3fc15c4146e8ffb7f70a3.scope: Deactivated successfully.
Oct 11 02:20:58 compute-0 conmon[425974]: conmon 88975d6dc00c9606b14b <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-88975d6dc00c9606b14b4d8547f3d920f0e66b5c21b3fc15c4146e8ffb7f70a3.scope/container/memory.events
Oct 11 02:20:58 compute-0 podman[425960]: 2025-10-11 02:20:58.153998573 +0000 UTC m=+0.209651029 container died 88975d6dc00c9606b14b4d8547f3d920f0e66b5c21b3fc15c4146e8ffb7f70a3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_benz, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0)
Oct 11 02:20:58 compute-0 systemd[1]: var-lib-containers-storage-overlay-78a82b5089944852cfec123cdbfd8db26c848bbba6f59a8da03ad85acc686177-merged.mount: Deactivated successfully.
Oct 11 02:20:58 compute-0 podman[425960]: 2025-10-11 02:20:58.234583847 +0000 UTC m=+0.290236293 container remove 88975d6dc00c9606b14b4d8547f3d920f0e66b5c21b3fc15c4146e8ffb7f70a3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_benz, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:20:58 compute-0 systemd[1]: libpod-conmon-88975d6dc00c9606b14b4d8547f3d920f0e66b5c21b3fc15c4146e8ffb7f70a3.scope: Deactivated successfully.
Oct 11 02:20:58 compute-0 podman[425999]: 2025-10-11 02:20:58.515001924 +0000 UTC m=+0.095419999 container create f9c618689c42549f886f6535a1021e2e86b42dbcf32c9df2c87a56e5ccb9a763 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=awesome_satoshi, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:20:58 compute-0 podman[425999]: 2025-10-11 02:20:58.464076397 +0000 UTC m=+0.044494502 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:20:58 compute-0 systemd[1]: Started libpod-conmon-f9c618689c42549f886f6535a1021e2e86b42dbcf32c9df2c87a56e5ccb9a763.scope.
Oct 11 02:20:58 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:20:58 compute-0 nova_compute[356901]: 2025-10-11 02:20:58.654 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f16df41e7ba301182484272d3ce700f75c6a3b0ed5956663404b5c9b9abecbf2/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:20:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f16df41e7ba301182484272d3ce700f75c6a3b0ed5956663404b5c9b9abecbf2/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:20:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f16df41e7ba301182484272d3ce700f75c6a3b0ed5956663404b5c9b9abecbf2/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:20:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f16df41e7ba301182484272d3ce700f75c6a3b0ed5956663404b5c9b9abecbf2/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:20:58 compute-0 podman[425999]: 2025-10-11 02:20:58.690982615 +0000 UTC m=+0.271400700 container init f9c618689c42549f886f6535a1021e2e86b42dbcf32c9df2c87a56e5ccb9a763 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=awesome_satoshi, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 02:20:58 compute-0 podman[425999]: 2025-10-11 02:20:58.717177398 +0000 UTC m=+0.297595463 container start f9c618689c42549f886f6535a1021e2e86b42dbcf32c9df2c87a56e5ccb9a763 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=awesome_satoshi, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 02:20:58 compute-0 podman[425999]: 2025-10-11 02:20:58.722829356 +0000 UTC m=+0.303247481 container attach f9c618689c42549f886f6535a1021e2e86b42dbcf32c9df2c87a56e5ccb9a763 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=awesome_satoshi, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:20:58 compute-0 nova_compute[356901]: 2025-10-11 02:20:58.915 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._run_pending_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:20:58 compute-0 nova_compute[356901]: 2025-10-11 02:20:58.915 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11145
Oct 11 02:20:58 compute-0 nova_compute[356901]: 2025-10-11 02:20:58.935 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] There are 0 instances to clean _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11154
Oct 11 02:20:59 compute-0 ceph-mon[191930]: pgmap v1290: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 63 KiB/s rd, 41 KiB/s wr, 14 op/s
Oct 11 02:20:59 compute-0 nova_compute[356901]: 2025-10-11 02:20:59.277 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]: {
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:     "0": [
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:         {
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "devices": [
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "/dev/loop3"
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             ],
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "lv_name": "ceph_lv0",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "lv_size": "21470642176",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "name": "ceph_lv0",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "tags": {
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.cluster_name": "ceph",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.crush_device_class": "",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.encrypted": "0",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.osd_id": "0",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.type": "block",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.vdo": "0"
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             },
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "type": "block",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "vg_name": "ceph_vg0"
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:         }
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:     ],
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:     "1": [
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:         {
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "devices": [
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "/dev/loop4"
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             ],
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "lv_name": "ceph_lv1",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "lv_size": "21470642176",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "name": "ceph_lv1",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "tags": {
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.cluster_name": "ceph",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.crush_device_class": "",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.encrypted": "0",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.osd_id": "1",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.type": "block",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.vdo": "0"
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             },
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "type": "block",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "vg_name": "ceph_vg1"
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:         }
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:     ],
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:     "2": [
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:         {
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "devices": [
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "/dev/loop5"
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             ],
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "lv_name": "ceph_lv2",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "lv_size": "21470642176",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "name": "ceph_lv2",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "tags": {
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.cluster_name": "ceph",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.crush_device_class": "",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.encrypted": "0",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.osd_id": "2",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.type": "block",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:                 "ceph.vdo": "0"
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             },
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "type": "block",
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:             "vg_name": "ceph_vg2"
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:         }
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]:     ]
Oct 11 02:20:59 compute-0 awesome_satoshi[426015]: }
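[editor's note] The JSON block above is `ceph-volume lvm list --format json` output captured from the short-lived cephadm container (`awesome_satoshi`): the top-level keys are OSD ids, each holding one record per logical volume, with the flattened `lv_tags` string repeated as a structured `tags` object. A minimal parsing sketch; the helper name is hypothetical, and it assumes only `type: block` records appear, as in this listing:

    import json

    def osd_block_devices(lvm_list_json: str) -> dict[int, str]:
        """Map each OSD id to its backing LV path from `ceph-volume lvm list --format json` output."""
        out = {}
        for osd_id, records in json.loads(lvm_list_json).items():
            for rec in records:
                if rec.get("type") == "block":  # separate db/wal LVs would carry other types
                    out[int(osd_id)] = rec["lv_path"]
        return out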
Oct 11 02:20:59 compute-0 systemd[1]: libpod-f9c618689c42549f886f6535a1021e2e86b42dbcf32c9df2c87a56e5ccb9a763.scope: Deactivated successfully.
Oct 11 02:20:59 compute-0 podman[425999]: 2025-10-11 02:20:59.645042387 +0000 UTC m=+1.225460462 container died f9c618689c42549f886f6535a1021e2e86b42dbcf32c9df2c87a56e5ccb9a763 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=awesome_satoshi, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507)
Oct 11 02:20:59 compute-0 systemd[1]: var-lib-containers-storage-overlay-f16df41e7ba301182484272d3ce700f75c6a3b0ed5956663404b5c9b9abecbf2-merged.mount: Deactivated successfully.
Oct 11 02:20:59 compute-0 podman[425999]: 2025-10-11 02:20:59.7392997 +0000 UTC m=+1.319717765 container remove f9c618689c42549f886f6535a1021e2e86b42dbcf32c9df2c87a56e5ccb9a763 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=awesome_satoshi, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:20:59 compute-0 podman[157119]: time="2025-10-11T02:20:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:20:59 compute-0 systemd[1]: libpod-conmon-f9c618689c42549f886f6535a1021e2e86b42dbcf32c9df2c87a56e5ccb9a763.scope: Deactivated successfully.
Oct 11 02:20:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:20:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:20:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:20:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9050 "" "Go-http-client/1.1"
Oct 11 02:20:59 compute-0 sudo[425895]: pam_unix(sudo:session): session closed for user root
Oct 11 02:20:59 compute-0 sudo[426036]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:20:59 compute-0 sudo[426036]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:20:59 compute-0 sudo[426036]: pam_unix(sudo:session): session closed for user root
Oct 11 02:20:59 compute-0 sudo[426061]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:20:59 compute-0 sudo[426061]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:20:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1291: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 38 KiB/s rd, 39 KiB/s wr, 10 op/s
Oct 11 02:20:59 compute-0 sudo[426061]: pam_unix(sudo:session): session closed for user root
Oct 11 02:21:00 compute-0 sshd-session[425703]: Failed password for invalid user debian from 121.227.153.123 port 34120 ssh2
Oct 11 02:21:00 compute-0 sudo[426086]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:21:00 compute-0 sudo[426086]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:21:00 compute-0 sudo[426086]: pam_unix(sudo:session): session closed for user root
Oct 11 02:21:00 compute-0 sudo[426111]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:21:00 compute-0 sudo[426111]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:21:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:21:00 compute-0 podman[426175]: 2025-10-11 02:21:00.733498477 +0000 UTC m=+0.077743300 container create 7c7e80ed35832c1231a51ef68649da5ee9f66d58d9e86084c33d9f2474347bdc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_heyrovsky, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:21:00 compute-0 podman[426175]: 2025-10-11 02:21:00.708839617 +0000 UTC m=+0.053084470 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:21:00 compute-0 systemd[1]: Started libpod-conmon-7c7e80ed35832c1231a51ef68649da5ee9f66d58d9e86084c33d9f2474347bdc.scope.
Oct 11 02:21:00 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:21:00 compute-0 podman[426175]: 2025-10-11 02:21:00.904731883 +0000 UTC m=+0.248976756 container init 7c7e80ed35832c1231a51ef68649da5ee9f66d58d9e86084c33d9f2474347bdc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_heyrovsky, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:21:00 compute-0 podman[426175]: 2025-10-11 02:21:00.922861643 +0000 UTC m=+0.267106476 container start 7c7e80ed35832c1231a51ef68649da5ee9f66d58d9e86084c33d9f2474347bdc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_heyrovsky, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:21:00 compute-0 podman[426175]: 2025-10-11 02:21:00.928144704 +0000 UTC m=+0.272389547 container attach 7c7e80ed35832c1231a51ef68649da5ee9f66d58d9e86084c33d9f2474347bdc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_heyrovsky, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:21:00 compute-0 angry_heyrovsky[426191]: 167 167
Oct 11 02:21:00 compute-0 systemd[1]: libpod-7c7e80ed35832c1231a51ef68649da5ee9f66d58d9e86084c33d9f2474347bdc.scope: Deactivated successfully.
Oct 11 02:21:00 compute-0 conmon[426191]: conmon 7c7e80ed35832c1231a5 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-7c7e80ed35832c1231a51ef68649da5ee9f66d58d9e86084c33d9f2474347bdc.scope/container/memory.events
Oct 11 02:21:00 compute-0 podman[426175]: 2025-10-11 02:21:00.937620164 +0000 UTC m=+0.281864987 container died 7c7e80ed35832c1231a51ef68649da5ee9f66d58d9e86084c33d9f2474347bdc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_heyrovsky, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:21:01 compute-0 systemd[1]: var-lib-containers-storage-overlay-78ee424cbd9ad67c4147bf5c14e0c865e204e5c452e7a82af9d789fa7e3aba6f-merged.mount: Deactivated successfully.
Oct 11 02:21:01 compute-0 podman[426175]: 2025-10-11 02:21:01.071214633 +0000 UTC m=+0.415459456 container remove 7c7e80ed35832c1231a51ef68649da5ee9f66d58d9e86084c33d9f2474347bdc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_heyrovsky, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:21:01 compute-0 systemd[1]: libpod-conmon-7c7e80ed35832c1231a51ef68649da5ee9f66d58d9e86084c33d9f2474347bdc.scope: Deactivated successfully.
Oct 11 02:21:01 compute-0 ceph-mon[191930]: pgmap v1291: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 38 KiB/s rd, 39 KiB/s wr, 10 op/s
Oct 11 02:21:01 compute-0 podman[426214]: 2025-10-11 02:21:01.318329938 +0000 UTC m=+0.085729499 container create 3304e20ee27a05f01d5030d5d9097e7c31f77fb8165dd82a11412c700788ce7b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pedantic_kilby, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 02:21:01 compute-0 podman[426214]: 2025-10-11 02:21:01.285363804 +0000 UTC m=+0.052763425 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:21:01 compute-0 systemd[1]: Started libpod-conmon-3304e20ee27a05f01d5030d5d9097e7c31f77fb8165dd82a11412c700788ce7b.scope.
Oct 11 02:21:01 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:21:01 compute-0 openstack_network_exporter[374316]: ERROR   02:21:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:21:01 compute-0 openstack_network_exporter[374316]: ERROR   02:21:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:21:01 compute-0 openstack_network_exporter[374316]: ERROR   02:21:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:21:01 compute-0 openstack_network_exporter[374316]: ERROR   02:21:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:21:01 compute-0 openstack_network_exporter[374316]: ERROR   02:21:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:21:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e43d430d2d0c417e611021f6cdd3da9eb5e6b38d0192b27d458012e65d8b1b48/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:21:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e43d430d2d0c417e611021f6cdd3da9eb5e6b38d0192b27d458012e65d8b1b48/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:21:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e43d430d2d0c417e611021f6cdd3da9eb5e6b38d0192b27d458012e65d8b1b48/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:21:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e43d430d2d0c417e611021f6cdd3da9eb5e6b38d0192b27d458012e65d8b1b48/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:21:01 compute-0 podman[426214]: 2025-10-11 02:21:01.463954969 +0000 UTC m=+0.231354530 container init 3304e20ee27a05f01d5030d5d9097e7c31f77fb8165dd82a11412c700788ce7b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pedantic_kilby, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0)
Oct 11 02:21:01 compute-0 podman[426214]: 2025-10-11 02:21:01.472847231 +0000 UTC m=+0.240246752 container start 3304e20ee27a05f01d5030d5d9097e7c31f77fb8165dd82a11412c700788ce7b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pedantic_kilby, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:21:01 compute-0 podman[426214]: 2025-10-11 02:21:01.477425258 +0000 UTC m=+0.244824779 container attach 3304e20ee27a05f01d5030d5d9097e7c31f77fb8165dd82a11412c700788ce7b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pedantic_kilby, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True)
Oct 11 02:21:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1292: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 8.7 KiB/s wr, 0 op/s
Oct 11 02:21:02 compute-0 sshd-session[425703]: Connection closed by invalid user debian 121.227.153.123 port 34120 [preauth]
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]: {
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:         "osd_id": 1,
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:         "type": "bluestore"
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:     },
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:         "osd_id": 2,
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:         "type": "bluestore"
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:     },
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:         "osd_id": 0,
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:         "type": "bluestore"
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]:     }
Oct 11 02:21:02 compute-0 pedantic_kilby[426230]: }
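[editor's note] `ceph-volume raw list` (run via `pedantic_kilby`) reports the same three OSDs keyed by `osd_uuid`, using devicemapper paths (`/dev/mapper/ceph_vg1-ceph_lv1`) where the `lvm list` output above used LVM paths (`/dev/ceph_vg1/ceph_lv1`); both name the same logical volume. A hypothetical cross-check joining the two listings on `osd_id`:

    import json

    def crosscheck(raw_json: str, lvm_json: str) -> None:
        """Print each osd_id alongside the device path each listing reports for it."""
        lv_by_osd = {int(i): recs[0]["lv_path"] for i, recs in json.loads(lvm_json).items()}
        for info in json.loads(raw_json).values():
            print(info["osd_id"], info["device"], lv_by_osd.get(info["osd_id"]))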
Oct 11 02:21:02 compute-0 systemd[1]: libpod-3304e20ee27a05f01d5030d5d9097e7c31f77fb8165dd82a11412c700788ce7b.scope: Deactivated successfully.
Oct 11 02:21:02 compute-0 systemd[1]: libpod-3304e20ee27a05f01d5030d5d9097e7c31f77fb8165dd82a11412c700788ce7b.scope: Consumed 1.042s CPU time.
Oct 11 02:21:02 compute-0 podman[426214]: 2025-10-11 02:21:02.517648569 +0000 UTC m=+1.285048110 container died 3304e20ee27a05f01d5030d5d9097e7c31f77fb8165dd82a11412c700788ce7b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pedantic_kilby, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 02:21:02 compute-0 systemd[1]: var-lib-containers-storage-overlay-e43d430d2d0c417e611021f6cdd3da9eb5e6b38d0192b27d458012e65d8b1b48-merged.mount: Deactivated successfully.
Oct 11 02:21:02 compute-0 podman[426214]: 2025-10-11 02:21:02.589971671 +0000 UTC m=+1.357371212 container remove 3304e20ee27a05f01d5030d5d9097e7c31f77fb8165dd82a11412c700788ce7b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pedantic_kilby, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 02:21:02 compute-0 systemd[1]: libpod-conmon-3304e20ee27a05f01d5030d5d9097e7c31f77fb8165dd82a11412c700788ce7b.scope: Deactivated successfully.
Oct 11 02:21:02 compute-0 sudo[426111]: pam_unix(sudo:session): session closed for user root
Oct 11 02:21:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:21:02 compute-0 podman[426266]: 2025-10-11 02:21:02.644850775 +0000 UTC m=+0.094275604 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, config_id=multipathd, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:21:02 compute-0 podman[426268]: 2025-10-11 02:21:02.645389361 +0000 UTC m=+0.091878311 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_managed=true, org.label-schema.vendor=CentOS, container_name=iscsid, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:21:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:21:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:21:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:21:02 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 00b36607-3941-4c5c-804c-3d7f5e633f8a does not exist
Oct 11 02:21:02 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 0f1c144c-5a27-47e2-882d-3ea8a2539f2e does not exist
Oct 11 02:21:02 compute-0 sudo[426313]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:21:02 compute-0 sudo[426313]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:21:02 compute-0 sudo[426313]: pam_unix(sudo:session): session closed for user root
Oct 11 02:21:02 compute-0 sudo[426338]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:21:02 compute-0 sudo[426338]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:21:02 compute-0 sudo[426338]: pam_unix(sudo:session): session closed for user root
Oct 11 02:21:03 compute-0 ceph-mon[191930]: pgmap v1292: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 8.7 KiB/s wr, 0 op/s
Oct 11 02:21:03 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:21:03 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:21:03 compute-0 sshd-session[426263]: Invalid user debian from 121.227.153.123 port 34662
Oct 11 02:21:03 compute-0 nova_compute[356901]: 2025-10-11 02:21:03.662 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:03 compute-0 sshd-session[426263]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:21:03 compute-0 sshd-session[426263]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:21:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1293: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:21:04 compute-0 nova_compute[356901]: 2025-10-11 02:21:04.280 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:05 compute-0 ceph-mon[191930]: pgmap v1293: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:21:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:21:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1294: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:21:06 compute-0 sshd-session[426263]: Failed password for invalid user debian from 121.227.153.123 port 34662 ssh2
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0011043162320114605 of space, bias 1.0, pg target 0.3312948696034382 quantized to 32 (current 32)
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:21:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
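[editor's note] Every `pg target` in the autoscaler run above reproduces exactly as `used_ratio * bias * 300`, consistent with the default `mon_target_pg_per_osd = 100` spread across this host's three OSDs; the `quantized to` step then rounds toward a power of two and applies per-pool floors (the floor values are an assumption here, not something the log states). A worked check against the logged numbers:

    def raw_pg_target(used_ratio: float, bias: float,
                      target_pg_per_osd: int = 100, n_osds: int = 3) -> float:
        return used_ratio * bias * target_pg_per_osd * n_osds

    # Agrees with the logged values for the 'vms' and 'cephfs.cephfs.meta' pools:
    assert abs(raw_pg_target(0.0011043162320114605, 1.0) - 0.3312948696034382) < 1e-12
    assert abs(raw_pg_target(5.087256625643029e-07, 4.0) - 0.0006104707950771635) < 1e-12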
Oct 11 02:21:07 compute-0 ceph-mon[191930]: pgmap v1294: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:21:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1295: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:08 compute-0 sshd-session[426263]: Connection closed by invalid user debian 121.227.153.123 port 34662 [preauth]
Oct 11 02:21:08 compute-0 ceph-mon[191930]: pgmap v1295: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:08 compute-0 nova_compute[356901]: 2025-10-11 02:21:08.666 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:09 compute-0 nova_compute[356901]: 2025-10-11 02:21:09.284 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:09 compute-0 sshd-session[426363]: Invalid user debian from 121.227.153.123 port 34668
Oct 11 02:21:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:21:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 2400.0 total, 600.0 interval
                                            Cumulative writes: 5974 writes, 26K keys, 5974 commit groups, 1.0 writes per commit group, ingest: 0.04 GB, 0.02 MB/s
                                            Cumulative WAL: 5974 writes, 5974 syncs, 1.00 writes per sync, written: 0.04 GB, 0.02 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 1365 writes, 6152 keys, 1365 commit groups, 1.0 writes per commit group, ingest: 8.80 MB, 0.01 MB/s
                                            Interval WAL: 1365 writes, 1365 syncs, 1.00 writes per sync, written: 0.01 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0    120.5      0.25              0.13        15    0.017       0      0       0.0       0.0
                                              L6      1/0    7.08 MB   0.0      0.1     0.0      0.1       0.1      0.0       0.0   3.3    174.7    141.6      0.70              0.45        14    0.050     63K   7813       0.0       0.0
                                             Sum      1/0    7.08 MB   0.0      0.1     0.0      0.1       0.1      0.0       0.0   4.3    128.6    136.0      0.95              0.58        29    0.033     63K   7813       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   4.6    128.5    129.6      0.29              0.17         8    0.037     20K   2551       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Low      0/0    0.00 KB   0.0      0.1     0.0      0.1       0.1      0.0       0.0   0.0    174.7    141.6      0.70              0.45        14    0.050     63K   7813       0.0       0.0
                                            High      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0    122.4      0.25              0.13        14    0.018       0      0       0.0       0.0
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0     12.0      0.00              0.00         1    0.004       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 2400.0 total, 600.0 interval
                                            Flush(GB): cumulative 0.030, interval 0.008
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.13 GB write, 0.05 MB/s write, 0.12 GB read, 0.05 MB/s read, 1.0 seconds
                                            Interval compaction: 0.04 GB write, 0.06 MB/s write, 0.04 GB read, 0.06 MB/s read, 0.3 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x55816e47f1f0#2 capacity: 308.00 MB usage: 13.51 MB table_size: 0 occupancy: 18446744073709551615 collections: 5 last_copies: 0 last_secs: 0.000174 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(854,13.00 MB,4.21953%) FilterBlock(30,183.92 KB,0.0583153%) IndexBlock(30,342.45 KB,0.10858%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
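[editor's note] Two lines of the RocksDB dump above are enough to recover the 4.3 in the Sum row's W-Amp column: flushes wrote 0.030 GB of fresh data and compaction rewrote 0.13 GB on top of it. A quick check, reading W-Amp as compaction write bytes over flushed bytes, which matches these numbers:

    flush_gb = 0.030             # "Flush(GB): cumulative 0.030"
    compaction_write_gb = 0.13   # "Cumulative compaction: 0.13 GB write"
    w_amp = compaction_write_gb / flush_gb
    print(round(w_amp, 1))       # 4.3, the Sum row's W-Amp column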
Oct 11 02:21:09 compute-0 sshd-session[426363]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:21:09 compute-0 sshd-session[426363]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:21:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1296: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:21:11 compute-0 ceph-mon[191930]: pgmap v1296: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:11 compute-0 sshd-session[426363]: Failed password for invalid user debian from 121.227.153.123 port 34668 ssh2
Oct 11 02:21:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1297: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:13 compute-0 ceph-mon[191930]: pgmap v1297: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:13 compute-0 nova_compute[356901]: 2025-10-11 02:21:13.672 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:13 compute-0 sshd-session[426363]: Connection closed by invalid user debian 121.227.153.123 port 34668 [preauth]
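[editor's note] The sshd lines threaded through this window (ports 34120, 34662, 34668) are a single host, 121.227.153.123, cycling password guesses for the invalid user "debian". A sketch that tallies such attempts per source address from journal text; the message pattern is taken verbatim from the lines above:

    import re
    from collections import Counter

    FAILED = re.compile(r"Failed password for (?:invalid user )?(\S+) from (\S+) port \d+ ssh2")

    def failed_logins_by_host(lines):
        """Count 'Failed password' events per source IP address."""
        hits = Counter()
        for line in lines:
            m = FAILED.search(line)
            if m:
                hits[m.group(2)] += 1
        return hits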
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.861 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to be longer than the expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.862 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.862 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.863 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.871 14 DEBUG ceilometer.compute.discovery [-] Querying metadata for instance d60d7ea1-5d00-4902-90e6-3ae67eb09a78 from Nova API get_server /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:176
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.879 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
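Note: each "Registering pollster" line above wraps a stevedore Extension, i.e. a plugin loaded from a setuptools entry point. A minimal sketch of enumerating the same plugins with stevedore; the namespace name is an assumption about the entry-point group the compute agent loads its pollsters from:

    from stevedore import extension

    # 'ceilometer.poll.compute' is assumed to be the entry-point namespace
    # for compute-agent pollsters; adjust if your install differs.
    mgr = extension.ExtensionManager(namespace='ceilometer.poll.compute',
                                     invoke_on_load=False)
    print(sorted(ext.name for ext in mgr))  # e.g. network.incoming.bytes, ...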
Oct 11 02:21:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:13.875 14 DEBUG novaclient.v2.client [-] REQ: curl -g -i -X GET https://nova-internal.openstack.svc:8774/v2.1/servers/d60d7ea1-5d00-4902-90e6-3ae67eb09a78 -H "Accept: application/json" -H "User-Agent: python-novaclient" -H "X-Auth-Token: {SHA256}d674387017edb5d8543811c363b3a2965950a94ddf4462840fede0e79ac258e9" -H "X-OpenStack-Nova-API-Version: 2.1" _http_log_request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:572
Oct 11 02:21:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1298: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:14 compute-0 podman[426368]: 2025-10-11 02:21:14.24350146 +0000 UTC m=+0.120646016 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, managed_by=edpm_ansible, com.redhat.component=ubi9-minimal-container, io.buildah.version=1.33.7, release=1755695350, vcs-type=git, architecture=x86_64, url=https://catalog.redhat.com/en/search?searchType=containers, version=9.6, config_id=edpm, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vendor=Red Hat, Inc., io.openshift.expose-services=, build-date=2025-08-20T13:12:41, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., container_name=openstack_network_exporter, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.tags=minimal rhel9, name=ubi9-minimal)
Oct 11 02:21:14 compute-0 podman[426366]: 2025-10-11 02:21:14.246461531 +0000 UTC m=+0.131707491 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, managed_by=edpm_ansible, config_id=edpm, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true)
Oct 11 02:21:14 compute-0 podman[426369]: 2025-10-11 02:21:14.26542335 +0000 UTC m=+0.134395828 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
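Note: the three podman health_status events above embed each container's config_data as a Python dict literal (single quotes, bare True), which json.loads rejects; ast.literal_eval parses it safely. A minimal sketch, assuming the config_data value has already been cut out of the event line (the fragment below is abbreviated, not the full value):

    import ast

    # Abbreviated stand-in for the config_data={...} value carried in a
    # podman health_status event; the real value is much longer.
    fragment = ("{'image': 'quay.io/prometheus/node-exporter:v1.5.0', "
                "'restart': 'always', 'recreate': True, 'ports': ['9100:9100']}")

    cfg = ast.literal_eval(fragment)  # literals only, no code execution
    print(cfg['image'], cfg['ports'])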
Oct 11 02:21:14 compute-0 nova_compute[356901]: 2025-10-11 02:21:14.287 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.943 14 DEBUG novaclient.v2.client [-] RESP: [200] Connection: Keep-Alive Content-Length: 1959 Content-Type: application/json Date: Sat, 11 Oct 2025 02:21:13 GMT Keep-Alive: timeout=5, max=100 OpenStack-API-Version: compute 2.1 Server: Apache Vary: OpenStack-API-Version,X-OpenStack-Nova-API-Version X-OpenStack-Nova-API-Version: 2.1 x-compute-request-id: req-373477d7-72e5-4a51-9a24-98b864c57d03 x-openstack-request-id: req-373477d7-72e5-4a51-9a24-98b864c57d03 _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:613
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.944 14 DEBUG novaclient.v2.client [-] RESP BODY: {"server": {"id": "d60d7ea1-5d00-4902-90e6-3ae67eb09a78", "name": "vn-vgckve2-ittzoa6m3dmq-egfg3ceao3k4-vnf-rvnztbwt2zgh", "status": "ACTIVE", "tenant_id": "97026531b3404a11869cb85a059c4a0d", "user_id": "d215f3ebbc07435493ccd666fc80109d", "metadata": {"metering.server_group": "3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e"}, "hostId": "2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736", "image": {"id": "a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/images/a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7"}]}, "flavor": {"id": "486e1451-345c-45d6-b075-f4717e759025", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/flavors/486e1451-345c-45d6-b075-f4717e759025"}]}, "created": "2025-10-11T02:19:56Z", "updated": "2025-10-11T02:20:07Z", "addresses": {"private": [{"version": 4, "addr": "192.168.0.80", "OS-EXT-IPS:type": "fixed", "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:c2:ee:14"}, {"version": 4, "addr": "192.168.122.245", "OS-EXT-IPS:type": "floating", "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:c2:ee:14"}]}, "accessIPv4": "", "accessIPv6": "", "links": [{"rel": "self", "href": "https://nova-internal.openstack.svc:8774/v2.1/servers/d60d7ea1-5d00-4902-90e6-3ae67eb09a78"}, {"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/servers/d60d7ea1-5d00-4902-90e6-3ae67eb09a78"}], "OS-DCF:diskConfig": "MANUAL", "progress": 0, "OS-EXT-AZ:availability_zone": "nova", "config_drive": "True", "key_name": null, "OS-SRV-USG:launched_at": "2025-10-11T02:20:07.000000", "OS-SRV-USG:terminated_at": null, "security_groups": [{"name": "basic"}], "OS-EXT-SRV-ATTR:host": "compute-0.ctlplane.example.com", "OS-EXT-SRV-ATTR:instance_name": "instance-00000002", "OS-EXT-SRV-ATTR:hypervisor_hostname": "compute-0.ctlplane.example.com", "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-EXT-STS:power_state": 1, "os-extended-volumes:volumes_attached": []}} _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:648
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.944 14 DEBUG novaclient.v2.client [-] GET call to compute for https://nova-internal.openstack.svc:8774/v2.1/servers/d60d7ea1-5d00-4902-90e6-3ae67eb09a78 used request id req-373477d7-72e5-4a51-9a24-98b864c57d03 request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:1073
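Note: the REQ/RESP pair above is the agent's metadata lookup against the Nova API (GET /v2.1/servers/{id} with X-OpenStack-Nova-API-Version: 2.1, token redacted as a SHA256 in the log). A minimal sketch reproducing the same call with keystoneauth1; the auth URL and credentials below are placeholders, not values from this deployment:

    from keystoneauth1.identity import v3
    from keystoneauth1 import session

    # Placeholder credentials -- substitute real ones for your cloud.
    auth = v3.Password(auth_url='https://keystone-internal.openstack.svc:5000/v3',
                       username='ceilometer', password='secret',
                       project_name='service',
                       user_domain_name='Default', project_domain_name='Default')
    sess = session.Session(auth=auth)  # injects X-Auth-Token on each request

    server_id = 'd60d7ea1-5d00-4902-90e6-3ae67eb09a78'
    resp = sess.get('https://nova-internal.openstack.svc:8774/v2.1/servers/' + server_id,
                    headers={'X-OpenStack-Nova-API-Version': '2.1'})
    # e.g. the metering.server_group key visible in the RESP BODY above
    print(resp.json()['server']['metadata'])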
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.946 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': 'd60d7ea1-5d00-4902-90e6-3ae67eb09a78', 'name': 'vn-vgckve2-ittzoa6m3dmq-egfg3ceao3k4-vnf-rvnztbwt2zgh', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000002', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {'metering.server_group': '3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.952 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
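Note: discover_libvirt_polling builds the two instance records above by enumerating the local libvirt domains (instance-00000001, instance-00000002) and folding in the Nova metadata fetched earlier. A minimal sketch of the enumeration step only, using libvirt-python; the qemu:///system connection URI is an assumption:

    import libvirt

    # Read-only connection to the local hypervisor.
    conn = libvirt.openReadOnly('qemu:///system')
    for dom in conn.listAllDomains(libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE):
        # e.g. instance-00000001 / instance-00000002 as seen above
        print(dom.name(), dom.UUIDString())
    conn.close()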
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.953 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.953 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.953 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.954 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.955 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:21:14.954083) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.965 14 DEBUG ceilometer.compute.virt.libvirt.inspector [-] No delta meter predecessor for d60d7ea1-5d00-4902-90e6-3ae67eb09a78 / tapa7108c4c-c9 inspect_vnics /usr/lib/python3.12/site-packages/ceilometer/compute/virt/libvirt/inspector.py:143
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.965 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.bytes volume: 1828 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.974 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2268 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.975 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
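Note: the "No delta meter predecessor" line above marks the first sample for a vNIC: a delta meter needs a prior cumulative reading to subtract, so the first poll can only seed the cache. An illustrative sketch of that pattern (not ceilometer's actual implementation):

    # Cache of previous cumulative counters, keyed by (instance_id, device).
    _prev = {}

    def delta_sample(instance_id, device, cumulative):
        """Return cumulative minus predecessor, or None on the first poll."""
        key = (instance_id, device)
        prev = _prev.get(key)
        _prev[key] = cumulative
        if prev is None:
            return None  # "No delta meter predecessor" -- nothing to diff yet
        return cumulative - prev

    delta_sample('d60d7ea1', 'tapa7108c4c-c9', 1828)  # first poll -> None
    delta_sample('d60d7ea1', 'tapa7108c4c-c9', 2100)  # -> 272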
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.975 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.975 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.975 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.976 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.976 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.976 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.packets volume: 20 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.977 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:21:14.976503) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.978 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 20 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.979 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.979 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.979 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.979 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.980 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.980 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.980 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.981 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:21:14.980506) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.981 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.982 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.983 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.983 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.983 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.984 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.984 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.984 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.985 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.986 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:21:14.984440) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.986 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.987 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.987 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.987 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.988 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.988 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:14.988 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:21:14.988380) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.019 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.020 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.021 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.capacity volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.057 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.058 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.059 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.060 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
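Note: the capacity samples line up with the m1.small flavor shown earlier ('disk': 1, 'ephemeral': 1): 1073741824 bytes is exactly 1 GiB for the root and ephemeral devices, and the third, much smaller device is plausibly the config drive ("config_drive": "True" in the RESP BODY); that last mapping is an inference, not something the log states. The arithmetic:

    GiB = 1024 ** 3
    assert 1 * GiB == 1073741824   # root disk, flavor 'disk': 1
    assert 1 * GiB == 1073741824   # ephemeral disk, flavor 'ephemeral': 1
    print(583680 / 1024, "KiB")    # third device: 570.0 KiB (config drive?)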
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.061 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.061 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.061 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.061 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.062 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.063 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:21:15.062190) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceph-mon[191930]: pgmap v1298: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:15 compute-0 sshd-session[426367]: Invalid user debian from 121.227.153.123 port 44686
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.135 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.137 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.138 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.bytes volume: 385378 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.204 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.205 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.205 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.206 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.206 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.206 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.206 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.206 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.207 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.207 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.latency volume: 1837766330 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.207 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.latency volume: 293231554 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.207 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.latency volume: 250459547 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.208 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.208 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.208 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.209 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.209 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.209 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.209 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.209 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.209 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.210 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.210 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:21:15.207018) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.210 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.210 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:21:15.209936) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.210 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.requests volume: 124 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.211 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.211 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.211 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.211 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.212 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.212 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.212 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.212 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.212 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.212 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.212 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.213 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.usage volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.213 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:21:15.212417) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.213 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.213 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.214 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.214 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.214 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.214 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.214 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.215 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.215 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.215 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.bytes volume: 41697280 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.215 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.215 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.215 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:21:15.215051) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.216 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.216 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.216 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.217 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
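The run above is one complete pollster cycle: discovery via [local_instances], a coordination check against a (here absent) hashring, a heartbeat update, one DEBUG "volume:" sample per instance device (three per UUID here, likely one per attached block device), and a closing INFO line. A minimal, self-contained sketch of that control flow, using illustrative stand-in names rather than ceilometer's real internals:

```python
# Sketch of the per-pollster cycle traced in the log above; all names
# are illustrative stand-ins, not ceilometer's actual API.
from datetime import datetime, timezone

def discover_local_instances():
    # stands in for "Executing discovery process ... [local_instances]"
    return ["d60d7ea1-5d00-4902-90e6-3ae67eb09a78",
            "0cc56d17-ec3a-4408-bccb-91b29427379e"]

def poll(meter, coordination_group=None):
    instances = discover_local_instances()
    if coordination_group is not None:
        # hashring filtering would happen here; the log shows group [None],
        # so every discovered instance is polled by this agent
        pass
    print(f"Pollster heartbeat update: {meter}")
    for uuid in instances:
        volume = 0  # the real value comes from libvirt per-device stats
        print(f"{uuid}/{meter} volume: {volume}")
    print(f"Updated heartbeat for {meter} "
          f"({datetime.now(timezone.utc).isoformat()})")
    print(f"Finished polling pollster {meter}")

poll("disk.device.write.bytes")
```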
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.217 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.217 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.217 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.217 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.218 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.218 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.latency volume: 4811296701 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.218 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:21:15.218023) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.218 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.latency volume: 26893276 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.218 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.219 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.219 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.219 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.220 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.220 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.220 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.220 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.220 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.220 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.221 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:21:15.220668) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.252 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.278 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.279 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
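Both instances report power.state volume: 1. Assuming the conventional Nova power-state integer encoding (nova.compute.power_state), a value of 1 means "running"; a hedged decode of the two samples:

```python
# Decoding the power.state samples above under the assumption that they
# follow nova.compute.power_state's integer encoding.
POWER_STATE = {0: "nostate", 1: "running", 3: "paused",
               4: "shutdown", 6: "crashed", 7: "suspended"}

for uuid, volume in [("d60d7ea1-5d00-4902-90e6-3ae67eb09a78", 1),
                     ("0cc56d17-ec3a-4408-bccb-91b29427379e", 1)]:
    print(uuid, "->", POWER_STATE.get(volume, "unknown"))
```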
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.279 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.279 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.280 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.280 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.280 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.281 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.requests volume: 220 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.281 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.282 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.282 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.283 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.281 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:21:15.280801) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.283 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.284 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.284 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.284 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.284 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.285 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.285 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.285 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.285 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:21:15.285303) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.286 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 2158 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.286 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
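Unlike the cumulative network.incoming.bytes meter, the .delta variant publishes the change since the previous poll: 0 for the idle first instance, 2158 bytes for the second. A minimal sketch of deriving a delta sample from cumulative counters (the cache dict is an illustrative stand-in for however the agent persists the last reading):

```python
# Deriving .delta samples from cumulative counters; first poll yields 0
# because there is no previous reading to difference against.
last: dict[str, int] = {}

def delta_sample(uuid: str, cumulative: int) -> int:
    d = cumulative - last.get(uuid, cumulative)
    last[uuid] = cumulative
    return d

print(delta_sample("0cc56d17", 10_000))  # 0 (no previous reading)
print(delta_sample("0cc56d17", 12_158))  # 2158
```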
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.287 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.287 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.rate in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.287 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.287 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.287 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.rate heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.288 14 DEBUG ceilometer.compute.pollsters [-] LibvirtInspector does not provide data for IncomingBytesRatePollster get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:162
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.288 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.rate (2025-10-11T02:21:15.287762) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.288 14 ERROR ceilometer.polling.manager [-] Prevent pollster network.incoming.bytes.rate from polling [<NovaLikeServer: vn-vgckve2-ittzoa6m3dmq-egfg3ceao3k4-vnf-rvnztbwt2zgh>] on source pollsters anymore!: ceilometer.polling.plugin_base.PollsterPermanentError: [<NovaLikeServer: vn-vgckve2-ittzoa6m3dmq-egfg3ceao3k4-vnf-rvnztbwt2zgh>]
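This ERROR is expected behavior rather than a fault: the DEBUG line just above it says the LibvirtInspector provides no data for IncomingBytesRatePollster, so the pollster raises PollsterPermanentError and the manager stops asking it to poll those instances on this source. A self-contained sketch of that blacklisting pattern (illustrative names and a placeholder resource id, not ceilometer's real code):

```python
# Illustrative sketch of permanent-error blacklisting: once a pollster
# signals it can never serve a resource, the manager drops that resource
# from later polls on the same source.
class PollsterPermanentError(Exception):
    def __init__(self, resources):
        super().__init__(str(resources))
        self.fail = resources

blacklist: set[str] = set()

def poll_once(meter: str, resources: list[str]):
    try:
        if meter.endswith(".rate"):
            # mirrors "LibvirtInspector does not provide data for ..."
            raise PollsterPermanentError(resources)
        return [(r, 0) for r in resources]
    except PollsterPermanentError as exc:
        blacklist.update(exc.fail)  # "Prevent pollster ... anymore!"
        return []

poll_once("network.incoming.bytes.rate", ["instance-a"])
print(blacklist)  # {'instance-a'}
```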
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.289 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.289 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.289 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.289 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.289 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.290 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:21:15.289688) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.290 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.291 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.291 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.291 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.291 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.292 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.292 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.packets volume: 15 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.292 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 19 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.293 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.293 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.294 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.294 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:21:15.291940) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.294 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.295 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.295 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.296 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.296 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 2132 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.296 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:21:15.295686) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.297 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.298 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.298 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.298 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.298 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.299 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.299 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:21:15.298962) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.300 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.300 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.301 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.301 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.301 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.301 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.302 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.302 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.303 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.303 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.304 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.304 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.304 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.304 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.305 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.305 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.305 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:21:15.301856) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.306 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.allocation volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.306 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.306 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:21:15.304773) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.307 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.307 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.308 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
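The allocation volumes are bytes: 1073741824 is exactly 2**30, i.e. 1 GiB virtual disks, while the small values (583680, 485376) are KiB-aligned and plausibly config-drive-sized devices; that reading is an inference from the numbers, not something the log states. Quick arithmetic check:

```python
# Sanity arithmetic for the disk.device.allocation samples above (bytes).
print(1073741824 == 2**30)   # True: exactly 1 GiB
print(583680 / 1024)         # 570.0 KiB
print(485376 / 1024)         # 474.0 KiB
```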
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.308 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.308 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.308 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.308 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.308 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.309 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.309 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.309 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.310 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.310 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:21:15.308916) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.310 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.310 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.310 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.310 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.311 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/cpu volume: 35400000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.311 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:21:15.310869) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.311 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 35070000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.311 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
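The cpu meter is cumulative guest CPU time in nanoseconds, so a single sample (e.g. 35400000000 ns, about 35.4 s) only becomes a utilization figure when differenced against the previous poll. A sketch with made-up numbers and an assumed 300 s polling interval:

```python
# Turning two cumulative cpu samples (ns) into average utilization.
# The earlier sample, interval, and vCPU count are assumptions for
# illustration, not values taken from this log.
prev_ns = 35_100_000_000      # hypothetical previous poll
curr_ns = 35_400_000_000      # the sample logged above
interval_s, vcpus = 300, 1    # assumed cadence and vCPU count

util_pct = (curr_ns - prev_ns) / (interval_s * 1e9 * vcpus) * 100
print(f"{util_pct:.2f}%")     # 0.10% for these numbers
```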
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.312 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.312 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.312 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.312 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.312 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.312 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.bytes volume: 2188 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.313 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2132 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.313 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:21:15.312512) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.313 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.313 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.314 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.314 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.314 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.314 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.314 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/memory.usage volume: 49.5078125 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.314 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:21:15.314305) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.314 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.87109375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.315 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
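memory.usage is reported in MB, and the fractional volumes look like a straight KiB-to-MB division of libvirt dommemstat values: 49.5078125 MB is exactly 50696 KiB and 48.87109375 MB is exactly 50044 KiB. Checking:

```python
# The memory.usage volumes above, converted back to KiB, land on whole
# numbers, consistent with a KiB-denominated libvirt statistic.
for mb in (49.5078125, 48.87109375):
    print(mb, "->", mb * 1024, "KiB")   # 50696.0 and 50044.0
```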
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.315 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.315 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.rate in the context of pollsters
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.315 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.315 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.315 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.rate heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.315 14 DEBUG ceilometer.compute.pollsters [-] LibvirtInspector does not provide data for OutgoingBytesRatePollster get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:162
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.316 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.rate (2025-10-11T02:21:15.315830) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.316 14 ERROR ceilometer.polling.manager [-] Prevent pollster network.outgoing.bytes.rate from polling [<NovaLikeServer: vn-vgckve2-ittzoa6m3dmq-egfg3ceao3k4-vnf-rvnztbwt2zgh>] on source pollsters anymore!: ceilometer.polling.plugin_base.PollsterPermanentError: [<NovaLikeServer: vn-vgckve2-ittzoa6m3dmq-egfg3ceao3k4-vnf-rvnztbwt2zgh>]
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.316 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.317 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.317 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.317 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.317 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.317 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.317 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.317 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.317 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.317 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.318 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.318 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.318 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.318 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.318 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.318 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.318 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.318 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.318 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.318 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.318 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.319 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.319 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.319 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.319 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:21:15.319 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:21:15 compute-0 sshd-session[426367]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:21:15 compute-0 sshd-session[426367]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:21:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1299: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:21:17 compute-0 ceph-mon[191930]: pgmap v1299: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:21:17 compute-0 sshd-session[426367]: Failed password for invalid user debian from 121.227.153.123 port 44686 ssh2
Oct 11 02:21:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1300: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:21:18 compute-0 podman[426429]: 2025-10-11 02:21:18.254821567 +0000 UTC m=+0.139345613 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.buildah.version=1.29.0, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.expose-services=, architecture=x86_64, com.redhat.component=ubi9-container, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9., vendor=Red Hat, Inc., distribution-scope=public, io.openshift.tags=base rhel9, name=ubi9, release=1214.1726694543, managed_by=edpm_ansible, container_name=kepler, maintainer=Red Hat, Inc., build-date=2024-09-18T21:23:30, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, version=9.4)
Oct 11 02:21:18 compute-0 nova_compute[356901]: 2025-10-11 02:21:18.678 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:19 compute-0 ceph-mon[191930]: pgmap v1300: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:21:19 compute-0 nova_compute[356901]: 2025-10-11 02:21:19.291 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:19 compute-0 sshd-session[426367]: Connection closed by invalid user debian 121.227.153.123 port 44686 [preauth]
Oct 11 02:21:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1301: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.7 KiB/s wr, 1 op/s
Oct 11 02:21:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:21:20 compute-0 sshd-session[426450]: Invalid user debian from 121.227.153.123 port 41346
Oct 11 02:21:20 compute-0 sshd-session[426450]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:21:20 compute-0 sshd-session[426450]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:21:21 compute-0 ceph-mon[191930]: pgmap v1301: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.7 KiB/s wr, 1 op/s
Oct 11 02:21:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1302: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.7 KiB/s wr, 1 op/s
Oct 11 02:21:22 compute-0 sshd-session[426450]: Failed password for invalid user debian from 121.227.153.123 port 41346 ssh2
Oct 11 02:21:23 compute-0 ceph-mon[191930]: pgmap v1302: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.7 KiB/s wr, 1 op/s
Oct 11 02:21:23 compute-0 sshd-session[426450]: Connection closed by invalid user debian 121.227.153.123 port 41346 [preauth]
Oct 11 02:21:23 compute-0 nova_compute[356901]: 2025-10-11 02:21:23.684 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1303: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.8 KiB/s wr, 1 op/s
Oct 11 02:21:24 compute-0 nova_compute[356901]: 2025-10-11 02:21:24.294 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:24 compute-0 sshd-session[426452]: Invalid user debian from 121.227.153.123 port 41360
Oct 11 02:21:24 compute-0 sshd-session[426452]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:21:24 compute-0 sshd-session[426452]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:21:25 compute-0 ceph-mon[191930]: pgmap v1303: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.8 KiB/s wr, 1 op/s
Oct 11 02:21:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:21:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1304: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 6.8 KiB/s wr, 1 op/s
Oct 11 02:21:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:21:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:21:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:21:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:21:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:21:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:21:27 compute-0 ceph-mon[191930]: pgmap v1304: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 6.8 KiB/s wr, 1 op/s
Oct 11 02:21:27 compute-0 sshd-session[426452]: Failed password for invalid user debian from 121.227.153.123 port 41360 ssh2
Oct 11 02:21:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:21:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3907250316' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:21:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:21:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3907250316' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:21:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1305: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 5.8 KiB/s wr, 1 op/s
Oct 11 02:21:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3907250316' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:21:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3907250316' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:21:28 compute-0 podman[426456]: 2025-10-11 02:21:28.203050608 +0000 UTC m=+0.087042920 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=edpm, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute)
Oct 11 02:21:28 compute-0 podman[426457]: 2025-10-11 02:21:28.22843243 +0000 UTC m=+0.101574852 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent)
Oct 11 02:21:28 compute-0 podman[426454]: 2025-10-11 02:21:28.237006978 +0000 UTC m=+0.127920859 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:21:28 compute-0 podman[426455]: 2025-10-11 02:21:28.240968501 +0000 UTC m=+0.125835014 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, io.buildah.version=1.41.3, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=ovn_controller)
Oct 11 02:21:28 compute-0 nova_compute[356901]: 2025-10-11 02:21:28.689 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:28 compute-0 sshd-session[426452]: Connection closed by invalid user debian 121.227.153.123 port 41360 [preauth]
Oct 11 02:21:29 compute-0 ceph-mon[191930]: pgmap v1305: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 5.8 KiB/s wr, 1 op/s
Oct 11 02:21:29 compute-0 nova_compute[356901]: 2025-10-11 02:21:29.297 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:29 compute-0 podman[157119]: time="2025-10-11T02:21:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:21:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:21:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:21:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:21:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9045 "" "Go-http-client/1.1"
Oct 11 02:21:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1306: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 5.8 KiB/s wr, 1 op/s
Oct 11 02:21:30 compute-0 sshd-session[426539]: Invalid user debian from 121.227.153.123 port 41372
Oct 11 02:21:30 compute-0 sshd-session[426539]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:21:30 compute-0 sshd-session[426539]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:21:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:21:31 compute-0 ceph-mon[191930]: pgmap v1306: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 5.8 KiB/s wr, 1 op/s
Oct 11 02:21:31 compute-0 openstack_network_exporter[374316]: ERROR   02:21:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:21:31 compute-0 openstack_network_exporter[374316]: ERROR   02:21:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:21:31 compute-0 openstack_network_exporter[374316]: ERROR   02:21:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:21:31 compute-0 openstack_network_exporter[374316]: ERROR   02:21:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:21:31 compute-0 openstack_network_exporter[374316]: ERROR   02:21:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:21:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1307: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 2.7 KiB/s wr, 0 op/s
Oct 11 02:21:32 compute-0 sshd-session[426539]: Failed password for invalid user debian from 121.227.153.123 port 41372 ssh2
Oct 11 02:21:32 compute-0 sshd-session[426539]: Connection closed by invalid user debian 121.227.153.123 port 41372 [preauth]
Oct 11 02:21:33 compute-0 podman[426544]: 2025-10-11 02:21:33.21292397 +0000 UTC m=+0.099868510 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, config_id=iscsid, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20251009)
Oct 11 02:21:33 compute-0 ceph-mon[191930]: pgmap v1307: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 2.7 KiB/s wr, 0 op/s
Oct 11 02:21:33 compute-0 podman[426543]: 2025-10-11 02:21:33.229066491 +0000 UTC m=+0.119228847 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, container_name=multipathd, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=multipathd, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']})
Oct 11 02:21:33 compute-0 nova_compute[356901]: 2025-10-11 02:21:33.693 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1308: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 2.7 KiB/s wr, 0 op/s
Oct 11 02:21:34 compute-0 sshd-session[426541]: Invalid user debian from 121.227.153.123 port 34284
Oct 11 02:21:34 compute-0 nova_compute[356901]: 2025-10-11 02:21:34.299 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:34 compute-0 sshd-session[426541]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:21:34 compute-0 sshd-session[426541]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:21:35 compute-0 ceph-mon[191930]: pgmap v1308: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 2.7 KiB/s wr, 0 op/s
Oct 11 02:21:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:21:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1309: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 2.7 KiB/s wr, 0 op/s
Oct 11 02:21:36 compute-0 sshd-session[426541]: Failed password for invalid user debian from 121.227.153.123 port 34284 ssh2
Oct 11 02:21:36 compute-0 sshd-session[426541]: Connection closed by invalid user debian 121.227.153.123 port 34284 [preauth]
Oct 11 02:21:37 compute-0 ceph-mon[191930]: pgmap v1309: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 2.7 KiB/s wr, 0 op/s
Oct 11 02:21:37 compute-0 sshd-session[426582]: Invalid user debian from 121.227.153.123 port 34292
Oct 11 02:21:37 compute-0 sshd-session[426582]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:21:37 compute-0 sshd-session[426582]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:21:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1310: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 2.7 KiB/s wr, 0 op/s
Oct 11 02:21:38 compute-0 nova_compute[356901]: 2025-10-11 02:21:38.699 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:39 compute-0 ceph-mon[191930]: pgmap v1310: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 2.7 KiB/s wr, 0 op/s
Oct 11 02:21:39 compute-0 nova_compute[356901]: 2025-10-11 02:21:39.302 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:39 compute-0 sshd-session[426582]: Failed password for invalid user debian from 121.227.153.123 port 34292 ssh2
Oct 11 02:21:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1311: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 2.7 KiB/s wr, 0 op/s
Oct 11 02:21:40 compute-0 sshd-session[426582]: Connection closed by invalid user debian 121.227.153.123 port 34292 [preauth]
Oct 11 02:21:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:21:41 compute-0 ceph-mon[191930]: pgmap v1311: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 2.7 KiB/s wr, 0 op/s
Oct 11 02:21:41 compute-0 sshd-session[426584]: Invalid user debian from 121.227.153.123 port 41588
Oct 11 02:21:41 compute-0 sshd-session[426584]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:21:41 compute-0 sshd-session[426584]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:21:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1312: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 2.7 KiB/s wr, 0 op/s
Oct 11 02:21:43 compute-0 ceph-mon[191930]: pgmap v1312: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 2.7 KiB/s wr, 0 op/s
Oct 11 02:21:43 compute-0 nova_compute[356901]: 2025-10-11 02:21:43.706 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:43 compute-0 sshd-session[426584]: Failed password for invalid user debian from 121.227.153.123 port 41588 ssh2
Oct 11 02:21:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1313: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:44 compute-0 nova_compute[356901]: 2025-10-11 02:21:44.306 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:44 compute-0 podman[426586]: 2025-10-11 02:21:44.791772434 +0000 UTC m=+0.096983267 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:21:44 compute-0 podman[426587]: 2025-10-11 02:21:44.802394016 +0000 UTC m=+0.097976563 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=1755695350, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., name=ubi9-minimal, vendor=Red Hat, Inc., container_name=openstack_network_exporter, distribution-scope=public, vcs-type=git, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.component=ubi9-minimal-container, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.6, url=https://catalog.redhat.com/en/search?searchType=containers, io.buildah.version=1.33.7, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, architecture=x86_64, config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.expose-services=)
Oct 11 02:21:44 compute-0 podman[426588]: 2025-10-11 02:21:44.811180152 +0000 UTC m=+0.111598533 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:21:45 compute-0 ceph-mon[191930]: pgmap v1313: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:21:45 compute-0 sshd-session[426584]: Connection closed by invalid user debian 121.227.153.123 port 41588 [preauth]
Oct 11 02:21:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1314: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:46 compute-0 nova_compute[356901]: 2025-10-11 02:21:46.918 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:21:47 compute-0 sshd-session[426645]: Invalid user debian from 121.227.153.123 port 41592
Oct 11 02:21:47 compute-0 ceph-mon[191930]: pgmap v1314: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:47 compute-0 sshd-session[426645]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:21:47 compute-0 sshd-session[426645]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:21:47 compute-0 nova_compute[356901]: 2025-10-11 02:21:47.899 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:21:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1315: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:48 compute-0 ceph-mon[191930]: pgmap v1315: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:48 compute-0 nova_compute[356901]: 2025-10-11 02:21:48.711 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:49 compute-0 podman[426647]: 2025-10-11 02:21:49.228817837 +0000 UTC m=+0.118473079 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, container_name=kepler, maintainer=Red Hat, Inc., name=ubi9, summary=Provides the latest release of Red Hat Universal Base Image 9., build-date=2024-09-18T21:23:30, com.redhat.component=ubi9-container, distribution-scope=public, io.buildah.version=1.29.0, io.k8s.display-name=Red Hat Universal Base Image 9, vcs-type=git, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, vendor=Red Hat, Inc., release-0.7.12=, managed_by=edpm_ansible, release=1214.1726694543, version=9.4, config_id=edpm, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.openshift.tags=base rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64)
Oct 11 02:21:49 compute-0 nova_compute[356901]: 2025-10-11 02:21:49.308 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:49 compute-0 sshd-session[426645]: Failed password for invalid user debian from 121.227.153.123 port 41592 ssh2
Oct 11 02:21:49 compute-0 nova_compute[356901]: 2025-10-11 02:21:49.894 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:21:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1316: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:21:51 compute-0 ceph-mon[191930]: pgmap v1316: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:51 compute-0 sshd-session[426645]: Connection closed by invalid user debian 121.227.153.123 port 41592 [preauth]
Oct 11 02:21:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1317: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:53 compute-0 ceph-mon[191930]: pgmap v1317: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:53 compute-0 sshd-session[426668]: Invalid user debian from 121.227.153.123 port 34972
Oct 11 02:21:53 compute-0 sshd-session[426668]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:21:53 compute-0 sshd-session[426668]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:21:53 compute-0 nova_compute[356901]: 2025-10-11 02:21:53.728 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:53 compute-0 nova_compute[356901]: 2025-10-11 02:21:53.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:21:53 compute-0 nova_compute[356901]: 2025-10-11 02:21:53.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:21:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1318: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:54 compute-0 nova_compute[356901]: 2025-10-11 02:21:54.310 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:54 compute-0 nova_compute[356901]: 2025-10-11 02:21:54.694 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:21:54 compute-0 nova_compute[356901]: 2025-10-11 02:21:54.695 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:21:54 compute-0 nova_compute[356901]: 2025-10-11 02:21:54.696 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:21:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:21:54.847 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:21:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:21:54.847 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:21:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:21:54.848 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:21:55 compute-0 ceph-mon[191930]: pgmap v1318: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:21:55 compute-0 sshd-session[426668]: Failed password for invalid user debian from 121.227.153.123 port 34972 ssh2
Oct 11 02:21:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1319: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:21:56
Oct 11 02:21:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:21:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:21:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['volumes', 'vms', 'default.rgw.meta', 'images', 'cephfs.cephfs.data', '.mgr', '.rgw.root', 'default.rgw.log', 'default.rgw.control', 'backups', 'cephfs.cephfs.meta']
Oct 11 02:21:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:21:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:21:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:21:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:21:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:21:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:21:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:21:56 compute-0 nova_compute[356901]: 2025-10-11 02:21:56.752 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Updating instance_info_cache with network_info: [{"id": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "address": "fa:16:3e:c2:ee:14", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.80", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa7108c4c-c9", "ovs_interfaceid": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:21:56 compute-0 nova_compute[356901]: 2025-10-11 02:21:56.780 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:21:56 compute-0 nova_compute[356901]: 2025-10-11 02:21:56.781 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:21:56 compute-0 nova_compute[356901]: 2025-10-11 02:21:56.782 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:21:56 compute-0 nova_compute[356901]: 2025-10-11 02:21:56.783 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:21:56 compute-0 nova_compute[356901]: 2025-10-11 02:21:56.784 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:21:56 compute-0 nova_compute[356901]: 2025-10-11 02:21:56.784 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:21:56 compute-0 nova_compute[356901]: 2025-10-11 02:21:56.785 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:21:56 compute-0 nova_compute[356901]: 2025-10-11 02:21:56.833 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:21:56 compute-0 nova_compute[356901]: 2025-10-11 02:21:56.833 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:21:56 compute-0 nova_compute[356901]: 2025-10-11 02:21:56.834 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:21:56 compute-0 nova_compute[356901]: 2025-10-11 02:21:56.835 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:21:56 compute-0 nova_compute[356901]: 2025-10-11 02:21:56.836 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:21:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:21:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:21:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:21:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:21:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:21:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:21:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:21:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:21:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:21:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:21:57 compute-0 ceph-mon[191930]: pgmap v1319: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:21:57 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3771436038' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:21:57 compute-0 nova_compute[356901]: 2025-10-11 02:21:57.401 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.565s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:21:57 compute-0 nova_compute[356901]: 2025-10-11 02:21:57.515 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:21:57 compute-0 nova_compute[356901]: 2025-10-11 02:21:57.516 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:21:57 compute-0 nova_compute[356901]: 2025-10-11 02:21:57.516 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:21:57 compute-0 nova_compute[356901]: 2025-10-11 02:21:57.521 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:21:57 compute-0 nova_compute[356901]: 2025-10-11 02:21:57.521 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:21:57 compute-0 nova_compute[356901]: 2025-10-11 02:21:57.522 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:21:57 compute-0 sshd-session[426668]: Connection closed by invalid user debian 121.227.153.123 port 34972 [preauth]
Oct 11 02:21:57 compute-0 nova_compute[356901]: 2025-10-11 02:21:57.954 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:21:57 compute-0 nova_compute[356901]: 2025-10-11 02:21:57.955 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3722MB free_disk=59.92200469970703GB free_vcpus=6 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:21:57 compute-0 nova_compute[356901]: 2025-10-11 02:21:57.956 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:21:57 compute-0 nova_compute[356901]: 2025-10-11 02:21:57.957 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:21:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1320: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:58 compute-0 nova_compute[356901]: 2025-10-11 02:21:58.119 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:21:58 compute-0 nova_compute[356901]: 2025-10-11 02:21:58.119 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance d60d7ea1-5d00-4902-90e6-3ae67eb09a78 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:21:58 compute-0 nova_compute[356901]: 2025-10-11 02:21:58.120 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 2 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:21:58 compute-0 nova_compute[356901]: 2025-10-11 02:21:58.120 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1536MB phys_disk=59GB used_disk=4GB total_vcpus=8 used_vcpus=2 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:21:58 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3771436038' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:21:58 compute-0 nova_compute[356901]: 2025-10-11 02:21:58.172 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:21:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:21:58 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1373731472' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:21:58 compute-0 nova_compute[356901]: 2025-10-11 02:21:58.662 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.490s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
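Annotation: the "Running cmd (subprocess)" / "CMD ... returned: 0 in 0.490s" pair is oslo.concurrency's processutils wrapper shelling out to the ceph CLI to read pool capacity. A sketch of the same call, with the command arguments copied verbatim from the log; the printed field assumes the standard `ceph df` JSON layout:

    # Re-issuing the subprocess call logged above via the same wrapper.
    import json
    from oslo_concurrency import processutils

    out, _err = processutils.execute(
        'ceph', 'df', '--format=json',
        '--id', 'openstack', '--conf', '/etc/ceph/ceph.conf')
    stats = json.loads(out)
    print(stats['stats']['total_bytes'])  # cluster capacity per 'ceph df'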
Oct 11 02:21:58 compute-0 nova_compute[356901]: 2025-10-11 02:21:58.674 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:21:58 compute-0 nova_compute[356901]: 2025-10-11 02:21:58.695 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
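Annotation: the inventory dict above is what placement uses to size this host. The usable capacity per resource class follows the standard placement rule, capacity = (total - reserved) * allocation_ratio, so the 8 physical vCPUs schedule as 32 at ratio 4.0. A worked example with the numbers from the log:

    # Effective capacity implied by the inventory logged above.
    inv = {
        'MEMORY_MB': {'total': 7680, 'reserved': 512, 'allocation_ratio': 1.0},
        'VCPU':      {'total': 8,    'reserved': 0,   'allocation_ratio': 4.0},
        'DISK_GB':   {'total': 59,   'reserved': 1,   'allocation_ratio': 0.9},
    }
    for rc, i in inv.items():
        print(rc, (i['total'] - i['reserved']) * i['allocation_ratio'])
    # MEMORY_MB 7168.0, VCPU 32.0, DISK_GB 52.2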
Oct 11 02:21:58 compute-0 nova_compute[356901]: 2025-10-11 02:21:58.698 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:21:58 compute-0 nova_compute[356901]: 2025-10-11 02:21:58.699 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.743s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:21:58 compute-0 nova_compute[356901]: 2025-10-11 02:21:58.732 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:58 compute-0 nova_compute[356901]: 2025-10-11 02:21:58.814 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:21:58 compute-0 nova_compute[356901]: 2025-10-11 02:21:58.839 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:21:59 compute-0 ceph-mon[191930]: pgmap v1320: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:21:59 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1373731472' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:21:59 compute-0 sshd-session[426692]: Invalid user debian from 121.227.153.123 port 35000
Oct 11 02:21:59 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [L] New memtable created with log file: #43. Immutable memtables: 0.
Oct 11 02:21:59 compute-0 podman[426719]: 2025-10-11 02:21:59.22013836 +0000 UTC m=+0.103252302 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, tcib_managed=true, container_name=ovn_metadata_agent, org.label-schema.license=GPLv2)
Oct 11 02:21:59 compute-0 podman[426716]: 2025-10-11 02:21:59.224630251 +0000 UTC m=+0.113272912 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:21:59 compute-0 podman[426718]: 2025-10-11 02:21:59.266083271 +0000 UTC m=+0.154709182 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_id=edpm, org.label-schema.license=GPLv2, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 02:21:59 compute-0 podman[426717]: 2025-10-11 02:21:59.292101467 +0000 UTC m=+0.177142859 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, org.label-schema.license=GPLv2)
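Annotation: the four "container health_status ... health_status=healthy" events above are podman healthchecks firing for the EDPM containers; each runs the configured test command (here `/openstack/healthcheck`) inside the container and records healthy on exit 0, unhealthy otherwise. A sketch of triggering the same check by hand, using the real `podman healthcheck run` subcommand and a container name taken from the log:

    # Run one container's healthcheck manually; rc 0 means "healthy".
    import subprocess

    r = subprocess.run(['podman', 'healthcheck', 'run', 'ovn_metadata_agent'])
    print('healthy' if r.returncode == 0 else 'unhealthy (rc=%d)' % r.returncode)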
Oct 11 02:21:59 compute-0 nova_compute[356901]: 2025-10-11 02:21:59.311 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:21:59 compute-0 sshd-session[426692]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:21:59 compute-0 sshd-session[426692]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:21:59 compute-0 podman[157119]: time="2025-10-11T02:21:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:21:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:21:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:21:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:21:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9048 "" "Go-http-client/1.1"
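Annotation: the two access-log lines above are the prometheus-podman-exporter polling podman's libpod REST API over the unix socket `/run/podman/podman.sock` (the exporter's CONTAINER_HOST in the config dump earlier). A stdlib-only sketch of the first request, with the query string trimmed to its essential parameter:

    # Query the libpod REST endpoint seen in the access log above.
    import http.client, json, socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        def __init__(self, sock_path):
            super().__init__('localhost')
            self._sock_path = sock_path
        def connect(self):
            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            s.connect(self._sock_path)
            self.sock = s

    conn = UnixHTTPConnection('/run/podman/podman.sock')
    conn.request('GET', '/v4.9.3/libpod/containers/json?all=true')
    containers = json.loads(conn.getresponse().read())
    print(len(containers), 'containers')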
Oct 11 02:22:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1321: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:22:01 compute-0 ceph-mon[191930]: pgmap v1321: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:01 compute-0 openstack_network_exporter[374316]: ERROR   02:22:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:22:01 compute-0 openstack_network_exporter[374316]: ERROR   02:22:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:22:01 compute-0 openstack_network_exporter[374316]: ERROR   02:22:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:22:01 compute-0 openstack_network_exporter[374316]: ERROR   02:22:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:22:01 compute-0 openstack_network_exporter[374316]: ERROR   02:22:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:22:01 compute-0 sshd-session[426692]: Failed password for invalid user debian from 121.227.153.123 port 35000 ssh2
Oct 11 02:22:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1322: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:02 compute-0 sudo[426796]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:22:02 compute-0 sudo[426796]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:02 compute-0 sudo[426796]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:03 compute-0 sudo[426821]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:22:03 compute-0 sudo[426821]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:03 compute-0 sudo[426821]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:03 compute-0 ceph-mon[191930]: pgmap v1322: 321 pgs: 321 active+clean; 139 MiB data, 267 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:03 compute-0 sudo[426846]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:22:03 compute-0 sudo[426846]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:03 compute-0 sudo[426846]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:03 compute-0 sudo[426887]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:22:03 compute-0 sudo[426887]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:03 compute-0 podman[426870]: 2025-10-11 02:22:03.411617827 +0000 UTC m=+0.137680370 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=iscsid, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=iscsid, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:22:03 compute-0 podman[426871]: 2025-10-11 02:22:03.423174252 +0000 UTC m=+0.143388805 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, io.buildah.version=1.41.3, container_name=multipathd, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:22:03 compute-0 sshd-session[426692]: Connection closed by invalid user debian 121.227.153.123 port 35000 [preauth]
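Annotation: the sshd-session lines from pid 426692 form one complete password-guessing attempt from 121.227.153.123: "Invalid user debian", the pam_unix "check pass" / "authentication failure" pair, "Failed password", then "Connection closed ... [preauth]". The same source retries on new ports below. A toy fail2ban-style counter over such lines; the regex and log path are illustrative, not a real filter:

    # Count "Failed password ... invalid user" events per source IP.
    import re
    from collections import Counter

    FAIL = re.compile(r'Failed password for invalid user \S+ from (\S+) port \d+')
    hits = Counter()
    with open('/var/log/messages') as f:   # assumed log location
        for line in f:
            m = FAIL.search(line)
            if m:
                hits[m.group(1)] += 1
    for ip, n in hits.most_common():
        if n >= 3:
            print('would ban', ip, 'after', n, 'failures')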
Oct 11 02:22:03 compute-0 nova_compute[356901]: 2025-10-11 02:22:03.735 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1323: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail; 2.0 KiB/s wr, 0 op/s
Oct 11 02:22:04 compute-0 sudo[426887]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:22:04 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:22:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:22:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:22:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:22:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:22:04 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev ed824637-4f59-4119-8dc3-9905ddc0e8f2 does not exist
Oct 11 02:22:04 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev fe824532-0914-4c30-b631-191e8c5cd023 does not exist
Oct 11 02:22:04 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 8d6e7327-dbab-4e58-85d1-9091ae1353d7 does not exist
Oct 11 02:22:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:22:04 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:22:04 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:22:04 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:22:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:22:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:22:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:22:04 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:22:04 compute-0 nova_compute[356901]: 2025-10-11 02:22:04.314 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:04 compute-0 sudo[426970]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:22:04 compute-0 sudo[426970]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:04 compute-0 sudo[426970]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:04 compute-0 sudo[426995]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:22:04 compute-0 sudo[426995]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:04 compute-0 sudo[426995]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:04 compute-0 sudo[427020]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:22:04 compute-0 sudo[427020]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:04 compute-0 sudo[427020]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:04 compute-0 sudo[427045]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
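Annotation: the sudo sequence above is cephadm's usual pattern, two connectivity probes (`/bin/true`, `which python3`) followed by the real payload: running `ceph-volume ... lvm batch --no-auto <LV> <LV> <LV> --yes --no-systemd` inside the pinned ceph container image. A sketch of assembling that argv; the fsid, image digest and LV paths are copied from the log, the helper function itself is hypothetical:

    # Rebuild the ceph-volume batch argv seen in the sudo COMMAND above.
    def ceph_volume_batch(fsid, image, lvs):
        return (['cephadm', '--image', image,
                 'ceph-volume', '--fsid', fsid, '--',
                 'lvm', 'batch', '--no-auto']
                + lvs + ['--yes', '--no-systemd'])

    argv = ceph_volume_batch(
        '3c7617c3-7a20-523e-a9de-20c0d6ba41da',
        'quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0',
        ['/dev/ceph_vg0/ceph_lv0', '/dev/ceph_vg1/ceph_lv1', '/dev/ceph_vg2/ceph_lv2'])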
Oct 11 02:22:04 compute-0 sudo[427045]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:05 compute-0 sshd-session[426956]: Invalid user debian from 121.227.153.123 port 40372
Oct 11 02:22:05 compute-0 ceph-mon[191930]: pgmap v1323: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail; 2.0 KiB/s wr, 0 op/s
Oct 11 02:22:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:22:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:22:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:22:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:22:05 compute-0 sshd-session[426956]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:22:05 compute-0 sshd-session[426956]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:22:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:22:05 compute-0 podman[427108]: 2025-10-11 02:22:05.318833299 +0000 UTC m=+0.078448801 container create af2c7c2cbf976d161e1ec8006f45791d10198008abc34a3d22ff0e76d11251e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_shtern, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:22:05 compute-0 systemd[1]: virtsecretd.service: Deactivated successfully.
Oct 11 02:22:05 compute-0 podman[427108]: 2025-10-11 02:22:05.281935893 +0000 UTC m=+0.041551385 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:22:05 compute-0 systemd[1]: Started libpod-conmon-af2c7c2cbf976d161e1ec8006f45791d10198008abc34a3d22ff0e76d11251e5.scope.
Oct 11 02:22:05 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:22:05 compute-0 podman[427108]: 2025-10-11 02:22:05.45658407 +0000 UTC m=+0.216199552 container init af2c7c2cbf976d161e1ec8006f45791d10198008abc34a3d22ff0e76d11251e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_shtern, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 02:22:05 compute-0 podman[427108]: 2025-10-11 02:22:05.470379606 +0000 UTC m=+0.229995058 container start af2c7c2cbf976d161e1ec8006f45791d10198008abc34a3d22ff0e76d11251e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_shtern, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0)
Oct 11 02:22:05 compute-0 podman[427108]: 2025-10-11 02:22:05.47548778 +0000 UTC m=+0.235103262 container attach af2c7c2cbf976d161e1ec8006f45791d10198008abc34a3d22ff0e76d11251e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_shtern, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_REF=reef)
Oct 11 02:22:05 compute-0 objective_shtern[427125]: 167 167
Oct 11 02:22:05 compute-0 systemd[1]: libpod-af2c7c2cbf976d161e1ec8006f45791d10198008abc34a3d22ff0e76d11251e5.scope: Deactivated successfully.
Oct 11 02:22:05 compute-0 conmon[427125]: conmon af2c7c2cbf976d161e1e <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-af2c7c2cbf976d161e1ec8006f45791d10198008abc34a3d22ff0e76d11251e5.scope/container/memory.events
Oct 11 02:22:05 compute-0 podman[427108]: 2025-10-11 02:22:05.489073978 +0000 UTC m=+0.248689430 container died af2c7c2cbf976d161e1ec8006f45791d10198008abc34a3d22ff0e76d11251e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_shtern, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:22:05 compute-0 systemd[1]: var-lib-containers-storage-overlay-5b3c3e44934bca10c728916202e44931492e74937b6c73fcf609315765d475ec-merged.mount: Deactivated successfully.
Oct 11 02:22:05 compute-0 podman[427108]: 2025-10-11 02:22:05.558434311 +0000 UTC m=+0.318049763 container remove af2c7c2cbf976d161e1ec8006f45791d10198008abc34a3d22ff0e76d11251e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=objective_shtern, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:22:05 compute-0 systemd[1]: libpod-conmon-af2c7c2cbf976d161e1ec8006f45791d10198008abc34a3d22ff0e76d11251e5.scope: Deactivated successfully.
Oct 11 02:22:05 compute-0 podman[427148]: 2025-10-11 02:22:05.898371619 +0000 UTC m=+0.124005628 container create d77d78d7d37b5b5559072f7be56cfc0eafb6a09fe1dbe82eb820905db469df9e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_goldwasser, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507)
Oct 11 02:22:05 compute-0 podman[427148]: 2025-10-11 02:22:05.841730293 +0000 UTC m=+0.067364392 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:22:06 compute-0 systemd[1]: Started libpod-conmon-d77d78d7d37b5b5559072f7be56cfc0eafb6a09fe1dbe82eb820905db469df9e.scope.
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1324: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail; 2.0 KiB/s wr, 0 op/s
Oct 11 02:22:06 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:22:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5f2ba49dbba864017576b0e13b23f05e8975e80c5ada7ad33abeddf5354b8923/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:22:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5f2ba49dbba864017576b0e13b23f05e8975e80c5ada7ad33abeddf5354b8923/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:22:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5f2ba49dbba864017576b0e13b23f05e8975e80c5ada7ad33abeddf5354b8923/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:22:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5f2ba49dbba864017576b0e13b23f05e8975e80c5ada7ad33abeddf5354b8923/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:22:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5f2ba49dbba864017576b0e13b23f05e8975e80c5ada7ad33abeddf5354b8923/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:22:06 compute-0 podman[427148]: 2025-10-11 02:22:06.097814368 +0000 UTC m=+0.323448447 container init d77d78d7d37b5b5559072f7be56cfc0eafb6a09fe1dbe82eb820905db469df9e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_goldwasser, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:22:06 compute-0 podman[427148]: 2025-10-11 02:22:06.115541425 +0000 UTC m=+0.341175464 container start d77d78d7d37b5b5559072f7be56cfc0eafb6a09fe1dbe82eb820905db469df9e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_goldwasser, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:22:06 compute-0 podman[427148]: 2025-10-11 02:22:06.124093342 +0000 UTC m=+0.349727381 container attach d77d78d7d37b5b5559072f7be56cfc0eafb6a09fe1dbe82eb820905db469df9e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_goldwasser, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0011048249576740248 of space, bias 1.0, pg target 0.33144748730220747 quantized to 32 (current 32)
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:22:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
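Annotation: every "pg target" in the autoscaler block above is reproducible as capacity_ratio * bias * (target PGs per OSD * OSD count). With the default mon_target_pg_per_osd of 100 and the 3 OSDs this node deploys (see the three-LV batch call below), the factor is 300 and the products match the logged values; the result is then quantized to a power of two, bounded below by the pool's current/minimum pg_num. A worked check with numbers taken from the log:

    # Reproduce the autoscaler's "pg target" values; 300 = assumed
    # mon_target_pg_per_osd (default 100) * 3 OSDs on this host.
    pools = {
        '.mgr':               (7.185749983720779e-06, 1.0),
        'vms':                (0.0011048249576740248, 1.0),
        'images':             (0.00025334537995702286, 1.0),
        'cephfs.cephfs.meta': (5.087256625643029e-07, 4.0),
    }
    for name, (capacity_ratio, bias) in pools.items():
        # matches the "pg target" figures logged above
        print(name, capacity_ratio * bias * 300)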
Oct 11 02:22:07 compute-0 sshd-session[426956]: Failed password for invalid user debian from 121.227.153.123 port 40372 ssh2
Oct 11 02:22:07 compute-0 ceph-mon[191930]: pgmap v1324: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail; 2.0 KiB/s wr, 0 op/s
Oct 11 02:22:07 compute-0 blissful_goldwasser[427164]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:22:07 compute-0 blissful_goldwasser[427164]: --> relative data size: 1.0
Oct 11 02:22:07 compute-0 blissful_goldwasser[427164]: --> All data devices are unavailable
Oct 11 02:22:07 compute-0 systemd[1]: libpod-d77d78d7d37b5b5559072f7be56cfc0eafb6a09fe1dbe82eb820905db469df9e.scope: Deactivated successfully.
Oct 11 02:22:07 compute-0 podman[427148]: 2025-10-11 02:22:07.426804336 +0000 UTC m=+1.652438375 container died d77d78d7d37b5b5559072f7be56cfc0eafb6a09fe1dbe82eb820905db469df9e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_goldwasser, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 02:22:07 compute-0 systemd[1]: libpod-d77d78d7d37b5b5559072f7be56cfc0eafb6a09fe1dbe82eb820905db469df9e.scope: Consumed 1.252s CPU time.
Oct 11 02:22:07 compute-0 systemd[1]: var-lib-containers-storage-overlay-5f2ba49dbba864017576b0e13b23f05e8975e80c5ada7ad33abeddf5354b8923-merged.mount: Deactivated successfully.
Oct 11 02:22:07 compute-0 podman[427148]: 2025-10-11 02:22:07.549534618 +0000 UTC m=+1.775168627 container remove d77d78d7d37b5b5559072f7be56cfc0eafb6a09fe1dbe82eb820905db469df9e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_goldwasser, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:22:07 compute-0 sshd-session[426956]: Connection closed by invalid user debian 121.227.153.123 port 40372 [preauth]
Oct 11 02:22:07 compute-0 systemd[1]: libpod-conmon-d77d78d7d37b5b5559072f7be56cfc0eafb6a09fe1dbe82eb820905db469df9e.scope: Deactivated successfully.
Oct 11 02:22:07 compute-0 sudo[427045]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:07 compute-0 sudo[427205]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:22:07 compute-0 sudo[427205]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:07 compute-0 sudo[427205]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:07 compute-0 sudo[427230]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:22:07 compute-0 sudo[427230]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:07 compute-0 sudo[427230]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:07 compute-0 sudo[427257]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:22:07 compute-0 sudo[427257]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:07 compute-0 sudo[427257]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1325: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail; 2.0 KiB/s wr, 0 op/s
Oct 11 02:22:08 compute-0 sudo[427282]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
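Annotation: after the batch run reports "All data devices are unavailable" (the three LVs are already prepared), cephadm falls back to inventorying existing OSDs with `ceph-volume lvm list --format json`, as in the COMMAND above. A sketch of parsing that output; the JSON shape (osd-id keys mapping to lists of LV records with 'lv_path'/'type' fields) is from memory and should be treated as an assumption:

    # Parse 'ceph-volume lvm list --format json' (run as root on the host).
    import json, subprocess

    out = subprocess.run(
        ['ceph-volume', 'lvm', 'list', '--format', 'json'],
        capture_output=True, text=True, check=True).stdout
    for osd_id, lvs in json.loads(out).items():
        for lv in lvs:
            print('osd.%s' % osd_id, lv.get('type'), lv.get('lv_path'))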
Oct 11 02:22:08 compute-0 sudo[427282]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:08 compute-0 systemd[1]: virtproxyd.service: Deactivated successfully.
Oct 11 02:22:08 compute-0 podman[427345]: 2025-10-11 02:22:08.708567178 +0000 UTC m=+0.095344538 container create d354ea1565c31644a305b2386486975d308a1cfa583878bf24e73603c03886b4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_bose, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:22:08 compute-0 nova_compute[356901]: 2025-10-11 02:22:08.740 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:08 compute-0 podman[427345]: 2025-10-11 02:22:08.669418671 +0000 UTC m=+0.056196081 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:22:08 compute-0 systemd[1]: Started libpod-conmon-d354ea1565c31644a305b2386486975d308a1cfa583878bf24e73603c03886b4.scope.
Oct 11 02:22:08 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:22:08 compute-0 sshd-session[427243]: Invalid user debian from 121.227.153.123 port 40384
Oct 11 02:22:08 compute-0 podman[427345]: 2025-10-11 02:22:08.881655249 +0000 UTC m=+0.268432649 container init d354ea1565c31644a305b2386486975d308a1cfa583878bf24e73603c03886b4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_bose, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True)
Oct 11 02:22:08 compute-0 podman[427345]: 2025-10-11 02:22:08.895425624 +0000 UTC m=+0.282202954 container start d354ea1565c31644a305b2386486975d308a1cfa583878bf24e73603c03886b4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_bose, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 02:22:08 compute-0 podman[427345]: 2025-10-11 02:22:08.901058877 +0000 UTC m=+0.287836287 container attach d354ea1565c31644a305b2386486975d308a1cfa583878bf24e73603c03886b4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_bose, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 02:22:08 compute-0 inspiring_bose[427361]: 167 167
Oct 11 02:22:08 compute-0 systemd[1]: libpod-d354ea1565c31644a305b2386486975d308a1cfa583878bf24e73603c03886b4.scope: Deactivated successfully.
Oct 11 02:22:08 compute-0 podman[427345]: 2025-10-11 02:22:08.910392212 +0000 UTC m=+0.297169562 container died d354ea1565c31644a305b2386486975d308a1cfa583878bf24e73603c03886b4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_bose, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:22:08 compute-0 systemd[1]: var-lib-containers-storage-overlay-4b64c62c413d140f471c3901d110679c35c5e400bab6a4235ba390df4438ef99-merged.mount: Deactivated successfully.
Oct 11 02:22:08 compute-0 podman[427345]: 2025-10-11 02:22:08.977070779 +0000 UTC m=+0.363848109 container remove d354ea1565c31644a305b2386486975d308a1cfa583878bf24e73603c03886b4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_bose, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:22:09 compute-0 systemd[1]: libpod-conmon-d354ea1565c31644a305b2386486975d308a1cfa583878bf24e73603c03886b4.scope: Deactivated successfully.
Oct 11 02:22:09 compute-0 sshd-session[427243]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:22:09 compute-0 sshd-session[427243]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:22:09 compute-0 podman[427384]: 2025-10-11 02:22:09.226032836 +0000 UTC m=+0.076762289 container create aec33a1c112ecc9d692bec8d4dc049399ad41fda242a780a3caec14393e53967 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_neumann, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:22:09 compute-0 ceph-mon[191930]: pgmap v1325: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail; 2.0 KiB/s wr, 0 op/s
Oct 11 02:22:09 compute-0 podman[427384]: 2025-10-11 02:22:09.189514015 +0000 UTC m=+0.040243548 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:22:09 compute-0 systemd[1]: Started libpod-conmon-aec33a1c112ecc9d692bec8d4dc049399ad41fda242a780a3caec14393e53967.scope.
Oct 11 02:22:09 compute-0 nova_compute[356901]: 2025-10-11 02:22:09.316 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:09 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:22:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/961b2c895caae511b72c1fb192c6e6ece0d7d56d626ab511e61345e5eceb0f0d/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:22:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/961b2c895caae511b72c1fb192c6e6ece0d7d56d626ab511e61345e5eceb0f0d/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:22:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/961b2c895caae511b72c1fb192c6e6ece0d7d56d626ab511e61345e5eceb0f0d/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:22:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/961b2c895caae511b72c1fb192c6e6ece0d7d56d626ab511e61345e5eceb0f0d/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
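[annotation] The four kernel lines above flag that the xfs filesystem backing /var/lib/containers was formatted without the bigtime feature, so its inode timestamps overflow in 2038 (0x7fffffff). A minimal sketch of how to check this, assuming /var/lib/containers as the mount point of interest:

    import subprocess

    # xfs_info prints "bigtime=1" for filesystems formatted with y2038-safe
    # timestamps; the kernel warnings above mean bigtime is off here.
    info = subprocess.run(
        ["xfs_info", "/var/lib/containers"],   # assumed mount point
        capture_output=True, text=True, check=True,
    ).stdout
    print("bigtime enabled" if "bigtime=1" in info
          else "timestamps capped at 2038 (0x7fffffff)")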
Oct 11 02:22:09 compute-0 podman[427384]: 2025-10-11 02:22:09.361154283 +0000 UTC m=+0.211883756 container init aec33a1c112ecc9d692bec8d4dc049399ad41fda242a780a3caec14393e53967 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_neumann, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:22:09 compute-0 podman[427384]: 2025-10-11 02:22:09.373702654 +0000 UTC m=+0.224432107 container start aec33a1c112ecc9d692bec8d4dc049399ad41fda242a780a3caec14393e53967 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_neumann, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_REF=reef, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:22:09 compute-0 podman[427384]: 2025-10-11 02:22:09.378489816 +0000 UTC m=+0.229219269 container attach aec33a1c112ecc9d692bec8d4dc049399ad41fda242a780a3caec14393e53967 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_neumann, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 02:22:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1326: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail; 2.0 KiB/s wr, 0 op/s
Oct 11 02:22:10 compute-0 hungry_neumann[427401]: {
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:     "0": [
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:         {
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "devices": [
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "/dev/loop3"
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             ],
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "lv_name": "ceph_lv0",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "lv_size": "21470642176",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "name": "ceph_lv0",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "tags": {
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.cluster_name": "ceph",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.crush_device_class": "",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.encrypted": "0",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.osd_id": "0",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.type": "block",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.vdo": "0"
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             },
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "type": "block",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "vg_name": "ceph_vg0"
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:         }
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:     ],
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:     "1": [
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:         {
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "devices": [
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "/dev/loop4"
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             ],
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "lv_name": "ceph_lv1",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "lv_size": "21470642176",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "name": "ceph_lv1",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "tags": {
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.cluster_name": "ceph",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.crush_device_class": "",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.encrypted": "0",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.osd_id": "1",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.type": "block",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.vdo": "0"
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             },
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "type": "block",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "vg_name": "ceph_vg1"
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:         }
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:     ],
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:     "2": [
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:         {
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "devices": [
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "/dev/loop5"
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             ],
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "lv_name": "ceph_lv2",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "lv_size": "21470642176",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "name": "ceph_lv2",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "tags": {
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.cluster_name": "ceph",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.crush_device_class": "",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.encrypted": "0",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.osd_id": "2",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.type": "block",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:                 "ceph.vdo": "0"
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             },
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "type": "block",
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:             "vg_name": "ceph_vg2"
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:         }
Oct 11 02:22:10 compute-0 hungry_neumann[427401]:     ]
Oct 11 02:22:10 compute-0 hungry_neumann[427401]: }
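[annotation] The JSON block printed by the hungry_neumann container has the shape of `ceph-volume lvm list --format json` output: top-level keys are OSD ids, each mapping to a list of LV records with lv_path, devices, and a parsed tags map. A sketch that summarizes it, assuming the JSON above has been captured to a file:

    import json

    # Parse output in the shape of `ceph-volume lvm list --format json`
    # (keys are OSD ids, values are lists of LV records, as logged above).
    with open("lvm_list.json") as f:          # hypothetical capture of the JSON
        lvm = json.load(f)

    for osd_id, lvs in sorted(lvm.items(), key=lambda kv: int(kv[0])):
        for lv in lvs:
            tags = lv.get("tags", {})
            print(f"osd.{osd_id}: path={lv['lv_path']} "
                  f"osd_fsid={tags.get('ceph.osd_fsid')} "
                  f"backing={','.join(lv.get('devices', []))}")

For the records above this prints osd.0 on /dev/ceph_vg0/ceph_lv0 (backed by /dev/loop3), osd.1 on /dev/ceph_vg1/ceph_lv1 (/dev/loop4), and osd.2 on /dev/ceph_vg2/ceph_lv2 (/dev/loop5), all tagged with cluster fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da.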
Oct 11 02:22:10 compute-0 systemd[1]: libpod-aec33a1c112ecc9d692bec8d4dc049399ad41fda242a780a3caec14393e53967.scope: Deactivated successfully.
Oct 11 02:22:10 compute-0 podman[427384]: 2025-10-11 02:22:10.202032418 +0000 UTC m=+1.052761861 container died aec33a1c112ecc9d692bec8d4dc049399ad41fda242a780a3caec14393e53967 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_neumann, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:22:10 compute-0 systemd[1]: var-lib-containers-storage-overlay-961b2c895caae511b72c1fb192c6e6ece0d7d56d626ab511e61345e5eceb0f0d-merged.mount: Deactivated successfully.
Oct 11 02:22:10 compute-0 podman[427384]: 2025-10-11 02:22:10.288862829 +0000 UTC m=+1.139592282 container remove aec33a1c112ecc9d692bec8d4dc049399ad41fda242a780a3caec14393e53967 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_neumann, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:22:10 compute-0 systemd[1]: libpod-conmon-aec33a1c112ecc9d692bec8d4dc049399ad41fda242a780a3caec14393e53967.scope: Deactivated successfully.
Oct 11 02:22:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:22:10 compute-0 sudo[427282]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:10 compute-0 sudo[427423]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:22:10 compute-0 sudo[427423]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:10 compute-0 sudo[427423]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:10 compute-0 sudo[427448]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:22:10 compute-0 sudo[427448]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:10 compute-0 sudo[427448]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:10 compute-0 sudo[427473]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:22:10 compute-0 sudo[427473]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:10 compute-0 sudo[427473]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:10 compute-0 sshd-session[427243]: Failed password for invalid user debian from 121.227.153.123 port 40384 ssh2
Oct 11 02:22:10 compute-0 sudo[427498]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:22:10 compute-0 sudo[427498]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:11 compute-0 ceph-mon[191930]: pgmap v1326: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail; 2.0 KiB/s wr, 0 op/s
Oct 11 02:22:11 compute-0 podman[427560]: 2025-10-11 02:22:11.272760884 +0000 UTC m=+0.074895453 container create 189eecea478094d68a673aba5d13bcdd133dbb3db5389e5bd35751542ce73b6b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_snyder, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
Oct 11 02:22:11 compute-0 systemd[1]: Started libpod-conmon-189eecea478094d68a673aba5d13bcdd133dbb3db5389e5bd35751542ce73b6b.scope.
Oct 11 02:22:11 compute-0 podman[427560]: 2025-10-11 02:22:11.248107708 +0000 UTC m=+0.050242297 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:22:11 compute-0 sshd-session[427243]: Connection closed by invalid user debian 121.227.153.123 port 40384 [preauth]
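[annotation] Interleaved with the cephadm activity, sshd records a password-guessing attempt from 121.227.153.123 against the nonexistent user "debian" (check pass / authentication failure / Failed password / Connection closed, and the cycle repeats below on new ports). A sketch that tallies such failures from a journal dump like this one; the file path is an assumption:

    import re
    from collections import Counter

    # Matches the "Failed password for invalid user X from IP" lines above.
    FAILED = re.compile(
        r"sshd-session\[\d+\]: Failed password for invalid user "
        r"(?P<user>\S+) from (?P<ip>\d+\.\d+\.\d+\.\d+) port (?P<port>\d+)"
    )

    hits = Counter()
    with open("messages.log") as f:           # hypothetical path to this log
        for line in f:
            m = FAILED.search(line)
            if m:
                hits[(m["ip"], m["user"])] += 1

    for (ip, user), n in hits.most_common():
        print(f"{ip} -> invalid user {user!r}: {n} failures")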
Oct 11 02:22:11 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:22:11 compute-0 podman[427560]: 2025-10-11 02:22:11.422662622 +0000 UTC m=+0.224797251 container init 189eecea478094d68a673aba5d13bcdd133dbb3db5389e5bd35751542ce73b6b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_snyder, ceph=True, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 02:22:11 compute-0 podman[427560]: 2025-10-11 02:22:11.440481612 +0000 UTC m=+0.242616211 container start 189eecea478094d68a673aba5d13bcdd133dbb3db5389e5bd35751542ce73b6b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_snyder, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20250507)
Oct 11 02:22:11 compute-0 podman[427560]: 2025-10-11 02:22:11.449161814 +0000 UTC m=+0.251296453 container attach 189eecea478094d68a673aba5d13bcdd133dbb3db5389e5bd35751542ce73b6b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_snyder, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:22:11 compute-0 recursing_snyder[427576]: 167 167
Oct 11 02:22:11 compute-0 systemd[1]: libpod-189eecea478094d68a673aba5d13bcdd133dbb3db5389e5bd35751542ce73b6b.scope: Deactivated successfully.
Oct 11 02:22:11 compute-0 podman[427560]: 2025-10-11 02:22:11.456409395 +0000 UTC m=+0.258544004 container died 189eecea478094d68a673aba5d13bcdd133dbb3db5389e5bd35751542ce73b6b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_snyder, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:22:11 compute-0 systemd[1]: var-lib-containers-storage-overlay-070d35eae80040c514f37be23fc2ee1080b4f3be5bce63af2c31827165749e3c-merged.mount: Deactivated successfully.
Oct 11 02:22:11 compute-0 podman[427560]: 2025-10-11 02:22:11.542206419 +0000 UTC m=+0.344340998 container remove 189eecea478094d68a673aba5d13bcdd133dbb3db5389e5bd35751542ce73b6b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_snyder, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:22:11 compute-0 systemd[1]: libpod-conmon-189eecea478094d68a673aba5d13bcdd133dbb3db5389e5bd35751542ce73b6b.scope: Deactivated successfully.
Oct 11 02:22:11 compute-0 podman[427601]: 2025-10-11 02:22:11.792571418 +0000 UTC m=+0.083809544 container create 33563e478b22ec669a7630fa2d4cbcb0a373ae23257bd6072a77f5fbe5e8c420 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_shirley, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 02:22:11 compute-0 podman[427601]: 2025-10-11 02:22:11.76009691 +0000 UTC m=+0.051335126 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:22:11 compute-0 systemd[1]: Started libpod-conmon-33563e478b22ec669a7630fa2d4cbcb0a373ae23257bd6072a77f5fbe5e8c420.scope.
Oct 11 02:22:11 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:22:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/384dbc84d34c42c6a7e989880c3103e139ea237100dc08bf375dda69391cd5c1/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:22:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/384dbc84d34c42c6a7e989880c3103e139ea237100dc08bf375dda69391cd5c1/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:22:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/384dbc84d34c42c6a7e989880c3103e139ea237100dc08bf375dda69391cd5c1/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:22:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/384dbc84d34c42c6a7e989880c3103e139ea237100dc08bf375dda69391cd5c1/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:22:11 compute-0 podman[427601]: 2025-10-11 02:22:11.923534835 +0000 UTC m=+0.214772961 container init 33563e478b22ec669a7630fa2d4cbcb0a373ae23257bd6072a77f5fbe5e8c420 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_shirley, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:22:11 compute-0 podman[427601]: 2025-10-11 02:22:11.936998169 +0000 UTC m=+0.228236295 container start 33563e478b22ec669a7630fa2d4cbcb0a373ae23257bd6072a77f5fbe5e8c420 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_shirley, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:22:11 compute-0 podman[427601]: 2025-10-11 02:22:11.941928076 +0000 UTC m=+0.233166352 container attach 33563e478b22ec669a7630fa2d4cbcb0a373ae23257bd6072a77f5fbe5e8c420 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_shirley, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507)
Oct 11 02:22:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1327: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail; 2.0 KiB/s wr, 0 op/s
Oct 11 02:22:12 compute-0 sshd-session[427594]: Invalid user debian from 121.227.153.123 port 52492
Oct 11 02:22:12 compute-0 sshd-session[427594]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:22:12 compute-0 sshd-session[427594]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:22:13 compute-0 nervous_shirley[427617]: {
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:         "osd_id": 1,
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:         "type": "bluestore"
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:     },
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:         "osd_id": 2,
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:         "type": "bluestore"
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:     },
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:         "osd_id": 0,
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:         "type": "bluestore"
Oct 11 02:22:13 compute-0 nervous_shirley[427617]:     }
Oct 11 02:22:13 compute-0 nervous_shirley[427617]: }
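[annotation] This JSON is the result of the `ceph-volume ... raw list --format json` command launched under sudo at 02:22:10: entries are keyed by osd_uuid and carry ceph_fsid, device, osd_id, and type. A sketch that cross-checks each entry against the cluster fsid seen throughout this log, again assuming the output was captured to a file:

    import json

    EXPECTED_FSID = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"  # fsid from this log

    # `ceph-volume raw list --format json` keys entries by osd_uuid.
    with open("raw_list.json") as f:          # hypothetical capture of the JSON
        raw = json.load(f)

    for osd_uuid, rec in raw.items():
        ok = rec["ceph_fsid"] == EXPECTED_FSID and rec["type"] == "bluestore"
        print(f"osd.{rec['osd_id']} uuid={osd_uuid} dev={rec['device']} "
              f"{'OK' if ok else 'MISMATCH'}")

All three OSDs above report type bluestore on the /dev/mapper devices matching the LVs from the earlier listing.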
Oct 11 02:22:13 compute-0 systemd[1]: libpod-33563e478b22ec669a7630fa2d4cbcb0a373ae23257bd6072a77f5fbe5e8c420.scope: Deactivated successfully.
Oct 11 02:22:13 compute-0 systemd[1]: libpod-33563e478b22ec669a7630fa2d4cbcb0a373ae23257bd6072a77f5fbe5e8c420.scope: Consumed 1.246s CPU time.
Oct 11 02:22:13 compute-0 ceph-mon[191930]: pgmap v1327: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail; 2.0 KiB/s wr, 0 op/s
Oct 11 02:22:13 compute-0 podman[427650]: 2025-10-11 02:22:13.296561576 +0000 UTC m=+0.063840076 container died 33563e478b22ec669a7630fa2d4cbcb0a373ae23257bd6072a77f5fbe5e8c420 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_shirley, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 02:22:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-384dbc84d34c42c6a7e989880c3103e139ea237100dc08bf375dda69391cd5c1-merged.mount: Deactivated successfully.
Oct 11 02:22:13 compute-0 podman[427650]: 2025-10-11 02:22:13.382492565 +0000 UTC m=+0.149770995 container remove 33563e478b22ec669a7630fa2d4cbcb0a373ae23257bd6072a77f5fbe5e8c420 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_shirley, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:22:13 compute-0 systemd[1]: libpod-conmon-33563e478b22ec669a7630fa2d4cbcb0a373ae23257bd6072a77f5fbe5e8c420.scope: Deactivated successfully.
Oct 11 02:22:13 compute-0 sudo[427498]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:22:13 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:22:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:22:13 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:22:13 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 85135616-a188-40dd-9574-b760b44b8959 does not exist
Oct 11 02:22:13 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev e0352a9e-b864-4f54-92ac-775271400d56 does not exist
Oct 11 02:22:13 compute-0 sudo[427666]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:22:13 compute-0 sudo[427666]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:13 compute-0 sudo[427666]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:13 compute-0 sudo[427691]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:22:13 compute-0 sudo[427691]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:22:13 compute-0 sudo[427691]: pam_unix(sudo:session): session closed for user root
Oct 11 02:22:13 compute-0 nova_compute[356901]: 2025-10-11 02:22:13.745 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1328: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail; 2.0 KiB/s wr, 0 op/s
Oct 11 02:22:14 compute-0 nova_compute[356901]: 2025-10-11 02:22:14.324 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:22:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:22:14 compute-0 ceph-mon[191930]: pgmap v1328: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail; 2.0 KiB/s wr, 0 op/s
Oct 11 02:22:15 compute-0 podman[427718]: 2025-10-11 02:22:15.20492668 +0000 UTC m=+0.089559361 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:22:15 compute-0 podman[427716]: 2025-10-11 02:22:15.211660322 +0000 UTC m=+0.099472207 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, tcib_managed=true, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, config_id=edpm, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:22:15 compute-0 podman[427717]: 2025-10-11 02:22:15.273063909 +0000 UTC m=+0.153734677 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, build-date=2025-08-20T13:12:41, io.buildah.version=1.33.7, name=ubi9-minimal, release=1755695350, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, config_id=edpm, maintainer=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, distribution-scope=public, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.component=ubi9-minimal-container, managed_by=edpm_ansible, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., container_name=openstack_network_exporter, io.openshift.expose-services=, vcs-type=git, architecture=x86_64, io.openshift.tags=minimal rhel9, version=9.6)
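[annotation] The three podman events above are periodic healthcheck runs for the edpm-managed telemetry containers (node_exporter, ceilometer_agent_ipmi, openstack_network_exporter), each reporting health_status=healthy with a zero failing streak. The same state can be read back on demand: podman inspect exposes it under .State.Health.Status for containers that define a healthcheck. A sketch:

    import json
    import subprocess

    def health(container: str) -> str:
        # `podman inspect` returns a JSON array; health lives under
        # .State.Health.Status for containers with a healthcheck defined.
        out = subprocess.run(
            ["podman", "inspect", container],
            capture_output=True, text=True, check=True,
        ).stdout
        return json.loads(out)[0]["State"]["Health"]["Status"]

    for name in ("node_exporter", "ceilometer_agent_ipmi",
                 "openstack_network_exporter"):
        print(name, health(name))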
Oct 11 02:22:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:22:15 compute-0 sshd-session[427594]: Failed password for invalid user debian from 121.227.153.123 port 52492 ssh2
Oct 11 02:22:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1329: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:17 compute-0 ceph-mon[191930]: pgmap v1329: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:17 compute-0 sshd-session[427594]: Connection closed by invalid user debian 121.227.153.123 port 52492 [preauth]
Oct 11 02:22:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1330: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:18 compute-0 sshd-session[427776]: Invalid user debian from 121.227.153.123 port 52502
Oct 11 02:22:18 compute-0 sshd-session[427776]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:22:18 compute-0 sshd-session[427776]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:22:18 compute-0 nova_compute[356901]: 2025-10-11 02:22:18.753 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:19 compute-0 ceph-mon[191930]: pgmap v1330: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:19 compute-0 nova_compute[356901]: 2025-10-11 02:22:19.322 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1331: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:20 compute-0 podman[427779]: 2025-10-11 02:22:20.28069108 +0000 UTC m=+0.166246677 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.display-name=Red Hat Universal Base Image 9, vendor=Red Hat, Inc., com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, name=ubi9, architecture=x86_64, build-date=2024-09-18T21:23:30, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9., container_name=kepler, io.buildah.version=1.29.0, io.openshift.expose-services=, config_id=edpm, maintainer=Red Hat, Inc., io.openshift.tags=base rhel9, vcs-type=git, version=9.4, release=1214.1726694543, release-0.7.12=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543)
Oct 11 02:22:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:22:20 compute-0 sshd-session[427776]: Failed password for invalid user debian from 121.227.153.123 port 52502 ssh2
Oct 11 02:22:21 compute-0 ceph-mon[191930]: pgmap v1331: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1332: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:22 compute-0 sshd-session[427776]: Connection closed by invalid user debian 121.227.153.123 port 52502 [preauth]
Oct 11 02:22:23 compute-0 ceph-mon[191930]: pgmap v1332: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:23 compute-0 nova_compute[356901]: 2025-10-11 02:22:23.758 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:24 compute-0 sshd-session[427799]: Invalid user debian from 121.227.153.123 port 39094
Oct 11 02:22:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1333: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:24 compute-0 sshd-session[427799]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:22:24 compute-0 sshd-session[427799]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:22:24 compute-0 nova_compute[356901]: 2025-10-11 02:22:24.327 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:25 compute-0 ceph-mon[191930]: pgmap v1333: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #60. Immutable memtables: 0.
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:22:25.168756) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 31] Flushing memtable with next log file: 60
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149345168837, "job": 31, "event": "flush_started", "num_memtables": 1, "num_entries": 2063, "num_deletes": 251, "total_data_size": 3371759, "memory_usage": 3435840, "flush_reason": "Manual Compaction"}
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 31] Level-0 flush table #61: started
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149345188005, "cf_name": "default", "job": 31, "event": "table_file_creation", "file_number": 61, "file_size": 3316312, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 25523, "largest_seqno": 27585, "table_properties": {"data_size": 3306934, "index_size": 5936, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 2373, "raw_key_size": 18999, "raw_average_key_size": 20, "raw_value_size": 3288137, "raw_average_value_size": 3494, "num_data_blocks": 263, "num_entries": 941, "num_filter_entries": 941, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760149121, "oldest_key_time": 1760149121, "file_creation_time": 1760149345, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 61, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 31] Flush lasted 19292 microseconds, and 7627 cpu microseconds.
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:22:25.188066) [db/flush_job.cc:967] [default] [JOB 31] Level-0 flush table #61: 3316312 bytes OK
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:22:25.188088) [db/memtable_list.cc:519] [default] Level-0 commit table #61 started
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:22:25.190796) [db/memtable_list.cc:722] [default] Level-0 commit table #61: memtable #1 done
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:22:25.190833) EVENT_LOG_v1 {"time_micros": 1760149345190821, "job": 31, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:22:25.190868) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 31] Try to delete WAL files size 3363127, prev total WAL file size 3363127, number of live WAL files 2.
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000057.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:22:25.192399) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730032323539' seq:72057594037927935, type:22 .. '7061786F730032353131' seq:0, type:0; will stop at (end)
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 32] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 31 Base level 0, inputs: [61(3238KB)], [59(7252KB)]
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149345192511, "job": 32, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [61], "files_L6": [59], "score": -1, "input_data_size": 10743185, "oldest_snapshot_seqno": -1}
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 32] Generated table #62: 5026 keys, 8997002 bytes, temperature: kUnknown
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149345254815, "cf_name": "default", "job": 32, "event": "table_file_creation", "file_number": 62, "file_size": 8997002, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 8961751, "index_size": 21573, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 12613, "raw_key_size": 124681, "raw_average_key_size": 24, "raw_value_size": 8869143, "raw_average_value_size": 1764, "num_data_blocks": 894, "num_entries": 5026, "num_filter_entries": 5026, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760149345, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 62, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:22:25.255723) [db/compaction/compaction_job.cc:1663] [default] [JOB 32] Compacted 1@0 + 1@6 files to L6 => 8997002 bytes
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:22:25.257941) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 172.2 rd, 144.2 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(3.2, 7.1 +0.0 blob) out(8.6 +0.0 blob), read-write-amplify(6.0) write-amplify(2.7) OK, records in: 5544, records dropped: 518 output_compression: NoCompression
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:22:25.257964) EVENT_LOG_v1 {"time_micros": 1760149345257953, "job": 32, "event": "compaction_finished", "compaction_time_micros": 62398, "compaction_time_cpu_micros": 44342, "output_level": 6, "num_output_files": 1, "total_output_size": 8997002, "num_input_records": 5544, "num_output_records": 5026, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000061.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149345259287, "job": 32, "event": "table_file_deletion", "file_number": 61}
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000059.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149345262053, "job": 32, "event": "table_file_deletion", "file_number": 59}
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:22:25.192024) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:22:25.262268) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:22:25.262277) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:22:25.262280) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:22:25.262282) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:22:25 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:22:25.262284) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:22:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:22:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1334: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:26 compute-0 sshd-session[427799]: Failed password for invalid user debian from 121.227.153.123 port 39094 ssh2
Oct 11 02:22:26 compute-0 sshd-session[427799]: Connection closed by invalid user debian 121.227.153.123 port 39094 [preauth]
Oct 11 02:22:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:22:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:22:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:22:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:22:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:22:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:22:27 compute-0 ceph-mon[191930]: pgmap v1334: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:22:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2376928246' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:22:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:22:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2376928246' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:22:27 compute-0 sshd-session[427801]: Invalid user debian from 121.227.153.123 port 39100
Oct 11 02:22:28 compute-0 sshd-session[427801]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:22:28 compute-0 sshd-session[427801]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:22:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1335: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2376928246' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:22:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2376928246' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:22:28 compute-0 nova_compute[356901]: 2025-10-11 02:22:28.763 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:29 compute-0 ceph-mon[191930]: pgmap v1335: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:29 compute-0 nova_compute[356901]: 2025-10-11 02:22:29.329 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:29 compute-0 podman[157119]: time="2025-10-11T02:22:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:22:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:22:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:22:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:22:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9045 "" "Go-http-client/1.1"
Oct 11 02:22:29 compute-0 sshd-session[427801]: Failed password for invalid user debian from 121.227.153.123 port 39100 ssh2
Oct 11 02:22:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1336: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:30 compute-0 podman[427803]: 2025-10-11 02:22:30.19390331 +0000 UTC m=+0.089102374 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:22:30 compute-0 podman[427806]: 2025-10-11 02:22:30.206159182 +0000 UTC m=+0.074965166 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:22:30 compute-0 podman[427805]: 2025-10-11 02:22:30.21826675 +0000 UTC m=+0.098488206 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']})
Oct 11 02:22:30 compute-0 sshd-session[427801]: Connection closed by invalid user debian 121.227.153.123 port 39100 [preauth]
Oct 11 02:22:30 compute-0 podman[427804]: 2025-10-11 02:22:30.282825889 +0000 UTC m=+0.166141647 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 02:22:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:22:31 compute-0 ceph-mon[191930]: pgmap v1336: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:31 compute-0 openstack_network_exporter[374316]: ERROR   02:22:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:22:31 compute-0 openstack_network_exporter[374316]: ERROR   02:22:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:22:31 compute-0 openstack_network_exporter[374316]: ERROR   02:22:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:22:31 compute-0 openstack_network_exporter[374316]: ERROR   02:22:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:22:31 compute-0 openstack_network_exporter[374316]: ERROR   02:22:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:22:31 compute-0 sshd-session[427886]: Invalid user debian from 121.227.153.123 port 40366
Oct 11 02:22:31 compute-0 sshd-session[427886]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:22:31 compute-0 sshd-session[427886]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:22:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1337: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:33 compute-0 sshd-session[427886]: Failed password for invalid user debian from 121.227.153.123 port 40366 ssh2
Oct 11 02:22:33 compute-0 ceph-mon[191930]: pgmap v1337: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:33 compute-0 nova_compute[356901]: 2025-10-11 02:22:33.768 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1338: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:34 compute-0 sshd-session[427886]: Connection closed by invalid user debian 121.227.153.123 port 40366 [preauth]
Oct 11 02:22:34 compute-0 podman[427888]: 2025-10-11 02:22:34.249800892 +0000 UTC m=+0.125686038 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, container_name=multipathd, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']})
Oct 11 02:22:34 compute-0 podman[427889]: 2025-10-11 02:22:34.255086628 +0000 UTC m=+0.126389234 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, config_id=iscsid, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 02:22:34 compute-0 nova_compute[356901]: 2025-10-11 02:22:34.329 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:35 compute-0 ceph-mon[191930]: pgmap v1338: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:22:35 compute-0 sshd-session[427926]: Invalid user debian from 121.227.153.123 port 40382
Oct 11 02:22:35 compute-0 sshd-session[427926]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:22:35 compute-0 sshd-session[427926]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:22:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1339: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:37 compute-0 ceph-mon[191930]: pgmap v1339: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:37 compute-0 sshd-session[427926]: Failed password for invalid user debian from 121.227.153.123 port 40382 ssh2
Oct 11 02:22:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1340: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:38 compute-0 nova_compute[356901]: 2025-10-11 02:22:38.773 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:39 compute-0 ceph-mon[191930]: pgmap v1340: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:39 compute-0 nova_compute[356901]: 2025-10-11 02:22:39.333 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:39 compute-0 sshd-session[427926]: Connection closed by invalid user debian 121.227.153.123 port 40382 [preauth]
Oct 11 02:22:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1341: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:22:41 compute-0 sshd-session[427928]: Invalid user debian from 121.227.153.123 port 37428
Oct 11 02:22:41 compute-0 ceph-mon[191930]: pgmap v1341: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:41 compute-0 sshd-session[427928]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:22:41 compute-0 sshd-session[427928]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:22:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1342: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:43 compute-0 ceph-mon[191930]: pgmap v1342: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:43 compute-0 nova_compute[356901]: 2025-10-11 02:22:43.779 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:43 compute-0 sshd-session[427928]: Failed password for invalid user debian from 121.227.153.123 port 37428 ssh2
Oct 11 02:22:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1343: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:44 compute-0 nova_compute[356901]: 2025-10-11 02:22:44.335 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:45 compute-0 ceph-mon[191930]: pgmap v1343: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:22:45 compute-0 sshd-session[427928]: Connection closed by invalid user debian 121.227.153.123 port 37428 [preauth]
Oct 11 02:22:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1344: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:46 compute-0 podman[427934]: 2025-10-11 02:22:46.26714795 +0000 UTC m=+0.126095423 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:22:46 compute-0 podman[427933]: 2025-10-11 02:22:46.269709351 +0000 UTC m=+0.134840149 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, release=1755695350, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=openstack_network_exporter, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, distribution-scope=public, vcs-type=git, architecture=x86_64, io.openshift.expose-services=, vendor=Red Hat, Inc., io.openshift.tags=minimal rhel9, version=9.6, url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, build-date=2025-08-20T13:12:41, name=ubi9-minimal, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., com.redhat.component=ubi9-minimal-container)
Oct 11 02:22:46 compute-0 podman[427932]: 2025-10-11 02:22:46.286499885 +0000 UTC m=+0.159001636 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=edpm, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:22:46 compute-0 sshd-session[427930]: Invalid user debian from 121.227.153.123 port 37440
Oct 11 02:22:47 compute-0 sshd-session[427930]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:22:47 compute-0 sshd-session[427930]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:22:47 compute-0 ceph-mon[191930]: pgmap v1344: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:47 compute-0 nova_compute[356901]: 2025-10-11 02:22:47.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:22:47 compute-0 nova_compute[356901]: 2025-10-11 02:22:47.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:22:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1345: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:48 compute-0 nova_compute[356901]: 2025-10-11 02:22:48.785 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:49 compute-0 ceph-mon[191930]: pgmap v1345: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:49 compute-0 nova_compute[356901]: 2025-10-11 02:22:49.339 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:49 compute-0 sshd-session[427930]: Failed password for invalid user debian from 121.227.153.123 port 37440 ssh2
Oct 11 02:22:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1346: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:22:50 compute-0 ceph-mon[191930]: pgmap v1346: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:50 compute-0 nova_compute[356901]: 2025-10-11 02:22:50.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:22:51 compute-0 podman[427993]: 2025-10-11 02:22:51.242430339 +0000 UTC m=+0.117892019 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., summary=Provides the latest release of Red Hat Universal Base Image 9., config_id=edpm, vcs-type=git, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.buildah.version=1.29.0, version=9.4, build-date=2024-09-18T21:23:30, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, io.k8s.display-name=Red Hat Universal Base Image 9, distribution-scope=public, release=1214.1726694543, com.redhat.component=ubi9-container, io.openshift.tags=base rhel9, managed_by=edpm_ansible, container_name=kepler, release-0.7.12=, io.openshift.expose-services=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f)
Oct 11 02:22:51 compute-0 sshd-session[427930]: Connection closed by invalid user debian 121.227.153.123 port 37440 [preauth]
Oct 11 02:22:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1347: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:52 compute-0 sshd-session[428013]: Invalid user debian from 121.227.153.123 port 55088
Oct 11 02:22:52 compute-0 sshd-session[428013]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:22:52 compute-0 sshd-session[428013]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:22:53 compute-0 ceph-mon[191930]: pgmap v1347: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:53 compute-0 nova_compute[356901]: 2025-10-11 02:22:53.790 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:53 compute-0 nova_compute[356901]: 2025-10-11 02:22:53.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:22:53 compute-0 nova_compute[356901]: 2025-10-11 02:22:53.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:22:53 compute-0 nova_compute[356901]: 2025-10-11 02:22:53.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:22:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1348: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:54 compute-0 nova_compute[356901]: 2025-10-11 02:22:54.342 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:54 compute-0 nova_compute[356901]: 2025-10-11 02:22:54.750 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:22:54 compute-0 nova_compute[356901]: 2025-10-11 02:22:54.751 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:22:54 compute-0 nova_compute[356901]: 2025-10-11 02:22:54.752 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:22:54 compute-0 nova_compute[356901]: 2025-10-11 02:22:54.752 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:22:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:22:54.848 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:22:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:22:54.850 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:22:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:22:54.851 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:22:55 compute-0 ceph-mon[191930]: pgmap v1348: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:55 compute-0 sshd-session[428013]: Failed password for invalid user debian from 121.227.153.123 port 55088 ssh2
Oct 11 02:22:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:22:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1349: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:22:56
Oct 11 02:22:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:22:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:22:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['.mgr', 'images', 'cephfs.cephfs.data', '.rgw.root', 'default.rgw.meta', 'volumes', 'default.rgw.log', 'backups', 'cephfs.cephfs.meta', 'default.rgw.control', 'vms']
Oct 11 02:22:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:22:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:22:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:22:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:22:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:22:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:22:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:22:56 compute-0 nova_compute[356901]: 2025-10-11 02:22:56.795 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:22:56 compute-0 nova_compute[356901]: 2025-10-11 02:22:56.811 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:22:56 compute-0 nova_compute[356901]: 2025-10-11 02:22:56.812 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:22:56 compute-0 nova_compute[356901]: 2025-10-11 02:22:56.813 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:22:56 compute-0 nova_compute[356901]: 2025-10-11 02:22:56.814 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:22:56 compute-0 nova_compute[356901]: 2025-10-11 02:22:56.814 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:22:56 compute-0 nova_compute[356901]: 2025-10-11 02:22:56.815 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:22:56 compute-0 nova_compute[356901]: 2025-10-11 02:22:56.870 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:22:56 compute-0 nova_compute[356901]: 2025-10-11 02:22:56.871 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:22:56 compute-0 nova_compute[356901]: 2025-10-11 02:22:56.871 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:22:56 compute-0 nova_compute[356901]: 2025-10-11 02:22:56.872 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:22:56 compute-0 nova_compute[356901]: 2025-10-11 02:22:56.872 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:22:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:22:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:22:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:22:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:22:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:22:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:22:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:22:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:22:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:22:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:22:57 compute-0 ceph-mon[191930]: pgmap v1349: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:57 compute-0 sshd-session[428013]: Connection closed by invalid user debian 121.227.153.123 port 55088 [preauth]
Oct 11 02:22:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:22:57 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/4199728650' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:22:57 compute-0 nova_compute[356901]: 2025-10-11 02:22:57.424 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.552s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:22:57 compute-0 nova_compute[356901]: 2025-10-11 02:22:57.576 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:22:57 compute-0 nova_compute[356901]: 2025-10-11 02:22:57.577 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:22:57 compute-0 nova_compute[356901]: 2025-10-11 02:22:57.578 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:22:57 compute-0 nova_compute[356901]: 2025-10-11 02:22:57.587 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:22:57 compute-0 nova_compute[356901]: 2025-10-11 02:22:57.588 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:22:57 compute-0 nova_compute[356901]: 2025-10-11 02:22:57.589 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:22:58 compute-0 nova_compute[356901]: 2025-10-11 02:22:58.058 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:22:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1350: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:58 compute-0 nova_compute[356901]: 2025-10-11 02:22:58.060 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3715MB free_disk=59.92200469970703GB free_vcpus=6 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:22:58 compute-0 nova_compute[356901]: 2025-10-11 02:22:58.061 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:22:58 compute-0 nova_compute[356901]: 2025-10-11 02:22:58.062 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:22:58 compute-0 nova_compute[356901]: 2025-10-11 02:22:58.140 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:22:58 compute-0 nova_compute[356901]: 2025-10-11 02:22:58.141 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance d60d7ea1-5d00-4902-90e6-3ae67eb09a78 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:22:58 compute-0 nova_compute[356901]: 2025-10-11 02:22:58.142 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 2 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:22:58 compute-0 nova_compute[356901]: 2025-10-11 02:22:58.142 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1536MB phys_disk=59GB used_disk=4GB total_vcpus=8 used_vcpus=2 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:22:58 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/4199728650' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:22:58 compute-0 nova_compute[356901]: 2025-10-11 02:22:58.189 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:22:58 compute-0 sshd-session[428037]: Invalid user debian from 121.227.153.123 port 55102
Oct 11 02:22:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:22:58 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/311439879' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:22:58 compute-0 sshd-session[428037]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:22:58 compute-0 sshd-session[428037]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:22:58 compute-0 nova_compute[356901]: 2025-10-11 02:22:58.704 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.514s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:22:58 compute-0 nova_compute[356901]: 2025-10-11 02:22:58.714 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:22:58 compute-0 nova_compute[356901]: 2025-10-11 02:22:58.733 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:22:58 compute-0 nova_compute[356901]: 2025-10-11 02:22:58.735 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:22:58 compute-0 nova_compute[356901]: 2025-10-11 02:22:58.735 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.674s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:22:58 compute-0 nova_compute[356901]: 2025-10-11 02:22:58.796 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:58 compute-0 nova_compute[356901]: 2025-10-11 02:22:58.818 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:22:58 compute-0 nova_compute[356901]: 2025-10-11 02:22:58.818 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:22:59 compute-0 ceph-mon[191930]: pgmap v1350: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:22:59 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/311439879' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:22:59 compute-0 nova_compute[356901]: 2025-10-11 02:22:59.345 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:22:59 compute-0 podman[157119]: time="2025-10-11T02:22:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:22:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:22:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:22:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:22:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9060 "" "Go-http-client/1.1"
Oct 11 02:23:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1351: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:23:00 compute-0 sshd-session[428037]: Failed password for invalid user debian from 121.227.153.123 port 55102 ssh2
Oct 11 02:23:00 compute-0 sshd-session[428037]: Connection closed by invalid user debian 121.227.153.123 port 55102 [preauth]
Oct 11 02:23:01 compute-0 ceph-mon[191930]: pgmap v1351: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:01 compute-0 podman[428061]: 2025-10-11 02:23:01.213517871 +0000 UTC m=+0.103854549 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:23:01 compute-0 podman[428063]: 2025-10-11 02:23:01.234147722 +0000 UTC m=+0.102026023 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_id=edpm, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 02:23:01 compute-0 podman[428069]: 2025-10-11 02:23:01.234976603 +0000 UTC m=+0.092160677 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:23:01 compute-0 podman[428062]: 2025-10-11 02:23:01.280798735 +0000 UTC m=+0.155990721 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, org.label-schema.build-date=20251009)
Oct 11 02:23:01 compute-0 openstack_network_exporter[374316]: ERROR   02:23:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:23:01 compute-0 openstack_network_exporter[374316]: ERROR   02:23:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:23:01 compute-0 openstack_network_exporter[374316]: ERROR   02:23:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:23:01 compute-0 openstack_network_exporter[374316]: ERROR   02:23:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:23:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1352: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:02 compute-0 sshd-session[428099]: Invalid user debian from 121.227.153.123 port 57504
Oct 11 02:23:02 compute-0 sshd-session[428099]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:23:02 compute-0 sshd-session[428099]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:23:03 compute-0 ceph-mon[191930]: pgmap v1352: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:03 compute-0 nova_compute[356901]: 2025-10-11 02:23:03.801 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:03 compute-0 sshd-session[428099]: Failed password for invalid user debian from 121.227.153.123 port 57504 ssh2
Oct 11 02:23:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1353: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:04 compute-0 nova_compute[356901]: 2025-10-11 02:23:04.348 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:04 compute-0 sshd-session[428099]: Connection closed by invalid user debian 121.227.153.123 port 57504 [preauth]
Oct 11 02:23:05 compute-0 ceph-mon[191930]: pgmap v1353: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:05 compute-0 podman[428146]: 2025-10-11 02:23:05.250467144 +0000 UTC m=+0.123539293 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, tcib_managed=true, config_id=iscsid, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.schema-version=1.0)
Oct 11 02:23:05 compute-0 podman[428145]: 2025-10-11 02:23:05.2766719 +0000 UTC m=+0.166929039 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, managed_by=edpm_ansible, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, config_id=multipathd, container_name=multipathd, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 02:23:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1354: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:06 compute-0 sshd-session[428143]: Invalid user debian from 121.227.153.123 port 57520
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0011048249576740248 of space, bias 1.0, pg target 0.33144748730220747 quantized to 32 (current 32)
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:23:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:23:07 compute-0 sshd-session[428143]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:23:07 compute-0 sshd-session[428143]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:23:07 compute-0 ceph-mon[191930]: pgmap v1354: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1355: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:08 compute-0 sshd-session[428143]: Failed password for invalid user debian from 121.227.153.123 port 57520 ssh2
Oct 11 02:23:08 compute-0 nova_compute[356901]: 2025-10-11 02:23:08.806 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:09 compute-0 ceph-mon[191930]: pgmap v1355: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:09 compute-0 sshd-session[428143]: Connection closed by invalid user debian 121.227.153.123 port 57520 [preauth]
Oct 11 02:23:09 compute-0 nova_compute[356901]: 2025-10-11 02:23:09.352 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1356: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:23:10 compute-0 sshd-session[428184]: Invalid user debian from 121.227.153.123 port 50052
Oct 11 02:23:10 compute-0 sshd-session[428184]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:23:10 compute-0 sshd-session[428184]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:23:11 compute-0 ceph-mon[191930]: pgmap v1356: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:23:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 2400.1 total, 600.0 interval
                                            Cumulative writes: 6483 writes, 26K keys, 6483 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.01 MB/s
                                            Cumulative WAL: 6483 writes, 1251 syncs, 5.18 writes per sync, written: 0.02 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 632 writes, 1942 keys, 632 commit groups, 1.0 writes per commit group, ingest: 1.89 MB, 0.00 MB/s
                                            Interval WAL: 632 writes, 260 syncs, 2.43 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:23:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1357: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:12 compute-0 sshd-session[428184]: Failed password for invalid user debian from 121.227.153.123 port 50052 ssh2
Oct 11 02:23:13 compute-0 sshd-session[428184]: Connection closed by invalid user debian 121.227.153.123 port 50052 [preauth]
Oct 11 02:23:13 compute-0 ceph-mon[191930]: pgmap v1357: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:13 compute-0 nova_compute[356901]: 2025-10-11 02:23:13.811 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.862 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is greater than the number of worker threads available to execute them; therefore, the polling process can be expected to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.863 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.864 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 sudo[428188]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb08710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.873 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': 'd60d7ea1-5d00-4902-90e6-3ae67eb09a78', 'name': 'vn-vgckve2-ittzoa6m3dmq-egfg3ceao3k4-vnf-rvnztbwt2zgh', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000002', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {'metering.server_group': '3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.876 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.876 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.876 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.876 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.876 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.877 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:23:13.876806) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:13 compute-0 sudo[428188]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.883 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.bytes volume: 5149 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:13 compute-0 sudo[428188]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.888 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2268 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.889 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.889 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.889 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.890 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.890 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.890 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.890 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.packets volume: 41 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.891 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:23:13.890600) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.892 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 21 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.892 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.893 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.893 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.893 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.893 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.894 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.894 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.894 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:23:13.894036) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.895 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.896 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.896 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.896 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.896 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.897 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.897 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.897 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:23:13.897178) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.898 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.898 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.899 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.899 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.900 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.900 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.900 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.900 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.901 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:23:13.900529) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.926 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.927 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.927 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.capacity volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.970 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.970 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.971 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.972 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.972 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.972 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.973 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.973 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.973 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:13.975 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:23:13.973701) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 sudo[428214]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:23:14 compute-0 sudo[428214]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:23:14 compute-0 sudo[428214]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.046 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.bytes volume: 23325184 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.047 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.048 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.bytes volume: 385378 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1358: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:14 compute-0 sudo[428239]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:23:14 compute-0 sudo[428239]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:23:14 compute-0 sudo[428239]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.136 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.137 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.138 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.138 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.139 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.139 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.139 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.139 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.140 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.140 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.latency volume: 1853196562 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.140 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.latency volume: 293231554 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.141 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:23:14.139949) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.142 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.latency volume: 250459547 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.142 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.143 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.143 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.144 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.144 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.145 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.145 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.145 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.145 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.146 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:23:14.145720) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.145 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.requests volume: 844 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.147 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.147 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.requests volume: 124 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.147 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.148 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.149 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.149 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.150 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.150 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.150 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.150 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.150 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.151 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.151 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.152 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.usage volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.152 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:23:14.150846) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.152 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.153 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.153 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.154 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.154 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.155 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.155 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.155 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.155 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.155 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.bytes volume: 41836544 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.156 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:23:14.155629) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.156 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.157 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.158 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.158 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.158 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.159 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.159 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.159 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.160 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.160 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.160 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.160 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.latency volume: 5140134066 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.161 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.latency volume: 26893276 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.161 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.161 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.162 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.162 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.163 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.164 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:23:14.160370) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.164 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.164 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.164 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.165 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.165 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.167 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:23:14.165388) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.195 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.227 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.228 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.228 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.228 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.229 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.229 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.229 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.229 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.requests volume: 238 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.230 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.231 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.231 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.232 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:23:14.229588) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.232 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.233 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 sudo[428264]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.234 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.234 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.234 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.234 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.234 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.235 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.235 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.bytes.delta volume: 3321 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 sudo[428264]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.238 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.239 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.239 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:23:14.235038) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.240 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.240 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.240 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.240 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.240 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.241 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.241 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.241 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:23:14.241179) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.242 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.242 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.243 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.243 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.243 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.243 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.243 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.244 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 19 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.245 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.245 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.246 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.246 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.246 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.246 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.247 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.bytes.delta volume: 2572 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.247 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:23:14.243755) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.247 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:23:14.246840) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.248 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 70 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.248 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.249 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.249 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.249 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.249 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.250 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.250 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.251 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.251 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.251 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.251 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.252 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.252 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:23:14.249978) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.252 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:23:14.251979) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.252 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.253 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.254 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.254 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.254 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.254 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.254 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.255 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.255 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:23:14.255070) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.255 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.256 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.256 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.allocation volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.257 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.258 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.258 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.259 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.259 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.259 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.259 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.260 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.260 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.260 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.260 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:23:14.260190) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.262 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.262 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.262 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.263 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.263 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.263 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.263 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.263 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/cpu volume: 145010000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.263 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 36970000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.264 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
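The cpu meter is cumulative guest CPU time in nanoseconds (145010000000 ns is roughly 145 s of CPU time for instance d60d7ea1). Utilization has to be derived from two consecutive readings; a minimal worked sketch, where the previous reading, polling interval, and vCPU count are hypothetical:

    # Two consecutive cumulative cpu-time readings, in nanoseconds.
    prev_ns = 144_700_000_000   # hypothetical earlier reading
    curr_ns = 145_010_000_000   # value seen in the log above
    interval_s = 300            # hypothetical polling interval
    vcpus = 2                   # hypothetical vCPU count

    # Fraction of available CPU time consumed over the interval, as a percentage.
    cpu_util = (curr_ns - prev_ns) / (interval_s * 1e9 * vcpus) * 100
    print(f"cpu_util: {cpu_util:.2f}%")  # 0.05% with these numbers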
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.264 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.264 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.264 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.264 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.264 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.265 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.bytes volume: 4760 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.265 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2202 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.265 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.266 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.266 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:23:14.263424) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.266 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:23:14.264971) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.267 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.267 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.267 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.267 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.267 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/memory.usage volume: 49.09375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.267 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.87109375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.268 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.268 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:23:14.267457) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
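memory.usage is reported in MB (49.09375 MB and 48.87109375 MB above; both are exact multiples of 1/1024, i.e. whole KiB). A minimal sketch of deriving it from libvirt's per-domain memory statistics; treating usage as available minus unused is an assumption about this deployment, not something stated in the log:

    import libvirt

    conn = libvirt.openReadOnly('qemu:///system')
    dom = conn.lookupByUUIDString('d60d7ea1-5d00-4902-90e6-3ae67eb09a78')
    stats = dom.memoryStats()  # balloon statistics, values in KiB
    if 'available' in stats and 'unused' in stats:
        usage_mb = (stats['available'] - stats['unused']) / 1024.0
        print(f"memory.usage volume: {usage_mb}")  # e.g. 49.09375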
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.269 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.269 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.269 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.270 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.270 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.270 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.270 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.270 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.271 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.271 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.271 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.271 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.271 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.272 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.272 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.272 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.272 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.272 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.272 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.273 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.273 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.273 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.273 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.273 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.273 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.274 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.274 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:23:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:23:14.274 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
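Lines 02:23:14.259 through 02:23:14.274 trace one complete polling cycle per pollster: run discovery, check whether the source needs coordination, poll each discovered resource into samples, update the pollster heartbeat, and skip the pollster entirely when discovery finds nothing new (as with network.outgoing.bytes.rate above). A minimal sketch of that control flow, with hypothetical pollster and discovery callables rather than ceilometer's real AgentManager:

    from datetime import datetime, timezone

    def run_polling_cycle(pollsters, discover, heartbeats):
        for pollster in pollsters:
            resources = discover(pollster)  # e.g. local_instances discovery
            if not resources:
                print(f"Skip pollster {pollster.name}, no new resources found this cycle")
                continue
            print(f"Polling pollster {pollster.name}")
            heartbeats[pollster.name] = datetime.now(timezone.utc)  # heartbeat update
            for resource in resources:
                for volume in pollster.get_samples(resource):  # hypothetical method
                    print(f"{resource}/{pollster.name} volume: {volume}")
            print(f"Finished polling pollster {pollster.name}")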
Oct 11 02:23:14 compute-0 nova_compute[356901]: 2025-10-11 02:23:14.356 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:14 compute-0 sshd-session[428186]: Invalid user debian from 121.227.153.123 port 50062
Oct 11 02:23:14 compute-0 sshd-session[428186]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:23:14 compute-0 sshd-session[428186]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:23:14 compute-0 sudo[428264]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:23:14 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:23:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:23:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:23:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:23:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:23:14 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 2417a2a8-764e-4c0e-af29-4a7d3793c057 does not exist
Oct 11 02:23:14 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 95b8f54a-a9ec-4a8d-b6c1-d169367179fb does not exist
Oct 11 02:23:14 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 7cd56cd7-1111-400a-b287-c23b1ef06244 does not exist
Oct 11 02:23:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:23:14 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:23:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:23:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:23:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:23:14 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
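Each handle_command entry above is a JSON mon_command dispatched to the monitor by the mgr. The same commands can be issued from the rados Python bindings; a minimal sketch, assuming a readable /etc/ceph/ceph.conf and client.admin keyring on this host:

    import json
    import rados

    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
    cluster.connect()
    try:
        cmd = json.dumps({"prefix": "config generate-minimal-conf"})
        ret, outbuf, outs = cluster.mon_command(cmd, b'')  # (return code, stdout, status)
        print(outbuf.decode())  # minimal ceph.conf suitable for clients
    finally:
        cluster.shutdown()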
Oct 11 02:23:15 compute-0 sudo[428319]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:23:15 compute-0 sudo[428319]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:23:15 compute-0 sudo[428319]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:15 compute-0 sudo[428344]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:23:15 compute-0 sudo[428344]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:23:15 compute-0 sudo[428344]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:15 compute-0 ceph-mon[191930]: pgmap v1358: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:23:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:23:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:23:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:23:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:23:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:23:15 compute-0 sudo[428369]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:23:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:23:15 compute-0 sudo[428369]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:23:15 compute-0 sudo[428369]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:15 compute-0 sudo[428394]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:23:15 compute-0 sudo[428394]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
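The sudo command at 02:23:15 is cephadm wrapping `ceph-volume lvm batch` over three pre-created logical volumes: --no-auto disables automatic fast-device slicing, --yes suppresses the interactive prompt, and --no-systemd leaves unit activation to cephadm itself. A minimal sketch of launching the same command from Python, feeding the config on stdin via `--config-json -`; the config payload here is a placeholder, not content recovered from the log:

    import json
    import subprocess

    fsid = '3c7617c3-7a20-523e-a9de-20c0d6ba41da'
    cephadm = f'/var/lib/ceph/{fsid}/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d'
    image = 'quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0'
    config_json = json.dumps({'config': '...', 'keyring': '...'})  # placeholder payload

    subprocess.run(
        ['sudo', cephadm,
         '--env', 'CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group',
         '--image', image, '--timeout', '895',
         'ceph-volume', '--fsid', fsid, '--config-json', '-', '--',
         'lvm', 'batch', '--no-auto',
         '/dev/ceph_vg0/ceph_lv0', '/dev/ceph_vg1/ceph_lv1', '/dev/ceph_vg2/ceph_lv2',
         '--yes', '--no-systemd'],
        input=config_json.encode(), check=True)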
Oct 11 02:23:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1359: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:16 compute-0 podman[428457]: 2025-10-11 02:23:16.072588378 +0000 UTC m=+0.092545980 container create 86d31f350fd2ff4e9f0a8e908b26b5669176d80f6583a6689559bce57ae14826 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_torvalds, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 02:23:16 compute-0 podman[428457]: 2025-10-11 02:23:16.029871248 +0000 UTC m=+0.049828890 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:23:16 compute-0 systemd[1]: Started libpod-conmon-86d31f350fd2ff4e9f0a8e908b26b5669176d80f6583a6689559bce57ae14826.scope.
Oct 11 02:23:16 compute-0 sshd-session[428186]: Failed password for invalid user debian from 121.227.153.123 port 50062 ssh2
Oct 11 02:23:16 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:23:16 compute-0 podman[428457]: 2025-10-11 02:23:16.238674548 +0000 UTC m=+0.258632130 container init 86d31f350fd2ff4e9f0a8e908b26b5669176d80f6583a6689559bce57ae14826 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_torvalds, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:23:16 compute-0 podman[428457]: 2025-10-11 02:23:16.252354566 +0000 UTC m=+0.272312168 container start 86d31f350fd2ff4e9f0a8e908b26b5669176d80f6583a6689559bce57ae14826 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_torvalds, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 02:23:16 compute-0 podman[428457]: 2025-10-11 02:23:16.258322756 +0000 UTC m=+0.278280358 container attach 86d31f350fd2ff4e9f0a8e908b26b5669176d80f6583a6689559bce57ae14826 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_torvalds, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 02:23:16 compute-0 cranky_torvalds[428473]: 167 167
Oct 11 02:23:16 compute-0 systemd[1]: libpod-86d31f350fd2ff4e9f0a8e908b26b5669176d80f6583a6689559bce57ae14826.scope: Deactivated successfully.
Oct 11 02:23:16 compute-0 podman[428457]: 2025-10-11 02:23:16.266949195 +0000 UTC m=+0.286906797 container died 86d31f350fd2ff4e9f0a8e908b26b5669176d80f6583a6689559bce57ae14826 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_torvalds, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3)
Oct 11 02:23:16 compute-0 systemd[1]: var-lib-containers-storage-overlay-7b995ea5375fa2da38dfeac9a0375afbbf49151f3a75fa1bf2c87015dd7a6187-merged.mount: Deactivated successfully.
Oct 11 02:23:16 compute-0 podman[428457]: 2025-10-11 02:23:16.352354484 +0000 UTC m=+0.372312056 container remove 86d31f350fd2ff4e9f0a8e908b26b5669176d80f6583a6689559bce57ae14826 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_torvalds, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:23:16 compute-0 systemd[1]: libpod-conmon-86d31f350fd2ff4e9f0a8e908b26b5669176d80f6583a6689559bce57ae14826.scope: Deactivated successfully.
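The create → init → start → attach → died → remove sequence for container 86d31f350fd2… is the event trail of a short-lived foreground container that podman deletes on exit; the `167 167` line is its only output (167 is the uid/gid the ceph images run as, so this looks like an ownership probe, though the exact entrypoint is not visible in the log). A minimal sketch reproducing that lifecycle with a hypothetical command:

    import subprocess

    # --rm removes the container as soon as it exits, which yields the same
    # create/init/start/attach/died/remove event sequence seen in the journal.
    result = subprocess.run(
        ['podman', 'run', '--rm',
         'quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0',
         'stat', '-c', '%u %g', '/var/lib/ceph'],  # hypothetical ownership probe
        capture_output=True, text=True, check=True)
    print(result.stdout.strip())  # e.g. "167 167"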
Oct 11 02:23:16 compute-0 podman[428492]: 2025-10-11 02:23:16.449207537 +0000 UTC m=+0.077816066 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:23:16 compute-0 podman[428491]: 2025-10-11 02:23:16.455422975 +0000 UTC m=+0.106086423 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=1755695350, vcs-type=git, com.redhat.component=ubi9-minimal-container, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., architecture=x86_64, config_id=edpm, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, maintainer=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, distribution-scope=public, io.buildah.version=1.33.7, version=9.6, container_name=openstack_network_exporter, io.openshift.tags=minimal rhel9, url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, build-date=2025-08-20T13:12:41, managed_by=edpm_ansible, name=ubi9-minimal, io.openshift.expose-services=)
Oct 11 02:23:16 compute-0 podman[428490]: 2025-10-11 02:23:16.465153861 +0000 UTC m=+0.112876120 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_id=edpm, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']})
Oct 11 02:23:16 compute-0 podman[428556]: 2025-10-11 02:23:16.588895614 +0000 UTC m=+0.070073567 container create d6ae6b0882913e610090c535a970309a0b084797afdab7272a78951246fbc320 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_albattani, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:23:16 compute-0 podman[428556]: 2025-10-11 02:23:16.559305623 +0000 UTC m=+0.040483546 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:23:16 compute-0 systemd[1]: Started libpod-conmon-d6ae6b0882913e610090c535a970309a0b084797afdab7272a78951246fbc320.scope.
Oct 11 02:23:16 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:23:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d9239a89d359dfec2c45153d3269e9d905c8ddc7e99eb08d7e8ce7727315acc0/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:23:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d9239a89d359dfec2c45153d3269e9d905c8ddc7e99eb08d7e8ce7727315acc0/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:23:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d9239a89d359dfec2c45153d3269e9d905c8ddc7e99eb08d7e8ce7727315acc0/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:23:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d9239a89d359dfec2c45153d3269e9d905c8ddc7e99eb08d7e8ce7727315acc0/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:23:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d9239a89d359dfec2c45153d3269e9d905c8ddc7e99eb08d7e8ce7727315acc0/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
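The xfs messages above are informational: these overlay mounts were created without the bigtime feature, so their inode timestamps saturate at 0x7fffffff, the 32-bit time_t limit. What that limit means in calendar time:

    from datetime import datetime, timezone

    limit = 0x7FFFFFFF  # value reported by the kernel messages above
    print(datetime.fromtimestamp(limit, tz=timezone.utc))
    # 2038-01-19 03:14:07+00:00 -- the classic "year 2038" cutoff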
Oct 11 02:23:16 compute-0 podman[428556]: 2025-10-11 02:23:16.741365989 +0000 UTC m=+0.222543992 container init d6ae6b0882913e610090c535a970309a0b084797afdab7272a78951246fbc320 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_albattani, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 02:23:16 compute-0 podman[428556]: 2025-10-11 02:23:16.766081236 +0000 UTC m=+0.247259189 container start d6ae6b0882913e610090c535a970309a0b084797afdab7272a78951246fbc320 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_albattani, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:23:16 compute-0 podman[428556]: 2025-10-11 02:23:16.774469617 +0000 UTC m=+0.255647560 container attach d6ae6b0882913e610090c535a970309a0b084797afdab7272a78951246fbc320 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_albattani, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 02:23:17 compute-0 ceph-mon[191930]: pgmap v1359: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:17 compute-0 sshd-session[428186]: Connection closed by invalid user debian 121.227.153.123 port 50062 [preauth]
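Interleaved with the storage work, lines 02:23:14-02:23:17 record an SSH brute-force attempt: an invalid user 'debian' from 121.227.153.123, a failed password, then the client closing the connection pre-auth. A minimal sketch of tallying such attempts per source address from a journal dump like this one (the input path is a placeholder):

    import re
    from collections import Counter

    INVALID = re.compile(r'Invalid user (\S+) from (\S+) port \d+')

    attempts = Counter()
    with open('/var/log/messages') as fh:  # placeholder path
        for line in fh:
            m = INVALID.search(line)
            if m:
                user, source = m.groups()
                attempts[source] += 1

    for source, count in attempts.most_common(10):
        print(f'{source}: {count} invalid-user attempt(s)')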
Oct 11 02:23:18 compute-0 brave_albattani[428573]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:23:18 compute-0 brave_albattani[428573]: --> relative data size: 1.0
Oct 11 02:23:18 compute-0 brave_albattani[428573]: --> All data devices are unavailable
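"All data devices are unavailable" means ceph-volume rejected every LV handed to the batch, typically because they already carry OSD data or fail its eligibility filters, so this run prepared nothing new. A minimal sketch of inspecting device eligibility with `ceph-volume inventory`; the JSON field names ('available', 'rejected_reasons', 'path') are assumptions about the inventory output format, not values taken from this log:

    import json
    import subprocess

    out = subprocess.run(
        ['ceph-volume', 'inventory', '--format', 'json'],
        capture_output=True, text=True, check=True).stdout

    for dev in json.loads(out):
        if not dev.get('available'):
            reasons = ', '.join(dev.get('rejected_reasons', []))
            print(f"{dev.get('path')}: rejected ({reasons})")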
Oct 11 02:23:18 compute-0 systemd[1]: libpod-d6ae6b0882913e610090c535a970309a0b084797afdab7272a78951246fbc320.scope: Deactivated successfully.
Oct 11 02:23:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1360: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:18 compute-0 systemd[1]: libpod-d6ae6b0882913e610090c535a970309a0b084797afdab7272a78951246fbc320.scope: Consumed 1.230s CPU time.
Oct 11 02:23:18 compute-0 podman[428556]: 2025-10-11 02:23:18.07237863 +0000 UTC m=+1.553556553 container died d6ae6b0882913e610090c535a970309a0b084797afdab7272a78951246fbc320 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_albattani, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507)
Oct 11 02:23:18 compute-0 systemd[1]: var-lib-containers-storage-overlay-d9239a89d359dfec2c45153d3269e9d905c8ddc7e99eb08d7e8ce7727315acc0-merged.mount: Deactivated successfully.
Oct 11 02:23:18 compute-0 podman[428556]: 2025-10-11 02:23:18.148681544 +0000 UTC m=+1.629859457 container remove d6ae6b0882913e610090c535a970309a0b084797afdab7272a78951246fbc320 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_albattani, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:23:18 compute-0 sudo[428394]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:18 compute-0 systemd[1]: libpod-conmon-d6ae6b0882913e610090c535a970309a0b084797afdab7272a78951246fbc320.scope: Deactivated successfully.
Oct 11 02:23:18 compute-0 sudo[428614]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:23:18 compute-0 sudo[428614]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:23:18 compute-0 sudo[428614]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:18 compute-0 sudo[428639]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:23:18 compute-0 sudo[428639]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:23:18 compute-0 sudo[428639]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:18 compute-0 sudo[428664]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:23:18 compute-0 sudo[428664]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:23:18 compute-0 sudo[428664]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:18 compute-0 sudo[428689]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:23:18 compute-0 sudo[428689]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:23:18 compute-0 nova_compute[356901]: 2025-10-11 02:23:18.815 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:23:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 2400.1 total, 600.0 interval
                                            Cumulative writes: 7815 writes, 31K keys, 7815 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.01 MB/s
                                            Cumulative WAL: 7815 writes, 1623 syncs, 4.82 writes per sync, written: 0.02 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 688 writes, 2167 keys, 688 commit groups, 1.0 writes per commit group, ingest: 2.15 MB, 0.00 MB/s
                                            Interval WAL: 688 writes, 284 syncs, 2.42 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
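The RocksDB stats dump is internally consistent, and its derived ratios can be re-checked directly: 7815 WAL writes over 1623 syncs gives the reported 4.82 writes per sync, 688 over 284 gives the interval's 2.42, and 0.02 GB over 2400.1 s of uptime rounds to the logged 0.01 MB/s:

    cumulative_writes, cumulative_syncs = 7815, 1623
    interval_writes, interval_syncs = 688, 284

    print(round(cumulative_writes / cumulative_syncs, 2))  # 4.82
    print(round(interval_writes / interval_syncs, 2))      # 2.42
    print(round(0.02 * 1024 / 2400.1, 4), 'MB/s')          # 0.0085, logged as 0.01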
Oct 11 02:23:19 compute-0 podman[428752]: 2025-10-11 02:23:19.039385395 +0000 UTC m=+0.062900797 container create 7ba787431a1b13d9b18f8659753857afb2b784ffe2a55711a716c322f3a520a8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_brattain, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 02:23:19 compute-0 systemd[1]: Started libpod-conmon-7ba787431a1b13d9b18f8659753857afb2b784ffe2a55711a716c322f3a520a8.scope.
Oct 11 02:23:19 compute-0 podman[428752]: 2025-10-11 02:23:19.019183909 +0000 UTC m=+0.042699331 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:23:19 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:23:19 compute-0 podman[428752]: 2025-10-11 02:23:19.164608758 +0000 UTC m=+0.188124210 container init 7ba787431a1b13d9b18f8659753857afb2b784ffe2a55711a716c322f3a520a8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_brattain, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:23:19 compute-0 podman[428752]: 2025-10-11 02:23:19.176718223 +0000 UTC m=+0.200233625 container start 7ba787431a1b13d9b18f8659753857afb2b784ffe2a55711a716c322f3a520a8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_brattain, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, ceph=True)
Oct 11 02:23:19 compute-0 podman[428752]: 2025-10-11 02:23:19.18110021 +0000 UTC m=+0.204615662 container attach 7ba787431a1b13d9b18f8659753857afb2b784ffe2a55711a716c322f3a520a8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_brattain, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507)
Oct 11 02:23:19 compute-0 quizzical_brattain[428768]: 167 167
Oct 11 02:23:19 compute-0 podman[428752]: 2025-10-11 02:23:19.190442943 +0000 UTC m=+0.213958335 container died 7ba787431a1b13d9b18f8659753857afb2b784ffe2a55711a716c322f3a520a8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_brattain, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:23:19 compute-0 systemd[1]: libpod-7ba787431a1b13d9b18f8659753857afb2b784ffe2a55711a716c322f3a520a8.scope: Deactivated successfully.
Oct 11 02:23:19 compute-0 systemd[1]: var-lib-containers-storage-overlay-de7b275fefccdea6d00670c69a95a198afe833caea4b911211584c4cd54fbd11-merged.mount: Deactivated successfully.
Oct 11 02:23:19 compute-0 podman[428752]: 2025-10-11 02:23:19.24473169 +0000 UTC m=+0.268247102 container remove 7ba787431a1b13d9b18f8659753857afb2b784ffe2a55711a716c322f3a520a8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_brattain, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Oct 11 02:23:19 compute-0 systemd[1]: libpod-conmon-7ba787431a1b13d9b18f8659753857afb2b784ffe2a55711a716c322f3a520a8.scope: Deactivated successfully.
Oct 11 02:23:19 compute-0 ceph-mon[191930]: pgmap v1360: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:19 compute-0 nova_compute[356901]: 2025-10-11 02:23:19.359 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:19 compute-0 podman[428793]: 2025-10-11 02:23:19.507040823 +0000 UTC m=+0.072870721 container create 23ce5d3ab3b4b6d76658a6999d326b370bd332b2f46f71928d0592b45c6ad23c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_villani, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:23:19 compute-0 systemd[1]: Started libpod-conmon-23ce5d3ab3b4b6d76658a6999d326b370bd332b2f46f71928d0592b45c6ad23c.scope.
Oct 11 02:23:19 compute-0 podman[428793]: 2025-10-11 02:23:19.475032531 +0000 UTC m=+0.040862399 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:23:19 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:23:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c452180c71b27524c1d941b3d1f7e7cacd4783593a9632fe21ea0881fec7cce0/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:23:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c452180c71b27524c1d941b3d1f7e7cacd4783593a9632fe21ea0881fec7cce0/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:23:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c452180c71b27524c1d941b3d1f7e7cacd4783593a9632fe21ea0881fec7cce0/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:23:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c452180c71b27524c1d941b3d1f7e7cacd4783593a9632fe21ea0881fec7cce0/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
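[editor's note] The kernel's "supports timestamps until 2038 (0x7fffffff)" warnings refer to the signed 32-bit Unix epoch limit on these xfs inode timestamps. A quick check in Python; the date follows from the constant itself, nothing here is taken from outside the log:

    from datetime import datetime, timezone

    # 0x7fffffff is the largest signed 32-bit value; as a Unix epoch it is
    # the "year 2038" cutoff the kernel warns about for these xfs mounts.
    limit = datetime.fromtimestamp(0x7FFFFFFF, tz=timezone.utc)
    print(limit.isoformat())  # 2038-01-19T03:14:07+00:00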
Oct 11 02:23:19 compute-0 podman[428793]: 2025-10-11 02:23:19.655864865 +0000 UTC m=+0.221694753 container init 23ce5d3ab3b4b6d76658a6999d326b370bd332b2f46f71928d0592b45c6ad23c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_villani, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 02:23:19 compute-0 podman[428793]: 2025-10-11 02:23:19.669309876 +0000 UTC m=+0.235139744 container start 23ce5d3ab3b4b6d76658a6999d326b370bd332b2f46f71928d0592b45c6ad23c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_villani, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:23:19 compute-0 podman[428793]: 2025-10-11 02:23:19.675334307 +0000 UTC m=+0.241164175 container attach 23ce5d3ab3b4b6d76658a6999d326b370bd332b2f46f71928d0592b45c6ad23c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_villani, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, OSD_FLAVOR=default)
Oct 11 02:23:19 compute-0 sshd-session[428592]: Invalid user debian from 121.227.153.123 port 50066
Oct 11 02:23:20 compute-0 sshd-session[428592]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:23:20 compute-0 sshd-session[428592]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:23:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1361: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:23:20 compute-0 suspicious_villani[428811]: {
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:     "0": [
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:         {
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "devices": [
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "/dev/loop3"
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             ],
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "lv_name": "ceph_lv0",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "lv_size": "21470642176",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "name": "ceph_lv0",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "tags": {
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.cluster_name": "ceph",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.crush_device_class": "",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.encrypted": "0",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.osd_id": "0",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.type": "block",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.vdo": "0"
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             },
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "type": "block",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "vg_name": "ceph_vg0"
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:         }
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:     ],
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:     "1": [
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:         {
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "devices": [
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "/dev/loop4"
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             ],
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "lv_name": "ceph_lv1",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "lv_size": "21470642176",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "name": "ceph_lv1",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "tags": {
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.cluster_name": "ceph",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.crush_device_class": "",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.encrypted": "0",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.osd_id": "1",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.type": "block",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.vdo": "0"
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             },
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "type": "block",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "vg_name": "ceph_vg1"
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:         }
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:     ],
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:     "2": [
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:         {
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "devices": [
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "/dev/loop5"
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             ],
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "lv_name": "ceph_lv2",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "lv_size": "21470642176",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "name": "ceph_lv2",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "tags": {
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.cluster_name": "ceph",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.crush_device_class": "",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.encrypted": "0",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.osd_id": "2",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.type": "block",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:                 "ceph.vdo": "0"
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             },
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "type": "block",
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:             "vg_name": "ceph_vg2"
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:         }
Oct 11 02:23:20 compute-0 suspicious_villani[428811]:     ]
Oct 11 02:23:20 compute-0 suspicious_villani[428811]: }
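[editor's note] The JSON block above, printed by the short-lived suspicious_villani container, is shaped exactly like `ceph-volume lvm list --format json` output: a map of OSD id to its backing logical volume and ceph.* tags. A minimal sketch, assuming the payload has been captured into a string (e.g. from the container's stdout), that reduces it to an osd_id-to-device map:

    import json

    def osd_device_map(lvm_list_json):
        """Map OSD id -> LV path from `ceph-volume lvm list --format json`."""
        result = {}
        for osd_id, entries in json.loads(lvm_list_json).items():
            for entry in entries:
                # Each block entry names its LV path and carries the ceph.* tags.
                if entry.get("type") == "block":
                    result[int(osd_id)] = entry["lv_path"]
        return result

    # For the payload above:
    # {0: '/dev/ceph_vg0/ceph_lv0', 1: '/dev/ceph_vg1/ceph_lv1',
    #  2: '/dev/ceph_vg2/ceph_lv2'}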
Oct 11 02:23:20 compute-0 systemd[1]: libpod-23ce5d3ab3b4b6d76658a6999d326b370bd332b2f46f71928d0592b45c6ad23c.scope: Deactivated successfully.
Oct 11 02:23:20 compute-0 podman[428793]: 2025-10-11 02:23:20.56040503 +0000 UTC m=+1.126234928 container died 23ce5d3ab3b4b6d76658a6999d326b370bd332b2f46f71928d0592b45c6ad23c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_villani, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:23:20 compute-0 systemd[1]: var-lib-containers-storage-overlay-c452180c71b27524c1d941b3d1f7e7cacd4783593a9632fe21ea0881fec7cce0-merged.mount: Deactivated successfully.
Oct 11 02:23:20 compute-0 podman[428793]: 2025-10-11 02:23:20.664175824 +0000 UTC m=+1.230005692 container remove 23ce5d3ab3b4b6d76658a6999d326b370bd332b2f46f71928d0592b45c6ad23c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_villani, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 02:23:20 compute-0 systemd[1]: libpod-conmon-23ce5d3ab3b4b6d76658a6999d326b370bd332b2f46f71928d0592b45c6ad23c.scope: Deactivated successfully.
Oct 11 02:23:20 compute-0 sudo[428689]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:20 compute-0 sudo[428831]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:23:20 compute-0 sudo[428831]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:23:20 compute-0 sudo[428831]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:20 compute-0 sudo[428856]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:23:20 compute-0 sudo[428856]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:23:20 compute-0 sudo[428856]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:20 compute-0 sudo[428881]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:23:20 compute-0 sudo[428881]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:23:20 compute-0 sudo[428881]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:21 compute-0 sudo[428906]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:23:21 compute-0 sudo[428906]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:23:21 compute-0 ceph-mon[191930]: pgmap v1361: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:21 compute-0 podman[428970]: 2025-10-11 02:23:21.40780526 +0000 UTC m=+0.046100865 container create a83aa8b8c827f0b711320ea9106f6f0647f2944b931504440b995b66545df3a4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_beaver, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:23:21 compute-0 systemd[1]: Started libpod-conmon-a83aa8b8c827f0b711320ea9106f6f0647f2944b931504440b995b66545df3a4.scope.
Oct 11 02:23:21 compute-0 podman[428970]: 2025-10-11 02:23:21.388972749 +0000 UTC m=+0.027268374 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:23:21 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:23:21 compute-0 podman[428970]: 2025-10-11 02:23:21.519687206 +0000 UTC m=+0.157982841 container init a83aa8b8c827f0b711320ea9106f6f0647f2944b931504440b995b66545df3a4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_beaver, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS)
Oct 11 02:23:21 compute-0 podman[428970]: 2025-10-11 02:23:21.530516348 +0000 UTC m=+0.168811953 container start a83aa8b8c827f0b711320ea9106f6f0647f2944b931504440b995b66545df3a4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_beaver, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 02:23:21 compute-0 podman[428970]: 2025-10-11 02:23:21.536306502 +0000 UTC m=+0.174602127 container attach a83aa8b8c827f0b711320ea9106f6f0647f2944b931504440b995b66545df3a4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_beaver, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 02:23:21 compute-0 intelligent_beaver[428993]: 167 167
Oct 11 02:23:21 compute-0 systemd[1]: libpod-a83aa8b8c827f0b711320ea9106f6f0647f2944b931504440b995b66545df3a4.scope: Deactivated successfully.
Oct 11 02:23:21 compute-0 podman[428970]: 2025-10-11 02:23:21.539970635 +0000 UTC m=+0.178266240 container died a83aa8b8c827f0b711320ea9106f6f0647f2944b931504440b995b66545df3a4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_beaver, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, ceph=True)
Oct 11 02:23:21 compute-0 podman[428985]: 2025-10-11 02:23:21.564459835 +0000 UTC m=+0.108468083 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, summary=Provides the latest release of Red Hat Universal Base Image 9., name=ubi9, architecture=x86_64, container_name=kepler, distribution-scope=public, io.buildah.version=1.29.0, release-0.7.12=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, io.k8s.display-name=Red Hat Universal Base Image 9, managed_by=edpm_ansible, release=1214.1726694543, vcs-type=git, io.openshift.expose-services=, version=9.4, build-date=2024-09-18T21:23:30, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., maintainer=Red Hat, Inc.)
Oct 11 02:23:21 compute-0 systemd[1]: var-lib-containers-storage-overlay-c90d703f5c190452226498d1493e2bc55d12abf76ecf155c7ed957d2d6b3cba2-merged.mount: Deactivated successfully.
Oct 11 02:23:21 compute-0 podman[428970]: 2025-10-11 02:23:21.601740613 +0000 UTC m=+0.240036218 container remove a83aa8b8c827f0b711320ea9106f6f0647f2944b931504440b995b66545df3a4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_beaver, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:23:21 compute-0 systemd[1]: libpod-conmon-a83aa8b8c827f0b711320ea9106f6f0647f2944b931504440b995b66545df3a4.scope: Deactivated successfully.
Oct 11 02:23:21 compute-0 podman[429030]: 2025-10-11 02:23:21.835892562 +0000 UTC m=+0.085325297 container create f2c5c67cd6ebcc7538c9d466932e3fa0e563fd135de6d35e842dbf57d176438c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_sammet, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 02:23:21 compute-0 podman[429030]: 2025-10-11 02:23:21.793167392 +0000 UTC m=+0.042600117 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:23:21 compute-0 systemd[1]: Started libpod-conmon-f2c5c67cd6ebcc7538c9d466932e3fa0e563fd135de6d35e842dbf57d176438c.scope.
Oct 11 02:23:21 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:23:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a3a67f8e6bb09c29ab721561b54e10e1457caee885e4fbef19b666300459a2f4/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:23:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a3a67f8e6bb09c29ab721561b54e10e1457caee885e4fbef19b666300459a2f4/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:23:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a3a67f8e6bb09c29ab721561b54e10e1457caee885e4fbef19b666300459a2f4/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:23:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a3a67f8e6bb09c29ab721561b54e10e1457caee885e4fbef19b666300459a2f4/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:23:21 compute-0 podman[429030]: 2025-10-11 02:23:21.98158479 +0000 UTC m=+0.231017515 container init f2c5c67cd6ebcc7538c9d466932e3fa0e563fd135de6d35e842dbf57d176438c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_sammet, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0)
Oct 11 02:23:22 compute-0 podman[429030]: 2025-10-11 02:23:22.00038981 +0000 UTC m=+0.249822525 container start f2c5c67cd6ebcc7538c9d466932e3fa0e563fd135de6d35e842dbf57d176438c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_sammet, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507)
Oct 11 02:23:22 compute-0 podman[429030]: 2025-10-11 02:23:22.005437029 +0000 UTC m=+0.254869754 container attach f2c5c67cd6ebcc7538c9d466932e3fa0e563fd135de6d35e842dbf57d176438c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_sammet, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 02:23:22 compute-0 sshd-session[428592]: Failed password for invalid user debian from 121.227.153.123 port 50066 ssh2
Oct 11 02:23:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1362: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:22 compute-0 ceph-mon[191930]: pgmap v1362: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:23 compute-0 hungry_sammet[429046]: {
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:         "osd_id": 1,
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:         "type": "bluestore"
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:     },
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:         "osd_id": 2,
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:         "type": "bluestore"
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:     },
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:         "osd_id": 0,
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:         "type": "bluestore"
Oct 11 02:23:23 compute-0 hungry_sammet[429046]:     }
Oct 11 02:23:23 compute-0 hungry_sammet[429046]: }
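[editor's note] This second payload matches the command visible in the sudo line above (`ceph-volume --fsid ... -- raw list --format json`): the same three OSDs, now keyed by osd_uuid with their device-mapper paths. A small cross-check sketch, assuming both JSON payloads from this section are available as strings; the ceph.osd_fsid tag in the lvm listing should line up with the osd_uuid keys here:

    import json

    def check_listings_agree(lvm_json, raw_json):
        """Verify each OSD in the lvm listing appears in the raw listing."""
        raw = json.loads(raw_json)
        for osd_id, entries in json.loads(lvm_json).items():
            for entry in entries:
                fsid = entry["tags"]["ceph.osd_fsid"]
                assert fsid in raw, f"osd.{osd_id} ({fsid}) missing from raw list"
                assert raw[fsid]["osd_id"] == int(osd_id)
        print("lvm and raw listings agree")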
Oct 11 02:23:23 compute-0 systemd[1]: libpod-f2c5c67cd6ebcc7538c9d466932e3fa0e563fd135de6d35e842dbf57d176438c.scope: Deactivated successfully.
Oct 11 02:23:23 compute-0 systemd[1]: libpod-f2c5c67cd6ebcc7538c9d466932e3fa0e563fd135de6d35e842dbf57d176438c.scope: Consumed 1.279s CPU time.
Oct 11 02:23:23 compute-0 podman[429079]: 2025-10-11 02:23:23.397052 +0000 UTC m=+0.069637842 container died f2c5c67cd6ebcc7538c9d466932e3fa0e563fd135de6d35e842dbf57d176438c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_sammet, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:23:23 compute-0 systemd[1]: var-lib-containers-storage-overlay-a3a67f8e6bb09c29ab721561b54e10e1457caee885e4fbef19b666300459a2f4-merged.mount: Deactivated successfully.
Oct 11 02:23:23 compute-0 podman[429079]: 2025-10-11 02:23:23.514685699 +0000 UTC m=+0.187271511 container remove f2c5c67cd6ebcc7538c9d466932e3fa0e563fd135de6d35e842dbf57d176438c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_sammet, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 02:23:23 compute-0 systemd[1]: libpod-conmon-f2c5c67cd6ebcc7538c9d466932e3fa0e563fd135de6d35e842dbf57d176438c.scope: Deactivated successfully.
Oct 11 02:23:23 compute-0 sudo[428906]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:23:23 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:23:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:23:23 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:23:23 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev b5c4319d-0b96-4f0e-886c-4173f7183da8 does not exist
Oct 11 02:23:23 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 47e36a5e-e96d-4259-8a0e-e94262ff66df does not exist
Oct 11 02:23:23 compute-0 sudo[429094]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:23:23 compute-0 sudo[429094]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:23:23 compute-0 sudo[429094]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:23 compute-0 nova_compute[356901]: 2025-10-11 02:23:23.819 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:23 compute-0 sudo[429119]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:23:23 compute-0 sudo[429119]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:23:23 compute-0 sudo[429119]: pam_unix(sudo:session): session closed for user root
Oct 11 02:23:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1363: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:24 compute-0 sshd-session[428592]: Connection closed by invalid user debian 121.227.153.123 port 50066 [preauth]
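[editor's note] The sshd-session lines from 121.227.153.123 follow the usual password-guessing cycle: invalid user, pam authentication failure, failed password, connection closed at preauth; the same host retries from new ports below. A rough sketch for tallying such attempts per source address, assuming input lines in this syslog format:

    import re
    from collections import Counter

    FAILED = re.compile(r"Failed password for invalid user \S+ from (\S+) port \d+")

    def count_failed_logins(lines):
        """Tally sshd 'Failed password ... invalid user' events per source IP."""
        hits = Counter()
        for line in lines:
            match = FAILED.search(line)
            if match:
                hits[match.group(1)] += 1
        return hits

    # Over this section it would report: Counter({'121.227.153.123': 4})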
Oct 11 02:23:24 compute-0 nova_compute[356901]: 2025-10-11 02:23:24.361 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:23:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:23:24 compute-0 ceph-mon[191930]: pgmap v1363: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:23:25 compute-0 sshd-session[429144]: Invalid user debian from 121.227.153.123 port 42174
Oct 11 02:23:25 compute-0 sshd-session[429144]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:23:25 compute-0 sshd-session[429144]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:23:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:23:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 2400.2 total, 600.0 interval
                                            Cumulative writes: 6489 writes, 26K keys, 6489 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.01 MB/s
                                            Cumulative WAL: 6489 writes, 1236 syncs, 5.25 writes per sync, written: 0.02 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 581 writes, 1890 keys, 581 commit groups, 1.0 writes per commit group, ingest: 2.05 MB, 0.00 MB/s
                                            Interval WAL: 581 writes, 226 syncs, 2.57 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
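[editor's note] The derived figures in this RocksDB dump can be reproduced from its own counters: 6489 cumulative WAL writes over 1236 syncs is the reported 5.25 writes per sync, and the interval figures give 581/226 ≈ 2.57. A trivial check:

    # Reproduce RocksDB's "writes per sync" ratios from the dumped counters.
    print(round(6489 / 1236, 2))  # 5.25 (cumulative WAL)
    print(round(581 / 226, 2))    # 2.57 (interval WAL)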
Oct 11 02:23:26 compute-0 ceph-mgr[192233]: [devicehealth INFO root] Check health
Oct 11 02:23:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1364: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:23:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:23:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:23:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:23:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:23:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:23:27 compute-0 ceph-mon[191930]: pgmap v1364: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:27 compute-0 sshd-session[429144]: Failed password for invalid user debian from 121.227.153.123 port 42174 ssh2
Oct 11 02:23:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:23:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/186118985' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:23:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:23:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/186118985' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
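[editor's note] These audit entries show client.openstack issuing `df` and `osd pool get-quota` as JSON mon commands, which is what a periodic pool-capacity poll from an OpenStack service looks like from the monitor's side. The equivalent call through the librados Python binding (the `rados` module) would be roughly the following sketch, assuming a readable ceph.conf and a keyring for client.openstack:

    import json

    import rados

    # Connect as the same entity the audit log shows above.
    cluster = rados.Rados(conffile="/etc/ceph/ceph.conf", name="client.openstack")
    cluster.connect()
    try:
        cmd = json.dumps({"prefix": "df", "format": "json"})
        ret, out, errs = cluster.mon_command(cmd, b"")
        stats = json.loads(out)  # same JSON the monitor dispatched here
    finally:
        cluster.shutdown()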
Oct 11 02:23:28 compute-0 sshd-session[429144]: Connection closed by invalid user debian 121.227.153.123 port 42174 [preauth]
Oct 11 02:23:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1365: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/186118985' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:23:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/186118985' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:23:28 compute-0 nova_compute[356901]: 2025-10-11 02:23:28.829 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:29 compute-0 ceph-mon[191930]: pgmap v1365: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:29 compute-0 nova_compute[356901]: 2025-10-11 02:23:29.368 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:29 compute-0 podman[157119]: time="2025-10-11T02:23:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:23:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:23:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:23:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:23:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9065 "" "Go-http-client/1.1"
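[editor's note] The podman[157119] lines are the podman system service answering REST calls on its UNIX socket; the Go-http-client user agent is consistent with the prometheus-podman-exporter container seen later in this section. The same containers/json query can be reproduced with only the Python standard library, assuming the root socket path /run/podman/podman.sock:

    import http.client
    import json
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        """http.client over a UNIX socket, enough for the libpod API."""
        def __init__(self, path):
            super().__init__("localhost")
            self.unix_path = path

        def connect(self):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(self.unix_path)
            self.sock = sock

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    containers = json.loads(conn.getresponse().read())
    print(len(containers), "containers")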
Oct 11 02:23:29 compute-0 sshd-session[429146]: Invalid user debian from 121.227.153.123 port 42176
Oct 11 02:23:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1366: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:30 compute-0 sshd-session[429146]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:23:30 compute-0 sshd-session[429146]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:23:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:23:31 compute-0 ceph-mon[191930]: pgmap v1366: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:31 compute-0 openstack_network_exporter[374316]: ERROR   02:23:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:23:31 compute-0 openstack_network_exporter[374316]: ERROR   02:23:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:23:31 compute-0 openstack_network_exporter[374316]: ERROR   02:23:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:23:31 compute-0 openstack_network_exporter[374316]: ERROR   02:23:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:23:31 compute-0 openstack_network_exporter[374316]: ERROR   02:23:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:23:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1367: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:32 compute-0 podman[429148]: 2025-10-11 02:23:32.21812621 +0000 UTC m=+0.109035452 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:23:32 compute-0 sshd-session[429146]: Failed password for invalid user debian from 121.227.153.123 port 42176 ssh2
Oct 11 02:23:32 compute-0 podman[429156]: 2025-10-11 02:23:32.246940684 +0000 UTC m=+0.097749623 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:23:32 compute-0 podman[429150]: 2025-10-11 02:23:32.282686941 +0000 UTC m=+0.147475968 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.4, managed_by=edpm_ansible, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:23:32 compute-0 podman[429149]: 2025-10-11 02:23:32.286959654 +0000 UTC m=+0.157953859 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, container_name=ovn_controller, io.buildah.version=1.41.3)
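[editor's note] Each health_status record embeds the container's config_data, the Python-style dict edpm_ansible used to launch it (image, net, volumes, environment, healthcheck). Purely as an illustration of how such a dict maps onto podman flags, and not the actual edpm_ansible code, a hypothetical helper:

    def podman_run_args(name, cfg):
        """Illustrative only: derive a `podman run` argv from a config_data dict."""
        args = ["podman", "run", "--detach", "--name", name]
        if cfg.get("net"):
            args += ["--net", cfg["net"]]
        if cfg.get("privileged"):
            args.append("--privileged")
        if cfg.get("user"):
            args += ["--user", str(cfg["user"])]
        for key, value in cfg.get("environment", {}).items():
            args += ["--env", f"{key}={value}"]
        for volume in cfg.get("volumes", []):
            args += ["--volume", volume]
        for port in cfg.get("ports", []):
            args += ["--publish", port]
        args.append(cfg["image"])
        command = cfg.get("command")
        if isinstance(command, str):
            args += command.split()
        elif command:
            args += list(command)
        return args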
Oct 11 02:23:33 compute-0 ceph-mon[191930]: pgmap v1367: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:33 compute-0 nova_compute[356901]: 2025-10-11 02:23:33.834 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1368: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:34 compute-0 nova_compute[356901]: 2025-10-11 02:23:34.375 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:34 compute-0 sshd-session[429146]: Connection closed by invalid user debian 121.227.153.123 port 42176 [preauth]
Oct 11 02:23:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:23:35 compute-0 ceph-mon[191930]: pgmap v1368: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:35 compute-0 sshd-session[429229]: Invalid user debian from 121.227.153.123 port 52048
Oct 11 02:23:35 compute-0 podman[429231]: 2025-10-11 02:23:35.764441264 +0000 UTC m=+0.110647330 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, config_id=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:23:35 compute-0 podman[429232]: 2025-10-11 02:23:35.784953415 +0000 UTC m=+0.113999624 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=iscsid, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:23:35 compute-0 sshd-session[429229]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:23:35 compute-0 sshd-session[429229]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:23:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1369: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:36 compute-0 ceph-mon[191930]: pgmap v1369: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1370: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:38 compute-0 sshd-session[429229]: Failed password for invalid user debian from 121.227.153.123 port 52048 ssh2
Oct 11 02:23:38 compute-0 nova_compute[356901]: 2025-10-11 02:23:38.839 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:39 compute-0 ceph-mon[191930]: pgmap v1370: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:39 compute-0 nova_compute[356901]: 2025-10-11 02:23:39.381 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1371: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:40 compute-0 sshd-session[429229]: Connection closed by invalid user debian 121.227.153.123 port 52048 [preauth]
Oct 11 02:23:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:23:41 compute-0 ceph-mon[191930]: pgmap v1371: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:41 compute-0 sshd-session[429272]: Invalid user debian from 121.227.153.123 port 43784
Oct 11 02:23:41 compute-0 sshd-session[429272]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:23:41 compute-0 sshd-session[429272]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:23:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1372: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:43 compute-0 ceph-mon[191930]: pgmap v1372: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:43 compute-0 sshd-session[429272]: Failed password for invalid user debian from 121.227.153.123 port 43784 ssh2
Oct 11 02:23:43 compute-0 nova_compute[356901]: 2025-10-11 02:23:43.845 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1373: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:44 compute-0 nova_compute[356901]: 2025-10-11 02:23:44.385 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:23:45 compute-0 ceph-mon[191930]: pgmap v1373: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:45 compute-0 sshd-session[429272]: Connection closed by invalid user debian 121.227.153.123 port 43784 [preauth]
Oct 11 02:23:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1374: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:46 compute-0 ceph-mon[191930]: pgmap v1374: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:46 compute-0 sshd-session[429274]: Invalid user debian from 121.227.153.123 port 43786
Oct 11 02:23:47 compute-0 podman[429278]: 2025-10-11 02:23:47.091644992 +0000 UTC m=+0.093808624 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:23:47 compute-0 podman[429277]: 2025-10-11 02:23:47.115644812 +0000 UTC m=+0.115726959 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., name=ubi9-minimal, url=https://catalog.redhat.com/en/search?searchType=containers, build-date=2025-08-20T13:12:41, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, com.redhat.component=ubi9-minimal-container, io.openshift.tags=minimal rhel9, config_id=edpm, io.buildah.version=1.33.7, io.openshift.expose-services=, release=1755695350, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, container_name=openstack_network_exporter, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vendor=Red Hat, Inc., distribution-scope=public, architecture=x86_64, version=9.6, managed_by=edpm_ansible, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:23:47 compute-0 podman[429276]: 2025-10-11 02:23:47.127465651 +0000 UTC m=+0.136342293 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, managed_by=edpm_ansible, maintainer=OpenStack Kubernetes Operator team, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_ipmi)
Oct 11 02:23:47 compute-0 sshd-session[429274]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:23:47 compute-0 sshd-session[429274]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:23:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1375: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:48 compute-0 nova_compute[356901]: 2025-10-11 02:23:48.849 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:48 compute-0 nova_compute[356901]: 2025-10-11 02:23:48.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:23:48 compute-0 nova_compute[356901]: 2025-10-11 02:23:48.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:23:49 compute-0 sshd-session[429274]: Failed password for invalid user debian from 121.227.153.123 port 43786 ssh2
Oct 11 02:23:49 compute-0 ceph-mon[191930]: pgmap v1375: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:49 compute-0 nova_compute[356901]: 2025-10-11 02:23:49.389 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1376: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:23:50 compute-0 ceph-mon[191930]: pgmap v1376: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:50 compute-0 nova_compute[356901]: 2025-10-11 02:23:50.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:23:51 compute-0 sshd-session[429274]: Connection closed by invalid user debian 121.227.153.123 port 43786 [preauth]
Oct 11 02:23:51 compute-0 nova_compute[356901]: 2025-10-11 02:23:51.737 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:51 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:23:51.738 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=6, options={'arp_ns_explicit_output': 'true', 'mac_prefix': 'fe:55:97', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'ce:9c:4f:b4:85:9b'}, ipsec=False) old=SB_Global(nb_cfg=5) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:23:51 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:23:51.741 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 5 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274
Oct 11 02:23:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1377: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:52 compute-0 podman[429340]: 2025-10-11 02:23:52.236728254 +0000 UTC m=+0.124796502 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, version=9.4, io.openshift.expose-services=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, container_name=kepler, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_id=edpm, distribution-scope=public, maintainer=Red Hat, Inc., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, io.openshift.tags=base rhel9, summary=Provides the latest release of Red Hat Universal Base Image 9., com.redhat.component=ubi9-container, vendor=Red Hat, Inc., io.buildah.version=1.29.0, io.k8s.display-name=Red Hat Universal Base Image 9, build-date=2024-09-18T21:23:30, release-0.7.12=, managed_by=edpm_ansible, release=1214.1726694543, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, vcs-type=git)
Oct 11 02:23:52 compute-0 sshd-session[429338]: Invalid user debian from 121.227.153.123 port 51876
Oct 11 02:23:53 compute-0 sshd-session[429338]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:23:53 compute-0 sshd-session[429338]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:23:53 compute-0 ceph-mon[191930]: pgmap v1377: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:53 compute-0 nova_compute[356901]: 2025-10-11 02:23:53.853 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:53 compute-0 nova_compute[356901]: 2025-10-11 02:23:53.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:23:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1378: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:54 compute-0 nova_compute[356901]: 2025-10-11 02:23:54.393 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:23:54.849 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:23:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:23:54.851 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:23:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:23:54.852 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:23:54 compute-0 nova_compute[356901]: 2025-10-11 02:23:54.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:23:54 compute-0 nova_compute[356901]: 2025-10-11 02:23:54.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:23:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:23:55 compute-0 ceph-mon[191930]: pgmap v1378: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:55 compute-0 sshd-session[429338]: Failed password for invalid user debian from 121.227.153.123 port 51876 ssh2
Oct 11 02:23:55 compute-0 nova_compute[356901]: 2025-10-11 02:23:55.805 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:23:55 compute-0 nova_compute[356901]: 2025-10-11 02:23:55.818 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:23:55 compute-0 nova_compute[356901]: 2025-10-11 02:23:55.820 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:23:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1379: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:56 compute-0 nova_compute[356901]: 2025-10-11 02:23:56.377 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "7513b93e-d2b8-4ae0-8f1c-3df190945259" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:23:56 compute-0 nova_compute[356901]: 2025-10-11 02:23:56.378 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259" acquired by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:23:56 compute-0 nova_compute[356901]: 2025-10-11 02:23:56.396 2 DEBUG nova.compute.manager [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Starting instance... _do_build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2402
Oct 11 02:23:56 compute-0 ceph-mon[191930]: pgmap v1379: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:56 compute-0 nova_compute[356901]: 2025-10-11 02:23:56.478 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:23:56 compute-0 nova_compute[356901]: 2025-10-11 02:23:56.480 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:23:56 compute-0 nova_compute[356901]: 2025-10-11 02:23:56.489 2 DEBUG nova.virt.hardware [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Require both a host and instance NUMA topology to fit instance on host. numa_fit_instance_to_host /usr/lib/python3.9/site-packages/nova/virt/hardware.py:2368
Oct 11 02:23:56 compute-0 nova_compute[356901]: 2025-10-11 02:23:56.489 2 INFO nova.compute.claims [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Claim successful on node compute-0.ctlplane.example.com
Oct 11 02:23:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:23:56
Oct 11 02:23:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:23:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:23:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['backups', 'default.rgw.control', 'cephfs.cephfs.data', 'default.rgw.meta', 'images', 'default.rgw.log', 'vms', 'volumes', '.rgw.root', 'cephfs.cephfs.meta', '.mgr']
Oct 11 02:23:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:23:56 compute-0 nova_compute[356901]: 2025-10-11 02:23:56.574 2 DEBUG nova.scheduler.client.report [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Refreshing inventories for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:804
Oct 11 02:23:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:23:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:23:56 compute-0 nova_compute[356901]: 2025-10-11 02:23:56.596 2 DEBUG nova.scheduler.client.report [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Updating ProviderTree inventory for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 from _refresh_and_get_inventory using data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} _refresh_and_get_inventory /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:768
Oct 11 02:23:56 compute-0 nova_compute[356901]: 2025-10-11 02:23:56.597 2 DEBUG nova.compute.provider_tree [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Updating inventory in ProviderTree for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with inventory: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:176
Oct 11 02:23:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:23:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:23:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:23:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:23:56 compute-0 nova_compute[356901]: 2025-10-11 02:23:56.613 2 DEBUG nova.scheduler.client.report [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Refreshing aggregate associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, aggregates: None _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:813
Oct 11 02:23:56 compute-0 nova_compute[356901]: 2025-10-11 02:23:56.637 2 DEBUG nova.scheduler.client.report [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Refreshing trait associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, traits: COMPUTE_VOLUME_EXTEND,COMPUTE_NET_VIF_MODEL_VMXNET3,HW_CPU_X86_SSSE3,COMPUTE_RESCUE_BFV,COMPUTE_SOCKET_PCI_NUMA_AFFINITY,COMPUTE_NODE,HW_CPU_X86_SVM,COMPUTE_STORAGE_BUS_SCSI,HW_CPU_X86_FMA3,COMPUTE_GRAPHICS_MODEL_NONE,COMPUTE_NET_VIF_MODEL_RTL8139,HW_CPU_X86_SSE4A,COMPUTE_IMAGE_TYPE_QCOW2,HW_CPU_X86_BMI2,HW_CPU_X86_SSE42,HW_CPU_X86_AVX2,COMPUTE_IMAGE_TYPE_RAW,COMPUTE_VIOMMU_MODEL_VIRTIO,HW_CPU_X86_AESNI,COMPUTE_STORAGE_BUS_FDC,COMPUTE_GRAPHICS_MODEL_VIRTIO,HW_CPU_X86_AMD_SVM,COMPUTE_NET_VIF_MODEL_NE2K_PCI,COMPUTE_ACCELERATORS,HW_CPU_X86_SSE2,COMPUTE_GRAPHICS_MODEL_VGA,HW_CPU_X86_ABM,HW_CPU_X86_AVX,COMPUTE_NET_VIF_MODEL_E1000,COMPUTE_STORAGE_BUS_USB,COMPUTE_NET_ATTACH_INTERFACE,HW_CPU_X86_MMX,COMPUTE_SECURITY_TPM_2_0,COMPUTE_IMAGE_TYPE_ISO,HW_CPU_X86_SSE41,COMPUTE_IMAGE_TYPE_AKI,COMPUTE_IMAGE_TYPE_AMI,COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG,COMPUTE_DEVICE_TAGGING,COMPUTE_SECURITY_UEFI_SECURE_BOOT,COMPUTE_TRUSTED_CERTS,COMPUTE_NET_VIF_MODEL_VIRTIO,COMPUTE_VIOMMU_MODEL_INTEL,COMPUTE_STORAGE_BUS_SATA,HW_CPU_X86_SSE,COMPUTE_STORAGE_BUS_VIRTIO,COMPUTE_NET_VIF_MODEL_PCNET,COMPUTE_GRAPHICS_MODEL_CIRRUS,HW_CPU_X86_SHA,HW_CPU_X86_BMI,COMPUTE_NET_VIF_MODEL_E1000E,COMPUTE_NET_VIF_MODEL_SPAPR_VLAN,COMPUTE_VOLUME_ATTACH_WITH_TAG,COMPUTE_GRAPHICS_MODEL_BOCHS,COMPUTE_VIOMMU_MODEL_AUTO,COMPUTE_IMAGE_TYPE_ARI,HW_CPU_X86_CLMUL,COMPUTE_STORAGE_BUS_IDE,COMPUTE_VOLUME_MULTI_ATTACH,HW_CPU_X86_F16C,COMPUTE_SECURITY_TPM_1_2 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:825
Oct 11 02:23:56 compute-0 nova_compute[356901]: 2025-10-11 02:23:56.730 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:23:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:23:56.744 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '6'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.050 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Updating instance_info_cache with network_info: [{"id": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "address": "fa:16:3e:c2:ee:14", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.80", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa7108c4c-c9", "ovs_interfaceid": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.077 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.077 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.078 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.079 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.079 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.079 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.108 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:23:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:23:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:23:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:23:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:23:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:23:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:23:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:23:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:23:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:23:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:23:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:23:57 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/953141794' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.169 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.439s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.181 2 DEBUG nova.compute.provider_tree [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.200 2 DEBUG nova.scheduler.client.report [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.231 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: held 0.752s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.233 2 DEBUG nova.compute.manager [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Start building networks asynchronously for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2799
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.237 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.130s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.238 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.239 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.240 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:23:57 compute-0 sshd-session[429338]: Connection closed by invalid user debian 121.227.153.123 port 51876 [preauth]
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.334 2 DEBUG nova.compute.manager [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Allocating IP information in the background. _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1952
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.336 2 DEBUG nova.network.neutron [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] allocate_for_instance() allocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1156
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.365 2 INFO nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Ignoring supplied device name: /dev/vda. Libvirt can't honour user-supplied dev names
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.418 2 DEBUG nova.compute.manager [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Start building block device mappings for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2834
Oct 11 02:23:57 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/953141794' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.526 2 DEBUG nova.compute.manager [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Start spawning the instance on the hypervisor. _build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2608
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.530 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Creating instance directory _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4723
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.531 2 INFO nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Creating image(s)
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.582 2 DEBUG nova.storage.rbd_utils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 7513b93e-d2b8-4ae0-8f1c-3df190945259_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.678 2 DEBUG nova.storage.rbd_utils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 7513b93e-d2b8-4ae0-8f1c-3df190945259_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:23:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:23:57 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2165534884' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.741 2 DEBUG nova.storage.rbd_utils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 7513b93e-d2b8-4ae0-8f1c-3df190945259_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.751 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.785 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.545s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.853 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d --force-share --output=json" returned: 0 in 0.101s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.854 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "c2a4b3f256e07592b38b9a83d173b78feaa2ba6d" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.855 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "c2a4b3f256e07592b38b9a83d173b78feaa2ba6d" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.855 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "c2a4b3f256e07592b38b9a83d173b78feaa2ba6d" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.891 2 DEBUG nova.storage.rbd_utils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 7513b93e-d2b8-4ae0-8f1c-3df190945259_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.898 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d 7513b93e-d2b8-4ae0-8f1c-3df190945259_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.953 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.954 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.954 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.961 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.961 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:23:57 compute-0 nova_compute[356901]: 2025-10-11 02:23:57.962 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:23:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1380: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.253 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d 7513b93e-d2b8-4ae0-8f1c-3df190945259_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.355s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.334 2 DEBUG nova.storage.rbd_utils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] resizing rbd image 7513b93e-d2b8-4ae0-8f1c-3df190945259_disk to 1073741824 resize /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:288
Oct 11 02:23:58 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2165534884' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:23:58 compute-0 ceph-mon[191930]: pgmap v1380: 321 pgs: 321 active+clean; 139 MiB data, 285 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.491 2 DEBUG nova.objects.instance [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lazy-loading 'migration_context' on Instance uuid 7513b93e-d2b8-4ae0-8f1c-3df190945259 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.530 2 DEBUG nova.storage.rbd_utils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 7513b93e-d2b8-4ae0-8f1c-3df190945259_disk.eph0 does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.566 2 DEBUG nova.storage.rbd_utils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 7513b93e-d2b8-4ae0-8f1c-3df190945259_disk.eph0 does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.573 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/ephemeral_1_0706d66 --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.648 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/ephemeral_1_0706d66 --force-share --output=json" returned: 0 in 0.075s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.649 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "ephemeral_1_0706d66" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.650 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "ephemeral_1_0706d66" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.650 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "ephemeral_1_0706d66" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.679 2 DEBUG nova.storage.rbd_utils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 7513b93e-d2b8-4ae0-8f1c-3df190945259_disk.eph0 does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.685 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/ephemeral_1_0706d66 7513b93e-d2b8-4ae0-8f1c-3df190945259_disk.eph0 --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:23:58 compute-0 sshd-session[429419]: Invalid user debian from 121.227.153.123 port 51888
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.718 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.720 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3666MB free_disk=59.92200469970703GB free_vcpus=6 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.720 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.720 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.822 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.823 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance d60d7ea1-5d00-4902-90e6-3ae67eb09a78 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.823 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 7513b93e-d2b8-4ae0-8f1c-3df190945259 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.823 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 3 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.823 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=2048MB phys_disk=59GB used_disk=6GB total_vcpus=8 used_vcpus=3 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
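[annotation] The final view is self-consistent with the three allocations logged just above: each instance holds {DISK_GB: 2, MEMORY_MB: 512, VCPU: 1}, and the tracker adds the reserved host memory (512 MB, matching the MEMORY_MB 'reserved' value in the inventory reported below). A quick cross-check:

    # Hedged sanity check of the logged totals.
    allocs = [{'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}] * 3

    used_vcpus = sum(a['VCPU'] for a in allocs)             # 3
    used_disk = sum(a['DISK_GB'] for a in allocs)           # 6 GB
    used_ram = 512 + sum(a['MEMORY_MB'] for a in allocs)    # 2048 MB

    assert (used_vcpus, used_disk, used_ram) == (3, 6, 2048)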
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.859 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:58 compute-0 nova_compute[356901]: 2025-10-11 02:23:58.927 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:23:58 compute-0 sshd-session[429419]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:23:58 compute-0 sshd-session[429419]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.181 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/ephemeral_1_0706d66 7513b93e-d2b8-4ae0-8f1c-3df190945259_disk.eph0 --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.496s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
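[annotation] The `rbd import` above uploads the locally cached ephemeral base file into the Ceph 'vms' pool under the instance's disk name, after the earlier "does not exist" probes confirmed there was nothing to reuse. A hedged sketch of that step (names and paths copied from the log; Nova's real implementation lives in nova.storage.rbd_utils):

    from oslo_concurrency import processutils

    def import_ephemeral(base_path, rbd_name, pool='vms',
                         user='openstack', conf='/etc/ceph/ceph.conf'):
        # --image-format=2 selects RBD format 2, required for the
        # cloning/layering features Nova relies on.
        processutils.execute(
            'rbd', 'import', '--pool', pool, base_path, rbd_name,
            '--image-format=2', '--id', user, '--conf', conf)

    import_ephemeral('/var/lib/nova/instances/_base/ephemeral_1_0706d66',
                     '7513b93e-d2b8-4ae0-8f1c-3df190945259_disk.eph0')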
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.369 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Created local disks _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4857
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.370 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Ensure instance console log exists: /var/lib/nova/instances/7513b93e-d2b8-4ae0-8f1c-3df190945259/console.log _ensure_console_log_for_instance /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4609
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.371 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "vgpu_resources" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.371 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "vgpu_resources" acquired by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.372 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "vgpu_resources" "released" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.396 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:23:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:23:59 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3153298011' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.455 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.528s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
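[annotation] This `ceph df` round trip (with the mon-side dispatch lines interleaved) is how the RBD image backend measures pool capacity for the resource report. A hedged sketch of the call and the fields read from the JSON ('stats' is part of the ceph df JSON schema; Nova's version is nova.storage.rbd_utils.get_pool_info):

    import json
    from oslo_concurrency import processutils

    def get_pool_info():
        out, _err = processutils.execute(
            'ceph', 'df', '--format=json',
            '--id', 'openstack', '--conf', '/etc/ceph/ceph.conf')
        stats = json.loads(out)['stats']
        return {'total': stats['total_bytes'],
                'free': stats['total_avail_bytes'],
                'used': stats['total_used_bytes']}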
Oct 11 02:23:59 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3153298011' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.473 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.498 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
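[annotation] Placement derives schedulable capacity per resource class as (total - reserved) * allocation_ratio, so the inventory above implies 32 VCPU, 7168 MB of RAM, and about 52.2 GB of disk. Worked out:

    inv = {
        'VCPU':      {'total': 8,    'reserved': 0,   'allocation_ratio': 4.0},
        'MEMORY_MB': {'total': 7680, 'reserved': 512, 'allocation_ratio': 1.0},
        'DISK_GB':   {'total': 59,   'reserved': 1,   'allocation_ratio': 0.9},
    }
    for rc, i in inv.items():
        cap = (i['total'] - i['reserved']) * i['allocation_ratio']
        print(rc, round(cap, 1))
    # VCPU 32.0, MEMORY_MB 7168.0, DISK_GB 52.2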
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.525 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.526 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.805s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:23:59 compute-0 podman[157119]: time="2025-10-11T02:23:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:23:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:23:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:23:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:23:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9050 "" "Go-http-client/1.1"
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.810 2 DEBUG nova.network.neutron [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Successfully updated port: a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e _update_port /usr/lib/python3.9/site-packages/nova/network/neutron.py:586
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.833 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "refresh_cache-7513b93e-d2b8-4ae0-8f1c-3df190945259" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.834 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquired lock "refresh_cache-7513b93e-d2b8-4ae0-8f1c-3df190945259" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.834 2 DEBUG nova.network.neutron [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.928 2 DEBUG nova.compute.manager [req-e16c3744-d6c6-40da-be2d-dff9625f917f req-20f445a1-9ec8-476a-902a-0790ac966578 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Received event network-changed-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.928 2 DEBUG nova.compute.manager [req-e16c3744-d6c6-40da-be2d-dff9625f917f req-20f445a1-9ec8-476a-902a-0790ac966578 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Refreshing instance network info cache due to event network-changed-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.929 2 DEBUG oslo_concurrency.lockutils [req-e16c3744-d6c6-40da-be2d-dff9625f917f req-20f445a1-9ec8-476a-902a-0790ac966578 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-7513b93e-d2b8-4ae0-8f1c-3df190945259" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:23:59 compute-0 nova_compute[356901]: 2025-10-11 02:23:59.992 2 DEBUG nova.network.neutron [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Instance cache missing network info. _get_preexisting_port_ids /usr/lib/python3.9/site-packages/nova/network/neutron.py:3323
Oct 11 02:24:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1381: 321 pgs: 321 active+clean; 155 MiB data, 293 MiB used, 60 GiB / 60 GiB avail; 10 KiB/s rd, 654 KiB/s wr, 14 op/s
Oct 11 02:24:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:24:00 compute-0 nova_compute[356901]: 2025-10-11 02:24:00.521 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:24:00 compute-0 nova_compute[356901]: 2025-10-11 02:24:00.550 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:24:01 compute-0 sshd-session[429419]: Failed password for invalid user debian from 121.227.153.123 port 51888 ssh2
Oct 11 02:24:01 compute-0 ceph-mon[191930]: pgmap v1381: 321 pgs: 321 active+clean; 155 MiB data, 293 MiB used, 60 GiB / 60 GiB avail; 10 KiB/s rd, 654 KiB/s wr, 14 op/s
Oct 11 02:24:01 compute-0 openstack_network_exporter[374316]: ERROR   02:24:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:24:01 compute-0 openstack_network_exporter[374316]: ERROR   02:24:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:24:01 compute-0 openstack_network_exporter[374316]: ERROR   02:24:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:24:01 compute-0 openstack_network_exporter[374316]: ERROR   02:24:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:24:01 compute-0 openstack_network_exporter[374316]: ERROR   02:24:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.842 2 DEBUG nova.network.neutron [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Updating instance_info_cache with network_info: [{"id": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "address": "fa:16:3e:16:ee:dc", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.225", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.204", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa942acb1-1e", "ovs_interfaceid": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.875 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Releasing lock "refresh_cache-7513b93e-d2b8-4ae0-8f1c-3df190945259" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.876 2 DEBUG nova.compute.manager [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Instance network_info: |[{"id": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "address": "fa:16:3e:16:ee:dc", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.225", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.204", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa942acb1-1e", "ovs_interfaceid": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}]| _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1967
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.878 2 DEBUG oslo_concurrency.lockutils [req-e16c3744-d6c6-40da-be2d-dff9625f917f req-20f445a1-9ec8-476a-902a-0790ac966578 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-7513b93e-d2b8-4ae0-8f1c-3df190945259" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.878 2 DEBUG nova.network.neutron [req-e16c3744-d6c6-40da-be2d-dff9625f917f req-20f445a1-9ec8-476a-902a-0790ac966578 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Refreshing network info cache for port a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.884 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Start _get_guest_xml network_info=[{"id": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "address": "fa:16:3e:16:ee:dc", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.225", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.204", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa942acb1-1e", "ovs_interfaceid": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}] disk_info={'disk_bus': 'virtio', 'cdrom_bus': 'sata', 'mapping': {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, 'disk.config': {'bus': 'sata', 'dev': 'sda', 'type': 'cdrom'}}} image_meta=ImageMeta(checksum='b874c39491a2377b8490f5f1e89761a4',container_format='bare',created_at=2025-10-11T02:17:33Z,direct_url=<?>,disk_format='qcow2',id=a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7,min_disk=0,min_ram=0,name='cirros',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=16300544,status='active',tags=<?>,updated_at=2025-10-11T02:17:37Z,virtual_size=<?>,visibility=<?>) rescue=None block_device_info={'root_device_name': '/dev/vda', 'image': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'boot_index': 0, 'device_name': '/dev/vda', 'size': 0, 'encryption_format': None, 'image_id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}], 'ephemerals': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'device_name': '/dev/vdb', 'size': 1, 'encryption_format': None}], 'block_device_mapping': [], 'swap': None} _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7549
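[annotation] The disk_info mapping embedded in the line above fixes the guest's block layout before any XML is rendered: root and ephemeral disks on virtio (vda, vdb), config drive as a SATA cdrom (sda). A hedged rendering of what those entries become as libvirt target elements:

    # Values copied from the logged disk_info; 'root' and 'disk' alias the
    # same vda entry in the log, so only one is shown here.
    mapping = {
        'disk':        {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
        'disk.eph0':   {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
        'disk.config': {'bus': 'sata',   'dev': 'sda', 'type': 'cdrom'},
    }
    for name, m in mapping.items():
        print(f"{name}: <disk device='{m['type']}'> "
              f"<target dev='{m['dev']}' bus='{m['bus']}'/>")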
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.896 2 WARNING nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.918 2 DEBUG nova.virt.libvirt.host [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V1... _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1653
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.919 2 DEBUG nova.virt.libvirt.host [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CPU controller missing on host. _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1663
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.927 2 DEBUG nova.virt.libvirt.host [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V2... _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1672
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.928 2 DEBUG nova.virt.libvirt.host [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CPU controller found on host. _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1679
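[annotation] These two probes decide whether libvirt can apply CPU shares/quota: this host has no cgroups-v1 cpu controller but does expose one on the cgroups-v2 unified hierarchy. A hedged approximation of the checks (Nova's real versions live in nova.virt.libvirt.host and also consult libvirt):

    import os

    def has_cgroupsv1_cpu():
        # v1 mounts one directory per controller.
        return os.path.isdir('/sys/fs/cgroup/cpu')

    def has_cgroupsv2_cpu():
        # v2 lists active controllers in a single file at the root.
        try:
            with open('/sys/fs/cgroup/cgroup.controllers') as f:
                return 'cpu' in f.read().split()
        except FileNotFoundError:
            return False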
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.929 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CPU mode 'host-model' models '' was chosen, with extra flags: '' _get_guest_cpu_model_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:5396
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.930 2 DEBUG nova.virt.hardware [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Getting desirable topologies for flavor Flavor(created_at=2025-10-11T02:17:41Z,deleted=False,deleted_at=None,description=None,disabled=False,ephemeral_gb=1,extra_specs={},flavorid='486e1451-345c-45d6-b075-f4717e759025',id=1,is_public=True,memory_mb=512,name='m1.small',projects=<?>,root_gb=1,rxtx_factor=1.0,swap=0,updated_at=None,vcpu_weight=0,vcpus=1) and image_meta ImageMeta(checksum='b874c39491a2377b8490f5f1e89761a4',container_format='bare',created_at=2025-10-11T02:17:33Z,direct_url=<?>,disk_format='qcow2',id=a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7,min_disk=0,min_ram=0,name='cirros',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=16300544,status='active',tags=<?>,updated_at=2025-10-11T02:17:37Z,virtual_size=<?>,visibility=<?>), allow threads: True _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:563
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.932 2 DEBUG nova.virt.hardware [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Flavor limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:348
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.932 2 DEBUG nova.virt.hardware [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Image limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:352
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.933 2 DEBUG nova.virt.hardware [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Flavor pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:388
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.934 2 DEBUG nova.virt.hardware [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Image pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:392
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.936 2 DEBUG nova.virt.hardware [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Chose sockets=0, cores=0, threads=0; limits were sockets=65536, cores=65536, threads=65536 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:430
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.937 2 DEBUG nova.virt.hardware [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Topology preferred VirtCPUTopology(cores=0,sockets=0,threads=0), maximum VirtCPUTopology(cores=65536,sockets=65536,threads=65536) _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:569
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.938 2 DEBUG nova.virt.hardware [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Build topologies for 1 vcpu(s) 1:1:1 _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:471
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.938 2 DEBUG nova.virt.hardware [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Got 1 possible topologies _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:501
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.939 2 DEBUG nova.virt.hardware [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Possible topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:575
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.940 2 DEBUG nova.virt.hardware [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Sorted desired topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:577
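[annotation] With no flavor or image topology constraints (the 0:0:0 limit/pref lines above), Nova enumerates every (sockets, cores, threads) triple whose product equals the vCPU count, bounded by the 65536 defaults; for 1 vCPU the only candidate is 1:1:1, exactly as logged. A simplified sketch of that search (not Nova's literal algorithm):

    def possible_topologies(vcpus, max_each=65536):
        bound = min(vcpus, max_each) + 1
        for sockets in range(1, bound):
            for cores in range(1, bound):
                for threads in range(1, bound):
                    if sockets * cores * threads == vcpus:
                        yield (sockets, cores, threads)

    print(list(possible_topologies(1)))   # [(1, 1, 1)]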
Oct 11 02:24:01 compute-0 nova_compute[356901]: 2025-10-11 02:24:01.945 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:24:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1382: 321 pgs: 321 active+clean; 162 MiB data, 294 MiB used, 60 GiB / 60 GiB avail; 15 KiB/s rd, 821 KiB/s wr, 23 op/s
Oct 11 02:24:02 compute-0 ceph-mon[191930]: pgmap v1382: 321 pgs: 321 active+clean; 162 MiB data, 294 MiB used, 60 GiB / 60 GiB avail; 15 KiB/s rd, 821 KiB/s wr, 23 op/s
Oct 11 02:24:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:24:02 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1954550332' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:24:02 compute-0 nova_compute[356901]: 2025-10-11 02:24:02.512 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.567s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:24:02 compute-0 nova_compute[356901]: 2025-10-11 02:24:02.515 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:24:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:24:02 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2432606697' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.003 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.488s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.062 2 DEBUG nova.storage.rbd_utils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 7513b93e-d2b8-4ae0-8f1c-3df190945259_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.082 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:24:03 compute-0 podman[429796]: 2025-10-11 02:24:03.231297744 +0000 UTC m=+0.095270929 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, managed_by=edpm_ansible)
Oct 11 02:24:03 compute-0 podman[429790]: 2025-10-11 02:24:03.256793501 +0000 UTC m=+0.126875667 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 02:24:03 compute-0 podman[429787]: 2025-10-11 02:24:03.264219233 +0000 UTC m=+0.150760533 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:24:03 compute-0 sshd-session[429419]: Connection closed by invalid user debian 121.227.153.123 port 51888 [preauth]
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.274 2 DEBUG nova.network.neutron [req-e16c3744-d6c6-40da-be2d-dff9625f917f req-20f445a1-9ec8-476a-902a-0790ac966578 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Updated VIF entry in instance network info cache for port a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.275 2 DEBUG nova.network.neutron [req-e16c3744-d6c6-40da-be2d-dff9625f917f req-20f445a1-9ec8-476a-902a-0790ac966578 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Updating instance_info_cache with network_info: [{"id": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "address": "fa:16:3e:16:ee:dc", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.225", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.204", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa942acb1-1e", "ovs_interfaceid": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:24:03 compute-0 podman[429789]: 2025-10-11 02:24:03.291057912 +0000 UTC m=+0.157864995 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller)
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.293 2 DEBUG oslo_concurrency.lockutils [req-e16c3744-d6c6-40da-be2d-dff9625f917f req-20f445a1-9ec8-476a-902a-0790ac966578 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-7513b93e-d2b8-4ae0-8f1c-3df190945259" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:24:03 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1954550332' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:24:03 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2432606697' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:24:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:24:03 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2979624712' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.593 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.511s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.595 2 DEBUG nova.virt.libvirt.vif [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:23:54Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description=None,display_name='vn-vgckve2-djjfpphdsuuh-gthznuj2xct2-vnf-jmvtgw3mflyn',ec2_ids=EC2Ids,ephemeral_gb=1,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(1),hidden=False,host='compute-0.ctlplane.example.com',hostname='vn-vgckve2-djjfpphdsuuh-gthznuj2xct2-vnf-jmvtgw3mflyn',id=3,image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',info_cache=InstanceInfoCache,instance_type_id=1,kernel_id='',key_data=None,key_name=None,keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=512,metadata={metering.server_group='3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='97026531b3404a11869cb85a059c4a0d',ramdisk_id='',reservation_id='r-t8a5mh5u',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader,admin',image_base_image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_min_disk='1',image_min_ram='0',image_owner_specified.openstack.md5='',image_owner_specified.openstack.object='images/cirros',image_owner_specified.openstack.sha256='',network_allocated='True',owner_project_name='admin',owner_user_name='admin'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:23:57Z,user_data='Q29udGVudC1UeXBlOiBtdWx0aXBhcnQvbWl4ZWQ7IGJvdW5kYXJ5PSI9PT09PT09PT09PT09PT0zOTM4MTI3NjM1NDAwNDM1NzIyPT0iCk1JTUUtVmVyc2lvbjogMS4wCgotLT09PT09PT09PT09PT09PTM5MzgxMjc2MzU0MDA0MzU3MjI9PQpDb250ZW50LVR5cGU6IHRleHQvY2xvdWQtY29uZmlnOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2xvdWQtY29uZmlnIgoKCgojIENhcHR1cmUgYWxsIHN1YnByb2Nlc3Mgb3V0cHV0IGludG8gYSBsb2dmaWxlCiMgVXNlZnVsIGZvciB0cm91Ymxlc2hvb3RpbmcgY2xvdWQtaW5pdCBpc3N1ZXMKb3V0cHV0OiB7YWxsOiAnfCB0ZWUgLWEgL3Zhci9sb2cvY2xvdWQtaW5pdC1vdXRwdXQubG9nJ30KCi0tPT09PT09PT09PT09PT09MzkzODEyNzYzNTQwMDQzNTcyMj09CkNvbnRlbnQtVHlwZTogdGV4dC9jbG91ZC1ib290aG9vazsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImJvb3Rob29rLnNoIgoKIyEvdXNyL2Jpbi9iYXNoCgojIEZJWE1FKHNoYWRvd2VyKSB0aGlzIGlzIGEgd29ya2Fyb3VuZCBmb3IgY2xvdWQtaW5pdCAwLjYuMyBwcmVzZW50IGluIFVidW50dQojIDEyLjA0IExUUzoKIyBodHRwczovL2J1Z3MubGF1bmNocGFkLm5ldC9oZWF0LytidWcvMTI1NzQxMAojCiMgVGhlIG9sZCBjbG91ZC1pbml0IGRvZXNuJ3QgY3JlYXRlIHRoZSB1c2VycyBkaXJlY3RseSBzbyB0aGUgY29tbWFuZHMgdG8gZG8KIyB0aGlzIGFyZSBpbmplY3RlZCB0aG91Z2ggbm92YV91dGlscy5weS4KIwojIE9uY2Ugd2UgZHJvcCBzdXBwb3J0IGZvciAwLjYuMywgd2UgY2FuIHNhZmVseSByZW1vdmUgdGhpcy4KCgojIGluIGNhc2UgaGVhdC1jZm50b29scyBoYXMgY
mVlbiBpbnN0YWxsZWQgZnJvbSBwYWNrYWdlIGJ1dCBubyBzeW1saW5rcwojIGFyZSB5ZXQgaW4gL29wdC9hd3MvYmluLwpjZm4tY3JlYXRlLWF3cy1zeW1saW5rcwoKIyBEbyBub3QgcmVtb3ZlIC0gdGhlIGNsb3VkIGJvb3Rob29rIHNob3VsZCBhbHdheXMgcmV0dXJuIHN1Y2Nlc3MKZXhpdCAwCgotLT09PT09PT09PT09PT09PTM5MzgxMjc2MzU0MDA0MzU3MjI9PQpDb250ZW50LVR5cGU6IHRleHQvcGFydC1oYW5kbGVyOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0icGFydC1oYW5kbGVyLnB5IgoKIyBwYXJ0LWhhbmRsZXIKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wCiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZWQgdW5kZXIgdGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBvcwppbXBvcnQgc3lzCgoKZGVmIGxpc3RfdHlwZXMoKToKICAgIHJldHVybiBbInRleHQveC1jZm5pbml0ZGF0YSJdCgoKZGVmIGhhbmRsZV9wYXJ0KGRhdGEsIGN0eXBlLCBmaWxlbmFtZSwgcGF5bG9hZCk6CiAgICBpZiBjdHlwZSA9PSAiX19iZWdpbl9fIjoKICAgICAgICB0cnk6CiAgICAgICAgICAgIG9zLm1ha2VkaXJzKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzJywgaW50KCI3MDAiLCA4KSkKICAgICAgICBleGNlcHQgT1NFcnJvcjoKICAgICAgICAgICAgZXhfdHlwZSwgZSwgdGIgPSBzeXMuZXhjX2luZm8oKQogICAgICAgICAgICBpZiBlLmVycm5vICE9IGVycm5vLkVFWElTVDoKICAgICAgICAgICAgICAgIHJhaXNlCiAgICAgICAgcmV0dXJuCgogICAgaWYgY3R5cGUgPT0gIl9fZW5kX18iOgogICAgICAgIHJldHVybgoKICAgIHRpbWVzdGFtcCA9IGRhdGV0aW1lLmRhdGV0aW1lLm5vdygpCiAgICB3aXRoIG9wZW4oJy92YXIvbG9nL3BhcnQtaGFuZGxlci5sb2cnLCAnYScpIGFzIGxvZzoKICAgICAgICBsb2cud3JpdGUoJyVzIGZpbGVuYW1lOiVzLCBjdHlwZTolc1xuJyAlICh0aW1lc3RhbXAsIGZpbGVuYW1lLCBjdHlwZSkpCgogICAgaWYgY3R5cGUgPT0gJ3RleHQveC1jZm5pbml0ZGF0YSc6CiAgICAgICAgd2l0aCBvcGVuKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzLyVzJyAlIGZpbGVuYW1lLCAndycpIGFzIGY6CiAgICAgICAgICAgIGYud3JpdGUocGF5bG9hZCkKCiAgICAgICAgIyBUT0RPKHNkYWtlKSBob3BlZnVsbHkgdGVtcG9yYXJ5IHVudGlsIHVzZXJzIG1vdmUgdG8gaGVhdC1jZm50b29scy0xLjMKICAgICAgICB3aXRoIG9wZW4oJy92YXIvbGliL2Nsb3VkL2RhdGEvJXMnICUgZmlsZW5hbWUsICd3JykgYXMgZjoKICAgICAgICAgICAgZi53cml0ZShwYXlsb2FkKQoKLS09PT09PT09PT09PT09PT0zOTM4MTI3NjM1NDAwNDM1NzIyPT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtY2ZuaW5pdGRhdGE7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJjZm4tdXNlcmRhdGEiCgoKLS09PT09PT09PT09PT09PT0zOTM4MTI3NjM1NDAwNDM1NzIyPT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtc2hlbGxzY3JpcHQ7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJsb2d1c2VyZGF0YS5weSIKCiMhL3Vzci9iaW4vZW52IHB5dGhvbjMKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wCiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZWQgdW5kZXIg
dGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBsb2dnaW5nCmltcG9ydCBvcwppbXBvcnQgc3VicHJvY2VzcwppbXBvcnQgc3lzCgoKVkFSX1BBVEggPSAnL3Zhci9saWIvaGVhdC1jZm50b29scycKTE9HID0gbG9nZ2luZy5nZXRMb2dnZXIoJ2hlYXQtcHJvdmlzaW9uJykKCgpkZWYgaW5pdF9sb2dnaW5nKCk6CiAgICBMT0cuc2V0TGV2ZWwobG9nZ2luZy5JTkZPKQogICAgTE9HLmFkZEhhbmRsZXIobG9nZ2luZy5TdHJlYW1IYW5kbGVyKCkpCiAgICBmaCA9IGxvZ2dpbmcuRmlsZUhhbmRsZXIoIi92YXIvbG9nL2hlYXQtcHJvdmlzaW9uLmxvZyIpCiAgICBvcy5jaG1vZChmaC5iYXNlRmlsZW5hbWUsIGludCgiNjAwIiwgOCkpCiAgICBMT0cuYWRkSGFuZGxlcihmaCkKCgpkZWYgY2FsbChhcmdzKToKCiAgICBjbGFzcyBMb2dTdHJlYW0ob2JqZWN0KToKCiAgICAgICAgZGVmIHdyaXRlKHNlbGYsIGRhdGEpOgogICAgICAgICAgICBMT0cuaW5mbyhkYXRhKQoKICAgIExPRy5pbmZvKCclc1xuJywgJyAnLmpvaW4oYXJncykpICAjIG5vcWEKICAgIHRyeToKICAgICAgICBscyA9IExvZ1N0cmVhbSgpCiAgICAgICAgcCA9IHN1YnByb2Nlc3MuUG9wZW4oYXJncywgc3
Oct 11 02:24:03 compute-0 nova_compute[356901]: Rkb3V0PXN1YnByb2Nlc3MuUElQRSwKICAgICAgICAgICAgICAgICAgICAgICAgICAgICBzdGRlcnI9c3VicHJvY2Vzcy5QSVBFKQogICAgICAgIGRhdGEgPSBwLmNvbW11bmljYXRlKCkKICAgICAgICBpZiBkYXRhOgogICAgICAgICAgICBmb3IgeCBpbiBkYXRhOgogICAgICAgICAgICAgICAgbHMud3JpdGUoeCkKICAgIGV4Y2VwdCBPU0Vycm9yOgogICAgICAgIGV4X3R5cGUsIGV4LCB0YiA9IHN5cy5leGNfaW5mbygpCiAgICAgICAgaWYgZXguZXJybm8gPT0gZXJybm8uRU5PRVhFQzoKICAgICAgICAgICAgTE9HLmVycm9yKCdVc2VyZGF0YSBlbXB0eSBvciBub3QgZXhlY3V0YWJsZTogJXMnLCBleCkKICAgICAgICAgICAgcmV0dXJuIG9zLkVYX09LCiAgICAgICAgZWxzZToKICAgICAgICAgICAgTE9HLmVycm9yKCdPUyBlcnJvciBydW5uaW5nIHVzZXJkYXRhOiAlcycsIGV4KQogICAgICAgICAgICByZXR1cm4gb3MuRVhfT1NFUlIKICAgIGV4Y2VwdCBFeGNlcHRpb246CiAgICAgICAgZXhfdHlwZSwgZXgsIHRiID0gc3lzLmV4Y19pbmZvKCkKICAgICAgICBMT0cuZXJyb3IoJ1Vua25vd24gZXJyb3IgcnVubmluZyB1c2VyZGF0YTogJXMnLCBleCkKICAgICAgICByZXR1cm4gb3MuRVhfU09GVFdBUkUKICAgIHJldHVybiBwLnJldHVybmNvZGUKCgpkZWYgbWFpbigpOgogICAgdXNlcmRhdGFfcGF0aCA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ2Nmbi11c2VyZGF0YScpCiAgICBvcy5jaG1vZCh1c2VyZGF0YV9wYXRoLCBpbnQoIjcwMCIsIDgpKQoKICAgIExPRy5pbmZvKCdQcm92aXNpb24gYmVnYW46ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICByZXR1cm5jb2RlID0gY2FsbChbdXNlcmRhdGFfcGF0aF0pCiAgICBMT0cuaW5mbygnUHJvdmlzaW9uIGRvbmU6ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICBpZiByZXR1cm5jb2RlOgogICAgICAgIHJldHVybiByZXR1cm5jb2RlCgoKaWYgX19uYW1lX18gPT0gJ19fbWFpbl9fJzoKICAgIGluaXRfbG9nZ2luZygpCgogICAgY29kZSA9IG1haW4oKQogICAgaWYgY29kZToKICAgICAgICBMT0cuZXJyb3IoJ1Byb3Zpc2lvbiBmYWlsZWQgd2l0aCBleGl0IGNvZGUgJXMnLCBjb2RlKQogICAgICAgIHN5cy5leGl0KGNvZGUpCgogICAgcHJvdmlzaW9uX2xvZyA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ3Byb3Zpc2lvbi1maW5pc2hlZCcpCiAgICAjIHRvdWNoIHRoZSBmaWxlIHNvIGl0IGlzIHRpbWVzdGFtcGVkIHdpdGggd2hlbiBmaW5pc2hlZAogICAgd2l0aCBvcGVuKHByb3Zpc2lvbl9sb2csICdhJyk6CiAgICAgICAgb3MudXRpbWUocHJvdmlzaW9uX2xvZywgTm9uZSkKCi0tPT09PT09PT09PT09PT09MzkzODEyNzYzNTQwMDQzNTcyMj09CkNvbnRlbnQtVHlwZTogdGV4dC94LWNmbmluaXRkYXRhOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2ZuLW1ldGFkYXRhLXNlcnZlciIKCmh0dHBzOi8vaGVhdC1jZm5hcGktaW50ZXJuYWwub3BlbnN0YWNrLnN2Yzo4MDAwL3YxLwotLT09PT09PT09PT09PT09PTM5MzgxMjc2MzU0MDA0MzU3MjI9PQpDb250ZW50LVR5cGU6IHRleHQveC1jZm5pbml0ZGF0YTsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImNmbi1ib3RvLWNmZyIKCltCb3RvXQpkZWJ1ZyA9IDAKaXNfc2VjdXJlID0gMApodHRwc192YWxpZGF0ZV9jZXJ0aWZpY2F0ZXMgPSAxCmNmbl9yZWdpb25fbmFtZSA9IGhlYXQKY2ZuX3JlZ2lvbl9lbmRwb2ludCA9IGhlYXQtY2ZuYXBpLWludGVybmFsLm9wZW5zdGFjay5zdmMKLS09PT09PT09PT09PT09PT0zOTM4MTI3NjM1NDAwNDM1NzIyPT0tLQo=',user_id='d215f3ebbc07435493ccd666fc80109d',uuid=7513b93e-d2b8-4ae0-8f1c-3df190945259,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "address": "fa:16:3e:16:ee:dc", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.225", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.204", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", 
"details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa942acb1-1e", "ovs_interfaceid": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} virt_type=kvm get_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:563
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.596 2 DEBUG nova.network.os_vif_util [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converting VIF {"id": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "address": "fa:16:3e:16:ee:dc", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.225", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.204", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa942acb1-1e", "ovs_interfaceid": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.597 2 DEBUG nova.network.os_vif_util [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:16:ee:dc,bridge_name='br-int',has_traffic_filtering=True,id=a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tapa942acb1-1e') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.598 2 DEBUG nova.objects.instance [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lazy-loading 'pci_devices' on Instance uuid 7513b93e-d2b8-4ae0-8f1c-3df190945259 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.611 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] End _get_guest_xml xml=<domain type="kvm">
Oct 11 02:24:03 compute-0 nova_compute[356901]:   <uuid>7513b93e-d2b8-4ae0-8f1c-3df190945259</uuid>
Oct 11 02:24:03 compute-0 nova_compute[356901]:   <name>instance-00000003</name>
Oct 11 02:24:03 compute-0 nova_compute[356901]:   <memory>524288</memory>
Oct 11 02:24:03 compute-0 nova_compute[356901]:   <vcpu>1</vcpu>
Oct 11 02:24:03 compute-0 nova_compute[356901]:   <metadata>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.1">
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <nova:package version="27.5.2-0.20250829104910.6f8decf.el9"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <nova:name>vn-vgckve2-djjfpphdsuuh-gthznuj2xct2-vnf-jmvtgw3mflyn</nova:name>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <nova:creationTime>2025-10-11 02:24:01</nova:creationTime>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <nova:flavor name="m1.small">
Oct 11 02:24:03 compute-0 nova_compute[356901]:         <nova:memory>512</nova:memory>
Oct 11 02:24:03 compute-0 nova_compute[356901]:         <nova:disk>1</nova:disk>
Oct 11 02:24:03 compute-0 nova_compute[356901]:         <nova:swap>0</nova:swap>
Oct 11 02:24:03 compute-0 nova_compute[356901]:         <nova:ephemeral>1</nova:ephemeral>
Oct 11 02:24:03 compute-0 nova_compute[356901]:         <nova:vcpus>1</nova:vcpus>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       </nova:flavor>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <nova:owner>
Oct 11 02:24:03 compute-0 nova_compute[356901]:         <nova:user uuid="d215f3ebbc07435493ccd666fc80109d">admin</nova:user>
Oct 11 02:24:03 compute-0 nova_compute[356901]:         <nova:project uuid="97026531b3404a11869cb85a059c4a0d">admin</nova:project>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       </nova:owner>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <nova:root type="image" uuid="a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <nova:ports>
Oct 11 02:24:03 compute-0 nova_compute[356901]:         <nova:port uuid="a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e">
Oct 11 02:24:03 compute-0 nova_compute[356901]:           <nova:ip type="fixed" address="192.168.0.225" ipVersion="4"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:         </nova:port>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       </nova:ports>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     </nova:instance>
Oct 11 02:24:03 compute-0 nova_compute[356901]:   </metadata>
Oct 11 02:24:03 compute-0 nova_compute[356901]:   <sysinfo type="smbios">
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <system>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <entry name="manufacturer">RDO</entry>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <entry name="product">OpenStack Compute</entry>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <entry name="version">27.5.2-0.20250829104910.6f8decf.el9</entry>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <entry name="serial">7513b93e-d2b8-4ae0-8f1c-3df190945259</entry>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <entry name="uuid">7513b93e-d2b8-4ae0-8f1c-3df190945259</entry>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <entry name="family">Virtual Machine</entry>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     </system>
Oct 11 02:24:03 compute-0 nova_compute[356901]:   </sysinfo>
Oct 11 02:24:03 compute-0 nova_compute[356901]:   <os>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <type arch="x86_64" machine="q35">hvm</type>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <boot dev="hd"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <smbios mode="sysinfo"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:   </os>
Oct 11 02:24:03 compute-0 nova_compute[356901]:   <features>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <acpi/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <apic/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <vmcoreinfo/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:   </features>
Oct 11 02:24:03 compute-0 nova_compute[356901]:   <clock offset="utc">
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <timer name="pit" tickpolicy="delay"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <timer name="rtc" tickpolicy="catchup"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <timer name="hpet" present="no"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:   </clock>
Oct 11 02:24:03 compute-0 nova_compute[356901]:   <cpu mode="host-model" match="exact">
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <topology sockets="1" cores="1" threads="1"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:24:03 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/7513b93e-d2b8-4ae0-8f1c-3df190945259_disk">
Oct 11 02:24:03 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       </source>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:24:03 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <target dev="vda" bus="virtio"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/7513b93e-d2b8-4ae0-8f1c-3df190945259_disk.eph0">
Oct 11 02:24:03 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       </source>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:24:03 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <target dev="vdb" bus="virtio"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <disk type="network" device="cdrom">
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/7513b93e-d2b8-4ae0-8f1c-3df190945259_disk.config">
Oct 11 02:24:03 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       </source>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:24:03 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <target dev="sda" bus="sata"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <interface type="ethernet">
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <mac address="fa:16:3e:16:ee:dc"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <driver name="vhost" rx_queue_size="512"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <mtu size="1442"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <target dev="tapa942acb1-1e"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <serial type="pty">
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <log file="/var/lib/nova/instances/7513b93e-d2b8-4ae0-8f1c-3df190945259/console.log" append="off"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     </serial>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <graphics type="vnc" autoport="yes" listen="::0"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <video>
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     </video>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <input type="tablet" bus="usb"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <rng model="virtio">
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <backend model="random">/dev/urandom</backend>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <controller type="usb" index="0"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     <memballoon model="virtio">
Oct 11 02:24:03 compute-0 nova_compute[356901]:       <stats period="10"/>
Oct 11 02:24:03 compute-0 nova_compute[356901]:     </memballoon>
Oct 11 02:24:03 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:24:03 compute-0 nova_compute[356901]: </domain>
Oct 11 02:24:03 compute-0 nova_compute[356901]:  _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7555
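The domain XML logged by _get_guest_xml is ordinary libvirt XML, with Nova's own metadata under the http://openstack.org/xmlns/libvirt/nova/1.1 namespace. A small sketch that pulls the disk targets and flavor out of it, assuming the XML has been saved from the log to a file (the filename is hypothetical):

    import xml.etree.ElementTree as ET

    ns = {"nova": "http://openstack.org/xmlns/libvirt/nova/1.1"}
    root = ET.parse("instance-00000003.xml").getroot()  # hypothetical file

    for disk in root.findall("./devices/disk"):
        src, tgt = disk.find("source"), disk.find("target")
        # e.g.: disk vda rbd vms/7513b93e-d2b8-4ae0-8f1c-3df190945259_disk
        print(disk.get("device"), tgt.get("dev"), src.get("protocol"), src.get("name"))

    flavor = root.find("./metadata/nova:instance/nova:flavor", ns)
    print("flavor:", flavor.get("name"))  # m1.small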
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.612 2 DEBUG nova.compute.manager [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Preparing to wait for external event network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e prepare_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:283
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.613 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.613 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" acquired by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.613 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" "released" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.614 2 DEBUG nova.virt.libvirt.vif [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:23:54Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description=None,display_name='vn-vgckve2-djjfpphdsuuh-gthznuj2xct2-vnf-jmvtgw3mflyn',ec2_ids=EC2Ids,ephemeral_gb=1,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(1),hidden=False,host='compute-0.ctlplane.example.com',hostname='vn-vgckve2-djjfpphdsuuh-gthznuj2xct2-vnf-jmvtgw3mflyn',id=3,image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',info_cache=InstanceInfoCache,instance_type_id=1,kernel_id='',key_data=None,key_name=None,keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=512,metadata={metering.server_group='3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=PciDeviceList,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='97026531b3404a11869cb85a059c4a0d',ramdisk_id='',reservation_id='r-t8a5mh5u',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader,admin',image_base_image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_min_disk='1',image_min_ram='0',image_owner_specified.openstack.md5='',image_owner_specified.openstack.object='images/cirros',image_owner_specified.openstack.sha256='',network_allocated='True',owner_project_name='admin',owner_user_name='admin'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:23:57Z,user_data='Q29udGVudC1UeXBlOiBtdWx0aXBhcnQvbWl4ZWQ7IGJvdW5kYXJ5PSI9PT09PT09PT09PT09PT0zOTM4MTI3NjM1NDAwNDM1NzIyPT0iCk1JTUUtVmVyc2lvbjogMS4wCgotLT09PT09PT09PT09PT09PTM5MzgxMjc2MzU0MDA0MzU3MjI9PQpDb250ZW50LVR5cGU6IHRleHQvY2xvdWQtY29uZmlnOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2xvdWQtY29uZmlnIgoKCgojIENhcHR1cmUgYWxsIHN1YnByb2Nlc3Mgb3V0cHV0IGludG8gYSBsb2dmaWxlCiMgVXNlZnVsIGZvciB0cm91Ymxlc2hvb3RpbmcgY2xvdWQtaW5pdCBpc3N1ZXMKb3V0cHV0OiB7YWxsOiAnfCB0ZWUgLWEgL3Zhci9sb2cvY2xvdWQtaW5pdC1vdXRwdXQubG9nJ30KCi0tPT09PT09PT09PT09PT09MzkzODEyNzYzNTQwMDQzNTcyMj09CkNvbnRlbnQtVHlwZTogdGV4dC9jbG91ZC1ib290aG9vazsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImJvb3Rob29rLnNoIgoKIyEvdXNyL2Jpbi9iYXNoCgojIEZJWE1FKHNoYWRvd2VyKSB0aGlzIGlzIGEgd29ya2Fyb3VuZCBmb3IgY2xvdWQtaW5pdCAwLjYuMyBwcmVzZW50IGluIFVidW50dQojIDEyLjA0IExUUzoKIyBodHRwczovL2J1Z3MubGF1bmNocGFkLm5ldC9oZWF0LytidWcvMTI1NzQxMAojCiMgVGhlIG9sZCBjbG91ZC1pbml0IGRvZXNuJ3QgY3JlYXRlIHRoZSB1c2VycyBkaXJlY3RseSBzbyB0aGUgY29tbWFuZHMgdG8gZG8KIyB0aGlzIGFyZSBpbmplY3RlZCB0aG91Z2ggbm92YV91dGlscy5weS4KIwojIE9uY2Ugd2UgZHJvcCBzdXBwb3J0IGZvciAwLjYuMywgd2UgY2FuIHNhZmVseSByZW1vdmUgdGhpcy4KCgojIGluIGNhc2UgaGVhdC1jZm50b29
scyBoYXMgYmVlbiBpbnN0YWxsZWQgZnJvbSBwYWNrYWdlIGJ1dCBubyBzeW1saW5rcwojIGFyZSB5ZXQgaW4gL29wdC9hd3MvYmluLwpjZm4tY3JlYXRlLWF3cy1zeW1saW5rcwoKIyBEbyBub3QgcmVtb3ZlIC0gdGhlIGNsb3VkIGJvb3Rob29rIHNob3VsZCBhbHdheXMgcmV0dXJuIHN1Y2Nlc3MKZXhpdCAwCgotLT09PT09PT09PT09PT09PTM5MzgxMjc2MzU0MDA0MzU3MjI9PQpDb250ZW50LVR5cGU6IHRleHQvcGFydC1oYW5kbGVyOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0icGFydC1oYW5kbGVyLnB5IgoKIyBwYXJ0LWhhbmRsZXIKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wCiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZWQgdW5kZXIgdGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBvcwppbXBvcnQgc3lzCgoKZGVmIGxpc3RfdHlwZXMoKToKICAgIHJldHVybiBbInRleHQveC1jZm5pbml0ZGF0YSJdCgoKZGVmIGhhbmRsZV9wYXJ0KGRhdGEsIGN0eXBlLCBmaWxlbmFtZSwgcGF5bG9hZCk6CiAgICBpZiBjdHlwZSA9PSAiX19iZWdpbl9fIjoKICAgICAgICB0cnk6CiAgICAgICAgICAgIG9zLm1ha2VkaXJzKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzJywgaW50KCI3MDAiLCA4KSkKICAgICAgICBleGNlcHQgT1NFcnJvcjoKICAgICAgICAgICAgZXhfdHlwZSwgZSwgdGIgPSBzeXMuZXhjX2luZm8oKQogICAgICAgICAgICBpZiBlLmVycm5vICE9IGVycm5vLkVFWElTVDoKICAgICAgICAgICAgICAgIHJhaXNlCiAgICAgICAgcmV0dXJuCgogICAgaWYgY3R5cGUgPT0gIl9fZW5kX18iOgogICAgICAgIHJldHVybgoKICAgIHRpbWVzdGFtcCA9IGRhdGV0aW1lLmRhdGV0aW1lLm5vdygpCiAgICB3aXRoIG9wZW4oJy92YXIvbG9nL3BhcnQtaGFuZGxlci5sb2cnLCAnYScpIGFzIGxvZzoKICAgICAgICBsb2cud3JpdGUoJyVzIGZpbGVuYW1lOiVzLCBjdHlwZTolc1xuJyAlICh0aW1lc3RhbXAsIGZpbGVuYW1lLCBjdHlwZSkpCgogICAgaWYgY3R5cGUgPT0gJ3RleHQveC1jZm5pbml0ZGF0YSc6CiAgICAgICAgd2l0aCBvcGVuKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzLyVzJyAlIGZpbGVuYW1lLCAndycpIGFzIGY6CiAgICAgICAgICAgIGYud3JpdGUocGF5bG9hZCkKCiAgICAgICAgIyBUT0RPKHNkYWtlKSBob3BlZnVsbHkgdGVtcG9yYXJ5IHVudGlsIHVzZXJzIG1vdmUgdG8gaGVhdC1jZm50b29scy0xLjMKICAgICAgICB3aXRoIG9wZW4oJy92YXIvbGliL2Nsb3VkL2RhdGEvJXMnICUgZmlsZW5hbWUsICd3JykgYXMgZjoKICAgICAgICAgICAgZi53cml0ZShwYXlsb2FkKQoKLS09PT09PT09PT09PT09PT0zOTM4MTI3NjM1NDAwNDM1NzIyPT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtY2ZuaW5pdGRhdGE7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJjZm4tdXNlcmRhdGEiCgoKLS09PT09PT09PT09PT09PT0zOTM4MTI3NjM1NDAwNDM1NzIyPT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtc2hlbGxzY3JpcHQ7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJsb2d1c2VyZGF0YS5weSIKCiMhL3Vzci9iaW4vZW52IHB5dGhvbjMKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wCiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZW
QgdW5kZXIgdGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBsb2dnaW5nCmltcG9ydCBvcwppbXBvcnQgc3VicHJvY2VzcwppbXBvcnQgc3lzCgoKVkFSX1BBVEggPSAnL3Zhci9saWIvaGVhdC1jZm50b29scycKTE9HID0gbG9nZ2luZy5nZXRMb2dnZXIoJ2hlYXQtcHJvdmlzaW9uJykKCgpkZWYgaW5pdF9sb2dnaW5nKCk6CiAgICBMT0cuc2V0TGV2ZWwobG9nZ2luZy5JTkZPKQogICAgTE9HLmFkZEhhbmRsZXIobG9nZ2luZy5TdHJlYW1IYW5kbGVyKCkpCiAgICBmaCA9IGxvZ2dpbmcuRmlsZUhhbmRsZXIoIi92YXIvbG9nL2hlYXQtcHJvdmlzaW9uLmxvZyIpCiAgICBvcy5jaG1vZChmaC5iYXNlRmlsZW5hbWUsIGludCgiNjAwIiwgOCkpCiAgICBMT0cuYWRkSGFuZGxlcihmaCkKCgpkZWYgY2FsbChhcmdzKToKCiAgICBjbGFzcyBMb2dTdHJlYW0ob2JqZWN0KToKCiAgICAgICAgZGVmIHdyaXRlKHNlbGYsIGRhdGEpOgogICAgICAgICAgICBMT0cuaW5mbyhkYXRhKQoKICAgIExPRy5pbmZvKCclc1xuJywgJyAnLmpvaW4oYXJncykpICAjIG5vcWEKICAgIHRyeToKICAgICAgICBscyA9IExvZ1N0cmVhbSgpCiAgICAgICAgcCA9IHN1YnByb2Nlc3MuUG9wZW4o
Oct 11 02:24:03 compute-0 nova_compute[356901]: YXJncywgc3Rkb3V0PXN1YnByb2Nlc3MuUElQRSwKICAgICAgICAgICAgICAgICAgICAgICAgICAgICBzdGRlcnI9c3VicHJvY2Vzcy5QSVBFKQogICAgICAgIGRhdGEgPSBwLmNvbW11bmljYXRlKCkKICAgICAgICBpZiBkYXRhOgogICAgICAgICAgICBmb3IgeCBpbiBkYXRhOgogICAgICAgICAgICAgICAgbHMud3JpdGUoeCkKICAgIGV4Y2VwdCBPU0Vycm9yOgogICAgICAgIGV4X3R5cGUsIGV4LCB0YiA9IHN5cy5leGNfaW5mbygpCiAgICAgICAgaWYgZXguZXJybm8gPT0gZXJybm8uRU5PRVhFQzoKICAgICAgICAgICAgTE9HLmVycm9yKCdVc2VyZGF0YSBlbXB0eSBvciBub3QgZXhlY3V0YWJsZTogJXMnLCBleCkKICAgICAgICAgICAgcmV0dXJuIG9zLkVYX09LCiAgICAgICAgZWxzZToKICAgICAgICAgICAgTE9HLmVycm9yKCdPUyBlcnJvciBydW5uaW5nIHVzZXJkYXRhOiAlcycsIGV4KQogICAgICAgICAgICByZXR1cm4gb3MuRVhfT1NFUlIKICAgIGV4Y2VwdCBFeGNlcHRpb246CiAgICAgICAgZXhfdHlwZSwgZXgsIHRiID0gc3lzLmV4Y19pbmZvKCkKICAgICAgICBMT0cuZXJyb3IoJ1Vua25vd24gZXJyb3IgcnVubmluZyB1c2VyZGF0YTogJXMnLCBleCkKICAgICAgICByZXR1cm4gb3MuRVhfU09GVFdBUkUKICAgIHJldHVybiBwLnJldHVybmNvZGUKCgpkZWYgbWFpbigpOgogICAgdXNlcmRhdGFfcGF0aCA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ2Nmbi11c2VyZGF0YScpCiAgICBvcy5jaG1vZCh1c2VyZGF0YV9wYXRoLCBpbnQoIjcwMCIsIDgpKQoKICAgIExPRy5pbmZvKCdQcm92aXNpb24gYmVnYW46ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICByZXR1cm5jb2RlID0gY2FsbChbdXNlcmRhdGFfcGF0aF0pCiAgICBMT0cuaW5mbygnUHJvdmlzaW9uIGRvbmU6ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICBpZiByZXR1cm5jb2RlOgogICAgICAgIHJldHVybiByZXR1cm5jb2RlCgoKaWYgX19uYW1lX18gPT0gJ19fbWFpbl9fJzoKICAgIGluaXRfbG9nZ2luZygpCgogICAgY29kZSA9IG1haW4oKQogICAgaWYgY29kZToKICAgICAgICBMT0cuZXJyb3IoJ1Byb3Zpc2lvbiBmYWlsZWQgd2l0aCBleGl0IGNvZGUgJXMnLCBjb2RlKQogICAgICAgIHN5cy5leGl0KGNvZGUpCgogICAgcHJvdmlzaW9uX2xvZyA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ3Byb3Zpc2lvbi1maW5pc2hlZCcpCiAgICAjIHRvdWNoIHRoZSBmaWxlIHNvIGl0IGlzIHRpbWVzdGFtcGVkIHdpdGggd2hlbiBmaW5pc2hlZAogICAgd2l0aCBvcGVuKHByb3Zpc2lvbl9sb2csICdhJyk6CiAgICAgICAgb3MudXRpbWUocHJvdmlzaW9uX2xvZywgTm9uZSkKCi0tPT09PT09PT09PT09PT09MzkzODEyNzYzNTQwMDQzNTcyMj09CkNvbnRlbnQtVHlwZTogdGV4dC94LWNmbmluaXRkYXRhOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2ZuLW1ldGFkYXRhLXNlcnZlciIKCmh0dHBzOi8vaGVhdC1jZm5hcGktaW50ZXJuYWwub3BlbnN0YWNrLnN2Yzo4MDAwL3YxLwotLT09PT09PT09PT09PT09PTM5MzgxMjc2MzU0MDA0MzU3MjI9PQpDb250ZW50LVR5cGU6IHRleHQveC1jZm5pbml0ZGF0YTsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImNmbi1ib3RvLWNmZyIKCltCb3RvXQpkZWJ1ZyA9IDAKaXNfc2VjdXJlID0gMApodHRwc192YWxpZGF0ZV9jZXJ0aWZpY2F0ZXMgPSAxCmNmbl9yZWdpb25fbmFtZSA9IGhlYXQKY2ZuX3JlZ2lvbl9lbmRwb2ludCA9IGhlYXQtY2ZuYXBpLWludGVybmFsLm9wZW5zdGFjay5zdmMKLS09PT09PT09PT09PT09PT0zOTM4MTI3NjM1NDAwNDM1NzIyPT0tLQo=',user_id='d215f3ebbc07435493ccd666fc80109d',uuid=7513b93e-d2b8-4ae0-8f1c-3df190945259,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "address": "fa:16:3e:16:ee:dc", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.225", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.204", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": 
"ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa942acb1-1e", "ovs_interfaceid": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} plug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:710
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.615 2 DEBUG nova.network.os_vif_util [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converting VIF {"id": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "address": "fa:16:3e:16:ee:dc", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.225", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.204", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa942acb1-1e", "ovs_interfaceid": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.615 2 DEBUG nova.network.os_vif_util [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:16:ee:dc,bridge_name='br-int',has_traffic_filtering=True,id=a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tapa942acb1-1e') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.616 2 DEBUG os_vif [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Plugging vif VIFOpenVSwitch(active=False,address=fa:16:3e:16:ee:dc,bridge_name='br-int',has_traffic_filtering=True,id=a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tapa942acb1-1e') plug /usr/lib/python3.9/site-packages/os_vif/__init__.py:76
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.616 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.617 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddBridgeCommand(_result=None, name=br-int, may_exist=True, datapath_type=system) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.618 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.621 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.621 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapa942acb1-1e, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.622 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Interface, record=tapa942acb1-1e, col_values=(('external_ids', {'iface-id': 'a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e', 'iface-status': 'active', 'attached-mac': 'fa:16:3e:16:ee:dc', 'vm-uuid': '7513b93e-d2b8-4ae0-8f1c-3df190945259'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.624 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.626 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:24:03 compute-0 NetworkManager[44908]: <info>  [1760149443.6266] manager: (tapa942acb1-1e): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/33)
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.640 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.642 2 INFO os_vif [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Successfully plugged vif VIFOpenVSwitch(active=False,address=fa:16:3e:16:ee:dc,bridge_name='br-int',has_traffic_filtering=True,id=a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tapa942acb1-1e')
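The AddPortCommand/DbSetCommand transactions above are what os-vif drives through ovsdbapp. On the host they are roughly equivalent to the following ovs-vsctl calls; this is a sketch with values taken from this log, not the exact code path Nova runs:

    import subprocess

    PORT = "tapa942acb1-1e"
    subprocess.run(["ovs-vsctl", "--may-exist", "add-port", "br-int", PORT], check=True)
    subprocess.run(
        ["ovs-vsctl", "set", "Interface", PORT,
         "external_ids:iface-id=a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e",
         "external_ids:iface-status=active",
         "external_ids:attached-mac=fa:16:3e:16:ee:dc",
         "external_ids:vm-uuid=7513b93e-d2b8-4ae0-8f1c-3df190945259"],
        check=True,
    )

The iface-id in external_ids is what lets ovn-controller match the OVS interface to the Neutron port and claim it a moment later.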
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.707 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No BDM found with device name vda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.708 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No BDM found with device name vdb, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.708 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No BDM found with device name sda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.708 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No VIF found with MAC fa:16:3e:16:ee:dc, not building metadata _build_interface_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12092
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.709 2 INFO nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Using config drive
Oct 11 02:24:03 compute-0 nova_compute[356901]: 2025-10-11 02:24:03.743 2 DEBUG nova.storage.rbd_utils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 7513b93e-d2b8-4ae0-8f1c-3df190945259_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:24:03 compute-0 rsyslogd[187706]: message too long (8192) with configured size 8096, begin of message is: 2025-10-11 02:24:03.595 2 DEBUG nova.virt.libvirt.vif [None req-45007437-32c1-4d [v8.2506.0-2.el9 try https://www.rsyslog.com/e/2445 ]
Oct 11 02:24:03 compute-0 rsyslogd[187706]: message too long (8192) with configured size 8096, begin of message is: 2025-10-11 02:24:03.614 2 DEBUG nova.virt.libvirt.vif [None req-45007437-32c1-4d [v8.2506.0-2.el9 try https://www.rsyslog.com/e/2445 ]
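These two rsyslogd complaints explain why the Instance dumps above arrive split mid-base64: both VIF DEBUG records (02:24:03.595 and 02:24:03.614) exceeded the configured 8096-byte message limit. If the full records are needed in one piece, one option is raising rsyslog's global limit; a sketch, assuming the legacy directive style (it must appear before any module/input lines):

    # /etc/rsyslog.conf
    $MaxMessageSize 64k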
Oct 11 02:24:04 compute-0 nova_compute[356901]: 2025-10-11 02:24:04.059 2 INFO nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Creating config drive at /var/lib/nova/instances/7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.config
Oct 11 02:24:04 compute-0 nova_compute[356901]: 2025-10-11 02:24:04.071 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): /usr/bin/mkisofs -o /var/lib/nova/instances/7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpsszcmpiq execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:24:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1383: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 25 KiB/s rd, 1.4 MiB/s wr, 37 op/s
Oct 11 02:24:04 compute-0 nova_compute[356901]: 2025-10-11 02:24:04.223 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "/usr/bin/mkisofs -o /var/lib/nova/instances/7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpsszcmpiq" returned: 0 in 0.152s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:24:04 compute-0 nova_compute[356901]: 2025-10-11 02:24:04.279 2 DEBUG nova.storage.rbd_utils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 7513b93e-d2b8-4ae0-8f1c-3df190945259_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:24:04 compute-0 nova_compute[356901]: 2025-10-11 02:24:04.287 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.config 7513b93e-d2b8-4ae0-8f1c-3df190945259_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:24:04 compute-0 nova_compute[356901]: 2025-10-11 02:24:04.398 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:04 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2979624712' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:24:04 compute-0 ceph-mon[191930]: pgmap v1383: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 25 KiB/s rd, 1.4 MiB/s wr, 37 op/s
Oct 11 02:24:04 compute-0 sshd-session[429890]: Invalid user debian from 121.227.153.123 port 49278
Oct 11 02:24:04 compute-0 nova_compute[356901]: 2025-10-11 02:24:04.596 2 DEBUG oslo_concurrency.processutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.config 7513b93e-d2b8-4ae0-8f1c-3df190945259_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.309s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:24:04 compute-0 nova_compute[356901]: 2025-10-11 02:24:04.597 2 INFO nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Deleting local config drive /var/lib/nova/instances/7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.config because it was imported into RBD.
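The config drive is built locally with mkisofs (volume label config-2), imported into the vms pool with rbd import, and the local copy is then deleted. To inspect what the guest will see, the image can be exported back out of Ceph and loop-mounted; a sketch using the pool, image, and credential values from this log (/tmp and /mnt paths are arbitrary):

    import subprocess

    img = "7513b93e-d2b8-4ae0-8f1c-3df190945259_disk.config"
    subprocess.run(["rbd", "export", "--pool", "vms", img, "/tmp/disk.config",
                    "--id", "openstack", "--conf", "/etc/ceph/ceph.conf"], check=True)
    # ISO9660 image labeled config-2 (see the mkisofs -V flag above)
    subprocess.run(["mount", "-o", "loop,ro", "/tmp/disk.config", "/mnt"], check=True)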
Oct 11 02:24:04 compute-0 systemd[1]: Starting libvirt secret daemon...
Oct 11 02:24:04 compute-0 systemd[1]: Started libvirt secret daemon.
Oct 11 02:24:04 compute-0 sshd-session[429890]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:24:04 compute-0 sshd-session[429890]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:24:04 compute-0 kernel: tapa942acb1-1e: entered promiscuous mode
Oct 11 02:24:04 compute-0 NetworkManager[44908]: <info>  [1760149444.7832] manager: (tapa942acb1-1e): new Tun device (/org/freedesktop/NetworkManager/Devices/34)
Oct 11 02:24:04 compute-0 ovn_controller[88370]: 2025-10-11T02:24:04Z|00040|binding|INFO|Claiming lport a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e for this chassis.
Oct 11 02:24:04 compute-0 ovn_controller[88370]: 2025-10-11T02:24:04Z|00041|binding|INFO|a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e: Claiming fa:16:3e:16:ee:dc 192.168.0.225
Oct 11 02:24:04 compute-0 nova_compute[356901]: 2025-10-11 02:24:04.794 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:04 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:24:04.801 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:16:ee:dc 192.168.0.225'], port_security=['fa:16:3e:16:ee:dc 192.168.0.225'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'name': 'vnf-scaleup_group-wzkjkvgckve2-djjfpphdsuuh-gthznuj2xct2-port-zo7fokg3iel2', 'neutron:cidrs': '192.168.0.225/24', 'neutron:device_id': '7513b93e-d2b8-4ae0-8f1c-3df190945259', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'neutron:port_capabilities': '', 'neutron:port_name': 'vnf-scaleup_group-wzkjkvgckve2-djjfpphdsuuh-gthznuj2xct2-port-zo7fokg3iel2', 'neutron:project_id': '97026531b3404a11869cb85a059c4a0d', 'neutron:revision_number': '2', 'neutron:security_group_ids': 'c0c90d87-d29f-4e96-98a1-ffb301424ea4', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:port_fip': '192.168.122.204'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=d19b0dd1-1656-436b-911a-8f2dcc98f6bf, chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], tunnel_key=5, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e) old=Port_Binding(chassis=[]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:24:04 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:24:04.803 286362 INFO neutron.agent.ovn.metadata.agent [-] Port a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e in datapath d4dded16-3268-4cf9-bb6b-aa5200d5e4ec bound to our chassis
Oct 11 02:24:04 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:24:04.807 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network d4dded16-3268-4cf9-bb6b-aa5200d5e4ec
Oct 11 02:24:04 compute-0 nova_compute[356901]: 2025-10-11 02:24:04.833 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:04 compute-0 ovn_controller[88370]: 2025-10-11T02:24:04Z|00042|binding|INFO|Setting lport a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e ovn-installed in OVS
Oct 11 02:24:04 compute-0 ovn_controller[88370]: 2025-10-11T02:24:04Z|00043|binding|INFO|Setting lport a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e up in Southbound
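ovn-controller claims the logical port for this chassis, marks it ovn-installed in OVS, and flips it up in the Southbound database; that up transition is what eventually surfaces as the network-vif-plugged event Nova registered a wait for earlier. The binding can be checked against the SB database directly; a sketch, run wherever ovn-sbctl can reach the SB DB:

    import subprocess

    subprocess.run(["ovn-sbctl", "find", "Port_Binding",
                    "logical_port=a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e"], check=True)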
Oct 11 02:24:04 compute-0 nova_compute[356901]: 2025-10-11 02:24:04.840 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:04 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:24:04.838 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[63c7ee3c-ed6d-49ad-bf97-c0def9736184]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:24:04 compute-0 systemd-udevd[429984]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:24:04 compute-0 systemd-machined[137586]: New machine qemu-3-instance-00000003.
Oct 11 02:24:04 compute-0 NetworkManager[44908]: <info>  [1760149444.8588] device (tapa942acb1-1e): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 02:24:04 compute-0 systemd[1]: Started Virtual Machine qemu-3-instance-00000003.
Oct 11 02:24:04 compute-0 NetworkManager[44908]: <info>  [1760149444.8604] device (tapa942acb1-1e): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Oct 11 02:24:04 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:24:04.895 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[3d2e73d8-62fd-4cf5-8513-d0ae442896cc]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:24:04 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:24:04.901 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[532a8c01-fa19-43e9-8e0c-43642e3b620a]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:24:04 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:24:04.937 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[b1b40cbc-5e3d-43f6-bcb1-c1f2111fa8a1]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:24:04 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:24:04.964 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[a3cc7698-248d-402c-8f54-528cf95921e8]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapd4dded16-31'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:11:50:48'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 8, 'tx_packets': 7, 'rx_bytes': 832, 'tx_bytes': 438, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 8, 'tx_packets': 7, 'rx_bytes': 832, 'tx_bytes': 438, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 15], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 548909, 'reachable_time': 31539, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 
'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 8, 'inoctets': 720, 'indelivers': 1, 'outforwdatagrams': 0, 'outpkts': 3, 'outoctets': 228, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 8, 'outmcastpkts': 3, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 720, 'outmcastoctets': 228, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 8, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 1, 'inerrors': 0, 'outmsgs': 3, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 429997, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:24:04 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:24:04.990 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[5dd3be2a-e551-45e8-9fae-cc1925b08d70]: (4, ({'family': 2, 'prefixlen': 32, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '169.254.169.254'], ['IFA_LOCAL', '169.254.169.254'], ['IFA_BROADCAST', '169.254.169.254'], ['IFA_LABEL', 'tapd4dded16-31'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 548926, 'tstamp': 548926}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 429999, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'}, {'family': 2, 'prefixlen': 24, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '192.168.0.2'], ['IFA_LOCAL', '192.168.0.2'], ['IFA_BROADCAST', '192.168.0.255'], ['IFA_LABEL', 'tapd4dded16-31'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 548931, 'tstamp': 548931}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 429999, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'})) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:24:04 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:24:04.992 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapd4dded16-30, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:24:04 compute-0 nova_compute[356901]: 2025-10-11 02:24:04.995 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:04 compute-0 nova_compute[356901]: 2025-10-11 02:24:04.997 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:04 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:24:04.997 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapd4dded16-30, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:24:04 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:24:04.998 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:24:04 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:24:04.998 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tapd4dded16-30, col_values=(('external_ids', {'iface-id': 'f0f8488b-423f-46a5-8a6a-984c2ae3438e'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:24:04 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:24:04.999 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:24:05 compute-0 systemd[1]: Starting libvirt proxy daemon...
Oct 11 02:24:05 compute-0 systemd[1]: Started libvirt proxy daemon.
Oct 11 02:24:05 compute-0 nova_compute[356901]: 2025-10-11 02:24:05.111 2 DEBUG nova.compute.manager [req-d9aa94b9-997e-4692-b3a9-e50ea26a99ff req-45a14509-53c8-48e2-b68f-1d2baae542c9 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Received event network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:24:05 compute-0 nova_compute[356901]: 2025-10-11 02:24:05.112 2 DEBUG oslo_concurrency.lockutils [req-d9aa94b9-997e-4692-b3a9-e50ea26a99ff req-45a14509-53c8-48e2-b68f-1d2baae542c9 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:24:05 compute-0 nova_compute[356901]: 2025-10-11 02:24:05.113 2 DEBUG oslo_concurrency.lockutils [req-d9aa94b9-997e-4692-b3a9-e50ea26a99ff req-45a14509-53c8-48e2-b68f-1d2baae542c9 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:24:05 compute-0 nova_compute[356901]: 2025-10-11 02:24:05.113 2 DEBUG oslo_concurrency.lockutils [req-d9aa94b9-997e-4692-b3a9-e50ea26a99ff req-45a14509-53c8-48e2-b68f-1d2baae542c9 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:24:05 compute-0 nova_compute[356901]: 2025-10-11 02:24:05.113 2 DEBUG nova.compute.manager [req-d9aa94b9-997e-4692-b3a9-e50ea26a99ff req-45a14509-53c8-48e2-b68f-1d2baae542c9 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Processing event network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10808
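The five lines above are Nova's external-event rendezvous: Neutron's network-vif-plugged event arrives over RPC, and pop_instance_event hands it to whichever thread registered a waiter for it, serialized by the per-instance "-events" lock. A toy sketch of that pattern, not Nova's actual code:

    # Toy sketch of the waiter/pop rendezvous seen above; not Nova's code.
    import threading

    _waiters = {}            # (instance_uuid, event_name) -> Event
    _lock = threading.Lock()

    def prepare_event(instance, event):
        # Registered by the spawning thread before it blocks.
        with _lock:
            return _waiters.setdefault((instance, event), threading.Event())

    def pop_instance_event(instance, event):
        # Called from the external-event RPC path.
        with _lock:
            ev = _waiters.pop((instance, event), None)
        if ev:
            ev.set()         # "Processing event network-vif-plugged-..."
        # else: the "Received unexpected event" WARNING path seen later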
Oct 11 02:24:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1384: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 27 KiB/s rd, 1.4 MiB/s wr, 40 op/s
Oct 11 02:24:06 compute-0 podman[430078]: 2025-10-11 02:24:06.263838763 +0000 UTC m=+0.139429429 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, tcib_managed=true, config_id=iscsid, container_name=iscsid, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:24:06 compute-0 podman[430077]: 2025-10-11 02:24:06.27973604 +0000 UTC m=+0.159728024 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, container_name=multipathd, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
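Both health_status records above come from podman's periodic runs of the /openstack/healthcheck scripts mounted into each container. The same state can be read back from container metadata; a sketch, assuming podman is on PATH and the State key layout of recent podman releases:

    # Read back the container health state podman logs above (sketch).
    import json, subprocess

    for name in ('iscsid', 'multipathd'):
        out = subprocess.run(['podman', 'inspect', name],
                             capture_output=True, text=True,
                             check=True).stdout
        state = json.loads(out)[0]['State']
        health = state.get('Health') or state.get('Healthcheck') or {}
        print(name, health.get('Status'), health.get('FailingStreak'))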
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.683 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760149446.6827397, 7513b93e-d2b8-4ae0-8f1c-3df190945259 => Started> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.684 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] VM Started (Lifecycle Event)
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.688 2 DEBUG nova.compute.manager [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Instance event wait completed in 0 seconds for network-vif-plugged wait_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:577
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.698 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Guest created on hypervisor spawn /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4417
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.708 2 INFO nova.virt.libvirt.driver [-] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Instance spawned successfully.
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.709 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Attempting to register defaults for the following image properties: ['hw_cdrom_bus', 'hw_disk_bus', 'hw_input_bus', 'hw_pointer_model', 'hw_video_model', 'hw_vif_model'] _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:917
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.740 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.755 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Synchronizing instance power state after lifecycle event "Started"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.760 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Found default for hw_cdrom_bus of sata _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.761 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Found default for hw_disk_bus of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.761 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Found default for hw_input_bus of usb _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.761 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Found default for hw_pointer_model of usbtablet _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:24:06 compute-0 sshd-session[429890]: Failed password for invalid user debian from 121.227.153.123 port 49278 ssh2
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.762 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Found default for hw_video_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.764 2 DEBUG nova.virt.libvirt.driver [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Found default for hw_vif_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.789 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.789 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760149446.6830359, 7513b93e-d2b8-4ae0-8f1c-3df190945259 => Paused> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.789 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] VM Paused (Lifecycle Event)
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.821 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.826 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760149446.694404, 7513b93e-d2b8-4ae0-8f1c-3df190945259 => Resumed> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.826 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] VM Resumed (Lifecycle Event)
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.842 2 INFO nova.compute.manager [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Took 9.31 seconds to spawn the instance on the hypervisor.
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.843 2 DEBUG nova.compute.manager [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0013698074371622062 of space, bias 1.0, pg target 0.41094223114866185 quantized to 32 (current 32)
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:24:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
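Each pg_autoscaler pair above follows one arithmetic: pg target = capacity_ratio × bias × (target PGs per OSD × OSD count). With the default mon_target_pg_per_osd of 100 and this cluster's 3 OSDs (60 GiB total, the 64411926528 bytes printed on every line), the multiplier is 300, which reproduces each printed target; the result is then quantized to a power of two subject to the pool's pg_num floor. A quick check of that arithmetic, where osd_count and target_per_osd are inferred rather than logged:

    # Verify the pg_autoscaler arithmetic from the lines above.
    # osd_count=3 and target_per_osd=100 are inferred for this cluster.
    def raw_pg_target(capacity_ratio, bias, osd_count=3, target_per_osd=100):
        return capacity_ratio * bias * target_per_osd * osd_count

    print(raw_pg_target(0.0013698074371622062, 1.0))  # ~0.41094 -> 'vms'
    print(raw_pg_target(5.087256625643029e-07, 4.0))  # ~0.00061 -> 'cephfs.cephfs.meta'
    # The module then quantizes to a power of two bounded by the pool
    # minimum, hence "quantized to 32 (current 32)" and similar above.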
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.857 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.862 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Synchronizing instance power state after lifecycle event "Resumed"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.894 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.914 2 INFO nova.compute.manager [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Took 10.47 seconds to build instance.
Oct 11 02:24:06 compute-0 nova_compute[356901]: 2025-10-11 02:24:06.930 2 DEBUG oslo_concurrency.lockutils [None req-45007437-32c1-4d8b-bf21-28766bca5dec d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259" "released" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: held 10.552s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:24:07 compute-0 ceph-mon[191930]: pgmap v1384: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 27 KiB/s rd, 1.4 MiB/s wr, 40 op/s
Oct 11 02:24:07 compute-0 nova_compute[356901]: 2025-10-11 02:24:07.194 2 DEBUG nova.compute.manager [req-4d179f1a-62b4-435d-80f7-f7c4ce3855a8 req-a39ee783-80d0-4a15-989c-ec2666fadfe1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Received event network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:24:07 compute-0 nova_compute[356901]: 2025-10-11 02:24:07.196 2 DEBUG oslo_concurrency.lockutils [req-4d179f1a-62b4-435d-80f7-f7c4ce3855a8 req-a39ee783-80d0-4a15-989c-ec2666fadfe1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:24:07 compute-0 nova_compute[356901]: 2025-10-11 02:24:07.196 2 DEBUG oslo_concurrency.lockutils [req-4d179f1a-62b4-435d-80f7-f7c4ce3855a8 req-a39ee783-80d0-4a15-989c-ec2666fadfe1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:24:07 compute-0 nova_compute[356901]: 2025-10-11 02:24:07.197 2 DEBUG oslo_concurrency.lockutils [req-4d179f1a-62b4-435d-80f7-f7c4ce3855a8 req-a39ee783-80d0-4a15-989c-ec2666fadfe1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:24:07 compute-0 nova_compute[356901]: 2025-10-11 02:24:07.197 2 DEBUG nova.compute.manager [req-4d179f1a-62b4-435d-80f7-f7c4ce3855a8 req-a39ee783-80d0-4a15-989c-ec2666fadfe1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] No waiting events found dispatching network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:24:07 compute-0 nova_compute[356901]: 2025-10-11 02:24:07.198 2 WARNING nova.compute.manager [req-4d179f1a-62b4-435d-80f7-f7c4ce3855a8 req-a39ee783-80d0-4a15-989c-ec2666fadfe1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Received unexpected event network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e for instance with vm_state active and task_state None.
Oct 11 02:24:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1385: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 1.4 MiB/s wr, 43 op/s
Oct 11 02:24:08 compute-0 nova_compute[356901]: 2025-10-11 02:24:08.626 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:08 compute-0 sshd-session[429890]: Connection closed by invalid user debian 121.227.153.123 port 49278 [preauth]
Oct 11 02:24:09 compute-0 ceph-mon[191930]: pgmap v1385: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 1.4 MiB/s wr, 43 op/s
Oct 11 02:24:09 compute-0 nova_compute[356901]: 2025-10-11 02:24:09.401 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1386: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 302 KiB/s rd, 1.4 MiB/s wr, 55 op/s
Oct 11 02:24:10 compute-0 sshd-session[430113]: Invalid user debian from 121.227.153.123 port 54206
Oct 11 02:24:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:24:10 compute-0 sshd-session[430113]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:24:10 compute-0 sshd-session[430113]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:24:10 compute-0 ceph-mon[191930]: pgmap v1386: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 302 KiB/s rd, 1.4 MiB/s wr, 55 op/s
Oct 11 02:24:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1387: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 495 KiB/s rd, 756 KiB/s wr, 50 op/s
Oct 11 02:24:12 compute-0 sshd-session[430113]: Failed password for invalid user debian from 121.227.153.123 port 54206 ssh2
Oct 11 02:24:13 compute-0 ceph-mon[191930]: pgmap v1387: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 495 KiB/s rd, 756 KiB/s wr, 50 op/s
Oct 11 02:24:13 compute-0 nova_compute[356901]: 2025-10-11 02:24:13.630 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1388: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 588 KiB/s wr, 73 op/s
Oct 11 02:24:14 compute-0 nova_compute[356901]: 2025-10-11 02:24:14.404 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:14 compute-0 sshd-session[430113]: Connection closed by invalid user debian 121.227.153.123 port 54206 [preauth]
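The sshd-session lines from 121.227.153.123 are a routine password-guessing loop: invalid user, PAM authentication failure, failed password, preauth disconnect, then a retry from a fresh source port. A small tally script for such noise, run against a saved copy of this journal ('messages.log' is a placeholder path):

    # Count failed SSH password attempts per source IP (sketch).
    import re
    from collections import Counter

    pat = re.compile(r'Failed password for (?:invalid user )?\S+ '
                     r'from (\S+) port')
    hits = Counter()
    with open('messages.log') as fh:           # placeholder capture file
        for line in fh:
            m = pat.search(line)
            if m:
                hits[m.group(1)] += 1

    for ip, n in hits.most_common():
        print(ip, n)                           # e.g. 121.227.153.123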
Oct 11 02:24:15 compute-0 ceph-mon[191930]: pgmap v1388: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 588 KiB/s wr, 73 op/s
Oct 11 02:24:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #63. Immutable memtables: 0.
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:24:15.389381) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 33] Flushing memtable with next log file: 63
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149455389460, "job": 33, "event": "flush_started", "num_memtables": 1, "num_entries": 1370, "num_deletes": 511, "total_data_size": 1623815, "memory_usage": 1653640, "flush_reason": "Manual Compaction"}
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 33] Level-0 flush table #64: started
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149455404169, "cf_name": "default", "job": 33, "event": "table_file_creation", "file_number": 64, "file_size": 963648, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 27586, "largest_seqno": 28955, "table_properties": {"data_size": 958887, "index_size": 1709, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1989, "raw_key_size": 15173, "raw_average_key_size": 19, "raw_value_size": 946544, "raw_average_value_size": 1196, "num_data_blocks": 78, "num_entries": 791, "num_filter_entries": 791, "num_deletions": 511, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760149346, "oldest_key_time": 1760149346, "file_creation_time": 1760149455, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 64, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 33] Flush lasted 14940 microseconds, and 4799 cpu microseconds.
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:24:15.404330) [db/flush_job.cc:967] [default] [JOB 33] Level-0 flush table #64: 963648 bytes OK
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:24:15.404363) [db/memtable_list.cc:519] [default] Level-0 commit table #64 started
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:24:15.407818) [db/memtable_list.cc:722] [default] Level-0 commit table #64: memtable #1 done
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:24:15.407843) EVENT_LOG_v1 {"time_micros": 1760149455407834, "job": 33, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:24:15.407866) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 33] Try to delete WAL files size 1616618, prev total WAL file size 1616618, number of live WAL files 2.
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000060.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:24:15.409096) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6D6772737461740031303032' seq:72057594037927935, type:22 .. '6D6772737461740031323535' seq:0, type:0; will stop at (end)
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 34] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 33 Base level 0, inputs: [64(941KB)], [62(8786KB)]
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149455409159, "job": 34, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [64], "files_L6": [62], "score": -1, "input_data_size": 9960650, "oldest_snapshot_seqno": -1}
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 34] Generated table #65: 4831 keys, 7195375 bytes, temperature: kUnknown
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149455463117, "cf_name": "default", "job": 34, "event": "table_file_creation", "file_number": 65, "file_size": 7195375, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 7164162, "index_size": 18047, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 12101, "raw_key_size": 121977, "raw_average_key_size": 25, "raw_value_size": 7077711, "raw_average_value_size": 1465, "num_data_blocks": 747, "num_entries": 4831, "num_filter_entries": 4831, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760149455, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 65, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:24:15.463448) [db/compaction/compaction_job.cc:1663] [default] [JOB 34] Compacted 1@0 + 1@6 files to L6 => 7195375 bytes
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:24:15.466822) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 184.1 rd, 133.0 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(0.9, 8.6 +0.0 blob) out(6.9 +0.0 blob), read-write-amplify(17.8) write-amplify(7.5) OK, records in: 5817, records dropped: 986 output_compression: NoCompression
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:24:15.466875) EVENT_LOG_v1 {"time_micros": 1760149455466854, "job": 34, "event": "compaction_finished", "compaction_time_micros": 54099, "compaction_time_cpu_micros": 28538, "output_level": 6, "num_output_files": 1, "total_output_size": 7195375, "num_input_records": 5817, "num_output_records": 4831, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000064.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149455483767, "job": 34, "event": "table_file_deletion", "file_number": 64}
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000062.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149455486673, "job": 34, "event": "table_file_deletion", "file_number": 62}
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:24:15.408833) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:24:15.487323) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:24:15.487329) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:24:15.487330) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:24:15.487332) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:24:15 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:24:15.487333) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
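The rocksdb EVENT_LOG_v1 entries above are single-line JSON payloads after a fixed marker, which makes flush and compaction accounting scriptable; the JOB 34 summary already reports read-write-amplify(17.8) and write-amplify(7.5). A sketch that extracts those events from a saved copy of the mon log ('ceph-mon.log' is a placeholder path):

    # Extract RocksDB EVENT_LOG_v1 payloads from a saved log (sketch).
    import json

    MARKER = 'EVENT_LOG_v1 '
    with open('ceph-mon.log') as fh:           # placeholder capture file
        for line in fh:
            i = line.find(MARKER)
            if i < 0:
                continue
            ev = json.loads(line[i + len(MARKER):])
            if ev.get('event') in ('flush_finished', 'compaction_finished'):
                print(ev['job'], ev['event'], ev.get('lsm_state'))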
Oct 11 02:24:15 compute-0 sshd-session[430115]: Invalid user debian from 121.227.153.123 port 54210
Oct 11 02:24:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1389: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 20 KiB/s wr, 60 op/s
Oct 11 02:24:16 compute-0 sshd-session[430115]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:24:16 compute-0 sshd-session[430115]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:24:16 compute-0 ceph-mon[191930]: pgmap v1389: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 20 KiB/s wr, 60 op/s
Oct 11 02:24:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1390: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 20 KiB/s wr, 57 op/s
Oct 11 02:24:18 compute-0 sshd-session[430115]: Failed password for invalid user debian from 121.227.153.123 port 54210 ssh2
Oct 11 02:24:18 compute-0 podman[430117]: 2025-10-11 02:24:18.22064729 +0000 UTC m=+0.103974821 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:24:18 compute-0 podman[430119]: 2025-10-11 02:24:18.230225419 +0000 UTC m=+0.104408395 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:24:18 compute-0 podman[430118]: 2025-10-11 02:24:18.25585087 +0000 UTC m=+0.130266533 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.expose-services=, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, name=ubi9-minimal, maintainer=Red Hat, Inc., version=9.6, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, managed_by=edpm_ansible, container_name=openstack_network_exporter, release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, build-date=2025-08-20T13:12:41, com.redhat.component=ubi9-minimal-container, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.tags=minimal rhel9, url=https://catalog.redhat.com/en/search?searchType=containers, distribution-scope=public, architecture=x86_64)
Oct 11 02:24:18 compute-0 nova_compute[356901]: 2025-10-11 02:24:18.634 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:19 compute-0 ceph-mon[191930]: pgmap v1390: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 20 KiB/s wr, 57 op/s
Oct 11 02:24:19 compute-0 nova_compute[356901]: 2025-10-11 02:24:19.408 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1391: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 340 B/s wr, 54 op/s
Oct 11 02:24:20 compute-0 sshd-session[430115]: Connection closed by invalid user debian 121.227.153.123 port 54210 [preauth]
Oct 11 02:24:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:24:21 compute-0 ceph-mon[191930]: pgmap v1391: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 340 B/s wr, 54 op/s
Oct 11 02:24:21 compute-0 sshd-session[430178]: Invalid user debian from 121.227.153.123 port 60538
Oct 11 02:24:21 compute-0 sshd-session[430178]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:24:21 compute-0 sshd-session[430178]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:24:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1392: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 1.2 MiB/s rd, 41 op/s
Oct 11 02:24:23 compute-0 ceph-mon[191930]: pgmap v1392: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 1.2 MiB/s rd, 41 op/s
Oct 11 02:24:23 compute-0 podman[430180]: 2025-10-11 02:24:23.247116014 +0000 UTC m=+0.138851341 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.tags=base rhel9, io.buildah.version=1.29.0, summary=Provides the latest release of Red Hat Universal Base Image 9., version=9.4, vendor=Red Hat, Inc., managed_by=edpm_ansible, release=1214.1726694543, io.openshift.expose-services=, name=ubi9, com.redhat.component=ubi9-container, distribution-scope=public, release-0.7.12=, container_name=kepler, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, architecture=x86_64, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, maintainer=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9)
Oct 11 02:24:23 compute-0 nova_compute[356901]: 2025-10-11 02:24:23.638 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:23 compute-0 sshd-session[430178]: Failed password for invalid user debian from 121.227.153.123 port 60538 ssh2
Oct 11 02:24:24 compute-0 sudo[430200]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:24:24 compute-0 sudo[430200]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:24 compute-0 sudo[430200]: pam_unix(sudo:session): session closed for user root
Oct 11 02:24:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1393: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 1.0 MiB/s rd, 32 op/s
Oct 11 02:24:24 compute-0 sudo[430225]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:24:24 compute-0 sudo[430225]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:24 compute-0 sudo[430225]: pam_unix(sudo:session): session closed for user root
Oct 11 02:24:24 compute-0 sudo[430250]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:24:24 compute-0 sudo[430250]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:24 compute-0 sudo[430250]: pam_unix(sudo:session): session closed for user root
Oct 11 02:24:24 compute-0 nova_compute[356901]: 2025-10-11 02:24:24.410 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:24 compute-0 sudo[430275]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:24:24 compute-0 sudo[430275]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:25 compute-0 sudo[430275]: pam_unix(sudo:session): session closed for user root
Oct 11 02:24:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:24:25 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:24:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:24:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:24:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:24:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:24:25 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 4d567486-1432-467a-b25f-bc0a1ccb2466 does not exist
Oct 11 02:24:25 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 128a9f33-7968-4ddf-b93e-c464ccaf64f2 does not exist
Oct 11 02:24:25 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 5ced5571-69dc-4b97-b178-eb973d89b344 does not exist
Oct 11 02:24:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:24:25 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:24:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:24:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:24:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:24:25 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:24:25 compute-0 ceph-mon[191930]: pgmap v1393: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail; 1.0 MiB/s rd, 32 op/s
Oct 11 02:24:25 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:24:25 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:24:25 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:24:25 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:24:25 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:24:25 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
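The handle_command/audit pairs above are the cephadm mgr module driving the monitor with JSON "prefix" commands; the bare from=/entity= lines are the same audit records echoed to the cluster log. Identical commands can be issued directly with python-rados; a minimal sketch assuming a readable local ceph.conf and client.admin keyring:

    # Issue one of the mon commands seen above via python-rados (sketch).
    import json
    import rados

    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
    cluster.connect()
    try:
        cmd = json.dumps({"prefix": "config generate-minimal-conf"})
        ret, outbuf, outs = cluster.mon_command(cmd, b'')
        print(ret, outbuf.decode())
    finally:
        cluster.shutdown()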
Oct 11 02:24:25 compute-0 sudo[430332]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:24:25 compute-0 sudo[430332]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:25 compute-0 sudo[430332]: pam_unix(sudo:session): session closed for user root
Oct 11 02:24:25 compute-0 sudo[430357]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:24:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:24:25 compute-0 sudo[430357]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:25 compute-0 sudo[430357]: pam_unix(sudo:session): session closed for user root
Oct 11 02:24:25 compute-0 sudo[430382]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:24:25 compute-0 sudo[430382]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:25 compute-0 sudo[430382]: pam_unix(sudo:session): session closed for user root
Oct 11 02:24:25 compute-0 sudo[430407]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:24:25 compute-0 sudo[430407]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:26 compute-0 podman[430469]: 2025-10-11 02:24:26.089470798 +0000 UTC m=+0.087099254 container create 6aa764c8894ca1896abe8b2e2e2c7b18c7c83498966031e8b65be2430585de19 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_austin, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 02:24:26 compute-0 sshd-session[430178]: Connection closed by invalid user debian 121.227.153.123 port 60538 [preauth]
Oct 11 02:24:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1394: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:24:26 compute-0 systemd[1]: Started libpod-conmon-6aa764c8894ca1896abe8b2e2e2c7b18c7c83498966031e8b65be2430585de19.scope.
Oct 11 02:24:26 compute-0 podman[430469]: 2025-10-11 02:24:26.069807663 +0000 UTC m=+0.067436169 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:24:26 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:24:26 compute-0 podman[430469]: 2025-10-11 02:24:26.209637374 +0000 UTC m=+0.207265850 container init 6aa764c8894ca1896abe8b2e2e2c7b18c7c83498966031e8b65be2430585de19 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_austin, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:24:26 compute-0 podman[430469]: 2025-10-11 02:24:26.222072193 +0000 UTC m=+0.219700659 container start 6aa764c8894ca1896abe8b2e2e2c7b18c7c83498966031e8b65be2430585de19 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_austin, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:24:26 compute-0 podman[430469]: 2025-10-11 02:24:26.226169091 +0000 UTC m=+0.223797577 container attach 6aa764c8894ca1896abe8b2e2e2c7b18c7c83498966031e8b65be2430585de19 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_austin, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:24:26 compute-0 busy_austin[430485]: 167 167
Oct 11 02:24:26 compute-0 systemd[1]: libpod-6aa764c8894ca1896abe8b2e2e2c7b18c7c83498966031e8b65be2430585de19.scope: Deactivated successfully.
Oct 11 02:24:26 compute-0 conmon[430485]: conmon 6aa764c8894ca1896abe <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-6aa764c8894ca1896abe8b2e2e2c7b18c7c83498966031e8b65be2430585de19.scope/container/memory.events
Oct 11 02:24:26 compute-0 podman[430490]: 2025-10-11 02:24:26.292918227 +0000 UTC m=+0.039909848 container died 6aa764c8894ca1896abe8b2e2e2c7b18c7c83498966031e8b65be2430585de19 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_austin, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:24:26 compute-0 systemd[1]: var-lib-containers-storage-overlay-27593eb39288543678bbe13cf4dfaeb2bff059fd4ef8afc27bc2f4b1b25ccb71-merged.mount: Deactivated successfully.
Oct 11 02:24:26 compute-0 podman[430490]: 2025-10-11 02:24:26.340008269 +0000 UTC m=+0.086999870 container remove 6aa764c8894ca1896abe8b2e2e2c7b18c7c83498966031e8b65be2430585de19 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_austin, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_REF=reef)
Oct 11 02:24:26 compute-0 systemd[1]: libpod-conmon-6aa764c8894ca1896abe8b2e2e2c7b18c7c83498966031e8b65be2430585de19.scope: Deactivated successfully.
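The short-lived busy_austin container above is cephadm probing the Ceph image before it runs ceph-volume: start a throwaway container, read the uid/gid of the ceph user in the image (the "167 167" printed at 02:24:26), and tear it down, which is exactly the create/init/start/attach/died/remove sequence journald records. A minimal sketch of the same probe, assuming podman is on PATH and the image digest above is already pulled; the use of stat -c '%u %g' /var/lib/ceph as the probe command is an assumption for illustration, not taken from this log:

    import subprocess

    IMAGE = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")

    # Run a throwaway container, print the ceph user's uid/gid, and let
    # --rm clean it up, mirroring the container lifecycle logged above.
    out = subprocess.run(
        ["podman", "run", "--rm", "--entrypoint", "stat", IMAGE,
         "-c", "%u %g", "/var/lib/ceph"],
        capture_output=True, text=True, check=True,
    )
    print(out.stdout.strip())  # expected "167 167", the ceph uid/gid in the image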
Oct 11 02:24:26 compute-0 podman[430513]: 2025-10-11 02:24:26.574559781 +0000 UTC m=+0.065734106 container create ae381f345eaf410b1d53cb72a809136a345fe8fdb709d85baee32f2898af12d0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_maxwell, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 02:24:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:24:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:24:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:24:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:24:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:24:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:24:26 compute-0 podman[430513]: 2025-10-11 02:24:26.550427786 +0000 UTC m=+0.041602141 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:24:26 compute-0 systemd[1]: Started libpod-conmon-ae381f345eaf410b1d53cb72a809136a345fe8fdb709d85baee32f2898af12d0.scope.
Oct 11 02:24:26 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:24:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c04f8d62c80f7571ddc7a90ffad30f03289f9c2b9d7860289e1db81c8c898802/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:24:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c04f8d62c80f7571ddc7a90ffad30f03289f9c2b9d7860289e1db81c8c898802/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:24:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c04f8d62c80f7571ddc7a90ffad30f03289f9c2b9d7860289e1db81c8c898802/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:24:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c04f8d62c80f7571ddc7a90ffad30f03289f9c2b9d7860289e1db81c8c898802/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:24:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c04f8d62c80f7571ddc7a90ffad30f03289f9c2b9d7860289e1db81c8c898802/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:24:26 compute-0 podman[430513]: 2025-10-11 02:24:26.73193283 +0000 UTC m=+0.223107255 container init ae381f345eaf410b1d53cb72a809136a345fe8fdb709d85baee32f2898af12d0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_maxwell, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 02:24:26 compute-0 podman[430513]: 2025-10-11 02:24:26.747537937 +0000 UTC m=+0.238712272 container start ae381f345eaf410b1d53cb72a809136a345fe8fdb709d85baee32f2898af12d0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_maxwell, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:24:26 compute-0 podman[430513]: 2025-10-11 02:24:26.756491687 +0000 UTC m=+0.247666072 container attach ae381f345eaf410b1d53cb72a809136a345fe8fdb709d85baee32f2898af12d0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_maxwell, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:24:27 compute-0 ceph-mon[191930]: pgmap v1394: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:24:27 compute-0 sshd-session[430506]: Invalid user debian from 121.227.153.123 port 60548
Oct 11 02:24:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:24:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1814992677' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:24:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:24:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1814992677' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:24:27 compute-0 sshd-session[430506]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:24:27 compute-0 sshd-session[430506]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:24:27 compute-0 pensive_maxwell[430529]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:24:27 compute-0 pensive_maxwell[430529]: --> relative data size: 1.0
Oct 11 02:24:27 compute-0 pensive_maxwell[430529]: --> All data devices are unavailable
Oct 11 02:24:27 compute-0 systemd[1]: libpod-ae381f345eaf410b1d53cb72a809136a345fe8fdb709d85baee32f2898af12d0.scope: Deactivated successfully.
Oct 11 02:24:27 compute-0 systemd[1]: libpod-ae381f345eaf410b1d53cb72a809136a345fe8fdb709d85baee32f2898af12d0.scope: Consumed 1.131s CPU time.
Oct 11 02:24:28 compute-0 podman[430558]: 2025-10-11 02:24:28.020111845 +0000 UTC m=+0.044831612 container died ae381f345eaf410b1d53cb72a809136a345fe8fdb709d85baee32f2898af12d0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_maxwell, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:24:28 compute-0 systemd[1]: var-lib-containers-storage-overlay-c04f8d62c80f7571ddc7a90ffad30f03289f9c2b9d7860289e1db81c8c898802-merged.mount: Deactivated successfully.
Oct 11 02:24:28 compute-0 podman[430558]: 2025-10-11 02:24:28.095513112 +0000 UTC m=+0.120232849 container remove ae381f345eaf410b1d53cb72a809136a345fe8fdb709d85baee32f2898af12d0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_maxwell, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:24:28 compute-0 systemd[1]: libpod-conmon-ae381f345eaf410b1d53cb72a809136a345fe8fdb709d85baee32f2898af12d0.scope: Deactivated successfully.
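The "--> All data devices are unavailable" result above is ceph-volume declining to re-prepare devices: the three LVs handed to lvm batch already carry ceph.* LV tags from existing OSDs, as the lvm list output further below confirms. A quick way to inspect what the batch filter sees, sketched against lvm2's JSON reporting (the field selection here is illustrative):

    import json
    import subprocess

    # List LV tags the way ceph-volume inspects them; an LV that already
    # carries ceph.* tags is rejected by `lvm batch` as unavailable.
    out = subprocess.run(
        ["lvs", "-o", "lv_name,vg_name,lv_tags", "--reportformat", "json"],
        capture_output=True, text=True, check=True,
    )
    for lv in json.loads(out.stdout)["report"][0]["lv"]:
        if "ceph.osd_id=" in lv["lv_tags"]:
            print(f'{lv["vg_name"]}/{lv["lv_name"]} is already an OSD '
                  f'(tags: {lv["lv_tags"][:60]}...)')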
Oct 11 02:24:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1395: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:24:28 compute-0 sudo[430407]: pam_unix(sudo:session): session closed for user root
Oct 11 02:24:28 compute-0 sudo[430573]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:24:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1814992677' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:24:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1814992677' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:24:28 compute-0 sudo[430573]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:28 compute-0 sudo[430573]: pam_unix(sudo:session): session closed for user root
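The client.openstack audit entries just above, a cluster df plus an osd pool get-quota on the volumes pool, are the periodic capacity poll an OpenStack Ceph client performs. The same pair of queries from the command line, sketched in Python; the client.openstack keyring is assumed to be readable by the caller:

    import json
    import subprocess

    def mon_query(*args):
        # Illustrative helper: run a ceph CLI query as client.openstack
        # and decode the JSON reply.
        out = subprocess.run(
            ["ceph", "--id", "openstack", *args, "--format", "json"],
            capture_output=True, text=True, check=True,
        )
        return json.loads(out.stdout)

    df = mon_query("df")
    quota = mon_query("osd", "pool", "get-quota", "volumes")
    print(df["stats"]["total_avail_bytes"], quota["quota_max_bytes"])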
Oct 11 02:24:28 compute-0 sudo[430598]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:24:28 compute-0 sudo[430598]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:28 compute-0 sudo[430598]: pam_unix(sudo:session): session closed for user root
Oct 11 02:24:28 compute-0 sudo[430623]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:24:28 compute-0 sudo[430623]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:28 compute-0 sudo[430623]: pam_unix(sudo:session): session closed for user root
Oct 11 02:24:28 compute-0 sudo[430648]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:24:28 compute-0 sudo[430648]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:28 compute-0 nova_compute[356901]: 2025-10-11 02:24:28.642 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:29 compute-0 podman[430714]: 2025-10-11 02:24:29.068073421 +0000 UTC m=+0.085919787 container create c8bfc2cafa28a8176df2db0363d62e733ce578c2467c3bf76a247a90373a7538 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_sanderson, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:24:29 compute-0 podman[430714]: 2025-10-11 02:24:29.019065379 +0000 UTC m=+0.036911815 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:24:29 compute-0 systemd[1]: Started libpod-conmon-c8bfc2cafa28a8176df2db0363d62e733ce578c2467c3bf76a247a90373a7538.scope.
Oct 11 02:24:29 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:24:29 compute-0 podman[430714]: 2025-10-11 02:24:29.215033044 +0000 UTC m=+0.232879420 container init c8bfc2cafa28a8176df2db0363d62e733ce578c2467c3bf76a247a90373a7538 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_sanderson, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:24:29 compute-0 podman[430714]: 2025-10-11 02:24:29.232000125 +0000 UTC m=+0.249846511 container start c8bfc2cafa28a8176df2db0363d62e733ce578c2467c3bf76a247a90373a7538 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_sanderson, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:24:29 compute-0 podman[430714]: 2025-10-11 02:24:29.238164617 +0000 UTC m=+0.256011003 container attach c8bfc2cafa28a8176df2db0363d62e733ce578c2467c3bf76a247a90373a7538 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_sanderson, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
Oct 11 02:24:29 compute-0 eager_sanderson[430730]: 167 167
Oct 11 02:24:29 compute-0 systemd[1]: libpod-c8bfc2cafa28a8176df2db0363d62e733ce578c2467c3bf76a247a90373a7538.scope: Deactivated successfully.
Oct 11 02:24:29 compute-0 podman[430714]: 2025-10-11 02:24:29.243157184 +0000 UTC m=+0.261003560 container died c8bfc2cafa28a8176df2db0363d62e733ce578c2467c3bf76a247a90373a7538 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_sanderson, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.vendor=CentOS)
Oct 11 02:24:29 compute-0 ceph-mon[191930]: pgmap v1395: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:24:29 compute-0 systemd[1]: var-lib-containers-storage-overlay-f6f72f061f491c6b3152bc68d80b8087e1bd63cad53e7aa3a278238466d6122d-merged.mount: Deactivated successfully.
Oct 11 02:24:29 compute-0 podman[430714]: 2025-10-11 02:24:29.302976923 +0000 UTC m=+0.320823279 container remove c8bfc2cafa28a8176df2db0363d62e733ce578c2467c3bf76a247a90373a7538 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_sanderson, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2)
Oct 11 02:24:29 compute-0 systemd[1]: libpod-conmon-c8bfc2cafa28a8176df2db0363d62e733ce578c2467c3bf76a247a90373a7538.scope: Deactivated successfully.
Oct 11 02:24:29 compute-0 nova_compute[356901]: 2025-10-11 02:24:29.412 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:29 compute-0 podman[430754]: 2025-10-11 02:24:29.601418462 +0000 UTC m=+0.116950517 container create 999e79204efb739d2e55190e58b1f3b7aa00ede8444edb723b8911cdedefa7ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_davinci, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:24:29 compute-0 podman[430754]: 2025-10-11 02:24:29.543036457 +0000 UTC m=+0.058568562 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:24:29 compute-0 sshd-session[430506]: Failed password for invalid user debian from 121.227.153.123 port 60548 ssh2
Oct 11 02:24:29 compute-0 systemd[1]: Started libpod-conmon-999e79204efb739d2e55190e58b1f3b7aa00ede8444edb723b8911cdedefa7ac.scope.
Oct 11 02:24:29 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:24:29 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6bcb0af82d24b0c409b745df0c7163be8146cf99fae7bea40e794b3e5599b87f/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:24:29 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6bcb0af82d24b0c409b745df0c7163be8146cf99fae7bea40e794b3e5599b87f/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:24:29 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6bcb0af82d24b0c409b745df0c7163be8146cf99fae7bea40e794b3e5599b87f/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:24:29 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6bcb0af82d24b0c409b745df0c7163be8146cf99fae7bea40e794b3e5599b87f/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:24:29 compute-0 podman[430754]: 2025-10-11 02:24:29.742830842 +0000 UTC m=+0.258362857 container init 999e79204efb739d2e55190e58b1f3b7aa00ede8444edb723b8911cdedefa7ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_davinci, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 02:24:29 compute-0 podman[157119]: time="2025-10-11T02:24:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:24:29 compute-0 podman[430754]: 2025-10-11 02:24:29.762357712 +0000 UTC m=+0.277889767 container start 999e79204efb739d2e55190e58b1f3b7aa00ede8444edb723b8911cdedefa7ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_davinci, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:24:29 compute-0 podman[430754]: 2025-10-11 02:24:29.771803078 +0000 UTC m=+0.287335093 container attach 999e79204efb739d2e55190e58b1f3b7aa00ede8444edb723b8911cdedefa7ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_davinci, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0)
Oct 11 02:24:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:24:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47845 "" "Go-http-client/1.1"
Oct 11 02:24:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:24:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9468 "" "Go-http-client/1.1"
Oct 11 02:24:30 compute-0 sshd-session[430506]: Connection closed by invalid user debian 121.227.153.123 port 60548 [preauth]
Oct 11 02:24:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1396: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:24:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]: {
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:     "0": [
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:         {
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "devices": [
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "/dev/loop3"
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             ],
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "lv_name": "ceph_lv0",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "lv_size": "21470642176",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "name": "ceph_lv0",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "tags": {
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.cluster_name": "ceph",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.crush_device_class": "",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.encrypted": "0",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.osd_id": "0",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.type": "block",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.vdo": "0"
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             },
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "type": "block",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "vg_name": "ceph_vg0"
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:         }
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:     ],
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:     "1": [
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:         {
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "devices": [
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "/dev/loop4"
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             ],
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "lv_name": "ceph_lv1",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "lv_size": "21470642176",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "name": "ceph_lv1",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "tags": {
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.cluster_name": "ceph",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.crush_device_class": "",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.encrypted": "0",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.osd_id": "1",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.type": "block",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.vdo": "0"
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             },
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "type": "block",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "vg_name": "ceph_vg1"
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:         }
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:     ],
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:     "2": [
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:         {
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "devices": [
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "/dev/loop5"
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             ],
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "lv_name": "ceph_lv2",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "lv_size": "21470642176",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "name": "ceph_lv2",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "tags": {
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.cluster_name": "ceph",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.crush_device_class": "",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.encrypted": "0",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.osd_id": "2",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.type": "block",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:                 "ceph.vdo": "0"
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             },
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "type": "block",
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:             "vg_name": "ceph_vg2"
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:         }
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]:     ]
Oct 11 02:24:30 compute-0 vigorous_davinci[430768]: }
Oct 11 02:24:30 compute-0 systemd[1]: libpod-999e79204efb739d2e55190e58b1f3b7aa00ede8444edb723b8911cdedefa7ac.scope: Deactivated successfully.
Oct 11 02:24:30 compute-0 podman[430754]: 2025-10-11 02:24:30.661035083 +0000 UTC m=+1.176567138 container died 999e79204efb739d2e55190e58b1f3b7aa00ede8444edb723b8911cdedefa7ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_davinci, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.schema-version=1.0)
Oct 11 02:24:30 compute-0 systemd[1]: var-lib-containers-storage-overlay-6bcb0af82d24b0c409b745df0c7163be8146cf99fae7bea40e794b3e5599b87f-merged.mount: Deactivated successfully.
Oct 11 02:24:30 compute-0 podman[430754]: 2025-10-11 02:24:30.750025615 +0000 UTC m=+1.265557630 container remove 999e79204efb739d2e55190e58b1f3b7aa00ede8444edb723b8911cdedefa7ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_davinci, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS)
Oct 11 02:24:30 compute-0 systemd[1]: libpod-conmon-999e79204efb739d2e55190e58b1f3b7aa00ede8444edb723b8911cdedefa7ac.scope: Deactivated successfully.
Oct 11 02:24:30 compute-0 sudo[430648]: pam_unix(sudo:session): session closed for user root
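The JSON block printed by vigorous_davinci is the reply to the ceph-volume lvm list --format json command issued at 02:24:28: a map of OSD id to LV records, with the ceph.* metadata present both as a flat lv_tags string and as a parsed tags object. A short consumer of that structure, as a sketch; pipe the captured JSON to it on stdin:

    import json
    import sys

    # Read the `ceph-volume lvm list --format json` payload from stdin and
    # print one summary line per OSD logical volume.
    lvm_list = json.load(sys.stdin)
    for osd_id in sorted(lvm_list, key=int):
        for lv in lvm_list[osd_id]:
            tags = lv["tags"]
            print(f'osd.{osd_id}: {lv["lv_path"]} '
                  f'on {",".join(lv["devices"])} '
                  f'osd_fsid={tags["ceph.osd_fsid"]} '
                  f'encrypted={tags["ceph.encrypted"]}')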
Oct 11 02:24:30 compute-0 sudo[430792]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:24:30 compute-0 sudo[430792]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:30 compute-0 sudo[430792]: pam_unix(sudo:session): session closed for user root
Oct 11 02:24:31 compute-0 sudo[430817]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:24:31 compute-0 sudo[430817]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:31 compute-0 sudo[430817]: pam_unix(sudo:session): session closed for user root
Oct 11 02:24:31 compute-0 sudo[430842]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:24:31 compute-0 sudo[430842]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:31 compute-0 sudo[430842]: pam_unix(sudo:session): session closed for user root
Oct 11 02:24:31 compute-0 sudo[430867]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:24:31 compute-0 sudo[430867]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:31 compute-0 sshd-session[430776]: Invalid user debian from 121.227.153.123 port 35638
Oct 11 02:24:31 compute-0 ceph-mon[191930]: pgmap v1396: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:24:31 compute-0 openstack_network_exporter[374316]: ERROR   02:24:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:24:31 compute-0 openstack_network_exporter[374316]: ERROR   02:24:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:24:31 compute-0 openstack_network_exporter[374316]: ERROR   02:24:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:24:31 compute-0 openstack_network_exporter[374316]: ERROR   02:24:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:24:31 compute-0 openstack_network_exporter[374316]: ERROR   02:24:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:24:31 compute-0 sshd-session[430776]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:24:31 compute-0 sshd-session[430776]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:24:31 compute-0 podman[430926]: 2025-10-11 02:24:31.881955476 +0000 UTC m=+0.093850955 container create f8e18d9bb7feb1184c6522f5a336ebc3d64674f6645ffcbed5f1cd6a4d8ce7a4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_antonelli, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:24:31 compute-0 podman[430926]: 2025-10-11 02:24:31.841062038 +0000 UTC m=+0.052957537 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:24:31 compute-0 systemd[1]: Started libpod-conmon-f8e18d9bb7feb1184c6522f5a336ebc3d64674f6645ffcbed5f1cd6a4d8ce7a4.scope.
Oct 11 02:24:31 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:24:32 compute-0 podman[430926]: 2025-10-11 02:24:32.026391111 +0000 UTC m=+0.238286640 container init f8e18d9bb7feb1184c6522f5a336ebc3d64674f6645ffcbed5f1cd6a4d8ce7a4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_antonelli, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:24:32 compute-0 podman[430926]: 2025-10-11 02:24:32.042617478 +0000 UTC m=+0.254512937 container start f8e18d9bb7feb1184c6522f5a336ebc3d64674f6645ffcbed5f1cd6a4d8ce7a4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_antonelli, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:24:32 compute-0 podman[430926]: 2025-10-11 02:24:32.048997007 +0000 UTC m=+0.260892496 container attach f8e18d9bb7feb1184c6522f5a336ebc3d64674f6645ffcbed5f1cd6a4d8ce7a4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_antonelli, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 02:24:32 compute-0 affectionate_antonelli[430942]: 167 167
Oct 11 02:24:32 compute-0 systemd[1]: libpod-f8e18d9bb7feb1184c6522f5a336ebc3d64674f6645ffcbed5f1cd6a4d8ce7a4.scope: Deactivated successfully.
Oct 11 02:24:32 compute-0 podman[430926]: 2025-10-11 02:24:32.05869598 +0000 UTC m=+0.270591479 container died f8e18d9bb7feb1184c6522f5a336ebc3d64674f6645ffcbed5f1cd6a4d8ce7a4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_antonelli, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 02:24:32 compute-0 systemd[1]: var-lib-containers-storage-overlay-3a12e1432dca28cb8872d01f58fe41a8c97fdf643e2ccbae90db8835857881ed-merged.mount: Deactivated successfully.
Oct 11 02:24:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1397: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:24:32 compute-0 podman[430926]: 2025-10-11 02:24:32.148359113 +0000 UTC m=+0.360254582 container remove f8e18d9bb7feb1184c6522f5a336ebc3d64674f6645ffcbed5f1cd6a4d8ce7a4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=affectionate_antonelli, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 02:24:32 compute-0 systemd[1]: libpod-conmon-f8e18d9bb7feb1184c6522f5a336ebc3d64674f6645ffcbed5f1cd6a4d8ce7a4.scope: Deactivated successfully.
Oct 11 02:24:32 compute-0 podman[430964]: 2025-10-11 02:24:32.365781089 +0000 UTC m=+0.059761419 container create 69e0929727aa52a7b06a90f226d9de3afb5e07483c6dfe1b6c5f16bbb7c1e1e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_satoshi, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:24:32 compute-0 systemd[1]: Started libpod-conmon-69e0929727aa52a7b06a90f226d9de3afb5e07483c6dfe1b6c5f16bbb7c1e1e5.scope.
Oct 11 02:24:32 compute-0 podman[430964]: 2025-10-11 02:24:32.347031783 +0000 UTC m=+0.041012133 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:24:32 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:24:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cc15bad8db08d0966b0508c770ddabe2d68c9aacf0dd4d1f28eec260592bd46f/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:24:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cc15bad8db08d0966b0508c770ddabe2d68c9aacf0dd4d1f28eec260592bd46f/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:24:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cc15bad8db08d0966b0508c770ddabe2d68c9aacf0dd4d1f28eec260592bd46f/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:24:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cc15bad8db08d0966b0508c770ddabe2d68c9aacf0dd4d1f28eec260592bd46f/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:24:32 compute-0 podman[430964]: 2025-10-11 02:24:32.515159877 +0000 UTC m=+0.209140207 container init 69e0929727aa52a7b06a90f226d9de3afb5e07483c6dfe1b6c5f16bbb7c1e1e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_satoshi, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Oct 11 02:24:32 compute-0 podman[430964]: 2025-10-11 02:24:32.528061291 +0000 UTC m=+0.222041621 container start 69e0929727aa52a7b06a90f226d9de3afb5e07483c6dfe1b6c5f16bbb7c1e1e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_satoshi, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:24:32 compute-0 podman[430964]: 2025-10-11 02:24:32.534671177 +0000 UTC m=+0.228651517 container attach 69e0929727aa52a7b06a90f226d9de3afb5e07483c6dfe1b6c5f16bbb7c1e1e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_satoshi, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:24:32 compute-0 sshd-session[430776]: Failed password for invalid user debian from 121.227.153.123 port 35638 ssh2
Oct 11 02:24:33 compute-0 ceph-mon[191930]: pgmap v1397: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]: {
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:         "osd_id": 1,
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:         "type": "bluestore"
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:     },
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:         "osd_id": 2,
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:         "type": "bluestore"
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:     },
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:         "osd_id": 0,
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:         "type": "bluestore"
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]:     }
Oct 11 02:24:33 compute-0 crazy_satoshi[430981]: }
Oct 11 02:24:33 compute-0 systemd[1]: libpod-69e0929727aa52a7b06a90f226d9de3afb5e07483c6dfe1b6c5f16bbb7c1e1e5.scope: Deactivated successfully.
Oct 11 02:24:33 compute-0 systemd[1]: libpod-69e0929727aa52a7b06a90f226d9de3afb5e07483c6dfe1b6c5f16bbb7c1e1e5.scope: Consumed 1.084s CPU time.
Oct 11 02:24:33 compute-0 podman[430964]: 2025-10-11 02:24:33.630779499 +0000 UTC m=+1.324759839 container died 69e0929727aa52a7b06a90f226d9de3afb5e07483c6dfe1b6c5f16bbb7c1e1e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_satoshi, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 02:24:33 compute-0 nova_compute[356901]: 2025-10-11 02:24:33.656 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:33 compute-0 systemd[1]: var-lib-containers-storage-overlay-cc15bad8db08d0966b0508c770ddabe2d68c9aacf0dd4d1f28eec260592bd46f-merged.mount: Deactivated successfully.
Oct 11 02:24:33 compute-0 podman[430964]: 2025-10-11 02:24:33.721388161 +0000 UTC m=+1.415368481 container remove 69e0929727aa52a7b06a90f226d9de3afb5e07483c6dfe1b6c5f16bbb7c1e1e5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_satoshi, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True)
Oct 11 02:24:33 compute-0 sudo[430867]: pam_unix(sudo:session): session closed for user root
Oct 11 02:24:33 compute-0 systemd[1]: libpod-conmon-69e0929727aa52a7b06a90f226d9de3afb5e07483c6dfe1b6c5f16bbb7c1e1e5.scope: Deactivated successfully.
Oct 11 02:24:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:24:33 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:24:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:24:33 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:24:33 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev cf14fdd4-fbc0-448e-ba5d-7626836d6b2b does not exist
Oct 11 02:24:33 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 3a9a855e-fdc8-4db1-84fe-d4eb1a149794 does not exist
Oct 11 02:24:33 compute-0 sshd-session[430776]: Connection closed by invalid user debian 121.227.153.123 port 35638 [preauth]
Oct 11 02:24:33 compute-0 podman[431015]: 2025-10-11 02:24:33.821430978 +0000 UTC m=+0.150105583 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:24:33 compute-0 podman[431023]: 2025-10-11 02:24:33.835866709 +0000 UTC m=+0.160722664 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, managed_by=edpm_ansible, org.label-schema.build-date=20251007, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:24:33 compute-0 podman[431034]: 2025-10-11 02:24:33.843468847 +0000 UTC m=+0.145895621 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 02:24:33 compute-0 podman[431017]: 2025-10-11 02:24:33.861904413 +0000 UTC m=+0.195362317 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_controller, container_name=ovn_controller, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 02:24:33 compute-0 sudo[431083]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:24:33 compute-0 sudo[431083]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:33 compute-0 sudo[431083]: pam_unix(sudo:session): session closed for user root
Oct 11 02:24:33 compute-0 sudo[431129]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:24:33 compute-0 sudo[431129]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:24:33 compute-0 sudo[431129]: pam_unix(sudo:session): session closed for user root
Oct 11 02:24:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1398: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:24:34 compute-0 nova_compute[356901]: 2025-10-11 02:24:34.415 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:34 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:24:34 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:24:34 compute-0 ceph-mon[191930]: pgmap v1398: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:24:34 compute-0 ovn_controller[88370]: 2025-10-11T02:24:34Z|00044|memory_trim|INFO|Detected inactivity (last active 30002 ms ago): trimming memory
Oct 11 02:24:35 compute-0 sshd-session[431154]: Invalid user debian from 121.227.153.123 port 35654
Oct 11 02:24:35 compute-0 sshd-session[431154]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:24:35 compute-0 sshd-session[431154]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:24:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:24:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1399: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:24:37 compute-0 ceph-mon[191930]: pgmap v1399: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:24:37 compute-0 podman[431157]: 2025-10-11 02:24:37.238217068 +0000 UTC m=+0.122521001 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid)
Oct 11 02:24:37 compute-0 sshd-session[431154]: Failed password for invalid user debian from 121.227.153.123 port 35654 ssh2
Oct 11 02:24:37 compute-0 podman[431156]: 2025-10-11 02:24:37.267802552 +0000 UTC m=+0.149064300 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, container_name=multipathd, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
Oct 11 02:24:37 compute-0 sshd-session[431154]: Connection closed by invalid user debian 121.227.153.123 port 35654 [preauth]
Oct 11 02:24:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1400: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:24:38 compute-0 nova_compute[356901]: 2025-10-11 02:24:38.662 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:38 compute-0 sshd-session[431192]: Invalid user debian from 121.227.153.123 port 35660
Oct 11 02:24:38 compute-0 sshd-session[431192]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:24:38 compute-0 sshd-session[431192]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:24:39 compute-0 ceph-mon[191930]: pgmap v1400: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:24:39 compute-0 nova_compute[356901]: 2025-10-11 02:24:39.416 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1401: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:24:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:24:41 compute-0 sshd-session[431192]: Failed password for invalid user debian from 121.227.153.123 port 35660 ssh2
Oct 11 02:24:41 compute-0 ovn_controller[88370]: 2025-10-11T02:24:41Z|00008|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:16:ee:dc 192.168.0.225
Oct 11 02:24:41 compute-0 ovn_controller[88370]: 2025-10-11T02:24:41Z|00009|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:16:ee:dc 192.168.0.225
Oct 11 02:24:41 compute-0 ceph-mon[191930]: pgmap v1401: 321 pgs: 321 active+clean; 172 MiB data, 301 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:24:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1402: 321 pgs: 321 active+clean; 173 MiB data, 303 MiB used, 60 GiB / 60 GiB avail; 26 KiB/s rd, 155 KiB/s wr, 8 op/s
Oct 11 02:24:43 compute-0 sshd-session[431192]: Connection closed by invalid user debian 121.227.153.123 port 35660 [preauth]
Oct 11 02:24:43 compute-0 ceph-mon[191930]: pgmap v1402: 321 pgs: 321 active+clean; 173 MiB data, 303 MiB used, 60 GiB / 60 GiB avail; 26 KiB/s rd, 155 KiB/s wr, 8 op/s
Oct 11 02:24:43 compute-0 nova_compute[356901]: 2025-10-11 02:24:43.666 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1403: 321 pgs: 321 active+clean; 199 MiB data, 319 MiB used, 60 GiB / 60 GiB avail; 116 KiB/s rd, 1.5 MiB/s wr, 40 op/s
Oct 11 02:24:44 compute-0 sshd-session[431194]: Invalid user debian from 121.227.153.123 port 53802
Oct 11 02:24:44 compute-0 nova_compute[356901]: 2025-10-11 02:24:44.419 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:44 compute-0 sshd-session[431194]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:24:44 compute-0 sshd-session[431194]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:24:45 compute-0 ceph-mon[191930]: pgmap v1403: 321 pgs: 321 active+clean; 199 MiB data, 319 MiB used, 60 GiB / 60 GiB avail; 116 KiB/s rd, 1.5 MiB/s wr, 40 op/s
Oct 11 02:24:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:24:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1404: 321 pgs: 321 active+clean; 201 MiB data, 319 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 57 op/s
Oct 11 02:24:46 compute-0 sshd-session[431194]: Failed password for invalid user debian from 121.227.153.123 port 53802 ssh2
Oct 11 02:24:46 compute-0 sshd-session[431194]: Connection closed by invalid user debian 121.227.153.123 port 53802 [preauth]
Oct 11 02:24:47 compute-0 ceph-mon[191930]: pgmap v1404: 321 pgs: 321 active+clean; 201 MiB data, 319 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 57 op/s
Oct 11 02:24:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1405: 321 pgs: 321 active+clean; 201 MiB data, 319 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 57 op/s
Oct 11 02:24:48 compute-0 sshd-session[431196]: Invalid user debian from 121.227.153.123 port 53814
Oct 11 02:24:48 compute-0 sshd-session[431196]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:24:48 compute-0 sshd-session[431196]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:24:48 compute-0 podman[431199]: 2025-10-11 02:24:48.667478278 +0000 UTC m=+0.117572853 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.33.7, maintainer=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.expose-services=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=minimal rhel9, config_id=edpm, container_name=openstack_network_exporter, architecture=x86_64, distribution-scope=public, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.6, managed_by=edpm_ansible, name=ubi9-minimal, com.redhat.component=ubi9-minimal-container, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., build-date=2025-08-20T13:12:41, vendor=Red Hat, Inc., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, release=1755695350, vcs-type=git)
Oct 11 02:24:48 compute-0 nova_compute[356901]: 2025-10-11 02:24:48.669 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:48 compute-0 podman[431198]: 2025-10-11 02:24:48.683267085 +0000 UTC m=+0.143521044 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_id=edpm, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.vendor=CentOS)
Oct 11 02:24:48 compute-0 podman[431200]: 2025-10-11 02:24:48.697943334 +0000 UTC m=+0.140067480 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:24:48 compute-0 nova_compute[356901]: 2025-10-11 02:24:48.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:24:48 compute-0 nova_compute[356901]: 2025-10-11 02:24:48.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:24:49 compute-0 ceph-mon[191930]: pgmap v1405: 321 pgs: 321 active+clean; 201 MiB data, 319 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 57 op/s
Oct 11 02:24:49 compute-0 nova_compute[356901]: 2025-10-11 02:24:49.423 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1406: 321 pgs: 321 active+clean; 201 MiB data, 319 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 57 op/s
Oct 11 02:24:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:24:51 compute-0 sshd-session[431196]: Failed password for invalid user debian from 121.227.153.123 port 53814 ssh2
Oct 11 02:24:51 compute-0 ceph-mon[191930]: pgmap v1406: 321 pgs: 321 active+clean; 201 MiB data, 319 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 57 op/s
Oct 11 02:24:51 compute-0 nova_compute[356901]: 2025-10-11 02:24:51.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:24:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1407: 321 pgs: 321 active+clean; 201 MiB data, 319 MiB used, 60 GiB / 60 GiB avail; 167 KiB/s rd, 1.5 MiB/s wr, 59 op/s
Oct 11 02:24:52 compute-0 sshd-session[431196]: Connection closed by invalid user debian 121.227.153.123 port 53814 [preauth]
Oct 11 02:24:53 compute-0 ceph-mon[191930]: pgmap v1407: 321 pgs: 321 active+clean; 201 MiB data, 319 MiB used, 60 GiB / 60 GiB avail; 167 KiB/s rd, 1.5 MiB/s wr, 59 op/s
Oct 11 02:24:53 compute-0 nova_compute[356901]: 2025-10-11 02:24:53.675 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:53 compute-0 nova_compute[356901]: 2025-10-11 02:24:53.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:24:54 compute-0 sshd-session[431257]: Invalid user debian from 121.227.153.123 port 40850
Oct 11 02:24:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1408: 321 pgs: 321 active+clean; 201 MiB data, 319 MiB used, 60 GiB / 60 GiB avail; 163 KiB/s rd, 1.3 MiB/s wr, 88 op/s
Oct 11 02:24:54 compute-0 podman[431259]: 2025-10-11 02:24:54.21311274 +0000 UTC m=+0.144638871 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.component=ubi9-container, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.29.0, io.openshift.tags=base rhel9, name=ubi9, build-date=2024-09-18T21:23:30, vendor=Red Hat, Inc., container_name=kepler, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-type=git, release=1214.1726694543, config_id=edpm, version=9.4, io.openshift.expose-services=, architecture=x86_64, managed_by=edpm_ansible, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.k8s.display-name=Red Hat Universal Base Image 9, maintainer=Red Hat, Inc., release-0.7.12=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 02:24:54 compute-0 sshd-session[431257]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:24:54 compute-0 sshd-session[431257]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:24:54 compute-0 nova_compute[356901]: 2025-10-11 02:24:54.427 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:24:54.851 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:24:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:24:54.851 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:24:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:24:54.852 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:24:55 compute-0 ceph-mon[191930]: pgmap v1408: 321 pgs: 321 active+clean; 201 MiB data, 319 MiB used, 60 GiB / 60 GiB avail; 163 KiB/s rd, 1.3 MiB/s wr, 88 op/s
Oct 11 02:24:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:24:55 compute-0 nova_compute[356901]: 2025-10-11 02:24:55.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:24:55 compute-0 nova_compute[356901]: 2025-10-11 02:24:55.936 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:24:55 compute-0 nova_compute[356901]: 2025-10-11 02:24:55.937 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:24:55 compute-0 nova_compute[356901]: 2025-10-11 02:24:55.937 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:24:55 compute-0 nova_compute[356901]: 2025-10-11 02:24:55.938 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:24:55 compute-0 nova_compute[356901]: 2025-10-11 02:24:55.939 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:24:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1409: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 83 KiB/s rd, 31 KiB/s wr, 73 op/s
Oct 11 02:24:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:24:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/124982170' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:24:56 compute-0 nova_compute[356901]: 2025-10-11 02:24:56.484 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.545s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:24:56 compute-0 sshd-session[431257]: Failed password for invalid user debian from 121.227.153.123 port 40850 ssh2
Oct 11 02:24:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:24:56
Oct 11 02:24:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:24:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:24:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['volumes', 'default.rgw.meta', 'backups', '.mgr', '.rgw.root', 'images', 'cephfs.cephfs.data', 'cephfs.cephfs.meta', 'default.rgw.log', 'default.rgw.control', 'vms']
Oct 11 02:24:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:24:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:24:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:24:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:24:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:24:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:24:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:24:56 compute-0 nova_compute[356901]: 2025-10-11 02:24:56.621 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:24:56 compute-0 nova_compute[356901]: 2025-10-11 02:24:56.622 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:24:56 compute-0 nova_compute[356901]: 2025-10-11 02:24:56.622 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:24:56 compute-0 nova_compute[356901]: 2025-10-11 02:24:56.631 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:24:56 compute-0 nova_compute[356901]: 2025-10-11 02:24:56.631 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:24:56 compute-0 nova_compute[356901]: 2025-10-11 02:24:56.632 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:24:56 compute-0 nova_compute[356901]: 2025-10-11 02:24:56.640 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:24:56 compute-0 nova_compute[356901]: 2025-10-11 02:24:56.641 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:24:56 compute-0 nova_compute[356901]: 2025-10-11 02:24:56.642 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:24:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:24:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:24:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:24:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:24:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:24:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:24:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:24:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:24:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:24:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:24:57 compute-0 nova_compute[356901]: 2025-10-11 02:24:57.289 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:24:57 compute-0 nova_compute[356901]: 2025-10-11 02:24:57.291 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3476MB free_disk=59.88883590698242GB free_vcpus=5 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:24:57 compute-0 nova_compute[356901]: 2025-10-11 02:24:57.291 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:24:57 compute-0 nova_compute[356901]: 2025-10-11 02:24:57.292 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:24:57 compute-0 ceph-mon[191930]: pgmap v1409: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 83 KiB/s rd, 31 KiB/s wr, 73 op/s
Oct 11 02:24:57 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/124982170' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:24:57 compute-0 nova_compute[356901]: 2025-10-11 02:24:57.418 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:24:57 compute-0 nova_compute[356901]: 2025-10-11 02:24:57.419 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance d60d7ea1-5d00-4902-90e6-3ae67eb09a78 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:24:57 compute-0 nova_compute[356901]: 2025-10-11 02:24:57.419 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 7513b93e-d2b8-4ae0-8f1c-3df190945259 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:24:57 compute-0 nova_compute[356901]: 2025-10-11 02:24:57.420 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 3 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:24:57 compute-0 nova_compute[356901]: 2025-10-11 02:24:57.420 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=2048MB phys_disk=59GB used_disk=6GB total_vcpus=8 used_vcpus=3 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:24:57 compute-0 nova_compute[356901]: 2025-10-11 02:24:57.507 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:24:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:24:57 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2710883428' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:24:58 compute-0 nova_compute[356901]: 2025-10-11 02:24:58.028 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.521s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:24:58 compute-0 nova_compute[356901]: 2025-10-11 02:24:58.042 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:24:58 compute-0 nova_compute[356901]: 2025-10-11 02:24:58.064 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:24:58 compute-0 nova_compute[356901]: 2025-10-11 02:24:58.089 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:24:58 compute-0 nova_compute[356901]: 2025-10-11 02:24:58.089 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.797s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:24:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1410: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 8.7 KiB/s wr, 59 op/s
Oct 11 02:24:58 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2710883428' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:24:58 compute-0 sshd-session[431257]: Connection closed by invalid user debian 121.227.153.123 port 40850 [preauth]
Oct 11 02:24:58 compute-0 nova_compute[356901]: 2025-10-11 02:24:58.681 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:59 compute-0 nova_compute[356901]: 2025-10-11 02:24:59.090 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:24:59 compute-0 nova_compute[356901]: 2025-10-11 02:24:59.091 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:24:59 compute-0 nova_compute[356901]: 2025-10-11 02:24:59.093 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:24:59 compute-0 ceph-mon[191930]: pgmap v1410: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 8.7 KiB/s wr, 59 op/s
Oct 11 02:24:59 compute-0 nova_compute[356901]: 2025-10-11 02:24:59.430 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:24:59 compute-0 podman[157119]: time="2025-10-11T02:24:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:24:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:24:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:24:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:24:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9056 "" "Go-http-client/1.1"
Oct 11 02:24:59 compute-0 nova_compute[356901]: 2025-10-11 02:24:59.788 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:24:59 compute-0 nova_compute[356901]: 2025-10-11 02:24:59.789 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:24:59 compute-0 nova_compute[356901]: 2025-10-11 02:24:59.789 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:24:59 compute-0 nova_compute[356901]: 2025-10-11 02:24:59.790 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:24:59 compute-0 sshd-session[431322]: Invalid user debian from 121.227.153.123 port 40860
Oct 11 02:25:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1411: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:25:00 compute-0 sshd-session[431322]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:25:00 compute-0 sshd-session[431322]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:25:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:25:01 compute-0 nova_compute[356901]: 2025-10-11 02:25:01.210 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:25:01 compute-0 nova_compute[356901]: 2025-10-11 02:25:01.227 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:25:01 compute-0 nova_compute[356901]: 2025-10-11 02:25:01.227 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:25:01 compute-0 nova_compute[356901]: 2025-10-11 02:25:01.228 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:25:01 compute-0 nova_compute[356901]: 2025-10-11 02:25:01.229 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:25:01 compute-0 nova_compute[356901]: 2025-10-11 02:25:01.230 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:25:01 compute-0 nova_compute[356901]: 2025-10-11 02:25:01.230 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:25:01 compute-0 ceph-mon[191930]: pgmap v1411: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:25:01 compute-0 openstack_network_exporter[374316]: ERROR   02:25:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:25:01 compute-0 openstack_network_exporter[374316]: ERROR   02:25:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:25:01 compute-0 openstack_network_exporter[374316]: ERROR   02:25:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:25:01 compute-0 openstack_network_exporter[374316]: ERROR   02:25:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:25:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1412: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:25:02 compute-0 ceph-mon[191930]: pgmap v1412: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:25:02 compute-0 sshd-session[431322]: Failed password for invalid user debian from 121.227.153.123 port 40860 ssh2
Oct 11 02:25:03 compute-0 nova_compute[356901]: 2025-10-11 02:25:03.688 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1413: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 0 B/s wr, 57 op/s
Oct 11 02:25:04 compute-0 podman[431327]: 2025-10-11 02:25:04.238459024 +0000 UTC m=+0.108882992 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.build-date=20251007, io.buildah.version=1.41.4, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 02:25:04 compute-0 podman[431325]: 2025-10-11 02:25:04.263843479 +0000 UTC m=+0.136407783 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:25:04 compute-0 podman[431328]: 2025-10-11 02:25:04.276880567 +0000 UTC m=+0.124095197 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, managed_by=edpm_ansible)
Oct 11 02:25:04 compute-0 podman[431326]: 2025-10-11 02:25:04.280784072 +0000 UTC m=+0.151318752 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, container_name=ovn_controller)
Oct 11 02:25:04 compute-0 nova_compute[356901]: 2025-10-11 02:25:04.433 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:04 compute-0 sshd-session[431322]: Connection closed by invalid user debian 121.227.153.123 port 40860 [preauth]
Oct 11 02:25:05 compute-0 ceph-mon[191930]: pgmap v1413: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 0 B/s wr, 57 op/s
Oct 11 02:25:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1414: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 12 KiB/s rd, 0 B/s wr, 20 op/s
Oct 11 02:25:06 compute-0 sshd-session[431408]: Invalid user debian from 121.227.153.123 port 32850
Oct 11 02:25:06 compute-0 sshd-session[431408]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:25:06 compute-0 sshd-session[431408]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0016577461621736017 of space, bias 1.0, pg target 0.4973238486520805 quantized to 32 (current 32)
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:25:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:25:07 compute-0 ceph-mon[191930]: pgmap v1414: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 12 KiB/s rd, 0 B/s wr, 20 op/s
Oct 11 02:25:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1415: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 2.0 KiB/s rd, 0 B/s wr, 3 op/s
Oct 11 02:25:08 compute-0 podman[431411]: 2025-10-11 02:25:08.216678083 +0000 UTC m=+0.102907102 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, tcib_managed=true, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible)
Oct 11 02:25:08 compute-0 podman[431410]: 2025-10-11 02:25:08.22548669 +0000 UTC m=+0.117470555 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, managed_by=edpm_ansible, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, container_name=multipathd)
Oct 11 02:25:08 compute-0 nova_compute[356901]: 2025-10-11 02:25:08.692 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:09 compute-0 sshd-session[431408]: Failed password for invalid user debian from 121.227.153.123 port 32850 ssh2
Oct 11 02:25:09 compute-0 ceph-mon[191930]: pgmap v1415: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 2.0 KiB/s rd, 0 B/s wr, 3 op/s
Oct 11 02:25:09 compute-0 nova_compute[356901]: 2025-10-11 02:25:09.437 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1416: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:25:10 compute-0 sshd-session[431408]: Connection closed by invalid user debian 121.227.153.123 port 32850 [preauth]
Oct 11 02:25:11 compute-0 ceph-mon[191930]: pgmap v1416: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:12 compute-0 sshd-session[431450]: Invalid user debian from 121.227.153.123 port 58124
Oct 11 02:25:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1417: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:12 compute-0 sshd-session[431450]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:25:12 compute-0 sshd-session[431450]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:25:13 compute-0 ceph-mon[191930]: pgmap v1417: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:13 compute-0 nova_compute[356901]: 2025-10-11 02:25:13.697 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.862 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads available to execute them. Therefore, one can expect the polling process to take longer than expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.863 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.863 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.864 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2cb09e20>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.873 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': 'd60d7ea1-5d00-4902-90e6-3ae67eb09a78', 'name': 'vn-vgckve2-ittzoa6m3dmq-egfg3ceao3k4-vnf-rvnztbwt2zgh', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000002', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {'metering.server_group': '3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.877 14 DEBUG ceilometer.compute.discovery [-] Querying metadata for instance 7513b93e-d2b8-4ae0-8f1c-3df190945259 from Nova API get_server /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:176
Oct 11 02:25:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:13.879 14 DEBUG novaclient.v2.client [-] REQ: curl -g -i -X GET https://nova-internal.openstack.svc:8774/v2.1/servers/7513b93e-d2b8-4ae0-8f1c-3df190945259 -H "Accept: application/json" -H "User-Agent: python-novaclient" -H "X-Auth-Token: {SHA256}d674387017edb5d8543811c363b3a2965950a94ddf4462840fede0e79ac258e9" -H "X-OpenStack-Nova-API-Version: 2.1" _http_log_request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:572
Oct 11 02:25:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1418: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 767 B/s wr, 0 op/s
Oct 11 02:25:14 compute-0 nova_compute[356901]: 2025-10-11 02:25:14.439 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:14 compute-0 sshd-session[431450]: Failed password for invalid user debian from 121.227.153.123 port 58124 ssh2
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.829 14 DEBUG novaclient.v2.client [-] RESP: [200] Connection: Keep-Alive Content-Length: 1960 Content-Type: application/json Date: Sat, 11 Oct 2025 02:25:13 GMT Keep-Alive: timeout=5, max=100 OpenStack-API-Version: compute 2.1 Server: Apache Vary: OpenStack-API-Version,X-OpenStack-Nova-API-Version X-OpenStack-Nova-API-Version: 2.1 x-compute-request-id: req-a4a0fa4a-53ef-4609-bd23-cc5093c7697c x-openstack-request-id: req-a4a0fa4a-53ef-4609-bd23-cc5093c7697c _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:613
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.829 14 DEBUG novaclient.v2.client [-] RESP BODY: {"server": {"id": "7513b93e-d2b8-4ae0-8f1c-3df190945259", "name": "vn-vgckve2-djjfpphdsuuh-gthznuj2xct2-vnf-jmvtgw3mflyn", "status": "ACTIVE", "tenant_id": "97026531b3404a11869cb85a059c4a0d", "user_id": "d215f3ebbc07435493ccd666fc80109d", "metadata": {"metering.server_group": "3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e"}, "hostId": "2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736", "image": {"id": "a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/images/a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7"}]}, "flavor": {"id": "486e1451-345c-45d6-b075-f4717e759025", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/flavors/486e1451-345c-45d6-b075-f4717e759025"}]}, "created": "2025-10-11T02:23:54Z", "updated": "2025-10-11T02:24:06Z", "addresses": {"private": [{"version": 4, "addr": "192.168.0.225", "OS-EXT-IPS:type": "fixed", "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:16:ee:dc"}, {"version": 4, "addr": "192.168.122.204", "OS-EXT-IPS:type": "floating", "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:16:ee:dc"}]}, "accessIPv4": "", "accessIPv6": "", "links": [{"rel": "self", "href": "https://nova-internal.openstack.svc:8774/v2.1/servers/7513b93e-d2b8-4ae0-8f1c-3df190945259"}, {"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/servers/7513b93e-d2b8-4ae0-8f1c-3df190945259"}], "OS-DCF:diskConfig": "MANUAL", "progress": 0, "OS-EXT-AZ:availability_zone": "nova", "config_drive": "True", "key_name": null, "OS-SRV-USG:launched_at": "2025-10-11T02:24:06.000000", "OS-SRV-USG:terminated_at": null, "security_groups": [{"name": "basic"}], "OS-EXT-SRV-ATTR:host": "compute-0.ctlplane.example.com", "OS-EXT-SRV-ATTR:instance_name": "instance-00000003", "OS-EXT-SRV-ATTR:hypervisor_hostname": "compute-0.ctlplane.example.com", "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-EXT-STS:power_state": 1, "os-extended-volumes:volumes_attached": []}} _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:648
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.829 14 DEBUG novaclient.v2.client [-] GET call to compute for https://nova-internal.openstack.svc:8774/v2.1/servers/7513b93e-d2b8-4ae0-8f1c-3df190945259 used request id req-a4a0fa4a-53ef-4609-bd23-cc5093c7697c request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:1073
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.831 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '7513b93e-d2b8-4ae0-8f1c-3df190945259', 'name': 'vn-vgckve2-djjfpphdsuuh-gthznuj2xct2-vnf-jmvtgw3mflyn', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000003', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {'metering.server_group': '3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.836 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.836 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.836 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.837 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.837 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.838 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:25:14.837196) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.845 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.bytes volume: 5233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.851 14 DEBUG ceilometer.compute.virt.libvirt.inspector [-] No delta meter predecessor for 7513b93e-d2b8-4ae0-8f1c-3df190945259 / tapa942acb1-1e inspect_vnics /usr/lib/python3.12/site-packages/ceilometer/compute/virt/libvirt/inspector.py:143
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.852 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.bytes volume: 1828 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.857 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2352 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.858 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.859 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.859 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.859 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.859 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.860 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:25:14.860071) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.860 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.860 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.packets volume: 42 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.861 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.packets volume: 20 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.861 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 22 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.862 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.863 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.863 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.863 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.863 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.864 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.864 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.865 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:25:14.863957) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.864 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.865 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.866 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.866 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.867 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.867 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.867 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.867 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:25:14.867436) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.868 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.868 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.869 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.869 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.870 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.870 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.870 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.870 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.871 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.871 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:25:14.870919) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.907 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.908 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.908 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.capacity volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.932 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.933 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.934 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.capacity volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.966 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.967 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.968 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.969 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
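[editor's note] disk.device.capacity is a per-device meter, which is why each instance yields three samples above: the volumes are bytes per attached block device, two of exactly 1 GiB plus one much smaller disk (583680 or 485376 bytes, plausibly a config drive, though the log itself does not name the devices, so that reading is an inference). The arithmetic:

    # Byte values from the samples above, converted for readability.
    for vol in (1073741824, 583680, 485376):
        print(f"{vol} B = {vol / 2**30:.6f} GiB = {vol / 2**10:.0f} KiB")
    # 1073741824 B is exactly 1 GiB (2**30).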
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.969 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.970 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.970 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.970 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.970 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:14.971 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:25:14.970675) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.043 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.bytes volume: 23325184 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.044 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.045 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.bytes volume: 385378 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.094 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.095 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.095 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.bytes volume: 385378 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.149 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.150 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.150 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.152 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.152 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.152 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.152 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.153 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.153 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.153 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.latency volume: 1853196562 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.154 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.latency volume: 293231554 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.154 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:25:15.153219) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.155 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.latency volume: 250459547 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.155 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.latency volume: 1696814304 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.156 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.latency volume: 210864290 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.156 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.latency volume: 178724423 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.157 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.157 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.157 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.158 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
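[editor's note] The disk.device.*.latency meters are cumulative time counters. Assuming they map to libvirt's block-stats total-time fields, which are reported in nanoseconds, the first device on instance d60d7ea1 has spent roughly 1.85 seconds servicing reads since it started; that unit is an assumption worth checking against the deployment's meter definitions.

    # Cumulative read-latency samples (assumed nanoseconds) in seconds:
    for ns in (1853196562, 293231554, 250459547):
        print(f"{ns} ns = {ns / 1e9:.3f} s")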
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.159 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.159 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.159 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.159 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.159 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.160 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:25:15.159762) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.160 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.requests volume: 844 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.161 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.161 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.requests volume: 124 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.161 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.162 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.162 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.requests volume: 124 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.163 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.163 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.164 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.165 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.165 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.165 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.165 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.166 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.166 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.166 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.167 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.167 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:25:15.166185) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.167 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.usage volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.168 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.168 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.169 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.usage volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.169 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.170 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.170 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.171 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.171 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.172 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.172 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.172 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.172 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.173 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:25:15.172768) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.173 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.bytes volume: 41836544 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.173 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.174 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.174 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.bytes volume: 41713664 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.175 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.175 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.176 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.176 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.177 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.178 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.178 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.178 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.178 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.178 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.179 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.179 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:25:15.179023) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.179 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.latency volume: 5140134066 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.180 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.latency volume: 26893276 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.180 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.180 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.latency volume: 5874180027 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.181 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.latency volume: 25967717 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.181 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.182 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.182 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.183 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.184 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.184 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.184 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.184 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.184 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.185 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.185 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:25:15.184970) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.217 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.240 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.283 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.284 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
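[editor's note] power.state volume 1 on all three instances is consistent with running domains: libvirt's domain-state enum assigns 1 to VIR_DOMAIN_RUNNING, and nova's power_state uses the same value for RUNNING. A lookup table for reading these samples; treat the exact mapping ceilometer applies as an assumption to confirm against its docs:

    # libvirt virDomainState values (see libvirt's API documentation):
    DOMAIN_STATE = {
        0: "NOSTATE", 1: "RUNNING", 2: "BLOCKED", 3: "PAUSED",
        4: "SHUTDOWN", 5: "SHUTOFF", 6: "CRASHED", 7: "PMSUSPENDED",
    }
    print(DOMAIN_STATE[1])  # RUNNING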
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.285 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.285 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.285 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.285 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.286 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.286 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.requests volume: 238 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.286 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:25:15.285918) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.287 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.287 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.288 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.requests volume: 225 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.288 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.289 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.289 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.290 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.290 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.292 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.292 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.292 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.292 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.293 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.293 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.293 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.bytes.delta volume: 84 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.294 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:25:15.293210) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.294 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.294 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 84 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.295 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
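[editor's note] The *.delta meters report the change in a cumulative interface counter since the previous poll, so a volume of 84 above means 84 bytes arrived in the last interval and 0 means the counter did not move. A minimal cache-and-subtract sketch of how such a delta can be derived; the cache layout and names are illustrative:

    _last_seen: dict[str, int] = {}

    def bytes_delta(resource: str, cumulative: int) -> int:
        # Difference against the previous cumulative reading; the first poll
        # establishes the baseline and therefore reports 0.
        prev = _last_seen.get(resource, cumulative)
        _last_seen[resource] = cumulative
        return cumulative - prev

    bytes_delta("d60d7ea1", 1000)          # baseline poll -> 0
    print(bytes_delta("d60d7ea1", 1084))   # 84, matching the sample above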
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.295 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.296 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.rate in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.296 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.296 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.296 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.rate heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.297 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.rate (2025-10-11T02:25:15.296682) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.297 14 DEBUG ceilometer.compute.pollsters [-] LibvirtInspector does not provide data for IncomingBytesRatePollster get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:162
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.297 14 ERROR ceilometer.polling.manager [-] Prevent pollster network.incoming.bytes.rate from polling [<NovaLikeServer: vn-vgckve2-djjfpphdsuuh-gthznuj2xct2-vnf-jmvtgw3mflyn>] on source pollsters anymore!: ceilometer.polling.plugin_base.PollsterPermanentError: [<NovaLikeServer: vn-vgckve2-djjfpphdsuuh-gthznuj2xct2-vnf-jmvtgw3mflyn>]
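[editor's note] This is the only ERROR in the cycle, and per the two lines above it is expected behaviour rather than a failure: the libvirt inspector exposes cumulative byte counters but no precomputed rate, so the .rate pollster raises PollsterPermanentError and the manager stops asking for those resources on this source instead of retrying every interval (rates can still be derived downstream from the cumulative meters). A sketch of that blacklisting contract, with illustrative names and a hypothetical "server-a" resource:

    class PollsterPermanentError(Exception):
        """Raised when a resource can never yield data for this pollster."""
        def __init__(self, resources):
            self.fail_res_list = resources

    _blacklist: set[str] = set()

    def inspect_rate(resource):
        # The inspector has no rate data, so this always fails permanently.
        raise PollsterPermanentError([resource])

    def poll_rate(resources):
        samples = []
        for r in (r for r in resources if r not in _blacklist):
            try:
                samples.append(inspect_rate(r))
            except PollsterPermanentError as exc:
                _blacklist.update(exc.fail_res_list)  # "Prevent pollster ... anymore!"
        return samples

    poll_rate(["server-a"])
    print(_blacklist)  # server-a is skipped on every later cycle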
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.297 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.298 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.298 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.298 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.298 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.299 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:25:15.298604) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.300 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.300 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.300 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.300 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.300 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.301 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.301 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.packets volume: 35 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.301 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.packets volume: 15 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.302 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:25:15.301003) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.302 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 21 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.303 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.303 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.303 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.304 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.304 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.304 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.304 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.bytes.delta volume: 70 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.305 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.305 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:25:15.304422) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.306 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 70 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.306 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.307 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.307 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.307 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.307 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.308 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.308 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:25:15.308047) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.309 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.309 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.310 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.310 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.310 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.310 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.311 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:25:15.310635) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.311 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.312 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.312 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.313 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.313 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.313 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.314 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.314 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceph-mon[191930]: pgmap v1418: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 767 B/s wr, 0 op/s
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.314 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.315 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.315 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.316 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.allocation volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.316 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:25:15.314760) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.316 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.317 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.317 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.allocation volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.318 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.319 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.319 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.321 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.321 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.321 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.322 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.322 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.322 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.323 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:25:15.322705) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.323 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.324 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.324 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.325 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.326 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.326 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.326 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.327 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.327 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.327 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:25:15.327162) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.327 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/cpu volume: 264470000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.328 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/cpu volume: 33860000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.329 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 38860000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.329 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.330 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.330 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.330 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.330 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.330 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.331 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.bytes volume: 4830 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.332 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.bytes volume: 2188 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.332 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2272 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.333 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:25:15.330858) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.333 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.334 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.334 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.334 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.334 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.335 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:25:15.334966) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.335 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.335 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/memory.usage volume: 49.125 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.336 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/memory.usage volume: 49.6640625 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.336 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.87109375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.337 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.337 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.337 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.rate in the context of pollsters
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.337 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.337 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.338 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.rate heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.338 14 DEBUG ceilometer.compute.pollsters [-] LibvirtInspector does not provide data for OutgoingBytesRatePollster get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:162
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.338 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.rate (2025-10-11T02:25:15.338004) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.338 14 ERROR ceilometer.polling.manager [-] Prevent pollster network.outgoing.bytes.rate from polling [<NovaLikeServer: vn-vgckve2-djjfpphdsuuh-gthznuj2xct2-vnf-jmvtgw3mflyn>] on source pollsters anymore!: ceilometer.polling.plugin_base.PollsterPermanentError: [<NovaLikeServer: vn-vgckve2-djjfpphdsuuh-gthznuj2xct2-vnf-jmvtgw3mflyn>]
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.341 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.341 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.341 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.341 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.341 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.341 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.341 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.341 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.341 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.341 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.342 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.342 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.342 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.342 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.342 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.342 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.342 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.342 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.342 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.342 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.342 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.342 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.342 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.342 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.343 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:25:15.343 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:25:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:25:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1419: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.4 KiB/s wr, 0 op/s
Oct 11 02:25:16 compute-0 sshd-session[431450]: Connection closed by invalid user debian 121.227.153.123 port 58124 [preauth]
Oct 11 02:25:17 compute-0 ceph-mon[191930]: pgmap v1419: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.4 KiB/s wr, 0 op/s
Oct 11 02:25:17 compute-0 sshd-session[431453]: Invalid user debian from 121.227.153.123 port 58138
Oct 11 02:25:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1420: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.4 KiB/s wr, 0 op/s
Oct 11 02:25:18 compute-0 sshd-session[431453]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:25:18 compute-0 sshd-session[431453]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:25:18 compute-0 nova_compute[356901]: 2025-10-11 02:25:18.702 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:19 compute-0 podman[431457]: 2025-10-11 02:25:19.231828081 +0000 UTC m=+0.095222524 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:25:19 compute-0 podman[431455]: 2025-10-11 02:25:19.232104715 +0000 UTC m=+0.119163884 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, config_id=edpm, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']})
Oct 11 02:25:19 compute-0 podman[431456]: 2025-10-11 02:25:19.262130127 +0000 UTC m=+0.142231660 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, version=9.6, build-date=2025-08-20T13:12:41, managed_by=edpm_ansible, io.buildah.version=1.33.7, release=1755695350, url=https://catalog.redhat.com/en/search?searchType=containers, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, maintainer=Red Hat, Inc., name=ubi9-minimal, com.redhat.component=ubi9-minimal-container, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, container_name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm, vendor=Red Hat, Inc., architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-type=git, distribution-scope=public, io.openshift.expose-services=)
Oct 11 02:25:19 compute-0 ceph-mon[191930]: pgmap v1420: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.4 KiB/s wr, 0 op/s
Oct 11 02:25:19 compute-0 nova_compute[356901]: 2025-10-11 02:25:19.443 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1421: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.4 KiB/s wr, 0 op/s
Oct 11 02:25:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:25:20 compute-0 sshd-session[431453]: Failed password for invalid user debian from 121.227.153.123 port 58138 ssh2
Oct 11 02:25:21 compute-0 ceph-mon[191930]: pgmap v1421: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.4 KiB/s wr, 0 op/s
Oct 11 02:25:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1422: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.4 KiB/s wr, 0 op/s
Oct 11 02:25:22 compute-0 sshd-session[431453]: Connection closed by invalid user debian 121.227.153.123 port 58138 [preauth]
Oct 11 02:25:23 compute-0 ceph-mon[191930]: pgmap v1422: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.4 KiB/s wr, 0 op/s
Oct 11 02:25:23 compute-0 nova_compute[356901]: 2025-10-11 02:25:23.707 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:23 compute-0 sshd-session[431516]: Invalid user debian from 121.227.153.123 port 39718
Oct 11 02:25:24 compute-0 sshd-session[431516]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:25:24 compute-0 sshd-session[431516]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:25:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1423: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.4 KiB/s wr, 0 op/s
Oct 11 02:25:24 compute-0 nova_compute[356901]: 2025-10-11 02:25:24.447 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:25 compute-0 podman[431518]: 2025-10-11 02:25:25.23386018 +0000 UTC m=+0.116290306 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, release-0.7.12=, version=9.4, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, build-date=2024-09-18T21:23:30, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.buildah.version=1.29.0, name=ubi9, config_id=edpm, io.openshift.tags=base rhel9, maintainer=Red Hat, Inc., vcs-type=git, summary=Provides the latest release of Red Hat Universal Base Image 9., container_name=kepler, io.openshift.expose-services=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, release=1214.1726694543, architecture=x86_64, distribution-scope=public, vendor=Red Hat, Inc., com.redhat.component=ubi9-container)
Oct 11 02:25:25 compute-0 ceph-mon[191930]: pgmap v1423: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.4 KiB/s wr, 0 op/s
Oct 11 02:25:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:25:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1424: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 682 B/s wr, 0 op/s
Oct 11 02:25:26 compute-0 sshd-session[431516]: Failed password for invalid user debian from 121.227.153.123 port 39718 ssh2
Oct 11 02:25:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections...
Oct 11 02:25:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:25:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections...
Oct 11 02:25:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:25:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections...
Oct 11 02:25:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:25:27 compute-0 ceph-mon[191930]: pgmap v1424: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 682 B/s wr, 0 op/s
Oct 11 02:25:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:25:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/193393855' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:25:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:25:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/193393855' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:25:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1425: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:28 compute-0 sshd-session[431516]: Connection closed by invalid user debian 121.227.153.123 port 39718 [preauth]
Oct 11 02:25:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/193393855' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:25:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/193393855' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:25:28 compute-0 nova_compute[356901]: 2025-10-11 02:25:28.711 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:29 compute-0 ceph-mon[191930]: pgmap v1425: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:29 compute-0 nova_compute[356901]: 2025-10-11 02:25:29.448 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:29 compute-0 sshd-session[431537]: Invalid user debian from 121.227.153.123 port 39720
Oct 11 02:25:29 compute-0 podman[157119]: time="2025-10-11T02:25:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:25:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:25:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:25:29 compute-0 sshd-session[431537]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:25:29 compute-0 sshd-session[431537]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:25:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:25:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9051 "" "Go-http-client/1.1"
Oct 11 02:25:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1426: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:25:30 compute-0 ceph-mon[191930]: pgmap v1426: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:31 compute-0 openstack_network_exporter[374316]: ERROR   02:25:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:25:31 compute-0 openstack_network_exporter[374316]: ERROR   02:25:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:25:31 compute-0 openstack_network_exporter[374316]: ERROR   02:25:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:25:31 compute-0 openstack_network_exporter[374316]: ERROR   02:25:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:25:31 compute-0 openstack_network_exporter[374316]: ERROR   02:25:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:25:31 compute-0 sshd-session[431537]: Failed password for invalid user debian from 121.227.153.123 port 39720 ssh2
Oct 11 02:25:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1427: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:33 compute-0 ceph-mon[191930]: pgmap v1427: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:33 compute-0 nova_compute[356901]: 2025-10-11 02:25:33.715 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:34 compute-0 sshd-session[431537]: Connection closed by invalid user debian 121.227.153.123 port 39720 [preauth]
Oct 11 02:25:34 compute-0 sudo[431539]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:25:34 compute-0 sudo[431539]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:34 compute-0 sudo[431539]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1428: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:34 compute-0 sudo[431564]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:25:34 compute-0 sudo[431564]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:34 compute-0 sudo[431564]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:34 compute-0 sudo[431590]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:25:34 compute-0 sudo[431590]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:34 compute-0 sudo[431590]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:34 compute-0 nova_compute[356901]: 2025-10-11 02:25:34.453 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:34 compute-0 podman[431615]: 2025-10-11 02:25:34.49259098 +0000 UTC m=+0.100562073 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:25:34 compute-0 sudo[431643]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host
Oct 11 02:25:34 compute-0 sudo[431643]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:34 compute-0 podman[431617]: 2025-10-11 02:25:34.510154474 +0000 UTC m=+0.103819928 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_id=edpm, container_name=ceilometer_agent_compute, org.label-schema.name=CentOS Stream 10 Base Image, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.build-date=20251007, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:25:34 compute-0 podman[431622]: 2025-10-11 02:25:34.517940724 +0000 UTC m=+0.097563473 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 02:25:34 compute-0 podman[431616]: 2025-10-11 02:25:34.54044459 +0000 UTC m=+0.139462323 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=ovn_controller, org.label-schema.build-date=20251009)
Oct 11 02:25:34 compute-0 sudo[431643]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:25:34 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:25:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:25:34 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:25:35 compute-0 sudo[431740]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:25:35 compute-0 sudo[431740]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:35 compute-0 sudo[431740]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:35 compute-0 sudo[431765]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:25:35 compute-0 sudo[431765]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:35 compute-0 sudo[431765]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:35 compute-0 ceph-mon[191930]: pgmap v1428: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:35 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:25:35 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:25:35 compute-0 sudo[431790]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:25:35 compute-0 sudo[431790]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:35 compute-0 sudo[431790]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:35 compute-0 sshd-session[431589]: Invalid user debian from 121.227.153.123 port 57832
Oct 11 02:25:35 compute-0 sudo[431815]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:25:35 compute-0 sudo[431815]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:25:35 compute-0 sshd-session[431589]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:25:35 compute-0 sshd-session[431589]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:25:36 compute-0 sudo[431815]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1429: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:25:36 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:25:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:25:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:25:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:25:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:25:36 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev c7d4c127-f72c-4557-b852-6967b89c1fe2 does not exist
Oct 11 02:25:36 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev f0c8d5db-0fb8-41d7-8c38-92a28177f418 does not exist
Oct 11 02:25:36 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev bf22ceb5-cc9e-4f01-85cf-43d5ad924418 does not exist
Oct 11 02:25:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:25:36 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:25:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:25:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:25:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:25:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:25:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:25:36 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:25:36 compute-0 sudo[431870]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:25:36 compute-0 sudo[431870]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:36 compute-0 sudo[431870]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:36 compute-0 sudo[431895]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:25:36 compute-0 sudo[431895]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:36 compute-0 sudo[431895]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:36 compute-0 sudo[431920]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:25:36 compute-0 sudo[431920]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:36 compute-0 sudo[431920]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:36 compute-0 sudo[431945]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:25:36 compute-0 sudo[431945]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:37 compute-0 ceph-mon[191930]: pgmap v1429: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:37 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:25:37 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:25:37 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:25:37 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:25:37 compute-0 podman[432008]: 2025-10-11 02:25:37.438148017 +0000 UTC m=+0.067355988 container create cee892487e2359e04a9e0651a48eee2b7d8b97c927843ae2b8fa3c7a45c3215d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_proskuriakova, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0)
Oct 11 02:25:37 compute-0 podman[432008]: 2025-10-11 02:25:37.414223056 +0000 UTC m=+0.043431047 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:25:37 compute-0 systemd[1]: Started libpod-conmon-cee892487e2359e04a9e0651a48eee2b7d8b97c927843ae2b8fa3c7a45c3215d.scope.
Oct 11 02:25:37 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:25:37 compute-0 podman[432008]: 2025-10-11 02:25:37.576881647 +0000 UTC m=+0.206089698 container init cee892487e2359e04a9e0651a48eee2b7d8b97c927843ae2b8fa3c7a45c3215d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_proskuriakova, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:25:37 compute-0 podman[432008]: 2025-10-11 02:25:37.594605164 +0000 UTC m=+0.223813155 container start cee892487e2359e04a9e0651a48eee2b7d8b97c927843ae2b8fa3c7a45c3215d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_proskuriakova, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:25:37 compute-0 podman[432008]: 2025-10-11 02:25:37.601403977 +0000 UTC m=+0.230612018 container attach cee892487e2359e04a9e0651a48eee2b7d8b97c927843ae2b8fa3c7a45c3215d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_proskuriakova, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:25:37 compute-0 compassionate_proskuriakova[432022]: 167 167
Oct 11 02:25:37 compute-0 podman[432008]: 2025-10-11 02:25:37.60873722 +0000 UTC m=+0.237945191 container died cee892487e2359e04a9e0651a48eee2b7d8b97c927843ae2b8fa3c7a45c3215d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_proskuriakova, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 02:25:37 compute-0 systemd[1]: libpod-cee892487e2359e04a9e0651a48eee2b7d8b97c927843ae2b8fa3c7a45c3215d.scope: Deactivated successfully.
Oct 11 02:25:37 compute-0 systemd[1]: var-lib-containers-storage-overlay-c0cd28f0c3118af5ca425470e01fe9fe5d1a6f6e2dca5b186f4f8a62d0d88836-merged.mount: Deactivated successfully.
Oct 11 02:25:37 compute-0 podman[432008]: 2025-10-11 02:25:37.695834997 +0000 UTC m=+0.325042948 container remove cee892487e2359e04a9e0651a48eee2b7d8b97c927843ae2b8fa3c7a45c3215d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_proskuriakova, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Oct 11 02:25:37 compute-0 systemd[1]: libpod-conmon-cee892487e2359e04a9e0651a48eee2b7d8b97c927843ae2b8fa3c7a45c3215d.scope: Deactivated successfully.
Oct 11 02:25:37 compute-0 podman[432046]: 2025-10-11 02:25:37.945406281 +0000 UTC m=+0.083945165 container create 4a85056cf43bc491bd26d451e9139ff090139efbc8ce0d8d6b6f2666d8958338 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_edison, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:25:37 compute-0 podman[432046]: 2025-10-11 02:25:37.909828466 +0000 UTC m=+0.048367370 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:25:38 compute-0 systemd[1]: Started libpod-conmon-4a85056cf43bc491bd26d451e9139ff090139efbc8ce0d8d6b6f2666d8958338.scope.
Oct 11 02:25:38 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:25:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f09bb10e7274cc4b44d6d6efe7498ddaf620cc3d98ebd2dd7a454980622d9987/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:25:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f09bb10e7274cc4b44d6d6efe7498ddaf620cc3d98ebd2dd7a454980622d9987/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:25:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f09bb10e7274cc4b44d6d6efe7498ddaf620cc3d98ebd2dd7a454980622d9987/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:25:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f09bb10e7274cc4b44d6d6efe7498ddaf620cc3d98ebd2dd7a454980622d9987/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:25:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f09bb10e7274cc4b44d6d6efe7498ddaf620cc3d98ebd2dd7a454980622d9987/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:25:38 compute-0 sshd-session[431589]: Failed password for invalid user debian from 121.227.153.123 port 57832 ssh2
Oct 11 02:25:38 compute-0 podman[432046]: 2025-10-11 02:25:38.140813139 +0000 UTC m=+0.279352073 container init 4a85056cf43bc491bd26d451e9139ff090139efbc8ce0d8d6b6f2666d8958338 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_edison, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:25:38 compute-0 podman[432046]: 2025-10-11 02:25:38.161552616 +0000 UTC m=+0.300091490 container start 4a85056cf43bc491bd26d451e9139ff090139efbc8ce0d8d6b6f2666d8958338 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_edison, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0)
Oct 11 02:25:38 compute-0 podman[432046]: 2025-10-11 02:25:38.167468535 +0000 UTC m=+0.306007409 container attach 4a85056cf43bc491bd26d451e9139ff090139efbc8ce0d8d6b6f2666d8958338 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_edison, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2)
Oct 11 02:25:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1430: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:38 compute-0 nova_compute[356901]: 2025-10-11 02:25:38.719 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:39 compute-0 podman[432080]: 2025-10-11 02:25:39.233421555 +0000 UTC m=+0.132141022 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.schema-version=1.0)
Oct 11 02:25:39 compute-0 podman[432082]: 2025-10-11 02:25:39.253703504 +0000 UTC m=+0.139991743 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, container_name=iscsid)
Oct 11 02:25:39 compute-0 ceph-mon[191930]: pgmap v1430: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:39 compute-0 reverent_edison[432062]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:25:39 compute-0 reverent_edison[432062]: --> relative data size: 1.0
Oct 11 02:25:39 compute-0 reverent_edison[432062]: --> All data devices are unavailable
Oct 11 02:25:39 compute-0 systemd[1]: libpod-4a85056cf43bc491bd26d451e9139ff090139efbc8ce0d8d6b6f2666d8958338.scope: Deactivated successfully.
Oct 11 02:25:39 compute-0 systemd[1]: libpod-4a85056cf43bc491bd26d451e9139ff090139efbc8ce0d8d6b6f2666d8958338.scope: Consumed 1.135s CPU time.
Oct 11 02:25:39 compute-0 podman[432046]: 2025-10-11 02:25:39.365456353 +0000 UTC m=+1.503995207 container died 4a85056cf43bc491bd26d451e9139ff090139efbc8ce0d8d6b6f2666d8958338 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_edison, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:25:39 compute-0 systemd[1]: var-lib-containers-storage-overlay-f09bb10e7274cc4b44d6d6efe7498ddaf620cc3d98ebd2dd7a454980622d9987-merged.mount: Deactivated successfully.
Oct 11 02:25:39 compute-0 podman[432046]: 2025-10-11 02:25:39.449804284 +0000 UTC m=+1.588343128 container remove 4a85056cf43bc491bd26d451e9139ff090139efbc8ce0d8d6b6f2666d8958338 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_edison, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:25:39 compute-0 nova_compute[356901]: 2025-10-11 02:25:39.455 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:39 compute-0 systemd[1]: libpod-conmon-4a85056cf43bc491bd26d451e9139ff090139efbc8ce0d8d6b6f2666d8958338.scope: Deactivated successfully.
Oct 11 02:25:39 compute-0 sudo[431945]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:39 compute-0 sudo[432139]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:25:39 compute-0 sudo[432139]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:39 compute-0 sudo[432139]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:39 compute-0 sudo[432164]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:25:39 compute-0 sudo[432164]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:39 compute-0 sudo[432164]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:39 compute-0 sshd-session[431589]: Connection closed by invalid user debian 121.227.153.123 port 57832 [preauth]
Oct 11 02:25:39 compute-0 sudo[432189]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:25:39 compute-0 sudo[432189]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:39 compute-0 sudo[432189]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:40 compute-0 sudo[432214]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:25:40 compute-0 sudo[432214]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1431: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:25:40 compute-0 podman[432280]: 2025-10-11 02:25:40.728430669 +0000 UTC m=+0.093050177 container create 96b4cc44a71631364aeb9565fefcfd9446be54162f10512febde138fa4b908b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_shaw, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 02:25:40 compute-0 podman[432280]: 2025-10-11 02:25:40.688968979 +0000 UTC m=+0.053588517 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:25:40 compute-0 systemd[1]: Started libpod-conmon-96b4cc44a71631364aeb9565fefcfd9446be54162f10512febde138fa4b908b7.scope.
Oct 11 02:25:40 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:25:40 compute-0 podman[432280]: 2025-10-11 02:25:40.895000875 +0000 UTC m=+0.259620413 container init 96b4cc44a71631364aeb9565fefcfd9446be54162f10512febde138fa4b908b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_shaw, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:25:40 compute-0 podman[432280]: 2025-10-11 02:25:40.915146222 +0000 UTC m=+0.279765750 container start 96b4cc44a71631364aeb9565fefcfd9446be54162f10512febde138fa4b908b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_shaw, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 02:25:40 compute-0 podman[432280]: 2025-10-11 02:25:40.921850864 +0000 UTC m=+0.286470372 container attach 96b4cc44a71631364aeb9565fefcfd9446be54162f10512febde138fa4b908b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_shaw, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef)
Oct 11 02:25:40 compute-0 lucid_shaw[432296]: 167 167
Oct 11 02:25:40 compute-0 systemd[1]: libpod-96b4cc44a71631364aeb9565fefcfd9446be54162f10512febde138fa4b908b7.scope: Deactivated successfully.
Oct 11 02:25:40 compute-0 podman[432280]: 2025-10-11 02:25:40.931189991 +0000 UTC m=+0.295809519 container died 96b4cc44a71631364aeb9565fefcfd9446be54162f10512febde138fa4b908b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_shaw, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, ceph=True, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:25:40 compute-0 systemd[1]: var-lib-containers-storage-overlay-b8615e09ad334fafed0df402a2f9c59d9cc5456646da8eec46faa9e41cae57a7-merged.mount: Deactivated successfully.
Oct 11 02:25:41 compute-0 podman[432280]: 2025-10-11 02:25:41.030707975 +0000 UTC m=+0.395327483 container remove 96b4cc44a71631364aeb9565fefcfd9446be54162f10512febde138fa4b908b7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_shaw, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 02:25:41 compute-0 systemd[1]: libpod-conmon-96b4cc44a71631364aeb9565fefcfd9446be54162f10512febde138fa4b908b7.scope: Deactivated successfully.
Oct 11 02:25:41 compute-0 sshd-session[432239]: Invalid user debian from 121.227.153.123 port 60736
Oct 11 02:25:41 compute-0 podman[432318]: 2025-10-11 02:25:41.295948252 +0000 UTC m=+0.073757315 container create efab44f1d18e0a0a75d695f32f512cdf32eb4467692dd9cec84a5bf049abbcaa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_bose, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS)
Oct 11 02:25:41 compute-0 sshd-session[432239]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:25:41 compute-0 sshd-session[432239]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:25:41 compute-0 ceph-mon[191930]: pgmap v1431: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:41 compute-0 podman[432318]: 2025-10-11 02:25:41.267293062 +0000 UTC m=+0.045102165 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:25:41 compute-0 systemd[1]: Started libpod-conmon-efab44f1d18e0a0a75d695f32f512cdf32eb4467692dd9cec84a5bf049abbcaa.scope.
Oct 11 02:25:41 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:25:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d49337a0b009caadbe16b76e2ff057aabe50050efacde02fb62bed19b771ae84/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:25:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d49337a0b009caadbe16b76e2ff057aabe50050efacde02fb62bed19b771ae84/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:25:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d49337a0b009caadbe16b76e2ff057aabe50050efacde02fb62bed19b771ae84/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:25:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d49337a0b009caadbe16b76e2ff057aabe50050efacde02fb62bed19b771ae84/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:25:41 compute-0 podman[432318]: 2025-10-11 02:25:41.462548678 +0000 UTC m=+0.240357781 container init efab44f1d18e0a0a75d695f32f512cdf32eb4467692dd9cec84a5bf049abbcaa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_bose, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:25:41 compute-0 podman[432318]: 2025-10-11 02:25:41.486271025 +0000 UTC m=+0.264080078 container start efab44f1d18e0a0a75d695f32f512cdf32eb4467692dd9cec84a5bf049abbcaa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_bose, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:25:41 compute-0 podman[432318]: 2025-10-11 02:25:41.492598061 +0000 UTC m=+0.270407154 container attach efab44f1d18e0a0a75d695f32f512cdf32eb4467692dd9cec84a5bf049abbcaa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_bose, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:25:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1432: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:42 compute-0 recursing_bose[432334]: {
Oct 11 02:25:42 compute-0 recursing_bose[432334]:     "0": [
Oct 11 02:25:42 compute-0 recursing_bose[432334]:         {
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "devices": [
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "/dev/loop3"
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             ],
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "lv_name": "ceph_lv0",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "lv_size": "21470642176",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "name": "ceph_lv0",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "tags": {
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.cluster_name": "ceph",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.crush_device_class": "",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.encrypted": "0",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.osd_id": "0",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.type": "block",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.vdo": "0"
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             },
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "type": "block",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "vg_name": "ceph_vg0"
Oct 11 02:25:42 compute-0 recursing_bose[432334]:         }
Oct 11 02:25:42 compute-0 recursing_bose[432334]:     ],
Oct 11 02:25:42 compute-0 recursing_bose[432334]:     "1": [
Oct 11 02:25:42 compute-0 recursing_bose[432334]:         {
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "devices": [
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "/dev/loop4"
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             ],
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "lv_name": "ceph_lv1",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "lv_size": "21470642176",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "name": "ceph_lv1",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "tags": {
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.cluster_name": "ceph",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.crush_device_class": "",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.encrypted": "0",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.osd_id": "1",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.type": "block",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.vdo": "0"
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             },
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "type": "block",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "vg_name": "ceph_vg1"
Oct 11 02:25:42 compute-0 recursing_bose[432334]:         }
Oct 11 02:25:42 compute-0 recursing_bose[432334]:     ],
Oct 11 02:25:42 compute-0 recursing_bose[432334]:     "2": [
Oct 11 02:25:42 compute-0 recursing_bose[432334]:         {
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "devices": [
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "/dev/loop5"
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             ],
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "lv_name": "ceph_lv2",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "lv_size": "21470642176",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "name": "ceph_lv2",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "tags": {
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.cluster_name": "ceph",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.crush_device_class": "",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.encrypted": "0",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.osd_id": "2",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.type": "block",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:                 "ceph.vdo": "0"
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             },
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "type": "block",
Oct 11 02:25:42 compute-0 recursing_bose[432334]:             "vg_name": "ceph_vg2"
Oct 11 02:25:42 compute-0 recursing_bose[432334]:         }
Oct 11 02:25:42 compute-0 recursing_bose[432334]:     ]
Oct 11 02:25:42 compute-0 recursing_bose[432334]: }
Oct 11 02:25:42 compute-0 systemd[1]: libpod-efab44f1d18e0a0a75d695f32f512cdf32eb4467692dd9cec84a5bf049abbcaa.scope: Deactivated successfully.
Oct 11 02:25:42 compute-0 conmon[432334]: conmon efab44f1d18e0a0a75d6 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-efab44f1d18e0a0a75d695f32f512cdf32eb4467692dd9cec84a5bf049abbcaa.scope/container/memory.events
Oct 11 02:25:42 compute-0 podman[432318]: 2025-10-11 02:25:42.464520617 +0000 UTC m=+1.242329680 container died efab44f1d18e0a0a75d695f32f512cdf32eb4467692dd9cec84a5bf049abbcaa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_bose, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:25:42 compute-0 systemd[1]: var-lib-containers-storage-overlay-d49337a0b009caadbe16b76e2ff057aabe50050efacde02fb62bed19b771ae84-merged.mount: Deactivated successfully.
Oct 11 02:25:42 compute-0 podman[432318]: 2025-10-11 02:25:42.552971497 +0000 UTC m=+1.330780550 container remove efab44f1d18e0a0a75d695f32f512cdf32eb4467692dd9cec84a5bf049abbcaa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_bose, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=reef)
Oct 11 02:25:42 compute-0 systemd[1]: libpod-conmon-efab44f1d18e0a0a75d695f32f512cdf32eb4467692dd9cec84a5bf049abbcaa.scope: Deactivated successfully.
Oct 11 02:25:42 compute-0 sudo[432214]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:42 compute-0 sudo[432354]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:25:42 compute-0 sudo[432354]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:42 compute-0 sudo[432354]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:42 compute-0 sudo[432379]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:25:42 compute-0 sudo[432379]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:42 compute-0 sudo[432379]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:42 compute-0 sudo[432404]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:25:42 compute-0 sudo[432404]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:43 compute-0 sudo[432404]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:43 compute-0 sudo[432429]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:25:43 compute-0 sudo[432429]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:43 compute-0 ceph-mon[191930]: pgmap v1432: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:43 compute-0 podman[432492]: 2025-10-11 02:25:43.641133291 +0000 UTC m=+0.067151418 container create 7df726cb42b0859ba60702824435ca851f31478c9e1fc0cda55b2b19f1de1e58 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_northcutt, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:25:43 compute-0 systemd[1]: Started libpod-conmon-7df726cb42b0859ba60702824435ca851f31478c9e1fc0cda55b2b19f1de1e58.scope.
Oct 11 02:25:43 compute-0 sshd-session[432239]: Failed password for invalid user debian from 121.227.153.123 port 60736 ssh2
Oct 11 02:25:43 compute-0 podman[432492]: 2025-10-11 02:25:43.614680793 +0000 UTC m=+0.040698900 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:25:43 compute-0 nova_compute[356901]: 2025-10-11 02:25:43.724 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:43 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:25:43 compute-0 podman[432492]: 2025-10-11 02:25:43.768128132 +0000 UTC m=+0.194146289 container init 7df726cb42b0859ba60702824435ca851f31478c9e1fc0cda55b2b19f1de1e58 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_northcutt, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 02:25:43 compute-0 podman[432492]: 2025-10-11 02:25:43.785435313 +0000 UTC m=+0.211453410 container start 7df726cb42b0859ba60702824435ca851f31478c9e1fc0cda55b2b19f1de1e58 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_northcutt, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 02:25:43 compute-0 podman[432492]: 2025-10-11 02:25:43.790270314 +0000 UTC m=+0.216288421 container attach 7df726cb42b0859ba60702824435ca851f31478c9e1fc0cda55b2b19f1de1e58 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_northcutt, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:25:43 compute-0 busy_northcutt[432506]: 167 167
Oct 11 02:25:43 compute-0 podman[432492]: 2025-10-11 02:25:43.799560891 +0000 UTC m=+0.225578988 container died 7df726cb42b0859ba60702824435ca851f31478c9e1fc0cda55b2b19f1de1e58 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_northcutt, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 02:25:43 compute-0 systemd[1]: libpod-7df726cb42b0859ba60702824435ca851f31478c9e1fc0cda55b2b19f1de1e58.scope: Deactivated successfully.
Oct 11 02:25:43 compute-0 systemd[1]: var-lib-containers-storage-overlay-b4f6e978366e4929b10cbdaabb1ecc9ef39d18f7060c8dc52b7748beb36d69af-merged.mount: Deactivated successfully.
Oct 11 02:25:43 compute-0 podman[432492]: 2025-10-11 02:25:43.86907584 +0000 UTC m=+0.295093947 container remove 7df726cb42b0859ba60702824435ca851f31478c9e1fc0cda55b2b19f1de1e58 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_northcutt, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 02:25:43 compute-0 systemd[1]: libpod-conmon-7df726cb42b0859ba60702824435ca851f31478c9e1fc0cda55b2b19f1de1e58.scope: Deactivated successfully.
Oct 11 02:25:44 compute-0 podman[432532]: 2025-10-11 02:25:44.145691869 +0000 UTC m=+0.083127033 container create 63156c2af568a3c8f7b20af089905a68366b36e5040fecd5d22c1fbe9f0de67b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_elion, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507)
Oct 11 02:25:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1433: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:44 compute-0 podman[432532]: 2025-10-11 02:25:44.112024926 +0000 UTC m=+0.049460150 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:25:44 compute-0 systemd[1]: Started libpod-conmon-63156c2af568a3c8f7b20af089905a68366b36e5040fecd5d22c1fbe9f0de67b.scope.
Oct 11 02:25:44 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:25:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7d5c0eb244fb263347b19815ff24d54d36c7f7a86e96f78367d7f4f57fb5d076/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:25:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7d5c0eb244fb263347b19815ff24d54d36c7f7a86e96f78367d7f4f57fb5d076/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:25:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7d5c0eb244fb263347b19815ff24d54d36c7f7a86e96f78367d7f4f57fb5d076/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:25:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7d5c0eb244fb263347b19815ff24d54d36c7f7a86e96f78367d7f4f57fb5d076/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:25:44 compute-0 podman[432532]: 2025-10-11 02:25:44.334055643 +0000 UTC m=+0.271490817 container init 63156c2af568a3c8f7b20af089905a68366b36e5040fecd5d22c1fbe9f0de67b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_elion, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:25:44 compute-0 podman[432532]: 2025-10-11 02:25:44.354670549 +0000 UTC m=+0.292105683 container start 63156c2af568a3c8f7b20af089905a68366b36e5040fecd5d22c1fbe9f0de67b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_elion, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:25:44 compute-0 podman[432532]: 2025-10-11 02:25:44.361189497 +0000 UTC m=+0.298624731 container attach 63156c2af568a3c8f7b20af089905a68366b36e5040fecd5d22c1fbe9f0de67b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_elion, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:25:44 compute-0 nova_compute[356901]: 2025-10-11 02:25:44.458 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:45 compute-0 ceph-mon[191930]: pgmap v1433: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:25:45 compute-0 loving_elion[432548]: {
Oct 11 02:25:45 compute-0 loving_elion[432548]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:25:45 compute-0 loving_elion[432548]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:25:45 compute-0 loving_elion[432548]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:25:45 compute-0 loving_elion[432548]:         "osd_id": 1,
Oct 11 02:25:45 compute-0 loving_elion[432548]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:25:45 compute-0 loving_elion[432548]:         "type": "bluestore"
Oct 11 02:25:45 compute-0 loving_elion[432548]:     },
Oct 11 02:25:45 compute-0 loving_elion[432548]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:25:45 compute-0 loving_elion[432548]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:25:45 compute-0 loving_elion[432548]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:25:45 compute-0 loving_elion[432548]:         "osd_id": 2,
Oct 11 02:25:45 compute-0 loving_elion[432548]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:25:45 compute-0 loving_elion[432548]:         "type": "bluestore"
Oct 11 02:25:45 compute-0 loving_elion[432548]:     },
Oct 11 02:25:45 compute-0 loving_elion[432548]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:25:45 compute-0 loving_elion[432548]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:25:45 compute-0 loving_elion[432548]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:25:45 compute-0 loving_elion[432548]:         "osd_id": 0,
Oct 11 02:25:45 compute-0 loving_elion[432548]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:25:45 compute-0 loving_elion[432548]:         "type": "bluestore"
Oct 11 02:25:45 compute-0 loving_elion[432548]:     }
Oct 11 02:25:45 compute-0 loving_elion[432548]: }
Oct 11 02:25:45 compute-0 systemd[1]: libpod-63156c2af568a3c8f7b20af089905a68366b36e5040fecd5d22c1fbe9f0de67b.scope: Deactivated successfully.
Oct 11 02:25:45 compute-0 podman[432532]: 2025-10-11 02:25:45.488778357 +0000 UTC m=+1.426213491 container died 63156c2af568a3c8f7b20af089905a68366b36e5040fecd5d22c1fbe9f0de67b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_elion, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef)
Oct 11 02:25:45 compute-0 systemd[1]: libpod-63156c2af568a3c8f7b20af089905a68366b36e5040fecd5d22c1fbe9f0de67b.scope: Consumed 1.100s CPU time.
Oct 11 02:25:45 compute-0 systemd[1]: var-lib-containers-storage-overlay-7d5c0eb244fb263347b19815ff24d54d36c7f7a86e96f78367d7f4f57fb5d076-merged.mount: Deactivated successfully.
Oct 11 02:25:45 compute-0 podman[432532]: 2025-10-11 02:25:45.578421546 +0000 UTC m=+1.515856670 container remove 63156c2af568a3c8f7b20af089905a68366b36e5040fecd5d22c1fbe9f0de67b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_elion, ceph=True, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:25:45 compute-0 sshd-session[432239]: Connection closed by invalid user debian 121.227.153.123 port 60736 [preauth]
Oct 11 02:25:45 compute-0 systemd[1]: libpod-conmon-63156c2af568a3c8f7b20af089905a68366b36e5040fecd5d22c1fbe9f0de67b.scope: Deactivated successfully.
Oct 11 02:25:45 compute-0 sudo[432429]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:25:45 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:25:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:25:45 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:25:45 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev cbff3d44-ce3c-46de-b552-9b395140cc54 does not exist
Oct 11 02:25:45 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 662ba9d9-e572-4609-8f15-67d9878be9e5 does not exist
Oct 11 02:25:45 compute-0 sudo[432594]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:25:45 compute-0 sudo[432594]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:45 compute-0 sudo[432594]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:45 compute-0 sudo[432619]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:25:45 compute-0 sudo[432619]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:25:45 compute-0 sudo[432619]: pam_unix(sudo:session): session closed for user root
Oct 11 02:25:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1434: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:25:46 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:25:46 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:25:46 compute-0 ceph-mon[191930]: pgmap v1434: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:25:46 compute-0 sshd-session[432626]: Invalid user debian from 121.227.153.123 port 60744
Oct 11 02:25:47 compute-0 sshd-session[432626]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:25:47 compute-0 sshd-session[432626]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:25:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1435: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:25:48 compute-0 nova_compute[356901]: 2025-10-11 02:25:48.730 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:48 compute-0 nova_compute[356901]: 2025-10-11 02:25:48.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:25:48 compute-0 nova_compute[356901]: 2025-10-11 02:25:48.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_incomplete_migrations run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:25:48 compute-0 nova_compute[356901]: 2025-10-11 02:25:48.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances with incomplete migration  _cleanup_incomplete_migrations /usr/lib/python3.9/site-packages/nova/compute/manager.py:11183
Oct 11 02:25:49 compute-0 ceph-mon[191930]: pgmap v1435: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:25:49 compute-0 sshd-session[432626]: Failed password for invalid user debian from 121.227.153.123 port 60744 ssh2
Oct 11 02:25:49 compute-0 nova_compute[356901]: 2025-10-11 02:25:49.461 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:49 compute-0 nova_compute[356901]: 2025-10-11 02:25:49.914 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:25:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1436: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:25:50 compute-0 podman[432649]: 2025-10-11 02:25:50.258901596 +0000 UTC m=+0.122854459 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:25:50 compute-0 podman[432647]: 2025-10-11 02:25:50.264556925 +0000 UTC m=+0.132125276 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009)
Oct 11 02:25:50 compute-0 podman[432648]: 2025-10-11 02:25:50.271536258 +0000 UTC m=+0.134705302 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=openstack_network_exporter, com.redhat.component=ubi9-minimal-container, build-date=2025-08-20T13:12:41, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.buildah.version=1.33.7, release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., architecture=x86_64, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vendor=Red Hat, Inc., version=9.6, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, url=https://catalog.redhat.com/en/search?searchType=containers, distribution-scope=public, io.openshift.tags=minimal rhel9, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, managed_by=edpm_ansible, maintainer=Red Hat, Inc., vcs-type=git)
Oct 11 02:25:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:25:51 compute-0 ceph-mon[191930]: pgmap v1436: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:25:51 compute-0 sshd-session[432626]: Connection closed by invalid user debian 121.227.153.123 port 60744 [preauth]
Oct 11 02:25:51 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:25:51.871 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=7, options={'arp_ns_explicit_output': 'true', 'mac_prefix': 'fe:55:97', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'ce:9c:4f:b4:85:9b'}, ipsec=False) old=SB_Global(nb_cfg=6) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:25:51 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:25:51.872 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 8 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274
Oct 11 02:25:51 compute-0 nova_compute[356901]: 2025-10-11 02:25:51.881 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1437: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:25:52 compute-0 sshd-session[432705]: Invalid user debian from 121.227.153.123 port 49960
Oct 11 02:25:52 compute-0 nova_compute[356901]: 2025-10-11 02:25:52.900 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:25:53 compute-0 sshd-session[432705]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:25:53 compute-0 sshd-session[432705]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:25:53 compute-0 ceph-mon[191930]: pgmap v1437: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:25:53 compute-0 nova_compute[356901]: 2025-10-11 02:25:53.734 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1438: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:25:54 compute-0 nova_compute[356901]: 2025-10-11 02:25:54.462 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:25:54.852 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:25:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:25:54.854 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:25:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:25:54.855 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:25:54 compute-0 nova_compute[356901]: 2025-10-11 02:25:54.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:25:55 compute-0 ceph-mon[191930]: pgmap v1438: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:25:55 compute-0 sshd-session[432705]: Failed password for invalid user debian from 121.227.153.123 port 49960 ssh2
Oct 11 02:25:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:25:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1439: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:25:56 compute-0 podman[432707]: 2025-10-11 02:25:56.24596124 +0000 UTC m=+0.130931943 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, build-date=2024-09-18T21:23:30, io.openshift.tags=base rhel9, vcs-type=git, managed_by=edpm_ansible, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, container_name=kepler, summary=Provides the latest release of Red Hat Universal Base Image 9., io.k8s.display-name=Red Hat Universal Base Image 9, version=9.4, com.redhat.component=ubi9-container, io.buildah.version=1.29.0, io.openshift.expose-services=, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, release-0.7.12=, config_id=edpm, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, name=ubi9, architecture=x86_64)
Oct 11 02:25:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:25:56
Oct 11 02:25:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:25:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:25:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['volumes', 'cephfs.cephfs.data', '.rgw.root', '.mgr', 'vms', 'default.rgw.log', 'cephfs.cephfs.meta', 'images', 'default.rgw.control', 'default.rgw.meta', 'backups']
Oct 11 02:25:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:25:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:25:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:25:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:25:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:25:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:25:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:25:56 compute-0 nova_compute[356901]: 2025-10-11 02:25:56.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:25:56 compute-0 nova_compute[356901]: 2025-10-11 02:25:56.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:25:57 compute-0 nova_compute[356901]: 2025-10-11 02:25:57.071 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:25:57 compute-0 nova_compute[356901]: 2025-10-11 02:25:57.072 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:25:57 compute-0 nova_compute[356901]: 2025-10-11 02:25:57.073 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:25:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:25:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:25:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:25:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:25:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:25:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:25:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:25:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:25:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:25:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:25:57 compute-0 ceph-mon[191930]: pgmap v1439: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 5.3 KiB/s wr, 0 op/s
Oct 11 02:25:57 compute-0 sshd-session[432705]: Connection closed by invalid user debian 121.227.153.123 port 49960 [preauth]
Oct 11 02:25:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1440: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.572 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "358d31cf-2866-416a-b2fc-814ee4bfe89a" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.573 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a" acquired by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.583 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Updating instance_info_cache with network_info: [{"id": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "address": "fa:16:3e:c2:ee:14", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.80", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa7108c4c-c9", "ovs_interfaceid": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.605 2 DEBUG nova.compute.manager [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Starting instance... _do_build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2402
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.610 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.611 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.614 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.614 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.616 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.658 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.662 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.003s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.662 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.664 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.665 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.739 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.859 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.860 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.872 2 DEBUG nova.virt.hardware [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Require both a host and instance NUMA topology to fit instance on host. numa_fit_instance_to_host /usr/lib/python3.9/site-packages/nova/virt/hardware.py:2368
Oct 11 02:25:58 compute-0 nova_compute[356901]: 2025-10-11 02:25:58.872 2 INFO nova.compute.claims [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Claim successful on node compute-0.ctlplane.example.com
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.116 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:25:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:25:59 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/719288367' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.177 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.512s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.303 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.306 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.306 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:25:59 compute-0 ceph-mon[191930]: pgmap v1440: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:25:59 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/719288367' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.315 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.316 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.318 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.328 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.329 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.330 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.467 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:25:59 compute-0 sshd-session[432727]: Invalid user debian from 121.227.153.123 port 49964
Oct 11 02:25:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:25:59 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3974119598' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.667 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.551s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.681 2 DEBUG nova.compute.provider_tree [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.701 2 DEBUG nova.scheduler.client.report [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.722 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: held 0.862s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.724 2 DEBUG nova.compute.manager [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Start building networks asynchronously for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2799
Oct 11 02:25:59 compute-0 podman[157119]: time="2025-10-11T02:25:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:25:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:25:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:25:59 compute-0 sshd-session[432727]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:25:59 compute-0 sshd-session[432727]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.775 2 DEBUG nova.compute.manager [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Allocating IP information in the background. _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1952
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.776 2 DEBUG nova.network.neutron [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] allocate_for_instance() allocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1156
Oct 11 02:25:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:25:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9056 "" "Go-http-client/1.1"
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.798 2 INFO nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Ignoring supplied device name: /dev/vda. Libvirt can't honour user-supplied dev names
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.849 2 DEBUG nova.compute.manager [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Start building block device mappings for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2834
Oct 11 02:25:59 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:25:59.874 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '7'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.931 2 DEBUG nova.compute.manager [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Start spawning the instance on the hypervisor. _build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2608
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.933 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Creating instance directory _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4723
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.933 2 INFO nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Creating image(s)
Oct 11 02:25:59 compute-0 nova_compute[356901]: 2025-10-11 02:25:59.975 2 DEBUG nova.storage.rbd_utils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 358d31cf-2866-416a-b2fc-814ee4bfe89a_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.041 2 DEBUG nova.storage.rbd_utils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 358d31cf-2866-416a-b2fc-814ee4bfe89a_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.099 2 DEBUG nova.storage.rbd_utils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 358d31cf-2866-416a-b2fc-814ee4bfe89a_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.112 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:26:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1441: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.214 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d --force-share --output=json" returned: 0 in 0.103s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.216 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "c2a4b3f256e07592b38b9a83d173b78feaa2ba6d" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.217 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "c2a4b3f256e07592b38b9a83d173b78feaa2ba6d" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.217 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "c2a4b3f256e07592b38b9a83d173b78feaa2ba6d" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.267 2 DEBUG nova.storage.rbd_utils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 358d31cf-2866-416a-b2fc-814ee4bfe89a_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.283 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d 358d31cf-2866-416a-b2fc-814ee4bfe89a_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:26:00 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3974119598' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.355 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.357 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3423MB free_disk=59.88883590698242GB free_vcpus=5 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.358 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.358 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:26:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.439 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.440 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance d60d7ea1-5d00-4902-90e6-3ae67eb09a78 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.441 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 7513b93e-d2b8-4ae0-8f1c-3df190945259 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.441 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 358d31cf-2866-416a-b2fc-814ee4bfe89a actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.442 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 4 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.442 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=2560MB phys_disk=59GB used_disk=8GB total_vcpus=8 used_vcpus=4 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.550 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.695 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d 358d31cf-2866-416a-b2fc-814ee4bfe89a_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.413s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:26:00 compute-0 nova_compute[356901]: 2025-10-11 02:26:00.822 2 DEBUG nova.storage.rbd_utils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] resizing rbd image 358d31cf-2866-416a-b2fc-814ee4bfe89a_disk to 1073741824 resize /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:288
Oct 11 02:26:01 compute-0 nova_compute[356901]: 2025-10-11 02:26:01.031 2 DEBUG nova.objects.instance [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lazy-loading 'migration_context' on Instance uuid 358d31cf-2866-416a-b2fc-814ee4bfe89a obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:26:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:26:01 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3222710133' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:26:01 compute-0 nova_compute[356901]: 2025-10-11 02:26:01.095 2 DEBUG nova.storage.rbd_utils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 358d31cf-2866-416a-b2fc-814ee4bfe89a_disk.eph0 does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:26:01 compute-0 nova_compute[356901]: 2025-10-11 02:26:01.148 2 DEBUG nova.storage.rbd_utils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 358d31cf-2866-416a-b2fc-814ee4bfe89a_disk.eph0 does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:26:01 compute-0 nova_compute[356901]: 2025-10-11 02:26:01.159 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/ephemeral_1_0706d66 --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:26:01 compute-0 nova_compute[356901]: 2025-10-11 02:26:01.184 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.635s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:26:01 compute-0 nova_compute[356901]: 2025-10-11 02:26:01.196 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:26:01 compute-0 nova_compute[356901]: 2025-10-11 02:26:01.211 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:26:01 compute-0 nova_compute[356901]: 2025-10-11 02:26:01.236 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/ephemeral_1_0706d66 --force-share --output=json" returned: 0 in 0.077s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:26:01 compute-0 nova_compute[356901]: 2025-10-11 02:26:01.237 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:26:01 compute-0 nova_compute[356901]: 2025-10-11 02:26:01.238 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.880s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:26:01 compute-0 nova_compute[356901]: 2025-10-11 02:26:01.239 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "ephemeral_1_0706d66" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:26:01 compute-0 nova_compute[356901]: 2025-10-11 02:26:01.240 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "ephemeral_1_0706d66" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:26:01 compute-0 nova_compute[356901]: 2025-10-11 02:26:01.241 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "ephemeral_1_0706d66" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:26:01 compute-0 nova_compute[356901]: 2025-10-11 02:26:01.282 2 DEBUG nova.storage.rbd_utils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 358d31cf-2866-416a-b2fc-814ee4bfe89a_disk.eph0 does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:26:01 compute-0 nova_compute[356901]: 2025-10-11 02:26:01.292 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/ephemeral_1_0706d66 358d31cf-2866-416a-b2fc-814ee4bfe89a_disk.eph0 --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:26:01 compute-0 ceph-mon[191930]: pgmap v1441: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:01 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3222710133' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:26:01 compute-0 openstack_network_exporter[374316]: ERROR   02:26:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:26:01 compute-0 openstack_network_exporter[374316]: ERROR   02:26:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:26:01 compute-0 openstack_network_exporter[374316]: ERROR   02:26:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:26:01 compute-0 openstack_network_exporter[374316]: ERROR   02:26:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:26:01 compute-0 openstack_network_exporter[374316]: ERROR   02:26:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:26:01 compute-0 anacron[26555]: Job `cron.monthly' started
Oct 11 02:26:01 compute-0 anacron[26555]: Job `cron.monthly' terminated
Oct 11 02:26:01 compute-0 anacron[26555]: Normal exit (3 jobs run)
Oct 11 02:26:01 compute-0 nova_compute[356901]: 2025-10-11 02:26:01.839 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/ephemeral_1_0706d66 358d31cf-2866-416a-b2fc-814ee4bfe89a_disk.eph0 --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.547s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:26:01 compute-0 sshd-session[432727]: Failed password for invalid user debian from 121.227.153.123 port 49964 ssh2
Oct 11 02:26:02 compute-0 nova_compute[356901]: 2025-10-11 02:26:02.061 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Created local disks _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4857
Oct 11 02:26:02 compute-0 nova_compute[356901]: 2025-10-11 02:26:02.062 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Ensure instance console log exists: /var/lib/nova/instances/358d31cf-2866-416a-b2fc-814ee4bfe89a/console.log _ensure_console_log_for_instance /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4609
Oct 11 02:26:02 compute-0 nova_compute[356901]: 2025-10-11 02:26:02.063 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "vgpu_resources" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:26:02 compute-0 nova_compute[356901]: 2025-10-11 02:26:02.064 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "vgpu_resources" acquired by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:26:02 compute-0 nova_compute[356901]: 2025-10-11 02:26:02.064 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "vgpu_resources" "released" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:26:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1442: 321 pgs: 321 active+clean; 208 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 544 KiB/s wr, 0 op/s
Oct 11 02:26:02 compute-0 nova_compute[356901]: 2025-10-11 02:26:02.601 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:26:02 compute-0 nova_compute[356901]: 2025-10-11 02:26:02.638 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:26:02 compute-0 nova_compute[356901]: 2025-10-11 02:26:02.639 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:26:02 compute-0 nova_compute[356901]: 2025-10-11 02:26:02.722 2 DEBUG nova.network.neutron [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Successfully updated port: 7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 _update_port /usr/lib/python3.9/site-packages/nova/network/neutron.py:586
Oct 11 02:26:02 compute-0 nova_compute[356901]: 2025-10-11 02:26:02.744 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "refresh_cache-358d31cf-2866-416a-b2fc-814ee4bfe89a" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:26:02 compute-0 nova_compute[356901]: 2025-10-11 02:26:02.745 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquired lock "refresh_cache-358d31cf-2866-416a-b2fc-814ee4bfe89a" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:26:02 compute-0 nova_compute[356901]: 2025-10-11 02:26:02.746 2 DEBUG nova.network.neutron [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:26:02 compute-0 nova_compute[356901]: 2025-10-11 02:26:02.853 2 DEBUG nova.compute.manager [req-2f20dcaa-375a-4808-893b-2910557121d6 req-d83a86a0-616c-472d-b256-61d822a17136 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Received event network-changed-7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:26:02 compute-0 nova_compute[356901]: 2025-10-11 02:26:02.855 2 DEBUG nova.compute.manager [req-2f20dcaa-375a-4808-893b-2910557121d6 req-d83a86a0-616c-472d-b256-61d822a17136 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Refreshing instance network info cache due to event network-changed-7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:26:02 compute-0 nova_compute[356901]: 2025-10-11 02:26:02.857 2 DEBUG oslo_concurrency.lockutils [req-2f20dcaa-375a-4808-893b-2910557121d6 req-d83a86a0-616c-472d-b256-61d822a17136 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-358d31cf-2866-416a-b2fc-814ee4bfe89a" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:26:02 compute-0 nova_compute[356901]: 2025-10-11 02:26:02.956 2 DEBUG nova.network.neutron [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Instance cache missing network info. _get_preexisting_port_ids /usr/lib/python3.9/site-packages/nova/network/neutron.py:3323
Oct 11 02:26:03 compute-0 ceph-mon[191930]: pgmap v1442: 321 pgs: 321 active+clean; 208 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 544 KiB/s wr, 0 op/s
Oct 11 02:26:03 compute-0 nova_compute[356901]: 2025-10-11 02:26:03.743 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:03 compute-0 nova_compute[356901]: 2025-10-11 02:26:03.877 2 DEBUG nova.network.neutron [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Updating instance_info_cache with network_info: [{"id": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "address": "fa:16:3e:b0:ca:41", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.152", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.173", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap7f4342b0-8a", "ovs_interfaceid": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:26:03 compute-0 nova_compute[356901]: 2025-10-11 02:26:03.898 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Releasing lock "refresh_cache-358d31cf-2866-416a-b2fc-814ee4bfe89a" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:26:03 compute-0 nova_compute[356901]: 2025-10-11 02:26:03.900 2 DEBUG nova.compute.manager [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Instance network_info: |[{"id": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "address": "fa:16:3e:b0:ca:41", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.152", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.173", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap7f4342b0-8a", "ovs_interfaceid": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}]| _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1967
Oct 11 02:26:03 compute-0 nova_compute[356901]: 2025-10-11 02:26:03.901 2 DEBUG oslo_concurrency.lockutils [req-2f20dcaa-375a-4808-893b-2910557121d6 req-d83a86a0-616c-472d-b256-61d822a17136 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-358d31cf-2866-416a-b2fc-814ee4bfe89a" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:26:03 compute-0 nova_compute[356901]: 2025-10-11 02:26:03.903 2 DEBUG nova.network.neutron [req-2f20dcaa-375a-4808-893b-2910557121d6 req-d83a86a0-616c-472d-b256-61d822a17136 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Refreshing network info cache for port 7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:26:03 compute-0 nova_compute[356901]: 2025-10-11 02:26:03.909 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Start _get_guest_xml network_info=[{"id": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "address": "fa:16:3e:b0:ca:41", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.152", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.173", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap7f4342b0-8a", "ovs_interfaceid": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}] disk_info={'disk_bus': 'virtio', 'cdrom_bus': 'sata', 'mapping': {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, 'disk.config': {'bus': 'sata', 'dev': 'sda', 'type': 'cdrom'}}} image_meta=ImageMeta(checksum='b874c39491a2377b8490f5f1e89761a4',container_format='bare',created_at=2025-10-11T02:17:33Z,direct_url=<?>,disk_format='qcow2',id=a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7,min_disk=0,min_ram=0,name='cirros',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=16300544,status='active',tags=<?>,updated_at=2025-10-11T02:17:37Z,virtual_size=<?>,visibility=<?>) rescue=None block_device_info={'root_device_name': '/dev/vda', 'image': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'boot_index': 0, 'device_name': '/dev/vda', 'size': 0, 'encryption_format': None, 'image_id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}], 'ephemerals': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'device_name': '/dev/vdb', 'size': 1, 'encryption_format': None}], 'block_device_mapping': [], 'swap': None} _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7549
Oct 11 02:26:03 compute-0 nova_compute[356901]: 2025-10-11 02:26:03.924 2 WARNING nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:26:03 compute-0 nova_compute[356901]: 2025-10-11 02:26:03.946 2 DEBUG nova.virt.libvirt.host [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V1... _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1653
Oct 11 02:26:03 compute-0 nova_compute[356901]: 2025-10-11 02:26:03.949 2 DEBUG nova.virt.libvirt.host [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CPU controller missing on host. _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1663
Oct 11 02:26:03 compute-0 nova_compute[356901]: 2025-10-11 02:26:03.993 2 DEBUG nova.virt.libvirt.host [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V2... _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1672
Oct 11 02:26:03 compute-0 nova_compute[356901]: 2025-10-11 02:26:03.996 2 DEBUG nova.virt.libvirt.host [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CPU controller found on host. _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1679
Oct 11 02:26:03 compute-0 nova_compute[356901]: 2025-10-11 02:26:03.997 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CPU mode 'host-model' models '' was chosen, with extra flags: '' _get_guest_cpu_model_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:5396
Oct 11 02:26:03 compute-0 nova_compute[356901]: 2025-10-11 02:26:03.998 2 DEBUG nova.virt.hardware [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Getting desirable topologies for flavor Flavor(created_at=2025-10-11T02:17:41Z,deleted=False,deleted_at=None,description=None,disabled=False,ephemeral_gb=1,extra_specs={},flavorid='486e1451-345c-45d6-b075-f4717e759025',id=1,is_public=True,memory_mb=512,name='m1.small',projects=<?>,root_gb=1,rxtx_factor=1.0,swap=0,updated_at=None,vcpu_weight=0,vcpus=1) and image_meta ImageMeta(checksum='b874c39491a2377b8490f5f1e89761a4',container_format='bare',created_at=2025-10-11T02:17:33Z,direct_url=<?>,disk_format='qcow2',id=a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7,min_disk=0,min_ram=0,name='cirros',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=16300544,status='active',tags=<?>,updated_at=2025-10-11T02:17:37Z,virtual_size=<?>,visibility=<?>), allow threads: True _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:563
Oct 11 02:26:04 compute-0 nova_compute[356901]: 2025-10-11 02:26:04.000 2 DEBUG nova.virt.hardware [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Flavor limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:348
Oct 11 02:26:04 compute-0 nova_compute[356901]: 2025-10-11 02:26:04.001 2 DEBUG nova.virt.hardware [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Image limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:352
Oct 11 02:26:04 compute-0 nova_compute[356901]: 2025-10-11 02:26:04.002 2 DEBUG nova.virt.hardware [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Flavor pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:388
Oct 11 02:26:04 compute-0 nova_compute[356901]: 2025-10-11 02:26:04.003 2 DEBUG nova.virt.hardware [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Image pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:392
Oct 11 02:26:04 compute-0 nova_compute[356901]: 2025-10-11 02:26:04.004 2 DEBUG nova.virt.hardware [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Chose sockets=0, cores=0, threads=0; limits were sockets=65536, cores=65536, threads=65536 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:430
Oct 11 02:26:04 compute-0 nova_compute[356901]: 2025-10-11 02:26:04.005 2 DEBUG nova.virt.hardware [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Topology preferred VirtCPUTopology(cores=0,sockets=0,threads=0), maximum VirtCPUTopology(cores=65536,sockets=65536,threads=65536) _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:569
Oct 11 02:26:04 compute-0 nova_compute[356901]: 2025-10-11 02:26:04.006 2 DEBUG nova.virt.hardware [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Build topologies for 1 vcpu(s) 1:1:1 _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:471
Oct 11 02:26:04 compute-0 nova_compute[356901]: 2025-10-11 02:26:04.007 2 DEBUG nova.virt.hardware [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Got 1 possible topologies _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:501
Oct 11 02:26:04 compute-0 nova_compute[356901]: 2025-10-11 02:26:04.008 2 DEBUG nova.virt.hardware [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Possible topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:575
Oct 11 02:26:04 compute-0 nova_compute[356901]: 2025-10-11 02:26:04.009 2 DEBUG nova.virt.hardware [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Sorted desired topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:577
Oct 11 02:26:04 compute-0 nova_compute[356901]: 2025-10-11 02:26:04.015 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:26:04 compute-0 sshd-session[432727]: Connection closed by invalid user debian 121.227.153.123 port 49964 [preauth]
Oct 11 02:26:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1443: 321 pgs: 321 active+clean; 234 MiB data, 333 MiB used, 60 GiB / 60 GiB avail; 25 KiB/s rd, 1.4 MiB/s wr, 35 op/s
Oct 11 02:26:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:26:04 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2150176867' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:26:04 compute-0 nova_compute[356901]: 2025-10-11 02:26:04.470 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:04 compute-0 nova_compute[356901]: 2025-10-11 02:26:04.482 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.467s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:26:04 compute-0 nova_compute[356901]: 2025-10-11 02:26:04.484 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:26:04 compute-0 systemd[1]: virtsecretd.service: Deactivated successfully.
Oct 11 02:26:04 compute-0 podman[433140]: 2025-10-11 02:26:04.959980349 +0000 UTC m=+0.095872266 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:26:04 compute-0 podman[433143]: 2025-10-11 02:26:04.987106663 +0000 UTC m=+0.110793542 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true)
Oct 11 02:26:05 compute-0 podman[433142]: 2025-10-11 02:26:05.010979503 +0000 UTC m=+0.138891607 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_id=edpm, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, managed_by=edpm_ansible, org.label-schema.vendor=CentOS)
Oct 11 02:26:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:26:05 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/334188445' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:26:05 compute-0 podman[433141]: 2025-10-11 02:26:05.037461681 +0000 UTC m=+0.179610904 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, container_name=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_controller)
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.055 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.571s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.092 2 DEBUG nova.storage.rbd_utils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 358d31cf-2866-416a-b2fc-814ee4bfe89a_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
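
The "does not exist" debug line is how nova.storage.rbd_utils reports a failed image open: the config-drive image has not been created yet at this point in the boot. The underlying check, sketched with the rbd bindings (pool name 'vms' taken from the disk sources later in this spawn):

    import rados
    import rbd

    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf', rados_id='openstack')
    cluster.connect()
    ioctx = cluster.open_ioctx('vms')
    try:
        # Opening the image is the existence test; a missing image raises
        # rbd.ImageNotFound, which is logged above as "does not exist".
        with rbd.Image(ioctx, '358d31cf-2866-416a-b2fc-814ee4bfe89a_disk.config'):
            print('image exists')
    except rbd.ImageNotFound:
        print('image does not exist')
    finally:
        ioctx.close()
        cluster.shutdown()
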
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.102 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:26:05 compute-0 ceph-mon[191930]: pgmap v1443: 321 pgs: 321 active+clean; 234 MiB data, 333 MiB used, 60 GiB / 60 GiB avail; 25 KiB/s rd, 1.4 MiB/s wr, 35 op/s
Oct 11 02:26:05 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2150176867' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:26:05 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/334188445' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:26:05 compute-0 sshd-session[433115]: Invalid user debian from 121.227.153.123 port 37904
Oct 11 02:26:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:26:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:26:05 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/992107428' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.553 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.451s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.556 2 DEBUG nova.virt.libvirt.vif [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:25:57Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description=None,display_name='vn-vgckve2-tqko7trrsvwg-ebwakep2a2y3-vnf-ihxi227vdpwh',ec2_ids=EC2Ids,ephemeral_gb=1,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(1),hidden=False,host='compute-0.ctlplane.example.com',hostname='vn-vgckve2-tqko7trrsvwg-ebwakep2a2y3-vnf-ihxi227vdpwh',id=4,image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',info_cache=InstanceInfoCache,instance_type_id=1,kernel_id='',key_data=None,key_name=None,keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=512,metadata={metering.server_group='3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='97026531b3404a11869cb85a059c4a0d',ramdisk_id='',reservation_id='r-3vv490li',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,admin,reader',image_base_image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_min_disk='1',image_min_ram='0',image_owner_specified.openstack.md5='',image_owner_specified.openstack.object='images/cirros',image_owner_specified.openstack.sha256='',network_allocated='True',owner_project_name='admin',owner_user_name='admin'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:25:59Z,user_data='Q29udGVudC1UeXBlOiBtdWx0aXBhcnQvbWl4ZWQ7IGJvdW5kYXJ5PSI9PT09PT09PT09PT09PT00MTYzNzY0OTY1NzU5NDY3NDE0PT0iCk1JTUUtVmVyc2lvbjogMS4wCgotLT09PT09PT09PT09PT09PTQxNjM3NjQ5NjU3NTk0Njc0MTQ9PQpDb250ZW50LVR5cGU6IHRleHQvY2xvdWQtY29uZmlnOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2xvdWQtY29uZmlnIgoKCgojIENhcHR1cmUgYWxsIHN1YnByb2Nlc3Mgb3V0cHV0IGludG8gYSBsb2dmaWxlCiMgVXNlZnVsIGZvciB0cm91Ymxlc2hvb3RpbmcgY2xvdWQtaW5pdCBpc3N1ZXMKb3V0cHV0OiB7YWxsOiAnfCB0ZWUgLWEgL3Zhci9sb2cvY2xvdWQtaW5pdC1vdXRwdXQubG9nJ30KCi0tPT09PT09PT09PT09PT09NDE2Mzc2NDk2NTc1OTQ2NzQxND09CkNvbnRlbnQtVHlwZTogdGV4dC9jbG91ZC1ib290aG9vazsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImJvb3Rob29rLnNoIgoKIyEvdXNyL2Jpbi9iYXNoCgojIEZJWE1FKHNoYWRvd2VyKSB0aGlzIGlzIGEgd29ya2Fyb3VuZCBmb3IgY2xvdWQtaW5pdCAwLjYuMyBwcmVzZW50IGluIFVidW50dQojIDEyLjA0IExUUzoKIyBodHRwczovL2J1Z3MubGF1bmNocGFkLm5ldC9oZWF0LytidWcvMTI1NzQxMAojCiMgVGhlIG9sZCBjbG91ZC1pbml0IGRvZXNuJ3QgY3JlYXRlIHRoZSB1c2VycyBkaXJlY3RseSBzbyB0aGUgY29tbWFuZHMgdG8gZG8KIyB0aGlzIGFyZSBpbmplY3RlZCB0aG91Z2ggbm92YV91dGlscy5weS4KIwojIE9uY2Ugd2UgZHJvcCBzdXBwb3J0IGZvciAwLjYuMywgd2UgY2FuIHNhZmVseSByZW1vdmUgdGhpcy4KCgojIGluIGNhc2UgaGVhdC1jZm50b29scyBoYXMgY
mVlbiBpbnN0YWxsZWQgZnJvbSBwYWNrYWdlIGJ1dCBubyBzeW1saW5rcwojIGFyZSB5ZXQgaW4gL29wdC9hd3MvYmluLwpjZm4tY3JlYXRlLWF3cy1zeW1saW5rcwoKIyBEbyBub3QgcmVtb3ZlIC0gdGhlIGNsb3VkIGJvb3Rob29rIHNob3VsZCBhbHdheXMgcmV0dXJuIHN1Y2Nlc3MKZXhpdCAwCgotLT09PT09PT09PT09PT09PTQxNjM3NjQ5NjU3NTk0Njc0MTQ9PQpDb250ZW50LVR5cGU6IHRleHQvcGFydC1oYW5kbGVyOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0icGFydC1oYW5kbGVyLnB5IgoKIyBwYXJ0LWhhbmRsZXIKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wCiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZWQgdW5kZXIgdGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBvcwppbXBvcnQgc3lzCgoKZGVmIGxpc3RfdHlwZXMoKToKICAgIHJldHVybiBbInRleHQveC1jZm5pbml0ZGF0YSJdCgoKZGVmIGhhbmRsZV9wYXJ0KGRhdGEsIGN0eXBlLCBmaWxlbmFtZSwgcGF5bG9hZCk6CiAgICBpZiBjdHlwZSA9PSAiX19iZWdpbl9fIjoKICAgICAgICB0cnk6CiAgICAgICAgICAgIG9zLm1ha2VkaXJzKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzJywgaW50KCI3MDAiLCA4KSkKICAgICAgICBleGNlcHQgT1NFcnJvcjoKICAgICAgICAgICAgZXhfdHlwZSwgZSwgdGIgPSBzeXMuZXhjX2luZm8oKQogICAgICAgICAgICBpZiBlLmVycm5vICE9IGVycm5vLkVFWElTVDoKICAgICAgICAgICAgICAgIHJhaXNlCiAgICAgICAgcmV0dXJuCgogICAgaWYgY3R5cGUgPT0gIl9fZW5kX18iOgogICAgICAgIHJldHVybgoKICAgIHRpbWVzdGFtcCA9IGRhdGV0aW1lLmRhdGV0aW1lLm5vdygpCiAgICB3aXRoIG9wZW4oJy92YXIvbG9nL3BhcnQtaGFuZGxlci5sb2cnLCAnYScpIGFzIGxvZzoKICAgICAgICBsb2cud3JpdGUoJyVzIGZpbGVuYW1lOiVzLCBjdHlwZTolc1xuJyAlICh0aW1lc3RhbXAsIGZpbGVuYW1lLCBjdHlwZSkpCgogICAgaWYgY3R5cGUgPT0gJ3RleHQveC1jZm5pbml0ZGF0YSc6CiAgICAgICAgd2l0aCBvcGVuKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzLyVzJyAlIGZpbGVuYW1lLCAndycpIGFzIGY6CiAgICAgICAgICAgIGYud3JpdGUocGF5bG9hZCkKCiAgICAgICAgIyBUT0RPKHNkYWtlKSBob3BlZnVsbHkgdGVtcG9yYXJ5IHVudGlsIHVzZXJzIG1vdmUgdG8gaGVhdC1jZm50b29scy0xLjMKICAgICAgICB3aXRoIG9wZW4oJy92YXIvbGliL2Nsb3VkL2RhdGEvJXMnICUgZmlsZW5hbWUsICd3JykgYXMgZjoKICAgICAgICAgICAgZi53cml0ZShwYXlsb2FkKQoKLS09PT09PT09PT09PT09PT00MTYzNzY0OTY1NzU5NDY3NDE0PT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtY2ZuaW5pdGRhdGE7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJjZm4tdXNlcmRhdGEiCgoKLS09PT09PT09PT09PT09PT00MTYzNzY0OTY1NzU5NDY3NDE0PT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtc2hlbGxzY3JpcHQ7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJsb2d1c2VyZGF0YS5weSIKCiMhL3Vzci9iaW4vZW52IHB5dGhvbjMKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wCiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZWQgdW5kZXIg
dGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBsb2dnaW5nCmltcG9ydCBvcwppbXBvcnQgc3VicHJvY2VzcwppbXBvcnQgc3lzCgoKVkFSX1BBVEggPSAnL3Zhci9saWIvaGVhdC1jZm50b29scycKTE9HID0gbG9nZ2luZy5nZXRMb2dnZXIoJ2hlYXQtcHJvdmlzaW9uJykKCgpkZWYgaW5pdF9sb2dnaW5nKCk6CiAgICBMT0cuc2V0TGV2ZWwobG9nZ2luZy5JTkZPKQogICAgTE9HLmFkZEhhbmRsZXIobG9nZ2luZy5TdHJlYW1IYW5kbGVyKCkpCiAgICBmaCA9IGxvZ2dpbmcuRmlsZUhhbmRsZXIoIi92YXIvbG9nL2hlYXQtcHJvdmlzaW9uLmxvZyIpCiAgICBvcy5jaG1vZChmaC5iYXNlRmlsZW5hbWUsIGludCgiNjAwIiwgOCkpCiAgICBMT0cuYWRkSGFuZGxlcihmaCkKCgpkZWYgY2FsbChhcmdzKToKCiAgICBjbGFzcyBMb2dTdHJlYW0ob2JqZWN0KToKCiAgICAgICAgZGVmIHdyaXRlKHNlbGYsIGRhdGEpOgogICAgICAgICAgICBMT0cuaW5mbyhkYXRhKQoKICAgIExPRy5pbmZvKCclc1xuJywgJyAnLmpvaW4oYXJncykpICAjIG5vcWEKICAgIHRyeToKICAgICAgICBscyA9IExvZ1N0cmVhbSgpCiAgICAgICAgcCA9IHN1YnByb2Nlc3MuUG9wZW4oYXJncywgc3
Oct 11 02:26:05 compute-0 nova_compute[356901]: Rkb3V0PXN1YnByb2Nlc3MuUElQRSwKICAgICAgICAgICAgICAgICAgICAgICAgICAgICBzdGRlcnI9c3VicHJvY2Vzcy5QSVBFKQogICAgICAgIGRhdGEgPSBwLmNvbW11bmljYXRlKCkKICAgICAgICBpZiBkYXRhOgogICAgICAgICAgICBmb3IgeCBpbiBkYXRhOgogICAgICAgICAgICAgICAgbHMud3JpdGUoeCkKICAgIGV4Y2VwdCBPU0Vycm9yOgogICAgICAgIGV4X3R5cGUsIGV4LCB0YiA9IHN5cy5leGNfaW5mbygpCiAgICAgICAgaWYgZXguZXJybm8gPT0gZXJybm8uRU5PRVhFQzoKICAgICAgICAgICAgTE9HLmVycm9yKCdVc2VyZGF0YSBlbXB0eSBvciBub3QgZXhlY3V0YWJsZTogJXMnLCBleCkKICAgICAgICAgICAgcmV0dXJuIG9zLkVYX09LCiAgICAgICAgZWxzZToKICAgICAgICAgICAgTE9HLmVycm9yKCdPUyBlcnJvciBydW5uaW5nIHVzZXJkYXRhOiAlcycsIGV4KQogICAgICAgICAgICByZXR1cm4gb3MuRVhfT1NFUlIKICAgIGV4Y2VwdCBFeGNlcHRpb246CiAgICAgICAgZXhfdHlwZSwgZXgsIHRiID0gc3lzLmV4Y19pbmZvKCkKICAgICAgICBMT0cuZXJyb3IoJ1Vua25vd24gZXJyb3IgcnVubmluZyB1c2VyZGF0YTogJXMnLCBleCkKICAgICAgICByZXR1cm4gb3MuRVhfU09GVFdBUkUKICAgIHJldHVybiBwLnJldHVybmNvZGUKCgpkZWYgbWFpbigpOgogICAgdXNlcmRhdGFfcGF0aCA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ2Nmbi11c2VyZGF0YScpCiAgICBvcy5jaG1vZCh1c2VyZGF0YV9wYXRoLCBpbnQoIjcwMCIsIDgpKQoKICAgIExPRy5pbmZvKCdQcm92aXNpb24gYmVnYW46ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICByZXR1cm5jb2RlID0gY2FsbChbdXNlcmRhdGFfcGF0aF0pCiAgICBMT0cuaW5mbygnUHJvdmlzaW9uIGRvbmU6ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICBpZiByZXR1cm5jb2RlOgogICAgICAgIHJldHVybiByZXR1cm5jb2RlCgoKaWYgX19uYW1lX18gPT0gJ19fbWFpbl9fJzoKICAgIGluaXRfbG9nZ2luZygpCgogICAgY29kZSA9IG1haW4oKQogICAgaWYgY29kZToKICAgICAgICBMT0cuZXJyb3IoJ1Byb3Zpc2lvbiBmYWlsZWQgd2l0aCBleGl0IGNvZGUgJXMnLCBjb2RlKQogICAgICAgIHN5cy5leGl0KGNvZGUpCgogICAgcHJvdmlzaW9uX2xvZyA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ3Byb3Zpc2lvbi1maW5pc2hlZCcpCiAgICAjIHRvdWNoIHRoZSBmaWxlIHNvIGl0IGlzIHRpbWVzdGFtcGVkIHdpdGggd2hlbiBmaW5pc2hlZAogICAgd2l0aCBvcGVuKHByb3Zpc2lvbl9sb2csICdhJyk6CiAgICAgICAgb3MudXRpbWUocHJvdmlzaW9uX2xvZywgTm9uZSkKCi0tPT09PT09PT09PT09PT09NDE2Mzc2NDk2NTc1OTQ2NzQxND09CkNvbnRlbnQtVHlwZTogdGV4dC94LWNmbmluaXRkYXRhOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2ZuLW1ldGFkYXRhLXNlcnZlciIKCmh0dHBzOi8vaGVhdC1jZm5hcGktaW50ZXJuYWwub3BlbnN0YWNrLnN2Yzo4MDAwL3YxLwotLT09PT09PT09PT09PT09PTQxNjM3NjQ5NjU3NTk0Njc0MTQ9PQpDb250ZW50LVR5cGU6IHRleHQveC1jZm5pbml0ZGF0YTsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImNmbi1ib3RvLWNmZyIKCltCb3RvXQpkZWJ1ZyA9IDAKaXNfc2VjdXJlID0gMApodHRwc192YWxpZGF0ZV9jZXJ0aWZpY2F0ZXMgPSAxCmNmbl9yZWdpb25fbmFtZSA9IGhlYXQKY2ZuX3JlZ2lvbl9lbmRwb2ludCA9IGhlYXQtY2ZuYXBpLWludGVybmFsLm9wZW5zdGFjay5zdmMKLS09PT09PT09PT09PT09PT00MTYzNzY0OTY1NzU5NDY3NDE0PT0tLQo=',user_id='d215f3ebbc07435493ccd666fc80109d',uuid=358d31cf-2866-416a-b2fc-814ee4bfe89a,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "address": "fa:16:3e:b0:ca:41", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.152", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.173", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", 
"details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap7f4342b0-8a", "ovs_interfaceid": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} virt_type=kvm get_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:563
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.557 2 DEBUG nova.network.os_vif_util [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converting VIF {"id": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "address": "fa:16:3e:b0:ca:41", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.152", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.173", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap7f4342b0-8a", "ovs_interfaceid": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.558 2 DEBUG nova.network.os_vif_util [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:b0:ca:41,bridge_name='br-int',has_traffic_filtering=True,id=7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tap7f4342b0-8a') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.560 2 DEBUG nova.objects.instance [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lazy-loading 'pci_devices' on Instance uuid 358d31cf-2866-416a-b2fc-814ee4bfe89a obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.580 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] End _get_guest_xml xml=<domain type="kvm">
Oct 11 02:26:05 compute-0 nova_compute[356901]:   <uuid>358d31cf-2866-416a-b2fc-814ee4bfe89a</uuid>
Oct 11 02:26:05 compute-0 nova_compute[356901]:   <name>instance-00000004</name>
Oct 11 02:26:05 compute-0 nova_compute[356901]:   <memory>524288</memory>
Oct 11 02:26:05 compute-0 nova_compute[356901]:   <vcpu>1</vcpu>
Oct 11 02:26:05 compute-0 nova_compute[356901]:   <metadata>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.1">
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <nova:package version="27.5.2-0.20250829104910.6f8decf.el9"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <nova:name>vn-vgckve2-tqko7trrsvwg-ebwakep2a2y3-vnf-ihxi227vdpwh</nova:name>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <nova:creationTime>2025-10-11 02:26:03</nova:creationTime>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <nova:flavor name="m1.small">
Oct 11 02:26:05 compute-0 nova_compute[356901]:         <nova:memory>512</nova:memory>
Oct 11 02:26:05 compute-0 nova_compute[356901]:         <nova:disk>1</nova:disk>
Oct 11 02:26:05 compute-0 nova_compute[356901]:         <nova:swap>0</nova:swap>
Oct 11 02:26:05 compute-0 nova_compute[356901]:         <nova:ephemeral>1</nova:ephemeral>
Oct 11 02:26:05 compute-0 nova_compute[356901]:         <nova:vcpus>1</nova:vcpus>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       </nova:flavor>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <nova:owner>
Oct 11 02:26:05 compute-0 nova_compute[356901]:         <nova:user uuid="d215f3ebbc07435493ccd666fc80109d">admin</nova:user>
Oct 11 02:26:05 compute-0 nova_compute[356901]:         <nova:project uuid="97026531b3404a11869cb85a059c4a0d">admin</nova:project>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       </nova:owner>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <nova:root type="image" uuid="a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <nova:ports>
Oct 11 02:26:05 compute-0 nova_compute[356901]:         <nova:port uuid="7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8">
Oct 11 02:26:05 compute-0 nova_compute[356901]:           <nova:ip type="fixed" address="192.168.0.152" ipVersion="4"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:         </nova:port>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       </nova:ports>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     </nova:instance>
Oct 11 02:26:05 compute-0 nova_compute[356901]:   </metadata>
Oct 11 02:26:05 compute-0 nova_compute[356901]:   <sysinfo type="smbios">
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <system>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <entry name="manufacturer">RDO</entry>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <entry name="product">OpenStack Compute</entry>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <entry name="version">27.5.2-0.20250829104910.6f8decf.el9</entry>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <entry name="serial">358d31cf-2866-416a-b2fc-814ee4bfe89a</entry>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <entry name="uuid">358d31cf-2866-416a-b2fc-814ee4bfe89a</entry>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <entry name="family">Virtual Machine</entry>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     </system>
Oct 11 02:26:05 compute-0 nova_compute[356901]:   </sysinfo>
Oct 11 02:26:05 compute-0 nova_compute[356901]:   <os>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <type arch="x86_64" machine="q35">hvm</type>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <boot dev="hd"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <smbios mode="sysinfo"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:   </os>
Oct 11 02:26:05 compute-0 nova_compute[356901]:   <features>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <acpi/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <apic/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <vmcoreinfo/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:   </features>
Oct 11 02:26:05 compute-0 nova_compute[356901]:   <clock offset="utc">
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <timer name="pit" tickpolicy="delay"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <timer name="rtc" tickpolicy="catchup"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <timer name="hpet" present="no"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:   </clock>
Oct 11 02:26:05 compute-0 nova_compute[356901]:   <cpu mode="host-model" match="exact">
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <topology sockets="1" cores="1" threads="1"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:26:05 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/358d31cf-2866-416a-b2fc-814ee4bfe89a_disk">
Oct 11 02:26:05 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       </source>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:26:05 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <target dev="vda" bus="virtio"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/358d31cf-2866-416a-b2fc-814ee4bfe89a_disk.eph0">
Oct 11 02:26:05 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       </source>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:26:05 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <target dev="vdb" bus="virtio"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <disk type="network" device="cdrom">
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/358d31cf-2866-416a-b2fc-814ee4bfe89a_disk.config">
Oct 11 02:26:05 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       </source>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:26:05 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <target dev="sda" bus="sata"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <interface type="ethernet">
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <mac address="fa:16:3e:b0:ca:41"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <driver name="vhost" rx_queue_size="512"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <mtu size="1442"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <target dev="tap7f4342b0-8a"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <serial type="pty">
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <log file="/var/lib/nova/instances/358d31cf-2866-416a-b2fc-814ee4bfe89a/console.log" append="off"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     </serial>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <graphics type="vnc" autoport="yes" listen="::0"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <video>
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     </video>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <input type="tablet" bus="usb"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <rng model="virtio">
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <backend model="random">/dev/urandom</backend>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <controller type="usb" index="0"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     <memballoon model="virtio">
Oct 11 02:26:05 compute-0 nova_compute[356901]:       <stats period="10"/>
Oct 11 02:26:05 compute-0 nova_compute[356901]:     </memballoon>
Oct 11 02:26:05 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:26:05 compute-0 nova_compute[356901]: </domain>
Oct 11 02:26:05 compute-0 nova_compute[356901]:  _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7555
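
The XML above is the complete guest definition Nova hands to libvirt for instance-00000004. Outside of Nova, the same document can be fed to libvirt directly; a minimal sketch with the libvirt Python bindings (a standalone illustration, not the exact calls nova.virt.libvirt.driver makes):

    import libvirt

    with open('domain.xml') as f:   # the XML dumped by _get_guest_xml
        xml = f.read()

    conn = libvirt.open('qemu:///system')
    try:
        dom = conn.defineXML(xml)   # register the persistent domain
        dom.create()                # boot it
        print(dom.name(), dom.UUIDString())
    finally:
        conn.close()
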
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.582 2 DEBUG nova.compute.manager [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Preparing to wait for external event network-vif-plugged-7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 prepare_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:283
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.583 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.584 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" acquired by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.584 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" "released" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
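
The Acquiring/acquired/released triplet is oslo.concurrency's named-lock tracing around the per-instance "<uuid>-events" critical section. The same primitive in application code, as a sketch:

    from oslo_concurrency import lockutils

    # In-process named lock; Nova holds it just long enough to
    # create-or-get the pending "network-vif-plugged" event entry.
    with lockutils.lock('358d31cf-2866-416a-b2fc-814ee4bfe89a-events'):
        pass  # mutate the shared per-instance events dict here
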
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.585 2 DEBUG nova.virt.libvirt.vif [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:25:57Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description=None,display_name='vn-vgckve2-tqko7trrsvwg-ebwakep2a2y3-vnf-ihxi227vdpwh',ec2_ids=EC2Ids,ephemeral_gb=1,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(1),hidden=False,host='compute-0.ctlplane.example.com',hostname='vn-vgckve2-tqko7trrsvwg-ebwakep2a2y3-vnf-ihxi227vdpwh',id=4,image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',info_cache=InstanceInfoCache,instance_type_id=1,kernel_id='',key_data=None,key_name=None,keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=512,metadata={metering.server_group='3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=PciDeviceList,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='97026531b3404a11869cb85a059c4a0d',ramdisk_id='',reservation_id='r-3vv490li',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,admin,reader',image_base_image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_min_disk='1',image_min_ram='0',image_owner_specified.openstack.md5='',image_owner_specified.openstack.object='images/cirros',image_owner_specified.openstack.sha256='',network_allocated='True',owner_project_name='admin',owner_user_name='admin'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:25:59Z,user_data='Q29udGVudC1UeXBlOiBtdWx0aXBhcnQvbWl4ZWQ7IGJvdW5kYXJ5PSI9PT09PT09PT09PT09PT00MTYzNzY0OTY1NzU5NDY3NDE0PT0iCk1JTUUtVmVyc2lvbjogMS4wCgotLT09PT09PT09PT09PT09PTQxNjM3NjQ5NjU3NTk0Njc0MTQ9PQpDb250ZW50LVR5cGU6IHRleHQvY2xvdWQtY29uZmlnOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2xvdWQtY29uZmlnIgoKCgojIENhcHR1cmUgYWxsIHN1YnByb2Nlc3Mgb3V0cHV0IGludG8gYSBsb2dmaWxlCiMgVXNlZnVsIGZvciB0cm91Ymxlc2hvb3RpbmcgY2xvdWQtaW5pdCBpc3N1ZXMKb3V0cHV0OiB7YWxsOiAnfCB0ZWUgLWEgL3Zhci9sb2cvY2xvdWQtaW5pdC1vdXRwdXQubG9nJ30KCi0tPT09PT09PT09PT09PT09NDE2Mzc2NDk2NTc1OTQ2NzQxND09CkNvbnRlbnQtVHlwZTogdGV4dC9jbG91ZC1ib290aG9vazsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImJvb3Rob29rLnNoIgoKIyEvdXNyL2Jpbi9iYXNoCgojIEZJWE1FKHNoYWRvd2VyKSB0aGlzIGlzIGEgd29ya2Fyb3VuZCBmb3IgY2xvdWQtaW5pdCAwLjYuMyBwcmVzZW50IGluIFVidW50dQojIDEyLjA0IExUUzoKIyBodHRwczovL2J1Z3MubGF1bmNocGFkLm5ldC9oZWF0LytidWcvMTI1NzQxMAojCiMgVGhlIG9sZCBjbG91ZC1pbml0IGRvZXNuJ3QgY3JlYXRlIHRoZSB1c2VycyBkaXJlY3RseSBzbyB0aGUgY29tbWFuZHMgdG8gZG8KIyB0aGlzIGFyZSBpbmplY3RlZCB0aG91Z2ggbm92YV91dGlscy5weS4KIwojIE9uY2Ugd2UgZHJvcCBzdXBwb3J0IGZvciAwLjYuMywgd2UgY2FuIHNhZmVseSByZW1vdmUgdGhpcy4KCgojIGluIGNhc2UgaGVhdC1jZm50b29
scyBoYXMgYmVlbiBpbnN0YWxsZWQgZnJvbSBwYWNrYWdlIGJ1dCBubyBzeW1saW5rcwojIGFyZSB5ZXQgaW4gL29wdC9hd3MvYmluLwpjZm4tY3JlYXRlLWF3cy1zeW1saW5rcwoKIyBEbyBub3QgcmVtb3ZlIC0gdGhlIGNsb3VkIGJvb3Rob29rIHNob3VsZCBhbHdheXMgcmV0dXJuIHN1Y2Nlc3MKZXhpdCAwCgotLT09PT09PT09PT09PT09PTQxNjM3NjQ5NjU3NTk0Njc0MTQ9PQpDb250ZW50LVR5cGU6IHRleHQvcGFydC1oYW5kbGVyOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0icGFydC1oYW5kbGVyLnB5IgoKIyBwYXJ0LWhhbmRsZXIKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wCiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZWQgdW5kZXIgdGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBvcwppbXBvcnQgc3lzCgoKZGVmIGxpc3RfdHlwZXMoKToKICAgIHJldHVybiBbInRleHQveC1jZm5pbml0ZGF0YSJdCgoKZGVmIGhhbmRsZV9wYXJ0KGRhdGEsIGN0eXBlLCBmaWxlbmFtZSwgcGF5bG9hZCk6CiAgICBpZiBjdHlwZSA9PSAiX19iZWdpbl9fIjoKICAgICAgICB0cnk6CiAgICAgICAgICAgIG9zLm1ha2VkaXJzKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzJywgaW50KCI3MDAiLCA4KSkKICAgICAgICBleGNlcHQgT1NFcnJvcjoKICAgICAgICAgICAgZXhfdHlwZSwgZSwgdGIgPSBzeXMuZXhjX2luZm8oKQogICAgICAgICAgICBpZiBlLmVycm5vICE9IGVycm5vLkVFWElTVDoKICAgICAgICAgICAgICAgIHJhaXNlCiAgICAgICAgcmV0dXJuCgogICAgaWYgY3R5cGUgPT0gIl9fZW5kX18iOgogICAgICAgIHJldHVybgoKICAgIHRpbWVzdGFtcCA9IGRhdGV0aW1lLmRhdGV0aW1lLm5vdygpCiAgICB3aXRoIG9wZW4oJy92YXIvbG9nL3BhcnQtaGFuZGxlci5sb2cnLCAnYScpIGFzIGxvZzoKICAgICAgICBsb2cud3JpdGUoJyVzIGZpbGVuYW1lOiVzLCBjdHlwZTolc1xuJyAlICh0aW1lc3RhbXAsIGZpbGVuYW1lLCBjdHlwZSkpCgogICAgaWYgY3R5cGUgPT0gJ3RleHQveC1jZm5pbml0ZGF0YSc6CiAgICAgICAgd2l0aCBvcGVuKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzLyVzJyAlIGZpbGVuYW1lLCAndycpIGFzIGY6CiAgICAgICAgICAgIGYud3JpdGUocGF5bG9hZCkKCiAgICAgICAgIyBUT0RPKHNkYWtlKSBob3BlZnVsbHkgdGVtcG9yYXJ5IHVudGlsIHVzZXJzIG1vdmUgdG8gaGVhdC1jZm50b29scy0xLjMKICAgICAgICB3aXRoIG9wZW4oJy92YXIvbGliL2Nsb3VkL2RhdGEvJXMnICUgZmlsZW5hbWUsICd3JykgYXMgZjoKICAgICAgICAgICAgZi53cml0ZShwYXlsb2FkKQoKLS09PT09PT09PT09PT09PT00MTYzNzY0OTY1NzU5NDY3NDE0PT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtY2ZuaW5pdGRhdGE7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJjZm4tdXNlcmRhdGEiCgoKLS09PT09PT09PT09PT09PT00MTYzNzY0OTY1NzU5NDY3NDE0PT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtc2hlbGxzY3JpcHQ7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJsb2d1c2VyZGF0YS5weSIKCiMhL3Vzci9iaW4vZW52IHB5dGhvbjMKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wCiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZW
QgdW5kZXIgdGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBsb2dnaW5nCmltcG9ydCBvcwppbXBvcnQgc3VicHJvY2VzcwppbXBvcnQgc3lzCgoKVkFSX1BBVEggPSAnL3Zhci9saWIvaGVhdC1jZm50b29scycKTE9HID0gbG9nZ2luZy5nZXRMb2dnZXIoJ2hlYXQtcHJvdmlzaW9uJykKCgpkZWYgaW5pdF9sb2dnaW5nKCk6CiAgICBMT0cuc2V0TGV2ZWwobG9nZ2luZy5JTkZPKQogICAgTE9HLmFkZEhhbmRsZXIobG9nZ2luZy5TdHJlYW1IYW5kbGVyKCkpCiAgICBmaCA9IGxvZ2dpbmcuRmlsZUhhbmRsZXIoIi92YXIvbG9nL2hlYXQtcHJvdmlzaW9uLmxvZyIpCiAgICBvcy5jaG1vZChmaC5iYXNlRmlsZW5hbWUsIGludCgiNjAwIiwgOCkpCiAgICBMT0cuYWRkSGFuZGxlcihmaCkKCgpkZWYgY2FsbChhcmdzKToKCiAgICBjbGFzcyBMb2dTdHJlYW0ob2JqZWN0KToKCiAgICAgICAgZGVmIHdyaXRlKHNlbGYsIGRhdGEpOgogICAgICAgICAgICBMT0cuaW5mbyhkYXRhKQoKICAgIExPRy5pbmZvKCclc1xuJywgJyAnLmpvaW4oYXJncykpICAjIG5vcWEKICAgIHRyeToKICAgICAgICBscyA9IExvZ1N0cmVhbSgpCiAgICAgICAgcCA9IHN1YnByb2Nlc3MuUG9wZW4o
Oct 11 02:26:05 compute-0 nova_compute[356901]: YXJncywgc3Rkb3V0PXN1YnByb2Nlc3MuUElQRSwKICAgICAgICAgICAgICAgICAgICAgICAgICAgICBzdGRlcnI9c3VicHJvY2Vzcy5QSVBFKQogICAgICAgIGRhdGEgPSBwLmNvbW11bmljYXRlKCkKICAgICAgICBpZiBkYXRhOgogICAgICAgICAgICBmb3IgeCBpbiBkYXRhOgogICAgICAgICAgICAgICAgbHMud3JpdGUoeCkKICAgIGV4Y2VwdCBPU0Vycm9yOgogICAgICAgIGV4X3R5cGUsIGV4LCB0YiA9IHN5cy5leGNfaW5mbygpCiAgICAgICAgaWYgZXguZXJybm8gPT0gZXJybm8uRU5PRVhFQzoKICAgICAgICAgICAgTE9HLmVycm9yKCdVc2VyZGF0YSBlbXB0eSBvciBub3QgZXhlY3V0YWJsZTogJXMnLCBleCkKICAgICAgICAgICAgcmV0dXJuIG9zLkVYX09LCiAgICAgICAgZWxzZToKICAgICAgICAgICAgTE9HLmVycm9yKCdPUyBlcnJvciBydW5uaW5nIHVzZXJkYXRhOiAlcycsIGV4KQogICAgICAgICAgICByZXR1cm4gb3MuRVhfT1NFUlIKICAgIGV4Y2VwdCBFeGNlcHRpb246CiAgICAgICAgZXhfdHlwZSwgZXgsIHRiID0gc3lzLmV4Y19pbmZvKCkKICAgICAgICBMT0cuZXJyb3IoJ1Vua25vd24gZXJyb3IgcnVubmluZyB1c2VyZGF0YTogJXMnLCBleCkKICAgICAgICByZXR1cm4gb3MuRVhfU09GVFdBUkUKICAgIHJldHVybiBwLnJldHVybmNvZGUKCgpkZWYgbWFpbigpOgogICAgdXNlcmRhdGFfcGF0aCA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ2Nmbi11c2VyZGF0YScpCiAgICBvcy5jaG1vZCh1c2VyZGF0YV9wYXRoLCBpbnQoIjcwMCIsIDgpKQoKICAgIExPRy5pbmZvKCdQcm92aXNpb24gYmVnYW46ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICByZXR1cm5jb2RlID0gY2FsbChbdXNlcmRhdGFfcGF0aF0pCiAgICBMT0cuaW5mbygnUHJvdmlzaW9uIGRvbmU6ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICBpZiByZXR1cm5jb2RlOgogICAgICAgIHJldHVybiByZXR1cm5jb2RlCgoKaWYgX19uYW1lX18gPT0gJ19fbWFpbl9fJzoKICAgIGluaXRfbG9nZ2luZygpCgogICAgY29kZSA9IG1haW4oKQogICAgaWYgY29kZToKICAgICAgICBMT0cuZXJyb3IoJ1Byb3Zpc2lvbiBmYWlsZWQgd2l0aCBleGl0IGNvZGUgJXMnLCBjb2RlKQogICAgICAgIHN5cy5leGl0KGNvZGUpCgogICAgcHJvdmlzaW9uX2xvZyA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ3Byb3Zpc2lvbi1maW5pc2hlZCcpCiAgICAjIHRvdWNoIHRoZSBmaWxlIHNvIGl0IGlzIHRpbWVzdGFtcGVkIHdpdGggd2hlbiBmaW5pc2hlZAogICAgd2l0aCBvcGVuKHByb3Zpc2lvbl9sb2csICdhJyk6CiAgICAgICAgb3MudXRpbWUocHJvdmlzaW9uX2xvZywgTm9uZSkKCi0tPT09PT09PT09PT09PT09NDE2Mzc2NDk2NTc1OTQ2NzQxND09CkNvbnRlbnQtVHlwZTogdGV4dC94LWNmbmluaXRkYXRhOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2ZuLW1ldGFkYXRhLXNlcnZlciIKCmh0dHBzOi8vaGVhdC1jZm5hcGktaW50ZXJuYWwub3BlbnN0YWNrLnN2Yzo4MDAwL3YxLwotLT09PT09PT09PT09PT09PTQxNjM3NjQ5NjU3NTk0Njc0MTQ9PQpDb250ZW50LVR5cGU6IHRleHQveC1jZm5pbml0ZGF0YTsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImNmbi1ib3RvLWNmZyIKCltCb3RvXQpkZWJ1ZyA9IDAKaXNfc2VjdXJlID0gMApodHRwc192YWxpZGF0ZV9jZXJ0aWZpY2F0ZXMgPSAxCmNmbl9yZWdpb25fbmFtZSA9IGhlYXQKY2ZuX3JlZ2lvbl9lbmRwb2ludCA9IGhlYXQtY2ZuYXBpLWludGVybmFsLm9wZW5zdGFjay5zdmMKLS09PT09PT09PT09PT09PT00MTYzNzY0OTY1NzU5NDY3NDE0PT0tLQo=',user_id='d215f3ebbc07435493ccd666fc80109d',uuid=358d31cf-2866-416a-b2fc-814ee4bfe89a,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "address": "fa:16:3e:b0:ca:41", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.152", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.173", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": 
"ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap7f4342b0-8a", "ovs_interfaceid": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} plug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:710
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.586 2 DEBUG nova.network.os_vif_util [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converting VIF {"id": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "address": "fa:16:3e:b0:ca:41", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.152", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.173", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap7f4342b0-8a", "ovs_interfaceid": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.588 2 DEBUG nova.network.os_vif_util [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:b0:ca:41,bridge_name='br-int',has_traffic_filtering=True,id=7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tap7f4342b0-8a') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.588 2 DEBUG os_vif [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Plugging vif VIFOpenVSwitch(active=False,address=fa:16:3e:b0:ca:41,bridge_name='br-int',has_traffic_filtering=True,id=7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tap7f4342b0-8a') plug /usr/lib/python3.9/site-packages/os_vif/__init__.py:76
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.589 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.590 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddBridgeCommand(_result=None, name=br-int, may_exist=True, datapath_type=system) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.591 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.596 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.597 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tap7f4342b0-8a, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.598 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Interface, record=tap7f4342b0-8a, col_values=(('external_ids', {'iface-id': '7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8', 'iface-status': 'active', 'attached-mac': 'fa:16:3e:b0:ca:41', 'vm-uuid': '358d31cf-2866-416a-b2fc-814ee4bfe89a'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.601 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
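
The AddBridgeCommand / AddPortCommand / DbSetCommand lines are os-vif driving ovsdbapp transactions against the local OVSDB. Roughly equivalent standalone ovsdbapp usage (the socket path is an assumption; os-vif reaches OVSDB through its own configured connection):

    from ovsdbapp.backend.ovs_idl import connection
    from ovsdbapp.schema.open_vswitch import impl_idl

    idl = connection.OvsdbIdl.from_server(
        'unix:/run/openvswitch/db.sock', 'Open_vSwitch')
    api = impl_idl.OvsdbIdl(connection.Connection(idl=idl, timeout=10))

    # One transaction mirroring the two commands in the log:
    with api.transaction(check_error=True) as txn:
        txn.add(api.add_port('br-int', 'tap7f4342b0-8a', may_exist=True))
        txn.add(api.db_set(
            'Interface', 'tap7f4342b0-8a',
            ('external_ids', {
                'iface-id': '7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8',
                'attached-mac': 'fa:16:3e:b0:ca:41'})))
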
Oct 11 02:26:05 compute-0 NetworkManager[44908]: <info>  [1760149565.6027] manager: (tap7f4342b0-8a): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/35)
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.607 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.617 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.618 2 INFO os_vif [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Successfully plugged vif VIFOpenVSwitch(active=False,address=fa:16:3e:b0:ca:41,bridge_name='br-int',has_traffic_filtering=True,id=7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tap7f4342b0-8a')
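
One layer up, "Plugging vif" / "Successfully plugged vif" bracket a single os_vif.plug() call. A sketch of that entry point, with field names taken from the VIFOpenVSwitch repr in the log (treat the object construction as illustrative, not exhaustive):

    import os_vif
    from os_vif.objects import instance_info, network, vif

    os_vif.initialize()

    my_vif = vif.VIFOpenVSwitch(
        id='7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8',
        address='fa:16:3e:b0:ca:41',
        network=network.Network(id='d4dded16-3268-4cf9-bb6b-aa5200d5e4ec',
                                bridge='br-int'),
        plugin='ovs',
        vif_name='tap7f4342b0-8a',
        bridge_name='br-int')
    info = instance_info.InstanceInfo(
        uuid='358d31cf-2866-416a-b2fc-814ee4bfe89a',
        name='instance-00000004')

    os_vif.plug(my_vif, info)   # dispatches to the 'ovs' plugin
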
Oct 11 02:26:05 compute-0 sshd-session[433115]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:26:05 compute-0 sshd-session[433115]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:26:05 compute-0 rsyslogd[187706]: message too long (8192) with configured size 8096, begin of message is: 2025-10-11 02:26:05.556 2 DEBUG nova.virt.libvirt.vif [None req-7a01a2f3-3b45-40 [v8.2506.0-2.el9 try https://www.rsyslog.com/e/2445 ]
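
rsyslogd is truncating these oversized nova VIF dumps at its configured 8096-byte limit; the full text still appears in the journal capture above. If complete lines are wanted in syslog as well, the usual remedy is raising the limit near the top of /etc/rsyslog.conf, before any input modules load:

    # /etc/rsyslog.conf -- must be set before modules are loaded
    $MaxMessageSize 64k
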
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.678 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No BDM found with device name vda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.679 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No BDM found with device name vdb, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.680 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No BDM found with device name sda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.680 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No VIF found with MAC fa:16:3e:b0:ca:41, not building metadata _build_interface_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12092
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.681 2 INFO nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Using config drive
Oct 11 02:26:05 compute-0 nova_compute[356901]: 2025-10-11 02:26:05.728 2 DEBUG nova.storage.rbd_utils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 358d31cf-2866-416a-b2fc-814ee4bfe89a_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:26:05 compute-0 rsyslogd[187706]: message too long (8192) with configured size 8096, begin of message is: 2025-10-11 02:26:05.585 2 DEBUG nova.virt.libvirt.vif [None req-7a01a2f3-3b45-40 [v8.2506.0-2.el9 try https://www.rsyslog.com/e/2445 ]
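Both rsyslogd complaints above mean the long nova.virt.libvirt.vif DEBUG lines are being truncated at the configured 8096-byte cap while the messages themselves run to 8192 bytes or more. Per the linked error page, the cap is raised with the maxMessageSize global; a minimal excerpt, assuming a stock /etc/rsyslog.conf where the directive sits before any input modules, with 64k chosen arbitrarily:

    # /etc/rsyslog.conf (excerpt): lift the per-message cap above the
    # 8192-byte messages nova emits; 64k here is an assumed, generous value
    global(maxMessageSize="64k")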
Oct 11 02:26:06 compute-0 nova_compute[356901]: 2025-10-11 02:26:06.040 2 INFO nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Creating config drive at /var/lib/nova/instances/358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.config
Oct 11 02:26:06 compute-0 nova_compute[356901]: 2025-10-11 02:26:06.053 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): /usr/bin/mkisofs -o /var/lib/nova/instances/358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmph276m6z4 execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1444: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 25 KiB/s rd, 1.4 MiB/s wr, 37 op/s
Oct 11 02:26:06 compute-0 nova_compute[356901]: 2025-10-11 02:26:06.206 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "/usr/bin/mkisofs -o /var/lib/nova/instances/358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmph276m6z4" returned: 0 in 0.152s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:26:06 compute-0 nova_compute[356901]: 2025-10-11 02:26:06.261 2 DEBUG nova.storage.rbd_utils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image 358d31cf-2866-416a-b2fc-814ee4bfe89a_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:26:06 compute-0 nova_compute[356901]: 2025-10-11 02:26:06.277 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.config 358d31cf-2866-416a-b2fc-814ee4bfe89a_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:26:06 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/992107428' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:26:06 compute-0 nova_compute[356901]: 2025-10-11 02:26:06.622 2 DEBUG oslo_concurrency.processutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.config 358d31cf-2866-416a-b2fc-814ee4bfe89a_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.345s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:26:06 compute-0 nova_compute[356901]: 2025-10-11 02:26:06.626 2 INFO nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Deleting local config drive /var/lib/nova/instances/358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.config because it was imported into RBD.
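The three steps above are nova's RBD-backed config-drive path in miniature: build an ISO9660 image labelled config-2 with mkisofs, rbd-import it into the vms pool as <instance_uuid>_disk.config, then drop the local copy. A condensed sketch of the same two commands, with every path and flag taken from the log lines (the /tmp staging directory is the temporary metadata tree nova had already populated):

    import subprocess

    inst = "358d31cf-2866-416a-b2fc-814ee4bfe89a"
    iso = f"/var/lib/nova/instances/{inst}/disk.config"

    # 1. Build the config-2 ISO from the staged metadata directory.
    subprocess.run(["/usr/bin/mkisofs", "-o", iso, "-ldots", "-allow-lowercase",
                    "-allow-multidot", "-l", "-publisher",
                    "OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9",
                    "-quiet", "-J", "-r", "-V", "config-2",
                    "/tmp/tmph276m6z4"], check=True)

    # 2. Import it into Ceph; the local file is then safe to delete.
    subprocess.run(["rbd", "import", "--pool", "vms", iso,
                    f"{inst}_disk.config", "--image-format=2",
                    "--id", "openstack", "--conf", "/etc/ceph/ceph.conf"],
                   check=True)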
Oct 11 02:26:06 compute-0 systemd[1]: Starting libvirt secret daemon...
Oct 11 02:26:06 compute-0 systemd[1]: Started libvirt secret daemon.
Oct 11 02:26:06 compute-0 kernel: tap7f4342b0-8a: entered promiscuous mode
Oct 11 02:26:06 compute-0 NetworkManager[44908]: <info>  [1760149566.8229] manager: (tap7f4342b0-8a): new Tun device (/org/freedesktop/NetworkManager/Devices/36)
Oct 11 02:26:06 compute-0 ovn_controller[88370]: 2025-10-11T02:26:06Z|00045|binding|INFO|Claiming lport 7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 for this chassis.
Oct 11 02:26:06 compute-0 ovn_controller[88370]: 2025-10-11T02:26:06Z|00046|binding|INFO|7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8: Claiming fa:16:3e:b0:ca:41 192.168.0.152
Oct 11 02:26:06 compute-0 nova_compute[356901]: 2025-10-11 02:26:06.833 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:06 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:26:06.839 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:b0:ca:41 192.168.0.152'], port_security=['fa:16:3e:b0:ca:41 192.168.0.152'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'name': 'vnf-scaleup_group-wzkjkvgckve2-tqko7trrsvwg-ebwakep2a2y3-port-pnpy23xi2rfl', 'neutron:cidrs': '192.168.0.152/24', 'neutron:device_id': '358d31cf-2866-416a-b2fc-814ee4bfe89a', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'neutron:port_capabilities': '', 'neutron:port_name': 'vnf-scaleup_group-wzkjkvgckve2-tqko7trrsvwg-ebwakep2a2y3-port-pnpy23xi2rfl', 'neutron:project_id': '97026531b3404a11869cb85a059c4a0d', 'neutron:revision_number': '2', 'neutron:security_group_ids': 'c0c90d87-d29f-4e96-98a1-ffb301424ea4', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:port_fip': '192.168.122.173'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=d19b0dd1-1656-436b-911a-8f2dcc98f6bf, chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], tunnel_key=6, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8) old=Port_Binding(chassis=[]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:26:06 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:26:06.841 286362 INFO neutron.agent.ovn.metadata.agent [-] Port 7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 in datapath d4dded16-3268-4cf9-bb6b-aa5200d5e4ec bound to our chassis
Oct 11 02:26:06 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:26:06.842 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network d4dded16-3268-4cf9-bb6b-aa5200d5e4ec
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0019226650509539625 of space, bias 1.0, pg target 0.5767995152861888 quantized to 32 (current 32)
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:26:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
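Every pg_autoscaler pair above is the same computation: raw pg target = capacity ratio x bias x the cluster-wide PG budget, which works out to exactly 300 in this cluster (plausibly mon_target_pg_per_osd, default 100, times 3 OSDs; that decomposition is an assumption). A quick check against the logged values:

    # Worked check of the pg_autoscaler lines above. 300 is the effective PG
    # budget implied by the logs (assumed: mon_target_pg_per_osd=100 x 3 OSDs).
    TOTAL_PG_TARGET = 300
    pools = {
        ".mgr":               (7.185749983720779e-06, 1.0),
        "vms":                (0.0019226650509539625, 1.0),
        "images":             (0.00025334537995702286, 1.0),
        "cephfs.cephfs.meta": (5.087256625643029e-07, 4.0),
    }
    for name, (ratio, bias) in pools.items():
        raw = ratio * bias * TOTAL_PG_TARGET  # reproduces the logged "pg target"
        print(f"{name}: raw pg target {raw:.6g}")
    # The autoscaler then quantizes to a power of two, subject to each pool's
    # minimum and its no-shrink hysteresis, hence "quantized to 32 (current 32)".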
Oct 11 02:26:06 compute-0 ovn_controller[88370]: 2025-10-11T02:26:06Z|00047|binding|INFO|Setting lport 7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 ovn-installed in OVS
Oct 11 02:26:06 compute-0 ovn_controller[88370]: 2025-10-11T02:26:06Z|00048|binding|INFO|Setting lport 7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 up in Southbound
Oct 11 02:26:06 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:26:06.870 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[a7def02c-6c2a-4d9d-8e55-11cd3b227842]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:26:06 compute-0 nova_compute[356901]: 2025-10-11 02:26:06.872 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:06 compute-0 nova_compute[356901]: 2025-10-11 02:26:06.888 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:06 compute-0 systemd-udevd[433355]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:26:06 compute-0 systemd-machined[137586]: New machine qemu-4-instance-00000004.
Oct 11 02:26:06 compute-0 systemd[1]: Started Virtual Machine qemu-4-instance-00000004.
Oct 11 02:26:06 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:26:06.924 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[48411987-2104-47be-bb2d-b43bc3cce3c8]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:26:06 compute-0 NetworkManager[44908]: <info>  [1760149566.9317] device (tap7f4342b0-8a): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 02:26:06 compute-0 NetworkManager[44908]: <info>  [1760149566.9331] device (tap7f4342b0-8a): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Oct 11 02:26:06 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:26:06.933 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[13a3ae4e-c6e4-4725-8903-fbe8e1cc4659]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:26:06 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:26:06.977 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[d33f7c9b-c959-4a2b-bf54-cc65dcf33bc3]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:26:07 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:26:07.002 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[5ea3a34f-c9dc-4c16-96df-efc72c0d5175]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapd4dded16-31'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:11:50:48'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 8, 'tx_packets': 9, 'rx_bytes': 832, 'tx_bytes': 522, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 8, 'tx_packets': 9, 'rx_bytes': 832, 'tx_bytes': 522, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 15], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 548909, 'reachable_time': 31539, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 8, 'inoctets': 720, 'indelivers': 1, 'outforwdatagrams': 0, 'outpkts': 3, 'outoctets': 228, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 8, 'outmcastpkts': 3, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 720, 'outmcastoctets': 228, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 8, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 1, 'inerrors': 0, 'outmsgs': 3, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 433361, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:26:07 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:26:07.036 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[755cc8e0-8dad-45ca-a9e6-7cd0c9f01350]: (4, ({'family': 2, 'prefixlen': 32, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '169.254.169.254'], ['IFA_LOCAL', '169.254.169.254'], ['IFA_BROADCAST', '169.254.169.254'], ['IFA_LABEL', 'tapd4dded16-31'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 548926, 'tstamp': 548926}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 433366, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'}, {'family': 2, 'prefixlen': 24, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '192.168.0.2'], ['IFA_LOCAL', '192.168.0.2'], ['IFA_BROADCAST', '192.168.0.255'], ['IFA_LABEL', 'tapd4dded16-31'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 548931, 'tstamp': 548931}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 433366, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'})) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:26:07 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:26:07.039 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapd4dded16-30, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:26:07 compute-0 nova_compute[356901]: 2025-10-11 02:26:07.041 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:07 compute-0 nova_compute[356901]: 2025-10-11 02:26:07.043 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:07 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:26:07.044 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapd4dded16-30, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:26:07 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:26:07.044 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:26:07 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:26:07.045 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tapd4dded16-30, col_values=(('external_ids', {'iface-id': 'f0f8488b-423f-46a5-8a6a-984c2ae3438e'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:26:07 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:26:07.045 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:26:07 compute-0 nova_compute[356901]: 2025-10-11 02:26:07.079 2 DEBUG nova.compute.manager [req-f0a6346d-e377-4363-8283-e4ec82211cf1 req-fbdd5336-fd02-4a22-9513-72baff559e52 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Received event network-vif-plugged-7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:26:07 compute-0 nova_compute[356901]: 2025-10-11 02:26:07.080 2 DEBUG oslo_concurrency.lockutils [req-f0a6346d-e377-4363-8283-e4ec82211cf1 req-fbdd5336-fd02-4a22-9513-72baff559e52 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:26:07 compute-0 nova_compute[356901]: 2025-10-11 02:26:07.080 2 DEBUG oslo_concurrency.lockutils [req-f0a6346d-e377-4363-8283-e4ec82211cf1 req-fbdd5336-fd02-4a22-9513-72baff559e52 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:26:07 compute-0 nova_compute[356901]: 2025-10-11 02:26:07.081 2 DEBUG oslo_concurrency.lockutils [req-f0a6346d-e377-4363-8283-e4ec82211cf1 req-fbdd5336-fd02-4a22-9513-72baff559e52 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:26:07 compute-0 nova_compute[356901]: 2025-10-11 02:26:07.081 2 DEBUG nova.compute.manager [req-f0a6346d-e377-4363-8283-e4ec82211cf1 req-fbdd5336-fd02-4a22-9513-72baff559e52 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Processing event network-vif-plugged-7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10808
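The Acquiring/acquired/released triplet above is oslo.concurrency's standard logging around the per-instance "-events" lock that serializes external event delivery against waiters. Taking the same named lock reproduces those three lines; a minimal illustration, with a stand-in body rather than nova's actual _pop_event:

    from oslo_concurrency import lockutils

    # Entering and leaving the context manager emits the Acquiring /
    # acquired / released DEBUG lines seen above for this lock name.
    with lockutils.lock("358d31cf-2866-416a-b2fc-814ee4bfe89a-events"):
        pass  # stand-in: pop any waiter registered for network-vif-plugged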
Oct 11 02:26:07 compute-0 nova_compute[356901]: 2025-10-11 02:26:07.095 2 DEBUG nova.network.neutron [req-2f20dcaa-375a-4808-893b-2910557121d6 req-d83a86a0-616c-472d-b256-61d822a17136 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Updated VIF entry in instance network info cache for port 7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:26:07 compute-0 nova_compute[356901]: 2025-10-11 02:26:07.095 2 DEBUG nova.network.neutron [req-2f20dcaa-375a-4808-893b-2910557121d6 req-d83a86a0-616c-472d-b256-61d822a17136 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Updating instance_info_cache with network_info: [{"id": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "address": "fa:16:3e:b0:ca:41", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.152", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.173", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap7f4342b0-8a", "ovs_interfaceid": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:26:07 compute-0 nova_compute[356901]: 2025-10-11 02:26:07.113 2 DEBUG oslo_concurrency.lockutils [req-2f20dcaa-375a-4808-893b-2910557121d6 req-d83a86a0-616c-472d-b256-61d822a17136 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-358d31cf-2866-416a-b2fc-814ee4bfe89a" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
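The "mtu": 1442 alongside "tunneled": true in the cached network_info above is the usual tenant MTU on a 1500-byte underlay once OVN's Geneve encapsulation overhead is subtracted; the 58-byte figure below is the overhead commonly budgeted for Geneve over IPv4 and is an assumption about this deployment:

    # 1500-byte physical MTU minus the ~58 bytes budgeted for
    # Geneve-over-IPv4 encapsulation (assumed default overhead)
    print(1500 - 58)  # -> 1442, matching the cached tenant MTU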
Oct 11 02:26:07 compute-0 ceph-mon[191930]: pgmap v1444: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 25 KiB/s rd, 1.4 MiB/s wr, 37 op/s
Oct 11 02:26:07 compute-0 nova_compute[356901]: 2025-10-11 02:26:07.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_expired_console_auth_tokens run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:26:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1445: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.4 MiB/s wr, 41 op/s
Oct 11 02:26:08 compute-0 sshd-session[433115]: Failed password for invalid user debian from 121.227.153.123 port 37904 ssh2
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.159 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760149569.1583707, 358d31cf-2866-416a-b2fc-814ee4bfe89a => Started> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.159 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] VM Started (Lifecycle Event)
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.165 2 DEBUG nova.compute.manager [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Instance event wait completed in 0 seconds for network-vif-plugged wait_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:577
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.171 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Guest created on hypervisor spawn /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4417
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.179 2 INFO nova.virt.libvirt.driver [-] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Instance spawned successfully.
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.180 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Attempting to register defaults for the following image properties: ['hw_cdrom_bus', 'hw_disk_bus', 'hw_input_bus', 'hw_pointer_model', 'hw_video_model', 'hw_vif_model'] _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:917
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.214 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.226 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Synchronizing instance power state after lifecycle event "Started"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.231 2 DEBUG nova.compute.manager [req-dfb48de9-d3f0-428f-b3c0-a2dc96f7695f req-a4feecf7-bf93-4f50-8b77-7e7ec54156d9 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Received event network-vif-plugged-7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.232 2 DEBUG oslo_concurrency.lockutils [req-dfb48de9-d3f0-428f-b3c0-a2dc96f7695f req-a4feecf7-bf93-4f50-8b77-7e7ec54156d9 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.232 2 DEBUG oslo_concurrency.lockutils [req-dfb48de9-d3f0-428f-b3c0-a2dc96f7695f req-a4feecf7-bf93-4f50-8b77-7e7ec54156d9 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.233 2 DEBUG oslo_concurrency.lockutils [req-dfb48de9-d3f0-428f-b3c0-a2dc96f7695f req-a4feecf7-bf93-4f50-8b77-7e7ec54156d9 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.233 2 DEBUG nova.compute.manager [req-dfb48de9-d3f0-428f-b3c0-a2dc96f7695f req-a4feecf7-bf93-4f50-8b77-7e7ec54156d9 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] No waiting events found dispatching network-vif-plugged-7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.234 2 WARNING nova.compute.manager [req-dfb48de9-d3f0-428f-b3c0-a2dc96f7695f req-a4feecf7-bf93-4f50-8b77-7e7ec54156d9 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Received unexpected event network-vif-plugged-7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 for instance with vm_state building and task_state spawning.
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.236 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Found default for hw_cdrom_bus of sata _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.237 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Found default for hw_disk_bus of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.238 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Found default for hw_input_bus of usb _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.238 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Found default for hw_pointer_model of usbtablet _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.239 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Found default for hw_video_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.240 2 DEBUG nova.virt.libvirt.driver [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Found default for hw_vif_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.245 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.245 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760149569.1585433, 358d31cf-2866-416a-b2fc-814ee4bfe89a => Paused> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.246 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] VM Paused (Lifecycle Event)
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.266 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.273 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760149569.1700745, 358d31cf-2866-416a-b2fc-814ee4bfe89a => Resumed> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.273 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] VM Resumed (Lifecycle Event)
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.308 2 INFO nova.compute.manager [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Took 9.38 seconds to spawn the instance on the hypervisor.
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.309 2 DEBUG nova.compute.manager [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.315 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.325 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Synchronizing instance power state after lifecycle event "Resumed"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:26:09 compute-0 ceph-mon[191930]: pgmap v1445: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.4 MiB/s wr, 41 op/s
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.430 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] During sync_power_state the instance has a pending task (spawning). Skip.
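The Started/Paused/Resumed burst comes from libvirt lifecycle events racing the still-running spawn: the DB records power_state 0 while the hypervisor already reports 1, but each _sync_instance_power_state pass defers because a task is in flight. Roughly, as illustrative pseudocode rather than nova's exact method:

    def maybe_sync_power_state(task_state, db_power_state, vm_power_state):
        # Hypothetical condensation of the decisions logged above.
        if task_state is not None:            # e.g. "spawning"
            return "skip"                     # "has a pending task ... Skip."
        if db_power_state != vm_power_state:
            return "sync"                     # reconcile the DB with libvirt
        return "noop"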
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.451 2 INFO nova.compute.manager [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Took 10.74 seconds to build instance.
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.466 2 DEBUG oslo_concurrency.lockutils [None req-7a01a2f3-3b45-4009-bf20-f9cf62eab373 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a" "released" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: held 10.893s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:26:09 compute-0 nova_compute[356901]: 2025-10-11 02:26:09.475 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:09 compute-0 sshd-session[433115]: Connection closed by invalid user debian 121.227.153.123 port 37904 [preauth]
Oct 11 02:26:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1446: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 1.4 MiB/s wr, 44 op/s
Oct 11 02:26:10 compute-0 podman[433429]: 2025-10-11 02:26:10.23713846 +0000 UTC m=+0.117974428 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=multipathd, io.buildah.version=1.41.3)
Oct 11 02:26:10 compute-0 podman[433430]: 2025-10-11 02:26:10.252009796 +0000 UTC m=+0.134557162 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, container_name=iscsid, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, managed_by=edpm_ansible)
Oct 11 02:26:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:26:10 compute-0 nova_compute[356901]: 2025-10-11 02:26:10.603 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:11 compute-0 sshd-session[433463]: Invalid user debian from 121.227.153.123 port 41522
Oct 11 02:26:11 compute-0 ceph-mon[191930]: pgmap v1446: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 1.4 MiB/s wr, 44 op/s
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #66. Immutable memtables: 0.
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:26:11.444082) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 35] Flushing memtable with next log file: 66
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149571444198, "job": 35, "event": "flush_started", "num_memtables": 1, "num_entries": 1190, "num_deletes": 251, "total_data_size": 1799389, "memory_usage": 1826608, "flush_reason": "Manual Compaction"}
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 35] Level-0 flush table #67: started
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149571463965, "cf_name": "default", "job": 35, "event": "table_file_creation", "file_number": 67, "file_size": 1760389, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 28956, "largest_seqno": 30145, "table_properties": {"data_size": 1754717, "index_size": 3067, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1541, "raw_key_size": 12145, "raw_average_key_size": 19, "raw_value_size": 1743293, "raw_average_value_size": 2843, "num_data_blocks": 138, "num_entries": 613, "num_filter_entries": 613, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760149456, "oldest_key_time": 1760149456, "file_creation_time": 1760149571, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 67, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 35] Flush lasted 19961 microseconds, and 10164 cpu microseconds.
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:26:11.464057) [db/flush_job.cc:967] [default] [JOB 35] Level-0 flush table #67: 1760389 bytes OK
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:26:11.464085) [db/memtable_list.cc:519] [default] Level-0 commit table #67 started
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:26:11.468430) [db/memtable_list.cc:722] [default] Level-0 commit table #67: memtable #1 done
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:26:11.468510) EVENT_LOG_v1 {"time_micros": 1760149571468468, "job": 35, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:26:11.468539) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 35] Try to delete WAL files size 1793928, prev total WAL file size 1793928, number of live WAL files 2.
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000063.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:26:11.470018) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730032353130' seq:72057594037927935, type:22 .. '7061786F730032373632' seq:0, type:0; will stop at (end)
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 36] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 35 Base level 0, inputs: [67(1719KB)], [65(7026KB)]
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149571470088, "job": 36, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [67], "files_L6": [65], "score": -1, "input_data_size": 8955764, "oldest_snapshot_seqno": -1}
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 36] Generated table #68: 4930 keys, 7203889 bytes, temperature: kUnknown
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149571511949, "cf_name": "default", "job": 36, "event": "table_file_creation", "file_number": 68, "file_size": 7203889, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 7172062, "index_size": 18405, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 12357, "raw_key_size": 124695, "raw_average_key_size": 25, "raw_value_size": 7083836, "raw_average_value_size": 1436, "num_data_blocks": 757, "num_entries": 4930, "num_filter_entries": 4930, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760149571, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 68, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:26:11.512314) [db/compaction/compaction_job.cc:1663] [default] [JOB 36] Compacted 1@0 + 1@6 files to L6 => 7203889 bytes
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:26:11.514559) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 213.5 rd, 171.7 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(1.7, 6.9 +0.0 blob) out(6.9 +0.0 blob), read-write-amplify(9.2) write-amplify(4.1) OK, records in: 5444, records dropped: 514 output_compression: NoCompression
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:26:11.514577) EVENT_LOG_v1 {"time_micros": 1760149571514567, "job": 36, "event": "compaction_finished", "compaction_time_micros": 41953, "compaction_time_cpu_micros": 26331, "output_level": 6, "num_output_files": 1, "total_output_size": 7203889, "num_input_records": 5444, "num_output_records": 4930, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
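The amplification figures in the JOB 36 summary follow directly from byte counts already logged in this compaction (table #67 flushed at 1,760,389 bytes, input_data_size 8,955,764, output table #68 at 7,203,889 bytes):

    # Reproducing rocksdb's JOB 36 write-amplify / read-write-amplify output
    l0_in = 1760389            # flushed L0 file, table #67
    l6_in = 8955764 - l0_in    # compaction input_data_size minus the L0 file
    out = 7203889              # resulting L6 file, table #68
    print(round(out / l0_in, 1))                    # -> 4.1  (write-amplify)
    print(round((l0_in + l6_in + out) / l0_in, 1))  # -> 9.2  (read-write-amplify)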
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000067.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149571514987, "job": 36, "event": "table_file_deletion", "file_number": 67}
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000065.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149571516313, "job": 36, "event": "table_file_deletion", "file_number": 65}
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:26:11.469883) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:26:11.516442) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:26:11.516450) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:26:11.516452) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:26:11.516455) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:26:11 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:26:11.516457) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:26:11 compute-0 sshd-session[433463]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:26:11 compute-0 sshd-session[433463]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:26:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1447: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 167 KiB/s rd, 1.4 MiB/s wr, 53 op/s
Oct 11 02:26:12 compute-0 ceph-mon[191930]: pgmap v1447: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 167 KiB/s rd, 1.4 MiB/s wr, 53 op/s
Oct 11 02:26:12 compute-0 nova_compute[356901]: 2025-10-11 02:26:12.917 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._run_pending_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:26:12 compute-0 nova_compute[356901]: 2025-10-11 02:26:12.918 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11145
Oct 11 02:26:12 compute-0 nova_compute[356901]: 2025-10-11 02:26:12.939 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] There are 0 instances to clean _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11154
Oct 11 02:26:13 compute-0 sshd-session[433463]: Failed password for invalid user debian from 121.227.153.123 port 41522 ssh2
Oct 11 02:26:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1448: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 1.1 MiB/s rd, 866 KiB/s wr, 83 op/s
Oct 11 02:26:14 compute-0 nova_compute[356901]: 2025-10-11 02:26:14.479 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:15 compute-0 ceph-mon[191930]: pgmap v1448: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 1.1 MiB/s rd, 866 KiB/s wr, 83 op/s
Oct 11 02:26:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:26:15 compute-0 nova_compute[356901]: 2025-10-11 02:26:15.608 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:15 compute-0 sshd-session[433463]: Connection closed by invalid user debian 121.227.153.123 port 41522 [preauth]
Oct 11 02:26:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1449: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 22 KiB/s wr, 62 op/s
Oct 11 02:26:17 compute-0 sshd-session[433472]: Invalid user debian from 121.227.153.123 port 41526
Oct 11 02:26:17 compute-0 ceph-mon[191930]: pgmap v1449: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 22 KiB/s wr, 62 op/s
Oct 11 02:26:17 compute-0 sshd-session[433472]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:26:17 compute-0 sshd-session[433472]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:26:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1450: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 22 KiB/s wr, 60 op/s
Oct 11 02:26:19 compute-0 ceph-mon[191930]: pgmap v1450: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 22 KiB/s wr, 60 op/s
Oct 11 02:26:19 compute-0 nova_compute[356901]: 2025-10-11 02:26:19.484 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:20 compute-0 sshd-session[433472]: Failed password for invalid user debian from 121.227.153.123 port 41526 ssh2
Oct 11 02:26:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1451: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 21 KiB/s wr, 56 op/s
Oct 11 02:26:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:26:20 compute-0 nova_compute[356901]: 2025-10-11 02:26:20.611 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:21 compute-0 podman[433476]: 2025-10-11 02:26:21.243777468 +0000 UTC m=+0.124917080 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, distribution-scope=public, com.redhat.component=ubi9-minimal-container, io.openshift.expose-services=, release=1755695350, vendor=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, container_name=openstack_network_exporter, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., name=ubi9-minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, url=https://catalog.redhat.com/en/search?searchType=containers, io.buildah.version=1.33.7, io.openshift.tags=minimal rhel9, version=9.6, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, managed_by=edpm_ansible, architecture=x86_64, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., build-date=2025-08-20T13:12:41)
Oct 11 02:26:21 compute-0 podman[433475]: 2025-10-11 02:26:21.252629271 +0000 UTC m=+0.135521922 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, config_id=edpm, org.label-schema.build-date=20251009, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2)
Oct 11 02:26:21 compute-0 podman[433477]: 2025-10-11 02:26:21.264870139 +0000 UTC m=+0.150847052 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:26:21 compute-0 ceph-mon[191930]: pgmap v1451: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 21 KiB/s wr, 56 op/s
Oct 11 02:26:21 compute-0 sshd-session[433472]: Connection closed by invalid user debian 121.227.153.123 port 41526 [preauth]
Oct 11 02:26:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1452: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 1.2 KiB/s wr, 53 op/s
Oct 11 02:26:23 compute-0 ceph-mon[191930]: pgmap v1452: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 1.2 KiB/s wr, 53 op/s
Oct 11 02:26:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1453: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 1.3 MiB/s rd, 1.1 KiB/s wr, 44 op/s
Oct 11 02:26:24 compute-0 nova_compute[356901]: 2025-10-11 02:26:24.490 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:25 compute-0 ceph-mon[191930]: pgmap v1453: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 1.3 MiB/s rd, 1.1 KiB/s wr, 44 op/s
Oct 11 02:26:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:26:25 compute-0 nova_compute[356901]: 2025-10-11 02:26:25.615 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1454: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 431 KiB/s rd, 1023 B/s wr, 13 op/s
Oct 11 02:26:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:26:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:26:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:26:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:26:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:26:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:26:27 compute-0 podman[433539]: 2025-10-11 02:26:27.250817242 +0000 UTC m=+0.130126415 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.expose-services=, com.redhat.component=ubi9-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., distribution-scope=public, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, version=9.4, architecture=x86_64, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, container_name=kepler, io.buildah.version=1.29.0, vendor=Red Hat, Inc., name=ubi9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, build-date=2024-09-18T21:23:30, managed_by=edpm_ansible, io.openshift.tags=base rhel9, config_id=edpm)
Oct 11 02:26:27 compute-0 ceph-mon[191930]: pgmap v1454: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 431 KiB/s rd, 1023 B/s wr, 13 op/s
Oct 11 02:26:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:26:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2899473012' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:26:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:26:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2899473012' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:26:28 compute-0 sshd-session[433537]: Invalid user admin from 121.227.153.123 port 52418
Oct 11 02:26:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1455: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:28 compute-0 sshd-session[433537]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:26:28 compute-0 sshd-session[433537]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:26:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2899473012' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:26:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2899473012' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:26:29 compute-0 ceph-mon[191930]: pgmap v1455: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:29 compute-0 nova_compute[356901]: 2025-10-11 02:26:29.489 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:29 compute-0 podman[157119]: time="2025-10-11T02:26:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:26:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:26:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:26:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:26:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9060 "" "Go-http-client/1.1"
Oct 11 02:26:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1456: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:26:30 compute-0 nova_compute[356901]: 2025-10-11 02:26:30.619 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:30 compute-0 sshd-session[433537]: Failed password for invalid user admin from 121.227.153.123 port 52418 ssh2
Oct 11 02:26:31 compute-0 openstack_network_exporter[374316]: ERROR   02:26:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:26:31 compute-0 openstack_network_exporter[374316]: ERROR   02:26:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:26:31 compute-0 openstack_network_exporter[374316]: ERROR   02:26:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:26:31 compute-0 openstack_network_exporter[374316]: ERROR   02:26:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:26:31 compute-0 openstack_network_exporter[374316]: ERROR   02:26:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:26:31 compute-0 ceph-mon[191930]: pgmap v1456: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:31 compute-0 sshd-session[433537]: Connection closed by invalid user admin 121.227.153.123 port 52418 [preauth]
Oct 11 02:26:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1457: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:32 compute-0 ceph-mon[191930]: pgmap v1457: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:33 compute-0 sshd-session[433560]: Invalid user admin from 121.227.153.123 port 45044
Oct 11 02:26:33 compute-0 sshd-session[433560]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:26:33 compute-0 sshd-session[433560]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:26:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1458: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:34 compute-0 nova_compute[356901]: 2025-10-11 02:26:34.495 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:35 compute-0 podman[433564]: 2025-10-11 02:26:35.224898534 +0000 UTC m=+0.114458651 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.name=CentOS Stream 10 Base Image, container_name=ceilometer_agent_compute, org.label-schema.schema-version=1.0)
Oct 11 02:26:35 compute-0 podman[433562]: 2025-10-11 02:26:35.244759672 +0000 UTC m=+0.139071239 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:26:35 compute-0 podman[433565]: 2025-10-11 02:26:35.249850475 +0000 UTC m=+0.128125294 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.license=GPLv2, container_name=ovn_metadata_agent, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS)
Oct 11 02:26:35 compute-0 podman[433563]: 2025-10-11 02:26:35.259568967 +0000 UTC m=+0.148940922 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_managed=true, container_name=ovn_controller, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:26:35 compute-0 ceph-mon[191930]: pgmap v1458: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:26:35 compute-0 sshd-session[433560]: Failed password for invalid user admin from 121.227.153.123 port 45044 ssh2
Oct 11 02:26:35 compute-0 nova_compute[356901]: 2025-10-11 02:26:35.623 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1459: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:36 compute-0 ovn_controller[88370]: 2025-10-11T02:26:36Z|00049|memory_trim|INFO|Detected inactivity (last active 30001 ms ago): trimming memory
Oct 11 02:26:36 compute-0 sshd-session[433560]: Connection closed by invalid user admin 121.227.153.123 port 45044 [preauth]
Oct 11 02:26:37 compute-0 ceph-mon[191930]: pgmap v1459: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:38 compute-0 sshd-session[433642]: Invalid user admin from 121.227.153.123 port 45058
Oct 11 02:26:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1460: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:38 compute-0 sshd-session[433642]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:26:38 compute-0 sshd-session[433642]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:26:39 compute-0 ceph-mon[191930]: pgmap v1460: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:39 compute-0 nova_compute[356901]: 2025-10-11 02:26:39.501 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1461: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:40 compute-0 sshd-session[433642]: Failed password for invalid user admin from 121.227.153.123 port 45058 ssh2
Oct 11 02:26:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:26:40 compute-0 nova_compute[356901]: 2025-10-11 02:26:40.628 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:41 compute-0 podman[433645]: 2025-10-11 02:26:41.275935039 +0000 UTC m=+0.158877276 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, config_id=iscsid, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']})
Oct 11 02:26:41 compute-0 podman[433644]: 2025-10-11 02:26:41.290675863 +0000 UTC m=+0.174788762 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_managed=true, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible)
Oct 11 02:26:41 compute-0 nova_compute[356901]: 2025-10-11 02:26:41.302 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_power_states run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:26:41 compute-0 ceph-mon[191930]: pgmap v1461: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:41 compute-0 nova_compute[356901]: 2025-10-11 02:26:41.343 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Triggering sync for uuid 0cc56d17-ec3a-4408-bccb-91b29427379e _sync_power_states /usr/lib/python3.9/site-packages/nova/compute/manager.py:10268
Oct 11 02:26:41 compute-0 nova_compute[356901]: 2025-10-11 02:26:41.344 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Triggering sync for uuid d60d7ea1-5d00-4902-90e6-3ae67eb09a78 _sync_power_states /usr/lib/python3.9/site-packages/nova/compute/manager.py:10268
Oct 11 02:26:41 compute-0 nova_compute[356901]: 2025-10-11 02:26:41.344 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Triggering sync for uuid 7513b93e-d2b8-4ae0-8f1c-3df190945259 _sync_power_states /usr/lib/python3.9/site-packages/nova/compute/manager.py:10268
Oct 11 02:26:41 compute-0 nova_compute[356901]: 2025-10-11 02:26:41.345 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Triggering sync for uuid 358d31cf-2866-416a-b2fc-814ee4bfe89a _sync_power_states /usr/lib/python3.9/site-packages/nova/compute/manager.py:10268
Oct 11 02:26:41 compute-0 nova_compute[356901]: 2025-10-11 02:26:41.346 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "0cc56d17-ec3a-4408-bccb-91b29427379e" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:26:41 compute-0 nova_compute[356901]: 2025-10-11 02:26:41.346 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "0cc56d17-ec3a-4408-bccb-91b29427379e" acquired by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:26:41 compute-0 nova_compute[356901]: 2025-10-11 02:26:41.347 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:26:41 compute-0 nova_compute[356901]: 2025-10-11 02:26:41.348 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78" acquired by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:26:41 compute-0 nova_compute[356901]: 2025-10-11 02:26:41.350 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "7513b93e-d2b8-4ae0-8f1c-3df190945259" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:26:41 compute-0 nova_compute[356901]: 2025-10-11 02:26:41.350 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259" acquired by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:26:41 compute-0 nova_compute[356901]: 2025-10-11 02:26:41.351 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "358d31cf-2866-416a-b2fc-814ee4bfe89a" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:26:41 compute-0 nova_compute[356901]: 2025-10-11 02:26:41.351 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a" acquired by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:26:41 compute-0 nova_compute[356901]: 2025-10-11 02:26:41.450 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "0cc56d17-ec3a-4408-bccb-91b29427379e" "released" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: held 0.104s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:26:41 compute-0 nova_compute[356901]: 2025-10-11 02:26:41.452 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78" "released" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: held 0.103s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:26:41 compute-0 nova_compute[356901]: 2025-10-11 02:26:41.468 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259" "released" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: held 0.117s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:26:41 compute-0 nova_compute[356901]: 2025-10-11 02:26:41.469 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a" "released" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: held 0.118s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:26:41 compute-0 sshd-session[433642]: Connection closed by invalid user admin 121.227.153.123 port 45058 [preauth]
Oct 11 02:26:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1462: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:43 compute-0 sshd-session[433682]: Invalid user admin from 121.227.153.123 port 57398
Oct 11 02:26:43 compute-0 ceph-mon[191930]: pgmap v1462: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:26:43 compute-0 sshd-session[433682]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:26:43 compute-0 sshd-session[433682]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:26:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1463: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 1.7 KiB/s rd, 0 op/s
Oct 11 02:26:44 compute-0 nova_compute[356901]: 2025-10-11 02:26:44.506 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:45 compute-0 ceph-mon[191930]: pgmap v1463: 321 pgs: 321 active+clean; 234 MiB data, 339 MiB used, 60 GiB / 60 GiB avail; 1.7 KiB/s rd, 0 op/s
Oct 11 02:26:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:26:45 compute-0 ovn_controller[88370]: 2025-10-11T02:26:45Z|00010|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:b0:ca:41 192.168.0.152
Oct 11 02:26:45 compute-0 ovn_controller[88370]: 2025-10-11T02:26:45Z|00011|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:b0:ca:41 192.168.0.152
Oct 11 02:26:45 compute-0 nova_compute[356901]: 2025-10-11 02:26:45.633 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:45 compute-0 sshd-session[433682]: Failed password for invalid user admin from 121.227.153.123 port 57398 ssh2
Oct 11 02:26:46 compute-0 sudo[433685]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:26:46 compute-0 sudo[433685]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:46 compute-0 sudo[433685]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:46 compute-0 sudo[433710]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:26:46 compute-0 sudo[433710]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:46 compute-0 sudo[433710]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1464: 321 pgs: 321 active+clean; 239 MiB data, 343 MiB used, 60 GiB / 60 GiB avail; 1.7 KiB/s rd, 316 KiB/s wr, 2 op/s
Oct 11 02:26:46 compute-0 sudo[433735]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:26:46 compute-0 sudo[433735]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:46 compute-0 sudo[433735]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:46 compute-0 sudo[433760]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:26:46 compute-0 sudo[433760]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:47 compute-0 sudo[433760]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:47 compute-0 sshd-session[433682]: Connection closed by invalid user admin 121.227.153.123 port 57398 [preauth]
Oct 11 02:26:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"} v 0) v1
Oct 11 02:26:47 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"}]: dispatch
Oct 11 02:26:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:26:47 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:26:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:26:47 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:26:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:26:47 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:26:47 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 04811559-62ac-43bc-9e77-e018465ce990 does not exist
Oct 11 02:26:47 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev d449eb32-d6ec-4538-8b6c-0c040ae77621 does not exist
Oct 11 02:26:47 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 1c1b3706-c478-4ed0-bcb1-f42dd327b52e does not exist
Oct 11 02:26:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:26:47 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:26:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:26:47 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:26:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:26:47 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:26:47 compute-0 sudo[433816]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:26:47 compute-0 sudo[433816]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:47 compute-0 sudo[433816]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:47 compute-0 sudo[433841]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:26:47 compute-0 sudo[433841]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:47 compute-0 sudo[433841]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:47 compute-0 ceph-mon[191930]: pgmap v1464: 321 pgs: 321 active+clean; 239 MiB data, 343 MiB used, 60 GiB / 60 GiB avail; 1.7 KiB/s rd, 316 KiB/s wr, 2 op/s
Oct 11 02:26:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"}]: dispatch
Oct 11 02:26:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:26:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:26:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:26:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:26:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:26:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:26:47 compute-0 sudo[433868]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:26:47 compute-0 sudo[433868]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:47 compute-0 sudo[433868]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:47 compute-0 sudo[433893]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:26:47 compute-0 sudo[433893]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:47 compute-0 podman[433958]: 2025-10-11 02:26:47.967607176 +0000 UTC m=+0.115317404 container create b817a37d689714628d014e7945d98bce1827b974aee3ad04bc0a6b193055a977 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_vaughan, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:26:47 compute-0 podman[433958]: 2025-10-11 02:26:47.886807584 +0000 UTC m=+0.034517832 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:26:48 compute-0 systemd[1]: Started libpod-conmon-b817a37d689714628d014e7945d98bce1827b974aee3ad04bc0a6b193055a977.scope.
Oct 11 02:26:48 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:26:48 compute-0 podman[433958]: 2025-10-11 02:26:48.114055137 +0000 UTC m=+0.261765385 container init b817a37d689714628d014e7945d98bce1827b974aee3ad04bc0a6b193055a977 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_vaughan, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:26:48 compute-0 podman[433958]: 2025-10-11 02:26:48.125527193 +0000 UTC m=+0.273237421 container start b817a37d689714628d014e7945d98bce1827b974aee3ad04bc0a6b193055a977 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_vaughan, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS)
Oct 11 02:26:48 compute-0 pensive_vaughan[433974]: 167 167
Oct 11 02:26:48 compute-0 systemd[1]: libpod-b817a37d689714628d014e7945d98bce1827b974aee3ad04bc0a6b193055a977.scope: Deactivated successfully.
Oct 11 02:26:48 compute-0 podman[433958]: 2025-10-11 02:26:48.148340305 +0000 UTC m=+0.296050624 container attach b817a37d689714628d014e7945d98bce1827b974aee3ad04bc0a6b193055a977 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_vaughan, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:26:48 compute-0 podman[433958]: 2025-10-11 02:26:48.149592409 +0000 UTC m=+0.297302667 container died b817a37d689714628d014e7945d98bce1827b974aee3ad04bc0a6b193055a977 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_vaughan, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2)
Oct 11 02:26:48 compute-0 systemd[1]: var-lib-containers-storage-overlay-fe6e41f5a73b6a7bbcc3cb98d56bbe09e2d66e596e9f149e53bf8f977c75629f-merged.mount: Deactivated successfully.
Oct 11 02:26:48 compute-0 podman[433958]: 2025-10-11 02:26:48.211279953 +0000 UTC m=+0.358990181 container remove b817a37d689714628d014e7945d98bce1827b974aee3ad04bc0a6b193055a977 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_vaughan, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, ceph=True)
Oct 11 02:26:48 compute-0 sshd-session[433853]: Invalid user admin from 121.227.153.123 port 57406
Oct 11 02:26:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1465: 321 pgs: 321 active+clean; 243 MiB data, 347 MiB used, 60 GiB / 60 GiB avail; 95 KiB/s rd, 651 KiB/s wr, 25 op/s
Oct 11 02:26:48 compute-0 systemd[1]: libpod-conmon-b817a37d689714628d014e7945d98bce1827b974aee3ad04bc0a6b193055a977.scope: Deactivated successfully.
Oct 11 02:26:48 compute-0 sshd-session[433853]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:26:48 compute-0 sshd-session[433853]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:26:48 compute-0 podman[433998]: 2025-10-11 02:26:48.456570907 +0000 UTC m=+0.081462841 container create 6473310499dccb21e423a0978c0260a6dcabbd965f0453347d41e142edb6d943 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_poincare, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True, OSD_FLAVOR=default, org.label-schema.vendor=CentOS)
Oct 11 02:26:48 compute-0 podman[433998]: 2025-10-11 02:26:48.416965105 +0000 UTC m=+0.041857129 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:26:48 compute-0 systemd[1]: Started libpod-conmon-6473310499dccb21e423a0978c0260a6dcabbd965f0453347d41e142edb6d943.scope.
Oct 11 02:26:48 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:26:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/373a9f82afc11d3e80da4ae305ab6c18c2b5a5b5c9c4294fb25cad6ea55d14ca/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:26:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/373a9f82afc11d3e80da4ae305ab6c18c2b5a5b5c9c4294fb25cad6ea55d14ca/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:26:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/373a9f82afc11d3e80da4ae305ab6c18c2b5a5b5c9c4294fb25cad6ea55d14ca/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:26:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/373a9f82afc11d3e80da4ae305ab6c18c2b5a5b5c9c4294fb25cad6ea55d14ca/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:26:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/373a9f82afc11d3e80da4ae305ab6c18c2b5a5b5c9c4294fb25cad6ea55d14ca/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:26:48 compute-0 podman[433998]: 2025-10-11 02:26:48.588802328 +0000 UTC m=+0.213694312 container init 6473310499dccb21e423a0978c0260a6dcabbd965f0453347d41e142edb6d943 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_poincare, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:26:48 compute-0 podman[433998]: 2025-10-11 02:26:48.604603395 +0000 UTC m=+0.229495329 container start 6473310499dccb21e423a0978c0260a6dcabbd965f0453347d41e142edb6d943 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_poincare, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 02:26:48 compute-0 podman[433998]: 2025-10-11 02:26:48.611567285 +0000 UTC m=+0.236459229 container attach 6473310499dccb21e423a0978c0260a6dcabbd965f0453347d41e142edb6d943 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_poincare, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 02:26:48 compute-0 nova_compute[356901]: 2025-10-11 02:26:48.944 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:26:49 compute-0 ceph-mon[191930]: pgmap v1465: 321 pgs: 321 active+clean; 243 MiB data, 347 MiB used, 60 GiB / 60 GiB avail; 95 KiB/s rd, 651 KiB/s wr, 25 op/s
Oct 11 02:26:49 compute-0 nova_compute[356901]: 2025-10-11 02:26:49.509 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:49 compute-0 priceless_poincare[434014]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:26:49 compute-0 priceless_poincare[434014]: --> relative data size: 1.0
Oct 11 02:26:49 compute-0 priceless_poincare[434014]: --> All data devices are unavailable
Oct 11 02:26:49 compute-0 systemd[1]: libpod-6473310499dccb21e423a0978c0260a6dcabbd965f0453347d41e142edb6d943.scope: Deactivated successfully.
Oct 11 02:26:49 compute-0 systemd[1]: libpod-6473310499dccb21e423a0978c0260a6dcabbd965f0453347d41e142edb6d943.scope: Consumed 1.164s CPU time.
Oct 11 02:26:49 compute-0 podman[433998]: 2025-10-11 02:26:49.879699072 +0000 UTC m=+1.504591016 container died 6473310499dccb21e423a0978c0260a6dcabbd965f0453347d41e142edb6d943 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_poincare, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS)
Oct 11 02:26:49 compute-0 systemd[1]: var-lib-containers-storage-overlay-373a9f82afc11d3e80da4ae305ab6c18c2b5a5b5c9c4294fb25cad6ea55d14ca-merged.mount: Deactivated successfully.
Oct 11 02:26:49 compute-0 podman[433998]: 2025-10-11 02:26:49.954985321 +0000 UTC m=+1.579877255 container remove 6473310499dccb21e423a0978c0260a6dcabbd965f0453347d41e142edb6d943 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=priceless_poincare, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 02:26:49 compute-0 systemd[1]: libpod-conmon-6473310499dccb21e423a0978c0260a6dcabbd965f0453347d41e142edb6d943.scope: Deactivated successfully.
Oct 11 02:26:49 compute-0 sudo[433893]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:50 compute-0 sudo[434056]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:26:50 compute-0 sudo[434056]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:50 compute-0 sudo[434056]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:50 compute-0 sudo[434081]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:26:50 compute-0 sudo[434081]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:50 compute-0 sudo[434081]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1466: 321 pgs: 321 active+clean; 261 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 58 op/s
Oct 11 02:26:50 compute-0 sudo[434106]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:26:50 compute-0 sudo[434106]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:50 compute-0 sudo[434106]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:50 compute-0 sudo[434131]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:26:50 compute-0 sudo[434131]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:26:50 compute-0 nova_compute[356901]: 2025-10-11 02:26:50.640 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:50 compute-0 podman[434193]: 2025-10-11 02:26:50.858329546 +0000 UTC m=+0.080548064 container create f73d1f1ad371673a204fb2e06557eead4cec1df47cfb55e18b894d6378ada669 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_ritchie, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 02:26:50 compute-0 nova_compute[356901]: 2025-10-11 02:26:50.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:26:50 compute-0 systemd[1]: Started libpod-conmon-f73d1f1ad371673a204fb2e06557eead4cec1df47cfb55e18b894d6378ada669.scope.
Oct 11 02:26:50 compute-0 podman[434193]: 2025-10-11 02:26:50.826418214 +0000 UTC m=+0.048636802 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:26:50 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:26:50 compute-0 podman[434193]: 2025-10-11 02:26:50.976736101 +0000 UTC m=+0.198954639 container init f73d1f1ad371673a204fb2e06557eead4cec1df47cfb55e18b894d6378ada669 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_ritchie, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:26:50 compute-0 podman[434193]: 2025-10-11 02:26:50.989057795 +0000 UTC m=+0.211276333 container start f73d1f1ad371673a204fb2e06557eead4cec1df47cfb55e18b894d6378ada669 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_ritchie, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:26:50 compute-0 podman[434193]: 2025-10-11 02:26:50.99632494 +0000 UTC m=+0.218543468 container attach f73d1f1ad371673a204fb2e06557eead4cec1df47cfb55e18b894d6378ada669 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_ritchie, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default)
Oct 11 02:26:51 compute-0 strange_ritchie[434209]: 167 167
Oct 11 02:26:51 compute-0 systemd[1]: libpod-f73d1f1ad371673a204fb2e06557eead4cec1df47cfb55e18b894d6378ada669.scope: Deactivated successfully.
Oct 11 02:26:51 compute-0 podman[434193]: 2025-10-11 02:26:51.003953038 +0000 UTC m=+0.226171556 container died f73d1f1ad371673a204fb2e06557eead4cec1df47cfb55e18b894d6378ada669 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_ritchie, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef)
Oct 11 02:26:51 compute-0 sshd-session[433853]: Failed password for invalid user admin from 121.227.153.123 port 57406 ssh2
Oct 11 02:26:51 compute-0 systemd[1]: var-lib-containers-storage-overlay-e24f7cd93601fd72cf661e0b886c8b68e2f8d6a6154d9e2ea83d1b06f551d8f0-merged.mount: Deactivated successfully.
Oct 11 02:26:51 compute-0 podman[434193]: 2025-10-11 02:26:51.068727392 +0000 UTC m=+0.290945910 container remove f73d1f1ad371673a204fb2e06557eead4cec1df47cfb55e18b894d6378ada669 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_ritchie, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:26:51 compute-0 systemd[1]: libpod-conmon-f73d1f1ad371673a204fb2e06557eead4cec1df47cfb55e18b894d6378ada669.scope: Deactivated successfully.
Oct 11 02:26:51 compute-0 podman[434233]: 2025-10-11 02:26:51.368266441 +0000 UTC m=+0.079246905 container create 7b20cdc47a221a3fb63e290340f4b89fbff65cdf4fb113f6972dfc18a108b17d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_euclid, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:26:51 compute-0 ceph-mon[191930]: pgmap v1466: 321 pgs: 321 active+clean; 261 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 58 op/s
Oct 11 02:26:51 compute-0 systemd[1]: Started libpod-conmon-7b20cdc47a221a3fb63e290340f4b89fbff65cdf4fb113f6972dfc18a108b17d.scope.
Oct 11 02:26:51 compute-0 podman[434233]: 2025-10-11 02:26:51.343466605 +0000 UTC m=+0.054447099 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:26:51 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:26:51 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2a4de38541240fea3288cb3ee3b38af368d5d418875203ecf2152a1f9e818f8d/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:26:51 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2a4de38541240fea3288cb3ee3b38af368d5d418875203ecf2152a1f9e818f8d/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:26:51 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2a4de38541240fea3288cb3ee3b38af368d5d418875203ecf2152a1f9e818f8d/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:26:51 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/2a4de38541240fea3288cb3ee3b38af368d5d418875203ecf2152a1f9e818f8d/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:26:51 compute-0 podman[434233]: 2025-10-11 02:26:51.506656739 +0000 UTC m=+0.217637203 container init 7b20cdc47a221a3fb63e290340f4b89fbff65cdf4fb113f6972dfc18a108b17d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_euclid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_REF=reef)
Oct 11 02:26:51 compute-0 podman[434250]: 2025-10-11 02:26:51.538985002 +0000 UTC m=+0.112076666 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:26:51 compute-0 podman[434233]: 2025-10-11 02:26:51.544307806 +0000 UTC m=+0.255288270 container start 7b20cdc47a221a3fb63e290340f4b89fbff65cdf4fb113f6972dfc18a108b17d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_euclid, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, OSD_FLAVOR=default)
Oct 11 02:26:51 compute-0 podman[434233]: 2025-10-11 02:26:51.548726415 +0000 UTC m=+0.259706879 container attach 7b20cdc47a221a3fb63e290340f4b89fbff65cdf4fb113f6972dfc18a108b17d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_euclid, ceph=True, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 02:26:51 compute-0 podman[434246]: 2025-10-11 02:26:51.551429004 +0000 UTC m=+0.122359428 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, managed_by=edpm_ansible)
Oct 11 02:26:51 compute-0 podman[434249]: 2025-10-11 02:26:51.588259093 +0000 UTC m=+0.158871677 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, version=9.6, build-date=2025-08-20T13:12:41, io.openshift.expose-services=, maintainer=Red Hat, Inc., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.buildah.version=1.33.7, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.component=ubi9-minimal-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, container_name=openstack_network_exporter, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350, distribution-scope=public, architecture=x86_64, io.openshift.tags=minimal rhel9, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, managed_by=edpm_ansible, name=ubi9-minimal, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal)
Oct 11 02:26:51 compute-0 sshd-session[433853]: Connection closed by invalid user admin 121.227.153.123 port 57406 [preauth]
Oct 11 02:26:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1467: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 58 op/s
Oct 11 02:26:52 compute-0 boring_euclid[434268]: {
Oct 11 02:26:52 compute-0 boring_euclid[434268]:     "0": [
Oct 11 02:26:52 compute-0 boring_euclid[434268]:         {
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "devices": [
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "/dev/loop3"
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             ],
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "lv_name": "ceph_lv0",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "lv_size": "21470642176",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "name": "ceph_lv0",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "tags": {
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.cluster_name": "ceph",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.crush_device_class": "",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.encrypted": "0",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.osd_id": "0",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.type": "block",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.vdo": "0"
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             },
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "type": "block",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "vg_name": "ceph_vg0"
Oct 11 02:26:52 compute-0 boring_euclid[434268]:         }
Oct 11 02:26:52 compute-0 boring_euclid[434268]:     ],
Oct 11 02:26:52 compute-0 boring_euclid[434268]:     "1": [
Oct 11 02:26:52 compute-0 boring_euclid[434268]:         {
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "devices": [
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "/dev/loop4"
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             ],
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "lv_name": "ceph_lv1",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "lv_size": "21470642176",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "name": "ceph_lv1",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "tags": {
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.cluster_name": "ceph",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.crush_device_class": "",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.encrypted": "0",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.osd_id": "1",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.type": "block",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.vdo": "0"
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             },
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "type": "block",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "vg_name": "ceph_vg1"
Oct 11 02:26:52 compute-0 boring_euclid[434268]:         }
Oct 11 02:26:52 compute-0 boring_euclid[434268]:     ],
Oct 11 02:26:52 compute-0 boring_euclid[434268]:     "2": [
Oct 11 02:26:52 compute-0 boring_euclid[434268]:         {
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "devices": [
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "/dev/loop5"
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             ],
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "lv_name": "ceph_lv2",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "lv_size": "21470642176",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "name": "ceph_lv2",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "tags": {
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.cluster_name": "ceph",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.crush_device_class": "",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.encrypted": "0",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.osd_id": "2",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.type": "block",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:                 "ceph.vdo": "0"
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             },
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "type": "block",
Oct 11 02:26:52 compute-0 boring_euclid[434268]:             "vg_name": "ceph_vg2"
Oct 11 02:26:52 compute-0 boring_euclid[434268]:         }
Oct 11 02:26:52 compute-0 boring_euclid[434268]:     ]
Oct 11 02:26:52 compute-0 boring_euclid[434268]: }
Oct 11 02:26:52 compute-0 systemd[1]: libpod-7b20cdc47a221a3fb63e290340f4b89fbff65cdf4fb113f6972dfc18a108b17d.scope: Deactivated successfully.
Oct 11 02:26:52 compute-0 podman[434233]: 2025-10-11 02:26:52.376296157 +0000 UTC m=+1.087276621 container died 7b20cdc47a221a3fb63e290340f4b89fbff65cdf4fb113f6972dfc18a108b17d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_euclid, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 02:26:52 compute-0 systemd[1]: var-lib-containers-storage-overlay-2a4de38541240fea3288cb3ee3b38af368d5d418875203ecf2152a1f9e818f8d-merged.mount: Deactivated successfully.
Oct 11 02:26:52 compute-0 podman[434233]: 2025-10-11 02:26:52.464792298 +0000 UTC m=+1.175772772 container remove 7b20cdc47a221a3fb63e290340f4b89fbff65cdf4fb113f6972dfc18a108b17d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_euclid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:26:52 compute-0 systemd[1]: libpod-conmon-7b20cdc47a221a3fb63e290340f4b89fbff65cdf4fb113f6972dfc18a108b17d.scope: Deactivated successfully.
Oct 11 02:26:52 compute-0 sudo[434131]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:52 compute-0 sudo[434332]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:26:52 compute-0 sudo[434332]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:52 compute-0 sudo[434332]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:52 compute-0 sudo[434357]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:26:52 compute-0 sudo[434357]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:52 compute-0 sudo[434357]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:52 compute-0 sudo[434382]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:26:52 compute-0 sudo[434382]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:52 compute-0 sudo[434382]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:52 compute-0 sudo[434407]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:26:52 compute-0 sudo[434407]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:53 compute-0 ceph-mon[191930]: pgmap v1467: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 58 op/s
Oct 11 02:26:53 compute-0 podman[434470]: 2025-10-11 02:26:53.582050024 +0000 UTC m=+0.087309419 container create 691d44b406990b3fc4cd663dd08669d4159598a52c82e7a0911fa1157586eaf9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hardcore_morse, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:26:53 compute-0 podman[434470]: 2025-10-11 02:26:53.550194309 +0000 UTC m=+0.055453724 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:26:53 compute-0 systemd[1]: Started libpod-conmon-691d44b406990b3fc4cd663dd08669d4159598a52c82e7a0911fa1157586eaf9.scope.
Oct 11 02:26:53 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:26:53 compute-0 podman[434470]: 2025-10-11 02:26:53.750298558 +0000 UTC m=+0.255557933 container init 691d44b406990b3fc4cd663dd08669d4159598a52c82e7a0911fa1157586eaf9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hardcore_morse, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 02:26:53 compute-0 podman[434470]: 2025-10-11 02:26:53.770949011 +0000 UTC m=+0.276208406 container start 691d44b406990b3fc4cd663dd08669d4159598a52c82e7a0911fa1157586eaf9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hardcore_morse, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 02:26:53 compute-0 podman[434470]: 2025-10-11 02:26:53.777648617 +0000 UTC m=+0.282907992 container attach 691d44b406990b3fc4cd663dd08669d4159598a52c82e7a0911fa1157586eaf9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hardcore_morse, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 02:26:53 compute-0 hardcore_morse[434486]: 167 167
Oct 11 02:26:53 compute-0 systemd[1]: libpod-691d44b406990b3fc4cd663dd08669d4159598a52c82e7a0911fa1157586eaf9.scope: Deactivated successfully.
Oct 11 02:26:53 compute-0 conmon[434486]: conmon 691d44b406990b3fc4cd <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-691d44b406990b3fc4cd663dd08669d4159598a52c82e7a0911fa1157586eaf9.scope/container/memory.events
Oct 11 02:26:53 compute-0 podman[434470]: 2025-10-11 02:26:53.783467746 +0000 UTC m=+0.288727141 container died 691d44b406990b3fc4cd663dd08669d4159598a52c82e7a0911fa1157586eaf9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hardcore_morse, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 02:26:53 compute-0 nova_compute[356901]: 2025-10-11 02:26:53.891 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:26:53 compute-0 systemd[1]: var-lib-containers-storage-overlay-dd207f0cb2f0b88bcbfc701191f5906455a9229f7e45d5764b20876e72d776e5-merged.mount: Deactivated successfully.
Oct 11 02:26:53 compute-0 sshd-session[434316]: Invalid user admin from 121.227.153.123 port 42664
Oct 11 02:26:53 compute-0 podman[434470]: 2025-10-11 02:26:53.936679323 +0000 UTC m=+0.441938678 container remove 691d44b406990b3fc4cd663dd08669d4159598a52c82e7a0911fa1157586eaf9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hardcore_morse, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3)
Oct 11 02:26:53 compute-0 systemd[1]: libpod-conmon-691d44b406990b3fc4cd663dd08669d4159598a52c82e7a0911fa1157586eaf9.scope: Deactivated successfully.
Oct 11 02:26:54 compute-0 sshd-session[434316]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:26:54 compute-0 sshd-session[434316]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:26:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1468: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 58 op/s
Oct 11 02:26:54 compute-0 podman[434510]: 2025-10-11 02:26:54.233607842 +0000 UTC m=+0.089218630 container create bef4d1f212e80357866d379c9804289d9a8f925f2052fb596513bd571894192b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_matsumoto, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:26:54 compute-0 podman[434510]: 2025-10-11 02:26:54.200746231 +0000 UTC m=+0.056357109 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:26:54 compute-0 systemd[1]: Started libpod-conmon-bef4d1f212e80357866d379c9804289d9a8f925f2052fb596513bd571894192b.scope.
Oct 11 02:26:54 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:26:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/97d35adc88f2a19e551df8521d84bc11f9b2f9e7697ff72b50fe512ee3de6d23/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:26:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/97d35adc88f2a19e551df8521d84bc11f9b2f9e7697ff72b50fe512ee3de6d23/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:26:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/97d35adc88f2a19e551df8521d84bc11f9b2f9e7697ff72b50fe512ee3de6d23/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:26:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/97d35adc88f2a19e551df8521d84bc11f9b2f9e7697ff72b50fe512ee3de6d23/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:26:54 compute-0 podman[434510]: 2025-10-11 02:26:54.376046452 +0000 UTC m=+0.231657270 container init bef4d1f212e80357866d379c9804289d9a8f925f2052fb596513bd571894192b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_matsumoto, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, io.buildah.version=1.39.3)
Oct 11 02:26:54 compute-0 podman[434510]: 2025-10-11 02:26:54.397768601 +0000 UTC m=+0.253379389 container start bef4d1f212e80357866d379c9804289d9a8f925f2052fb596513bd571894192b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_matsumoto, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3)
Oct 11 02:26:54 compute-0 podman[434510]: 2025-10-11 02:26:54.413729379 +0000 UTC m=+0.269340177 container attach bef4d1f212e80357866d379c9804289d9a8f925f2052fb596513bd571894192b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_matsumoto, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:26:54 compute-0 nova_compute[356901]: 2025-10-11 02:26:54.514 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:26:54.853 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:26:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:26:54.854 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:26:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:26:54.855 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:26:55 compute-0 ceph-mon[191930]: pgmap v1468: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 166 KiB/s rd, 1.5 MiB/s wr, 58 op/s
Oct 11 02:26:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]: {
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:         "osd_id": 1,
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:         "type": "bluestore"
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:     },
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:         "osd_id": 2,
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:         "type": "bluestore"
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:     },
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:         "osd_id": 0,
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:         "type": "bluestore"
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]:     }
Oct 11 02:26:55 compute-0 competent_matsumoto[434526]: }
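
The JSON the short-lived competent_matsumoto container prints above maps OSD UUIDs to their LVM devices; it looks like the output of a ceph-volume inventory pass (for example `ceph-volume lvm list --format json`) that cephadm gathers while refreshing host devices. A minimal sketch of parsing that payload, abbreviated to one of the three entries shown:

    import json

    # One entry trimmed from the container output above; the full payload has
    # osd_id 0, 1 and 2 on ceph_lv0..ceph_lv2.
    payload = """
    {
        "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
            "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
            "device": "/dev/mapper/ceph_vg0-ceph_lv0",
            "osd_id": 0,
            "type": "bluestore"
        }
    }
    """
    for osd_uuid, meta in json.loads(payload).items():
        print(f"osd.{meta['osd_id']} ({meta['type']}) -> {meta['device']}")
    # -> osd.0 (bluestore) -> /dev/mapper/ceph_vg0-ceph_lv0
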
Oct 11 02:26:55 compute-0 systemd[1]: libpod-bef4d1f212e80357866d379c9804289d9a8f925f2052fb596513bd571894192b.scope: Deactivated successfully.
Oct 11 02:26:55 compute-0 systemd[1]: libpod-bef4d1f212e80357866d379c9804289d9a8f925f2052fb596513bd571894192b.scope: Consumed 1.066s CPU time.
Oct 11 02:26:55 compute-0 podman[434559]: 2025-10-11 02:26:55.577729699 +0000 UTC m=+0.066896511 container died bef4d1f212e80357866d379c9804289d9a8f925f2052fb596513bd571894192b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_matsumoto, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:26:55 compute-0 systemd[1]: var-lib-containers-storage-overlay-97d35adc88f2a19e551df8521d84bc11f9b2f9e7697ff72b50fe512ee3de6d23-merged.mount: Deactivated successfully.
Oct 11 02:26:55 compute-0 nova_compute[356901]: 2025-10-11 02:26:55.646 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:55 compute-0 podman[434559]: 2025-10-11 02:26:55.651856973 +0000 UTC m=+0.141023725 container remove bef4d1f212e80357866d379c9804289d9a8f925f2052fb596513bd571894192b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_matsumoto, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:26:55 compute-0 systemd[1]: libpod-conmon-bef4d1f212e80357866d379c9804289d9a8f925f2052fb596513bd571894192b.scope: Deactivated successfully.
Oct 11 02:26:55 compute-0 sudo[434407]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:26:55 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:26:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:26:55 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:26:55 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 73ee64ce-f957-4661-8651-2fe5b6f29c95 does not exist
Oct 11 02:26:55 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev c75e517a-4fe9-4232-a698-d556a484ab09 does not exist
Oct 11 02:26:55 compute-0 sudo[434575]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:26:55 compute-0 sudo[434575]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:55 compute-0 sudo[434575]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:55 compute-0 sudo[434600]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:26:55 compute-0 sudo[434600]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:26:55 compute-0 sudo[434600]: pam_unix(sudo:session): session closed for user root
Oct 11 02:26:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1469: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 165 KiB/s rd, 1.5 MiB/s wr, 58 op/s
Oct 11 02:26:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:26:56
Oct 11 02:26:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:26:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:26:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['.rgw.root', 'default.rgw.control', 'default.rgw.meta', 'default.rgw.log', 'images', 'backups', 'cephfs.cephfs.meta', 'volumes', '.mgr', 'vms', 'cephfs.cephfs.data']
Oct 11 02:26:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:26:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:26:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:26:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:26:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:26:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:26:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:26:56 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:26:56 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:26:56 compute-0 ceph-mon[191930]: pgmap v1469: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 165 KiB/s rd, 1.5 MiB/s wr, 58 op/s
Oct 11 02:26:56 compute-0 sshd-session[434316]: Failed password for invalid user admin from 121.227.153.123 port 42664 ssh2
Oct 11 02:26:56 compute-0 nova_compute[356901]: 2025-10-11 02:26:56.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:26:56 compute-0 nova_compute[356901]: 2025-10-11 02:26:56.898 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:26:56 compute-0 nova_compute[356901]: 2025-10-11 02:26:56.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:26:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:26:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:26:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:26:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:26:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:26:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:26:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:26:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:26:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:26:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:26:57 compute-0 sshd-session[434316]: Connection closed by invalid user admin 121.227.153.123 port 42664 [preauth]
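
The sshd-session lines interleaved through this window show a repeating brute-force cycle from 121.227.153.123: invalid user "admin", pam authentication failure, failed password, connection closed [preauth]. A small sketch for tallying such attempts from a saved copy of a log like this one (the filename is an example, not from the log):

    import re
    from collections import Counter

    # Count "Failed password for invalid user" events per (source IP, user).
    pat = re.compile(r"Failed password for invalid user (\S+) from (\S+) port")
    hits = Counter()
    with open("messages.log") as fh:   # hypothetical capture of this journal
        for line in fh:
            m = pat.search(line)
            if m:
                hits[(m.group(2), m.group(1))] += 1
    for (ip, user), n in hits.most_common():
        print(f"{ip} -> {user}: {n} failures")
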
Oct 11 02:26:57 compute-0 nova_compute[356901]: 2025-10-11 02:26:57.899 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:26:57 compute-0 nova_compute[356901]: 2025-10-11 02:26:57.900 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:26:58 compute-0 podman[434627]: 2025-10-11 02:26:58.194151825 +0000 UTC m=+0.090680429 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.display-name=Red Hat Universal Base Image 9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, name=ubi9, maintainer=Red Hat, Inc., com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.29.0, architecture=x86_64, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, version=9.4, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, release=1214.1726694543, config_id=edpm, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.openshift.expose-services=, summary=Provides the latest release of Red Hat Universal Base Image 9., distribution-scope=public, release-0.7.12=, container_name=kepler, io.openshift.tags=base rhel9, managed_by=edpm_ansible, vendor=Red Hat, Inc., build-date=2024-09-18T21:23:30)
Oct 11 02:26:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1470: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 165 KiB/s rd, 1.2 MiB/s wr, 56 op/s
Oct 11 02:26:58 compute-0 nova_compute[356901]: 2025-10-11 02:26:58.638 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-7513b93e-d2b8-4ae0-8f1c-3df190945259" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:26:58 compute-0 nova_compute[356901]: 2025-10-11 02:26:58.639 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-7513b93e-d2b8-4ae0-8f1c-3df190945259" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:26:58 compute-0 nova_compute[356901]: 2025-10-11 02:26:58.639 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:26:58 compute-0 sshd-session[434625]: Invalid user admin from 121.227.153.123 port 42674
Oct 11 02:26:59 compute-0 sshd-session[434625]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:26:59 compute-0 sshd-session[434625]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:26:59 compute-0 ceph-mon[191930]: pgmap v1470: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 165 KiB/s rd, 1.2 MiB/s wr, 56 op/s
Oct 11 02:26:59 compute-0 nova_compute[356901]: 2025-10-11 02:26:59.514 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:26:59 compute-0 podman[157119]: time="2025-10-11T02:26:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:26:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:26:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:26:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:26:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9057 "" "Go-http-client/1.1"
Oct 11 02:27:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1471: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 71 KiB/s rd, 869 KiB/s wr, 32 op/s
Oct 11 02:27:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:27:00 compute-0 nova_compute[356901]: 2025-10-11 02:27:00.650 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:00 compute-0 nova_compute[356901]: 2025-10-11 02:27:00.912 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Updating instance_info_cache with network_info: [{"id": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "address": "fa:16:3e:16:ee:dc", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.225", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.204", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa942acb1-1e", "ovs_interfaceid": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:27:00 compute-0 nova_compute[356901]: 2025-10-11 02:27:00.940 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-7513b93e-d2b8-4ae0-8f1c-3df190945259" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:27:00 compute-0 nova_compute[356901]: 2025-10-11 02:27:00.942 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
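
The _heal_instance_info_cache cycle that just completed refreshed instance 7513b93e-d2b8-4ae0-8f1c-3df190945259 with the network_info blob logged at 02:27:00.912, which carries both the fixed and the floating address. A sketch pulling those out of a payload of that shape (trimmed to the fields used):

    import json

    # Abbreviated from the network_info payload nova logged above.
    network_info = json.loads("""
    [{"id": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e",
      "network": {"subnets": [{"ips": [
          {"address": "192.168.0.225", "type": "fixed",
           "floating_ips": [{"address": "192.168.122.204",
                             "type": "floating"}]}]}]}}]
    """)
    for vif in network_info:
        for subnet in vif["network"]["subnets"]:
            for ip in subnet["ips"]:
                floats = [f["address"] for f in ip.get("floating_ips", [])]
                print(f'fixed {ip["address"]}, floating {floats}')
    # -> fixed 192.168.0.225, floating ['192.168.122.204']
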
Oct 11 02:27:00 compute-0 nova_compute[356901]: 2025-10-11 02:27:00.944 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:27:00 compute-0 nova_compute[356901]: 2025-10-11 02:27:00.945 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:27:00 compute-0 nova_compute[356901]: 2025-10-11 02:27:00.977 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:27:00 compute-0 nova_compute[356901]: 2025-10-11 02:27:00.978 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:27:00 compute-0 nova_compute[356901]: 2025-10-11 02:27:00.979 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:27:00 compute-0 nova_compute[356901]: 2025-10-11 02:27:00.980 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:27:00 compute-0 nova_compute[356901]: 2025-10-11 02:27:00.981 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:27:01 compute-0 ceph-mon[191930]: pgmap v1471: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 71 KiB/s rd, 869 KiB/s wr, 32 op/s
Oct 11 02:27:01 compute-0 openstack_network_exporter[374316]: ERROR   02:27:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:27:01 compute-0 openstack_network_exporter[374316]: ERROR   02:27:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:27:01 compute-0 openstack_network_exporter[374316]: ERROR   02:27:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:27:01 compute-0 openstack_network_exporter[374316]: ERROR   02:27:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:27:01 compute-0 openstack_network_exporter[374316]: ERROR   02:27:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:27:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:27:01 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/294397225' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:27:01 compute-0 sshd-session[434625]: Failed password for invalid user admin from 121.227.153.123 port 42674 ssh2
Oct 11 02:27:01 compute-0 nova_compute[356901]: 2025-10-11 02:27:01.521 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.541s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
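
The resource-audit pass shells out to `ceph df --format=json` (command string verbatim in the log) to learn pool capacity before reporting disk inventory. A sketch of the same probe; it needs a reachable cluster and the openstack keyring, and the stats key names are as in current Ceph releases rather than taken from this log:

    import json
    import subprocess

    out = subprocess.run(
        ["ceph", "df", "--format=json", "--id", "openstack",
         "--conf", "/etc/ceph/ceph.conf"],
        capture_output=True, text=True, check=True,
    ).stdout
    stats = json.loads(out)["stats"]
    print(stats["total_bytes"], stats["total_avail_bytes"])
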
Oct 11 02:27:01 compute-0 nova_compute[356901]: 2025-10-11 02:27:01.849 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:27:01 compute-0 nova_compute[356901]: 2025-10-11 02:27:01.851 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:27:01 compute-0 nova_compute[356901]: 2025-10-11 02:27:01.852 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:27:01 compute-0 nova_compute[356901]: 2025-10-11 02:27:01.861 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:27:01 compute-0 nova_compute[356901]: 2025-10-11 02:27:01.861 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:27:01 compute-0 nova_compute[356901]: 2025-10-11 02:27:01.862 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:27:01 compute-0 nova_compute[356901]: 2025-10-11 02:27:01.869 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:27:01 compute-0 nova_compute[356901]: 2025-10-11 02:27:01.869 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:27:01 compute-0 nova_compute[356901]: 2025-10-11 02:27:01.870 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:27:01 compute-0 nova_compute[356901]: 2025-10-11 02:27:01.878 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:27:01 compute-0 nova_compute[356901]: 2025-10-11 02:27:01.878 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:27:01 compute-0 nova_compute[356901]: 2025-10-11 02:27:01.879 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:27:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1472: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 11 KiB/s wr, 0 op/s
Oct 11 02:27:02 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/294397225' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:27:02 compute-0 nova_compute[356901]: 2025-10-11 02:27:02.429 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:27:02 compute-0 nova_compute[356901]: 2025-10-11 02:27:02.431 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3208MB free_disk=59.855655670166016GB free_vcpus=4 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:27:02 compute-0 nova_compute[356901]: 2025-10-11 02:27:02.432 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:27:02 compute-0 nova_compute[356901]: 2025-10-11 02:27:02.432 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:27:02 compute-0 nova_compute[356901]: 2025-10-11 02:27:02.541 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:27:02 compute-0 nova_compute[356901]: 2025-10-11 02:27:02.542 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance d60d7ea1-5d00-4902-90e6-3ae67eb09a78 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:27:02 compute-0 nova_compute[356901]: 2025-10-11 02:27:02.543 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 7513b93e-d2b8-4ae0-8f1c-3df190945259 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:27:02 compute-0 nova_compute[356901]: 2025-10-11 02:27:02.543 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 358d31cf-2866-416a-b2fc-814ee4bfe89a actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:27:02 compute-0 nova_compute[356901]: 2025-10-11 02:27:02.544 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 4 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:27:02 compute-0 nova_compute[356901]: 2025-10-11 02:27:02.544 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=2560MB phys_disk=59GB used_disk=8GB total_vcpus=8 used_vcpus=4 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:27:02 compute-0 sshd-session[434625]: Connection closed by invalid user admin 121.227.153.123 port 42674 [preauth]
Oct 11 02:27:02 compute-0 nova_compute[356901]: 2025-10-11 02:27:02.645 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:27:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:27:03 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2063623630' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:27:03 compute-0 nova_compute[356901]: 2025-10-11 02:27:03.179 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.534s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:27:03 compute-0 nova_compute[356901]: 2025-10-11 02:27:03.192 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:27:03 compute-0 nova_compute[356901]: 2025-10-11 02:27:03.212 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
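
The inventory dict nova just reported is what placement turns into schedulable capacity, as usable = (total - reserved) * allocation_ratio. Working the logged numbers (a sketch of the arithmetic, not placement's code):

    # Values copied from the inventory data logged above.
    inventory = {
        "VCPU":      {"total": 8,    "reserved": 0,   "allocation_ratio": 4.0},
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "DISK_GB":   {"total": 59,   "reserved": 1,   "allocation_ratio": 0.9},
    }
    for rc, inv in inventory.items():
        usable = (inv["total"] - inv["reserved"]) * inv["allocation_ratio"]
        print(f"{rc}: {usable:g} allocatable")
    # VCPU: 32, MEMORY_MB: 7168, DISK_GB: 52.2 -- consistent with the
    # "Final resource view" of 8 vcpus with 4 used earlier in this window.
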
Oct 11 02:27:03 compute-0 nova_compute[356901]: 2025-10-11 02:27:03.231 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:27:03 compute-0 nova_compute[356901]: 2025-10-11 02:27:03.231 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.799s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:27:03 compute-0 ceph-mon[191930]: pgmap v1472: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 11 KiB/s wr, 0 op/s
Oct 11 02:27:03 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2063623630' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:27:04 compute-0 sshd-session[434689]: Invalid user admin from 121.227.153.123 port 59846
Oct 11 02:27:04 compute-0 nova_compute[356901]: 2025-10-11 02:27:04.186 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:27:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1473: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 8.7 KiB/s wr, 0 op/s
Oct 11 02:27:04 compute-0 sshd-session[434689]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:27:04 compute-0 sshd-session[434689]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:27:04 compute-0 nova_compute[356901]: 2025-10-11 02:27:04.518 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:05 compute-0 ceph-mon[191930]: pgmap v1473: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 8.7 KiB/s wr, 0 op/s
Oct 11 02:27:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:27:05 compute-0 nova_compute[356901]: 2025-10-11 02:27:05.656 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:06 compute-0 podman[434694]: 2025-10-11 02:27:06.214861911 +0000 UTC m=+0.101737197 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1474: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:06 compute-0 podman[434697]: 2025-10-11 02:27:06.226389576 +0000 UTC m=+0.092579681 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, org.label-schema.license=GPLv2, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:27:06 compute-0 podman[434696]: 2025-10-11 02:27:06.245051477 +0000 UTC m=+0.128337455 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, tcib_managed=true, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']})
Oct 11 02:27:06 compute-0 podman[434695]: 2025-10-11 02:27:06.272730172 +0000 UTC m=+0.153208460 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, io.buildah.version=1.41.3, managed_by=edpm_ansible)
Oct 11 02:27:06 compute-0 ceph-mon[191930]: pgmap v1474: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:06 compute-0 sshd-session[434689]: Failed password for invalid user admin from 121.227.153.123 port 59846 ssh2
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.00221085813879664 of space, bias 1.0, pg target 0.663257441638992 quantized to 32 (current 32)
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:27:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
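
The pg_autoscaler block above can be reproduced from its own numbers: each pool's pg target is usage_ratio * bias * (mon_target_pg_per_osd * OSD count). Assuming the default mon_target_pg_per_osd of 100 and the three OSDs listed earlier in this log gives a factor of 300, which matches the logged targets exactly; the "quantized to" value then appears to round to a power of two no lower than the pool's pg_num_min floor (1 for .mgr, 16 for the cephfs metadata pool, 32 elsewhere), though those floors are inferred, not logged:

    # Reproducing the pg_autoscaler arithmetic from the log above. The 100
    # (mon_target_pg_per_osd) is an assumed default; the OSD count of 3 comes
    # from the ceph-volume listing earlier in this log.
    TARGET_PGS = 100 * 3

    pools = [
        (".mgr",               7.185749983720779e-06, 1.0),
        ("vms",                0.00221085813879664,   1.0),
        ("cephfs.cephfs.meta", 5.087256625643029e-07, 4.0),
    ]
    for name, ratio, bias in pools:
        print(f"{name}: pg target {ratio * bias * TARGET_PGS}")
    # .mgr: 0.0021557249951162337, vms: 0.663257441638992,
    # cephfs.cephfs.meta: 0.0006104707950771635 -- matching the log lines.
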
Oct 11 02:27:07 compute-0 sshd-session[434689]: Connection closed by invalid user admin 121.227.153.123 port 59846 [preauth]
Oct 11 02:27:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1475: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:09 compute-0 sshd-session[434775]: Invalid user admin from 121.227.153.123 port 59854
Oct 11 02:27:09 compute-0 sshd-session[434775]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:27:09 compute-0 sshd-session[434775]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:27:09 compute-0 ceph-mon[191930]: pgmap v1475: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:09 compute-0 nova_compute[356901]: 2025-10-11 02:27:09.520 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1476: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:27:10 compute-0 ceph-mon[191930]: pgmap v1476: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:10 compute-0 nova_compute[356901]: 2025-10-11 02:27:10.659 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:11 compute-0 sshd-session[434775]: Failed password for invalid user admin from 121.227.153.123 port 59854 ssh2
Oct 11 02:27:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1477: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:12 compute-0 podman[434778]: 2025-10-11 02:27:12.229939727 +0000 UTC m=+0.116327068 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, container_name=iscsid, config_id=iscsid, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.license=GPLv2)
Oct 11 02:27:12 compute-0 podman[434777]: 2025-10-11 02:27:12.23977246 +0000 UTC m=+0.131760279 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:27:12 compute-0 sshd-session[434775]: Connection closed by invalid user admin 121.227.153.123 port 59854 [preauth]
Oct 11 02:27:13 compute-0 ceph-mon[191930]: pgmap v1477: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.863 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is greater than the number of worker threads available to execute them. Therefore, one can expect the polling process to take longer than expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.864 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
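[editor's note] The warning above simply means the [pollsters] source defines more pollsters than the single worker thread processing them, so they execute sequentially. A toy illustration of why one worker serializes the work (illustrative names only, not ceilometer code):

    import concurrent.futures
    import time

    def poll(name: str) -> str:
        time.sleep(0.1)  # stand-in for one pollster's work
        return name

    # With max_workers=1 the tasks serialize: N pollsters take ~N * 0.1 s.
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
        start = time.monotonic()
        list(executor.map(poll, ["network.incoming.bytes", "disk.device.capacity"]))
        print(f"elapsed: {time.monotonic() - start:.2f}s")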
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.864 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.865 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.866 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.876 14 DEBUG ceilometer.compute.discovery [-] Querying metadata for instance 358d31cf-2866-416a-b2fc-814ee4bfe89a from Nova API get_server /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:176
Oct 11 02:27:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:13.880 14 DEBUG novaclient.v2.client [-] REQ: curl -g -i -X GET https://nova-internal.openstack.svc:8774/v2.1/servers/358d31cf-2866-416a-b2fc-814ee4bfe89a -H "Accept: application/json" -H "User-Agent: python-novaclient" -H "X-Auth-Token: {SHA256}d674387017edb5d8543811c363b3a2965950a94ddf4462840fede0e79ac258e9" -H "X-OpenStack-Nova-API-Version: 2.1" _http_log_request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:572
Oct 11 02:27:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1478: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:14 compute-0 sshd-session[434818]: Invalid user admin from 121.227.153.123 port 38286
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.288 14 DEBUG novaclient.v2.client [-] RESP: [200] Connection: Keep-Alive Content-Length: 1960 Content-Type: application/json Date: Sat, 11 Oct 2025 02:27:13 GMT Keep-Alive: timeout=5, max=100 OpenStack-API-Version: compute 2.1 Server: Apache Vary: OpenStack-API-Version,X-OpenStack-Nova-API-Version X-OpenStack-Nova-API-Version: 2.1 x-compute-request-id: req-a92c8ba8-77c2-416e-8433-8f571c094ee4 x-openstack-request-id: req-a92c8ba8-77c2-416e-8433-8f571c094ee4 _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:613
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.289 14 DEBUG novaclient.v2.client [-] RESP BODY: {"server": {"id": "358d31cf-2866-416a-b2fc-814ee4bfe89a", "name": "vn-vgckve2-tqko7trrsvwg-ebwakep2a2y3-vnf-ihxi227vdpwh", "status": "ACTIVE", "tenant_id": "97026531b3404a11869cb85a059c4a0d", "user_id": "d215f3ebbc07435493ccd666fc80109d", "metadata": {"metering.server_group": "3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e"}, "hostId": "2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736", "image": {"id": "a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/images/a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7"}]}, "flavor": {"id": "486e1451-345c-45d6-b075-f4717e759025", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/flavors/486e1451-345c-45d6-b075-f4717e759025"}]}, "created": "2025-10-11T02:25:57Z", "updated": "2025-10-11T02:26:09Z", "addresses": {"private": [{"version": 4, "addr": "192.168.0.152", "OS-EXT-IPS:type": "fixed", "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:b0:ca:41"}, {"version": 4, "addr": "192.168.122.173", "OS-EXT-IPS:type": "floating", "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:b0:ca:41"}]}, "accessIPv4": "", "accessIPv6": "", "links": [{"rel": "self", "href": "https://nova-internal.openstack.svc:8774/v2.1/servers/358d31cf-2866-416a-b2fc-814ee4bfe89a"}, {"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/servers/358d31cf-2866-416a-b2fc-814ee4bfe89a"}], "OS-DCF:diskConfig": "MANUAL", "progress": 0, "OS-EXT-AZ:availability_zone": "nova", "config_drive": "True", "key_name": null, "OS-SRV-USG:launched_at": "2025-10-11T02:26:09.000000", "OS-SRV-USG:terminated_at": null, "security_groups": [{"name": "basic"}], "OS-EXT-SRV-ATTR:host": "compute-0.ctlplane.example.com", "OS-EXT-SRV-ATTR:instance_name": "instance-00000004", "OS-EXT-SRV-ATTR:hypervisor_hostname": "compute-0.ctlplane.example.com", "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-EXT-STS:power_state": 1, "os-extended-volumes:volumes_attached": []}} _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:648
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.289 14 DEBUG novaclient.v2.client [-] GET call to compute for https://nova-internal.openstack.svc:8774/v2.1/servers/358d31cf-2866-416a-b2fc-814ee4bfe89a used request id req-a92c8ba8-77c2-416e-8433-8f571c094ee4 request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:1073
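[editor's note] The REQ/RESP pair above shows ceilometer's discovery fetching server metadata through python-novaclient at microversion 2.1. A minimal reproduction of the same GET /v2.1/servers/{id} call (the credentials and Keystone auth URL below are placeholders, not values from the log):

    from keystoneauth1 import session
    from keystoneauth1.identity import v3
    from novaclient import client as nova_client

    # Placeholder credentials and auth URL -- substitute real Keystone values.
    auth = v3.Password(
        auth_url="https://keystone-internal.openstack.svc:5000/v3",
        username="ceilometer",
        password="secret",
        project_name="service",
        user_domain_name="Default",
        project_domain_name="Default",
    )
    nova = nova_client.Client("2.1", session=session.Session(auth=auth))

    # Same server UUID that ceilometer queried above.
    server = nova.servers.get("358d31cf-2866-416a-b2fc-814ee4bfe89a")
    print(server.name, server.status, server.metadata)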
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.291 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '358d31cf-2866-416a-b2fc-814ee4bfe89a', 'name': 'vn-vgckve2-tqko7trrsvwg-ebwakep2a2y3-vnf-ihxi227vdpwh', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000004', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {'metering.server_group': '3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.297 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': 'd60d7ea1-5d00-4902-90e6-3ae67eb09a78', 'name': 'vn-vgckve2-ittzoa6m3dmq-egfg3ceao3k4-vnf-rvnztbwt2zgh', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000002', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {'metering.server_group': '3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.302 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '7513b93e-d2b8-4ae0-8f1c-3df190945259', 'name': 'vn-vgckve2-djjfpphdsuuh-gthznuj2xct2-vnf-jmvtgw3mflyn', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000003', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {'metering.server_group': '3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.305 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.305 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.305 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.305 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.306 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.306 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:27:14.306034) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.309 14 DEBUG ceilometer.compute.virt.libvirt.inspector [-] No delta meter predecessor for 358d31cf-2866-416a-b2fc-814ee4bfe89a / tap7f4342b0-8a inspect_vnics /usr/lib/python3.12/site-packages/ceilometer/compute/virt/libvirt/inspector.py:143
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.310 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.incoming.bytes volume: 1786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.314 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.bytes volume: 8664 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.318 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.bytes volume: 1912 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.324 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2436 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.324 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
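[editor's note] The "No delta meter predecessor" line at 02:27:14.309 is expected on a first poll: a delta meter needs a prior reading, so the first sample for a new instance/interface only seeds the cache. A schematic of that bookkeeping (illustrative only, not ceilometer's implementation):

    prev: dict[tuple[str, str], int] = {}

    def delta(instance: str, nic: str, reading: int) -> int | None:
        """Return reading minus the previous reading, or None on the first poll."""
        key = (instance, nic)
        last, prev[key] = prev.get(key), reading
        return None if last is None else reading - last

    print(delta("358d31cf", "tap7f4342b0-8a", 1786))  # None: no predecessor yet
    print(delta("358d31cf", "tap7f4342b0-8a", 2048))  # 262 on the next poll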
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.324 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.325 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.325 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.325 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.325 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.325 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.outgoing.packets volume: 18 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.325 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.packets volume: 62 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.325 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.packets volume: 22 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.326 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 22 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.326 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:27:14.325412) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.326 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.326 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.326 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.327 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.327 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.327 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.327 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.327 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.328 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.329 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.330 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.330 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.331 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.331 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.331 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.331 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.332 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.332 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.333 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.333 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.334 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:27:14.327201) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.334 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:27:14.331826) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.335 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.335 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.336 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.336 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.336 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.336 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.337 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:27:14.336364) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.369 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.369 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.370 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.capacity volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.396 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.396 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.396 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.capacity volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.422 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.424 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.424 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.capacity volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.464 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.465 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.466 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.468 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
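[editor's note] Each instance above emits three disk.device.capacity samples, one per attached device. The repeated 1073741824-byte values line up with the flavor's 1 GiB root disk and 1 GiB ephemeral disk, and the much smaller third value is plausibly the config drive (the server record earlier shows config_drive "True"). A quick unit check, purely illustrative:

    GiB = 1024 ** 3
    assert 1073741824 == 1 * GiB   # matches flavor disk=1 and ephemeral=1
    print(583680 // 1024, "KiB")   # the small third device: 570 KiB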
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.468 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.468 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.469 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.469 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.469 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.470 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:27:14.469723) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 nova_compute[356901]: 2025-10-11 02:27:14.523 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.540 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 sshd-session[434818]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:27:14 compute-0 sshd-session[434818]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.544 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.546 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.bytes volume: 385378 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.609 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.bytes volume: 23325184 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.609 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.610 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.bytes volume: 385378 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.682 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.683 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.683 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.bytes volume: 385378 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.740 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.742 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.742 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.743 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.743 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.743 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.743 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.743 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.743 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.744 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.latency volume: 1845147961 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.744 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.latency volume: 292571291 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.744 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.latency volume: 162750190 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.744 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.latency volume: 1853196562 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.745 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.latency volume: 293231554 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.745 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.latency volume: 250459547 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.745 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.latency volume: 1696814304 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.745 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.latency volume: 210864290 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.746 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.latency volume: 178724423 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.746 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.746 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.746 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.748 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.748 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.748 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.749 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.749 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.749 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.749 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:27:14.743889) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.750 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.750 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:27:14.749857) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.750 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.750 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.requests volume: 124 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.750 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.requests volume: 844 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.751 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.751 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.requests volume: 124 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.751 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.751 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.751 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.requests volume: 124 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.752 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.752 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.752 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.753 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
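Editor's note: the lines above trace one complete pass of the agent's per-pollster cycle: discovery (_internal_pollster_run), a coordination check against the (empty) hashring, per-device sample generation (_stats_to_sample), and a closing INFO line. A minimal Python sketch of that control flow follows; the class and helper names are illustrative stand-ins, not Ceilometer's actual code in ceilometer/polling/manager.py.

    # Illustrative sketch of the cycle the log traces: discover resources,
    # skip coordination when no group is configured, poll, emit samples.
    class Pollster:
        name = "disk.device.read.requests"

        def get_samples(self, resources):
            # one sample per (instance, device) pair, as in the log
            for instance, device, value in resources:
                yield {"resource": instance, "meter": self.name,
                       "device": device, "volume": value}

    def run_pollster(pollster, discover, coordination_group=None):
        resources = discover()            # "Executing discovery process ..."
        if coordination_group is None:
            pass                          # no hashring filtering needed
        print(f"Polling pollster {pollster.name}")
        samples = list(pollster.get_samples(resources))
        print(f"Finished polling pollster {pollster.name}")
        return samples

    samples = run_pollster(
        Pollster(),
        lambda: [("358d31cf", "vda", 840), ("358d31cf", "vdb", 173)])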
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.753 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.753 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.753 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.753 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.753 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.753 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.754 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.754 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.usage volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.754 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.754 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:27:14.753719) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.754 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.754 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.usage volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.755 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.755 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.755 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.usage volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.755 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.756 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.756 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.756 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
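Editor's note on the disk.device.usage volumes above: 1073741824 is exactly 2**30 bytes, i.e. a 1 GiB device, so each instance reports two 1 GiB disks plus one much smaller third device (583680 or 485376 bytes, roughly 570 KiB / 474 KiB; the log does not name the devices, so what that third device is remains an assumption). A quick check:

    GIB = 2 ** 30
    for vol in (1073741824, 583680, 485376):
        print(f"{vol:>12} B = {vol / GIB:.6f} GiB")
    assert 1073741824 == GIB   # the repeated 1 GiB value in the log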
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.756 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.757 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.757 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.757 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.757 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.757 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.bytes volume: 41697280 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.757 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.757 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.758 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.bytes volume: 41852928 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.758 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.758 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.758 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.bytes volume: 41779200 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.758 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.759 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.759 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.759 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.759 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.760 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.760 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.760 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.760 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.760 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.760 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.760 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.latency volume: 5899899965 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.761 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.latency volume: 24741980 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.761 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:27:14.757172) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.761 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:27:14.760695) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.761 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.761 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.latency volume: 5172044232 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.761 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.latency volume: 26893276 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.761 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.762 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.latency volume: 6089609601 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.762 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.latency volume: 25967717 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.762 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.762 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.762 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.763 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.763 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
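Editor's note: assuming disk.device.write.latency is the cumulative write time in nanoseconds that libvirt block stats report, pairing it with the cumulative request count from the disk.device.write.requests cycle later in this poll gives a rough mean latency per write:

    # 358d31cf... first device: cumulative write time and write requests
    total_write_ns = 5_899_899_965     # from the write.latency samples above
    total_write_reqs = 221             # from the write.requests samples below
    print(f"~{total_write_ns / total_write_reqs / 1e6:.1f} ms per write")  # ~26.7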
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.763 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.763 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.763 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.763 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.763 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.764 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:27:14.763888) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.790 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.822 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.852 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.877 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.879 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
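Editor's note: the power.state volume of 1 for all four instances matches libvirt's documented domain state enum, where 1 is VIR_DOMAIN_RUNNING (assuming the meter simply forwards that enum value, which the identical readings here are consistent with):

    # libvirt virDomainState values (documented enum)
    VIR_DOMAIN_STATE = {
        0: "nostate", 1: "running", 2: "blocked", 3: "paused",
        4: "shutdown", 5: "shutoff", 6: "crashed", 7: "pmsuspended",
    }
    print(VIR_DOMAIN_STATE[1])   # "running" -> all four instances above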
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.879 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.879 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.879 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.879 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.880 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.880 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.requests volume: 221 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.881 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.882 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.883 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.requests volume: 241 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.883 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:27:14.880598) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.883 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.884 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.884 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.884 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.885 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.886 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.886 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.886 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.887 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
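Editor's note: the "Pollster heartbeat update" DEBUG lines come from worker 14, while the matching "Updated heartbeat for ..." lines come from worker 12 a moment later: the poller announces a beat, and a separate status updater records the timestamp. A self-contained sketch of that hand-off (illustrative names, not Ceilometer's):

    import datetime, queue, threading

    beats = queue.Queue()
    status = {}

    def heartbeat(name):                      # called by the polling worker
        beats.put((name, datetime.datetime.now(datetime.timezone.utc)))

    def _update_status(stop):                 # runs in the status worker
        while not stop.is_set() or not beats.empty():
            try:
                name, ts = beats.get(timeout=0.1)
            except queue.Empty:
                continue
            status[name] = ts
            print(f"Updated heartbeat for {name} ({ts.isoformat()})")

    stop = threading.Event()
    t = threading.Thread(target=_update_status, args=(stop,))
    t.start()
    heartbeat("disk.device.write.requests")
    stop.set()
    t.join()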
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.888 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.888 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.888 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.888 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.889 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.889 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.890 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:27:14.888935) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.890 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.bytes.delta volume: 3431 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.890 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.bytes.delta volume: 84 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.891 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 84 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.892 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
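Editor's note: the ".delta" meters above are differences between successive cumulative interface counters: each poll emits the current counter minus the previously cached one. A minimal sketch of that bookkeeping (names illustrative, not Ceilometer's):

    _prev = {}

    def bytes_delta(key, cumulative):
        last = _prev.get(key)
        _prev[key] = cumulative
        if last is None:
            return 0                         # first poll: no baseline yet
        return max(cumulative - last, 0)     # guard against counter resets

    print(bytes_delta(("d60d7ea1", "tap0"), 10_000))   # 0 (baseline)
    print(bytes_delta(("d60d7ea1", "tap0"), 13_431))   # 3431, as in the log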
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.892 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.892 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.rate in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.892 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.893 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.893 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.rate heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.893 14 DEBUG ceilometer.compute.pollsters [-] LibvirtInspector does not provide data for IncomingBytesRatePollster get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:162
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.894 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.rate (2025-10-11T02:27:14.893409) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.894 14 ERROR ceilometer.polling.manager [-] Prevent pollster network.incoming.bytes.rate from polling [<NovaLikeServer: vn-vgckve2-tqko7trrsvwg-ebwakep2a2y3-vnf-ihxi227vdpwh>] on source pollsters anymore!: ceilometer.polling.plugin_base.PollsterPermanentError: [<NovaLikeServer: vn-vgckve2-tqko7trrsvwg-ebwakep2a2y3-vnf-ihxi227vdpwh>]
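Editor's note: the ERROR above is the permanent-error path: LibvirtInspector never provides the rate statistic, so the pollster raises PollsterPermanentError carrying the affected resources, and the manager stops polling them for that source. A simplified sketch of that mechanism (the exception class name matches ceilometer.polling.plugin_base; everything else is illustrative):

    class PollsterPermanentError(Exception):
        def __init__(self, resources):
            super().__init__(resources)
            self.resources = resources

    blacklist = set()

    def poll(pollster_name, source, resources):
        try:
            # inspector has no data for this meter, ever
            raise PollsterPermanentError(resources)
        except PollsterPermanentError as err:
            for r in err.resources:
                blacklist.add((pollster_name, source, r))
            print(f"Prevent pollster {pollster_name} from polling "
                  f"{err.resources} on source {source} anymore!")

    poll("network.incoming.bytes.rate", "pollsters", ["server-1"])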
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.894 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.895 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.895 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.895 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.895 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.896 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:27:14.895625) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.897 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.897 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.897 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.898 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.898 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.898 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.899 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.incoming.packets volume: 14 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.899 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:27:14.898743) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.899 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.packets volume: 56 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.900 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.packets volume: 17 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.900 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 23 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.901 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.901 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.901 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.902 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.902 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.902 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.902 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.903 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:27:14.902600) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.903 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.bytes.delta volume: 2474 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.904 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.bytes.delta volume: 140 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.904 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.905 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.905 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.905 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.906 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.906 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.907 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.908 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.908 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.909 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:27:14.907218) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.909 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.910 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.910 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.911 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.911 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.912 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.912 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:27:14.911111) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.913 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.913 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.914 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.914 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.915 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.915 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.916 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.916 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.916 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.917 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.918 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.allocation volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.919 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.919 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.920 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.allocation volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.920 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:27:14.916395) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.921 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.921 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.922 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.allocation volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.923 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.923 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.924 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.925 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.925 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.925 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.925 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.925 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.926 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.926 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.926 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.926 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.927 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.927 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.927 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.928 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:27:14.925986) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.928 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.928 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.928 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.928 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.928 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/cpu volume: 34890000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.929 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/cpu volume: 315780000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.929 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/cpu volume: 35730000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.929 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 40810000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.929 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:27:14.928570) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.930 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.930 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.930 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.930 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.930 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.930 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.931 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.outgoing.bytes volume: 2076 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.931 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.bytes volume: 7304 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.931 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.bytes volume: 2328 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.931 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:27:14.930883) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.932 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2272 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.932 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.932 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.933 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.933 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.933 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.933 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.933 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/memory.usage volume: 49.4765625 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.933 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:27:14.933426) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.934 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/memory.usage volume: 48.9375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.934 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/memory.usage volume: 49.046875 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.934 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.83984375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.935 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.935 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.935 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.rate in the context of pollsters
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.935 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.935 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.935 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.rate heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.936 14 DEBUG ceilometer.compute.pollsters [-] LibvirtInspector does not provide data for OutgoingBytesRatePollster get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:162
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.936 14 ERROR ceilometer.polling.manager [-] Prevent pollster network.outgoing.bytes.rate from polling [<NovaLikeServer: vn-vgckve2-tqko7trrsvwg-ebwakep2a2y3-vnf-ihxi227vdpwh>] on source pollsters anymore!: ceilometer.polling.plugin_base.PollsterPermanentError: [<NovaLikeServer: vn-vgckve2-tqko7trrsvwg-ebwakep2a2y3-vnf-ihxi227vdpwh>]
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.937 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.rate (2025-10-11T02:27:14.935897) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.937 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.938 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.939 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.939 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.939 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.939 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.940 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.940 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.940 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.940 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.940 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.941 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.941 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.941 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.941 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.942 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.942 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.942 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.943 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.943 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.943 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.944 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.944 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.944 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.944 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:27:14.944 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:27:15 compute-0 ceph-mon[191930]: pgmap v1478: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:27:15 compute-0 nova_compute[356901]: 2025-10-11 02:27:15.663 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1479: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:16 compute-0 sshd-session[434818]: Failed password for invalid user admin from 121.227.153.123 port 38286 ssh2
Oct 11 02:27:17 compute-0 ceph-mon[191930]: pgmap v1479: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:18 compute-0 sshd-session[434818]: Connection closed by invalid user admin 121.227.153.123 port 38286 [preauth]
Oct 11 02:27:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1480: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.4 KiB/s wr, 1 op/s
Oct 11 02:27:18 compute-0 ceph-mon[191930]: pgmap v1480: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.4 KiB/s wr, 1 op/s
Oct 11 02:27:19 compute-0 sshd-session[434821]: Invalid user admin from 121.227.153.123 port 38294
Oct 11 02:27:19 compute-0 nova_compute[356901]: 2025-10-11 02:27:19.528 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:19 compute-0 sshd-session[434821]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:27:19 compute-0 sshd-session[434821]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:27:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1481: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.7 KiB/s wr, 1 op/s
Oct 11 02:27:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:27:20 compute-0 nova_compute[356901]: 2025-10-11 02:27:20.667 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:21 compute-0 sshd-session[434821]: Failed password for invalid user admin from 121.227.153.123 port 38294 ssh2
Oct 11 02:27:21 compute-0 ceph-mon[191930]: pgmap v1481: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.7 KiB/s wr, 1 op/s
Oct 11 02:27:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1482: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.7 KiB/s wr, 1 op/s
Oct 11 02:27:22 compute-0 podman[434826]: 2025-10-11 02:27:22.250689793 +0000 UTC m=+0.116579974 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:27:22 compute-0 podman[434824]: 2025-10-11 02:27:22.267468838 +0000 UTC m=+0.153745083 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:27:22 compute-0 podman[434825]: 2025-10-11 02:27:22.272362559 +0000 UTC m=+0.144068404 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, version=9.6, maintainer=Red Hat, Inc., managed_by=edpm_ansible, build-date=2025-08-20T13:12:41, com.redhat.component=ubi9-minimal-container, container_name=openstack_network_exporter, distribution-scope=public, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, url=https://catalog.redhat.com/en/search?searchType=containers, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, io.openshift.expose-services=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, name=ubi9-minimal, io.openshift.tags=minimal rhel9, release=1755695350, vendor=Red Hat, Inc., vcs-type=git, config_id=edpm)
Oct 11 02:27:22 compute-0 ceph-mon[191930]: pgmap v1482: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.7 KiB/s wr, 1 op/s
Oct 11 02:27:23 compute-0 sshd-session[434821]: Connection closed by invalid user admin 121.227.153.123 port 38294 [preauth]
Oct 11 02:27:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1483: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.7 KiB/s wr, 1 op/s
Oct 11 02:27:24 compute-0 sshd-session[434885]: Invalid user admin from 121.227.153.123 port 53648
Oct 11 02:27:24 compute-0 nova_compute[356901]: 2025-10-11 02:27:24.530 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:24 compute-0 sshd-session[434885]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:27:24 compute-0 sshd-session[434885]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:27:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:27:25 compute-0 ceph-mon[191930]: pgmap v1483: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.7 KiB/s wr, 1 op/s
Oct 11 02:27:25 compute-0 nova_compute[356901]: 2025-10-11 02:27:25.671 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1484: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.7 KiB/s wr, 1 op/s
Oct 11 02:27:26 compute-0 sshd-session[434885]: Failed password for invalid user admin from 121.227.153.123 port 53648 ssh2
Oct 11 02:27:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:27:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:27:26 compute-0 ceph-mon[191930]: pgmap v1484: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.7 KiB/s wr, 1 op/s
Oct 11 02:27:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:27:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:27:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:27:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:27:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:27:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2874491089' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:27:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:27:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2874491089' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:27:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2874491089' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:27:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2874491089' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:27:28 compute-0 sshd-session[434885]: Connection closed by invalid user admin 121.227.153.123 port 53648 [preauth]
Oct 11 02:27:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1485: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.7 KiB/s wr, 1 op/s
Oct 11 02:27:28 compute-0 ceph-mon[191930]: pgmap v1485: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.7 KiB/s wr, 1 op/s
Oct 11 02:27:29 compute-0 podman[434889]: 2025-10-11 02:27:29.209026237 +0000 UTC m=+0.103074200 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., managed_by=edpm_ansible, io.openshift.tags=base rhel9, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, architecture=x86_64, version=9.4, io.openshift.expose-services=, com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=kepler, io.buildah.version=1.29.0, vendor=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2024-09-18T21:23:30, distribution-scope=public, name=ubi9, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, release=1214.1726694543, release-0.7.12=, io.k8s.display-name=Red Hat Universal Base Image 9, config_id=edpm)
Oct 11 02:27:29 compute-0 sshd-session[434887]: Invalid user admin from 121.227.153.123 port 53656
Oct 11 02:27:29 compute-0 nova_compute[356901]: 2025-10-11 02:27:29.533 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:29 compute-0 sshd-session[434887]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:27:29 compute-0 sshd-session[434887]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:27:29 compute-0 podman[157119]: time="2025-10-11T02:27:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:27:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:27:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:27:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:27:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9052 "" "Go-http-client/1.1"
Oct 11 02:27:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1486: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:27:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:27:30 compute-0 nova_compute[356901]: 2025-10-11 02:27:30.675 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:31 compute-0 ceph-mon[191930]: pgmap v1486: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:27:31 compute-0 openstack_network_exporter[374316]: ERROR   02:27:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:27:31 compute-0 openstack_network_exporter[374316]: ERROR   02:27:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:27:31 compute-0 openstack_network_exporter[374316]: ERROR   02:27:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:27:31 compute-0 openstack_network_exporter[374316]: ERROR   02:27:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:27:31 compute-0 openstack_network_exporter[374316]: ERROR   02:27:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:27:32 compute-0 sshd-session[434887]: Failed password for invalid user admin from 121.227.153.123 port 53656 ssh2
Oct 11 02:27:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1487: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:33 compute-0 sshd-session[434887]: Connection closed by invalid user admin 121.227.153.123 port 53656 [preauth]
Oct 11 02:27:33 compute-0 ceph-mon[191930]: pgmap v1487: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1488: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:34 compute-0 sshd-session[434907]: Invalid user admin from 121.227.153.123 port 54442
Oct 11 02:27:34 compute-0 nova_compute[356901]: 2025-10-11 02:27:34.537 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:34 compute-0 sshd-session[434907]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:27:34 compute-0 sshd-session[434907]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:27:35 compute-0 ceph-mon[191930]: pgmap v1488: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:27:35 compute-0 nova_compute[356901]: 2025-10-11 02:27:35.679 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1489: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:36 compute-0 sshd-session[434907]: Failed password for invalid user admin from 121.227.153.123 port 54442 ssh2
Oct 11 02:27:36 compute-0 ceph-mon[191930]: pgmap v1489: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:37 compute-0 podman[434909]: 2025-10-11 02:27:37.245162976 +0000 UTC m=+0.124967092 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:27:37 compute-0 podman[434912]: 2025-10-11 02:27:37.254924517 +0000 UTC m=+0.113516208 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_metadata_agent, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']})
Oct 11 02:27:37 compute-0 podman[434911]: 2025-10-11 02:27:37.268703978 +0000 UTC m=+0.133983085 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_id=edpm, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:27:37 compute-0 podman[434910]: 2025-10-11 02:27:37.295317826 +0000 UTC m=+0.164327665 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_id=ovn_controller, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
Oct 11 02:27:38 compute-0 sshd-session[434907]: Connection closed by invalid user admin 121.227.153.123 port 54442 [preauth]
Oct 11 02:27:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1490: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:39 compute-0 ceph-mon[191930]: pgmap v1490: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:39 compute-0 sshd-session[434988]: Invalid user admin from 121.227.153.123 port 54458
Oct 11 02:27:39 compute-0 nova_compute[356901]: 2025-10-11 02:27:39.539 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:39 compute-0 sshd-session[434988]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:27:39 compute-0 sshd-session[434988]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:27:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1491: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:27:40 compute-0 nova_compute[356901]: 2025-10-11 02:27:40.683 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:41 compute-0 ceph-mon[191930]: pgmap v1491: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:41 compute-0 sshd-session[434988]: Failed password for invalid user admin from 121.227.153.123 port 54458 ssh2
Oct 11 02:27:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1492: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:43 compute-0 sshd-session[434988]: Connection closed by invalid user admin 121.227.153.123 port 54458 [preauth]
Oct 11 02:27:43 compute-0 podman[434991]: 2025-10-11 02:27:43.23708661 +0000 UTC m=+0.126125110 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, container_name=iscsid, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:27:43 compute-0 podman[434990]: 2025-10-11 02:27:43.243725924 +0000 UTC m=+0.130139919 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=multipathd, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 02:27:43 compute-0 ceph-mon[191930]: pgmap v1492: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1493: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:44 compute-0 sshd-session[435024]: Invalid user admin from 121.227.153.123 port 33284
Oct 11 02:27:44 compute-0 nova_compute[356901]: 2025-10-11 02:27:44.543 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:44 compute-0 sshd-session[435024]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:27:44 compute-0 sshd-session[435024]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:27:45 compute-0 ceph-mon[191930]: pgmap v1493: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:27:45 compute-0 nova_compute[356901]: 2025-10-11 02:27:45.688 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1494: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:46 compute-0 sshd-session[435024]: Failed password for invalid user admin from 121.227.153.123 port 33284 ssh2
Oct 11 02:27:47 compute-0 ceph-mon[191930]: pgmap v1494: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:48 compute-0 sshd-session[435024]: Connection closed by invalid user admin 121.227.153.123 port 33284 [preauth]
Oct 11 02:27:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1495: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:48 compute-0 ceph-mon[191930]: pgmap v1495: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:48 compute-0 nova_compute[356901]: 2025-10-11 02:27:48.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:27:49 compute-0 sshd-session[435026]: Invalid user admin from 121.227.153.123 port 33290
Oct 11 02:27:49 compute-0 nova_compute[356901]: 2025-10-11 02:27:49.546 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:49 compute-0 sshd-session[435026]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:27:49 compute-0 sshd-session[435026]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:27:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1496: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:27:50 compute-0 nova_compute[356901]: 2025-10-11 02:27:50.694 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:50 compute-0 ceph-mon[191930]: pgmap v1496: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:51 compute-0 sshd-session[435026]: Failed password for invalid user admin from 121.227.153.123 port 33290 ssh2
Oct 11 02:27:51 compute-0 sshd-session[435026]: Connection closed by invalid user admin 121.227.153.123 port 33290 [preauth]
Oct 11 02:27:51 compute-0 nova_compute[356901]: 2025-10-11 02:27:51.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:27:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1497: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:52 compute-0 sshd-session[435029]: Invalid user admin from 121.227.153.123 port 51804
Oct 11 02:27:52 compute-0 podman[435031]: 2025-10-11 02:27:52.948498195 +0000 UTC m=+0.115631253 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_managed=true, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:27:52 compute-0 podman[435032]: 2025-10-11 02:27:52.963568797 +0000 UTC m=+0.112137668 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, name=ubi9-minimal, vcs-type=git, build-date=2025-08-20T13:12:41, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, container_name=openstack_network_exporter, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, com.redhat.component=ubi9-minimal-container, managed_by=edpm_ansible, vendor=Red Hat, Inc., architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, io.openshift.expose-services=, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://catalog.redhat.com/en/search?searchType=containers, version=9.6, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:27:52 compute-0 podman[435033]: 2025-10-11 02:27:52.968342701 +0000 UTC m=+0.112512329 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:27:53 compute-0 sshd-session[435029]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:27:53 compute-0 sshd-session[435029]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:27:53 compute-0 ceph-mon[191930]: pgmap v1497: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1498: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:54 compute-0 nova_compute[356901]: 2025-10-11 02:27:54.550 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:27:54.855 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:27:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:27:54.856 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:27:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:27:54.856 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:27:55 compute-0 ceph-mon[191930]: pgmap v1498: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:27:55 compute-0 sshd-session[435029]: Failed password for invalid user admin from 121.227.153.123 port 51804 ssh2
Oct 11 02:27:55 compute-0 nova_compute[356901]: 2025-10-11 02:27:55.698 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:55 compute-0 nova_compute[356901]: 2025-10-11 02:27:55.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:27:56 compute-0 sudo[435091]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:27:56 compute-0 sudo[435091]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:27:56 compute-0 sudo[435091]: pam_unix(sudo:session): session closed for user root
Oct 11 02:27:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1499: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:56 compute-0 sudo[435116]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:27:56 compute-0 sudo[435116]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:27:56 compute-0 sudo[435116]: pam_unix(sudo:session): session closed for user root
Oct 11 02:27:56 compute-0 sudo[435141]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:27:56 compute-0 sudo[435141]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:27:56 compute-0 sudo[435141]: pam_unix(sudo:session): session closed for user root
Oct 11 02:27:56 compute-0 sshd-session[435029]: Connection closed by invalid user admin 121.227.153.123 port 51804 [preauth]
Oct 11 02:27:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:27:56
Oct 11 02:27:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:27:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:27:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['backups', 'cephfs.cephfs.data', 'cephfs.cephfs.meta', 'images', '.rgw.root', '.mgr', 'default.rgw.meta', 'default.rgw.log', 'default.rgw.control', 'volumes', 'vms']
Oct 11 02:27:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:27:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:27:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:27:56 compute-0 sudo[435166]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:27:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:27:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:27:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:27:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:27:56 compute-0 sudo[435166]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:27:56 compute-0 nova_compute[356901]: 2025-10-11 02:27:56.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:27:56 compute-0 nova_compute[356901]: 2025-10-11 02:27:56.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:27:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:27:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:27:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:27:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:27:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:27:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:27:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:27:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:27:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:27:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:27:57 compute-0 sudo[435166]: pam_unix(sudo:session): session closed for user root
Oct 11 02:27:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:27:57 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:27:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:27:57 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:27:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:27:57 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:27:57 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 59187224-d89c-4e20-bf63-91599d2a78e8 does not exist
Oct 11 02:27:57 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev a092dbeb-b971-4a68-8ca1-413a19e60923 does not exist
Oct 11 02:27:57 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev af2270a0-a13f-42f9-baf3-d93e58d60f16 does not exist
Oct 11 02:27:57 compute-0 ceph-mon[191930]: pgmap v1499: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:57 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:27:57 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:27:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:27:57 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:27:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:27:57 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:27:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:27:57 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:27:57 compute-0 sudo[435223]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:27:57 compute-0 sudo[435223]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:27:57 compute-0 sudo[435223]: pam_unix(sudo:session): session closed for user root
Oct 11 02:27:57 compute-0 sudo[435248]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:27:57 compute-0 sudo[435248]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:27:57 compute-0 sudo[435248]: pam_unix(sudo:session): session closed for user root
Oct 11 02:27:57 compute-0 sudo[435273]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:27:57 compute-0 sudo[435273]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:27:57 compute-0 sudo[435273]: pam_unix(sudo:session): session closed for user root
Oct 11 02:27:57 compute-0 nova_compute[356901]: 2025-10-11 02:27:57.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:27:57 compute-0 nova_compute[356901]: 2025-10-11 02:27:57.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:27:57 compute-0 nova_compute[356901]: 2025-10-11 02:27:57.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:27:58 compute-0 sudo[435298]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:27:58 compute-0 sudo[435298]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:27:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1500: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:27:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:27:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:27:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:27:58 compute-0 podman[435362]: 2025-10-11 02:27:58.590942624 +0000 UTC m=+0.080401265 container create 8bb395895287eb3b80f15108b9a4f258ba04fff243b93a820feb7895867331c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_swirles, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 02:27:58 compute-0 nova_compute[356901]: 2025-10-11 02:27:58.643 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:27:58 compute-0 nova_compute[356901]: 2025-10-11 02:27:58.643 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:27:58 compute-0 nova_compute[356901]: 2025-10-11 02:27:58.644 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:27:58 compute-0 nova_compute[356901]: 2025-10-11 02:27:58.644 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:27:58 compute-0 sshd-session[435202]: Invalid user admin from 121.227.153.123 port 51818
Oct 11 02:27:58 compute-0 podman[435362]: 2025-10-11 02:27:58.561567032 +0000 UTC m=+0.051025723 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:27:58 compute-0 systemd[1]: Started libpod-conmon-8bb395895287eb3b80f15108b9a4f258ba04fff243b93a820feb7895867331c3.scope.
Oct 11 02:27:58 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:27:58 compute-0 podman[435362]: 2025-10-11 02:27:58.748388632 +0000 UTC m=+0.237847273 container init 8bb395895287eb3b80f15108b9a4f258ba04fff243b93a820feb7895867331c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_swirles, CEPH_REF=reef, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3)
Oct 11 02:27:58 compute-0 podman[435362]: 2025-10-11 02:27:58.763076003 +0000 UTC m=+0.252534624 container start 8bb395895287eb3b80f15108b9a4f258ba04fff243b93a820feb7895867331c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_swirles, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 02:27:58 compute-0 podman[435362]: 2025-10-11 02:27:58.76899638 +0000 UTC m=+0.258455001 container attach 8bb395895287eb3b80f15108b9a4f258ba04fff243b93a820feb7895867331c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_swirles, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Oct 11 02:27:58 compute-0 upbeat_swirles[435378]: 167 167
Oct 11 02:27:58 compute-0 systemd[1]: libpod-8bb395895287eb3b80f15108b9a4f258ba04fff243b93a820feb7895867331c3.scope: Deactivated successfully.
Oct 11 02:27:58 compute-0 podman[435362]: 2025-10-11 02:27:58.777701652 +0000 UTC m=+0.267160263 container died 8bb395895287eb3b80f15108b9a4f258ba04fff243b93a820feb7895867331c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_swirles, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:27:58 compute-0 systemd[1]: var-lib-containers-storage-overlay-be21f4217a267e3999fc8f1f67c19cb763d8b1347704b2470cdd42914c5989e3-merged.mount: Deactivated successfully.
Oct 11 02:27:58 compute-0 podman[435362]: 2025-10-11 02:27:58.848721424 +0000 UTC m=+0.338180075 container remove 8bb395895287eb3b80f15108b9a4f258ba04fff243b93a820feb7895867331c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_swirles, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:27:58 compute-0 systemd[1]: libpod-conmon-8bb395895287eb3b80f15108b9a4f258ba04fff243b93a820feb7895867331c3.scope: Deactivated successfully.
Oct 11 02:27:58 compute-0 sshd-session[435202]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:27:58 compute-0 sshd-session[435202]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:27:59 compute-0 podman[435401]: 2025-10-11 02:27:59.087451312 +0000 UTC m=+0.070510328 container create 9dec264239b2611005200e594457d99873e04e95441a78fb544e118aed15dd83 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_kilby, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Oct 11 02:27:59 compute-0 systemd[1]: Started libpod-conmon-9dec264239b2611005200e594457d99873e04e95441a78fb544e118aed15dd83.scope.
Oct 11 02:27:59 compute-0 podman[435401]: 2025-10-11 02:27:59.06542306 +0000 UTC m=+0.048482126 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:27:59 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:27:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/103c4d07c2c5426a62af30508eca0389e31d823098bac0f494962188c88c2046/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:27:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/103c4d07c2c5426a62af30508eca0389e31d823098bac0f494962188c88c2046/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:27:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/103c4d07c2c5426a62af30508eca0389e31d823098bac0f494962188c88c2046/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:27:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/103c4d07c2c5426a62af30508eca0389e31d823098bac0f494962188c88c2046/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:27:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/103c4d07c2c5426a62af30508eca0389e31d823098bac0f494962188c88c2046/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:27:59 compute-0 podman[435401]: 2025-10-11 02:27:59.230666892 +0000 UTC m=+0.213725928 container init 9dec264239b2611005200e594457d99873e04e95441a78fb544e118aed15dd83 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_kilby, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:27:59 compute-0 podman[435401]: 2025-10-11 02:27:59.25027476 +0000 UTC m=+0.233333776 container start 9dec264239b2611005200e594457d99873e04e95441a78fb544e118aed15dd83 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_kilby, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS)
Oct 11 02:27:59 compute-0 podman[435401]: 2025-10-11 02:27:59.254516068 +0000 UTC m=+0.237575124 container attach 9dec264239b2611005200e594457d99873e04e95441a78fb544e118aed15dd83 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_kilby, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS)
Oct 11 02:27:59 compute-0 ceph-mon[191930]: pgmap v1500: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:27:59 compute-0 nova_compute[356901]: 2025-10-11 02:27:59.552 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:27:59 compute-0 podman[157119]: time="2025-10-11T02:27:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:27:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:27:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47970 "" "Go-http-client/1.1"
Oct 11 02:27:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:27:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9470 "" "Go-http-client/1.1"
Oct 11 02:28:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1501: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:00 compute-0 podman[435429]: 2025-10-11 02:28:00.276029267 +0000 UTC m=+0.157037036 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, maintainer=Red Hat, Inc., version=9.4, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.29.0, io.openshift.tags=base rhel9, release-0.7.12=, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, build-date=2024-09-18T21:23:30, config_id=edpm, release=1214.1726694543, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vendor=Red Hat, Inc., io.openshift.expose-services=, container_name=kepler, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9, managed_by=edpm_ansible, vcs-type=git, name=ubi9, summary=Provides the latest release of Red Hat Universal Base Image 9., com.redhat.component=ubi9-container)
Oct 11 02:28:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:28:00 compute-0 stoic_kilby[435418]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:28:00 compute-0 stoic_kilby[435418]: --> relative data size: 1.0
Oct 11 02:28:00 compute-0 stoic_kilby[435418]: --> All data devices are unavailable
Oct 11 02:28:00 compute-0 systemd[1]: libpod-9dec264239b2611005200e594457d99873e04e95441a78fb544e118aed15dd83.scope: Deactivated successfully.
Oct 11 02:28:00 compute-0 podman[435401]: 2025-10-11 02:28:00.586038805 +0000 UTC m=+1.569097821 container died 9dec264239b2611005200e594457d99873e04e95441a78fb544e118aed15dd83 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_kilby, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 02:28:00 compute-0 systemd[1]: libpod-9dec264239b2611005200e594457d99873e04e95441a78fb544e118aed15dd83.scope: Consumed 1.251s CPU time.
Oct 11 02:28:00 compute-0 systemd[1]: var-lib-containers-storage-overlay-103c4d07c2c5426a62af30508eca0389e31d823098bac0f494962188c88c2046-merged.mount: Deactivated successfully.
Oct 11 02:28:00 compute-0 podman[435401]: 2025-10-11 02:28:00.660935474 +0000 UTC m=+1.643994490 container remove 9dec264239b2611005200e594457d99873e04e95441a78fb544e118aed15dd83 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_kilby, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2)
Oct 11 02:28:00 compute-0 systemd[1]: libpod-conmon-9dec264239b2611005200e594457d99873e04e95441a78fb544e118aed15dd83.scope: Deactivated successfully.
Oct 11 02:28:00 compute-0 nova_compute[356901]: 2025-10-11 02:28:00.701 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:00 compute-0 sudo[435298]: pam_unix(sudo:session): session closed for user root
Oct 11 02:28:00 compute-0 sshd-session[435202]: Failed password for invalid user admin from 121.227.153.123 port 51818 ssh2
Oct 11 02:28:00 compute-0 sudo[435478]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:28:00 compute-0 sudo[435478]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:28:00 compute-0 sudo[435478]: pam_unix(sudo:session): session closed for user root
Oct 11 02:28:00 compute-0 sudo[435503]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:28:00 compute-0 sudo[435503]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:28:00 compute-0 sudo[435503]: pam_unix(sudo:session): session closed for user root
Oct 11 02:28:01 compute-0 sudo[435528]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:28:01 compute-0 sudo[435528]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:28:01 compute-0 sudo[435528]: pam_unix(sudo:session): session closed for user root
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.101 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.121 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.121 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.122 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.122 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.148 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.148 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.148 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.148 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.149 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:28:01 compute-0 sudo[435553]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:28:01 compute-0 sudo[435553]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:28:01 compute-0 ceph-mon[191930]: pgmap v1501: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:01 compute-0 openstack_network_exporter[374316]: ERROR   02:28:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:28:01 compute-0 openstack_network_exporter[374316]: ERROR   02:28:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:28:01 compute-0 openstack_network_exporter[374316]: ERROR   02:28:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:28:01 compute-0 openstack_network_exporter[374316]: ERROR   02:28:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:28:01 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:28:01 compute-0 openstack_network_exporter[374316]: ERROR   02:28:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:28:01 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:28:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:28:01 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3707354689' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.614 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.465s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:28:01 compute-0 podman[435638]: 2025-10-11 02:28:01.665326441 +0000 UTC m=+0.066345593 container create 86cd9aa9dd5e239ce29a315507efe7037fa0eea1fef7b4bf681691ac4f36a475 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_bhabha, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 02:28:01 compute-0 systemd[1]: Started libpod-conmon-86cd9aa9dd5e239ce29a315507efe7037fa0eea1fef7b4bf681691ac4f36a475.scope.
Oct 11 02:28:01 compute-0 podman[435638]: 2025-10-11 02:28:01.638797854 +0000 UTC m=+0.039817006 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.733 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.734 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.734 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.739 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.739 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.739 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.744 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.744 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.744 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.749 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.749 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:28:01 compute-0 nova_compute[356901]: 2025-10-11 02:28:01.749 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:28:01 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:28:01 compute-0 podman[435638]: 2025-10-11 02:28:01.798335804 +0000 UTC m=+0.199354966 container init 86cd9aa9dd5e239ce29a315507efe7037fa0eea1fef7b4bf681691ac4f36a475 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_bhabha, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 02:28:01 compute-0 podman[435638]: 2025-10-11 02:28:01.817684775 +0000 UTC m=+0.218703937 container start 86cd9aa9dd5e239ce29a315507efe7037fa0eea1fef7b4bf681691ac4f36a475 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_bhabha, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:28:01 compute-0 podman[435638]: 2025-10-11 02:28:01.823099208 +0000 UTC m=+0.224118380 container attach 86cd9aa9dd5e239ce29a315507efe7037fa0eea1fef7b4bf681691ac4f36a475 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_bhabha, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True)
Oct 11 02:28:01 compute-0 eager_bhabha[435655]: 167 167
Oct 11 02:28:01 compute-0 systemd[1]: libpod-86cd9aa9dd5e239ce29a315507efe7037fa0eea1fef7b4bf681691ac4f36a475.scope: Deactivated successfully.
Oct 11 02:28:01 compute-0 podman[435638]: 2025-10-11 02:28:01.828556391 +0000 UTC m=+0.229575533 container died 86cd9aa9dd5e239ce29a315507efe7037fa0eea1fef7b4bf681691ac4f36a475 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_bhabha, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Oct 11 02:28:01 compute-0 systemd[1]: var-lib-containers-storage-overlay-8ce343913febbce970dc7bf5281392f961138382fa44b23f50f5f003301ecea4-merged.mount: Deactivated successfully.
Oct 11 02:28:01 compute-0 podman[435638]: 2025-10-11 02:28:01.887023107 +0000 UTC m=+0.288042259 container remove 86cd9aa9dd5e239ce29a315507efe7037fa0eea1fef7b4bf681691ac4f36a475 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_bhabha, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:28:01 compute-0 systemd[1]: libpod-conmon-86cd9aa9dd5e239ce29a315507efe7037fa0eea1fef7b4bf681691ac4f36a475.scope: Deactivated successfully.
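The span above is one complete short-lived cephadm helper container (eager_bhabha): image pull, create, conmon scope, libcrun start, init/start/attach, a single "167 167" line of output (likely the ceph uid/gid pair on this host), died, overlay unmount, remove, scope teardown. The same sequence repeats below for lucid_moore, serene_lamarr, and elegant_rosalind. A minimal sketch of tailing that event stream, assuming podman's `events --format json` one-JSON-object-per-line output (field capitalization varies across podman versions, hence the defensive .get calls):

    import json
    import subprocess

    # Follow container lifecycle events like the pull/create/init/start/
    # attach/died/remove sequence journaled above.
    proc = subprocess.Popen(
        ["podman", "events", "--format", "json", "--filter", "type=container"],
        stdout=subprocess.PIPE, text=True,
    )
    for line in proc.stdout:
        ev = json.loads(line)
        print(ev.get("Status"), str(ev.get("ID", ""))[:12], ev.get("Image", ""))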
Oct 11 02:28:02 compute-0 podman[435679]: 2025-10-11 02:28:02.142933661 +0000 UTC m=+0.076415236 container create 88a9525cbff927f4669e6eb8d93cddd9c0795c761fb2bac56b6bd67bb63cd649 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_moore, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 02:28:02 compute-0 systemd[1]: Started libpod-conmon-88a9525cbff927f4669e6eb8d93cddd9c0795c761fb2bac56b6bd67bb63cd649.scope.
Oct 11 02:28:02 compute-0 podman[435679]: 2025-10-11 02:28:02.116663722 +0000 UTC m=+0.050145367 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:28:02 compute-0 nova_compute[356901]: 2025-10-11 02:28:02.212 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:28:02 compute-0 nova_compute[356901]: 2025-10-11 02:28:02.214 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3151MB free_disk=59.855655670166016GB free_vcpus=4 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:28:02 compute-0 nova_compute[356901]: 2025-10-11 02:28:02.214 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:28:02 compute-0 nova_compute[356901]: 2025-10-11 02:28:02.214 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:28:02 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:28:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9ee1530cae7ec9a52f1fc84a018d0c2d93600f9d62e9064ff90f0467791c9c33/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:28:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9ee1530cae7ec9a52f1fc84a018d0c2d93600f9d62e9064ff90f0467791c9c33/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:28:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9ee1530cae7ec9a52f1fc84a018d0c2d93600f9d62e9064ff90f0467791c9c33/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:28:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9ee1530cae7ec9a52f1fc84a018d0c2d93600f9d62e9064ff90f0467791c9c33/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:28:02 compute-0 podman[435679]: 2025-10-11 02:28:02.272857571 +0000 UTC m=+0.206339246 container init 88a9525cbff927f4669e6eb8d93cddd9c0795c761fb2bac56b6bd67bb63cd649 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_moore, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507)
Oct 11 02:28:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1502: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:02 compute-0 podman[435679]: 2025-10-11 02:28:02.303002316 +0000 UTC m=+0.236483891 container start 88a9525cbff927f4669e6eb8d93cddd9c0795c761fb2bac56b6bd67bb63cd649 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_moore, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0)
Oct 11 02:28:02 compute-0 podman[435679]: 2025-10-11 02:28:02.311746139 +0000 UTC m=+0.245227804 container attach 88a9525cbff927f4669e6eb8d93cddd9c0795c761fb2bac56b6bd67bb63cd649 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_moore, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 02:28:02 compute-0 nova_compute[356901]: 2025-10-11 02:28:02.316 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:28:02 compute-0 nova_compute[356901]: 2025-10-11 02:28:02.317 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance d60d7ea1-5d00-4902-90e6-3ae67eb09a78 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:28:02 compute-0 nova_compute[356901]: 2025-10-11 02:28:02.317 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 7513b93e-d2b8-4ae0-8f1c-3df190945259 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:28:02 compute-0 nova_compute[356901]: 2025-10-11 02:28:02.317 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 358d31cf-2866-416a-b2fc-814ee4bfe89a actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:28:02 compute-0 nova_compute[356901]: 2025-10-11 02:28:02.318 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 4 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:28:02 compute-0 nova_compute[356901]: 2025-10-11 02:28:02.318 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=2560MB phys_disk=59GB used_disk=8GB total_vcpus=8 used_vcpus=4 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:28:02 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3707354689' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:28:02 compute-0 nova_compute[356901]: 2025-10-11 02:28:02.408 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:28:02 compute-0 sshd-session[435202]: Connection closed by invalid user admin 121.227.153.123 port 51818 [preauth]
Oct 11 02:28:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:28:02 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3612066856' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:28:02 compute-0 nova_compute[356901]: 2025-10-11 02:28:02.899 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.491s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
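The DEBUG pair above shows nova shelling out to `ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf` to size the RBD pool, with the monitor's audit log dispatching the same df command. A minimal sketch of the same probe, using plain subprocess rather than Nova's oslo wrapper, and assuming the `openstack` cephx id and conf path from the log are usable on the calling host:

    import json
    import subprocess

    out = subprocess.check_output(
        ["ceph", "df", "--format=json", "--id", "openstack",
         "--conf", "/etc/ceph/ceph.conf"],
        text=True,
    )
    # `ceph df` JSON reports cluster totals in bytes under "stats".
    stats = json.loads(out)["stats"]
    print(stats["total_bytes"], stats["total_avail_bytes"])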
Oct 11 02:28:02 compute-0 nova_compute[356901]: 2025-10-11 02:28:02.913 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:28:02 compute-0 nova_compute[356901]: 2025-10-11 02:28:02.935 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:28:02 compute-0 nova_compute[356901]: 2025-10-11 02:28:02.939 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:28:02 compute-0 nova_compute[356901]: 2025-10-11 02:28:02.940 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.726s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
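The inventory line above fixes what Placement will let this host schedule: per resource class, usable = (total - reserved) * allocation_ratio, and an allocation fits while used + requested stays under that bound. With the four instances above each holding {VCPU: 1, MEMORY_MB: 512, DISK_GB: 2}, a minimal sketch of the arithmetic (inventory values copied from the log; the capacity rule itself is standard Placement behavior, not stated in the log):

    # Inventory as reported by the resource tracker above.
    inventory = {
        "VCPU":      {"total": 8,    "reserved": 0,   "allocation_ratio": 4.0},
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "DISK_GB":   {"total": 59,   "reserved": 1,   "allocation_ratio": 0.9},
    }
    used = {"VCPU": 4 * 1, "MEMORY_MB": 4 * 512, "DISK_GB": 4 * 2}  # 4 instances
    for rc, inv in inventory.items():
        usable = (inv["total"] - inv["reserved"]) * inv["allocation_ratio"]
        print(rc, "usable:", usable, "used:", used[rc])
    # VCPU usable: 32.0, MEMORY_MB usable: 7168.0, DISK_GB usable: 52.2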
Oct 11 02:28:03 compute-0 lucid_moore[435695]: {
Oct 11 02:28:03 compute-0 lucid_moore[435695]:     "0": [
Oct 11 02:28:03 compute-0 lucid_moore[435695]:         {
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "devices": [
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "/dev/loop3"
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             ],
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "lv_name": "ceph_lv0",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "lv_size": "21470642176",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "name": "ceph_lv0",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "tags": {
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.cluster_name": "ceph",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.crush_device_class": "",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.encrypted": "0",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.osd_id": "0",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.type": "block",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.vdo": "0"
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             },
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "type": "block",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "vg_name": "ceph_vg0"
Oct 11 02:28:03 compute-0 lucid_moore[435695]:         }
Oct 11 02:28:03 compute-0 lucid_moore[435695]:     ],
Oct 11 02:28:03 compute-0 lucid_moore[435695]:     "1": [
Oct 11 02:28:03 compute-0 lucid_moore[435695]:         {
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "devices": [
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "/dev/loop4"
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             ],
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "lv_name": "ceph_lv1",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "lv_size": "21470642176",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "name": "ceph_lv1",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "tags": {
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.cluster_name": "ceph",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.crush_device_class": "",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.encrypted": "0",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.osd_id": "1",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.type": "block",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.vdo": "0"
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             },
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "type": "block",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "vg_name": "ceph_vg1"
Oct 11 02:28:03 compute-0 lucid_moore[435695]:         }
Oct 11 02:28:03 compute-0 lucid_moore[435695]:     ],
Oct 11 02:28:03 compute-0 lucid_moore[435695]:     "2": [
Oct 11 02:28:03 compute-0 lucid_moore[435695]:         {
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "devices": [
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "/dev/loop5"
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             ],
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "lv_name": "ceph_lv2",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "lv_size": "21470642176",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "name": "ceph_lv2",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "tags": {
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.cluster_name": "ceph",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.crush_device_class": "",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.encrypted": "0",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.osd_id": "2",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.type": "block",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:                 "ceph.vdo": "0"
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             },
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "type": "block",
Oct 11 02:28:03 compute-0 lucid_moore[435695]:             "vg_name": "ceph_vg2"
Oct 11 02:28:03 compute-0 lucid_moore[435695]:         }
Oct 11 02:28:03 compute-0 lucid_moore[435695]:     ]
Oct 11 02:28:03 compute-0 lucid_moore[435695]: }
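The JSON block the lucid_moore container printed above has the shape of `ceph-volume lvm list --format json` output: top-level keys are OSD ids, each mapping to the logical volumes backing that OSD. A minimal sketch reducing it to an osd_id -> device map; reading the payload from stdin is a hypothetical convention for the captured text, not anything the log shows:

    import json
    import sys

    # Usage (hypothetical): python3 lvm_map.py < lvm_list.json
    osds = {}
    for osd_id, lvs in json.loads(sys.stdin.read()).items():
        for lv in lvs:
            if lv.get("type") == "block":  # the bluestore data device
                osds[int(osd_id)] = (lv["lv_path"], lv["tags"]["ceph.osd_fsid"])
    print(osds)
    # {0: ('/dev/ceph_vg0/ceph_lv0', 'a9c7940d-...'), 1: ('/dev/ceph_vg1/ceph_lv1', ...), ...}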
Oct 11 02:28:03 compute-0 systemd[1]: libpod-88a9525cbff927f4669e6eb8d93cddd9c0795c761fb2bac56b6bd67bb63cd649.scope: Deactivated successfully.
Oct 11 02:28:03 compute-0 podman[435679]: 2025-10-11 02:28:03.262397012 +0000 UTC m=+1.195878637 container died 88a9525cbff927f4669e6eb8d93cddd9c0795c761fb2bac56b6bd67bb63cd649 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_moore, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Oct 11 02:28:03 compute-0 systemd[1]: var-lib-containers-storage-overlay-9ee1530cae7ec9a52f1fc84a018d0c2d93600f9d62e9064ff90f0467791c9c33-merged.mount: Deactivated successfully.
Oct 11 02:28:03 compute-0 podman[435679]: 2025-10-11 02:28:03.344640281 +0000 UTC m=+1.278121856 container remove 88a9525cbff927f4669e6eb8d93cddd9c0795c761fb2bac56b6bd67bb63cd649 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_moore, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 02:28:03 compute-0 systemd[1]: libpod-conmon-88a9525cbff927f4669e6eb8d93cddd9c0795c761fb2bac56b6bd67bb63cd649.scope: Deactivated successfully.
Oct 11 02:28:03 compute-0 sudo[435553]: pam_unix(sudo:session): session closed for user root
Oct 11 02:28:03 compute-0 ceph-mon[191930]: pgmap v1502: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:03 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3612066856' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:28:03 compute-0 sudo[435740]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:28:03 compute-0 sudo[435740]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:28:03 compute-0 sudo[435740]: pam_unix(sudo:session): session closed for user root
Oct 11 02:28:03 compute-0 sudo[435765]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:28:03 compute-0 sudo[435765]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:28:03 compute-0 sudo[435765]: pam_unix(sudo:session): session closed for user root
Oct 11 02:28:03 compute-0 sshd-session[435721]: Invalid user admin from 121.227.153.123 port 46536
Oct 11 02:28:03 compute-0 sudo[435790]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:28:03 compute-0 sudo[435790]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:28:03 compute-0 sudo[435790]: pam_unix(sudo:session): session closed for user root
Oct 11 02:28:03 compute-0 sudo[435815]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:28:03 compute-0 sudo[435815]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:28:03 compute-0 sshd-session[435721]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:28:03 compute-0 sshd-session[435721]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:28:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1503: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:04 compute-0 podman[435881]: 2025-10-11 02:28:04.473684819 +0000 UTC m=+0.085102046 container create 7dda71cf838dde9b6b1c418bc3e31cd99bf73fcb7a3bd0b81e20c3b92254828c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_rosalind, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:28:04 compute-0 podman[435881]: 2025-10-11 02:28:04.435971397 +0000 UTC m=+0.047388704 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:28:04 compute-0 systemd[1]: Started libpod-conmon-7dda71cf838dde9b6b1c418bc3e31cd99bf73fcb7a3bd0b81e20c3b92254828c.scope.
Oct 11 02:28:04 compute-0 nova_compute[356901]: 2025-10-11 02:28:04.555 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:04 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:28:04 compute-0 podman[435881]: 2025-10-11 02:28:04.623946071 +0000 UTC m=+0.235363328 container init 7dda71cf838dde9b6b1c418bc3e31cd99bf73fcb7a3bd0b81e20c3b92254828c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_rosalind, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 02:28:04 compute-0 podman[435881]: 2025-10-11 02:28:04.638481687 +0000 UTC m=+0.249898934 container start 7dda71cf838dde9b6b1c418bc3e31cd99bf73fcb7a3bd0b81e20c3b92254828c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_rosalind, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 02:28:04 compute-0 podman[435881]: 2025-10-11 02:28:04.645129307 +0000 UTC m=+0.256546534 container attach 7dda71cf838dde9b6b1c418bc3e31cd99bf73fcb7a3bd0b81e20c3b92254828c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_rosalind, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_REF=reef)
Oct 11 02:28:04 compute-0 elegant_rosalind[435897]: 167 167
Oct 11 02:28:04 compute-0 systemd[1]: libpod-7dda71cf838dde9b6b1c418bc3e31cd99bf73fcb7a3bd0b81e20c3b92254828c.scope: Deactivated successfully.
Oct 11 02:28:04 compute-0 podman[435881]: 2025-10-11 02:28:04.649046815 +0000 UTC m=+0.260464032 container died 7dda71cf838dde9b6b1c418bc3e31cd99bf73fcb7a3bd0b81e20c3b92254828c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_rosalind, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 02:28:04 compute-0 systemd[1]: var-lib-containers-storage-overlay-5ba2f1f37e68ced5a0e0c850aca777d26370a84037ec5509305b4ef5f1b630c2-merged.mount: Deactivated successfully.
Oct 11 02:28:04 compute-0 podman[435881]: 2025-10-11 02:28:04.71352289 +0000 UTC m=+0.324940117 container remove 7dda71cf838dde9b6b1c418bc3e31cd99bf73fcb7a3bd0b81e20c3b92254828c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_rosalind, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Oct 11 02:28:04 compute-0 nova_compute[356901]: 2025-10-11 02:28:04.716 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:28:04 compute-0 systemd[1]: libpod-conmon-7dda71cf838dde9b6b1c418bc3e31cd99bf73fcb7a3bd0b81e20c3b92254828c.scope: Deactivated successfully.
Oct 11 02:28:04 compute-0 nova_compute[356901]: 2025-10-11 02:28:04.743 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:28:04 compute-0 nova_compute[356901]: 2025-10-11 02:28:04.744 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:28:05 compute-0 podman[435921]: 2025-10-11 02:28:05.040384294 +0000 UTC m=+0.097298362 container create c80bb41fc8ad7af8506feb0afb650dc5b570ab5f90c40df13ffe0ba85d9ef170 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_lamarr, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 02:28:05 compute-0 podman[435921]: 2025-10-11 02:28:05.006975111 +0000 UTC m=+0.063889209 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:28:05 compute-0 systemd[1]: Started libpod-conmon-c80bb41fc8ad7af8506feb0afb650dc5b570ab5f90c40df13ffe0ba85d9ef170.scope.
Oct 11 02:28:05 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:28:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/922b2e259ff65bac39512807b3b20fee1cca12a9c1b776a5ee805548386ed8a4/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:28:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/922b2e259ff65bac39512807b3b20fee1cca12a9c1b776a5ee805548386ed8a4/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:28:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/922b2e259ff65bac39512807b3b20fee1cca12a9c1b776a5ee805548386ed8a4/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:28:05 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/922b2e259ff65bac39512807b3b20fee1cca12a9c1b776a5ee805548386ed8a4/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:28:05 compute-0 podman[435921]: 2025-10-11 02:28:05.217889684 +0000 UTC m=+0.274803812 container init c80bb41fc8ad7af8506feb0afb650dc5b570ab5f90c40df13ffe0ba85d9ef170 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_lamarr, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:28:05 compute-0 podman[435921]: 2025-10-11 02:28:05.241186083 +0000 UTC m=+0.298100161 container start c80bb41fc8ad7af8506feb0afb650dc5b570ab5f90c40df13ffe0ba85d9ef170 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_lamarr, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:28:05 compute-0 podman[435921]: 2025-10-11 02:28:05.249525844 +0000 UTC m=+0.306439932 container attach c80bb41fc8ad7af8506feb0afb650dc5b570ab5f90c40df13ffe0ba85d9ef170 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_lamarr, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:28:05 compute-0 ceph-mon[191930]: pgmap v1503: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:28:05 compute-0 nova_compute[356901]: 2025-10-11 02:28:05.706 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1504: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:06 compute-0 serene_lamarr[435937]: {
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:         "osd_id": 1,
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:         "type": "bluestore"
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:     },
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:         "osd_id": 2,
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:         "type": "bluestore"
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:     },
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:         "osd_id": 0,
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:         "type": "bluestore"
Oct 11 02:28:06 compute-0 serene_lamarr[435937]:     }
Oct 11 02:28:06 compute-0 serene_lamarr[435937]: }
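This second dict, keyed by OSD fsid with "type": "bluestore", matches the `ceph-volume raw list --format json` that the cephadm sudo command a few lines up requested. A minimal sketch (same hypothetical stdin convention as the previous snippet) inverting it to osd_id -> device; the result should agree with the lvm listing earlier, since the three osd_fsid values are identical and the device-mapper paths name the same LVs:

    import json
    import sys

    by_id = {
        entry["osd_id"]: entry["device"]
        for entry in json.loads(sys.stdin.read()).values()
        if entry.get("type") == "bluestore"
    }
    print(dict(sorted(by_id.items())))
    # {0: '/dev/mapper/ceph_vg0-ceph_lv0', 1: '/dev/mapper/ceph_vg1-ceph_lv1',
    #  2: '/dev/mapper/ceph_vg2-ceph_lv2'}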
Oct 11 02:28:06 compute-0 sshd-session[435721]: Failed password for invalid user admin from 121.227.153.123 port 46536 ssh2
Oct 11 02:28:06 compute-0 systemd[1]: libpod-c80bb41fc8ad7af8506feb0afb650dc5b570ab5f90c40df13ffe0ba85d9ef170.scope: Deactivated successfully.
Oct 11 02:28:06 compute-0 systemd[1]: libpod-c80bb41fc8ad7af8506feb0afb650dc5b570ab5f90c40df13ffe0ba85d9ef170.scope: Consumed 1.257s CPU time.
Oct 11 02:28:06 compute-0 podman[435921]: 2025-10-11 02:28:06.51801655 +0000 UTC m=+1.574930638 container died c80bb41fc8ad7af8506feb0afb650dc5b570ab5f90c40df13ffe0ba85d9ef170 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_lamarr, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:28:06 compute-0 systemd[1]: var-lib-containers-storage-overlay-922b2e259ff65bac39512807b3b20fee1cca12a9c1b776a5ee805548386ed8a4-merged.mount: Deactivated successfully.
Oct 11 02:28:06 compute-0 podman[435921]: 2025-10-11 02:28:06.622006642 +0000 UTC m=+1.678920690 container remove c80bb41fc8ad7af8506feb0afb650dc5b570ab5f90c40df13ffe0ba85d9ef170 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_lamarr, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3)
Oct 11 02:28:06 compute-0 systemd[1]: libpod-conmon-c80bb41fc8ad7af8506feb0afb650dc5b570ab5f90c40df13ffe0ba85d9ef170.scope: Deactivated successfully.
Oct 11 02:28:06 compute-0 sudo[435815]: pam_unix(sudo:session): session closed for user root
Oct 11 02:28:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:28:06 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:28:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:28:06 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 4a1378f3-cfa6-429a-813a-0f3995020f65 does not exist
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 499154b8-a8f6-4869-9948-05f50fd2ba0c does not exist
Oct 11 02:28:06 compute-0 sudo[435981]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:28:06 compute-0 sudo[435981]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:28:06 compute-0 sudo[435981]: pam_unix(sudo:session): session closed for user root
Oct 11 02:28:06 compute-0 systemd[1]: virtsecretd.service: Deactivated successfully.
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.00221085813879664 of space, bias 1.0, pg target 0.663257441638992 quantized to 32 (current 32)
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:28:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:28:06 compute-0 sudo[436007]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:28:06 compute-0 sudo[436007]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:28:06 compute-0 sudo[436007]: pam_unix(sudo:session): session closed for user root
Oct 11 02:28:07 compute-0 ceph-mon[191930]: pgmap v1504: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:07 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:28:07 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:28:07 compute-0 sshd-session[435721]: Connection closed by invalid user admin 121.227.153.123 port 46536 [preauth]
Oct 11 02:28:08 compute-0 podman[436034]: 2025-10-11 02:28:08.237405883 +0000 UTC m=+0.111912162 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:28:08 compute-0 podman[436037]: 2025-10-11 02:28:08.266565108 +0000 UTC m=+0.129761207 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:28:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1505: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:08 compute-0 podman[436036]: 2025-10-11 02:28:08.281558428 +0000 UTC m=+0.153787258 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, org.label-schema.schema-version=1.0, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251007, config_id=edpm, container_name=ceilometer_agent_compute, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4)
Oct 11 02:28:08 compute-0 podman[436035]: 2025-10-11 02:28:08.308459176 +0000 UTC m=+0.182667196 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:28:08 compute-0 sshd-session[436032]: Invalid user admin from 121.227.153.123 port 46538
Oct 11 02:28:08 compute-0 sshd-session[436032]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:28:08 compute-0 sshd-session[436032]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:28:09 compute-0 ceph-mon[191930]: pgmap v1505: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:09 compute-0 nova_compute[356901]: 2025-10-11 02:28:09.557 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1506: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:28:10 compute-0 sshd-session[436032]: Failed password for invalid user admin from 121.227.153.123 port 46538 ssh2
Oct 11 02:28:10 compute-0 nova_compute[356901]: 2025-10-11 02:28:10.712 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:11 compute-0 systemd[1]: virtproxyd.service: Deactivated successfully.
Oct 11 02:28:11 compute-0 ceph-mon[191930]: pgmap v1506: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1507: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:12 compute-0 sshd-session[436032]: Connection closed by invalid user admin 121.227.153.123 port 46538 [preauth]
Oct 11 02:28:13 compute-0 ceph-mon[191930]: pgmap v1507: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:13 compute-0 sshd-session[436118]: Invalid user admin from 121.227.153.123 port 33166
Oct 11 02:28:13 compute-0 podman[436120]: 2025-10-11 02:28:13.673649673 +0000 UTC m=+0.142268853 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_id=multipathd, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:28:13 compute-0 podman[436121]: 2025-10-11 02:28:13.675966032 +0000 UTC m=+0.137141188 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, managed_by=edpm_ansible, tcib_managed=true, container_name=iscsid, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 02:28:13 compute-0 sshd-session[436118]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:28:13 compute-0 sshd-session[436118]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:28:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1508: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:14 compute-0 ceph-mon[191930]: pgmap v1508: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:14 compute-0 nova_compute[356901]: 2025-10-11 02:28:14.561 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:28:15 compute-0 sshd-session[436118]: Failed password for invalid user admin from 121.227.153.123 port 33166 ssh2
Oct 11 02:28:15 compute-0 nova_compute[356901]: 2025-10-11 02:28:15.715 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1509: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:17 compute-0 sshd-session[436118]: Connection closed by invalid user admin 121.227.153.123 port 33166 [preauth]
Oct 11 02:28:17 compute-0 ceph-mon[191930]: pgmap v1509: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1510: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:18 compute-0 sshd-session[436157]: Invalid user admin from 121.227.153.123 port 33174
Oct 11 02:28:18 compute-0 sshd-session[436157]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:28:18 compute-0 sshd-session[436157]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:28:19 compute-0 ceph-mon[191930]: pgmap v1510: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:19 compute-0 nova_compute[356901]: 2025-10-11 02:28:19.565 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1511: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:28:20 compute-0 nova_compute[356901]: 2025-10-11 02:28:20.719 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:20 compute-0 sshd-session[436157]: Failed password for invalid user admin from 121.227.153.123 port 33174 ssh2
Oct 11 02:28:21 compute-0 ceph-mon[191930]: pgmap v1511: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1512: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:22 compute-0 sshd-session[436157]: Connection closed by invalid user admin 121.227.153.123 port 33174 [preauth]
Oct 11 02:28:23 compute-0 podman[436164]: 2025-10-11 02:28:23.257503482 +0000 UTC m=+0.127714986 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:28:23 compute-0 podman[436163]: 2025-10-11 02:28:23.259646136 +0000 UTC m=+0.128037145 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.33.7, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, managed_by=edpm_ansible, name=ubi9-minimal, vcs-type=git, version=9.6, build-date=2025-08-20T13:12:41, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=openstack_network_exporter, distribution-scope=public, url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., architecture=x86_64, release=1755695350, config_id=edpm, io.openshift.expose-services=, maintainer=Red Hat, Inc., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, com.redhat.component=ubi9-minimal-container)
Oct 11 02:28:23 compute-0 podman[436162]: 2025-10-11 02:28:23.263346667 +0000 UTC m=+0.138542280 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS)
Oct 11 02:28:23 compute-0 ceph-mon[191930]: pgmap v1512: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:23 compute-0 sshd-session[436160]: Invalid user admin from 121.227.153.123 port 59476
Oct 11 02:28:23 compute-0 sshd-session[436160]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:28:23 compute-0 sshd-session[436160]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:28:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1513: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:24 compute-0 nova_compute[356901]: 2025-10-11 02:28:24.570 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:25 compute-0 ceph-mon[191930]: pgmap v1513: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:28:25 compute-0 nova_compute[356901]: 2025-10-11 02:28:25.722 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:25 compute-0 sshd-session[436160]: Failed password for invalid user admin from 121.227.153.123 port 59476 ssh2
Oct 11 02:28:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1514: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:28:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:28:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:28:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:28:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:28:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:28:27 compute-0 sshd-session[436160]: Connection closed by invalid user admin 121.227.153.123 port 59476 [preauth]
Oct 11 02:28:27 compute-0 ceph-mon[191930]: pgmap v1514: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:28:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/555807005' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:28:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:28:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/555807005' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:28:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1515: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/555807005' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:28:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/555807005' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:28:28 compute-0 sshd-session[436226]: Invalid user admin from 121.227.153.123 port 59484
Oct 11 02:28:28 compute-0 sshd-session[436226]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:28:28 compute-0 sshd-session[436226]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:28:29 compute-0 ceph-mon[191930]: pgmap v1515: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:29 compute-0 nova_compute[356901]: 2025-10-11 02:28:29.572 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:29 compute-0 podman[157119]: time="2025-10-11T02:28:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:28:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:28:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:28:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:28:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9053 "" "Go-http-client/1.1"
Oct 11 02:28:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1516: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:30 compute-0 ceph-mon[191930]: pgmap v1516: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:28:30 compute-0 nova_compute[356901]: 2025-10-11 02:28:30.726 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:30 compute-0 sshd-session[436226]: Failed password for invalid user admin from 121.227.153.123 port 59484 ssh2
Oct 11 02:28:31 compute-0 podman[436228]: 2025-10-11 02:28:31.294693708 +0000 UTC m=+0.176370906 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.component=ubi9-container, architecture=x86_64, maintainer=Red Hat, Inc., summary=Provides the latest release of Red Hat Universal Base Image 9., distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.tags=base rhel9, name=ubi9, config_id=edpm, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, managed_by=edpm_ansible, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.buildah.version=1.29.0, io.openshift.expose-services=, build-date=2024-09-18T21:23:30, vcs-type=git, version=9.4, release-0.7.12=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, release=1214.1726694543, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=kepler)
Oct 11 02:28:31 compute-0 openstack_network_exporter[374316]: ERROR   02:28:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:28:31 compute-0 openstack_network_exporter[374316]: ERROR   02:28:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:28:31 compute-0 openstack_network_exporter[374316]: ERROR   02:28:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:28:31 compute-0 openstack_network_exporter[374316]: ERROR   02:28:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:28:31 compute-0 openstack_network_exporter[374316]: ERROR   02:28:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:28:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1517: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:32 compute-0 sshd-session[436226]: Connection closed by invalid user admin 121.227.153.123 port 59484 [preauth]
Oct 11 02:28:33 compute-0 ceph-mon[191930]: pgmap v1517: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:33 compute-0 sshd-session[436248]: Invalid user admin from 121.227.153.123 port 41644
Oct 11 02:28:33 compute-0 sshd-session[436248]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:28:33 compute-0 sshd-session[436248]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:28:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1518: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:34 compute-0 nova_compute[356901]: 2025-10-11 02:28:34.574 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:35 compute-0 ceph-mon[191930]: pgmap v1518: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:28:35 compute-0 sshd-session[436248]: Failed password for invalid user admin from 121.227.153.123 port 41644 ssh2
Oct 11 02:28:35 compute-0 nova_compute[356901]: 2025-10-11 02:28:35.730 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1519: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:37 compute-0 ceph-mon[191930]: pgmap v1519: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:37 compute-0 sshd-session[436248]: Connection closed by invalid user admin 121.227.153.123 port 41644 [preauth]
Oct 11 02:28:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1520: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:38 compute-0 sshd-session[436250]: Invalid user admin from 121.227.153.123 port 41672
Oct 11 02:28:38 compute-0 podman[436252]: 2025-10-11 02:28:38.75883412 +0000 UTC m=+0.116859099 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:28:38 compute-0 podman[436254]: 2025-10-11 02:28:38.774700877 +0000 UTC m=+0.111088507 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:28:38 compute-0 podman[436253]: 2025-10-11 02:28:38.78347039 +0000 UTC m=+0.138831229 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, config_id=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:28:38 compute-0 podman[436255]: 2025-10-11 02:28:38.800887983 +0000 UTC m=+0.130554231 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:28:38 compute-0 sshd-session[436250]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:28:38 compute-0 sshd-session[436250]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:28:39 compute-0 ceph-mon[191930]: pgmap v1520: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:39 compute-0 nova_compute[356901]: 2025-10-11 02:28:39.579 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1521: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:28:40 compute-0 nova_compute[356901]: 2025-10-11 02:28:40.733 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:41 compute-0 sshd-session[436250]: Failed password for invalid user admin from 121.227.153.123 port 41672 ssh2
Oct 11 02:28:41 compute-0 ceph-mon[191930]: pgmap v1521: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1522: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:42 compute-0 sshd-session[436250]: Connection closed by invalid user admin 121.227.153.123 port 41672 [preauth]
Oct 11 02:28:43 compute-0 ceph-mon[191930]: pgmap v1522: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:43 compute-0 sshd-session[436337]: Invalid user admin from 121.227.153.123 port 49354
Oct 11 02:28:43 compute-0 sshd-session[436337]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:28:43 compute-0 sshd-session[436337]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:28:43 compute-0 podman[436340]: 2025-10-11 02:28:43.979641881 +0000 UTC m=+0.121461297 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=iscsid)
Oct 11 02:28:43 compute-0 podman[436339]: 2025-10-11 02:28:43.982717644 +0000 UTC m=+0.135201971 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, container_name=multipathd, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:28:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1523: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:44 compute-0 nova_compute[356901]: 2025-10-11 02:28:44.582 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:45 compute-0 ceph-mon[191930]: pgmap v1523: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:45 compute-0 sshd-session[436337]: Failed password for invalid user admin from 121.227.153.123 port 49354 ssh2
Oct 11 02:28:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:28:45 compute-0 nova_compute[356901]: 2025-10-11 02:28:45.737 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1524: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:47 compute-0 sshd-session[436337]: Connection closed by invalid user admin 121.227.153.123 port 49354 [preauth]
Oct 11 02:28:47 compute-0 ceph-mon[191930]: pgmap v1524: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1525: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:48 compute-0 sshd-session[436379]: Invalid user admin from 121.227.153.123 port 49360
Oct 11 02:28:48 compute-0 sshd-session[436379]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:28:48 compute-0 sshd-session[436379]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:28:49 compute-0 ceph-mon[191930]: pgmap v1525: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:49 compute-0 nova_compute[356901]: 2025-10-11 02:28:49.586 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:49 compute-0 nova_compute[356901]: 2025-10-11 02:28:49.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:28:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1526: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:28:50 compute-0 sshd-session[436379]: Failed password for invalid user admin from 121.227.153.123 port 49360 ssh2
Oct 11 02:28:50 compute-0 nova_compute[356901]: 2025-10-11 02:28:50.742 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:51 compute-0 ceph-mon[191930]: pgmap v1526: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1527: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:52 compute-0 sshd-session[436379]: Connection closed by invalid user admin 121.227.153.123 port 49360 [preauth]
Oct 11 02:28:53 compute-0 ceph-mon[191930]: pgmap v1527: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:53 compute-0 sshd-session[436382]: Invalid user admin from 121.227.153.123 port 56554
Oct 11 02:28:53 compute-0 nova_compute[356901]: 2025-10-11 02:28:53.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:28:53 compute-0 podman[436385]: 2025-10-11 02:28:53.974854001 +0000 UTC m=+0.130705015 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vendor=Red Hat, Inc., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, maintainer=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, vcs-type=git, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.openshift.expose-services=, version=9.6, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-minimal-container, distribution-scope=public, build-date=2025-08-20T13:12:41, io.buildah.version=1.33.7, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, name=ubi9-minimal, architecture=x86_64, container_name=openstack_network_exporter, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, release=1755695350, io.openshift.tags=minimal rhel9)
Oct 11 02:28:53 compute-0 podman[436384]: 2025-10-11 02:28:53.99781247 +0000 UTC m=+0.155190590 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:28:54 compute-0 podman[436386]: 2025-10-11 02:28:54.001927754 +0000 UTC m=+0.139903162 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:28:54 compute-0 sshd-session[436382]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:28:54 compute-0 sshd-session[436382]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:28:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1528: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:54 compute-0 ceph-mon[191930]: pgmap v1528: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:54 compute-0 nova_compute[356901]: 2025-10-11 02:28:54.590 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:28:54.855 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:28:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:28:54.856 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:28:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:28:54.857 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:28:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:28:55 compute-0 nova_compute[356901]: 2025-10-11 02:28:55.747 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:55 compute-0 nova_compute[356901]: 2025-10-11 02:28:55.891 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:28:56 compute-0 sshd-session[436382]: Failed password for invalid user admin from 121.227.153.123 port 56554 ssh2
Oct 11 02:28:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1529: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:28:56
Oct 11 02:28:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:28:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:28:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.control', 'cephfs.cephfs.meta', 'images', 'backups', 'cephfs.cephfs.data', '.rgw.root', 'default.rgw.log', 'default.rgw.meta', 'vms', 'volumes', '.mgr']
Oct 11 02:28:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:28:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:28:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:28:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:28:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:28:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:28:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:28:56 compute-0 nova_compute[356901]: 2025-10-11 02:28:56.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:28:56 compute-0 nova_compute[356901]: 2025-10-11 02:28:56.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:28:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:28:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:28:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:28:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:28:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:28:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:28:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:28:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:28:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:28:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:28:57 compute-0 ceph-mon[191930]: pgmap v1529: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:57 compute-0 sshd-session[436382]: Connection closed by invalid user admin 121.227.153.123 port 56554 [preauth]
Oct 11 02:28:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1530: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:58 compute-0 sshd-session[436446]: Invalid user admin from 121.227.153.123 port 56562
Oct 11 02:28:58 compute-0 nova_compute[356901]: 2025-10-11 02:28:58.898 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:28:58 compute-0 nova_compute[356901]: 2025-10-11 02:28:58.899 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:28:59 compute-0 sshd-session[436446]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:28:59 compute-0 sshd-session[436446]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:28:59 compute-0 ceph-mon[191930]: pgmap v1530: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:28:59 compute-0 nova_compute[356901]: 2025-10-11 02:28:59.593 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:28:59 compute-0 podman[157119]: time="2025-10-11T02:28:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:28:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:28:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:28:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:28:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9060 "" "Go-http-client/1.1"
Oct 11 02:28:59 compute-0 nova_compute[356901]: 2025-10-11 02:28:59.895 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:28:59 compute-0 nova_compute[356901]: 2025-10-11 02:28:59.896 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:28:59 compute-0 nova_compute[356901]: 2025-10-11 02:28:59.897 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:29:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1531: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:29:00 compute-0 nova_compute[356901]: 2025-10-11 02:29:00.751 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:00 compute-0 sshd-session[436446]: Failed password for invalid user admin from 121.227.153.123 port 56562 ssh2
Oct 11 02:29:01 compute-0 openstack_network_exporter[374316]: ERROR   02:29:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:29:01 compute-0 openstack_network_exporter[374316]: ERROR   02:29:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:29:01 compute-0 openstack_network_exporter[374316]: ERROR   02:29:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:29:01 compute-0 openstack_network_exporter[374316]: ERROR   02:29:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:29:01 compute-0 openstack_network_exporter[374316]: ERROR   02:29:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:29:01 compute-0 ceph-mon[191930]: pgmap v1531: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:02 compute-0 podman[436448]: 2025-10-11 02:29:02.218820578 +0000 UTC m=+0.114959549 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, managed_by=edpm_ansible, com.redhat.component=ubi9-container, version=9.4, io.openshift.expose-services=, maintainer=Red Hat, Inc., container_name=kepler, distribution-scope=public, io.buildah.version=1.29.0, vcs-type=git, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.openshift.tags=base rhel9, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, name=ubi9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, vendor=Red Hat, Inc., build-date=2024-09-18T21:23:30, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']})
Oct 11 02:29:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1532: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:02 compute-0 sshd-session[436446]: Connection closed by invalid user admin 121.227.153.123 port 56562 [preauth]
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.146 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Updating instance_info_cache with network_info: [{"id": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "address": "fa:16:3e:c2:ee:14", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.80", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa7108c4c-c9", "ovs_interfaceid": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.162 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.163 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.163 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.164 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.164 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.185 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.185 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.186 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.186 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.186 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:29:03 compute-0 ceph-mon[191930]: pgmap v1532: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:29:03 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/202874259' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.633 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.447s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:29:03 compute-0 sshd-session[436468]: Invalid user admin from 121.227.153.123 port 40658
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.752 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.753 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.753 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.761 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.762 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.762 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000002 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.769 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.770 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.770 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.778 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.779 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:29:03 compute-0 nova_compute[356901]: 2025-10-11 02:29:03.779 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:29:03 compute-0 sshd-session[436468]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:29:03 compute-0 sshd-session[436468]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:29:04 compute-0 nova_compute[356901]: 2025-10-11 02:29:04.297 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:29:04 compute-0 nova_compute[356901]: 2025-10-11 02:29:04.298 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3216MB free_disk=59.855655670166016GB free_vcpus=4 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:29:04 compute-0 nova_compute[356901]: 2025-10-11 02:29:04.299 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:29:04 compute-0 nova_compute[356901]: 2025-10-11 02:29:04.299 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:29:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1533: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:04 compute-0 nova_compute[356901]: 2025-10-11 02:29:04.401 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:29:04 compute-0 nova_compute[356901]: 2025-10-11 02:29:04.402 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance d60d7ea1-5d00-4902-90e6-3ae67eb09a78 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:29:04 compute-0 nova_compute[356901]: 2025-10-11 02:29:04.402 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 7513b93e-d2b8-4ae0-8f1c-3df190945259 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:29:04 compute-0 nova_compute[356901]: 2025-10-11 02:29:04.403 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 358d31cf-2866-416a-b2fc-814ee4bfe89a actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:29:04 compute-0 nova_compute[356901]: 2025-10-11 02:29:04.404 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 4 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:29:04 compute-0 nova_compute[356901]: 2025-10-11 02:29:04.404 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=2560MB phys_disk=59GB used_disk=8GB total_vcpus=8 used_vcpus=4 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:29:04 compute-0 nova_compute[356901]: 2025-10-11 02:29:04.426 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing inventories for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:804
Oct 11 02:29:04 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/202874259' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:29:04 compute-0 nova_compute[356901]: 2025-10-11 02:29:04.453 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating ProviderTree inventory for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 from _refresh_and_get_inventory using data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} _refresh_and_get_inventory /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:768
Oct 11 02:29:04 compute-0 nova_compute[356901]: 2025-10-11 02:29:04.454 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating inventory in ProviderTree for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with inventory: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:176
Oct 11 02:29:04 compute-0 nova_compute[356901]: 2025-10-11 02:29:04.472 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing aggregate associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, aggregates: None _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:813
Oct 11 02:29:04 compute-0 nova_compute[356901]: 2025-10-11 02:29:04.491 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing trait associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, traits: COMPUTE_VOLUME_EXTEND,COMPUTE_NET_VIF_MODEL_VMXNET3,HW_CPU_X86_SSSE3,COMPUTE_RESCUE_BFV,COMPUTE_SOCKET_PCI_NUMA_AFFINITY,COMPUTE_NODE,HW_CPU_X86_SVM,COMPUTE_STORAGE_BUS_SCSI,HW_CPU_X86_FMA3,COMPUTE_GRAPHICS_MODEL_NONE,COMPUTE_NET_VIF_MODEL_RTL8139,HW_CPU_X86_SSE4A,COMPUTE_IMAGE_TYPE_QCOW2,HW_CPU_X86_BMI2,HW_CPU_X86_SSE42,HW_CPU_X86_AVX2,COMPUTE_IMAGE_TYPE_RAW,COMPUTE_VIOMMU_MODEL_VIRTIO,HW_CPU_X86_AESNI,COMPUTE_STORAGE_BUS_FDC,COMPUTE_GRAPHICS_MODEL_VIRTIO,HW_CPU_X86_AMD_SVM,COMPUTE_NET_VIF_MODEL_NE2K_PCI,COMPUTE_ACCELERATORS,HW_CPU_X86_SSE2,COMPUTE_GRAPHICS_MODEL_VGA,HW_CPU_X86_ABM,HW_CPU_X86_AVX,COMPUTE_NET_VIF_MODEL_E1000,COMPUTE_STORAGE_BUS_USB,COMPUTE_NET_ATTACH_INTERFACE,HW_CPU_X86_MMX,COMPUTE_SECURITY_TPM_2_0,COMPUTE_IMAGE_TYPE_ISO,HW_CPU_X86_SSE41,COMPUTE_IMAGE_TYPE_AKI,COMPUTE_IMAGE_TYPE_AMI,COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG,COMPUTE_DEVICE_TAGGING,COMPUTE_SECURITY_UEFI_SECURE_BOOT,COMPUTE_TRUSTED_CERTS,COMPUTE_NET_VIF_MODEL_VIRTIO,COMPUTE_VIOMMU_MODEL_INTEL,COMPUTE_STORAGE_BUS_SATA,HW_CPU_X86_SSE,COMPUTE_STORAGE_BUS_VIRTIO,COMPUTE_NET_VIF_MODEL_PCNET,COMPUTE_GRAPHICS_MODEL_CIRRUS,HW_CPU_X86_SHA,HW_CPU_X86_BMI,COMPUTE_NET_VIF_MODEL_E1000E,COMPUTE_NET_VIF_MODEL_SPAPR_VLAN,COMPUTE_VOLUME_ATTACH_WITH_TAG,COMPUTE_GRAPHICS_MODEL_BOCHS,COMPUTE_VIOMMU_MODEL_AUTO,COMPUTE_IMAGE_TYPE_ARI,HW_CPU_X86_CLMUL,COMPUTE_STORAGE_BUS_IDE,COMPUTE_VOLUME_MULTI_ATTACH,HW_CPU_X86_F16C,COMPUTE_SECURITY_TPM_1_2 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:825
Oct 11 02:29:04 compute-0 nova_compute[356901]: 2025-10-11 02:29:04.584 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:29:04 compute-0 nova_compute[356901]: 2025-10-11 02:29:04.615 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:29:05 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/232816773' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:29:05 compute-0 nova_compute[356901]: 2025-10-11 02:29:05.161 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.577s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:29:05 compute-0 nova_compute[356901]: 2025-10-11 02:29:05.171 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:29:05 compute-0 nova_compute[356901]: 2025-10-11 02:29:05.189 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:29:05 compute-0 nova_compute[356901]: 2025-10-11 02:29:05.191 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:29:05 compute-0 nova_compute[356901]: 2025-10-11 02:29:05.191 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.892s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:29:05 compute-0 ceph-mon[191930]: pgmap v1533: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:05 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/232816773' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:29:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:29:05 compute-0 nova_compute[356901]: 2025-10-11 02:29:05.754 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:05 compute-0 sshd-session[436468]: Failed password for invalid user admin from 121.227.153.123 port 40658 ssh2
Oct 11 02:29:05 compute-0 nova_compute[356901]: 2025-10-11 02:29:05.924 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1534: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.00221085813879664 of space, bias 1.0, pg target 0.663257441638992 quantized to 32 (current 32)
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:29:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:29:07 compute-0 sudo[436514]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:29:07 compute-0 sudo[436514]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:07 compute-0 sudo[436514]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:07 compute-0 sudo[436539]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:29:07 compute-0 sudo[436539]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:07 compute-0 sudo[436539]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:07 compute-0 sudo[436564]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:29:07 compute-0 sudo[436564]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:07 compute-0 sudo[436564]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:07 compute-0 ceph-mon[191930]: pgmap v1534: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:07 compute-0 sshd-session[436468]: Connection closed by invalid user admin 121.227.153.123 port 40658 [preauth]
Oct 11 02:29:07 compute-0 sudo[436589]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ls
Oct 11 02:29:07 compute-0 sudo[436589]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1535: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:08 compute-0 podman[436686]: 2025-10-11 02:29:08.571962968 +0000 UTC m=+0.123008232 container exec ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 02:29:08 compute-0 podman[436686]: 2025-10-11 02:29:08.687928023 +0000 UTC m=+0.238973347 container exec_died ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default)
Oct 11 02:29:08 compute-0 podman[436720]: 2025-10-11 02:29:08.908498269 +0000 UTC m=+0.090625554 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:29:08 compute-0 podman[436723]: 2025-10-11 02:29:08.939433257 +0000 UTC m=+0.116166822 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']})
Oct 11 02:29:08 compute-0 podman[436721]: 2025-10-11 02:29:08.953072295 +0000 UTC m=+0.128865594 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:29:09 compute-0 podman[436794]: 2025-10-11 02:29:09.02248721 +0000 UTC m=+0.076302777 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent)
Oct 11 02:29:09 compute-0 sshd-session[436614]: Invalid user admin from 121.227.153.123 port 40668
Oct 11 02:29:09 compute-0 ceph-mon[191930]: pgmap v1535: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:09 compute-0 nova_compute[356901]: 2025-10-11 02:29:09.601 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:09 compute-0 sshd-session[436614]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:29:09 compute-0 sshd-session[436614]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:29:09 compute-0 sudo[436589]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:29:09 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:29:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:29:09 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:29:09 compute-0 sudo[436921]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:29:09 compute-0 sudo[436921]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:09 compute-0 sudo[436921]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:09 compute-0 sudo[436946]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:29:09 compute-0 sudo[436946]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:09 compute-0 sudo[436946]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:10 compute-0 sudo[436971]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:29:10 compute-0 sudo[436971]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:10 compute-0 sudo[436971]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:10 compute-0 sudo[436996]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:29:10 compute-0 sudo[436996]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1536: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:29:10 compute-0 nova_compute[356901]: 2025-10-11 02:29:10.758 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:10 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:29:10 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:29:10 compute-0 ceph-mon[191930]: pgmap v1536: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:10 compute-0 sudo[436996]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:29:10 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:29:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:29:10 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:29:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:29:11 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:29:11 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev f910b5a3-6e05-4867-a5d0-2fb6fde7bf0f does not exist
Oct 11 02:29:11 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 2e38de64-3941-4f7a-95e4-16f854af5541 does not exist
Oct 11 02:29:11 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 11adf4a2-f16a-4dc8-aa71-55079ef11924 does not exist
Oct 11 02:29:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:29:11 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:29:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:29:11 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:29:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:29:11 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:29:11 compute-0 sudo[437050]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:29:11 compute-0 sudo[437050]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:11 compute-0 sudo[437050]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:11 compute-0 sudo[437075]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:29:11 compute-0 sudo[437075]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:11 compute-0 sudo[437075]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:11 compute-0 sudo[437100]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:29:11 compute-0 sudo[437100]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:11 compute-0 sudo[437100]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:11 compute-0 sudo[437125]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:29:11 compute-0 sudo[437125]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
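The sudo line above shows cephadm preparing OSDs by driving `ceph-volume lvm batch` against pre-created logical volumes inside a ceph container, passing `--no-systemd` because cephadm manages the service units itself. A rough hand-run equivalent of the inner command is sketched below; the LV paths are taken from the logged command, but invoking this outside cephadm is illustrative only:

```python
import subprocess

# Illustrative only: cephadm normally wraps this inside the ceph container.
# The LV paths come from the logged command; --no-systemd skips unit creation.
subprocess.run(
    ["ceph-volume", "lvm", "batch", "--no-auto",
     "/dev/ceph_vg0/ceph_lv0", "/dev/ceph_vg1/ceph_lv1", "/dev/ceph_vg2/ceph_lv2",
     "--yes", "--no-systemd"],
    check=True,
)
```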
Oct 11 02:29:11 compute-0 sshd-session[436614]: Failed password for invalid user admin from 121.227.153.123 port 40668 ssh2
Oct 11 02:29:11 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:29:11 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:29:11 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:29:11 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:29:11 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:29:11 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
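The handle_command/audit pairs above are the monitor receiving JSON-formatted commands from the mgr's cephadm module (config-key set, auth get, osd tree, config generate-minimal-conf). The same command envelope can be sent through the librados Python bindings; a minimal sketch, assuming the python3-rados package and an admin keyring at the default paths:

```python
import json
import rados

# Assumes /etc/ceph/ceph.conf and a client.admin keyring are present.
cluster = rados.Rados(conffile="/etc/ceph/ceph.conf", name="client.admin")
cluster.connect()
try:
    # Same JSON envelope as the logged 'osd tree' dispatch above.
    cmd = json.dumps({"prefix": "osd tree", "states": ["destroyed"], "format": "json"})
    ret, outbuf, outs = cluster.mon_command(cmd, b"")
    print(ret, json.loads(outbuf) if ret == 0 else outs)
finally:
    cluster.shutdown()
```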
Oct 11 02:29:12 compute-0 podman[437189]: 2025-10-11 02:29:12.163138994 +0000 UTC m=+0.136125045 container create 76fab42b13871213cefa47404c624ec66632779be7c29b0a9affbc0ff4c70e4b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_payne, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:29:12 compute-0 podman[437189]: 2025-10-11 02:29:12.081339506 +0000 UTC m=+0.054325637 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:29:12 compute-0 systemd[1]: Started libpod-conmon-76fab42b13871213cefa47404c624ec66632779be7c29b0a9affbc0ff4c70e4b.scope.
Oct 11 02:29:12 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:29:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1537: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:12 compute-0 podman[437189]: 2025-10-11 02:29:12.327657576 +0000 UTC m=+0.300643607 container init 76fab42b13871213cefa47404c624ec66632779be7c29b0a9affbc0ff4c70e4b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_payne, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:29:12 compute-0 podman[437189]: 2025-10-11 02:29:12.338000913 +0000 UTC m=+0.310986924 container start 76fab42b13871213cefa47404c624ec66632779be7c29b0a9affbc0ff4c70e4b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_payne, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 02:29:12 compute-0 festive_payne[437205]: 167 167
Oct 11 02:29:12 compute-0 systemd[1]: libpod-76fab42b13871213cefa47404c624ec66632779be7c29b0a9affbc0ff4c70e4b.scope: Deactivated successfully.
Oct 11 02:29:12 compute-0 conmon[437205]: conmon 76fab42b13871213cefa <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-76fab42b13871213cefa47404c624ec66632779be7c29b0a9affbc0ff4c70e4b.scope/container/memory.events
Oct 11 02:29:12 compute-0 podman[437189]: 2025-10-11 02:29:12.350196081 +0000 UTC m=+0.323182112 container attach 76fab42b13871213cefa47404c624ec66632779be7c29b0a9affbc0ff4c70e4b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_payne, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default)
Oct 11 02:29:12 compute-0 podman[437189]: 2025-10-11 02:29:12.351383274 +0000 UTC m=+0.324369345 container died 76fab42b13871213cefa47404c624ec66632779be7c29b0a9affbc0ff4c70e4b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_payne, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 02:29:12 compute-0 systemd[1]: var-lib-containers-storage-overlay-42b1b39ec1811cb5db88ec150cdcc257d940834d474fd2c96d3d8c6d13946fba-merged.mount: Deactivated successfully.
Oct 11 02:29:12 compute-0 podman[437189]: 2025-10-11 02:29:12.760701444 +0000 UTC m=+0.733687465 container remove 76fab42b13871213cefa47404c624ec66632779be7c29b0a9affbc0ff4c70e4b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_payne, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Oct 11 02:29:12 compute-0 systemd[1]: libpod-conmon-76fab42b13871213cefa47404c624ec66632779be7c29b0a9affbc0ff4c70e4b.scope: Deactivated successfully.
Oct 11 02:29:12 compute-0 ceph-mon[191930]: pgmap v1537: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:13 compute-0 podman[437228]: 2025-10-11 02:29:13.064517298 +0000 UTC m=+0.093882794 container create d18d92a0340bb398019eed84ba93bbd0c974b04e0bc3c918d8844505de27137f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_wing, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 02:29:13 compute-0 sshd-session[436614]: Connection closed by invalid user admin 121.227.153.123 port 40668 [preauth]
Oct 11 02:29:13 compute-0 podman[437228]: 2025-10-11 02:29:13.031702228 +0000 UTC m=+0.061067754 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:29:13 compute-0 systemd[1]: Started libpod-conmon-d18d92a0340bb398019eed84ba93bbd0c974b04e0bc3c918d8844505de27137f.scope.
Oct 11 02:29:13 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:29:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ebbe4c325f0958e0fd55a8da4f6518bc9927ec128141bc5864b64a08a342c59a/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:29:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ebbe4c325f0958e0fd55a8da4f6518bc9927ec128141bc5864b64a08a342c59a/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:29:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ebbe4c325f0958e0fd55a8da4f6518bc9927ec128141bc5864b64a08a342c59a/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:29:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ebbe4c325f0958e0fd55a8da4f6518bc9927ec128141bc5864b64a08a342c59a/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:29:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ebbe4c325f0958e0fd55a8da4f6518bc9927ec128141bc5864b64a08a342c59a/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:29:13 compute-0 podman[437228]: 2025-10-11 02:29:13.302640621 +0000 UTC m=+0.332006157 container init d18d92a0340bb398019eed84ba93bbd0c974b04e0bc3c918d8844505de27137f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_wing, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507)
Oct 11 02:29:13 compute-0 podman[437228]: 2025-10-11 02:29:13.31594337 +0000 UTC m=+0.345308856 container start d18d92a0340bb398019eed84ba93bbd0c974b04e0bc3c918d8844505de27137f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_wing, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 02:29:13 compute-0 podman[437228]: 2025-10-11 02:29:13.335992296 +0000 UTC m=+0.365357832 container attach d18d92a0340bb398019eed84ba93bbd0c974b04e0bc3c918d8844505de27137f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_wing, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.864 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is larger than the number of worker threads available to execute them. Therefore, one can expect polling to take longer than expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.865 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.877 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '358d31cf-2866-416a-b2fc-814ee4bfe89a', 'name': 'vn-vgckve2-tqko7trrsvwg-ebwakep2a2y3-vnf-ihxi227vdpwh', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000004', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {'metering.server_group': '3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.881 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': 'd60d7ea1-5d00-4902-90e6-3ae67eb09a78', 'name': 'vn-vgckve2-ittzoa6m3dmq-egfg3ceao3k4-vnf-rvnztbwt2zgh', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000002', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {'metering.server_group': '3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.885 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '7513b93e-d2b8-4ae0-8f1c-3df190945259', 'name': 'vn-vgckve2-djjfpphdsuuh-gthznuj2xct2-vnf-jmvtgw3mflyn', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000003', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {'metering.server_group': '3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.888 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
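The four discover_libvirt_polling records above are plain dicts; note that the three VNF instances share a `metering.server_group` metadata key while test_0 has none. A hypothetical consumer grouping instances by that key (records trimmed to the relevant fields) might look like:

```python
from collections import defaultdict

# Trimmed copies of the instance-data records logged above.
instances = [
    {"id": "358d31cf-2866-416a-b2fc-814ee4bfe89a",
     "metadata": {"metering.server_group": "3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e"}},
    {"id": "d60d7ea1-5d00-4902-90e6-3ae67eb09a78",
     "metadata": {"metering.server_group": "3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e"}},
    {"id": "7513b93e-d2b8-4ae0-8f1c-3df190945259",
     "metadata": {"metering.server_group": "3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e"}},
    {"id": "0cc56d17-ec3a-4408-bccb-91b29427379e", "metadata": {}},
]

groups = defaultdict(list)
for inst in instances:
    groups[inst["metadata"].get("metering.server_group", "ungrouped")].append(inst["id"])
print(dict(groups))
```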
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.889 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.889 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.889 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.889 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.890 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:29:13.889478) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.896 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.incoming.bytes volume: 1786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.901 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.bytes volume: 8664 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.907 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.bytes volume: 1912 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.912 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2436 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.913 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
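network.incoming.bytes (like the other network.* and disk.device.* meters in this section) is a cumulative counter per instance, so a single sample such as 8664 bytes is only meaningful relative to an earlier poll. A minimal sketch of turning two successive samples into a byte rate; the second sample below is hypothetical:

```python
def rate_per_second(prev, curr):
    """prev/curr: (unix_ts, cumulative_bytes) pairs from two successive polls."""
    dt = curr[0] - prev[0]
    if dt <= 0:
        raise ValueError("samples must be strictly time-ordered")
    delta = curr[1] - prev[1]
    if delta < 0:        # counter reset (e.g. instance reboot): treat as fresh
        delta = curr[1]
    return delta / dt

# First value matches the d60d7ea1... sample above; the second is made up.
print(rate_per_second((1760149753, 8664), (1760150053, 12664)))  # ~13.3 B/s
```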
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.913 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.913 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.913 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.913 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.914 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.914 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.outgoing.packets volume: 21 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.914 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.packets volume: 63 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.914 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:29:13.914005) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.915 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.packets volume: 23 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.915 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 23 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.915 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.915 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.915 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.915 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.916 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.916 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.916 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.916 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.916 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.916 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.917 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.917 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.917 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:29:13.916101) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.917 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.917 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.917 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.918 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.918 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.918 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.918 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.919 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.919 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.919 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.920 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:29:13.918106) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.920 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.920 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.920 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.920 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.920 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:29:13.920333) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.956 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.957 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.957 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.capacity volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.982 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.982 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:13.983 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.capacity volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.019 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.020 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.021 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.capacity volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.044 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.045 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.045 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.046 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.046 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.046 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.046 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.046 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.046 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.047 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:29:14.046731) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
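
Note the two process ids in each heartbeat pair: the polling worker (14 here) logs the "Polster heartbeat update" line, while a separate status process (12) persists the timestamp. A toy two-process sketch of that hand-off, assuming a plain queue rather than whatever IPC ceilometer actually uses:

import datetime
import multiprocessing as mp

def status_writer(q: "mp.Queue"):
    # Runs in its own process, like the PID-12 writer in the log.
    while True:
        meter = q.get()
        if meter is None:
            break
        stamp = datetime.datetime.now(datetime.timezone.utc).isoformat()
        print(f"Updated heartbeat for {meter} ({stamp})")

if __name__ == "__main__":
    q = mp.Queue()
    writer = mp.Process(target=status_writer, args=(q,))
    writer.start()
    q.put("disk.device.read.bytes")  # the polling worker's side
    q.put(None)                      # shut the writer down
    writer.join()
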
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.104 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.104 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.105 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.bytes volume: 385378 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.165 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.bytes volume: 23325184 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.166 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.166 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.bytes volume: 385378 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.222 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.223 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.223 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.bytes volume: 385378 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 podman[437258]: 2025-10-11 02:29:14.228174884 +0000 UTC m=+0.120268175 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, container_name=iscsid, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:29:14 compute-0 podman[437256]: 2025-10-11 02:29:14.246581705 +0000 UTC m=+0.129851422 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_id=multipathd, container_name=multipathd, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
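
Interleaved with the polling output, podman reports periodic health_status events for the iscsid and multipathd containers; each event embeds the container's full config_data, including the healthcheck definition (a host directory mounted read-only at /openstack plus a /openstack/healthcheck test command). A sketch of pulling the health fields out of such a journal line, keyed to the exact field order in these two events; this is an illustrative parser, not a podman API:

import re

HEALTH_RE = re.compile(
    r"container health_status (?P<cid>[0-9a-f]{64}) "
    r"\(image=(?P<image>[^,]+), name=(?P<name>[^,]+), "
    r"health_status=(?P<status>[^,]+), "
    r"health_failing_streak=(?P<streak>\d+)"
)

def parse_health(journal_line: str):
    """Return the container's health fields, or None if absent."""
    m = HEALTH_RE.search(journal_line)
    return m.groupdict() if m else None

Applied to the iscsid line above this yields status "healthy" with a failing streak of 0; recent podman versions can also stream the same information as structured events, so the regex is only a convenience for reading journals like this one.
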
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.276 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.277 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.277 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.278 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.278 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.278 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.278 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.278 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.279 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.279 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.latency volume: 1845147961 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.279 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.latency volume: 292571291 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.279 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:29:14.278903) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.279 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.latency volume: 162750190 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.279 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.latency volume: 1853196562 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.280 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.latency volume: 293231554 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.280 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.latency volume: 250459547 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.280 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.latency volume: 1696814304 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.280 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.latency volume: 210864290 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.281 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.latency volume: 178724423 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.281 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.281 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.281 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.282 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
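
The read.latency volumes above are cumulative counters (per ceilometer's measurement definitions, nanoseconds of read time taken from libvirt's block stats), so a single reading means little on its own; what matters is the difference between successive polls. A small helper, assuming those cumulative-nanosecond semantics:

def mean_read_latency_ms(lat_prev_ns, lat_now_ns, req_prev, req_now):
    """Average read latency between two polls, in milliseconds."""
    completed = req_now - req_prev
    if completed <= 0:
        return None  # no reads finished between the two polls
    return (lat_now_ns - lat_prev_ns) / completed / 1e6

# e.g. 4 extra reads costing 8,048,601 ns in total average ~2 ms each
# (numbers illustrative, not taken from adjacent samples in this log):
assert round(mean_read_latency_ms(0, 8_048_601, 840, 844), 1) == 2.0
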
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.282 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.282 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.282 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.282 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.282 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.283 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:29:14.282721) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.283 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.283 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.283 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.requests volume: 124 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.283 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.requests volume: 844 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.283 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.284 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.read.requests volume: 124 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.284 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.284 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.284 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.requests volume: 124 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.285 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.285 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.285 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.286 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.286 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.286 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.286 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.286 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.286 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.287 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:29:14.286692) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.287 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.287 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.287 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.usage volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.288 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.288 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.288 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.usage volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.288 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.288 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.289 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.usage volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.289 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.289 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.289 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.290 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
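
The three usage values per instance mirror the capacity figures exactly: two fully allocated 1 GiB devices and one 583680-byte device (config-drive-sized, plausibly). A quick unit helper makes the raw byte volumes easier to eyeball; this is an illustrative utility, not part of ceilometer:

def human(n: float) -> str:
    """Render a byte count in binary units (illustrative helper)."""
    for unit in ("B", "KiB", "MiB", "GiB"):
        if n < 1024 or unit == "GiB":
            return f"{n:.1f} {unit}"
        n /= 1024

assert human(1073741824) == "1.0 GiB"  # the two full-size devices
assert human(583680) == "570.0 KiB"    # the small third device
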
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.290 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.290 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.290 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.290 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.290 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.291 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:29:14.290709) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.291 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.bytes volume: 41779200 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.291 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.291 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.291 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.bytes volume: 41852928 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.291 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.292 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.292 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.bytes volume: 41779200 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.292 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.292 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.292 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.293 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.293 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.293 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.294 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.294 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.294 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.294 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.294 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.295 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.latency volume: 7286997145 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.295 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:29:14.294622) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.295 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.latency volume: 24741980 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.295 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.295 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.latency volume: 5172044232 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.296 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.latency volume: 26893276 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.296 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.296 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.latency volume: 6089609601 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.296 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.latency volume: 25967717 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.296 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.297 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.297 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.297 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.297 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.298 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.298 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.298 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.298 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.298 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.298 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:29:14.298326) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1538: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
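
The co-located ceph-mgr drops a pgmap digest into the same journal: all 321 placement groups are active+clean, with 263 MiB of logical data occupying 357 MiB of raw space out of 60 GiB. A parse sketch keyed to this exact line format (an illustrative regex, not a ceph API):

import re

PGMAP_RE = re.compile(
    r"pgmap v(?P<version>\d+): (?P<pgs>\d+) pgs: "
    r"(?P<states>[^;]+); (?P<data>\S+ \S+) data, "
    r"(?P<used>\S+ \S+) used, (?P<avail>\S+ \S+) / (?P<total>\S+ \S+) avail"
)

line = ("pgmap v1538: 321 pgs: 321 active+clean; 263 MiB data, "
        "357 MiB used, 60 GiB / 60 GiB avail")
m = PGMAP_RE.search(line)
assert m and m.group("pgs") == "321" and m.group("used") == "357 MiB"
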
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.320 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.345 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.370 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.393 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.394 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
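
All four instances report power.state volume 1. Assuming these samples use nova's power-state codes (nova.compute.power_state), 1 decodes to RUNNING:

# Nova-style power-state codes; the "power.state volume: 1" samples
# above therefore read as RUNNING (mapping assumed from nova).
POWER_STATE = {
    0: "NOSTATE",
    1: "RUNNING",
    3: "PAUSED",
    4: "SHUTDOWN",
    6: "CRASHED",
    7: "SUSPENDED",
}

assert POWER_STATE[1] == "RUNNING"  # the volume seen for all four VMs
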
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.394 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.394 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.394 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.394 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.394 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.395 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.395 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:29:14.394782) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.395 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.396 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.396 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.requests volume: 241 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.396 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.396 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.397 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.397 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.397 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.398 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.398 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.398 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.399 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.399 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.399 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.399 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.399 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.400 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.400 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.400 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.400 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.401 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:29:14.399993) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.401 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.402 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
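
Unlike the cumulative disk counters earlier in the cycle, the .delta network meters report the change since the previous poll, so a volume of 0 simply means no traffic in the interval (the 210- and 70-byte outgoing deltas a few lines below show the same mechanics). A simplified sketch; a real collector also has to handle counter resets more carefully:

def to_delta(prev_cumulative: int, now_cumulative: int) -> int:
    """Difference of two cumulative readings (naive about resets)."""
    d = now_cumulative - prev_cumulative
    return d if d >= 0 else now_cumulative  # counter wrapped or reset

assert to_delta(1000, 1000) == 0    # the zero-volume samples above
assert to_delta(1000, 1210) == 210  # cf. the 210-byte outgoing delta
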
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.402 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.402 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.402 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.402 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.402 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.403 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.403 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.404 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.404 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.404 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.404 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.404 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:29:14.403143) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.405 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.405 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.405 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.incoming.packets volume: 14 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.405 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.packets volume: 56 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.406 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:29:14.405158) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.406 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.packets volume: 17 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.406 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 23 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.407 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.407 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.407 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.407 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.408 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.408 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.408 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.outgoing.bytes.delta volume: 210 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.408 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.bytes.delta volume: 70 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.408 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:29:14.408128) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.409 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.bytes.delta volume: 70 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.409 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 70 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.409 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.410 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.410 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.410 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.410 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.410 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.412 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.412 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.412 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.412 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.412 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.412 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.413 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.413 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.413 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.414 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.414 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.415 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.415 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.415 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.415 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.415 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.415 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.415 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:29:14.410901) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.416 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.416 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.allocation volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.416 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:29:14.412972) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.416 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.417 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:29:14.415707) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.417 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.417 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/disk.device.allocation volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.417 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.418 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.419 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.allocation volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 sshd-session[437250]: Invalid user admin from 121.227.153.123 port 59034
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.419 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.420 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.420 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.421 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.421 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.422 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.422 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.422 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.422 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.422 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.422 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.423 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.423 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.423 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.423 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.424 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.424 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.424 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.424 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.424 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/cpu volume: 36910000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.424 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/cpu volume: 317740000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.425 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/cpu volume: 37680000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.425 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 42760000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.425 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
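Aside on units: ceilometer's cpu meter reports cumulative guest CPU time in nanoseconds, so the samples above convert directly to seconds; a quick check in Python:

    # cumulative CPU time reported above for instance 358d31cf-2866-416a-b2fc-814ee4bfe89a
    ns = 36910000000
    print(ns / 1e9)  # 36.91 seconds of guest CPU time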
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.426 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.426 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.426 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.426 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.426 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.426 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.outgoing.bytes volume: 2286 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.427 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/network.outgoing.bytes volume: 7374 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.427 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.bytes volume: 2398 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.427 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2342 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.428 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.428 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.428 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.428 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.428 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.429 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.429 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/memory.usage volume: 48.98046875 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.429 14 DEBUG ceilometer.compute.pollsters [-] d60d7ea1-5d00-4902-90e6-3ae67eb09a78/memory.usage volume: 48.9375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.429 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/memory.usage volume: 49.046875 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.430 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.83984375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.430 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.430 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.431 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.430 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:29:14.422403) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.431 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:29:14.424364) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.431 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:29:14.426574) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.431 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:29:14.429039) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.431 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.432 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.432 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.433 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.433 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.433 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.434 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.434 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.434 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.434 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.434 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.434 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.434 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.435 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.435 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.435 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.435 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.435 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.436 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.436 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.436 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.436 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.436 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.437 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.437 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:29:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:29:14.437 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
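The block above traces one complete ceilometer polling cycle: for each meter the agent runs discovery (local_instances), skips the pollster when discovery returns nothing new, updates its heartbeat, emits one sample per instance UUID, and finally logs it as finished. A minimal sketch of that control flow, using hypothetical stand-in objects rather than ceilometer's real classes:

    from datetime import datetime, timezone

    class DemoPollster:
        """Stand-in for a ceilometer pollster (hypothetical, not the real API)."""
        def __init__(self, name):
            self.name = name

        def get_samples(self, resource):
            return 42  # a real pollster would read hypervisor stats here

    def run_cycle(pollsters, discover):
        heartbeats = {}
        for p in pollsters:
            resources = discover("local_instances")
            if not resources:
                print(f"Skip pollster {p.name}, no new resources found this cycle")
                continue
            heartbeats[p.name] = datetime.now(timezone.utc)  # heartbeat update
            for r in resources:
                print(f"{r}/{p.name} volume: {p.get_samples(r)}")
            print(f"Finished polling pollster {p.name}")
        return heartbeats

    run_cycle([DemoPollster("cpu")], lambda source: ["358d31cf-2866-416a-b2fc-814ee4bfe89a"])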
Oct 11 02:29:14 compute-0 unruffled_wing[437245]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:29:14 compute-0 unruffled_wing[437245]: --> relative data size: 1.0
Oct 11 02:29:14 compute-0 unruffled_wing[437245]: --> All data devices are unavailable
Oct 11 02:29:14 compute-0 systemd[1]: libpod-d18d92a0340bb398019eed84ba93bbd0c974b04e0bc3c918d8844505de27137f.scope: Deactivated successfully.
Oct 11 02:29:14 compute-0 systemd[1]: libpod-d18d92a0340bb398019eed84ba93bbd0c974b04e0bc3c918d8844505de27137f.scope: Consumed 1.114s CPU time.
Oct 11 02:29:14 compute-0 podman[437228]: 2025-10-11 02:29:14.532611546 +0000 UTC m=+1.561977072 container died d18d92a0340bb398019eed84ba93bbd0c974b04e0bc3c918d8844505de27137f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_wing, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:29:14 compute-0 nova_compute[356901]: 2025-10-11 02:29:14.605 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:14 compute-0 sshd-session[437250]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:29:14 compute-0 sshd-session[437250]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:29:14 compute-0 systemd[1]: var-lib-containers-storage-overlay-ebbe4c325f0958e0fd55a8da4f6518bc9927ec128141bc5864b64a08a342c59a-merged.mount: Deactivated successfully.
Oct 11 02:29:15 compute-0 podman[437228]: 2025-10-11 02:29:15.348485937 +0000 UTC m=+2.377851443 container remove d18d92a0340bb398019eed84ba93bbd0c974b04e0bc3c918d8844505de27137f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_wing, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:29:15 compute-0 sudo[437125]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:15 compute-0 systemd[1]: libpod-conmon-d18d92a0340bb398019eed84ba93bbd0c974b04e0bc3c918d8844505de27137f.scope: Deactivated successfully.
Oct 11 02:29:15 compute-0 sudo[437326]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:29:15 compute-0 sudo[437326]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:15 compute-0 sudo[437326]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:15 compute-0 sudo[437351]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:29:15 compute-0 sudo[437351]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:15 compute-0 sudo[437351]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:29:15 compute-0 ceph-mon[191930]: pgmap v1538: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:15 compute-0 sudo[437376]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:29:15 compute-0 sudo[437376]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:15 compute-0 sudo[437376]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:15 compute-0 nova_compute[356901]: 2025-10-11 02:29:15.762 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:15 compute-0 sudo[437401]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:29:15 compute-0 sudo[437401]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
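The sudo COMMAND above is cephadm launching ceph-volume in a one-shot container to inventory OSD logical volumes. A hedged sketch of issuing the equivalent query from Python, assuming the cephadm CLI is on PATH (this mirrors the command line in the log, not cephadm's internal API):

    import json
    import subprocess

    # fsid copied from the log entry above
    cmd = [
        "sudo", "cephadm",
        "ceph-volume", "--fsid", "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
        "--", "lvm", "list", "--format", "json",
    ]
    result = subprocess.run(cmd, capture_output=True, text=True, check=True)
    print(json.loads(result.stdout).keys())  # OSD ids, e.g. dict_keys(['0', '1'])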
Oct 11 02:29:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1539: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:16 compute-0 podman[437466]: 2025-10-11 02:29:16.247940568 +0000 UTC m=+0.046066788 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:29:16 compute-0 podman[437466]: 2025-10-11 02:29:16.530066801 +0000 UTC m=+0.328192941 container create 4745e9f7d341e33e02dd13d8021b3f70fe335ca8adb10e6fafc19d5db3bc3f4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_booth, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:29:16 compute-0 sshd-session[437250]: Failed password for invalid user admin from 121.227.153.123 port 59034 ssh2
Oct 11 02:29:16 compute-0 systemd[1]: Started libpod-conmon-4745e9f7d341e33e02dd13d8021b3f70fe335ca8adb10e6fafc19d5db3bc3f4c.scope.
Oct 11 02:29:16 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:29:17 compute-0 podman[437466]: 2025-10-11 02:29:17.356949869 +0000 UTC m=+1.155076009 container init 4745e9f7d341e33e02dd13d8021b3f70fe335ca8adb10e6fafc19d5db3bc3f4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_booth, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 02:29:17 compute-0 podman[437466]: 2025-10-11 02:29:17.372386467 +0000 UTC m=+1.170512607 container start 4745e9f7d341e33e02dd13d8021b3f70fe335ca8adb10e6fafc19d5db3bc3f4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_booth, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 02:29:17 compute-0 practical_booth[437480]: 167 167
Oct 11 02:29:17 compute-0 systemd[1]: libpod-4745e9f7d341e33e02dd13d8021b3f70fe335ca8adb10e6fafc19d5db3bc3f4c.scope: Deactivated successfully.
Oct 11 02:29:17 compute-0 ceph-mon[191930]: pgmap v1539: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:17 compute-0 podman[437466]: 2025-10-11 02:29:17.660915508 +0000 UTC m=+1.459041648 container attach 4745e9f7d341e33e02dd13d8021b3f70fe335ca8adb10e6fafc19d5db3bc3f4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_booth, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 02:29:17 compute-0 podman[437466]: 2025-10-11 02:29:17.662860972 +0000 UTC m=+1.460987122 container died 4745e9f7d341e33e02dd13d8021b3f70fe335ca8adb10e6fafc19d5db3bc3f4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_booth, ceph=True, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_REF=reef)
Oct 11 02:29:18 compute-0 systemd[1]: var-lib-containers-storage-overlay-21d5555e4b3d3144f647f18ce2846e8452d342401ae4012e851e2d9c3979b966-merged.mount: Deactivated successfully.
Oct 11 02:29:18 compute-0 sshd-session[437250]: Connection closed by invalid user admin 121.227.153.123 port 59034 [preauth]
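That closes one full brute-force attempt from 121.227.153.123: invalid user, PAM authentication failure, failed password, preauth disconnect (a second attempt from the same address follows below). A small self-contained sketch for tallying such attempts from a journal dump in this syslog text format:

    import re
    from collections import Counter

    # Matches sshd lines like:
    #   Invalid user admin from 121.227.153.123 port 59034
    INVALID_USER = re.compile(r"Invalid user (\S+) from (\d+\.\d+\.\d+\.\d+) port \d+")

    def count_invalid_users(lines):
        attempts = Counter()
        for line in lines:
            m = INVALID_USER.search(line)
            if m:
                user, source_ip = m.groups()
                attempts[(source_ip, user)] += 1
        return attempts

    sample = [
        "Oct 11 02:29:14 compute-0 sshd-session[437250]: Invalid user admin from 121.227.153.123 port 59034",
        "Oct 11 02:29:19 compute-0 sshd-session[437499]: Invalid user admin from 121.227.153.123 port 59044",
    ]
    print(count_invalid_users(sample))  # Counter({('121.227.153.123', 'admin'): 2})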
Oct 11 02:29:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1540: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:18 compute-0 podman[437466]: 2025-10-11 02:29:18.380587463 +0000 UTC m=+2.178713613 container remove 4745e9f7d341e33e02dd13d8021b3f70fe335ca8adb10e6fafc19d5db3bc3f4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=practical_booth, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:29:18 compute-0 systemd[1]: libpod-conmon-4745e9f7d341e33e02dd13d8021b3f70fe335ca8adb10e6fafc19d5db3bc3f4c.scope: Deactivated successfully.
Oct 11 02:29:18 compute-0 podman[437506]: 2025-10-11 02:29:18.707067936 +0000 UTC m=+0.105277721 container create 932601b990e85ef601de4fbf1e890e802740b9778c7197ba728f457775fdec66 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_heisenberg, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 02:29:18 compute-0 podman[437506]: 2025-10-11 02:29:18.640925121 +0000 UTC m=+0.039134896 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:29:18 compute-0 ceph-mon[191930]: pgmap v1540: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:18 compute-0 systemd[1]: Started libpod-conmon-932601b990e85ef601de4fbf1e890e802740b9778c7197ba728f457775fdec66.scope.
Oct 11 02:29:18 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:29:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dc9dcce5871d2843bbad670e3ace75005a302e67b92463bbd238c2fa3d0bec39/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:29:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dc9dcce5871d2843bbad670e3ace75005a302e67b92463bbd238c2fa3d0bec39/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:29:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dc9dcce5871d2843bbad670e3ace75005a302e67b92463bbd238c2fa3d0bec39/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:29:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dc9dcce5871d2843bbad670e3ace75005a302e67b92463bbd238c2fa3d0bec39/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
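The four xfs messages above are informational: these overlay mounts carry 32-bit inode timestamps, which run out at 0x7fffffff seconds after the Unix epoch. The cutoff is easy to verify:

    from datetime import datetime, timezone

    # 0x7fffffff seconds past the epoch is the y2038 limit the kernel warns about
    print(datetime.fromtimestamp(0x7FFFFFFF, tz=timezone.utc).isoformat())
    # 2038-01-19T03:14:07+00:00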
Oct 11 02:29:19 compute-0 podman[437506]: 2025-10-11 02:29:19.171136972 +0000 UTC m=+0.569346777 container init 932601b990e85ef601de4fbf1e890e802740b9778c7197ba728f457775fdec66 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_heisenberg, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_REF=reef)
Oct 11 02:29:19 compute-0 podman[437506]: 2025-10-11 02:29:19.186442547 +0000 UTC m=+0.584652342 container start 932601b990e85ef601de4fbf1e890e802740b9778c7197ba728f457775fdec66 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_heisenberg, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507)
Oct 11 02:29:19 compute-0 podman[437506]: 2025-10-11 02:29:19.413614606 +0000 UTC m=+0.811824391 container attach 932601b990e85ef601de4fbf1e890e802740b9778c7197ba728f457775fdec66 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_heisenberg, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:29:19 compute-0 sshd-session[437499]: Invalid user admin from 121.227.153.123 port 59044
Oct 11 02:29:19 compute-0 nova_compute[356901]: 2025-10-11 02:29:19.608 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:19 compute-0 sshd-session[437499]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:29:19 compute-0 sshd-session[437499]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
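The container output that follows is the JSON produced by the lvm list call above: a map from OSD id to a list of logical volumes, each carrying its backing devices and ceph.* tags. A minimal sketch for walking that structure (field names taken from the dump below):

    import json

    def osd_layout(lvm_list_output: str):
        """Yield (osd_id, lv_path, devices, osd_fsid) tuples from the JSON dump."""
        for osd_id, volumes in json.loads(lvm_list_output).items():
            for vol in volumes:
                yield osd_id, vol["lv_path"], vol["devices"], vol["tags"]["ceph.osd_fsid"]

    # Against the dump below, the first tuple would be:
    #   ('0', '/dev/ceph_vg0/ceph_lv0', ['/dev/loop3'], 'a9c7940d-c154-46ef-9c18-8ba55dddd3d6')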
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]: {
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:     "0": [
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:         {
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "devices": [
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "/dev/loop3"
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             ],
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "lv_name": "ceph_lv0",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "lv_size": "21470642176",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "name": "ceph_lv0",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "tags": {
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.cluster_name": "ceph",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.crush_device_class": "",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.encrypted": "0",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.osd_id": "0",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.type": "block",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.vdo": "0"
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             },
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "type": "block",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "vg_name": "ceph_vg0"
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:         }
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:     ],
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:     "1": [
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:         {
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "devices": [
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "/dev/loop4"
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             ],
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "lv_name": "ceph_lv1",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "lv_size": "21470642176",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "name": "ceph_lv1",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "tags": {
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.cluster_name": "ceph",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.crush_device_class": "",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.encrypted": "0",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.osd_id": "1",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.type": "block",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.vdo": "0"
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             },
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "type": "block",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "vg_name": "ceph_vg1"
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:         }
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:     ],
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:     "2": [
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:         {
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "devices": [
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "/dev/loop5"
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             ],
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "lv_name": "ceph_lv2",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "lv_size": "21470642176",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "name": "ceph_lv2",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "tags": {
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.cluster_name": "ceph",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.crush_device_class": "",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.encrypted": "0",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.osd_id": "2",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.type": "block",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:                 "ceph.vdo": "0"
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             },
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "type": "block",
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:             "vg_name": "ceph_vg2"
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:         }
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]:     ]
Oct 11 02:29:20 compute-0 agitated_heisenberg[437523]: }
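
The JSON block agitated_heisenberg just emitted is `ceph-volume lvm list --format json`: a map from OSD id ("0", "1", "2") to the logical volumes backing that OSD, with the ceph.* LVM tags present both as the raw lv_tags string and as a parsed tags object. A minimal sketch that indexes it, assuming the block has been saved with the syslog prefixes stripped as lvm_list.json (a hypothetical file name):

    import json

    with open("lvm_list.json") as fh:   # hypothetical: the JSON block above
        lvm = json.load(fh)

    for osd_id, lvs in sorted(lvm.items()):
        for lv in lvs:
            tags = lv["tags"]
            print(osd_id, lv["lv_path"], lv["devices"][0],
                  tags["ceph.osd_fsid"])

For the data above this prints one line per OSD, e.g. `0 /dev/ceph_vg0/ceph_lv0 /dev/loop3 a9c7940d-c154-46ef-9c18-8ba55dddd3d6`.
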
Oct 11 02:29:20 compute-0 systemd[1]: libpod-932601b990e85ef601de4fbf1e890e802740b9778c7197ba728f457775fdec66.scope: Deactivated successfully.
Oct 11 02:29:20 compute-0 podman[437533]: 2025-10-11 02:29:20.176934631 +0000 UTC m=+0.055536250 container died 932601b990e85ef601de4fbf1e890e802740b9778c7197ba728f457775fdec66 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_heisenberg, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 02:29:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1541: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
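
The recurring pgmap lines from ceph-mgr and ceph-mon are the cluster heartbeat: 321 PGs, all active+clean, 357 MiB used of 60 GiB raw. To turn them into data points rather than prose, a sketch (the pattern assumes only the exact wording of these lines):

    import re

    PGMAP = re.compile(
        r"pgmap v(\d+): (\d+) pgs: .*; "
        r"([\d.]+ [KMGT]?iB) data, ([\d.]+ [KMGT]?iB) used, "
        r"([\d.]+ [KMGT]?iB) / ([\d.]+ [KMGT]?iB) avail")

    def parse_pgmap(line):
        """Return (version, pgs, data, used, avail, total) or None."""
        m = PGMAP.search(line)
        return m.groups() if m else None
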
Oct 11 02:29:20 compute-0 systemd[1]: var-lib-containers-storage-overlay-dc9dcce5871d2843bbad670e3ace75005a302e67b92463bbd238c2fa3d0bec39-merged.mount: Deactivated successfully.
Oct 11 02:29:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:29:20 compute-0 nova_compute[356901]: 2025-10-11 02:29:20.767 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:20 compute-0 podman[437533]: 2025-10-11 02:29:20.955825479 +0000 UTC m=+0.834427088 container remove 932601b990e85ef601de4fbf1e890e802740b9778c7197ba728f457775fdec66 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_heisenberg, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:29:20 compute-0 systemd[1]: libpod-conmon-932601b990e85ef601de4fbf1e890e802740b9778c7197ba728f457775fdec66.scope: Deactivated successfully.
Oct 11 02:29:21 compute-0 sudo[437401]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:21 compute-0 sshd-session[437499]: Failed password for invalid user admin from 121.227.153.123 port 59044 ssh2
Oct 11 02:29:21 compute-0 sudo[437547]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:29:21 compute-0 sudo[437547]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:21 compute-0 sudo[437547]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:21 compute-0 sudo[437572]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:29:21 compute-0 sudo[437572]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:21 compute-0 sudo[437572]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:21 compute-0 sudo[437597]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:29:21 compute-0 sudo[437597]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:21 compute-0 sudo[437597]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:21 compute-0 ceph-mon[191930]: pgmap v1541: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:21 compute-0 sudo[437622]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:29:21 compute-0 sudo[437622]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:21 compute-0 sshd-session[437499]: Connection closed by invalid user admin 121.227.153.123 port 59044 [preauth]
Oct 11 02:29:22 compute-0 podman[437689]: 2025-10-11 02:29:22.139335036 +0000 UTC m=+0.058312658 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:29:22 compute-0 podman[437689]: 2025-10-11 02:29:22.23974748 +0000 UTC m=+0.158725052 container create 2db3ba0037001a81827857c90eb744d74ab2d19c06a688a509b6fb21d87991ae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_johnson, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:29:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1542: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:22 compute-0 systemd[1]: Started libpod-conmon-2db3ba0037001a81827857c90eb744d74ab2d19c06a688a509b6fb21d87991ae.scope.
Oct 11 02:29:22 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:29:22 compute-0 podman[437689]: 2025-10-11 02:29:22.540780086 +0000 UTC m=+0.459757748 container init 2db3ba0037001a81827857c90eb744d74ab2d19c06a688a509b6fb21d87991ae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_johnson, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True)
Oct 11 02:29:22 compute-0 podman[437689]: 2025-10-11 02:29:22.551690778 +0000 UTC m=+0.470668350 container start 2db3ba0037001a81827857c90eb744d74ab2d19c06a688a509b6fb21d87991ae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_johnson, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:29:22 compute-0 compassionate_johnson[437704]: 167 167
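
compassionate_johnson lives for a fraction of a second and prints only "167 167": the uid and gid owning /var/lib/ceph inside the image, consistent with cephadm probing the image for the ceph user before it writes files on the host (167 is the stock ceph uid/gid). A rough reproduction of the probe, as a sketch rather than cephadm's actual code:

    import subprocess

    IMAGE = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")

    # Run stat inside the image; args after IMAGE go to the entrypoint.
    out = subprocess.run(
        ["podman", "run", "--rm", "--entrypoint", "stat", IMAGE,
         "-c", "%u %g", "/var/lib/ceph"],
        capture_output=True, text=True, check=True)
    uid, gid = out.stdout.split()   # "167 167" for this image
    print(uid, gid)
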
Oct 11 02:29:22 compute-0 systemd[1]: libpod-2db3ba0037001a81827857c90eb744d74ab2d19c06a688a509b6fb21d87991ae.scope: Deactivated successfully.
Oct 11 02:29:22 compute-0 podman[437689]: 2025-10-11 02:29:22.630321399 +0000 UTC m=+0.549299001 container attach 2db3ba0037001a81827857c90eb744d74ab2d19c06a688a509b6fb21d87991ae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_johnson, org.label-schema.vendor=CentOS, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:29:22 compute-0 podman[437689]: 2025-10-11 02:29:22.632142849 +0000 UTC m=+0.551120451 container died 2db3ba0037001a81827857c90eb744d74ab2d19c06a688a509b6fb21d87991ae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_johnson, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:29:22 compute-0 ceph-mon[191930]: pgmap v1542: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:22 compute-0 systemd[1]: var-lib-containers-storage-overlay-d785047a80c6b0f7f8239deceb0560d26909abf2e60dbe5c7c036f62e3fb7cdb-merged.mount: Deactivated successfully.
Oct 11 02:29:23 compute-0 podman[437689]: 2025-10-11 02:29:23.014705477 +0000 UTC m=+0.933683049 container remove 2db3ba0037001a81827857c90eb744d74ab2d19c06a688a509b6fb21d87991ae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_johnson, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 02:29:23 compute-0 systemd[1]: libpod-conmon-2db3ba0037001a81827857c90eb744d74ab2d19c06a688a509b6fb21d87991ae.scope: Deactivated successfully.
Oct 11 02:29:23 compute-0 sshd-session[437675]: Invalid user admin from 121.227.153.123 port 58344
Oct 11 02:29:23 compute-0 sshd-session[437675]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:29:23 compute-0 podman[437728]: 2025-10-11 02:29:23.218584421 +0000 UTC m=+0.039758894 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:29:23 compute-0 sshd-session[437675]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:29:23 compute-0 podman[437728]: 2025-10-11 02:29:23.32568119 +0000 UTC m=+0.146855653 container create 3977f00d00f7e35cdd591e469c87eda1572231d46beceb0284d2501f2edfe79f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=focused_davinci, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.build-date=20250507, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:29:23 compute-0 systemd[1]: Started libpod-conmon-3977f00d00f7e35cdd591e469c87eda1572231d46beceb0284d2501f2edfe79f.scope.
Oct 11 02:29:23 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:29:23 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8a99efb0cc1ce1c23cbaf13e29875c29492af26e0ce8fb25c2908adcb600f5f2/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:29:23 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8a99efb0cc1ce1c23cbaf13e29875c29492af26e0ce8fb25c2908adcb600f5f2/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:29:23 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8a99efb0cc1ce1c23cbaf13e29875c29492af26e0ce8fb25c2908adcb600f5f2/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:29:23 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8a99efb0cc1ce1c23cbaf13e29875c29492af26e0ce8fb25c2908adcb600f5f2/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
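
These four kernel lines fire once per bind-mount into the new container: the underlying XFS filesystem was created without the bigtime feature, so its inode timestamps are 32-bit and top out at 0x7fffffff seconds after the epoch. That limit in human terms:

    import datetime

    # 0x7fffffff = 2147483647 seconds since 1970-01-01T00:00:00Z
    print(datetime.datetime.fromtimestamp(0x7FFFFFFF, datetime.timezone.utc))
    # -> 2038-01-19 03:14:07+00:00
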
Oct 11 02:29:23 compute-0 podman[437728]: 2025-10-11 02:29:23.651839934 +0000 UTC m=+0.473014397 container init 3977f00d00f7e35cdd591e469c87eda1572231d46beceb0284d2501f2edfe79f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=focused_davinci, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:29:23 compute-0 podman[437728]: 2025-10-11 02:29:23.670343197 +0000 UTC m=+0.491517650 container start 3977f00d00f7e35cdd591e469c87eda1572231d46beceb0284d2501f2edfe79f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=focused_davinci, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 02:29:23 compute-0 podman[437728]: 2025-10-11 02:29:23.721214837 +0000 UTC m=+0.542389380 container attach 3977f00d00f7e35cdd591e469c87eda1572231d46beceb0284d2501f2edfe79f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=focused_davinci, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0)
Oct 11 02:29:24 compute-0 podman[437751]: 2025-10-11 02:29:24.27235549 +0000 UTC m=+0.133284517 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:29:24 compute-0 podman[437749]: 2025-10-11 02:29:24.285653789 +0000 UTC m=+0.159267428 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, tcib_managed=true)
Oct 11 02:29:24 compute-0 podman[437750]: 2025-10-11 02:29:24.298893046 +0000 UTC m=+0.161148900 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, architecture=x86_64, container_name=openstack_network_exporter, io.buildah.version=1.33.7, name=ubi9-minimal, io.openshift.expose-services=, release=1755695350, version=9.6, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., distribution-scope=public, com.redhat.component=ubi9-minimal-container, vendor=Red Hat, Inc., config_id=edpm, vcs-type=git, managed_by=edpm_ansible, url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, build-date=2025-08-20T13:12:41, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b)
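
The three podman health_status events above come from podman's per-container healthcheck timers: each runs the configured test (the healthcheck/test key inside config_data) and here reports health_status=healthy with a failing streak of 0. The same check can be re-run on demand; a sketch for one of the containers named above:

    import subprocess

    # Exit code 0 means the container's own healthcheck passed.
    r = subprocess.run(["podman", "healthcheck", "run", "node_exporter"])
    print("healthy" if r.returncode == 0 else "unhealthy")
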
Oct 11 02:29:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1543: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:24 compute-0 nova_compute[356901]: 2025-10-11 02:29:24.610 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:24 compute-0 ceph-mon[191930]: pgmap v1543: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:24 compute-0 focused_davinci[437744]: {
Oct 11 02:29:24 compute-0 focused_davinci[437744]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:29:24 compute-0 focused_davinci[437744]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:29:24 compute-0 focused_davinci[437744]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:29:24 compute-0 focused_davinci[437744]:         "osd_id": 1,
Oct 11 02:29:24 compute-0 focused_davinci[437744]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:29:24 compute-0 focused_davinci[437744]:         "type": "bluestore"
Oct 11 02:29:24 compute-0 focused_davinci[437744]:     },
Oct 11 02:29:24 compute-0 focused_davinci[437744]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:29:24 compute-0 focused_davinci[437744]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:29:24 compute-0 focused_davinci[437744]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:29:24 compute-0 focused_davinci[437744]:         "osd_id": 2,
Oct 11 02:29:24 compute-0 focused_davinci[437744]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:29:24 compute-0 focused_davinci[437744]:         "type": "bluestore"
Oct 11 02:29:24 compute-0 focused_davinci[437744]:     },
Oct 11 02:29:24 compute-0 focused_davinci[437744]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:29:24 compute-0 focused_davinci[437744]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:29:24 compute-0 focused_davinci[437744]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:29:24 compute-0 focused_davinci[437744]:         "osd_id": 0,
Oct 11 02:29:24 compute-0 focused_davinci[437744]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:29:24 compute-0 focused_davinci[437744]:         "type": "bluestore"
Oct 11 02:29:24 compute-0 focused_davinci[437744]:     }
Oct 11 02:29:24 compute-0 focused_davinci[437744]: }
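
focused_davinci's JSON is the companion `ceph-volume raw list --format json` view requested by the cephadm call logged at 02:29:21: keyed by OSD uuid rather than OSD id, each entry naming the bluestore device-mapper path. The uuid equals the ceph.osd_fsid tag from the earlier lvm listing, so the two views can be joined; a sketch continuing the one above (raw_list.json again a hypothetical file name):

    import json

    raw = json.load(open("raw_list.json"))   # this block, keyed by osd uuid
    lvm = json.load(open("lvm_list.json"))   # earlier block, keyed by osd id

    by_fsid = {lv["tags"]["ceph.osd_fsid"]: lv
               for lvs in lvm.values() for lv in lvs}

    for osd_uuid, entry in sorted(raw.items()):
        lv = by_fsid[osd_uuid]
        print(entry["osd_id"], entry["device"], "backed by", lv["devices"][0])

For this host that resolves OSDs 0, 1 and 2 to /dev/loop3, /dev/loop4 and /dev/loop5 respectively.
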
Oct 11 02:29:24 compute-0 systemd[1]: libpod-3977f00d00f7e35cdd591e469c87eda1572231d46beceb0284d2501f2edfe79f.scope: Deactivated successfully.
Oct 11 02:29:24 compute-0 systemd[1]: libpod-3977f00d00f7e35cdd591e469c87eda1572231d46beceb0284d2501f2edfe79f.scope: Consumed 1.204s CPU time.
Oct 11 02:29:24 compute-0 podman[437728]: 2025-10-11 02:29:24.894723217 +0000 UTC m=+1.715897710 container died 3977f00d00f7e35cdd591e469c87eda1572231d46beceb0284d2501f2edfe79f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=focused_davinci, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0)
Oct 11 02:29:25 compute-0 sshd-session[437675]: Failed password for invalid user admin from 121.227.153.123 port 58344 ssh2
Oct 11 02:29:25 compute-0 systemd[1]: var-lib-containers-storage-overlay-8a99efb0cc1ce1c23cbaf13e29875c29492af26e0ce8fb25c2908adcb600f5f2-merged.mount: Deactivated successfully.
Oct 11 02:29:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:29:25 compute-0 podman[437728]: 2025-10-11 02:29:25.670675723 +0000 UTC m=+2.491850216 container remove 3977f00d00f7e35cdd591e469c87eda1572231d46beceb0284d2501f2edfe79f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=focused_davinci, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:29:25 compute-0 systemd[1]: libpod-conmon-3977f00d00f7e35cdd591e469c87eda1572231d46beceb0284d2501f2edfe79f.scope: Deactivated successfully.
Oct 11 02:29:25 compute-0 sudo[437622]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:29:25 compute-0 nova_compute[356901]: 2025-10-11 02:29:25.771 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:29:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:29:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:29:26 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 595c20ea-a7b3-4d46-9c86-51fcecf9ce21 does not exist
Oct 11 02:29:26 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev d60a0247-4724-418e-8b5c-1abcd217d3ee does not exist
Oct 11 02:29:26 compute-0 sudo[437852]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:29:26 compute-0 sudo[437852]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:26 compute-0 sudo[437852]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:26 compute-0 sudo[437877]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:29:26 compute-0 sudo[437877]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:29:26 compute-0 sudo[437877]: pam_unix(sudo:session): session closed for user root
Oct 11 02:29:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1544: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:29:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:29:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:29:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:29:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:29:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:29:26 compute-0 sshd-session[437675]: Connection closed by invalid user admin 121.227.153.123 port 58344 [preauth]
Oct 11 02:29:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:29:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:29:27 compute-0 ceph-mon[191930]: pgmap v1544: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:29:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3669052563' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:29:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:29:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3669052563' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:29:28 compute-0 sshd-session[437902]: Invalid user admin from 121.227.153.123 port 58360
Oct 11 02:29:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3669052563' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:29:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3669052563' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:29:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1545: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:28 compute-0 sshd-session[437902]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:29:28 compute-0 sshd-session[437902]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:29:29 compute-0 ceph-mon[191930]: pgmap v1545: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:29 compute-0 nova_compute[356901]: 2025-10-11 02:29:29.612 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:29 compute-0 podman[157119]: time="2025-10-11T02:29:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:29:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:29:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:29:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:29:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9057 "" "Go-http-client/1.1"
Oct 11 02:29:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1546: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:30 compute-0 sshd-session[437902]: Failed password for invalid user admin from 121.227.153.123 port 58360 ssh2
Oct 11 02:29:30 compute-0 unix_chkpwd[437906]: password check failed for user (root)
Oct 11 02:29:30 compute-0 sshd-session[437904]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=80.94.93.233  user=root
Oct 11 02:29:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:29:30 compute-0 ceph-mon[191930]: pgmap v1546: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:30 compute-0 nova_compute[356901]: 2025-10-11 02:29:30.774 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:31 compute-0 openstack_network_exporter[374316]: ERROR   02:29:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:29:31 compute-0 openstack_network_exporter[374316]: ERROR   02:29:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:29:31 compute-0 openstack_network_exporter[374316]: ERROR   02:29:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:29:31 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:29:31 compute-0 openstack_network_exporter[374316]: ERROR   02:29:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:29:31 compute-0 openstack_network_exporter[374316]: ERROR   02:29:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:29:31 compute-0 openstack_network_exporter[374316]: 
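
The openstack_network_exporter errors say it found no ovsdb-server or ovn-northd control sockets in its rundir, and that ovs-vswitchd has no userspace (netdev) datapath to answer the dpif-netdev/pmd-* calls — the expected shape on a kernel-datapath compute node that does not run ovn-northd. A quick check of the first complaint, assuming the default rundir /var/run/openvswitch:

    import glob

    # ovsdb-server / ovs-vswitchd create <name>.<pid>.ctl sockets here when up.
    print(glob.glob("/var/run/openvswitch/*.ctl"))
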
Oct 11 02:29:31 compute-0 sshd-session[437902]: Connection closed by invalid user admin 121.227.153.123 port 58360 [preauth]
Oct 11 02:29:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1547: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:32 compute-0 sshd-session[437904]: Failed password for root from 80.94.93.233 port 22304 ssh2
Oct 11 02:29:33 compute-0 podman[437909]: 2025-10-11 02:29:33.238910567 +0000 UTC m=+0.117171120 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.expose-services=, container_name=kepler, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vendor=Red Hat, Inc., version=9.4, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, maintainer=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9, managed_by=edpm_ansible, release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, build-date=2024-09-18T21:23:30, name=ubi9, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, com.redhat.component=ubi9-container, vcs-type=git, io.buildah.version=1.29.0, summary=Provides the latest release of Red Hat Universal Base Image 9., release=1214.1726694543, architecture=x86_64)
Oct 11 02:29:33 compute-0 sshd-session[437907]: Invalid user admin from 121.227.153.123 port 50166
Oct 11 02:29:33 compute-0 ceph-mon[191930]: pgmap v1547: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:33 compute-0 sshd-session[437907]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:29:33 compute-0 sshd-session[437907]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:29:33 compute-0 unix_chkpwd[437930]: password check failed for user (root)
Oct 11 02:29:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1548: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:34 compute-0 nova_compute[356901]: 2025-10-11 02:29:34.615 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:34 compute-0 ceph-mon[191930]: pgmap v1548: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:35 compute-0 sshd-session[437907]: Failed password for invalid user admin from 121.227.153.123 port 50166 ssh2
Oct 11 02:29:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:29:35 compute-0 sshd-session[437904]: Failed password for root from 80.94.93.233 port 22304 ssh2
Oct 11 02:29:35 compute-0 nova_compute[356901]: 2025-10-11 02:29:35.779 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1549: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:37 compute-0 unix_chkpwd[437931]: password check failed for user (root)
Oct 11 02:29:37 compute-0 sshd-session[437907]: Connection closed by invalid user admin 121.227.153.123 port 50166 [preauth]
Oct 11 02:29:37 compute-0 ceph-mon[191930]: pgmap v1549: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1550: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:38 compute-0 sshd-session[437932]: Invalid user admin from 121.227.153.123 port 50178
Oct 11 02:29:38 compute-0 sshd-session[437932]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:29:38 compute-0 sshd-session[437932]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:29:38 compute-0 sshd-session[437904]: Failed password for root from 80.94.93.233 port 22304 ssh2
Oct 11 02:29:39 compute-0 podman[437934]: 2025-10-11 02:29:39.21377768 +0000 UTC m=+0.111256616 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:29:39 compute-0 podman[437937]: 2025-10-11 02:29:39.231578334 +0000 UTC m=+0.113260532 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_managed=true, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:29:39 compute-0 podman[437936]: 2025-10-11 02:29:39.241722465 +0000 UTC m=+0.108914161 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, config_id=edpm, tcib_managed=true, container_name=ceilometer_agent_compute, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4)
Oct 11 02:29:39 compute-0 podman[437935]: 2025-10-11 02:29:39.256812874 +0000 UTC m=+0.134415868 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_controller, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, container_name=ovn_controller, io.buildah.version=1.41.3)
Oct 11 02:29:39 compute-0 ceph-mon[191930]: pgmap v1550: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:39 compute-0 nova_compute[356901]: 2025-10-11 02:29:39.618 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:40 compute-0 sshd-session[437932]: Failed password for invalid user admin from 121.227.153.123 port 50178 ssh2
Oct 11 02:29:40 compute-0 sshd-session[437904]: Received disconnect from 80.94.93.233 port 22304:11:  [preauth]
Oct 11 02:29:40 compute-0 sshd-session[437904]: Disconnected from authenticating user root 80.94.93.233 port 22304 [preauth]
Oct 11 02:29:40 compute-0 sshd-session[437904]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=80.94.93.233  user=root
Oct 11 02:29:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1551: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:40 compute-0 sshd-session[437932]: Connection closed by invalid user admin 121.227.153.123 port 50178 [preauth]
Oct 11 02:29:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:29:40 compute-0 nova_compute[356901]: 2025-10-11 02:29:40.784 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:41 compute-0 unix_chkpwd[438021]: password check failed for user (root)
Oct 11 02:29:41 compute-0 sshd-session[438017]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=80.94.93.233  user=root
Oct 11 02:29:41 compute-0 ceph-mon[191930]: pgmap v1551: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:41 compute-0 sshd-session[438019]: Invalid user admin from 121.227.153.123 port 54698
Oct 11 02:29:42 compute-0 sshd-session[438019]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:29:42 compute-0 sshd-session[438019]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:29:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1552: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:43 compute-0 sshd-session[438017]: Failed password for root from 80.94.93.233 port 10850 ssh2
Oct 11 02:29:43 compute-0 ceph-mon[191930]: pgmap v1552: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:43 compute-0 sshd-session[438019]: Failed password for invalid user admin from 121.227.153.123 port 54698 ssh2
Oct 11 02:29:44 compute-0 unix_chkpwd[438022]: password check failed for user (root)
Oct 11 02:29:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1553: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:44 compute-0 nova_compute[356901]: 2025-10-11 02:29:44.622 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:44 compute-0 podman[438024]: 2025-10-11 02:29:44.835817164 +0000 UTC m=+0.108075948 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=iscsid, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:29:44 compute-0 podman[438023]: 2025-10-11 02:29:44.838454657 +0000 UTC m=+0.115635648 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=multipathd, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:29:45 compute-0 ceph-mon[191930]: pgmap v1553: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:45 compute-0 sshd-session[438019]: Connection closed by invalid user admin 121.227.153.123 port 54698 [preauth]
Oct 11 02:29:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:29:45 compute-0 nova_compute[356901]: 2025-10-11 02:29:45.789 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:46 compute-0 sshd-session[438017]: Failed password for root from 80.94.93.233 port 10850 ssh2
Oct 11 02:29:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1554: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:46 compute-0 sshd-session[438060]: Invalid user admin from 121.227.153.123 port 54706
Oct 11 02:29:47 compute-0 sshd-session[438060]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:29:47 compute-0 sshd-session[438060]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:29:47 compute-0 ceph-mon[191930]: pgmap v1554: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:47 compute-0 unix_chkpwd[438062]: password check failed for user (root)
Oct 11 02:29:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1555: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:48 compute-0 nova_compute[356901]: 2025-10-11 02:29:48.752 2 DEBUG oslo_concurrency.lockutils [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:29:48 compute-0 nova_compute[356901]: 2025-10-11 02:29:48.752 2 DEBUG oslo_concurrency.lockutils [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78" acquired by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:29:48 compute-0 nova_compute[356901]: 2025-10-11 02:29:48.753 2 DEBUG oslo_concurrency.lockutils [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:29:48 compute-0 nova_compute[356901]: 2025-10-11 02:29:48.753 2 DEBUG oslo_concurrency.lockutils [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" acquired by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:29:48 compute-0 nova_compute[356901]: 2025-10-11 02:29:48.754 2 DEBUG oslo_concurrency.lockutils [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" "released" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:29:48 compute-0 nova_compute[356901]: 2025-10-11 02:29:48.755 2 INFO nova.compute.manager [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Terminating instance
Oct 11 02:29:48 compute-0 nova_compute[356901]: 2025-10-11 02:29:48.757 2 DEBUG nova.compute.manager [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Start destroying the instance on the hypervisor. _shutdown_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:3120
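The Acquiring/acquired/released DEBUG triplets above come from oslo.concurrency: nova serializes work on a single instance by taking a lock named after the instance UUID (plus a second, "-events"-suffixed lock for the instance event table) before do_terminate_instance proceeds. A minimal sketch of that locking pattern, assuming only the oslo.concurrency package; the function body is illustrative:

    from oslo_concurrency import lockutils

    # Pattern behind the "Acquiring lock ... / Lock ... acquired / Lock ...
    # released" lines: a named lock keyed on the instance UUID serializes all
    # operations that touch that instance within this process.
    INSTANCE_UUID = "d60d7ea1-5d00-4902-90e6-3ae67eb09a78"

    @lockutils.synchronized(INSTANCE_UUID)
    def do_terminate_instance():
        # destroy on hypervisor, deallocate network, update resource tracker
        pass

    do_terminate_instance()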
Oct 11 02:29:48 compute-0 kernel: tapa7108c4c-c9 (unregistering): left promiscuous mode
Oct 11 02:29:48 compute-0 NetworkManager[44908]: <info>  [1760149788.9348] device (tapa7108c4c-c9): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Oct 11 02:29:48 compute-0 ovn_controller[88370]: 2025-10-11T02:29:48Z|00050|binding|INFO|Releasing lport a7108c4c-c96c-4354-a4bf-99b1d2160514 from this chassis (sb_readonly=0)
Oct 11 02:29:48 compute-0 ovn_controller[88370]: 2025-10-11T02:29:48Z|00051|binding|INFO|Setting lport a7108c4c-c96c-4354-a4bf-99b1d2160514 down in Southbound
Oct 11 02:29:48 compute-0 nova_compute[356901]: 2025-10-11 02:29:48.953 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:48 compute-0 ovn_controller[88370]: 2025-10-11T02:29:48Z|00052|binding|INFO|Removing iface tapa7108c4c-c9 ovn-installed in OVS
Oct 11 02:29:48 compute-0 nova_compute[356901]: 2025-10-11 02:29:48.958 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:48 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:48.971 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:c2:ee:14 192.168.0.80'], port_security=['fa:16:3e:c2:ee:14 192.168.0.80'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'name': 'vnf-scaleup_group-wzkjkvgckve2-ittzoa6m3dmq-egfg3ceao3k4-port-r4lbf7nhvsnm', 'neutron:cidrs': '192.168.0.80/24', 'neutron:device_id': 'd60d7ea1-5d00-4902-90e6-3ae67eb09a78', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'neutron:port_capabilities': '', 'neutron:port_name': 'vnf-scaleup_group-wzkjkvgckve2-ittzoa6m3dmq-egfg3ceao3k4-port-r4lbf7nhvsnm', 'neutron:project_id': '97026531b3404a11869cb85a059c4a0d', 'neutron:revision_number': '4', 'neutron:security_group_ids': 'c0c90d87-d29f-4e96-98a1-ffb301424ea4', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:port_fip': '192.168.122.245', 'neutron:host_id': 'compute-0.ctlplane.example.com'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=d19b0dd1-1656-436b-911a-8f2dcc98f6bf, chassis=[], tunnel_key=4, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=a7108c4c-c96c-4354-a4bf-99b1d2160514) old=Port_Binding(up=[True], chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:29:48 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:48.974 286362 INFO neutron.agent.ovn.metadata.agent [-] Port a7108c4c-c96c-4354-a4bf-99b1d2160514 in datapath d4dded16-3268-4cf9-bb6b-aa5200d5e4ec unbound from our chassis
Oct 11 02:29:48 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:48.980 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network d4dded16-3268-4cf9-bb6b-aa5200d5e4ec
Oct 11 02:29:48 compute-0 nova_compute[356901]: 2025-10-11 02:29:48.980 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:49 compute-0 sshd-session[438060]: Failed password for invalid user admin from 121.227.153.123 port 54706 ssh2
Oct 11 02:29:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:49.009 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[4eeae1ac-db4e-4285-8241-e112c05c9d92]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:29:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:49.058 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[1d3307a8-c1d3-4985-8ca8-f5a8cc0c9b43]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:29:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:49.062 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[6fc66528-77ae-466c-bfb4-9b38d9b31f88]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:29:49 compute-0 systemd[1]: machine-qemu\x2d2\x2dinstance\x2d00000002.scope: Deactivated successfully.
Oct 11 02:29:49 compute-0 systemd[1]: machine-qemu\x2d2\x2dinstance\x2d00000002.scope: Consumed 6min 34.702s CPU time.
Oct 11 02:29:49 compute-0 systemd-machined[137586]: Machine qemu-2-instance-00000002 terminated.
Oct 11 02:29:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:49.105 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[a5ea7786-b6e4-4b66-962b-8b0a32a7ac8a]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:29:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:49.132 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[7cb177fa-a76f-4436-a7a3-4dd17c4ebc75]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapd4dded16-31'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:11:50:48'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 8, 'tx_packets': 11, 'rx_bytes': 832, 'tx_bytes': 606, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 8, 'tx_packets': 11, 'rx_bytes': 832, 'tx_bytes': 606, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 15], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 548909, 'reachable_time': 31912, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 8, 'inoctets': 720, 'indelivers': 1, 'outforwdatagrams': 0, 'outpkts': 3, 'outoctets': 228, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 8, 'outmcastpkts': 3, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 720, 'outmcastoctets': 228, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 8, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 1, 'inerrors': 0, 'outmsgs': 3, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 438074, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:29:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:49.156 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[63b4ffc6-73e5-48db-93c8-93e0099ad5e1]: (4, ({'family': 2, 'prefixlen': 32, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '169.254.169.254'], ['IFA_LOCAL', '169.254.169.254'], ['IFA_BROADCAST', '169.254.169.254'], ['IFA_LABEL', 'tapd4dded16-31'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 548926, 'tstamp': 548926}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 438075, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'}, {'family': 2, 'prefixlen': 24, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '192.168.0.2'], ['IFA_LOCAL', '192.168.0.2'], ['IFA_BROADCAST', '192.168.0.255'], ['IFA_LABEL', 'tapd4dded16-31'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 548931, 'tstamp': 548931}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 438075, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'})) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:29:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:49.159 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapd4dded16-30, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.161 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.171 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:49.172 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapd4dded16-30, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:29:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:49.173 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:29:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:49.173 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tapd4dded16-30, col_values=(('external_ids', {'iface-id': 'f0f8488b-423f-46a5-8a6a-984c2ae3438e'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:29:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:49.174 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.193 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.202 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.221 2 INFO nova.virt.libvirt.driver [-] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Instance destroyed successfully.
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.222 2 DEBUG nova.objects.instance [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lazy-loading 'resources' on Instance uuid d60d7ea1-5d00-4902-90e6-3ae67eb09a78 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.243 2 DEBUG nova.virt.libvirt.vif [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:19:56Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=<?>,disable_terminate=False,display_description=None,display_name='vn-vgckve2-ittzoa6m3dmq-egfg3ceao3k4-vnf-rvnztbwt2zgh',ec2_ids=<?>,ephemeral_gb=1,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(1),hidden=False,host='compute-0.ctlplane.example.com',hostname='vn-vgckve2-ittzoa6m3dmq-egfg3ceao3k4-vnf-rvnztbwt2zgh',id=2,image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',info_cache=InstanceInfoCache,instance_type_id=1,kernel_id='',key_data=None,key_name=None,keypairs=<?>,launch_index=0,launched_at=2025-10-11T02:20:07Z,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=512,metadata={metering.server_group='3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'},migration_context=<?>,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=<?>,power_state=1,progress=0,project_id='97026531b3404a11869cb85a059c4a0d',ramdisk_id='',reservation_id='r-hkja6gj9',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='reader,member,admin',image_base_image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',image_container_format='bare',image_disk_format='qcow2',image_hw_cdrom_bus='sata',image_hw_disk_bus='virtio',image_hw_input_bus='usb',image_hw_machine_type='q35',image_hw_pointer_model='usbtablet',image_hw_video_model='virtio',image_hw_vif_model='virtio',image_min_disk='1',image_min_ram='0',image_owner_specified.openstack.md5='',image_owner_specified.openstack.object='images/cirros',image_owner_specified.openstack.sha256='',owner_project_name='admin',owner_user_name='admin'},tags=<?>,task_state='deleting',terminated_at=None,trusted_certs=<?>,updated_at=2025-10-11T02:20:07Z,user_data='Q29udGVudC1UeXBlOiBtdWx0aXBhcnQvbWl4ZWQ7IGJvdW5kYXJ5PSI9PT09PT09PT09PT09PT0zMTQ2MTUzMDI2MzE5OTEyMDEyPT0iCk1JTUUtVmVyc2lvbjogMS4wCgotLT09PT09PT09PT09PT09PTMxNDYxNTMwMjYzMTk5MTIwMTI9PQpDb250ZW50LVR5cGU6IHRleHQvY2xvdWQtY29uZmlnOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2xvdWQtY29uZmlnIgoKCgojIENhcHR1cmUgYWxsIHN1YnByb2Nlc3Mgb3V0cHV0IGludG8gYSBsb2dmaWxlCiMgVXNlZnVsIGZvciB0cm91Ymxlc2hvb3RpbmcgY2xvdWQtaW5pdCBpc3N1ZXMKb3V0cHV0OiB7YWxsOiAnfCB0ZWUgLWEgL3Zhci9sb2cvY2xvdWQtaW5pdC1vdXRwdXQubG9nJ30KCi0tPT09PT09PT09PT09PT09MzE0NjE1MzAyNjMxOTkxMjAxMj09CkNvbnRlbnQtVHlwZTogdGV4dC9jbG91ZC1ib290aG9vazsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImJvb3Rob29rLnNoIgoKIyEvdXNyL2Jpbi9iYXNoCgojIEZJWE1FKHNoYWRvd2VyKSB0aGlzIGlzIGEgd29ya2Fyb3VuZCBmb3IgY2xvdWQtaW5pdCAwLjYuMyBwcmVzZW50IGluIFVidW50dQojIDEyLjA0IExUUzoKIyBodHRwczovL2J1Z3MubGF1bmNocGFkLm5ldC9oZWF0LytidWcvMTI1NzQxMAojCiMgVGhlIG9sZCBjbG91ZC1pbml0IGRvZXNuJ3QgY3JlYXRlIHRoZSB1c2VycyBkaXJlY3RseSBzbyB0aGUgY29tbWFuZHMgdG8gZG8KIyB0aGlzIGFyZSBpbmplY3RlZCB0aG91Z2ggbm92YV91dGlsc
y5weS4KIwojIE9uY2Ugd2UgZHJvcCBzdXBwb3J0IGZvciAwLjYuMywgd2UgY2FuIHNhZmVseSByZW1vdmUgdGhpcy4KCgojIGluIGNhc2UgaGVhdC1jZm50b29scyBoYXMgYmVlbiBpbnN0YWxsZWQgZnJvbSBwYWNrYWdlIGJ1dCBubyBzeW1saW5rcwojIGFyZSB5ZXQgaW4gL29wdC9hd3MvYmluLwpjZm4tY3JlYXRlLWF3cy1zeW1saW5rcwoKIyBEbyBub3QgcmVtb3ZlIC0gdGhlIGNsb3VkIGJvb3Rob29rIHNob3VsZCBhbHdheXMgcmV0dXJuIHN1Y2Nlc3MKZXhpdCAwCgotLT09PT09PT09PT09PT09PTMxNDYxNTMwMjYzMTk5MTIwMTI9PQpDb250ZW50LVR5cGU6IHRleHQvcGFydC1oYW5kbGVyOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0icGFydC1oYW5kbGVyLnB5IgoKIyBwYXJ0LWhhbmRsZXIKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wCiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZWQgdW5kZXIgdGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBvcwppbXBvcnQgc3lzCgoKZGVmIGxpc3RfdHlwZXMoKToKICAgIHJldHVybiBbInRleHQveC1jZm5pbml0ZGF0YSJdCgoKZGVmIGhhbmRsZV9wYXJ0KGRhdGEsIGN0eXBlLCBmaWxlbmFtZSwgcGF5bG9hZCk6CiAgICBpZiBjdHlwZSA9PSAiX19iZWdpbl9fIjoKICAgICAgICB0cnk6CiAgICAgICAgICAgIG9zLm1ha2VkaXJzKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzJywgaW50KCI3MDAiLCA4KSkKICAgICAgICBleGNlcHQgT1NFcnJvcjoKICAgICAgICAgICAgZXhfdHlwZSwgZSwgdGIgPSBzeXMuZXhjX2luZm8oKQogICAgICAgICAgICBpZiBlLmVycm5vICE9IGVycm5vLkVFWElTVDoKICAgICAgICAgICAgICAgIHJhaXNlCiAgICAgICAgcmV0dXJuCgogICAgaWYgY3R5cGUgPT0gIl9fZW5kX18iOgogICAgICAgIHJldHVybgoKICAgIHRpbWVzdGFtcCA9IGRhdGV0aW1lLmRhdGV0aW1lLm5vdygpCiAgICB3aXRoIG9wZW4oJy92YXIvbG9nL3BhcnQtaGFuZGxlci5sb2cnLCAnYScpIGFzIGxvZzoKICAgICAgICBsb2cud3JpdGUoJyVzIGZpbGVuYW1lOiVzLCBjdHlwZTolc1xuJyAlICh0aW1lc3RhbXAsIGZpbGVuYW1lLCBjdHlwZSkpCgogICAgaWYgY3R5cGUgPT0gJ3RleHQveC1jZm5pbml0ZGF0YSc6CiAgICAgICAgd2l0aCBvcGVuKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzLyVzJyAlIGZpbGVuYW1lLCAndycpIGFzIGY6CiAgICAgICAgICAgIGYud3JpdGUocGF5bG9hZCkKCiAgICAgICAgIyBUT0RPKHNkYWtlKSBob3BlZnVsbHkgdGVtcG9yYXJ5IHVudGlsIHVzZXJzIG1vdmUgdG8gaGVhdC1jZm50b29scy0xLjMKICAgICAgICB3aXRoIG9wZW4oJy92YXIvbGliL2Nsb3VkL2RhdGEvJXMnICUgZmlsZW5hbWUsICd3JykgYXMgZjoKICAgICAgICAgICAgZi53cml0ZShwYXlsb2FkKQoKLS09PT09PT09PT09PT09PT0zMTQ2MTUzMDI2MzE5OTEyMDEyPT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtY2ZuaW5pdGRhdGE7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJjZm4tdXNlcmRhdGEiCgoKLS09PT09PT09PT09PT09PT0zMTQ2MTUzMDI2MzE5OTEyMDEyPT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtc2hlbGxzY3JpcHQ7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJsb2d1c2VyZGF0YS5weSIKCiMhL3Vzci9iaW4vZW52IHB5dGhvbjMKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wCiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZWQgdW5kZXIgdGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBsb2dnaW5nCmltcG9ydCBvcwppbXBvcnQgc3VicHJvY2VzcwppbXBvcnQgc3lzCgoKVkFSX1BBVEggPSAnL3Zhci9saWIvaGVhdC1jZm50b29scycKTE9HID0gbG9nZ2luZy5nZXRMb2dnZXIoJ2hlYXQtcHJvdmlzaW9uJykKCgpkZWYgaW5pdF9sb2dnaW5nKCk6CiAgICBMT0cuc2V0TGV2ZWwobG9nZ2luZy5JTkZPKQogICAgTE9HLmFkZEhhbmRsZXIobG9nZ2luZy5TdHJlYW1IYW5kbGVyKCkpCiAgICBmaCA9IGxvZ2dpbmcuRmlsZUhhbmRsZXIoIi92YXIvbG9nL2hlYXQtcHJvdmlzaW9uLmxvZyIpCiAgICBvcy5jaG1vZChmaC5iYXNlRmlsZW5hbWUsIGludCgiNjAwIiwgOCkpCiAgICBMT0cuYWRkSGFuZGxlcihmaCkKCgpkZWYgY2FsbChhcmdzKToKCiAgICBjbGFzcyBMb2dTdHJlYW0ob2JqZWN0KToKCiAgICAgICAgZGVmIHdyaXRlKHNlbGYsIGRhdGEpOgogICAgICAgICAgICBMT0cuaW5mbyhkYXRhKQoKICAgIExPRy5pbmZvKCclc1
Oct 11 02:29:49 compute-0 nova_compute[356901]: xuJywgJyAnLmpvaW4oYXJncykpICAjIG5vcWEKICAgIHRyeToKICAgICAgICBscyA9IExvZ1N0cmVhbSgpCiAgICAgICAgcCA9IHN1YnByb2Nlc3MuUG9wZW4oYXJncywgc3Rkb3V0PXN1YnByb2Nlc3MuUElQRSwKICAgICAgICAgICAgICAgICAgICAgICAgICAgICBzdGRlcnI9c3VicHJvY2Vzcy5QSVBFKQogICAgICAgIGRhdGEgPSBwLmNvbW11bmljYXRlKCkKICAgICAgICBpZiBkYXRhOgogICAgICAgICAgICBmb3IgeCBpbiBkYXRhOgogICAgICAgICAgICAgICAgbHMud3JpdGUoeCkKICAgIGV4Y2VwdCBPU0Vycm9yOgogICAgICAgIGV4X3R5cGUsIGV4LCB0YiA9IHN5cy5leGNfaW5mbygpCiAgICAgICAgaWYgZXguZXJybm8gPT0gZXJybm8uRU5PRVhFQzoKICAgICAgICAgICAgTE9HLmVycm9yKCdVc2VyZGF0YSBlbXB0eSBvciBub3QgZXhlY3V0YWJsZTogJXMnLCBleCkKICAgICAgICAgICAgcmV0dXJuIG9zLkVYX09LCiAgICAgICAgZWxzZToKICAgICAgICAgICAgTE9HLmVycm9yKCdPUyBlcnJvciBydW5uaW5nIHVzZXJkYXRhOiAlcycsIGV4KQogICAgICAgICAgICByZXR1cm4gb3MuRVhfT1NFUlIKICAgIGV4Y2VwdCBFeGNlcHRpb246CiAgICAgICAgZXhfdHlwZSwgZXgsIHRiID0gc3lzLmV4Y19pbmZvKCkKICAgICAgICBMT0cuZXJyb3IoJ1Vua25vd24gZXJyb3IgcnVubmluZyB1c2VyZGF0YTogJXMnLCBleCkKICAgICAgICByZXR1cm4gb3MuRVhfU09GVFdBUkUKICAgIHJldHVybiBwLnJldHVybmNvZGUKCgpkZWYgbWFpbigpOgogICAgdXNlcmRhdGFfcGF0aCA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ2Nmbi11c2VyZGF0YScpCiAgICBvcy5jaG1vZCh1c2VyZGF0YV9wYXRoLCBpbnQoIjcwMCIsIDgpKQoKICAgIExPRy5pbmZvKCdQcm92aXNpb24gYmVnYW46ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICByZXR1cm5jb2RlID0gY2FsbChbdXNlcmRhdGFfcGF0aF0pCiAgICBMT0cuaW5mbygnUHJvdmlzaW9uIGRvbmU6ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICBpZiByZXR1cm5jb2RlOgogICAgICAgIHJldHVybiByZXR1cm5jb2RlCgoKaWYgX19uYW1lX18gPT0gJ19fbWFpbl9fJzoKICAgIGluaXRfbG9nZ2luZygpCgogICAgY29kZSA9IG1haW4oKQogICAgaWYgY29kZToKICAgICAgICBMT0cuZXJyb3IoJ1Byb3Zpc2lvbiBmYWlsZWQgd2l0aCBleGl0IGNvZGUgJXMnLCBjb2RlKQogICAgICAgIHN5cy5leGl0KGNvZGUpCgogICAgcHJvdmlzaW9uX2xvZyA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ3Byb3Zpc2lvbi1maW5pc2hlZCcpCiAgICAjIHRvdWNoIHRoZSBmaWxlIHNvIGl0IGlzIHRpbWVzdGFtcGVkIHdpdGggd2hlbiBmaW5pc2hlZAogICAgd2l0aCBvcGVuKHByb3Zpc2lvbl9sb2csICdhJyk6CiAgICAgICAgb3MudXRpbWUocHJvdmlzaW9uX2xvZywgTm9uZSkKCi0tPT09PT09PT09PT09PT09MzE0NjE1MzAyNjMxOTkxMjAxMj09CkNvbnRlbnQtVHlwZTogdGV4dC94LWNmbmluaXRkYXRhOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2ZuLW1ldGFkYXRhLXNlcnZlciIKCmh0dHBzOi8vaGVhdC1jZm5hcGktaW50ZXJuYWwub3BlbnN0YWNrLnN2Yzo4MDAwL3YxLwotLT09PT09PT09PT09PT09PTMxNDYxNTMwMjYzMTk5MTIwMTI9PQpDb250ZW50LVR5cGU6IHRleHQveC1jZm5pbml0ZGF0YTsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImNmbi1ib3RvLWNmZyIKCltCb3RvXQpkZWJ1ZyA9IDAKaXNfc2VjdXJlID0gMApodHRwc192YWxpZGF0ZV9jZXJ0aWZpY2F0ZXMgPSAxCmNmbl9yZWdpb25fbmFtZSA9IGhlYXQKY2ZuX3JlZ2lvbl9lbmRwb2ludCA9IGhlYXQtY2ZuYXBpLWludGVybmFsLm9wZW5zdGFjay5zdmMKLS09PT09PT09PT09PT09PT0zMTQ2MTUzMDI2MzE5OTEyMDEyPT0tLQo=',user_id='d215f3ebbc07435493ccd666fc80109d',uuid=d60d7ea1-5d00-4902-90e6-3ae67eb09a78,vcpu_model=<?>,vcpus=1,vm_mode=None,vm_state='active') vif={"id": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "address": "fa:16:3e:c2:ee:14", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.80", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa7108c4c-c9", "ovs_interfaceid": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} unplug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:828
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.244 2 DEBUG nova.network.os_vif_util [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converting VIF {"id": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "address": "fa:16:3e:c2:ee:14", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.80", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa7108c4c-c9", "ovs_interfaceid": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.244 2 DEBUG nova.network.os_vif_util [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converted object VIFOpenVSwitch(active=True,address=fa:16:3e:c2:ee:14,bridge_name='br-int',has_traffic_filtering=True,id=a7108c4c-c96c-4354-a4bf-99b1d2160514,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tapa7108c4c-c9') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.245 2 DEBUG os_vif [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Unplugging vif VIFOpenVSwitch(active=True,address=fa:16:3e:c2:ee:14,bridge_name='br-int',has_traffic_filtering=True,id=a7108c4c-c96c-4354-a4bf-99b1d2160514,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tapa7108c4c-c9') unplug /usr/lib/python3.9/site-packages/os_vif/__init__.py:109
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.247 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.247 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapa7108c4c-c9, bridge=br-int, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.249 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.255 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.259 2 INFO os_vif [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Successfully unplugged vif VIFOpenVSwitch(active=True,address=fa:16:3e:c2:ee:14,bridge_name='br-int',has_traffic_filtering=True,id=a7108c4c-c96c-4354-a4bf-99b1d2160514,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tapa7108c4c-c9')
Oct 11 02:29:49 compute-0 rsyslogd[187706]: message too long (8192) with configured size 8096, begin of message is: 2025-10-11 02:29:49.243 2 DEBUG nova.virt.libvirt.vif [None req-553af8b7-dab6-46 [v8.2506.0-2.el9 try https://www.rsyslog.com/e/2445 ]
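rsyslogd truncated its copy of the 8192-byte nova VIF line above because it exceeds the configured 8096-byte ceiling (rsyslog's default); the linked error page describes exactly this condition. If the full oslo DEBUG dumps are wanted in syslog files, the ceiling can be raised with one global directive; a minimal rsyslog.conf sketch, with the 16k value chosen only for illustration:

    # /etc/rsyslog.conf -- $MaxMessageSize must appear before any input or
    # module lines; it raises the per-message ceiling from the 8k default.
    $MaxMessageSize 16k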
Oct 11 02:29:49 compute-0 sshd-session[438017]: Failed password for root from 80.94.93.233 port 10850 ssh2
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.366 2 DEBUG nova.compute.manager [req-8fe82a0e-6e48-4dfe-b987-49c4cd123337 req-7b5c0469-b8e5-417a-af84-0fc7115f8076 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Received event network-vif-unplugged-a7108c4c-c96c-4354-a4bf-99b1d2160514 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.366 2 DEBUG oslo_concurrency.lockutils [req-8fe82a0e-6e48-4dfe-b987-49c4cd123337 req-7b5c0469-b8e5-417a-af84-0fc7115f8076 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.366 2 DEBUG oslo_concurrency.lockutils [req-8fe82a0e-6e48-4dfe-b987-49c4cd123337 req-7b5c0469-b8e5-417a-af84-0fc7115f8076 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.367 2 DEBUG oslo_concurrency.lockutils [req-8fe82a0e-6e48-4dfe-b987-49c4cd123337 req-7b5c0469-b8e5-417a-af84-0fc7115f8076 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.367 2 DEBUG nova.compute.manager [req-8fe82a0e-6e48-4dfe-b987-49c4cd123337 req-7b5c0469-b8e5-417a-af84-0fc7115f8076 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] No waiting events found dispatching network-vif-unplugged-a7108c4c-c96c-4354-a4bf-99b1d2160514 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.367 2 DEBUG nova.compute.manager [req-8fe82a0e-6e48-4dfe-b987-49c4cd123337 req-7b5c0469-b8e5-417a-af84-0fc7115f8076 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Received event network-vif-unplugged-a7108c4c-c96c-4354-a4bf-99b1d2160514 for instance with task_state deleting. _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10826
Oct 11 02:29:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:49.399 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=8, options={'arp_ns_explicit_output': 'true', 'mac_prefix': 'fe:55:97', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'ce:9c:4f:b4:85:9b'}, ipsec=False) old=SB_Global(nb_cfg=7) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:29:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:49.400 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 9 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274
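The "Matched UPDATE: PortBindingUpdatedEvent(...)" and "Matched UPDATE: SbGlobalUpdateEvent(...)" lines are ovsdbapp's row-event machinery: the metadata agent registers event objects that name an OVSDB table and the event types they care about, and ovsdbapp invokes each object's run() when a matching row change arrives (event.py:43 is the match log above). A minimal sketch of such an event class, assuming only the ovsdbapp package; registration against the agent's IDL connection is omitted:

    from ovsdbapp.backend.ovs_idl import event as row_event

    class PortBindingUpdatedEvent(row_event.RowEvent):
        """React to updates on the southbound Port_Binding table."""

        def __init__(self):
            # (events, table, conditions) -- match any update to Port_Binding.
            super().__init__((self.ROW_UPDATE,), "Port_Binding", None)

        def run(self, event, row, old):
            # Called by ovsdbapp for every row change that matches.
            print("port binding changed:", row.logical_port)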
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.400 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:49 compute-0 ceph-mon[191930]: pgmap v1555: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.625 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:49 compute-0 nova_compute[356901]: 2025-10-11 02:29:49.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:29:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1556: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:50 compute-0 ceph-mon[191930]: pgmap v1556: 321 pgs: 321 active+clean; 263 MiB data, 357 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:29:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:29:50 compute-0 sshd-session[438060]: Connection closed by invalid user admin 121.227.153.123 port 54706 [preauth]
Oct 11 02:29:50 compute-0 sshd-session[438017]: Received disconnect from 80.94.93.233 port 10850:11:  [preauth]
Oct 11 02:29:50 compute-0 sshd-session[438017]: Disconnected from authenticating user root 80.94.93.233 port 10850 [preauth]
Oct 11 02:29:50 compute-0 sshd-session[438017]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=80.94.93.233  user=root
Oct 11 02:29:51 compute-0 nova_compute[356901]: 2025-10-11 02:29:51.460 2 DEBUG nova.compute.manager [req-5863ad5e-2fb5-462a-9af7-bff31c540dc5 req-1dddc7d7-c969-4b9c-b3ad-b983ab8732f1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Received event network-vif-plugged-a7108c4c-c96c-4354-a4bf-99b1d2160514 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:29:51 compute-0 nova_compute[356901]: 2025-10-11 02:29:51.460 2 DEBUG oslo_concurrency.lockutils [req-5863ad5e-2fb5-462a-9af7-bff31c540dc5 req-1dddc7d7-c969-4b9c-b3ad-b983ab8732f1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:29:51 compute-0 nova_compute[356901]: 2025-10-11 02:29:51.461 2 DEBUG oslo_concurrency.lockutils [req-5863ad5e-2fb5-462a-9af7-bff31c540dc5 req-1dddc7d7-c969-4b9c-b3ad-b983ab8732f1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:29:51 compute-0 nova_compute[356901]: 2025-10-11 02:29:51.462 2 DEBUG oslo_concurrency.lockutils [req-5863ad5e-2fb5-462a-9af7-bff31c540dc5 req-1dddc7d7-c969-4b9c-b3ad-b983ab8732f1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:29:51 compute-0 nova_compute[356901]: 2025-10-11 02:29:51.462 2 DEBUG nova.compute.manager [req-5863ad5e-2fb5-462a-9af7-bff31c540dc5 req-1dddc7d7-c969-4b9c-b3ad-b983ab8732f1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] No waiting events found dispatching network-vif-plugged-a7108c4c-c96c-4354-a4bf-99b1d2160514 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:29:51 compute-0 nova_compute[356901]: 2025-10-11 02:29:51.462 2 WARNING nova.compute.manager [req-5863ad5e-2fb5-462a-9af7-bff31c540dc5 req-1dddc7d7-c969-4b9c-b3ad-b983ab8732f1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Received unexpected event network-vif-plugged-a7108c4c-c96c-4354-a4bf-99b1d2160514 for instance with vm_state active and task_state deleting.
Oct 11 02:29:51 compute-0 nova_compute[356901]: 2025-10-11 02:29:51.462 2 DEBUG nova.compute.manager [req-5863ad5e-2fb5-462a-9af7-bff31c540dc5 req-1dddc7d7-c969-4b9c-b3ad-b983ab8732f1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Received event network-changed-a7108c4c-c96c-4354-a4bf-99b1d2160514 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:29:51 compute-0 nova_compute[356901]: 2025-10-11 02:29:51.463 2 DEBUG nova.compute.manager [req-5863ad5e-2fb5-462a-9af7-bff31c540dc5 req-1dddc7d7-c969-4b9c-b3ad-b983ab8732f1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Refreshing instance network info cache due to event network-changed-a7108c4c-c96c-4354-a4bf-99b1d2160514. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:29:51 compute-0 nova_compute[356901]: 2025-10-11 02:29:51.463 2 DEBUG oslo_concurrency.lockutils [req-5863ad5e-2fb5-462a-9af7-bff31c540dc5 req-1dddc7d7-c969-4b9c-b3ad-b983ab8732f1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:29:51 compute-0 nova_compute[356901]: 2025-10-11 02:29:51.463 2 DEBUG oslo_concurrency.lockutils [req-5863ad5e-2fb5-462a-9af7-bff31c540dc5 req-1dddc7d7-c969-4b9c-b3ad-b983ab8732f1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:29:51 compute-0 nova_compute[356901]: 2025-10-11 02:29:51.464 2 DEBUG nova.network.neutron [req-5863ad5e-2fb5-462a-9af7-bff31c540dc5 req-1dddc7d7-c969-4b9c-b3ad-b983ab8732f1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Refreshing network info cache for port a7108c4c-c96c-4354-a4bf-99b1d2160514 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:29:51 compute-0 unix_chkpwd[438111]: password check failed for user (root)
Oct 11 02:29:51 compute-0 sshd-session[438107]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=80.94.93.233  user=root
Oct 11 02:29:51 compute-0 sshd-session[438109]: Invalid user admin from 121.227.153.123 port 35870
Oct 11 02:29:52 compute-0 sshd-session[438109]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:29:52 compute-0 sshd-session[438109]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:29:52 compute-0 nova_compute[356901]: 2025-10-11 02:29:52.126 2 INFO nova.virt.libvirt.driver [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Deleting instance files /var/lib/nova/instances/d60d7ea1-5d00-4902-90e6-3ae67eb09a78_del
Oct 11 02:29:52 compute-0 nova_compute[356901]: 2025-10-11 02:29:52.127 2 INFO nova.virt.libvirt.driver [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Deletion of /var/lib/nova/instances/d60d7ea1-5d00-4902-90e6-3ae67eb09a78_del complete
Oct 11 02:29:52 compute-0 nova_compute[356901]: 2025-10-11 02:29:52.211 2 DEBUG nova.virt.libvirt.host [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Checking UEFI support for host arch (x86_64) supports_uefi /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1754
Oct 11 02:29:52 compute-0 nova_compute[356901]: 2025-10-11 02:29:52.211 2 INFO nova.virt.libvirt.host [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] UEFI support detected
Oct 11 02:29:52 compute-0 nova_compute[356901]: 2025-10-11 02:29:52.215 2 INFO nova.compute.manager [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Took 3.46 seconds to destroy the instance on the hypervisor.
Oct 11 02:29:52 compute-0 nova_compute[356901]: 2025-10-11 02:29:52.216 2 DEBUG oslo.service.loopingcall [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Waiting for function nova.compute.manager.ComputeManager._try_deallocate_network.<locals>._deallocate_network_with_retries to return. func /usr/lib/python3.9/site-packages/oslo_service/loopingcall.py:435
Oct 11 02:29:52 compute-0 nova_compute[356901]: 2025-10-11 02:29:52.217 2 DEBUG nova.compute.manager [-] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Deallocating network for instance _deallocate_network /usr/lib/python3.9/site-packages/nova/compute/manager.py:2259
Oct 11 02:29:52 compute-0 nova_compute[356901]: 2025-10-11 02:29:52.217 2 DEBUG nova.network.neutron [-] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] deallocate_for_instance() deallocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1803
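The "Waiting for function ... _deallocate_network_with_retries to return" line above is oslo.service's looping-call mechanism: a callable is rescheduled until it signals completion by raising LoopingCallDone, which is how nova retries network deallocation. A minimal sketch of that mechanism, with the interval and retry count chosen only for illustration; nova's wrapper adds its own retry policy on top:

    from oslo_service import loopingcall

    attempts = {"count": 0}

    def deallocate():
        # Stand-in for the retried work; succeed on the third call.
        attempts["count"] += 1
        if attempts["count"] >= 3:
            raise loopingcall.LoopingCallDone(retvalue="deallocated")

    timer = loopingcall.FixedIntervalLoopingCall(deallocate)
    print(timer.start(interval=0.1).wait())  # -> deallocated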
Oct 11 02:29:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1557: 321 pgs: 321 active+clean; 241 MiB data, 348 MiB used, 60 GiB / 60 GiB avail; 9.2 KiB/s rd, 852 B/s wr, 13 op/s
Oct 11 02:29:53 compute-0 nova_compute[356901]: 2025-10-11 02:29:53.182 2 DEBUG nova.network.neutron [req-5863ad5e-2fb5-462a-9af7-bff31c540dc5 req-1dddc7d7-c969-4b9c-b3ad-b983ab8732f1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Updated VIF entry in instance network info cache for port a7108c4c-c96c-4354-a4bf-99b1d2160514. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:29:53 compute-0 nova_compute[356901]: 2025-10-11 02:29:53.183 2 DEBUG nova.network.neutron [req-5863ad5e-2fb5-462a-9af7-bff31c540dc5 req-1dddc7d7-c969-4b9c-b3ad-b983ab8732f1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Updating instance_info_cache with network_info: [{"id": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "address": "fa:16:3e:c2:ee:14", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.80", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa7108c4c-c9", "ovs_interfaceid": "a7108c4c-c96c-4354-a4bf-99b1d2160514", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
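[annotation] The instance_info_cache payload in the line above is ordinary JSON (a list of VIF entries). A short sketch that walks it to list each port's fixed and floating IPs, assuming the payload has been saved verbatim to network_info.json (a hypothetical file name):

    import json

    with open("network_info.json") as f:
        vifs = json.load(f)

    for vif in vifs:
        for subnet in vif["network"]["subnets"]:
            for ip in subnet["ips"]:
                floating = [f["address"] for f in ip.get("floating_ips", [])]
                print(vif["id"], ip["address"], "floating:", floating or "-")

For the cache above this prints port a7108c4c-c96c-4354-a4bf-99b1d2160514 with fixed IP 192.168.0.80 and no floating IPs.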
Oct 11 02:29:53 compute-0 nova_compute[356901]: 2025-10-11 02:29:53.204 2 DEBUG oslo_concurrency.lockutils [req-5863ad5e-2fb5-462a-9af7-bff31c540dc5 req-1dddc7d7-c969-4b9c-b3ad-b983ab8732f1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-d60d7ea1-5d00-4902-90e6-3ae67eb09a78" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:29:53 compute-0 ceph-mon[191930]: pgmap v1557: 321 pgs: 321 active+clean; 241 MiB data, 348 MiB used, 60 GiB / 60 GiB avail; 9.2 KiB/s rd, 852 B/s wr, 13 op/s
Oct 11 02:29:53 compute-0 sshd-session[438107]: Failed password for root from 80.94.93.233 port 28202 ssh2
Oct 11 02:29:53 compute-0 nova_compute[356901]: 2025-10-11 02:29:53.825 2 DEBUG nova.network.neutron [-] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Updating instance_info_cache with network_info: [] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:29:53 compute-0 nova_compute[356901]: 2025-10-11 02:29:53.840 2 INFO nova.compute.manager [-] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Took 1.62 seconds to deallocate network for instance.
Oct 11 02:29:53 compute-0 nova_compute[356901]: 2025-10-11 02:29:53.883 2 DEBUG oslo_concurrency.lockutils [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.update_usage" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:29:53 compute-0 nova_compute[356901]: 2025-10-11 02:29:53.885 2 DEBUG oslo_concurrency.lockutils [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:29:53 compute-0 nova_compute[356901]: 2025-10-11 02:29:53.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:29:54 compute-0 nova_compute[356901]: 2025-10-11 02:29:54.024 2 DEBUG oslo_concurrency.processutils [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:29:54 compute-0 nova_compute[356901]: 2025-10-11 02:29:54.251 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:54 compute-0 sshd-session[438109]: Failed password for invalid user admin from 121.227.153.123 port 35870 ssh2
Oct 11 02:29:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1558: 321 pgs: 321 active+clean; 201 MiB data, 327 MiB used, 60 GiB / 60 GiB avail; 20 KiB/s rd, 1.2 KiB/s wr, 28 op/s
Oct 11 02:29:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:29:54 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/195306613' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:29:54 compute-0 nova_compute[356901]: 2025-10-11 02:29:54.490 2 DEBUG oslo_concurrency.processutils [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.466s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
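[annotation] The `ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf` call that nova-compute shells out to above is a plain CLI invocation and can be reproduced directly. A sketch of the same call and a first pass over its output, using the standard `ceph df` JSON fields:

    import json
    import subprocess

    out = subprocess.run(
        ["ceph", "df", "--format=json",
         "--id", "openstack", "--conf", "/etc/ceph/ceph.conf"],
        check=True, capture_output=True, text=True,
    ).stdout

    df = json.loads(out)
    stats = df["stats"]
    print(f"cluster avail: {stats['total_avail_bytes'] / stats['total_bytes']:.1%}")
    for pool in df["pools"]:
        print(pool["name"], pool["stats"]["bytes_used"])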
Oct 11 02:29:54 compute-0 nova_compute[356901]: 2025-10-11 02:29:54.502 2 DEBUG nova.compute.provider_tree [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:29:54 compute-0 nova_compute[356901]: 2025-10-11 02:29:54.538 2 DEBUG nova.scheduler.client.report [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
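[annotation] The inventory dict above is what the scheduler actually sees: per resource class, schedulable capacity is (total - reserved) * allocation_ratio. A quick check of the logged numbers (no new data):

    inventory = {
        "VCPU":      {"total": 8,    "reserved": 0,   "allocation_ratio": 4.0},
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "DISK_GB":   {"total": 59,   "reserved": 1,   "allocation_ratio": 0.9},
    }

    for rc, v in inventory.items():
        cap = (v["total"] - v["reserved"]) * v["allocation_ratio"]
        print(rc, cap)
    # VCPU 32.0, MEMORY_MB 7168.0, DISK_GB 52.2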
Oct 11 02:29:54 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/195306613' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:29:54 compute-0 nova_compute[356901]: 2025-10-11 02:29:54.565 2 DEBUG oslo_concurrency.lockutils [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: held 0.680s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:29:54 compute-0 nova_compute[356901]: 2025-10-11 02:29:54.609 2 INFO nova.scheduler.client.report [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Deleted allocations for instance d60d7ea1-5d00-4902-90e6-3ae67eb09a78
Oct 11 02:29:54 compute-0 nova_compute[356901]: 2025-10-11 02:29:54.627 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:54 compute-0 nova_compute[356901]: 2025-10-11 02:29:54.688 2 DEBUG oslo_concurrency.lockutils [None req-553af8b7-dab6-46cf-b720-2e6f6c4ee4b0 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "d60d7ea1-5d00-4902-90e6-3ae67eb09a78" "released" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: held 5.935s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:29:54 compute-0 unix_chkpwd[438134]: password check failed for user (root)
Oct 11 02:29:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:54.857 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:29:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:54.858 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:29:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:54.858 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:29:55 compute-0 podman[438135]: 2025-10-11 02:29:55.201171063 +0000 UTC m=+0.091205770 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, config_id=edpm, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3)
Oct 11 02:29:55 compute-0 podman[438136]: 2025-10-11 02:29:55.203834597 +0000 UTC m=+0.094354788 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., build-date=2025-08-20T13:12:41, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, container_name=openstack_network_exporter, name=ubi9-minimal, version=9.6, architecture=x86_64, io.buildah.version=1.33.7, io.openshift.tags=minimal rhel9, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.expose-services=, com.redhat.component=ubi9-minimal-container, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350, vendor=Red Hat, Inc., vcs-type=git, url=https://catalog.redhat.com/en/search?searchType=containers, distribution-scope=public, managed_by=edpm_ansible, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, config_id=edpm, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9.)
Oct 11 02:29:55 compute-0 podman[438137]: 2025-10-11 02:29:55.225277121 +0000 UTC m=+0.111520093 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
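[annotation] The three podman events above are periodic healthcheck results (health_status=healthy, health_failing_streak=0) for the ceilometer_agent_ipmi, openstack_network_exporter and node_exporter containers. The same state can be read back on demand; a sketch using `podman inspect`, whose .State.Health object carries the Status/FailingStreak fields shown in these events:

    import json
    import subprocess

    for name in ("ceilometer_agent_ipmi", "openstack_network_exporter", "node_exporter"):
        out = subprocess.run(
            ["podman", "inspect", "--format", "{{json .State.Health}}", name],
            check=True, capture_output=True, text=True,
        ).stdout
        health = json.loads(out)
        print(name, health["Status"], "failing streak:", health["FailingStreak"])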
Oct 11 02:29:55 compute-0 sshd-session[438109]: Connection closed by invalid user admin 121.227.153.123 port 35870 [preauth]
Oct 11 02:29:55 compute-0 ceph-mon[191930]: pgmap v1558: 321 pgs: 321 active+clean; 201 MiB data, 327 MiB used, 60 GiB / 60 GiB avail; 20 KiB/s rd, 1.2 KiB/s wr, 28 op/s
Oct 11 02:29:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:29:55 compute-0 nova_compute[356901]: 2025-10-11 02:29:55.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:29:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1559: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:29:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:29:56
Oct 11 02:29:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:29:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:29:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['vms', 'images', 'cephfs.cephfs.data', 'default.rgw.control', 'default.rgw.log', 'volumes', 'cephfs.cephfs.meta', '.mgr', '.rgw.root', 'default.rgw.meta', 'backups']
Oct 11 02:29:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:29:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:29:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:29:56 compute-0 sshd-session[438107]: Failed password for root from 80.94.93.233 port 28202 ssh2
Oct 11 02:29:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:29:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:29:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:29:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:29:56 compute-0 ceph-mon[191930]: pgmap v1559: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:29:56 compute-0 nova_compute[356901]: 2025-10-11 02:29:56.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:29:56 compute-0 nova_compute[356901]: 2025-10-11 02:29:56.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
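[annotation] `CONF.reclaim_instance_interval <= 0, skipping` means soft-delete reclaim is disabled on this compute: deleted instances are destroyed immediately rather than parked in SOFT_DELETED and purged later by _reclaim_queued_deletes. Enabling it is a nova.conf setting (the value below is illustrative):

    [DEFAULT]
    # Seconds between runs of the _reclaim_queued_deletes periodic task;
    # <= 0 (the default) disables soft-delete reclaim entirely.
    reclaim_instance_interval = 3600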
Oct 11 02:29:57 compute-0 sshd-session[438200]: Invalid user admin from 121.227.153.123 port 35878
Oct 11 02:29:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:29:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:29:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:29:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:29:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:29:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:29:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:29:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:29:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:29:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:29:57 compute-0 sshd-session[438200]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:29:57 compute-0 sshd-session[438200]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:29:58 compute-0 unix_chkpwd[438202]: password check failed for user (root)
Oct 11 02:29:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1560: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:29:58 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:29:58.404 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '8'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:29:58 compute-0 nova_compute[356901]: 2025-10-11 02:29:58.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:29:58 compute-0 nova_compute[356901]: 2025-10-11 02:29:58.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:29:59 compute-0 sshd-session[438200]: Failed password for invalid user admin from 121.227.153.123 port 35878 ssh2
Oct 11 02:29:59 compute-0 nova_compute[356901]: 2025-10-11 02:29:59.255 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:59 compute-0 ceph-mon[191930]: pgmap v1560: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:29:59 compute-0 nova_compute[356901]: 2025-10-11 02:29:59.631 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:29:59 compute-0 podman[157119]: time="2025-10-11T02:29:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:29:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:29:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:29:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:29:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9059 "" "Go-http-client/1.1"
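[annotation] The two HTTP access lines above show the podman service API socket being scraped (note the Go-http-client user agent). The same libpod endpoint can be queried with the Python standard library alone; a sketch, assuming the rootful socket at /run/podman/podman.sock:

    import http.client
    import json
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        """HTTP client over a unix socket instead of TCP."""
        def __init__(self, path):
            super().__init__("localhost")
            self._path = path

        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self._path)

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    for c in json.loads(conn.getresponse().read()):
        print(c["Names"][0], c["State"])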
Oct 11 02:30:00 compute-0 nova_compute[356901]: 2025-10-11 02:30:00.014 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-7513b93e-d2b8-4ae0-8f1c-3df190945259" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:30:00 compute-0 nova_compute[356901]: 2025-10-11 02:30:00.015 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-7513b93e-d2b8-4ae0-8f1c-3df190945259" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:30:00 compute-0 nova_compute[356901]: 2025-10-11 02:30:00.015 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:30:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1561: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:30:00 compute-0 sshd-session[438107]: Failed password for root from 80.94.93.233 port 28202 ssh2
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #69. Immutable memtables: 0.
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:30:00.545440) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 37] Flushing memtable with next log file: 69
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149800545591, "job": 37, "event": "flush_started", "num_memtables": 1, "num_entries": 2042, "num_deletes": 251, "total_data_size": 3436225, "memory_usage": 3488064, "flush_reason": "Manual Compaction"}
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 37] Level-0 flush table #70: started
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149800617771, "cf_name": "default", "job": 37, "event": "table_file_creation", "file_number": 70, "file_size": 3370446, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 30146, "largest_seqno": 32187, "table_properties": {"data_size": 3361072, "index_size": 5932, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 2373, "raw_key_size": 18473, "raw_average_key_size": 20, "raw_value_size": 3342594, "raw_average_value_size": 3625, "num_data_blocks": 263, "num_entries": 922, "num_filter_entries": 922, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760149572, "oldest_key_time": 1760149572, "file_creation_time": 1760149800, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 70, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 37] Flush lasted 72393 microseconds, and 16185 cpu microseconds.
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:30:00.617875) [db/flush_job.cc:967] [default] [JOB 37] Level-0 flush table #70: 3370446 bytes OK
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:30:00.617908) [db/memtable_list.cc:519] [default] Level-0 commit table #70 started
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:30:00.626550) [db/memtable_list.cc:722] [default] Level-0 commit table #70: memtable #1 done
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:30:00.626575) EVENT_LOG_v1 {"time_micros": 1760149800626567, "job": 37, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:30:00.626603) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 37] Try to delete WAL files size 3427704, prev total WAL file size 3427704, number of live WAL files 2.
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000066.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:30:00.629118) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730032373631' seq:72057594037927935, type:22 .. '7061786F730033303133' seq:0, type:0; will stop at (end)
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 38] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 37 Base level 0, inputs: [70(3291KB)], [68(7035KB)]
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149800629211, "job": 38, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [70], "files_L6": [68], "score": -1, "input_data_size": 10574335, "oldest_snapshot_seqno": -1}
Oct 11 02:30:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 38] Generated table #71: 5338 keys, 8869326 bytes, temperature: kUnknown
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149800698363, "cf_name": "default", "job": 38, "event": "table_file_creation", "file_number": 71, "file_size": 8869326, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 8833127, "index_size": 21753, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 13381, "raw_key_size": 133741, "raw_average_key_size": 25, "raw_value_size": 8736080, "raw_average_value_size": 1636, "num_data_blocks": 897, "num_entries": 5338, "num_filter_entries": 5338, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760149800, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 71, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:30:00.703771) [db/compaction/compaction_job.cc:1663] [default] [JOB 38] Compacted 1@0 + 1@6 files to L6 => 8869326 bytes
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:30:00.712413) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 152.9 rd, 128.2 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(3.2, 6.9 +0.0 blob) out(8.5 +0.0 blob), read-write-amplify(5.8) write-amplify(2.6) OK, records in: 5852, records dropped: 514 output_compression: NoCompression
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:30:00.712451) EVENT_LOG_v1 {"time_micros": 1760149800712433, "job": 38, "event": "compaction_finished", "compaction_time_micros": 69180, "compaction_time_cpu_micros": 39468, "output_level": 6, "num_output_files": 1, "total_output_size": 8869326, "num_input_records": 5852, "num_output_records": 5338, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000070.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149800713823, "job": 38, "event": "table_file_deletion", "file_number": 70}
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000068.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149800716839, "job": 38, "event": "table_file_deletion", "file_number": 68}
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:30:00.628870) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:30:00.717564) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:30:00.717576) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:30:00.717579) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:30:00.717582) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:30:00 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:30:00.717587) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
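[annotation] The ceph-mon's embedded RocksDB emits both human-readable lines and machine-readable EVENT_LOG_v1 JSON (flush_started, table_file_creation, compaction_finished, ...), as seen throughout the block above. Those payloads are easy to mine for flush/compaction timings; a sketch over journal text on stdin:

    import json
    import re
    import sys

    EVENT = re.compile(r"EVENT_LOG_v1 ({.*})")

    for line in sys.stdin:
        m = EVENT.search(line)
        if not m:
            continue
        ev = json.loads(m.group(1))
        if ev.get("event") == "flush_finished":
            print("flush job", ev["job"], "lsm_state", ev["lsm_state"])
        elif ev.get("event") == "compaction_finished":
            print("compaction job", ev["job"],
                  ev["compaction_time_micros"], "us,",
                  ev["total_output_size"], "bytes out")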
Oct 11 02:30:00 compute-0 sshd-session[438200]: Connection closed by invalid user admin 121.227.153.123 port 35878 [preauth]
Oct 11 02:30:01 compute-0 sshd-session[438107]: Received disconnect from 80.94.93.233 port 28202:11:  [preauth]
Oct 11 02:30:01 compute-0 sshd-session[438107]: Disconnected from authenticating user root 80.94.93.233 port 28202 [preauth]
Oct 11 02:30:01 compute-0 sshd-session[438107]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=80.94.93.233  user=root
Oct 11 02:30:01 compute-0 openstack_network_exporter[374316]: ERROR   02:30:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:30:01 compute-0 openstack_network_exporter[374316]: ERROR   02:30:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:30:01 compute-0 openstack_network_exporter[374316]: ERROR   02:30:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:30:01 compute-0 openstack_network_exporter[374316]: ERROR   02:30:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:30:01 compute-0 openstack_network_exporter[374316]: ERROR   02:30:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
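[annotation] The exporter errors above are lookups for daemon control sockets: ovs-appctl-style tooling locates ovn-northd and ovsdb-server through *.ctl files in their runtime directories, and ovn-northd is a control-plane daemon that does not run on this compute host, so those probes fail; the dpif-netdev calls likewise fail because this OVS uses the kernel (system) datapath, not the userspace one. A sketch of the same existence check, using the conventional runtime paths (which match the /run/openvswitch and /run/ovn volume mounts in the exporter's config logged earlier):

    import glob

    for pattern in ("/var/run/openvswitch/*.ctl", "/var/run/ovn/*.ctl"):
        found = glob.glob(pattern)
        print(pattern, "->", found or "no control sockets")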
Oct 11 02:30:01 compute-0 ceph-mon[191930]: pgmap v1561: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:30:02 compute-0 sshd-session[438204]: Invalid user admin from 121.227.153.123 port 52522
Oct 11 02:30:02 compute-0 sshd-session[438204]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:30:02 compute-0 sshd-session[438204]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:30:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1562: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:30:02 compute-0 ceph-mon[191930]: pgmap v1562: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:30:03 compute-0 nova_compute[356901]: 2025-10-11 02:30:03.425 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Updating instance_info_cache with network_info: [{"id": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "address": "fa:16:3e:16:ee:dc", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.225", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.204", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa942acb1-1e", "ovs_interfaceid": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:30:03 compute-0 nova_compute[356901]: 2025-10-11 02:30:03.443 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-7513b93e-d2b8-4ae0-8f1c-3df190945259" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:30:03 compute-0 nova_compute[356901]: 2025-10-11 02:30:03.444 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:30:03 compute-0 nova_compute[356901]: 2025-10-11 02:30:03.444 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:30:03 compute-0 nova_compute[356901]: 2025-10-11 02:30:03.444 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:30:03 compute-0 nova_compute[356901]: 2025-10-11 02:30:03.445 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:30:03 compute-0 nova_compute[356901]: 2025-10-11 02:30:03.480 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:30:03 compute-0 nova_compute[356901]: 2025-10-11 02:30:03.481 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:30:03 compute-0 nova_compute[356901]: 2025-10-11 02:30:03.481 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:30:03 compute-0 nova_compute[356901]: 2025-10-11 02:30:03.481 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:30:03 compute-0 nova_compute[356901]: 2025-10-11 02:30:03.481 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:30:03 compute-0 sshd-session[438204]: Failed password for invalid user admin from 121.227.153.123 port 52522 ssh2
Oct 11 02:30:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:30:03 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/670916773' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:30:03 compute-0 nova_compute[356901]: 2025-10-11 02:30:03.936 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.454s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:30:03 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/670916773' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.101 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.102 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.102 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:30:04 compute-0 podman[438229]: 2025-10-11 02:30:04.102349839 +0000 UTC m=+0.092703360 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, architecture=x86_64, config_id=edpm, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, release=1214.1726694543, release-0.7.12=, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.expose-services=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, name=ubi9, version=9.4, io.openshift.tags=base rhel9, com.redhat.component=ubi9-container, container_name=kepler, distribution-scope=public, io.buildah.version=1.29.0, io.k8s.display-name=Red Hat Universal Base Image 9, maintainer=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc.)
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.111 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.111 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.112 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.121 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.121 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.121 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:30:04 compute-0 sshd-session[438204]: Connection closed by invalid user admin 121.227.153.123 port 52522 [preauth]
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.216 2 DEBUG nova.virt.driver [-] Emitting event <LifecycleEvent: 1760149789.2152073, d60d7ea1-5d00-4902-90e6-3ae67eb09a78 => Stopped> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.217 2 INFO nova.compute.manager [-] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] VM Stopped (Lifecycle Event)
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.244 2 DEBUG nova.compute.manager [None req-3c966c6e-3bdd-4ffa-9346-789bf446edfa - - - - - -] [instance: d60d7ea1-5d00-4902-90e6-3ae67eb09a78] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.258 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1563: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 938 B/s wr, 26 op/s
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.565 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.566 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3389MB free_disk=59.88886642456055GB free_vcpus=5 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
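[annotation] The hypervisor resource view above embeds the host PCI inventory as JSON. Grouping it by vendor separates the emulated Intel chipset functions (vendor_id 8086) from the virtio devices (vendor_id 1af4); a sketch, assuming the list has been saved to pci_devices.json (hypothetical file name):

    import json
    from collections import Counter

    with open("pci_devices.json") as f:
        devices = json.load(f)

    print(Counter(d["vendor_id"] for d in devices))
    # Counter({'1af4': 6, '8086': 5}) for the view logged above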
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.566 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.566 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.634 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.659 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.660 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 7513b93e-d2b8-4ae0-8f1c-3df190945259 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.660 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 358d31cf-2866-416a-b2fc-814ee4bfe89a actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.660 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 3 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.660 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=2048MB phys_disk=59GB used_disk=6GB total_vcpus=8 used_vcpus=3 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
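[annotation] The final view is consistent with three running 1-vCPU / 512 MB / 2 GB guests plus the 512 MB host reservation; the arithmetic behind the logged numbers:

    guests = 3
    assert 512 * guests + 512 == 2048   # used_ram MB (3 guests + reserved)
    assert 2 * guests == 6              # used_disk GB
    assert 8 - guests == 5              # free_vcpus in the 02:30:04.566 view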
Oct 11 02:30:04 compute-0 nova_compute[356901]: 2025-10-11 02:30:04.717 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:30:05 compute-0 ceph-mon[191930]: pgmap v1563: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 938 B/s wr, 26 op/s
Oct 11 02:30:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:30:05 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/882353444' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:30:05 compute-0 nova_compute[356901]: 2025-10-11 02:30:05.202 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.485s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:30:05 compute-0 nova_compute[356901]: 2025-10-11 02:30:05.210 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:30:05 compute-0 nova_compute[356901]: 2025-10-11 02:30:05.228 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:30:05 compute-0 nova_compute[356901]: 2025-10-11 02:30:05.230 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:30:05 compute-0 nova_compute[356901]: 2025-10-11 02:30:05.230 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.664s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:30:05 compute-0 sshd-session[438248]: Invalid user admin from 121.227.153.123 port 52528
Oct 11 02:30:05 compute-0 sshd-session[438248]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:30:05 compute-0 sshd-session[438248]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:30:05 compute-0 nova_compute[356901]: 2025-10-11 02:30:05.681 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:30:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:30:05 compute-0 nova_compute[356901]: 2025-10-11 02:30:05.702 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:30:06 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/882353444' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1564: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 8.6 KiB/s rd, 596 B/s wr, 11 op/s
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0016572374365110374 of space, bias 1.0, pg target 0.4971712309533112 quantized to 32 (current 32)
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:30:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:30:07 compute-0 ceph-mon[191930]: pgmap v1564: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 8.6 KiB/s rd, 596 B/s wr, 11 op/s
Oct 11 02:30:07 compute-0 sshd-session[438248]: Failed password for invalid user admin from 121.227.153.123 port 52528 ssh2
Oct 11 02:30:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1565: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:09 compute-0 sshd-session[438248]: Connection closed by invalid user admin 121.227.153.123 port 52528 [preauth]
Oct 11 02:30:09 compute-0 nova_compute[356901]: 2025-10-11 02:30:09.260 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:09 compute-0 ceph-mon[191930]: pgmap v1565: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:09 compute-0 nova_compute[356901]: 2025-10-11 02:30:09.635 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:10 compute-0 podman[438274]: 2025-10-11 02:30:10.18579251 +0000 UTC m=+0.086188267 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:30:10 compute-0 podman[438276]: 2025-10-11 02:30:10.22130585 +0000 UTC m=+0.114796532 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.license=GPLv2, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251007)
Oct 11 02:30:10 compute-0 podman[438277]: 2025-10-11 02:30:10.228074975 +0000 UTC m=+0.108358938 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.vendor=CentOS, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:30:10 compute-0 podman[438275]: 2025-10-11 02:30:10.248124828 +0000 UTC m=+0.135069896 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
Oct 11 02:30:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1566: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:10 compute-0 sshd-session[438272]: Invalid user admin from 121.227.153.123 port 47812
Oct 11 02:30:10 compute-0 sshd-session[438272]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:30:10 compute-0 sshd-session[438272]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:30:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:30:11 compute-0 ceph-mon[191930]: pgmap v1566: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1567: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:12 compute-0 sshd-session[438272]: Failed password for invalid user admin from 121.227.153.123 port 47812 ssh2
Oct 11 02:30:13 compute-0 ceph-mon[191930]: pgmap v1567: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:14 compute-0 sshd-session[438272]: Connection closed by invalid user admin 121.227.153.123 port 47812 [preauth]
Oct 11 02:30:14 compute-0 nova_compute[356901]: 2025-10-11 02:30:14.264 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1568: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:14 compute-0 ceph-mon[191930]: pgmap v1568: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:14 compute-0 nova_compute[356901]: 2025-10-11 02:30:14.637 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:15 compute-0 podman[438361]: 2025-10-11 02:30:15.234873452 +0000 UTC m=+0.106771942 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, config_id=iscsid, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, container_name=iscsid, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']})
Oct 11 02:30:15 compute-0 podman[438360]: 2025-10-11 02:30:15.242745081 +0000 UTC m=+0.132536717 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=multipathd, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd)
Oct 11 02:30:15 compute-0 sshd-session[438358]: Invalid user admin from 121.227.153.123 port 47826
Oct 11 02:30:15 compute-0 sshd-session[438358]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:30:15 compute-0 sshd-session[438358]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:30:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:30:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1569: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:17 compute-0 ceph-mon[191930]: pgmap v1569: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:17 compute-0 sshd-session[438358]: Failed password for invalid user admin from 121.227.153.123 port 47826 ssh2
Oct 11 02:30:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1570: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:19 compute-0 sshd-session[438358]: Connection closed by invalid user admin 121.227.153.123 port 47826 [preauth]
Oct 11 02:30:19 compute-0 nova_compute[356901]: 2025-10-11 02:30:19.266 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:19 compute-0 ceph-mon[191930]: pgmap v1570: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:19 compute-0 nova_compute[356901]: 2025-10-11 02:30:19.640 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1571: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:20 compute-0 sshd-session[438397]: Invalid user admin from 121.227.153.123 port 59678
Oct 11 02:30:20 compute-0 sshd-session[438397]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:30:20 compute-0 sshd-session[438397]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:30:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:30:21 compute-0 ceph-mon[191930]: pgmap v1571: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:22 compute-0 sshd-session[438397]: Failed password for invalid user admin from 121.227.153.123 port 59678 ssh2
Oct 11 02:30:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1572: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:22 compute-0 ceph-mon[191930]: pgmap v1572: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:24 compute-0 sshd-session[438397]: Connection closed by invalid user admin 121.227.153.123 port 59678 [preauth]
Oct 11 02:30:24 compute-0 nova_compute[356901]: 2025-10-11 02:30:24.269 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1573: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:24 compute-0 nova_compute[356901]: 2025-10-11 02:30:24.643 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:25 compute-0 ovn_controller[88370]: 2025-10-11T02:30:25Z|00053|memory_trim|INFO|Detected inactivity (last active 30004 ms ago): trimming memory
Oct 11 02:30:25 compute-0 ceph-mon[191930]: pgmap v1573: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:30:26 compute-0 podman[438402]: 2025-10-11 02:30:26.226532384 +0000 UTC m=+0.114958642 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, org.label-schema.schema-version=1.0, tcib_managed=true, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:30:26 compute-0 podman[438403]: 2025-10-11 02:30:26.254601187 +0000 UTC m=+0.123913105 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.expose-services=, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., managed_by=edpm_ansible, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, config_id=edpm, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vendor=Red Hat, Inc., build-date=2025-08-20T13:12:41, container_name=openstack_network_exporter, distribution-scope=public, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, architecture=x86_64, io.buildah.version=1.33.7, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., version=9.6, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, release=1755695350, url=https://catalog.redhat.com/en/search?searchType=containers, vcs-type=git, com.redhat.component=ubi9-minimal-container)
Oct 11 02:30:26 compute-0 podman[438404]: 2025-10-11 02:30:26.273665027 +0000 UTC m=+0.137326854 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:30:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1574: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:26 compute-0 sudo[438466]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:30:26 compute-0 sudo[438466]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:26 compute-0 sudo[438466]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:26 compute-0 sudo[438491]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:30:26 compute-0 sudo[438491]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:26 compute-0 sudo[438491]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:30:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:30:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:30:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:30:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:30:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:30:26 compute-0 ceph-mon[191930]: pgmap v1574: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:26 compute-0 sshd-session[438400]: Invalid user admin from 121.227.153.123 port 59682
Oct 11 02:30:26 compute-0 sudo[438516]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:30:26 compute-0 sudo[438516]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:26 compute-0 sudo[438516]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:26 compute-0 sudo[438541]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:30:26 compute-0 sudo[438541]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:26 compute-0 sshd-session[438400]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:30:26 compute-0 sshd-session[438400]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:30:27 compute-0 sudo[438541]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:30:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:30:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:30:27 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:30:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:30:27 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:30:27 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev e0cd6350-6559-4bd0-8adc-6d63a8430f1a does not exist
Oct 11 02:30:27 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev bf6c08e6-bfae-4401-8977-17ea69ab86f1 does not exist
Oct 11 02:30:27 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev e6f770c2-4a0a-48b5-961a-375453a35e77 does not exist
Oct 11 02:30:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:30:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:30:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:30:27 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:30:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:30:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:30:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:30:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:30:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:30:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:30:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:30:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:30:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:30:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/877987374' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:30:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:30:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/877987374' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:30:27 compute-0 sudo[438598]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:30:27 compute-0 sudo[438598]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:27 compute-0 sudo[438598]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:27 compute-0 sudo[438623]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:30:27 compute-0 sudo[438623]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:27 compute-0 sudo[438623]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:27 compute-0 sudo[438648]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:30:27 compute-0 sudo[438648]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:27 compute-0 sudo[438648]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:28 compute-0 sudo[438673]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:30:28 compute-0 sudo[438673]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1575: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:28 compute-0 podman[438736]: 2025-10-11 02:30:28.590343705 +0000 UTC m=+0.132667977 container create 5ce8f0b323f9af371063bd638d55c3e30094f942902520bbe09221c693ff2955 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_chaum, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:30:28 compute-0 podman[438736]: 2025-10-11 02:30:28.50429783 +0000 UTC m=+0.046622192 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:30:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/877987374' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:30:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/877987374' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:30:28 compute-0 ceph-mon[191930]: pgmap v1575: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:28 compute-0 systemd[1]: Started libpod-conmon-5ce8f0b323f9af371063bd638d55c3e30094f942902520bbe09221c693ff2955.scope.
Oct 11 02:30:28 compute-0 sshd-session[438400]: Failed password for invalid user admin from 121.227.153.123 port 59682 ssh2
Oct 11 02:30:28 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:30:28 compute-0 podman[438736]: 2025-10-11 02:30:28.76346649 +0000 UTC m=+0.305790812 container init 5ce8f0b323f9af371063bd638d55c3e30094f942902520bbe09221c693ff2955 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_chaum, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 02:30:28 compute-0 podman[438736]: 2025-10-11 02:30:28.780910034 +0000 UTC m=+0.323234316 container start 5ce8f0b323f9af371063bd638d55c3e30094f942902520bbe09221c693ff2955 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_chaum, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True)
Oct 11 02:30:28 compute-0 nostalgic_chaum[438749]: 167 167
Oct 11 02:30:28 compute-0 systemd[1]: libpod-5ce8f0b323f9af371063bd638d55c3e30094f942902520bbe09221c693ff2955.scope: Deactivated successfully.
Oct 11 02:30:28 compute-0 podman[438736]: 2025-10-11 02:30:28.803305666 +0000 UTC m=+0.345630018 container attach 5ce8f0b323f9af371063bd638d55c3e30094f942902520bbe09221c693ff2955 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_chaum, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3)
Oct 11 02:30:28 compute-0 podman[438736]: 2025-10-11 02:30:28.804646121 +0000 UTC m=+0.346970403 container died 5ce8f0b323f9af371063bd638d55c3e30094f942902520bbe09221c693ff2955 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_chaum, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:30:28 compute-0 systemd[1]: var-lib-containers-storage-overlay-9c2dc2ed0fa3b5d0a0bf58b6daab4ed2bed7dd32fc3384b1d993c68e07f4d293-merged.mount: Deactivated successfully.
Oct 11 02:30:29 compute-0 podman[438736]: 2025-10-11 02:30:29.008546678 +0000 UTC m=+0.550870990 container remove 5ce8f0b323f9af371063bd638d55c3e30094f942902520bbe09221c693ff2955 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nostalgic_chaum, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:30:29 compute-0 systemd[1]: libpod-conmon-5ce8f0b323f9af371063bd638d55c3e30094f942902520bbe09221c693ff2955.scope: Deactivated successfully.
Oct 11 02:30:29 compute-0 podman[438776]: 2025-10-11 02:30:29.245022974 +0000 UTC m=+0.084123959 container create 344b99ca8f2f19f956d125b10da404cdb2b7a490315d03e410b0623f4fb22b2c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=peaceful_lewin, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:30:29 compute-0 nova_compute[356901]: 2025-10-11 02:30:29.272 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:29 compute-0 podman[438776]: 2025-10-11 02:30:29.216142919 +0000 UTC m=+0.055243964 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:30:29 compute-0 systemd[1]: Started libpod-conmon-344b99ca8f2f19f956d125b10da404cdb2b7a490315d03e410b0623f4fb22b2c.scope.
Oct 11 02:30:29 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:30:29 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/26b13a79409d17235bb6750a20a37683ea6572ccf71c472b4c48afee29c85bed/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:30:29 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/26b13a79409d17235bb6750a20a37683ea6572ccf71c472b4c48afee29c85bed/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:30:29 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/26b13a79409d17235bb6750a20a37683ea6572ccf71c472b4c48afee29c85bed/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:30:29 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/26b13a79409d17235bb6750a20a37683ea6572ccf71c472b4c48afee29c85bed/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:30:29 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/26b13a79409d17235bb6750a20a37683ea6572ccf71c472b4c48afee29c85bed/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:30:29 compute-0 podman[438776]: 2025-10-11 02:30:29.376788327 +0000 UTC m=+0.215889312 container init 344b99ca8f2f19f956d125b10da404cdb2b7a490315d03e410b0623f4fb22b2c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=peaceful_lewin, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_REF=reef)
Oct 11 02:30:29 compute-0 podman[438776]: 2025-10-11 02:30:29.39929592 +0000 UTC m=+0.238396905 container start 344b99ca8f2f19f956d125b10da404cdb2b7a490315d03e410b0623f4fb22b2c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=peaceful_lewin, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:30:29 compute-0 podman[438776]: 2025-10-11 02:30:29.404029137 +0000 UTC m=+0.243130132 container attach 344b99ca8f2f19f956d125b10da404cdb2b7a490315d03e410b0623f4fb22b2c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=peaceful_lewin, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:30:29 compute-0 nova_compute[356901]: 2025-10-11 02:30:29.646 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:29 compute-0 podman[157119]: time="2025-10-11T02:30:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:30:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:30:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47973 "" "Go-http-client/1.1"
Oct 11 02:30:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:30:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9478 "" "Go-http-client/1.1"
Oct 11 02:30:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1576: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:30 compute-0 sshd-session[438400]: Connection closed by invalid user admin 121.227.153.123 port 59682 [preauth]
Oct 11 02:30:30 compute-0 peaceful_lewin[438792]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:30:30 compute-0 peaceful_lewin[438792]: --> relative data size: 1.0
Oct 11 02:30:30 compute-0 peaceful_lewin[438792]: --> All data devices are unavailable
Oct 11 02:30:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:30:30 compute-0 systemd[1]: libpod-344b99ca8f2f19f956d125b10da404cdb2b7a490315d03e410b0623f4fb22b2c.scope: Deactivated successfully.
Oct 11 02:30:30 compute-0 podman[438776]: 2025-10-11 02:30:30.718611973 +0000 UTC m=+1.557712998 container died 344b99ca8f2f19f956d125b10da404cdb2b7a490315d03e410b0623f4fb22b2c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=peaceful_lewin, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:30:30 compute-0 systemd[1]: libpod-344b99ca8f2f19f956d125b10da404cdb2b7a490315d03e410b0623f4fb22b2c.scope: Consumed 1.218s CPU time.
Oct 11 02:30:30 compute-0 systemd[1]: var-lib-containers-storage-overlay-26b13a79409d17235bb6750a20a37683ea6572ccf71c472b4c48afee29c85bed-merged.mount: Deactivated successfully.
Oct 11 02:30:30 compute-0 podman[438776]: 2025-10-11 02:30:30.811211093 +0000 UTC m=+1.650312088 container remove 344b99ca8f2f19f956d125b10da404cdb2b7a490315d03e410b0623f4fb22b2c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=peaceful_lewin, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:30:30 compute-0 systemd[1]: libpod-conmon-344b99ca8f2f19f956d125b10da404cdb2b7a490315d03e410b0623f4fb22b2c.scope: Deactivated successfully.
Oct 11 02:30:30 compute-0 sudo[438673]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:30 compute-0 sudo[438833]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:30:30 compute-0 sudo[438833]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:30 compute-0 sudo[438833]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:31 compute-0 sudo[438858]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:30:31 compute-0 sudo[438858]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:31 compute-0 sudo[438858]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:31 compute-0 sudo[438883]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:30:31 compute-0 sudo[438883]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:31 compute-0 sudo[438883]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:31 compute-0 sudo[438908]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:30:31 compute-0 sudo[438908]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:31 compute-0 openstack_network_exporter[374316]: ERROR   02:30:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:30:31 compute-0 openstack_network_exporter[374316]: ERROR   02:30:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:30:31 compute-0 openstack_network_exporter[374316]: ERROR   02:30:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:30:31 compute-0 openstack_network_exporter[374316]: ERROR   02:30:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:30:31 compute-0 openstack_network_exporter[374316]: ERROR   02:30:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:30:31 compute-0 ceph-mon[191930]: pgmap v1576: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:31 compute-0 sshd-session[438819]: Invalid user admin from 121.227.153.123 port 47230
Oct 11 02:30:31 compute-0 podman[438971]: 2025-10-11 02:30:31.774572123 +0000 UTC m=+0.088950357 container create 4f1ab5883fa143d345032b6cf0aaa8d956fb81a2893b2d34382b9a5fc1d35806 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_northcutt, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:30:31 compute-0 sshd-session[438819]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:30:31 compute-0 sshd-session[438819]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:30:31 compute-0 podman[438971]: 2025-10-11 02:30:31.729365187 +0000 UTC m=+0.043743381 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:30:31 compute-0 systemd[1]: Started libpod-conmon-4f1ab5883fa143d345032b6cf0aaa8d956fb81a2893b2d34382b9a5fc1d35806.scope.
Oct 11 02:30:31 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:30:31 compute-0 podman[438971]: 2025-10-11 02:30:31.968522773 +0000 UTC m=+0.282900937 container init 4f1ab5883fa143d345032b6cf0aaa8d956fb81a2893b2d34382b9a5fc1d35806 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_northcutt, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:30:31 compute-0 podman[438971]: 2025-10-11 02:30:31.983613929 +0000 UTC m=+0.297992073 container start 4f1ab5883fa143d345032b6cf0aaa8d956fb81a2893b2d34382b9a5fc1d35806 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_northcutt, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.39.3)
Oct 11 02:30:31 compute-0 recursing_northcutt[438987]: 167 167
Oct 11 02:30:31 compute-0 systemd[1]: libpod-4f1ab5883fa143d345032b6cf0aaa8d956fb81a2893b2d34382b9a5fc1d35806.scope: Deactivated successfully.
Oct 11 02:30:32 compute-0 podman[438971]: 2025-10-11 02:30:32.01129184 +0000 UTC m=+0.325669984 container attach 4f1ab5883fa143d345032b6cf0aaa8d956fb81a2893b2d34382b9a5fc1d35806 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_northcutt, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef)
Oct 11 02:30:32 compute-0 podman[438971]: 2025-10-11 02:30:32.011843812 +0000 UTC m=+0.326221956 container died 4f1ab5883fa143d345032b6cf0aaa8d956fb81a2893b2d34382b9a5fc1d35806 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_northcutt, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=reef)
Oct 11 02:30:32 compute-0 systemd[1]: var-lib-containers-storage-overlay-ecbb61d55ec3fdec5b90fe6e237531210cdb9a4653ad7bacb68d494c930c1643-merged.mount: Deactivated successfully.
Oct 11 02:30:32 compute-0 podman[438971]: 2025-10-11 02:30:32.261138796 +0000 UTC m=+0.575516980 container remove 4f1ab5883fa143d345032b6cf0aaa8d956fb81a2893b2d34382b9a5fc1d35806 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_northcutt, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 02:30:32 compute-0 systemd[1]: libpod-conmon-4f1ab5883fa143d345032b6cf0aaa8d956fb81a2893b2d34382b9a5fc1d35806.scope: Deactivated successfully.
Oct 11 02:30:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1577: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:32 compute-0 podman[439009]: 2025-10-11 02:30:32.527609452 +0000 UTC m=+0.092203019 container create 2131661d55c12b7a5b4691ef7caeceb58bde0f7262fb6ebe5c2e34587dcaa59d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_blackburn, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS)
Oct 11 02:30:32 compute-0 podman[439009]: 2025-10-11 02:30:32.477528659 +0000 UTC m=+0.042122296 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:30:32 compute-0 systemd[1]: Started libpod-conmon-2131661d55c12b7a5b4691ef7caeceb58bde0f7262fb6ebe5c2e34587dcaa59d.scope.
Oct 11 02:30:32 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:30:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bebf9b876c8ff59940e226125e8df60c7aad17381136d3d0229cd53c3a5ac665/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:30:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bebf9b876c8ff59940e226125e8df60c7aad17381136d3d0229cd53c3a5ac665/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:30:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bebf9b876c8ff59940e226125e8df60c7aad17381136d3d0229cd53c3a5ac665/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:30:32 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bebf9b876c8ff59940e226125e8df60c7aad17381136d3d0229cd53c3a5ac665/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:30:32 compute-0 podman[439009]: 2025-10-11 02:30:32.826796218 +0000 UTC m=+0.391389795 container init 2131661d55c12b7a5b4691ef7caeceb58bde0f7262fb6ebe5c2e34587dcaa59d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_blackburn, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507)
Oct 11 02:30:32 compute-0 podman[439009]: 2025-10-11 02:30:32.852819804 +0000 UTC m=+0.417413351 container start 2131661d55c12b7a5b4691ef7caeceb58bde0f7262fb6ebe5c2e34587dcaa59d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_blackburn, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:30:32 compute-0 podman[439009]: 2025-10-11 02:30:32.876070539 +0000 UTC m=+0.440664086 container attach 2131661d55c12b7a5b4691ef7caeceb58bde0f7262fb6ebe5c2e34587dcaa59d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_blackburn, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.license=GPLv2)
Oct 11 02:30:33 compute-0 ceph-mon[191930]: pgmap v1577: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]: {
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:     "0": [
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:         {
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "devices": [
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "/dev/loop3"
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             ],
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "lv_name": "ceph_lv0",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "lv_size": "21470642176",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "name": "ceph_lv0",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "tags": {
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.cluster_name": "ceph",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.crush_device_class": "",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.encrypted": "0",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.osd_id": "0",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.type": "block",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.vdo": "0"
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             },
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "type": "block",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "vg_name": "ceph_vg0"
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:         }
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:     ],
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:     "1": [
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:         {
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "devices": [
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "/dev/loop4"
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             ],
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "lv_name": "ceph_lv1",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "lv_size": "21470642176",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "name": "ceph_lv1",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "tags": {
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.cluster_name": "ceph",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.crush_device_class": "",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.encrypted": "0",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.osd_id": "1",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.type": "block",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.vdo": "0"
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             },
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "type": "block",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "vg_name": "ceph_vg1"
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:         }
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:     ],
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:     "2": [
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:         {
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "devices": [
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "/dev/loop5"
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             ],
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "lv_name": "ceph_lv2",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "lv_size": "21470642176",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "name": "ceph_lv2",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "tags": {
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.cluster_name": "ceph",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.crush_device_class": "",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.encrypted": "0",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.osd_id": "2",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.type": "block",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:                 "ceph.vdo": "0"
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             },
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "type": "block",
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:             "vg_name": "ceph_vg2"
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:         }
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]:     ]
Oct 11 02:30:33 compute-0 admiring_blackburn[439025]: }
Oct 11 02:30:33 compute-0 sshd-session[438819]: Failed password for invalid user admin from 121.227.153.123 port 47230 ssh2
Oct 11 02:30:33 compute-0 systemd[1]: libpod-2131661d55c12b7a5b4691ef7caeceb58bde0f7262fb6ebe5c2e34587dcaa59d.scope: Deactivated successfully.
Oct 11 02:30:33 compute-0 podman[439034]: 2025-10-11 02:30:33.734491855 +0000 UTC m=+0.036517895 container died 2131661d55c12b7a5b4691ef7caeceb58bde0f7262fb6ebe5c2e34587dcaa59d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_blackburn, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, OSD_FLAVOR=default, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:30:33 compute-0 systemd[1]: var-lib-containers-storage-overlay-bebf9b876c8ff59940e226125e8df60c7aad17381136d3d0229cd53c3a5ac665-merged.mount: Deactivated successfully.
Oct 11 02:30:33 compute-0 podman[439034]: 2025-10-11 02:30:33.948169967 +0000 UTC m=+0.250195987 container remove 2131661d55c12b7a5b4691ef7caeceb58bde0f7262fb6ebe5c2e34587dcaa59d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=admiring_blackburn, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2)
Oct 11 02:30:33 compute-0 systemd[1]: libpod-conmon-2131661d55c12b7a5b4691ef7caeceb58bde0f7262fb6ebe5c2e34587dcaa59d.scope: Deactivated successfully.
Oct 11 02:30:33 compute-0 sudo[438908]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:34 compute-0 sudo[439049]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:30:34 compute-0 sudo[439049]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:34 compute-0 sudo[439049]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:34 compute-0 sudo[439074]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:30:34 compute-0 sudo[439074]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:34 compute-0 sudo[439074]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:34 compute-0 nova_compute[356901]: 2025-10-11 02:30:34.276 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:34 compute-0 sudo[439100]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:30:34 compute-0 sudo[439100]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:34 compute-0 sudo[439100]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:34 compute-0 podman[439098]: 2025-10-11 02:30:34.34212155 +0000 UTC m=+0.087765832 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vendor=Red Hat, Inc., container_name=kepler, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, com.redhat.component=ubi9-container, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm, maintainer=Red Hat, Inc., release-0.7.12=, version=9.4, managed_by=edpm_ansible, architecture=x86_64, io.buildah.version=1.29.0, io.openshift.tags=base rhel9, io.k8s.display-name=Red Hat Universal Base Image 9, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., io.openshift.expose-services=, vcs-type=git)
Oct 11 02:30:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1578: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:34 compute-0 sudo[439143]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:30:34 compute-0 sudo[439143]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:34 compute-0 nova_compute[356901]: 2025-10-11 02:30:34.649 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:34 compute-0 podman[439208]: 2025-10-11 02:30:34.828419432 +0000 UTC m=+0.068060800 container create 1622cd61f20430724d4893ebf72cc61bdec355165110a2a4faac1246e3cf140f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mestorf, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20250507)
Oct 11 02:30:34 compute-0 podman[439208]: 2025-10-11 02:30:34.797003077 +0000 UTC m=+0.036644485 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:30:34 compute-0 systemd[1]: Started libpod-conmon-1622cd61f20430724d4893ebf72cc61bdec355165110a2a4faac1246e3cf140f.scope.
Oct 11 02:30:34 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:30:35 compute-0 podman[439208]: 2025-10-11 02:30:35.018948911 +0000 UTC m=+0.258590319 container init 1622cd61f20430724d4893ebf72cc61bdec355165110a2a4faac1246e3cf140f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mestorf, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, ceph=True)
Oct 11 02:30:35 compute-0 podman[439208]: 2025-10-11 02:30:35.034438627 +0000 UTC m=+0.274080015 container start 1622cd61f20430724d4893ebf72cc61bdec355165110a2a4faac1246e3cf140f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mestorf, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:30:35 compute-0 elastic_mestorf[439224]: 167 167
Oct 11 02:30:35 compute-0 systemd[1]: libpod-1622cd61f20430724d4893ebf72cc61bdec355165110a2a4faac1246e3cf140f.scope: Deactivated successfully.
Oct 11 02:30:35 compute-0 podman[439208]: 2025-10-11 02:30:35.051149969 +0000 UTC m=+0.290791347 container attach 1622cd61f20430724d4893ebf72cc61bdec355165110a2a4faac1246e3cf140f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mestorf, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:30:35 compute-0 podman[439208]: 2025-10-11 02:30:35.052555914 +0000 UTC m=+0.292197342 container died 1622cd61f20430724d4893ebf72cc61bdec355165110a2a4faac1246e3cf140f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mestorf, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 02:30:35 compute-0 systemd[1]: var-lib-containers-storage-overlay-d3c35d77ffb56b940d50469474ad0920291351725cc01f2bc6035f55ee264b12-merged.mount: Deactivated successfully.
Oct 11 02:30:35 compute-0 podman[439208]: 2025-10-11 02:30:35.257782686 +0000 UTC m=+0.497424064 container remove 1622cd61f20430724d4893ebf72cc61bdec355165110a2a4faac1246e3cf140f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mestorf, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507)
Oct 11 02:30:35 compute-0 systemd[1]: libpod-conmon-1622cd61f20430724d4893ebf72cc61bdec355165110a2a4faac1246e3cf140f.scope: Deactivated successfully.
Oct 11 02:30:35 compute-0 sshd-session[438819]: Connection closed by invalid user admin 121.227.153.123 port 47230 [preauth]
Oct 11 02:30:35 compute-0 podman[439249]: 2025-10-11 02:30:35.585883998 +0000 UTC m=+0.132152905 container create e3710d12c1e9fd66a3db6915da93c036713f5f54146c8e9d55e418bc35bd97cb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_nightingale, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 02:30:35 compute-0 podman[439249]: 2025-10-11 02:30:35.496091329 +0000 UTC m=+0.042360256 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:30:35 compute-0 ceph-mon[191930]: pgmap v1578: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:35 compute-0 systemd[1]: Started libpod-conmon-e3710d12c1e9fd66a3db6915da93c036713f5f54146c8e9d55e418bc35bd97cb.scope.
Oct 11 02:30:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:30:35 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:30:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0bc0a33aa96dd1ffc5c7f224a38c73be164bb9bd90735e13da7e6146ba4367e9/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:30:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0bc0a33aa96dd1ffc5c7f224a38c73be164bb9bd90735e13da7e6146ba4367e9/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:30:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0bc0a33aa96dd1ffc5c7f224a38c73be164bb9bd90735e13da7e6146ba4367e9/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:30:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0bc0a33aa96dd1ffc5c7f224a38c73be164bb9bd90735e13da7e6146ba4367e9/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:30:35 compute-0 podman[439249]: 2025-10-11 02:30:35.829901512 +0000 UTC m=+0.376170509 container init e3710d12c1e9fd66a3db6915da93c036713f5f54146c8e9d55e418bc35bd97cb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_nightingale, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2)
Oct 11 02:30:35 compute-0 podman[439249]: 2025-10-11 02:30:35.84012431 +0000 UTC m=+0.386393237 container start e3710d12c1e9fd66a3db6915da93c036713f5f54146c8e9d55e418bc35bd97cb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_nightingale, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 02:30:35 compute-0 podman[439249]: 2025-10-11 02:30:35.880344877 +0000 UTC m=+0.426613814 container attach e3710d12c1e9fd66a3db6915da93c036713f5f54146c8e9d55e418bc35bd97cb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_nightingale, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 02:30:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1579: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:36 compute-0 sshd-session[439262]: Invalid user admin from 121.227.153.123 port 47236
Oct 11 02:30:36 compute-0 sshd-session[439262]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:30:36 compute-0 sshd-session[439262]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:30:36 compute-0 ceph-mon[191930]: pgmap v1579: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]: {
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:         "osd_id": 1,
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:         "type": "bluestore"
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:     },
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:         "osd_id": 2,
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:         "type": "bluestore"
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:     },
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:         "osd_id": 0,
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:         "type": "bluestore"
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]:     }
Oct 11 02:30:36 compute-0 elastic_nightingale[439267]: }
Oct 11 02:30:36 compute-0 systemd[1]: libpod-e3710d12c1e9fd66a3db6915da93c036713f5f54146c8e9d55e418bc35bd97cb.scope: Deactivated successfully.
Oct 11 02:30:36 compute-0 podman[439249]: 2025-10-11 02:30:36.976288353 +0000 UTC m=+1.522557260 container died e3710d12c1e9fd66a3db6915da93c036713f5f54146c8e9d55e418bc35bd97cb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_nightingale, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:30:36 compute-0 systemd[1]: libpod-e3710d12c1e9fd66a3db6915da93c036713f5f54146c8e9d55e418bc35bd97cb.scope: Consumed 1.130s CPU time.
Oct 11 02:30:37 compute-0 systemd[1]: var-lib-containers-storage-overlay-0bc0a33aa96dd1ffc5c7f224a38c73be164bb9bd90735e13da7e6146ba4367e9-merged.mount: Deactivated successfully.
Oct 11 02:30:37 compute-0 podman[439249]: 2025-10-11 02:30:37.926919636 +0000 UTC m=+2.473188583 container remove e3710d12c1e9fd66a3db6915da93c036713f5f54146c8e9d55e418bc35bd97cb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_nightingale, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:30:37 compute-0 systemd[1]: libpod-conmon-e3710d12c1e9fd66a3db6915da93c036713f5f54146c8e9d55e418bc35bd97cb.scope: Deactivated successfully.
Oct 11 02:30:37 compute-0 sudo[439143]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:30:38 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:30:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:30:38 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:30:38 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev ec588792-98f6-4e3a-9712-f244d5f5abe4 does not exist
Oct 11 02:30:38 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 3193d31d-56f8-45bb-9e13-27fede68a5b0 does not exist
Oct 11 02:30:38 compute-0 sudo[439311]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:30:38 compute-0 sudo[439311]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:38 compute-0 sudo[439311]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1580: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:38 compute-0 sudo[439336]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:30:38 compute-0 sudo[439336]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:30:38 compute-0 sudo[439336]: pam_unix(sudo:session): session closed for user root
Oct 11 02:30:38 compute-0 sshd-session[439262]: Failed password for invalid user admin from 121.227.153.123 port 47236 ssh2
Oct 11 02:30:39 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:30:39 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:30:39 compute-0 ceph-mon[191930]: pgmap v1580: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:39 compute-0 nova_compute[356901]: 2025-10-11 02:30:39.280 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:39 compute-0 nova_compute[356901]: 2025-10-11 02:30:39.652 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:40 compute-0 sshd-session[439262]: Connection closed by invalid user admin 121.227.153.123 port 47236 [preauth]
Oct 11 02:30:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1581: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:30:41 compute-0 podman[439366]: 2025-10-11 02:30:41.202640587 +0000 UTC m=+0.087628682 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, managed_by=edpm_ansible, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:30:41 compute-0 podman[439363]: 2025-10-11 02:30:41.205117676 +0000 UTC m=+0.095517181 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:30:41 compute-0 podman[439365]: 2025-10-11 02:30:41.210732096 +0000 UTC m=+0.102052475 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, tcib_managed=true)
Oct 11 02:30:41 compute-0 podman[439364]: 2025-10-11 02:30:41.253376423 +0000 UTC m=+0.145543875 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible)
Oct 11 02:30:41 compute-0 sshd-session[439361]: Invalid user admin from 121.227.153.123 port 44458
Oct 11 02:30:41 compute-0 ceph-mon[191930]: pgmap v1581: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:41 compute-0 sshd-session[439361]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:30:41 compute-0 sshd-session[439361]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:30:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1582: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:42 compute-0 ceph-mon[191930]: pgmap v1582: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:44 compute-0 nova_compute[356901]: 2025-10-11 02:30:44.285 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:44 compute-0 sshd-session[439361]: Failed password for invalid user admin from 121.227.153.123 port 44458 ssh2
Oct 11 02:30:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1583: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:44 compute-0 nova_compute[356901]: 2025-10-11 02:30:44.657 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:45 compute-0 sshd-session[439361]: Connection closed by invalid user admin 121.227.153.123 port 44458 [preauth]
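The sshd-session entries above trace one complete brute-force probe from 121.227.153.123: an invalid-user report, a pam_unix authentication failure, a failed password, then a preauth disconnect; the same cycle repeats below from fresh source ports. A minimal sketch, assuming journal lines in exactly the format shown here, that tallies failed attempts per source address (the threshold and file path are illustrative):

    import re
    from collections import Counter

    # Matches the sshd lines above, e.g.
    # "Failed password for invalid user admin from 121.227.153.123 port 44458 ssh2"
    FAILED = re.compile(
        r"Failed password for (?:invalid user )?(?P<user>\S+) "
        r"from (?P<ip>\d+\.\d+\.\d+\.\d+) port \d+"
    )

    def count_failures(lines, threshold=3):
        """Return source IPs with at least `threshold` failed attempts."""
        hits = Counter()
        for line in lines:
            m = FAILED.search(line)
            if m:
                hits[m.group("ip")] += 1
        return {ip: n for ip, n in hits.items() if n >= threshold}

    # Usage, e.g. against a saved journal dump:
    # with open("/var/log/secure") as f:
    #     print(count_failures(f))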
Oct 11 02:30:45 compute-0 ceph-mon[191930]: pgmap v1583: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:30:46 compute-0 podman[439446]: 2025-10-11 02:30:46.232448086 +0000 UTC m=+0.118340145 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, container_name=iscsid, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, config_id=iscsid, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 02:30:46 compute-0 podman[439445]: 2025-10-11 02:30:46.259184854 +0000 UTC m=+0.140577166 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, config_id=multipathd, container_name=multipathd, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']})
Oct 11 02:30:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1584: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:46 compute-0 sshd-session[439443]: Invalid user admin from 121.227.153.123 port 44466
Oct 11 02:30:46 compute-0 ceph-mon[191930]: pgmap v1584: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:46 compute-0 sshd-session[439443]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:30:46 compute-0 sshd-session[439443]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:30:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1585: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:49 compute-0 sshd-session[439443]: Failed password for invalid user admin from 121.227.153.123 port 44466 ssh2
Oct 11 02:30:49 compute-0 nova_compute[356901]: 2025-10-11 02:30:49.289 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:49 compute-0 ceph-mon[191930]: pgmap v1585: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:49 compute-0 nova_compute[356901]: 2025-10-11 02:30:49.660 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:49 compute-0 nova_compute[356901]: 2025-10-11 02:30:49.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:30:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1586: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:50 compute-0 sshd-session[439443]: Connection closed by invalid user admin 121.227.153.123 port 44466 [preauth]
Oct 11 02:30:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:30:51 compute-0 ceph-mon[191930]: pgmap v1586: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:51 compute-0 sshd-session[439486]: Invalid user admin from 121.227.153.123 port 36570
Oct 11 02:30:52 compute-0 sshd-session[439486]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:30:52 compute-0 sshd-session[439486]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:30:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1587: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:53 compute-0 ceph-mon[191930]: pgmap v1587: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:54 compute-0 nova_compute[356901]: 2025-10-11 02:30:54.293 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1588: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:54 compute-0 sshd-session[439486]: Failed password for invalid user admin from 121.227.153.123 port 36570 ssh2
Oct 11 02:30:54 compute-0 nova_compute[356901]: 2025-10-11 02:30:54.666 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:54 compute-0 ceph-mon[191930]: pgmap v1588: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:30:54.858 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:30:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:30:54.859 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:30:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:30:54.860 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:30:55 compute-0 sshd-session[439486]: Connection closed by invalid user admin 121.227.153.123 port 36570 [preauth]
Oct 11 02:30:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:30:55 compute-0 nova_compute[356901]: 2025-10-11 02:30:55.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:30:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1589: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:30:56
Oct 11 02:30:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:30:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:30:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['images', 'cephfs.cephfs.meta', 'vms', 'default.rgw.meta', 'backups', 'volumes', 'default.rgw.log', '.mgr', '.rgw.root', 'cephfs.cephfs.data', 'default.rgw.control']
Oct 11 02:30:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:30:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:30:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:30:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:30:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:30:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:30:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:30:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:30:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:30:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:30:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:30:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:30:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:30:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:30:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:30:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:30:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:30:57 compute-0 podman[439492]: 2025-10-11 02:30:57.207108465 +0000 UTC m=+0.092921606 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:30:57 compute-0 podman[439491]: 2025-10-11 02:30:57.217937177 +0000 UTC m=+0.103989844 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, com.redhat.component=ubi9-minimal-container, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, container_name=openstack_network_exporter, vcs-type=git, io.openshift.expose-services=, version=9.6, release=1755695350, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., name=ubi9-minimal, distribution-scope=public, build-date=2025-08-20T13:12:41, managed_by=edpm_ansible, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.33.7, url=https://catalog.redhat.com/en/search?searchType=containers, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.openshift.tags=minimal rhel9, vendor=Red Hat, Inc.)
Oct 11 02:30:57 compute-0 podman[439490]: 2025-10-11 02:30:57.231824012 +0000 UTC m=+0.125854294 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_id=edpm, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 02:30:57 compute-0 ceph-mon[191930]: pgmap v1589: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:57 compute-0 sshd-session[439488]: Invalid user admin from 121.227.153.123 port 36578
Oct 11 02:30:57 compute-0 nova_compute[356901]: 2025-10-11 02:30:57.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:30:57 compute-0 nova_compute[356901]: 2025-10-11 02:30:57.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_incomplete_migrations run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:30:57 compute-0 nova_compute[356901]: 2025-10-11 02:30:57.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances with incomplete migration  _cleanup_incomplete_migrations /usr/lib/python3.9/site-packages/nova/compute/manager.py:11183
Oct 11 02:30:58 compute-0 sshd-session[439488]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:30:58 compute-0 sshd-session[439488]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:30:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1590: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:58 compute-0 ceph-mon[191930]: pgmap v1590: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:30:58 compute-0 nova_compute[356901]: 2025-10-11 02:30:58.910 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:30:58 compute-0 nova_compute[356901]: 2025-10-11 02:30:58.911 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:30:59 compute-0 nova_compute[356901]: 2025-10-11 02:30:59.296 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:59 compute-0 nova_compute[356901]: 2025-10-11 02:30:59.668 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:30:59 compute-0 podman[157119]: time="2025-10-11T02:30:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:30:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:30:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:30:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:30:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9063 "" "Go-http-client/1.1"
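These podman[157119] lines are the service-side access log of the libpod REST API: the exporter (CONTAINER_HOST=unix:///run/podman/podman.sock in the podman_exporter config above) lists all containers, then pulls their stats. A minimal sketch of the same containers/json query over the UNIX socket, standard library only; the socket path and API version are taken from these lines, and the HTTP/1.0 request is a simplification so the reply arrives unchunked:

    import json
    import socket

    SOCK = "/run/podman/podman.sock"                   # CONTAINER_HOST from the config above
    PATH = "/v4.9.3/libpod/containers/json?all=true"   # endpoint seen in the access log

    def libpod_get(path):
        """Raw HTTP GET over the podman UNIX socket; returns the response body."""
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s.connect(SOCK)
        # HTTP/1.0: the server closes the connection and does not chunk the body.
        s.sendall(f"GET {path} HTTP/1.0\r\nHost: d\r\n\r\n".encode())
        data = b""
        while chunk := s.recv(65536):
            data += chunk
        s.close()
        return data.partition(b"\r\n\r\n")[2]

    # containers = json.loads(libpod_get(PATH))  # one dict per container, as in the 200 reply above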
Oct 11 02:30:59 compute-0 sshd-session[439488]: Failed password for invalid user admin from 121.227.153.123 port 36578 ssh2
Oct 11 02:31:00 compute-0 nova_compute[356901]: 2025-10-11 02:31:00.014 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-358d31cf-2866-416a-b2fc-814ee4bfe89a" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:31:00 compute-0 nova_compute[356901]: 2025-10-11 02:31:00.015 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-358d31cf-2866-416a-b2fc-814ee4bfe89a" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:31:00 compute-0 nova_compute[356901]: 2025-10-11 02:31:00.015 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:31:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1591: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:31:01 compute-0 openstack_network_exporter[374316]: ERROR   02:31:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:31:01 compute-0 openstack_network_exporter[374316]: ERROR   02:31:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:31:01 compute-0 openstack_network_exporter[374316]: ERROR   02:31:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:31:01 compute-0 openstack_network_exporter[374316]: ERROR   02:31:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:31:01 compute-0 openstack_network_exporter[374316]: ERROR   02:31:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:31:01 compute-0 sshd-session[439488]: Connection closed by invalid user admin 121.227.153.123 port 36578 [preauth]
Oct 11 02:31:01 compute-0 ceph-mon[191930]: pgmap v1591: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1592: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:02 compute-0 nova_compute[356901]: 2025-10-11 02:31:02.871 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Updating instance_info_cache with network_info: [{"id": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "address": "fa:16:3e:b0:ca:41", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.152", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.173", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap7f4342b0-8a", "ovs_interfaceid": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:31:02 compute-0 nova_compute[356901]: 2025-10-11 02:31:02.903 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-358d31cf-2866-416a-b2fc-814ee4bfe89a" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:31:02 compute-0 nova_compute[356901]: 2025-10-11 02:31:02.904 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
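The network_info blob logged at the cache refresh is ordinary JSON once extracted; the fixed and floating addresses sit under network.subnets[].ips[]. A short sketch pulling them out of one VIF entry, trimmed here to just the fields it uses:

    # VIF dict as logged by nova above, reduced to the address fields.
    vif = {
        "network": {"subnets": [{"ips": [{
            "address": "192.168.0.152",
            "floating_ips": [{"address": "192.168.122.173"}],
        }]}]},
    }

    def addresses(vif):
        """Yield (fixed, [floating, ...]) pairs from one nova network_info VIF."""
        for subnet in vif["network"]["subnets"]:
            for ip in subnet["ips"]:
                yield ip["address"], [f["address"] for f in ip.get("floating_ips", [])]

    print(list(addresses(vif)))  # [('192.168.0.152', ['192.168.122.173'])]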
Oct 11 02:31:02 compute-0 nova_compute[356901]: 2025-10-11 02:31:02.906 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:31:02 compute-0 nova_compute[356901]: 2025-10-11 02:31:02.907 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:31:02 compute-0 nova_compute[356901]: 2025-10-11 02:31:02.908 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:31:02 compute-0 nova_compute[356901]: 2025-10-11 02:31:02.908 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:31:02 compute-0 nova_compute[356901]: 2025-10-11 02:31:02.909 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:31:02 compute-0 nova_compute[356901]: 2025-10-11 02:31:02.958 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:31:02 compute-0 nova_compute[356901]: 2025-10-11 02:31:02.958 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:31:02 compute-0 nova_compute[356901]: 2025-10-11 02:31:02.959 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
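The Acquiring/acquired/released triplets, with their waited and held durations, come from oslo.concurrency's lockutils wrapper ("inner" in the file/line reference) around methods that must serialize on the compute_resources lock. A minimal sketch of the same pattern; lockutils.synchronized is the real oslo API, while the decorated function is an illustrative stand-in:

    from oslo_concurrency import lockutils

    # Entering and leaving the wrapper is what emits the
    # Acquiring / acquired (waited N s) / released (held N s) DEBUG lines above.
    @lockutils.synchronized("compute_resources")
    def clean_compute_node_cache():
        """Illustrative stand-in: body runs with the compute_resources lock held."""
        ...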
Oct 11 02:31:02 compute-0 nova_compute[356901]: 2025-10-11 02:31:02.959 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:31:02 compute-0 nova_compute[356901]: 2025-10-11 02:31:02.960 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:31:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:31:03 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2119217519' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:31:03 compute-0 ceph-mon[191930]: pgmap v1592: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:03 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2119217519' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:31:03 compute-0 nova_compute[356901]: 2025-10-11 02:31:03.544 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.584s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
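The resource audit shells out to the ceph CLI: oslo_concurrency.processutils logs the command and its 0.584 s round trip, and ceph-mon logs the matching "df" mon_command dispatch from client.openstack. A minimal sketch of the same probe, assuming the ceph CLI and the client.openstack keyring are present on the host:

    import json
    import subprocess

    def ceph_pool_stats(conf="/etc/ceph/ceph.conf", client="openstack"):
        """Run the same `ceph df` call as above and return (total, avail) in bytes."""
        out = subprocess.run(
            ["ceph", "df", "--format=json", "--id", client, "--conf", conf],
            capture_output=True, check=True, text=True,
        ).stdout
        stats = json.loads(out)["stats"]
        return stats["total_bytes"], stats["total_avail_bytes"]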
Oct 11 02:31:03 compute-0 nova_compute[356901]: 2025-10-11 02:31:03.643 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:31:03 compute-0 nova_compute[356901]: 2025-10-11 02:31:03.644 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:31:03 compute-0 nova_compute[356901]: 2025-10-11 02:31:03.644 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:31:03 compute-0 nova_compute[356901]: 2025-10-11 02:31:03.648 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:31:03 compute-0 nova_compute[356901]: 2025-10-11 02:31:03.649 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:31:03 compute-0 nova_compute[356901]: 2025-10-11 02:31:03.649 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000003 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:31:03 compute-0 nova_compute[356901]: 2025-10-11 02:31:03.653 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:31:03 compute-0 nova_compute[356901]: 2025-10-11 02:31:03.654 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:31:03 compute-0 nova_compute[356901]: 2025-10-11 02:31:03.654 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:31:04 compute-0 nova_compute[356901]: 2025-10-11 02:31:04.077 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:31:04 compute-0 nova_compute[356901]: 2025-10-11 02:31:04.078 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3417MB free_disk=59.88886642456055GB free_vcpus=5 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:31:04 compute-0 nova_compute[356901]: 2025-10-11 02:31:04.078 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:31:04 compute-0 nova_compute[356901]: 2025-10-11 02:31:04.079 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:31:04 compute-0 nova_compute[356901]: 2025-10-11 02:31:04.286 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:31:04 compute-0 nova_compute[356901]: 2025-10-11 02:31:04.288 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 7513b93e-d2b8-4ae0-8f1c-3df190945259 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:31:04 compute-0 nova_compute[356901]: 2025-10-11 02:31:04.290 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 358d31cf-2866-416a-b2fc-814ee4bfe89a actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:31:04 compute-0 nova_compute[356901]: 2025-10-11 02:31:04.290 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 3 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:31:04 compute-0 nova_compute[356901]: 2025-10-11 02:31:04.291 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=2048MB phys_disk=59GB used_disk=6GB total_vcpus=8 used_vcpus=3 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
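The final resource view squares with the three placement allocations logged just above (each {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}) and with the reserved values in the inventory reported a moment later: used_ram counts the 512 MB reservation plus the instances, while used_disk and used_vcpus are plain sums. A worked check of that reading (the summation rule is an assumption inferred from the logged numbers):

    # Worked check of the Hypervisor/Final resource view figures above.
    allocations = [{"DISK_GB": 2, "MEMORY_MB": 512, "VCPU": 1}] * 3
    reserved_mb, total_vcpus = 512, 8          # from the inventory and resource view lines

    used_ram   = reserved_mb + sum(a["MEMORY_MB"] for a in allocations)  # 2048 MB
    used_vcpus = sum(a["VCPU"] for a in allocations)                     # 3
    used_disk  = sum(a["DISK_GB"] for a in allocations)                  # 6 GB
    free_vcpus = total_vcpus - used_vcpus                                # 5
    assert (used_ram, used_vcpus, used_disk, free_vcpus) == (2048, 3, 6, 5)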
Oct 11 02:31:04 compute-0 nova_compute[356901]: 2025-10-11 02:31:04.298 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1593: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:04 compute-0 sshd-session[439553]: Invalid user admin from 121.227.153.123 port 51806
Oct 11 02:31:04 compute-0 nova_compute[356901]: 2025-10-11 02:31:04.497 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:31:04 compute-0 podman[439577]: 2025-10-11 02:31:04.611820862 +0000 UTC m=+0.123214973 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, container_name=kepler, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, name=ubi9, release=1214.1726694543, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, distribution-scope=public, io.buildah.version=1.29.0, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.4, vcs-type=git, maintainer=Red Hat, Inc., summary=Provides the latest release of Red Hat Universal Base Image 9., release-0.7.12=, io.openshift.expose-services=, com.redhat.component=ubi9-container, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2024-09-18T21:23:30, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.tags=base rhel9, vendor=Red Hat, Inc.)
Oct 11 02:31:04 compute-0 nova_compute[356901]: 2025-10-11 02:31:04.671 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:04 compute-0 sshd-session[439553]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:31:04 compute-0 sshd-session[439553]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:31:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:31:05 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/980996505' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:31:05 compute-0 nova_compute[356901]: 2025-10-11 02:31:05.050 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.553s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:31:05 compute-0 nova_compute[356901]: 2025-10-11 02:31:05.061 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:31:05 compute-0 nova_compute[356901]: 2025-10-11 02:31:05.086 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:31:05 compute-0 nova_compute[356901]: 2025-10-11 02:31:05.089 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:31:05 compute-0 nova_compute[356901]: 2025-10-11 02:31:05.089 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 1.010s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:31:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:31:05 compute-0 ceph-mon[191930]: pgmap v1593: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:05 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/980996505' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:31:06 compute-0 nova_compute[356901]: 2025-10-11 02:31:06.079 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1594: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:06 compute-0 ceph-mon[191930]: pgmap v1594: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0016572374365110374 of space, bias 1.0, pg target 0.4971712309533112 quantized to 32 (current 32)
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:31:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
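Each pg_autoscaler line multiplies the pool's share of raw space by its bias and by a cluster-wide PG budget, then quantizes toward a power of two (note the damped shrink: a target of 0.0006 only steps cephfs.cephfs.meta down from 32 to 16 in this pass). With these numbers the budget works out to exactly 300 PGs, consistent with the default 100 PGs per OSD on a 3-OSD cluster, though that split is an inference from the logged values. A worked check against two of the lines above:

    # pg target = used_fraction * bias * pg_budget, before quantization.
    # PG_BUDGET = 300 is inferred from the logged values
    # (e.g. mon_target_pg_per_osd=100 * 3 OSDs).
    PG_BUDGET = 300

    for pool, used, bias, logged in [
        ("vms",                0.0016572374365110374, 1.0, 0.4971712309533112),
        ("cephfs.cephfs.meta", 5.087256625643029e-07, 4.0, 0.0006104707950771635),
    ]:
        target = used * bias * PG_BUDGET
        assert abs(target - logged) < 1e-12, pool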
Oct 11 02:31:07 compute-0 sshd-session[439553]: Failed password for invalid user admin from 121.227.153.123 port 51806 ssh2
Oct 11 02:31:08 compute-0 sshd-session[439553]: Connection closed by invalid user admin 121.227.153.123 port 51806 [preauth]
Oct 11 02:31:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1595: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:09 compute-0 nova_compute[356901]: 2025-10-11 02:31:09.301 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:09 compute-0 sshd-session[439620]: Invalid user admin from 121.227.153.123 port 51812
Oct 11 02:31:09 compute-0 ceph-mon[191930]: pgmap v1595: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:31:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3000.0 total, 600.0 interval
                                            Cumulative writes: 7292 writes, 32K keys, 7292 commit groups, 1.0 writes per commit group, ingest: 0.04 GB, 0.02 MB/s
                                            Cumulative WAL: 7292 writes, 7292 syncs, 1.00 writes per sync, written: 0.04 GB, 0.02 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 1318 writes, 5977 keys, 1318 commit groups, 1.0 writes per commit group, ingest: 8.55 MB, 0.01 MB/s
                                            Interval WAL: 1318 writes, 1318 syncs, 1.00 writes per sync, written: 0.01 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0    103.9      0.38              0.17        19    0.020       0      0       0.0       0.0
                                              L6      1/0    8.46 MB   0.0      0.2     0.0      0.1       0.1      0.0       0.0   3.3    173.2    140.0      0.93              0.59        18    0.052     86K    10K       0.0       0.0
                                             Sum      1/0    8.46 MB   0.0      0.2     0.0      0.1       0.2      0.0       0.0   4.3    123.1    129.6      1.30              0.76        37    0.035     86K    10K       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   4.4    108.3    112.2      0.35              0.18         8    0.044     22K   2532       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Low      0/0    0.00 KB   0.0      0.2     0.0      0.1       0.1      0.0       0.0   0.0    173.2    140.0      0.93              0.59        18    0.052     86K    10K       0.0       0.0
                                            High      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0    104.9      0.37              0.17        18    0.021       0      0       0.0       0.0
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0     12.0      0.00              0.00         1    0.004       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 3000.0 total, 600.0 interval
                                            Flush(GB): cumulative 0.038, interval 0.009
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.17 GB write, 0.06 MB/s write, 0.16 GB read, 0.05 MB/s read, 1.3 seconds
                                            Interval compaction: 0.04 GB write, 0.07 MB/s write, 0.04 GB read, 0.06 MB/s read, 0.4 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x55816e47f1f0#2 capacity: 308.00 MB usage: 20.14 MB table_size: 0 occupancy: 18446744073709551615 collections: 6 last_copies: 0 last_secs: 0.000294 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1282,19.46 MB,6.31835%) FilterBlock(38,246.92 KB,0.0782905%) IndexBlock(38,447.83 KB,0.141991%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
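The dump above gives enough to estimate write amplification: ~0.04 GB ingested via the WAL, ~0.038 GB flushed to L0, and 0.17 GB written by compaction. Under the usual definition (total bytes written to storage over bytes ingested) — an interpretation applied to these figures, not something RocksDB reports directly:

ingest_gb = 0.04            # "Cumulative writes ... ingest: 0.04 GB"
flush_gb = 0.038            # "Flush(GB): cumulative 0.038"
compaction_write_gb = 0.17  # "Cumulative compaction: 0.17 GB write"
write_amp = (flush_gb + compaction_write_gb) / ingest_gb
print(f"approx write amplification: {write_amp:.1f}x")  # ~5.2x

Roughly 5x is unremarkable for a small leveled store periodically compacting into L6.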
Oct 11 02:31:09 compute-0 sshd-session[439620]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:31:09 compute-0 sshd-session[439620]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:31:09 compute-0 nova_compute[356901]: 2025-10-11 02:31:09.675 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1596: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:31:11 compute-0 ceph-mon[191930]: pgmap v1596: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:11 compute-0 sshd-session[439620]: Failed password for invalid user admin from 121.227.153.123 port 51812 ssh2
Oct 11 02:31:11 compute-0 nova_compute[356901]: 2025-10-11 02:31:11.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_expired_console_auth_tokens run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:31:12 compute-0 podman[439625]: 2025-10-11 02:31:12.219758321 +0000 UTC m=+0.091329944 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent)
Oct 11 02:31:12 compute-0 podman[439622]: 2025-10-11 02:31:12.247136995 +0000 UTC m=+0.139334558 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:31:12 compute-0 podman[439623]: 2025-10-11 02:31:12.252642182 +0000 UTC m=+0.149458617 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']})
Oct 11 02:31:12 compute-0 podman[439624]: 2025-10-11 02:31:12.262935397 +0000 UTC m=+0.142934931 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
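All four health checks above come back healthy with a zero failing streak. To catch the unhealthy case without tailing the journal, something like the following could follow podman's own event stream; the event filter and JSON field names are assumptions about this podman version:

import json, subprocess

proc = subprocess.Popen(
    ["podman", "events", "--filter", "event=health_status",
     "--format", "json"],
    stdout=subprocess.PIPE, text=True)
for line in proc.stdout:                     # one JSON object per event
    ev = json.loads(line)
    name = ev.get("Name") or ev.get("Attributes", {}).get("name", "?")
    status = ev.get("HealthStatus", ev.get("health_status", "?"))
    if status != "healthy":
        print(f"{name}: {status}")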
Oct 11 02:31:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1597: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:13 compute-0 sshd-session[439620]: Connection closed by invalid user admin 121.227.153.123 port 51812 [preauth]
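That closes the second "invalid user admin" attempt from 121.227.153.123 in under ten seconds; a third arrives at 02:31:14 below. A quick way to surface this pattern from a capture like this file — the log path and threshold are placeholders:

import re
from collections import Counter

FAILED = re.compile(
    r"Failed password for invalid user (?P<user>\S+) "
    r"from (?P<ip>\S+) port \d+")

hits = Counter()
with open("/var/log/messages") as f:   # placeholder path
    for line in f:
        m = FAILED.search(line)
        if m:
            hits[m.group("ip")] += 1

for ip, n in hits.most_common():
    if n >= 3:                         # arbitrary alert threshold
        print(f"{ip}: {n} failed 'invalid user' attempts")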
Oct 11 02:31:13 compute-0 ceph-mon[191930]: pgmap v1597: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.865 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads available to execute them. Therefore, one can expect the polling process to take longer than expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.865 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
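The warning above is purely about scheduling: with one worker thread, the pollsters registered below run strictly in sequence, so the cycle time is the sum of the individual poll times rather than their maximum. A toy illustration:

from concurrent.futures import ThreadPoolExecutor
import time

def poll(name):
    time.sleep(0.1)            # stand-in for one pollster's work
    return name

start = time.monotonic()
with ThreadPoolExecutor(max_workers=1) as pool:   # mirrors "[1] threads"
    results = list(pool.map(poll, ["net.in", "net.out", "disk.read"]))
print(results, f"{time.monotonic() - start:.2f}s")  # ~0.3s, not ~0.1s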
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.865 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.866 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.878 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '358d31cf-2866-416a-b2fc-814ee4bfe89a', 'name': 'vn-vgckve2-tqko7trrsvwg-ebwakep2a2y3-vnf-ihxi227vdpwh', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000004', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {'metering.server_group': '3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.883 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '7513b93e-d2b8-4ae0-8f1c-3df190945259', 'name': 'vn-vgckve2-djjfpphdsuuh-gthznuj2xct2-vnf-jmvtgw3mflyn', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000003', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {'metering.server_group': '3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.889 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.890 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.890 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.891 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.891 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.892 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:31:13.891599) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.899 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.incoming.bytes volume: 1870 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.906 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.bytes volume: 1996 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.912 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2520 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.913 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
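Note that the polled volumes above (1870, 1996, 2520 bytes) are cumulative interface counters, not rates; downstream consumers derive bytes/s by differencing successive samples. A minimal sketch, assuming a fixed polling interval and a simple counter-reset heuristic:

def rate(prev_volume, curr_volume, interval_s):
    delta = curr_volume - prev_volume
    if delta < 0:               # counter reset, e.g. instance reboot
        delta = curr_volume
    return delta / interval_s

# e.g. two polls of network.incoming.bytes 30 s apart:
print(rate(1870, 2470, 30.0))   # 20.0 bytes/s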
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.913 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.913 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.914 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.914 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.914 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.914 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.outgoing.packets volume: 22 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.915 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.packets volume: 23 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.916 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 23 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.916 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:31:13.914659) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.917 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.918 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.918 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.918 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.918 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.918 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.919 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.919 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.920 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.921 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.921 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.922 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.922 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:31:13.918793) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.922 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.922 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.922 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.922 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.923 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.924 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:31:13.922648) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.924 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.925 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.925 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.926 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.926 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.926 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.927 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.928 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:31:13.926982) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.966 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.967 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.968 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.capacity volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.996 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.997 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:13.997 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.capacity volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.036 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.038 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.039 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.040 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.041 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.041 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.042 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.042 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.043 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.043 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:31:14.042869) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.110 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.111 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.111 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.bytes volume: 385378 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.169 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.170 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.171 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.bytes volume: 385378 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 sshd-session[439701]: Invalid user admin from 121.227.153.123 port 57256
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.245 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.246 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.247 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.248 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.248 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.248 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.248 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.249 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.249 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.249 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.latency volume: 1845147961 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.250 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.latency volume: 292571291 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.250 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.latency volume: 162750190 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.251 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.latency volume: 1696814304 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.252 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.latency volume: 210864290 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.252 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.latency volume: 178724423 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.252 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:31:14.249216) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.253 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.253 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.253 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.255 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.256 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.256 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.257 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.257 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.257 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.258 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.258 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.259 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:31:14.257686) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.259 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.read.requests volume: 124 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.260 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.260 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.261 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.read.requests volume: 124 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.261 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.263 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.263 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.264 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.264 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.265 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.265 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.265 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.265 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.266 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.266 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.267 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.usage volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.267 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.267 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.268 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.usage volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.268 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.269 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.270 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.271 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.271 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.271 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.271 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.271 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:31:14.265799) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.271 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.272 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.272 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.bytes volume: 41779200 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.272 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.272 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.273 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.bytes volume: 41779200 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.273 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.273 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.273 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.274 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.274 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.275 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.275 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.275 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.275 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.276 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.275 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:31:14.271967) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.276 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.276 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.latency volume: 7286997145 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.276 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.latency volume: 24741980 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.276 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.277 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.latency volume: 6089609601 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.277 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.latency volume: 25967717 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.277 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.278 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.278 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.278 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.279 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.279 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.279 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.280 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.280 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.280 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:31:14.276107) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.280 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.281 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:31:14.280397) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 nova_compute[356901]: 2025-10-11 02:31:14.303 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.315 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.345 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.372 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.373 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.373 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.373 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.373 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.373 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.374 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.374 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.374 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.374 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.375 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.375 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.375 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.376 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.376 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.376 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:31:14.373927) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.377 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.377 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.377 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.378 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.378 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.378 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.378 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.378 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:31:14.378431) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.378 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.incoming.bytes.delta volume: 84 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.379 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.bytes.delta volume: 84 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.379 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 84 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.380 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.380 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.380 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.380 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.380 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.380 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.381 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.381 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.381 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:31:14.381105) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.382 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.382 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.382 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.382 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.382 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.382 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.382 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.incoming.packets volume: 16 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.383 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.packets volume: 19 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.383 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:31:14.382704) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.383 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 25 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.384 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.384 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.384 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.384 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.384 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.384 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.385 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.outgoing.bytes.delta volume: 70 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.385 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.385 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:31:14.384931) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.385 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.386 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.386 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.386 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.386 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.387 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.387 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:31:14.387176) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.387 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.388 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.388 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.388 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.389 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.389 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.389 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.389 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.390 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.390 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.391 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.391 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:31:14.389325) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.391 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.391 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.391 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.391 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.392 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.392 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:31:14.392053) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.392 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.392 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.393 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/disk.device.allocation volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.393 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.393 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.394 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/disk.device.allocation volume: 583680 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.394 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.394 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.395 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.395 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.395 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.395 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.396 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.396 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.396 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1598: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.396 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.396 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.397 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:31:14.396273) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.397 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.398 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.398 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.398 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.398 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.398 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.399 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.399 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/cpu volume: 38680000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.399 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:31:14.399132) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.399 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/cpu volume: 39380000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.400 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 44510000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.400 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.401 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.401 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.401 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.401 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.401 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.401 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/network.outgoing.bytes volume: 2356 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.401 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/network.outgoing.bytes volume: 2398 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.402 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2342 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.402 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.403 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.403 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.403 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.403 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.403 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.403 14 DEBUG ceilometer.compute.pollsters [-] 358d31cf-2866-416a-b2fc-814ee4bfe89a/memory.usage volume: 48.98046875 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.404 14 DEBUG ceilometer.compute.pollsters [-] 7513b93e-d2b8-4ae0-8f1c-3df190945259/memory.usage volume: 48.92578125 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.404 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.83984375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.404 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.405 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:31:14.401473) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.405 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.405 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:31:14.403596) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.405 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.405 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.406 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.406 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.406 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.406 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.407 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.407 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.407 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.407 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.407 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.408 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.408 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.408 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.408 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.408 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.408 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.409 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.409 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.409 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.409 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.409 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.409 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.410 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.410 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.410 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:31:14.410 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:31:14 compute-0 sshd-session[439701]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:31:14 compute-0 sshd-session[439701]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:31:14 compute-0 nova_compute[356901]: 2025-10-11 02:31:14.677 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:15 compute-0 ceph-mon[191930]: pgmap v1598: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:31:16 compute-0 sshd-session[439701]: Failed password for invalid user admin from 121.227.153.123 port 57256 ssh2
Oct 11 02:31:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1599: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:17 compute-0 podman[439705]: 2025-10-11 02:31:17.267633986 +0000 UTC m=+0.147888443 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_id=iscsid, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, tcib_managed=true, io.buildah.version=1.41.3)
Oct 11 02:31:17 compute-0 podman[439704]: 2025-10-11 02:31:17.270725574 +0000 UTC m=+0.156827788 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, config_id=multipathd, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=multipathd, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:31:17 compute-0 ceph-mon[191930]: pgmap v1599: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:17 compute-0 sshd-session[439701]: Connection closed by invalid user admin 121.227.153.123 port 57256 [preauth]
Oct 11 02:31:17 compute-0 nova_compute[356901]: 2025-10-11 02:31:17.915 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._run_pending_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:31:17 compute-0 nova_compute[356901]: 2025-10-11 02:31:17.916 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11145
Oct 11 02:31:17 compute-0 nova_compute[356901]: 2025-10-11 02:31:17.948 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] There are 0 instances to clean _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11154
Oct 11 02:31:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1600: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:19 compute-0 sshd-session[439744]: Invalid user admin from 121.227.153.123 port 57262
Oct 11 02:31:19 compute-0 nova_compute[356901]: 2025-10-11 02:31:19.306 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:19 compute-0 ceph-mon[191930]: pgmap v1600: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:19 compute-0 sshd-session[439744]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:31:19 compute-0 sshd-session[439744]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:31:19 compute-0 nova_compute[356901]: 2025-10-11 02:31:19.681 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1601: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:31:20 compute-0 sshd-session[439744]: Failed password for invalid user admin from 121.227.153.123 port 57262 ssh2
Oct 11 02:31:21 compute-0 sshd-session[439744]: Connection closed by invalid user admin 121.227.153.123 port 57262 [preauth]
Oct 11 02:31:21 compute-0 ceph-mon[191930]: pgmap v1601: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1602: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:22 compute-0 sshd-session[439747]: Invalid user admin from 121.227.153.123 port 33332
Oct 11 02:31:22 compute-0 sshd-session[439747]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:31:22 compute-0 sshd-session[439747]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:31:23 compute-0 ceph-mon[191930]: pgmap v1602: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:24 compute-0 nova_compute[356901]: 2025-10-11 02:31:24.309 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1603: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:24 compute-0 sshd-session[439747]: Failed password for invalid user admin from 121.227.153.123 port 33332 ssh2
Oct 11 02:31:24 compute-0 nova_compute[356901]: 2025-10-11 02:31:24.685 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:25 compute-0 ceph-mon[191930]: pgmap v1603: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:31:26 compute-0 sshd-session[439747]: Connection closed by invalid user admin 121.227.153.123 port 33332 [preauth]
Oct 11 02:31:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1604: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:31:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:31:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:31:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:31:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:31:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:31:27 compute-0 ceph-mon[191930]: pgmap v1604: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:27 compute-0 sshd-session[439749]: Invalid user admin from 121.227.153.123 port 33348
Oct 11 02:31:27 compute-0 podman[439753]: 2025-10-11 02:31:27.778132382 +0000 UTC m=+0.109103273 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:31:27 compute-0 podman[439752]: 2025-10-11 02:31:27.794881851 +0000 UTC m=+0.134657854 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, name=ubi9-minimal, build-date=2025-08-20T13:12:41, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, com.redhat.component=ubi9-minimal-container, distribution-scope=public, io.buildah.version=1.33.7, io.openshift.tags=minimal rhel9, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=openstack_network_exporter, vcs-type=git, maintainer=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., version=9.6, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, vendor=Red Hat, Inc., io.openshift.expose-services=, managed_by=edpm_ansible)
Oct 11 02:31:27 compute-0 podman[439751]: 2025-10-11 02:31:27.802600432 +0000 UTC m=+0.158815275 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:31:27 compute-0 sshd-session[439749]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:31:27 compute-0 sshd-session[439749]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:31:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1605: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:28 compute-0 ceph-mon[191930]: pgmap v1605: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:29 compute-0 nova_compute[356901]: 2025-10-11 02:31:29.313 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:29 compute-0 nova_compute[356901]: 2025-10-11 02:31:29.689 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:29 compute-0 podman[157119]: time="2025-10-11T02:31:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:31:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:31:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:31:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:31:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9062 "" "Go-http-client/1.1"
Oct 11 02:31:30 compute-0 sshd-session[439749]: Failed password for invalid user admin from 121.227.153.123 port 33348 ssh2
Oct 11 02:31:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1606: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:31:31 compute-0 openstack_network_exporter[374316]: ERROR   02:31:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:31:31 compute-0 openstack_network_exporter[374316]: ERROR   02:31:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:31:31 compute-0 openstack_network_exporter[374316]: ERROR   02:31:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:31:31 compute-0 openstack_network_exporter[374316]: ERROR   02:31:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:31:31 compute-0 openstack_network_exporter[374316]: ERROR   02:31:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:31:31 compute-0 sshd-session[439749]: Connection closed by invalid user admin 121.227.153.123 port 33348 [preauth]
Oct 11 02:31:31 compute-0 ceph-mon[191930]: pgmap v1606: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1607: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:32 compute-0 sshd-session[439811]: Invalid user admin from 121.227.153.123 port 45346
Oct 11 02:31:32 compute-0 sshd-session[439811]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:31:32 compute-0 sshd-session[439811]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:31:33 compute-0 ceph-mon[191930]: pgmap v1607: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:34 compute-0 nova_compute[356901]: 2025-10-11 02:31:34.317 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1608: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:34 compute-0 nova_compute[356901]: 2025-10-11 02:31:34.692 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:34 compute-0 sshd-session[439811]: Failed password for invalid user admin from 121.227.153.123 port 45346 ssh2
Oct 11 02:31:35 compute-0 podman[439813]: 2025-10-11 02:31:35.230649996 +0000 UTC m=+0.106912220 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, io.openshift.expose-services=, name=ubi9, config_id=edpm, vendor=Red Hat, Inc., version=9.4, architecture=x86_64, container_name=kepler, io.openshift.tags=base rhel9, build-date=2024-09-18T21:23:30, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.k8s.display-name=Red Hat Universal Base Image 9, maintainer=Red Hat, Inc., release=1214.1726694543, release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, distribution-scope=public, io.buildah.version=1.29.0, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-type=git)
Oct 11 02:31:35 compute-0 ceph-mon[191930]: pgmap v1608: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:31:36 compute-0 sshd-session[439811]: Connection closed by invalid user admin 121.227.153.123 port 45346 [preauth]
Oct 11 02:31:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1609: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:37 compute-0 sshd-session[439833]: Invalid user admin from 121.227.153.123 port 45348
Oct 11 02:31:37 compute-0 ceph-mon[191930]: pgmap v1609: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:37 compute-0 sshd-session[439833]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:31:37 compute-0 sshd-session[439833]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:31:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1610: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:38 compute-0 sudo[439835]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:31:38 compute-0 sudo[439835]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:38 compute-0 sudo[439835]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:38 compute-0 sudo[439860]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:31:38 compute-0 sudo[439860]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:38 compute-0 sudo[439860]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:38 compute-0 sudo[439885]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:31:38 compute-0 sudo[439885]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:38 compute-0 sudo[439885]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:38 compute-0 sudo[439910]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:31:38 compute-0 sudo[439910]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:39 compute-0 nova_compute[356901]: 2025-10-11 02:31:39.319 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:39 compute-0 ceph-mon[191930]: pgmap v1610: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:39 compute-0 sshd-session[439833]: Failed password for invalid user admin from 121.227.153.123 port 45348 ssh2
Oct 11 02:31:39 compute-0 sudo[439910]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:39 compute-0 nova_compute[356901]: 2025-10-11 02:31:39.694 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:31:39 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:31:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:31:39 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:31:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:31:39 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:31:39 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev dde6fad1-eb74-4cbb-903a-8b8c7b8d3de1 does not exist
Oct 11 02:31:39 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 70a3853e-3944-4136-8019-02503f2c2ad1 does not exist
Oct 11 02:31:39 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 73d76034-e106-4dd0-a5fa-3bc64f1e934d does not exist
Oct 11 02:31:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:31:39 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:31:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:31:39 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:31:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:31:39 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:31:39 compute-0 sudo[439965]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:31:39 compute-0 sudo[439965]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:39 compute-0 sudo[439965]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:39 compute-0 sudo[439990]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:31:39 compute-0 sudo[439990]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:39 compute-0 sudo[439990]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:40 compute-0 sudo[440015]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:31:40 compute-0 sudo[440015]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:40 compute-0 sudo[440015]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:40 compute-0 sudo[440040]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:31:40 compute-0 sudo[440040]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1611: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:31:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:31:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:31:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:31:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:31:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:31:40 compute-0 podman[440104]: 2025-10-11 02:31:40.678361681 +0000 UTC m=+0.063191129 container create e498fff9d97fa85845c5673535f6540f158aca17db20aa29b66e322f4fef633a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_grothendieck, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:31:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #72. Immutable memtables: 0.
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:31:40.726879) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 39] Flushing memtable with next log file: 72
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149900726966, "job": 39, "event": "flush_started", "num_memtables": 1, "num_entries": 1028, "num_deletes": 256, "total_data_size": 1444059, "memory_usage": 1467288, "flush_reason": "Manual Compaction"}
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 39] Level-0 flush table #73: started
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149900742064, "cf_name": "default", "job": 39, "event": "table_file_creation", "file_number": 73, "file_size": 1430227, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 32188, "largest_seqno": 33215, "table_properties": {"data_size": 1425189, "index_size": 2562, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1413, "raw_key_size": 10677, "raw_average_key_size": 19, "raw_value_size": 1415079, "raw_average_value_size": 2554, "num_data_blocks": 114, "num_entries": 554, "num_filter_entries": 554, "num_deletions": 256, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760149801, "oldest_key_time": 1760149801, "file_creation_time": 1760149900, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 73, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 39] Flush lasted 15239 microseconds, and 6899 cpu microseconds.
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:31:40.742134) [db/flush_job.cc:967] [default] [JOB 39] Level-0 flush table #73: 1430227 bytes OK
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:31:40.742156) [db/memtable_list.cc:519] [default] Level-0 commit table #73 started
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:31:40.745472) [db/memtable_list.cc:722] [default] Level-0 commit table #73: memtable #1 done
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:31:40.745485) EVENT_LOG_v1 {"time_micros": 1760149900745481, "job": 39, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:31:40.745503) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 39] Try to delete WAL files size 1439199, prev total WAL file size 1465687, number of live WAL files 2.
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000069.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:31:40.746361) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6C6F676D0031303038' seq:72057594037927935, type:22 .. '6C6F676D0031323630' seq:0, type:0; will stop at (end)
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 40] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 39 Base level 0, inputs: [73(1396KB)], [71(8661KB)]
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149900746407, "job": 40, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [73], "files_L6": [71], "score": -1, "input_data_size": 10299553, "oldest_snapshot_seqno": -1}
Oct 11 02:31:40 compute-0 systemd[1]: Started libpod-conmon-e498fff9d97fa85845c5673535f6540f158aca17db20aa29b66e322f4fef633a.scope.
Oct 11 02:31:40 compute-0 podman[440104]: 2025-10-11 02:31:40.656935418 +0000 UTC m=+0.041764896 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:31:40 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 40] Generated table #74: 5368 keys, 10198564 bytes, temperature: kUnknown
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149900819527, "cf_name": "default", "job": 40, "event": "table_file_creation", "file_number": 74, "file_size": 10198564, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 10160123, "index_size": 23931, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 13445, "raw_key_size": 135274, "raw_average_key_size": 25, "raw_value_size": 10060502, "raw_average_value_size": 1874, "num_data_blocks": 989, "num_entries": 5368, "num_filter_entries": 5368, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760149900, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 74, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:31:40.820898) [db/compaction/compaction_job.cc:1663] [default] [JOB 40] Compacted 1@0 + 1@6 files to L6 => 10198564 bytes
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:31:40.823095) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 138.8 rd, 137.4 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(1.4, 8.5 +0.0 blob) out(9.7 +0.0 blob), read-write-amplify(14.3) write-amplify(7.1) OK, records in: 5892, records dropped: 524 output_compression: NoCompression
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:31:40.823126) EVENT_LOG_v1 {"time_micros": 1760149900823111, "job": 40, "event": "compaction_finished", "compaction_time_micros": 74224, "compaction_time_cpu_micros": 33215, "output_level": 6, "num_output_files": 1, "total_output_size": 10198564, "num_input_records": 5892, "num_output_records": 5368, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000073.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149900823803, "job": 40, "event": "table_file_deletion", "file_number": 73}
Oct 11 02:31:40 compute-0 podman[440104]: 2025-10-11 02:31:40.824122912 +0000 UTC m=+0.208952380 container init e498fff9d97fa85845c5673535f6540f158aca17db20aa29b66e322f4fef633a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_grothendieck, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000071.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149900827552, "job": 40, "event": "table_file_deletion", "file_number": 71}
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:31:40.746139) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:31:40.827799) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:31:40.827809) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:31:40.827812) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:31:40.827814) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:31:40 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:31:40.827817) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:31:40 compute-0 podman[440104]: 2025-10-11 02:31:40.837040151 +0000 UTC m=+0.221869609 container start e498fff9d97fa85845c5673535f6540f158aca17db20aa29b66e322f4fef633a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_grothendieck, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 02:31:40 compute-0 podman[440104]: 2025-10-11 02:31:40.842165698 +0000 UTC m=+0.226995146 container attach e498fff9d97fa85845c5673535f6540f158aca17db20aa29b66e322f4fef633a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_grothendieck, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:31:40 compute-0 suspicious_grothendieck[440120]: 167 167
Oct 11 02:31:40 compute-0 systemd[1]: libpod-e498fff9d97fa85845c5673535f6540f158aca17db20aa29b66e322f4fef633a.scope: Deactivated successfully.
Oct 11 02:31:40 compute-0 podman[440104]: 2025-10-11 02:31:40.848054986 +0000 UTC m=+0.232884474 container died e498fff9d97fa85845c5673535f6540f158aca17db20aa29b66e322f4fef633a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_grothendieck, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:31:40 compute-0 systemd[1]: var-lib-containers-storage-overlay-026995a7338894f48557224422e713c20c78e53298c5d708d0a0540dde29042f-merged.mount: Deactivated successfully.
Oct 11 02:31:40 compute-0 podman[440104]: 2025-10-11 02:31:40.939269816 +0000 UTC m=+0.324099284 container remove e498fff9d97fa85845c5673535f6540f158aca17db20aa29b66e322f4fef633a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_grothendieck, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:31:40 compute-0 systemd[1]: libpod-conmon-e498fff9d97fa85845c5673535f6540f158aca17db20aa29b66e322f4fef633a.scope: Deactivated successfully.
Oct 11 02:31:41 compute-0 podman[440145]: 2025-10-11 02:31:41.176075491 +0000 UTC m=+0.063132157 container create 54a420f466501d6f2b6f7ab28b3ddbbc3e4bd774780652c9d64ec2e7899ebd27 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_mccarthy, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, ceph=True, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:31:41 compute-0 sshd-session[439833]: Connection closed by invalid user admin 121.227.153.123 port 45348 [preauth]
Oct 11 02:31:41 compute-0 systemd[1]: Started libpod-conmon-54a420f466501d6f2b6f7ab28b3ddbbc3e4bd774780652c9d64ec2e7899ebd27.scope.
Oct 11 02:31:41 compute-0 podman[440145]: 2025-10-11 02:31:41.149627055 +0000 UTC m=+0.036683731 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:31:41 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:31:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5637698615b1076beda75aff59217dd0775079689f0b72c8cd77b4a66c70d692/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:31:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5637698615b1076beda75aff59217dd0775079689f0b72c8cd77b4a66c70d692/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:31:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5637698615b1076beda75aff59217dd0775079689f0b72c8cd77b4a66c70d692/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:31:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5637698615b1076beda75aff59217dd0775079689f0b72c8cd77b4a66c70d692/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:31:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5637698615b1076beda75aff59217dd0775079689f0b72c8cd77b4a66c70d692/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:31:41 compute-0 podman[440145]: 2025-10-11 02:31:41.300665376 +0000 UTC m=+0.187722032 container init 54a420f466501d6f2b6f7ab28b3ddbbc3e4bd774780652c9d64ec2e7899ebd27 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_mccarthy, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:31:41 compute-0 podman[440145]: 2025-10-11 02:31:41.336393688 +0000 UTC m=+0.223450354 container start 54a420f466501d6f2b6f7ab28b3ddbbc3e4bd774780652c9d64ec2e7899ebd27 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_mccarthy, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 02:31:41 compute-0 podman[440145]: 2025-10-11 02:31:41.341560826 +0000 UTC m=+0.228617502 container attach 54a420f466501d6f2b6f7ab28b3ddbbc3e4bd774780652c9d64ec2e7899ebd27 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_mccarthy, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:31:41 compute-0 ceph-mon[191930]: pgmap v1611: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1612: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:42 compute-0 sshd-session[440167]: Invalid user admin from 121.227.153.123 port 40648
Oct 11 02:31:42 compute-0 podman[440187]: 2025-10-11 02:31:42.657755583 +0000 UTC m=+0.102178444 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:31:42 compute-0 sleepy_mccarthy[440162]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:31:42 compute-0 sleepy_mccarthy[440162]: --> relative data size: 1.0
Oct 11 02:31:42 compute-0 sleepy_mccarthy[440162]: --> All data devices are unavailable
Oct 11 02:31:42 compute-0 podman[440196]: 2025-10-11 02:31:42.681028929 +0000 UTC m=+0.097501391 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent)
Oct 11 02:31:42 compute-0 podman[440191]: 2025-10-11 02:31:42.692063785 +0000 UTC m=+0.110599326 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:31:42 compute-0 podman[440189]: 2025-10-11 02:31:42.72091313 +0000 UTC m=+0.157460786 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, container_name=ovn_controller, org.label-schema.license=GPLv2, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.build-date=20251009, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller)
Oct 11 02:31:42 compute-0 systemd[1]: libpod-54a420f466501d6f2b6f7ab28b3ddbbc3e4bd774780652c9d64ec2e7899ebd27.scope: Deactivated successfully.
Oct 11 02:31:42 compute-0 systemd[1]: libpod-54a420f466501d6f2b6f7ab28b3ddbbc3e4bd774780652c9d64ec2e7899ebd27.scope: Consumed 1.299s CPU time.
Oct 11 02:31:42 compute-0 podman[440145]: 2025-10-11 02:31:42.732718478 +0000 UTC m=+1.619775134 container died 54a420f466501d6f2b6f7ab28b3ddbbc3e4bd774780652c9d64ec2e7899ebd27 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_mccarthy, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2)
Oct 11 02:31:42 compute-0 systemd[1]: var-lib-containers-storage-overlay-5637698615b1076beda75aff59217dd0775079689f0b72c8cd77b4a66c70d692-merged.mount: Deactivated successfully.
Oct 11 02:31:42 compute-0 sshd-session[440167]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:31:42 compute-0 sshd-session[440167]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:31:42 compute-0 podman[440145]: 2025-10-11 02:31:42.805438279 +0000 UTC m=+1.692494935 container remove 54a420f466501d6f2b6f7ab28b3ddbbc3e4bd774780652c9d64ec2e7899ebd27 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_mccarthy, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:31:42 compute-0 systemd[1]: libpod-conmon-54a420f466501d6f2b6f7ab28b3ddbbc3e4bd774780652c9d64ec2e7899ebd27.scope: Deactivated successfully.
Oct 11 02:31:42 compute-0 sudo[440040]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:42 compute-0 sudo[440287]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:31:42 compute-0 sudo[440287]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:42 compute-0 sudo[440287]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:43 compute-0 sudo[440312]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:31:43 compute-0 sudo[440312]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:43 compute-0 sudo[440312]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:43 compute-0 sudo[440337]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:31:43 compute-0 sudo[440337]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:43 compute-0 sudo[440337]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:43 compute-0 sudo[440362]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:31:43 compute-0 sudo[440362]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:43 compute-0 ceph-mon[191930]: pgmap v1612: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:43 compute-0 podman[440424]: 2025-10-11 02:31:43.951042726 +0000 UTC m=+0.082247435 container create 05232ef0f2bd1537e53fe6bc2574ffeb25bade951ffedb6f0fe21e9c11ed2743 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_moser, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3)
Oct 11 02:31:44 compute-0 podman[440424]: 2025-10-11 02:31:43.917176427 +0000 UTC m=+0.048381186 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:31:44 compute-0 systemd[1]: Started libpod-conmon-05232ef0f2bd1537e53fe6bc2574ffeb25bade951ffedb6f0fe21e9c11ed2743.scope.
Oct 11 02:31:44 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:31:44 compute-0 podman[440424]: 2025-10-11 02:31:44.076866676 +0000 UTC m=+0.208071465 container init 05232ef0f2bd1537e53fe6bc2574ffeb25bade951ffedb6f0fe21e9c11ed2743 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_moser, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS)
Oct 11 02:31:44 compute-0 podman[440424]: 2025-10-11 02:31:44.088404516 +0000 UTC m=+0.219609225 container start 05232ef0f2bd1537e53fe6bc2574ffeb25bade951ffedb6f0fe21e9c11ed2743 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_moser, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, ceph=True, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 02:31:44 compute-0 podman[440424]: 2025-10-11 02:31:44.093692527 +0000 UTC m=+0.224897266 container attach 05232ef0f2bd1537e53fe6bc2574ffeb25bade951ffedb6f0fe21e9c11ed2743 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_moser, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:31:44 compute-0 hungry_moser[440441]: 167 167
Oct 11 02:31:44 compute-0 systemd[1]: libpod-05232ef0f2bd1537e53fe6bc2574ffeb25bade951ffedb6f0fe21e9c11ed2743.scope: Deactivated successfully.
Oct 11 02:31:44 compute-0 podman[440424]: 2025-10-11 02:31:44.10252985 +0000 UTC m=+0.233734569 container died 05232ef0f2bd1537e53fe6bc2574ffeb25bade951ffedb6f0fe21e9c11ed2743 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_moser, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:31:44 compute-0 systemd[1]: var-lib-containers-storage-overlay-8642c370c8820e827f5bd72710b49a9b755eb947f06c96381f96be78f4627035-merged.mount: Deactivated successfully.
Oct 11 02:31:44 compute-0 podman[440424]: 2025-10-11 02:31:44.178791202 +0000 UTC m=+0.309995931 container remove 05232ef0f2bd1537e53fe6bc2574ffeb25bade951ffedb6f0fe21e9c11ed2743 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_moser, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
Oct 11 02:31:44 compute-0 systemd[1]: libpod-conmon-05232ef0f2bd1537e53fe6bc2574ffeb25bade951ffedb6f0fe21e9c11ed2743.scope: Deactivated successfully.
Oct 11 02:31:44 compute-0 nova_compute[356901]: 2025-10-11 02:31:44.325 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1613: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:44 compute-0 podman[440464]: 2025-10-11 02:31:44.451197666 +0000 UTC m=+0.075883482 container create ce16aba65b056a965feb5d1eea3ad384a3d900ed018d9bc8a02df6ebbb17955d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_brown, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:31:44 compute-0 podman[440464]: 2025-10-11 02:31:44.422222617 +0000 UTC m=+0.046908413 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:31:44 compute-0 systemd[1]: Started libpod-conmon-ce16aba65b056a965feb5d1eea3ad384a3d900ed018d9bc8a02df6ebbb17955d.scope.
Oct 11 02:31:44 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:31:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fc954f3d2ea3bfb91ac26d9b09fe807818564d0d5e2d8999675691a0c1a29fa2/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:31:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fc954f3d2ea3bfb91ac26d9b09fe807818564d0d5e2d8999675691a0c1a29fa2/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:31:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fc954f3d2ea3bfb91ac26d9b09fe807818564d0d5e2d8999675691a0c1a29fa2/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:31:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fc954f3d2ea3bfb91ac26d9b09fe807818564d0d5e2d8999675691a0c1a29fa2/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:31:44 compute-0 podman[440464]: 2025-10-11 02:31:44.601512227 +0000 UTC m=+0.226198023 container init ce16aba65b056a965feb5d1eea3ad384a3d900ed018d9bc8a02df6ebbb17955d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_brown, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 02:31:44 compute-0 podman[440464]: 2025-10-11 02:31:44.625573445 +0000 UTC m=+0.250259221 container start ce16aba65b056a965feb5d1eea3ad384a3d900ed018d9bc8a02df6ebbb17955d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_brown, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 02:31:44 compute-0 podman[440464]: 2025-10-11 02:31:44.629974081 +0000 UTC m=+0.254659857 container attach ce16aba65b056a965feb5d1eea3ad384a3d900ed018d9bc8a02df6ebbb17955d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_brown, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:31:44 compute-0 nova_compute[356901]: 2025-10-11 02:31:44.699 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:45 compute-0 sshd-session[440167]: Failed password for invalid user admin from 121.227.153.123 port 40648 ssh2
Oct 11 02:31:45 compute-0 angry_brown[440481]: {
Oct 11 02:31:45 compute-0 angry_brown[440481]:     "0": [
Oct 11 02:31:45 compute-0 angry_brown[440481]:         {
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "devices": [
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "/dev/loop3"
Oct 11 02:31:45 compute-0 angry_brown[440481]:             ],
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "lv_name": "ceph_lv0",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "lv_size": "21470642176",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "name": "ceph_lv0",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "tags": {
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.cluster_name": "ceph",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.crush_device_class": "",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.encrypted": "0",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.osd_id": "0",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.type": "block",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.vdo": "0"
Oct 11 02:31:45 compute-0 angry_brown[440481]:             },
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "type": "block",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "vg_name": "ceph_vg0"
Oct 11 02:31:45 compute-0 angry_brown[440481]:         }
Oct 11 02:31:45 compute-0 angry_brown[440481]:     ],
Oct 11 02:31:45 compute-0 angry_brown[440481]:     "1": [
Oct 11 02:31:45 compute-0 angry_brown[440481]:         {
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "devices": [
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "/dev/loop4"
Oct 11 02:31:45 compute-0 angry_brown[440481]:             ],
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "lv_name": "ceph_lv1",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "lv_size": "21470642176",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "name": "ceph_lv1",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "tags": {
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.cluster_name": "ceph",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.crush_device_class": "",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.encrypted": "0",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.osd_id": "1",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.type": "block",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.vdo": "0"
Oct 11 02:31:45 compute-0 angry_brown[440481]:             },
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "type": "block",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "vg_name": "ceph_vg1"
Oct 11 02:31:45 compute-0 angry_brown[440481]:         }
Oct 11 02:31:45 compute-0 angry_brown[440481]:     ],
Oct 11 02:31:45 compute-0 angry_brown[440481]:     "2": [
Oct 11 02:31:45 compute-0 angry_brown[440481]:         {
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "devices": [
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "/dev/loop5"
Oct 11 02:31:45 compute-0 angry_brown[440481]:             ],
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "lv_name": "ceph_lv2",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "lv_size": "21470642176",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "name": "ceph_lv2",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "tags": {
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.cluster_name": "ceph",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.crush_device_class": "",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.encrypted": "0",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.osd_id": "2",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.type": "block",
Oct 11 02:31:45 compute-0 angry_brown[440481]:                 "ceph.vdo": "0"
Oct 11 02:31:45 compute-0 angry_brown[440481]:             },
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "type": "block",
Oct 11 02:31:45 compute-0 angry_brown[440481]:             "vg_name": "ceph_vg2"
Oct 11 02:31:45 compute-0 angry_brown[440481]:         }
Oct 11 02:31:45 compute-0 angry_brown[440481]:     ]
Oct 11 02:31:45 compute-0 angry_brown[440481]: }
Oct 11 02:31:45 compute-0 systemd[1]: libpod-ce16aba65b056a965feb5d1eea3ad384a3d900ed018d9bc8a02df6ebbb17955d.scope: Deactivated successfully.
Oct 11 02:31:45 compute-0 podman[440464]: 2025-10-11 02:31:45.547049449 +0000 UTC m=+1.171735265 container died ce16aba65b056a965feb5d1eea3ad384a3d900ed018d9bc8a02df6ebbb17955d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_brown, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:31:45 compute-0 ceph-mon[191930]: pgmap v1613: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:45 compute-0 systemd[1]: var-lib-containers-storage-overlay-fc954f3d2ea3bfb91ac26d9b09fe807818564d0d5e2d8999675691a0c1a29fa2-merged.mount: Deactivated successfully.
Oct 11 02:31:45 compute-0 podman[440464]: 2025-10-11 02:31:45.658349853 +0000 UTC m=+1.283035639 container remove ce16aba65b056a965feb5d1eea3ad384a3d900ed018d9bc8a02df6ebbb17955d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_brown, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default)
Oct 11 02:31:45 compute-0 systemd[1]: libpod-conmon-ce16aba65b056a965feb5d1eea3ad384a3d900ed018d9bc8a02df6ebbb17955d.scope: Deactivated successfully.
Oct 11 02:31:45 compute-0 sudo[440362]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:31:45 compute-0 sudo[440503]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:31:45 compute-0 sudo[440503]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:45 compute-0 sudo[440503]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:45 compute-0 sudo[440528]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:31:45 compute-0 sudo[440528]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:46 compute-0 sudo[440528]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:46 compute-0 sudo[440553]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:31:46 compute-0 sudo[440553]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:46 compute-0 sudo[440553]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:46 compute-0 sudo[440578]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:31:46 compute-0 sudo[440578]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:46 compute-0 sshd-session[440167]: Connection closed by invalid user admin 121.227.153.123 port 40648 [preauth]
Oct 11 02:31:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1614: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:46 compute-0 ceph-mon[191930]: pgmap v1614: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:46 compute-0 podman[440644]: 2025-10-11 02:31:46.811606219 +0000 UTC m=+0.072022522 container create 8e0a18f0bc7a05263a54fd65273ad8cb7bc6a3567bb02c150befb2c1a4139fad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_mestorf, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 02:31:46 compute-0 systemd[1]: Started libpod-conmon-8e0a18f0bc7a05263a54fd65273ad8cb7bc6a3567bb02c150befb2c1a4139fad.scope.
Oct 11 02:31:46 compute-0 podman[440644]: 2025-10-11 02:31:46.788670183 +0000 UTC m=+0.049086536 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:31:46 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:31:46 compute-0 podman[440644]: 2025-10-11 02:31:46.934156625 +0000 UTC m=+0.194572978 container init 8e0a18f0bc7a05263a54fd65273ad8cb7bc6a3567bb02c150befb2c1a4139fad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_mestorf, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:31:46 compute-0 podman[440644]: 2025-10-11 02:31:46.954444376 +0000 UTC m=+0.214860689 container start 8e0a18f0bc7a05263a54fd65273ad8cb7bc6a3567bb02c150befb2c1a4139fad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_mestorf, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef)
Oct 11 02:31:46 compute-0 podman[440644]: 2025-10-11 02:31:46.960562901 +0000 UTC m=+0.220979204 container attach 8e0a18f0bc7a05263a54fd65273ad8cb7bc6a3567bb02c150befb2c1a4139fad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_mestorf, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:31:46 compute-0 musing_mestorf[440660]: 167 167
Oct 11 02:31:46 compute-0 systemd[1]: libpod-8e0a18f0bc7a05263a54fd65273ad8cb7bc6a3567bb02c150befb2c1a4139fad.scope: Deactivated successfully.
Oct 11 02:31:46 compute-0 conmon[440660]: conmon 8e0a18f0bc7a05263a54 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-8e0a18f0bc7a05263a54fd65273ad8cb7bc6a3567bb02c150befb2c1a4139fad.scope/container/memory.events
Oct 11 02:31:46 compute-0 podman[440644]: 2025-10-11 02:31:46.967626053 +0000 UTC m=+0.228042386 container died 8e0a18f0bc7a05263a54fd65273ad8cb7bc6a3567bb02c150befb2c1a4139fad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_mestorf, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 02:31:47 compute-0 systemd[1]: var-lib-containers-storage-overlay-cba8336a40a205d26dfeb4c10e3c45ea28f780d92b072ae1d98abd217f89c6a8-merged.mount: Deactivated successfully.
Oct 11 02:31:47 compute-0 podman[440644]: 2025-10-11 02:31:47.024579223 +0000 UTC m=+0.284995526 container remove 8e0a18f0bc7a05263a54fd65273ad8cb7bc6a3567bb02c150befb2c1a4139fad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_mestorf, ceph=True, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:31:47 compute-0 systemd[1]: libpod-conmon-8e0a18f0bc7a05263a54fd65273ad8cb7bc6a3567bb02c150befb2c1a4139fad.scope: Deactivated successfully.
Oct 11 02:31:47 compute-0 podman[440682]: 2025-10-11 02:31:47.267334948 +0000 UTC m=+0.071650051 container create cf216e8c521c998eed50e288b67e501f5e84f218d8b658728f6f8fdc3eff075f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_chatelet, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:31:47 compute-0 podman[440682]: 2025-10-11 02:31:47.239013498 +0000 UTC m=+0.043328631 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:31:47 compute-0 systemd[1]: Started libpod-conmon-cf216e8c521c998eed50e288b67e501f5e84f218d8b658728f6f8fdc3eff075f.scope.
Oct 11 02:31:47 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:31:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/412169c95ef5eeba6fa5441998531760aceecc6722e90e8e585202fc29e4daa9/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:31:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/412169c95ef5eeba6fa5441998531760aceecc6722e90e8e585202fc29e4daa9/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:31:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/412169c95ef5eeba6fa5441998531760aceecc6722e90e8e585202fc29e4daa9/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:31:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/412169c95ef5eeba6fa5441998531760aceecc6722e90e8e585202fc29e4daa9/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:31:47 compute-0 podman[440682]: 2025-10-11 02:31:47.4327168 +0000 UTC m=+0.237031913 container init cf216e8c521c998eed50e288b67e501f5e84f218d8b658728f6f8fdc3eff075f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_chatelet, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Oct 11 02:31:47 compute-0 podman[440682]: 2025-10-11 02:31:47.455946904 +0000 UTC m=+0.260261997 container start cf216e8c521c998eed50e288b67e501f5e84f218d8b658728f6f8fdc3eff075f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_chatelet, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:31:47 compute-0 podman[440699]: 2025-10-11 02:31:47.456104219 +0000 UTC m=+0.111194632 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, managed_by=edpm_ansible, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS)
Oct 11 02:31:47 compute-0 podman[440682]: 2025-10-11 02:31:47.460114534 +0000 UTC m=+0.264429677 container attach cf216e8c521c998eed50e288b67e501f5e84f218d8b658728f6f8fdc3eff075f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_chatelet, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 02:31:47 compute-0 podman[440696]: 2025-10-11 02:31:47.486501729 +0000 UTC m=+0.139700098 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, container_name=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0)
Oct 11 02:31:47 compute-0 sshd-session[440628]: Invalid user admin from 121.227.153.123 port 40652
Oct 11 02:31:47 compute-0 sshd-session[440628]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:31:47 compute-0 sshd-session[440628]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:31:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1615: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]: {
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:         "osd_id": 1,
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:         "type": "bluestore"
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:     },
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:         "osd_id": 2,
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:         "type": "bluestore"
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:     },
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:         "osd_id": 0,
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:         "type": "bluestore"
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]:     }
Oct 11 02:31:48 compute-0 laughing_chatelet[440710]: }
Oct 11 02:31:48 compute-0 systemd[1]: libpod-cf216e8c521c998eed50e288b67e501f5e84f218d8b658728f6f8fdc3eff075f.scope: Deactivated successfully.
Oct 11 02:31:48 compute-0 podman[440682]: 2025-10-11 02:31:48.825903541 +0000 UTC m=+1.630218674 container died cf216e8c521c998eed50e288b67e501f5e84f218d8b658728f6f8fdc3eff075f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_chatelet, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default)
Oct 11 02:31:48 compute-0 systemd[1]: libpod-cf216e8c521c998eed50e288b67e501f5e84f218d8b658728f6f8fdc3eff075f.scope: Consumed 1.335s CPU time.
Oct 11 02:31:48 compute-0 systemd[1]: var-lib-containers-storage-overlay-412169c95ef5eeba6fa5441998531760aceecc6722e90e8e585202fc29e4daa9-merged.mount: Deactivated successfully.
Oct 11 02:31:48 compute-0 podman[440682]: 2025-10-11 02:31:48.927718874 +0000 UTC m=+1.732033977 container remove cf216e8c521c998eed50e288b67e501f5e84f218d8b658728f6f8fdc3eff075f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_chatelet, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:31:48 compute-0 systemd[1]: libpod-conmon-cf216e8c521c998eed50e288b67e501f5e84f218d8b658728f6f8fdc3eff075f.scope: Deactivated successfully.
Oct 11 02:31:48 compute-0 sudo[440578]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:31:49 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:31:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:31:49 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:31:49 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 146ff63d-4f47-4649-b562-2908471fa67f does not exist
Oct 11 02:31:49 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev dd6f90dd-a498-4413-bcf2-7a09899ced77 does not exist
Oct 11 02:31:49 compute-0 sudo[440784]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:31:49 compute-0 sudo[440784]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:49 compute-0 sudo[440784]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:49 compute-0 sudo[440809]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:31:49 compute-0 sudo[440809]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:31:49 compute-0 sudo[440809]: pam_unix(sudo:session): session closed for user root
Oct 11 02:31:49 compute-0 nova_compute[356901]: 2025-10-11 02:31:49.330 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:49 compute-0 ceph-mon[191930]: pgmap v1615: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:49 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:31:49 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:31:49 compute-0 nova_compute[356901]: 2025-10-11 02:31:49.555 2 DEBUG oslo_concurrency.lockutils [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "7513b93e-d2b8-4ae0-8f1c-3df190945259" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:31:49 compute-0 nova_compute[356901]: 2025-10-11 02:31:49.556 2 DEBUG oslo_concurrency.lockutils [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259" acquired by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:31:49 compute-0 nova_compute[356901]: 2025-10-11 02:31:49.557 2 DEBUG oslo_concurrency.lockutils [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:31:49 compute-0 nova_compute[356901]: 2025-10-11 02:31:49.557 2 DEBUG oslo_concurrency.lockutils [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" acquired by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:31:49 compute-0 nova_compute[356901]: 2025-10-11 02:31:49.557 2 DEBUG oslo_concurrency.lockutils [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" "released" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:31:49 compute-0 nova_compute[356901]: 2025-10-11 02:31:49.559 2 INFO nova.compute.manager [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Terminating instance
Oct 11 02:31:49 compute-0 nova_compute[356901]: 2025-10-11 02:31:49.560 2 DEBUG nova.compute.manager [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Start destroying the instance on the hypervisor. _shutdown_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:3120
Oct 11 02:31:49 compute-0 nova_compute[356901]: 2025-10-11 02:31:49.702 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:49 compute-0 kernel: tapa942acb1-1e (unregistering): left promiscuous mode
Oct 11 02:31:49 compute-0 NetworkManager[44908]: <info>  [1760149909.7367] device (tapa942acb1-1e): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Oct 11 02:31:49 compute-0 ovn_controller[88370]: 2025-10-11T02:31:49Z|00054|binding|INFO|Releasing lport a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e from this chassis (sb_readonly=0)
Oct 11 02:31:49 compute-0 nova_compute[356901]: 2025-10-11 02:31:49.761 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:49 compute-0 ovn_controller[88370]: 2025-10-11T02:31:49Z|00055|binding|INFO|Setting lport a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e down in Southbound
Oct 11 02:31:49 compute-0 ovn_controller[88370]: 2025-10-11T02:31:49Z|00056|binding|INFO|Removing iface tapa942acb1-1e ovn-installed in OVS
Oct 11 02:31:49 compute-0 nova_compute[356901]: 2025-10-11 02:31:49.768 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:49.773 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:16:ee:dc 192.168.0.225'], port_security=['fa:16:3e:16:ee:dc 192.168.0.225'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'name': 'vnf-scaleup_group-wzkjkvgckve2-djjfpphdsuuh-gthznuj2xct2-port-zo7fokg3iel2', 'neutron:cidrs': '192.168.0.225/24', 'neutron:device_id': '7513b93e-d2b8-4ae0-8f1c-3df190945259', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'neutron:port_capabilities': '', 'neutron:port_name': 'vnf-scaleup_group-wzkjkvgckve2-djjfpphdsuuh-gthznuj2xct2-port-zo7fokg3iel2', 'neutron:project_id': '97026531b3404a11869cb85a059c4a0d', 'neutron:revision_number': '4', 'neutron:security_group_ids': 'c0c90d87-d29f-4e96-98a1-ffb301424ea4', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:port_fip': '192.168.122.204', 'neutron:host_id': 'compute-0.ctlplane.example.com'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=d19b0dd1-1656-436b-911a-8f2dcc98f6bf, chassis=[], tunnel_key=5, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e) old=Port_Binding(up=[True], chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:31:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:49.776 286362 INFO neutron.agent.ovn.metadata.agent [-] Port a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e in datapath d4dded16-3268-4cf9-bb6b-aa5200d5e4ec unbound from our chassis
Oct 11 02:31:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:49.780 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network d4dded16-3268-4cf9-bb6b-aa5200d5e4ec
Oct 11 02:31:49 compute-0 nova_compute[356901]: 2025-10-11 02:31:49.798 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:49.818 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[8c19cc4a-ca7d-4f16-b373-be818d56f5fb]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:49 compute-0 systemd[1]: machine-qemu\x2d3\x2dinstance\x2d00000003.scope: Deactivated successfully.
Oct 11 02:31:49 compute-0 systemd[1]: machine-qemu\x2d3\x2dinstance\x2d00000003.scope: Consumed 1min 41.686s CPU time.
Oct 11 02:31:49 compute-0 systemd-machined[137586]: Machine qemu-3-instance-00000003 terminated.
Oct 11 02:31:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:49.867 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[297df254-9d98-4ab5-8743-35cccf09991b]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:49.872 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[46a3299a-0080-499e-b04b-ff00c3db951c]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:49.914 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[cffd9d74-4912-49a6-abc9-0e57576289c0]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:49.948 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[b345a7bf-a3a9-432d-8f10-21bd12964a81]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapd4dded16-31'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:11:50:48'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 8, 'tx_packets': 13, 'rx_bytes': 832, 'tx_bytes': 690, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 8, 'tx_packets': 13, 'rx_bytes': 832, 'tx_bytes': 690, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 15], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 548909, 'reachable_time': 31912, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 8, 'inoctets': 720, 'indelivers': 1, 'outforwdatagrams': 0, 'outpkts': 3, 'outoctets': 228, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 8, 'outmcastpkts': 3, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 720, 'outmcastoctets': 228, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 8, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 1, 'inerrors': 0, 'outmsgs': 3, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 440847, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:49.982 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[4e4a7fbc-a708-4cc6-8b6b-b6a506113662]: (4, ({'family': 2, 'prefixlen': 32, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '169.254.169.254'], ['IFA_LOCAL', '169.254.169.254'], ['IFA_BROADCAST', '169.254.169.254'], ['IFA_LABEL', 'tapd4dded16-31'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 548926, 'tstamp': 548926}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 440848, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'}, {'family': 2, 'prefixlen': 24, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '192.168.0.2'], ['IFA_LOCAL', '192.168.0.2'], ['IFA_BROADCAST', '192.168.0.255'], ['IFA_LABEL', 'tapd4dded16-31'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 548931, 'tstamp': 548931}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 440848, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'})) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:49.985 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapd4dded16-30, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:31:49 compute-0 kernel: tapa942acb1-1e: entered promiscuous mode
Oct 11 02:31:49 compute-0 nova_compute[356901]: 2025-10-11 02:31:49.988 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:49 compute-0 ovn_controller[88370]: 2025-10-11T02:31:49Z|00057|binding|INFO|Claiming lport a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e for this chassis.
Oct 11 02:31:49 compute-0 ovn_controller[88370]: 2025-10-11T02:31:49Z|00058|binding|INFO|a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e: Claiming fa:16:3e:16:ee:dc 192.168.0.225
Oct 11 02:31:49 compute-0 kernel: tapa942acb1-1e (unregistering): left promiscuous mode
Oct 11 02:31:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:49.994 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapd4dded16-30, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:31:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:49.995 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:31:49 compute-0 nova_compute[356901]: 2025-10-11 02:31:49.994 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:49.995 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tapd4dded16-30, col_values=(('external_ids', {'iface-id': 'f0f8488b-423f-46a5-8a6a-984c2ae3438e'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:31:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:49.996 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:31:49 compute-0 NetworkManager[44908]: <info>  [1760149909.9996] manager: (tapa942acb1-1e): new Tun device (/org/freedesktop/NetworkManager/Devices/37)
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.001 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:16:ee:dc 192.168.0.225'], port_security=['fa:16:3e:16:ee:dc 192.168.0.225'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'name': 'vnf-scaleup_group-wzkjkvgckve2-djjfpphdsuuh-gthznuj2xct2-port-zo7fokg3iel2', 'neutron:cidrs': '192.168.0.225/24', 'neutron:device_id': '7513b93e-d2b8-4ae0-8f1c-3df190945259', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'neutron:port_capabilities': '', 'neutron:port_name': 'vnf-scaleup_group-wzkjkvgckve2-djjfpphdsuuh-gthznuj2xct2-port-zo7fokg3iel2', 'neutron:project_id': '97026531b3404a11869cb85a059c4a0d', 'neutron:revision_number': '4', 'neutron:security_group_ids': 'c0c90d87-d29f-4e96-98a1-ffb301424ea4', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:port_fip': '192.168.122.204', 'neutron:host_id': 'compute-0.ctlplane.example.com'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=d19b0dd1-1656-436b-911a-8f2dcc98f6bf, chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], tunnel_key=5, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e) old=Port_Binding(chassis=[]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.003 286362 INFO neutron.agent.ovn.metadata.agent [-] Port a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e in datapath d4dded16-3268-4cf9-bb6b-aa5200d5e4ec bound to our chassis
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.006 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network d4dded16-3268-4cf9-bb6b-aa5200d5e4ec
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.027 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[cac2b0f9-76cc-4ccc-b088-862bba7112a2]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:50 compute-0 ovn_controller[88370]: 2025-10-11T02:31:50Z|00059|binding|INFO|Setting lport a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e ovn-installed in OVS
Oct 11 02:31:50 compute-0 ovn_controller[88370]: 2025-10-11T02:31:50Z|00060|binding|INFO|Setting lport a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e up in Southbound
Oct 11 02:31:50 compute-0 ovn_controller[88370]: 2025-10-11T02:31:50Z|00061|binding|INFO|Releasing lport a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e from this chassis (sb_readonly=1)
Oct 11 02:31:50 compute-0 ovn_controller[88370]: 2025-10-11T02:31:50Z|00062|if_status|INFO|Not setting lport a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e down as sb is readonly
Oct 11 02:31:50 compute-0 ovn_controller[88370]: 2025-10-11T02:31:50Z|00063|binding|INFO|Removing iface tapa942acb1-1e ovn-installed in OVS
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.034 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:50 compute-0 ovn_controller[88370]: 2025-10-11T02:31:50Z|00064|binding|INFO|Releasing lport a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e from this chassis (sb_readonly=0)
Oct 11 02:31:50 compute-0 ovn_controller[88370]: 2025-10-11T02:31:50Z|00065|binding|INFO|Setting lport a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e down in Southbound
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.045 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:16:ee:dc 192.168.0.225'], port_security=['fa:16:3e:16:ee:dc 192.168.0.225'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'name': 'vnf-scaleup_group-wzkjkvgckve2-djjfpphdsuuh-gthznuj2xct2-port-zo7fokg3iel2', 'neutron:cidrs': '192.168.0.225/24', 'neutron:device_id': '7513b93e-d2b8-4ae0-8f1c-3df190945259', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'neutron:port_capabilities': '', 'neutron:port_name': 'vnf-scaleup_group-wzkjkvgckve2-djjfpphdsuuh-gthznuj2xct2-port-zo7fokg3iel2', 'neutron:project_id': '97026531b3404a11869cb85a059c4a0d', 'neutron:revision_number': '4', 'neutron:security_group_ids': 'c0c90d87-d29f-4e96-98a1-ffb301424ea4', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:port_fip': '192.168.122.204', 'neutron:host_id': 'compute-0.ctlplane.example.com'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=d19b0dd1-1656-436b-911a-8f2dcc98f6bf, chassis=[], tunnel_key=5, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e) old=Port_Binding(up=[True], chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.049 2 INFO nova.virt.libvirt.driver [-] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Instance destroyed successfully.
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.050 2 DEBUG nova.objects.instance [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lazy-loading 'resources' on Instance uuid 7513b93e-d2b8-4ae0-8f1c-3df190945259 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.052 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.068 2 DEBUG nova.virt.libvirt.vif [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:23:54Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=<?>,disable_terminate=False,display_description=None,display_name='vn-vgckve2-djjfpphdsuuh-gthznuj2xct2-vnf-jmvtgw3mflyn',ec2_ids=<?>,ephemeral_gb=1,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(1),hidden=False,host='compute-0.ctlplane.example.com',hostname='vn-vgckve2-djjfpphdsuuh-gthznuj2xct2-vnf-jmvtgw3mflyn',id=3,image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',info_cache=InstanceInfoCache,instance_type_id=1,kernel_id='',key_data=None,key_name=None,keypairs=<?>,launch_index=0,launched_at=2025-10-11T02:24:06Z,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=512,metadata={metering.server_group='3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'},migration_context=<?>,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=<?>,power_state=1,progress=0,project_id='97026531b3404a11869cb85a059c4a0d',ramdisk_id='',reservation_id='r-t8a5mh5u',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader,admin',image_base_image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',image_container_format='bare',image_disk_format='qcow2',image_hw_cdrom_bus='sata',image_hw_disk_bus='virtio',image_hw_input_bus='usb',image_hw_machine_type='q35',image_hw_pointer_model='usbtablet',image_hw_video_model='virtio',image_hw_vif_model='virtio',image_min_disk='1',image_min_ram='0',image_owner_specified.openstack.md5='',image_owner_specified.openstack.object='images/cirros',image_owner_specified.openstack.sha256='',owner_project_name='admin',owner_user_name='admin'},tags=<?>,task_state='deleting',terminated_at=None,trusted_certs=<?>,updated_at=2025-10-11T02:24:06Z,user_data='Q29udGVudC1UeXBlOiBtdWx0aXBhcnQvbWl4ZWQ7IGJvdW5kYXJ5PSI9PT09PT09PT09PT09PT0zOTM4MTI3NjM1NDAwNDM1NzIyPT0iCk1JTUUtVmVyc2lvbjogMS4wCgotLT09PT09PT09PT09PT09PTM5MzgxMjc2MzU0MDA0MzU3MjI9PQpDb250ZW50LVR5cGU6IHRleHQvY2xvdWQtY29uZmlnOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2xvdWQtY29uZmlnIgoKCgojIENhcHR1cmUgYWxsIHN1YnByb2Nlc3Mgb3V0cHV0IGludG8gYSBsb2dmaWxlCiMgVXNlZnVsIGZvciB0cm91Ymxlc2hvb3RpbmcgY2xvdWQtaW5pdCBpc3N1ZXMKb3V0cHV0OiB7YWxsOiAnfCB0ZWUgLWEgL3Zhci9sb2cvY2xvdWQtaW5pdC1vdXRwdXQubG9nJ30KCi0tPT09PT09PT09PT09PT09MzkzODEyNzYzNTQwMDQzNTcyMj09CkNvbnRlbnQtVHlwZTogdGV4dC9jbG91ZC1ib290aG9vazsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImJvb3Rob29rLnNoIgoKIyEvdXNyL2Jpbi9iYXNoCgojIEZJWE1FKHNoYWRvd2VyKSB0aGlzIGlzIGEgd29ya2Fyb3VuZCBmb3IgY2xvdWQtaW5pdCAwLjYuMyBwcmVzZW50IGluIFVidW50dQojIDEyLjA0IExUUzoKIyBodHRwczovL2J1Z3MubGF1bmNocGFkLm5ldC9oZWF0LytidWcvMTI1NzQxMAojCiMgVGhlIG9sZCBjbG91ZC1pbml0IGRvZXNuJ3QgY3JlYXRlIHRoZSB1c2VycyBkaXJlY3RseSBzbyB0aGUgY29tbWFuZHMgdG8gZG8KIyB0aGlzIGFyZSBpbmplY3RlZCB0aG91Z2ggbm92YV91dGlsc3y5weS4KIwojIE9uY2Ugd2UgZHJvcCBzdXBwb3J0IGZvciAwLjYuMywgd2UgY2FuIHNhZmVseSByZW1vdmUgdGhpcy4KCgojIGluIGNhc2UgaGVhdC1jZm50b29scyBoYXMgYmVlbiBpbnN0YWxsZWQgZnJvbSBwYWNrYWdlIGJ1dCBubyBzeW1saW5rcwojIGFyZSB5ZXQgaW4gL29wdC9hd3MvYmluLwpjZm4tY3JlYXRlLWF3cy1zeW1saW5rcwoKIyBEbyBub3QgcmVtb3ZlIC0gdGhlIGNsb3VkIGJvb3Rob29rIHNob3VsZCBhbHdheXMgcmV0dXJuIHN1Y2Nlc3MKZXhpdCAwCgotLT09PT09PT09PT09PT09PTM5MzgxMjc2MzU0MDA0MzU3MjI9PQpDb250ZW50LVR5cGU6IHRleHQvcGFydC1oYW5kbGVyOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0icGFydC1oYW5kbGVyLnB5IgoKIyBwYXJ0LWhhbmRsZXIKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wCiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZWQgdW5kZXIgdGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBvcwppbXBvcnQgc3lzCgoKZGVmIGxpc3RfdHlwZXMoKToKICAgIHJldHVybiBbInRleHQveC1jZm5pbml0ZGF0YSJdCgoKZGVmIGhhbmRsZV9wYXJ0KGRhdGEsIGN0eXBlLCBmaWxlbmFtZSwgcGF5bG9hZCk6CiAgICBpZiBjdHlwZSA9PSAiX19iZWdpbl9fIjoKICAgICAgICB0cnk6CiAgICAgICAgICAgIG9zLm1ha2VkaXJzKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzJywgaW50KCI3MDAiLCA4KSkKICAgICAgICBleGNlcHQgT1NFcnJvcjoKICAgICAgICAgICAgZXhfdHlwZSwgZSwgdGIgPSBzeXMuZXhjX2luZm8oKQogICAgICAgICAgICBpZiBlLmVycm5vICE9IGVycm5vLkVFWElTVDoKICAgICAgICAgICAgICAgIHJhaXNlCiAgICAgICAgcmV0dXJuCgogICAgaWYgY3R5cGUgPT0gIl9fZW5kX18iOgogICAgICAgIHJldHVybgoKICAgIHRpbWVzdGFtcCA9IGRhdGV0aW1lLmRhdGV0aW1lLm5vdygpCiAgICB3aXRoIG9wZW4oJy92YXIvbG9nL3BhcnQtaGFuZGxlci5sb2cnLCAnYScpIGFzIGxvZzoKICAgICAgICBsb2cud3JpdGUoJyVzIGZpbGVuYW1lOiVzLCBjdHlwZTolc1xuJyAlICh0aW1lc3RhbXAsIGZpbGVuYW1lLCBjdHlwZSkpCgogICAgaWYgY3R5cGUgPT0gJ3RleHQveC1jZm5pbml0ZGF0YSc6CiAgICAgICAgd2l0aCBvcGVuKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzLyVzJyAlIGZpbGVuYW1lLCAndycpIGFzIGY6CiAgICAgICAgICAgIGYud3JpdGUocGF5bG9hZCkKCiAgICAgICAgIyBUT0RPKHNkYWtlKSBob3BlZnVsbHkgdGVtcG9yYXJ5IHVudGlsIHVzZXJzIG1vdmUgdG8gaGVhdC1jZm50b29scy0xLjMKICAgICAgICB3aXRoIG9wZW4oJy92YXIvbGliL2Nsb3VkL2RhdGEvJXMnICUgZmlsZW5hbWUsICd3JykgYXMgZjoKICAgICAgICAgICAgZi53cml0ZShwYXlsb2FkKQoKLS09PT09PT09PT09PT09PT0zOTM4MTI3NjM1NDAwNDM1NzIyPT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtY2ZuaW5pdGRhdGE7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJjZm4tdXNlcmRhdGEiCgoKLS09PT09PT09PT09PT09PT0zOTM4MTI3NjM1NDAwNDM1NzIyPT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtc2hlbGxzY3JpcHQ7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJsb2d1c2VyZGF0YS5weSIKCiMhL3Vzci9iaW4vZW52IHB5dGhvbjMKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wCiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZWQgdW5kZXIgdGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBsb2dnaW5nCmltcG9ydCBvcwppbXBvcnQgc3VicHJvY2VzcwppbXBvcnQgc3lzCgoKVkFSX1BBVEggPSAnL3Zhci9saWIvaGVhdC1jZm50b29scycKTE9HID0gbG9nZ2luZy5nZXRMb2dnZXIoJ2hlYXQtcHJvdmlzaW9uJykKCgpkZWYgaW5pdF9sb2dnaW5nKCk6CiAgICBMT0cuc2V0TGV2ZWwobG9nZ2luZy5JTkZPKQogICAgTE9HLmFkZEhhbmRsZXIobG9nZ2luZy5TdHJlYW1IYW5kbGVyKCkpCiAgICBmaCA9IGxvZ2dpbmcuRmlsZUhhbmRsZXIoIi92YXIvbG9nL2hlYXQtcHJvdmlzaW9uLmxvZyIpCiAgICBvcy5jaG1vZChmaC5iYXNlRmlsZW5hbWUsIGludCgiNjAwIiwgOCkpCiAgICBMT0cuYWRkSGFuZGxlcihmaCkKCgpkZWYgY2FsbChhcmdzKToKCiAgICBjbGFzcyBMb2dTdHJlYW0ob2JqZWN0KToKCiAgICAgICAgZGVmIHdyaXRlKHNlbGYsIGRhdGEpOgogICAgICAgICAgICBMT0cuaW5mbyhkYXRhKQoKICAgIExPRy5pbmZvKCclc1
Oct 11 02:31:50 compute-0 nova_compute[356901]: xuJywgJyAnLmpvaW4oYXJncykpICAjIG5vcWEKICAgIHRyeToKICAgICAgICBscyA9IExvZ1N0cmVhbSgpCiAgICAgICAgcCA9IHN1YnByb2Nlc3MuUG9wZW4oYXJncywgc3Rkb3V0PXN1YnByb2Nlc3MuUElQRSwKICAgICAgICAgICAgICAgICAgICAgICAgICAgICBzdGRlcnI9c3VicHJvY2Vzcy5QSVBFKQogICAgICAgIGRhdGEgPSBwLmNvbW11bmljYXRlKCkKICAgICAgICBpZiBkYXRhOgogICAgICAgICAgICBmb3IgeCBpbiBkYXRhOgogICAgICAgICAgICAgICAgbHMud3JpdGUoeCkKICAgIGV4Y2VwdCBPU0Vycm9yOgogICAgICAgIGV4X3R5cGUsIGV4LCB0YiA9IHN5cy5leGNfaW5mbygpCiAgICAgICAgaWYgZXguZXJybm8gPT0gZXJybm8uRU5PRVhFQzoKICAgICAgICAgICAgTE9HLmVycm9yKCdVc2VyZGF0YSBlbXB0eSBvciBub3QgZXhlY3V0YWJsZTogJXMnLCBleCkKICAgICAgICAgICAgcmV0dXJuIG9zLkVYX09LCiAgICAgICAgZWxzZToKICAgICAgICAgICAgTE9HLmVycm9yKCdPUyBlcnJvciBydW5uaW5nIHVzZXJkYXRhOiAlcycsIGV4KQogICAgICAgICAgICByZXR1cm4gb3MuRVhfT1NFUlIKICAgIGV4Y2VwdCBFeGNlcHRpb246CiAgICAgICAgZXhfdHlwZSwgZXgsIHRiID0gc3lzLmV4Y19pbmZvKCkKICAgICAgICBMT0cuZXJyb3IoJ1Vua25vd24gZXJyb3IgcnVubmluZyB1c2VyZGF0YTogJXMnLCBleCkKICAgICAgICByZXR1cm4gb3MuRVhfU09GVFdBUkUKICAgIHJldHVybiBwLnJldHVybmNvZGUKCgpkZWYgbWFpbigpOgogICAgdXNlcmRhdGFfcGF0aCA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ2Nmbi11c2VyZGF0YScpCiAgICBvcy5jaG1vZCh1c2VyZGF0YV9wYXRoLCBpbnQoIjcwMCIsIDgpKQoKICAgIExPRy5pbmZvKCdQcm92aXNpb24gYmVnYW46ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICByZXR1cm5jb2RlID0gY2FsbChbdXNlcmRhdGFfcGF0aF0pCiAgICBMT0cuaW5mbygnUHJvdmlzaW9uIGRvbmU6ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICBpZiByZXR1cm5jb2RlOgogICAgICAgIHJldHVybiByZXR1cm5jb2RlCgoKaWYgX19uYW1lX18gPT0gJ19fbWFpbl9fJzoKICAgIGluaXRfbG9nZ2luZygpCgogICAgY29kZSA9IG1haW4oKQogICAgaWYgY29kZToKICAgICAgICBMT0cuZXJyb3IoJ1Byb3Zpc2lvbiBmYWlsZWQgd2l0aCBleGl0IGNvZGUgJXMnLCBjb2RlKQogICAgICAgIHN5cy5leGl0KGNvZGUpCgogICAgcHJvdmlzaW9uX2xvZyA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ3Byb3Zpc2lvbi1maW5pc2hlZCcpCiAgICAjIHRvdWNoIHRoZSBmaWxlIHNvIGl0IGlzIHRpbWVzdGFtcGVkIHdpdGggd2hlbiBmaW5pc2hlZAogICAgd2l0aCBvcGVuKHByb3Zpc2lvbl9sb2csICdhJyk6CiAgICAgICAgb3MudXRpbWUocHJvdmlzaW9uX2xvZywgTm9uZSkKCi0tPT09PT09PT09PT09PT09MzkzODEyNzYzNTQwMDQzNTcyMj09CkNvbnRlbnQtVHlwZTogdGV4dC94LWNmbmluaXRkYXRhOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2ZuLW1ldGFkYXRhLXNlcnZlciIKCmh0dHBzOi8vaGVhdC1jZm5hcGktaW50ZXJuYWwub3BlbnN0YWNrLnN2Yzo4MDAwL3YxLwotLT09PT09PT09PT09PT09PTM5MzgxMjc2MzU0MDA0MzU3MjI9PQpDb250ZW50LVR5cGU6IHRleHQveC1jZm5pbml0ZGF0YTsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImNmbi1ib3RvLWNmZyIKCltCb3RvXQpkZWJ1ZyA9IDAKaXNfc2VjdXJlID0gMApodHRwc192YWxpZGF0ZV9jZXJ0aWZpY2F0ZXMgPSAxCmNmbl9yZWdpb25fbmFtZSA9IGhlYXQKY2ZuX3JlZ2lvbl9lbmRwb2ludCA9IGhlYXQtY2ZuYXBpLWludGVybmFsLm9wZW5zdGFjay5zdmMKLS09PT09PT09PT09PT09PT0zOTM4MTI3NjM1NDAwNDM1NzIyPT0tLQo=',user_id='d215f3ebbc07435493ccd666fc80109d',uuid=7513b93e-d2b8-4ae0-8f1c-3df190945259,vcpu_model=<?>,vcpus=1,vm_mode=None,vm_state='active') vif={"id": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "address": "fa:16:3e:16:ee:dc", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.225", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.204", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa942acb1-1e", "ovs_interfaceid": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} unplug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:828
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.071 2 DEBUG nova.network.os_vif_util [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converting VIF {"id": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "address": "fa:16:3e:16:ee:dc", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.225", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.204", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapa942acb1-1e", "ovs_interfaceid": "a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.072 2 DEBUG nova.network.os_vif_util [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converted object VIFOpenVSwitch(active=True,address=fa:16:3e:16:ee:dc,bridge_name='br-int',has_traffic_filtering=True,id=a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tapa942acb1-1e') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.074 2 DEBUG os_vif [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Unplugging vif VIFOpenVSwitch(active=True,address=fa:16:3e:16:ee:dc,bridge_name='br-int',has_traffic_filtering=True,id=a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tapa942acb1-1e') unplug /usr/lib/python3.9/site-packages/os_vif/__init__.py:109
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.077 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.078 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapa942acb1-1e, bridge=br-int, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.080 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.083 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.086 2 INFO os_vif [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Successfully unplugged vif VIFOpenVSwitch(active=True,address=fa:16:3e:16:ee:dc,bridge_name='br-int',has_traffic_filtering=True,id=a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tapa942acb1-1e')
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.085 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[e6c72a00-a875-43fb-860f-ba720f930642]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.091 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[b69ffa3a-90c3-4ccf-bb73-3e4ea17cc795]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.144 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[5b686852-473c-4185-a667-4bf9e0a71c30]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.168 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[a865b208-ca20-4d78-9380-3b8dcc4f9d83]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapd4dded16-31'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:11:50:48'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 8, 'tx_packets': 15, 'rx_bytes': 832, 'tx_bytes': 774, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 8, 'tx_packets': 15, 'rx_bytes': 832, 'tx_bytes': 774, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 15], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 548909, 'reachable_time': 31912, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 8, 'inoctets': 720, 'indelivers': 1, 'outforwdatagrams': 0, 'outpkts': 3, 'outoctets': 228, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 8, 'outmcastpkts': 3, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 720, 'outmcastoctets': 228, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 8, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 1, 'inerrors': 0, 'outmsgs': 3, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 440880, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.194 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[8beb5280-6c58-4388-a2e8-34af48d98e90]: (4, ({'family': 2, 'prefixlen': 32, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '169.254.169.254'], ['IFA_LOCAL', '169.254.169.254'], ['IFA_BROADCAST', '169.254.169.254'], ['IFA_LABEL', 'tapd4dded16-31'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 548926, 'tstamp': 548926}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 440881, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'}, {'family': 2, 'prefixlen': 24, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '192.168.0.2'], ['IFA_LOCAL', '192.168.0.2'], ['IFA_BROADCAST', '192.168.0.255'], ['IFA_LABEL', 'tapd4dded16-31'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 548931, 'tstamp': 548931}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 440881, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'})) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.196 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapd4dded16-30, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.198 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.202 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapd4dded16-30, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.202 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.203 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tapd4dded16-30, col_values=(('external_ids', {'iface-id': 'f0f8488b-423f-46a5-8a6a-984c2ae3438e'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.203 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.205 286362 INFO neutron.agent.ovn.metadata.agent [-] Port a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e in datapath d4dded16-3268-4cf9-bb6b-aa5200d5e4ec unbound from our chassis
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.206 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network d4dded16-3268-4cf9-bb6b-aa5200d5e4ec
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.231 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[53ff0e07-ae79-43fd-9771-c45777aa7771]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.283 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[1a979c6a-ba40-4f73-9c67-1692e227e441]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:50 compute-0 rsyslogd[187706]: message too long (8192) with configured size 8096, begin of message is: 2025-10-11 02:31:50.068 2 DEBUG nova.virt.libvirt.vif [None req-db28ecd4-f0fd-4a [v8.2506.0-2.el9 try https://www.rsyslog.com/e/2445 ]
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.289 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[7d80ff9c-1c57-4d61-ad07-d657f0b0eb45]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.338 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[9e6f3905-c430-4218-91e7-b6745dfc83ea]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.346 2 DEBUG nova.compute.manager [req-9502b0a7-62e5-43bd-8cb3-526f63bb5396 req-286a38a3-59e3-4250-8f05-0395d9004b9a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Received event network-vif-unplugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.347 2 DEBUG oslo_concurrency.lockutils [req-9502b0a7-62e5-43bd-8cb3-526f63bb5396 req-286a38a3-59e3-4250-8f05-0395d9004b9a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.347 2 DEBUG oslo_concurrency.lockutils [req-9502b0a7-62e5-43bd-8cb3-526f63bb5396 req-286a38a3-59e3-4250-8f05-0395d9004b9a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.348 2 DEBUG oslo_concurrency.lockutils [req-9502b0a7-62e5-43bd-8cb3-526f63bb5396 req-286a38a3-59e3-4250-8f05-0395d9004b9a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.348 2 DEBUG nova.compute.manager [req-9502b0a7-62e5-43bd-8cb3-526f63bb5396 req-286a38a3-59e3-4250-8f05-0395d9004b9a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] No waiting events found dispatching network-vif-unplugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.349 2 DEBUG nova.compute.manager [req-9502b0a7-62e5-43bd-8cb3-526f63bb5396 req-286a38a3-59e3-4250-8f05-0395d9004b9a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Received event network-vif-unplugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e for instance with task_state deleting. _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10826
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.370 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[d5f7d527-9c70-49b5-8246-dd92e1a48c8c]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapd4dded16-31'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:11:50:48'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 8, 'tx_packets': 17, 'rx_bytes': 832, 'tx_bytes': 858, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 8, 'tx_packets': 17, 'rx_bytes': 832, 'tx_bytes': 858, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 15], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 548909, 'reachable_time': 31912, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 8, 'inoctets': 720, 'indelivers': 1, 'outforwdatagrams': 0, 'outpkts': 3, 'outoctets': 228, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 8, 'outmcastpkts': 3, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 720, 'outmcastoctets': 228, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 8, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 1, 'inerrors': 0, 'outmsgs': 3, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 440887, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.399 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[8a862ac9-bc98-4ef7-a2df-c3dc23ec439d]: (4, ({'family': 2, 'prefixlen': 32, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '169.254.169.254'], ['IFA_LOCAL', '169.254.169.254'], ['IFA_BROADCAST', '169.254.169.254'], ['IFA_LABEL', 'tapd4dded16-31'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 548926, 'tstamp': 548926}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 440888, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'}, {'family': 2, 'prefixlen': 24, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '192.168.0.2'], ['IFA_LOCAL', '192.168.0.2'], ['IFA_BROADCAST', '192.168.0.255'], ['IFA_LABEL', 'tapd4dded16-31'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 548931, 'tstamp': 548931}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 440888, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'})) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.404 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapd4dded16-30, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.406 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.412 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapd4dded16-30, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.413 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.415 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tapd4dded16-30, col_values=(('external_ids', {'iface-id': 'f0f8488b-423f-46a5-8a6a-984c2ae3438e'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.416 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:31:50 compute-0 nova_compute[356901]: 2025-10-11 02:31:50.417 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1616: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.420 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=9, options={'arp_ns_explicit_output': 'true', 'mac_prefix': 'fe:55:97', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'ce:9c:4f:b4:85:9b'}, ipsec=False) old=SB_Global(nb_cfg=8) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:31:50 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:50.422 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 10 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274
Oct 11 02:31:50 compute-0 sshd-session[440628]: Failed password for invalid user admin from 121.227.153.123 port 40652 ssh2
Oct 11 02:31:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:31:51 compute-0 nova_compute[356901]: 2025-10-11 02:31:51.335 2 INFO nova.virt.libvirt.driver [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Deleting instance files /var/lib/nova/instances/7513b93e-d2b8-4ae0-8f1c-3df190945259_del
Oct 11 02:31:51 compute-0 nova_compute[356901]: 2025-10-11 02:31:51.337 2 INFO nova.virt.libvirt.driver [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Deletion of /var/lib/nova/instances/7513b93e-d2b8-4ae0-8f1c-3df190945259_del complete
Oct 11 02:31:51 compute-0 nova_compute[356901]: 2025-10-11 02:31:51.411 2 INFO nova.compute.manager [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Took 1.85 seconds to destroy the instance on the hypervisor.
Oct 11 02:31:51 compute-0 nova_compute[356901]: 2025-10-11 02:31:51.412 2 DEBUG oslo.service.loopingcall [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Waiting for function nova.compute.manager.ComputeManager._try_deallocate_network.<locals>._deallocate_network_with_retries to return. func /usr/lib/python3.9/site-packages/oslo_service/loopingcall.py:435
Oct 11 02:31:51 compute-0 nova_compute[356901]: 2025-10-11 02:31:51.413 2 DEBUG nova.compute.manager [-] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Deallocating network for instance _deallocate_network /usr/lib/python3.9/site-packages/nova/compute/manager.py:2259
Oct 11 02:31:51 compute-0 nova_compute[356901]: 2025-10-11 02:31:51.413 2 DEBUG nova.network.neutron [-] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] deallocate_for_instance() deallocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1803
Oct 11 02:31:51 compute-0 sshd-session[440628]: Connection closed by invalid user admin 121.227.153.123 port 40652 [preauth]
Oct 11 02:31:51 compute-0 ceph-mon[191930]: pgmap v1616: 321 pgs: 321 active+clean; 201 MiB data, 323 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:31:51 compute-0 nova_compute[356901]: 2025-10-11 02:31:51.930 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:31:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1617: 321 pgs: 321 active+clean; 192 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s rd, 852 B/s wr, 12 op/s
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.432 2 DEBUG nova.compute.manager [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Received event network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.433 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.434 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.435 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.436 2 DEBUG nova.compute.manager [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] No waiting events found dispatching network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.436 2 WARNING nova.compute.manager [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Received unexpected event network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e for instance with vm_state active and task_state deleting.
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.437 2 DEBUG nova.compute.manager [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Received event network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.438 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.438 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.439 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.440 2 DEBUG nova.compute.manager [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] No waiting events found dispatching network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.440 2 WARNING nova.compute.manager [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Received unexpected event network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e for instance with vm_state active and task_state deleting.
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.440 2 DEBUG nova.compute.manager [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Received event network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.441 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.441 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.441 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.441 2 DEBUG nova.compute.manager [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] No waiting events found dispatching network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.441 2 WARNING nova.compute.manager [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Received unexpected event network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e for instance with vm_state active and task_state deleting.
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.441 2 DEBUG nova.compute.manager [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Received event network-changed-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.442 2 DEBUG nova.compute.manager [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Refreshing instance network info cache due to event network-changed-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.442 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-7513b93e-d2b8-4ae0-8f1c-3df190945259" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.442 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-7513b93e-d2b8-4ae0-8f1c-3df190945259" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.442 2 DEBUG nova.network.neutron [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Refreshing network info cache for port a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.583 2 INFO nova.network.neutron [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Port a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e from network info_cache is no longer associated with instance in Neutron. Removing from network info_cache.
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.584 2 DEBUG nova.network.neutron [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Updating instance_info_cache with network_info: [] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.605 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-7513b93e-d2b8-4ae0-8f1c-3df190945259" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.606 2 DEBUG nova.compute.manager [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Received event network-vif-unplugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.606 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.607 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.607 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.607 2 DEBUG nova.compute.manager [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] No waiting events found dispatching network-vif-unplugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.608 2 DEBUG nova.compute.manager [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Received event network-vif-unplugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e for instance with task_state deleting. _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10826
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.608 2 DEBUG nova.compute.manager [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Received event network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.608 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.609 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.609 2 DEBUG oslo_concurrency.lockutils [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.609 2 DEBUG nova.compute.manager [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] No waiting events found dispatching network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.610 2 WARNING nova.compute.manager [req-46121f3f-2cd1-416a-9d24-3f0d17710888 req-b7a94edb-379a-49d9-b3e1-adeb45e3387d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Received unexpected event network-vif-plugged-a942acb1-1e5d-49d7-ba92-7c6a8f05bd0e for instance with vm_state active and task_state deleting.
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.624 2 DEBUG nova.network.neutron [-] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Updating instance_info_cache with network_info: [] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.637 2 INFO nova.compute.manager [-] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Took 1.22 seconds to deallocate network for instance.
Oct 11 02:31:52 compute-0 sshd-session[440890]: Invalid user admin from 121.227.153.123 port 58738
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.687 2 DEBUG oslo_concurrency.lockutils [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.update_usage" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.687 2 DEBUG oslo_concurrency.lockutils [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:31:52 compute-0 nova_compute[356901]: 2025-10-11 02:31:52.812 2 DEBUG oslo_concurrency.processutils [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:31:52 compute-0 sshd-session[440890]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:31:52 compute-0 sshd-session[440890]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:31:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:31:53 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3311183869' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:31:53 compute-0 nova_compute[356901]: 2025-10-11 02:31:53.280 2 DEBUG oslo_concurrency.processutils [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.468s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:31:53 compute-0 nova_compute[356901]: 2025-10-11 02:31:53.298 2 DEBUG nova.compute.provider_tree [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:31:53 compute-0 nova_compute[356901]: 2025-10-11 02:31:53.316 2 DEBUG nova.scheduler.client.report [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:31:53 compute-0 nova_compute[356901]: 2025-10-11 02:31:53.343 2 DEBUG oslo_concurrency.lockutils [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: held 0.656s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:31:53 compute-0 nova_compute[356901]: 2025-10-11 02:31:53.375 2 INFO nova.scheduler.client.report [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Deleted allocations for instance 7513b93e-d2b8-4ae0-8f1c-3df190945259
Oct 11 02:31:53 compute-0 nova_compute[356901]: 2025-10-11 02:31:53.447 2 DEBUG oslo_concurrency.lockutils [None req-db28ecd4-f0fd-4afd-a3de-919e80e38786 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "7513b93e-d2b8-4ae0-8f1c-3df190945259" "released" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: held 3.890s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:31:53 compute-0 ceph-mon[191930]: pgmap v1617: 321 pgs: 321 active+clean; 192 MiB data, 323 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s rd, 852 B/s wr, 12 op/s
Oct 11 02:31:53 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3311183869' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:31:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1618: 321 pgs: 321 active+clean; 139 MiB data, 293 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.5 KiB/s wr, 38 op/s
Oct 11 02:31:54 compute-0 nova_compute[356901]: 2025-10-11 02:31:54.707 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:54 compute-0 sshd-session[440890]: Failed password for invalid user admin from 121.227.153.123 port 58738 ssh2
Oct 11 02:31:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:54.859 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:31:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:54.861 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:31:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:31:54.862 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:31:55 compute-0 nova_compute[356901]: 2025-10-11 02:31:55.081 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:55 compute-0 ceph-mon[191930]: pgmap v1618: 321 pgs: 321 active+clean; 139 MiB data, 293 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.5 KiB/s wr, 38 op/s
Oct 11 02:31:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:31:56 compute-0 sshd-session[440890]: Connection closed by invalid user admin 121.227.153.123 port 58738 [preauth]
Oct 11 02:31:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1619: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:31:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:31:56
Oct 11 02:31:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:31:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:31:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['images', '.mgr', '.rgw.root', 'volumes', 'default.rgw.control', 'default.rgw.meta', 'vms', 'backups', 'cephfs.cephfs.data', 'cephfs.cephfs.meta', 'default.rgw.log']
Oct 11 02:31:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:31:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:31:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:31:56 compute-0 rsyslogd[187706]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 02:31:56 compute-0 rsyslogd[187706]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 02:31:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:31:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:31:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:31:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:31:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:31:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:31:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:31:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:31:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:31:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:31:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:31:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:31:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:31:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:31:57 compute-0 ceph-mon[191930]: pgmap v1619: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:31:57 compute-0 sshd-session[440915]: Invalid user admin from 121.227.153.123 port 58744
Oct 11 02:31:57 compute-0 nova_compute[356901]: 2025-10-11 02:31:57.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:31:58 compute-0 sshd-session[440915]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:31:58 compute-0 sshd-session[440915]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:31:58 compute-0 podman[440917]: 2025-10-11 02:31:58.149139607 +0000 UTC m=+0.109468443 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS)
Oct 11 02:31:58 compute-0 podman[440919]: 2025-10-11 02:31:58.157724513 +0000 UTC m=+0.099707114 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:31:58 compute-0 podman[440918]: 2025-10-11 02:31:58.158040222 +0000 UTC m=+0.114160547 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, build-date=2025-08-20T13:12:41, config_id=edpm, vendor=Red Hat, Inc., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, name=ubi9-minimal, com.redhat.component=ubi9-minimal-container, release=1755695350, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vcs-type=git, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, version=9.6, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., architecture=x86_64, managed_by=edpm_ansible, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, distribution-scope=public, io.openshift.tags=minimal rhel9, io.buildah.version=1.33.7, container_name=openstack_network_exporter)
Oct 11 02:31:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1620: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:31:58 compute-0 nova_compute[356901]: 2025-10-11 02:31:58.894 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:31:59 compute-0 ceph-mon[191930]: pgmap v1620: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:31:59 compute-0 nova_compute[356901]: 2025-10-11 02:31:59.709 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:31:59 compute-0 podman[157119]: time="2025-10-11T02:31:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:31:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:31:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:31:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:31:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9058 "" "Go-http-client/1.1"
Oct 11 02:31:59 compute-0 nova_compute[356901]: 2025-10-11 02:31:59.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:31:59 compute-0 nova_compute[356901]: 2025-10-11 02:31:59.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:31:59 compute-0 nova_compute[356901]: 2025-10-11 02:31:59.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:32:00 compute-0 sshd-session[440915]: Failed password for invalid user admin from 121.227.153.123 port 58744 ssh2
Oct 11 02:32:00 compute-0 nova_compute[356901]: 2025-10-11 02:32:00.085 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1621: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:32:00 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:00.425 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '9'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:32:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:32:00 compute-0 nova_compute[356901]: 2025-10-11 02:32:00.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:32:00 compute-0 nova_compute[356901]: 2025-10-11 02:32:00.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:32:00 compute-0 nova_compute[356901]: 2025-10-11 02:32:00.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:32:01 compute-0 openstack_network_exporter[374316]: ERROR   02:32:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:32:01 compute-0 openstack_network_exporter[374316]: ERROR   02:32:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:32:01 compute-0 openstack_network_exporter[374316]: ERROR   02:32:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:32:01 compute-0 openstack_network_exporter[374316]: ERROR   02:32:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:32:01 compute-0 openstack_network_exporter[374316]: ERROR   02:32:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:32:01 compute-0 sshd-session[440915]: Connection closed by invalid user admin 121.227.153.123 port 58744 [preauth]
Oct 11 02:32:01 compute-0 ceph-mon[191930]: pgmap v1621: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:32:02 compute-0 nova_compute[356901]: 2025-10-11 02:32:02.027 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:32:02 compute-0 nova_compute[356901]: 2025-10-11 02:32:02.027 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:32:02 compute-0 nova_compute[356901]: 2025-10-11 02:32:02.027 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:32:02 compute-0 nova_compute[356901]: 2025-10-11 02:32:02.028 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:32:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1622: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:32:02 compute-0 sshd-session[440979]: Invalid user admin from 121.227.153.123 port 60828
Oct 11 02:32:02 compute-0 sshd-session[440979]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:32:02 compute-0 sshd-session[440979]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:32:03 compute-0 ceph-mon[191930]: pgmap v1622: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.059 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.086 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.086 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.087 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.088 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.088 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.128 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.128 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.129 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.130 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.131 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:32:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1623: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 938 B/s wr, 27 op/s
Oct 11 02:32:04 compute-0 sshd-session[440979]: Failed password for invalid user admin from 121.227.153.123 port 60828 ssh2
Oct 11 02:32:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:32:04 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/383335737' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:32:04 compute-0 ceph-mon[191930]: pgmap v1623: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 938 B/s wr, 27 op/s
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.633 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.502s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.711 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.749 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.749 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.749 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000004 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.757 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.757 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:32:04 compute-0 nova_compute[356901]: 2025-10-11 02:32:04.757 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.030 2 DEBUG nova.virt.driver [-] Emitting event <LifecycleEvent: 1760149910.0286288, 7513b93e-d2b8-4ae0-8f1c-3df190945259 => Stopped> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.031 2 INFO nova.compute.manager [-] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] VM Stopped (Lifecycle Event)
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.050 2 DEBUG nova.compute.manager [None req-176c5ce9-5714-42f5-a6ba-7638c4649b96 - - - - - -] [instance: 7513b93e-d2b8-4ae0-8f1c-3df190945259] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.088 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.226 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.228 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3596MB free_disk=59.922035217285156GB free_vcpus=6 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.228 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.228 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.345 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.346 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 358d31cf-2866-416a-b2fc-814ee4bfe89a actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.346 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 2 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.346 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1536MB phys_disk=59GB used_disk=4GB total_vcpus=8 used_vcpus=2 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.401 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:32:05 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/383335737' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:32:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:32:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:32:05 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/772517457' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.894 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.493s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.905 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.930 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.933 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:32:05 compute-0 nova_compute[356901]: 2025-10-11 02:32:05.934 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.705s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:32:06 compute-0 podman[441026]: 2025-10-11 02:32:06.255477198 +0000 UTC m=+0.143550828 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., summary=Provides the latest release of Red Hat Universal Base Image 9., release=1214.1726694543, io.k8s.display-name=Red Hat Universal Base Image 9, build-date=2024-09-18T21:23:30, managed_by=edpm_ansible, io.buildah.version=1.29.0, version=9.4, config_id=edpm, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=kepler, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, architecture=x86_64, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, name=ubi9, io.openshift.tags=base rhel9, com.redhat.component=ubi9-container, vendor=Red Hat, Inc., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f)
Oct 11 02:32:06 compute-0 sshd-session[440979]: Connection closed by invalid user admin 121.227.153.123 port 60828 [preauth]
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1624: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail; 682 B/s rd, 255 B/s wr, 1 op/s
Oct 11 02:32:06 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/772517457' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:32:06 compute-0 ceph-mon[191930]: pgmap v1624: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail; 682 B/s rd, 255 B/s wr, 1 op/s
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0011043162320114605 of space, bias 1.0, pg target 0.3312948696034382 quantized to 32 (current 32)
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:32:06 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:32:07 compute-0 sshd-session[441045]: Invalid user admin from 121.227.153.123 port 60840
Oct 11 02:32:08 compute-0 sshd-session[441045]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:32:08 compute-0 sshd-session[441045]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:32:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1625: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:09 compute-0 sshd-session[441045]: Failed password for invalid user admin from 121.227.153.123 port 60840 ssh2
Oct 11 02:32:09 compute-0 ceph-mon[191930]: pgmap v1625: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:09 compute-0 nova_compute[356901]: 2025-10-11 02:32:09.714 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:09 compute-0 sshd-session[441045]: Connection closed by invalid user admin 121.227.153.123 port 60840 [preauth]
Oct 11 02:32:10 compute-0 nova_compute[356901]: 2025-10-11 02:32:10.091 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1626: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:10 compute-0 ceph-mon[191930]: pgmap v1626: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:32:11 compute-0 sshd-session[441047]: Invalid user admin from 121.227.153.123 port 45452
Oct 11 02:32:11 compute-0 sshd-session[441047]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:32:11 compute-0 sshd-session[441047]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:32:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1627: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:13 compute-0 podman[441049]: 2025-10-11 02:32:13.251704006 +0000 UTC m=+0.129918635 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:32:13 compute-0 podman[441051]: 2025-10-11 02:32:13.25183105 +0000 UTC m=+0.111841438 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_compute, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:32:13 compute-0 podman[441052]: 2025-10-11 02:32:13.290540628 +0000 UTC m=+0.143514721 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, container_name=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2)
Oct 11 02:32:13 compute-0 podman[441050]: 2025-10-11 02:32:13.313114365 +0000 UTC m=+0.192307332 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_managed=true, managed_by=edpm_ansible, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, config_id=ovn_controller, container_name=ovn_controller)
Oct 11 02:32:13 compute-0 ceph-mon[191930]: pgmap v1627: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:13 compute-0 sshd-session[441047]: Failed password for invalid user admin from 121.227.153.123 port 45452 ssh2
Oct 11 02:32:13 compute-0 nova_compute[356901]: 2025-10-11 02:32:13.929 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:32:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1628: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:14 compute-0 nova_compute[356901]: 2025-10-11 02:32:14.717 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:14 compute-0 sshd-session[441047]: Connection closed by invalid user admin 121.227.153.123 port 45452 [preauth]
Oct 11 02:32:15 compute-0 nova_compute[356901]: 2025-10-11 02:32:15.095 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:15 compute-0 ceph-mon[191930]: pgmap v1628: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:32:16 compute-0 sshd-session[441131]: Invalid user admin from 121.227.153.123 port 45468
Oct 11 02:32:16 compute-0 sshd-session[441131]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:32:16 compute-0 sshd-session[441131]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:32:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1629: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.369 2 DEBUG oslo_concurrency.lockutils [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "358d31cf-2866-416a-b2fc-814ee4bfe89a" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.371 2 DEBUG oslo_concurrency.lockutils [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a" acquired by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.372 2 DEBUG oslo_concurrency.lockutils [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.374 2 DEBUG oslo_concurrency.lockutils [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" acquired by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.375 2 DEBUG oslo_concurrency.lockutils [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" "released" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.378 2 INFO nova.compute.manager [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Terminating instance
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.381 2 DEBUG nova.compute.manager [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Start destroying the instance on the hypervisor. _shutdown_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:3120
Oct 11 02:32:17 compute-0 ceph-mon[191930]: pgmap v1629: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:17 compute-0 kernel: tap7f4342b0-8a (unregistering): left promiscuous mode
Oct 11 02:32:17 compute-0 NetworkManager[44908]: <info>  [1760149937.5789] device (tap7f4342b0-8a): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Oct 11 02:32:17 compute-0 ovn_controller[88370]: 2025-10-11T02:32:17Z|00066|binding|INFO|Releasing lport 7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 from this chassis (sb_readonly=0)
Oct 11 02:32:17 compute-0 ovn_controller[88370]: 2025-10-11T02:32:17Z|00067|binding|INFO|Setting lport 7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 down in Southbound
Oct 11 02:32:17 compute-0 ovn_controller[88370]: 2025-10-11T02:32:17Z|00068|binding|INFO|Removing iface tap7f4342b0-8a ovn-installed in OVS
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.618 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:17 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:17.630 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:b0:ca:41 192.168.0.152'], port_security=['fa:16:3e:b0:ca:41 192.168.0.152'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'name': 'vnf-scaleup_group-wzkjkvgckve2-tqko7trrsvwg-ebwakep2a2y3-port-pnpy23xi2rfl', 'neutron:cidrs': '192.168.0.152/24', 'neutron:device_id': '358d31cf-2866-416a-b2fc-814ee4bfe89a', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'neutron:port_capabilities': '', 'neutron:port_name': 'vnf-scaleup_group-wzkjkvgckve2-tqko7trrsvwg-ebwakep2a2y3-port-pnpy23xi2rfl', 'neutron:project_id': '97026531b3404a11869cb85a059c4a0d', 'neutron:revision_number': '4', 'neutron:security_group_ids': 'c0c90d87-d29f-4e96-98a1-ffb301424ea4', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:port_fip': '192.168.122.173', 'neutron:host_id': 'compute-0.ctlplane.example.com'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=d19b0dd1-1656-436b-911a-8f2dcc98f6bf, chassis=[], tunnel_key=6, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8) old=Port_Binding(up=[True], chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:32:17 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:17.633 286362 INFO neutron.agent.ovn.metadata.agent [-] Port 7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 in datapath d4dded16-3268-4cf9-bb6b-aa5200d5e4ec unbound from our chassis
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.632 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:17 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:17.635 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network d4dded16-3268-4cf9-bb6b-aa5200d5e4ec
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.640 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:17 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:17.667 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[0104ff84-9195-4967-8cf5-8ef79ff671dc]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:32:17 compute-0 systemd[1]: machine-qemu\x2d4\x2dinstance\x2d00000004.scope: Deactivated successfully.
Oct 11 02:32:17 compute-0 systemd[1]: machine-qemu\x2d4\x2dinstance\x2d00000004.scope: Consumed 1min 29.390s CPU time.
Oct 11 02:32:17 compute-0 systemd-machined[137586]: Machine qemu-4-instance-00000004 terminated.
Oct 11 02:32:17 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:17.710 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[fabc2d53-7888-4a0b-ab05-f93c80fe365c]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:32:17 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:17.715 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[e969254c-7f10-4fe5-87fe-332820061c96]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:32:17 compute-0 podman[441133]: 2025-10-11 02:32:17.750300303 +0000 UTC m=+0.129154482 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, container_name=multipathd, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3, config_id=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
Oct 11 02:32:17 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:17.752 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[1fcb9373-065c-407c-a11c-cde6dc9bb71e]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:32:17 compute-0 podman[441136]: 2025-10-11 02:32:17.771792819 +0000 UTC m=+0.134071015 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, container_name=iscsid, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']})
Oct 11 02:32:17 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:17.773 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[3bdf94f9-28e3-4387-b24f-3700a7f3ea4d]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapd4dded16-31'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:11:50:48'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 8, 'tx_packets': 19, 'rx_bytes': 832, 'tx_bytes': 942, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 8, 'tx_packets': 19, 'rx_bytes': 832, 'tx_bytes': 942, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 15], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 548909, 'reachable_time': 31912, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 
'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 8, 'inoctets': 720, 'indelivers': 1, 'outforwdatagrams': 0, 'outpkts': 3, 'outoctets': 228, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 8, 'outmcastpkts': 3, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 720, 'outmcastoctets': 228, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 8, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 1, 'inerrors': 0, 'outmsgs': 3, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 441182, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:32:17 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:17.793 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[a6e091ad-f2f3-49bf-898d-a07b4f234267]: (4, ({'family': 2, 'prefixlen': 32, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '169.254.169.254'], ['IFA_LOCAL', '169.254.169.254'], ['IFA_BROADCAST', '169.254.169.254'], ['IFA_LABEL', 'tapd4dded16-31'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 548926, 'tstamp': 548926}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 441184, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'}, {'family': 2, 'prefixlen': 24, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '192.168.0.2'], ['IFA_LOCAL', '192.168.0.2'], ['IFA_BROADCAST', '192.168.0.255'], ['IFA_LABEL', 'tapd4dded16-31'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 548931, 'tstamp': 548931}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 441184, 'error': None, 'target': 'ovnmeta-d4dded16-3268-4cf9-bb6b-aa5200d5e4ec', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'})) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:32:17 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:17.795 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapd4dded16-30, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.796 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:17 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:17.811 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapd4dded16-30, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:32:17 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:17.811 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.810 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:17 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:17.812 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tapd4dded16-30, col_values=(('external_ids', {'iface-id': 'f0f8488b-423f-46a5-8a6a-984c2ae3438e'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:32:17 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:17.812 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.815 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.828 2 INFO nova.virt.libvirt.driver [-] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Instance destroyed successfully.
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.828 2 DEBUG nova.objects.instance [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lazy-loading 'resources' on Instance uuid 358d31cf-2866-416a-b2fc-814ee4bfe89a obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.842 2 DEBUG nova.virt.libvirt.vif [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:25:57Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=<?>,disable_terminate=False,display_description=None,display_name='vn-vgckve2-tqko7trrsvwg-ebwakep2a2y3-vnf-ihxi227vdpwh',ec2_ids=<?>,ephemeral_gb=1,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(1),hidden=False,host='compute-0.ctlplane.example.com',hostname='vn-vgckve2-tqko7trrsvwg-ebwakep2a2y3-vnf-ihxi227vdpwh',id=4,image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',info_cache=InstanceInfoCache,instance_type_id=1,kernel_id='',key_data=None,key_name=None,keypairs=<?>,launch_index=0,launched_at=2025-10-11T02:26:09Z,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=512,metadata={metering.server_group='3a1aa025-8cf8-437f-a9ac-0e04a9beaa1e'},migration_context=<?>,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=<?>,power_state=1,progress=0,project_id='97026531b3404a11869cb85a059c4a0d',ramdisk_id='',reservation_id='r-3vv490li',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,admin,reader',image_base_image_ref='a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7',image_container_format='bare',image_disk_format='qcow2',image_hw_cdrom_bus='sata',image_hw_disk_bus='virtio',image_hw_input_bus='usb',image_hw_machine_type='q35',image_hw_pointer_model='usbtablet',image_hw_video_model='virtio',image_hw_vif_model='virtio',image_min_disk='1',image_min_ram='0',image_owner_specified.openstack.md5='',image_owner_specified.openstack.object='images/cirros',image_owner_specified.openstack.sha256='',owner_project_name='admin',owner_user_name='admin'},tags=<?>,task_state='deleting',terminated_at=None,trusted_certs=<?>,updated_at=2025-10-11T02:26:09Z,user_data='Q29udGVudC1UeXBlOiBtdWx0aXBhcnQvbWl4ZWQ7IGJvdW5kYXJ5PSI9PT09PT09PT09PT09PT00MTYzNzY0OTY1NzU5NDY3NDE0PT0iCk1JTUUtVmVyc2lvbjogMS4wCgotLT09PT09PT09PT09PT09PTQxNjM3NjQ5NjU3NTk0Njc0MTQ9PQpDb250ZW50LVR5cGU6IHRleHQvY2xvdWQtY29uZmlnOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2xvdWQtY29uZmlnIgoKCgojIENhcHR1cmUgYWxsIHN1YnByb2Nlc3Mgb3V0cHV0IGludG8gYSBsb2dmaWxlCiMgVXNlZnVsIGZvciB0cm91Ymxlc2hvb3RpbmcgY2xvdWQtaW5pdCBpc3N1ZXMKb3V0cHV0OiB7YWxsOiAnfCB0ZWUgLWEgL3Zhci9sb2cvY2xvdWQtaW5pdC1vdXRwdXQubG9nJ30KCi0tPT09PT09PT09PT09PT09NDE2Mzc2NDk2NTc1OTQ2NzQxND09CkNvbnRlbnQtVHlwZTogdGV4dC9jbG91ZC1ib290aG9vazsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImJvb3Rob29rLnNoIgoKIyEvdXNyL2Jpbi9iYXNoCgojIEZJWE1FKHNoYWRvd2VyKSB0aGlzIGlzIGEgd29ya2Fyb3VuZCBmb3IgY2xvdWQtaW5pdCAwLjYuMyBwcmVzZW50IGluIFVidW50dQojIDEyLjA0IExUUzoKIyBodHRwczovL2J1Z3MubGF1bmNocGFkLm5ldC9oZWF0LytidWcvMTI1NzQxMAojCiMgVGhlIG9sZCBjbG91ZC1pbml0IGRvZXNuJ3QgY3JlYXRlIHRoZSB1c2VycyBkaXJlY3RseSBzbyB0aGUgY29tbWFuZHMgdG8gZG8KIyB0aGlzIGFyZSBpbmplY3RlZCB0aG91Z2ggbm92YV91dGlsc
y5weS4KIwojIE9uY2Ugd2UgZHJvcCBzdXBwb3J0IGZvciAwLjYuMywgd2UgY2FuIHNhZmVseSByZW1vdmUgdGhpcy4KCgojIGluIGNhc2UgaGVhdC1jZm50b29scyBoYXMgYmVlbiBpbnN0YWxsZWQgZnJvbSBwYWNrYWdlIGJ1dCBubyBzeW1saW5rcwojIGFyZSB5ZXQgaW4gL29wdC9hd3MvYmluLwpjZm4tY3JlYXRlLWF3cy1zeW1saW5rcwoKIyBEbyBub3QgcmVtb3ZlIC0gdGhlIGNsb3VkIGJvb3Rob29rIHNob3VsZCBhbHdheXMgcmV0dXJuIHN1Y2Nlc3MKZXhpdCAwCgotLT09PT09PT09PT09PT09PTQxNjM3NjQ5NjU3NTk0Njc0MTQ9PQpDb250ZW50LVR5cGU6IHRleHQvcGFydC1oYW5kbGVyOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0icGFydC1oYW5kbGVyLnB5IgoKIyBwYXJ0LWhhbmRsZXIKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wCiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZWQgdW5kZXIgdGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBvcwppbXBvcnQgc3lzCgoKZGVmIGxpc3RfdHlwZXMoKToKICAgIHJldHVybiBbInRleHQveC1jZm5pbml0ZGF0YSJdCgoKZGVmIGhhbmRsZV9wYXJ0KGRhdGEsIGN0eXBlLCBmaWxlbmFtZSwgcGF5bG9hZCk6CiAgICBpZiBjdHlwZSA9PSAiX19iZWdpbl9fIjoKICAgICAgICB0cnk6CiAgICAgICAgICAgIG9zLm1ha2VkaXJzKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzJywgaW50KCI3MDAiLCA4KSkKICAgICAgICBleGNlcHQgT1NFcnJvcjoKICAgICAgICAgICAgZXhfdHlwZSwgZSwgdGIgPSBzeXMuZXhjX2luZm8oKQogICAgICAgICAgICBpZiBlLmVycm5vICE9IGVycm5vLkVFWElTVDoKICAgICAgICAgICAgICAgIHJhaXNlCiAgICAgICAgcmV0dXJuCgogICAgaWYgY3R5cGUgPT0gIl9fZW5kX18iOgogICAgICAgIHJldHVybgoKICAgIHRpbWVzdGFtcCA9IGRhdGV0aW1lLmRhdGV0aW1lLm5vdygpCiAgICB3aXRoIG9wZW4oJy92YXIvbG9nL3BhcnQtaGFuZGxlci5sb2cnLCAnYScpIGFzIGxvZzoKICAgICAgICBsb2cud3JpdGUoJyVzIGZpbGVuYW1lOiVzLCBjdHlwZTolc1xuJyAlICh0aW1lc3RhbXAsIGZpbGVuYW1lLCBjdHlwZSkpCgogICAgaWYgY3R5cGUgPT0gJ3RleHQveC1jZm5pbml0ZGF0YSc6CiAgICAgICAgd2l0aCBvcGVuKCcvdmFyL2xpYi9oZWF0LWNmbnRvb2xzLyVzJyAlIGZpbGVuYW1lLCAndycpIGFzIGY6CiAgICAgICAgICAgIGYud3JpdGUocGF5bG9hZCkKCiAgICAgICAgIyBUT0RPKHNkYWtlKSBob3BlZnVsbHkgdGVtcG9yYXJ5IHVudGlsIHVzZXJzIG1vdmUgdG8gaGVhdC1jZm50b29scy0xLjMKICAgICAgICB3aXRoIG9wZW4oJy92YXIvbGliL2Nsb3VkL2RhdGEvJXMnICUgZmlsZW5hbWUsICd3JykgYXMgZjoKICAgICAgICAgICAgZi53cml0ZShwYXlsb2FkKQoKLS09PT09PT09PT09PT09PT00MTYzNzY0OTY1NzU5NDY3NDE0PT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtY2ZuaW5pdGRhdGE7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJjZm4tdXNlcmRhdGEiCgoKLS09PT09PT09PT09PT09PT00MTYzNzY0OTY1NzU5NDY3NDE0PT0KQ29udGVudC1UeXBlOiB0ZXh0L3gtc2hlbGxzY3JpcHQ7IGNoYXJzZXQ9InVzLWFzY2lpIgpNSU1FLVZlcnNpb246IDEuMApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkNvbnRlbnQtRGlzcG9zaXRpb246IGF0dGFjaG1lbnQ7IGZpbGVuYW1lPSJsb2d1c2VyZGF0YS5weSIKCiMhL3Vzci9iaW4vZW52IHB5dGhvbjMKIwojICAgIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSAiTGljZW5zZSIpOyB5b3UgbWF5CiMgICAgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4gWW91IG1heSBvYnRhaW4KIyAgICBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKIwojICAgICAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4w
CiMKIyAgICBVbmxlc3MgcmVxdWlyZWQgYnkgYXBwbGljYWJsZSBsYXcgb3IgYWdyZWVkIHRvIGluIHdyaXRpbmcsIHNvZnR3YXJlCiMgICAgZGlzdHJpYnV0ZWQgdW5kZXIgdGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gIkFTIElTIiBCQVNJUywgV0lUSE9VVAojICAgIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4gU2VlIHRoZQojICAgIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kIGxpbWl0YXRpb25zCiMgICAgdW5kZXIgdGhlIExpY2Vuc2UuCgppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IGVycm5vCmltcG9ydCBsb2dnaW5nCmltcG9ydCBvcwppbXBvcnQgc3VicHJvY2VzcwppbXBvcnQgc3lzCgoKVkFSX1BBVEggPSAnL3Zhci9saWIvaGVhdC1jZm50b29scycKTE9HID0gbG9nZ2luZy5nZXRMb2dnZXIoJ2hlYXQtcHJvdmlzaW9uJykKCgpkZWYgaW5pdF9sb2dnaW5nKCk6CiAgICBMT0cuc2V0TGV2ZWwobG9nZ2luZy5JTkZPKQogICAgTE9HLmFkZEhhbmRsZXIobG9nZ2luZy5TdHJlYW1IYW5kbGVyKCkpCiAgICBmaCA9IGxvZ2dpbmcuRmlsZUhhbmRsZXIoIi92YXIvbG9nL2hlYXQtcHJvdmlzaW9uLmxvZyIpCiAgICBvcy5jaG1vZChmaC5iYXNlRmlsZW5hbWUsIGludCgiNjAwIiwgOCkpCiAgICBMT0cuYWRkSGFuZGxlcihmaCkKCgpkZWYgY2FsbChhcmdzKToKCiAgICBjbGFzcyBMb2dTdHJlYW0ob2JqZWN0KToKCiAgICAgICAgZGVmIHdyaXRlKHNlbGYsIGRhdGEpOgogICAgICAgICAgICBMT0cuaW5mbyhkYXRhKQoKICAgIExPRy5pbmZvKCclc1
Oct 11 02:32:17 compute-0 nova_compute[356901]: xuJywgJyAnLmpvaW4oYXJncykpICAjIG5vcWEKICAgIHRyeToKICAgICAgICBscyA9IExvZ1N0cmVhbSgpCiAgICAgICAgcCA9IHN1YnByb2Nlc3MuUG9wZW4oYXJncywgc3Rkb3V0PXN1YnByb2Nlc3MuUElQRSwKICAgICAgICAgICAgICAgICAgICAgICAgICAgICBzdGRlcnI9c3VicHJvY2Vzcy5QSVBFKQogICAgICAgIGRhdGEgPSBwLmNvbW11bmljYXRlKCkKICAgICAgICBpZiBkYXRhOgogICAgICAgICAgICBmb3IgeCBpbiBkYXRhOgogICAgICAgICAgICAgICAgbHMud3JpdGUoeCkKICAgIGV4Y2VwdCBPU0Vycm9yOgogICAgICAgIGV4X3R5cGUsIGV4LCB0YiA9IHN5cy5leGNfaW5mbygpCiAgICAgICAgaWYgZXguZXJybm8gPT0gZXJybm8uRU5PRVhFQzoKICAgICAgICAgICAgTE9HLmVycm9yKCdVc2VyZGF0YSBlbXB0eSBvciBub3QgZXhlY3V0YWJsZTogJXMnLCBleCkKICAgICAgICAgICAgcmV0dXJuIG9zLkVYX09LCiAgICAgICAgZWxzZToKICAgICAgICAgICAgTE9HLmVycm9yKCdPUyBlcnJvciBydW5uaW5nIHVzZXJkYXRhOiAlcycsIGV4KQogICAgICAgICAgICByZXR1cm4gb3MuRVhfT1NFUlIKICAgIGV4Y2VwdCBFeGNlcHRpb246CiAgICAgICAgZXhfdHlwZSwgZXgsIHRiID0gc3lzLmV4Y19pbmZvKCkKICAgICAgICBMT0cuZXJyb3IoJ1Vua25vd24gZXJyb3IgcnVubmluZyB1c2VyZGF0YTogJXMnLCBleCkKICAgICAgICByZXR1cm4gb3MuRVhfU09GVFdBUkUKICAgIHJldHVybiBwLnJldHVybmNvZGUKCgpkZWYgbWFpbigpOgogICAgdXNlcmRhdGFfcGF0aCA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ2Nmbi11c2VyZGF0YScpCiAgICBvcy5jaG1vZCh1c2VyZGF0YV9wYXRoLCBpbnQoIjcwMCIsIDgpKQoKICAgIExPRy5pbmZvKCdQcm92aXNpb24gYmVnYW46ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICByZXR1cm5jb2RlID0gY2FsbChbdXNlcmRhdGFfcGF0aF0pCiAgICBMT0cuaW5mbygnUHJvdmlzaW9uIGRvbmU6ICVzJywgZGF0ZXRpbWUuZGF0ZXRpbWUubm93KCkpCiAgICBpZiByZXR1cm5jb2RlOgogICAgICAgIHJldHVybiByZXR1cm5jb2RlCgoKaWYgX19uYW1lX18gPT0gJ19fbWFpbl9fJzoKICAgIGluaXRfbG9nZ2luZygpCgogICAgY29kZSA9IG1haW4oKQogICAgaWYgY29kZToKICAgICAgICBMT0cuZXJyb3IoJ1Byb3Zpc2lvbiBmYWlsZWQgd2l0aCBleGl0IGNvZGUgJXMnLCBjb2RlKQogICAgICAgIHN5cy5leGl0KGNvZGUpCgogICAgcHJvdmlzaW9uX2xvZyA9IG9zLnBhdGguam9pbihWQVJfUEFUSCwgJ3Byb3Zpc2lvbi1maW5pc2hlZCcpCiAgICAjIHRvdWNoIHRoZSBmaWxlIHNvIGl0IGlzIHRpbWVzdGFtcGVkIHdpdGggd2hlbiBmaW5pc2hlZAogICAgd2l0aCBvcGVuKHByb3Zpc2lvbl9sb2csICdhJyk6CiAgICAgICAgb3MudXRpbWUocHJvdmlzaW9uX2xvZywgTm9uZSkKCi0tPT09PT09PT09PT09PT09NDE2Mzc2NDk2NTc1OTQ2NzQxND09CkNvbnRlbnQtVHlwZTogdGV4dC94LWNmbmluaXRkYXRhOyBjaGFyc2V0PSJ1cy1hc2NpaSIKTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UcmFuc2Zlci1FbmNvZGluZzogN2JpdApDb250ZW50LURpc3Bvc2l0aW9uOiBhdHRhY2htZW50OyBmaWxlbmFtZT0iY2ZuLW1ldGFkYXRhLXNlcnZlciIKCmh0dHBzOi8vaGVhdC1jZm5hcGktaW50ZXJuYWwub3BlbnN0YWNrLnN2Yzo4MDAwL3YxLwotLT09PT09PT09PT09PT09PTQxNjM3NjQ5NjU3NTk0Njc0MTQ9PQpDb250ZW50LVR5cGU6IHRleHQveC1jZm5pbml0ZGF0YTsgY2hhcnNldD0idXMtYXNjaWkiCk1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IDdiaXQKQ29udGVudC1EaXNwb3NpdGlvbjogYXR0YWNobWVudDsgZmlsZW5hbWU9ImNmbi1ib3RvLWNmZyIKCltCb3RvXQpkZWJ1ZyA9IDAKaXNfc2VjdXJlID0gMApodHRwc192YWxpZGF0ZV9jZXJ0aWZpY2F0ZXMgPSAxCmNmbl9yZWdpb25fbmFtZSA9IGhlYXQKY2ZuX3JlZ2lvbl9lbmRwb2ludCA9IGhlYXQtY2ZuYXBpLWludGVybmFsLm9wZW5zdGFjay5zdmMKLS09PT09PT09PT09PT09PT00MTYzNzY0OTY1NzU5NDY3NDE0PT0tLQo=',user_id='d215f3ebbc07435493ccd666fc80109d',uuid=358d31cf-2866-416a-b2fc-814ee4bfe89a,vcpu_model=<?>,vcpus=1,vm_mode=None,vm_state='active') vif={"id": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "address": "fa:16:3e:b0:ca:41", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.152", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.173", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, 
"tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap7f4342b0-8a", "ovs_interfaceid": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} unplug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:828
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.842 2 DEBUG nova.network.os_vif_util [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converting VIF {"id": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "address": "fa:16:3e:b0:ca:41", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.152", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.173", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap7f4342b0-8a", "ovs_interfaceid": "7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": true, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.843 2 DEBUG nova.network.os_vif_util [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Converted object VIFOpenVSwitch(active=True,address=fa:16:3e:b0:ca:41,bridge_name='br-int',has_traffic_filtering=True,id=7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tap7f4342b0-8a') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.843 2 DEBUG os_vif [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Unplugging vif VIFOpenVSwitch(active=True,address=fa:16:3e:b0:ca:41,bridge_name='br-int',has_traffic_filtering=True,id=7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tap7f4342b0-8a') unplug /usr/lib/python3.9/site-packages/os_vif/__init__.py:109
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.845 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.845 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tap7f4342b0-8a, bridge=br-int, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.847 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.849 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:17 compute-0 nova_compute[356901]: 2025-10-11 02:32:17.851 2 INFO os_vif [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Successfully unplugged vif VIFOpenVSwitch(active=True,address=fa:16:3e:b0:ca:41,bridge_name='br-int',has_traffic_filtering=True,id=7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8,network=Network(d4dded16-3268-4cf9-bb6b-aa5200d5e4ec),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=True,vif_name='tap7f4342b0-8a')
Oct 11 02:32:18 compute-0 nova_compute[356901]: 2025-10-11 02:32:18.045 2 DEBUG nova.compute.manager [req-67171d9d-94de-4ff8-9005-8a59dda23673 req-75fff24f-d3ab-41b2-9322-dffed4ef404f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Received event network-vif-unplugged-7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:32:18 compute-0 nova_compute[356901]: 2025-10-11 02:32:18.047 2 DEBUG oslo_concurrency.lockutils [req-67171d9d-94de-4ff8-9005-8a59dda23673 req-75fff24f-d3ab-41b2-9322-dffed4ef404f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:32:18 compute-0 nova_compute[356901]: 2025-10-11 02:32:18.048 2 DEBUG oslo_concurrency.lockutils [req-67171d9d-94de-4ff8-9005-8a59dda23673 req-75fff24f-d3ab-41b2-9322-dffed4ef404f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:32:18 compute-0 nova_compute[356901]: 2025-10-11 02:32:18.049 2 DEBUG oslo_concurrency.lockutils [req-67171d9d-94de-4ff8-9005-8a59dda23673 req-75fff24f-d3ab-41b2-9322-dffed4ef404f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:32:18 compute-0 nova_compute[356901]: 2025-10-11 02:32:18.049 2 DEBUG nova.compute.manager [req-67171d9d-94de-4ff8-9005-8a59dda23673 req-75fff24f-d3ab-41b2-9322-dffed4ef404f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] No waiting events found dispatching network-vif-unplugged-7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:32:18 compute-0 nova_compute[356901]: 2025-10-11 02:32:18.050 2 DEBUG nova.compute.manager [req-67171d9d-94de-4ff8-9005-8a59dda23673 req-75fff24f-d3ab-41b2-9322-dffed4ef404f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Received event network-vif-unplugged-7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 for instance with task_state deleting. _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10826
Oct 11 02:32:18 compute-0 rsyslogd[187706]: message too long (8192) with configured size 8096, begin of message is: 2025-10-11 02:32:17.842 2 DEBUG nova.virt.libvirt.vif [None req-d0351305-ca24-4c [v8.2506.0-2.el9 try https://www.rsyslog.com/e/2445 ]
Oct 11 02:32:18 compute-0 sshd-session[441131]: Failed password for invalid user admin from 121.227.153.123 port 45468 ssh2
Oct 11 02:32:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1630: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s rd, 0 B/s wr, 4 op/s
Oct 11 02:32:19 compute-0 nova_compute[356901]: 2025-10-11 02:32:19.204 2 INFO nova.virt.libvirt.driver [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Deleting instance files /var/lib/nova/instances/358d31cf-2866-416a-b2fc-814ee4bfe89a_del
Oct 11 02:32:19 compute-0 nova_compute[356901]: 2025-10-11 02:32:19.205 2 INFO nova.virt.libvirt.driver [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Deletion of /var/lib/nova/instances/358d31cf-2866-416a-b2fc-814ee4bfe89a_del complete
Oct 11 02:32:19 compute-0 nova_compute[356901]: 2025-10-11 02:32:19.285 2 INFO nova.compute.manager [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Took 1.90 seconds to destroy the instance on the hypervisor.
Oct 11 02:32:19 compute-0 nova_compute[356901]: 2025-10-11 02:32:19.285 2 DEBUG oslo.service.loopingcall [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Waiting for function nova.compute.manager.ComputeManager._try_deallocate_network.<locals>._deallocate_network_with_retries to return. func /usr/lib/python3.9/site-packages/oslo_service/loopingcall.py:435
Oct 11 02:32:19 compute-0 nova_compute[356901]: 2025-10-11 02:32:19.286 2 DEBUG nova.compute.manager [-] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Deallocating network for instance _deallocate_network /usr/lib/python3.9/site-packages/nova/compute/manager.py:2259
Oct 11 02:32:19 compute-0 nova_compute[356901]: 2025-10-11 02:32:19.286 2 DEBUG nova.network.neutron [-] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] deallocate_for_instance() deallocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1803
Oct 11 02:32:19 compute-0 ceph-mon[191930]: pgmap v1630: 321 pgs: 321 active+clean; 139 MiB data, 289 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s rd, 0 B/s wr, 4 op/s
Oct 11 02:32:19 compute-0 nova_compute[356901]: 2025-10-11 02:32:19.722 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:19 compute-0 sshd-session[441131]: Connection closed by invalid user admin 121.227.153.123 port 45468 [preauth]
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.135 2 DEBUG nova.compute.manager [req-10357bc2-615b-4848-b7e5-ad420214fddd req-6459cbe1-52e6-48bb-8dde-b22f27ff8456 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Received event network-vif-plugged-7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.136 2 DEBUG oslo_concurrency.lockutils [req-10357bc2-615b-4848-b7e5-ad420214fddd req-6459cbe1-52e6-48bb-8dde-b22f27ff8456 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.136 2 DEBUG oslo_concurrency.lockutils [req-10357bc2-615b-4848-b7e5-ad420214fddd req-6459cbe1-52e6-48bb-8dde-b22f27ff8456 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.137 2 DEBUG oslo_concurrency.lockutils [req-10357bc2-615b-4848-b7e5-ad420214fddd req-6459cbe1-52e6-48bb-8dde-b22f27ff8456 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.137 2 DEBUG nova.compute.manager [req-10357bc2-615b-4848-b7e5-ad420214fddd req-6459cbe1-52e6-48bb-8dde-b22f27ff8456 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] No waiting events found dispatching network-vif-plugged-7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.138 2 WARNING nova.compute.manager [req-10357bc2-615b-4848-b7e5-ad420214fddd req-6459cbe1-52e6-48bb-8dde-b22f27ff8456 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Received unexpected event network-vif-plugged-7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 for instance with vm_state active and task_state deleting.
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.138 2 DEBUG nova.compute.manager [req-10357bc2-615b-4848-b7e5-ad420214fddd req-6459cbe1-52e6-48bb-8dde-b22f27ff8456 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Received event network-changed-7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.138 2 DEBUG nova.compute.manager [req-10357bc2-615b-4848-b7e5-ad420214fddd req-6459cbe1-52e6-48bb-8dde-b22f27ff8456 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Refreshing instance network info cache due to event network-changed-7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.139 2 DEBUG oslo_concurrency.lockutils [req-10357bc2-615b-4848-b7e5-ad420214fddd req-6459cbe1-52e6-48bb-8dde-b22f27ff8456 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-358d31cf-2866-416a-b2fc-814ee4bfe89a" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.139 2 DEBUG oslo_concurrency.lockutils [req-10357bc2-615b-4848-b7e5-ad420214fddd req-6459cbe1-52e6-48bb-8dde-b22f27ff8456 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-358d31cf-2866-416a-b2fc-814ee4bfe89a" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.140 2 DEBUG nova.network.neutron [req-10357bc2-615b-4848-b7e5-ad420214fddd req-6459cbe1-52e6-48bb-8dde-b22f27ff8456 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Refreshing network info cache for port 7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.291 2 INFO nova.network.neutron [req-10357bc2-615b-4848-b7e5-ad420214fddd req-6459cbe1-52e6-48bb-8dde-b22f27ff8456 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Port 7f4342b0-8ae6-4898-bc1a-0daa87a6b9a8 from network info_cache is no longer associated with instance in Neutron. Removing from network info_cache.
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.292 2 DEBUG nova.network.neutron [req-10357bc2-615b-4848-b7e5-ad420214fddd req-6459cbe1-52e6-48bb-8dde-b22f27ff8456 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Updating instance_info_cache with network_info: [] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.309 2 DEBUG oslo_concurrency.lockutils [req-10357bc2-615b-4848-b7e5-ad420214fddd req-6459cbe1-52e6-48bb-8dde-b22f27ff8456 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-358d31cf-2866-416a-b2fc-814ee4bfe89a" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.423 2 DEBUG nova.network.neutron [-] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Updating instance_info_cache with network_info: [] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:32:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1631: 321 pgs: 321 active+clean; 110 MiB data, 273 MiB used, 60 GiB / 60 GiB avail; 6.4 KiB/s rd, 341 B/s wr, 9 op/s
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.440 2 INFO nova.compute.manager [-] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Took 1.15 seconds to deallocate network for instance.
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.480 2 DEBUG oslo_concurrency.lockutils [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.update_usage" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.481 2 DEBUG oslo_concurrency.lockutils [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:32:20 compute-0 nova_compute[356901]: 2025-10-11 02:32:20.571 2 DEBUG oslo_concurrency.processutils [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:32:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:32:21 compute-0 sshd-session[441216]: Invalid user admin from 121.227.153.123 port 59432
Oct 11 02:32:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:32:21 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1648690550' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:32:21 compute-0 nova_compute[356901]: 2025-10-11 02:32:21.193 2 DEBUG oslo_concurrency.processutils [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.621s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:32:21 compute-0 nova_compute[356901]: 2025-10-11 02:32:21.208 2 DEBUG nova.compute.provider_tree [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:32:21 compute-0 nova_compute[356901]: 2025-10-11 02:32:21.227 2 DEBUG nova.scheduler.client.report [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:32:21 compute-0 nova_compute[356901]: 2025-10-11 02:32:21.249 2 DEBUG oslo_concurrency.lockutils [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: held 0.768s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:32:21 compute-0 nova_compute[356901]: 2025-10-11 02:32:21.281 2 INFO nova.scheduler.client.report [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Deleted allocations for instance 358d31cf-2866-416a-b2fc-814ee4bfe89a
Oct 11 02:32:21 compute-0 nova_compute[356901]: 2025-10-11 02:32:21.341 2 DEBUG oslo_concurrency.lockutils [None req-d0351305-ca24-4c2d-8955-eed6078cdbfb d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "358d31cf-2866-416a-b2fc-814ee4bfe89a" "released" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: held 3.971s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:32:21 compute-0 sshd-session[441216]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:32:21 compute-0 sshd-session[441216]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:32:21 compute-0 ceph-mon[191930]: pgmap v1631: 321 pgs: 321 active+clean; 110 MiB data, 273 MiB used, 60 GiB / 60 GiB avail; 6.4 KiB/s rd, 341 B/s wr, 9 op/s
Oct 11 02:32:21 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1648690550' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:32:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1632: 321 pgs: 321 active+clean; 96 MiB data, 263 MiB used, 60 GiB / 60 GiB avail; 14 KiB/s rd, 1.1 KiB/s wr, 20 op/s
Oct 11 02:32:22 compute-0 nova_compute[356901]: 2025-10-11 02:32:22.848 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:23 compute-0 sshd-session[441216]: Failed password for invalid user admin from 121.227.153.123 port 59432 ssh2
Oct 11 02:32:23 compute-0 ceph-mon[191930]: pgmap v1632: 321 pgs: 321 active+clean; 96 MiB data, 263 MiB used, 60 GiB / 60 GiB avail; 14 KiB/s rd, 1.1 KiB/s wr, 20 op/s
Oct 11 02:32:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1633: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:32:24 compute-0 nova_compute[356901]: 2025-10-11 02:32:24.725 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:24 compute-0 sshd-session[441216]: Connection closed by invalid user admin 121.227.153.123 port 59432 [preauth]
Oct 11 02:32:25 compute-0 ceph-mon[191930]: pgmap v1633: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:32:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:32:26 compute-0 sshd-session[441240]: Invalid user admin from 121.227.153.123 port 59438
Oct 11 02:32:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1634: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:32:26 compute-0 sshd-session[441240]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:32:26 compute-0 sshd-session[441240]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:32:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:32:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:32:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:32:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:32:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:32:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:32:26 compute-0 ceph-mon[191930]: pgmap v1634: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:32:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:32:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/36640316' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:32:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:32:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/36640316' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:32:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/36640316' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:32:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/36640316' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:32:27 compute-0 nova_compute[356901]: 2025-10-11 02:32:27.853 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:28 compute-0 sshd-session[441240]: Failed password for invalid user admin from 121.227.153.123 port 59438 ssh2
Oct 11 02:32:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1635: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:32:28 compute-0 ceph-mon[191930]: pgmap v1635: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail; 28 KiB/s rd, 1.7 KiB/s wr, 40 op/s
Oct 11 02:32:29 compute-0 podman[441244]: 2025-10-11 02:32:29.252490168 +0000 UTC m=+0.116977018 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:32:29 compute-0 podman[441242]: 2025-10-11 02:32:29.259198143 +0000 UTC m=+0.140495013 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:32:29 compute-0 podman[441243]: 2025-10-11 02:32:29.280967417 +0000 UTC m=+0.155158040 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=openstack_network_exporter, name=ubi9-minimal, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, build-date=2025-08-20T13:12:41, config_id=edpm, com.redhat.component=ubi9-minimal-container, url=https://catalog.redhat.com/en/search?searchType=containers, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, distribution-scope=public, managed_by=edpm_ansible, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=minimal rhel9, version=9.6, maintainer=Red Hat, Inc., io.openshift.expose-services=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vendor=Red Hat, Inc., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, io.buildah.version=1.33.7)
Oct 11 02:32:29 compute-0 nova_compute[356901]: 2025-10-11 02:32:29.727 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:29 compute-0 podman[157119]: time="2025-10-11T02:32:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:32:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:32:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:32:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:32:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9060 "" "Go-http-client/1.1"
Oct 11 02:32:29 compute-0 sshd-session[441240]: Connection closed by invalid user admin 121.227.153.123 port 59438 [preauth]
Oct 11 02:32:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1636: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail; 25 KiB/s rd, 1.7 KiB/s wr, 35 op/s
Oct 11 02:32:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:32:31 compute-0 sshd-session[441304]: Invalid user admin from 121.227.153.123 port 40768
Oct 11 02:32:31 compute-0 openstack_network_exporter[374316]: ERROR   02:32:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:32:31 compute-0 openstack_network_exporter[374316]: ERROR   02:32:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:32:31 compute-0 openstack_network_exporter[374316]: ERROR   02:32:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:32:31 compute-0 openstack_network_exporter[374316]: ERROR   02:32:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:32:31 compute-0 openstack_network_exporter[374316]: ERROR   02:32:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:32:31 compute-0 ceph-mon[191930]: pgmap v1636: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail; 25 KiB/s rd, 1.7 KiB/s wr, 35 op/s
Oct 11 02:32:31 compute-0 sshd-session[441304]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:32:31 compute-0 sshd-session[441304]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:32:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1637: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail; 22 KiB/s rd, 1.4 KiB/s wr, 30 op/s
Oct 11 02:32:32 compute-0 nova_compute[356901]: 2025-10-11 02:32:32.826 2 DEBUG nova.virt.driver [-] Emitting event <LifecycleEvent: 1760149937.8246276, 358d31cf-2866-416a-b2fc-814ee4bfe89a => Stopped> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:32:32 compute-0 nova_compute[356901]: 2025-10-11 02:32:32.827 2 INFO nova.compute.manager [-] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] VM Stopped (Lifecycle Event)
Oct 11 02:32:32 compute-0 nova_compute[356901]: 2025-10-11 02:32:32.849 2 DEBUG nova.compute.manager [None req-cc663b76-19cd-40d6-b050-5000780c3b9b - - - - - -] [instance: 358d31cf-2866-416a-b2fc-814ee4bfe89a] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:32:32 compute-0 nova_compute[356901]: 2025-10-11 02:32:32.857 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:33 compute-0 sshd-session[441304]: Failed password for invalid user admin from 121.227.153.123 port 40768 ssh2
Oct 11 02:32:33 compute-0 ceph-mon[191930]: pgmap v1637: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail; 22 KiB/s rd, 1.4 KiB/s wr, 30 op/s
Oct 11 02:32:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1638: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail; 14 KiB/s rd, 682 B/s wr, 19 op/s
Oct 11 02:32:34 compute-0 nova_compute[356901]: 2025-10-11 02:32:34.731 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:35 compute-0 sshd-session[441304]: Connection closed by invalid user admin 121.227.153.123 port 40768 [preauth]
Oct 11 02:32:35 compute-0 ceph-mon[191930]: pgmap v1638: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail; 14 KiB/s rd, 682 B/s wr, 19 op/s
Oct 11 02:32:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #75. Immutable memtables: 0.
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:32:35.749979) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 41] Flushing memtable with next log file: 75
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149955750107, "job": 41, "event": "flush_started", "num_memtables": 1, "num_entries": 683, "num_deletes": 250, "total_data_size": 848774, "memory_usage": 861992, "flush_reason": "Manual Compaction"}
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 41] Level-0 flush table #76: started
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149955759745, "cf_name": "default", "job": 41, "event": "table_file_creation", "file_number": 76, "file_size": 551855, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 33216, "largest_seqno": 33898, "table_properties": {"data_size": 548759, "index_size": 1004, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1029, "raw_key_size": 8148, "raw_average_key_size": 20, "raw_value_size": 542329, "raw_average_value_size": 1366, "num_data_blocks": 45, "num_entries": 397, "num_filter_entries": 397, "num_deletions": 250, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760149900, "oldest_key_time": 1760149900, "file_creation_time": 1760149955, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 76, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 41] Flush lasted 9852 microseconds, and 5248 cpu microseconds.
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:32:35.759848) [db/flush_job.cc:967] [default] [JOB 41] Level-0 flush table #76: 551855 bytes OK
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:32:35.759872) [db/memtable_list.cc:519] [default] Level-0 commit table #76 started
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:32:35.762443) [db/memtable_list.cc:722] [default] Level-0 commit table #76: memtable #1 done
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:32:35.762464) EVENT_LOG_v1 {"time_micros": 1760149955762457, "job": 41, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:32:35.762487) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 41] Try to delete WAL files size 845207, prev total WAL file size 845207, number of live WAL files 2.
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000072.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:32:35.764039) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6D6772737461740031323534' seq:72057594037927935, type:22 .. '6D6772737461740031353035' seq:0, type:0; will stop at (end)
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 42] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 41 Base level 0, inputs: [76(538KB)], [74(9959KB)]
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149955764121, "job": 42, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [76], "files_L6": [74], "score": -1, "input_data_size": 10750419, "oldest_snapshot_seqno": -1}
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 42] Generated table #77: 5276 keys, 7740292 bytes, temperature: kUnknown
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149955844334, "cf_name": "default", "job": 42, "event": "table_file_creation", "file_number": 77, "file_size": 7740292, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 7706548, "index_size": 19426, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 13253, "raw_key_size": 133520, "raw_average_key_size": 25, "raw_value_size": 7612568, "raw_average_value_size": 1442, "num_data_blocks": 803, "num_entries": 5276, "num_filter_entries": 5276, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760149955, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 77, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:32:35.844758) [db/compaction/compaction_job.cc:1663] [default] [JOB 42] Compacted 1@0 + 1@6 files to L6 => 7740292 bytes
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:32:35.847706) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 133.8 rd, 96.4 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(0.5, 9.7 +0.0 blob) out(7.4 +0.0 blob), read-write-amplify(33.5) write-amplify(14.0) OK, records in: 5765, records dropped: 489 output_compression: NoCompression
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:32:35.847740) EVENT_LOG_v1 {"time_micros": 1760149955847723, "job": 42, "event": "compaction_finished", "compaction_time_micros": 80333, "compaction_time_cpu_micros": 43612, "output_level": 6, "num_output_files": 1, "total_output_size": 7740292, "num_input_records": 5765, "num_output_records": 5276, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000076.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149955848187, "job": 42, "event": "table_file_deletion", "file_number": 76}
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000074.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760149955852611, "job": 42, "event": "table_file_deletion", "file_number": 74}
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:32:35.763532) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:32:35.852918) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:32:35.852929) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:32:35.852934) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:32:35.852938) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:32:35 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:32:35.852942) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:32:36 compute-0 sshd-session[441306]: Invalid user admin from 121.227.153.123 port 40774
Oct 11 02:32:36 compute-0 podman[441308]: 2025-10-11 02:32:36.438943575 +0000 UTC m=+0.116106783 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, version=9.4, maintainer=Red Hat, Inc., summary=Provides the latest release of Red Hat Universal Base Image 9., distribution-scope=public, io.openshift.tags=base rhel9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.buildah.version=1.29.0, release-0.7.12=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, container_name=kepler, io.openshift.expose-services=, release=1214.1726694543, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, build-date=2024-09-18T21:23:30, config_id=edpm, managed_by=edpm_ansible, architecture=x86_64, com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc.)
Oct 11 02:32:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1639: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:36 compute-0 sshd-session[441306]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:32:36 compute-0 sshd-session[441306]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:32:36 compute-0 ceph-mon[191930]: pgmap v1639: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:37 compute-0 nova_compute[356901]: 2025-10-11 02:32:37.860 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1640: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:39 compute-0 sshd-session[441306]: Failed password for invalid user admin from 121.227.153.123 port 40774 ssh2
Oct 11 02:32:39 compute-0 ceph-mon[191930]: pgmap v1640: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:39 compute-0 nova_compute[356901]: 2025-10-11 02:32:39.734 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:40 compute-0 sshd-session[441306]: Connection closed by invalid user admin 121.227.153.123 port 40774 [preauth]
Oct 11 02:32:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1641: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:32:41 compute-0 sshd-session[441327]: Invalid user admin from 121.227.153.123 port 33540
Oct 11 02:32:41 compute-0 ceph-mon[191930]: pgmap v1641: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:41 compute-0 sshd-session[441327]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:32:41 compute-0 sshd-session[441327]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:32:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1642: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:42 compute-0 nova_compute[356901]: 2025-10-11 02:32:42.862 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:43 compute-0 ceph-mon[191930]: pgmap v1642: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:43 compute-0 sshd-session[441327]: Failed password for invalid user admin from 121.227.153.123 port 33540 ssh2
Oct 11 02:32:44 compute-0 podman[441329]: 2025-10-11 02:32:44.252445011 +0000 UTC m=+0.128476233 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:32:44 compute-0 podman[441332]: 2025-10-11 02:32:44.264134141 +0000 UTC m=+0.118706878 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 02:32:44 compute-0 podman[441331]: 2025-10-11 02:32:44.286122042 +0000 UTC m=+0.146569040 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0)
Oct 11 02:32:44 compute-0 podman[441330]: 2025-10-11 02:32:44.286461291 +0000 UTC m=+0.164307096 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, container_name=ovn_controller, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:32:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1643: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:44 compute-0 nova_compute[356901]: 2025-10-11 02:32:44.736 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:45 compute-0 sshd-session[441327]: Connection closed by invalid user admin 121.227.153.123 port 33540 [preauth]
Oct 11 02:32:45 compute-0 ceph-mon[191930]: pgmap v1643: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:32:46 compute-0 sshd-session[441413]: Invalid user admin from 121.227.153.123 port 33546
Oct 11 02:32:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1644: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:46 compute-0 sshd-session[441413]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:32:46 compute-0 sshd-session[441413]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:32:47 compute-0 sshd-session[441415]: Accepted publickey for zuul from 38.102.83.70 port 56298 ssh2: RSA SHA256:sxgyqRujXfGvMV2Eq7ZlGcFGCGFr/dtz6dk2ZJwy3W4
Oct 11 02:32:47 compute-0 systemd-logind[804]: New session 63 of user zuul.
Oct 11 02:32:47 compute-0 systemd[1]: Started Session 63 of User zuul.
Oct 11 02:32:47 compute-0 sshd-session[441415]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 02:32:47 compute-0 ceph-mon[191930]: pgmap v1644: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:47 compute-0 nova_compute[356901]: 2025-10-11 02:32:47.866 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:48 compute-0 podman[441543]: 2025-10-11 02:32:48.24273643 +0000 UTC m=+0.138534566 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.build-date=20251009)
Oct 11 02:32:48 compute-0 podman[441544]: 2025-10-11 02:32:48.251822645 +0000 UTC m=+0.135833097 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=iscsid, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=iscsid, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']})
Oct 11 02:32:48 compute-0 sudo[441631]:     zuul : TTY=pts/1 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ngpfojisxjrhhbbzbjorpkkojnzjhbgq ; KUBECONFIG=/home/zuul/.crc/machines/crc/kubeconfig PATH=/home/zuul/.crc/bin:/home/zuul/.crc/bin/oc:/home/zuul/bin:/home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760149967.5108821-57449-95274734333605/AnsiballZ_command.py'
Oct 11 02:32:48 compute-0 sudo[441631]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:32:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1645: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:48 compute-0 python3[441633]: ansible-ansible.legacy.command Invoked with _raw_params=podman ps -a --format "{{.Names}} {{.Status}}" | grep ceilometer_agent_compute _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:32:48 compute-0 sudo[441631]: pam_unix(sudo:session): session closed for user root
Oct 11 02:32:48 compute-0 sshd-session[441413]: Failed password for invalid user admin from 121.227.153.123 port 33546 ssh2
Oct 11 02:32:49 compute-0 sudo[441673]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:32:49 compute-0 sudo[441673]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:32:49 compute-0 sudo[441673]: pam_unix(sudo:session): session closed for user root
Oct 11 02:32:49 compute-0 sudo[441698]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:32:49 compute-0 sudo[441698]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:32:49 compute-0 sudo[441698]: pam_unix(sudo:session): session closed for user root
Oct 11 02:32:49 compute-0 ceph-mon[191930]: pgmap v1645: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:49 compute-0 sudo[441724]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:32:49 compute-0 sudo[441724]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:32:49 compute-0 sudo[441724]: pam_unix(sudo:session): session closed for user root
Oct 11 02:32:49 compute-0 nova_compute[356901]: 2025-10-11 02:32:49.740 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:49 compute-0 sudo[441749]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:32:49 compute-0 sudo[441749]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:32:50 compute-0 sshd-session[441413]: Connection closed by invalid user admin 121.227.153.123 port 33546 [preauth]
Oct 11 02:32:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1646: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:50 compute-0 sudo[441749]: pam_unix(sudo:session): session closed for user root
Oct 11 02:32:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:32:50 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:32:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:32:50 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:32:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:32:50 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:32:50 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev dfc04d46-e47b-4633-a034-8c366e2c3e32 does not exist
Oct 11 02:32:50 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 58a3ab3a-d096-4dcc-aeae-01a7c2c49d6e does not exist
Oct 11 02:32:50 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 44617997-a04b-4891-adb8-1446bc6efeda does not exist
Oct 11 02:32:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:32:50 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:32:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:32:50 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:32:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:32:50 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:32:50 compute-0 ceph-mon[191930]: pgmap v1646: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:50 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:32:50 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:32:50 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:32:50 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:32:50 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:32:50 compute-0 sudo[441805]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:32:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:32:50 compute-0 sudo[441805]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:32:50 compute-0 sudo[441805]: pam_unix(sudo:session): session closed for user root
Oct 11 02:32:50 compute-0 sudo[441830]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:32:50 compute-0 sudo[441830]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:32:50 compute-0 sudo[441830]: pam_unix(sudo:session): session closed for user root
Oct 11 02:32:51 compute-0 sudo[441855]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:32:51 compute-0 sudo[441855]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:32:51 compute-0 sudo[441855]: pam_unix(sudo:session): session closed for user root
Oct 11 02:32:51 compute-0 sudo[441880]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:32:51 compute-0 sudo[441880]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:32:51 compute-0 sshd-session[441790]: Invalid user admin from 121.227.153.123 port 43094
Oct 11 02:32:51 compute-0 ovn_controller[88370]: 2025-10-11T02:32:51Z|00069|memory_trim|INFO|Detected inactivity (last active 30007 ms ago): trimming memory
Oct 11 02:32:51 compute-0 sshd-session[441790]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:32:51 compute-0 sshd-session[441790]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:32:51 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:32:51 compute-0 podman[441941]: 2025-10-11 02:32:51.727220715 +0000 UTC m=+0.060271836 container create fadf4af526f975fdc42b935400147bee7d8b94e31467e4e9326e88e236b4816d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dreamy_jackson, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:32:51 compute-0 systemd[1]: Started libpod-conmon-fadf4af526f975fdc42b935400147bee7d8b94e31467e4e9326e88e236b4816d.scope.
Oct 11 02:32:51 compute-0 podman[441941]: 2025-10-11 02:32:51.701840956 +0000 UTC m=+0.034892127 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:32:51 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:32:51 compute-0 podman[441941]: 2025-10-11 02:32:51.839722082 +0000 UTC m=+0.172773223 container init fadf4af526f975fdc42b935400147bee7d8b94e31467e4e9326e88e236b4816d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dreamy_jackson, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:32:51 compute-0 podman[441941]: 2025-10-11 02:32:51.858388205 +0000 UTC m=+0.191439326 container start fadf4af526f975fdc42b935400147bee7d8b94e31467e4e9326e88e236b4816d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dreamy_jackson, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:32:51 compute-0 podman[441941]: 2025-10-11 02:32:51.863881665 +0000 UTC m=+0.196932786 container attach fadf4af526f975fdc42b935400147bee7d8b94e31467e4e9326e88e236b4816d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dreamy_jackson, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS)
Oct 11 02:32:51 compute-0 dreamy_jackson[441957]: 167 167
Oct 11 02:32:51 compute-0 podman[441941]: 2025-10-11 02:32:51.870141478 +0000 UTC m=+0.203192599 container died fadf4af526f975fdc42b935400147bee7d8b94e31467e4e9326e88e236b4816d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dreamy_jackson, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:32:51 compute-0 systemd[1]: libpod-fadf4af526f975fdc42b935400147bee7d8b94e31467e4e9326e88e236b4816d.scope: Deactivated successfully.
Oct 11 02:32:51 compute-0 nova_compute[356901]: 2025-10-11 02:32:51.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:32:51 compute-0 systemd[1]: var-lib-containers-storage-overlay-883dd788a6bfa607a696ff7f9b13af55c17533f0832aa24b621d0e7f285c2940-merged.mount: Deactivated successfully.
Oct 11 02:32:51 compute-0 podman[441941]: 2025-10-11 02:32:51.950387305 +0000 UTC m=+0.283438426 container remove fadf4af526f975fdc42b935400147bee7d8b94e31467e4e9326e88e236b4816d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dreamy_jackson, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS)
Oct 11 02:32:51 compute-0 systemd[1]: libpod-conmon-fadf4af526f975fdc42b935400147bee7d8b94e31467e4e9326e88e236b4816d.scope: Deactivated successfully.
Oct 11 02:32:52 compute-0 podman[441981]: 2025-10-11 02:32:52.211974883 +0000 UTC m=+0.077363534 container create c47503e6fbe79b5aa1cc6aaf3e51274a76d9cad75bede3423dfa2f9cb04da90c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_noyce, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:32:52 compute-0 podman[441981]: 2025-10-11 02:32:52.179862698 +0000 UTC m=+0.045251409 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:32:52 compute-0 systemd[1]: Started libpod-conmon-c47503e6fbe79b5aa1cc6aaf3e51274a76d9cad75bede3423dfa2f9cb04da90c.scope.
Oct 11 02:32:52 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:32:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4fc6c27087726d9d4989db40f3274428470149655cd25ecb1971087a79ea6f7c/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:32:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4fc6c27087726d9d4989db40f3274428470149655cd25ecb1971087a79ea6f7c/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:32:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4fc6c27087726d9d4989db40f3274428470149655cd25ecb1971087a79ea6f7c/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:32:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4fc6c27087726d9d4989db40f3274428470149655cd25ecb1971087a79ea6f7c/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:32:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4fc6c27087726d9d4989db40f3274428470149655cd25ecb1971087a79ea6f7c/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:32:52 compute-0 podman[441981]: 2025-10-11 02:32:52.363703903 +0000 UTC m=+0.229092564 container init c47503e6fbe79b5aa1cc6aaf3e51274a76d9cad75bede3423dfa2f9cb04da90c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_noyce, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 02:32:52 compute-0 podman[441981]: 2025-10-11 02:32:52.392131421 +0000 UTC m=+0.257520042 container start c47503e6fbe79b5aa1cc6aaf3e51274a76d9cad75bede3423dfa2f9cb04da90c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_noyce, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:32:52 compute-0 podman[441981]: 2025-10-11 02:32:52.397855257 +0000 UTC m=+0.263243968 container attach c47503e6fbe79b5aa1cc6aaf3e51274a76d9cad75bede3423dfa2f9cb04da90c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_noyce, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 02:32:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1647: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:52 compute-0 ceph-mon[191930]: pgmap v1647: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:32:52 compute-0 nova_compute[356901]: 2025-10-11 02:32:52.869 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:53 compute-0 sshd-session[441790]: Failed password for invalid user admin from 121.227.153.123 port 43094 ssh2
Oct 11 02:32:53 compute-0 festive_noyce[441997]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:32:53 compute-0 festive_noyce[441997]: --> relative data size: 1.0
Oct 11 02:32:53 compute-0 festive_noyce[441997]: --> All data devices are unavailable
Oct 11 02:32:53 compute-0 systemd[1]: libpod-c47503e6fbe79b5aa1cc6aaf3e51274a76d9cad75bede3423dfa2f9cb04da90c.scope: Deactivated successfully.
Oct 11 02:32:53 compute-0 podman[441981]: 2025-10-11 02:32:53.826807104 +0000 UTC m=+1.692195745 container died c47503e6fbe79b5aa1cc6aaf3e51274a76d9cad75bede3423dfa2f9cb04da90c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_noyce, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 02:32:53 compute-0 systemd[1]: libpod-c47503e6fbe79b5aa1cc6aaf3e51274a76d9cad75bede3423dfa2f9cb04da90c.scope: Consumed 1.375s CPU time.
Oct 11 02:32:53 compute-0 systemd[1]: var-lib-containers-storage-overlay-4fc6c27087726d9d4989db40f3274428470149655cd25ecb1971087a79ea6f7c-merged.mount: Deactivated successfully.
Oct 11 02:32:53 compute-0 podman[441981]: 2025-10-11 02:32:53.916825006 +0000 UTC m=+1.782213657 container remove c47503e6fbe79b5aa1cc6aaf3e51274a76d9cad75bede3423dfa2f9cb04da90c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_noyce, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_REF=reef)
Oct 11 02:32:53 compute-0 systemd[1]: libpod-conmon-c47503e6fbe79b5aa1cc6aaf3e51274a76d9cad75bede3423dfa2f9cb04da90c.scope: Deactivated successfully.
Oct 11 02:32:53 compute-0 sudo[441880]: pam_unix(sudo:session): session closed for user root
Oct 11 02:32:54 compute-0 sudo[442037]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:32:54 compute-0 sudo[442037]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:32:54 compute-0 sudo[442037]: pam_unix(sudo:session): session closed for user root
Oct 11 02:32:54 compute-0 sudo[442062]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:32:54 compute-0 sudo[442062]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:32:54 compute-0 sudo[442062]: pam_unix(sudo:session): session closed for user root
Oct 11 02:32:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1648: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 85 B/s wr, 0 op/s
Oct 11 02:32:54 compute-0 sudo[442087]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:32:54 compute-0 sudo[442087]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:32:54 compute-0 sudo[442087]: pam_unix(sudo:session): session closed for user root
Oct 11 02:32:54 compute-0 sudo[442112]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:32:54 compute-0 sudo[442112]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:32:54 compute-0 nova_compute[356901]: 2025-10-11 02:32:54.744 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:54.860 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:32:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:54.861 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:32:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:32:54.862 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:32:54 compute-0 sshd-session[441790]: Connection closed by invalid user admin 121.227.153.123 port 43094 [preauth]
Oct 11 02:32:55 compute-0 podman[442177]: 2025-10-11 02:32:55.313982479 +0000 UTC m=+0.139495734 container create e6f046b0f371705e7101480b146cae8a7187ab992a32e4fb8b382c64c36fe721 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_turing, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:32:55 compute-0 podman[442177]: 2025-10-11 02:32:55.2254373 +0000 UTC m=+0.050950635 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:32:55 compute-0 systemd[1]: Started libpod-conmon-e6f046b0f371705e7101480b146cae8a7187ab992a32e4fb8b382c64c36fe721.scope.
Oct 11 02:32:55 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:32:55 compute-0 podman[442177]: 2025-10-11 02:32:55.45411085 +0000 UTC m=+0.279624185 container init e6f046b0f371705e7101480b146cae8a7187ab992a32e4fb8b382c64c36fe721 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_turing, org.label-schema.build-date=20250507, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 02:32:55 compute-0 podman[442177]: 2025-10-11 02:32:55.47367195 +0000 UTC m=+0.299185235 container start e6f046b0f371705e7101480b146cae8a7187ab992a32e4fb8b382c64c36fe721 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_turing, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:32:55 compute-0 xenodochial_turing[442193]: 167 167
Oct 11 02:32:55 compute-0 podman[442177]: 2025-10-11 02:32:55.480988733 +0000 UTC m=+0.306502078 container attach e6f046b0f371705e7101480b146cae8a7187ab992a32e4fb8b382c64c36fe721 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_turing, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, OSD_FLAVOR=default)
Oct 11 02:32:55 compute-0 systemd[1]: libpod-e6f046b0f371705e7101480b146cae8a7187ab992a32e4fb8b382c64c36fe721.scope: Deactivated successfully.
Oct 11 02:32:55 compute-0 conmon[442193]: conmon e6f046b0f371705e7101 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-e6f046b0f371705e7101480b146cae8a7187ab992a32e4fb8b382c64c36fe721.scope/container/memory.events
Oct 11 02:32:55 compute-0 podman[442177]: 2025-10-11 02:32:55.4850247 +0000 UTC m=+0.310537995 container died e6f046b0f371705e7101480b146cae8a7187ab992a32e4fb8b382c64c36fe721 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_turing, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_REF=reef)
Oct 11 02:32:55 compute-0 systemd[1]: var-lib-containers-storage-overlay-261fb45312566cdb8a62e176e5bef3f4538ce518429f494da1340a426c43e4cd-merged.mount: Deactivated successfully.
Oct 11 02:32:55 compute-0 ceph-mon[191930]: pgmap v1648: 321 pgs: 321 active+clean; 78 MiB data, 255 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 85 B/s wr, 0 op/s
Oct 11 02:32:55 compute-0 podman[442177]: 2025-10-11 02:32:55.551019273 +0000 UTC m=+0.376532528 container remove e6f046b0f371705e7101480b146cae8a7187ab992a32e4fb8b382c64c36fe721 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_turing, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2)
Oct 11 02:32:55 compute-0 systemd[1]: libpod-conmon-e6f046b0f371705e7101480b146cae8a7187ab992a32e4fb8b382c64c36fe721.scope: Deactivated successfully.
Oct 11 02:32:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:32:55 compute-0 podman[442215]: 2025-10-11 02:32:55.781183816 +0000 UTC m=+0.069639149 container create 6fbc33380a4a84fe48ba35df90442586812ceeed2fcfc5ac8e8e2b082cac42a1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_noyce, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:32:55 compute-0 systemd[1]: Started libpod-conmon-6fbc33380a4a84fe48ba35df90442586812ceeed2fcfc5ac8e8e2b082cac42a1.scope.
Oct 11 02:32:55 compute-0 podman[442215]: 2025-10-11 02:32:55.759641339 +0000 UTC m=+0.048096702 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:32:55 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:32:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b11c2529f773517c567ea5998487c86eabb0f096527c55b144c663f687b551db/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:32:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b11c2529f773517c567ea5998487c86eabb0f096527c55b144c663f687b551db/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:32:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b11c2529f773517c567ea5998487c86eabb0f096527c55b144c663f687b551db/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:32:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b11c2529f773517c567ea5998487c86eabb0f096527c55b144c663f687b551db/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:32:55 compute-0 podman[442215]: 2025-10-11 02:32:55.923567233 +0000 UTC m=+0.212022666 container init 6fbc33380a4a84fe48ba35df90442586812ceeed2fcfc5ac8e8e2b082cac42a1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_noyce, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:32:55 compute-0 podman[442215]: 2025-10-11 02:32:55.944001218 +0000 UTC m=+0.232456561 container start 6fbc33380a4a84fe48ba35df90442586812ceeed2fcfc5ac8e8e2b082cac42a1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_noyce, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 02:32:55 compute-0 podman[442215]: 2025-10-11 02:32:55.949019224 +0000 UTC m=+0.237474617 container attach 6fbc33380a4a84fe48ba35df90442586812ceeed2fcfc5ac8e8e2b082cac42a1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_noyce, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:32:56 compute-0 sshd-session[442189]: Invalid user admin from 121.227.153.123 port 43106
Oct 11 02:32:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1649: 321 pgs: 321 active+clean; 86 MiB data, 263 MiB used, 60 GiB / 60 GiB avail; 3.4 KiB/s rd, 682 KiB/s wr, 4 op/s
Oct 11 02:32:56 compute-0 sshd-session[442189]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:32:56 compute-0 sshd-session[442189]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:32:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:32:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:32:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:32:56
Oct 11 02:32:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:32:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:32:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.log', '.rgw.root', 'backups', 'vms', 'default.rgw.control', 'default.rgw.meta', '.mgr', 'cephfs.cephfs.meta', 'volumes', 'cephfs.cephfs.data', 'images']
Oct 11 02:32:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:32:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:32:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:32:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:32:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:32:56 compute-0 amazing_noyce[442232]: {
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:     "0": [
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:         {
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "devices": [
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "/dev/loop3"
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             ],
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "lv_name": "ceph_lv0",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "lv_size": "21470642176",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "name": "ceph_lv0",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "tags": {
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.cluster_name": "ceph",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.crush_device_class": "",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.encrypted": "0",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.osd_id": "0",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.type": "block",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.vdo": "0"
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             },
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "type": "block",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "vg_name": "ceph_vg0"
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:         }
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:     ],
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:     "1": [
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:         {
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "devices": [
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "/dev/loop4"
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             ],
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "lv_name": "ceph_lv1",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "lv_size": "21470642176",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "name": "ceph_lv1",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "tags": {
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.cluster_name": "ceph",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.crush_device_class": "",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.encrypted": "0",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.osd_id": "1",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.type": "block",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.vdo": "0"
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             },
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "type": "block",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "vg_name": "ceph_vg1"
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:         }
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:     ],
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:     "2": [
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:         {
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "devices": [
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "/dev/loop5"
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             ],
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "lv_name": "ceph_lv2",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "lv_size": "21470642176",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "name": "ceph_lv2",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "tags": {
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.cluster_name": "ceph",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.crush_device_class": "",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.encrypted": "0",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.osd_id": "2",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.type": "block",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:                 "ceph.vdo": "0"
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             },
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "type": "block",
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:             "vg_name": "ceph_vg2"
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:         }
Oct 11 02:32:56 compute-0 amazing_noyce[442232]:     ]
Oct 11 02:32:56 compute-0 amazing_noyce[442232]: }
Oct 11 02:32:56 compute-0 systemd[1]: libpod-6fbc33380a4a84fe48ba35df90442586812ceeed2fcfc5ac8e8e2b082cac42a1.scope: Deactivated successfully.
Oct 11 02:32:56 compute-0 podman[442215]: 2025-10-11 02:32:56.88038046 +0000 UTC m=+1.168835833 container died 6fbc33380a4a84fe48ba35df90442586812ceeed2fcfc5ac8e8e2b082cac42a1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_noyce, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:32:56 compute-0 systemd[1]: var-lib-containers-storage-overlay-b11c2529f773517c567ea5998487c86eabb0f096527c55b144c663f687b551db-merged.mount: Deactivated successfully.
Oct 11 02:32:56 compute-0 podman[442215]: 2025-10-11 02:32:56.960979208 +0000 UTC m=+1.249434551 container remove 6fbc33380a4a84fe48ba35df90442586812ceeed2fcfc5ac8e8e2b082cac42a1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_noyce, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:32:56 compute-0 systemd[1]: libpod-conmon-6fbc33380a4a84fe48ba35df90442586812ceeed2fcfc5ac8e8e2b082cac42a1.scope: Deactivated successfully.
Oct 11 02:32:57 compute-0 sudo[442112]: pam_unix(sudo:session): session closed for user root
Oct 11 02:32:57 compute-0 sudo[442253]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:32:57 compute-0 sudo[442253]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:32:57 compute-0 sudo[442253]: pam_unix(sudo:session): session closed for user root
Oct 11 02:32:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:32:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:32:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:32:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:32:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:32:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:32:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:32:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:32:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:32:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:32:57 compute-0 sudo[442278]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:32:57 compute-0 sudo[442278]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:32:57 compute-0 sudo[442278]: pam_unix(sudo:session): session closed for user root
Oct 11 02:32:57 compute-0 sudo[442303]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:32:57 compute-0 sudo[442303]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:32:57 compute-0 sudo[442303]: pam_unix(sudo:session): session closed for user root
Oct 11 02:32:57 compute-0 ceph-mon[191930]: pgmap v1649: 321 pgs: 321 active+clean; 86 MiB data, 263 MiB used, 60 GiB / 60 GiB avail; 3.4 KiB/s rd, 682 KiB/s wr, 4 op/s
Oct 11 02:32:57 compute-0 sudo[442328]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:32:57 compute-0 sudo[442328]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:32:57 compute-0 nova_compute[356901]: 2025-10-11 02:32:57.872 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:58 compute-0 podman[442392]: 2025-10-11 02:32:58.226016561 +0000 UTC m=+0.086024147 container create 8b7a768881cf19e525cae26ec911f2b8a283da0b8e7dedba8b77d8180092a62e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_wiles, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:32:58 compute-0 systemd[1]: Started libpod-conmon-8b7a768881cf19e525cae26ec911f2b8a283da0b8e7dedba8b77d8180092a62e.scope.
Oct 11 02:32:58 compute-0 podman[442392]: 2025-10-11 02:32:58.197728007 +0000 UTC m=+0.057735663 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:32:58 compute-0 sshd-session[442189]: Failed password for invalid user admin from 121.227.153.123 port 43106 ssh2
Oct 11 02:32:58 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:32:58 compute-0 podman[442392]: 2025-10-11 02:32:58.357698046 +0000 UTC m=+0.217705712 container init 8b7a768881cf19e525cae26ec911f2b8a283da0b8e7dedba8b77d8180092a62e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_wiles, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:32:58 compute-0 podman[442392]: 2025-10-11 02:32:58.376905585 +0000 UTC m=+0.236913191 container start 8b7a768881cf19e525cae26ec911f2b8a283da0b8e7dedba8b77d8180092a62e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_wiles, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:32:58 compute-0 podman[442392]: 2025-10-11 02:32:58.385055413 +0000 UTC m=+0.245063089 container attach 8b7a768881cf19e525cae26ec911f2b8a283da0b8e7dedba8b77d8180092a62e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_wiles, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 02:32:58 compute-0 condescending_wiles[442407]: 167 167
Oct 11 02:32:58 compute-0 systemd[1]: libpod-8b7a768881cf19e525cae26ec911f2b8a283da0b8e7dedba8b77d8180092a62e.scope: Deactivated successfully.
Oct 11 02:32:58 compute-0 podman[442392]: 2025-10-11 02:32:58.391522141 +0000 UTC m=+0.251529757 container died 8b7a768881cf19e525cae26ec911f2b8a283da0b8e7dedba8b77d8180092a62e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_wiles, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:32:58 compute-0 systemd[1]: var-lib-containers-storage-overlay-3293226716a79342d13af500cf7032432fcdcdd04269b31bf401ae80802679a8-merged.mount: Deactivated successfully.
Oct 11 02:32:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1650: 321 pgs: 321 active+clean; 93 MiB data, 270 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s rd, 1.3 MiB/s wr, 5 op/s
Oct 11 02:32:58 compute-0 podman[442392]: 2025-10-11 02:32:58.473914881 +0000 UTC m=+0.333922457 container remove 8b7a768881cf19e525cae26ec911f2b8a283da0b8e7dedba8b77d8180092a62e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=condescending_wiles, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:32:58 compute-0 systemd[1]: libpod-conmon-8b7a768881cf19e525cae26ec911f2b8a283da0b8e7dedba8b77d8180092a62e.scope: Deactivated successfully.
Oct 11 02:32:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e129 do_prune osdmap full prune enabled
Oct 11 02:32:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e130 e130: 3 total, 3 up, 3 in
Oct 11 02:32:58 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e130: 3 total, 3 up, 3 in
Oct 11 02:32:58 compute-0 podman[442431]: 2025-10-11 02:32:58.766336988 +0000 UTC m=+0.085479751 container create 747f55c0caf4e428390cf203d1c62369a1a762af97cba94411ef23fc198ea84c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_wilbur, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, ceph=True, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:32:58 compute-0 podman[442431]: 2025-10-11 02:32:58.727942219 +0000 UTC m=+0.047085072 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:32:58 compute-0 systemd[1]: Started libpod-conmon-747f55c0caf4e428390cf203d1c62369a1a762af97cba94411ef23fc198ea84c.scope.
Oct 11 02:32:58 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:32:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8a371ebfba9aa941399a4dd9cfd13cb6801b2afce74b3ac440c153f5495ceefc/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:32:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8a371ebfba9aa941399a4dd9cfd13cb6801b2afce74b3ac440c153f5495ceefc/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:32:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8a371ebfba9aa941399a4dd9cfd13cb6801b2afce74b3ac440c153f5495ceefc/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:32:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8a371ebfba9aa941399a4dd9cfd13cb6801b2afce74b3ac440c153f5495ceefc/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:32:58 compute-0 nova_compute[356901]: 2025-10-11 02:32:58.894 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:32:58 compute-0 nova_compute[356901]: 2025-10-11 02:32:58.894 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:32:58 compute-0 podman[442431]: 2025-10-11 02:32:58.902906295 +0000 UTC m=+0.222049078 container init 747f55c0caf4e428390cf203d1c62369a1a762af97cba94411ef23fc198ea84c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_wilbur, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:32:58 compute-0 podman[442431]: 2025-10-11 02:32:58.922807395 +0000 UTC m=+0.241950148 container start 747f55c0caf4e428390cf203d1c62369a1a762af97cba94411ef23fc198ea84c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_wilbur, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 02:32:58 compute-0 podman[442431]: 2025-10-11 02:32:58.928412098 +0000 UTC m=+0.247554871 container attach 747f55c0caf4e428390cf203d1c62369a1a762af97cba94411ef23fc198ea84c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_wilbur, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 02:32:59 compute-0 ceph-mon[191930]: pgmap v1650: 321 pgs: 321 active+clean; 93 MiB data, 270 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s rd, 1.3 MiB/s wr, 5 op/s
Oct 11 02:32:59 compute-0 ceph-mon[191930]: osdmap e130: 3 total, 3 up, 3 in
Oct 11 02:32:59 compute-0 podman[157119]: time="2025-10-11T02:32:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:32:59 compute-0 nova_compute[356901]: 2025-10-11 02:32:59.749 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:32:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:32:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47836 "" "Go-http-client/1.1"
Oct 11 02:32:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:32:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9478 "" "Go-http-client/1.1"
Oct 11 02:32:59 compute-0 nova_compute[356901]: 2025-10-11 02:32:59.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:33:00 compute-0 sshd-session[442189]: Connection closed by invalid user admin 121.227.153.123 port 43106 [preauth]
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]: {
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:         "osd_id": 1,
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:         "type": "bluestore"
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:     },
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:         "osd_id": 2,
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:         "type": "bluestore"
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:     },
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:         "osd_id": 0,
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:         "type": "bluestore"
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]:     }
Oct 11 02:33:00 compute-0 sleepy_wilbur[442447]: }
Oct 11 02:33:00 compute-0 systemd[1]: libpod-747f55c0caf4e428390cf203d1c62369a1a762af97cba94411ef23fc198ea84c.scope: Deactivated successfully.
Oct 11 02:33:00 compute-0 podman[442431]: 2025-10-11 02:33:00.170871265 +0000 UTC m=+1.490014038 container died 747f55c0caf4e428390cf203d1c62369a1a762af97cba94411ef23fc198ea84c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_wilbur, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Oct 11 02:33:00 compute-0 systemd[1]: libpod-747f55c0caf4e428390cf203d1c62369a1a762af97cba94411ef23fc198ea84c.scope: Consumed 1.233s CPU time.
Oct 11 02:33:00 compute-0 systemd[1]: var-lib-containers-storage-overlay-8a371ebfba9aa941399a4dd9cfd13cb6801b2afce74b3ac440c153f5495ceefc-merged.mount: Deactivated successfully.
Oct 11 02:33:00 compute-0 podman[442483]: 2025-10-11 02:33:00.246497847 +0000 UTC m=+0.109475489 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:33:00 compute-0 podman[442431]: 2025-10-11 02:33:00.259514556 +0000 UTC m=+1.578657319 container remove 747f55c0caf4e428390cf203d1c62369a1a762af97cba94411ef23fc198ea84c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_wilbur, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:33:00 compute-0 podman[442482]: 2025-10-11 02:33:00.262516604 +0000 UTC m=+0.139248597 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, release=1755695350, build-date=2025-08-20T13:12:41, vcs-type=git, vendor=Red Hat, Inc., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., managed_by=edpm_ansible, com.redhat.component=ubi9-minimal-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, url=https://catalog.redhat.com/en/search?searchType=containers, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, distribution-scope=public, architecture=x86_64, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, version=9.6, config_id=edpm, io.openshift.expose-services=, io.openshift.tags=minimal rhel9, container_name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., name=ubi9-minimal)
Oct 11 02:33:00 compute-0 podman[442479]: 2025-10-11 02:33:00.274881084 +0000 UTC m=+0.149705541 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3)
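The three health_status records above are emitted by podman's healthcheck timers; each container's config_data embeds a test of the form '/openstack/healthcheck <service>' with the probe script bind-mounted from /var/lib/openstack/healthchecks. A minimal sketch of running one probe by hand (Python; podman assumed on PATH, container name taken from the log):

    import subprocess

    def container_healthy(name: str) -> bool:
        # 'podman healthcheck run' executes the container's configured
        # test once and exits 0 when the check passes.
        result = subprocess.run(["podman", "healthcheck", "run", name],
                                capture_output=True, text=True)
        return result.returncode == 0

    print(container_healthy("openstack_network_exporter"))  # True == health_status=healthy above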
Oct 11 02:33:00 compute-0 systemd[1]: libpod-conmon-747f55c0caf4e428390cf203d1c62369a1a762af97cba94411ef23fc198ea84c.scope: Deactivated successfully.
Oct 11 02:33:00 compute-0 sudo[442328]: pam_unix(sudo:session): session closed for user root
Oct 11 02:33:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:33:00 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:33:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:33:00 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:33:00 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 7cdb5ee6-9974-4434-9d82-56a40f2471ca does not exist
Oct 11 02:33:00 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 083d2893-0e4d-4fa9-abb0-838985500f14 does not exist
Oct 11 02:33:00 compute-0 sudo[442556]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:33:00 compute-0 sudo[442556]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:33:00 compute-0 sudo[442556]: pam_unix(sudo:session): session closed for user root
Oct 11 02:33:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1652: 321 pgs: 321 active+clean; 93 MiB data, 270 MiB used, 60 GiB / 60 GiB avail; 6.5 KiB/s rd, 1.6 MiB/s wr, 10 op/s
Oct 11 02:33:00 compute-0 sudo[442583]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:33:00 compute-0 sudo[442583]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:33:00 compute-0 sudo[442583]: pam_unix(sudo:session): session closed for user root
Oct 11 02:33:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e130 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:33:00 compute-0 nova_compute[356901]: 2025-10-11 02:33:00.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:33:00 compute-0 nova_compute[356901]: 2025-10-11 02:33:00.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:33:00 compute-0 nova_compute[356901]: 2025-10-11 02:33:00.925 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944
Oct 11 02:33:00 compute-0 nova_compute[356901]: 2025-10-11 02:33:00.926 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:33:00 compute-0 nova_compute[356901]: 2025-10-11 02:33:00.927 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
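The five nova_compute lines above show oslo.service's periodic task loop dispatching ComputeManager._heal_instance_info_cache (which finds no instances to refresh) and _reclaim_queued_deletes (which bails out because CONF.reclaim_instance_interval <= 0). A minimal sketch of that machinery, assuming only oslo.config and oslo.service; the option name mirrors nova's but is registered here just for the example:

    from oslo_config import cfg
    from oslo_service import periodic_task

    CONF = cfg.CONF
    CONF.register_opts([cfg.IntOpt("reclaim_instance_interval", default=0)])

    class Manager(periodic_task.PeriodicTasks):
        @periodic_task.periodic_task(spacing=60)
        def _heal_instance_info_cache(self, context):
            pass  # nova refreshes one instance's network info cache per pass

        @periodic_task.periodic_task
        def _reclaim_queued_deletes(self, context):
            if CONF.reclaim_instance_interval <= 0:
                return  # the "skipping..." branch logged above

    mgr = Manager(CONF)
    mgr.run_periodic_tasks(context=None)  # one scheduler pass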
Oct 11 02:33:01 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:33:01 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:33:01 compute-0 ceph-mon[191930]: pgmap v1652: 321 pgs: 321 active+clean; 93 MiB data, 270 MiB used, 60 GiB / 60 GiB avail; 6.5 KiB/s rd, 1.6 MiB/s wr, 10 op/s
Oct 11 02:33:01 compute-0 openstack_network_exporter[374316]: ERROR   02:33:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:33:01 compute-0 openstack_network_exporter[374316]: ERROR   02:33:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:33:01 compute-0 openstack_network_exporter[374316]: ERROR   02:33:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:33:01 compute-0 openstack_network_exporter[374316]: ERROR   02:33:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:33:01 compute-0 openstack_network_exporter[374316]: ERROR   02:33:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:33:01 compute-0 sshd-session[442563]: Invalid user admin from 121.227.153.123 port 43340
Oct 11 02:33:01 compute-0 sshd-session[442563]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:33:01 compute-0 sshd-session[442563]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:33:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1653: 321 pgs: 321 active+clean; 93 MiB data, 270 MiB used, 60 GiB / 60 GiB avail; 13 KiB/s rd, 1.6 MiB/s wr, 18 op/s
Oct 11 02:33:02 compute-0 nova_compute[356901]: 2025-10-11 02:33:02.877 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:02 compute-0 nova_compute[356901]: 2025-10-11 02:33:02.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:33:02 compute-0 nova_compute[356901]: 2025-10-11 02:33:02.931 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:33:02 compute-0 nova_compute[356901]: 2025-10-11 02:33:02.932 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:33:02 compute-0 nova_compute[356901]: 2025-10-11 02:33:02.933 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:33:02 compute-0 nova_compute[356901]: 2025-10-11 02:33:02.933 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:33:02 compute-0 nova_compute[356901]: 2025-10-11 02:33:02.935 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:33:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:33:03 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1639520037' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:33:03 compute-0 nova_compute[356901]: 2025-10-11 02:33:03.411 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.477s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
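The resource audit shells out to the ceph CLI rather than calling librados directly: the Running cmd / returned: 0 pair above brackets the call, and the mon's audit channel records the same request arriving from client.openstack. A sketch of the equivalent call, assuming a readable /etc/ceph/ceph.conf and the openstack keyring:

    import json
    from oslo_concurrency import processutils

    out, _err = processutils.execute(
        "ceph", "df", "--format=json",
        "--id", "openstack", "--conf", "/etc/ceph/ceph.conf")
    stats = json.loads(out)
    # cluster totals feed the DISK_GB inventory reported further down
    print(stats["stats"]["total_bytes"], stats["stats"]["total_avail_bytes"])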
Oct 11 02:33:03 compute-0 sshd-session[442563]: Failed password for invalid user admin from 121.227.153.123 port 43340 ssh2
Oct 11 02:33:03 compute-0 nova_compute[356901]: 2025-10-11 02:33:03.547 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:33:03 compute-0 nova_compute[356901]: 2025-10-11 02:33:03.548 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:33:03 compute-0 nova_compute[356901]: 2025-10-11 02:33:03.548 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:33:03 compute-0 ceph-mon[191930]: pgmap v1653: 321 pgs: 321 active+clean; 93 MiB data, 270 MiB used, 60 GiB / 60 GiB avail; 13 KiB/s rd, 1.6 MiB/s wr, 18 op/s
Oct 11 02:33:03 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1639520037' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:33:04 compute-0 nova_compute[356901]: 2025-10-11 02:33:04.216 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:33:04 compute-0 nova_compute[356901]: 2025-10-11 02:33:04.218 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3794MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:33:04 compute-0 nova_compute[356901]: 2025-10-11 02:33:04.219 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:33:04 compute-0 nova_compute[356901]: 2025-10-11 02:33:04.220 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:33:04 compute-0 nova_compute[356901]: 2025-10-11 02:33:04.345 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:33:04 compute-0 nova_compute[356901]: 2025-10-11 02:33:04.346 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:33:04 compute-0 nova_compute[356901]: 2025-10-11 02:33:04.346 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
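The final view is internally consistent: used_ram = 512 MB for the one claimed instance plus the 512 MB host reservation (presumably nova's reserved_host_memory_mb default) = 1024 MB, while used_vcpus = 1 and used_disk = 2 GB match the placement allocation {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}} reported for instance 0cc56d17-ec3a-4408-bccb-91b29427379e two lines earlier.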
Oct 11 02:33:04 compute-0 nova_compute[356901]: 2025-10-11 02:33:04.404 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:33:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1654: 321 pgs: 321 active+clean; 93 MiB data, 271 MiB used, 60 GiB / 60 GiB avail; 13 KiB/s rd, 1.6 MiB/s wr, 17 op/s
Oct 11 02:33:04 compute-0 ceph-mon[191930]: pgmap v1654: 321 pgs: 321 active+clean; 93 MiB data, 271 MiB used, 60 GiB / 60 GiB avail; 13 KiB/s rd, 1.6 MiB/s wr, 17 op/s
Oct 11 02:33:04 compute-0 nova_compute[356901]: 2025-10-11 02:33:04.754 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:33:04 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/4045324951' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:33:04 compute-0 nova_compute[356901]: 2025-10-11 02:33:04.904 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.500s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:33:04 compute-0 nova_compute[356901]: 2025-10-11 02:33:04.918 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:33:04 compute-0 nova_compute[356901]: 2025-10-11 02:33:04.938 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
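Placement schedules against (total - reserved) * allocation_ratio per resource class, so the inventory above yields the effective capacities checked below:

    inventory = {
        "VCPU":      {"total": 8,    "reserved": 0,   "allocation_ratio": 4.0},
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "DISK_GB":   {"total": 59,   "reserved": 1,   "allocation_ratio": 0.9},
    }
    for rc, f in inventory.items():
        capacity = (f["total"] - f["reserved"]) * f["allocation_ratio"]
        print(rc, capacity)  # VCPU 32.0, MEMORY_MB 7168.0, DISK_GB 52.2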
Oct 11 02:33:04 compute-0 nova_compute[356901]: 2025-10-11 02:33:04.939 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:33:04 compute-0 nova_compute[356901]: 2025-10-11 02:33:04.940 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.719s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
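The Acquiring / acquired / "released" triplets that thread through this section are oslo.concurrency's lockutils logging its named in-process locks; the pattern, assuming nothing beyond the library itself:

    from oslo_concurrency import lockutils

    @lockutils.synchronized("compute_resources")
    def update_available_resource():
        pass  # body runs while holding the same named lock the tracker logs

    # equivalently, as a context manager:
    with lockutils.lock("compute_resources"):
        pass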
Oct 11 02:33:05 compute-0 sshd-session[442563]: Connection closed by invalid user admin 121.227.153.123 port 43340 [preauth]
Oct 11 02:33:05 compute-0 nova_compute[356901]: 2025-10-11 02:33:05.518 2 DEBUG oslo_concurrency.lockutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "eb750444-4572-4c74-a72a-4955daed4f7b" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:33:05 compute-0 nova_compute[356901]: 2025-10-11 02:33:05.519 2 DEBUG oslo_concurrency.lockutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "eb750444-4572-4c74-a72a-4955daed4f7b" acquired by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:33:05 compute-0 nova_compute[356901]: 2025-10-11 02:33:05.535 2 DEBUG nova.compute.manager [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Starting instance... _do_build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2402
Oct 11 02:33:05 compute-0 nova_compute[356901]: 2025-10-11 02:33:05.618 2 DEBUG oslo_concurrency.lockutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:33:05 compute-0 nova_compute[356901]: 2025-10-11 02:33:05.619 2 DEBUG oslo_concurrency.lockutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:33:05 compute-0 nova_compute[356901]: 2025-10-11 02:33:05.633 2 DEBUG nova.virt.hardware [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Require both a host and instance NUMA topology to fit instance on host. numa_fit_instance_to_host /usr/lib/python3.9/site-packages/nova/virt/hardware.py:2368
Oct 11 02:33:05 compute-0 nova_compute[356901]: 2025-10-11 02:33:05.634 2 INFO nova.compute.claims [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Claim successful on node compute-0.ctlplane.example.com
Oct 11 02:33:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e130 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:33:05 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/4045324951' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:33:05 compute-0 nova_compute[356901]: 2025-10-11 02:33:05.791 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:33:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:33:06 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2220336593' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.389 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.599s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.403 2 DEBUG nova.compute.provider_tree [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.438 2 DEBUG nova.scheduler.client.report [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:33:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1655: 321 pgs: 321 active+clean; 93 MiB data, 271 MiB used, 60 GiB / 60 GiB avail; 8.6 KiB/s rd, 774 KiB/s wr, 12 op/s
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.474 2 DEBUG oslo_concurrency.lockutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: held 0.855s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.475 2 DEBUG nova.compute.manager [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Start building networks asynchronously for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2799
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.536 2 DEBUG nova.compute.manager [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Not allocating networking since 'none' was specified. _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1948
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.557 2 INFO nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Ignoring supplied device name: /dev/vda. Libvirt can't honour user-supplied dev names
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.620 2 DEBUG nova.compute.manager [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Start building block device mappings for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2834
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.756 2 DEBUG nova.compute.manager [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Start spawning the instance on the hypervisor. _build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2608
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.759 2 DEBUG nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Creating instance directory _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4723
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.760 2 INFO nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Creating image(s)
Oct 11 02:33:06 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2220336593' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:33:06 compute-0 ceph-mon[191930]: pgmap v1655: 321 pgs: 321 active+clean; 93 MiB data, 271 MiB used, 60 GiB / 60 GiB avail; 8.6 KiB/s rd, 774 KiB/s wr, 12 op/s
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.822 2 DEBUG nova.storage.rbd_utils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image eb750444-4572-4c74-a72a-4955daed4f7b_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.897 2 DEBUG nova.storage.rbd_utils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image eb750444-4572-4c74-a72a-4955daed4f7b_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.965 2 DEBUG nova.storage.rbd_utils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image eb750444-4572-4c74-a72a-4955daed4f7b_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.977 2 DEBUG oslo_concurrency.lockutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "1d8c0e72e3c59f2e2987fd026cc0e3a116837b53" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.979 2 DEBUG oslo_concurrency.lockutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "1d8c0e72e3c59f2e2987fd026cc0e3a116837b53" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.986 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:33:06 compute-0 nova_compute[356901]: 2025-10-11 02:33:06.986 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0005066271692062251 of space, bias 1.0, pg target 0.15198815076186756 quantized to 32 (current 32)
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:33:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
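The autoscaler's arithmetic can be reproduced from the lines above: each pool's pg target is usage_fraction x bias x a PG budget of 300, which presumably reflects mon_target_pg_per_osd (default 100) across this cluster's three OSDs; the result is then quantized to a power of two, with floors for tiny pools. A check against two of the logged values (the budget of 300 is inferred from the log, not read from this cluster's configuration):

    def pg_target(usage_fraction, bias, pg_budget=300):
        # pg_budget is an assumption inferred from the logged ratios
        return usage_fraction * bias * pg_budget

    print(pg_target(0.0005513950275118838, 1.0))  # ~0.16541850825356513 ('vms')
    print(pg_target(5.087256625643029e-07, 4.0))  # 0.0006104707950771635 ('cephfs.cephfs.meta')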
Oct 11 02:33:07 compute-0 sshd-session[442653]: Invalid user admin from 121.227.153.123 port 43356
Oct 11 02:33:07 compute-0 podman[442731]: 2025-10-11 02:33:07.286560137 +0000 UTC m=+0.175221044 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.29.0, vendor=Red Hat, Inc., container_name=kepler, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9, architecture=x86_64, summary=Provides the latest release of Red Hat Universal Base Image 9., name=ubi9, maintainer=Red Hat, Inc., io.openshift.tags=base rhel9, release-0.7.12=, managed_by=edpm_ansible, release=1214.1726694543, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, distribution-scope=public, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, version=9.4, build-date=2024-09-18T21:23:30, config_id=edpm, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.component=ubi9-container)
Oct 11 02:33:07 compute-0 nova_compute[356901]: 2025-10-11 02:33:07.320 2 DEBUG nova.virt.libvirt.imagebackend [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Image locations are: [{'url': 'rbd://3c7617c3-7a20-523e-a9de-20c0d6ba41da/images/cd2dfc66-f23e-4259-896a-4ce37807bb33/snap', 'metadata': {'store': 'default_backend'}}, {'url': 'rbd://3c7617c3-7a20-523e-a9de-20c0d6ba41da/images/cd2dfc66-f23e-4259-896a-4ce37807bb33/snap', 'metadata': {}}] clone /usr/lib/python3.9/site-packages/nova/virt/libvirt/imagebackend.py:1085
Oct 11 02:33:07 compute-0 sshd-session[442653]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:33:07 compute-0 sshd-session[442653]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:33:07 compute-0 nova_compute[356901]: 2025-10-11 02:33:07.881 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:08 compute-0 nova_compute[356901]: 2025-10-11 02:33:08.462 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/1d8c0e72e3c59f2e2987fd026cc0e3a116837b53.part --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:33:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1656: 321 pgs: 321 active+clean; 93 MiB data, 271 MiB used, 60 GiB / 60 GiB avail; 8.5 KiB/s rd, 1.4 KiB/s wr, 12 op/s
Oct 11 02:33:08 compute-0 nova_compute[356901]: 2025-10-11 02:33:08.580 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/1d8c0e72e3c59f2e2987fd026cc0e3a116837b53.part --force-share --output=json" returned: 0 in 0.117s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:33:08 compute-0 nova_compute[356901]: 2025-10-11 02:33:08.582 2 DEBUG nova.virt.images [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] cd2dfc66-f23e-4259-896a-4ce37807bb33 was qcow2, converting to raw fetch_to_raw /usr/lib/python3.9/site-packages/nova/virt/images.py:242
Oct 11 02:33:08 compute-0 nova_compute[356901]: 2025-10-11 02:33:08.584 2 DEBUG nova.privsep.utils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Path '/var/lib/nova/instances' supports direct I/O supports_direct_io /usr/lib/python3.9/site-packages/nova/privsep/utils.py:63
Oct 11 02:33:08 compute-0 nova_compute[356901]: 2025-10-11 02:33:08.586 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): qemu-img convert -t none -O raw -f qcow2 /var/lib/nova/instances/_base/1d8c0e72e3c59f2e2987fd026cc0e3a116837b53.part /var/lib/nova/instances/_base/1d8c0e72e3c59f2e2987fd026cc0e3a116837b53.converted execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:33:08 compute-0 nova_compute[356901]: 2025-10-11 02:33:08.980 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "qemu-img convert -t none -O raw -f qcow2 /var/lib/nova/instances/_base/1d8c0e72e3c59f2e2987fd026cc0e3a116837b53.part /var/lib/nova/instances/_base/1d8c0e72e3c59f2e2987fd026cc0e3a116837b53.converted" returned: 0 in 0.394s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:33:08 compute-0 nova_compute[356901]: 2025-10-11 02:33:08.989 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/1d8c0e72e3c59f2e2987fd026cc0e3a116837b53.converted --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:33:09 compute-0 nova_compute[356901]: 2025-10-11 02:33:09.099 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/1d8c0e72e3c59f2e2987fd026cc0e3a116837b53.converted --force-share --output=json" returned: 0 in 0.111s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:33:09 compute-0 nova_compute[356901]: 2025-10-11 02:33:09.102 2 DEBUG oslo_concurrency.lockutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "1d8c0e72e3c59f2e2987fd026cc0e3a116837b53" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 2.123s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:33:09 compute-0 nova_compute[356901]: 2025-10-11 02:33:09.154 2 DEBUG nova.storage.rbd_utils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image eb750444-4572-4c74-a72a-4955daed4f7b_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:33:09 compute-0 nova_compute[356901]: 2025-10-11 02:33:09.165 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/1d8c0e72e3c59f2e2987fd026cc0e3a116837b53 eb750444-4572-4c74-a72a-4955daed4f7b_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:33:09 compute-0 ceph-mon[191930]: pgmap v1656: 321 pgs: 321 active+clean; 93 MiB data, 271 MiB used, 60 GiB / 60 GiB avail; 8.5 KiB/s rd, 1.4 KiB/s wr, 12 op/s
Oct 11 02:33:09 compute-0 nova_compute[356901]: 2025-10-11 02:33:09.696 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/1d8c0e72e3c59f2e2987fd026cc0e3a116837b53 eb750444-4572-4c74-a72a-4955daed4f7b_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.530s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
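The entries from 02:33:08 to 02:33:09 trace nova's fetch-to-raw path for a qcow2 Glance image on the rbd ephemeral backend: probe the downloaded .part file with qemu-img info, convert qcow2 to raw with host caching disabled, probe the result, then rbd import into the vms pool. A condensed sketch of the same pipeline (paths and image names copied from the log; nova performs the subsequent resize via the librbd binding, so the final CLI call is an equivalent, not what nova executes):

    import subprocess

    base = "/var/lib/nova/instances/_base/1d8c0e72e3c59f2e2987fd026cc0e3a116837b53"
    disk = "eb750444-4572-4c74-a72a-4955daed4f7b_disk"
    ceph = ["--id", "openstack", "--conf", "/etc/ceph/ceph.conf"]

    def run(*cmd):
        subprocess.run(cmd, check=True)

    # qcow2 -> raw, bypassing the host page cache (-t none), as logged
    run("qemu-img", "convert", "-t", "none", "-O", "raw", "-f", "qcow2",
        base + ".part", base + ".converted")
    # upload the raw base file as a format-2 RBD image in the 'vms' pool
    run("rbd", "import", "--pool", "vms", base, disk, "--image-format=2", *ceph)
    # grow to the flavor's root_gb: 1 GiB = 1 * 1024**3 = 1073741824 bytes, as logged
    run("rbd", "resize", "--pool", "vms", disk, "--size", "1G", *ceph)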
Oct 11 02:33:09 compute-0 sshd-session[442653]: Failed password for invalid user admin from 121.227.153.123 port 43356 ssh2
Oct 11 02:33:09 compute-0 nova_compute[356901]: 2025-10-11 02:33:09.760 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:09 compute-0 nova_compute[356901]: 2025-10-11 02:33:09.871 2 DEBUG nova.storage.rbd_utils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] resizing rbd image eb750444-4572-4c74-a72a-4955daed4f7b_disk to 1073741824 resize /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:288
Oct 11 02:33:10 compute-0 nova_compute[356901]: 2025-10-11 02:33:10.332 2 DEBUG nova.objects.instance [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lazy-loading 'migration_context' on Instance uuid eb750444-4572-4c74-a72a-4955daed4f7b obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:33:10 compute-0 nova_compute[356901]: 2025-10-11 02:33:10.413 2 DEBUG nova.storage.rbd_utils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image eb750444-4572-4c74-a72a-4955daed4f7b_disk.eph0 does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:33:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1657: 321 pgs: 321 active+clean; 93 MiB data, 271 MiB used, 60 GiB / 60 GiB avail; 7.1 KiB/s rd, 1.2 KiB/s wr, 10 op/s
Oct 11 02:33:10 compute-0 nova_compute[356901]: 2025-10-11 02:33:10.472 2 DEBUG nova.storage.rbd_utils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image eb750444-4572-4c74-a72a-4955daed4f7b_disk.eph0 does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:33:10 compute-0 nova_compute[356901]: 2025-10-11 02:33:10.482 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/ephemeral_1_0706d66 --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:33:10 compute-0 nova_compute[356901]: 2025-10-11 02:33:10.582 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/ephemeral_1_0706d66 --force-share --output=json" returned: 0 in 0.100s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:33:10 compute-0 nova_compute[356901]: 2025-10-11 02:33:10.584 2 DEBUG oslo_concurrency.lockutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "ephemeral_1_0706d66" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:33:10 compute-0 nova_compute[356901]: 2025-10-11 02:33:10.584 2 DEBUG oslo_concurrency.lockutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "ephemeral_1_0706d66" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:33:10 compute-0 nova_compute[356901]: 2025-10-11 02:33:10.585 2 DEBUG oslo_concurrency.lockutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "ephemeral_1_0706d66" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:33:10 compute-0 nova_compute[356901]: 2025-10-11 02:33:10.620 2 DEBUG nova.storage.rbd_utils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image eb750444-4572-4c74-a72a-4955daed4f7b_disk.eph0 does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:33:10 compute-0 nova_compute[356901]: 2025-10-11 02:33:10.628 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/ephemeral_1_0706d66 eb750444-4572-4c74-a72a-4955daed4f7b_disk.eph0 --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:33:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e130 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:33:10 compute-0 sshd-session[442653]: Connection closed by invalid user admin 121.227.153.123 port 43356 [preauth]
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.137 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/ephemeral_1_0706d66 eb750444-4572-4c74-a72a-4955daed4f7b_disk.eph0 --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.509s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.339 2 DEBUG nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Created local disks _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4857
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.340 2 DEBUG nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Ensure instance console log exists: /var/lib/nova/instances/eb750444-4572-4c74-a72a-4955daed4f7b/console.log _ensure_console_log_for_instance /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4609
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.340 2 DEBUG oslo_concurrency.lockutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "vgpu_resources" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.341 2 DEBUG oslo_concurrency.lockutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "vgpu_resources" acquired by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.341 2 DEBUG oslo_concurrency.lockutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "vgpu_resources" "released" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.343 2 DEBUG nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Start _get_guest_xml network_info=[] disk_info={'disk_bus': 'virtio', 'cdrom_bus': 'sata', 'mapping': {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, 'disk.config': {'bus': 'sata', 'dev': 'sda', 'type': 'cdrom'}}} image_meta=ImageMeta(checksum='b874c39491a2377b8490f5f1e89761a4',container_format='bare',created_at=2025-10-11T02:32:52Z,direct_url=<?>,disk_format='qcow2',id=cd2dfc66-f23e-4259-896a-4ce37807bb33,min_disk=0,min_ram=0,name='fvt_testing_image',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=16300544,status='active',tags=<?>,updated_at=2025-10-11T02:32:58Z,virtual_size=<?>,visibility=<?>) rescue=None block_device_info={'root_device_name': '/dev/vda', 'image': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'boot_index': 0, 'device_name': '/dev/vda', 'size': 0, 'encryption_format': None, 'image_id': 'cd2dfc66-f23e-4259-896a-4ce37807bb33'}], 'ephemerals': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'device_name': '/dev/vdb', 'size': 1, 'encryption_format': None}], 'block_device_mapping': [], 'swap': None} _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7549
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.353 2 WARNING nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.360 2 DEBUG nova.virt.libvirt.host [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V1... _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1653
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.361 2 DEBUG nova.virt.libvirt.host [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CPU controller missing on host. _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1663
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.373 2 DEBUG nova.virt.libvirt.host [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V2... _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1672
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.374 2 DEBUG nova.virt.libvirt.host [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CPU controller found on host. _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1679
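nova probes for a usable CPU controller first under cgroups v1 and then, finding none, under the unified v2 hierarchy, which is where this host succeeds. A rough standalone approximation of both probes using the standard kernel mount points (this is not nova's exact code path, which goes through nova.virt.libvirt.host):

    # Rough standalone equivalents of the two cgroup probes logged above.
    import os

    def has_cgroupsv2_cpu_controller():
        # cgroup v2 lists enabled controllers in one file at the hierarchy root.
        try:
            with open('/sys/fs/cgroup/cgroup.controllers') as f:
                return 'cpu' in f.read().split()
        except FileNotFoundError:
            return False  # unified hierarchy not mounted

    def has_cgroupsv1_cpu_controller():
        # cgroup v1 exposes the cpu controller as its own mounted subtree.
        return os.path.isdir('/sys/fs/cgroup/cpu')

    print(has_cgroupsv1_cpu_controller(), has_cgroupsv2_cpu_controller())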
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.375 2 DEBUG nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CPU mode 'host-model' models '' was chosen, with extra flags: '' _get_guest_cpu_model_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:5396
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.376 2 DEBUG nova.virt.hardware [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Getting desirable topologies for flavor Flavor(created_at=2025-10-11T02:33:01Z,deleted=False,deleted_at=None,description=None,disabled=False,ephemeral_gb=1,extra_specs={},flavorid='78c20b95-5cf7-42e2-9353-630728b9091d',id=2,is_public=True,memory_mb=512,name='fvt_testing_flavor',projects=<?>,root_gb=1,rxtx_factor=1.0,swap=0,updated_at=None,vcpu_weight=0,vcpus=1) and image_meta ImageMeta(checksum='b874c39491a2377b8490f5f1e89761a4',container_format='bare',created_at=2025-10-11T02:32:52Z,direct_url=<?>,disk_format='qcow2',id=cd2dfc66-f23e-4259-896a-4ce37807bb33,min_disk=0,min_ram=0,name='fvt_testing_image',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=16300544,status='active',tags=<?>,updated_at=2025-10-11T02:32:58Z,virtual_size=<?>,visibility=<?>), allow threads: True _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:563
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.377 2 DEBUG nova.virt.hardware [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Flavor limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:348
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.377 2 DEBUG nova.virt.hardware [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Image limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:352
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.377 2 DEBUG nova.virt.hardware [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Flavor pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:388
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.378 2 DEBUG nova.virt.hardware [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Image pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:392
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.378 2 DEBUG nova.virt.hardware [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Chose sockets=0, cores=0, threads=0; limits were sockets=65536, cores=65536, threads=65536 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:430
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.379 2 DEBUG nova.virt.hardware [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Topology preferred VirtCPUTopology(cores=0,sockets=0,threads=0), maximum VirtCPUTopology(cores=65536,sockets=65536,threads=65536) _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:569
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.380 2 DEBUG nova.virt.hardware [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Build topologies for 1 vcpu(s) 1:1:1 _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:471
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.381 2 DEBUG nova.virt.hardware [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Got 1 possible topologies _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:501
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.381 2 DEBUG nova.virt.hardware [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Possible topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:575
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.381 2 DEBUG nova.virt.hardware [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Sorted desired topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:577
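The hardware.py lines trace nova's topology search: with no flavor or image constraints (preferences 0:0:0, limits 65536 each), a 1-vCPU guest admits exactly one candidate, 1 socket x 1 core x 1 thread, which is what lands in the domain XML later. A simplified model of that enumeration (nova's real version also applies NUMA constraints and preference-based sorting):

    # Simplified model of _get_possible_cpu_topologies: every
    # (sockets, cores, threads) triple whose product equals the vCPU count,
    # within the limits. For vcpus=1 the only answer is (1, 1, 1).
    def possible_topologies(vcpus, max_sockets=65536, max_cores=65536,
                            max_threads=65536):
        found = []
        for sockets in range(1, min(vcpus, max_sockets) + 1):
            for cores in range(1, min(vcpus, max_cores) + 1):
                for threads in range(1, min(vcpus, max_threads) + 1):
                    if sockets * cores * threads == vcpus:
                        found.append((sockets, cores, threads))
        return found

    print(possible_topologies(1))  # [(1, 1, 1)]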
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.385 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:33:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:33:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3000.1 total, 600.0 interval
                                            Cumulative writes: 7474 writes, 29K keys, 7474 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.01 MB/s
                                            Cumulative WAL: 7474 writes, 1672 syncs, 4.47 writes per sync, written: 0.02 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 991 writes, 3371 keys, 991 commit groups, 1.0 writes per commit group, ingest: 2.86 MB, 0.00 MB/s
                                            Interval WAL: 991 writes, 421 syncs, 2.35 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:33:11 compute-0 ceph-mon[191930]: pgmap v1657: 321 pgs: 321 active+clean; 93 MiB data, 271 MiB used, 60 GiB / 60 GiB avail; 7.1 KiB/s rd, 1.2 KiB/s wr, 10 op/s
Oct 11 02:33:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:33:11 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2791860227' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.907 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.522s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
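Before touching RBD, nova shells out to the ceph CLI to fetch the monitor map; each call here costs roughly half a second and is repeated several times during the spawn. The same call reproduced with the standard library (the JSON field names follow typical `mon dump` output; --id and --conf are taken from the log and would differ per deployment):

    # Reproduce the "ceph mon dump" subprocess from the log and print the
    # monitor addresses. Requires the ceph CLI and a readable ceph.conf/keyring.
    import json
    import subprocess

    out = subprocess.check_output([
        'ceph', 'mon', 'dump', '--format=json',
        '--id', 'openstack', '--conf', '/etc/ceph/ceph.conf',
    ])
    mon_map = json.loads(out)
    for mon in mon_map.get('mons', []):
        print(mon['name'], mon.get('public_addr') or mon.get('addr'))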
Oct 11 02:33:11 compute-0 nova_compute[356901]: 2025-10-11 02:33:11.910 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:33:12 compute-0 sshd-session[442980]: Invalid user admin from 121.227.153.123 port 49082
Oct 11 02:33:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:33:12 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1637862669' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:33:12 compute-0 nova_compute[356901]: 2025-10-11 02:33:12.448 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.538s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:33:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1658: 321 pgs: 321 active+clean; 103 MiB data, 274 MiB used, 60 GiB / 60 GiB avail; 693 KiB/s rd, 338 KiB/s wr, 13 op/s
Oct 11 02:33:12 compute-0 nova_compute[356901]: 2025-10-11 02:33:12.501 2 DEBUG nova.storage.rbd_utils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image eb750444-4572-4c74-a72a-4955daed4f7b_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
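rbd_utils opens the image purely to test existence; ImageNotFound is expected here, since the config drive has not been created yet, so it is logged rather than raised. A sketch of that probe with the python rados/rbd bindings (pool, client id, and image name are taken from the log; assumes python3-rados and python3-rbd are installed):

    # Existence check for an RBD image, in the style of nova.storage.rbd_utils.
    import rados
    import rbd

    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf', rados_id='openstack')
    cluster.connect()
    ioctx = cluster.open_ioctx('vms')
    try:
        try:
            image = rbd.Image(ioctx, 'eb750444-4572-4c74-a72a-4955daed4f7b_disk.config')
            image.close()
            print('image exists')
        except rbd.ImageNotFound:
            print('image does not exist')  # the case logged above
    finally:
        ioctx.close()
        cluster.shutdown()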
Oct 11 02:33:12 compute-0 nova_compute[356901]: 2025-10-11 02:33:12.513 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:33:12 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2791860227' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:33:12 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1637862669' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:33:12 compute-0 sshd-session[442980]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:33:12 compute-0 sshd-session[442980]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:33:12 compute-0 nova_compute[356901]: 2025-10-11 02:33:12.884 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:33:13 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1074111425' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:33:13 compute-0 nova_compute[356901]: 2025-10-11 02:33:13.039 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.526s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:33:13 compute-0 nova_compute[356901]: 2025-10-11 02:33:13.041 2 DEBUG nova.objects.instance [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lazy-loading 'pci_devices' on Instance uuid eb750444-4572-4c74-a72a-4955daed4f7b obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:33:13 compute-0 nova_compute[356901]: 2025-10-11 02:33:13.055 2 DEBUG nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] End _get_guest_xml xml=<domain type="kvm">
Oct 11 02:33:13 compute-0 nova_compute[356901]:   <uuid>eb750444-4572-4c74-a72a-4955daed4f7b</uuid>
Oct 11 02:33:13 compute-0 nova_compute[356901]:   <name>instance-00000005</name>
Oct 11 02:33:13 compute-0 nova_compute[356901]:   <memory>524288</memory>
Oct 11 02:33:13 compute-0 nova_compute[356901]:   <vcpu>1</vcpu>
Oct 11 02:33:13 compute-0 nova_compute[356901]:   <metadata>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.1">
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <nova:package version="27.5.2-0.20250829104910.6f8decf.el9"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <nova:name>fvt_testing_server</nova:name>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <nova:creationTime>2025-10-11 02:33:11</nova:creationTime>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <nova:flavor name="fvt_testing_flavor">
Oct 11 02:33:13 compute-0 nova_compute[356901]:         <nova:memory>512</nova:memory>
Oct 11 02:33:13 compute-0 nova_compute[356901]:         <nova:disk>1</nova:disk>
Oct 11 02:33:13 compute-0 nova_compute[356901]:         <nova:swap>0</nova:swap>
Oct 11 02:33:13 compute-0 nova_compute[356901]:         <nova:ephemeral>1</nova:ephemeral>
Oct 11 02:33:13 compute-0 nova_compute[356901]:         <nova:vcpus>1</nova:vcpus>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       </nova:flavor>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <nova:owner>
Oct 11 02:33:13 compute-0 nova_compute[356901]:         <nova:user uuid="d215f3ebbc07435493ccd666fc80109d">admin</nova:user>
Oct 11 02:33:13 compute-0 nova_compute[356901]:         <nova:project uuid="97026531b3404a11869cb85a059c4a0d">admin</nova:project>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       </nova:owner>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <nova:root type="image" uuid="cd2dfc66-f23e-4259-896a-4ce37807bb33"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <nova:ports/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     </nova:instance>
Oct 11 02:33:13 compute-0 nova_compute[356901]:   </metadata>
Oct 11 02:33:13 compute-0 nova_compute[356901]:   <sysinfo type="smbios">
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <system>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <entry name="manufacturer">RDO</entry>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <entry name="product">OpenStack Compute</entry>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <entry name="version">27.5.2-0.20250829104910.6f8decf.el9</entry>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <entry name="serial">eb750444-4572-4c74-a72a-4955daed4f7b</entry>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <entry name="uuid">eb750444-4572-4c74-a72a-4955daed4f7b</entry>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <entry name="family">Virtual Machine</entry>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     </system>
Oct 11 02:33:13 compute-0 nova_compute[356901]:   </sysinfo>
Oct 11 02:33:13 compute-0 nova_compute[356901]:   <os>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <type arch="x86_64" machine="q35">hvm</type>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <boot dev="hd"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <smbios mode="sysinfo"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:   </os>
Oct 11 02:33:13 compute-0 nova_compute[356901]:   <features>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <acpi/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <apic/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <vmcoreinfo/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:   </features>
Oct 11 02:33:13 compute-0 nova_compute[356901]:   <clock offset="utc">
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <timer name="pit" tickpolicy="delay"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <timer name="rtc" tickpolicy="catchup"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <timer name="hpet" present="no"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:   </clock>
Oct 11 02:33:13 compute-0 nova_compute[356901]:   <cpu mode="host-model" match="exact">
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <topology sockets="1" cores="1" threads="1"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:33:13 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/eb750444-4572-4c74-a72a-4955daed4f7b_disk">
Oct 11 02:33:13 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       </source>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:33:13 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <target dev="vda" bus="virtio"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/eb750444-4572-4c74-a72a-4955daed4f7b_disk.eph0">
Oct 11 02:33:13 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       </source>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:33:13 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <target dev="vdb" bus="virtio"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <disk type="network" device="cdrom">
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/eb750444-4572-4c74-a72a-4955daed4f7b_disk.config">
Oct 11 02:33:13 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       </source>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:33:13 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <target dev="sda" bus="sata"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <serial type="pty">
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <log file="/var/lib/nova/instances/eb750444-4572-4c74-a72a-4955daed4f7b/console.log" append="off"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     </serial>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <graphics type="vnc" autoport="yes" listen="::0"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <video>
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     </video>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <input type="tablet" bus="usb"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <rng model="virtio">
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <backend model="random">/dev/urandom</backend>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <controller type="usb" index="0"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     <memballoon model="virtio">
Oct 11 02:33:13 compute-0 nova_compute[356901]:       <stats period="10"/>
Oct 11 02:33:13 compute-0 nova_compute[356901]:     </memballoon>
Oct 11 02:33:13 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:33:13 compute-0 nova_compute[356901]: </domain>
Oct 11 02:33:13 compute-0 nova_compute[356901]:  _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7555
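The XML dumped between "Start _get_guest_xml" and here is the full libvirt definition nova is about to launch: a q35 machine with a host-model 1:1:1 CPU and three network disks (root, ephemeral, config drive) all served from the vms RBD pool under the same ceph auth secret. One quick way to sanity-check such a dump is to parse it and list the disk mappings; a stdlib sketch, assuming the XML above has been saved to a hypothetical domain.xml:

    # Print each disk's target device, kind, and RBD source from a libvirt
    # domain XML such as the one dumped in the log above.
    import xml.etree.ElementTree as ET

    root = ET.parse('domain.xml').getroot()
    for disk in root.findall('./devices/disk'):
        target = disk.find('target').get('dev')
        source = disk.find('source')
        name = source.get('name') or source.get('file')
        print(f"{target} ({disk.get('device')}): {name}")

For this domain that prints vda (root disk), vdb (ephemeral), and sda (the cdrom config drive), matching the mapping chosen back at "Start _get_guest_xml".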
Oct 11 02:33:13 compute-0 nova_compute[356901]: 2025-10-11 02:33:13.122 2 DEBUG nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No BDM found with device name vda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:33:13 compute-0 nova_compute[356901]: 2025-10-11 02:33:13.123 2 DEBUG nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No BDM found with device name vdb, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:33:13 compute-0 nova_compute[356901]: 2025-10-11 02:33:13.124 2 DEBUG nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] No BDM found with device name sda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:33:13 compute-0 nova_compute[356901]: 2025-10-11 02:33:13.125 2 INFO nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Using config drive
Oct 11 02:33:13 compute-0 nova_compute[356901]: 2025-10-11 02:33:13.171 2 DEBUG nova.storage.rbd_utils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image eb750444-4572-4c74-a72a-4955daed4f7b_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:33:13 compute-0 nova_compute[356901]: 2025-10-11 02:33:13.428 2 INFO nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Creating config drive at /var/lib/nova/instances/eb750444-4572-4c74-a72a-4955daed4f7b/disk.config
Oct 11 02:33:13 compute-0 nova_compute[356901]: 2025-10-11 02:33:13.435 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): /usr/bin/mkisofs -o /var/lib/nova/instances/eb750444-4572-4c74-a72a-4955daed4f7b/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpgj2dsul0 execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:33:13 compute-0 nova_compute[356901]: 2025-10-11 02:33:13.570 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "/usr/bin/mkisofs -o /var/lib/nova/instances/eb750444-4572-4c74-a72a-4955daed4f7b/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpgj2dsul0" returned: 0 in 0.135s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
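The config drive is assembled locally as an ISO9660/Joliet/Rock Ridge image labelled config-2, the label cloud-init uses to find it. The mkisofs invocation from the log, reproduced with subprocess (the output path and staging directory are placeholders; nova's real staging dir is a tmpdir holding openstack/latest/meta_data.json and friends):

    # Rebuild the logged mkisofs command. genisoimage accepts the same flags
    # on distros that do not ship mkisofs.
    import subprocess

    subprocess.check_call([
        '/usr/bin/mkisofs', '-o', '/var/tmp/disk.config',
        '-ldots', '-allow-lowercase', '-allow-multidot', '-l',
        '-publisher', 'OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9',
        '-quiet', '-J', '-r', '-V', 'config-2',
        '/tmp/config-drive',  # placeholder staging directory
    ])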
Oct 11 02:33:13 compute-0 ceph-mon[191930]: pgmap v1658: 321 pgs: 321 active+clean; 103 MiB data, 274 MiB used, 60 GiB / 60 GiB avail; 693 KiB/s rd, 338 KiB/s wr, 13 op/s
Oct 11 02:33:13 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1074111425' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:33:13 compute-0 nova_compute[356901]: 2025-10-11 02:33:13.634 2 DEBUG nova.storage.rbd_utils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] rbd image eb750444-4572-4c74-a72a-4955daed4f7b_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:33:13 compute-0 nova_compute[356901]: 2025-10-11 02:33:13.648 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/eb750444-4572-4c74-a72a-4955daed4f7b/disk.config eb750444-4572-4c74-a72a-4955daed4f7b_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.866 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads available to execute them; the polling process can therefore be expected to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.867 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.876 14 DEBUG ceilometer.compute.discovery [-] Querying metadata for instance eb750444-4572-4c74-a72a-4955daed4f7b from Nova API get_server /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:176
Oct 11 02:33:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:13.878 14 DEBUG novaclient.v2.client [-] REQ: curl -g -i -X GET https://nova-internal.openstack.svc:8774/v2.1/servers/eb750444-4572-4c74-a72a-4955daed4f7b -H "Accept: application/json" -H "User-Agent: python-novaclient" -H "X-Auth-Token: {SHA256}d674387017edb5d8543811c363b3a2965950a94ddf4462840fede0e79ac258e9" -H "X-OpenStack-Nova-API-Version: 2.1" _http_log_request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:572
Oct 11 02:33:13 compute-0 nova_compute[356901]: 2025-10-11 02:33:13.921 2 DEBUG oslo_concurrency.processutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/eb750444-4572-4c74-a72a-4955daed4f7b/disk.config eb750444-4572-4c74-a72a-4955daed4f7b_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.273s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:33:13 compute-0 nova_compute[356901]: 2025-10-11 02:33:13.923 2 INFO nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Deleting local config drive /var/lib/nova/instances/eb750444-4572-4c74-a72a-4955daed4f7b/disk.config because it was imported into RBD.
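With rbd-backed config drives, the ISO only exists on local disk long enough to be imported into the vms pool; once the import returns 0 the local copy is removed, and the sata cdrom in the domain XML reads it over the network like the other disks. The same two steps scripted (flags copied from the log; the source path is a placeholder):

    # Import the local config-drive ISO into ceph, then drop the local copy,
    # mirroring the sequence in the log above.
    import os
    import subprocess

    iso = '/var/tmp/disk.config'  # placeholder for the instance-dir copy
    subprocess.check_call([
        'rbd', 'import', '--pool', 'vms', iso,
        'eb750444-4572-4c74-a72a-4955daed4f7b_disk.config',
        '--image-format=2', '--id', 'openstack', '--conf', '/etc/ceph/ceph.conf',
    ])
    os.unlink(iso)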
Oct 11 02:33:13 compute-0 systemd[1]: Starting libvirt secret daemon...
Oct 11 02:33:13 compute-0 systemd[1]: Started libvirt secret daemon.
Oct 11 02:33:14 compute-0 systemd-machined[137586]: New machine qemu-5-instance-00000005.
Oct 11 02:33:14 compute-0 systemd[1]: Started Virtual Machine qemu-5-instance-00000005.
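The libvirt secret daemon starting and systemd registering machine qemu-5-instance-00000005 are the visible effects of nova handing the domain XML to libvirt. A minimal sketch of that hand-off with libvirt-python (the XML file is the hypothetical domain.xml from earlier; nova's real path also defines the ceph secret and wires up lifecycle events):

    # Define and boot a guest from a domain XML string, the step behind the
    # "Started Virtual Machine" message above. Requires libvirt-python and
    # access to the system libvirtd socket.
    import libvirt

    with open('domain.xml') as f:
        xml = f.read()

    conn = libvirt.open('qemu:///system')
    try:
        dom = conn.defineXML(xml)  # persist the definition
        dom.create()               # start it (virsh start equivalent)
        print(dom.name(), 'active:', dom.isActive() == 1)
    finally:
        conn.close()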
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.169 14 DEBUG novaclient.v2.client [-] RESP: [200] Connection: Keep-Alive Content-Length: 1551 Content-Type: application/json Date: Sat, 11 Oct 2025 02:33:13 GMT Keep-Alive: timeout=5, max=100 OpenStack-API-Version: compute 2.1 Server: Apache Vary: OpenStack-API-Version,X-OpenStack-Nova-API-Version X-OpenStack-Nova-API-Version: 2.1 x-compute-request-id: req-8840f8db-0da3-416d-9c0b-2de8d29e4e5c x-openstack-request-id: req-8840f8db-0da3-416d-9c0b-2de8d29e4e5c _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:613
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.171 14 DEBUG novaclient.v2.client [-] RESP BODY: {"server": {"id": "eb750444-4572-4c74-a72a-4955daed4f7b", "name": "fvt_testing_server", "status": "BUILD", "tenant_id": "97026531b3404a11869cb85a059c4a0d", "user_id": "d215f3ebbc07435493ccd666fc80109d", "metadata": {}, "hostId": "2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736", "image": {"id": "cd2dfc66-f23e-4259-896a-4ce37807bb33", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/images/cd2dfc66-f23e-4259-896a-4ce37807bb33"}]}, "flavor": {"id": "78c20b95-5cf7-42e2-9353-630728b9091d", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/flavors/78c20b95-5cf7-42e2-9353-630728b9091d"}]}, "created": "2025-10-11T02:33:04Z", "updated": "2025-10-11T02:33:06Z", "addresses": {}, "accessIPv4": "", "accessIPv6": "", "links": [{"rel": "self", "href": "https://nova-internal.openstack.svc:8774/v2.1/servers/eb750444-4572-4c74-a72a-4955daed4f7b"}, {"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/servers/eb750444-4572-4c74-a72a-4955daed4f7b"}], "OS-DCF:diskConfig": "MANUAL", "progress": 0, "OS-EXT-AZ:availability_zone": "nova", "config_drive": "", "key_name": null, "OS-SRV-USG:launched_at": null, "OS-SRV-USG:terminated_at": null, "OS-EXT-SRV-ATTR:host": "compute-0.ctlplane.example.com", "OS-EXT-SRV-ATTR:instance_name": "instance-00000005", "OS-EXT-SRV-ATTR:hypervisor_hostname": "compute-0.ctlplane.example.com", "OS-EXT-STS:task_state": "spawning", "OS-EXT-STS:vm_state": "building", "OS-EXT-STS:power_state": 0, "os-extended-volumes:volumes_attached": []}} _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:648
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.171 14 DEBUG novaclient.v2.client [-] GET call to compute for https://nova-internal.openstack.svc:8774/v2.1/servers/eb750444-4572-4c74-a72a-4955daed4f7b used request id req-8840f8db-0da3-416d-9c0b-2de8d29e4e5c request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:1073
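ceilometer's discovery step is an ordinary authenticated GET against the compute API; the log shows it in curl form with the token hashed, then the 200 response and the request id used. The same request issued through keystoneauth1 (the keystone URL and credentials below are illustrative placeholders; the microversion header matches the log):

    # Fetch one server record from Nova the way the novaclient log lines show.
    from keystoneauth1 import session
    from keystoneauth1.identity import v3

    auth = v3.Password(
        auth_url='https://keystone-internal.openstack.svc:5000/v3',  # placeholder
        username='ceilometer', password='secret', project_name='service',
        user_domain_name='Default', project_domain_name='Default',
    )
    sess = session.Session(auth=auth)
    resp = sess.get(
        'https://nova-internal.openstack.svc:8774/v2.1/servers/'
        'eb750444-4572-4c74-a72a-4955daed4f7b',
        headers={'X-OpenStack-Nova-API-Version': '2.1'},
    )
    print(resp.json()['server']['status'])  # "BUILD" in the response above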
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.193 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': 'eb750444-4572-4c74-a72a-4955daed4f7b', 'name': 'fvt_testing_server', 'flavor': {'id': '78c20b95-5cf7-42e2-9353-630728b9091d', 'name': 'fvt_testing_flavor', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'cd2dfc66-f23e-4259-896a-4ce37807bb33'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000005', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'paused', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'paused', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.200 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.200 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.201 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.201 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.202 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.203 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:33:14.202120) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.215 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2856 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.217 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.217 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.217 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.217 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.218 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.218 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.218 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 23 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.219 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:33:14.218212) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.219 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.219 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.220 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.220 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.220 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.221 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:33:14.220702) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.220 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.221 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.222 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.222 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.222 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.223 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.223 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.223 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.224 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.225 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
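[Note: the ceilometer_agent_compute entries above repeat one fixed cycle per meter: execute discovery for the pollster, check whether it needs coordination (none of these sources joins a hash ring, so the group stays None), stamp a per-pollster heartbeat, emit one sample per discovered resource, and log completion. A minimal sketch of that control flow, using invented names (Pollster, AgentLoop) rather than ceilometer's real classes:]

import time
from dataclasses import dataclass, field

@dataclass
class Pollster:
    name: str                              # e.g. "disk.device.capacity"
    coordination_group: str | None = None  # None => no hash-ring coordination

@dataclass
class AgentLoop:
    heartbeats: dict = field(default_factory=dict)

    def discover(self):
        # stand-in for the [local_instances] discovery step
        return ["0cc56d17-ec3a-4408-bccb-91b29427379e",
                "eb750444-4572-4c74-a72a-4955daed4f7b"]

    def run(self, pollster, get_sample):
        resources = self.discover()                   # "Executing discovery process ..."
        if pollster.coordination_group is not None:
            pass                                      # would join a hash ring here
        self.heartbeats[pollster.name] = time.time()  # "Updated heartbeat for ..."
        for res in resources:
            print(f"{res}/{pollster.name} volume: {get_sample(res)}")
        print(f"Finished polling pollster {pollster.name}")

AgentLoop().run(Pollster("power.state"), get_sample=lambda res: 1)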
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.225 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.226 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:33:14.223607) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.227 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.227 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.228 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.229 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:14.229 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:33:14.229148) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1659: 321 pgs: 321 active+clean; 124 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 1.3 MiB/s rd, 1.4 MiB/s wr, 43 op/s
Oct 11 02:33:14 compute-0 nova_compute[356901]: 2025-10-11 02:33:14.762 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:14 compute-0 podman[443179]: 2025-10-11 02:33:14.840552858 +0000 UTC m=+0.113689292 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_id=edpm)
Oct 11 02:33:14 compute-0 podman[443177]: 2025-10-11 02:33:14.856899224 +0000 UTC m=+0.146502388 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:33:14 compute-0 podman[443180]: 2025-10-11 02:33:14.85780186 +0000 UTC m=+0.133281122 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_managed=true, container_name=ovn_metadata_agent, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:33:14 compute-0 podman[443178]: 2025-10-11 02:33:14.876304419 +0000 UTC m=+0.160679661 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, config_id=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
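[Note: each podman record above folds the container's whole health report into one journal line of the shape container health_status <64-hex id> (image=..., name=..., health_status=..., health_failing_streak=..., ...). A small extractor for the two fields that matter for monitoring; the regex is an assumption, since podman does not promise a stable layout for these key=value pairs:]

import re

HEALTH_RE = re.compile(
    r"container health_status (?P<cid>[0-9a-f]{64}) \(.*?"
    r"name=(?P<name>[^,]+),.*?health_status=(?P<status>[^,)]+)"
)

line = ("Oct 11 02:33:14 compute-0 podman[443179]: ... container health_status "
        "c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 "
        "(image=..., name=ceilometer_agent_compute, health_status=healthy, ...)")

m = HEALTH_RE.search(line)
if m:
    print(m.group("name"), "->", m.group("status"))
    # ceilometer_agent_compute -> healthy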
Oct 11 02:33:15 compute-0 sshd-session[442980]: Failed password for invalid user admin from 121.227.153.123 port 49082 ssh2
Oct 11 02:33:15 compute-0 ceph-mon[191930]: pgmap v1659: 321 pgs: 321 active+clean; 124 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 1.3 MiB/s rd, 1.4 MiB/s wr, 43 op/s
Oct 11 02:33:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e130 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:33:16 compute-0 sshd-session[442980]: Connection closed by invalid user admin 121.227.153.123 port 49082 [preauth]
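[Note: interleaved with the telemetry noise, sshd-session records a brute-force probe: a failed password for the invalid user admin from 121.227.153.123, then the preauth disconnect one second later. A quick tally of such attempts per source address and username; the journal dump path is an assumption:]

import re
from collections import Counter

FAILED_RE = re.compile(
    r"Failed password for (?:invalid user )?(?P<user>\S+) "
    r"from (?P<ip>\d{1,3}(?:\.\d{1,3}){3}) port \d+"
)

attempts = Counter()
with open("/var/log/messages") as fh:   # assumed location of this log
    for line in fh:
        m = FAILED_RE.search(line)
        if m:
            attempts[(m.group("ip"), m.group("user"))] += 1

for (ip, user), n in attempts.most_common(10):
    print(f"{ip} tried user {user!r} {n} times")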
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.386 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760149996.3857858, eb750444-4572-4c74-a72a-4955daed4f7b => Resumed> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.387 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] VM Resumed (Lifecycle Event)
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.393 2 DEBUG nova.compute.manager [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Instance event wait completed in 0 seconds for  wait_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:577
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.394 2 DEBUG nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Guest created on hypervisor spawn /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4417
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.400 2 INFO nova.virt.libvirt.driver [-] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Instance spawned successfully.
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.401 2 DEBUG nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Attempting to register defaults for the following image properties: ['hw_cdrom_bus', 'hw_disk_bus', 'hw_input_bus', 'hw_pointer_model', 'hw_video_model', 'hw_vif_model'] _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:917
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.413 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.415 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.416 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.428 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.447 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.448 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.448 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.449 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.450 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.450 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.450 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.451 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.449 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Synchronizing instance power state after lifecycle event "Resumed"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.451 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.452 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:33:16.451220) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.462 2 DEBUG nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Found default for hw_cdrom_bus of sata _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.463 2 DEBUG nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Found default for hw_disk_bus of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.463 2 DEBUG nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Found default for hw_input_bus of usb _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.464 2 DEBUG nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Found default for hw_pointer_model of usbtablet _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.465 2 DEBUG nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Found default for hw_video_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.465 2 DEBUG nova.virt.libvirt.driver [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Found default for hw_vif_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.473 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.473 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760149996.387288, eb750444-4572-4c74-a72a-4955daed4f7b => Started> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.474 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] VM Started (Lifecycle Event)
Oct 11 02:33:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1660: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 1.3 MiB/s rd, 1.4 MiB/s wr, 48 op/s
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.492 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.498 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Synchronizing instance power state after lifecycle event "Started"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.523 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.read.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.524 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.read.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.524 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.read.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.526 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.532 2 INFO nova.compute.manager [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Took 9.77 seconds to spawn the instance on the hypervisor.
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.533 2 DEBUG nova.compute.manager [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.585 2 INFO nova.compute.manager [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Took 11.01 seconds to build instance.
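[Note: with that, the boot sequence nova_compute has been driving is complete: the guest was created on the hypervisor, the Resumed and Started lifecycle events were reconciled (power-state sync is skipped while task_state is still spawning), and the instance reports 9.77 s to spawn and 11.01 s to build end to end. Those "Took N seconds" lines make a convenient latency baseline; a sketch of mining them, with the file path assumed:]

import re

TOOK_RE = re.compile(
    r"\[instance: (?P<uuid>[0-9a-f-]{36})\] Took (?P<secs>\d+\.\d+) seconds "
    r"to (?P<phase>spawn the instance on the hypervisor|build instance)"
)

with open("/var/log/messages") as fh:   # assumed location of this log
    for line in fh:
        m = TOOK_RE.search(line)
        if m:
            print(m.group("uuid"), m.group("phase"), m.group("secs") + "s")
# eb750444-4572-4c74-a72a-4955daed4f7b spawn the instance on the hypervisor 9.77s
# eb750444-4572-4c74-a72a-4955daed4f7b build instance 11.01s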
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.589 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.590 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.591 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.592 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.592 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.592 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.592 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.593 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.593 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.593 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.read.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.594 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:33:16.593332) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.594 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.read.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.595 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.read.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.596 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.596 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.597 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.598 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.598 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.599 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.599 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.599 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.600 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.600 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.read.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.601 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:33:16.600090) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.601 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.read.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 nova_compute[356901]: 2025-10-11 02:33:16.601 2 DEBUG oslo_concurrency.lockutils [None req-b395eec8-0778-4423-be09-9f0e9e196dcd d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "eb750444-4572-4c74-a72a-4955daed4f7b" "released" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: held 11.083s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.602 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.read.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.602 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.603 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.604 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.605 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
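[Note: pairing the three disk.device.read.latency samples above with the matching disk.device.read.requests counts for instance 0cc56d17 gives a rough mean cost per read so far, assuming the latency counters are cumulative nanoseconds, as libvirt's rd_total_times block counter is:]

latency_ns = [1873916781, 320672168, 185829476]  # disk.device.read.latency samples
requests   = [840, 173, 109]                     # disk.device.read.requests samples

for lat, req in zip(latency_ns, requests):
    print(f"{lat / req / 1e6:.2f} ms/read over {req} reads")
# 2.23 ms/read over 840 reads
# 1.85 ms/read over 173 reads
# 1.70 ms/read over 109 reads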
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.605 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.606 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.606 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.606 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.606 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.607 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.608 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:33:16.606766) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.608 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.609 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.609 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.610 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.611 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.612 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.612 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.612 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.613 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.613 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.613 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.614 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.614 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:33:16.613768) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.615 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.616 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.616 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.617 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.618 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.619 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.619 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.619 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.620 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.620 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.620 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.620 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.620 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:33:16.620416) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.621 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.621 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.622 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.622 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.623 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.623 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.624 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.624 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.624 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.624 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.624 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.625 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:33:16.624863) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.657 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.684 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.685 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
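[Note: both instances report power.state volume 1, matching the "VM power_state: 1" in the nova sync messages above. In nova's power_state module 1 is RUNNING and 0 is NOSTATE, which is why the database still shows 0 while the instance is mid-spawn. The mapping, for reference:]

# Numeric codes as in nova.compute.power_state (2 and 5 are unused).
POWER_STATE = {
    0: "NOSTATE",
    1: "RUNNING",
    3: "PAUSED",
    4: "SHUTDOWN",
    6: "CRASHED",
    7: "SUSPENDED",
}

print(POWER_STATE[1])  # RUNNING -> what both power.state samples report
print(POWER_STATE[0])  # NOSTATE -> the DB value while task_state is spawning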
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.685 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.686 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.686 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.686 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.686 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.687 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.687 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.688 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.688 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.689 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.689 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.690 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.690 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.691 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.691 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.691 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.691 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.692 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 336 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.692 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.692 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.693 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.rate in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.693 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.693 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.693 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.rate heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.694 14 DEBUG ceilometer.compute.pollsters [-] LibvirtInspector does not provide data for IncomingBytesRatePollster get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:162
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.694 14 ERROR ceilometer.polling.manager [-] Prevent pollster network.incoming.bytes.rate from polling [<NovaLikeServer: fvt_testing_server>] on source pollsters anymore!: ceilometer.polling.plugin_base.PollsterPermanentError: [<NovaLikeServer: fvt_testing_server>]
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.694 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.695 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.695 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.695 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.695 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.696 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.696 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.697 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.697 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.697 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.697 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.697 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.698 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.699 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.699 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.699 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.699 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.700 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.700 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.701 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.701 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.701 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.701 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.702 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.702 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.703 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.703 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.703 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.703 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.703 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.704 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.704 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.705 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:33:16.686780) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.705 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:33:16.691793) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.705 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.705 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.rate (2025-10-11T02:33:16.693825) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.705 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.706 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:33:16.695670) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.706 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.706 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.706 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:33:16.697649) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.706 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.706 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:33:16.700131) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.706 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:33:16.702154) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.706 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.706 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:33:16.704080) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.706 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.707 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.707 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.708 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.709 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.709 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.710 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.711 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.711 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.711 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.712 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.712 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.712 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.713 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.714 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.714 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.714 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.714 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.715 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.715 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/cpu volume: 210000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.716 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 46470000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.716 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.717 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.717 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.717 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.718 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.718 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.718 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2342 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.719 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.720 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.720 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.720 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.720 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.721 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.721 14 DEBUG ceilometer.compute.pollsters [-] eb750444-4572-4c74-a72a-4955daed4f7b/memory.usage volume: Unavailable _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.721 14 WARNING ceilometer.compute.pollsters [-] memory.usage statistic is not available for instance eb750444-4572-4c74-a72a-4955daed4f7b: ceilometer.compute.pollsters.NoVolumeException
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.722 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.83984375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.722 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.723 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.723 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.rate in the context of pollsters
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.723 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.723 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.723 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.rate heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.724 14 DEBUG ceilometer.compute.pollsters [-] LibvirtInspector does not provide data for OutgoingBytesRatePollster get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:162
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.724 14 ERROR ceilometer.polling.manager [-] Prevent pollster network.outgoing.bytes.rate from polling [<NovaLikeServer: fvt_testing_server>] on source pollsters anymore!: ceilometer.polling.plugin_base.PollsterPermanentError: [<NovaLikeServer: fvt_testing_server>]
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.725 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.726 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.726 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.726 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.726 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.726 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.726 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.726 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.726 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.727 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.727 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.727 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.727 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:33:16.706589) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.727 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.727 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.727 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:33:16.712545) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.727 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.727 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.727 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:33:16.715199) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.727 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.727 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:33:16.718488) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.727 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.727 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.727 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:33:16.721179) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.728 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.728 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.rate (2025-10-11T02:33:16.723919) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.728 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.728 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.728 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.728 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.728 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:33:16.728 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:33:17 compute-0 systemd[1]: Starting libvirt proxy daemon...
Oct 11 02:33:17 compute-0 systemd[1]: Started libvirt proxy daemon.
Oct 11 02:33:17 compute-0 ceph-mon[191930]: pgmap v1660: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 1.3 MiB/s rd, 1.4 MiB/s wr, 48 op/s
Oct 11 02:33:17 compute-0 sshd-session[443318]: Invalid user pi from 121.227.153.123 port 49094
Oct 11 02:33:17 compute-0 sshd-session[443318]: pam_unix(sshd:auth): check pass; user unknown
Oct 11 02:33:17 compute-0 sshd-session[443318]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123
Oct 11 02:33:17 compute-0 nova_compute[356901]: 2025-10-11 02:33:17.891 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1661: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 1.4 MiB/s rd, 1.4 MiB/s wr, 54 op/s
Oct 11 02:33:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:33:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3000.1 total, 600.0 interval
                                            Cumulative writes: 8598 writes, 33K keys, 8598 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.01 MB/s
                                            Cumulative WAL: 8598 writes, 1979 syncs, 4.34 writes per sync, written: 0.02 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 783 writes, 2196 keys, 783 commit groups, 1.0 writes per commit group, ingest: 1.52 MB, 0.00 MB/s
                                            Interval WAL: 783 writes, 356 syncs, 2.20 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:33:19 compute-0 podman[443337]: 2025-10-11 02:33:19.252795001 +0000 UTC m=+0.130386113 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, container_name=multipathd, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
Oct 11 02:33:19 compute-0 podman[443338]: 2025-10-11 02:33:19.278542299 +0000 UTC m=+0.154445544 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, config_id=iscsid, container_name=iscsid, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']})
Oct 11 02:33:19 compute-0 sshd-session[443318]: Failed password for invalid user pi from 121.227.153.123 port 49094 ssh2
Oct 11 02:33:19 compute-0 ceph-mon[191930]: pgmap v1661: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 1.4 MiB/s rd, 1.4 MiB/s wr, 54 op/s
Oct 11 02:33:19 compute-0 nova_compute[356901]: 2025-10-11 02:33:19.764 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1662: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 1.4 MiB/s wr, 68 op/s
Oct 11 02:33:20 compute-0 ceph-mon[191930]: pgmap v1662: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 1.4 MiB/s wr, 68 op/s
Oct 11 02:33:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e130 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:33:21 compute-0 sshd-session[443318]: Connection closed by invalid user pi 121.227.153.123 port 49094 [preauth]
Oct 11 02:33:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1663: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 2.0 MiB/s rd, 1.4 MiB/s wr, 78 op/s
Oct 11 02:33:22 compute-0 nova_compute[356901]: 2025-10-11 02:33:22.896 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:22 compute-0 unix_chkpwd[443379]: password check failed for user (ftp)
Oct 11 02:33:22 compute-0 sshd-session[443377]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.227.153.123  user=ftp
Oct 11 02:33:23 compute-0 ceph-mon[191930]: pgmap v1663: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 2.0 MiB/s rd, 1.4 MiB/s wr, 78 op/s
Oct 11 02:33:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1664: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 2.1 MiB/s rd, 1.0 MiB/s wr, 97 op/s
Oct 11 02:33:24 compute-0 nova_compute[356901]: 2025-10-11 02:33:24.767 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:24 compute-0 sshd-session[443377]: Failed password for ftp from 121.227.153.123 port 33360 ssh2
Oct 11 02:33:25 compute-0 ceph-mon[191930]: pgmap v1664: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 2.1 MiB/s rd, 1.0 MiB/s wr, 97 op/s
Oct 11 02:33:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e130 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:33:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:33:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3000.2 total, 600.0 interval
                                            Cumulative writes: 7165 writes, 28K keys, 7165 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.01 MB/s
                                            Cumulative WAL: 7165 writes, 1536 syncs, 4.66 writes per sync, written: 0.02 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 676 writes, 2157 keys, 676 commit groups, 1.0 writes per commit group, ingest: 2.05 MB, 0.00 MB/s
                                            Interval WAL: 676 writes, 300 syncs, 2.25 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:33:26 compute-0 ceph-mgr[192233]: [devicehealth INFO root] Check health
Oct 11 02:33:26 compute-0 sshd-session[443377]: Connection closed by authenticating user ftp 121.227.153.123 port 33360 [preauth]
Oct 11 02:33:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1665: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 14 KiB/s wr, 60 op/s
Oct 11 02:33:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:33:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:33:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:33:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:33:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:33:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:33:27 compute-0 ceph-mon[191930]: pgmap v1665: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 14 KiB/s wr, 60 op/s
Oct 11 02:33:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:33:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1748008122' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:33:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:33:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1748008122' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:33:27 compute-0 nova_compute[356901]: 2025-10-11 02:33:27.899 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1666: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 12 KiB/s wr, 55 op/s
Oct 11 02:33:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1748008122' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:33:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1748008122' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:33:29 compute-0 ceph-mon[191930]: pgmap v1666: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 12 KiB/s wr, 55 op/s
Oct 11 02:33:29 compute-0 podman[157119]: time="2025-10-11T02:33:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:33:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:33:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:33:29 compute-0 nova_compute[356901]: 2025-10-11 02:33:29.770 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:33:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9050 "" "Go-http-client/1.1"
Oct 11 02:33:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1667: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 1.4 MiB/s rd, 12 KiB/s wr, 50 op/s
Oct 11 02:33:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e130 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:33:31 compute-0 podman[443382]: 2025-10-11 02:33:31.249642378 +0000 UTC m=+0.117062736 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:33:31 compute-0 podman[443381]: 2025-10-11 02:33:31.252675505 +0000 UTC m=+0.127998694 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, version=9.6, architecture=x86_64, io.buildah.version=1.33.7, io.openshift.tags=minimal rhel9, container_name=openstack_network_exporter, vcs-type=git, build-date=2025-08-20T13:12:41, config_id=edpm, io.openshift.expose-services=, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, managed_by=edpm_ansible, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, maintainer=Red Hat, Inc., distribution-scope=public, release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, com.redhat.component=ubi9-minimal-container, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://catalog.redhat.com/en/search?searchType=containers)
Oct 11 02:33:31 compute-0 podman[443380]: 2025-10-11 02:33:31.26918023 +0000 UTC m=+0.158599575 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=edpm, managed_by=edpm_ansible, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 02:33:31 compute-0 openstack_network_exporter[374316]: ERROR   02:33:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:33:31 compute-0 openstack_network_exporter[374316]: ERROR   02:33:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:33:31 compute-0 openstack_network_exporter[374316]: ERROR   02:33:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:33:31 compute-0 openstack_network_exporter[374316]: ERROR   02:33:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:33:31 compute-0 openstack_network_exporter[374316]: ERROR   02:33:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
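
The exporter errors above recur throughout this log. The exporter locates daemon control sockets the way ovs-appctl does, by looking for <daemon>.<pid>.ctl files in the run directories; on this compute node ovn-northd does not run locally and no standalone ovsdb-server socket is reachable, so the lookup fails on every scrape. A minimal sketch of the same probe, assuming the host-side run directories implied by the exporter's volume mounts above (/var/run/openvswitch and /var/lib/openvswitch/ovn):

    # Sketch: list the OVS/OVN control sockets the exporter would probe.
    # Directories assumed from the exporter's config_data volume mounts.
    import glob

    RUN_DIRS = ["/var/run/openvswitch", "/var/lib/openvswitch/ovn"]

    def control_sockets():
        found = []
        for d in RUN_DIRS:
            # appctl-style sockets are named "<daemon>.<pid>.ctl"
            found.extend(glob.glob(f"{d}/*.ctl"))
        return found

    if __name__ == "__main__":
        socks = control_sockets()
        if not socks:
            print("no control socket files found")  # matches the exporter's error text
        for s in socks:
            print(s)
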
Oct 11 02:33:31 compute-0 ceph-mon[191930]: pgmap v1667: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 1.4 MiB/s rd, 12 KiB/s wr, 50 op/s
Oct 11 02:33:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1668: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 1.1 MiB/s rd, 35 op/s
Oct 11 02:33:32 compute-0 nova_compute[356901]: 2025-10-11 02:33:32.903 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:33 compute-0 ceph-mon[191930]: pgmap v1668: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 1.1 MiB/s rd, 35 op/s
Oct 11 02:33:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1669: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 797 KiB/s rd, 25 op/s
Oct 11 02:33:34 compute-0 ceph-mon[191930]: pgmap v1669: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 797 KiB/s rd, 25 op/s
Oct 11 02:33:34 compute-0 nova_compute[356901]: 2025-10-11 02:33:34.774 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:35 compute-0 nova_compute[356901]: 2025-10-11 02:33:35.210 2 DEBUG oslo_concurrency.lockutils [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "eb750444-4572-4c74-a72a-4955daed4f7b" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:33:35 compute-0 nova_compute[356901]: 2025-10-11 02:33:35.211 2 DEBUG oslo_concurrency.lockutils [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "eb750444-4572-4c74-a72a-4955daed4f7b" acquired by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:33:35 compute-0 nova_compute[356901]: 2025-10-11 02:33:35.212 2 DEBUG oslo_concurrency.lockutils [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "eb750444-4572-4c74-a72a-4955daed4f7b-events" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:33:35 compute-0 nova_compute[356901]: 2025-10-11 02:33:35.212 2 DEBUG oslo_concurrency.lockutils [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "eb750444-4572-4c74-a72a-4955daed4f7b-events" acquired by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:33:35 compute-0 nova_compute[356901]: 2025-10-11 02:33:35.213 2 DEBUG oslo_concurrency.lockutils [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "eb750444-4572-4c74-a72a-4955daed4f7b-events" "released" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
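
The Acquiring/acquired/"released" triplets above, with their waited/held timings, are oslo.concurrency's lock tracing: terminate_instance serializes on the instance UUID, and _clear_events nests a second lock on "<uuid>-events". A minimal sketch of the same pattern; the function names are illustrative, not Nova's actual code:

    # Sketch of the oslo.concurrency locking pattern seen above; the lock
    # names mirror the log ("<uuid>" and "<uuid>-events"), the functions
    # are hypothetical stand-ins for the Nova methods named in the log.
    from oslo_concurrency import lockutils

    INSTANCE_UUID = "eb750444-4572-4c74-a72a-4955daed4f7b"

    @lockutils.synchronized(INSTANCE_UUID)
    def do_terminate_instance():
        clear_events_for_instance()

    @lockutils.synchronized(INSTANCE_UUID + "-events")
    def clear_events_for_instance():
        # lockutils emits the "Acquiring"/"acquired"/"released" DEBUG
        # lines with waited/held timings, exactly as in the journal.
        pass

    do_terminate_instance()
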
Oct 11 02:33:35 compute-0 nova_compute[356901]: 2025-10-11 02:33:35.215 2 INFO nova.compute.manager [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Terminating instance
Oct 11 02:33:35 compute-0 nova_compute[356901]: 2025-10-11 02:33:35.216 2 DEBUG oslo_concurrency.lockutils [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "refresh_cache-eb750444-4572-4c74-a72a-4955daed4f7b" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:33:35 compute-0 nova_compute[356901]: 2025-10-11 02:33:35.217 2 DEBUG oslo_concurrency.lockutils [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquired lock "refresh_cache-eb750444-4572-4c74-a72a-4955daed4f7b" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:33:35 compute-0 nova_compute[356901]: 2025-10-11 02:33:35.217 2 DEBUG nova.network.neutron [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:33:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e130 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:33:36 compute-0 nova_compute[356901]: 2025-10-11 02:33:36.071 2 DEBUG nova.network.neutron [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Instance cache missing network info. _get_preexisting_port_ids /usr/lib/python3.9/site-packages/nova/network/neutron.py:3323
Oct 11 02:33:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1670: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:33:37 compute-0 nova_compute[356901]: 2025-10-11 02:33:37.118 2 DEBUG nova.network.neutron [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Updating instance_info_cache with network_info: [] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:33:37 compute-0 nova_compute[356901]: 2025-10-11 02:33:37.141 2 DEBUG oslo_concurrency.lockutils [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Releasing lock "refresh_cache-eb750444-4572-4c74-a72a-4955daed4f7b" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:33:37 compute-0 nova_compute[356901]: 2025-10-11 02:33:37.143 2 DEBUG nova.compute.manager [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Start destroying the instance on the hypervisor. _shutdown_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:3120
Oct 11 02:33:37 compute-0 systemd[1]: machine-qemu\x2d5\x2dinstance\x2d00000005.scope: Deactivated successfully.
Oct 11 02:33:37 compute-0 systemd[1]: machine-qemu\x2d5\x2dinstance\x2d00000005.scope: Consumed 23.513s CPU time.
Oct 11 02:33:37 compute-0 systemd-machined[137586]: Machine qemu-5-instance-00000005 terminated.
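
systemd escapes "-" in unit names as \x2d, so machine-qemu\x2d5\x2dinstance\x2d00000005.scope is the scope for machine qemu-5-instance-00000005, matching the systemd-machined line above. A small sketch that decodes these \xXX escapes (the only escape form appearing in this log):

    # Sketch: decode systemd unit-name escapes (\xXX) as used in
    # "machine-qemu\x2d5\x2dinstance\x2d00000005.scope".
    import re

    def systemd_unescape(name: str) -> str:
        return re.sub(r"\\x([0-9a-fA-F]{2})",
                      lambda m: chr(int(m.group(1), 16)), name)

    print(systemd_unescape(r"machine-qemu\x2d5\x2dinstance\x2d00000005.scope"))
    # -> machine-qemu-5-instance-00000005.scope
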
Oct 11 02:33:37 compute-0 nova_compute[356901]: 2025-10-11 02:33:37.383 2 INFO nova.virt.libvirt.driver [-] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Instance destroyed successfully.
Oct 11 02:33:37 compute-0 nova_compute[356901]: 2025-10-11 02:33:37.383 2 DEBUG nova.objects.instance [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lazy-loading 'resources' on Instance uuid eb750444-4572-4c74-a72a-4955daed4f7b obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:33:37 compute-0 ceph-mon[191930]: pgmap v1670: 321 pgs: 321 active+clean; 126 MiB data, 287 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:33:37 compute-0 nova_compute[356901]: 2025-10-11 02:33:37.905 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:38 compute-0 podman[443463]: 2025-10-11 02:33:38.245179709 +0000 UTC m=+0.123151611 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, release=1214.1726694543, io.openshift.tags=base rhel9, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release-0.7.12=, name=ubi9, com.redhat.component=ubi9-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, maintainer=Red Hat, Inc., version=9.4, build-date=2024-09-18T21:23:30, distribution-scope=public, architecture=x86_64, io.buildah.version=1.29.0, managed_by=edpm_ansible, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.expose-services=, summary=Provides the latest release of Red Hat Universal Base Image 9., container_name=kepler, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, vendor=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']})
Oct 11 02:33:38 compute-0 nova_compute[356901]: 2025-10-11 02:33:38.440 2 INFO nova.virt.libvirt.driver [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Deleting instance files /var/lib/nova/instances/eb750444-4572-4c74-a72a-4955daed4f7b_del
Oct 11 02:33:38 compute-0 nova_compute[356901]: 2025-10-11 02:33:38.441 2 INFO nova.virt.libvirt.driver [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Deletion of /var/lib/nova/instances/eb750444-4572-4c74-a72a-4955daed4f7b_del complete
Oct 11 02:33:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1671: 321 pgs: 321 active+clean; 117 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 682 B/s rd, 0 B/s wr, 0 op/s
Oct 11 02:33:38 compute-0 nova_compute[356901]: 2025-10-11 02:33:38.501 2 INFO nova.compute.manager [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Took 1.36 seconds to destroy the instance on the hypervisor.
Oct 11 02:33:38 compute-0 nova_compute[356901]: 2025-10-11 02:33:38.502 2 DEBUG oslo.service.loopingcall [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Waiting for function nova.compute.manager.ComputeManager._try_deallocate_network.<locals>._deallocate_network_with_retries to return. func /usr/lib/python3.9/site-packages/oslo_service/loopingcall.py:435
Oct 11 02:33:38 compute-0 nova_compute[356901]: 2025-10-11 02:33:38.502 2 DEBUG nova.compute.manager [-] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Deallocating network for instance _deallocate_network /usr/lib/python3.9/site-packages/nova/compute/manager.py:2259
Oct 11 02:33:38 compute-0 nova_compute[356901]: 2025-10-11 02:33:38.502 2 DEBUG nova.network.neutron [-] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] deallocate_for_instance() deallocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1803
Oct 11 02:33:38 compute-0 nova_compute[356901]: 2025-10-11 02:33:38.656 2 DEBUG nova.network.neutron [-] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Instance cache missing network info. _get_preexisting_port_ids /usr/lib/python3.9/site-packages/nova/network/neutron.py:3323
Oct 11 02:33:38 compute-0 nova_compute[356901]: 2025-10-11 02:33:38.671 2 DEBUG nova.network.neutron [-] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Updating instance_info_cache with network_info: [] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:33:38 compute-0 nova_compute[356901]: 2025-10-11 02:33:38.690 2 INFO nova.compute.manager [-] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Took 0.19 seconds to deallocate network for instance.
Oct 11 02:33:38 compute-0 nova_compute[356901]: 2025-10-11 02:33:38.776 2 DEBUG oslo_concurrency.lockutils [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.update_usage" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:33:38 compute-0 nova_compute[356901]: 2025-10-11 02:33:38.778 2 DEBUG oslo_concurrency.lockutils [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: waited 0.003s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:33:38 compute-0 nova_compute[356901]: 2025-10-11 02:33:38.867 2 DEBUG oslo_concurrency.processutils [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:33:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:33:39 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/7606463' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:33:39 compute-0 nova_compute[356901]: 2025-10-11 02:33:39.481 2 DEBUG oslo_concurrency.processutils [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.614s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
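
Nova refreshes Ceph-backed disk capacity by shelling out to the ceph CLI exactly as logged above (the call took 0.614 s here, which the mon audit lines at 02:33:39 corroborate). A sketch that reproduces the call and reads the cluster totals; the JSON key names ("stats", "total_bytes", "total_avail_bytes") follow the usual ceph df schema and should be treated as assumptions for other releases:

    # Sketch: run the same command Nova logs above and parse the JSON.
    import json
    import subprocess

    cmd = ["ceph", "df", "--format=json", "--id", "openstack",
           "--conf", "/etc/ceph/ceph.conf"]
    out = subprocess.run(cmd, check=True, capture_output=True, text=True).stdout
    stats = json.loads(out)["stats"]  # key names assumed from the ceph df schema
    print("total GiB:", stats["total_bytes"] / 2**30)
    print("avail GiB:", stats["total_avail_bytes"] / 2**30)
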
Oct 11 02:33:39 compute-0 nova_compute[356901]: 2025-10-11 02:33:39.497 2 DEBUG nova.compute.provider_tree [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:33:39 compute-0 nova_compute[356901]: 2025-10-11 02:33:39.526 2 DEBUG nova.scheduler.client.report [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
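
Placement derives usable capacity from this inventory as (total - reserved) * allocation_ratio, so the unchanged inventory above advertises 32 VCPU, 7168 MB of RAM, and 52.2 GB of disk. A worked check:

    # Worked example: effective capacity implied by the inventory logged
    # above, using placement's (total - reserved) * allocation_ratio.
    inventory = {
        "VCPU": {"total": 8, "reserved": 0, "allocation_ratio": 4.0},
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "DISK_GB": {"total": 59, "reserved": 1, "allocation_ratio": 0.9},
    }
    for rc, inv in inventory.items():
        capacity = (inv["total"] - inv["reserved"]) * inv["allocation_ratio"]
        print(f"{rc}: {capacity}")
    # VCPU: 32.0, MEMORY_MB: 7168.0, DISK_GB: 52.2
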
Oct 11 02:33:39 compute-0 nova_compute[356901]: 2025-10-11 02:33:39.561 2 DEBUG oslo_concurrency.lockutils [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: held 0.782s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:33:39 compute-0 ceph-mon[191930]: pgmap v1671: 321 pgs: 321 active+clean; 117 MiB data, 287 MiB used, 60 GiB / 60 GiB avail; 682 B/s rd, 0 B/s wr, 0 op/s
Oct 11 02:33:39 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/7606463' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:33:39 compute-0 nova_compute[356901]: 2025-10-11 02:33:39.597 2 INFO nova.scheduler.client.report [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Deleted allocations for instance eb750444-4572-4c74-a72a-4955daed4f7b
Oct 11 02:33:39 compute-0 nova_compute[356901]: 2025-10-11 02:33:39.683 2 DEBUG oslo_concurrency.lockutils [None req-de12bb2d-1b6c-4ff3-a604-3d43190f5a89 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Lock "eb750444-4572-4c74-a72a-4955daed4f7b" "released" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: held 4.471s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:33:39 compute-0 nova_compute[356901]: 2025-10-11 02:33:39.778 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1672: 321 pgs: 321 active+clean; 105 MiB data, 279 MiB used, 60 GiB / 60 GiB avail; 12 KiB/s rd, 682 B/s wr, 16 op/s
Oct 11 02:33:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e130 do_prune osdmap full prune enabled
Oct 11 02:33:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e131 e131: 3 total, 3 up, 3 in
Oct 11 02:33:40 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e131: 3 total, 3 up, 3 in
Oct 11 02:33:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e131 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:33:41 compute-0 ceph-mon[191930]: pgmap v1672: 321 pgs: 321 active+clean; 105 MiB data, 279 MiB used, 60 GiB / 60 GiB avail; 12 KiB/s rd, 682 B/s wr, 16 op/s
Oct 11 02:33:41 compute-0 ceph-mon[191930]: osdmap e131: 3 total, 3 up, 3 in
Oct 11 02:33:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1674: 321 pgs: 321 active+clean; 93 MiB data, 275 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 2.3 KiB/s wr, 47 op/s
Oct 11 02:33:42 compute-0 nova_compute[356901]: 2025-10-11 02:33:42.909 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:43 compute-0 ceph-mon[191930]: pgmap v1674: 321 pgs: 321 active+clean; 93 MiB data, 275 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 2.3 KiB/s wr, 47 op/s
Oct 11 02:33:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1675: 321 pgs: 321 active+clean; 86 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 49 KiB/s rd, 3.5 KiB/s wr, 67 op/s
Oct 11 02:33:44 compute-0 nova_compute[356901]: 2025-10-11 02:33:44.782 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:45 compute-0 podman[443507]: 2025-10-11 02:33:45.249475327 +0000 UTC m=+0.113672342 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_compute, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0)
Oct 11 02:33:45 compute-0 podman[443505]: 2025-10-11 02:33:45.272990828 +0000 UTC m=+0.152138682 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:33:45 compute-0 podman[443508]: 2025-10-11 02:33:45.277885624 +0000 UTC m=+0.150779720 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, container_name=ovn_metadata_agent, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.3)
Oct 11 02:33:45 compute-0 podman[443506]: 2025-10-11 02:33:45.299699077 +0000 UTC m=+0.173516182 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']})
Oct 11 02:33:45 compute-0 ceph-mon[191930]: pgmap v1675: 321 pgs: 321 active+clean; 86 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 49 KiB/s rd, 3.5 KiB/s wr, 67 op/s
Oct 11 02:33:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e131 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:33:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e131 do_prune osdmap full prune enabled
Oct 11 02:33:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e132 e132: 3 total, 3 up, 3 in
Oct 11 02:33:45 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e132: 3 total, 3 up, 3 in
Oct 11 02:33:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1677: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 64 KiB/s rd, 4.4 KiB/s wr, 87 op/s
Oct 11 02:33:46 compute-0 ceph-mon[191930]: osdmap e132: 3 total, 3 up, 3 in
Oct 11 02:33:46 compute-0 ceph-mon[191930]: pgmap v1677: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 64 KiB/s rd, 4.4 KiB/s wr, 87 op/s
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #78. Immutable memtables: 0.
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:33:46.806407) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 43] Flushing memtable with next log file: 78
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150026806448, "job": 43, "event": "flush_started", "num_memtables": 1, "num_entries": 854, "num_deletes": 251, "total_data_size": 1125832, "memory_usage": 1154304, "flush_reason": "Manual Compaction"}
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 43] Level-0 flush table #79: started
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150026817803, "cf_name": "default", "job": 43, "event": "table_file_creation", "file_number": 79, "file_size": 1114906, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 33899, "largest_seqno": 34752, "table_properties": {"data_size": 1110521, "index_size": 2037, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1285, "raw_key_size": 9709, "raw_average_key_size": 19, "raw_value_size": 1101734, "raw_average_value_size": 2243, "num_data_blocks": 90, "num_entries": 491, "num_filter_entries": 491, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760149956, "oldest_key_time": 1760149956, "file_creation_time": 1760150026, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 79, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 43] Flush lasted 11527 microseconds, and 5711 cpu microseconds.
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:33:46.817928) [db/flush_job.cc:967] [default] [JOB 43] Level-0 flush table #79: 1114906 bytes OK
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:33:46.817965) [db/memtable_list.cc:519] [default] Level-0 commit table #79 started
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:33:46.822024) [db/memtable_list.cc:722] [default] Level-0 commit table #79: memtable #1 done
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:33:46.822054) EVENT_LOG_v1 {"time_micros": 1760150026822045, "job": 43, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:33:46.822081) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 43] Try to delete WAL files size 1121607, prev total WAL file size 1121607, number of live WAL files 2.
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000075.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:33:46.823715) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730033303132' seq:72057594037927935, type:22 .. '7061786F730033323634' seq:0, type:0; will stop at (end)
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 44] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 43 Base level 0, inputs: [79(1088KB)], [77(7558KB)]
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150026823774, "job": 44, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [79], "files_L6": [77], "score": -1, "input_data_size": 8855198, "oldest_snapshot_seqno": -1}
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 44] Generated table #80: 5250 keys, 7121032 bytes, temperature: kUnknown
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150026865679, "cf_name": "default", "job": 44, "event": "table_file_creation", "file_number": 80, "file_size": 7121032, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 7087965, "index_size": 18813, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 13189, "raw_key_size": 133718, "raw_average_key_size": 25, "raw_value_size": 6994961, "raw_average_value_size": 1332, "num_data_blocks": 770, "num_entries": 5250, "num_filter_entries": 5250, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760150026, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 80, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:33:46.866563) [db/compaction/compaction_job.cc:1663] [default] [JOB 44] Compacted 1@0 + 1@6 files to L6 => 7121032 bytes
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:33:46.869608) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 208.2 rd, 167.5 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(1.1, 7.4 +0.0 blob) out(6.8 +0.0 blob), read-write-amplify(14.3) write-amplify(6.4) OK, records in: 5767, records dropped: 517 output_compression: NoCompression
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:33:46.869639) EVENT_LOG_v1 {"time_micros": 1760150026869624, "job": 44, "event": "compaction_finished", "compaction_time_micros": 42522, "compaction_time_cpu_micros": 19589, "output_level": 6, "num_output_files": 1, "total_output_size": 7121032, "num_input_records": 5767, "num_output_records": 5250, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000079.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150026872096, "job": 44, "event": "table_file_deletion", "file_number": 79}
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000077.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150026875947, "job": 44, "event": "table_file_deletion", "file_number": 77}
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:33:46.823536) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:33:46.876813) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:33:46.876819) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:33:46.876823) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:33:46.876825) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:33:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:33:46.876828) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
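
The ceph-mon RocksDB lines above carry structured EVENT_LOG_v1 JSON records (flush_started, table_file_creation, compaction_finished, ...), which make mon store churn easy to quantify from the journal. A sketch that extracts them from log text on stdin; the journalctl invocation in the comment is an assumption about how this host tags the mon logs:

    # Sketch: pull EVENT_LOG_v1 JSON records out of journal text, e.g.
    #   journalctl -t ceph-mon | python3 rocksdb_events.py
    import json
    import re
    import sys

    PAT = re.compile(r"EVENT_LOG_v1 (\{.*\})")

    for line in sys.stdin:
        m = PAT.search(line)
        if not m:
            continue
        ev = json.loads(m.group(1))
        print(ev.get("event"), ev.get("job"), ev.get("time_micros"))
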
Oct 11 02:33:47 compute-0 nova_compute[356901]: 2025-10-11 02:33:47.912 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:48 compute-0 sshd-session[441418]: Received disconnect from 38.102.83.70 port 56298:11: disconnected by user
Oct 11 02:33:48 compute-0 sshd-session[441418]: Disconnected from user zuul 38.102.83.70 port 56298
Oct 11 02:33:48 compute-0 sshd-session[441415]: pam_unix(sshd:session): session closed for user zuul
Oct 11 02:33:48 compute-0 systemd[1]: session-63.scope: Deactivated successfully.
Oct 11 02:33:48 compute-0 systemd[1]: session-63.scope: Consumed 1.367s CPU time.
Oct 11 02:33:48 compute-0 systemd-logind[804]: Session 63 logged out. Waiting for processes to exit.
Oct 11 02:33:48 compute-0 systemd-logind[804]: Removed session 63.
Oct 11 02:33:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1678: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 47 KiB/s rd, 3.4 KiB/s wr, 63 op/s
Oct 11 02:33:49 compute-0 ceph-mon[191930]: pgmap v1678: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 47 KiB/s rd, 3.4 KiB/s wr, 63 op/s
Oct 11 02:33:49 compute-0 nova_compute[356901]: 2025-10-11 02:33:49.784 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:50 compute-0 podman[443589]: 2025-10-11 02:33:50.230775205 +0000 UTC m=+0.111153563 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, tcib_managed=true, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
Oct 11 02:33:50 compute-0 podman[443588]: 2025-10-11 02:33:50.240097863 +0000 UTC m=+0.126201990 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_managed=true, config_id=multipathd, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3)
Oct 11 02:33:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1679: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 38 KiB/s rd, 2.7 KiB/s wr, 51 op/s
Oct 11 02:33:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e132 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:33:51 compute-0 ceph-mon[191930]: pgmap v1679: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 38 KiB/s rd, 2.7 KiB/s wr, 51 op/s
Oct 11 02:33:52 compute-0 nova_compute[356901]: 2025-10-11 02:33:52.379 2 DEBUG nova.virt.driver [-] Emitting event <LifecycleEvent: 1760150017.3778117, eb750444-4572-4c74-a72a-4955daed4f7b => Stopped> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:33:52 compute-0 nova_compute[356901]: 2025-10-11 02:33:52.380 2 INFO nova.compute.manager [-] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] VM Stopped (Lifecycle Event)
Oct 11 02:33:52 compute-0 nova_compute[356901]: 2025-10-11 02:33:52.406 2 DEBUG nova.compute.manager [None req-7973efda-6cfc-42d9-8f41-9d207cc8822b - - - - - -] [instance: eb750444-4572-4c74-a72a-4955daed4f7b] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:33:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1680: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 16 KiB/s rd, 1.2 KiB/s wr, 23 op/s
Oct 11 02:33:52 compute-0 nova_compute[356901]: 2025-10-11 02:33:52.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:33:52 compute-0 nova_compute[356901]: 2025-10-11 02:33:52.915 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:53 compute-0 ceph-mon[191930]: pgmap v1680: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 16 KiB/s rd, 1.2 KiB/s wr, 23 op/s
Oct 11 02:33:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1681: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 6.8 KiB/s rd, 511 B/s wr, 9 op/s
Oct 11 02:33:54 compute-0 nova_compute[356901]: 2025-10-11 02:33:54.787 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:33:54.861 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:33:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:33:54.862 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:33:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:33:54.863 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:33:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e132 do_prune osdmap full prune enabled
Oct 11 02:33:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e133 e133: 3 total, 3 up, 3 in
Oct 11 02:33:55 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e133: 3 total, 3 up, 3 in
Oct 11 02:33:55 compute-0 ceph-mon[191930]: pgmap v1681: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 6.8 KiB/s rd, 511 B/s wr, 9 op/s
Oct 11 02:33:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e133 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:33:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1683: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 4.7 KiB/s rd, 511 B/s wr, 6 op/s
Oct 11 02:33:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:33:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:33:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:33:56
Oct 11 02:33:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:33:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:33:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['cephfs.cephfs.data', '.rgw.root', 'vms', 'default.rgw.meta', 'images', 'backups', 'default.rgw.control', 'cephfs.cephfs.meta', '.mgr', 'default.rgw.log', 'volumes']
Oct 11 02:33:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
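
The mgr balancer woke, ran in upmap mode against the 0.05 max-misplaced ratio, and prepared 0 of 10 candidate changes: the 321 PGs are already balanced. A hedged sketch of reading the same state from a client; it assumes "ceph balancer status" emits JSON with active/mode fields on this release:

    # Sketch: inspect the balancer state the mgr logged above.
    # Assumes this release's "balancer status" returns JSON.
    import json
    import subprocess

    out = subprocess.run(
        ["ceph", "balancer", "status", "--format", "json"],
        check=True, capture_output=True, text=True,
    ).stdout
    status = json.loads(out)
    print("active:", status.get("active"), "mode:", status.get("mode"))
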
Oct 11 02:33:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:33:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:33:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:33:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:33:56 compute-0 ceph-mon[191930]: osdmap e133: 3 total, 3 up, 3 in
Oct 11 02:33:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:33:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:33:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:33:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:33:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:33:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:33:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:33:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:33:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:33:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:33:57 compute-0 ceph-mon[191930]: pgmap v1683: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 4.7 KiB/s rd, 511 B/s wr, 6 op/s
Oct 11 02:33:57 compute-0 nova_compute[356901]: 2025-10-11 02:33:57.917 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1684: 321 pgs: 321 active+clean; 86 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 6.8 KiB/s rd, 820 KiB/s wr, 10 op/s
Oct 11 02:33:58 compute-0 ceph-mon[191930]: pgmap v1684: 321 pgs: 321 active+clean; 86 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 6.8 KiB/s rd, 820 KiB/s wr, 10 op/s
Oct 11 02:33:59 compute-0 podman[157119]: time="2025-10-11T02:33:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:33:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:33:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:33:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:33:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9072 "" "Go-http-client/1.1"
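
The two GET requests above are the podman_exporter polling the libpod REST API over /run/podman/podman.sock (the socket path appears in that container's config_data earlier in this log). A stdlib-only sketch issuing the same container listing over the unix socket; the "Names"/"State" keys are assumptions about the libpod list schema:

    # Sketch: query the libpod REST API over the podman socket, mirroring
    # the "GET /v4.9.3/libpod/containers/json?all=true" request in the log.
    import http.client
    import json
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        def __init__(self, path):
            super().__init__("localhost")
            self._path = path

        def connect(self):
            # Dial the unix socket instead of TCP.
            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            s.connect(self._path)
            self.sock = s

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    resp = conn.getresponse()
    for c in json.loads(resp.read()):
        print(c["Names"], c["State"])
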
Oct 11 02:33:59 compute-0 nova_compute[356901]: 2025-10-11 02:33:59.791 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:33:59 compute-0 nova_compute[356901]: 2025-10-11 02:33:59.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:33:59 compute-0 nova_compute[356901]: 2025-10-11 02:33:59.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:34:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1685: 321 pgs: 321 active+clean; 93 MiB data, 275 MiB used, 60 GiB / 60 GiB avail; 13 KiB/s rd, 1.6 MiB/s wr, 18 op/s
Oct 11 02:34:00 compute-0 sudo[443625]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:34:00 compute-0 sudo[443625]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:00 compute-0 sudo[443625]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e133 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:34:00 compute-0 sudo[443650]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:34:00 compute-0 sudo[443650]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:00 compute-0 sudo[443650]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:00 compute-0 nova_compute[356901]: 2025-10-11 02:34:00.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:34:00 compute-0 sudo[443675]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:34:00 compute-0 sudo[443675]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:01 compute-0 sudo[443675]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:01 compute-0 sudo[443700]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:34:01 compute-0 sudo[443700]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:01 compute-0 openstack_network_exporter[374316]: ERROR   02:34:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:34:01 compute-0 openstack_network_exporter[374316]: ERROR   02:34:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:34:01 compute-0 openstack_network_exporter[374316]: ERROR   02:34:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:34:01 compute-0 openstack_network_exporter[374316]: ERROR   02:34:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:34:01 compute-0 openstack_network_exporter[374316]: ERROR   02:34:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:34:01 compute-0 ceph-mon[191930]: pgmap v1685: 321 pgs: 321 active+clean; 93 MiB data, 275 MiB used, 60 GiB / 60 GiB avail; 13 KiB/s rd, 1.6 MiB/s wr, 18 op/s
Oct 11 02:34:01 compute-0 sudo[443700]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:01 compute-0 nova_compute[356901]: 2025-10-11 02:34:01.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:34:01 compute-0 nova_compute[356901]: 2025-10-11 02:34:01.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:34:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:34:01 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:34:01 compute-0 nova_compute[356901]: 2025-10-11 02:34:01.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:34:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:34:01 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:34:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:34:01 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:34:01 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 76020ab7-2e51-4d5b-b2f7-a12ccde9e68d does not exist
Oct 11 02:34:01 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 217a0b6f-9f8f-4083-ae7d-45252ed377ae does not exist
Oct 11 02:34:01 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 764accdf-2b12-44b2-af56-e4afbc119c40 does not exist
Oct 11 02:34:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:34:01 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:34:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:34:01 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:34:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:34:01 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:34:02 compute-0 sudo[443755]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:34:02 compute-0 sudo[443755]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:02 compute-0 sudo[443755]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:02 compute-0 sudo[443793]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:34:02 compute-0 sudo[443793]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:02 compute-0 sudo[443793]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:02 compute-0 podman[443779]: 2025-10-11 02:34:02.207153799 +0000 UTC m=+0.132790903 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']})
Oct 11 02:34:02 compute-0 podman[443781]: 2025-10-11 02:34:02.218157903 +0000 UTC m=+0.135972591 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:34:02 compute-0 podman[443780]: 2025-10-11 02:34:02.226648719 +0000 UTC m=+0.127502617 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, architecture=x86_64, build-date=2025-08-20T13:12:41, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, version=9.6, release=1755695350, vendor=Red Hat, Inc., io.openshift.expose-services=, config_id=edpm, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., maintainer=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://catalog.redhat.com/en/search?searchType=containers, distribution-scope=public, io.buildah.version=1.33.7, vcs-type=git, managed_by=edpm_ansible, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, container_name=openstack_network_exporter, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.tags=minimal rhel9, name=ubi9-minimal)
Oct 11 02:34:02 compute-0 sudo[443857]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:34:02 compute-0 sudo[443857]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:02 compute-0 sudo[443857]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:02 compute-0 nova_compute[356901]: 2025-10-11 02:34:02.310 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:34:02 compute-0 nova_compute[356901]: 2025-10-11 02:34:02.310 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:34:02 compute-0 nova_compute[356901]: 2025-10-11 02:34:02.311 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:34:02 compute-0 nova_compute[356901]: 2025-10-11 02:34:02.311 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:34:02 compute-0 sudo[443889]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:34:02 compute-0 sudo[443889]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1686: 321 pgs: 321 active+clean; 93 MiB data, 275 MiB used, 60 GiB / 60 GiB avail; 13 KiB/s rd, 1.6 MiB/s wr, 18 op/s
Oct 11 02:34:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:34:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:34:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:34:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:34:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:34:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:34:02 compute-0 nova_compute[356901]: 2025-10-11 02:34:02.919 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:02 compute-0 podman[443951]: 2025-10-11 02:34:02.921726009 +0000 UTC m=+0.083697037 container create 32c08a346e86a13cb502779320cc48a7950347e10e2056e1e9842d65d95bafb8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_yalow, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS)
Oct 11 02:34:02 compute-0 podman[443951]: 2025-10-11 02:34:02.88554493 +0000 UTC m=+0.047516028 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:34:02 compute-0 systemd[1]: Started libpod-conmon-32c08a346e86a13cb502779320cc48a7950347e10e2056e1e9842d65d95bafb8.scope.
Oct 11 02:34:03 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:34:03 compute-0 podman[443951]: 2025-10-11 02:34:03.059815158 +0000 UTC m=+0.221786226 container init 32c08a346e86a13cb502779320cc48a7950347e10e2056e1e9842d65d95bafb8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_yalow, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.39.3, ceph=True, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:34:03 compute-0 podman[443951]: 2025-10-11 02:34:03.077771322 +0000 UTC m=+0.239742340 container start 32c08a346e86a13cb502779320cc48a7950347e10e2056e1e9842d65d95bafb8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_yalow, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 02:34:03 compute-0 podman[443951]: 2025-10-11 02:34:03.082618885 +0000 UTC m=+0.244589943 container attach 32c08a346e86a13cb502779320cc48a7950347e10e2056e1e9842d65d95bafb8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_yalow, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507)
Oct 11 02:34:03 compute-0 elegant_yalow[443967]: 167 167
Oct 11 02:34:03 compute-0 systemd[1]: libpod-32c08a346e86a13cb502779320cc48a7950347e10e2056e1e9842d65d95bafb8.scope: Deactivated successfully.
Oct 11 02:34:03 compute-0 podman[443951]: 2025-10-11 02:34:03.086908066 +0000 UTC m=+0.248879114 container died 32c08a346e86a13cb502779320cc48a7950347e10e2056e1e9842d65d95bafb8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_yalow, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:34:03 compute-0 systemd[1]: var-lib-containers-storage-overlay-1628096ce693aa0518a9d312431df8edb3231a1579538546e5c882abb7e9a4b1-merged.mount: Deactivated successfully.
Oct 11 02:34:03 compute-0 podman[443951]: 2025-10-11 02:34:03.171874238 +0000 UTC m=+0.333845256 container remove 32c08a346e86a13cb502779320cc48a7950347e10e2056e1e9842d65d95bafb8 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_yalow, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True)
Oct 11 02:34:03 compute-0 systemd[1]: libpod-conmon-32c08a346e86a13cb502779320cc48a7950347e10e2056e1e9842d65d95bafb8.scope: Deactivated successfully.
Oct 11 02:34:03 compute-0 podman[443991]: 2025-10-11 02:34:03.421959271 +0000 UTC m=+0.082082856 container create b995463f7dc2a853c48783c453b34b26f0adef049734098c465d078079369d0b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_dewdney, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 02:34:03 compute-0 podman[443991]: 2025-10-11 02:34:03.392110077 +0000 UTC m=+0.052233762 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:34:03 compute-0 systemd[1]: Started libpod-conmon-b995463f7dc2a853c48783c453b34b26f0adef049734098c465d078079369d0b.scope.
Oct 11 02:34:03 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:34:03 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/23bffc724b801ef7201093a74248bb24ff61076c031348fb8f2daf24d6f7e3cd/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:34:03 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/23bffc724b801ef7201093a74248bb24ff61076c031348fb8f2daf24d6f7e3cd/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:34:03 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/23bffc724b801ef7201093a74248bb24ff61076c031348fb8f2daf24d6f7e3cd/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:34:03 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/23bffc724b801ef7201093a74248bb24ff61076c031348fb8f2daf24d6f7e3cd/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:34:03 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/23bffc724b801ef7201093a74248bb24ff61076c031348fb8f2daf24d6f7e3cd/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:34:03 compute-0 podman[443991]: 2025-10-11 02:34:03.576516703 +0000 UTC m=+0.236640318 container init b995463f7dc2a853c48783c453b34b26f0adef049734098c465d078079369d0b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_dewdney, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3)
Oct 11 02:34:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e133 do_prune osdmap full prune enabled
Oct 11 02:34:03 compute-0 ceph-mon[191930]: pgmap v1686: 321 pgs: 321 active+clean; 93 MiB data, 275 MiB used, 60 GiB / 60 GiB avail; 13 KiB/s rd, 1.6 MiB/s wr, 18 op/s
Oct 11 02:34:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e134 e134: 3 total, 3 up, 3 in
Oct 11 02:34:03 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e134: 3 total, 3 up, 3 in
Oct 11 02:34:03 compute-0 podman[443991]: 2025-10-11 02:34:03.608485957 +0000 UTC m=+0.268609562 container start b995463f7dc2a853c48783c453b34b26f0adef049734098c465d078079369d0b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_dewdney, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507)
Oct 11 02:34:03 compute-0 podman[443991]: 2025-10-11 02:34:03.617367462 +0000 UTC m=+0.277491097 container attach b995463f7dc2a853c48783c453b34b26f0adef049734098c465d078079369d0b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_dewdney, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:34:03 compute-0 nova_compute[356901]: 2025-10-11 02:34:03.634 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:34:03 compute-0 nova_compute[356901]: 2025-10-11 02:34:03.647 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:34:03 compute-0 nova_compute[356901]: 2025-10-11 02:34:03.648 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:34:03 compute-0 nova_compute[356901]: 2025-10-11 02:34:03.648 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:34:03 compute-0 nova_compute[356901]: 2025-10-11 02:34:03.648 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:34:03 compute-0 nova_compute[356901]: 2025-10-11 02:34:03.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:34:03 compute-0 nova_compute[356901]: 2025-10-11 02:34:03.922 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:34:03 compute-0 nova_compute[356901]: 2025-10-11 02:34:03.922 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:34:03 compute-0 nova_compute[356901]: 2025-10-11 02:34:03.922 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:34:03 compute-0 nova_compute[356901]: 2025-10-11 02:34:03.923 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:34:03 compute-0 nova_compute[356901]: 2025-10-11 02:34:03.923 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:34:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:34:04 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3348220794' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:34:04 compute-0 nova_compute[356901]: 2025-10-11 02:34:04.395 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.471s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:34:04 compute-0 nova_compute[356901]: 2025-10-11 02:34:04.502 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:34:04 compute-0 nova_compute[356901]: 2025-10-11 02:34:04.503 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:34:04 compute-0 nova_compute[356901]: 2025-10-11 02:34:04.503 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:34:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1688: 321 pgs: 321 active+clean; 93 MiB data, 275 MiB used, 60 GiB / 60 GiB avail; 20 KiB/s rd, 1.7 MiB/s wr, 27 op/s
Oct 11 02:34:04 compute-0 ceph-mon[191930]: osdmap e134: 3 total, 3 up, 3 in
Oct 11 02:34:04 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3348220794' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:34:04 compute-0 nova_compute[356901]: 2025-10-11 02:34:04.794 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:04 compute-0 trusting_dewdney[444007]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:34:04 compute-0 trusting_dewdney[444007]: --> relative data size: 1.0
Oct 11 02:34:04 compute-0 trusting_dewdney[444007]: --> All data devices are unavailable
Oct 11 02:34:04 compute-0 nova_compute[356901]: 2025-10-11 02:34:04.865 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:34:04 compute-0 nova_compute[356901]: 2025-10-11 02:34:04.866 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3755MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:34:04 compute-0 nova_compute[356901]: 2025-10-11 02:34:04.866 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:34:04 compute-0 nova_compute[356901]: 2025-10-11 02:34:04.866 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:34:04 compute-0 systemd[1]: libpod-b995463f7dc2a853c48783c453b34b26f0adef049734098c465d078079369d0b.scope: Deactivated successfully.
Oct 11 02:34:04 compute-0 systemd[1]: libpod-b995463f7dc2a853c48783c453b34b26f0adef049734098c465d078079369d0b.scope: Consumed 1.183s CPU time.
Oct 11 02:34:04 compute-0 podman[443991]: 2025-10-11 02:34:04.904280602 +0000 UTC m=+1.564404217 container died b995463f7dc2a853c48783c453b34b26f0adef049734098c465d078079369d0b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_dewdney, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:34:04 compute-0 systemd[1]: var-lib-containers-storage-overlay-23bffc724b801ef7201093a74248bb24ff61076c031348fb8f2daf24d6f7e3cd-merged.mount: Deactivated successfully.
Oct 11 02:34:04 compute-0 nova_compute[356901]: 2025-10-11 02:34:04.947 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:34:04 compute-0 nova_compute[356901]: 2025-10-11 02:34:04.948 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:34:04 compute-0 nova_compute[356901]: 2025-10-11 02:34:04.948 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:34:04 compute-0 nova_compute[356901]: 2025-10-11 02:34:04.966 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing inventories for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:804
Oct 11 02:34:04 compute-0 podman[443991]: 2025-10-11 02:34:04.977401977 +0000 UTC m=+1.637525572 container remove b995463f7dc2a853c48783c453b34b26f0adef049734098c465d078079369d0b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_dewdney, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:34:04 compute-0 nova_compute[356901]: 2025-10-11 02:34:04.986 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating ProviderTree inventory for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 from _refresh_and_get_inventory using data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} _refresh_and_get_inventory /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:768
Oct 11 02:34:04 compute-0 nova_compute[356901]: 2025-10-11 02:34:04.986 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating inventory in ProviderTree for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with inventory: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:176
Oct 11 02:34:04 compute-0 systemd[1]: libpod-conmon-b995463f7dc2a853c48783c453b34b26f0adef049734098c465d078079369d0b.scope: Deactivated successfully.
Oct 11 02:34:05 compute-0 nova_compute[356901]: 2025-10-11 02:34:05.002 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing aggregate associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, aggregates: None _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:813
Oct 11 02:34:05 compute-0 sudo[443889]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:05 compute-0 nova_compute[356901]: 2025-10-11 02:34:05.041 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing trait associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, traits: COMPUTE_VOLUME_EXTEND,COMPUTE_NET_VIF_MODEL_VMXNET3,HW_CPU_X86_SSSE3,COMPUTE_RESCUE_BFV,COMPUTE_SOCKET_PCI_NUMA_AFFINITY,COMPUTE_NODE,HW_CPU_X86_SVM,COMPUTE_STORAGE_BUS_SCSI,HW_CPU_X86_FMA3,COMPUTE_GRAPHICS_MODEL_NONE,COMPUTE_NET_VIF_MODEL_RTL8139,HW_CPU_X86_SSE4A,COMPUTE_IMAGE_TYPE_QCOW2,HW_CPU_X86_BMI2,HW_CPU_X86_SSE42,HW_CPU_X86_AVX2,COMPUTE_IMAGE_TYPE_RAW,COMPUTE_VIOMMU_MODEL_VIRTIO,HW_CPU_X86_AESNI,COMPUTE_STORAGE_BUS_FDC,COMPUTE_GRAPHICS_MODEL_VIRTIO,HW_CPU_X86_AMD_SVM,COMPUTE_NET_VIF_MODEL_NE2K_PCI,COMPUTE_ACCELERATORS,HW_CPU_X86_SSE2,COMPUTE_GRAPHICS_MODEL_VGA,HW_CPU_X86_ABM,HW_CPU_X86_AVX,COMPUTE_NET_VIF_MODEL_E1000,COMPUTE_STORAGE_BUS_USB,COMPUTE_NET_ATTACH_INTERFACE,HW_CPU_X86_MMX,COMPUTE_SECURITY_TPM_2_0,COMPUTE_IMAGE_TYPE_ISO,HW_CPU_X86_SSE41,COMPUTE_IMAGE_TYPE_AKI,COMPUTE_IMAGE_TYPE_AMI,COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG,COMPUTE_DEVICE_TAGGING,COMPUTE_SECURITY_UEFI_SECURE_BOOT,COMPUTE_TRUSTED_CERTS,COMPUTE_NET_VIF_MODEL_VIRTIO,COMPUTE_VIOMMU_MODEL_INTEL,COMPUTE_STORAGE_BUS_SATA,HW_CPU_X86_SSE,COMPUTE_STORAGE_BUS_VIRTIO,COMPUTE_NET_VIF_MODEL_PCNET,COMPUTE_GRAPHICS_MODEL_CIRRUS,HW_CPU_X86_SHA,HW_CPU_X86_BMI,COMPUTE_NET_VIF_MODEL_E1000E,COMPUTE_NET_VIF_MODEL_SPAPR_VLAN,COMPUTE_VOLUME_ATTACH_WITH_TAG,COMPUTE_GRAPHICS_MODEL_BOCHS,COMPUTE_VIOMMU_MODEL_AUTO,COMPUTE_IMAGE_TYPE_ARI,HW_CPU_X86_CLMUL,COMPUTE_STORAGE_BUS_IDE,COMPUTE_VOLUME_MULTI_ATTACH,HW_CPU_X86_F16C,COMPUTE_SECURITY_TPM_1_2 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:825
Oct 11 02:34:05 compute-0 nova_compute[356901]: 2025-10-11 02:34:05.099 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:34:05 compute-0 sudo[444074]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:34:05 compute-0 sudo[444074]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:05 compute-0 sudo[444074]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:05 compute-0 sudo[444100]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:34:05 compute-0 sudo[444100]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:05 compute-0 sudo[444100]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:05 compute-0 sudo[444125]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:34:05 compute-0 sudo[444125]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:05 compute-0 sudo[444125]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:05 compute-0 sudo[444169]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:34:05 compute-0 sudo[444169]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:34:05 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3717315881' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:34:05 compute-0 nova_compute[356901]: 2025-10-11 02:34:05.555 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.456s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:34:05 compute-0 nova_compute[356901]: 2025-10-11 02:34:05.566 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:34:05 compute-0 nova_compute[356901]: 2025-10-11 02:34:05.585 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:34:05 compute-0 nova_compute[356901]: 2025-10-11 02:34:05.610 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:34:05 compute-0 nova_compute[356901]: 2025-10-11 02:34:05.611 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.744s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:34:05 compute-0 ceph-mon[191930]: pgmap v1688: 321 pgs: 321 active+clean; 93 MiB data, 275 MiB used, 60 GiB / 60 GiB avail; 20 KiB/s rd, 1.7 MiB/s wr, 27 op/s
Oct 11 02:34:05 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3717315881' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:34:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e134 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:34:05 compute-0 podman[444237]: 2025-10-11 02:34:05.878362259 +0000 UTC m=+0.069266648 container create 4398f835f8916c36d29f2556b15c4c775972dda03f94870fe79b2a715428598f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_archimedes, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 02:34:05 compute-0 systemd[1]: Started libpod-conmon-4398f835f8916c36d29f2556b15c4c775972dda03f94870fe79b2a715428598f.scope.
Oct 11 02:34:05 compute-0 podman[444237]: 2025-10-11 02:34:05.856837436 +0000 UTC m=+0.047741855 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:34:05 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:34:06 compute-0 podman[444237]: 2025-10-11 02:34:06.019717593 +0000 UTC m=+0.210622082 container init 4398f835f8916c36d29f2556b15c4c775972dda03f94870fe79b2a715428598f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_archimedes, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:34:06 compute-0 podman[444237]: 2025-10-11 02:34:06.037889701 +0000 UTC m=+0.228794140 container start 4398f835f8916c36d29f2556b15c4c775972dda03f94870fe79b2a715428598f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_archimedes, org.label-schema.build-date=20250507, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:34:06 compute-0 podman[444237]: 2025-10-11 02:34:06.043999192 +0000 UTC m=+0.234903691 container attach 4398f835f8916c36d29f2556b15c4c775972dda03f94870fe79b2a715428598f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_archimedes, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:34:06 compute-0 vigilant_archimedes[444253]: 167 167
Oct 11 02:34:06 compute-0 systemd[1]: libpod-4398f835f8916c36d29f2556b15c4c775972dda03f94870fe79b2a715428598f.scope: Deactivated successfully.
Oct 11 02:34:06 compute-0 podman[444237]: 2025-10-11 02:34:06.048420861 +0000 UTC m=+0.239325290 container died 4398f835f8916c36d29f2556b15c4c775972dda03f94870fe79b2a715428598f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_archimedes, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 02:34:06 compute-0 systemd[1]: var-lib-containers-storage-overlay-09a4f72b91cb5917ed60c290e808c937c0a322c5fed7edfa59bab71bf593059e-merged.mount: Deactivated successfully.
Oct 11 02:34:06 compute-0 podman[444237]: 2025-10-11 02:34:06.12668201 +0000 UTC m=+0.317586449 container remove 4398f835f8916c36d29f2556b15c4c775972dda03f94870fe79b2a715428598f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_archimedes, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:34:06 compute-0 systemd[1]: libpod-conmon-4398f835f8916c36d29f2556b15c4c775972dda03f94870fe79b2a715428598f.scope: Deactivated successfully.
Oct 11 02:34:06 compute-0 podman[444276]: 2025-10-11 02:34:06.375831446 +0000 UTC m=+0.090921446 container create efbd3fdb1e732dd157ed1ece0572182e27ec909071d7efe3bcf7942736586993 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_kapitsa, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:34:06 compute-0 podman[444276]: 2025-10-11 02:34:06.345674972 +0000 UTC m=+0.060765012 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:34:06 compute-0 systemd[1]: Started libpod-conmon-efbd3fdb1e732dd157ed1ece0572182e27ec909071d7efe3bcf7942736586993.scope.
Oct 11 02:34:06 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:34:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c599497fc44e60cef5b8b67289fb0102018b9c174ad52ff438a74faefe1991b9/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:34:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c599497fc44e60cef5b8b67289fb0102018b9c174ad52ff438a74faefe1991b9/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:34:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c599497fc44e60cef5b8b67289fb0102018b9c174ad52ff438a74faefe1991b9/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:34:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c599497fc44e60cef5b8b67289fb0102018b9c174ad52ff438a74faefe1991b9/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:34:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1689: 321 pgs: 321 active+clean; 93 MiB data, 275 MiB used, 60 GiB / 60 GiB avail; 18 KiB/s rd, 1.6 MiB/s wr, 24 op/s
Oct 11 02:34:06 compute-0 podman[444276]: 2025-10-11 02:34:06.537417922 +0000 UTC m=+0.252507922 container init efbd3fdb1e732dd157ed1ece0572182e27ec909071d7efe3bcf7942736586993 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_kapitsa, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:34:06 compute-0 podman[444276]: 2025-10-11 02:34:06.557027836 +0000 UTC m=+0.272117786 container start efbd3fdb1e732dd157ed1ece0572182e27ec909071d7efe3bcf7942736586993 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_kapitsa, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:34:06 compute-0 podman[444276]: 2025-10-11 02:34:06.563597032 +0000 UTC m=+0.278687082 container attach efbd3fdb1e732dd157ed1ece0572182e27ec909071d7efe3bcf7942736586993 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_kapitsa, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, ceph=True, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Oct 11 02:34:06 compute-0 nova_compute[356901]: 2025-10-11 02:34:06.611 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:34:06 compute-0 nova_compute[356901]: 2025-10-11 02:34:06.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0005065635784984046 of space, bias 1.0, pg target 0.15196907354952138 quantized to 32 (current 32)
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:34:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
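[annotation] The pg target printed for each pool above is reproducible from the logged inputs: usage ratio x bias x the cluster PG budget. A minimal sketch follows, assuming 3 OSDs (as shown in the osdmap below) and the default mon_target_pg_per_osd of 100, i.e. a budget of 300 PGs; the module's power-of-two quantization and its "only change on a large difference" damping are deliberately not modelled here.

    # Hedged sketch: recompute the raw pg targets the pg_autoscaler logged above.
    # Assumption: 3 OSDs * mon_target_pg_per_osd (default 100) = 300 PG budget.
    POOL_PG_BUDGET = 3 * 100

    def raw_pg_target(usage_ratio: float, bias: float) -> float:
        # usage_ratio and bias are taken verbatim from the log lines
        return usage_ratio * bias * POOL_PG_BUDGET

    print(raw_pg_target(7.185749983720779e-06, 1.0))   # -> 0.0021557... ('.mgr')
    print(raw_pg_target(0.0005513950275118838, 1.0))   # -> 0.165418...  ('vms')
    print(raw_pg_target(5.087256625643029e-07, 4.0))   # -> 0.000610...  ('cephfs.cephfs.meta')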
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]: {
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:     "0": [
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:         {
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "devices": [
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "/dev/loop3"
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             ],
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "lv_name": "ceph_lv0",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "lv_size": "21470642176",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "name": "ceph_lv0",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "tags": {
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.cluster_name": "ceph",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.crush_device_class": "",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.encrypted": "0",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.osd_id": "0",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.type": "block",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.vdo": "0"
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             },
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "type": "block",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "vg_name": "ceph_vg0"
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:         }
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:     ],
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:     "1": [
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:         {
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "devices": [
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "/dev/loop4"
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             ],
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "lv_name": "ceph_lv1",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "lv_size": "21470642176",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "name": "ceph_lv1",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "tags": {
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.cluster_name": "ceph",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.crush_device_class": "",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.encrypted": "0",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.osd_id": "1",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.type": "block",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.vdo": "0"
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             },
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "type": "block",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "vg_name": "ceph_vg1"
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:         }
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:     ],
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:     "2": [
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:         {
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "devices": [
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "/dev/loop5"
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             ],
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "lv_name": "ceph_lv2",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "lv_size": "21470642176",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "name": "ceph_lv2",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "tags": {
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.cluster_name": "ceph",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.crush_device_class": "",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.encrypted": "0",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.osd_id": "2",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.type": "block",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:                 "ceph.vdo": "0"
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             },
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "type": "block",
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:             "vg_name": "ceph_vg2"
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:         }
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]:     ]
Oct 11 02:34:07 compute-0 mystifying_kapitsa[444292]: }
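[annotation] The JSON emitted by the mystifying_kapitsa container above is `ceph-volume lvm list --format json` output: a map of OSD id to a list of LV records. A minimal sketch for flattening it into an OSD-to-device table, assuming the stdout has been saved to a file (the name lvm_list.json is a hypothetical stand-in):

    # Hedged sketch: flatten the `ceph-volume lvm list --format json` payload
    # captured above into an "osd id -> backing LV and device" listing.
    import json

    with open("lvm_list.json") as f:   # hypothetical capture of the stdout above
        lvm = json.load(f)

    for osd_id, lvs in sorted(lvm.items(), key=lambda kv: int(kv[0])):
        for lv in lvs:
            print(f"osd.{osd_id}: {lv['lv_path']} "
                  f"(type={lv['type']}, devices={','.join(lv['devices'])})")
    # Expected, from the data above:
    # osd.0: /dev/ceph_vg0/ceph_lv0 (type=block, devices=/dev/loop3)
    # osd.1: /dev/ceph_vg1/ceph_lv1 (type=block, devices=/dev/loop4)
    # osd.2: /dev/ceph_vg2/ceph_lv2 (type=block, devices=/dev/loop5)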
Oct 11 02:34:07 compute-0 systemd[1]: libpod-efbd3fdb1e732dd157ed1ece0572182e27ec909071d7efe3bcf7942736586993.scope: Deactivated successfully.
Oct 11 02:34:07 compute-0 podman[444276]: 2025-10-11 02:34:07.479111849 +0000 UTC m=+1.194201809 container died efbd3fdb1e732dd157ed1ece0572182e27ec909071d7efe3bcf7942736586993 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_kapitsa, io.buildah.version=1.39.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 02:34:07 compute-0 systemd[1]: var-lib-containers-storage-overlay-c599497fc44e60cef5b8b67289fb0102018b9c174ad52ff438a74faefe1991b9-merged.mount: Deactivated successfully.
Oct 11 02:34:07 compute-0 podman[444276]: 2025-10-11 02:34:07.580032187 +0000 UTC m=+1.295122157 container remove efbd3fdb1e732dd157ed1ece0572182e27ec909071d7efe3bcf7942736586993 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_kapitsa, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:34:07 compute-0 systemd[1]: libpod-conmon-efbd3fdb1e732dd157ed1ece0572182e27ec909071d7efe3bcf7942736586993.scope: Deactivated successfully.
Oct 11 02:34:07 compute-0 ceph-mon[191930]: pgmap v1689: 321 pgs: 321 active+clean; 93 MiB data, 275 MiB used, 60 GiB / 60 GiB avail; 18 KiB/s rd, 1.6 MiB/s wr, 24 op/s
Oct 11 02:34:07 compute-0 sudo[444169]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:07 compute-0 sudo[444311]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:34:07 compute-0 sudo[444311]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:07 compute-0 sudo[444311]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:07 compute-0 sudo[444336]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:34:07 compute-0 sudo[444336]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:07 compute-0 sudo[444336]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:07 compute-0 nova_compute[356901]: 2025-10-11 02:34:07.922 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:08 compute-0 sudo[444361]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:34:08 compute-0 sudo[444361]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:08 compute-0 sudo[444361]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:08 compute-0 sudo[444386]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:34:08 compute-0 sudo[444386]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1690: 321 pgs: 321 active+clean; 85 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 18 KiB/s rd, 774 KiB/s wr, 25 op/s
Oct 11 02:34:08 compute-0 podman[444449]: 2025-10-11 02:34:08.744147113 +0000 UTC m=+0.088039516 container create 971bd4f5dbf58a109f391ac1a16d2e4b23937c1f8c894d0af78ff83e13ec85bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_feynman, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:34:08 compute-0 podman[444449]: 2025-10-11 02:34:08.712523664 +0000 UTC m=+0.056416107 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:34:08 compute-0 systemd[1]: Started libpod-conmon-971bd4f5dbf58a109f391ac1a16d2e4b23937c1f8c894d0af78ff83e13ec85bc.scope.
Oct 11 02:34:08 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:34:08 compute-0 nova_compute[356901]: 2025-10-11 02:34:08.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:34:08 compute-0 podman[444462]: 2025-10-11 02:34:08.920651696 +0000 UTC m=+0.116645071 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, release-0.7.12=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, name=ubi9, version=9.4, com.redhat.component=ubi9-container, io.k8s.display-name=Red Hat Universal Base Image 9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, config_id=edpm, managed_by=edpm_ansible, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, distribution-scope=public, maintainer=Red Hat, Inc., summary=Provides the latest release of Red Hat Universal Base Image 9., container_name=kepler, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.29.0, release=1214.1726694543, io.openshift.expose-services=, vendor=Red Hat, Inc., vcs-type=git, build-date=2024-09-18T21:23:30, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:34:08 compute-0 podman[444449]: 2025-10-11 02:34:08.929654438 +0000 UTC m=+0.273546871 container init 971bd4f5dbf58a109f391ac1a16d2e4b23937c1f8c894d0af78ff83e13ec85bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_feynman, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:34:08 compute-0 podman[444449]: 2025-10-11 02:34:08.944698619 +0000 UTC m=+0.288590982 container start 971bd4f5dbf58a109f391ac1a16d2e4b23937c1f8c894d0af78ff83e13ec85bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_feynman, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default)
Oct 11 02:34:08 compute-0 podman[444449]: 2025-10-11 02:34:08.949324884 +0000 UTC m=+0.293217307 container attach 971bd4f5dbf58a109f391ac1a16d2e4b23937c1f8c894d0af78ff83e13ec85bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_feynman, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 02:34:08 compute-0 competent_feynman[444474]: 167 167
Oct 11 02:34:08 compute-0 systemd[1]: libpod-971bd4f5dbf58a109f391ac1a16d2e4b23937c1f8c894d0af78ff83e13ec85bc.scope: Deactivated successfully.
Oct 11 02:34:08 compute-0 podman[444449]: 2025-10-11 02:34:08.955749755 +0000 UTC m=+0.299642158 container died 971bd4f5dbf58a109f391ac1a16d2e4b23937c1f8c894d0af78ff83e13ec85bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_feynman, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 02:34:08 compute-0 systemd[1]: var-lib-containers-storage-overlay-9bbd551b3ac6d5491fbb37080f489423bcb8ded4556d5dec81b0d8b023a8346e-merged.mount: Deactivated successfully.
Oct 11 02:34:09 compute-0 podman[444449]: 2025-10-11 02:34:09.018856379 +0000 UTC m=+0.362748752 container remove 971bd4f5dbf58a109f391ac1a16d2e4b23937c1f8c894d0af78ff83e13ec85bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_feynman, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:34:09 compute-0 systemd[1]: libpod-conmon-971bd4f5dbf58a109f391ac1a16d2e4b23937c1f8c894d0af78ff83e13ec85bc.scope: Deactivated successfully.
Oct 11 02:34:09 compute-0 podman[444506]: 2025-10-11 02:34:09.296557619 +0000 UTC m=+0.075268906 container create 953067dad746e25aa5f65a5ba8f6e32bc9cc20ca67cc0e270fad0929e208d4ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_ritchie, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:34:09 compute-0 podman[444506]: 2025-10-11 02:34:09.27196908 +0000 UTC m=+0.050680367 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:34:09 compute-0 systemd[1]: Started libpod-conmon-953067dad746e25aa5f65a5ba8f6e32bc9cc20ca67cc0e270fad0929e208d4ac.scope.
Oct 11 02:34:09 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:34:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/26d62bb22da9a53a23ad508383b57cabe0614b23c97c4f102e6101a6cb20e4a3/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:34:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/26d62bb22da9a53a23ad508383b57cabe0614b23c97c4f102e6101a6cb20e4a3/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:34:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/26d62bb22da9a53a23ad508383b57cabe0614b23c97c4f102e6101a6cb20e4a3/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:34:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/26d62bb22da9a53a23ad508383b57cabe0614b23c97c4f102e6101a6cb20e4a3/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:34:09 compute-0 podman[444506]: 2025-10-11 02:34:09.451291221 +0000 UTC m=+0.230002508 container init 953067dad746e25aa5f65a5ba8f6e32bc9cc20ca67cc0e270fad0929e208d4ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_ritchie, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3)
Oct 11 02:34:09 compute-0 podman[444506]: 2025-10-11 02:34:09.467583691 +0000 UTC m=+0.246294968 container start 953067dad746e25aa5f65a5ba8f6e32bc9cc20ca67cc0e270fad0929e208d4ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_ritchie, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0)
Oct 11 02:34:09 compute-0 podman[444506]: 2025-10-11 02:34:09.472630249 +0000 UTC m=+0.251341526 container attach 953067dad746e25aa5f65a5ba8f6e32bc9cc20ca67cc0e270fad0929e208d4ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_ritchie, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, OSD_FLAVOR=default)
Oct 11 02:34:09 compute-0 ceph-mon[191930]: pgmap v1690: 321 pgs: 321 active+clean; 85 MiB data, 267 MiB used, 60 GiB / 60 GiB avail; 18 KiB/s rd, 774 KiB/s wr, 25 op/s
Oct 11 02:34:09 compute-0 nova_compute[356901]: 2025-10-11 02:34:09.797 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1691: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 18 KiB/s rd, 1.4 KiB/s wr, 25 op/s
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]: {
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:         "osd_id": 1,
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:         "type": "bluestore"
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:     },
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:         "osd_id": 2,
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:         "type": "bluestore"
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:     },
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:         "osd_id": 0,
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:         "type": "bluestore"
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]:     }
Oct 11 02:34:10 compute-0 vigilant_ritchie[444523]: }
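[annotation] This second payload, produced by the `ceph-volume ... raw list --format json` run that cephadm launched at 02:34:08, is keyed by OSD uuid rather than OSD id. A hedged sketch for cross-checking it against the earlier lvm listing by joining on osd_fsid/osd_uuid (file names again hypothetical stand-ins for the captured stdout):

    # Hedged sketch: join the two inventories shown above on the OSD fsid.
    import json

    raw = json.load(open("raw_list.json"))   # osd_uuid -> record
    lvm = json.load(open("lvm_list.json"))   # osd_id   -> [lv records]

    for osd_id, lvs in sorted(lvm.items(), key=lambda kv: int(kv[0])):
        fsid = lvs[0]["tags"]["ceph.osd_fsid"]
        rec = raw.get(fsid)
        assert rec and rec["osd_id"] == int(osd_id), f"mismatch for osd.{osd_id}"
        print(f"osd.{osd_id} ({fsid}): {rec['device']} [{rec['type']}]")
    # e.g. osd.1 (6af45214-...): /dev/mapper/ceph_vg1-ceph_lv1 [bluestore]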
Oct 11 02:34:10 compute-0 systemd[1]: libpod-953067dad746e25aa5f65a5ba8f6e32bc9cc20ca67cc0e270fad0929e208d4ac.scope: Deactivated successfully.
Oct 11 02:34:10 compute-0 podman[444506]: 2025-10-11 02:34:10.705096324 +0000 UTC m=+1.483807641 container died 953067dad746e25aa5f65a5ba8f6e32bc9cc20ca67cc0e270fad0929e208d4ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_ritchie, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:34:10 compute-0 systemd[1]: libpod-953067dad746e25aa5f65a5ba8f6e32bc9cc20ca67cc0e270fad0929e208d4ac.scope: Consumed 1.230s CPU time.
Oct 11 02:34:10 compute-0 systemd[1]: var-lib-containers-storage-overlay-26d62bb22da9a53a23ad508383b57cabe0614b23c97c4f102e6101a6cb20e4a3-merged.mount: Deactivated successfully.
Oct 11 02:34:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e134 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:34:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e134 do_prune osdmap full prune enabled
Oct 11 02:34:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 e135: 3 total, 3 up, 3 in
Oct 11 02:34:10 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e135: 3 total, 3 up, 3 in
Oct 11 02:34:10 compute-0 podman[444506]: 2025-10-11 02:34:10.805498736 +0000 UTC m=+1.584210023 container remove 953067dad746e25aa5f65a5ba8f6e32bc9cc20ca67cc0e270fad0929e208d4ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigilant_ritchie, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2)
Oct 11 02:34:10 compute-0 systemd[1]: libpod-conmon-953067dad746e25aa5f65a5ba8f6e32bc9cc20ca67cc0e270fad0929e208d4ac.scope: Deactivated successfully.
Oct 11 02:34:10 compute-0 sudo[444386]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:34:10 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:34:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:34:10 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:34:10 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev e0b399a7-131f-4ee1-aae0-1c13704d39ea does not exist
Oct 11 02:34:10 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 77d2dcd7-6885-4a4e-a8e5-fb2328f02397 does not exist
Oct 11 02:34:11 compute-0 sudo[444566]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:34:11 compute-0 sudo[444566]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:11 compute-0 sudo[444566]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:11 compute-0 sudo[444591]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:34:11 compute-0 sudo[444591]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:34:11 compute-0 sudo[444591]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:11 compute-0 ceph-mon[191930]: pgmap v1691: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 18 KiB/s rd, 1.4 KiB/s wr, 25 op/s
Oct 11 02:34:11 compute-0 ceph-mon[191930]: osdmap e135: 3 total, 3 up, 3 in
Oct 11 02:34:11 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:34:11 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:34:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1693: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 20 KiB/s rd, 1.5 KiB/s wr, 26 op/s
Oct 11 02:34:12 compute-0 ceph-mon[191930]: pgmap v1693: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 20 KiB/s rd, 1.5 KiB/s wr, 26 op/s
Oct 11 02:34:12 compute-0 nova_compute[356901]: 2025-10-11 02:34:12.925 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:13 compute-0 sshd-session[444616]: Accepted publickey for zuul from 38.102.83.70 port 54730 ssh2: RSA SHA256:sxgyqRujXfGvMV2Eq7ZlGcFGCGFr/dtz6dk2ZJwy3W4
Oct 11 02:34:13 compute-0 systemd-logind[804]: New session 64 of user zuul.
Oct 11 02:34:13 compute-0 systemd[1]: Started Session 64 of User zuul.
Oct 11 02:34:13 compute-0 sshd-session[444616]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 02:34:14 compute-0 sudo[444793]:     zuul : TTY=pts/1 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bpkrrrxkphxgdbbwjmiipsbodaqrogcr ; KUBECONFIG=/home/zuul/.crc/machines/crc/kubeconfig PATH=/home/zuul/.crc/bin:/home/zuul/.crc/bin/oc:/home/zuul/bin:/home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760150053.2524-58193-230216099860844/AnsiballZ_command.py'
Oct 11 02:34:14 compute-0 sudo[444793]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:34:14 compute-0 python3[444795]: ansible-ansible.legacy.command Invoked with _raw_params=podman ps -a --format "{{.Names}} {{.Status}}" | grep node_exporter _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:34:14 compute-0 sudo[444793]: pam_unix(sudo:session): session closed for user root
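[annotation] The Ansible task above probes container health with a shell pipeline (`podman ps -a --format ... | grep node_exporter`). A hedged equivalent without the shell, for reference; it assumes podman is on PATH and the caller has privilege to list the containers:

    # Hedged sketch: the same node_exporter status probe, via subprocess
    # instead of a grep pipeline.
    import subprocess

    out = subprocess.run(
        ["podman", "ps", "-a", "--format", "{{.Names}} {{.Status}}"],
        capture_output=True, text=True, check=True,
    ).stdout

    matches = [line for line in out.splitlines() if "node_exporter" in line]
    print("\n".join(matches) or "node_exporter container not found")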
Oct 11 02:34:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1694: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s rd, 716 B/s wr, 12 op/s
Oct 11 02:34:14 compute-0 nova_compute[356901]: 2025-10-11 02:34:14.800 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:15 compute-0 ceph-mon[191930]: pgmap v1694: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s rd, 716 B/s wr, 12 op/s
Oct 11 02:34:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:34:16 compute-0 podman[444835]: 2025-10-11 02:34:16.253062114 +0000 UTC m=+0.121179633 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:34:16 compute-0 podman[444837]: 2025-10-11 02:34:16.270060626 +0000 UTC m=+0.122859246 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm, container_name=ceilometer_agent_compute, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 02:34:16 compute-0 podman[444838]: 2025-10-11 02:34:16.273981209 +0000 UTC m=+0.123641891 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_metadata_agent, org.label-schema.build-date=20251009)
Oct 11 02:34:16 compute-0 podman[444836]: 2025-10-11 02:34:16.309922903 +0000 UTC m=+0.183460752 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_id=ovn_controller, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.build-date=20251009)
Oct 11 02:34:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1695: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 8.4 KiB/s rd, 716 B/s wr, 12 op/s
Oct 11 02:34:17 compute-0 ceph-mon[191930]: pgmap v1695: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 8.4 KiB/s rd, 716 B/s wr, 12 op/s
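The pgmap lines repeat every couple of seconds with the same shape (the throughput tail is present only when there is I/O). A small parsing sketch, with the pattern inferred from the lines above rather than from any documented Ceph format:

    import re

    line = ("pgmap v1695: 321 pgs: 321 active+clean; 78 MiB data, "
            "259 MiB used, 60 GiB / 60 GiB avail; 8.4 KiB/s rd, 716 B/s wr, 12 op/s")

    # Pattern inferred from the journal lines above; the rd/wr/op tail is optional.
    m = re.match(
        r"pgmap v(?P<version>\d+): (?P<pgs>\d+) pgs: (?P<states>[^;]+); "
        r"(?P<data>\S+ \S+) data, (?P<used>\S+ \S+) used, "
        r"(?P<avail>\S+ \S+) / (?P<total>\S+ \S+) avail",
        line,
    )
    if m:
        print(m.group("version"), m.group("pgs"), m.group("states").strip())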
Oct 11 02:34:17 compute-0 nova_compute[356901]: 2025-10-11 02:34:17.929 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1696: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 5.9 KiB/s rd, 307 B/s wr, 7 op/s
Oct 11 02:34:19 compute-0 ceph-mon[191930]: pgmap v1696: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 5.9 KiB/s rd, 307 B/s wr, 7 op/s
Oct 11 02:34:19 compute-0 nova_compute[356901]: 2025-10-11 02:34:19.802 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
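These recurring [POLLIN] DEBUG lines are ovsdbapp's OVS IDL event loop waking up whenever the OVSDB connection's fd becomes readable; ovs/poller.py (the path in the message) logs each wakeup. A minimal sketch of the underlying python-ovs poller pattern, with the fd and loop body purely illustrative:

    import select
    from ovs import poller  # python-ovs, the package the logged path points at

    def wait_readable(fd):
        # Register interest in POLLIN on the IDL's socket fd, then block;
        # ovs.poller logs "[POLLIN] on fd N" at DEBUG when it wakes up.
        p = poller.Poller()
        p.fd_wait(fd, select.POLLIN)
        p.block()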
Oct 11 02:34:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1697: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:34:21 compute-0 podman[444920]: 2025-10-11 02:34:21.222190703 +0000 UTC m=+0.098778052 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, managed_by=edpm_ansible, config_id=iscsid, container_name=iscsid, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:34:21 compute-0 podman[444919]: 2025-10-11 02:34:21.238633427 +0000 UTC m=+0.122731671 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:34:21 compute-0 ceph-mon[191930]: pgmap v1697: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:22 compute-0 sudo[445130]:     zuul : TTY=pts/1 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gpctvhjuyassypkkbdhqqpbauibazhkc ; KUBECONFIG=/home/zuul/.crc/machines/crc/kubeconfig PATH=/home/zuul/.crc/bin:/home/zuul/.crc/bin/oc:/home/zuul/bin:/home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760150061.5653744-58355-74890497152215/AnsiballZ_command.py'
Oct 11 02:34:22 compute-0 sudo[445130]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:34:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1698: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:22 compute-0 python3[445132]: ansible-ansible.legacy.command Invoked with _raw_params=podman ps -a --format "{{.Names}} {{.Status}}" | grep podman_exporter _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
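The Zuul job polls container state through Ansible's command module with a shell pipeline. The same check, sketched in Python without the grep dependency (container name taken from the invocation above):

    import subprocess

    # Equivalent of: podman ps -a --format "{{.Names}} {{.Status}}" | grep podman_exporter
    out = subprocess.run(
        ["podman", "ps", "-a", "--format", "{{.Names}} {{.Status}}"],
        capture_output=True, text=True, check=True,
    ).stdout
    matches = [l for l in out.splitlines() if "podman_exporter" in l]
    print("\n".join(matches))  # e.g. "podman_exporter Up 2 hours (healthy)"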
Oct 11 02:34:22 compute-0 sudo[445130]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:22 compute-0 nova_compute[356901]: 2025-10-11 02:34:22.932 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:23 compute-0 ceph-mon[191930]: pgmap v1698: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1699: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:24 compute-0 nova_compute[356901]: 2025-10-11 02:34:24.804 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:25 compute-0 ceph-mon[191930]: pgmap v1699: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:34:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1700: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:34:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:34:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:34:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:34:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:34:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:34:26 compute-0 ceph-mon[191930]: pgmap v1700: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:34:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/4038117081' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:34:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:34:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/4038117081' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:34:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/4038117081' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:34:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/4038117081' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
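The audit entries show client.openstack (the OpenStack Ceph client at 192.168.122.10) polling pool capacity with paired df / osd pool get-quota mon commands. A sketch of issuing the same df call through the python-rados binding, assuming a readable ceph.conf and a keyring for client.openstack:

    import json
    import rados

    cluster = rados.Rados(conffile="/etc/ceph/ceph.conf", name="client.openstack")
    cluster.connect()
    # Exactly the command seen in the audit log: {"prefix":"df","format":"json"}
    ret, outbuf, errs = cluster.mon_command(
        json.dumps({"prefix": "df", "format": "json"}), b"")
    if ret == 0:
        print(json.loads(outbuf)["stats"]["total_avail_bytes"])
    cluster.shutdown()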
Oct 11 02:34:27 compute-0 nova_compute[356901]: 2025-10-11 02:34:27.934 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1701: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:28 compute-0 ceph-mon[191930]: pgmap v1701: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:29 compute-0 podman[157119]: time="2025-10-11T02:34:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:34:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:34:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:34:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:34:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9058 "" "Go-http-client/1.1"
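The GET /v4.9.3/libpod/... access-log entries are the libpod REST API being scraped over the podman service socket (the podman_exporter config later in this log sets CONTAINER_HOST=unix:///run/podman/podman.sock). A sketch of the same list-containers call over that socket using only the standard library:

    import http.client
    import json
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        """HTTP over an AF_UNIX socket (the podman service socket)."""
        def __init__(self, socket_path):
            super().__init__("localhost")
            self.socket_path = socket_path

        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self.socket_path)

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    # Same endpoint as the access-log line above.
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    for c in json.loads(conn.getresponse().read()):
        print(c["Names"][0], c["State"])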
Oct 11 02:34:29 compute-0 nova_compute[356901]: 2025-10-11 02:34:29.808 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1702: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:34:31 compute-0 openstack_network_exporter[374316]: ERROR   02:34:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:34:31 compute-0 openstack_network_exporter[374316]: ERROR   02:34:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:34:31 compute-0 openstack_network_exporter[374316]: ERROR   02:34:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:34:31 compute-0 openstack_network_exporter[374316]: ERROR   02:34:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:34:31 compute-0 openstack_network_exporter[374316]: ERROR   02:34:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
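openstack_network_exporter locates OVS/OVN daemons by their control sockets under the mounted /run/openvswitch and /run/ovn directories; ovn-northd does not run on a compute node (it lives with the control plane), so this lookup fails on every scrape interval. The failure mode, sketched with the conventional <daemon>.<pid>.ctl naming (inferred, not taken from the exporter's source):

    import glob

    # ovs-appctl-style lookup: <name>.<pid>.ctl next to the daemon's pidfile.
    def find_ctl(run_dir, daemon):
        hits = glob.glob(f"{run_dir}/{daemon}.*.ctl")
        if not hits:
            raise FileNotFoundError(f"no control socket files found for {daemon}")
        return hits[0]

    find_ctl("/run/ovn", "ovn-northd")  # raises on a compute-only node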
Oct 11 02:34:31 compute-0 ceph-mon[191930]: pgmap v1702: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:32 compute-0 sudo[445386]:     zuul : TTY=pts/1 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yaqyhniqkggzvvskawmcleqbwescqkeg ; KUBECONFIG=/home/zuul/.crc/machines/crc/kubeconfig PATH=/home/zuul/.crc/bin:/home/zuul/.crc/bin/oc:/home/zuul/bin:/home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760150071.6002705-58507-180646017354773/AnsiballZ_command.py'
Oct 11 02:34:32 compute-0 sudo[445386]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:34:32 compute-0 podman[445318]: 2025-10-11 02:34:32.477926342 +0000 UTC m=+0.135520956 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_id=edpm, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS)
Oct 11 02:34:32 compute-0 podman[445320]: 2025-10-11 02:34:32.506727799 +0000 UTC m=+0.152556856 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:34:32 compute-0 podman[445319]: 2025-10-11 02:34:32.522112409 +0000 UTC m=+0.174515564 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, build-date=2025-08-20T13:12:41, container_name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, managed_by=edpm_ansible, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.expose-services=, release=1755695350, architecture=x86_64, io.openshift.tags=minimal rhel9, url=https://catalog.redhat.com/en/search?searchType=containers, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, version=9.6, vendor=Red Hat, Inc., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, config_id=edpm, distribution-scope=public, io.buildah.version=1.33.7, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-minimal-container, maintainer=Red Hat, Inc., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9.)
Oct 11 02:34:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1703: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:32 compute-0 python3[445407]: ansible-ansible.legacy.command Invoked with _raw_params=podman ps -a --format "{{.Names}} {{.Status}}" | grep kepler _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:34:32 compute-0 sudo[445386]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:32 compute-0 nova_compute[356901]: 2025-10-11 02:34:32.937 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:33 compute-0 ceph-mon[191930]: pgmap v1703: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1704: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:34 compute-0 nova_compute[356901]: 2025-10-11 02:34:34.813 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:35 compute-0 ceph-mon[191930]: pgmap v1704: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:34:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1705: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:37 compute-0 ceph-mon[191930]: pgmap v1705: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:37 compute-0 nova_compute[356901]: 2025-10-11 02:34:37.941 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1706: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:39 compute-0 podman[445448]: 2025-10-11 02:34:39.232147648 +0000 UTC m=+0.116578896 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, summary=Provides the latest release of Red Hat Universal Base Image 9., vendor=Red Hat, Inc., release=1214.1726694543, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, architecture=x86_64, container_name=kepler, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.29.0, managed_by=edpm_ansible, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, io.openshift.expose-services=, build-date=2024-09-18T21:23:30, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.openshift.tags=base rhel9, release-0.7.12=, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.4, com.redhat.component=ubi9-container, config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9, maintainer=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git)
Oct 11 02:34:39 compute-0 ceph-mon[191930]: pgmap v1706: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:39 compute-0 nova_compute[356901]: 2025-10-11 02:34:39.816 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1707: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:34:41 compute-0 ceph-mon[191930]: pgmap v1707: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1708: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:42 compute-0 ceph-mon[191930]: pgmap v1708: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:42 compute-0 nova_compute[356901]: 2025-10-11 02:34:42.944 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1709: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:44 compute-0 nova_compute[356901]: 2025-10-11 02:34:44.821 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:45 compute-0 ceph-mon[191930]: pgmap v1709: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:34:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1710: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:47 compute-0 podman[445615]: 2025-10-11 02:34:47.222464953 +0000 UTC m=+0.106689729 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:34:47 compute-0 sudo[445693]:     zuul : TTY=pts/1 ; PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-urtculjamxbryhrilquryfmmnlfxotcr ; KUBECONFIG=/home/zuul/.crc/machines/crc/kubeconfig PATH=/home/zuul/.crc/bin:/home/zuul/.crc/bin/oc:/home/zuul/bin:/home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin /usr/bin/python3 /home/zuul/.ansible/tmp/ansible-tmp-1760150086.401288-58724-221541511688889/AnsiballZ_command.py'
Oct 11 02:34:47 compute-0 sudo[445693]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:34:47 compute-0 podman[445618]: 2025-10-11 02:34:47.255567261 +0000 UTC m=+0.116035233 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
Oct 11 02:34:47 compute-0 podman[445616]: 2025-10-11 02:34:47.268689395 +0000 UTC m=+0.154871683 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_controller, managed_by=edpm_ansible, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:34:47 compute-0 podman[445617]: 2025-10-11 02:34:47.271998325 +0000 UTC m=+0.136575971 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, config_id=edpm, tcib_managed=true)
Oct 11 02:34:47 compute-0 python3[445714]: ansible-ansible.legacy.command Invoked with _raw_params=podman ps -a --format "{{.Names}} {{.Status}}" | grep openstack_network_exporter _uses_shell=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Oct 11 02:34:47 compute-0 sudo[445693]: pam_unix(sudo:session): session closed for user root
Oct 11 02:34:47 compute-0 ceph-mon[191930]: pgmap v1710: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:47 compute-0 nova_compute[356901]: 2025-10-11 02:34:47.946 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1711: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:49 compute-0 ceph-mon[191930]: pgmap v1711: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:49 compute-0 nova_compute[356901]: 2025-10-11 02:34:49.823 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1712: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:34:51 compute-0 ceph-mon[191930]: pgmap v1712: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:34:52 compute-0 podman[445762]: 2025-10-11 02:34:52.303883588 +0000 UTC m=+0.178346536 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_id=multipathd, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:34:52 compute-0 podman[445763]: 2025-10-11 02:34:52.314926754 +0000 UTC m=+0.187473741 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=iscsid, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:34:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1713: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 511 B/s rd, 0 B/s wr, 0 op/s
Oct 11 02:34:52 compute-0 nova_compute[356901]: 2025-10-11 02:34:52.949 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:53 compute-0 ceph-mon[191930]: pgmap v1713: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 511 B/s rd, 0 B/s wr, 0 op/s
Oct 11 02:34:53 compute-0 nova_compute[356901]: 2025-10-11 02:34:53.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:34:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1714: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 20 KiB/s rd, 0 B/s wr, 33 op/s
Oct 11 02:34:54 compute-0 nova_compute[356901]: 2025-10-11 02:34:54.826 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:34:54.862 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:34:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:34:54.863 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:34:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:34:54.864 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
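The Acquiring/acquired/released triple above is oslo.concurrency's lockutils DEBUG logging around ProcessMonitor._check_child_processes: wait time is reported on acquire (0.001s) and hold time on release (0.001s). A sketch of the decorator form that produces exactly this log pattern:

    from oslo_concurrency import lockutils

    @lockutils.synchronized("_check_child_processes")
    def _check_child_processes():
        # Body runs under an in-process lock; lockutils DEBUG-logs the
        # acquire (with wait time) and the release (with hold time).
        pass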
Oct 11 02:34:55 compute-0 ceph-mon[191930]: pgmap v1714: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 20 KiB/s rd, 0 B/s wr, 33 op/s
Oct 11 02:34:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:34:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1715: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 31 KiB/s rd, 0 B/s wr, 51 op/s
Oct 11 02:34:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:34:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:34:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:34:56
Oct 11 02:34:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:34:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:34:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['volumes', 'cephfs.cephfs.data', '.rgw.root', 'vms', 'default.rgw.log', 'cephfs.cephfs.meta', 'images', 'default.rgw.meta', '.mgr', 'backups', 'default.rgw.control']
Oct 11 02:34:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:34:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:34:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:34:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:34:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:34:56 compute-0 ceph-mon[191930]: pgmap v1715: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 31 KiB/s rd, 0 B/s wr, 51 op/s
Oct 11 02:34:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:34:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:34:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:34:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:34:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:34:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:34:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:34:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:34:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:34:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:34:57 compute-0 nova_compute[356901]: 2025-10-11 02:34:57.953 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:34:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1716: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 0 B/s wr, 58 op/s
Oct 11 02:34:59 compute-0 ceph-mon[191930]: pgmap v1716: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 0 B/s wr, 58 op/s
Oct 11 02:34:59 compute-0 podman[157119]: time="2025-10-11T02:34:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:34:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:34:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:34:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:34:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9069 "" "Go-http-client/1.1"
Oct 11 02:34:59 compute-0 nova_compute[356901]: 2025-10-11 02:34:59.829 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1717: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:35:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:35:00 compute-0 nova_compute[356901]: 2025-10-11 02:35:00.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:35:00 compute-0 nova_compute[356901]: 2025-10-11 02:35:00.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:35:01 compute-0 openstack_network_exporter[374316]: ERROR   02:35:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:35:01 compute-0 openstack_network_exporter[374316]: ERROR   02:35:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:35:01 compute-0 openstack_network_exporter[374316]: ERROR   02:35:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:35:01 compute-0 openstack_network_exporter[374316]: ERROR   02:35:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:35:01 compute-0 openstack_network_exporter[374316]: ERROR   02:35:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:35:01 compute-0 ceph-mon[191930]: pgmap v1717: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:35:01 compute-0 nova_compute[356901]: 2025-10-11 02:35:01.893 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:35:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1718: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:35:02 compute-0 nova_compute[356901]: 2025-10-11 02:35:02.955 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:03 compute-0 podman[445800]: 2025-10-11 02:35:03.222335823 +0000 UTC m=+0.108060666 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.component=ubi9-minimal-container, config_id=edpm, name=ubi9-minimal, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.buildah.version=1.33.7, architecture=x86_64, version=9.6, distribution-scope=public, maintainer=Red Hat, Inc., vcs-type=git, url=https://catalog.redhat.com/en/search?searchType=containers, io.openshift.expose-services=, container_name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, release=1755695350, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.tags=minimal rhel9, build-date=2025-08-20T13:12:41, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9.)
Oct 11 02:35:03 compute-0 podman[445801]: 2025-10-11 02:35:03.25682905 +0000 UTC m=+0.130353058 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:35:03 compute-0 podman[445799]: 2025-10-11 02:35:03.276255391 +0000 UTC m=+0.154613113 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, org.label-schema.schema-version=1.0)
Oct 11 02:35:03 compute-0 ceph-mon[191930]: pgmap v1718: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:35:03 compute-0 nova_compute[356901]: 2025-10-11 02:35:03.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:35:03 compute-0 nova_compute[356901]: 2025-10-11 02:35:03.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:35:03 compute-0 nova_compute[356901]: 2025-10-11 02:35:03.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:35:04 compute-0 nova_compute[356901]: 2025-10-11 02:35:04.133 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:35:04 compute-0 nova_compute[356901]: 2025-10-11 02:35:04.133 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:35:04 compute-0 nova_compute[356901]: 2025-10-11 02:35:04.134 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:35:04 compute-0 nova_compute[356901]: 2025-10-11 02:35:04.134 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
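The req-40d8b8a0 lines above are one pass of nova-compute's _heal_instance_info_cache periodic task: rebuild the list of instances to heal, lock one instance's cache entry, and refresh its network info from neutron. A minimal sketch of the underlying oslo.service periodic-task pattern (the task body is a stand-in, not nova's actual code):

```python
from oslo_config import cfg
from oslo_service import periodic_task

CONF = cfg.CONF

class Manager(periodic_task.PeriodicTasks):
    """Sketch of the periodic-task pattern nova-compute uses."""

    def __init__(self):
        super().__init__(CONF)

    @periodic_task.periodic_task(spacing=60)
    def _heal_instance_info_cache(self, context):
        # nova refreshes one instance's network info cache per run,
        # which produces the lock/refresh lines seen above.
        pass

# A timer drives the registered tasks, roughly:
#   manager = Manager()
#   manager.run_periodic_tasks(context)
```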
Oct 11 02:35:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1719: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 0 B/s wr, 58 op/s
Oct 11 02:35:04 compute-0 nova_compute[356901]: 2025-10-11 02:35:04.832 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:05 compute-0 ceph-mon[191930]: pgmap v1719: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 0 B/s wr, 58 op/s
Oct 11 02:35:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:35:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1720: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 16 KiB/s rd, 0 B/s wr, 25 op/s
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:35:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
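Each pg_autoscaler line above computes a pool's PG target as usage ratio x bias x the cluster PG budget, then quantizes to a power of two subject to a per-pool floor. The figures are consistent with a budget of 300 (e.g. 3 OSDs x mon_target_pg_per_osd=100): for 'vms', 0.0005513950275118838 x 1.0 x 300 ~= 0.1654, quantized up to the 32-PG floor. A rough sketch of that arithmetic (the real autoscaler also applies min/max bounds and a 3x hysteresis rule before actually resizing):

```python
import math

PG_BUDGET = 300  # assumption: 3 OSDs x 100 target PGs per OSD

def pg_target(usage_ratio: float, bias: float, floor: int = 32) -> int:
    raw = usage_ratio * bias * PG_BUDGET
    if raw <= 0:
        return floor
    # quantize to the nearest power of two, never below the floor
    quantized = 2 ** max(0, round(math.log2(raw)))
    return max(quantized, floor)

print(pg_target(0.0005513950275118838, 1.0))      # 'vms' -> 32
print(pg_target(5.087256625643029e-07, 4.0, 16))  # 'cephfs.cephfs.meta' -> 16
```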
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.069 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.085 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.086 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
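The network_info blob written to the cache above is plain JSON: one OVN-bound OVS VIF on br-int, fixed IP 192.168.0.236 with floating IP 192.168.122.201. A small sketch, assuming that structure, for walking out the address pairs:

```python
import json

def fixed_and_floating(network_info_json: str):
    """Yield (fixed_ip, floating_ip) pairs from a nova
    network_info list like the one cached above."""
    for vif in json.loads(network_info_json):
        for subnet in vif["network"]["subnets"]:
            for ip in subnet["ips"]:
                for fip in ip.get("floating_ips", []):
                    yield ip["address"], fip["address"]

# For the cache entry above this yields exactly
# ('192.168.0.236', '192.168.122.201').
```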
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.087 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.087 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.088 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.088 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.089 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.117 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.117 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.118 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.118 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.119 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:35:07 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:35:07 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3464225348' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.626 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.507s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:35:07 compute-0 ceph-mon[191930]: pgmap v1720: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 16 KiB/s rd, 0 B/s wr, 25 op/s
Oct 11 02:35:07 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3464225348' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
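The resource audit shells out for Ceph pool capacity via oslo.concurrency, which is why a client.openstack "df" dispatch shows up in the mon audit log in the same half-second window. A sketch of the same call (processutils.execute returns stdout/stderr and raises ProcessExecutionError on a non-zero exit):

```python
from oslo_concurrency import processutils

out, _err = processutils.execute(
    'ceph', 'df', '--format=json',
    '--id', 'openstack', '--conf', '/etc/ceph/ceph.conf')
# "out" now holds the JSON document nova parses for pool capacity.
```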
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.753 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.754 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.755 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:35:07 compute-0 nova_compute[356901]: 2025-10-11 02:35:07.959 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:08 compute-0 nova_compute[356901]: 2025-10-11 02:35:08.235 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:35:08 compute-0 nova_compute[356901]: 2025-10-11 02:35:08.237 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3857MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:35:08 compute-0 nova_compute[356901]: 2025-10-11 02:35:08.237 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:35:08 compute-0 nova_compute[356901]: 2025-10-11 02:35:08.238 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:35:08 compute-0 nova_compute[356901]: 2025-10-11 02:35:08.315 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:35:08 compute-0 nova_compute[356901]: 2025-10-11 02:35:08.315 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:35:08 compute-0 nova_compute[356901]: 2025-10-11 02:35:08.316 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:35:08 compute-0 nova_compute[356901]: 2025-10-11 02:35:08.354 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:35:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1721: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 4.8 KiB/s rd, 0 B/s wr, 8 op/s
Oct 11 02:35:08 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:35:08 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1216411740' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:35:08 compute-0 nova_compute[356901]: 2025-10-11 02:35:08.874 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.521s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:35:08 compute-0 nova_compute[356901]: 2025-10-11 02:35:08.882 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:35:08 compute-0 nova_compute[356901]: 2025-10-11 02:35:08.903 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:35:08 compute-0 nova_compute[356901]: 2025-10-11 02:35:08.904 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:35:08 compute-0 nova_compute[356901]: 2025-10-11 02:35:08.905 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.667s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
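Placement derives schedulable capacity from the inventory above as (total - reserved) x allocation_ratio per resource class: 32 VCPU, 7168 MB of RAM, and about 52.2 GB of disk for this node. A one-liner check of that arithmetic:

```python
inventory = {
    'VCPU':      {'total': 8,    'reserved': 0,   'allocation_ratio': 4.0},
    'MEMORY_MB': {'total': 7680, 'reserved': 512, 'allocation_ratio': 1.0},
    'DISK_GB':   {'total': 59,   'reserved': 1,   'allocation_ratio': 0.9},
}

for rc, inv in inventory.items():
    capacity = (inv['total'] - inv['reserved']) * inv['allocation_ratio']
    print(rc, capacity)  # VCPU 32.0, MEMORY_MB 7168.0, DISK_GB ~52.2
```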
Oct 11 02:35:09 compute-0 ceph-mon[191930]: pgmap v1721: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 4.8 KiB/s rd, 0 B/s wr, 8 op/s
Oct 11 02:35:09 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1216411740' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:35:09 compute-0 nova_compute[356901]: 2025-10-11 02:35:09.836 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:10 compute-0 podman[445905]: 2025-10-11 02:35:10.208943317 +0000 UTC m=+0.107925861 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2024-09-18T21:23:30, config_id=edpm, managed_by=edpm_ansible, container_name=kepler, vendor=Red Hat, Inc., architecture=x86_64, com.redhat.component=ubi9-container, maintainer=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, summary=Provides the latest release of Red Hat Universal Base Image 9., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=base rhel9, version=9.4, io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, distribution-scope=public, name=ubi9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.buildah.version=1.29.0, io.openshift.expose-services=, release=1214.1726694543, vcs-type=git)
Oct 11 02:35:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1722: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 852 B/s rd, 0 B/s wr, 1 op/s
Oct 11 02:35:10 compute-0 ceph-mon[191930]: pgmap v1722: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail; 852 B/s rd, 0 B/s wr, 1 op/s
Oct 11 02:35:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:35:11 compute-0 sudo[445925]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:35:11 compute-0 sudo[445925]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:35:11 compute-0 sudo[445925]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:11 compute-0 sudo[445950]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:35:11 compute-0 sudo[445950]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:35:11 compute-0 sudo[445950]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:11 compute-0 sudo[445975]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:35:11 compute-0 sudo[445975]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:35:11 compute-0 sudo[445975]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:11 compute-0 sudo[446000]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:35:11 compute-0 sudo[446000]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:35:12 compute-0 sudo[446000]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:35:12 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:35:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:35:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:35:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:35:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:35:12 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 650248d1-a3b9-451a-bb7a-80c56dd56bfb does not exist
Oct 11 02:35:12 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 9ae8bb61-59e7-4061-b074-e740342e07db does not exist
Oct 11 02:35:12 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 4514d249-3e63-43ea-be4d-9ed3acc85e24 does not exist
Oct 11 02:35:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:35:12 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:35:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:35:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:35:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:35:12 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:35:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:35:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:35:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:35:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:35:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:35:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
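Every "ceph df" from nova and every cephadm housekeeping query lands on the monitor as a mon_command dispatch like the ones audited above. The same JSON command shape can be issued from Python through librados; a sketch using the client.openstack identity from this deployment:

```python
import json
import rados

cluster = rados.Rados(conffile='/etc/ceph/ceph.conf',
                      name='client.openstack')
cluster.connect()
try:
    # Same command shape the audit log records:
    # {"prefix": "df", "format": "json"}
    ret, out, errs = cluster.mon_command(
        json.dumps({"prefix": "df", "format": "json"}), b'')
    df = json.loads(out)
finally:
    cluster.shutdown()
```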
Oct 11 02:35:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1723: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:12 compute-0 sudo[446054]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:35:12 compute-0 sudo[446054]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:35:12 compute-0 sudo[446054]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:12 compute-0 sudo[446079]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:35:12 compute-0 sudo[446079]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:35:12 compute-0 sudo[446079]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:12 compute-0 sudo[446104]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:35:12 compute-0 sudo[446104]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:35:12 compute-0 sudo[446104]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:12 compute-0 sudo[446129]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
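The sudo line above is cephadm preparing OSDs: it launches a one-shot ceph container and runs ceph-volume in batch mode over three pre-created logical volumes. Stripped of the image/env/config-json plumbing, the wrapper call reduces to roughly this (root required, cephadm assumed on PATH):

```python
import subprocess

FSID = '3c7617c3-7a20-523e-a9de-20c0d6ba41da'

# Sketch of the cephadm ceph-volume wrapper call from the log above.
subprocess.run(
    ['cephadm', 'ceph-volume', '--fsid', FSID, '--',
     'lvm', 'batch', '--no-auto',
     '/dev/ceph_vg0/ceph_lv0', '/dev/ceph_vg1/ceph_lv1',
     '/dev/ceph_vg2/ceph_lv2',
     '--yes', '--no-systemd'],
    check=True)
```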
Oct 11 02:35:12 compute-0 sudo[446129]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:35:12 compute-0 nova_compute[356901]: 2025-10-11 02:35:12.961 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:13 compute-0 podman[446192]: 2025-10-11 02:35:13.390478123 +0000 UTC m=+0.069403723 container create c68f018ff71a6c6c1f5f036a4b7128ed855785a1d7654366ac9d02d8126ff60f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_moser, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:35:13 compute-0 podman[446192]: 2025-10-11 02:35:13.364433013 +0000 UTC m=+0.043358673 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:35:13 compute-0 systemd[1]: Started libpod-conmon-c68f018ff71a6c6c1f5f036a4b7128ed855785a1d7654366ac9d02d8126ff60f.scope.
Oct 11 02:35:13 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:35:13 compute-0 ceph-mon[191930]: pgmap v1723: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:13 compute-0 podman[446192]: 2025-10-11 02:35:13.527987982 +0000 UTC m=+0.206913632 container init c68f018ff71a6c6c1f5f036a4b7128ed855785a1d7654366ac9d02d8126ff60f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_moser, ceph=True, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 02:35:13 compute-0 podman[446192]: 2025-10-11 02:35:13.5454532 +0000 UTC m=+0.224378820 container start c68f018ff71a6c6c1f5f036a4b7128ed855785a1d7654366ac9d02d8126ff60f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_moser, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:35:13 compute-0 podman[446192]: 2025-10-11 02:35:13.553374954 +0000 UTC m=+0.232300625 container attach c68f018ff71a6c6c1f5f036a4b7128ed855785a1d7654366ac9d02d8126ff60f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_moser, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:35:13 compute-0 strange_moser[446209]: 167 167
Oct 11 02:35:13 compute-0 systemd[1]: libpod-c68f018ff71a6c6c1f5f036a4b7128ed855785a1d7654366ac9d02d8126ff60f.scope: Deactivated successfully.
Oct 11 02:35:13 compute-0 podman[446192]: 2025-10-11 02:35:13.556836491 +0000 UTC m=+0.235762111 container died c68f018ff71a6c6c1f5f036a4b7128ed855785a1d7654366ac9d02d8126ff60f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_moser, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:35:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-de471002a467e84b9f71ef13780b5295c2ba00e84ed16a56fa82a0776f6b00e2-merged.mount: Deactivated successfully.
Oct 11 02:35:13 compute-0 podman[446192]: 2025-10-11 02:35:13.620798743 +0000 UTC m=+0.299724313 container remove c68f018ff71a6c6c1f5f036a4b7128ed855785a1d7654366ac9d02d8126ff60f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_moser, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0)
Oct 11 02:35:13 compute-0 systemd[1]: libpod-conmon-c68f018ff71a6c6c1f5f036a4b7128ed855785a1d7654366ac9d02d8126ff60f.scope: Deactivated successfully.
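The create -> init -> start -> attach -> died -> remove sequence above is the journal signature of a short-lived, auto-removed container; the "167 167" it printed is consistent with cephadm probing the ceph uid/gid pair (167:167 in ceph images). A sketch that produces the same event sequence (assuming podman and access to the image):

```python
import subprocess

IMAGE = ('quay.io/ceph/ceph@sha256:'
         '1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0')

# --rm makes podman emit "died" and "remove" right after the
# command exits, matching the six events logged above.
subprocess.run(['podman', 'run', '--rm', IMAGE, 'true'], check=True)
```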
Oct 11 02:35:13 compute-0 podman[446232]: 2025-10-11 02:35:13.839813275 +0000 UTC m=+0.057073032 container create b6b137375c4bfb686e365c6cd457fd4dfcecf7bd7ef28581788eea0dbad2bf00 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_dirac, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0)
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.866 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads available to execute them; therefore, the polling process can be expected to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.867 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.867 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
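Each "Registering pollster ... to be executed via executor" line above submits one pollster to a concurrent.futures thread pool, sized here by the "[1] threads" note earlier in this cycle. A minimal sketch of that dispatch pattern (pollster names taken from the samples below; the poll body is a stand-in for pollster.obj.get_samples):

```python
from concurrent.futures import ThreadPoolExecutor

pollsters = ['network.incoming.bytes', 'network.outgoing.packets',
             'network.outgoing.packets.drop']

def poll(name: str) -> str:
    # stand-in for the real pollster's get_samples() call
    return f'polled {name}'

# One worker thread, each pollster submitted as its own task,
# mirroring the serialized polling seen in the log.
with ThreadPoolExecutor(max_workers=1) as executor:
    for result in executor.map(poll, pollsters):
        print(result)
```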
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.877 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.877 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.878 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.878 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.878 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.878 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:35:13.878303) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.885 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2856 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.886 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.886 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.886 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.886 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.886 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
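When a pollster does belong to a coordinated source, agents place resource IDs on a consistent hash ring so exactly one agent polls each resource; here no coordination group is configured, so the check short-circuits. A hedged sketch of the ring logic using tooz — the member names are invented, and ceilometer drives this through its partition coordinator rather than this raw API:

from tooz import hashring

# Two hypothetical agents share a ring; a resource is polled only by the
# agent(s) that get_nodes() maps it to.
ring = hashring.HashRing(['agent-a', 'agent-b'])
resource = b'0cc56d17-ec3a-4408-bccb-91b29427379e'
print(ring.get_nodes(resource))  # e.g. {'agent-b'} -> only agent-b polls it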
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.886 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.887 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 23 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.887 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.887 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.887 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.887 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.887 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.888 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.888 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.888 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.888 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.889 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.888 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:35:13.886837) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.889 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.889 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:35:13.888040) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.889 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.889 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.889 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.889 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:35:13.889397) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
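Two thread IDs cooperate in these heartbeat lines: worker 14 announces a heartbeat per meter and thread 12 records the latest timestamp. A minimal sketch of that producer/consumer shape, assuming a plain in-process queue (the actual manager wires this through its own status machinery):

import datetime
import queue
import threading

heartbeats = queue.Queue()
latest = {}

def status_worker():
    # Mirrors the 'Updated heartbeat for <meter> (<ts>)' role of thread 12.
    while True:
        meter, ts = heartbeats.get()
        latest[meter] = ts

threading.Thread(target=status_worker, daemon=True).start()
# Mirrors the 'Pollster heartbeat update: <meter>' role of worker 14.
heartbeats.put(('network.outgoing.packets.error',
                datetime.datetime.now(datetime.timezone.utc)))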
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.890 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.890 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.890 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.890 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.890 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.890 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.891 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:35:13.890711) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:13 compute-0 systemd[1]: Started libpod-conmon-b6b137375c4bfb686e365c6cd457fd4dfcecf7bd7ef28581788eea0dbad2bf00.scope.
Oct 11 02:35:13 compute-0 podman[446232]: 2025-10-11 02:35:13.813600398 +0000 UTC m=+0.030860175 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.911 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.911 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.911 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.912 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
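The three capacity samples in one cycle correspond to the instance's three block devices: the 1 GiB root disk, the 1 GiB ephemeral disk from the m1.small flavor, and a small third device (485376 bytes, plausibly a config drive). Per-device capacity comes from libvirt block info; a sketch, with the device names assumed:

import libvirt

# blockInfo() returns [capacity, allocation, physical] in bytes per device.
conn = libvirt.open('qemu:///system')
dom = conn.lookupByUUIDString('0cc56d17-ec3a-4408-bccb-91b29427379e')
for dev in ('vda', 'vdb', 'vdc'):  # names assumed for illustration
    capacity, allocation, physical = dom.blockInfo(dev)
    print(dev, capacity)  # 1073741824, 1073741824, 485376 in the log above
conn.close()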
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.912 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.912 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.912 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.912 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.913 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.913 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:35:13.913076) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:13 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:35:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/56e0c1c081a941c4682eeebdaa143c0a31526f63aa8e4f2c23ff0a726947d01d/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:35:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/56e0c1c081a941c4682eeebdaa143c0a31526f63aa8e4f2c23ff0a726947d01d/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:35:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/56e0c1c081a941c4682eeebdaa143c0a31526f63aa8e4f2c23ff0a726947d01d/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:35:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/56e0c1c081a941c4682eeebdaa143c0a31526f63aa8e4f2c23ff0a726947d01d/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:35:13 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/56e0c1c081a941c4682eeebdaa143c0a31526f63aa8e4f2c23ff0a726947d01d/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:35:13 compute-0 podman[446232]: 2025-10-11 02:35:13.957712136 +0000 UTC m=+0.174971923 container init b6b137375c4bfb686e365c6cd457fd4dfcecf7bd7ef28581788eea0dbad2bf00 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_dirac, CEPH_REF=reef, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.958 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.959 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.959 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.960 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.960 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.960 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.960 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.960 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.961 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.961 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.961 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.961 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.962 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.962 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.962 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.962 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.963 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.963 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:35:13.960944) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.963 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.963 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.964 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.964 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.964 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
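The read meters for each device come from libvirt's flags-based block stats: bytes, request counts, and cumulative time spent on reads (so the latency values above are total nanoseconds, not per-request averages). A sketch, device name again assumed:

import libvirt

conn = libvirt.open('qemu:///system')
dom = conn.lookupByUUIDString('0cc56d17-ec3a-4408-bccb-91b29427379e')
stats = dom.blockStatsFlags('vda')      # returns a dict of counters
print(stats['rd_bytes'])                # -> disk.device.read.bytes
print(stats['rd_operations'])           # -> disk.device.read.requests
print(stats['rd_total_times'])          # -> disk.device.read.latency (ns)
conn.close()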
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.965 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.965 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.965 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.965 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.965 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.965 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.966 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.966 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.967 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.967 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.967 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.967 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.967 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.968 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.968 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.968 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.968 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.969 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.969 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.969 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.969 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.970 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:35:13.963658) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.970 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.970 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:35:13.965762) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.970 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:35:13.968094) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.970 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.970 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.970 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.971 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.971 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.971 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.972 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.972 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:35:13.970400) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.972 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.972 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.972 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.973 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:35:13.972514) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:13 compute-0 podman[446232]: 2025-10-11 02:35:13.973372288 +0000 UTC m=+0.190632045 container start b6b137375c4bfb686e365c6cd457fd4dfcecf7bd7ef28581788eea0dbad2bf00 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_dirac, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:35:13 compute-0 podman[446232]: 2025-10-11 02:35:13.977665649 +0000 UTC m=+0.194925426 container attach b6b137375c4bfb686e365c6cd457fd4dfcecf7bd7ef28581788eea0dbad2bf00 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_dirac, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
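The podman lines interleaved with the polling output trace one container lifecycle: image pull, conmon scope creation, init, start, attach. The same sequence can be replayed from podman's event log; a sketch via subprocess, hedged because the JSON field names can vary across podman versions:

import json
import subprocess

out = subprocess.run(
    ['podman', 'events', '--since', '2025-10-11T02:35:13',
     '--until', '2025-10-11T02:35:14', '--format', 'json'],
    capture_output=True, text=True, check=True,
).stdout
for line in out.splitlines():
    ev = json.loads(line)
    # Expect pull/init/start/attach events for the ceph container here.
    print(ev.get('Status'), ev.get('Name'))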
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.997 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.997 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
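power.state volume 1 is libvirt's numeric domain state, where 1 is VIR_DOMAIN_RUNNING, consistent with the 'running' vm_state in the discovery record above. A sketch:

import libvirt

conn = libvirt.open('qemu:///system')
dom = conn.lookupByUUIDString('0cc56d17-ec3a-4408-bccb-91b29427379e')
state, _reason = dom.state()
print(state, state == libvirt.VIR_DOMAIN_RUNNING)  # 1 True for this instance
conn.close()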
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.998 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.998 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.998 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.998 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.998 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.998 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.999 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:13.999 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.000 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.001 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:35:13.998605) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.001 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.002 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.003 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.003 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.003 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.003 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:35:14.003307) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.003 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.004 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.004 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.004 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
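The .delta and .rate meters are derived rather than read: a delta subtracts the previous cumulative reading, and a rate divides that delta by the elapsed time, which is why the rate pollster is skipped when discovery returns no fresh resources this cycle. Illustrative arithmetic with invented timestamps:

# Two cumulative readings of network.incoming.bytes, 60 s apart (invented).
prev_bytes, prev_t = 2856, 0.0
curr_bytes, curr_t = 2856, 60.0

delta = curr_bytes - prev_bytes        # -> network.incoming.bytes.delta == 0
rate = delta / (curr_t - prev_t)       # -> network.incoming.bytes.rate (B/s)
print(delta, rate)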
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.004 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.004 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.004 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.005 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.005 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.005 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:35:14.005133) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.005 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.006 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.006 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.006 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.006 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.006 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.006 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.006 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:35:14.006504) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.007 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.007 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.007 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.007 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.007 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.007 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.008 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.008 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:35:14.007856) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.008 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.008 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.008 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.008 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.009 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.009 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.009 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:35:14.009171) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.009 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.010 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.010 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.010 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.010 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.010 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.010 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.011 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:35:14.010541) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.011 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.011 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.011 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.011 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.011 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.011 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.011 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.012 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:35:14.011836) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.012 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.012 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.013 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.013 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.013 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.013 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.013 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.013 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.014 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:35:14.013943) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.014 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.014 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.014 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.014 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.015 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.015 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.015 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.015 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 48390000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.015 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:35:14.015329) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.016 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.016 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.016 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.016 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.016 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.016 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.017 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2342 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.017 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:35:14.016730) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.017 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.018 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.018 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.018 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.018 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.018 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.018 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.83984375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.018 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:35:14.018589) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.019 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.019 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.019 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
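Unlike the cumulative meters above, the *.rate meters need two successive samples of the same resource, so a cycle in which discovery returns nothing new is skipped outright rather than reported as zero. A hedged sketch of the derivation (illustrative helper, not ceilometer code):

    def bytes_rate(prev_bytes, prev_ts, cur_bytes, cur_ts):
        """Rate in bytes/second between two cumulative readings."""
        elapsed = cur_ts - prev_ts
        if elapsed <= 0:
            raise ValueError("timestamps must be increasing")
        return (cur_bytes - prev_bytes) / elapsed

    # network.outgoing.bytes read 2342 this cycle; if the next cycle reads
    # 4684 some 300 s later, the rate sample would be ~7.8 B/s.
    print(bytes_rate(2342, 0.0, 4684, 300.0))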
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.020 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.020 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.020 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.020 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.020 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.020 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.020 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.020 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.020 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.020 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.020 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.021 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.021 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.021 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.021 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.021 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.021 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.021 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.021 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.021 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.021 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.021 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.021 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.021 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.021 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:35:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:35:14.022 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
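The run of "Finished processing pollster" lines closes out the polling task. A quick consistency check over a captured journal, pairing each "Polling pollster" INFO line with its "Finished polling pollster" counterpart (pure log parsing; feed it journalctl output on stdin):

    import re
    import sys
    from collections import Counter

    START = re.compile(r"Polling pollster (\S+) in the context")
    DONE = re.compile(r"Finished polling pollster (\S+) in the context")

    counts = Counter()
    for line in sys.stdin:      # e.g. journalctl | python3 check_pollsters.py
        if m := START.search(line):
            counts[m.group(1)] += 1
        elif m := DONE.search(line):
            counts[m.group(1)] -= 1
    print({meter: n for meter, n in counts.items() if n}
          or "all pollsters completed")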
Oct 11 02:35:14 compute-0 systemd[1]: virtsecretd.service: Deactivated successfully.
Oct 11 02:35:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1724: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:14 compute-0 nova_compute[356901]: 2025-10-11 02:35:14.838 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:15 compute-0 vigorous_dirac[446249]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:35:15 compute-0 vigorous_dirac[446249]: --> relative data size: 1.0
Oct 11 02:35:15 compute-0 vigorous_dirac[446249]: --> All data devices are unavailable
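These three lines are cephadm evaluating its OSD service spec against the host: the three candidate data devices are LVM logical volumes, and all are "unavailable" because each already carries an OSD (see the ceph.osdspec_affinity=default_drive_group tags in the lvm list output further down), so no new OSDs are created. A loudly hypothetical sketch of the kind of spec that would produce this report, expressed as a Python literal; only the service_id is taken from the log:

    default_drive_group = {
        "service_type": "osd",
        "service_id": "default_drive_group",  # matches ceph.osdspec_affinity below
        "placement": {"host_pattern": "*"},
        # "0 physical, 3 LVM": the matched data devices are logical volumes.
        "data_devices": {"paths": ["/dev/ceph_vg0/ceph_lv0",
                                   "/dev/ceph_vg1/ceph_lv1",
                                   "/dev/ceph_vg2/ceph_lv2"]},
    }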
Oct 11 02:35:15 compute-0 systemd[1]: libpod-b6b137375c4bfb686e365c6cd457fd4dfcecf7bd7ef28581788eea0dbad2bf00.scope: Deactivated successfully.
Oct 11 02:35:15 compute-0 systemd[1]: libpod-b6b137375c4bfb686e365c6cd457fd4dfcecf7bd7ef28581788eea0dbad2bf00.scope: Consumed 1.130s CPU time.
Oct 11 02:35:15 compute-0 podman[446232]: 2025-10-11 02:35:15.180013143 +0000 UTC m=+1.397272940 container died b6b137375c4bfb686e365c6cd457fd4dfcecf7bd7ef28581788eea0dbad2bf00 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_dirac, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:35:15 compute-0 systemd[1]: var-lib-containers-storage-overlay-56e0c1c081a941c4682eeebdaa143c0a31526f63aa8e4f2c23ff0a726947d01d-merged.mount: Deactivated successfully.
Oct 11 02:35:15 compute-0 podman[446232]: 2025-10-11 02:35:15.27391885 +0000 UTC m=+1.491178617 container remove b6b137375c4bfb686e365c6cd457fd4dfcecf7bd7ef28581788eea0dbad2bf00 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_dirac, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 02:35:15 compute-0 systemd[1]: libpod-conmon-b6b137375c4bfb686e365c6cd457fd4dfcecf7bd7ef28581788eea0dbad2bf00.scope: Deactivated successfully.
Oct 11 02:35:15 compute-0 sudo[446129]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:15 compute-0 sudo[446290]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:35:15 compute-0 sudo[446290]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:35:15 compute-0 sudo[446290]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:15 compute-0 sudo[446315]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:35:15 compute-0 sudo[446315]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:35:15 compute-0 sudo[446315]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:15 compute-0 ceph-mon[191930]: pgmap v1724: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:15 compute-0 sudo[446340]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:35:15 compute-0 sudo[446340]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:35:15 compute-0 sudo[446340]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:35:15 compute-0 sudo[446365]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:35:15 compute-0 sudo[446365]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
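The sudo trail above (/bin/true as a connectivity probe, /bin/which python3, then the cephadm binary itself) is the standard cephadm remote-execution pattern. Replaying the final call and loading its JSON, with the FSID, image digest, and cephadm path copied from the log line above (illustration only; it requires the same host):

    import json
    import subprocess

    FSID = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"
    CEPHADM = (f"/var/lib/ceph/{FSID}/cephadm."
               "31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d")
    IMAGE = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")

    out = subprocess.run(
        ["sudo", "python3", CEPHADM, "--image", IMAGE, "--timeout", "895",
         "ceph-volume", "--fsid", FSID, "--", "lvm", "list", "--format", "json"],
        check=True, capture_output=True, text=True).stdout
    print(sorted(json.loads(out)))   # OSD ids, e.g. ['0', '1', '2']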
Oct 11 02:35:16 compute-0 podman[446428]: 2025-10-11 02:35:16.426569905 +0000 UTC m=+0.081549566 container create e739913227d40ce702685d120074e6429e99be2f1b03ac7b6e7e02dd03e81cc0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_shtern, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:35:16 compute-0 podman[446428]: 2025-10-11 02:35:16.399406117 +0000 UTC m=+0.054385788 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:35:16 compute-0 systemd[1]: Started libpod-conmon-e739913227d40ce702685d120074e6429e99be2f1b03ac7b6e7e02dd03e81cc0.scope.
Oct 11 02:35:16 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:35:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1725: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:16 compute-0 podman[446428]: 2025-10-11 02:35:16.577768152 +0000 UTC m=+0.232747833 container init e739913227d40ce702685d120074e6429e99be2f1b03ac7b6e7e02dd03e81cc0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_shtern, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2)
Oct 11 02:35:16 compute-0 podman[446428]: 2025-10-11 02:35:16.587272634 +0000 UTC m=+0.242252295 container start e739913227d40ce702685d120074e6429e99be2f1b03ac7b6e7e02dd03e81cc0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_shtern, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 02:35:16 compute-0 podman[446428]: 2025-10-11 02:35:16.592192722 +0000 UTC m=+0.247172453 container attach e739913227d40ce702685d120074e6429e99be2f1b03ac7b6e7e02dd03e81cc0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_shtern, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:35:16 compute-0 festive_shtern[446444]: 167 167
Oct 11 02:35:16 compute-0 systemd[1]: libpod-e739913227d40ce702685d120074e6429e99be2f1b03ac7b6e7e02dd03e81cc0.scope: Deactivated successfully.
Oct 11 02:35:16 compute-0 podman[446428]: 2025-10-11 02:35:16.598137123 +0000 UTC m=+0.253116814 container died e739913227d40ce702685d120074e6429e99be2f1b03ac7b6e7e02dd03e81cc0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_shtern, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0)
Oct 11 02:35:16 compute-0 systemd[1]: var-lib-containers-storage-overlay-cc29b0357d4dbb4a211c6fd04356d4686e937939ad638bcd85357b4a420d49c7-merged.mount: Deactivated successfully.
Oct 11 02:35:16 compute-0 podman[446428]: 2025-10-11 02:35:16.655770027 +0000 UTC m=+0.310749678 container remove e739913227d40ce702685d120074e6429e99be2f1b03ac7b6e7e02dd03e81cc0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_shtern, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:35:16 compute-0 systemd[1]: libpod-conmon-e739913227d40ce702685d120074e6429e99be2f1b03ac7b6e7e02dd03e81cc0.scope: Deactivated successfully.
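The short-lived festive_shtern container exists only to print "167 167", presumably the uid/gid of the ceph user baked into the image, which cephadm uses to set ownership on bind-mounted state directories. A hedged reproduction with podman directly (the exact command cephadm runs may differ):

    import subprocess

    IMAGE = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")
    # Ask the image who owns /var/lib/ceph; CentOS-based ceph images use 167:167.
    probe = subprocess.run(
        ["podman", "run", "--rm", "--entrypoint", "stat", IMAGE,
         "-c", "%u %g", "/var/lib/ceph"],
        check=True, capture_output=True, text=True)
    print(probe.stdout.strip())   # expected: "167 167"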
Oct 11 02:35:16 compute-0 podman[446467]: 2025-10-11 02:35:16.936957666 +0000 UTC m=+0.076653020 container create 25871a59d2a25365a1c466cd21db06f3c85c9aac8376abfc26898875757f1b43 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_maxwell, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:35:16 compute-0 systemd[1]: Started libpod-conmon-25871a59d2a25365a1c466cd21db06f3c85c9aac8376abfc26898875757f1b43.scope.
Oct 11 02:35:17 compute-0 podman[446467]: 2025-10-11 02:35:16.917279804 +0000 UTC m=+0.056975188 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:35:17 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:35:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/64a6cbec25fa92bfc3bc8e70731f8ae4d775806472fd47d0aab1ab28c61faeaf/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:35:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/64a6cbec25fa92bfc3bc8e70731f8ae4d775806472fd47d0aab1ab28c61faeaf/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:35:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/64a6cbec25fa92bfc3bc8e70731f8ae4d775806472fd47d0aab1ab28c61faeaf/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:35:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/64a6cbec25fa92bfc3bc8e70731f8ae4d775806472fd47d0aab1ab28c61faeaf/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:35:17 compute-0 podman[446467]: 2025-10-11 02:35:17.064927622 +0000 UTC m=+0.204623016 container init 25871a59d2a25365a1c466cd21db06f3c85c9aac8376abfc26898875757f1b43 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_maxwell, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:35:17 compute-0 podman[446467]: 2025-10-11 02:35:17.098049991 +0000 UTC m=+0.237745345 container start 25871a59d2a25365a1c466cd21db06f3c85c9aac8376abfc26898875757f1b43 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_maxwell, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:35:17 compute-0 podman[446467]: 2025-10-11 02:35:17.10300577 +0000 UTC m=+0.242701214 container attach 25871a59d2a25365a1c466cd21db06f3c85c9aac8376abfc26898875757f1b43 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_maxwell, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:35:17 compute-0 systemd[1]: virtproxyd.service: Deactivated successfully.
Oct 11 02:35:17 compute-0 podman[446490]: 2025-10-11 02:35:17.399475645 +0000 UTC m=+0.103462892 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:35:17 compute-0 podman[446491]: 2025-10-11 02:35:17.404679225 +0000 UTC m=+0.107779454 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_managed=true, config_id=ovn_metadata_agent)
Oct 11 02:35:17 compute-0 podman[446531]: 2025-10-11 02:35:17.546436233 +0000 UTC m=+0.103693761 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_id=edpm, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS)
Oct 11 02:35:17 compute-0 podman[446530]: 2025-10-11 02:35:17.590905992 +0000 UTC m=+0.145379033 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, managed_by=edpm_ansible, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']})
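The health_status=healthy events above come from podman running each container's mounted /openstack/healthcheck script on its timer (the healthcheck stanza visible in config_data). The same probe can be triggered on demand; the container names are taken from the events above:

    import subprocess

    for name in ("podman_exporter", "ovn_metadata_agent",
                 "ceilometer_agent_compute", "ovn_controller"):
        # `podman healthcheck run` exits 0 when the check passes.
        rc = subprocess.run(["podman", "healthcheck", "run", name]).returncode
        print(name, "healthy" if rc == 0 else f"unhealthy (rc={rc})")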
Oct 11 02:35:17 compute-0 ceph-mon[191930]: pgmap v1725: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:17 compute-0 funny_maxwell[446484]: {
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:     "0": [
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:         {
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "devices": [
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "/dev/loop3"
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             ],
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "lv_name": "ceph_lv0",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "lv_size": "21470642176",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "name": "ceph_lv0",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "tags": {
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.cluster_name": "ceph",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.crush_device_class": "",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.encrypted": "0",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.osd_id": "0",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.type": "block",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.vdo": "0"
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             },
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "type": "block",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "vg_name": "ceph_vg0"
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:         }
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:     ],
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:     "1": [
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:         {
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "devices": [
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "/dev/loop4"
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             ],
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "lv_name": "ceph_lv1",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "lv_size": "21470642176",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "name": "ceph_lv1",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "tags": {
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.cluster_name": "ceph",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.crush_device_class": "",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.encrypted": "0",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.osd_id": "1",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.type": "block",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.vdo": "0"
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             },
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "type": "block",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "vg_name": "ceph_vg1"
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:         }
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:     ],
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:     "2": [
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:         {
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "devices": [
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "/dev/loop5"
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             ],
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "lv_name": "ceph_lv2",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "lv_size": "21470642176",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "name": "ceph_lv2",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "tags": {
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.cluster_name": "ceph",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.crush_device_class": "",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.encrypted": "0",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.osd_id": "2",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.type": "block",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:                 "ceph.vdo": "0"
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             },
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "type": "block",
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:             "vg_name": "ceph_vg2"
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:         }
Oct 11 02:35:17 compute-0 funny_maxwell[446484]:     ]
Oct 11 02:35:17 compute-0 funny_maxwell[446484]: }
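This JSON document is the payload of the `ceph-volume lvm list --format json` call issued at 02:35:15: a map of OSD id to its logical volumes and tags. A small consumer, assuming the output has been captured to lvm_list.json (a hypothetical file name):

    import json

    with open("lvm_list.json") as fh:    # hypothetical capture of the JSON above
        lvm = json.load(fh)

    for osd_id, entries in sorted(lvm.items()):
        for entry in entries:
            devices = ",".join(entry["devices"])
            print(f"osd.{osd_id}: {entry['lv_path']} on {devices}")
    # osd.0: /dev/ceph_vg0/ceph_lv0 on /dev/loop3
    # osd.1: /dev/ceph_vg1/ceph_lv1 on /dev/loop4
    # osd.2: /dev/ceph_vg2/ceph_lv2 on /dev/loop5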
Oct 11 02:35:17 compute-0 systemd[1]: libpod-25871a59d2a25365a1c466cd21db06f3c85c9aac8376abfc26898875757f1b43.scope: Deactivated successfully.
Oct 11 02:35:17 compute-0 nova_compute[356901]: 2025-10-11 02:35:17.964 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:17 compute-0 podman[446467]: 2025-10-11 02:35:17.975407806 +0000 UTC m=+1.115103200 container died 25871a59d2a25365a1c466cd21db06f3c85c9aac8376abfc26898875757f1b43 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_maxwell, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 02:35:18 compute-0 systemd[1]: var-lib-containers-storage-overlay-64a6cbec25fa92bfc3bc8e70731f8ae4d775806472fd47d0aab1ab28c61faeaf-merged.mount: Deactivated successfully.
Oct 11 02:35:18 compute-0 podman[446467]: 2025-10-11 02:35:18.074544854 +0000 UTC m=+1.214240198 container remove 25871a59d2a25365a1c466cd21db06f3c85c9aac8376abfc26898875757f1b43 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_maxwell, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:35:18 compute-0 systemd[1]: libpod-conmon-25871a59d2a25365a1c466cd21db06f3c85c9aac8376abfc26898875757f1b43.scope: Deactivated successfully.
Oct 11 02:35:18 compute-0 sudo[446365]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:18 compute-0 sudo[446589]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:35:18 compute-0 sudo[446589]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:35:18 compute-0 sudo[446589]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:18 compute-0 sudo[446614]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:35:18 compute-0 sudo[446614]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:35:18 compute-0 sudo[446614]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:18 compute-0 sudo[446639]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:35:18 compute-0 sudo[446639]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:35:18 compute-0 sudo[446639]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1726: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:18 compute-0 sudo[446664]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:35:18 compute-0 sudo[446664]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:35:19 compute-0 podman[446727]: 2025-10-11 02:35:19.167860082 +0000 UTC m=+0.087441025 container create 3dae9fd07a0c5ecb750263088bb958f4c99c26787390ec9fb75b5645c72fc0f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_chebyshev, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:35:19 compute-0 podman[446727]: 2025-10-11 02:35:19.135819958 +0000 UTC m=+0.055400951 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:35:19 compute-0 systemd[1]: Started libpod-conmon-3dae9fd07a0c5ecb750263088bb958f4c99c26787390ec9fb75b5645c72fc0f7.scope.
Oct 11 02:35:19 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:35:19 compute-0 podman[446727]: 2025-10-11 02:35:19.312512733 +0000 UTC m=+0.232093686 container init 3dae9fd07a0c5ecb750263088bb958f4c99c26787390ec9fb75b5645c72fc0f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_chebyshev, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True)
Oct 11 02:35:19 compute-0 podman[446727]: 2025-10-11 02:35:19.325399667 +0000 UTC m=+0.244980630 container start 3dae9fd07a0c5ecb750263088bb958f4c99c26787390ec9fb75b5645c72fc0f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_chebyshev, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS)
Oct 11 02:35:19 compute-0 podman[446727]: 2025-10-11 02:35:19.331599989 +0000 UTC m=+0.251181002 container attach 3dae9fd07a0c5ecb750263088bb958f4c99c26787390ec9fb75b5645c72fc0f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_chebyshev, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0)
Oct 11 02:35:19 compute-0 vigorous_chebyshev[446743]: 167 167
Oct 11 02:35:19 compute-0 systemd[1]: libpod-3dae9fd07a0c5ecb750263088bb958f4c99c26787390ec9fb75b5645c72fc0f7.scope: Deactivated successfully.
Oct 11 02:35:19 compute-0 podman[446727]: 2025-10-11 02:35:19.335768005 +0000 UTC m=+0.255348968 container died 3dae9fd07a0c5ecb750263088bb958f4c99c26787390ec9fb75b5645c72fc0f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_chebyshev, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0)
Oct 11 02:35:19 compute-0 systemd[1]: var-lib-containers-storage-overlay-9c4d2c19e010b93751a70bbae0c9a5c1daad0f8948632928296d3600df6b36c2-merged.mount: Deactivated successfully.
Oct 11 02:35:19 compute-0 podman[446727]: 2025-10-11 02:35:19.408724347 +0000 UTC m=+0.328305280 container remove 3dae9fd07a0c5ecb750263088bb958f4c99c26787390ec9fb75b5645c72fc0f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_chebyshev, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0)
Oct 11 02:35:19 compute-0 systemd[1]: libpod-conmon-3dae9fd07a0c5ecb750263088bb958f4c99c26787390ec9fb75b5645c72fc0f7.scope: Deactivated successfully.
Oct 11 02:35:19 compute-0 podman[446768]: 2025-10-11 02:35:19.60975128 +0000 UTC m=+0.054366988 container create 51b2e5fc865eb8cfe0af56b6e6aae7bd57d3c974cac40e72678ac358d89a1a65 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_goldberg, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:35:19 compute-0 ceph-mon[191930]: pgmap v1726: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:19 compute-0 systemd[1]: Started libpod-conmon-51b2e5fc865eb8cfe0af56b6e6aae7bd57d3c974cac40e72678ac358d89a1a65.scope.
Oct 11 02:35:19 compute-0 podman[446768]: 2025-10-11 02:35:19.587687868 +0000 UTC m=+0.032303596 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:35:19 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:35:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/72bdf48714ad33b27f93c1fce57cffe40dee1903038b82e8fffef7a0dd8fe3e0/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:35:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/72bdf48714ad33b27f93c1fce57cffe40dee1903038b82e8fffef7a0dd8fe3e0/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:35:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/72bdf48714ad33b27f93c1fce57cffe40dee1903038b82e8fffef7a0dd8fe3e0/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:35:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/72bdf48714ad33b27f93c1fce57cffe40dee1903038b82e8fffef7a0dd8fe3e0/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:35:19 compute-0 podman[446768]: 2025-10-11 02:35:19.74351162 +0000 UTC m=+0.188127368 container init 51b2e5fc865eb8cfe0af56b6e6aae7bd57d3c974cac40e72678ac358d89a1a65 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_goldberg, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507)
Oct 11 02:35:19 compute-0 podman[446768]: 2025-10-11 02:35:19.758700532 +0000 UTC m=+0.203316240 container start 51b2e5fc865eb8cfe0af56b6e6aae7bd57d3c974cac40e72678ac358d89a1a65 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_goldberg, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:35:19 compute-0 podman[446768]: 2025-10-11 02:35:19.763785157 +0000 UTC m=+0.208400915 container attach 51b2e5fc865eb8cfe0af56b6e6aae7bd57d3c974cac40e72678ac358d89a1a65 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_goldberg, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:35:19 compute-0 nova_compute[356901]: 2025-10-11 02:35:19.842 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1727: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]: {
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:         "osd_id": 1,
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:         "type": "bluestore"
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:     },
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:         "osd_id": 2,
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:         "type": "bluestore"
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:     },
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:         "osd_id": 0,
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:         "type": "bluestore"
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]:     }
Oct 11 02:35:20 compute-0 suspicious_goldberg[446784]: }
Oct 11 02:35:20 compute-0 systemd[1]: libpod-51b2e5fc865eb8cfe0af56b6e6aae7bd57d3c974cac40e72678ac358d89a1a65.scope: Deactivated successfully.
Oct 11 02:35:20 compute-0 systemd[1]: libpod-51b2e5fc865eb8cfe0af56b6e6aae7bd57d3c974cac40e72678ac358d89a1a65.scope: Consumed 1.216s CPU time.
Oct 11 02:35:20 compute-0 podman[446768]: 2025-10-11 02:35:20.982086994 +0000 UTC m=+1.426702712 container died 51b2e5fc865eb8cfe0af56b6e6aae7bd57d3c974cac40e72678ac358d89a1a65 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_goldberg, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, ceph=True)
Oct 11 02:35:21 compute-0 systemd[1]: var-lib-containers-storage-overlay-72bdf48714ad33b27f93c1fce57cffe40dee1903038b82e8fffef7a0dd8fe3e0-merged.mount: Deactivated successfully.
Oct 11 02:35:21 compute-0 podman[446768]: 2025-10-11 02:35:21.07478106 +0000 UTC m=+1.519396778 container remove 51b2e5fc865eb8cfe0af56b6e6aae7bd57d3c974cac40e72678ac358d89a1a65 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_goldberg, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:35:21 compute-0 systemd[1]: libpod-conmon-51b2e5fc865eb8cfe0af56b6e6aae7bd57d3c974cac40e72678ac358d89a1a65.scope: Deactivated successfully.
Oct 11 02:35:21 compute-0 sudo[446664]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:35:21 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:35:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:35:21 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:35:21 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 25574262-edce-4a18-ab55-0c93d3ae3d40 does not exist
Oct 11 02:35:21 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 398173e4-55f7-4c05-afdf-412ac7fe0955 does not exist
Oct 11 02:35:21 compute-0 sudo[446829]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:35:21 compute-0 sudo[446829]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:35:21 compute-0 sudo[446829]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:21 compute-0 sudo[446854]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:35:21 compute-0 sudo[446854]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:35:21 compute-0 sudo[446854]: pam_unix(sudo:session): session closed for user root
Oct 11 02:35:21 compute-0 ceph-mon[191930]: pgmap v1727: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:21 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:35:21 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:35:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1728: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:22 compute-0 ceph-mon[191930]: pgmap v1728: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:22 compute-0 nova_compute[356901]: 2025-10-11 02:35:22.967 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:23 compute-0 podman[446879]: 2025-10-11 02:35:23.228917313 +0000 UTC m=+0.109174803 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=multipathd, container_name=multipathd, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3)
Oct 11 02:35:23 compute-0 podman[446880]: 2025-10-11 02:35:23.249878829 +0000 UTC m=+0.129907349 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 02:35:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1729: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:24 compute-0 nova_compute[356901]: 2025-10-11 02:35:24.847 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:25 compute-0 ceph-mon[191930]: pgmap v1729: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:35:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1730: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:35:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:35:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:35:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:35:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:35:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:35:27 compute-0 ceph-mon[191930]: pgmap v1730: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:35:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3189181857' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:35:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:35:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3189181857' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:35:27 compute-0 nova_compute[356901]: 2025-10-11 02:35:27.971 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1731: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3189181857' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:35:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3189181857' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:35:29 compute-0 ceph-mon[191930]: pgmap v1731: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:29 compute-0 podman[157119]: time="2025-10-11T02:35:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:35:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:35:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:35:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:35:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9056 "" "Go-http-client/1.1"
Oct 11 02:35:29 compute-0 nova_compute[356901]: 2025-10-11 02:35:29.851 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1732: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:35:31 compute-0 openstack_network_exporter[374316]: ERROR   02:35:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:35:31 compute-0 openstack_network_exporter[374316]: ERROR   02:35:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:35:31 compute-0 openstack_network_exporter[374316]: ERROR   02:35:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:35:31 compute-0 openstack_network_exporter[374316]: ERROR   02:35:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:35:31 compute-0 openstack_network_exporter[374316]: ERROR   02:35:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:35:31 compute-0 ceph-mon[191930]: pgmap v1732: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1733: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:32 compute-0 ceph-mon[191930]: pgmap v1733: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:32 compute-0 nova_compute[356901]: 2025-10-11 02:35:32.975 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:34 compute-0 podman[446919]: 2025-10-11 02:35:34.234114031 +0000 UTC m=+0.109012628 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:35:34 compute-0 podman[446917]: 2025-10-11 02:35:34.239983007 +0000 UTC m=+0.125148426 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 02:35:34 compute-0 podman[446918]: 2025-10-11 02:35:34.242039983 +0000 UTC m=+0.123081669 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, version=9.6, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.tags=minimal rhel9, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, com.redhat.component=ubi9-minimal-container, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, name=ubi9-minimal, maintainer=Red Hat, Inc., architecture=x86_64, vendor=Red Hat, Inc., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., managed_by=edpm_ansible, build-date=2025-08-20T13:12:41, config_id=edpm, distribution-scope=public, container_name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350, vcs-type=git, io.openshift.expose-services=, url=https://catalog.redhat.com/en/search?searchType=containers, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']})
Oct 11 02:35:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1734: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:34 compute-0 nova_compute[356901]: 2025-10-11 02:35:34.856 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:35 compute-0 ceph-mon[191930]: pgmap v1734: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:35:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1735: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:37 compute-0 ceph-mon[191930]: pgmap v1735: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:37 compute-0 nova_compute[356901]: 2025-10-11 02:35:37.978 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1736: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:39 compute-0 ceph-mon[191930]: pgmap v1736: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:39 compute-0 nova_compute[356901]: 2025-10-11 02:35:39.859 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1737: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:40 compute-0 ceph-mon[191930]: pgmap v1737: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:35:41 compute-0 podman[446978]: 2025-10-11 02:35:41.239004206 +0000 UTC m=+0.122780615 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, summary=Provides the latest release of Red Hat Universal Base Image 9., io.buildah.version=1.29.0, io.openshift.tags=base rhel9, maintainer=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, version=9.4, io.openshift.expose-services=, architecture=x86_64, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., build-date=2024-09-18T21:23:30, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, name=ubi9, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, managed_by=edpm_ansible, release=1214.1726694543, com.redhat.component=ubi9-container, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=kepler, release-0.7.12=)
Oct 11 02:35:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1738: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:42 compute-0 nova_compute[356901]: 2025-10-11 02:35:42.981 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:43 compute-0 sshd-session[446998]: Connection closed by 115.127.46.194 port 46450
Oct 11 02:35:43 compute-0 ceph-mon[191930]: pgmap v1738: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1739: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:44 compute-0 nova_compute[356901]: 2025-10-11 02:35:44.863 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:45 compute-0 ceph-mon[191930]: pgmap v1739: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:35:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1740: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:47 compute-0 sshd-session[444619]: Received disconnect from 38.102.83.70 port 54730:11: disconnected by user
Oct 11 02:35:47 compute-0 sshd-session[444619]: Disconnected from user zuul 38.102.83.70 port 54730
Oct 11 02:35:47 compute-0 sshd-session[444616]: pam_unix(sshd:session): session closed for user zuul
Oct 11 02:35:47 compute-0 systemd[1]: session-64.scope: Deactivated successfully.
Oct 11 02:35:47 compute-0 systemd[1]: session-64.scope: Consumed 5.626s CPU time.
Oct 11 02:35:47 compute-0 systemd-logind[804]: Session 64 logged out. Waiting for processes to exit.
Oct 11 02:35:47 compute-0 systemd-logind[804]: Removed session 64.
Oct 11 02:35:47 compute-0 podman[447000]: 2025-10-11 02:35:47.593431971 +0000 UTC m=+0.109862800 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, container_name=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:35:47 compute-0 podman[446999]: 2025-10-11 02:35:47.6042847 +0000 UTC m=+0.129696764 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:35:47 compute-0 ceph-mon[191930]: pgmap v1740: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:47 compute-0 podman[447040]: 2025-10-11 02:35:47.752995637 +0000 UTC m=+0.120116611 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, io.buildah.version=1.41.4, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_compute)
Oct 11 02:35:47 compute-0 podman[447041]: 2025-10-11 02:35:47.819449079 +0000 UTC m=+0.160104454 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_controller, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:35:47 compute-0 nova_compute[356901]: 2025-10-11 02:35:47.984 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1741: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:49 compute-0 ceph-mon[191930]: pgmap v1741: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:49 compute-0 nova_compute[356901]: 2025-10-11 02:35:49.865 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1742: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:35:51 compute-0 ceph-mon[191930]: pgmap v1742: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1743: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:52 compute-0 ceph-mon[191930]: pgmap v1743: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:52 compute-0 nova_compute[356901]: 2025-10-11 02:35:52.989 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:54 compute-0 podman[447084]: 2025-10-11 02:35:54.220693523 +0000 UTC m=+0.105257760 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, container_name=multipathd, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, config_id=multipathd)
Oct 11 02:35:54 compute-0 podman[447085]: 2025-10-11 02:35:54.23521827 +0000 UTC m=+0.113570178 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_id=iscsid, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:35:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1744: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:35:54.863 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:35:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:35:54.863 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:35:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:35:54.864 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:35:54 compute-0 nova_compute[356901]: 2025-10-11 02:35:54.871 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:55 compute-0 ceph-mon[191930]: pgmap v1744: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:35:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1745: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:35:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:35:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:35:56
Oct 11 02:35:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:35:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:35:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.meta', 'cephfs.cephfs.meta', 'default.rgw.log', 'backups', 'volumes', '.mgr', 'images', 'default.rgw.control', 'vms', '.rgw.root', 'cephfs.cephfs.data']
Oct 11 02:35:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:35:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:35:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:35:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:35:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:35:56 compute-0 nova_compute[356901]: 2025-10-11 02:35:56.714 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:35:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:35:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:35:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:35:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:35:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:35:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:35:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:35:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:35:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:35:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:35:57 compute-0 ceph-mon[191930]: pgmap v1745: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:57 compute-0 nova_compute[356901]: 2025-10-11 02:35:57.992 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:35:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1746: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:59 compute-0 ceph-mon[191930]: pgmap v1746: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:35:59 compute-0 podman[157119]: time="2025-10-11T02:35:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:35:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:35:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:35:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:35:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9056 "" "Go-http-client/1.1"
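
The two GET lines above are the podman_exporter scraping podman's libpod REST API over the podman socket (the HTTP access-log format with "@" as the client is how the socket-activated service logs it). A stdlib-only sketch of the same containers/json request, assuming the socket path /run/podman/podman.sock that the exporter mounts later in this log:

    import http.client
    import json
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        """http.client connection that dials a UNIX socket instead of TCP."""

        def __init__(self, path):
            super().__init__("localhost")
            self.socket_path = path

        def connect(self):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(self.socket_path)
            self.sock = sock

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    for c in json.loads(conn.getresponse().read()):
        print(c["Names"], c["State"])
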
Oct 11 02:35:59 compute-0 nova_compute[356901]: 2025-10-11 02:35:59.873 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1747: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:36:00 compute-0 nova_compute[356901]: 2025-10-11 02:36:00.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:36:00 compute-0 nova_compute[356901]: 2025-10-11 02:36:00.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:36:01 compute-0 openstack_network_exporter[374316]: ERROR   02:36:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:36:01 compute-0 openstack_network_exporter[374316]: ERROR   02:36:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:36:01 compute-0 openstack_network_exporter[374316]: ERROR   02:36:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:36:01 compute-0 openstack_network_exporter[374316]: ERROR   02:36:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:36:01 compute-0 openstack_network_exporter[374316]: ERROR   02:36:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
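
The exporter errors above are environmental rather than crashes: the dpif-netdev/pmd-* appctl commands only exist for the userspace (DPDK) datapath, and this node runs the kernel datapath; the ovsdb-server and ovn-northd probes fail simply because neither daemon runs on a compute node, so no control sockets exist. A sketch of the socket check the exporter is effectively doing (the conventional rundir socket names are <daemon>.<pid>.ctl; adjust paths if your rundir differs):

    import glob

    # An empty glob is exactly the "no control socket files found"
    # condition reported above.
    for pattern in ("/run/openvswitch/ovs-vswitchd.*.ctl",
                    "/run/openvswitch/ovsdb-server.*.ctl",
                    "/run/ovn/ovn-northd.*.ctl"):
        print(pattern, "->", glob.glob(pattern) or "no control socket found")
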
Oct 11 02:36:01 compute-0 ceph-mon[191930]: pgmap v1747: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1748: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:02 compute-0 nova_compute[356901]: 2025-10-11 02:36:02.995 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:03 compute-0 ceph-mon[191930]: pgmap v1748: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:03 compute-0 nova_compute[356901]: 2025-10-11 02:36:03.893 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:36:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1749: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:04 compute-0 ceph-mon[191930]: pgmap v1749: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:04 compute-0 nova_compute[356901]: 2025-10-11 02:36:04.875 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:04 compute-0 nova_compute[356901]: 2025-10-11 02:36:04.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:36:04 compute-0 nova_compute[356901]: 2025-10-11 02:36:04.895 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:36:04 compute-0 nova_compute[356901]: 2025-10-11 02:36:04.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:36:05 compute-0 nova_compute[356901]: 2025-10-11 02:36:05.140 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:36:05 compute-0 nova_compute[356901]: 2025-10-11 02:36:05.140 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:36:05 compute-0 nova_compute[356901]: 2025-10-11 02:36:05.140 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:36:05 compute-0 nova_compute[356901]: 2025-10-11 02:36:05.140 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:36:05 compute-0 podman[447124]: 2025-10-11 02:36:05.199315654 +0000 UTC m=+0.085874002 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., version=9.6, release=1755695350, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., config_id=edpm, io.openshift.expose-services=, com.redhat.component=ubi9-minimal-container, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, build-date=2025-08-20T13:12:41, distribution-scope=public, url=https://catalog.redhat.com/en/search?searchType=containers, io.buildah.version=1.33.7, name=ubi9-minimal, architecture=x86_64, vendor=Red Hat, Inc., io.openshift.tags=minimal rhel9, vcs-type=git, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, container_name=openstack_network_exporter, managed_by=edpm_ansible)
Oct 11 02:36:05 compute-0 podman[447125]: 2025-10-11 02:36:05.21229484 +0000 UTC m=+0.083407210 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:36:05 compute-0 podman[447123]: 2025-10-11 02:36:05.225407409 +0000 UTC m=+0.115953848 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:36:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:36:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1750: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:36:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
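
The pg_autoscaler figures above are reproducible: each pool's raw "pg target" is its share of used space times its bias times the cluster PG budget, and these lines are consistent with a budget of 300 (the default mon_target_pg_per_osd of 100 times an assumed 3 OSDs). The raw target is then quantized to a power of two with per-pool floors, which is why tiny ratios still land on 1, 16, or 32. A quick check against the logged values:

    # Usage ratios and biases copied from the pg_autoscaler lines above.
    POOLS = {
        ".mgr":               (7.185749983720779e-06, 1.0),   # logged target 0.00215...
        "vms":                (0.0005513950275118838, 1.0),   # logged target 0.16541...
        "images":             (0.00025334537995702286, 1.0),  # logged target 0.07600...
        "cephfs.cephfs.meta": (5.087256625643029e-07, 4.0),   # logged target 0.00061...
    }
    PG_BUDGET = 100 * 3  # mon_target_pg_per_osd (default) * OSD count (assumed)

    for pool, (ratio, bias) in POOLS.items():
        print(f"{pool}: pg target {ratio * bias * PG_BUDGET}")
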
Oct 11 02:36:07 compute-0 nova_compute[356901]: 2025-10-11 02:36:07.107 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:36:07 compute-0 nova_compute[356901]: 2025-10-11 02:36:07.124 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:36:07 compute-0 nova_compute[356901]: 2025-10-11 02:36:07.124 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
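
The heal pass above refreshed one instance's info cache; the network_info it logs is plain JSON, so the fixed and floating addresses are easy to extract. A minimal parse over an abbreviated copy of that payload:

    import json

    # Abbreviated subset of the network_info entry logged above.
    payload = """[{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93",
      "address": "fa:16:3e:cc:aa:96",
      "network": {"subnets": [{"ips": [{"address": "192.168.0.236",
        "floating_ips": [{"address": "192.168.122.201"}]}]}]}}]"""

    for vif in json.loads(payload):
        for subnet in vif["network"]["subnets"]:
            for ip in subnet["ips"]:
                fips = [f["address"] for f in ip.get("floating_ips", [])]
                print(vif["id"], ip["address"], "->", fips or "no floating IP")
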
Oct 11 02:36:07 compute-0 nova_compute[356901]: 2025-10-11 02:36:07.125 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:36:07 compute-0 nova_compute[356901]: 2025-10-11 02:36:07.125 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:36:07 compute-0 nova_compute[356901]: 2025-10-11 02:36:07.126 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:36:07 compute-0 nova_compute[356901]: 2025-10-11 02:36:07.126 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:36:07 compute-0 nova_compute[356901]: 2025-10-11 02:36:07.155 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:36:07 compute-0 nova_compute[356901]: 2025-10-11 02:36:07.156 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:36:07 compute-0 nova_compute[356901]: 2025-10-11 02:36:07.156 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:36:07 compute-0 nova_compute[356901]: 2025-10-11 02:36:07.157 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:36:07 compute-0 nova_compute[356901]: 2025-10-11 02:36:07.157 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:36:07 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:36:07 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/294272532' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:36:07 compute-0 nova_compute[356901]: 2025-10-11 02:36:07.599 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.442s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
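
The resource audit shells out to `ceph df` exactly as logged (and does so again about a second later for the provider inventory pass below), then parses the JSON to size the RBD-backed disk capacity. A sketch of running and parsing the same command, assuming a reachable cluster and the client.openstack keyring; cluster totals live under the "stats" key of ceph df's JSON:

    import json
    import subprocess

    out = subprocess.run(
        ["ceph", "df", "--format=json", "--id", "openstack",
         "--conf", "/etc/ceph/ceph.conf"],
        capture_output=True, text=True, check=True).stdout
    stats = json.loads(out)["stats"]
    print("total:", stats["total_bytes"], "avail:", stats["total_avail_bytes"])
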
Oct 11 02:36:07 compute-0 ceph-mon[191930]: pgmap v1750: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:07 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/294272532' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:36:07 compute-0 nova_compute[356901]: 2025-10-11 02:36:07.682 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:36:07 compute-0 nova_compute[356901]: 2025-10-11 02:36:07.682 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:36:07 compute-0 nova_compute[356901]: 2025-10-11 02:36:07.683 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:36:07 compute-0 nova_compute[356901]: 2025-10-11 02:36:07.997 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:08 compute-0 nova_compute[356901]: 2025-10-11 02:36:08.075 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:36:08 compute-0 nova_compute[356901]: 2025-10-11 02:36:08.076 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3862MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:36:08 compute-0 nova_compute[356901]: 2025-10-11 02:36:08.077 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:36:08 compute-0 nova_compute[356901]: 2025-10-11 02:36:08.077 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:36:08 compute-0 nova_compute[356901]: 2025-10-11 02:36:08.215 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:36:08 compute-0 nova_compute[356901]: 2025-10-11 02:36:08.216 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:36:08 compute-0 nova_compute[356901]: 2025-10-11 02:36:08.216 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:36:08 compute-0 nova_compute[356901]: 2025-10-11 02:36:08.328 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:36:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1751: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:08 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:36:08 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1681378626' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:36:08 compute-0 nova_compute[356901]: 2025-10-11 02:36:08.778 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.451s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:36:08 compute-0 nova_compute[356901]: 2025-10-11 02:36:08.787 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:36:08 compute-0 nova_compute[356901]: 2025-10-11 02:36:08.809 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
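
The inventory record above is what the resource tracker hands to placement; schedulable capacity per resource class is (total - reserved) * allocation_ratio, which is why 8 physical cores can back 32 VCPU allocations here. Worked against the logged numbers (a sketch of the standard placement formula, not nova's own code):

    def usable(total, reserved, allocation_ratio):
        # Placement capacity: (total - reserved) * allocation_ratio
        return (total - reserved) * allocation_ratio

    print(usable(8, 0, 4.0))       # 32.0 schedulable VCPUs
    print(usable(7680, 512, 1.0))  # 7168.0 MB of RAM
    print(usable(59, 1, 0.9))      # 52.2 GB of disk
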
Oct 11 02:36:08 compute-0 nova_compute[356901]: 2025-10-11 02:36:08.813 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:36:08 compute-0 nova_compute[356901]: 2025-10-11 02:36:08.814 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.737s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:36:09 compute-0 ceph-mon[191930]: pgmap v1751: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:09 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1681378626' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:36:09 compute-0 nova_compute[356901]: 2025-10-11 02:36:09.880 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:09 compute-0 nova_compute[356901]: 2025-10-11 02:36:09.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:36:09 compute-0 nova_compute[356901]: 2025-10-11 02:36:09.923 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:36:09 compute-0 nova_compute[356901]: 2025-10-11 02:36:09.924 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_incomplete_migrations run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:36:09 compute-0 nova_compute[356901]: 2025-10-11 02:36:09.924 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances with incomplete migration  _cleanup_incomplete_migrations /usr/lib/python3.9/site-packages/nova/compute/manager.py:11183
Oct 11 02:36:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1752: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:36:11 compute-0 ceph-mon[191930]: pgmap v1752: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:12 compute-0 podman[447226]: 2025-10-11 02:36:12.197258208 +0000 UTC m=+0.095949791 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, version=9.4, summary=Provides the latest release of Red Hat Universal Base Image 9., io.k8s.display-name=Red Hat Universal Base Image 9, vcs-type=git, release=1214.1726694543, distribution-scope=public, build-date=2024-09-18T21:23:30, container_name=kepler, release-0.7.12=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.buildah.version=1.29.0, io.openshift.expose-services=, name=ubi9, maintainer=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, com.redhat.component=ubi9-container, io.openshift.tags=base rhel9, vendor=Red Hat, Inc., architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:36:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1753: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:13 compute-0 nova_compute[356901]: 2025-10-11 02:36:13.000 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:13 compute-0 ceph-mon[191930]: pgmap v1753: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1754: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:14 compute-0 nova_compute[356901]: 2025-10-11 02:36:14.884 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:15 compute-0 ceph-mon[191930]: pgmap v1754: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:36:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1755: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:16 compute-0 ceph-mon[191930]: pgmap v1755: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:17 compute-0 nova_compute[356901]: 2025-10-11 02:36:17.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_expired_console_auth_tokens run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:36:18 compute-0 nova_compute[356901]: 2025-10-11 02:36:18.003 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:18 compute-0 podman[447245]: 2025-10-11 02:36:18.225711007 +0000 UTC m=+0.108243109 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:36:18 compute-0 podman[447248]: 2025-10-11 02:36:18.272619098 +0000 UTC m=+0.136507261 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']})
Oct 11 02:36:18 compute-0 podman[447247]: 2025-10-11 02:36:18.277480001 +0000 UTC m=+0.147993919 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true)
Oct 11 02:36:18 compute-0 podman[447246]: 2025-10-11 02:36:18.303410513 +0000 UTC m=+0.179751276 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, container_name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, tcib_managed=true, config_id=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:36:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1756: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:19 compute-0 ceph-mon[191930]: pgmap v1756: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:19 compute-0 nova_compute[356901]: 2025-10-11 02:36:19.886 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:20 compute-0 nova_compute[356901]: 2025-10-11 02:36:20.325 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_running_deleted_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:36:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1757: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:36:21 compute-0 sudo[447328]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:36:21 compute-0 sudo[447328]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:21 compute-0 sudo[447328]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:21 compute-0 ceph-mon[191930]: pgmap v1757: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:21 compute-0 sudo[447353]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:36:21 compute-0 sudo[447353]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:21 compute-0 sudo[447353]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:21 compute-0 sudo[447378]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:36:21 compute-0 sudo[447378]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:21 compute-0 sudo[447378]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:22 compute-0 sudo[447403]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host
Oct 11 02:36:22 compute-0 sudo[447403]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:22 compute-0 sudo[447403]: pam_unix(sudo:session): session closed for user root
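
The sudo triplets above are the cephadm mgr module probing this host over ssh before each operation: a cheap /bin/true connectivity test, interpreter discovery with `which python3`, another /bin/true, then the real payload (here the cephadm binary's check-host, and gather-facts just below). A hypothetical reproduction of the probe sequence, with the commands copied from the log (requires passwordless sudo, as the ceph-admin account has here):

    import subprocess

    # cephadm-style host probes: connectivity test, then locate python3.
    for cmd in (["sudo", "/bin/true"],
                ["sudo", "/bin/which", "python3"]):
        subprocess.run(cmd, check=True)
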
Oct 11 02:36:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:36:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:36:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:36:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:36:22 compute-0 sudo[447447]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:36:22 compute-0 sudo[447447]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:22 compute-0 sudo[447447]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:22 compute-0 sudo[447472]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:36:22 compute-0 sudo[447472]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:22 compute-0 sudo[447472]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:22 compute-0 sudo[447497]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:36:22 compute-0 sudo[447497]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:22 compute-0 sudo[447497]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1758: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:22 compute-0 sudo[447522]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:36:22 compute-0 sudo[447522]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:23 compute-0 nova_compute[356901]: 2025-10-11 02:36:23.006 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:36:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:36:23 compute-0 ceph-mon[191930]: pgmap v1758: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:23 compute-0 sudo[447522]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:36:23 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:36:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:36:23 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:36:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:36:23 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:36:23 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 5adf23e8-32d7-486e-b6d7-3d32a640693f does not exist
Oct 11 02:36:23 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 5a5ebadc-1780-4822-95e8-fbb3fb601721 does not exist
Oct 11 02:36:23 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 7022a026-f59d-4f98-9902-a7ba37d747c8 does not exist
Oct 11 02:36:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:36:23 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:36:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:36:23 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:36:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:36:23 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:36:23 compute-0 sudo[447577]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:36:23 compute-0 sudo[447577]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:23 compute-0 sudo[447577]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:23 compute-0 sudo[447602]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:36:23 compute-0 sudo[447602]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:23 compute-0 sudo[447602]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:23 compute-0 sudo[447627]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:36:23 compute-0 sudo[447627]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:23 compute-0 sudo[447627]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:23 compute-0 sudo[447652]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:36:23 compute-0 sudo[447652]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:36:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:36:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:36:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:36:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:36:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:36:24 compute-0 podman[447716]: 2025-10-11 02:36:24.493692913 +0000 UTC m=+0.067345424 container create 712d725de92e8a3b79aa8c8beb9048a1c74336b3177ac6f12e5d6cae6407633c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_ramanujan, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:36:24 compute-0 podman[447716]: 2025-10-11 02:36:24.459848269 +0000 UTC m=+0.033500770 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:36:24 compute-0 systemd[1]: Started libpod-conmon-712d725de92e8a3b79aa8c8beb9048a1c74336b3177ac6f12e5d6cae6407633c.scope.
Oct 11 02:36:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1759: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:24 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:36:24 compute-0 podman[447716]: 2025-10-11 02:36:24.631285127 +0000 UTC m=+0.204937638 container init 712d725de92e8a3b79aa8c8beb9048a1c74336b3177ac6f12e5d6cae6407633c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_ramanujan, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 02:36:24 compute-0 podman[447716]: 2025-10-11 02:36:24.643916368 +0000 UTC m=+0.217568869 container start 712d725de92e8a3b79aa8c8beb9048a1c74336b3177ac6f12e5d6cae6407633c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_ramanujan, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507)
Oct 11 02:36:24 compute-0 podman[447716]: 2025-10-11 02:36:24.648931243 +0000 UTC m=+0.222583754 container attach 712d725de92e8a3b79aa8c8beb9048a1c74336b3177ac6f12e5d6cae6407633c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_ramanujan, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:36:24 compute-0 stoic_ramanujan[447737]: 167 167
Oct 11 02:36:24 compute-0 systemd[1]: libpod-712d725de92e8a3b79aa8c8beb9048a1c74336b3177ac6f12e5d6cae6407633c.scope: Deactivated successfully.
Oct 11 02:36:24 compute-0 podman[447716]: 2025-10-11 02:36:24.659584759 +0000 UTC m=+0.233237240 container died 712d725de92e8a3b79aa8c8beb9048a1c74336b3177ac6f12e5d6cae6407633c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_ramanujan, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 02:36:24 compute-0 systemd[1]: var-lib-containers-storage-overlay-26edbaee4eabfb3640e6c853c60846382aa97a94f4b4f6e60e611f5118859f97-merged.mount: Deactivated successfully.
Oct 11 02:36:24 compute-0 podman[447732]: 2025-10-11 02:36:24.717053566 +0000 UTC m=+0.135115202 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, config_id=iscsid, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=iscsid, io.buildah.version=1.41.3, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']})
Oct 11 02:36:24 compute-0 podman[447716]: 2025-10-11 02:36:24.745743484 +0000 UTC m=+0.319396015 container remove 712d725de92e8a3b79aa8c8beb9048a1c74336b3177ac6f12e5d6cae6407633c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=stoic_ramanujan, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 02:36:24 compute-0 systemd[1]: libpod-conmon-712d725de92e8a3b79aa8c8beb9048a1c74336b3177ac6f12e5d6cae6407633c.scope: Deactivated successfully.
Oct 11 02:36:24 compute-0 podman[447729]: 2025-10-11 02:36:24.764357313 +0000 UTC m=+0.191504566 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, config_id=multipathd, container_name=multipathd, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:36:24 compute-0 nova_compute[356901]: 2025-10-11 02:36:24.889 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:24 compute-0 podman[447792]: 2025-10-11 02:36:24.963693459 +0000 UTC m=+0.067173093 container create a032f67f0fdf78830e2b301affb7e7355e1d5fd969e85b77c5950e703d7ded82 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_hodgkin, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3)
Oct 11 02:36:25 compute-0 podman[447792]: 2025-10-11 02:36:24.932420358 +0000 UTC m=+0.035900092 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:36:25 compute-0 systemd[1]: Started libpod-conmon-a032f67f0fdf78830e2b301affb7e7355e1d5fd969e85b77c5950e703d7ded82.scope.
Oct 11 02:36:25 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:36:25 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ea780d7e06014c32d3a73db4d45594a4b4950136eb87182ee01ed438da966637/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:36:25 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ea780d7e06014c32d3a73db4d45594a4b4950136eb87182ee01ed438da966637/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:36:25 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ea780d7e06014c32d3a73db4d45594a4b4950136eb87182ee01ed438da966637/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:36:25 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ea780d7e06014c32d3a73db4d45594a4b4950136eb87182ee01ed438da966637/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:36:25 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ea780d7e06014c32d3a73db4d45594a4b4950136eb87182ee01ed438da966637/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:36:25 compute-0 podman[447792]: 2025-10-11 02:36:25.126120251 +0000 UTC m=+0.229599905 container init a032f67f0fdf78830e2b301affb7e7355e1d5fd969e85b77c5950e703d7ded82 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_hodgkin, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:36:25 compute-0 podman[447792]: 2025-10-11 02:36:25.148552419 +0000 UTC m=+0.252032083 container start a032f67f0fdf78830e2b301affb7e7355e1d5fd969e85b77c5950e703d7ded82 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_hodgkin, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 02:36:25 compute-0 podman[447792]: 2025-10-11 02:36:25.155910473 +0000 UTC m=+0.259390107 container attach a032f67f0fdf78830e2b301affb7e7355e1d5fd969e85b77c5950e703d7ded82 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_hodgkin, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True)
Oct 11 02:36:25 compute-0 ceph-mon[191930]: pgmap v1759: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:36:26 compute-0 lucid_hodgkin[447808]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:36:26 compute-0 lucid_hodgkin[447808]: --> relative data size: 1.0
Oct 11 02:36:26 compute-0 lucid_hodgkin[447808]: --> All data devices are unavailable
Oct 11 02:36:26 compute-0 systemd[1]: libpod-a032f67f0fdf78830e2b301affb7e7355e1d5fd969e85b77c5950e703d7ded82.scope: Deactivated successfully.
Oct 11 02:36:26 compute-0 podman[447792]: 2025-10-11 02:36:26.389058625 +0000 UTC m=+1.492538279 container died a032f67f0fdf78830e2b301affb7e7355e1d5fd969e85b77c5950e703d7ded82 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_hodgkin, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:36:26 compute-0 systemd[1]: libpod-a032f67f0fdf78830e2b301affb7e7355e1d5fd969e85b77c5950e703d7ded82.scope: Consumed 1.187s CPU time.
Oct 11 02:36:26 compute-0 systemd[1]: var-lib-containers-storage-overlay-ea780d7e06014c32d3a73db4d45594a4b4950136eb87182ee01ed438da966637-merged.mount: Deactivated successfully.
Oct 11 02:36:26 compute-0 podman[447792]: 2025-10-11 02:36:26.480072192 +0000 UTC m=+1.583551866 container remove a032f67f0fdf78830e2b301affb7e7355e1d5fd969e85b77c5950e703d7ded82 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_hodgkin, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3)
Oct 11 02:36:26 compute-0 systemd[1]: libpod-conmon-a032f67f0fdf78830e2b301affb7e7355e1d5fd969e85b77c5950e703d7ded82.scope: Deactivated successfully.
Oct 11 02:36:26 compute-0 sudo[447652]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:36:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:36:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1760: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:36:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:36:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:36:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:36:26 compute-0 sudo[447850]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:36:26 compute-0 sudo[447850]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:26 compute-0 sudo[447850]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:26 compute-0 sudo[447875]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:36:26 compute-0 sudo[447875]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:26 compute-0 sudo[447875]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:26 compute-0 sudo[447900]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:36:26 compute-0 sudo[447900]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:26 compute-0 sudo[447900]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:26 compute-0 nova_compute[356901]: 2025-10-11 02:36:26.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._run_pending_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:36:26 compute-0 nova_compute[356901]: 2025-10-11 02:36:26.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11145
Oct 11 02:36:26 compute-0 nova_compute[356901]: 2025-10-11 02:36:26.920 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] There are 0 instances to clean _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11154
Oct 11 02:36:26 compute-0 sudo[447925]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:36:26 compute-0 sudo[447925]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:27 compute-0 podman[447987]: 2025-10-11 02:36:27.569651543 +0000 UTC m=+0.107734673 container create 8147beceac46440e4fd360e8a2f7c8d9ce683a913183a9e6edbb203d42d5de9b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_bell, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef)
Oct 11 02:36:27 compute-0 podman[447987]: 2025-10-11 02:36:27.504390326 +0000 UTC m=+0.042473506 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:36:27 compute-0 systemd[1]: Started libpod-conmon-8147beceac46440e4fd360e8a2f7c8d9ce683a913183a9e6edbb203d42d5de9b.scope.
Oct 11 02:36:27 compute-0 ceph-mon[191930]: pgmap v1760: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:27 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:36:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:36:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1611063380' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:36:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:36:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1611063380' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:36:27 compute-0 podman[447987]: 2025-10-11 02:36:27.706535058 +0000 UTC m=+0.244618218 container init 8147beceac46440e4fd360e8a2f7c8d9ce683a913183a9e6edbb203d42d5de9b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_bell, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:36:27 compute-0 podman[447987]: 2025-10-11 02:36:27.71685517 +0000 UTC m=+0.254938310 container start 8147beceac46440e4fd360e8a2f7c8d9ce683a913183a9e6edbb203d42d5de9b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_bell, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 02:36:27 compute-0 podman[447987]: 2025-10-11 02:36:27.721986106 +0000 UTC m=+0.260069256 container attach 8147beceac46440e4fd360e8a2f7c8d9ce683a913183a9e6edbb203d42d5de9b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_bell, org.label-schema.build-date=20250507, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:36:27 compute-0 strange_bell[448003]: 167 167
Oct 11 02:36:27 compute-0 systemd[1]: libpod-8147beceac46440e4fd360e8a2f7c8d9ce683a913183a9e6edbb203d42d5de9b.scope: Deactivated successfully.
Oct 11 02:36:27 compute-0 podman[447987]: 2025-10-11 02:36:27.72696865 +0000 UTC m=+0.265051820 container died 8147beceac46440e4fd360e8a2f7c8d9ce683a913183a9e6edbb203d42d5de9b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_bell, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:36:27 compute-0 systemd[1]: var-lib-containers-storage-overlay-f8edbd76b741c15a7b9a7896e68639923d0920b6b6ecf46e92759df7cc1e6307-merged.mount: Deactivated successfully.
Oct 11 02:36:27 compute-0 podman[447987]: 2025-10-11 02:36:27.785892355 +0000 UTC m=+0.323975495 container remove 8147beceac46440e4fd360e8a2f7c8d9ce683a913183a9e6edbb203d42d5de9b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_bell, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:36:27 compute-0 systemd[1]: libpod-conmon-8147beceac46440e4fd360e8a2f7c8d9ce683a913183a9e6edbb203d42d5de9b.scope: Deactivated successfully.
Oct 11 02:36:28 compute-0 nova_compute[356901]: 2025-10-11 02:36:28.010 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:28 compute-0 podman[448026]: 2025-10-11 02:36:28.02879504 +0000 UTC m=+0.092732560 container create 5533f478bff1d41c8972c963cc407c95ceeb183e4bb85cc2cb954fcfe386e8bb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_cohen, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Oct 11 02:36:28 compute-0 podman[448026]: 2025-10-11 02:36:27.969898805 +0000 UTC m=+0.033836355 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:36:28 compute-0 systemd[1]: Started libpod-conmon-5533f478bff1d41c8972c963cc407c95ceeb183e4bb85cc2cb954fcfe386e8bb.scope.
Oct 11 02:36:28 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:36:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/52f71dd2dad46480032904d110307cd6f1d4edb96f1aa8fafb062ca328728422/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:36:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/52f71dd2dad46480032904d110307cd6f1d4edb96f1aa8fafb062ca328728422/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:36:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/52f71dd2dad46480032904d110307cd6f1d4edb96f1aa8fafb062ca328728422/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:36:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/52f71dd2dad46480032904d110307cd6f1d4edb96f1aa8fafb062ca328728422/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:36:28 compute-0 podman[448026]: 2025-10-11 02:36:28.152544797 +0000 UTC m=+0.216482337 container init 5533f478bff1d41c8972c963cc407c95ceeb183e4bb85cc2cb954fcfe386e8bb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_cohen, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:36:28 compute-0 podman[448026]: 2025-10-11 02:36:28.162625196 +0000 UTC m=+0.226562716 container start 5533f478bff1d41c8972c963cc407c95ceeb183e4bb85cc2cb954fcfe386e8bb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_cohen, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 02:36:28 compute-0 podman[448026]: 2025-10-11 02:36:28.166441965 +0000 UTC m=+0.230379485 container attach 5533f478bff1d41c8972c963cc407c95ceeb183e4bb85cc2cb954fcfe386e8bb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_cohen, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 02:36:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1761: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1611063380' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:36:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1611063380' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:36:29 compute-0 fervent_cohen[448042]: {
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:     "0": [
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:         {
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "devices": [
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "/dev/loop3"
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             ],
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "lv_name": "ceph_lv0",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "lv_size": "21470642176",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "name": "ceph_lv0",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "tags": {
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.cluster_name": "ceph",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.crush_device_class": "",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.encrypted": "0",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.osd_id": "0",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.type": "block",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.vdo": "0"
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             },
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "type": "block",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "vg_name": "ceph_vg0"
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:         }
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:     ],
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:     "1": [
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:         {
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "devices": [
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "/dev/loop4"
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             ],
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "lv_name": "ceph_lv1",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "lv_size": "21470642176",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "name": "ceph_lv1",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "tags": {
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.cluster_name": "ceph",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.crush_device_class": "",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.encrypted": "0",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.osd_id": "1",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.type": "block",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.vdo": "0"
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             },
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "type": "block",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "vg_name": "ceph_vg1"
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:         }
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:     ],
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:     "2": [
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:         {
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "devices": [
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "/dev/loop5"
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             ],
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "lv_name": "ceph_lv2",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "lv_size": "21470642176",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "name": "ceph_lv2",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "tags": {
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.cluster_name": "ceph",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.crush_device_class": "",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.encrypted": "0",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.osd_id": "2",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.type": "block",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:                 "ceph.vdo": "0"
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             },
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "type": "block",
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:             "vg_name": "ceph_vg2"
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:         }
Oct 11 02:36:29 compute-0 fervent_cohen[448042]:     ]
Oct 11 02:36:29 compute-0 fervent_cohen[448042]: }
Oct 11 02:36:29 compute-0 systemd[1]: libpod-5533f478bff1d41c8972c963cc407c95ceeb183e4bb85cc2cb954fcfe386e8bb.scope: Deactivated successfully.
Oct 11 02:36:29 compute-0 podman[448051]: 2025-10-11 02:36:29.181979235 +0000 UTC m=+0.043729322 container died 5533f478bff1d41c8972c963cc407c95ceeb183e4bb85cc2cb954fcfe386e8bb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_cohen, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:36:29 compute-0 systemd[1]: var-lib-containers-storage-overlay-52f71dd2dad46480032904d110307cd6f1d4edb96f1aa8fafb062ca328728422-merged.mount: Deactivated successfully.
Oct 11 02:36:29 compute-0 podman[448051]: 2025-10-11 02:36:29.279466955 +0000 UTC m=+0.141216952 container remove 5533f478bff1d41c8972c963cc407c95ceeb183e4bb85cc2cb954fcfe386e8bb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_cohen, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 02:36:29 compute-0 systemd[1]: libpod-conmon-5533f478bff1d41c8972c963cc407c95ceeb183e4bb85cc2cb954fcfe386e8bb.scope: Deactivated successfully.
Oct 11 02:36:29 compute-0 sudo[447925]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:29 compute-0 sudo[448066]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:36:29 compute-0 sudo[448066]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:29 compute-0 sudo[448066]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:29 compute-0 sudo[448091]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:36:29 compute-0 sudo[448091]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:29 compute-0 sudo[448091]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:29 compute-0 ceph-mon[191930]: pgmap v1761: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:29 compute-0 sudo[448116]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:36:29 compute-0 sudo[448116]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:29 compute-0 sudo[448116]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:29 compute-0 podman[157119]: time="2025-10-11T02:36:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:36:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:36:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:36:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:36:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9055 "" "Go-http-client/1.1"
Oct 11 02:36:29 compute-0 sudo[448141]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:36:29 compute-0 sudo[448141]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:29 compute-0 nova_compute[356901]: 2025-10-11 02:36:29.891 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:30 compute-0 podman[448203]: 2025-10-11 02:36:30.381085574 +0000 UTC m=+0.104434200 container create 53e48086f5829bfa0027e9744949a16e889fd857a2c09c602cdd19e49db79b93 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dreamy_cray, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 02:36:30 compute-0 podman[448203]: 2025-10-11 02:36:30.328813918 +0000 UTC m=+0.052162634 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:36:30 compute-0 systemd[1]: Started libpod-conmon-53e48086f5829bfa0027e9744949a16e889fd857a2c09c602cdd19e49db79b93.scope.
Oct 11 02:36:30 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:36:30 compute-0 podman[448203]: 2025-10-11 02:36:30.486491476 +0000 UTC m=+0.209840182 container init 53e48086f5829bfa0027e9744949a16e889fd857a2c09c602cdd19e49db79b93 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dreamy_cray, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:36:30 compute-0 podman[448203]: 2025-10-11 02:36:30.495808931 +0000 UTC m=+0.219157557 container start 53e48086f5829bfa0027e9744949a16e889fd857a2c09c602cdd19e49db79b93 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dreamy_cray, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:36:30 compute-0 dreamy_cray[448219]: 167 167
Oct 11 02:36:30 compute-0 podman[448203]: 2025-10-11 02:36:30.501333418 +0000 UTC m=+0.224682134 container attach 53e48086f5829bfa0027e9744949a16e889fd857a2c09c602cdd19e49db79b93 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dreamy_cray, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:36:30 compute-0 systemd[1]: libpod-53e48086f5829bfa0027e9744949a16e889fd857a2c09c602cdd19e49db79b93.scope: Deactivated successfully.
Oct 11 02:36:30 compute-0 podman[448203]: 2025-10-11 02:36:30.503904344 +0000 UTC m=+0.227252960 container died 53e48086f5829bfa0027e9744949a16e889fd857a2c09c602cdd19e49db79b93 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dreamy_cray, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 02:36:30 compute-0 systemd[1]: var-lib-containers-storage-overlay-9ca5dc077357d02a5a36e8edfeee7980d825dd8a4352e4ff7e35d65c8181235b-merged.mount: Deactivated successfully.
Oct 11 02:36:30 compute-0 podman[448203]: 2025-10-11 02:36:30.562567357 +0000 UTC m=+0.285915983 container remove 53e48086f5829bfa0027e9744949a16e889fd857a2c09c602cdd19e49db79b93 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dreamy_cray, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:36:30 compute-0 systemd[1]: libpod-conmon-53e48086f5829bfa0027e9744949a16e889fd857a2c09c602cdd19e49db79b93.scope: Deactivated successfully.
Oct 11 02:36:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1762: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:30 compute-0 podman[448241]: 2025-10-11 02:36:30.811902032 +0000 UTC m=+0.063276139 container create b29fcdde804cc580629e9275c0e75683fc0bc68d69c041b606de3530042a85a3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_ritchie, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0)
Oct 11 02:36:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:36:30 compute-0 podman[448241]: 2025-10-11 02:36:30.789002928 +0000 UTC m=+0.040377115 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:36:30 compute-0 systemd[1]: Started libpod-conmon-b29fcdde804cc580629e9275c0e75683fc0bc68d69c041b606de3530042a85a3.scope.
Oct 11 02:36:30 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:36:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/05182cf7e96bde925467bb59c9b39e1137587bb819debcdae0278e79d4969c2c/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:36:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/05182cf7e96bde925467bb59c9b39e1137587bb819debcdae0278e79d4969c2c/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:36:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/05182cf7e96bde925467bb59c9b39e1137587bb819debcdae0278e79d4969c2c/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:36:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/05182cf7e96bde925467bb59c9b39e1137587bb819debcdae0278e79d4969c2c/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
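The 0x7fffffff in the four xfs warnings above is the 32-bit time_t ceiling; a one-liner confirms the date the kernel is warning about (Python used purely for illustration):

    from datetime import datetime, timezone

    # 0x7fffffff seconds after the Unix epoch is the classic Y2038 limit
    # that the kernel's "supports timestamps until 2038" message refers to.
    print(datetime.fromtimestamp(0x7fffffff, tz=timezone.utc))
    # -> 2038-01-19 03:14:07+00:00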
Oct 11 02:36:30 compute-0 podman[448241]: 2025-10-11 02:36:30.999722669 +0000 UTC m=+0.251096816 container init b29fcdde804cc580629e9275c0e75683fc0bc68d69c041b606de3530042a85a3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_ritchie, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0)
Oct 11 02:36:31 compute-0 podman[448241]: 2025-10-11 02:36:31.022172478 +0000 UTC m=+0.273546595 container start b29fcdde804cc580629e9275c0e75683fc0bc68d69c041b606de3530042a85a3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_ritchie, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 02:36:31 compute-0 podman[448241]: 2025-10-11 02:36:31.027151006 +0000 UTC m=+0.278525153 container attach b29fcdde804cc580629e9275c0e75683fc0bc68d69c041b606de3530042a85a3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_ritchie, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef)
Oct 11 02:36:31 compute-0 openstack_network_exporter[374316]: ERROR   02:36:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:36:31 compute-0 openstack_network_exporter[374316]: ERROR   02:36:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:36:31 compute-0 openstack_network_exporter[374316]: ERROR   02:36:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:36:31 compute-0 openstack_network_exporter[374316]: ERROR   02:36:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath

Oct 11 02:36:31 compute-0 openstack_network_exporter[374316]: ERROR   02:36:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:36:31 compute-0 ceph-mon[191930]: pgmap v1762: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:32 compute-0 cool_ritchie[448257]: {
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:         "osd_id": 1,
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:         "type": "bluestore"
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:     },
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:         "osd_id": 2,
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:         "type": "bluestore"
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:     },
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:         "osd_id": 0,
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:         "type": "bluestore"
Oct 11 02:36:32 compute-0 cool_ritchie[448257]:     }
Oct 11 02:36:32 compute-0 cool_ritchie[448257]: }
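The JSON block printed by the cool_ritchie container above is an OSD inventory keyed by osd_uuid (its shape matches the JSON that ceph-volume's list commands emit, which cephadm runs in short-lived containers like this one). A minimal sketch of turning it into an osd_id -> device map, assuming the block has been captured to a hypothetical file osd_list.json:

    import json

    # Top-level keys are osd_uuid values; each entry describes one
    # bluestore OSD (ceph_fsid, device, osd_id, osd_uuid, type).
    with open("osd_list.json") as fh:  # hypothetical capture of the output above
        inventory = json.load(fh)

    devices = {entry["osd_id"]: entry["device"] for entry in inventory.values()}
    for osd_id in sorted(devices):
        print(f"osd.{osd_id} -> {devices[osd_id]}")
    # osd.0 -> /dev/mapper/ceph_vg0-ceph_lv0
    # osd.1 -> /dev/mapper/ceph_vg1-ceph_lv1
    # osd.2 -> /dev/mapper/ceph_vg2-ceph_lv2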
Oct 11 02:36:32 compute-0 systemd[1]: libpod-b29fcdde804cc580629e9275c0e75683fc0bc68d69c041b606de3530042a85a3.scope: Deactivated successfully.
Oct 11 02:36:32 compute-0 systemd[1]: libpod-b29fcdde804cc580629e9275c0e75683fc0bc68d69c041b606de3530042a85a3.scope: Consumed 1.164s CPU time.
Oct 11 02:36:32 compute-0 podman[448290]: 2025-10-11 02:36:32.265365996 +0000 UTC m=+0.049354410 container died b29fcdde804cc580629e9275c0e75683fc0bc68d69c041b606de3530042a85a3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_ritchie, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:36:32 compute-0 systemd[1]: var-lib-containers-storage-overlay-05182cf7e96bde925467bb59c9b39e1137587bb819debcdae0278e79d4969c2c-merged.mount: Deactivated successfully.
Oct 11 02:36:32 compute-0 podman[448290]: 2025-10-11 02:36:32.330695586 +0000 UTC m=+0.114684000 container remove b29fcdde804cc580629e9275c0e75683fc0bc68d69c041b606de3530042a85a3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_ritchie, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:36:32 compute-0 systemd[1]: libpod-conmon-b29fcdde804cc580629e9275c0e75683fc0bc68d69c041b606de3530042a85a3.scope: Deactivated successfully.
Oct 11 02:36:32 compute-0 sudo[448141]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:36:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:36:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:36:32 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:36:32 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 077ce55c-59d7-43c2-a3da-9668281cc6d6 does not exist
Oct 11 02:36:32 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 7cd6cb95-f9d7-4e7d-8ad2-f936faeed417 does not exist
Oct 11 02:36:32 compute-0 sudo[448305]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:36:32 compute-0 sudo[448305]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:32 compute-0 sudo[448305]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1763: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:32 compute-0 sudo[448330]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:36:32 compute-0 sudo[448330]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:36:32 compute-0 sudo[448330]: pam_unix(sudo:session): session closed for user root
Oct 11 02:36:33 compute-0 nova_compute[356901]: 2025-10-11 02:36:33.016 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:36:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:36:33 compute-0 ceph-mon[191930]: pgmap v1763: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1764: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:34 compute-0 nova_compute[356901]: 2025-10-11 02:36:34.895 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:35 compute-0 ceph-mon[191930]: pgmap v1764: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:36:36 compute-0 podman[448355]: 2025-10-11 02:36:36.231955761 +0000 UTC m=+0.129646623 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 02:36:36 compute-0 podman[448356]: 2025-10-11 02:36:36.257437752 +0000 UTC m=+0.138464215 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.expose-services=, io.openshift.tags=minimal rhel9, architecture=x86_64, distribution-scope=public, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, managed_by=edpm_ansible, container_name=openstack_network_exporter, release=1755695350, vendor=Red Hat, Inc., version=9.6, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, url=https://catalog.redhat.com/en/search?searchType=containers, name=ubi9-minimal, build-date=2025-08-20T13:12:41, com.redhat.component=ubi9-minimal-container, maintainer=Red Hat, Inc., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vcs-type=git, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']})
Oct 11 02:36:36 compute-0 podman[448357]: 2025-10-11 02:36:36.26457655 +0000 UTC m=+0.139859782 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:36:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1765: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:37 compute-0 ceph-mon[191930]: pgmap v1765: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:38 compute-0 nova_compute[356901]: 2025-10-11 02:36:38.020 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1766: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:39 compute-0 ceph-mon[191930]: pgmap v1766: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:39 compute-0 nova_compute[356901]: 2025-10-11 02:36:39.897 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1767: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:36:41 compute-0 ceph-mon[191930]: pgmap v1767: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1768: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:42 compute-0 ceph-mon[191930]: pgmap v1768: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:43 compute-0 nova_compute[356901]: 2025-10-11 02:36:43.024 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:43 compute-0 podman[448415]: 2025-10-11 02:36:43.255050626 +0000 UTC m=+0.137567622 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release-0.7.12=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.tags=base rhel9, release=1214.1726694543, version=9.4, com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.expose-services=, summary=Provides the latest release of Red Hat Universal Base Image 9., architecture=x86_64, io.buildah.version=1.29.0, maintainer=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-type=git, name=ubi9, config_id=edpm, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, build-date=2024-09-18T21:23:30, distribution-scope=public)
Oct 11 02:36:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1769: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:44 compute-0 nova_compute[356901]: 2025-10-11 02:36:44.899 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:45 compute-0 ceph-mon[191930]: pgmap v1769: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:36:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1770: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:47 compute-0 ceph-mon[191930]: pgmap v1770: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:48 compute-0 nova_compute[356901]: 2025-10-11 02:36:48.029 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1771: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:49 compute-0 podman[448435]: 2025-10-11 02:36:49.21875242 +0000 UTC m=+0.098698919 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_compute, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']})
Oct 11 02:36:49 compute-0 podman[448433]: 2025-10-11 02:36:49.222442767 +0000 UTC m=+0.110266193 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:36:49 compute-0 podman[448434]: 2025-10-11 02:36:49.255939639 +0000 UTC m=+0.139069991 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.schema-version=1.0, container_name=ovn_controller, io.buildah.version=1.41.3, tcib_managed=true, config_id=ovn_controller)
Oct 11 02:36:49 compute-0 podman[448439]: 2025-10-11 02:36:49.27306961 +0000 UTC m=+0.130390893 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, io.buildah.version=1.41.3)
Oct 11 02:36:49 compute-0 ceph-mon[191930]: pgmap v1771: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:49 compute-0 nova_compute[356901]: 2025-10-11 02:36:49.905 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1772: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:36:51 compute-0 ceph-mon[191930]: pgmap v1772: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1773: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:52 compute-0 ceph-mon[191930]: pgmap v1773: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:53 compute-0 nova_compute[356901]: 2025-10-11 02:36:53.032 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1774: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:36:54.863 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:36:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:36:54.864 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:36:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:36:54.865 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
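The three ovn_metadata_agent lines above are oslo.concurrency's standard trace for one guarded call: "Acquiring", "acquired ... waited", then "released ... held", all emitted by the inner wrapper in lockutils.py. A minimal sketch of the pattern that produces them (lock name taken from the log; the body is a stand-in, not neutron's actual code):

    from oslo_concurrency import lockutils

    @lockutils.synchronized('_check_child_processes')
    def _check_child_processes():
        # Stand-in for ProcessMonitor._check_child_processes; the decorator
        # logs the Acquiring/acquired/released DEBUG lines seen above.
        pass

    _check_child_processes()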
Oct 11 02:36:54 compute-0 nova_compute[356901]: 2025-10-11 02:36:54.907 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:54 compute-0 nova_compute[356901]: 2025-10-11 02:36:54.920 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:36:55 compute-0 podman[448519]: 2025-10-11 02:36:55.254672473 +0000 UTC m=+0.132464478 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, container_name=iscsid, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
Oct 11 02:36:55 compute-0 podman[448518]: 2025-10-11 02:36:55.276106947 +0000 UTC m=+0.159373586 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_id=multipathd, container_name=multipathd, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.3)
Oct 11 02:36:55 compute-0 nova_compute[356901]: 2025-10-11 02:36:55.280 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_power_states run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:36:55 compute-0 nova_compute[356901]: 2025-10-11 02:36:55.312 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Triggering sync for uuid 0cc56d17-ec3a-4408-bccb-91b29427379e _sync_power_states /usr/lib/python3.9/site-packages/nova/compute/manager.py:10268
Oct 11 02:36:55 compute-0 nova_compute[356901]: 2025-10-11 02:36:55.313 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "0cc56d17-ec3a-4408-bccb-91b29427379e" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:36:55 compute-0 nova_compute[356901]: 2025-10-11 02:36:55.314 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "0cc56d17-ec3a-4408-bccb-91b29427379e" acquired by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:36:55 compute-0 nova_compute[356901]: 2025-10-11 02:36:55.359 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "0cc56d17-ec3a-4408-bccb-91b29427379e" "released" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: held 0.046s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:36:55 compute-0 ceph-mon[191930]: pgmap v1774: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:36:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:36:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:36:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:36:56
Oct 11 02:36:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:36:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:36:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['vms', 'volumes', '.mgr', 'default.rgw.log', '.rgw.root', 'default.rgw.meta', 'backups', 'cephfs.cephfs.data', 'default.rgw.control', 'images', 'cephfs.cephfs.meta']
Oct 11 02:36:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:36:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:36:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:36:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:36:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:36:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1775: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:36:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:36:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:36:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:36:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:36:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:36:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:36:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:36:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:36:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:36:57 compute-0 ceph-mon[191930]: pgmap v1775: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:58 compute-0 nova_compute[356901]: 2025-10-11 02:36:58.036 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:36:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1776: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:59 compute-0 ceph-mon[191930]: pgmap v1776: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:36:59 compute-0 podman[157119]: time="2025-10-11T02:36:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:36:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:36:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:36:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:36:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9066 "" "Go-http-client/1.1"
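The two GET lines above show the libpod REST API being polled over the podman socket; the podman_exporter config earlier in this log sets CONTAINER_HOST=unix:///run/podman/podman.sock, so the same containers/json query can be reproduced with a stdlib-only sketch (socket path taken from that config, endpoint from the access-log line):

    import http.client
    import json
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        # HTTP client that connects to a unix socket instead of TCP.
        def __init__(self, socket_path):
            super().__init__("localhost")
            self.socket_path = socket_path

        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self.socket_path)

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    containers = json.loads(conn.getresponse().read())
    print(len(containers), "containers")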
Oct 11 02:36:59 compute-0 nova_compute[356901]: 2025-10-11 02:36:59.913 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1777: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:00 compute-0 ceph-mon[191930]: pgmap v1777: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:37:01 compute-0 openstack_network_exporter[374316]: ERROR   02:37:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:37:01 compute-0 openstack_network_exporter[374316]: ERROR   02:37:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:37:01 compute-0 openstack_network_exporter[374316]: ERROR   02:37:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:37:01 compute-0 openstack_network_exporter[374316]: ERROR   02:37:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:37:01 compute-0 openstack_network_exporter[374316]: ERROR   02:37:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:37:01 compute-0 nova_compute[356901]: 2025-10-11 02:37:01.930 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:37:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1778: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:02 compute-0 nova_compute[356901]: 2025-10-11 02:37:02.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:37:03 compute-0 nova_compute[356901]: 2025-10-11 02:37:03.040 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:03 compute-0 ceph-mon[191930]: pgmap v1778: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1779: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:04 compute-0 nova_compute[356901]: 2025-10-11 02:37:04.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:37:04 compute-0 nova_compute[356901]: 2025-10-11 02:37:04.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:37:04 compute-0 nova_compute[356901]: 2025-10-11 02:37:04.916 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:05 compute-0 ceph-mon[191930]: pgmap v1779: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:37:05 compute-0 nova_compute[356901]: 2025-10-11 02:37:05.893 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:37:05 compute-0 nova_compute[356901]: 2025-10-11 02:37:05.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:37:05 compute-0 nova_compute[356901]: 2025-10-11 02:37:05.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:37:05 compute-0 nova_compute[356901]: 2025-10-11 02:37:05.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:37:06 compute-0 nova_compute[356901]: 2025-10-11 02:37:06.166 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:37:06 compute-0 nova_compute[356901]: 2025-10-11 02:37:06.167 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:37:06 compute-0 nova_compute[356901]: 2025-10-11 02:37:06.168 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:37:06 compute-0 nova_compute[356901]: 2025-10-11 02:37:06.168 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:37:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1780: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00025334537995702286 of space, bias 1.0, pg target 0.07600361398710685 quantized to 32 (current 32)
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:37:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
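The per-pool arithmetic in those pg_autoscaler lines can be reproduced by hand: the raw pg target is the pool's usage ratio times its bias times a cluster-wide PG budget, and the result is quantized to a power of two no smaller than the pool's pg_num_min, with a change applied only when the current value is off by a large factor (3x by default). A minimal sketch, assuming a budget of 300 PGs (e.g. 3 OSDs at the default mon_target_pg_per_osd of 100); the helper names and pg_num_min values are illustrative, not Ceph's own code:

    # Hedged sketch of the pg_autoscaler numbers logged above.
    # ASSUMPTIONS: pg_budget=300 (3 OSDs * mon_target_pg_per_osd=100) and
    # illustrative pg_num_min values; function names are not Ceph's.
    import math

    def pool_pg_target(usage_ratio: float, bias: float, pg_budget: int = 300) -> float:
        return usage_ratio * bias * pg_budget

    def quantize_pow2(target: float, pg_num_min: int = 1) -> int:
        if target <= pg_num_min:
            return pg_num_min
        return max(pg_num_min, 2 ** round(math.log2(target)))

    # Pool 'vms' from the log: usage 0.0005513950275118838, bias 1.0
    raw = pool_pg_target(0.0005513950275118838, 1.0)
    print(raw)                     # ~0.16541850825356513, as logged
    print(quantize_pow2(raw, 32))  # 32, matching "quantized to 32"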
Oct 11 02:37:07 compute-0 podman[448559]: 2025-10-11 02:37:07.25560505 +0000 UTC m=+0.125499304 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:37:07 compute-0 podman[448558]: 2025-10-11 02:37:07.269786284 +0000 UTC m=+0.150728659 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, architecture=x86_64, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.expose-services=, build-date=2025-08-20T13:12:41, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, distribution-scope=public, name=ubi9-minimal, vendor=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, io.openshift.tags=minimal rhel9, managed_by=edpm_ansible, version=9.6, config_id=edpm, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, container_name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., release=1755695350, vcs-type=git)
Oct 11 02:37:07 compute-0 podman[448557]: 2025-10-11 02:37:07.277151617 +0000 UTC m=+0.161232174 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_managed=true)
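Each of those podman health_status entries is a periodic evaluation of the healthcheck configured in config_data ('test': '/openstack/healthcheck <service>', mounted read-only from /var/lib/openstack/healthchecks). The same check can be triggered on demand; a minimal sketch driving podman's healthcheck subcommand from Python, with the container name taken from the node_exporter line above:

    # Re-run the healthcheck behind the health_status=healthy entry above.
    # `podman healthcheck run <name>` exits 0 on success, non-zero on failure.
    import subprocess

    result = subprocess.run(
        ["podman", "healthcheck", "run", "node_exporter"],
        capture_output=True, text=True,
    )
    print("healthy" if result.returncode == 0 else f"unhealthy: {result.stderr.strip()}")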
Oct 11 02:37:07 compute-0 ceph-mon[191930]: pgmap v1780: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:07 compute-0 nova_compute[356901]: 2025-10-11 02:37:07.937 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:37:07 compute-0 nova_compute[356901]: 2025-10-11 02:37:07.952 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:37:07 compute-0 nova_compute[356901]: 2025-10-11 02:37:07.953 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
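The network_info written back to the cache at 02:37:07.937 is plain JSON, so the addresses it carries can be pulled straight out of it. A minimal sketch against the structure logged above, where vif stands for one element of that list:

    # Collect fixed IPs (and attached floating IPs) from one cached VIF dict.
    def addresses(vif: dict) -> list[tuple[str, str]]:
        found = []
        for subnet in vif["network"]["subnets"]:
            for ip in subnet["ips"]:
                found.append((ip["address"], "fixed"))
                for fip in ip.get("floating_ips", []):
                    found.append((fip["address"], "floating"))
        return found

    # For the VIF logged above this returns:
    # [('192.168.0.236', 'fixed'), ('192.168.122.201', 'floating')]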
Oct 11 02:37:07 compute-0 nova_compute[356901]: 2025-10-11 02:37:07.954 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:37:07 compute-0 nova_compute[356901]: 2025-10-11 02:37:07.955 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:37:07 compute-0 nova_compute[356901]: 2025-10-11 02:37:07.980 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:37:07 compute-0 nova_compute[356901]: 2025-10-11 02:37:07.981 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:37:07 compute-0 nova_compute[356901]: 2025-10-11 02:37:07.982 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
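That Acquiring/acquired/released triple is oslo.concurrency's standard instrumentation: the critical section runs under a named process-local lock, and lockutils logs how long the caller waited and how long the lock was held, exactly as in the "compute_resources" lines above. A minimal sketch of the same pattern (the guarded function body is illustrative):

    # Same named-lock pattern that produced the "compute_resources" log lines.
    from oslo_concurrency import lockutils

    @lockutils.synchronized("compute_resources")
    def clean_compute_node_cache():
        pass  # critical section; wait/held times get logged as above

    # Equivalent context-manager form:
    with lockutils.lock("compute_resources"):
        pass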
Oct 11 02:37:07 compute-0 nova_compute[356901]: 2025-10-11 02:37:07.982 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:37:07 compute-0 nova_compute[356901]: 2025-10-11 02:37:07.983 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:37:08 compute-0 nova_compute[356901]: 2025-10-11 02:37:08.044 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:08 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:37:08 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2251362729' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:37:08 compute-0 nova_compute[356901]: 2025-10-11 02:37:08.499 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.517s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
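That 0.517 s round trip is a plain subprocess run through oslo.concurrency, and the JSON it returns is where the resource tracker reads Ceph pool capacity from. A minimal sketch reproducing the logged command, assuming the same client id and conf path:

    # Reproduce the logged "ceph df --format=json" call and read capacity totals.
    import json
    from oslo_concurrency import processutils

    stdout, _stderr = processutils.execute(
        "ceph", "df", "--format=json",
        "--id", "openstack", "--conf", "/etc/ceph/ceph.conf",
    )
    stats = json.loads(stdout)["stats"]
    print(stats["total_bytes"], stats["total_avail_bytes"])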
Oct 11 02:37:08 compute-0 nova_compute[356901]: 2025-10-11 02:37:08.591 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:37:08 compute-0 nova_compute[356901]: 2025-10-11 02:37:08.591 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:37:08 compute-0 nova_compute[356901]: 2025-10-11 02:37:08.592 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:37:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1781: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:08 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2251362729' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:37:09 compute-0 nova_compute[356901]: 2025-10-11 02:37:09.087 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:37:09 compute-0 nova_compute[356901]: 2025-10-11 02:37:09.089 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3841MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:37:09 compute-0 nova_compute[356901]: 2025-10-11 02:37:09.090 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:37:09 compute-0 nova_compute[356901]: 2025-10-11 02:37:09.091 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:37:09 compute-0 nova_compute[356901]: 2025-10-11 02:37:09.186 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:37:09 compute-0 nova_compute[356901]: 2025-10-11 02:37:09.188 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:37:09 compute-0 nova_compute[356901]: 2025-10-11 02:37:09.188 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:37:09 compute-0 nova_compute[356901]: 2025-10-11 02:37:09.234 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:37:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:37:09 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1309062163' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:37:09 compute-0 ceph-mon[191930]: pgmap v1781: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:09 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1309062163' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:37:09 compute-0 nova_compute[356901]: 2025-10-11 02:37:09.770 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.536s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:37:09 compute-0 nova_compute[356901]: 2025-10-11 02:37:09.784 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:37:09 compute-0 nova_compute[356901]: 2025-10-11 02:37:09.834 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
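Placement turns that inventory into schedulable capacity as (total - reserved) * allocation_ratio per resource class, which is why an 8-core host with a 4.0 VCPU ratio can admit far more guests than the 7 free vcpus the raw hypervisor view reports. Worked out with the logged numbers:

    # Capacity placement derives from the inventory logged above.
    inventory = {
        "VCPU": {"total": 8, "reserved": 0, "allocation_ratio": 4.0},
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "DISK_GB": {"total": 59, "reserved": 1, "allocation_ratio": 0.9},
    }
    for rc, inv in inventory.items():
        capacity = (inv["total"] - inv["reserved"]) * inv["allocation_ratio"]
        print(rc, capacity)  # VCPU 32.0, MEMORY_MB 7168.0, DISK_GB 52.2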
Oct 11 02:37:09 compute-0 nova_compute[356901]: 2025-10-11 02:37:09.838 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:37:09 compute-0 nova_compute[356901]: 2025-10-11 02:37:09.839 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.748s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:37:09 compute-0 nova_compute[356901]: 2025-10-11 02:37:09.919 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1782: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:10 compute-0 ceph-mon[191930]: pgmap v1782: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:37:11 compute-0 nova_compute[356901]: 2025-10-11 02:37:11.782 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:37:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1783: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:13 compute-0 nova_compute[356901]: 2025-10-11 02:37:13.046 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.866 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads available to execute them; polling can therefore be expected to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.867 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.868 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.868 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceph-mon[191930]: pgmap v1783: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.878 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.879 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.879 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.879 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.880 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.880 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:37:13.880103) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.887 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2856 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.888 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
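The 2856-byte sample comes from libvirt's per-interface counters on the tap device recorded in the network cache earlier. A minimal standalone sketch with the python libvirt bindings, using the instance name and devname from this log:

    # Read the counters behind the network.incoming.bytes sample above.
    import libvirt  # python3-libvirt

    conn = libvirt.openReadOnly("qemu:///system")
    dom = conn.lookupByName("instance-00000001")
    # interfaceStats -> (rx_bytes, rx_packets, rx_errs, rx_drop,
    #                    tx_bytes, tx_packets, tx_errs, tx_drop)
    rx_bytes = dom.interfaceStats("tap64dfc81b-52")[0]
    print("network.incoming.bytes:", rx_bytes)
    conn.close()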
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.888 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.889 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.889 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.889 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.889 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.889 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 23 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.890 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.890 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.891 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.891 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:37:13.889618) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.891 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.891 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.892 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.892 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.893 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:37:13.892126) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.893 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.893 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.894 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.894 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.894 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.894 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.894 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.895 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:37:13.894601) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.895 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.896 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.896 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.896 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.897 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.897 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.897 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:37:13.897194) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.923 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.924 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.924 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.925 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
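Those three capacity samples (two 1 GiB virtual disks plus a 485376-byte config drive) are libvirt block info read per device. A minimal sketch along the same lines; the device names here are assumptions for illustration, not taken from the log:

    # Per-device capacity as sampled above; blockInfo -> [capacity, allocation, physical].
    import libvirt

    conn = libvirt.openReadOnly("qemu:///system")
    dom = conn.lookupByName("instance-00000001")
    for dev in ("vda", "vdb", "hda"):  # assumed device names
        capacity, _allocation, _physical = dom.blockInfo(dev)
        print(dev, "disk.device.capacity:", capacity)
    conn.close()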
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.925 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.926 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.926 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.926 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.926 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.927 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:37:13.926594) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.970 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.971 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.971 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.972 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.972 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.972 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.972 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.972 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.972 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.973 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.973 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.973 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.974 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.974 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.974 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.974 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.974 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.974 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.974 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.975 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.975 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.975 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:37:13.972887) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.976 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:37:13.974804) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
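
Note the interleaving of thread IDs here: thread 14 does the polling and emits the "heartbeat update" message, while thread 12 later logs "Updated heartbeat" with the recorded timestamp. A simplified, hypothetical sketch of that producer/consumer hand-off (not ceilometer's actual internals):

    import datetime
    import queue
    import threading

    heartbeats = {}
    q = queue.Queue()

    def status_thread():
        # Plays the role of thread 12: records when each pollster last beat.
        while True:
            name = q.get()
            if name is None:
                break
            heartbeats[name] = datetime.datetime.now(datetime.timezone.utc)

    t = threading.Thread(target=status_thread)
    t.start()
    q.put("disk.device.read.requests")  # what the polling thread (14) does
    q.put(None)
    t.join()
    print(heartbeats)
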
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.976 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.976 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.976 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.976 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.976 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.976 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.976 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.977 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.977 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.977 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
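
A quick unit check on the disk.device.usage volumes: the first two devices report exactly 2^30 bytes, i.e. two 1 GiB virtual disks, and the small third device (plausibly a config drive; an assumption, the log does not name the devices here) is about 474 KiB:

    print(1073741824 == 2**30)  # True: both large devices are exactly 1 GiB
    print(485376 / 1024)        # 474.0 KiB for the small third device
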
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.977 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.977 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.977 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.978 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.978 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.978 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.978 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.978 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.978 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.979 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.979 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.979 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.979 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.979 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.979 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.979 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:37:13.976763) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.979 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.980 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.980 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.980 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.980 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.980 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.980 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.980 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:37:13.978070) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.980 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.981 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:37:13.979566) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:13.981 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:37:13.980911) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.008 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.009 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
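
The power.state volume of 1 corresponds to a running domain: the meter carries libvirt's virDomainState value, in which VIR_DOMAIN_RUNNING is 1:

    # virDomainState values as defined by libvirt.
    LIBVIRT_STATE = {
        0: "nostate", 1: "running", 2: "blocked", 3: "paused",
        4: "shutdown", 5: "shutoff", 6: "crashed", 7: "pmsuspended",
    }
    print(LIBVIRT_STATE[1])  # -> running
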
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.009 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.009 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.009 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.009 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.009 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.009 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.010 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.010 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.010 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.010 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.010 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.010 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.011 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.011 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.011 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.011 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.011 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.011 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:37:14.009488) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.011 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
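
Rate pollsters such as network.incoming.bytes.rate get skipped when discovery turns up no resources that have not already been handled this cycle. A simplified sketch of that guard (hypothetical logic, condensed from the message above, not ceilometer's code):

    def run_pollster(name, resources, handled_this_cycle):
        todo = [r for r in resources if (name, r) not in handled_this_cycle]
        if not todo:
            print("Skip pollster %s, no new resources found this cycle" % name)
            return
        for r in todo:
            handled_this_cycle.add((name, r))
            # ... sample r here ...

    handled = {("network.incoming.bytes.rate", "0cc56d17")}
    run_pollster("network.incoming.bytes.rate", ["0cc56d17"], handled)
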
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.012 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:37:14.011094) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.012 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.012 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.012 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.012 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.012 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.013 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.013 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.013 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.013 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.013 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.013 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.013 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.013 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.014 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.014 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.014 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.014 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.014 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.014 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.014 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:37:14.012744) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.015 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.015 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.015 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.015 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:37:14.013517) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.015 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.015 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.015 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:37:14.014480) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.015 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.016 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.016 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.016 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.016 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:37:14.015760) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.016 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.016 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.016 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.016 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.017 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.017 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.017 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.017 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.017 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.017 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:37:14.016748) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.017 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.017 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.017 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.018 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.018 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.018 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.018 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.018 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.018 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.018 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.019 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.019 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.019 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.019 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.019 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.019 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.019 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:37:14.017724) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.019 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.019 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 50340000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.020 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
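
The cpu volume is cumulative guest CPU time in nanoseconds (the standard meaning of ceilometer's cpu meter), so this instance has burned about 50.34 CPU-seconds since it started:

    print(50340000000 / 1e9)  # 50.34 CPU-seconds consumed so far
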
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.021 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.021 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.022 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.022 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.022 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:37:14.018975) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.022 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.022 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:37:14.019913) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.023 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:37:14.022463) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.022 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2342 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.023 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.023 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.024 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.024 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.024 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.024 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.024 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.83984375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.025 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:37:14.024697) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.025 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
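
memory.usage is reported in MB, and the fractional value lines up with a KiB-granular source, consistent with libvirt exposing memory statistics in KiB:

    print(48.83984375 * 1024)  # 50012.0 KiB: an exact KiB count, as expected
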
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.025 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.026 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.026 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.026 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.027 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.027 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.027 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.027 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.027 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.027 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.027 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.028 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.028 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.028 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.028 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.028 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.028 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.028 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.029 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.029 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.029 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.029 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.029 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.029 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.029 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.030 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.030 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:37:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:37:14.030 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
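
That run of "Finished processing pollster" lines closes out the polling task: every meter in the task is marked done before the next interval begins. When reading excerpts like this one, a small script can confirm a cycle completed for all expected meters (journal.txt is a hypothetical saved excerpt of this log):

    import re

    with open("journal.txt") as f:  # hypothetical saved excerpt
        done = re.findall(r"Finished processing pollster \[([^\]]+)\]", f.read())
    print(len(done), "pollsters completed:", sorted(done)[:3], "...")
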
Oct 11 02:37:14 compute-0 podman[448667]: 2025-10-11 02:37:14.235998219 +0000 UTC m=+0.120031590 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, architecture=x86_64, summary=Provides the latest release of Red Hat Universal Base Image 9., build-date=2024-09-18T21:23:30, io.openshift.expose-services=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.29.0, io.openshift.tags=base rhel9, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, managed_by=edpm_ansible, release=1214.1726694543, io.k8s.display-name=Red Hat Universal Base Image 9, com.redhat.component=ubi9-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=kepler, version=9.4, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., config_id=edpm, maintainer=Red Hat, Inc., name=ubi9, vcs-type=git, release-0.7.12=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
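
The podman line above is a periodic healthcheck event for the kepler container; per its config_data, the check runs a script mounted from the host at /openstack/healthcheck. The same check can be triggered by hand with podman's healthcheck subcommand, wrapped here for illustration:

    import subprocess

    # "podman healthcheck run" executes the container's configured check;
    # exit status 0 means healthy.
    r = subprocess.run(["podman", "healthcheck", "run", "kepler"],
                       capture_output=True, text=True)
    print("healthy" if r.returncode == 0 else "unhealthy: %s" % (r.stdout or r.stderr))
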
Oct 11 02:37:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1784: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:14 compute-0 ceph-mon[191930]: pgmap v1784: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
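
The pgmap lines read as: all 321 placement groups are active+clean, with 78 MiB of logical data occupying 259 MiB of raw space, a ratio of roughly 3.3, consistent with 3x replication plus metadata overhead (the replication factor is inferred, not stated in the log):

    print(259 / 78)  # ~3.32 raw bytes stored per logical byte
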
Oct 11 02:37:14 compute-0 nova_compute[356901]: 2025-10-11 02:37:14.921 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
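
The recurring ovsdbapp "[POLLIN] on fd 24" messages are the OVS IDL event loop waking because its OVSDB socket became readable; the mechanism is an ordinary poll() loop. A self-contained sketch of the same wakeup:

    import select
    import socket

    a, b = socket.socketpair()
    p = select.poll()
    p.register(a.fileno(), select.POLLIN)
    b.send(b"update")                      # peer writes; fd becomes readable
    for fd, event in p.poll(1000):
        if event & select.POLLIN:
            print("[POLLIN] on fd %d" % fd)  # mirrors ovs.poller's __log_wakeup
    a.close()
    b.close()
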
Oct 11 02:37:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:37:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1785: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:17 compute-0 ceph-mon[191930]: pgmap v1785: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:18 compute-0 nova_compute[356901]: 2025-10-11 02:37:18.050 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1786: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:19 compute-0 ceph-mon[191930]: pgmap v1786: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:19 compute-0 nova_compute[356901]: 2025-10-11 02:37:19.924 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:20 compute-0 podman[448688]: 2025-10-11 02:37:20.191875482 +0000 UTC m=+0.086453047 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:37:20 compute-0 podman[448696]: 2025-10-11 02:37:20.251893311 +0000 UTC m=+0.109371439 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_managed=true, container_name=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:37:20 compute-0 podman[448694]: 2025-10-11 02:37:20.252907758 +0000 UTC m=+0.131389260 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, io.buildah.version=1.41.4)
Oct 11 02:37:20 compute-0 podman[448689]: 2025-10-11 02:37:20.261830293 +0000 UTC m=+0.144427223 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:37:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1787: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:37:21 compute-0 ceph-mon[191930]: pgmap v1787: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1788: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:22 compute-0 ceph-mon[191930]: pgmap v1788: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:23 compute-0 nova_compute[356901]: 2025-10-11 02:37:23.054 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1789: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:24 compute-0 nova_compute[356901]: 2025-10-11 02:37:24.927 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:25 compute-0 ceph-mon[191930]: pgmap v1789: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:37:26 compute-0 podman[448768]: 2025-10-11 02:37:26.231563807 +0000 UTC m=+0.117583206 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, org.label-schema.build-date=20251009, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:37:26 compute-0 podman[448769]: 2025-10-11 02:37:26.247865626 +0000 UTC m=+0.122783082 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, config_id=iscsid, container_name=iscsid, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']})
Oct 11 02:37:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:37:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:37:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:37:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:37:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:37:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:37:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1790: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:37:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/4068704637' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:37:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:37:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/4068704637' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:37:27 compute-0 ceph-mon[191930]: pgmap v1790: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/4068704637' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:37:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/4068704637' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:37:28 compute-0 nova_compute[356901]: 2025-10-11 02:37:28.059 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1791: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:29 compute-0 ceph-mon[191930]: pgmap v1791: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:29 compute-0 podman[157119]: time="2025-10-11T02:37:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:37:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:37:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:37:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:37:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9060 "" "Go-http-client/1.1"
Oct 11 02:37:29 compute-0 nova_compute[356901]: 2025-10-11 02:37:29.931 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1792: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:30 compute-0 ceph-mon[191930]: pgmap v1792: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:37:31 compute-0 openstack_network_exporter[374316]: ERROR   02:37:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:37:31 compute-0 openstack_network_exporter[374316]: ERROR   02:37:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:37:31 compute-0 openstack_network_exporter[374316]: ERROR   02:37:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:37:31 compute-0 openstack_network_exporter[374316]: ERROR   02:37:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:37:31 compute-0 openstack_network_exporter[374316]: ERROR   02:37:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:37:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1793: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:32 compute-0 sudo[448808]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:37:32 compute-0 sudo[448808]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:32 compute-0 sudo[448808]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:32 compute-0 sudo[448833]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:37:32 compute-0 sudo[448833]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:32 compute-0 sudo[448833]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:32 compute-0 sudo[448858]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:37:32 compute-0 sudo[448858]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:32 compute-0 sudo[448858]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:33 compute-0 sudo[448883]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:37:33 compute-0 sudo[448883]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:33 compute-0 nova_compute[356901]: 2025-10-11 02:37:33.061 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:33 compute-0 sudo[448883]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"} v 0) v1
Oct 11 02:37:33 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"}]: dispatch
Oct 11 02:37:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:37:33 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:37:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:37:33 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:37:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:37:33 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:37:33 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev a36f3eb7-bfc2-4498-a693-af8b6a1eca3a does not exist
Oct 11 02:37:33 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev fff2b324-2878-4587-9964-d70902b56564 does not exist
Oct 11 02:37:33 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev b19e2946-a69e-494a-96d4-b70a960a014c does not exist
Oct 11 02:37:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:37:33 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:37:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:37:33 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:37:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:37:33 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:37:33 compute-0 ceph-mon[191930]: pgmap v1793: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"}]: dispatch
Oct 11 02:37:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:37:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:37:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:37:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:37:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:37:33 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:37:33 compute-0 sudo[448938]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #81. Immutable memtables: 0.
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:37:33.728901) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 45] Flushing memtable with next log file: 81
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150253728937, "job": 45, "event": "flush_started", "num_memtables": 1, "num_entries": 2068, "num_deletes": 252, "total_data_size": 3439277, "memory_usage": 3497040, "flush_reason": "Manual Compaction"}
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 45] Level-0 flush table #82: started
Oct 11 02:37:33 compute-0 sudo[448938]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:33 compute-0 sudo[448938]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150253753602, "cf_name": "default", "job": 45, "event": "table_file_creation", "file_number": 82, "file_size": 3372948, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 34753, "largest_seqno": 36820, "table_properties": {"data_size": 3363472, "index_size": 6034, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 2373, "raw_key_size": 19045, "raw_average_key_size": 20, "raw_value_size": 3344543, "raw_average_value_size": 3550, "num_data_blocks": 267, "num_entries": 942, "num_filter_entries": 942, "num_deletions": 252, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760150028, "oldest_key_time": 1760150028, "file_creation_time": 1760150253, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 82, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 45] Flush lasted 24773 microseconds, and 8840 cpu microseconds.
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:37:33.753671) [db/flush_job.cc:967] [default] [JOB 45] Level-0 flush table #82: 3372948 bytes OK
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:37:33.753697) [db/memtable_list.cc:519] [default] Level-0 commit table #82 started
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:37:33.755220) [db/memtable_list.cc:722] [default] Level-0 commit table #82: memtable #1 done
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:37:33.755261) EVENT_LOG_v1 {"time_micros": 1760150253755257, "job": 45, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:37:33.755282) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 45] Try to delete WAL files size 3430609, prev total WAL file size 3430609, number of live WAL files 2.
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000078.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:37:33.756683) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730033323633' seq:72057594037927935, type:22 .. '7061786F730033353135' seq:0, type:0; will stop at (end)
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 46] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 45 Base level 0, inputs: [82(3293KB)], [80(6954KB)]
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150253756737, "job": 46, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [82], "files_L6": [80], "score": -1, "input_data_size": 10493980, "oldest_snapshot_seqno": -1}
Oct 11 02:37:33 compute-0 sudo[448963]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:37:33 compute-0 sudo[448963]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:33 compute-0 sudo[448963]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 46] Generated table #83: 5672 keys, 8756529 bytes, temperature: kUnknown
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150253825905, "cf_name": "default", "job": 46, "event": "table_file_creation", "file_number": 83, "file_size": 8756529, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 8719003, "index_size": 22248, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 14213, "raw_key_size": 143134, "raw_average_key_size": 25, "raw_value_size": 8616811, "raw_average_value_size": 1519, "num_data_blocks": 913, "num_entries": 5672, "num_filter_entries": 5672, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760150253, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 83, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:37:33.826186) [db/compaction/compaction_job.cc:1663] [default] [JOB 46] Compacted 1@0 + 1@6 files to L6 => 8756529 bytes
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:37:33.827806) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 151.5 rd, 126.5 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(3.2, 6.8 +0.0 blob) out(8.4 +0.0 blob), read-write-amplify(5.7) write-amplify(2.6) OK, records in: 6192, records dropped: 520 output_compression: NoCompression
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:37:33.827824) EVENT_LOG_v1 {"time_micros": 1760150253827814, "job": 46, "event": "compaction_finished", "compaction_time_micros": 69247, "compaction_time_cpu_micros": 30911, "output_level": 6, "num_output_files": 1, "total_output_size": 8756529, "num_input_records": 6192, "num_output_records": 5672, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000082.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150253828584, "job": 46, "event": "table_file_deletion", "file_number": 82}
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000080.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150253830593, "job": 46, "event": "table_file_deletion", "file_number": 80}
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:37:33.756483) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:37:33.830865) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:37:33.830874) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:37:33.830878) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:37:33.830881) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:37:33 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:37:33.830885) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:37:33 compute-0 sudo[448988]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:37:33 compute-0 sudo[448988]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:33 compute-0 sudo[448988]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:34 compute-0 sudo[449013]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:37:34 compute-0 sudo[449013]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:34 compute-0 podman[449079]: 2025-10-11 02:37:34.49063417 +0000 UTC m=+0.082863082 container create 96ed245b9ae81c182566bd9de425204ee7c6ba2e2446efd78864e9c93d0826a0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_goldwasser, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:37:34 compute-0 podman[449079]: 2025-10-11 02:37:34.460185369 +0000 UTC m=+0.052414301 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:37:34 compute-0 systemd[1]: Started libpod-conmon-96ed245b9ae81c182566bd9de425204ee7c6ba2e2446efd78864e9c93d0826a0.scope.
Oct 11 02:37:34 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:37:34 compute-0 podman[449079]: 2025-10-11 02:37:34.629766832 +0000 UTC m=+0.221995764 container init 96ed245b9ae81c182566bd9de425204ee7c6ba2e2446efd78864e9c93d0826a0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_goldwasser, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0)
Oct 11 02:37:34 compute-0 podman[449079]: 2025-10-11 02:37:34.644629434 +0000 UTC m=+0.236858336 container start 96ed245b9ae81c182566bd9de425204ee7c6ba2e2446efd78864e9c93d0826a0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_goldwasser, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2)
Oct 11 02:37:34 compute-0 podman[449079]: 2025-10-11 02:37:34.649665976 +0000 UTC m=+0.241894878 container attach 96ed245b9ae81c182566bd9de425204ee7c6ba2e2446efd78864e9c93d0826a0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_goldwasser, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:37:34 compute-0 quizzical_goldwasser[449096]: 167 167
Oct 11 02:37:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1794: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:34 compute-0 systemd[1]: libpod-96ed245b9ae81c182566bd9de425204ee7c6ba2e2446efd78864e9c93d0826a0.scope: Deactivated successfully.
Oct 11 02:37:34 compute-0 podman[449079]: 2025-10-11 02:37:34.655394627 +0000 UTC m=+0.247623549 container died 96ed245b9ae81c182566bd9de425204ee7c6ba2e2446efd78864e9c93d0826a0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_goldwasser, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 02:37:34 compute-0 systemd[1]: var-lib-containers-storage-overlay-ebe0ce981810df5cfdb8e3999dd0a244e0c633f73c88331df3eb4fff3f40ed51-merged.mount: Deactivated successfully.
Oct 11 02:37:34 compute-0 podman[449079]: 2025-10-11 02:37:34.71893677 +0000 UTC m=+0.311165652 container remove 96ed245b9ae81c182566bd9de425204ee7c6ba2e2446efd78864e9c93d0826a0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_goldwasser, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:37:34 compute-0 systemd[1]: libpod-conmon-96ed245b9ae81c182566bd9de425204ee7c6ba2e2446efd78864e9c93d0826a0.scope: Deactivated successfully.
Oct 11 02:37:34 compute-0 nova_compute[356901]: 2025-10-11 02:37:34.933 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:34 compute-0 podman[449119]: 2025-10-11 02:37:34.941738054 +0000 UTC m=+0.062637390 container create 3aca5b6496e93ab7edc7de9ce86dcfa69e38de294283236e5ac2b295f2d04e53 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_visvesvaraya, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef)
Oct 11 02:37:35 compute-0 systemd[1]: Started libpod-conmon-3aca5b6496e93ab7edc7de9ce86dcfa69e38de294283236e5ac2b295f2d04e53.scope.
Oct 11 02:37:35 compute-0 podman[449119]: 2025-10-11 02:37:34.915830182 +0000 UTC m=+0.036729538 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:37:35 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:37:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b3bd5d6aff2cc81e10a9acd23c96e879d0a34dd4ef47614f461aa5ecad340baf/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:37:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b3bd5d6aff2cc81e10a9acd23c96e879d0a34dd4ef47614f461aa5ecad340baf/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:37:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b3bd5d6aff2cc81e10a9acd23c96e879d0a34dd4ef47614f461aa5ecad340baf/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:37:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b3bd5d6aff2cc81e10a9acd23c96e879d0a34dd4ef47614f461aa5ecad340baf/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:37:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b3bd5d6aff2cc81e10a9acd23c96e879d0a34dd4ef47614f461aa5ecad340baf/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:37:35 compute-0 podman[449119]: 2025-10-11 02:37:35.086856094 +0000 UTC m=+0.207755540 container init 3aca5b6496e93ab7edc7de9ce86dcfa69e38de294283236e5ac2b295f2d04e53 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_visvesvaraya, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, ceph=True)
Oct 11 02:37:35 compute-0 podman[449119]: 2025-10-11 02:37:35.103396159 +0000 UTC m=+0.224295505 container start 3aca5b6496e93ab7edc7de9ce86dcfa69e38de294283236e5ac2b295f2d04e53 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_visvesvaraya, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 02:37:35 compute-0 podman[449119]: 2025-10-11 02:37:35.108731149 +0000 UTC m=+0.229630495 container attach 3aca5b6496e93ab7edc7de9ce86dcfa69e38de294283236e5ac2b295f2d04e53 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_visvesvaraya, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:37:35 compute-0 ceph-mon[191930]: pgmap v1794: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:37:36 compute-0 relaxed_visvesvaraya[449136]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:37:36 compute-0 relaxed_visvesvaraya[449136]: --> relative data size: 1.0
Oct 11 02:37:36 compute-0 relaxed_visvesvaraya[449136]: --> All data devices are unavailable
Oct 11 02:37:36 compute-0 systemd[1]: libpod-3aca5b6496e93ab7edc7de9ce86dcfa69e38de294283236e5ac2b295f2d04e53.scope: Deactivated successfully.
Oct 11 02:37:36 compute-0 systemd[1]: libpod-3aca5b6496e93ab7edc7de9ce86dcfa69e38de294283236e5ac2b295f2d04e53.scope: Consumed 1.172s CPU time.
Oct 11 02:37:36 compute-0 podman[449119]: 2025-10-11 02:37:36.341775016 +0000 UTC m=+1.462674422 container died 3aca5b6496e93ab7edc7de9ce86dcfa69e38de294283236e5ac2b295f2d04e53 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_visvesvaraya, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 02:37:36 compute-0 systemd[1]: var-lib-containers-storage-overlay-b3bd5d6aff2cc81e10a9acd23c96e879d0a34dd4ef47614f461aa5ecad340baf-merged.mount: Deactivated successfully.
Oct 11 02:37:36 compute-0 podman[449119]: 2025-10-11 02:37:36.448564203 +0000 UTC m=+1.569463579 container remove 3aca5b6496e93ab7edc7de9ce86dcfa69e38de294283236e5ac2b295f2d04e53 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=relaxed_visvesvaraya, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 02:37:36 compute-0 systemd[1]: libpod-conmon-3aca5b6496e93ab7edc7de9ce86dcfa69e38de294283236e5ac2b295f2d04e53.scope: Deactivated successfully.
Oct 11 02:37:36 compute-0 sudo[449013]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:36 compute-0 sudo[449178]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:37:36 compute-0 sudo[449178]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:36 compute-0 sudo[449178]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1795: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:36 compute-0 sudo[449203]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:37:36 compute-0 sudo[449203]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:36 compute-0 sudo[449203]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:36 compute-0 ceph-mon[191930]: pgmap v1795: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:36 compute-0 sudo[449228]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:37:36 compute-0 sudo[449228]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:36 compute-0 sudo[449228]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:36 compute-0 sudo[449253]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:37:36 compute-0 sudo[449253]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:37 compute-0 podman[449316]: 2025-10-11 02:37:37.529365095 +0000 UTC m=+0.071534370 container create 13f03ee51c50ed1b0469a827f76cdf01ea04f8ba9162e3d6b074b1c85ab31628 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_hertz, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507)
Oct 11 02:37:37 compute-0 systemd[1]: Started libpod-conmon-13f03ee51c50ed1b0469a827f76cdf01ea04f8ba9162e3d6b074b1c85ab31628.scope.
Oct 11 02:37:37 compute-0 podman[449316]: 2025-10-11 02:37:37.51071744 +0000 UTC m=+0.052886735 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:37:37 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:37:37 compute-0 podman[449316]: 2025-10-11 02:37:37.656768382 +0000 UTC m=+0.198937667 container init 13f03ee51c50ed1b0469a827f76cdf01ea04f8ba9162e3d6b074b1c85ab31628 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_hertz, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, ceph=True, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 02:37:37 compute-0 podman[449316]: 2025-10-11 02:37:37.671000764 +0000 UTC m=+0.213170049 container start 13f03ee51c50ed1b0469a827f76cdf01ea04f8ba9162e3d6b074b1c85ab31628 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_hertz, io.buildah.version=1.39.3, ceph=True, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:37:37 compute-0 podman[449316]: 2025-10-11 02:37:37.675933908 +0000 UTC m=+0.218103183 container attach 13f03ee51c50ed1b0469a827f76cdf01ea04f8ba9162e3d6b074b1c85ab31628 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_hertz, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0)
Oct 11 02:37:37 compute-0 busy_hertz[449339]: 167 167
Oct 11 02:37:37 compute-0 systemd[1]: libpod-13f03ee51c50ed1b0469a827f76cdf01ea04f8ba9162e3d6b074b1c85ab31628.scope: Deactivated successfully.
Oct 11 02:37:37 compute-0 podman[449316]: 2025-10-11 02:37:37.681631303 +0000 UTC m=+0.223800588 container died 13f03ee51c50ed1b0469a827f76cdf01ea04f8ba9162e3d6b074b1c85ab31628 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_hertz, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Oct 11 02:37:37 compute-0 podman[449329]: 2025-10-11 02:37:37.703576475 +0000 UTC m=+0.104879220 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, config_id=edpm, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:37:37 compute-0 systemd[1]: var-lib-containers-storage-overlay-0e3fb7e8461bd89f86238fc4fd6dd0cc4c39da3498849e83b216857a44b65d51-merged.mount: Deactivated successfully.
Oct 11 02:37:37 compute-0 podman[449332]: 2025-10-11 02:37:37.729858652 +0000 UTC m=+0.120004551 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, build-date=2025-08-20T13:12:41, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.6, config_id=edpm, io.buildah.version=1.33.7, io.openshift.expose-services=, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, architecture=x86_64, maintainer=Red Hat, Inc., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, distribution-scope=public, release=1755695350, com.redhat.component=ubi9-minimal-container, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, managed_by=edpm_ansible, vendor=Red Hat, Inc., container_name=openstack_network_exporter, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=minimal rhel9, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, name=ubi9-minimal)
Oct 11 02:37:37 compute-0 podman[449316]: 2025-10-11 02:37:37.735875178 +0000 UTC m=+0.278044473 container remove 13f03ee51c50ed1b0469a827f76cdf01ea04f8ba9162e3d6b074b1c85ab31628 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=busy_hertz, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 02:37:37 compute-0 podman[449333]: 2025-10-11 02:37:37.754459959 +0000 UTC m=+0.139585829 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:37:37 compute-0 systemd[1]: libpod-conmon-13f03ee51c50ed1b0469a827f76cdf01ea04f8ba9162e3d6b074b1c85ab31628.scope: Deactivated successfully.
Oct 11 02:37:37 compute-0 podman[449415]: 2025-10-11 02:37:37.93714757 +0000 UTC m=+0.059414854 container create 28887b231b4dac9e9bb8bd00c152d10e69e036ae109e3416f4e98cb06dd86a7f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_ardinghelli, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default)
Oct 11 02:37:38 compute-0 podman[449415]: 2025-10-11 02:37:37.912357689 +0000 UTC m=+0.034624993 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:37:38 compute-0 systemd[1]: Started libpod-conmon-28887b231b4dac9e9bb8bd00c152d10e69e036ae109e3416f4e98cb06dd86a7f.scope.
Oct 11 02:37:38 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:37:38 compute-0 nova_compute[356901]: 2025-10-11 02:37:38.063 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f458b7ba5cf597967bd2e01bcef826f1f35bdc8624d11189b6b76857a40794d4/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:37:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f458b7ba5cf597967bd2e01bcef826f1f35bdc8624d11189b6b76857a40794d4/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:37:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f458b7ba5cf597967bd2e01bcef826f1f35bdc8624d11189b6b76857a40794d4/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:37:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f458b7ba5cf597967bd2e01bcef826f1f35bdc8624d11189b6b76857a40794d4/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:37:38 compute-0 podman[449415]: 2025-10-11 02:37:38.102709729 +0000 UTC m=+0.224977063 container init 28887b231b4dac9e9bb8bd00c152d10e69e036ae109e3416f4e98cb06dd86a7f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_ardinghelli, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 02:37:38 compute-0 podman[449415]: 2025-10-11 02:37:38.135070457 +0000 UTC m=+0.257337781 container start 28887b231b4dac9e9bb8bd00c152d10e69e036ae109e3416f4e98cb06dd86a7f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_ardinghelli, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2)
Oct 11 02:37:38 compute-0 podman[449415]: 2025-10-11 02:37:38.142529469 +0000 UTC m=+0.264796793 container attach 28887b231b4dac9e9bb8bd00c152d10e69e036ae109e3416f4e98cb06dd86a7f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_ardinghelli, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:37:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1796: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]: {
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:     "0": [
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:         {
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "devices": [
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "/dev/loop3"
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             ],
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "lv_name": "ceph_lv0",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "lv_size": "21470642176",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "name": "ceph_lv0",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "tags": {
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.cluster_name": "ceph",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.crush_device_class": "",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.encrypted": "0",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.osd_id": "0",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.type": "block",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.vdo": "0"
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             },
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "type": "block",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "vg_name": "ceph_vg0"
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:         }
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:     ],
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:     "1": [
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:         {
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "devices": [
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "/dev/loop4"
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             ],
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "lv_name": "ceph_lv1",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "lv_size": "21470642176",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "name": "ceph_lv1",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "tags": {
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.cluster_name": "ceph",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.crush_device_class": "",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.encrypted": "0",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.osd_id": "1",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.type": "block",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.vdo": "0"
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             },
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "type": "block",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "vg_name": "ceph_vg1"
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:         }
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:     ],
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:     "2": [
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:         {
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "devices": [
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "/dev/loop5"
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             ],
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "lv_name": "ceph_lv2",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "lv_size": "21470642176",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "name": "ceph_lv2",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "tags": {
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.cluster_name": "ceph",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.crush_device_class": "",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.encrypted": "0",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.osd_id": "2",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.type": "block",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:                 "ceph.vdo": "0"
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             },
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "type": "block",
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:             "vg_name": "ceph_vg2"
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:         }
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]:     ]
Oct 11 02:37:39 compute-0 infallible_ardinghelli[449431]: }
Oct 11 02:37:39 compute-0 systemd[1]: libpod-28887b231b4dac9e9bb8bd00c152d10e69e036ae109e3416f4e98cb06dd86a7f.scope: Deactivated successfully.
Oct 11 02:37:39 compute-0 podman[449415]: 2025-10-11 02:37:39.061058832 +0000 UTC m=+1.183326126 container died 28887b231b4dac9e9bb8bd00c152d10e69e036ae109e3416f4e98cb06dd86a7f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_ardinghelli, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:37:39 compute-0 systemd[1]: var-lib-containers-storage-overlay-f458b7ba5cf597967bd2e01bcef826f1f35bdc8624d11189b6b76857a40794d4-merged.mount: Deactivated successfully.
Oct 11 02:37:39 compute-0 podman[449415]: 2025-10-11 02:37:39.157729812 +0000 UTC m=+1.279997126 container remove 28887b231b4dac9e9bb8bd00c152d10e69e036ae109e3416f4e98cb06dd86a7f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=infallible_ardinghelli, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:37:39 compute-0 systemd[1]: libpod-conmon-28887b231b4dac9e9bb8bd00c152d10e69e036ae109e3416f4e98cb06dd86a7f.scope: Deactivated successfully.
Oct 11 02:37:39 compute-0 sudo[449253]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:39 compute-0 sudo[449452]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:37:39 compute-0 sudo[449452]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:39 compute-0 sudo[449452]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:39 compute-0 sudo[449477]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:37:39 compute-0 sudo[449477]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:39 compute-0 sudo[449477]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:39 compute-0 sudo[449502]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:37:39 compute-0 sudo[449502]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:39 compute-0 sudo[449502]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:39 compute-0 sudo[449527]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:37:39 compute-0 sudo[449527]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:39 compute-0 ceph-mon[191930]: pgmap v1796: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:39 compute-0 nova_compute[356901]: 2025-10-11 02:37:39.935 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:40 compute-0 podman[449590]: 2025-10-11 02:37:40.13902724 +0000 UTC m=+0.076097275 container create 83d022a767b3207c9b27982349f03747ee806f6ed12548ecbe44155aa96f8572 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_haslett, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0)
Oct 11 02:37:40 compute-0 systemd[1]: Started libpod-conmon-83d022a767b3207c9b27982349f03747ee806f6ed12548ecbe44155aa96f8572.scope.
Oct 11 02:37:40 compute-0 podman[449590]: 2025-10-11 02:37:40.099759366 +0000 UTC m=+0.036829471 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:37:40 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:37:40 compute-0 podman[449590]: 2025-10-11 02:37:40.256392458 +0000 UTC m=+0.193462512 container init 83d022a767b3207c9b27982349f03747ee806f6ed12548ecbe44155aa96f8572 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_haslett, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:37:40 compute-0 podman[449590]: 2025-10-11 02:37:40.268865771 +0000 UTC m=+0.205935796 container start 83d022a767b3207c9b27982349f03747ee806f6ed12548ecbe44155aa96f8572 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_haslett, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef)
Oct 11 02:37:40 compute-0 podman[449590]: 2025-10-11 02:37:40.275309494 +0000 UTC m=+0.212379609 container attach 83d022a767b3207c9b27982349f03747ee806f6ed12548ecbe44155aa96f8572 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_haslett, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:37:40 compute-0 romantic_haslett[449605]: 167 167
Oct 11 02:37:40 compute-0 systemd[1]: libpod-83d022a767b3207c9b27982349f03747ee806f6ed12548ecbe44155aa96f8572.scope: Deactivated successfully.
Oct 11 02:37:40 compute-0 podman[449590]: 2025-10-11 02:37:40.278427413 +0000 UTC m=+0.215497448 container died 83d022a767b3207c9b27982349f03747ee806f6ed12548ecbe44155aa96f8572 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_haslett, OSD_FLAVOR=default, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507)
Oct 11 02:37:40 compute-0 systemd[1]: var-lib-containers-storage-overlay-fb119d10f2ec9a47557c8d775483989a5b20a5cea831647b20004c442b110fc0-merged.mount: Deactivated successfully.
Oct 11 02:37:40 compute-0 podman[449590]: 2025-10-11 02:37:40.335169777 +0000 UTC m=+0.272239802 container remove 83d022a767b3207c9b27982349f03747ee806f6ed12548ecbe44155aa96f8572 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_haslett, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:37:40 compute-0 systemd[1]: libpod-conmon-83d022a767b3207c9b27982349f03747ee806f6ed12548ecbe44155aa96f8572.scope: Deactivated successfully.
Oct 11 02:37:40 compute-0 podman[449630]: 2025-10-11 02:37:40.562841578 +0000 UTC m=+0.066894158 container create 1ba5bc376771c234496d9d6b302b66ec7ee7198c4d9197c02c797306745f23e9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_bouman, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 02:37:40 compute-0 podman[449630]: 2025-10-11 02:37:40.531758623 +0000 UTC m=+0.035811293 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:37:40 compute-0 systemd[1]: Started libpod-conmon-1ba5bc376771c234496d9d6b302b66ec7ee7198c4d9197c02c797306745f23e9.scope.
Oct 11 02:37:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1797: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:40 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:37:40 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/224bf1546a5c3d0f77f54aab8ae9e8c57fc4db41cdf6a78d00ca93d3a067e859/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:37:40 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/224bf1546a5c3d0f77f54aab8ae9e8c57fc4db41cdf6a78d00ca93d3a067e859/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:37:40 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/224bf1546a5c3d0f77f54aab8ae9e8c57fc4db41cdf6a78d00ca93d3a067e859/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:37:40 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/224bf1546a5c3d0f77f54aab8ae9e8c57fc4db41cdf6a78d00ca93d3a067e859/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:37:40 compute-0 podman[449630]: 2025-10-11 02:37:40.698384542 +0000 UTC m=+0.202437172 container init 1ba5bc376771c234496d9d6b302b66ec7ee7198c4d9197c02c797306745f23e9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_bouman, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2)
Oct 11 02:37:40 compute-0 podman[449630]: 2025-10-11 02:37:40.720495886 +0000 UTC m=+0.224548466 container start 1ba5bc376771c234496d9d6b302b66ec7ee7198c4d9197c02c797306745f23e9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_bouman, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:37:40 compute-0 podman[449630]: 2025-10-11 02:37:40.725842973 +0000 UTC m=+0.229895623 container attach 1ba5bc376771c234496d9d6b302b66ec7ee7198c4d9197c02c797306745f23e9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_bouman, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:37:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:37:41 compute-0 ceph-mon[191930]: pgmap v1797: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:41 compute-0 amazing_bouman[449646]: {
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:         "osd_id": 1,
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:         "type": "bluestore"
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:     },
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:         "osd_id": 2,
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:         "type": "bluestore"
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:     },
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:         "osd_id": 0,
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:         "type": "bluestore"
Oct 11 02:37:41 compute-0 amazing_bouman[449646]:     }
Oct 11 02:37:41 compute-0 amazing_bouman[449646]: }
Oct 11 02:37:41 compute-0 systemd[1]: libpod-1ba5bc376771c234496d9d6b302b66ec7ee7198c4d9197c02c797306745f23e9.scope: Deactivated successfully.
Oct 11 02:37:41 compute-0 systemd[1]: libpod-1ba5bc376771c234496d9d6b302b66ec7ee7198c4d9197c02c797306745f23e9.scope: Consumed 1.067s CPU time.
Oct 11 02:37:41 compute-0 podman[449679]: 2025-10-11 02:37:41.858153847 +0000 UTC m=+0.035985273 container died 1ba5bc376771c234496d9d6b302b66ec7ee7198c4d9197c02c797306745f23e9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_bouman, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:37:41 compute-0 systemd[1]: var-lib-containers-storage-overlay-224bf1546a5c3d0f77f54aab8ae9e8c57fc4db41cdf6a78d00ca93d3a067e859-merged.mount: Deactivated successfully.
Oct 11 02:37:41 compute-0 podman[449679]: 2025-10-11 02:37:41.929376302 +0000 UTC m=+0.107207748 container remove 1ba5bc376771c234496d9d6b302b66ec7ee7198c4d9197c02c797306745f23e9 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_bouman, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2)
Oct 11 02:37:41 compute-0 systemd[1]: libpod-conmon-1ba5bc376771c234496d9d6b302b66ec7ee7198c4d9197c02c797306745f23e9.scope: Deactivated successfully.
Oct 11 02:37:41 compute-0 sudo[449527]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:37:42 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:37:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:37:42 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:37:42 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 1a0522c0-fd59-4232-8b3a-4d06f6e44e85 does not exist
Oct 11 02:37:42 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev b1c8606e-69dc-4bfb-a603-341d640cbf29 does not exist
Oct 11 02:37:42 compute-0 sudo[449694]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:37:42 compute-0 sudo[449694]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:42 compute-0 sudo[449694]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:42 compute-0 sudo[449719]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:37:42 compute-0 sudo[449719]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:37:42 compute-0 sudo[449719]: pam_unix(sudo:session): session closed for user root
Oct 11 02:37:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1798: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:43 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:37:43 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:37:43 compute-0 ceph-mon[191930]: pgmap v1798: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:43 compute-0 nova_compute[356901]: 2025-10-11 02:37:43.067 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1799: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:44 compute-0 podman[449744]: 2025-10-11 02:37:44.893500942 +0000 UTC m=+0.173180746 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.expose-services=, architecture=x86_64, container_name=kepler, managed_by=edpm_ansible, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9, maintainer=Red Hat, Inc., release=1214.1726694543, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.openshift.tags=base rhel9, vendor=Red Hat, Inc., version=9.4, io.buildah.version=1.29.0, config_id=edpm, release-0.7.12=, build-date=2024-09-18T21:23:30, distribution-scope=public, summary=Provides the latest release of Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, com.redhat.component=ubi9-container, name=ubi9)
Oct 11 02:37:44 compute-0 nova_compute[356901]: 2025-10-11 02:37:44.937 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:45 compute-0 ceph-mon[191930]: pgmap v1799: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:37:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1800: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:47 compute-0 ceph-mon[191930]: pgmap v1800: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:48 compute-0 nova_compute[356901]: 2025-10-11 02:37:48.070 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1801: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:49 compute-0 ceph-mon[191930]: pgmap v1801: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:49 compute-0 nova_compute[356901]: 2025-10-11 02:37:49.940 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1802: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:50 compute-0 ceph-mon[191930]: pgmap v1802: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:37:51 compute-0 podman[449765]: 2025-10-11 02:37:51.207458835 +0000 UTC m=+0.089537377 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:37:51 compute-0 podman[449767]: 2025-10-11 02:37:51.225779497 +0000 UTC m=+0.104595716 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 10 Base Image, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:37:51 compute-0 podman[449766]: 2025-10-11 02:37:51.237860416 +0000 UTC m=+0.129497426 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 02:37:51 compute-0 podman[449772]: 2025-10-11 02:37:51.263105052 +0000 UTC m=+0.121741438 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible)
Oct 11 02:37:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1803: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:53 compute-0 nova_compute[356901]: 2025-10-11 02:37:53.072 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:53 compute-0 ceph-mon[191930]: pgmap v1803: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1804: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:37:54.865 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:37:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:37:54.866 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:37:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:37:54.867 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:37:54 compute-0 nova_compute[356901]: 2025-10-11 02:37:54.944 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:55 compute-0 ceph-mon[191930]: pgmap v1804: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:37:55 compute-0 nova_compute[356901]: 2025-10-11 02:37:55.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:37:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:37:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:37:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:37:56
Oct 11 02:37:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:37:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:37:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['cephfs.cephfs.data', 'cephfs.cephfs.meta', 'default.rgw.log', 'images', 'vms', '.rgw.root', 'backups', '.mgr', 'volumes', 'default.rgw.control', 'default.rgw.meta']
Oct 11 02:37:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:37:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:37:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:37:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:37:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:37:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1805: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:57 compute-0 podman[449846]: 2025-10-11 02:37:57.225371744 +0000 UTC m=+0.115685858 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=multipathd, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd)
Oct 11 02:37:57 compute-0 podman[449847]: 2025-10-11 02:37:57.247202394 +0000 UTC m=+0.131790594 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true)
Oct 11 02:37:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:37:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:37:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:37:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:37:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:37:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:37:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:37:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:37:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:37:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:37:57 compute-0 ceph-mon[191930]: pgmap v1805: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:58 compute-0 nova_compute[356901]: 2025-10-11 02:37:58.074 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1806: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:58 compute-0 ceph-mon[191930]: pgmap v1806: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:37:59 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:37:59.380 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=10, options={'arp_ns_explicit_output': 'true', 'mac_prefix': 'fe:55:97', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'ce:9c:4f:b4:85:9b'}, ipsec=False) old=SB_Global(nb_cfg=9) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:37:59 compute-0 nova_compute[356901]: 2025-10-11 02:37:59.381 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:37:59 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:37:59.383 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 9 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274
Oct 11 02:37:59 compute-0 podman[157119]: time="2025-10-11T02:37:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:37:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:37:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:37:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:37:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9068 "" "Go-http-client/1.1"
Oct 11 02:37:59 compute-0 nova_compute[356901]: 2025-10-11 02:37:59.947 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1807: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:38:01 compute-0 openstack_network_exporter[374316]: ERROR   02:38:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:38:01 compute-0 openstack_network_exporter[374316]: ERROR   02:38:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:38:01 compute-0 openstack_network_exporter[374316]: ERROR   02:38:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:38:01 compute-0 openstack_network_exporter[374316]: ERROR   02:38:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:38:01 compute-0 openstack_network_exporter[374316]: ERROR   02:38:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:38:01 compute-0 ceph-mon[191930]: pgmap v1807: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1808: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:02 compute-0 nova_compute[356901]: 2025-10-11 02:38:02.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:38:03 compute-0 nova_compute[356901]: 2025-10-11 02:38:03.077 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:03 compute-0 ceph-mon[191930]: pgmap v1808: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1809: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:04 compute-0 nova_compute[356901]: 2025-10-11 02:38:04.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:38:04 compute-0 nova_compute[356901]: 2025-10-11 02:38:04.952 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e135 do_prune osdmap full prune enabled
Oct 11 02:38:05 compute-0 ceph-mon[191930]: pgmap v1809: 321 pgs: 321 active+clean; 78 MiB data, 259 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e136 e136: 3 total, 3 up, 3 in
Oct 11 02:38:05 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e136: 3 total, 3 up, 3 in
Oct 11 02:38:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e136 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:38:05 compute-0 nova_compute[356901]: 2025-10-11 02:38:05.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:38:05 compute-0 nova_compute[356901]: 2025-10-11 02:38:05.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:38:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1811: 321 pgs: 321 active+clean; 82 MiB data, 264 MiB used, 60 GiB / 60 GiB avail; 3.4 KiB/s rd, 455 KiB/s wr, 6 op/s
Oct 11 02:38:06 compute-0 ceph-mon[191930]: osdmap e136: 3 total, 3 up, 3 in
Oct 11 02:38:06 compute-0 ceph-mon[191930]: pgmap v1811: 321 pgs: 321 active+clean; 82 MiB data, 264 MiB used, 60 GiB / 60 GiB avail; 3.4 KiB/s rd, 455 KiB/s wr, 6 op/s
Oct 11 02:38:06 compute-0 nova_compute[356901]: 2025-10-11 02:38:06.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:38:06 compute-0 nova_compute[356901]: 2025-10-11 02:38:06.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:38:06 compute-0 nova_compute[356901]: 2025-10-11 02:38:06.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00032571160545679496 of space, bias 1.0, pg target 0.09771348163703848 quantized to 32 (current 32)
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:38:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:38:07 compute-0 nova_compute[356901]: 2025-10-11 02:38:07.171 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:38:07 compute-0 nova_compute[356901]: 2025-10-11 02:38:07.172 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:38:07 compute-0 nova_compute[356901]: 2025-10-11 02:38:07.172 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:38:07 compute-0 nova_compute[356901]: 2025-10-11 02:38:07.173 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:38:07 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e136 do_prune osdmap full prune enabled
Oct 11 02:38:07 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 e137: 3 total, 3 up, 3 in
Oct 11 02:38:07 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e137: 3 total, 3 up, 3 in
Oct 11 02:38:08 compute-0 nova_compute[356901]: 2025-10-11 02:38:08.081 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:08 compute-0 podman[449885]: 2025-10-11 02:38:08.24973835 +0000 UTC m=+0.119330968 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:38:08 compute-0 podman[449883]: 2025-10-11 02:38:08.24975804 +0000 UTC m=+0.136005293 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
Oct 11 02:38:08 compute-0 podman[449884]: 2025-10-11 02:38:08.274826814 +0000 UTC m=+0.151966747 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, url=https://catalog.redhat.com/en/search?searchType=containers, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, release=1755695350, managed_by=edpm_ansible, name=ubi9-minimal, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.expose-services=, maintainer=Red Hat, Inc., vcs-type=git, io.buildah.version=1.33.7, vendor=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, distribution-scope=public, build-date=2025-08-20T13:12:41, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, config_id=edpm, container_name=openstack_network_exporter, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., version=9.6, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, architecture=x86_64, io.openshift.tags=minimal rhel9)
Oct 11 02:38:08 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:38:08.386 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '10'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:38:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1813: 321 pgs: 321 active+clean; 102 MiB data, 288 MiB used, 60 GiB / 60 GiB avail; 29 KiB/s rd, 3.1 MiB/s wr, 39 op/s
Oct 11 02:38:08 compute-0 ceph-mon[191930]: osdmap e137: 3 total, 3 up, 3 in
Oct 11 02:38:08 compute-0 ceph-mon[191930]: pgmap v1813: 321 pgs: 321 active+clean; 102 MiB data, 288 MiB used, 60 GiB / 60 GiB avail; 29 KiB/s rd, 3.1 MiB/s wr, 39 op/s
Oct 11 02:38:08 compute-0 nova_compute[356901]: 2025-10-11 02:38:08.935 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:38:08 compute-0 nova_compute[356901]: 2025-10-11 02:38:08.971 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:38:08 compute-0 nova_compute[356901]: 2025-10-11 02:38:08.972 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:38:08 compute-0 nova_compute[356901]: 2025-10-11 02:38:08.972 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:38:08 compute-0 nova_compute[356901]: 2025-10-11 02:38:08.973 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:38:09 compute-0 nova_compute[356901]: 2025-10-11 02:38:09.010 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:38:09 compute-0 nova_compute[356901]: 2025-10-11 02:38:09.011 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:38:09 compute-0 nova_compute[356901]: 2025-10-11 02:38:09.011 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:38:09 compute-0 nova_compute[356901]: 2025-10-11 02:38:09.011 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:38:09 compute-0 nova_compute[356901]: 2025-10-11 02:38:09.011 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:38:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:38:09 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1597630332' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:38:09 compute-0 nova_compute[356901]: 2025-10-11 02:38:09.530 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.518s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:38:09 compute-0 nova_compute[356901]: 2025-10-11 02:38:09.678 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:38:09 compute-0 nova_compute[356901]: 2025-10-11 02:38:09.678 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:38:09 compute-0 nova_compute[356901]: 2025-10-11 02:38:09.679 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:38:09 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1597630332' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:38:09 compute-0 nova_compute[356901]: 2025-10-11 02:38:09.953 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:10 compute-0 nova_compute[356901]: 2025-10-11 02:38:10.268 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:38:10 compute-0 nova_compute[356901]: 2025-10-11 02:38:10.270 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3836MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:38:10 compute-0 nova_compute[356901]: 2025-10-11 02:38:10.271 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:38:10 compute-0 nova_compute[356901]: 2025-10-11 02:38:10.271 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:38:10 compute-0 nova_compute[356901]: 2025-10-11 02:38:10.362 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:38:10 compute-0 nova_compute[356901]: 2025-10-11 02:38:10.363 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:38:10 compute-0 nova_compute[356901]: 2025-10-11 02:38:10.363 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:38:10 compute-0 nova_compute[356901]: 2025-10-11 02:38:10.422 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:38:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1814: 321 pgs: 321 active+clean; 110 MiB data, 296 MiB used, 60 GiB / 60 GiB avail; 29 KiB/s rd, 4.1 MiB/s wr, 39 op/s
Oct 11 02:38:10 compute-0 ceph-mon[191930]: pgmap v1814: 321 pgs: 321 active+clean; 110 MiB data, 296 MiB used, 60 GiB / 60 GiB avail; 29 KiB/s rd, 4.1 MiB/s wr, 39 op/s
Oct 11 02:38:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:38:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:38:10 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3014834662' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:38:10 compute-0 nova_compute[356901]: 2025-10-11 02:38:10.967 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.545s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:38:10 compute-0 nova_compute[356901]: 2025-10-11 02:38:10.978 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:38:11 compute-0 nova_compute[356901]: 2025-10-11 02:38:11.002 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:38:11 compute-0 nova_compute[356901]: 2025-10-11 02:38:11.009 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:38:11 compute-0 nova_compute[356901]: 2025-10-11 02:38:11.009 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.738s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:38:11 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3014834662' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:38:11 compute-0 nova_compute[356901]: 2025-10-11 02:38:11.934 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:38:11 compute-0 nova_compute[356901]: 2025-10-11 02:38:11.935 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:38:11 compute-0 nova_compute[356901]: 2025-10-11 02:38:11.967 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:38:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1815: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 33 KiB/s rd, 5.1 MiB/s wr, 47 op/s
Oct 11 02:38:12 compute-0 ceph-mon[191930]: pgmap v1815: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 33 KiB/s rd, 5.1 MiB/s wr, 47 op/s
Oct 11 02:38:13 compute-0 nova_compute[356901]: 2025-10-11 02:38:13.084 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1816: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 4.6 MiB/s wr, 42 op/s
Oct 11 02:38:14 compute-0 nova_compute[356901]: 2025-10-11 02:38:14.956 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:15 compute-0 podman[449990]: 2025-10-11 02:38:15.218705767 +0000 UTC m=+0.107525594 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, distribution-scope=public, io.openshift.tags=base rhel9, config_id=edpm, io.openshift.expose-services=, name=ubi9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, version=9.4, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, maintainer=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, release=1214.1726694543, vcs-type=git, architecture=x86_64, io.buildah.version=1.29.0, managed_by=edpm_ansible, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, com.redhat.component=ubi9-container, container_name=kepler, vendor=Red Hat, Inc., build-date=2024-09-18T21:23:30, summary=Provides the latest release of Red Hat Universal Base Image 9., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:38:15 compute-0 ceph-mon[191930]: pgmap v1816: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 4.6 MiB/s wr, 42 op/s
Oct 11 02:38:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:38:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1817: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 23 KiB/s rd, 3.6 MiB/s wr, 31 op/s
Oct 11 02:38:17 compute-0 ceph-mon[191930]: pgmap v1817: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 23 KiB/s rd, 3.6 MiB/s wr, 31 op/s
Oct 11 02:38:18 compute-0 nova_compute[356901]: 2025-10-11 02:38:18.087 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1818: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 21 KiB/s rd, 3.4 MiB/s wr, 29 op/s
Oct 11 02:38:18 compute-0 ceph-mon[191930]: pgmap v1818: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 21 KiB/s rd, 3.4 MiB/s wr, 29 op/s
Oct 11 02:38:19 compute-0 nova_compute[356901]: 2025-10-11 02:38:19.960 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1819: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 2.8 KiB/s rd, 1.3 MiB/s wr, 5 op/s
Oct 11 02:38:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:38:21 compute-0 ceph-mon[191930]: pgmap v1819: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 2.8 KiB/s rd, 1.3 MiB/s wr, 5 op/s
Oct 11 02:38:22 compute-0 podman[450008]: 2025-10-11 02:38:22.188650488 +0000 UTC m=+0.085450440 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:38:22 compute-0 podman[450010]: 2025-10-11 02:38:22.218019812 +0000 UTC m=+0.097557909 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.build-date=20251007, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']})
Oct 11 02:38:22 compute-0 podman[450011]: 2025-10-11 02:38:22.251323641 +0000 UTC m=+0.126520986 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.license=GPLv2)
Oct 11 02:38:22 compute-0 podman[450009]: 2025-10-11 02:38:22.256416195 +0000 UTC m=+0.151713832 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009)
Oct 11 02:38:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1820: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 2.8 KiB/s rd, 683 KiB/s wr, 5 op/s
Oct 11 02:38:22 compute-0 ceph-mon[191930]: pgmap v1820: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 2.8 KiB/s rd, 683 KiB/s wr, 5 op/s
Oct 11 02:38:23 compute-0 nova_compute[356901]: 2025-10-11 02:38:23.090 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1821: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:24 compute-0 nova_compute[356901]: 2025-10-11 02:38:24.963 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:25 compute-0 ceph-mon[191930]: pgmap v1821: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:38:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:38:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:38:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:38:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:38:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:38:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:38:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1822: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:26 compute-0 ceph-mon[191930]: pgmap v1822: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:38:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3701771439' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:38:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:38:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3701771439' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:38:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3701771439' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:38:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3701771439' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:38:28 compute-0 nova_compute[356901]: 2025-10-11 02:38:28.092 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:28 compute-0 podman[450095]: 2025-10-11 02:38:28.206468542 +0000 UTC m=+0.088606322 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=iscsid, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:38:28 compute-0 podman[450094]: 2025-10-11 02:38:28.219967564 +0000 UTC m=+0.104376232 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, config_id=multipathd, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']})
Oct 11 02:38:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1823: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:29 compute-0 ceph-mon[191930]: pgmap v1823: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:29 compute-0 ovn_controller[88370]: 2025-10-11T02:38:29Z|00070|memory_trim|INFO|Detected inactivity (last active 30007 ms ago): trimming memory
Oct 11 02:38:29 compute-0 podman[157119]: time="2025-10-11T02:38:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:38:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:38:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:38:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:38:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9074 "" "Go-http-client/1.1"
Oct 11 02:38:29 compute-0 nova_compute[356901]: 2025-10-11 02:38:29.967 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1824: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:38:31 compute-0 openstack_network_exporter[374316]: ERROR   02:38:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:38:31 compute-0 openstack_network_exporter[374316]: ERROR   02:38:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:38:31 compute-0 openstack_network_exporter[374316]: ERROR   02:38:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:38:31 compute-0 openstack_network_exporter[374316]: ERROR   02:38:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:38:31 compute-0 openstack_network_exporter[374316]: ERROR   02:38:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:38:31 compute-0 ceph-mon[191930]: pgmap v1824: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1825: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:33 compute-0 nova_compute[356901]: 2025-10-11 02:38:33.095 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:33 compute-0 ceph-mon[191930]: pgmap v1825: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1826: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:34 compute-0 ceph-mon[191930]: pgmap v1826: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:34 compute-0 nova_compute[356901]: 2025-10-11 02:38:34.970 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:38:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1827: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:37 compute-0 ceph-mon[191930]: pgmap v1827: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:38 compute-0 nova_compute[356901]: 2025-10-11 02:38:38.098 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1828: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:39 compute-0 podman[450133]: 2025-10-11 02:38:39.22679172 +0000 UTC m=+0.108606402 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true)
Oct 11 02:38:39 compute-0 podman[450135]: 2025-10-11 02:38:39.232598216 +0000 UTC m=+0.103396856 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:38:39 compute-0 podman[450134]: 2025-10-11 02:38:39.269484344 +0000 UTC m=+0.155346512 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-type=git, io.openshift.expose-services=, managed_by=edpm_ansible, url=https://catalog.redhat.com/en/search?searchType=containers, version=9.6, io.buildah.version=1.33.7, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., name=ubi9-minimal, build-date=2025-08-20T13:12:41, release=1755695350, vendor=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, container_name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, architecture=x86_64, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., distribution-scope=public, config_id=edpm)
Oct 11 02:38:39 compute-0 ceph-mon[191930]: pgmap v1828: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:39 compute-0 nova_compute[356901]: 2025-10-11 02:38:39.973 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:40 compute-0 nova_compute[356901]: 2025-10-11 02:38:40.095 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:40 compute-0 nova_compute[356901]: 2025-10-11 02:38:40.223 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:40 compute-0 nova_compute[356901]: 2025-10-11 02:38:40.307 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:40 compute-0 nova_compute[356901]: 2025-10-11 02:38:40.408 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1829: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:40 compute-0 ceph-mon[191930]: pgmap v1829: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:38:41 compute-0 nova_compute[356901]: 2025-10-11 02:38:41.728 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:42 compute-0 sudo[450197]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:38:42 compute-0 sudo[450197]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:38:42 compute-0 sudo[450197]: pam_unix(sudo:session): session closed for user root
Oct 11 02:38:42 compute-0 sudo[450222]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:38:42 compute-0 sudo[450222]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:38:42 compute-0 sudo[450222]: pam_unix(sudo:session): session closed for user root
Oct 11 02:38:42 compute-0 sudo[450247]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:38:42 compute-0 sudo[450247]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:38:42 compute-0 sudo[450247]: pam_unix(sudo:session): session closed for user root
Oct 11 02:38:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1830: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:42 compute-0 sudo[450272]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:38:42 compute-0 sudo[450272]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:38:43 compute-0 nova_compute[356901]: 2025-10-11 02:38:43.101 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:43 compute-0 sudo[450272]: pam_unix(sudo:session): session closed for user root
Oct 11 02:38:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:38:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:38:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:38:43 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:38:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:38:43 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:38:43 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 451b28ec-18ed-47ad-9af4-b2d409c2e1c6 does not exist
Oct 11 02:38:43 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 8464c82e-0d49-4e76-9d9b-237341c3f863 does not exist
Oct 11 02:38:43 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 49bacbce-589f-4fca-a6cb-db1ba53de5d6 does not exist
Oct 11 02:38:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:38:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:38:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:38:43 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:38:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:38:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:38:43 compute-0 sudo[450327]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:38:43 compute-0 sudo[450327]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:38:43 compute-0 sudo[450327]: pam_unix(sudo:session): session closed for user root
Oct 11 02:38:43 compute-0 ceph-mon[191930]: pgmap v1830: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:43 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:38:43 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:38:43 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:38:43 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:38:43 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:38:43 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:38:43 compute-0 sudo[450352]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:38:43 compute-0 sudo[450352]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:38:43 compute-0 sudo[450352]: pam_unix(sudo:session): session closed for user root
Oct 11 02:38:43 compute-0 sudo[450377]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:38:43 compute-0 sudo[450377]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:38:43 compute-0 sudo[450377]: pam_unix(sudo:session): session closed for user root
Oct 11 02:38:43 compute-0 sudo[450402]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:38:43 compute-0 sudo[450402]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:38:44 compute-0 podman[450464]: 2025-10-11 02:38:44.51107198 +0000 UTC m=+0.059319620 container create 8c4091ce4f3e3b872ae3c462503758e8cbf9533e20a49252cc8cf076ddd9e57f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_kapitsa, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=reef)
Oct 11 02:38:44 compute-0 systemd[1]: Started libpod-conmon-8c4091ce4f3e3b872ae3c462503758e8cbf9533e20a49252cc8cf076ddd9e57f.scope.
Oct 11 02:38:44 compute-0 podman[450464]: 2025-10-11 02:38:44.486850607 +0000 UTC m=+0.035098267 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:38:44 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:38:44 compute-0 podman[450464]: 2025-10-11 02:38:44.654843955 +0000 UTC m=+0.203091645 container init 8c4091ce4f3e3b872ae3c462503758e8cbf9533e20a49252cc8cf076ddd9e57f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_kapitsa, CEPH_REF=reef, org.label-schema.build-date=20250507, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:38:44 compute-0 podman[450464]: 2025-10-11 02:38:44.666984971 +0000 UTC m=+0.215232601 container start 8c4091ce4f3e3b872ae3c462503758e8cbf9533e20a49252cc8cf076ddd9e57f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_kapitsa, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default)
Oct 11 02:38:44 compute-0 podman[450464]: 2025-10-11 02:38:44.671670924 +0000 UTC m=+0.219918584 container attach 8c4091ce4f3e3b872ae3c462503758e8cbf9533e20a49252cc8cf076ddd9e57f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_kapitsa, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:38:44 compute-0 blissful_kapitsa[450481]: 167 167
Oct 11 02:38:44 compute-0 systemd[1]: libpod-8c4091ce4f3e3b872ae3c462503758e8cbf9533e20a49252cc8cf076ddd9e57f.scope: Deactivated successfully.
Oct 11 02:38:44 compute-0 podman[450464]: 2025-10-11 02:38:44.678133303 +0000 UTC m=+0.226380953 container died 8c4091ce4f3e3b872ae3c462503758e8cbf9533e20a49252cc8cf076ddd9e57f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_kapitsa, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:38:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1831: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:44 compute-0 systemd[1]: var-lib-containers-storage-overlay-b6dcdd2d2c12f6a1b62bf1744ca5b8a0454b6a64386bfd5715edd6c4f17ee2ba-merged.mount: Deactivated successfully.
Oct 11 02:38:44 compute-0 podman[450464]: 2025-10-11 02:38:44.745503762 +0000 UTC m=+0.293751402 container remove 8c4091ce4f3e3b872ae3c462503758e8cbf9533e20a49252cc8cf076ddd9e57f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_kapitsa, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:38:44 compute-0 systemd[1]: libpod-conmon-8c4091ce4f3e3b872ae3c462503758e8cbf9533e20a49252cc8cf076ddd9e57f.scope: Deactivated successfully.
Oct 11 02:38:44 compute-0 nova_compute[356901]: 2025-10-11 02:38:44.976 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:44 compute-0 podman[450504]: 2025-10-11 02:38:44.991628779 +0000 UTC m=+0.084450556 container create 81f42a0251d12665e73276d99b684db5addb711225bbd014fd5292af989bcd7c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_joliot, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2)
Oct 11 02:38:45 compute-0 podman[450504]: 2025-10-11 02:38:44.946503281 +0000 UTC m=+0.039325108 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:38:45 compute-0 systemd[1]: Started libpod-conmon-81f42a0251d12665e73276d99b684db5addb711225bbd014fd5292af989bcd7c.scope.
Oct 11 02:38:45 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:38:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/461d4c4ffeb019573280f1feed636a35f3a8e002404350edb40455c5f8390ef5/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:38:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/461d4c4ffeb019573280f1feed636a35f3a8e002404350edb40455c5f8390ef5/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:38:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/461d4c4ffeb019573280f1feed636a35f3a8e002404350edb40455c5f8390ef5/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:38:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/461d4c4ffeb019573280f1feed636a35f3a8e002404350edb40455c5f8390ef5/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:38:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/461d4c4ffeb019573280f1feed636a35f3a8e002404350edb40455c5f8390ef5/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:38:45 compute-0 podman[450504]: 2025-10-11 02:38:45.250102909 +0000 UTC m=+0.342924756 container init 81f42a0251d12665e73276d99b684db5addb711225bbd014fd5292af989bcd7c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_joliot, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:38:45 compute-0 podman[450504]: 2025-10-11 02:38:45.271500588 +0000 UTC m=+0.364322405 container start 81f42a0251d12665e73276d99b684db5addb711225bbd014fd5292af989bcd7c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_joliot, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:38:45 compute-0 podman[450504]: 2025-10-11 02:38:45.285029401 +0000 UTC m=+0.377851198 container attach 81f42a0251d12665e73276d99b684db5addb711225bbd014fd5292af989bcd7c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_joliot, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:38:45 compute-0 ceph-mon[191930]: pgmap v1831: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:38:46 compute-0 podman[450531]: 2025-10-11 02:38:46.229038524 +0000 UTC m=+0.124478642 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, io.openshift.expose-services=, name=ubi9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.29.0, distribution-scope=public, maintainer=Red Hat, Inc., managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-type=git, version=9.4, io.openshift.tags=base rhel9, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, architecture=x86_64, release-0.7.12=, release=1214.1726694543, build-date=2024-09-18T21:23:30, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., com.redhat.component=ubi9-container, io.k8s.display-name=Red Hat Universal Base Image 9, container_name=kepler, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543)
Oct 11 02:38:46 compute-0 upbeat_joliot[450520]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:38:46 compute-0 upbeat_joliot[450520]: --> relative data size: 1.0
Oct 11 02:38:46 compute-0 upbeat_joliot[450520]: --> All data devices are unavailable
Oct 11 02:38:46 compute-0 systemd[1]: libpod-81f42a0251d12665e73276d99b684db5addb711225bbd014fd5292af989bcd7c.scope: Deactivated successfully.
Oct 11 02:38:46 compute-0 systemd[1]: libpod-81f42a0251d12665e73276d99b684db5addb711225bbd014fd5292af989bcd7c.scope: Consumed 1.166s CPU time.
Oct 11 02:38:46 compute-0 podman[450504]: 2025-10-11 02:38:46.505999106 +0000 UTC m=+1.598820893 container died 81f42a0251d12665e73276d99b684db5addb711225bbd014fd5292af989bcd7c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_joliot, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:38:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1832: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:47 compute-0 ceph-mon[191930]: pgmap v1832: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:47 compute-0 systemd[1]: var-lib-containers-storage-overlay-461d4c4ffeb019573280f1feed636a35f3a8e002404350edb40455c5f8390ef5-merged.mount: Deactivated successfully.
Oct 11 02:38:47 compute-0 podman[450504]: 2025-10-11 02:38:47.795909821 +0000 UTC m=+2.888731608 container remove 81f42a0251d12665e73276d99b684db5addb711225bbd014fd5292af989bcd7c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=upbeat_joliot, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Oct 11 02:38:47 compute-0 systemd[1]: libpod-conmon-81f42a0251d12665e73276d99b684db5addb711225bbd014fd5292af989bcd7c.scope: Deactivated successfully.
Oct 11 02:38:47 compute-0 sudo[450402]: pam_unix(sudo:session): session closed for user root
Oct 11 02:38:47 compute-0 sudo[450579]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:38:48 compute-0 sudo[450579]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:38:48 compute-0 sudo[450579]: pam_unix(sudo:session): session closed for user root
Oct 11 02:38:48 compute-0 nova_compute[356901]: 2025-10-11 02:38:48.103 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:48 compute-0 sudo[450604]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:38:48 compute-0 sudo[450604]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:38:48 compute-0 sudo[450604]: pam_unix(sudo:session): session closed for user root
Oct 11 02:38:48 compute-0 sudo[450629]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:38:48 compute-0 sudo[450629]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:38:48 compute-0 sudo[450629]: pam_unix(sudo:session): session closed for user root
Oct 11 02:38:48 compute-0 nova_compute[356901]: 2025-10-11 02:38:48.413 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:48 compute-0 sudo[450654]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:38:48 compute-0 sudo[450654]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:38:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1833: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:48 compute-0 ceph-mon[191930]: pgmap v1833: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:49 compute-0 podman[450719]: 2025-10-11 02:38:48.995287492 +0000 UTC m=+0.062934374 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:38:49 compute-0 podman[450719]: 2025-10-11 02:38:49.11583698 +0000 UTC m=+0.183483852 container create 5f4e43cba7b4cb7bbab34d3baaeac2d741b556f84ff6e7a5c09ae79645b7f685 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_dewdney, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507)
Oct 11 02:38:49 compute-0 systemd[1]: Started libpod-conmon-5f4e43cba7b4cb7bbab34d3baaeac2d741b556f84ff6e7a5c09ae79645b7f685.scope.
Oct 11 02:38:49 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:38:49 compute-0 podman[450719]: 2025-10-11 02:38:49.542612805 +0000 UTC m=+0.610259717 container init 5f4e43cba7b4cb7bbab34d3baaeac2d741b556f84ff6e7a5c09ae79645b7f685 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_dewdney, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_REF=reef)
Oct 11 02:38:49 compute-0 podman[450719]: 2025-10-11 02:38:49.556783275 +0000 UTC m=+0.624430137 container start 5f4e43cba7b4cb7bbab34d3baaeac2d741b556f84ff6e7a5c09ae79645b7f685 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_dewdney, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:38:49 compute-0 blissful_dewdney[450736]: 167 167
Oct 11 02:38:49 compute-0 systemd[1]: libpod-5f4e43cba7b4cb7bbab34d3baaeac2d741b556f84ff6e7a5c09ae79645b7f685.scope: Deactivated successfully.
Oct 11 02:38:49 compute-0 podman[450719]: 2025-10-11 02:38:49.719645838 +0000 UTC m=+0.787292800 container attach 5f4e43cba7b4cb7bbab34d3baaeac2d741b556f84ff6e7a5c09ae79645b7f685 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_dewdney, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:38:49 compute-0 podman[450719]: 2025-10-11 02:38:49.720437299 +0000 UTC m=+0.788084171 container died 5f4e43cba7b4cb7bbab34d3baaeac2d741b556f84ff6e7a5c09ae79645b7f685 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_dewdney, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:38:49 compute-0 nova_compute[356901]: 2025-10-11 02:38:49.769 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:49 compute-0 nova_compute[356901]: 2025-10-11 02:38:49.802 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:49 compute-0 nova_compute[356901]: 2025-10-11 02:38:49.943 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:49 compute-0 nova_compute[356901]: 2025-10-11 02:38:49.978 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:50 compute-0 systemd[1]: var-lib-containers-storage-overlay-ca65ef53eba5818e990be60cb90450751d545b099a30a9f65fc551249acfca7d-merged.mount: Deactivated successfully.
Oct 11 02:38:50 compute-0 nova_compute[356901]: 2025-10-11 02:38:50.305 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:50 compute-0 podman[450719]: 2025-10-11 02:38:50.601407535 +0000 UTC m=+1.669054407 container remove 5f4e43cba7b4cb7bbab34d3baaeac2d741b556f84ff6e7a5c09ae79645b7f685 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_dewdney, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:38:50 compute-0 systemd[1]: libpod-conmon-5f4e43cba7b4cb7bbab34d3baaeac2d741b556f84ff6e7a5c09ae79645b7f685.scope: Deactivated successfully.
Oct 11 02:38:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1834: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:50 compute-0 ceph-mon[191930]: pgmap v1834: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:50 compute-0 podman[450761]: 2025-10-11 02:38:50.867861753 +0000 UTC m=+0.058130979 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:38:50 compute-0 podman[450761]: 2025-10-11 02:38:50.981562803 +0000 UTC m=+0.171831959 container create 3ae3bdb1405e368e0c97650cfa48a45209d32e7b368046738eb8606e4c691026 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_moore, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:38:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:38:51 compute-0 systemd[1]: Started libpod-conmon-3ae3bdb1405e368e0c97650cfa48a45209d32e7b368046738eb8606e4c691026.scope.
Oct 11 02:38:51 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:38:51 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d4ee342f5911e92952e0a30a727be307b884a1a0005897239e951830559669e5/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:38:51 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d4ee342f5911e92952e0a30a727be307b884a1a0005897239e951830559669e5/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:38:51 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d4ee342f5911e92952e0a30a727be307b884a1a0005897239e951830559669e5/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:38:51 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d4ee342f5911e92952e0a30a727be307b884a1a0005897239e951830559669e5/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:38:51 compute-0 podman[450761]: 2025-10-11 02:38:51.211291332 +0000 UTC m=+0.401560488 container init 3ae3bdb1405e368e0c97650cfa48a45209d32e7b368046738eb8606e4c691026 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_moore, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS)
Oct 11 02:38:51 compute-0 podman[450761]: 2025-10-11 02:38:51.228125572 +0000 UTC m=+0.418394728 container start 3ae3bdb1405e368e0c97650cfa48a45209d32e7b368046738eb8606e4c691026 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_moore, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:38:51 compute-0 podman[450761]: 2025-10-11 02:38:51.242821766 +0000 UTC m=+0.433090922 container attach 3ae3bdb1405e368e0c97650cfa48a45209d32e7b368046738eb8606e4c691026 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_moore, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:38:52 compute-0 nova_compute[356901]: 2025-10-11 02:38:52.174 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:52 compute-0 funny_moore[450775]: {
Oct 11 02:38:52 compute-0 funny_moore[450775]:     "0": [
Oct 11 02:38:52 compute-0 funny_moore[450775]:         {
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "devices": [
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "/dev/loop3"
Oct 11 02:38:52 compute-0 funny_moore[450775]:             ],
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "lv_name": "ceph_lv0",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "lv_size": "21470642176",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "name": "ceph_lv0",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "tags": {
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.cluster_name": "ceph",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.crush_device_class": "",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.encrypted": "0",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.osd_id": "0",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.type": "block",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.vdo": "0"
Oct 11 02:38:52 compute-0 funny_moore[450775]:             },
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "type": "block",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "vg_name": "ceph_vg0"
Oct 11 02:38:52 compute-0 funny_moore[450775]:         }
Oct 11 02:38:52 compute-0 funny_moore[450775]:     ],
Oct 11 02:38:52 compute-0 funny_moore[450775]:     "1": [
Oct 11 02:38:52 compute-0 funny_moore[450775]:         {
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "devices": [
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "/dev/loop4"
Oct 11 02:38:52 compute-0 funny_moore[450775]:             ],
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "lv_name": "ceph_lv1",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "lv_size": "21470642176",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "name": "ceph_lv1",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "tags": {
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.cluster_name": "ceph",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.crush_device_class": "",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.encrypted": "0",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.osd_id": "1",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.type": "block",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.vdo": "0"
Oct 11 02:38:52 compute-0 funny_moore[450775]:             },
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "type": "block",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "vg_name": "ceph_vg1"
Oct 11 02:38:52 compute-0 funny_moore[450775]:         }
Oct 11 02:38:52 compute-0 funny_moore[450775]:     ],
Oct 11 02:38:52 compute-0 funny_moore[450775]:     "2": [
Oct 11 02:38:52 compute-0 funny_moore[450775]:         {
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "devices": [
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "/dev/loop5"
Oct 11 02:38:52 compute-0 funny_moore[450775]:             ],
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "lv_name": "ceph_lv2",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "lv_size": "21470642176",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "name": "ceph_lv2",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "tags": {
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.cluster_name": "ceph",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.crush_device_class": "",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.encrypted": "0",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.osd_id": "2",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.type": "block",
Oct 11 02:38:52 compute-0 funny_moore[450775]:                 "ceph.vdo": "0"
Oct 11 02:38:52 compute-0 funny_moore[450775]:             },
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "type": "block",
Oct 11 02:38:52 compute-0 funny_moore[450775]:             "vg_name": "ceph_vg2"
Oct 11 02:38:52 compute-0 funny_moore[450775]:         }
Oct 11 02:38:52 compute-0 funny_moore[450775]:     ]
Oct 11 02:38:52 compute-0 funny_moore[450775]: }
Oct 11 02:38:52 compute-0 systemd[1]: libpod-3ae3bdb1405e368e0c97650cfa48a45209d32e7b368046738eb8606e4c691026.scope: Deactivated successfully.
Oct 11 02:38:52 compute-0 podman[450787]: 2025-10-11 02:38:52.344178806 +0000 UTC m=+0.057531354 container died 3ae3bdb1405e368e0c97650cfa48a45209d32e7b368046738eb8606e4c691026 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_moore, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 02:38:52 compute-0 systemd[1]: var-lib-containers-storage-overlay-d4ee342f5911e92952e0a30a727be307b884a1a0005897239e951830559669e5-merged.mount: Deactivated successfully.
Oct 11 02:38:52 compute-0 podman[450788]: 2025-10-11 02:38:52.592730777 +0000 UTC m=+0.300388256 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.license=GPLv2, io.buildah.version=1.41.4, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:38:52 compute-0 podman[450790]: 2025-10-11 02:38:52.664386998 +0000 UTC m=+0.359832268 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_managed=true, io.buildah.version=1.41.3, managed_by=edpm_ansible, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, org.label-schema.build-date=20251009)
Oct 11 02:38:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1835: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:52 compute-0 podman[450787]: 2025-10-11 02:38:52.83489152 +0000 UTC m=+0.548244068 container remove 3ae3bdb1405e368e0c97650cfa48a45209d32e7b368046738eb8606e4c691026 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_moore, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:38:52 compute-0 systemd[1]: libpod-conmon-3ae3bdb1405e368e0c97650cfa48a45209d32e7b368046738eb8606e4c691026.scope: Deactivated successfully.
Oct 11 02:38:52 compute-0 podman[450789]: 2025-10-11 02:38:52.85212586 +0000 UTC m=+0.559598704 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, container_name=ovn_controller)
Oct 11 02:38:52 compute-0 podman[450786]: 2025-10-11 02:38:52.854559324 +0000 UTC m=+0.561868664 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:38:52 compute-0 sudo[450654]: pam_unix(sudo:session): session closed for user root
Oct 11 02:38:53 compute-0 ceph-mon[191930]: pgmap v1835: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:53 compute-0 sudo[450879]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:38:53 compute-0 sudo[450879]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:38:53 compute-0 sudo[450879]: pam_unix(sudo:session): session closed for user root
Oct 11 02:38:53 compute-0 nova_compute[356901]: 2025-10-11 02:38:53.107 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:53 compute-0 sudo[450904]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:38:53 compute-0 sudo[450904]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:38:53 compute-0 sudo[450904]: pam_unix(sudo:session): session closed for user root
Oct 11 02:38:53 compute-0 sudo[450929]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:38:53 compute-0 sudo[450929]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:38:53 compute-0 sudo[450929]: pam_unix(sudo:session): session closed for user root
Oct 11 02:38:53 compute-0 nova_compute[356901]: 2025-10-11 02:38:53.452 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:53 compute-0 sudo[450954]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:38:53 compute-0 sudo[450954]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:38:54 compute-0 podman[451018]: 2025-10-11 02:38:54.161143475 +0000 UTC m=+0.043571749 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:38:54 compute-0 podman[451018]: 2025-10-11 02:38:54.287528175 +0000 UTC m=+0.169956349 container create 161226de743a6d4e2e3b22cd3d6d45ef5e1966a68d0df00eeab387333a7b9889 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_ardinghelli, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 02:38:54 compute-0 systemd[1]: Started libpod-conmon-161226de743a6d4e2e3b22cd3d6d45ef5e1966a68d0df00eeab387333a7b9889.scope.
Oct 11 02:38:54 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:38:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1836: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:54 compute-0 podman[451018]: 2025-10-11 02:38:54.718358126 +0000 UTC m=+0.600786330 container init 161226de743a6d4e2e3b22cd3d6d45ef5e1966a68d0df00eeab387333a7b9889 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_ardinghelli, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:38:54 compute-0 podman[451018]: 2025-10-11 02:38:54.741384337 +0000 UTC m=+0.623812541 container start 161226de743a6d4e2e3b22cd3d6d45ef5e1966a68d0df00eeab387333a7b9889 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_ardinghelli, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:38:54 compute-0 brave_ardinghelli[451034]: 167 167
Oct 11 02:38:54 compute-0 systemd[1]: libpod-161226de743a6d4e2e3b22cd3d6d45ef5e1966a68d0df00eeab387333a7b9889.scope: Deactivated successfully.
Oct 11 02:38:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:38:54.866 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:38:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:38:54.868 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:38:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:38:54.869 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:38:54 compute-0 nova_compute[356901]: 2025-10-11 02:38:54.983 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:55 compute-0 podman[451018]: 2025-10-11 02:38:55.059857513 +0000 UTC m=+0.942285767 container attach 161226de743a6d4e2e3b22cd3d6d45ef5e1966a68d0df00eeab387333a7b9889 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_ardinghelli, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:38:55 compute-0 podman[451018]: 2025-10-11 02:38:55.061819994 +0000 UTC m=+0.944248198 container died 161226de743a6d4e2e3b22cd3d6d45ef5e1966a68d0df00eeab387333a7b9889 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_ardinghelli, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:38:55 compute-0 ceph-mon[191930]: pgmap v1836: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:55 compute-0 systemd[1]: var-lib-containers-storage-overlay-0d094ff4e8dfdb4f9ecf780d29d9d06a8d857acd330e7b4aca279f9b5341acf6-merged.mount: Deactivated successfully.
Oct 11 02:38:55 compute-0 podman[451018]: 2025-10-11 02:38:55.695126803 +0000 UTC m=+1.577555007 container remove 161226de743a6d4e2e3b22cd3d6d45ef5e1966a68d0df00eeab387333a7b9889 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_ardinghelli, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:38:55 compute-0 systemd[1]: libpod-conmon-161226de743a6d4e2e3b22cd3d6d45ef5e1966a68d0df00eeab387333a7b9889.scope: Deactivated successfully.
Oct 11 02:38:55 compute-0 nova_compute[356901]: 2025-10-11 02:38:55.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:38:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:38:56 compute-0 podman[451057]: 2025-10-11 02:38:55.964934579 +0000 UTC m=+0.041664500 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:38:56 compute-0 podman[451057]: 2025-10-11 02:38:56.11052208 +0000 UTC m=+0.187251951 container create db7efcbc5811dc199d4e512bbf14e7fec15522c264329a9bb67d2f153911c29d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_bardeen, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:38:56 compute-0 systemd[1]: Started libpod-conmon-db7efcbc5811dc199d4e512bbf14e7fec15522c264329a9bb67d2f153911c29d.scope.
Oct 11 02:38:56 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:38:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c2daf816aaf13a60e525aa57c5d0c4bb0082f0dcf4404a61813a928cb0774be7/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:38:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c2daf816aaf13a60e525aa57c5d0c4bb0082f0dcf4404a61813a928cb0774be7/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:38:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c2daf816aaf13a60e525aa57c5d0c4bb0082f0dcf4404a61813a928cb0774be7/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:38:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c2daf816aaf13a60e525aa57c5d0c4bb0082f0dcf4404a61813a928cb0774be7/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:38:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:38:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:38:56 compute-0 podman[451057]: 2025-10-11 02:38:56.613403343 +0000 UTC m=+0.690133214 container init db7efcbc5811dc199d4e512bbf14e7fec15522c264329a9bb67d2f153911c29d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_bardeen, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:38:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:38:56
Oct 11 02:38:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:38:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:38:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.control', 'backups', '.rgw.root', 'default.rgw.meta', 'volumes', 'cephfs.cephfs.data', 'vms', '.mgr', 'default.rgw.log', 'images', 'cephfs.cephfs.meta']
Oct 11 02:38:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:38:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:38:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:38:56 compute-0 podman[451057]: 2025-10-11 02:38:56.626360551 +0000 UTC m=+0.703090412 container start db7efcbc5811dc199d4e512bbf14e7fec15522c264329a9bb67d2f153911c29d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_bardeen, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, OSD_FLAVOR=default, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 02:38:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:38:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:38:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1837: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:56 compute-0 podman[451057]: 2025-10-11 02:38:56.762171948 +0000 UTC m=+0.838901829 container attach db7efcbc5811dc199d4e512bbf14e7fec15522c264329a9bb67d2f153911c29d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_bardeen, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:38:56 compute-0 ceph-mon[191930]: pgmap v1837: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:38:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:38:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:38:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:38:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:38:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:38:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:38:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:38:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:38:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]: {
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:         "osd_id": 1,
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:         "type": "bluestore"
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:     },
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:         "osd_id": 2,
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:         "type": "bluestore"
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:     },
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:         "osd_id": 0,
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:         "type": "bluestore"
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]:     }
Oct 11 02:38:57 compute-0 sweet_bardeen[451074]: }
Oct 11 02:38:57 compute-0 systemd[1]: libpod-db7efcbc5811dc199d4e512bbf14e7fec15522c264329a9bb67d2f153911c29d.scope: Deactivated successfully.
Oct 11 02:38:57 compute-0 systemd[1]: libpod-db7efcbc5811dc199d4e512bbf14e7fec15522c264329a9bb67d2f153911c29d.scope: Consumed 1.169s CPU time.
Oct 11 02:38:57 compute-0 podman[451107]: 2025-10-11 02:38:57.866024324 +0000 UTC m=+0.038757153 container died db7efcbc5811dc199d4e512bbf14e7fec15522c264329a9bb67d2f153911c29d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_bardeen, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:38:58 compute-0 nova_compute[356901]: 2025-10-11 02:38:58.113 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:58 compute-0 systemd[1]: var-lib-containers-storage-overlay-c2daf816aaf13a60e525aa57c5d0c4bb0082f0dcf4404a61813a928cb0774be7-merged.mount: Deactivated successfully.
Oct 11 02:38:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1838: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:59 compute-0 podman[451107]: 2025-10-11 02:38:59.195570354 +0000 UTC m=+1.368303183 container remove db7efcbc5811dc199d4e512bbf14e7fec15522c264329a9bb67d2f153911c29d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_bardeen, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0)
Oct 11 02:38:59 compute-0 systemd[1]: libpod-conmon-db7efcbc5811dc199d4e512bbf14e7fec15522c264329a9bb67d2f153911c29d.scope: Deactivated successfully.
Oct 11 02:38:59 compute-0 podman[451119]: 2025-10-11 02:38:59.281446047 +0000 UTC m=+1.001602237 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, container_name=iscsid, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, config_id=iscsid, managed_by=edpm_ansible, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.build-date=20251009)
Oct 11 02:38:59 compute-0 podman[451118]: 2025-10-11 02:38:59.281692973 +0000 UTC m=+1.003387803 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, container_name=multipathd, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, config_id=multipathd, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 02:38:59 compute-0 sudo[450954]: pam_unix(sudo:session): session closed for user root
Oct 11 02:38:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:38:59 compute-0 ceph-mon[191930]: pgmap v1838: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:38:59 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:38:59.561 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=11, options={'arp_ns_explicit_output': 'true', 'mac_prefix': 'fe:55:97', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'ce:9c:4f:b4:85:9b'}, ipsec=False) old=SB_Global(nb_cfg=10) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:38:59 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:38:59.563 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 0 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274
Oct 11 02:38:59 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:38:59.565 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '11'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:38:59 compute-0 nova_compute[356901]: 2025-10-11 02:38:59.565 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:38:59 compute-0 nova_compute[356901]: 2025-10-11 02:38:59.603 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Acquiring lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:38:59 compute-0 nova_compute[356901]: 2025-10-11 02:38:59.604 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" acquired by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:38:59 compute-0 nova_compute[356901]: 2025-10-11 02:38:59.621 2 DEBUG nova.compute.manager [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Starting instance... _do_build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2402
Oct 11 02:38:59 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:38:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:38:59 compute-0 nova_compute[356901]: 2025-10-11 02:38:59.656 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Acquiring lock "830c7581-3555-41db-9818-0961fc151818" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:38:59 compute-0 nova_compute[356901]: 2025-10-11 02:38:59.657 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lock "830c7581-3555-41db-9818-0961fc151818" acquired by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:38:59 compute-0 nova_compute[356901]: 2025-10-11 02:38:59.694 2 DEBUG nova.compute.manager [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Starting instance... _do_build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2402
Oct 11 02:38:59 compute-0 nova_compute[356901]: 2025-10-11 02:38:59.739 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:38:59 compute-0 nova_compute[356901]: 2025-10-11 02:38:59.741 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:38:59 compute-0 podman[157119]: time="2025-10-11T02:38:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:38:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:38:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:38:59 compute-0 nova_compute[356901]: 2025-10-11 02:38:59.766 2 DEBUG nova.virt.hardware [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Require both a host and instance NUMA topology to fit instance on host. numa_fit_instance_to_host /usr/lib/python3.9/site-packages/nova/virt/hardware.py:2368
Oct 11 02:38:59 compute-0 nova_compute[356901]: 2025-10-11 02:38:59.767 2 INFO nova.compute.claims [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Claim successful on node compute-0.ctlplane.example.com
Oct 11 02:38:59 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:38:59 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev bfd2e94a-8e4e-4b05-94cf-5922cf520b65 does not exist
Oct 11 02:38:59 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 56f4196d-52bf-4032-816f-a98602eb3cca does not exist
Oct 11 02:38:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:38:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9057 "" "Go-http-client/1.1"
Oct 11 02:38:59 compute-0 nova_compute[356901]: 2025-10-11 02:38:59.805 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:38:59 compute-0 sudo[451158]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:38:59 compute-0 sudo[451158]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:38:59 compute-0 sudo[451158]: pam_unix(sudo:session): session closed for user root
Oct 11 02:38:59 compute-0 nova_compute[356901]: 2025-10-11 02:38:59.964 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:38:59 compute-0 nova_compute[356901]: 2025-10-11 02:38:59.996 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:00 compute-0 sudo[451183]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:39:00 compute-0 sudo[451183]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:39:00 compute-0 sudo[451183]: pam_unix(sudo:session): session closed for user root
Oct 11 02:39:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:39:00 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2636759804' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.473 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.509s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.486 2 DEBUG nova.compute.provider_tree [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.508 2 DEBUG nova.scheduler.client.report [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.533 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: held 0.792s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.534 2 DEBUG nova.compute.manager [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Start building networks asynchronously for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2799
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.540 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: waited 0.735s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.554 2 DEBUG nova.virt.hardware [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Require both a host and instance NUMA topology to fit instance on host. numa_fit_instance_to_host /usr/lib/python3.9/site-packages/nova/virt/hardware.py:2368
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.556 2 INFO nova.compute.claims [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Claim successful on node compute-0.ctlplane.example.com
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.603 2 DEBUG nova.compute.manager [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Allocating IP information in the background. _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1952
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.604 2 DEBUG nova.network.neutron [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] allocate_for_instance() allocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1156
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.630 2 INFO nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Ignoring supplied device name: /dev/vda. Libvirt can't honour user-supplied dev names
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.654 2 DEBUG nova.compute.manager [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Start building block device mappings for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2834
Oct 11 02:39:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1839: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.729 2 DEBUG oslo_concurrency.processutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:00 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:39:00 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:39:00 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2636759804' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.771 2 DEBUG nova.compute.manager [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Start spawning the instance on the hypervisor. _build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2608
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.775 2 DEBUG nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Creating instance directory _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4723
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.775 2 INFO nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Creating image(s)
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.866 2 DEBUG nova.storage.rbd_utils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] rbd image f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.916 2 DEBUG nova.storage.rbd_utils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] rbd image f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.960 2 DEBUG nova.storage.rbd_utils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] rbd image f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.968 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Acquiring lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:00 compute-0 nova_compute[356901]: 2025-10-11 02:39:00.969 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.011 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Acquiring lock "ee9601c7-f562-449e-9f5c-5e1355f3c130" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.012 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lock "ee9601c7-f562-449e-9f5c-5e1355f3c130" acquired by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.030 2 DEBUG nova.compute.manager [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Starting instance... _do_build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2402
Oct 11 02:39:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.087 2 DEBUG nova.policy [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Policy check for network:attach_external_network failed with credentials {'is_admin': False, 'user_id': '11c81e88a90342bba2c2816e4e3cb191', 'user_domain_id': 'default', 'system_scope': None, 'domain_id': None, 'project_id': 'dba4f6e51d33430ebf5566af53f6fbcc', 'project_domain_id': 'default', 'roles': ['member', 'reader'], 'is_admin_project': True, 'service_user_id': None, 'service_user_domain_id': None, 'service_project_id': None, 'service_project_domain_id': None, 'service_roles': []} authorize /usr/lib/python3.9/site-packages/nova/policy.py:203
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.090 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.232 2 DEBUG nova.virt.libvirt.imagebackend [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Image locations are: [{'url': 'rbd://3c7617c3-7a20-523e-a9de-20c0d6ba41da/images/72f37f2e-4296-450e-9a12-10717f4ac7dc/snap', 'metadata': {'store': 'default_backend'}}, {'url': 'rbd://3c7617c3-7a20-523e-a9de-20c0d6ba41da/images/72f37f2e-4296-450e-9a12-10717f4ac7dc/snap', 'metadata': {}}] clone /usr/lib/python3.9/site-packages/nova/virt/libvirt/imagebackend.py:1085
Oct 11 02:39:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:39:01 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1331149103' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.274 2 DEBUG oslo_concurrency.processutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.545s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.283 2 DEBUG nova.compute.provider_tree [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.297 2 DEBUG nova.scheduler.client.report [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.319 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: held 0.779s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.320 2 DEBUG nova.compute.manager [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Start building networks asynchronously for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2799
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.325 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: waited 0.235s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.336 2 DEBUG nova.virt.hardware [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Require both a host and instance NUMA topology to fit instance on host. numa_fit_instance_to_host /usr/lib/python3.9/site-packages/nova/virt/hardware.py:2368
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.336 2 INFO nova.compute.claims [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Claim successful on node compute-0.ctlplane.example.com
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.404 2 DEBUG nova.compute.manager [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Allocating IP information in the background. _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1952
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.405 2 DEBUG nova.network.neutron [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] allocate_for_instance() allocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1156
Oct 11 02:39:01 compute-0 openstack_network_exporter[374316]: ERROR   02:39:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:39:01 compute-0 openstack_network_exporter[374316]: ERROR   02:39:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:39:01 compute-0 openstack_network_exporter[374316]: ERROR   02:39:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:39:01 compute-0 openstack_network_exporter[374316]: ERROR   02:39:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:39:01 compute-0 openstack_network_exporter[374316]: ERROR   02:39:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.424 2 INFO nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Ignoring supplied device name: /dev/vda. Libvirt can't honour user-supplied dev names
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.439 2 DEBUG nova.compute.manager [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Start building block device mappings for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2834
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.552 2 DEBUG oslo_concurrency.processutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.579 2 DEBUG nova.compute.manager [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Start spawning the instance on the hypervisor. _build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2608
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.581 2 DEBUG nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Creating instance directory _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4723
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.581 2 INFO nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Creating image(s)
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.612 2 DEBUG nova.storage.rbd_utils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] rbd image 830c7581-3555-41db-9818-0961fc151818_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.644 2 DEBUG nova.storage.rbd_utils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] rbd image 830c7581-3555-41db-9818-0961fc151818_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.673 2 DEBUG nova.storage.rbd_utils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] rbd image 830c7581-3555-41db-9818-0961fc151818_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.680 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Acquiring lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:01 compute-0 nova_compute[356901]: 2025-10-11 02:39:01.684 2 DEBUG nova.policy [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Policy check for network:attach_external_network failed with credentials {'is_admin': False, 'user_id': '1b63c9bbae8845d99db73ca671aedcfc', 'user_domain_id': 'default', 'system_scope': None, 'domain_id': None, 'project_id': '56e45b830ec844e4802f14cd3e25bda2', 'project_domain_id': 'default', 'roles': ['member', 'reader'], 'is_admin_project': True, 'service_user_id': None, 'service_user_domain_id': None, 'service_project_id': None, 'service_project_domain_id': None, 'service_roles': []} authorize /usr/lib/python3.9/site-packages/nova/policy.py:203
Oct 11 02:39:01 compute-0 ceph-mon[191930]: pgmap v1839: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:39:01 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1331149103' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:39:02 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2671522645' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:02 compute-0 nova_compute[356901]: 2025-10-11 02:39:02.073 2 DEBUG oslo_concurrency.processutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.521s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:02 compute-0 nova_compute[356901]: 2025-10-11 02:39:02.087 2 DEBUG nova.compute.provider_tree [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:39:02 compute-0 nova_compute[356901]: 2025-10-11 02:39:02.114 2 DEBUG nova.scheduler.client.report [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:39:02 compute-0 nova_compute[356901]: 2025-10-11 02:39:02.149 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: held 0.824s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:02 compute-0 nova_compute[356901]: 2025-10-11 02:39:02.150 2 DEBUG nova.compute.manager [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Start building networks asynchronously for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2799
Oct 11 02:39:02 compute-0 nova_compute[356901]: 2025-10-11 02:39:02.198 2 DEBUG nova.compute.manager [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Allocating IP information in the background. _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1952
Oct 11 02:39:02 compute-0 nova_compute[356901]: 2025-10-11 02:39:02.198 2 DEBUG nova.network.neutron [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] allocate_for_instance() allocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1156
Oct 11 02:39:02 compute-0 nova_compute[356901]: 2025-10-11 02:39:02.235 2 INFO nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Ignoring supplied device name: /dev/vda. Libvirt can't honour user-supplied dev names
Oct 11 02:39:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1840: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.166 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.170 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.174 2 DEBUG nova.network.neutron [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Successfully created port: d7c4233c-f79b-4f32-b896-c36d4abb7d26 _create_port_minimal /usr/lib/python3.9/site-packages/nova/network/neutron.py:548
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.181 2 DEBUG nova.network.neutron [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Successfully created port: 5cd25b0e-b4c9-408f-b456-59127a046cde _create_port_minimal /usr/lib/python3.9/site-packages/nova/network/neutron.py:548
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.188 2 DEBUG nova.policy [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Policy check for network:attach_external_network failed with credentials {'is_admin': False, 'user_id': '5539243c06f64f0694000d9748ff55dd', 'user_domain_id': 'default', 'system_scope': None, 'domain_id': None, 'project_id': '5d5e8b42281d410bb45cb6c2e8e3fcbd', 'project_domain_id': 'default', 'roles': ['member', 'reader'], 'is_admin_project': True, 'service_user_id': None, 'service_user_domain_id': None, 'service_project_id': None, 'service_project_domain_id': None, 'service_roles': []} authorize /usr/lib/python3.9/site-packages/nova/policy.py:203
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.193 2 DEBUG nova.compute.manager [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Start building block device mappings for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2834
Oct 11 02:39:03 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2671522645' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:03 compute-0 ceph-mon[191930]: pgmap v1840: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.204 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Acquiring lock "49d4f343-d1b4-4594-96d2-0777a5ce8581" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.205 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lock "49d4f343-d1b4-4594-96d2-0777a5ce8581" acquired by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.232 2 DEBUG nova.compute.manager [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Starting instance... _do_build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2402
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.354 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.354 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.364 2 DEBUG nova.virt.hardware [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Require both a host and instance NUMA topology to fit instance on host. numa_fit_instance_to_host /usr/lib/python3.9/site-packages/nova/virt/hardware.py:2368
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.364 2 INFO nova.compute.claims [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Claim successful on node compute-0.ctlplane.example.com
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.382 2 DEBUG nova.compute.manager [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Start spawning the instance on the hypervisor. _build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2608
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.384 2 DEBUG nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Creating instance directory _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4723
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.384 2 INFO nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Creating image(s)
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.430 2 DEBUG nova.storage.rbd_utils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] rbd image ee9601c7-f562-449e-9f5c-5e1355f3c130_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.509 2 DEBUG nova.storage.rbd_utils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] rbd image ee9601c7-f562-449e-9f5c-5e1355f3c130_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.578 2 DEBUG nova.storage.rbd_utils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] rbd image ee9601c7-f562-449e-9f5c-5e1355f3c130_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.590 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Acquiring lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.798 2 DEBUG oslo_concurrency.processutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.863 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d.part --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.971 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d.part --force-share --output=json" returned: 0 in 0.109s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.974 2 DEBUG nova.virt.images [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] 72f37f2e-4296-450e-9a12-10717f4ac7dc was qcow2, converting to raw fetch_to_raw /usr/lib/python3.9/site-packages/nova/virt/images.py:242
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.976 2 DEBUG nova.privsep.utils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Path '/var/lib/nova/instances' supports direct I/O supports_direct_io /usr/lib/python3.9/site-packages/nova/privsep/utils.py:63
Oct 11 02:39:03 compute-0 nova_compute[356901]: 2025-10-11 02:39:03.977 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Running cmd (subprocess): qemu-img convert -t none -O raw -f qcow2 /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d.part /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d.converted execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:39:04 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3035791900' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.357 2 DEBUG oslo_concurrency.processutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.559s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.368 2 DEBUG nova.network.neutron [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Successfully updated port: d7c4233c-f79b-4f32-b896-c36d4abb7d26 _update_port /usr/lib/python3.9/site-packages/nova/network/neutron.py:586
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.381 2 DEBUG nova.compute.provider_tree [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.392 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Acquiring lock "refresh_cache-f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.393 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Acquired lock "refresh_cache-f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.394 2 DEBUG nova.network.neutron [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.403 2 DEBUG nova.scheduler.client.report [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.436 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: held 1.082s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.438 2 DEBUG nova.compute.manager [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Start building networks asynchronously for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2799
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.500 2 DEBUG nova.compute.manager [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Allocating IP information in the background. _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1952
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.501 2 DEBUG nova.network.neutron [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] allocate_for_instance() allocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1156
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.531 2 INFO nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Ignoring supplied device name: /dev/vda. Libvirt can't honour user-supplied dev names
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.558 2 DEBUG nova.compute.manager [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Start building block device mappings for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2834
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.629 2 DEBUG nova.network.neutron [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Successfully created port: 887c6cbc-2d8f-44c3-959f-4c732f5d4040 _create_port_minimal /usr/lib/python3.9/site-packages/nova/network/neutron.py:548
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.654 2 DEBUG nova.network.neutron [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Instance cache missing network info. _get_preexisting_port_ids /usr/lib/python3.9/site-packages/nova/network/neutron.py:3323
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.679 2 DEBUG nova.compute.manager [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Start spawning the instance on the hypervisor. _build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2608
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.683 2 DEBUG nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Creating instance directory _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4723
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.684 2 INFO nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Creating image(s)
Oct 11 02:39:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1841: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 1.3 MiB/s rd, 5 op/s
Oct 11 02:39:04 compute-0 nova_compute[356901]: 2025-10-11 02:39:04.734 2 DEBUG nova.storage.rbd_utils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] rbd image 49d4f343-d1b4-4594-96d2-0777a5ce8581_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:04 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3035791900' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:05 compute-0 nova_compute[356901]: 2025-10-11 02:39:05.588 2 DEBUG nova.storage.rbd_utils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] rbd image 49d4f343-d1b4-4594-96d2-0777a5ce8581_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:05 compute-0 nova_compute[356901]: 2025-10-11 02:39:05.655 2 DEBUG nova.storage.rbd_utils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] rbd image 49d4f343-d1b4-4594-96d2-0777a5ce8581_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:05 compute-0 nova_compute[356901]: 2025-10-11 02:39:05.680 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Acquiring lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:05 compute-0 nova_compute[356901]: 2025-10-11 02:39:05.684 2 DEBUG nova.policy [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Policy check for network:attach_external_network failed with credentials {'is_admin': False, 'user_id': '9a1414c7b75246f596af7745610a00a4', 'user_domain_id': 'default', 'system_scope': None, 'domain_id': None, 'project_id': 'd89911bf2931487c98dc0f44a8b67bca', 'project_domain_id': 'default', 'roles': ['reader', 'member'], 'is_admin_project': True, 'service_user_id': None, 'service_user_domain_id': None, 'service_project_id': None, 'service_project_domain_id': None, 'service_roles': []} authorize /usr/lib/python3.9/site-packages/nova/policy.py:203
Oct 11 02:39:05 compute-0 nova_compute[356901]: 2025-10-11 02:39:05.691 2 DEBUG nova.network.neutron [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Successfully updated port: 5cd25b0e-b4c9-408f-b456-59127a046cde _update_port /usr/lib/python3.9/site-packages/nova/network/neutron.py:586
Oct 11 02:39:05 compute-0 nova_compute[356901]: 2025-10-11 02:39:05.693 2 DEBUG nova.network.neutron [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Successfully updated port: 887c6cbc-2d8f-44c3-959f-4c732f5d4040 _update_port /usr/lib/python3.9/site-packages/nova/network/neutron.py:586
Oct 11 02:39:05 compute-0 nova_compute[356901]: 2025-10-11 02:39:05.694 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:05 compute-0 nova_compute[356901]: 2025-10-11 02:39:05.698 2 DEBUG nova.compute.manager [req-0eff6fa7-dc2a-4ffc-915e-090b0c66f395 req-9a59a68d-d65b-418f-a962-33160fda9e6a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Received event network-changed-d7c4233c-f79b-4f32-b896-c36d4abb7d26 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:05 compute-0 nova_compute[356901]: 2025-10-11 02:39:05.699 2 DEBUG nova.compute.manager [req-0eff6fa7-dc2a-4ffc-915e-090b0c66f395 req-9a59a68d-d65b-418f-a962-33160fda9e6a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Refreshing instance network info cache due to event network-changed-d7c4233c-f79b-4f32-b896-c36d4abb7d26. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:39:05 compute-0 nova_compute[356901]: 2025-10-11 02:39:05.699 2 DEBUG oslo_concurrency.lockutils [req-0eff6fa7-dc2a-4ffc-915e-090b0c66f395 req-9a59a68d-d65b-418f-a962-33160fda9e6a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
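The Acquiring/Acquired/Releasing lines threading through this section are oslo.concurrency's lockutils serializing work on each instance's network info cache (and, at 02:39:05.680, an image-cache fetch keyed by a SHA-1 lock name). The pattern, sketched with the library's public API; the cache-refresh helper is a hypothetical stand-in:

    from oslo_concurrency import lockutils

    def refresh_network_info_cache(uuid):
        # Hypothetical stand-in for the work done under the lock.
        print('refreshing cache for', uuid)

    instance_uuid = 'f5eb6746-7f42-4fa4-8e26-cda5cfa0c765'

    # Context-manager form: emits the Acquiring/Acquired/Releasing
    # DEBUG lines seen above around the protected section.
    with lockutils.lock('refresh_cache-%s' % instance_uuid):
        refresh_network_info_cache(instance_uuid)

    # Decorator form, as imagebackend uses for fetch_func_sync; the
    # lock name here is the SHA-1 key from the 02:39:05.680 line.
    @lockutils.synchronized('b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d')
    def fetch_image_once():
        pass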
Oct 11 02:39:05 compute-0 nova_compute[356901]: 2025-10-11 02:39:05.725 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Acquiring lock "refresh_cache-830c7581-3555-41db-9818-0961fc151818" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:39:05 compute-0 nova_compute[356901]: 2025-10-11 02:39:05.726 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Acquired lock "refresh_cache-830c7581-3555-41db-9818-0961fc151818" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:39:05 compute-0 nova_compute[356901]: 2025-10-11 02:39:05.726 2 DEBUG nova.network.neutron [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:39:05 compute-0 nova_compute[356901]: 2025-10-11 02:39:05.729 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Acquiring lock "refresh_cache-ee9601c7-f562-449e-9f5c-5e1355f3c130" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:39:05 compute-0 nova_compute[356901]: 2025-10-11 02:39:05.730 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Acquired lock "refresh_cache-ee9601c7-f562-449e-9f5c-5e1355f3c130" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:39:05 compute-0 nova_compute[356901]: 2025-10-11 02:39:05.730 2 DEBUG nova.network.neutron [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:39:06 compute-0 nova_compute[356901]: 2025-10-11 02:39:06.059 2 DEBUG nova.compute.manager [req-82abc13b-6d95-40c6-8e20-9aa7bee9957f req-85cb4466-34ab-4151-b28c-62bf339dfbe3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Received event network-changed-5cd25b0e-b4c9-408f-b456-59127a046cde external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:06 compute-0 nova_compute[356901]: 2025-10-11 02:39:06.060 2 DEBUG nova.compute.manager [req-82abc13b-6d95-40c6-8e20-9aa7bee9957f req-85cb4466-34ab-4151-b28c-62bf339dfbe3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Refreshing instance network info cache due to event network-changed-5cd25b0e-b4c9-408f-b456-59127a046cde. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:39:06 compute-0 nova_compute[356901]: 2025-10-11 02:39:06.061 2 DEBUG oslo_concurrency.lockutils [req-82abc13b-6d95-40c6-8e20-9aa7bee9957f req-85cb4466-34ab-4151-b28c-62bf339dfbe3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-830c7581-3555-41db-9818-0961fc151818" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:39:06 compute-0 nova_compute[356901]: 2025-10-11 02:39:06.086 2 DEBUG nova.network.neutron [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Instance cache missing network info. _get_preexisting_port_ids /usr/lib/python3.9/site-packages/nova/network/neutron.py:3323
Oct 11 02:39:06 compute-0 nova_compute[356901]: 2025-10-11 02:39:06.135 2 DEBUG nova.network.neutron [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Instance cache missing network info. _get_preexisting_port_ids /usr/lib/python3.9/site-packages/nova/network/neutron.py:3323
Oct 11 02:39:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:39:06 compute-0 ceph-mon[191930]: pgmap v1841: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 1.3 MiB/s rd, 5 op/s
Oct 11 02:39:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1842: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 6 op/s
Oct 11 02:39:06 compute-0 nova_compute[356901]: 2025-10-11 02:39:06.728 2 DEBUG nova.network.neutron [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Successfully created port: 4076fda2-be62-4c52-b073-8bf26574dee1 _create_port_minimal /usr/lib/python3.9/site-packages/nova/network/neutron.py:548
Oct 11 02:39:06 compute-0 nova_compute[356901]: 2025-10-11 02:39:06.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:39:06 compute-0 nova_compute[356901]: 2025-10-11 02:39:06.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:39:06 compute-0 nova_compute[356901]: 2025-10-11 02:39:06.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:39:06 compute-0 nova_compute[356901]: 2025-10-11 02:39:06.926 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Skipping network cache update for instance because it is Building. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9871
Oct 11 02:39:06 compute-0 nova_compute[356901]: 2025-10-11 02:39:06.928 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 830c7581-3555-41db-9818-0961fc151818] Skipping network cache update for instance because it is Building. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9871
Oct 11 02:39:06 compute-0 nova_compute[356901]: 2025-10-11 02:39:06.929 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Skipping network cache update for instance because it is Building. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9871
Oct 11 02:39:06 compute-0 nova_compute[356901]: 2025-10-11 02:39:06.929 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Skipping network cache update for instance because it is Building. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9871
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.016 2 DEBUG nova.network.neutron [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Updating instance_info_cache with network_info: [{"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.039 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Releasing lock "refresh_cache-f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.040 2 DEBUG nova.compute.manager [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Instance network_info: |[{"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}]| _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1967
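The network_info payload logged by _allocate_network_async above is a JSON-serializable list of VIF dicts. A short sketch that pulls the fixed IPs back out of one such entry (structure copied from the log, trimmed to the fields used):

    import json

    # Abbreviated copy of the network_info entry logged above.
    network_info = json.loads('''
    [{"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26",
      "address": "fa:16:3e:8d:b8:dd",
      "network": {"subnets": [{"cidr": "10.100.0.0/28",
        "ips": [{"address": "10.100.0.4", "type": "fixed"}]}]}}]''')

    for vif in network_info:
        for subnet in vif['network']['subnets']:
            for ip in subnet['ips']:
                print(vif['id'], ip['type'], ip['address'])
    # -> d7c4233c-f79b-4f32-b896-c36d4abb7d26 fixed 10.100.0.4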
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.041 2 DEBUG oslo_concurrency.lockutils [req-0eff6fa7-dc2a-4ffc-915e-090b0c66f395 req-9a59a68d-d65b-418f-a962-33160fda9e6a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.042 2 DEBUG nova.network.neutron [req-0eff6fa7-dc2a-4ffc-915e-090b0c66f395 req-9a59a68d-d65b-418f-a962-33160fda9e6a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Refreshing network info cache for port d7c4233c-f79b-4f32-b896-c36d4abb7d26 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0009191400908380543 of space, bias 1.0, pg target 0.2757420272514163 quantized to 32 (current 32)
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:39:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
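Each pg_autoscaler pass above computes a raw PG target as capacity ratio × bias × a PG budget, then quantizes toward a power of two. The logged numbers are all consistent with a budget of 300 (for example, 'vms': 0.0005513950275118838 × 1.0 × 300 ≈ 0.1654), which would match the default of 100 target PGs per OSD on the 3 OSDs behind this 60 GiB cluster; that multiplier is inferred from the log, not read from configuration. A worked sketch of the arithmetic follows. Note the logged "quantized to" values also honor per-pool pg_num minimums, which this sketch does not model:

    # Reproduce the raw pg targets visible in the autoscaler log above.
    # pg_budget=300 is an inference (100 target PGs/OSD * 3 OSDs).
    def raw_pg_target(usage_ratio, bias, pg_budget=300):
        return usage_ratio * bias * pg_budget

    def quantize_pow2(raw):
        # Largest power of two not exceeding the raw target, floored
        # at 1; the real autoscaler additionally applies pg_num_min
        # and only acts on large deviations from the current pg_num.
        power = 1
        while power * 2 <= max(raw, 1):
            power *= 2
        return power

    for pool, usage, bias in [
            ('.mgr', 7.185749983720779e-06, 1.0),
            ('vms', 0.0005513950275118838, 1.0),
            ('cephfs.cephfs.meta', 5.087256625643029e-07, 4.0)]:
        raw = raw_pg_target(usage, bias)
        print(f'{pool}: raw target {raw:.10g}, pow2 {quantize_pow2(raw)}')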
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.231 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.231 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.232 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.232 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.858 2 DEBUG nova.network.neutron [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Updating instance_info_cache with network_info: [{"id": "5cd25b0e-b4c9-408f-b456-59127a046cde", "address": "fa:16:3e:23:45:c8", "network": {"id": "b6521a4e-cfb9-4743-91c3-85402b5661d9", "bridge": "br-int", "label": "tempest-ServersTestJSON-1548228308-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "56e45b830ec844e4802f14cd3e25bda2", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5cd25b0e-b4", "ovs_interfaceid": "5cd25b0e-b4c9-408f-b456-59127a046cde", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.860 2 DEBUG nova.network.neutron [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Updating instance_info_cache with network_info: [{"id": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "address": "fa:16:3e:3f:e1:d3", "network": {"id": "3e4cd915-df9e-44c4-860d-c0ba25a21e79", "bridge": "br-int", "label": "tempest-ServersTestManualDisk-1152012084-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.9", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "5d5e8b42281d410bb45cb6c2e8e3fcbd", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap887c6cbc-2d", "ovs_interfaceid": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.888 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Releasing lock "refresh_cache-ee9601c7-f562-449e-9f5c-5e1355f3c130" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.889 2 DEBUG nova.compute.manager [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Instance network_info: |[{"id": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "address": "fa:16:3e:3f:e1:d3", "network": {"id": "3e4cd915-df9e-44c4-860d-c0ba25a21e79", "bridge": "br-int", "label": "tempest-ServersTestManualDisk-1152012084-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.9", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "5d5e8b42281d410bb45cb6c2e8e3fcbd", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap887c6cbc-2d", "ovs_interfaceid": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}]| _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1967
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.891 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Releasing lock "refresh_cache-830c7581-3555-41db-9818-0961fc151818" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.891 2 DEBUG nova.compute.manager [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Instance network_info: |[{"id": "5cd25b0e-b4c9-408f-b456-59127a046cde", "address": "fa:16:3e:23:45:c8", "network": {"id": "b6521a4e-cfb9-4743-91c3-85402b5661d9", "bridge": "br-int", "label": "tempest-ServersTestJSON-1548228308-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "56e45b830ec844e4802f14cd3e25bda2", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5cd25b0e-b4", "ovs_interfaceid": "5cd25b0e-b4c9-408f-b456-59127a046cde", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}]| _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1967
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.891 2 DEBUG oslo_concurrency.lockutils [req-82abc13b-6d95-40c6-8e20-9aa7bee9957f req-85cb4466-34ab-4151-b28c-62bf339dfbe3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-830c7581-3555-41db-9818-0961fc151818" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.892 2 DEBUG nova.network.neutron [req-82abc13b-6d95-40c6-8e20-9aa7bee9957f req-85cb4466-34ab-4151-b28c-62bf339dfbe3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Refreshing network info cache for port 5cd25b0e-b4c9-408f-b456-59127a046cde _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.905 2 DEBUG nova.compute.manager [req-9161355f-9b91-4482-80ed-42f2a5989b28 req-ceb1c559-e647-448a-b4d5-2c5021b0efa4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Received event network-changed-887c6cbc-2d8f-44c3-959f-4c732f5d4040 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.905 2 DEBUG nova.compute.manager [req-9161355f-9b91-4482-80ed-42f2a5989b28 req-ceb1c559-e647-448a-b4d5-2c5021b0efa4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Refreshing instance network info cache due to event network-changed-887c6cbc-2d8f-44c3-959f-4c732f5d4040. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.905 2 DEBUG oslo_concurrency.lockutils [req-9161355f-9b91-4482-80ed-42f2a5989b28 req-ceb1c559-e647-448a-b4d5-2c5021b0efa4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-ee9601c7-f562-449e-9f5c-5e1355f3c130" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.905 2 DEBUG oslo_concurrency.lockutils [req-9161355f-9b91-4482-80ed-42f2a5989b28 req-ceb1c559-e647-448a-b4d5-2c5021b0efa4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-ee9601c7-f562-449e-9f5c-5e1355f3c130" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:39:07 compute-0 nova_compute[356901]: 2025-10-11 02:39:07.905 2 DEBUG nova.network.neutron [req-9161355f-9b91-4482-80ed-42f2a5989b28 req-ceb1c559-e647-448a-b4d5-2c5021b0efa4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Refreshing network info cache for port 887c6cbc-2d8f-44c3-959f-4c732f5d4040 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:39:08 compute-0 ceph-mon[191930]: pgmap v1842: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 6 op/s
Oct 11 02:39:08 compute-0 nova_compute[356901]: 2025-10-11 02:39:08.173 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:08 compute-0 nova_compute[356901]: 2025-10-11 02:39:08.224 2 DEBUG nova.network.neutron [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Successfully updated port: 4076fda2-be62-4c52-b073-8bf26574dee1 _update_port /usr/lib/python3.9/site-packages/nova/network/neutron.py:586
Oct 11 02:39:08 compute-0 nova_compute[356901]: 2025-10-11 02:39:08.241 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Acquiring lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:39:08 compute-0 nova_compute[356901]: 2025-10-11 02:39:08.241 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Acquired lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:39:08 compute-0 nova_compute[356901]: 2025-10-11 02:39:08.242 2 DEBUG nova.network.neutron [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:39:08 compute-0 ovn_controller[88370]: 2025-10-11T02:39:08Z|00071|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:39:08 compute-0 nova_compute[356901]: 2025-10-11 02:39:08.461 2 DEBUG nova.network.neutron [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Instance cache missing network info. _get_preexisting_port_ids /usr/lib/python3.9/site-packages/nova/network/neutron.py:3323
Oct 11 02:39:08 compute-0 nova_compute[356901]: 2025-10-11 02:39:08.578 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1843: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 6 op/s
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.301 2 DEBUG nova.network.neutron [req-0eff6fa7-dc2a-4ffc-915e-090b0c66f395 req-9a59a68d-d65b-418f-a962-33160fda9e6a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Updated VIF entry in instance network info cache for port d7c4233c-f79b-4f32-b896-c36d4abb7d26. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.302 2 DEBUG nova.network.neutron [req-0eff6fa7-dc2a-4ffc-915e-090b0c66f395 req-9a59a68d-d65b-418f-a962-33160fda9e6a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Updating instance_info_cache with network_info: [{"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.339 2 DEBUG oslo_concurrency.lockutils [req-0eff6fa7-dc2a-4ffc-915e-090b0c66f395 req-9a59a68d-d65b-418f-a962-33160fda9e6a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:39:09 compute-0 ceph-mon[191930]: pgmap v1843: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 6 op/s
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.700 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.723 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.724 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.725 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.726 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.727 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.728 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
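The run of "Running periodic task" lines comes from oslo.service iterating ComputeManager's decorated periodic methods, and the reclaim line just above shows one of them short-circuiting on configuration. The pattern, sketched with oslo.service's public API; the class and the option registration here are illustrative (nova defines reclaim_instance_interval itself):

    from oslo_config import cfg
    from oslo_service import periodic_task

    CONF = cfg.CONF
    # Illustrative registration so the sketch is self-contained.
    CONF.register_opts([cfg.IntOpt('reclaim_instance_interval', default=0)])

    class Manager(periodic_task.PeriodicTasks):
        def __init__(self):
            super().__init__(CONF)

        @periodic_task.periodic_task(spacing=60)
        def _reclaim_queued_deletes(self, context):
            # Mirrors the guard logged above:
            # "CONF.reclaim_instance_interval <= 0, skipping..."
            if CONF.reclaim_instance_interval <= 0:
                return

    Manager().run_periodic_tasks(context=None)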
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.934 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.934 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.935 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.935 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.936 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.973 2 DEBUG nova.network.neutron [req-9161355f-9b91-4482-80ed-42f2a5989b28 req-ceb1c559-e647-448a-b4d5-2c5021b0efa4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Updated VIF entry in instance network info cache for port 887c6cbc-2d8f-44c3-959f-4c732f5d4040. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.975 2 DEBUG nova.network.neutron [req-9161355f-9b91-4482-80ed-42f2a5989b28 req-ceb1c559-e647-448a-b4d5-2c5021b0efa4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Updating instance_info_cache with network_info: [{"id": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "address": "fa:16:3e:3f:e1:d3", "network": {"id": "3e4cd915-df9e-44c4-860d-c0ba25a21e79", "bridge": "br-int", "label": "tempest-ServersTestManualDisk-1152012084-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.9", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "5d5e8b42281d410bb45cb6c2e8e3fcbd", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap887c6cbc-2d", "ovs_interfaceid": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:39:09 compute-0 nova_compute[356901]: 2025-10-11 02:39:09.992 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:10 compute-0 nova_compute[356901]: 2025-10-11 02:39:10.001 2 DEBUG oslo_concurrency.lockutils [req-9161355f-9b91-4482-80ed-42f2a5989b28 req-ceb1c559-e647-448a-b4d5-2c5021b0efa4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-ee9601c7-f562-449e-9f5c-5e1355f3c130" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:39:10 compute-0 nova_compute[356901]: 2025-10-11 02:39:10.104 2 DEBUG nova.network.neutron [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Updating instance_info_cache with network_info: [{"id": "4076fda2-be62-4c52-b073-8bf26574dee1", "address": "fa:16:3e:c5:9b:82", "network": {"id": "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2", "bridge": "br-int", "label": "tempest-AttachInterfacesUnderV243Test-139612684-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d89911bf2931487c98dc0f44a8b67bca", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap4076fda2-be", "ovs_interfaceid": "4076fda2-be62-4c52-b073-8bf26574dee1", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:39:10 compute-0 nova_compute[356901]: 2025-10-11 02:39:10.128 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Releasing lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:39:10 compute-0 nova_compute[356901]: 2025-10-11 02:39:10.128 2 DEBUG nova.compute.manager [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Instance network_info: |[{"id": "4076fda2-be62-4c52-b073-8bf26574dee1", "address": "fa:16:3e:c5:9b:82", "network": {"id": "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2", "bridge": "br-int", "label": "tempest-AttachInterfacesUnderV243Test-139612684-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d89911bf2931487c98dc0f44a8b67bca", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap4076fda2-be", "ovs_interfaceid": "4076fda2-be62-4c52-b073-8bf26574dee1", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}]| _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1967
Oct 11 02:39:10 compute-0 podman[451525]: 2025-10-11 02:39:10.221152928 +0000 UTC m=+0.107099008 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_ipmi)
Oct 11 02:39:10 compute-0 podman[451526]: 2025-10-11 02:39:10.241439458 +0000 UTC m=+0.125981501 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, build-date=2025-08-20T13:12:41, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, version=9.6, name=ubi9-minimal, config_id=edpm, distribution-scope=public, container_name=openstack_network_exporter, io.buildah.version=1.33.7, vcs-type=git, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., architecture=x86_64, io.openshift.tags=minimal rhel9, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.expose-services=, managed_by=edpm_ansible, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, release=1755695350, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.component=ubi9-minimal-container)
Oct 11 02:39:10 compute-0 podman[451527]: 2025-10-11 02:39:10.258996756 +0000 UTC m=+0.127386497 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
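The three podman entries above are scheduled healthcheck runs for the edpm-managed telemetry containers, each reporting health_status=healthy. The same checks can be driven by hand; a sketch via subprocess, with the caveat that the inspect template path can differ across podman releases:

    import subprocess

    # Run the container's configured healthcheck once; exit code 0
    # means healthy, so check=True raises when the check fails.
    subprocess.run(['podman', 'healthcheck', 'run', 'node_exporter'],
                   check=True)

    # Read back the recorded state; on older podman the template is
    # '{{.State.Healthcheck.Status}}' instead.
    out = subprocess.run(
        ['podman', 'inspect', '--format', '{{.State.Health.Status}}',
         'node_exporter'],
        capture_output=True, text=True, check=True).stdout.strip()
    print(out)  # e.g. "healthy", as in the health_status fields above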
Oct 11 02:39:10 compute-0 nova_compute[356901]: 2025-10-11 02:39:10.267 2 DEBUG nova.network.neutron [req-82abc13b-6d95-40c6-8e20-9aa7bee9957f req-85cb4466-34ab-4151-b28c-62bf339dfbe3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Updated VIF entry in instance network info cache for port 5cd25b0e-b4c9-408f-b456-59127a046cde. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:39:10 compute-0 nova_compute[356901]: 2025-10-11 02:39:10.268 2 DEBUG nova.network.neutron [req-82abc13b-6d95-40c6-8e20-9aa7bee9957f req-85cb4466-34ab-4151-b28c-62bf339dfbe3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Updating instance_info_cache with network_info: [{"id": "5cd25b0e-b4c9-408f-b456-59127a046cde", "address": "fa:16:3e:23:45:c8", "network": {"id": "b6521a4e-cfb9-4743-91c3-85402b5661d9", "bridge": "br-int", "label": "tempest-ServersTestJSON-1548228308-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "56e45b830ec844e4802f14cd3e25bda2", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5cd25b0e-b4", "ovs_interfaceid": "5cd25b0e-b4c9-408f-b456-59127a046cde", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:39:10 compute-0 nova_compute[356901]: 2025-10-11 02:39:10.285 2 DEBUG oslo_concurrency.lockutils [req-82abc13b-6d95-40c6-8e20-9aa7bee9957f req-85cb4466-34ab-4151-b28c-62bf339dfbe3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-830c7581-3555-41db-9818-0961fc151818" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:39:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:39:10 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1051564456' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:10 compute-0 nova_compute[356901]: 2025-10-11 02:39:10.490 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.554s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
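The resource audit shells out to `ceph df --format=json` through oslo_concurrency.processutils; the ceph-mon audit lines above show the same request arriving as a `df` mon_command, and it returns 0 in 0.554s. A minimal sketch of that call and of reading the stats back; the top-level fields follow the usual `ceph df --format=json` layout, though per-pool stat names vary a little across Ceph releases:

    import json
    from oslo_concurrency import processutils

    # Same invocation nova logs above; execute() returns (stdout,
    # stderr) and raises ProcessExecutionError on a non-zero exit.
    out, _err = processutils.execute(
        'ceph', 'df', '--format=json',
        '--id', 'openstack', '--conf', '/etc/ceph/ceph.conf')

    stats = json.loads(out)
    print(stats['stats']['total_avail_bytes'])   # cluster-wide free bytes
    for pool in stats['pools']:
        # 'bytes_used' on older releases, 'stored' on newer ones.
        print(pool['name'],
              pool['stats'].get('stored', pool['stats'].get('bytes_used')))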
Oct 11 02:39:10 compute-0 nova_compute[356901]: 2025-10-11 02:39:10.602 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:39:10 compute-0 nova_compute[356901]: 2025-10-11 02:39:10.602 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:39:10 compute-0 nova_compute[356901]: 2025-10-11 02:39:10.602 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:39:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1844: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 6 op/s
Oct 11 02:39:10 compute-0 nova_compute[356901]: 2025-10-11 02:39:10.870 2 DEBUG nova.compute.manager [req-f11b1a0c-4217-43eb-8747-3cdf47b19a10 req-c3f34e9f-be2c-4cf1-a3d2-c6e6cd30f0c0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Received event network-changed-4076fda2-be62-4c52-b073-8bf26574dee1 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:10 compute-0 nova_compute[356901]: 2025-10-11 02:39:10.871 2 DEBUG nova.compute.manager [req-f11b1a0c-4217-43eb-8747-3cdf47b19a10 req-c3f34e9f-be2c-4cf1-a3d2-c6e6cd30f0c0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Refreshing instance network info cache due to event network-changed-4076fda2-be62-4c52-b073-8bf26574dee1. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:39:10 compute-0 nova_compute[356901]: 2025-10-11 02:39:10.872 2 DEBUG oslo_concurrency.lockutils [req-f11b1a0c-4217-43eb-8747-3cdf47b19a10 req-c3f34e9f-be2c-4cf1-a3d2-c6e6cd30f0c0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:39:10 compute-0 nova_compute[356901]: 2025-10-11 02:39:10.873 2 DEBUG oslo_concurrency.lockutils [req-f11b1a0c-4217-43eb-8747-3cdf47b19a10 req-c3f34e9f-be2c-4cf1-a3d2-c6e6cd30f0c0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:39:10 compute-0 nova_compute[356901]: 2025-10-11 02:39:10.873 2 DEBUG nova.network.neutron [req-f11b1a0c-4217-43eb-8747-3cdf47b19a10 req-c3f34e9f-be2c-4cf1-a3d2-c6e6cd30f0c0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Refreshing network info cache for port 4076fda2-be62-4c52-b073-8bf26574dee1 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:39:11 compute-0 nova_compute[356901]: 2025-10-11 02:39:11.083 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:39:11 compute-0 nova_compute[356901]: 2025-10-11 02:39:11.084 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3790MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:39:11 compute-0 nova_compute[356901]: 2025-10-11 02:39:11.085 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:11 compute-0 nova_compute[356901]: 2025-10-11 02:39:11.085 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
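The Acquiring/acquired/released triplet around "compute_resources" is the standard oslo.concurrency pattern: the resource tracker's update runs under a synchronized decorator, roughly like this sketch (not nova's exact code):

    from oslo_concurrency import lockutils

    # nova.utils defines: synchronized = lockutils.synchronized_with_prefix('nova-')
    synchronized = lockutils.synchronized_with_prefix('nova-')

    @synchronized('compute_resources')
    def _update_available_resource():
        # Everything between the "acquired" and "released" log lines
        # executes while this in-process lock is held.
        ...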
Oct 11 02:39:11 compute-0 nova_compute[356901]: 2025-10-11 02:39:11.182 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:39:11 compute-0 nova_compute[356901]: 2025-10-11 02:39:11.182 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:39:11 compute-0 nova_compute[356901]: 2025-10-11 02:39:11.183 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 830c7581-3555-41db-9818-0961fc151818 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:39:11 compute-0 nova_compute[356901]: 2025-10-11 02:39:11.183 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance ee9601c7-f562-449e-9f5c-5e1355f3c130 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:39:11 compute-0 nova_compute[356901]: 2025-10-11 02:39:11.183 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 49d4f343-d1b4-4594-96d2-0777a5ce8581 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:39:11 compute-0 nova_compute[356901]: 2025-10-11 02:39:11.183 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 5 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:39:11 compute-0 nova_compute[356901]: 2025-10-11 02:39:11.184 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1536MB phys_disk=59GB used_disk=6GB total_vcpus=8 used_vcpus=5 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:39:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:39:11 compute-0 nova_compute[356901]: 2025-10-11 02:39:11.199 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing inventories for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:804
Oct 11 02:39:11 compute-0 nova_compute[356901]: 2025-10-11 02:39:11.224 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating ProviderTree inventory for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 from _refresh_and_get_inventory using data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} _refresh_and_get_inventory /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:768
Oct 11 02:39:11 compute-0 nova_compute[356901]: 2025-10-11 02:39:11.224 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating inventory in ProviderTree for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with inventory: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:176
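The inventory dict logged above is what placement schedules against; usable capacity per resource class is (total - reserved) * allocation_ratio. Worked out with the values from this log:

    # Capacity math for the inventory logged above:
    # usable = (total - reserved) * allocation_ratio
    inventory = {
        'VCPU':      {'total': 8,    'reserved': 0,   'allocation_ratio': 4.0},
        'MEMORY_MB': {'total': 7680, 'reserved': 512, 'allocation_ratio': 1.0},
        'DISK_GB':   {'total': 59,   'reserved': 1,   'allocation_ratio': 0.9},
    }
    for rc, inv in inventory.items():
        usable = (inv['total'] - inv['reserved']) * inv['allocation_ratio']
        print(rc, usable)
    # VCPU 32.0, MEMORY_MB 7168.0, DISK_GB 52.2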
Oct 11 02:39:11 compute-0 nova_compute[356901]: 2025-10-11 02:39:11.237 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing aggregate associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, aggregates: None _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:813
Oct 11 02:39:11 compute-0 nova_compute[356901]: 2025-10-11 02:39:11.255 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing trait associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, traits: COMPUTE_VOLUME_EXTEND,COMPUTE_NET_VIF_MODEL_VMXNET3,HW_CPU_X86_SSSE3,COMPUTE_RESCUE_BFV,COMPUTE_SOCKET_PCI_NUMA_AFFINITY,COMPUTE_NODE,HW_CPU_X86_SVM,COMPUTE_STORAGE_BUS_SCSI,HW_CPU_X86_FMA3,COMPUTE_GRAPHICS_MODEL_NONE,COMPUTE_NET_VIF_MODEL_RTL8139,HW_CPU_X86_SSE4A,COMPUTE_IMAGE_TYPE_QCOW2,HW_CPU_X86_BMI2,HW_CPU_X86_SSE42,HW_CPU_X86_AVX2,COMPUTE_IMAGE_TYPE_RAW,COMPUTE_VIOMMU_MODEL_VIRTIO,HW_CPU_X86_AESNI,COMPUTE_STORAGE_BUS_FDC,COMPUTE_GRAPHICS_MODEL_VIRTIO,HW_CPU_X86_AMD_SVM,COMPUTE_NET_VIF_MODEL_NE2K_PCI,COMPUTE_ACCELERATORS,HW_CPU_X86_SSE2,COMPUTE_GRAPHICS_MODEL_VGA,HW_CPU_X86_ABM,HW_CPU_X86_AVX,COMPUTE_NET_VIF_MODEL_E1000,COMPUTE_STORAGE_BUS_USB,COMPUTE_NET_ATTACH_INTERFACE,HW_CPU_X86_MMX,COMPUTE_SECURITY_TPM_2_0,COMPUTE_IMAGE_TYPE_ISO,HW_CPU_X86_SSE41,COMPUTE_IMAGE_TYPE_AKI,COMPUTE_IMAGE_TYPE_AMI,COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG,COMPUTE_DEVICE_TAGGING,COMPUTE_SECURITY_UEFI_SECURE_BOOT,COMPUTE_TRUSTED_CERTS,COMPUTE_NET_VIF_MODEL_VIRTIO,COMPUTE_VIOMMU_MODEL_INTEL,COMPUTE_STORAGE_BUS_SATA,HW_CPU_X86_SSE,COMPUTE_STORAGE_BUS_VIRTIO,COMPUTE_NET_VIF_MODEL_PCNET,COMPUTE_GRAPHICS_MODEL_CIRRUS,HW_CPU_X86_SHA,HW_CPU_X86_BMI,COMPUTE_NET_VIF_MODEL_E1000E,COMPUTE_NET_VIF_MODEL_SPAPR_VLAN,COMPUTE_VOLUME_ATTACH_WITH_TAG,COMPUTE_GRAPHICS_MODEL_BOCHS,COMPUTE_VIOMMU_MODEL_AUTO,COMPUTE_IMAGE_TYPE_ARI,HW_CPU_X86_CLMUL,COMPUTE_STORAGE_BUS_IDE,COMPUTE_VOLUME_MULTI_ATTACH,HW_CPU_X86_F16C,COMPUTE_SECURITY_TPM_1_2 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:825
Oct 11 02:39:11 compute-0 nova_compute[356901]: 2025-10-11 02:39:11.390 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:11 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1051564456' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:11 compute-0 ceph-mon[191930]: pgmap v1844: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 6 op/s
Oct 11 02:39:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:39:12 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1189993318' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.351 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.961s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.362 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CMD "qemu-img convert -t none -O raw -f qcow2 /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d.part /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d.converted" returned: 0 in 8.385s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:12 compute-0 ovn_controller[88370]: 2025-10-11T02:39:12Z|00072|memory|INFO|peak resident set size grew 51% in last 4218.1 seconds, from 16000 kB to 24176 kB
Oct 11 02:39:12 compute-0 ovn_controller[88370]: 2025-10-11T02:39:12Z|00073|memory|INFO|idl-cells-OVN_Southbound:10158 idl-cells-Open_vSwitch:756 if_status_mgr_ifaces_state_usage-KB:1 if_status_mgr_ifaces_usage-KB:1 lflow-cache-entries-cache-expr:326 lflow-cache-entries-cache-matches:281 lflow-cache-size-KB:1335 local_datapath_usage-KB:3 ofctrl_desired_flow_usage-KB:609 ofctrl_installed_flow_usage-KB:445 ofctrl_sb_flow_ref_usage-KB:227
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.369 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d.converted --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.397 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.417 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.447 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.448 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 1.363s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.478 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d.converted --force-share --output=json" returned: 0 in 0.109s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
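The qemu-img convert and prlimit-wrapped qemu-img info commands above are the image-cache fetch path: the downloaded qcow2 is flattened to raw, then probed under resource limits so a malformed image cannot hang or balloon the service. A sketch of the guarded probe, assuming qemu-img is installed:

    from oslo_concurrency import processutils

    # processutils re-execs the command under oslo_concurrency.prlimit,
    # which is the "--as=1073741824 --cpu=30" wrapper seen in the log.
    out, _ = processutils.execute(
        'qemu-img', 'info',
        '/var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d.converted',
        '--force-share', '--output=json',
        env_variables={'LC_ALL': 'C', 'LANG': 'C'},
        prlimit=processutils.ProcessLimits(address_space=1024 ** 3,
                                           cpu_time=30))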
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.479 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 11.510s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.523 2 DEBUG nova.storage.rbd_utils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] rbd image f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.531 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.562 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 10.881s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.563 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.601 2 DEBUG nova.storage.rbd_utils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] rbd image 830c7581-3555-41db-9818-0961fc151818_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.610 2 DEBUG oslo_concurrency.processutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d 830c7581-3555-41db-9818-0961fc151818_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.644 2 DEBUG nova.network.neutron [req-f11b1a0c-4217-43eb-8747-3cdf47b19a10 req-c3f34e9f-be2c-4cf1-a3d2-c6e6cd30f0c0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Updated VIF entry in instance network info cache for port 4076fda2-be62-4c52-b073-8bf26574dee1. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.645 2 DEBUG nova.network.neutron [req-f11b1a0c-4217-43eb-8747-3cdf47b19a10 req-c3f34e9f-be2c-4cf1-a3d2-c6e6cd30f0c0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Updating instance_info_cache with network_info: [{"id": "4076fda2-be62-4c52-b073-8bf26574dee1", "address": "fa:16:3e:c5:9b:82", "network": {"id": "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2", "bridge": "br-int", "label": "tempest-AttachInterfacesUnderV243Test-139612684-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d89911bf2931487c98dc0f44a8b67bca", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap4076fda2-be", "ovs_interfaceid": "4076fda2-be62-4c52-b073-8bf26574dee1", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
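The network_info blob logged above is a list of VIF dicts; extracting the fixed IPs takes one comprehension. Structure trimmed to the fields used, values copied from the log line:

    # Trimmed reproduction of the logged network_info structure.
    network_info = [{
        'id': '4076fda2-be62-4c52-b073-8bf26574dee1',
        'network': {'subnets': [{'ips': [{'address': '10.100.0.14'}]}]},
    }]
    ips = [ip['address']
           for vif in network_info
           for subnet in vif['network']['subnets']
           for ip in subnet['ips']]
    print(ips)  # ['10.100.0.14']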
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.648 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 9.058s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.648 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1845: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 6 op/s
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.727 2 DEBUG nova.storage.rbd_utils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] rbd image ee9601c7-f562-449e-9f5c-5e1355f3c130_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.737 2 DEBUG oslo_concurrency.processutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d ee9601c7-f562-449e-9f5c-5e1355f3c130_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.755 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.757 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 7.077s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.757 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.863 2 DEBUG nova.storage.rbd_utils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] rbd image 49d4f343-d1b4-4594-96d2-0777a5ce8581_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.877 2 DEBUG oslo_concurrency.processutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d 49d4f343-d1b4-4594-96d2-0777a5ce8581_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
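Each of the four instances follows the same probe-then-import sequence: rbd_utils logs "does not exist" when its existence check fails, and nova then imports the cached base file into the vms pool. As a sketch (rbd CLI and client.openstack credentials assumed):

    from oslo_concurrency import processutils

    def import_base_image(base_path, image_name, pool='vms'):
        # Mirrors the logged command:
        # rbd import --pool vms <base_path> <image_name> --image-format=2 ...
        processutils.execute(
            'rbd', 'import', '--pool', pool, base_path, image_name,
            '--image-format=2', '--id', 'openstack',
            '--conf', '/etc/ceph/ceph.conf')

    import_base_image(
        '/var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d',
        '49d4f343-d1b4-4594-96d2-0777a5ce8581_disk')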
Oct 11 02:39:12 compute-0 nova_compute[356901]: 2025-10-11 02:39:12.909 2 DEBUG oslo_concurrency.lockutils [req-f11b1a0c-4217-43eb-8747-3cdf47b19a10 req-c3f34e9f-be2c-4cf1-a3d2-c6e6cd30f0c0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:39:12 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1189993318' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:13 compute-0 nova_compute[356901]: 2025-10-11 02:39:13.177 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:13 compute-0 nova_compute[356901]: 2025-10-11 02:39:13.448 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.867 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is larger than the number of worker threads available to execute them; each polling cycle can therefore be expected to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.869 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.871 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.880 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.881 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.881 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.881 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.882 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.883 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:39:13.881960) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.889 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2856 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.891 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.891 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.892 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.892 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.892 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.892 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.893 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 24 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.893 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:39:13.892800) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.894 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.894 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.894 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.894 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.894 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.895 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.895 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.896 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.896 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.896 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.896 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.897 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.897 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:39:13.895081) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.897 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.898 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.899 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.899 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:39:13.897878) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.900 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.900 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.900 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.901 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.901 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.902 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:39:13.901405) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.938 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.939 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.940 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.941 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.942 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.942 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.942 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.942 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.943 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:13.944 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:39:13.942960) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.000 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.001 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.002 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.002 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
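The block above repeats for every meter: the manager runs discovery, checks whether the pollster needs workload-partitioning coordination, records a heartbeat, and then emits one sample per device before logging "Finished polling". A minimal Python sketch of that control flow, with all names hypothetical (the real logic lives in _internal_pollster_run in ceilometer/polling/manager.py and is not reproduced here):

    import datetime

    def run_pollster(name, discover, poll, heartbeats, coordination_group=None):
        # "Executing discovery process for pollsters ..."
        resources = discover()
        # "Checking if we need coordination ... with coordination group name [None]"
        if coordination_group is not None:
            raise NotImplementedError("hash-ring coordination not sketched here")
        # "Pollster heartbeat update: <name>"
        heartbeats[name] = datetime.datetime.now(datetime.timezone.utc)
        # one "<resource>/<meter> volume: <n>" line per sample
        for sample in poll(resources):
            print("%s/%s volume: %s" % (sample["resource"], name, sample["volume"]))
        print("Finished polling pollster %s" % name)

    heartbeats = {}
    run_pollster(
        "disk.device.read.bytes",
        discover=lambda: ["0cc56d17-ec3a-4408-bccb-91b29427379e"],
        poll=lambda res: [{"resource": res[0], "volume": 23308800}],
        heartbeats=heartbeats,
    )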
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.002 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.002 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.002 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.003 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.003 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.003 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.003 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.004 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.003 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:39:14.003116) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.004 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.004 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.004 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.005 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.005 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.005 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.005 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.005 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.006 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.006 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.006 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.006 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.006 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.007 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.007 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.007 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:39:14.005184) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.007 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.007 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.007 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.008 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.008 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.008 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.008 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.008 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.009 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.009 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.009 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.009 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.010 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.010 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.010 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.011 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.011 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.011 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.011 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.011 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:39:14.007277) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.011 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.012 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:39:14.009062) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.012 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.012 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:39:14.011224) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.012 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.012 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.012 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.013 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.013 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.013 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.013 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:39:14.013178) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.044 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.045 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
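power.state reports a state code rather than a byte or time counter, so "volume: 1" above means the instance is running. Assuming the standard Nova power-state values (which mirror the libvirt domain states for this code):

    # Standard Nova power-state codes; the sample above carried volume 1.
    POWER_STATES = {0: "NOSTATE", 1: "RUNNING", 3: "PAUSED",
                    4: "SHUTDOWN", 6: "CRASHED", 7: "SUSPENDED"}
    print(POWER_STATES[1])  # -> RUNNING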
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.045 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.045 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.046 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.046 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.046 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.046 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.046 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.046 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.047 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.047 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.047 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.047 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.047 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.047 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.047 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.047 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.048 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.048 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.048 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.048 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.048 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.048 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.048 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.048 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.048 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.049 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.049 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.049 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.049 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.049 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.049 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:39:14.046202) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.050 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.050 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.050 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.050 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:39:14.047507) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.050 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.050 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.050 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:39:14.048534) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.050 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.050 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 70 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.050 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:39:14.049282) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.051 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.051 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.051 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.051 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.051 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.051 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.051 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.051 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.051 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.051 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.052 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.052 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.052 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.052 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.052 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.052 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.052 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.052 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.052 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.052 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.053 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.053 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.053 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.053 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.053 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.053 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.053 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.054 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.054 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.054 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.054 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.054 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.054 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.054 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.054 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.054 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 52140000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.054 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.055 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.055 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.055 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.055 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.055 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.055 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2412 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.055 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.055 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.055 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.055 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.055 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.056 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.056 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.83984375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.056 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
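Two of the samples above are easy to misread without their units: cpu is cumulative guest CPU time in nanoseconds and memory.usage is resident memory in MB, per the Ceilometer measurement conventions. A quick conversion of the values logged here:

    cpu_ns = 52140000000            # cpu sample above, in nanoseconds
    print(cpu_ns / 1e9, "s")        # 52.14 s of cumulative guest CPU time

    mem_mb = 48.83984375            # memory.usage sample above, in MB
    print(mem_mb * 1024, "KiB")     # 50012.0 KiB resident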
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.056 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.056 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.057 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:39:14.050656) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.057 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:39:14.051449) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.057 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.057 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:39:14.052056) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.057 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:39:14.052797) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.057 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.057 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:39:14.053983) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.057 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.058 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.058 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:39:14.054679) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.058 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.058 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:39:14.055349) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.058 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.058 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:39:14.056022) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.058 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.058 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.058 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.058 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.059 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.059 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.059 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.059 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.059 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.059 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.059 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.060 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.060 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.060 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.060 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.060 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.061 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.061 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.061 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:39:14.061 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:39:14 compute-0 ceph-mon[191930]: pgmap v1845: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 6 op/s
Oct 11 02:39:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1846: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 341 B/s wr, 8 op/s
Oct 11 02:39:14 compute-0 nova_compute[356901]: 2025-10-11 02:39:14.998 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:15 compute-0 ceph-mon[191930]: pgmap v1846: 321 pgs: 321 active+clean; 118 MiB data, 304 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 341 B/s wr, 8 op/s
Oct 11 02:39:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:39:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1847: 321 pgs: 321 active+clean; 154 MiB data, 326 MiB used, 60 GiB / 60 GiB avail; 409 KiB/s rd, 2.0 MiB/s wr, 51 op/s
Oct 11 02:39:17 compute-0 ceph-mon[191930]: pgmap v1847: 321 pgs: 321 active+clean; 154 MiB data, 326 MiB used, 60 GiB / 60 GiB avail; 409 KiB/s rd, 2.0 MiB/s wr, 51 op/s
Oct 11 02:39:17 compute-0 nova_compute[356901]: 2025-10-11 02:39:17.296 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 4.765s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:17 compute-0 podman[451787]: 2025-10-11 02:39:17.304129694 +0000 UTC m=+0.185938767 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.29.0, managed_by=edpm_ansible, io.openshift.tags=base rhel9, summary=Provides the latest release of Red Hat Universal Base Image 9., vendor=Red Hat, Inc., com.redhat.component=ubi9-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., release=1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, version=9.4, io.k8s.display-name=Red Hat Universal Base Image 9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, architecture=x86_64, io.openshift.expose-services=, build-date=2024-09-18T21:23:30, config_id=edpm, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-type=git, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, distribution-scope=public, name=ubi9, release-0.7.12=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=kepler)
Oct 11 02:39:17 compute-0 nova_compute[356901]: 2025-10-11 02:39:17.503 2 DEBUG nova.storage.rbd_utils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] resizing rbd image f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_disk to 1073741824 resize /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:288
Oct 11 02:39:17 compute-0 nova_compute[356901]: 2025-10-11 02:39:17.748 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:17 compute-0 nova_compute[356901]: 2025-10-11 02:39:17.753 2 DEBUG oslo_concurrency.processutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d ee9601c7-f562-449e-9f5c-5e1355f3c130_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 5.016s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:17 compute-0 nova_compute[356901]: 2025-10-11 02:39:17.950 2 DEBUG nova.storage.rbd_utils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] resizing rbd image ee9601c7-f562-449e-9f5c-5e1355f3c130_disk to 1073741824 resize /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:288
Oct 11 02:39:18 compute-0 nova_compute[356901]: 2025-10-11 02:39:18.352 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:18 compute-0 nova_compute[356901]: 2025-10-11 02:39:18.354 2 DEBUG oslo_concurrency.processutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d 49d4f343-d1b4-4594-96d2-0777a5ce8581_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 5.477s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:18 compute-0 nova_compute[356901]: 2025-10-11 02:39:18.355 2 DEBUG oslo_concurrency.processutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d 830c7581-3555-41db-9818-0961fc151818_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 5.746s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:18 compute-0 ovn_controller[88370]: 2025-10-11T02:39:18Z|00074|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:39:18 compute-0 nova_compute[356901]: 2025-10-11 02:39:18.602 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:18 compute-0 nova_compute[356901]: 2025-10-11 02:39:18.621 2 DEBUG nova.storage.rbd_utils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] resizing rbd image 49d4f343-d1b4-4594-96d2-0777a5ce8581_disk to 1073741824 resize /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:288
Oct 11 02:39:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1848: 321 pgs: 321 active+clean; 205 MiB data, 345 MiB used, 60 GiB / 60 GiB avail; 32 KiB/s rd, 3.6 MiB/s wr, 56 op/s
Oct 11 02:39:18 compute-0 nova_compute[356901]: 2025-10-11 02:39:18.882 2 DEBUG nova.storage.rbd_utils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] resizing rbd image 830c7581-3555-41db-9818-0961fc151818_disk to 1073741824 resize /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:288
Oct 11 02:39:19 compute-0 ceph-mon[191930]: pgmap v1848: 321 pgs: 321 active+clean; 205 MiB data, 345 MiB used, 60 GiB / 60 GiB avail; 32 KiB/s rd, 3.6 MiB/s wr, 56 op/s
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.001 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.114 2 DEBUG nova.objects.instance [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lazy-loading 'migration_context' on Instance uuid f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.129 2 DEBUG nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Created local disks _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4857
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.130 2 DEBUG nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Ensure instance console log exists: /var/lib/nova/instances/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/console.log _ensure_console_log_for_instance /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4609
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.130 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Acquiring lock "vgpu_resources" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.131 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "vgpu_resources" acquired by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.132 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "vgpu_resources" "released" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.134 2 DEBUG nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Start _get_guest_xml network_info=[{"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] disk_info={'disk_bus': 'virtio', 'cdrom_bus': 'sata', 'mapping': {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.config': {'bus': 'sata', 'dev': 'sda', 'type': 'cdrom'}}} image_meta=ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:38:04Z,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=0,min_ram=0,name='cirros-0.6.2-x86_64-disk.img',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:38:05Z,virtual_size=<?>,visibility=<?>) rescue=None block_device_info={'root_device_name': '/dev/vda', 'image': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'boot_index': 0, 'device_name': '/dev/vda', 'size': 0, 'encryption_format': None, 'image_id': '72f37f2e-4296-450e-9a12-10717f4ac7dc'}], 'ephemerals': [], 'block_device_mapping': [], 'swap': None} _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7549
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.144 2 WARNING nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.151 2 DEBUG nova.virt.libvirt.host [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V1... _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1653
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.152 2 DEBUG nova.virt.libvirt.host [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CPU controller missing on host. _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1663
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.157 2 DEBUG nova.virt.libvirt.host [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V2... _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1672
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.158 2 DEBUG nova.virt.libvirt.host [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CPU controller found on host. _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1679
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.159 2 DEBUG nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CPU mode 'host-model' models '' was chosen, with extra flags: '' _get_guest_cpu_model_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:5396
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.160 2 DEBUG nova.virt.hardware [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Getting desirable topologies for flavor Flavor(created_at=2025-10-11T02:38:03Z,deleted=False,deleted_at=None,description=None,disabled=False,ephemeral_gb=0,extra_specs={hw_rng:allowed='True'},flavorid='6dff30d1-85df-4e9c-9163-a20ba47bb0c7',id=3,is_public=True,memory_mb=128,name='m1.nano',projects=<?>,root_gb=1,rxtx_factor=1.0,swap=0,updated_at=None,vcpu_weight=0,vcpus=1) and image_meta ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:38:04Z,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=0,min_ram=0,name='cirros-0.6.2-x86_64-disk.img',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:38:05Z,virtual_size=<?>,visibility=<?>), allow threads: True _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:563
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.160 2 DEBUG nova.virt.hardware [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Flavor limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:348
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.161 2 DEBUG nova.virt.hardware [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Image limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:352
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.161 2 DEBUG nova.virt.hardware [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Flavor pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:388
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.162 2 DEBUG nova.virt.hardware [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Image pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:392
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.162 2 DEBUG nova.virt.hardware [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Chose sockets=0, cores=0, threads=0; limits were sockets=65536, cores=65536, threads=65536 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:430
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.163 2 DEBUG nova.virt.hardware [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Topology preferred VirtCPUTopology(cores=0,sockets=0,threads=0), maximum VirtCPUTopology(cores=65536,sockets=65536,threads=65536) _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:569
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.163 2 DEBUG nova.virt.hardware [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Build topologies for 1 vcpu(s) 1:1:1 _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:471
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.164 2 DEBUG nova.virt.hardware [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Got 1 possible topologies _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:501
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.165 2 DEBUG nova.virt.hardware [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Possible topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:575
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.165 2 DEBUG nova.virt.hardware [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Sorted desired topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:577
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.169 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:39:20 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2698947092' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1849: 321 pgs: 321 active+clean; 264 MiB data, 366 MiB used, 60 GiB / 60 GiB avail; 40 KiB/s rd, 5.3 MiB/s wr, 70 op/s
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.782 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.612s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.844 2 DEBUG nova.storage.rbd_utils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] rbd image f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:20 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2698947092' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.859 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.931 2 DEBUG nova.objects.instance [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lazy-loading 'migration_context' on Instance uuid ee9601c7-f562-449e-9f5c-5e1355f3c130 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.960 2 DEBUG nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Created local disks _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4857
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.961 2 DEBUG nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Ensure instance console log exists: /var/lib/nova/instances/ee9601c7-f562-449e-9f5c-5e1355f3c130/console.log _ensure_console_log_for_instance /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4609
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.962 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Acquiring lock "vgpu_resources" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.964 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lock "vgpu_resources" acquired by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.965 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lock "vgpu_resources" "released" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.970 2 DEBUG nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Start _get_guest_xml network_info=[{"id": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "address": "fa:16:3e:3f:e1:d3", "network": {"id": "3e4cd915-df9e-44c4-860d-c0ba25a21e79", "bridge": "br-int", "label": "tempest-ServersTestManualDisk-1152012084-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.9", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "5d5e8b42281d410bb45cb6c2e8e3fcbd", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap887c6cbc-2d", "ovs_interfaceid": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] disk_info={'disk_bus': 'virtio', 'cdrom_bus': 'sata', 'mapping': {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.config': {'bus': 'sata', 'dev': 'sda', 'type': 'cdrom'}}} image_meta=ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:38:04Z,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=0,min_ram=0,name='cirros-0.6.2-x86_64-disk.img',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:38:05Z,virtual_size=<?>,visibility=<?>) rescue=None block_device_info={'root_device_name': '/dev/vda', 'image': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'boot_index': 0, 'device_name': '/dev/vda', 'size': 0, 'encryption_format': None, 'image_id': '72f37f2e-4296-450e-9a12-10717f4ac7dc'}], 'ephemerals': [], 'block_device_mapping': [], 'swap': None} _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7549
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.984 2 WARNING nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.996 2 DEBUG nova.virt.libvirt.host [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V1... _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1653
Oct 11 02:39:20 compute-0 nova_compute[356901]: 2025-10-11 02:39:20.997 2 DEBUG nova.virt.libvirt.host [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] CPU controller missing on host. _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1663
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.005 2 DEBUG nova.virt.libvirt.host [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V2... _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1672
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.006 2 DEBUG nova.virt.libvirt.host [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] CPU controller found on host. _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1679
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.007 2 DEBUG nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] CPU mode 'host-model' models '' was chosen, with extra flags: '' _get_guest_cpu_model_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:5396
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.008 2 DEBUG nova.virt.hardware [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Getting desirable topologies for flavor Flavor(created_at=2025-10-11T02:38:03Z,deleted=False,deleted_at=None,description=None,disabled=False,ephemeral_gb=0,extra_specs={hw_rng:allowed='True'},flavorid='6dff30d1-85df-4e9c-9163-a20ba47bb0c7',id=3,is_public=True,memory_mb=128,name='m1.nano',projects=<?>,root_gb=1,rxtx_factor=1.0,swap=0,updated_at=None,vcpu_weight=0,vcpus=1) and image_meta ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:38:04Z,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=0,min_ram=0,name='cirros-0.6.2-x86_64-disk.img',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:38:05Z,virtual_size=<?>,visibility=<?>), allow threads: True _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:563
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.009 2 DEBUG nova.virt.hardware [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Flavor limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:348
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.010 2 DEBUG nova.virt.hardware [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Image limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:352
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.011 2 DEBUG nova.virt.hardware [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Flavor pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:388
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.011 2 DEBUG nova.virt.hardware [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Image pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:392
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.012 2 DEBUG nova.virt.hardware [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Chose sockets=0, cores=0, threads=0; limits were sockets=65536, cores=65536, threads=65536 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:430
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.013 2 DEBUG nova.virt.hardware [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Topology preferred VirtCPUTopology(cores=0,sockets=0,threads=0), maximum VirtCPUTopology(cores=65536,sockets=65536,threads=65536) _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:569
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.014 2 DEBUG nova.virt.hardware [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Build topologies for 1 vcpu(s) 1:1:1 _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:471
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.015 2 DEBUG nova.virt.hardware [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Got 1 possible topologies _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:501
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.015 2 DEBUG nova.virt.hardware [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Possible topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:575
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.017 2 DEBUG nova.virt.hardware [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Sorted desired topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:577
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.023 2 DEBUG oslo_concurrency.processutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #84. Immutable memtables: 0.
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:39:21.544850) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 47] Flushing memtable with next log file: 84
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150361544982, "job": 47, "event": "flush_started", "num_memtables": 1, "num_entries": 1133, "num_deletes": 257, "total_data_size": 1658833, "memory_usage": 1680600, "flush_reason": "Manual Compaction"}
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 47] Level-0 flush table #85: started
Oct 11 02:39:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:39:21 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2390034673' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:39:21 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3865725825' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.644 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.785s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.648 2 DEBUG nova.virt.libvirt.vif [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:38:58Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='tempest-ServerActionsTestJSON-server-482072585',display_name='tempest-ServerActionsTestJSON-server-482072585',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-serveractionstestjson-server-482072585',id=6,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBEeyedFg6J90z3asuDBQl1/Bvzj806ldEmlyVo7UkMTJHBgXm6kW1TdMM5vQZaYHoLzJajtdp6cuAv6b+cT74TvAgDg4tZ7S8WdWrLaHLA9uudTCq+0DsKhebTJVvA2XxA==',key_name='tempest-keypair-177844218',keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='dba4f6e51d33430ebf5566af53f6fbcc',ramdisk_id='',reservation_id='r-xpsstq1e',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_hw_rng_model='virtio',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-ServerActionsTestJSON-1563605323',owner_user_name='tempest-ServerActionsTestJSON-1563605323-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:39:00Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='11c81e88a90342bba2c2816e4e3cb191',uuid=f5eb6746-7f42-4fa4-8e26-cda5cfa0c765,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} virt_type=kvm get_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:563
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.651 2 DEBUG nova.network.os_vif_util [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Converting VIF {"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150361654379, "cf_name": "default", "job": 47, "event": "table_file_creation", "file_number": 85, "file_size": 1632209, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 36821, "largest_seqno": 37953, "table_properties": {"data_size": 1626701, "index_size": 2903, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1541, "raw_key_size": 11524, "raw_average_key_size": 19, "raw_value_size": 1615617, "raw_average_value_size": 2715, "num_data_blocks": 130, "num_entries": 595, "num_filter_entries": 595, "num_deletions": 257, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760150254, "oldest_key_time": 1760150254, "file_creation_time": 1760150361, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 85, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 47] Flush lasted 109703 microseconds, and 11148 cpu microseconds.
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.654 2 DEBUG nova.network.os_vif_util [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:8d:b8:dd,bridge_name='br-int',has_traffic_filtering=True,id=d7c4233c-f79b-4f32-b896-c36d4abb7d26,network=Network(b4d521f7-7729-40fd-aa58-7126044eb166),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd7c4233c-f7') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.658 2 DEBUG nova.objects.instance [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lazy-loading 'pci_devices' on Instance uuid f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.662 2 DEBUG oslo_concurrency.processutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.638s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:39:21.654557) [db/flush_job.cc:967] [default] [JOB 47] Level-0 flush table #85: 1632209 bytes OK
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:39:21.654597) [db/memtable_list.cc:519] [default] Level-0 commit table #85 started
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:39:21.687347) [db/memtable_list.cc:722] [default] Level-0 commit table #85: memtable #1 done
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:39:21.687442) EVENT_LOG_v1 {"time_micros": 1760150361687378, "job": 47, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:39:21.687474) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 47] Try to delete WAL files size 1653597, prev total WAL file size 1653597, number of live WAL files 2.
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000081.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:39:21.689002) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6C6F676D0031323539' seq:72057594037927935, type:22 .. '6C6F676D0031353132' seq:0, type:0; will stop at (end)
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 48] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 47 Base level 0, inputs: [85(1593KB)], [83(8551KB)]
Oct 11 02:39:21 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150361689085, "job": 48, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [85], "files_L6": [83], "score": -1, "input_data_size": 10388738, "oldest_snapshot_seqno": -1}
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.712 2 DEBUG nova.storage.rbd_utils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] rbd image ee9601c7-f562-449e-9f5c-5e1355f3c130_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.727 2 DEBUG oslo_concurrency.processutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.775 2 DEBUG nova.objects.instance [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lazy-loading 'migration_context' on Instance uuid 49d4f343-d1b4-4594-96d2-0777a5ce8581 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.791 2 DEBUG nova.objects.instance [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lazy-loading 'migration_context' on Instance uuid 830c7581-3555-41db-9818-0961fc151818 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.796 2 DEBUG nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] End _get_guest_xml xml=<domain type="kvm">
Oct 11 02:39:21 compute-0 nova_compute[356901]:   <uuid>f5eb6746-7f42-4fa4-8e26-cda5cfa0c765</uuid>
Oct 11 02:39:21 compute-0 nova_compute[356901]:   <name>instance-00000006</name>
Oct 11 02:39:21 compute-0 nova_compute[356901]:   <memory>131072</memory>
Oct 11 02:39:21 compute-0 nova_compute[356901]:   <vcpu>1</vcpu>
Oct 11 02:39:21 compute-0 nova_compute[356901]:   <metadata>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.1">
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <nova:package version="27.5.2-0.20250829104910.6f8decf.el9"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <nova:name>tempest-ServerActionsTestJSON-server-482072585</nova:name>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <nova:creationTime>2025-10-11 02:39:20</nova:creationTime>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <nova:flavor name="m1.nano">
Oct 11 02:39:21 compute-0 nova_compute[356901]:         <nova:memory>128</nova:memory>
Oct 11 02:39:21 compute-0 nova_compute[356901]:         <nova:disk>1</nova:disk>
Oct 11 02:39:21 compute-0 nova_compute[356901]:         <nova:swap>0</nova:swap>
Oct 11 02:39:21 compute-0 nova_compute[356901]:         <nova:ephemeral>0</nova:ephemeral>
Oct 11 02:39:21 compute-0 nova_compute[356901]:         <nova:vcpus>1</nova:vcpus>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       </nova:flavor>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <nova:owner>
Oct 11 02:39:21 compute-0 nova_compute[356901]:         <nova:user uuid="11c81e88a90342bba2c2816e4e3cb191">tempest-ServerActionsTestJSON-1563605323-project-member</nova:user>
Oct 11 02:39:21 compute-0 nova_compute[356901]:         <nova:project uuid="dba4f6e51d33430ebf5566af53f6fbcc">tempest-ServerActionsTestJSON-1563605323</nova:project>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       </nova:owner>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <nova:root type="image" uuid="72f37f2e-4296-450e-9a12-10717f4ac7dc"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <nova:ports>
Oct 11 02:39:21 compute-0 nova_compute[356901]:         <nova:port uuid="d7c4233c-f79b-4f32-b896-c36d4abb7d26">
Oct 11 02:39:21 compute-0 nova_compute[356901]:           <nova:ip type="fixed" address="10.100.0.4" ipVersion="4"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:         </nova:port>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       </nova:ports>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     </nova:instance>
Oct 11 02:39:21 compute-0 nova_compute[356901]:   </metadata>
Oct 11 02:39:21 compute-0 nova_compute[356901]:   <sysinfo type="smbios">
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <system>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <entry name="manufacturer">RDO</entry>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <entry name="product">OpenStack Compute</entry>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <entry name="version">27.5.2-0.20250829104910.6f8decf.el9</entry>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <entry name="serial">f5eb6746-7f42-4fa4-8e26-cda5cfa0c765</entry>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <entry name="uuid">f5eb6746-7f42-4fa4-8e26-cda5cfa0c765</entry>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <entry name="family">Virtual Machine</entry>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     </system>
Oct 11 02:39:21 compute-0 nova_compute[356901]:   </sysinfo>
Oct 11 02:39:21 compute-0 nova_compute[356901]:   <os>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <type arch="x86_64" machine="q35">hvm</type>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <boot dev="hd"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <smbios mode="sysinfo"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:   </os>
Oct 11 02:39:21 compute-0 nova_compute[356901]:   <features>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <acpi/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <apic/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <vmcoreinfo/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:   </features>
Oct 11 02:39:21 compute-0 nova_compute[356901]:   <clock offset="utc">
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <timer name="pit" tickpolicy="delay"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <timer name="rtc" tickpolicy="catchup"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <timer name="hpet" present="no"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:   </clock>
Oct 11 02:39:21 compute-0 nova_compute[356901]:   <cpu mode="host-model" match="exact">
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <topology sockets="1" cores="1" threads="1"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:39:21 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_disk">
Oct 11 02:39:21 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       </source>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:39:21 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <target dev="vda" bus="virtio"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <disk type="network" device="cdrom">
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_disk.config">
Oct 11 02:39:21 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       </source>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:39:21 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <target dev="sda" bus="sata"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <interface type="ethernet">
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <mac address="fa:16:3e:8d:b8:dd"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <driver name="vhost" rx_queue_size="512"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <mtu size="1442"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <target dev="tapd7c4233c-f7"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <serial type="pty">
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <log file="/var/lib/nova/instances/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/console.log" append="off"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     </serial>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <graphics type="vnc" autoport="yes" listen="::0"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <video>
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     </video>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <input type="tablet" bus="usb"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <rng model="virtio">
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <backend model="random">/dev/urandom</backend>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <controller type="usb" index="0"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     <memballoon model="virtio">
Oct 11 02:39:21 compute-0 nova_compute[356901]:       <stats period="10"/>
Oct 11 02:39:21 compute-0 nova_compute[356901]:     </memballoon>
Oct 11 02:39:21 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:39:21 compute-0 nova_compute[356901]: </domain>
Oct 11 02:39:21 compute-0 nova_compute[356901]:  _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7555
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.797 2 DEBUG nova.compute.manager [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Preparing to wait for external event network-vif-plugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 prepare_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:283
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.798 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Acquiring lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.799 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" acquired by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.799 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" "released" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.800 2 DEBUG nova.virt.libvirt.vif [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:38:58Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='tempest-ServerActionsTestJSON-server-482072585',display_name='tempest-ServerActionsTestJSON-server-482072585',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-serveractionstestjson-server-482072585',id=6,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBEeyedFg6J90z3asuDBQl1/Bvzj806ldEmlyVo7UkMTJHBgXm6kW1TdMM5vQZaYHoLzJajtdp6cuAv6b+cT74TvAgDg4tZ7S8WdWrLaHLA9uudTCq+0DsKhebTJVvA2XxA==',key_name='tempest-keypair-177844218',keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=PciDeviceList,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='dba4f6e51d33430ebf5566af53f6fbcc',ramdisk_id='',reservation_id='r-xpsstq1e',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_hw_rng_model='virtio',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-ServerActionsTestJSON-1563605323',owner_user_name='tempest-ServerActionsTestJSON-1563605323-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:39:00Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='11c81e88a90342bba2c2816e4e3cb191',uuid=f5eb6746-7f42-4fa4-8e26-cda5cfa0c765,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} plug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:710
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.800 2 DEBUG nova.network.os_vif_util [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Converting VIF {"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.801 2 DEBUG nova.network.os_vif_util [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:8d:b8:dd,bridge_name='br-int',has_traffic_filtering=True,id=d7c4233c-f79b-4f32-b896-c36d4abb7d26,network=Network(b4d521f7-7729-40fd-aa58-7126044eb166),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd7c4233c-f7') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.802 2 DEBUG os_vif [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Plugging vif VIFOpenVSwitch(active=False,address=fa:16:3e:8d:b8:dd,bridge_name='br-int',has_traffic_filtering=True,id=d7c4233c-f79b-4f32-b896-c36d4abb7d26,network=Network(b4d521f7-7729-40fd-aa58-7126044eb166),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd7c4233c-f7') plug /usr/lib/python3.9/site-packages/os_vif/__init__.py:76
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.802 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.803 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddBridgeCommand(_result=None, name=br-int, may_exist=True, datapath_type=system) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.804 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.806 2 DEBUG nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Created local disks _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4857
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.807 2 DEBUG nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Ensure instance console log exists: /var/lib/nova/instances/49d4f343-d1b4-4594-96d2-0777a5ce8581/console.log _ensure_console_log_for_instance /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4609
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.807 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Acquiring lock "vgpu_resources" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.808 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lock "vgpu_resources" acquired by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.808 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lock "vgpu_resources" "released" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.810 2 DEBUG nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Start _get_guest_xml network_info=[{"id": "4076fda2-be62-4c52-b073-8bf26574dee1", "address": "fa:16:3e:c5:9b:82", "network": {"id": "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2", "bridge": "br-int", "label": "tempest-AttachInterfacesUnderV243Test-139612684-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d89911bf2931487c98dc0f44a8b67bca", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap4076fda2-be", "ovs_interfaceid": "4076fda2-be62-4c52-b073-8bf26574dee1", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] disk_info={'disk_bus': 'virtio', 'cdrom_bus': 'sata', 'mapping': {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.config': {'bus': 'sata', 'dev': 'sda', 'type': 'cdrom'}}} image_meta=ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:38:04Z,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=0,min_ram=0,name='cirros-0.6.2-x86_64-disk.img',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:38:05Z,virtual_size=<?>,visibility=<?>) rescue=None block_device_info={'root_device_name': '/dev/vda', 'image': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'boot_index': 0, 'device_name': '/dev/vda', 'size': 0, 'encryption_format': None, 'image_id': '72f37f2e-4296-450e-9a12-10717f4ac7dc'}], 'ephemerals': [], 'block_device_mapping': [], 'swap': None} _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7549
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.812 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.812 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapd7c4233c-f7, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.813 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Interface, record=tapd7c4233c-f7, col_values=(('external_ids', {'iface-id': 'd7c4233c-f79b-4f32-b896-c36d4abb7d26', 'iface-status': 'active', 'attached-mac': 'fa:16:3e:8d:b8:dd', 'vm-uuid': 'f5eb6746-7f42-4fa4-8e26-cda5cfa0c765'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:21 compute-0 NetworkManager[44908]: <info>  [1760150361.8165] manager: (tapd7c4233c-f7): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/38)
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.815 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.819 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.822 2 DEBUG nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Created local disks _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4857
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.822 2 DEBUG nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Ensure instance console log exists: /var/lib/nova/instances/830c7581-3555-41db-9818-0961fc151818/console.log _ensure_console_log_for_instance /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4609
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.823 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Acquiring lock "vgpu_resources" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.823 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lock "vgpu_resources" acquired by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.824 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lock "vgpu_resources" "released" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.826 2 DEBUG nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Start _get_guest_xml network_info=[{"id": "5cd25b0e-b4c9-408f-b456-59127a046cde", "address": "fa:16:3e:23:45:c8", "network": {"id": "b6521a4e-cfb9-4743-91c3-85402b5661d9", "bridge": "br-int", "label": "tempest-ServersTestJSON-1548228308-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "56e45b830ec844e4802f14cd3e25bda2", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5cd25b0e-b4", "ovs_interfaceid": "5cd25b0e-b4c9-408f-b456-59127a046cde", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] disk_info={'disk_bus': 'virtio', 'cdrom_bus': 'sata', 'mapping': {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.config': {'bus': 'sata', 'dev': 'sda', 'type': 'cdrom'}}} image_meta=ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:38:04Z,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=0,min_ram=0,name='cirros-0.6.2-x86_64-disk.img',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:38:05Z,virtual_size=<?>,visibility=<?>) rescue=None block_device_info={'root_device_name': '/dev/vda', 'image': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'boot_index': 0, 'device_name': '/dev/vda', 'size': 0, 'encryption_format': None, 'image_id': '72f37f2e-4296-450e-9a12-10717f4ac7dc'}], 'ephemerals': [], 'block_device_mapping': [], 'swap': None} _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7549
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.827 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.828 2 INFO os_vif [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Successfully plugged vif VIFOpenVSwitch(active=False,address=fa:16:3e:8d:b8:dd,bridge_name='br-int',has_traffic_filtering=True,id=d7c4233c-f79b-4f32-b896-c36d4abb7d26,network=Network(b4d521f7-7729-40fd-aa58-7126044eb166),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd7c4233c-f7')
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.839 2 WARNING nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.861 2 DEBUG nova.virt.libvirt.host [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V1... _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1653
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.863 2 DEBUG nova.virt.libvirt.host [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] CPU controller missing on host. _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1663
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.864 2 WARNING nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.869 2 DEBUG nova.virt.libvirt.host [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V2... _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1672
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.870 2 DEBUG nova.virt.libvirt.host [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] CPU controller found on host. _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1679
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.870 2 DEBUG nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] CPU mode 'host-model' models '' was chosen, with extra flags: '' _get_guest_cpu_model_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:5396
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.871 2 DEBUG nova.virt.hardware [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Getting desirable topologies for flavor Flavor(created_at=2025-10-11T02:38:03Z,deleted=False,deleted_at=None,description=None,disabled=False,ephemeral_gb=0,extra_specs={hw_rng:allowed='True'},flavorid='6dff30d1-85df-4e9c-9163-a20ba47bb0c7',id=3,is_public=True,memory_mb=128,name='m1.nano',projects=<?>,root_gb=1,rxtx_factor=1.0,swap=0,updated_at=None,vcpu_weight=0,vcpus=1) and image_meta ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:38:04Z,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=0,min_ram=0,name='cirros-0.6.2-x86_64-disk.img',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:38:05Z,virtual_size=<?>,visibility=<?>), allow threads: True _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:563
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.871 2 DEBUG nova.virt.hardware [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Flavor limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:348
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.872 2 DEBUG nova.virt.hardware [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Image limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:352
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.872 2 DEBUG nova.virt.hardware [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Flavor pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:388
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.872 2 DEBUG nova.virt.hardware [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Image pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:392
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.873 2 DEBUG nova.virt.hardware [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Chose sockets=0, cores=0, threads=0; limits were sockets=65536, cores=65536, threads=65536 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:430
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.873 2 DEBUG nova.virt.hardware [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Topology preferred VirtCPUTopology(cores=0,sockets=0,threads=0), maximum VirtCPUTopology(cores=65536,sockets=65536,threads=65536) _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:569
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.873 2 DEBUG nova.virt.hardware [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Build topologies for 1 vcpu(s) 1:1:1 _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:471
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.874 2 DEBUG nova.virt.hardware [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Got 1 possible topologies _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:501
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.874 2 DEBUG nova.virt.hardware [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Possible topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:575
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.874 2 DEBUG nova.virt.hardware [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Sorted desired topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:577
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.877 2 DEBUG oslo_concurrency.processutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.905 2 DEBUG nova.virt.libvirt.host [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V1... _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1653
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.906 2 DEBUG nova.virt.libvirt.host [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] CPU controller missing on host. _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1663
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.912 2 DEBUG nova.virt.libvirt.host [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V2... _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1672
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.912 2 DEBUG nova.virt.libvirt.host [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] CPU controller found on host. _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1679
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.920 2 DEBUG nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] CPU mode 'host-model' models '' was chosen, with extra flags: '' _get_guest_cpu_model_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:5396
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.921 2 DEBUG nova.virt.hardware [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Getting desirable topologies for flavor Flavor(created_at=2025-10-11T02:38:03Z,deleted=False,deleted_at=None,description=None,disabled=False,ephemeral_gb=0,extra_specs={hw_rng:allowed='True'},flavorid='6dff30d1-85df-4e9c-9163-a20ba47bb0c7',id=3,is_public=True,memory_mb=128,name='m1.nano',projects=<?>,root_gb=1,rxtx_factor=1.0,swap=0,updated_at=None,vcpu_weight=0,vcpus=1) and image_meta ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:38:04Z,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=0,min_ram=0,name='cirros-0.6.2-x86_64-disk.img',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:38:05Z,virtual_size=<?>,visibility=<?>), allow threads: True _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:563
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.921 2 DEBUG nova.virt.hardware [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Flavor limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:348
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.921 2 DEBUG nova.virt.hardware [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Image limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:352
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.922 2 DEBUG nova.virt.hardware [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Flavor pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:388
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.922 2 DEBUG nova.virt.hardware [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Image pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:392
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.922 2 DEBUG nova.virt.hardware [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Chose sockets=0, cores=0, threads=0; limits were sockets=65536, cores=65536, threads=65536 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:430
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.923 2 DEBUG nova.virt.hardware [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Topology preferred VirtCPUTopology(cores=0,sockets=0,threads=0), maximum VirtCPUTopology(cores=65536,sockets=65536,threads=65536) _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:569
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.923 2 DEBUG nova.virt.hardware [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Build topologies for 1 vcpu(s) 1:1:1 _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:471
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.923 2 DEBUG nova.virt.hardware [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Got 1 possible topologies _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:501
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.924 2 DEBUG nova.virt.hardware [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Possible topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:575
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.924 2 DEBUG nova.virt.hardware [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Sorted desired topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:577
Oct 11 02:39:21 compute-0 nova_compute[356901]: 2025-10-11 02:39:21.927 2 DEBUG oslo_concurrency.processutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:22 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 48] Generated table #86: 5737 keys, 10285606 bytes, temperature: kUnknown
Oct 11 02:39:22 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150362014892, "cf_name": "default", "job": 48, "event": "table_file_creation", "file_number": 86, "file_size": 10285606, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 10245361, "index_size": 24775, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 14405, "raw_key_size": 145372, "raw_average_key_size": 25, "raw_value_size": 10139744, "raw_average_value_size": 1767, "num_data_blocks": 1021, "num_entries": 5737, "num_filter_entries": 5737, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760150361, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 86, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:39:22 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:39:22 compute-0 ceph-mon[191930]: pgmap v1849: 321 pgs: 321 active+clean; 264 MiB data, 366 MiB used, 60 GiB / 60 GiB avail; 40 KiB/s rd, 5.3 MiB/s wr, 70 op/s
Oct 11 02:39:22 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2390034673' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:22 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3865725825' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.027 2 DEBUG nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] No BDM found with device name vda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.028 2 DEBUG nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] No BDM found with device name sda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.028 2 DEBUG nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] No VIF found with MAC fa:16:3e:8d:b8:dd, not building metadata _build_interface_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12092
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.029 2 INFO nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Using config drive
Oct 11 02:39:22 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:39:22.015160) [db/compaction/compaction_job.cc:1663] [default] [JOB 48] Compacted 1@0 + 1@6 files to L6 => 10285606 bytes
Oct 11 02:39:22 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:39:22.090564) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 31.9 rd, 31.6 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(1.6, 8.4 +0.0 blob) out(9.8 +0.0 blob), read-write-amplify(12.7) write-amplify(6.3) OK, records in: 6267, records dropped: 530 output_compression: NoCompression
Oct 11 02:39:22 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:39:22.090771) EVENT_LOG_v1 {"time_micros": 1760150362090589, "job": 48, "event": "compaction_finished", "compaction_time_micros": 325880, "compaction_time_cpu_micros": 38328, "output_level": 6, "num_output_files": 1, "total_output_size": 10285606, "num_input_records": 6267, "num_output_records": 5737, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:39:22 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000085.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:39:22 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150362096162, "job": 48, "event": "table_file_deletion", "file_number": 85}
Oct 11 02:39:22 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000083.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:39:22 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150362098939, "job": 48, "event": "table_file_deletion", "file_number": 83}
Oct 11 02:39:22 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:39:21.688756) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:39:22 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:39:22.099097) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:39:22 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:39:22.099105) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:39:22 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:39:22.099107) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:39:22 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:39:22.099108) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:39:22 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:39:22.099110) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.147 2 DEBUG nova.storage.rbd_utils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] rbd image f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:39:22 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/808514683' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.355 2 DEBUG oslo_concurrency.processutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.628s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.358 2 DEBUG nova.virt.libvirt.vif [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] vif_type=ovs instance=Instance(access_ip_v4=1.1.1.1,access_ip_v6=::babe:dc0c:1602,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:39:00Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='tempest-ServersTestManualDisk-server-1400384832',display_name='tempest-ServersTestManualDisk-server-1400384832',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-serverstestmanualdisk-server-1400384832',id=8,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBBL6x/f8Zf/LDDCofajEq020TpmyjW9NO7vA7SAYbWJhZilNkemXvyab+jylvxZHOw0v7ime7uJ1WDC6srAuyI4NaiRkhZgxf6/8nUXEMtEGfOh0ic3nB3uEET9l6hAh+A==',key_name='tempest-keypair-919071895',keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={hello='world'},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='5d5e8b42281d410bb45cb6c2e8e3fcbd',ramdisk_id='',reservation_id='r-8wgd9gbq',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_hw_rng_model='virtio',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-ServersTestManualDisk-1790007234',owner_user_name='tempest-ServersTestManualDisk-1790007234-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:39:03Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='5539243c06f64f0694000d9748ff55dd',uuid=ee9601c7-f562-449e-9f5c-5e1355f3c130,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "address": "fa:16:3e:3f:e1:d3", "network": {"id": "3e4cd915-df9e-44c4-860d-c0ba25a21e79", "bridge": "br-int", "label": "tempest-ServersTestManualDisk-1152012084-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.9", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "5d5e8b42281d410bb45cb6c2e8e3fcbd", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap887c6cbc-2d", "ovs_interfaceid": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} virt_type=kvm get_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:563
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.360 2 DEBUG nova.network.os_vif_util [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Converting VIF {"id": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "address": "fa:16:3e:3f:e1:d3", "network": {"id": "3e4cd915-df9e-44c4-860d-c0ba25a21e79", "bridge": "br-int", "label": "tempest-ServersTestManualDisk-1152012084-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.9", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "5d5e8b42281d410bb45cb6c2e8e3fcbd", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap887c6cbc-2d", "ovs_interfaceid": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.362 2 DEBUG nova.network.os_vif_util [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:3f:e1:d3,bridge_name='br-int',has_traffic_filtering=True,id=887c6cbc-2d8f-44c3-959f-4c732f5d4040,network=Network(3e4cd915-df9e-44c4-860d-c0ba25a21e79),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap887c6cbc-2d') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.365 2 DEBUG nova.objects.instance [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lazy-loading 'pci_devices' on Instance uuid ee9601c7-f562-449e-9f5c-5e1355f3c130 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.387 2 DEBUG nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] End _get_guest_xml xml=<domain type="kvm">
Oct 11 02:39:22 compute-0 nova_compute[356901]:   <uuid>ee9601c7-f562-449e-9f5c-5e1355f3c130</uuid>
Oct 11 02:39:22 compute-0 nova_compute[356901]:   <name>instance-00000008</name>
Oct 11 02:39:22 compute-0 nova_compute[356901]:   <memory>131072</memory>
Oct 11 02:39:22 compute-0 nova_compute[356901]:   <vcpu>1</vcpu>
Oct 11 02:39:22 compute-0 nova_compute[356901]:   <metadata>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.1">
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <nova:package version="27.5.2-0.20250829104910.6f8decf.el9"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <nova:name>tempest-ServersTestManualDisk-server-1400384832</nova:name>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <nova:creationTime>2025-10-11 02:39:20</nova:creationTime>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <nova:flavor name="m1.nano">
Oct 11 02:39:22 compute-0 nova_compute[356901]:         <nova:memory>128</nova:memory>
Oct 11 02:39:22 compute-0 nova_compute[356901]:         <nova:disk>1</nova:disk>
Oct 11 02:39:22 compute-0 nova_compute[356901]:         <nova:swap>0</nova:swap>
Oct 11 02:39:22 compute-0 nova_compute[356901]:         <nova:ephemeral>0</nova:ephemeral>
Oct 11 02:39:22 compute-0 nova_compute[356901]:         <nova:vcpus>1</nova:vcpus>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       </nova:flavor>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <nova:owner>
Oct 11 02:39:22 compute-0 nova_compute[356901]:         <nova:user uuid="5539243c06f64f0694000d9748ff55dd">tempest-ServersTestManualDisk-1790007234-project-member</nova:user>
Oct 11 02:39:22 compute-0 nova_compute[356901]:         <nova:project uuid="5d5e8b42281d410bb45cb6c2e8e3fcbd">tempest-ServersTestManualDisk-1790007234</nova:project>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       </nova:owner>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <nova:root type="image" uuid="72f37f2e-4296-450e-9a12-10717f4ac7dc"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <nova:ports>
Oct 11 02:39:22 compute-0 nova_compute[356901]:         <nova:port uuid="887c6cbc-2d8f-44c3-959f-4c732f5d4040">
Oct 11 02:39:22 compute-0 nova_compute[356901]:           <nova:ip type="fixed" address="10.100.0.9" ipVersion="4"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:         </nova:port>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       </nova:ports>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     </nova:instance>
Oct 11 02:39:22 compute-0 nova_compute[356901]:   </metadata>
Oct 11 02:39:22 compute-0 nova_compute[356901]:   <sysinfo type="smbios">
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <system>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <entry name="manufacturer">RDO</entry>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <entry name="product">OpenStack Compute</entry>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <entry name="version">27.5.2-0.20250829104910.6f8decf.el9</entry>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <entry name="serial">ee9601c7-f562-449e-9f5c-5e1355f3c130</entry>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <entry name="uuid">ee9601c7-f562-449e-9f5c-5e1355f3c130</entry>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <entry name="family">Virtual Machine</entry>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     </system>
Oct 11 02:39:22 compute-0 nova_compute[356901]:   </sysinfo>
Oct 11 02:39:22 compute-0 nova_compute[356901]:   <os>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <type arch="x86_64" machine="q35">hvm</type>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <boot dev="hd"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <smbios mode="sysinfo"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:   </os>
Oct 11 02:39:22 compute-0 nova_compute[356901]:   <features>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <acpi/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <apic/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <vmcoreinfo/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:   </features>
Oct 11 02:39:22 compute-0 nova_compute[356901]:   <clock offset="utc">
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <timer name="pit" tickpolicy="delay"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <timer name="rtc" tickpolicy="catchup"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <timer name="hpet" present="no"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:   </clock>
Oct 11 02:39:22 compute-0 nova_compute[356901]:   <cpu mode="host-model" match="exact">
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <topology sockets="1" cores="1" threads="1"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:39:22 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/ee9601c7-f562-449e-9f5c-5e1355f3c130_disk">
Oct 11 02:39:22 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       </source>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:39:22 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <target dev="vda" bus="virtio"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <disk type="network" device="cdrom">
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/ee9601c7-f562-449e-9f5c-5e1355f3c130_disk.config">
Oct 11 02:39:22 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       </source>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:39:22 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <target dev="sda" bus="sata"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <interface type="ethernet">
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <mac address="fa:16:3e:3f:e1:d3"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <driver name="vhost" rx_queue_size="512"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <mtu size="1442"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <target dev="tap887c6cbc-2d"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <serial type="pty">
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <log file="/var/lib/nova/instances/ee9601c7-f562-449e-9f5c-5e1355f3c130/console.log" append="off"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     </serial>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <graphics type="vnc" autoport="yes" listen="::0"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <video>
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     </video>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <input type="tablet" bus="usb"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <rng model="virtio">
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <backend model="random">/dev/urandom</backend>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <controller type="usb" index="0"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     <memballoon model="virtio">
Oct 11 02:39:22 compute-0 nova_compute[356901]:       <stats period="10"/>
Oct 11 02:39:22 compute-0 nova_compute[356901]:     </memballoon>
Oct 11 02:39:22 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:39:22 compute-0 nova_compute[356901]: </domain>
Oct 11 02:39:22 compute-0 nova_compute[356901]:  _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7555
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.406 2 DEBUG nova.compute.manager [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Preparing to wait for external event network-vif-plugged-887c6cbc-2d8f-44c3-959f-4c732f5d4040 prepare_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:283
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.406 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Acquiring lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.407 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" acquired by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.407 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" "released" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.409 2 DEBUG nova.virt.libvirt.vif [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] vif_type=ovs instance=Instance(access_ip_v4=1.1.1.1,access_ip_v6=::babe:dc0c:1602,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:39:00Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='tempest-ServersTestManualDisk-server-1400384832',display_name='tempest-ServersTestManualDisk-server-1400384832',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-serverstestmanualdisk-server-1400384832',id=8,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBBL6x/f8Zf/LDDCofajEq020TpmyjW9NO7vA7SAYbWJhZilNkemXvyab+jylvxZHOw0v7ime7uJ1WDC6srAuyI4NaiRkhZgxf6/8nUXEMtEGfOh0ic3nB3uEET9l6hAh+A==',key_name='tempest-keypair-919071895',keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={hello='world'},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=PciDeviceList,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='5d5e8b42281d410bb45cb6c2e8e3fcbd',ramdisk_id='',reservation_id='r-8wgd9gbq',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_hw_rng_model='virtio',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-ServersTestManualDisk-1790007234',owner_user_name='tempest-ServersTestManualDisk-1790007234-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:39:03Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='5539243c06f64f0694000d9748ff55dd',uuid=ee9601c7-f562-449e-9f5c-5e1355f3c130,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "address": "fa:16:3e:3f:e1:d3", "network": {"id": "3e4cd915-df9e-44c4-860d-c0ba25a21e79", "bridge": "br-int", "label": "tempest-ServersTestManualDisk-1152012084-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.9", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "5d5e8b42281d410bb45cb6c2e8e3fcbd", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap887c6cbc-2d", "ovs_interfaceid": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} plug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:710
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.411 2 DEBUG nova.network.os_vif_util [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Converting VIF {"id": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "address": "fa:16:3e:3f:e1:d3", "network": {"id": "3e4cd915-df9e-44c4-860d-c0ba25a21e79", "bridge": "br-int", "label": "tempest-ServersTestManualDisk-1152012084-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.9", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "5d5e8b42281d410bb45cb6c2e8e3fcbd", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap887c6cbc-2d", "ovs_interfaceid": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.413 2 DEBUG nova.network.os_vif_util [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:3f:e1:d3,bridge_name='br-int',has_traffic_filtering=True,id=887c6cbc-2d8f-44c3-959f-4c732f5d4040,network=Network(3e4cd915-df9e-44c4-860d-c0ba25a21e79),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap887c6cbc-2d') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.413 2 DEBUG os_vif [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Plugging vif VIFOpenVSwitch(active=False,address=fa:16:3e:3f:e1:d3,bridge_name='br-int',has_traffic_filtering=True,id=887c6cbc-2d8f-44c3-959f-4c732f5d4040,network=Network(3e4cd915-df9e-44c4-860d-c0ba25a21e79),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap887c6cbc-2d') plug /usr/lib/python3.9/site-packages/os_vif/__init__.py:76
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.414 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.415 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddBridgeCommand(_result=None, name=br-int, may_exist=True, datapath_type=system) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.415 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.418 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.419 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tap887c6cbc-2d, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.420 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Interface, record=tap887c6cbc-2d, col_values=(('external_ids', {'iface-id': '887c6cbc-2d8f-44c3-959f-4c732f5d4040', 'iface-status': 'active', 'attached-mac': 'fa:16:3e:3f:e1:d3', 'vm-uuid': 'ee9601c7-f562-449e-9f5c-5e1355f3c130'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:22 compute-0 NetworkManager[44908]: <info>  [1760150362.4235] manager: (tap887c6cbc-2d): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/39)
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.423 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.427 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.439 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.440 2 INFO os_vif [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Successfully plugged vif VIFOpenVSwitch(active=False,address=fa:16:3e:3f:e1:d3,bridge_name='br-int',has_traffic_filtering=True,id=887c6cbc-2d8f-44c3-959f-4c732f5d4040,network=Network(3e4cd915-df9e-44c4-860d-c0ba25a21e79),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap887c6cbc-2d')
Oct 11 02:39:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:39:22 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/6353745' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.493 2 DEBUG oslo_concurrency.processutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.616s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:39:22 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/285040880' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.536 2 DEBUG nova.storage.rbd_utils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] rbd image 49d4f343-d1b4-4594-96d2-0777a5ce8581_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.544 2 DEBUG oslo_concurrency.processutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.579 2 DEBUG oslo_concurrency.processutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.652s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.611 2 DEBUG nova.storage.rbd_utils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] rbd image 830c7581-3555-41db-9818-0961fc151818_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.621 2 DEBUG oslo_concurrency.processutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.657 2 DEBUG nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] No BDM found with device name vda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.658 2 DEBUG nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] No BDM found with device name sda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.658 2 DEBUG nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] No VIF found with MAC fa:16:3e:3f:e1:d3, not building metadata _build_interface_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12092
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.660 2 INFO nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Using config drive
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.708 2 DEBUG nova.storage.rbd_utils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] rbd image ee9601c7-f562-449e-9f5c-5e1355f3c130_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1850: 321 pgs: 321 active+clean; 304 MiB data, 389 MiB used, 60 GiB / 60 GiB avail; 62 KiB/s rd, 7.1 MiB/s wr, 100 op/s
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.777 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.838 2 INFO nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Creating config drive at /var/lib/nova/instances/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.config
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.845 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Running cmd (subprocess): /usr/bin/mkisofs -o /var/lib/nova/instances/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpze7py8rf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:22 compute-0 nova_compute[356901]: 2025-10-11 02:39:22.989 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CMD "/usr/bin/mkisofs -o /var/lib/nova/instances/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpze7py8rf" returned: 0 in 0.143s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:39:23 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/238781777' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:39:23 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/4208017175' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:23 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/808514683' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:23 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/6353745' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:23 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/285040880' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:23 compute-0 ceph-mon[191930]: pgmap v1850: 321 pgs: 321 active+clean; 304 MiB data, 389 MiB used, 60 GiB / 60 GiB avail; 62 KiB/s rd, 7.1 MiB/s wr, 100 op/s
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.199 2 DEBUG nova.storage.rbd_utils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] rbd image f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.211 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.config f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.242 2 DEBUG oslo_concurrency.processutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.622s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.245 2 DEBUG oslo_concurrency.processutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.701s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.247 2 DEBUG nova.virt.libvirt.vif [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] vif_type=ovs instance=Instance(access_ip_v4=1.1.1.1,access_ip_v6=::babe:dc0c:1602,architecture=None,auto_disk_config=True,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:38:58Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='tempest-ServersTestJSON-server-1595957609',display_name='tempest-ServersTestJSON-server-1595957609',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-serverstestjson-server-1595957609',id=7,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBMzf2DYXJJIoJOC2qLbU8VUi6wgx/7Zr7U9fH8e3911FZRdaYVaxRqYR9UTydqtzyDp6Ms2EBS9n6gBBFdtM1Rrxxpe1Vohtnbt7VcSxvjKDQrCZDRyrT/SPrEwf5mXxSQ==',key_name='tempest-keypair-1620851907',keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={hello='world'},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='56e45b830ec844e4802f14cd3e25bda2',ramdisk_id='',reservation_id='r-afe03zy1',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_hw_rng_model='virtio',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-ServersTestJSON-2138603110',owner_user_name='tempest-ServersTestJSON-2138603110-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:39:01Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='1b63c9bbae8845d99db73ca671aedcfc',uuid=830c7581-3555-41db-9818-0961fc151818,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "5cd25b0e-b4c9-408f-b456-59127a046cde", "address": "fa:16:3e:23:45:c8", "network": {"id": "b6521a4e-cfb9-4743-91c3-85402b5661d9", "bridge": "br-int", "label": "tempest-ServersTestJSON-1548228308-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "56e45b830ec844e4802f14cd3e25bda2", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5cd25b0e-b4", "ovs_interfaceid": "5cd25b0e-b4c9-408f-b456-59127a046cde", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} virt_type=kvm get_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:563
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.247 2 DEBUG nova.network.os_vif_util [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Converting VIF {"id": "5cd25b0e-b4c9-408f-b456-59127a046cde", "address": "fa:16:3e:23:45:c8", "network": {"id": "b6521a4e-cfb9-4743-91c3-85402b5661d9", "bridge": "br-int", "label": "tempest-ServersTestJSON-1548228308-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "56e45b830ec844e4802f14cd3e25bda2", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5cd25b0e-b4", "ovs_interfaceid": "5cd25b0e-b4c9-408f-b456-59127a046cde", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.248 2 DEBUG nova.network.os_vif_util [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:23:45:c8,bridge_name='br-int',has_traffic_filtering=True,id=5cd25b0e-b4c9-408f-b456-59127a046cde,network=Network(b6521a4e-cfb9-4743-91c3-85402b5661d9),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap5cd25b0e-b4') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.250 2 DEBUG nova.objects.instance [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lazy-loading 'pci_devices' on Instance uuid 830c7581-3555-41db-9818-0961fc151818 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:39:23 compute-0 podman[452393]: 2025-10-11 02:39:23.252945782 +0000 UTC m=+0.127640594 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009)
Oct 11 02:39:23 compute-0 podman[452389]: 2025-10-11 02:39:23.253807774 +0000 UTC m=+0.145805088 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:39:23 compute-0 podman[452392]: 2025-10-11 02:39:23.255092748 +0000 UTC m=+0.133203580 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, io.buildah.version=1.41.4, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.build-date=20251007)
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.257 2 DEBUG nova.virt.libvirt.vif [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:39:01Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='tempest-AttachInterfacesUnderV243Test-server-402973055',display_name='tempest-AttachInterfacesUnderV243Test-server-402973055',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-attachinterfacesunderv243test-server-402973055',id=9,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBKe0LMc8LnBTAISBwhdLZZycX6z/Wtxh5iIrBfzvih3YfC2DgfsCYmIjzIMA1Bmi2PftRsJD/817XJgtfkV0jIbJQ/nBV4X5kWCjFiLmsxPozdtF2YLrErDo+eZfs6cn/g==',key_name='tempest-keypair-1693383324',keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='d89911bf2931487c98dc0f44a8b67bca',ramdisk_id='',reservation_id='r-9ixcygjt',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='reader,member',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_hw_rng_model='virtio',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-AttachInterfacesUnderV243Test-1568711783',owner_user_name='tempest-AttachInterfacesUnderV243Test-1568711783-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:39:04Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='9a1414c7b75246f596af7745610a00a4',uuid=49d4f343-d1b4-4594-96d2-0777a5ce8581,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "4076fda2-be62-4c52-b073-8bf26574dee1", "address": "fa:16:3e:c5:9b:82", "network": {"id": "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2", "bridge": "br-int", "label": "tempest-AttachInterfacesUnderV243Test-139612684-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d89911bf2931487c98dc0f44a8b67bca", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap4076fda2-be", "ovs_interfaceid": "4076fda2-be62-4c52-b073-8bf26574dee1", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} virt_type=kvm get_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:563
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.258 2 DEBUG nova.network.os_vif_util [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Converting VIF {"id": "4076fda2-be62-4c52-b073-8bf26574dee1", "address": "fa:16:3e:c5:9b:82", "network": {"id": "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2", "bridge": "br-int", "label": "tempest-AttachInterfacesUnderV243Test-139612684-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d89911bf2931487c98dc0f44a8b67bca", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap4076fda2-be", "ovs_interfaceid": "4076fda2-be62-4c52-b073-8bf26574dee1", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.258 2 DEBUG nova.network.os_vif_util [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:c5:9b:82,bridge_name='br-int',has_traffic_filtering=True,id=4076fda2-be62-4c52-b073-8bf26574dee1,network=Network(eb08ca1c-c05f-4da5-9518-fb3b2d958ee2),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap4076fda2-be') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.259 2 DEBUG nova.objects.instance [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lazy-loading 'pci_devices' on Instance uuid 49d4f343-d1b4-4594-96d2-0777a5ce8581 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.278 2 DEBUG nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] End _get_guest_xml xml=<domain type="kvm">
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <uuid>830c7581-3555-41db-9818-0961fc151818</uuid>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <name>instance-00000007</name>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <memory>131072</memory>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <vcpu>1</vcpu>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <metadata>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.1">
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <nova:package version="27.5.2-0.20250829104910.6f8decf.el9"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <nova:name>tempest-ServersTestJSON-server-1595957609</nova:name>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <nova:creationTime>2025-10-11 02:39:21</nova:creationTime>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <nova:flavor name="m1.nano">
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <nova:memory>128</nova:memory>
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <nova:disk>1</nova:disk>
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <nova:swap>0</nova:swap>
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <nova:ephemeral>0</nova:ephemeral>
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <nova:vcpus>1</nova:vcpus>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       </nova:flavor>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <nova:owner>
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <nova:user uuid="1b63c9bbae8845d99db73ca671aedcfc">tempest-ServersTestJSON-2138603110-project-member</nova:user>
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <nova:project uuid="56e45b830ec844e4802f14cd3e25bda2">tempest-ServersTestJSON-2138603110</nova:project>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       </nova:owner>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <nova:root type="image" uuid="72f37f2e-4296-450e-9a12-10717f4ac7dc"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <nova:ports>
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <nova:port uuid="5cd25b0e-b4c9-408f-b456-59127a046cde">
Oct 11 02:39:23 compute-0 nova_compute[356901]:           <nova:ip type="fixed" address="10.100.0.14" ipVersion="4"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:         </nova:port>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       </nova:ports>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </nova:instance>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   </metadata>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <sysinfo type="smbios">
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <system>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <entry name="manufacturer">RDO</entry>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <entry name="product">OpenStack Compute</entry>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <entry name="version">27.5.2-0.20250829104910.6f8decf.el9</entry>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <entry name="serial">830c7581-3555-41db-9818-0961fc151818</entry>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <entry name="uuid">830c7581-3555-41db-9818-0961fc151818</entry>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <entry name="family">Virtual Machine</entry>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </system>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   </sysinfo>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <os>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <type arch="x86_64" machine="q35">hvm</type>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <boot dev="hd"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <smbios mode="sysinfo"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   </os>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <features>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <acpi/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <apic/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <vmcoreinfo/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   </features>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <clock offset="utc">
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <timer name="pit" tickpolicy="delay"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <timer name="rtc" tickpolicy="catchup"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <timer name="hpet" present="no"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   </clock>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <cpu mode="host-model" match="exact">
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <topology sockets="1" cores="1" threads="1"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/830c7581-3555-41db-9818-0961fc151818_disk">
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       </source>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <target dev="vda" bus="virtio"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <disk type="network" device="cdrom">
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/830c7581-3555-41db-9818-0961fc151818_disk.config">
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       </source>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <target dev="sda" bus="sata"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <interface type="ethernet">
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <mac address="fa:16:3e:23:45:c8"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <driver name="vhost" rx_queue_size="512"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <mtu size="1442"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <target dev="tap5cd25b0e-b4"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <serial type="pty">
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <log file="/var/lib/nova/instances/830c7581-3555-41db-9818-0961fc151818/console.log" append="off"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </serial>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <graphics type="vnc" autoport="yes" listen="::0"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <video>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </video>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <input type="tablet" bus="usb"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <rng model="virtio">
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <backend model="random">/dev/urandom</backend>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="usb" index="0"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <memballoon model="virtio">
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <stats period="10"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </memballoon>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:39:23 compute-0 nova_compute[356901]: </domain>
Oct 11 02:39:23 compute-0 nova_compute[356901]:  _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7555
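[annotation] The block above is the complete libvirt domain XML that Nova's _get_guest_xml produced for instance 830c7581: an RBD-backed virtio root disk plus an RBD config-drive CD-ROM, both authenticating to the Ceph monitor at 192.168.122.100:6789 as the cephx user "openstack". As a rough illustration only (not Nova's actual code path, which also registers the libvirt secret and wires up lifecycle event callbacks), XML like this could be handed to libvirt with the Python bindings:

    import libvirt

    # domain.xml is assumed to hold the XML dumped above.
    with open('domain.xml') as f:
        xml = f.read()

    conn = libvirt.open('qemu:///system')   # local system hypervisor
    try:
        dom = conn.defineXML(xml)           # persist the definition
        dom.create()                        # start the guest
    finally:
        conn.close()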
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.279 2 DEBUG nova.compute.manager [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Preparing to wait for external event network-vif-plugged-5cd25b0e-b4c9-408f-b456-59127a046cde prepare_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:283
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.281 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Acquiring lock "830c7581-3555-41db-9818-0961fc151818-events" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.281 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lock "830c7581-3555-41db-9818-0961fc151818-events" acquired by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.282 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lock "830c7581-3555-41db-9818-0961fc151818-events" "released" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
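[annotation] The acquire/release pair above comes from oslo.concurrency's lockutils, which Nova uses to serialize external-event registration per instance under a lock named "<instance-uuid>-events". A minimal sketch of the same primitive, with the lock name copied from the log:

    from oslo_concurrency import lockutils

    # Decorator form, as used inside nova.compute.manager.
    @lockutils.synchronized('830c7581-3555-41db-9818-0961fc151818-events')
    def _create_or_get_event():
        pass  # body elided; the wrapper emits the acquire/release DEBUG lines above

    # Equivalent context-manager form.
    with lockutils.lock('830c7581-3555-41db-9818-0961fc151818-events'):
        pass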
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.283 2 DEBUG nova.virt.libvirt.vif [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] vif_type=ovs instance=Instance(access_ip_v4=1.1.1.1,access_ip_v6=::babe:dc0c:1602,architecture=None,auto_disk_config=True,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:38:58Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='tempest-ServersTestJSON-server-1595957609',display_name='tempest-ServersTestJSON-server-1595957609',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-serverstestjson-server-1595957609',id=7,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBMzf2DYXJJIoJOC2qLbU8VUi6wgx/7Zr7U9fH8e3911FZRdaYVaxRqYR9UTydqtzyDp6Ms2EBS9n6gBBFdtM1Rrxxpe1Vohtnbt7VcSxvjKDQrCZDRyrT/SPrEwf5mXxSQ==',key_name='tempest-keypair-1620851907',keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={hello='world'},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=PciDeviceList,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='56e45b830ec844e4802f14cd3e25bda2',ramdisk_id='',reservation_id='r-afe03zy1',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_hw_rng_model='virtio',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-ServersTestJSON-2138603110',owner_user_name='tempest-ServersTestJSON-2138603110-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:39:01Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='1b63c9bbae8845d99db73ca671aedcfc',uuid=830c7581-3555-41db-9818-0961fc151818,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "5cd25b0e-b4c9-408f-b456-59127a046cde", "address": "fa:16:3e:23:45:c8", "network": {"id": "b6521a4e-cfb9-4743-91c3-85402b5661d9", "bridge": "br-int", "label": "tempest-ServersTestJSON-1548228308-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "56e45b830ec844e4802f14cd3e25bda2", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5cd25b0e-b4", "ovs_interfaceid": "5cd25b0e-b4c9-408f-b456-59127a046cde", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} plug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:710
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.284 2 DEBUG nova.network.os_vif_util [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Converting VIF {"id": "5cd25b0e-b4c9-408f-b456-59127a046cde", "address": "fa:16:3e:23:45:c8", "network": {"id": "b6521a4e-cfb9-4743-91c3-85402b5661d9", "bridge": "br-int", "label": "tempest-ServersTestJSON-1548228308-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "56e45b830ec844e4802f14cd3e25bda2", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5cd25b0e-b4", "ovs_interfaceid": "5cd25b0e-b4c9-408f-b456-59127a046cde", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.285 2 DEBUG nova.network.os_vif_util [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:23:45:c8,bridge_name='br-int',has_traffic_filtering=True,id=5cd25b0e-b4c9-408f-b456-59127a046cde,network=Network(b6521a4e-cfb9-4743-91c3-85402b5661d9),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap5cd25b0e-b4') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.285 2 DEBUG os_vif [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Plugging vif VIFOpenVSwitch(active=False,address=fa:16:3e:23:45:c8,bridge_name='br-int',has_traffic_filtering=True,id=5cd25b0e-b4c9-408f-b456-59127a046cde,network=Network(b6521a4e-cfb9-4743-91c3-85402b5661d9),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap5cd25b0e-b4') plug /usr/lib/python3.9/site-packages/os_vif/__init__.py:76
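[annotation] Nova converts its internal VIF dict into an os-vif VIFOpenVSwitch object and hands it to the os_vif library, which dispatches to the "ovs" plugin. A minimal sketch of that public API, with field values copied from the log (the instance name 'instance-00000007' is an assumption inferred from id=7 in the Instance record above):

    import os_vif
    from os_vif.objects import instance_info, network, vif

    os_vif.initialize()  # load plugins; the 'ovs' plugin handles this VIF

    port = vif.VIFOpenVSwitch(
        id='5cd25b0e-b4c9-408f-b456-59127a046cde',
        address='fa:16:3e:23:45:c8',
        vif_name='tap5cd25b0e-b4',
        bridge_name='br-int',
        network=network.Network(id='b6521a4e-cfb9-4743-91c3-85402b5661d9',
                                bridge='br-int'),
        port_profile=vif.VIFPortProfileOpenVSwitch(
            interface_id='5cd25b0e-b4c9-408f-b456-59127a046cde'))

    inst = instance_info.InstanceInfo(
        uuid='830c7581-3555-41db-9818-0961fc151818',
        name='instance-00000007')  # name assumed, see lead-in

    os_vif.plug(port, inst)  # produces the "Plugging vif ..." lines above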
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.287 2 DEBUG nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] End _get_guest_xml xml=<domain type="kvm">
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <uuid>49d4f343-d1b4-4594-96d2-0777a5ce8581</uuid>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <name>instance-00000009</name>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <memory>131072</memory>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <vcpu>1</vcpu>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <metadata>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.1">
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <nova:package version="27.5.2-0.20250829104910.6f8decf.el9"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <nova:name>tempest-AttachInterfacesUnderV243Test-server-402973055</nova:name>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <nova:creationTime>2025-10-11 02:39:21</nova:creationTime>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <nova:flavor name="m1.nano">
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <nova:memory>128</nova:memory>
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <nova:disk>1</nova:disk>
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <nova:swap>0</nova:swap>
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <nova:ephemeral>0</nova:ephemeral>
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <nova:vcpus>1</nova:vcpus>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       </nova:flavor>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <nova:owner>
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <nova:user uuid="9a1414c7b75246f596af7745610a00a4">tempest-AttachInterfacesUnderV243Test-1568711783-project-member</nova:user>
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <nova:project uuid="d89911bf2931487c98dc0f44a8b67bca">tempest-AttachInterfacesUnderV243Test-1568711783</nova:project>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       </nova:owner>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <nova:root type="image" uuid="72f37f2e-4296-450e-9a12-10717f4ac7dc"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <nova:ports>
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <nova:port uuid="4076fda2-be62-4c52-b073-8bf26574dee1">
Oct 11 02:39:23 compute-0 nova_compute[356901]:           <nova:ip type="fixed" address="10.100.0.14" ipVersion="4"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:         </nova:port>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       </nova:ports>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </nova:instance>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   </metadata>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <sysinfo type="smbios">
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <system>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <entry name="manufacturer">RDO</entry>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <entry name="product">OpenStack Compute</entry>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <entry name="version">27.5.2-0.20250829104910.6f8decf.el9</entry>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <entry name="serial">49d4f343-d1b4-4594-96d2-0777a5ce8581</entry>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <entry name="uuid">49d4f343-d1b4-4594-96d2-0777a5ce8581</entry>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <entry name="family">Virtual Machine</entry>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </system>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   </sysinfo>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <os>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <type arch="x86_64" machine="q35">hvm</type>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <boot dev="hd"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <smbios mode="sysinfo"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   </os>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <features>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <acpi/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <apic/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <vmcoreinfo/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   </features>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <clock offset="utc">
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <timer name="pit" tickpolicy="delay"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <timer name="rtc" tickpolicy="catchup"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <timer name="hpet" present="no"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   </clock>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <cpu mode="host-model" match="exact">
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <topology sockets="1" cores="1" threads="1"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/49d4f343-d1b4-4594-96d2-0777a5ce8581_disk">
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       </source>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <target dev="vda" bus="virtio"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <disk type="network" device="cdrom">
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/49d4f343-d1b4-4594-96d2-0777a5ce8581_disk.config">
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       </source>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:39:23 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <target dev="sda" bus="sata"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <interface type="ethernet">
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <mac address="fa:16:3e:c5:9b:82"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <driver name="vhost" rx_queue_size="512"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <mtu size="1442"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <target dev="tap4076fda2-be"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <serial type="pty">
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <log file="/var/lib/nova/instances/49d4f343-d1b4-4594-96d2-0777a5ce8581/console.log" append="off"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </serial>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <graphics type="vnc" autoport="yes" listen="::0"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <video>
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </video>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <input type="tablet" bus="usb"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <rng model="virtio">
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <backend model="random">/dev/urandom</backend>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <controller type="usb" index="0"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     <memballoon model="virtio">
Oct 11 02:39:23 compute-0 nova_compute[356901]:       <stats period="10"/>
Oct 11 02:39:23 compute-0 nova_compute[356901]:     </memballoon>
Oct 11 02:39:23 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:39:23 compute-0 nova_compute[356901]: </domain>
Oct 11 02:39:23 compute-0 nova_compute[356901]:  _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7555
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.288 2 DEBUG nova.compute.manager [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Preparing to wait for external event network-vif-plugged-4076fda2-be62-4c52-b073-8bf26574dee1 prepare_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:283
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.288 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Acquiring lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.288 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" acquired by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.288 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" "released" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.289 2 DEBUG nova.virt.libvirt.vif [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:39:01Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='tempest-AttachInterfacesUnderV243Test-server-402973055',display_name='tempest-AttachInterfacesUnderV243Test-server-402973055',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-attachinterfacesunderv243test-server-402973055',id=9,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBKe0LMc8LnBTAISBwhdLZZycX6z/Wtxh5iIrBfzvih3YfC2DgfsCYmIjzIMA1Bmi2PftRsJD/817XJgtfkV0jIbJQ/nBV4X5kWCjFiLmsxPozdtF2YLrErDo+eZfs6cn/g==',key_name='tempest-keypair-1693383324',keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=PciDeviceList,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='d89911bf2931487c98dc0f44a8b67bca',ramdisk_id='',reservation_id='r-9ixcygjt',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='reader,member',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_hw_rng_model='virtio',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-AttachInterfacesUnderV243Test-1568711783',owner_user_name='tempest-AttachInterfacesUnderV243Test-1568711783-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:39:04Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='9a1414c7b75246f596af7745610a00a4',uuid=49d4f343-d1b4-4594-96d2-0777a5ce8581,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "4076fda2-be62-4c52-b073-8bf26574dee1", "address": "fa:16:3e:c5:9b:82", "network": {"id": "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2", "bridge": "br-int", "label": "tempest-AttachInterfacesUnderV243Test-139612684-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d89911bf2931487c98dc0f44a8b67bca", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap4076fda2-be", "ovs_interfaceid": "4076fda2-be62-4c52-b073-8bf26574dee1", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} plug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:710
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.289 2 DEBUG nova.network.os_vif_util [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Converting VIF {"id": "4076fda2-be62-4c52-b073-8bf26574dee1", "address": "fa:16:3e:c5:9b:82", "network": {"id": "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2", "bridge": "br-int", "label": "tempest-AttachInterfacesUnderV243Test-139612684-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d89911bf2931487c98dc0f44a8b67bca", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap4076fda2-be", "ovs_interfaceid": "4076fda2-be62-4c52-b073-8bf26574dee1", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.290 2 DEBUG nova.network.os_vif_util [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:c5:9b:82,bridge_name='br-int',has_traffic_filtering=True,id=4076fda2-be62-4c52-b073-8bf26574dee1,network=Network(eb08ca1c-c05f-4da5-9518-fb3b2d958ee2),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap4076fda2-be') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.290 2 DEBUG os_vif [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Plugging vif VIFOpenVSwitch(active=False,address=fa:16:3e:c5:9b:82,bridge_name='br-int',has_traffic_filtering=True,id=4076fda2-be62-4c52-b073-8bf26574dee1,network=Network(eb08ca1c-c05f-4da5-9518-fb3b2d958ee2),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap4076fda2-be') plug /usr/lib/python3.9/site-packages/os_vif/__init__.py:76
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.291 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.291 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddBridgeCommand(_result=None, name=br-int, may_exist=True, datapath_type=system) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.292 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.293 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.295 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddBridgeCommand(_result=None, name=br-int, may_exist=True, datapath_type=system) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.296 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.297 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.297 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tap5cd25b0e-b4, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.298 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Interface, record=tap5cd25b0e-b4, col_values=(('external_ids', {'iface-id': '5cd25b0e-b4c9-408f-b456-59127a046cde', 'iface-status': 'active', 'attached-mac': 'fa:16:3e:23:45:c8', 'vm-uuid': '830c7581-3555-41db-9818-0961fc151818'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
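[annotation] The os-vif ovs plugin drives the local ovsdb-server through ovsdbapp: a single transaction adds the tap port to br-int and stamps the Interface row's external_ids so ovn-controller can match the interface to its logical port. A sketch of the same two commands through ovsdbapp's public API (the socket path is an assumption; os-vif reads its connection string from config):

    from ovsdbapp.backend.ovs_idl import connection
    from ovsdbapp.schema.open_vswitch import impl_idl

    conn = connection.Connection(
        idl=connection.OvsdbIdl.from_server('unix:/run/openvswitch/db.sock',
                                            'Open_vSwitch'),
        timeout=10)
    api = impl_idl.OvsdbIdl(conn)

    # One transaction reproducing the AddPortCommand/DbSetCommand pair above.
    with api.transaction(check_error=True) as txn:
        txn.add(api.add_port('br-int', 'tap5cd25b0e-b4', may_exist=True))
        txn.add(api.db_set(
            'Interface', 'tap5cd25b0e-b4',
            ('external_ids',
             {'iface-id': '5cd25b0e-b4c9-408f-b456-59127a046cde',
              'iface-status': 'active',
              'attached-mac': 'fa:16:3e:23:45:c8',
              'vm-uuid': '830c7581-3555-41db-9818-0961fc151818'})))

Because may_exist=True makes these commands idempotent against existing rows, the earlier AddBridgeCommand runs for br-int simply report "Transaction caused no change".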
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.300 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:23 compute-0 NetworkManager[44908]: <info>  [1760150363.3011] manager: (tap5cd25b0e-b4): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/40)
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.302 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:39:23 compute-0 podman[452391]: 2025-10-11 02:39:23.305630788 +0000 UTC m=+0.186846491 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3)
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.320 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.321 2 INFO os_vif [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Successfully plugged vif VIFOpenVSwitch(active=False,address=fa:16:3e:23:45:c8,bridge_name='br-int',has_traffic_filtering=True,id=5cd25b0e-b4c9-408f-b456-59127a046cde,network=Network(b6521a4e-cfb9-4743-91c3-85402b5661d9),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap5cd25b0e-b4')
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.322 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.323 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tap4076fda2-be, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.323 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Interface, record=tap4076fda2-be, col_values=(('external_ids', {'iface-id': '4076fda2-be62-4c52-b073-8bf26574dee1', 'iface-status': 'active', 'attached-mac': 'fa:16:3e:c5:9b:82', 'vm-uuid': '49d4f343-d1b4-4594-96d2-0777a5ce8581'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:23 compute-0 NetworkManager[44908]: <info>  [1760150363.3256] manager: (tap4076fda2-be): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/41)
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.327 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.344 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.345 2 INFO os_vif [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Successfully plugged vif VIFOpenVSwitch(active=False,address=fa:16:3e:c5:9b:82,bridge_name='br-int',has_traffic_filtering=True,id=4076fda2-be62-4c52-b073-8bf26574dee1,network=Network(eb08ca1c-c05f-4da5-9518-fb3b2d958ee2),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap4076fda2-be')
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.422 2 DEBUG nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] No BDM found with device name vda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.423 2 DEBUG nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] No BDM found with device name sda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.423 2 DEBUG nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] No VIF found with MAC fa:16:3e:23:45:c8, not building metadata _build_interface_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12092
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.423 2 INFO nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Using config drive
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.493 2 DEBUG nova.storage.rbd_utils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] rbd image 830c7581-3555-41db-9818-0961fc151818_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.586 2 DEBUG nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] No BDM found with device name vda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.587 2 DEBUG nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] No BDM found with device name sda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.587 2 DEBUG nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] No VIF found with MAC fa:16:3e:c5:9b:82, not building metadata _build_interface_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12092
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.589 2 INFO nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Using config drive
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.632 2 DEBUG nova.storage.rbd_utils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] rbd image 49d4f343-d1b4-4594-96d2-0777a5ce8581_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.647 2 INFO nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Creating config drive at /var/lib/nova/instances/ee9601c7-f562-449e-9f5c-5e1355f3c130/disk.config
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.655 2 DEBUG oslo_concurrency.processutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Running cmd (subprocess): /usr/bin/mkisofs -o /var/lib/nova/instances/ee9601c7-f562-449e-9f5c-5e1355f3c130/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpjrddebw1 execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.797 2 DEBUG oslo_concurrency.processutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] CMD "/usr/bin/mkisofs -o /var/lib/nova/instances/ee9601c7-f562-449e-9f5c-5e1355f3c130/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpjrddebw1" returned: 0 in 0.142s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
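[annotation] Config drives are built locally as ISO 9660 images with mkisofs, and the command runs through oslo.concurrency's processutils wrapper, which emits the paired "Running cmd" / "returned: 0" lines. The equivalent call, with arguments copied from the log entry above:

    from oslo_concurrency import processutils

    out, err = processutils.execute(
        '/usr/bin/mkisofs',
        '-o', '/var/lib/nova/instances/ee9601c7-f562-449e-9f5c-5e1355f3c130/disk.config',
        '-ldots', '-allow-lowercase', '-allow-multidot', '-l',
        '-publisher', 'OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9',
        '-quiet', '-J', '-r', '-V', 'config-2',
        '/tmp/tmpjrddebw1')  # temp dir holding the generated metadata tree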
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.849 2 DEBUG nova.storage.rbd_utils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] rbd image ee9601c7-f562-449e-9f5c-5e1355f3c130_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:23 compute-0 nova_compute[356901]: 2025-10-11 02:39:23.868 2 DEBUG oslo_concurrency.processutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/ee9601c7-f562-449e-9f5c-5e1355f3c130/disk.config ee9601c7-f562-449e-9f5c-5e1355f3c130_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:24 compute-0 nova_compute[356901]: 2025-10-11 02:39:24.051 2 INFO nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Creating config drive at /var/lib/nova/instances/830c7581-3555-41db-9818-0961fc151818/disk.config
Oct 11 02:39:24 compute-0 nova_compute[356901]: 2025-10-11 02:39:24.149 2 DEBUG oslo_concurrency.processutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Running cmd (subprocess): /usr/bin/mkisofs -o /var/lib/nova/instances/830c7581-3555-41db-9818-0961fc151818/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpeq4ywa4_ execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:24 compute-0 nova_compute[356901]: 2025-10-11 02:39:24.207 2 INFO nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Creating config drive at /var/lib/nova/instances/49d4f343-d1b4-4594-96d2-0777a5ce8581/disk.config
Oct 11 02:39:24 compute-0 nova_compute[356901]: 2025-10-11 02:39:24.222 2 DEBUG oslo_concurrency.processutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Running cmd (subprocess): /usr/bin/mkisofs -o /var/lib/nova/instances/49d4f343-d1b4-4594-96d2-0777a5ce8581/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpsemvkc6u execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:24 compute-0 nova_compute[356901]: 2025-10-11 02:39:24.323 2 DEBUG oslo_concurrency.processutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] CMD "/usr/bin/mkisofs -o /var/lib/nova/instances/830c7581-3555-41db-9818-0961fc151818/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpeq4ywa4_" returned: 0 in 0.174s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:24 compute-0 nova_compute[356901]: 2025-10-11 02:39:24.373 2 DEBUG nova.storage.rbd_utils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] rbd image 830c7581-3555-41db-9818-0961fc151818_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:24 compute-0 nova_compute[356901]: 2025-10-11 02:39:24.382 2 DEBUG oslo_concurrency.processutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/830c7581-3555-41db-9818-0961fc151818/disk.config 830c7581-3555-41db-9818-0961fc151818_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:24 compute-0 nova_compute[356901]: 2025-10-11 02:39:24.405 2 DEBUG oslo_concurrency.processutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] CMD "/usr/bin/mkisofs -o /var/lib/nova/instances/49d4f343-d1b4-4594-96d2-0777a5ce8581/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpsemvkc6u" returned: 0 in 0.183s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:24 compute-0 nova_compute[356901]: 2025-10-11 02:39:24.457 2 DEBUG nova.storage.rbd_utils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] rbd image 49d4f343-d1b4-4594-96d2-0777a5ce8581_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:24 compute-0 nova_compute[356901]: 2025-10-11 02:39:24.467 2 DEBUG oslo_concurrency.processutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/49d4f343-d1b4-4594-96d2-0777a5ce8581/disk.config 49d4f343-d1b4-4594-96d2-0777a5ce8581_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
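[annotation] Since this deployment keeps instance disks in Ceph, each freshly built ISO is pushed into the "vms" pool with rbd import (format 2, as client.openstack) and the local copy is deleted afterwards, as the INFO lines below confirm. A rough equivalent using the python-rbd bindings instead of the CLI (a sketch only; Nova shells out exactly as logged):

    import rados
    import rbd

    SRC = '/var/lib/nova/instances/49d4f343-d1b4-4594-96d2-0777a5ce8581/disk.config'
    DST = '49d4f343-d1b4-4594-96d2-0777a5ce8581_disk.config'

    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf', rados_id='openstack')
    cluster.connect()
    try:
        ioctx = cluster.open_ioctx('vms')      # same pool as '--pool vms'
        try:
            with open(SRC, 'rb') as f:
                data = f.read()                # config drives are tiny ISOs
            rbd.RBD().create(ioctx, DST, len(data), old_format=False)  # format 2
            with rbd.Image(ioctx, DST) as image:
                image.write(data, 0)
        finally:
            ioctx.close()
    finally:
        cluster.shutdown()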
Oct 11 02:39:24 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/238781777' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:24 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/4208017175' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1851: 321 pgs: 321 active+clean; 304 MiB data, 389 MiB used, 60 GiB / 60 GiB avail; 65 KiB/s rd, 7.1 MiB/s wr, 104 op/s
Oct 11 02:39:25 compute-0 nova_compute[356901]: 2025-10-11 02:39:25.004 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:25 compute-0 nova_compute[356901]: 2025-10-11 02:39:25.416 2 DEBUG oslo_concurrency.processutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.config f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 2.206s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:25 compute-0 nova_compute[356901]: 2025-10-11 02:39:25.418 2 INFO nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Deleting local config drive /var/lib/nova/instances/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.config because it was imported into RBD.
Oct 11 02:39:25 compute-0 systemd[1]: Starting libvirt secret daemon...
Oct 11 02:39:25 compute-0 nova_compute[356901]: 2025-10-11 02:39:25.464 2 DEBUG oslo_concurrency.processutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/ee9601c7-f562-449e-9f5c-5e1355f3c130/disk.config ee9601c7-f562-449e-9f5c-5e1355f3c130_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 1.596s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:25 compute-0 nova_compute[356901]: 2025-10-11 02:39:25.466 2 INFO nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Deleting local config drive /var/lib/nova/instances/ee9601c7-f562-449e-9f5c-5e1355f3c130/disk.config because it was imported into RBD.
Oct 11 02:39:25 compute-0 systemd[1]: Started libvirt secret daemon.
Oct 11 02:39:25 compute-0 nova_compute[356901]: 2025-10-11 02:39:25.515 2 DEBUG oslo_concurrency.processutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/830c7581-3555-41db-9818-0961fc151818/disk.config 830c7581-3555-41db-9818-0961fc151818_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 1.134s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:25 compute-0 nova_compute[356901]: 2025-10-11 02:39:25.516 2 INFO nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Deleting local config drive /var/lib/nova/instances/830c7581-3555-41db-9818-0961fc151818/disk.config because it was imported into RBD.
Oct 11 02:39:25 compute-0 nova_compute[356901]: 2025-10-11 02:39:25.527 2 DEBUG oslo_concurrency.processutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/49d4f343-d1b4-4594-96d2-0777a5ce8581/disk.config 49d4f343-d1b4-4594-96d2-0777a5ce8581_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 1.060s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:25 compute-0 nova_compute[356901]: 2025-10-11 02:39:25.528 2 INFO nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Deleting local config drive /var/lib/nova/instances/49d4f343-d1b4-4594-96d2-0777a5ce8581/disk.config because it was imported into RBD.
Oct 11 02:39:25 compute-0 ceph-mon[191930]: pgmap v1851: 321 pgs: 321 active+clean; 304 MiB data, 389 MiB used, 60 GiB / 60 GiB avail; 65 KiB/s rd, 7.1 MiB/s wr, 104 op/s
Oct 11 02:39:25 compute-0 NetworkManager[44908]: <info>  [1760150365.5880] manager: (tapd7c4233c-f7): new Tun device (/org/freedesktop/NetworkManager/Devices/42)
Oct 11 02:39:25 compute-0 kernel: tapd7c4233c-f7: entered promiscuous mode
Oct 11 02:39:25 compute-0 NetworkManager[44908]: <info>  [1760150365.5963] manager: (tap887c6cbc-2d): new Tun device (/org/freedesktop/NetworkManager/Devices/43)
Oct 11 02:39:25 compute-0 kernel: tap887c6cbc-2d: entered promiscuous mode
Oct 11 02:39:25 compute-0 nova_compute[356901]: 2025-10-11 02:39:25.610 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:25 compute-0 ovn_controller[88370]: 2025-10-11T02:39:25Z|00075|binding|INFO|Claiming lport 887c6cbc-2d8f-44c3-959f-4c732f5d4040 for this chassis.
Oct 11 02:39:25 compute-0 ovn_controller[88370]: 2025-10-11T02:39:25Z|00076|binding|INFO|887c6cbc-2d8f-44c3-959f-4c732f5d4040: Claiming fa:16:3e:3f:e1:d3 10.100.0.9
Oct 11 02:39:25 compute-0 ovn_controller[88370]: 2025-10-11T02:39:25Z|00077|binding|INFO|Claiming lport d7c4233c-f79b-4f32-b896-c36d4abb7d26 for this chassis.
Oct 11 02:39:25 compute-0 ovn_controller[88370]: 2025-10-11T02:39:25Z|00078|binding|INFO|d7c4233c-f79b-4f32-b896-c36d4abb7d26: Claiming fa:16:3e:8d:b8:dd 10.100.0.4
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.622 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:8d:b8:dd 10.100.0.4'], port_security=['fa:16:3e:8d:b8:dd 10.100.0.4'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.4/28', 'neutron:device_id': 'f5eb6746-7f42-4fa4-8e26-cda5cfa0c765', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-b4d521f7-7729-40fd-aa58-7126044eb166', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': 'dba4f6e51d33430ebf5566af53f6fbcc', 'neutron:revision_number': '2', 'neutron:security_group_ids': '82e011ad-d874-487b-b398-e13313bfa497', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=283b08c2-109a-4649-a6db-2339ca56efb4, chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=d7c4233c-f79b-4f32-b896-c36d4abb7d26) old=Port_Binding(chassis=[]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.624 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:3f:e1:d3 10.100.0.9'], port_security=['fa:16:3e:3f:e1:d3 10.100.0.9'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.9/28', 'neutron:device_id': 'ee9601c7-f562-449e-9f5c-5e1355f3c130', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-3e4cd915-df9e-44c4-860d-c0ba25a21e79', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': '5d5e8b42281d410bb45cb6c2e8e3fcbd', 'neutron:revision_number': '2', 'neutron:security_group_ids': '5f6ab5ce-82e9-464a-b706-7e411b991d5a', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=e1fd6e13-0520-45f1-aea0-ba215ead7c6e, chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=887c6cbc-2d8f-44c3-959f-4c732f5d4040) old=Port_Binding(chassis=[]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.625 286362 INFO neutron.agent.ovn.metadata.agent [-] Port d7c4233c-f79b-4f32-b896-c36d4abb7d26 in datapath b4d521f7-7729-40fd-aa58-7126044eb166 bound to our chassis
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.629 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network b4d521f7-7729-40fd-aa58-7126044eb166
Oct 11 02:39:25 compute-0 systemd-udevd[452711]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:39:25 compute-0 systemd-udevd[452712]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.647 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[238da9e5-b1c3-4f07-90f4-534151d6a82d]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.648 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Creating VETH tapb4d521f7-71 in ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166 namespace provision_datapath /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:665
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.651 422955 DEBUG neutron.privileged.agent.linux.ip_lib [-] Interface tapb4d521f7-70 not found in namespace None get_link_id /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:204
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.651 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[4bd5cd0d-82df-4fa6-b633-b779d51b5a4b]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.652 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[6b6151be-860d-4189-bda2-7f762396974b]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:25 compute-0 NetworkManager[44908]: <info>  [1760150365.6588] device (tapd7c4233c-f7): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 02:39:25 compute-0 NetworkManager[44908]: <info>  [1760150365.6596] device (tapd7c4233c-f7): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Oct 11 02:39:25 compute-0 NetworkManager[44908]: <info>  [1760150365.6620] device (tap887c6cbc-2d): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 02:39:25 compute-0 NetworkManager[44908]: <info>  [1760150365.6627] device (tap887c6cbc-2d): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Oct 11 02:39:25 compute-0 ovn_controller[88370]: 2025-10-11T02:39:25Z|00079|binding|INFO|Setting lport 887c6cbc-2d8f-44c3-959f-4c732f5d4040 ovn-installed in OVS
Oct 11 02:39:25 compute-0 ovn_controller[88370]: 2025-10-11T02:39:25Z|00080|binding|INFO|Setting lport 887c6cbc-2d8f-44c3-959f-4c732f5d4040 up in Southbound
Oct 11 02:39:25 compute-0 ovn_controller[88370]: 2025-10-11T02:39:25Z|00081|binding|INFO|Setting lport d7c4233c-f79b-4f32-b896-c36d4abb7d26 ovn-installed in OVS
Oct 11 02:39:25 compute-0 ovn_controller[88370]: 2025-10-11T02:39:25Z|00082|binding|INFO|Setting lport d7c4233c-f79b-4f32-b896-c36d4abb7d26 up in Southbound
Oct 11 02:39:25 compute-0 nova_compute[356901]: 2025-10-11 02:39:25.673 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:25 compute-0 kernel: tap5cd25b0e-b4: entered promiscuous mode
Oct 11 02:39:25 compute-0 NetworkManager[44908]: <info>  [1760150365.6849] manager: (tap5cd25b0e-b4): new Tun device (/org/freedesktop/NetworkManager/Devices/44)
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.683 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[9f7882af-84c8-4859-a585-96fb471697ce]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:25 compute-0 systemd-udevd[452723]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:39:25 compute-0 ovn_controller[88370]: 2025-10-11T02:39:25Z|00083|binding|INFO|Claiming lport 5cd25b0e-b4c9-408f-b456-59127a046cde for this chassis.
Oct 11 02:39:25 compute-0 ovn_controller[88370]: 2025-10-11T02:39:25Z|00084|binding|INFO|5cd25b0e-b4c9-408f-b456-59127a046cde: Claiming fa:16:3e:23:45:c8 10.100.0.14
Oct 11 02:39:25 compute-0 nova_compute[356901]: 2025-10-11 02:39:25.695 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:25 compute-0 nova_compute[356901]: 2025-10-11 02:39:25.697 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.704 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:23:45:c8 10.100.0.14'], port_security=['fa:16:3e:23:45:c8 10.100.0.14'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.14/28', 'neutron:device_id': '830c7581-3555-41db-9818-0961fc151818', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-b6521a4e-cfb9-4743-91c3-85402b5661d9', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': '56e45b830ec844e4802f14cd3e25bda2', 'neutron:revision_number': '2', 'neutron:security_group_ids': 'bbc7e081-070a-4369-95d6-91ae54e98ae0', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=35e0b6d0-7bf5-489c-89bb-c8a2aaebd2e8, chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=5cd25b0e-b4c9-408f-b456-59127a046cde) old=Port_Binding(chassis=[]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:39:25 compute-0 NetworkManager[44908]: <info>  [1760150365.7089] device (tap5cd25b0e-b4): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 02:39:25 compute-0 NetworkManager[44908]: <info>  [1760150365.7100] device (tap5cd25b0e-b4): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Oct 11 02:39:25 compute-0 systemd-machined[137586]: New machine qemu-6-instance-00000006.
Oct 11 02:39:25 compute-0 NetworkManager[44908]: <info>  [1760150365.7220] manager: (tap4076fda2-be): new Tun device (/org/freedesktop/NetworkManager/Devices/45)
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.722 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[b336c361-86bf-4daf-b7a3-fdb4da4adf0f]: (4, ('net.ipv4.conf.all.promote_secondaries = 1\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:25 compute-0 kernel: tap4076fda2-be: entered promiscuous mode
Oct 11 02:39:25 compute-0 ovn_controller[88370]: 2025-10-11T02:39:25Z|00085|binding|INFO|Setting lport 5cd25b0e-b4c9-408f-b456-59127a046cde ovn-installed in OVS
Oct 11 02:39:25 compute-0 ovn_controller[88370]: 2025-10-11T02:39:25Z|00086|binding|INFO|Setting lport 5cd25b0e-b4c9-408f-b456-59127a046cde up in Southbound
Oct 11 02:39:25 compute-0 systemd[1]: Started Virtual Machine qemu-6-instance-00000006.
Oct 11 02:39:25 compute-0 ovn_controller[88370]: 2025-10-11T02:39:25Z|00087|if_status|INFO|Not updating pb chassis for 4076fda2-be62-4c52-b073-8bf26574dee1 now as sb is readonly
Oct 11 02:39:25 compute-0 nova_compute[356901]: 2025-10-11 02:39:25.733 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:25 compute-0 ovn_controller[88370]: 2025-10-11T02:39:25Z|00088|binding|INFO|Claiming lport 4076fda2-be62-4c52-b073-8bf26574dee1 for this chassis.
Oct 11 02:39:25 compute-0 ovn_controller[88370]: 2025-10-11T02:39:25Z|00089|binding|INFO|4076fda2-be62-4c52-b073-8bf26574dee1: Claiming fa:16:3e:c5:9b:82 10.100.0.14
Oct 11 02:39:25 compute-0 NetworkManager[44908]: <info>  [1760150365.7437] device (tap4076fda2-be): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 02:39:25 compute-0 NetworkManager[44908]: <info>  [1760150365.7445] device (tap4076fda2-be): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.743 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:c5:9b:82 10.100.0.14'], port_security=['fa:16:3e:c5:9b:82 10.100.0.14'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.14/28', 'neutron:device_id': '49d4f343-d1b4-4594-96d2-0777a5ce8581', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': 'd89911bf2931487c98dc0f44a8b67bca', 'neutron:revision_number': '2', 'neutron:security_group_ids': '64f0fd08-8b1e-4dfa-b509-25fc417ccbb7', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=7609ba36-9ccd-4785-a05c-b11167a233de, chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=4076fda2-be62-4c52-b073-8bf26574dee1) old=Port_Binding(chassis=[]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:39:25 compute-0 ovn_controller[88370]: 2025-10-11T02:39:25Z|00090|binding|INFO|Setting lport 4076fda2-be62-4c52-b073-8bf26574dee1 ovn-installed in OVS
Oct 11 02:39:25 compute-0 ovn_controller[88370]: 2025-10-11T02:39:25Z|00091|binding|INFO|Setting lport 4076fda2-be62-4c52-b073-8bf26574dee1 up in Southbound
Oct 11 02:39:25 compute-0 nova_compute[356901]: 2025-10-11 02:39:25.748 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:25 compute-0 systemd-machined[137586]: New machine qemu-7-instance-00000008.
Oct 11 02:39:25 compute-0 systemd[1]: Started Virtual Machine qemu-7-instance-00000008.
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.764 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[57fed9b9-6afa-4d6c-a8c4-3a5bf243195e]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:25 compute-0 nova_compute[356901]: 2025-10-11 02:39:25.766 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.772 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[c686854e-ab3c-470d-be31-a49f8856809d]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:25 compute-0 NetworkManager[44908]: <info>  [1760150365.7754] manager: (tapb4d521f7-70): new Veth device (/org/freedesktop/NetworkManager/Devices/46)
Oct 11 02:39:25 compute-0 systemd-machined[137586]: New machine qemu-8-instance-00000007.
Oct 11 02:39:25 compute-0 systemd[1]: Started Virtual Machine qemu-8-instance-00000007.
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.811 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[972c26b8-1ea1-44cb-a292-5c256dce622d]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.814 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[95fea63b-6d63-4a26-a24c-602c71f81708]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:25 compute-0 systemd-machined[137586]: New machine qemu-9-instance-00000009.
Oct 11 02:39:25 compute-0 NetworkManager[44908]: <info>  [1760150365.8438] device (tapb4d521f7-70): carrier: link connected
Oct 11 02:39:25 compute-0 systemd[1]: Started Virtual Machine qemu-9-instance-00000009.
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.848 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[2a656526-50dc-4cb5-bca4-570ea9dc171f]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.894 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[1877022b-f180-4ccb-a14f-6a2045c5f464]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapb4d521f7-71'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:9e:43:57'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 26], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 671357, 'reachable_time': 18296, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 452783, 'error': None, 'target': 'ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.918 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[66fc64c6-da7a-4444-8156-c48afc9453bb]: (4, ({'family': 10, 'prefixlen': 64, 'flags': 192, 'scope': 253, 'index': 2, 'attrs': [['IFA_ADDRESS', 'fe80::f816:3eff:fe9e:4357'], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 671357, 'tstamp': 671357}], ['IFA_FLAGS', 192]], 'header': {'length': 72, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 452790, 'error': None, 'target': 'ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'},)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.946 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[499d264b-2f07-45a7-abd0-d4f45ad48a24]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapb4d521f7-71'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:9e:43:57'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 26], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 671357, 'reachable_time': 18296, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 0, 'sequence_number': 255, 'pid': 452794, 'error': None, 'target': 'ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:25 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:25.994 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[a887511d-7aa7-43d3-b182-5e777af58290]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:26 compute-0 systemd[1]: Starting libvirt proxy daemon...
Oct 11 02:39:26 compute-0 systemd[1]: Started libvirt proxy daemon.
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.085 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[3f271f21-fb99-4383-a5ad-242ab956d3da]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.086 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapb4d521f7-70, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.087 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.087 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapb4d521f7-70, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:26 compute-0 kernel: tapb4d521f7-70: entered promiscuous mode
Oct 11 02:39:26 compute-0 nova_compute[356901]: 2025-10-11 02:39:26.089 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:26 compute-0 NetworkManager[44908]: <info>  [1760150366.0911] manager: (tapb4d521f7-70): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/47)
Oct 11 02:39:26 compute-0 nova_compute[356901]: 2025-10-11 02:39:26.093 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.096 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tapb4d521f7-70, col_values=(('external_ids', {'iface-id': 'aa37c6ed-d2db-4ed4-b1c9-cfd071cfd96a'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:26 compute-0 nova_compute[356901]: 2025-10-11 02:39:26.099 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:26 compute-0 ovn_controller[88370]: 2025-10-11T02:39:26Z|00092|binding|INFO|Releasing lport aa37c6ed-d2db-4ed4-b1c9-cfd071cfd96a from this chassis (sb_readonly=0)
Oct 11 02:39:26 compute-0 nova_compute[356901]: 2025-10-11 02:39:26.121 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.122 286362 DEBUG neutron.agent.linux.utils [-] Unable to access /var/lib/neutron/external/pids/b4d521f7-7729-40fd-aa58-7126044eb166.pid.haproxy; Error: [Errno 2] No such file or directory: '/var/lib/neutron/external/pids/b4d521f7-7729-40fd-aa58-7126044eb166.pid.haproxy' get_value_from_file /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:252
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.125 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[3f5566b6-2c46-48e0-81ff-af9815ecdfcf]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.126 286362 DEBUG neutron.agent.ovn.metadata.driver [-] haproxy_cfg = 
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: global
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     log         /dev/log local0 debug
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     log-tag     haproxy-metadata-proxy-b4d521f7-7729-40fd-aa58-7126044eb166
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     user        root
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     group       root
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     maxconn     1024
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     pidfile     /var/lib/neutron/external/pids/b4d521f7-7729-40fd-aa58-7126044eb166.pid.haproxy
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     daemon
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: defaults
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     log global
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     mode http
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     option httplog
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     option dontlognull
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     option http-server-close
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     option forwardfor
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     retries                 3
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     timeout http-request    30s
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     timeout connect         30s
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     timeout client          32s
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     timeout server          32s
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     timeout http-keep-alive 30s
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: listen listener
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     bind 169.254.169.254:80
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     server metadata /var/lib/neutron/metadata_proxy
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:     http-request add-header X-OVN-Network-ID b4d521f7-7729-40fd-aa58-7126044eb166
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]:  create_config_file /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/driver.py:107
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.127 286362 DEBUG neutron.agent.linux.utils [-] Running command: ['sudo', 'neutron-rootwrap', '/etc/neutron/rootwrap.conf', 'ip', 'netns', 'exec', 'ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166', 'env', 'PROCESS_TAG=haproxy-b4d521f7-7729-40fd-aa58-7126044eb166', 'haproxy', '-f', '/var/lib/neutron/ovn-metadata-proxy/b4d521f7-7729-40fd-aa58-7126044eb166.conf'] create_process /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:84
Oct 11 02:39:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:39:26 compute-0 nova_compute[356901]: 2025-10-11 02:39:26.256 2 DEBUG nova.compute.manager [req-ff39a1d0-3db8-45c4-a402-0b9ae3830881 req-f451f81b-2315-4f9c-b470-f9e2107469c0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Received event network-vif-plugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:26 compute-0 nova_compute[356901]: 2025-10-11 02:39:26.257 2 DEBUG oslo_concurrency.lockutils [req-ff39a1d0-3db8-45c4-a402-0b9ae3830881 req-f451f81b-2315-4f9c-b470-f9e2107469c0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:26 compute-0 nova_compute[356901]: 2025-10-11 02:39:26.257 2 DEBUG oslo_concurrency.lockutils [req-ff39a1d0-3db8-45c4-a402-0b9ae3830881 req-f451f81b-2315-4f9c-b470-f9e2107469c0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:26 compute-0 nova_compute[356901]: 2025-10-11 02:39:26.257 2 DEBUG oslo_concurrency.lockutils [req-ff39a1d0-3db8-45c4-a402-0b9ae3830881 req-f451f81b-2315-4f9c-b470-f9e2107469c0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:26 compute-0 nova_compute[356901]: 2025-10-11 02:39:26.257 2 DEBUG nova.compute.manager [req-ff39a1d0-3db8-45c4-a402-0b9ae3830881 req-f451f81b-2315-4f9c-b470-f9e2107469c0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Processing event network-vif-plugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10808
Oct 11 02:39:26 compute-0 nova_compute[356901]: 2025-10-11 02:39:26.531 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:26 compute-0 podman[452983]: 2025-10-11 02:39:26.580487938 +0000 UTC m=+0.071221651 container create c0d88bfd9df1a49fd0c922f853153438c0fe1f0209bd8d1173c10be3e9662a0a (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:39:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:39:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:39:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:39:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:39:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:39:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:39:26 compute-0 systemd[1]: Started libpod-conmon-c0d88bfd9df1a49fd0c922f853153438c0fe1f0209bd8d1173c10be3e9662a0a.scope.
Oct 11 02:39:26 compute-0 podman[452983]: 2025-10-11 02:39:26.544468168 +0000 UTC m=+0.035201901 image pull 1061e4fafe13e0b9aa1ef2c904ba4ad70c44f3e87b1d831f16c6db34937f4022 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Oct 11 02:39:26 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:39:26 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/105c2d3147e202160e1a750aa76d9e8280033a073759090e8a4c0f535ce66f87/merged/var/lib/neutron supports timestamps until 2038 (0x7fffffff)
Oct 11 02:39:26 compute-0 podman[452983]: 2025-10-11 02:39:26.694642429 +0000 UTC m=+0.185376172 container init c0d88bfd9df1a49fd0c922f853153438c0fe1f0209bd8d1173c10be3e9662a0a (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
Oct 11 02:39:26 compute-0 podman[452983]: 2025-10-11 02:39:26.703691546 +0000 UTC m=+0.194425259 container start c0d88bfd9df1a49fd0c922f853153438c0fe1f0209bd8d1173c10be3e9662a0a (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3)
Oct 11 02:39:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1852: 321 pgs: 321 active+clean; 304 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 70 KiB/s rd, 7.1 MiB/s wr, 111 op/s
Oct 11 02:39:26 compute-0 neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166[453022]: [NOTICE]   (453027) : New worker (453029) forked
Oct 11 02:39:26 compute-0 neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166[453022]: [NOTICE]   (453027) : Loading success.
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.793 286362 INFO neutron.agent.ovn.metadata.agent [-] Port 887c6cbc-2d8f-44c3-959f-4c732f5d4040 in datapath 3e4cd915-df9e-44c4-860d-c0ba25a21e79 unbound from our chassis
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.806 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network 3e4cd915-df9e-44c4-860d-c0ba25a21e79
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.821 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[5721ba84-866b-4a7b-8676-97271e585612]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.823 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Creating VETH tap3e4cd915-d1 in ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79 namespace provision_datapath /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:665
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.826 422955 DEBUG neutron.privileged.agent.linux.ip_lib [-] Interface tap3e4cd915-d0 not found in namespace None get_link_id /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:204
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.826 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[a5fdc1a3-b542-4f96-ae67-04045f1b8237]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.828 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[ea1f3dc3-3d29-446a-bbdc-d434b85f9c4f]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.854 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[5cee1638-be80-45cf-a233-7a24bb2b7230]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.887 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[9c0f8fa0-47dc-4ee1-8857-7a945a35d3c6]: (4, ('net.ipv4.conf.all.promote_secondaries = 1\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.928 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[e65b80ab-45ba-40fe-85ab-61c8e263983e]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:26 compute-0 systemd-udevd[452764]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:39:26 compute-0 NetworkManager[44908]: <info>  [1760150366.9395] manager: (tap3e4cd915-d0): new Veth device (/org/freedesktop/NetworkManager/Devices/48)
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.937 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[9ad8ed46-a4e8-4886-b62b-4aa364d94ced]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.988 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[eca122f9-ef31-464d-8683-a4c80a18ef3e]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:26 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:26.992 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[72b5abbf-8517-48a7-a60b-7894440c2ad9]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:27 compute-0 NetworkManager[44908]: <info>  [1760150367.0192] device (tap3e4cd915-d0): carrier: link connected
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:27.027 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[a7cf1540-e050-44fe-bbe5-1ad173b7e17d]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:27.053 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[f1ca1f6a-65d1-4d0b-bd6c-88f71a3b1a0d]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tap3e4cd915-d1'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:69:16:dd'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 27], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 671474, 'reachable_time': 27184, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 453049, 'error': None, 'target': 'ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:27.073 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[9d45a2e0-93e9-48f1-84a9-f48cf4526c68]: (4, ({'family': 10, 'prefixlen': 64, 'flags': 192, 'scope': 253, 'index': 2, 'attrs': [['IFA_ADDRESS', 'fe80::f816:3eff:fe69:16dd'], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 671474, 'tstamp': 671474}], ['IFA_FLAGS', 192]], 'header': {'length': 72, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 453050, 'error': None, 'target': 'ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'},)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:27.098 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[6f616e95-664b-4efb-b485-caa5689e4bf1]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tap3e4cd915-d1'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:69:16:dd'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 27], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 671474, 'reachable_time': 27184, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 0, 'sequence_number': 255, 'pid': 453051, 'error': None, 'target': 'ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.105 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150367.1032596, 49d4f343-d1b4-4594-96d2-0777a5ce8581 => Started> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.105 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] VM Started (Lifecycle Event)
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.107 2 DEBUG nova.compute.manager [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Instance event wait completed in 0 seconds for network-vif-plugged wait_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:577
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.116 2 DEBUG nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Guest created on hypervisor spawn /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4417
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.125 2 INFO nova.virt.libvirt.driver [-] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Instance spawned successfully.
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.126 2 DEBUG nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Attempting to register defaults for the following image properties: ['hw_cdrom_bus', 'hw_disk_bus', 'hw_input_bus', 'hw_pointer_model', 'hw_video_model', 'hw_vif_model'] _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:917
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:27.150 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[17e15ff2-11e7-49c3-94e6-82aa7eae430f]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.212 2 DEBUG nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Found default for hw_cdrom_bus of sata _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.212 2 DEBUG nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Found default for hw_disk_bus of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.213 2 DEBUG nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Found default for hw_input_bus of usb _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.213 2 DEBUG nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Found default for hw_pointer_model of usbtablet _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.213 2 DEBUG nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Found default for hw_video_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.214 2 DEBUG nova.virt.libvirt.driver [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Found default for hw_vif_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:27.219 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[e92c9bde-635a-4415-b41e-1dcc10e28e39]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:27.221 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tap3e4cd915-d0, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:27.222 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:27.223 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tap3e4cd915-d0, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.225 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:27 compute-0 kernel: tap3e4cd915-d0: entered promiscuous mode
Oct 11 02:39:27 compute-0 NetworkManager[44908]: <info>  [1760150367.2264] manager: (tap3e4cd915-d0): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/49)
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.227 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:27.228 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tap3e4cd915-d0, col_values=(('external_ids', {'iface-id': '7637b6d6-4d43-4536-be5a-8890e7876f1a'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.229 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:27 compute-0 ovn_controller[88370]: 2025-10-11T02:39:27Z|00093|binding|INFO|Releasing lport 7637b6d6-4d43-4536-be5a-8890e7876f1a from this chassis (sb_readonly=0)
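The three transactions above (DelPortCommand, AddPortCommand, DbSetCommand) move the metadata tap off br-ex, plug it into br-int, and tag it with the OVN iface-id that ovn-controller then (re)binds. For illustration only, the same wiring expressed through the ovs-vsctl CLI from Python, using the exact names from the log; the agent itself goes through ovsdbapp as logged:

# Equivalent bridge wiring via ovs-vsctl (illustration, not the agent's
# code path); port and iface-id values are taken from the log above.
import subprocess

port = 'tap3e4cd915-d0'
iface_id = '7637b6d6-4d43-4536-be5a-8890e7876f1a'
subprocess.run(['ovs-vsctl', '--if-exists', 'del-port', 'br-ex', port], check=True)
subprocess.run(['ovs-vsctl', '--may-exist', 'add-port', 'br-int', port], check=True)
subprocess.run(['ovs-vsctl', 'set', 'Interface', port,
                f'external_ids:iface-id={iface_id}'], check=True)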
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.244 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:27.244 286362 DEBUG neutron.agent.linux.utils [-] Unable to access /var/lib/neutron/external/pids/3e4cd915-df9e-44c4-860d-c0ba25a21e79.pid.haproxy; Error: [Errno 2] No such file or directory: '/var/lib/neutron/external/pids/3e4cd915-df9e-44c4-860d-c0ba25a21e79.pid.haproxy' get_value_from_file /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:252
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:27.245 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[c522e1d4-1ee9-490e-9cd9-bc313f64d9aa]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:27.246 286362 DEBUG neutron.agent.ovn.metadata.driver [-] haproxy_cfg = 
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: global
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     log         /dev/log local0 debug
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     log-tag     haproxy-metadata-proxy-3e4cd915-df9e-44c4-860d-c0ba25a21e79
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     user        root
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     group       root
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     maxconn     1024
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     pidfile     /var/lib/neutron/external/pids/3e4cd915-df9e-44c4-860d-c0ba25a21e79.pid.haproxy
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     daemon
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: defaults
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     log global
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     mode http
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     option httplog
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     option dontlognull
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     option http-server-close
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     option forwardfor
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     retries                 3
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     timeout http-request    30s
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     timeout connect         30s
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     timeout client          32s
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     timeout server          32s
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     timeout http-keep-alive 30s
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: listen listener
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     bind 169.254.169.254:80
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     server metadata /var/lib/neutron/metadata_proxy
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:     http-request add-header X-OVN-Network-ID 3e4cd915-df9e-44c4-860d-c0ba25a21e79
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]:  create_config_file /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/driver.py:107
Oct 11 02:39:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:27.247 286362 DEBUG neutron.agent.linux.utils [-] Running command: ['sudo', 'neutron-rootwrap', '/etc/neutron/rootwrap.conf', 'ip', 'netns', 'exec', 'ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79', 'env', 'PROCESS_TAG=haproxy-3e4cd915-df9e-44c4-860d-c0ba25a21e79', 'haproxy', '-f', '/var/lib/neutron/ovn-metadata-proxy/3e4cd915-df9e-44c4-860d-c0ba25a21e79.conf'] create_process /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:84
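The haproxy_cfg dump above varies only in the network UUID; a sketch that re-renders it from that single parameter (the real template lives in neutron/agent/ovn/metadata/driver.py; this literal is copied from the dump, not from that source):

# Re-render the haproxy config dumped above from the network UUID.
CFG = """global
    log         /dev/log local0 debug
    log-tag     haproxy-metadata-proxy-{net}
    user        root
    group       root
    maxconn     1024
    pidfile     /var/lib/neutron/external/pids/{net}.pid.haproxy
    daemon

defaults
    log global
    mode http
    option httplog
    option dontlognull
    option http-server-close
    option forwardfor
    retries                 3
    timeout http-request    30s
    timeout connect         30s
    timeout client          32s
    timeout server          32s
    timeout http-keep-alive 30s

listen listener
    bind 169.254.169.254:80
    server metadata /var/lib/neutron/metadata_proxy
    http-request add-header X-OVN-Network-ID {net}
"""
print(CFG.format(net='3e4cd915-df9e-44c4-860d-c0ba25a21e79'))

haproxy is then started inside the ovnmeta- namespace by the rootwrap command above, binding 169.254.169.254:80 and relaying requests to the agent's unix socket at /var/lib/neutron/metadata_proxy with the X-OVN-Network-ID header appended.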
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.315 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.330 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150367.1038082, 49d4f343-d1b4-4594-96d2-0777a5ce8581 => Paused> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.331 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] VM Paused (Lifecycle Event)
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.370 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.377 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Synchronizing instance power state after lifecycle event "Paused"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 3 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
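The numeric states in the sync messages map to nova's power-state constants (nova/compute/power_state.py); the sequence above is the normal spawn path, with DB power_state 0 (NOSTATE) while building and VM power_state 3 (PAUSED) until libvirt resumes the guest:

# Nova power-state codes as they appear in the sync messages above.
POWER_STATE = {0: 'NOSTATE', 1: 'RUNNING', 3: 'PAUSED',
               4: 'SHUTDOWN', 6: 'CRASHED', 7: 'SUSPENDED'}
print(POWER_STATE[3])  # 'PAUSED' -> matches "VM power_state: 3" above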
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.404 2 INFO nova.compute.manager [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Took 26.63 seconds to spawn the instance on the hypervisor.
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.405 2 DEBUG nova.compute.manager [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.430 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.430 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150367.103972, f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 => Started> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.430 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] VM Started (Lifecycle Event)
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.459 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.464 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Synchronizing instance power state after lifecycle event "Started"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.471 2 INFO nova.compute.manager [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Took 27.77 seconds to build instance.
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.481 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150367.1049876, f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 => Paused> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.481 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] VM Paused (Lifecycle Event)
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.488 2 DEBUG oslo_concurrency.lockutils [None req-1fcf5eb8-a90d-479b-bde4-b67c4303b6b2 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" "released" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: held 27.884s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
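The "released" message above is oslo.concurrency's named lock keyed on the instance UUID, which serializes build_and_run_instance per instance and logs the held time (27.884s here) on exit. A minimal standalone use of the same primitive, assuming only oslo.concurrency is installed:

# Named lock keyed on an instance UUID, as used by the manager above;
# acquisition and release are logged at DEBUG by lockutils itself.
from oslo_concurrency import lockutils

with lockutils.lock('f5eb6746-7f42-4fa4-8e26-cda5cfa0c765'):
    pass  # critical section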
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.494 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.499 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150367.1117444, f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 => Resumed> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.499 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] VM Resumed (Lifecycle Event)
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.564 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.571 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Synchronizing instance power state after lifecycle event "Resumed"; current vm_state: active, current task_state: None, current DB power_state: 1, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.587 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150367.1545403, 830c7581-3555-41db-9818-0961fc151818 => Started> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.587 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 830c7581-3555-41db-9818-0961fc151818] VM Started (Lifecycle Event)
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.603 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 830c7581-3555-41db-9818-0961fc151818] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.610 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150367.1547394, 830c7581-3555-41db-9818-0961fc151818 => Paused> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.610 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 830c7581-3555-41db-9818-0961fc151818] VM Paused (Lifecycle Event)
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.624 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 830c7581-3555-41db-9818-0961fc151818] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.630 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 830c7581-3555-41db-9818-0961fc151818] Synchronizing instance power state after lifecycle event "Paused"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 3 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.654 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 830c7581-3555-41db-9818-0961fc151818] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.654 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150367.3845794, ee9601c7-f562-449e-9f5c-5e1355f3c130 => Started> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.654 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] VM Started (Lifecycle Event)
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.669 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.675 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150367.3846664, ee9601c7-f562-449e-9f5c-5e1355f3c130 => Paused> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.675 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] VM Paused (Lifecycle Event)
Oct 11 02:39:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:39:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/499916320' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.694 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:39:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/499916320' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
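The two audited mon commands are JSON bodies dispatched by the client.openstack librados session (192.168.122.10 here). A sketch of issuing the same commands through the rados Python binding, assuming a reachable cluster, /etc/ceph/ceph.conf, and a client.openstack keyring:

# Issue the same two mon commands audited above via librados.
import json
import rados

cluster = rados.Rados(conffile='/etc/ceph/ceph.conf', name='client.openstack')
cluster.connect()
try:
    for cmd in ({"prefix": "df", "format": "json"},
                {"prefix": "osd pool get-quota", "pool": "volumes",
                 "format": "json"}):
        ret, out, err = cluster.mon_command(json.dumps(cmd), b'')
        print(ret, out[:80])
finally:
    cluster.shutdown()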
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.699 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Synchronizing instance power state after lifecycle event "Paused"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 3 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:39:27 compute-0 nova_compute[356901]: 2025-10-11 02:39:27.718 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:39:27 compute-0 podman[453082]: 2025-10-11 02:39:27.781123121 +0000 UTC m=+0.093496353 container create 5fc42075de49ca1453a62e5b97c96c1499a69701f03b6cbf9a2e4cf956b34a5c (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.vendor=CentOS)
Oct 11 02:39:27 compute-0 ceph-mon[191930]: pgmap v1852: 321 pgs: 321 active+clean; 304 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 70 KiB/s rd, 7.1 MiB/s wr, 111 op/s
Oct 11 02:39:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/499916320' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:39:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/499916320' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:39:27 compute-0 podman[453082]: 2025-10-11 02:39:27.748375386 +0000 UTC m=+0.060748618 image pull 1061e4fafe13e0b9aa1ef2c904ba4ad70c44f3e87b1d831f16c6db34937f4022 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Oct 11 02:39:27 compute-0 systemd[1]: Started libpod-conmon-5fc42075de49ca1453a62e5b97c96c1499a69701f03b6cbf9a2e4cf956b34a5c.scope.
Oct 11 02:39:27 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:39:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/8fd524dc9ed703aec15207e956d562a2002171d78827e03b7deac45167c6950e/merged/var/lib/neutron supports timestamps until 2038 (0x7fffffff)
Oct 11 02:39:27 compute-0 podman[453082]: 2025-10-11 02:39:27.903458776 +0000 UTC m=+0.215832038 container init 5fc42075de49ca1453a62e5b97c96c1499a69701f03b6cbf9a2e4cf956b34a5c (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.build-date=20251009)
Oct 11 02:39:27 compute-0 podman[453082]: 2025-10-11 02:39:27.914039182 +0000 UTC m=+0.226412404 container start 5fc42075de49ca1453a62e5b97c96c1499a69701f03b6cbf9a2e4cf956b34a5c (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79, tcib_managed=true, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:39:27 compute-0 neutron-haproxy-ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79[453095]: [NOTICE]   (453099) : New worker (453101) forked
Oct 11 02:39:27 compute-0 neutron-haproxy-ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79[453095]: [NOTICE]   (453099) : Loading success.
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.008 286362 INFO neutron.agent.ovn.metadata.agent [-] Port 5cd25b0e-b4c9-408f-b456-59127a046cde in datapath b6521a4e-cfb9-4743-91c3-85402b5661d9 unbound from our chassis
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.015 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network b6521a4e-cfb9-4743-91c3-85402b5661d9
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.034 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[5b3b2218-670b-474e-b87f-a28811e85a71]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.035 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Creating VETH tapb6521a4e-c1 in ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9 namespace provision_datapath /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:665
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.038 422955 DEBUG neutron.privileged.agent.linux.ip_lib [-] Interface tapb6521a4e-c0 not found in namespace None get_link_id /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:204
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.038 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[c5a1064c-b440-43e6-bb01-ea93579cf93d]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.040 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[308bbc43-34be-494f-9c80-cb158e30d634]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.079 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[c9f732b8-413d-4f12-82bf-79d88ae49057]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.112 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[05863a3f-6aa1-4fa0-a9f3-1f357a9167ee]: (4, ('net.ipv4.conf.all.promote_secondaries = 1\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.152 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[02e38b48-a506-440b-8595-067fe1655202]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:28 compute-0 NetworkManager[44908]: <info>  [1760150368.1628] manager: (tapb6521a4e-c0): new Veth device (/org/freedesktop/NetworkManager/Devices/50)
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.160 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[7f2232de-aaa5-4424-9776-bec9045cf4e5]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.209 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[23a78248-d19e-4d7b-8986-8ec2233f2e35]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.214 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[c95ccd5b-3cd8-41d4-a631-4ecc069c371d]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:28 compute-0 NetworkManager[44908]: <info>  [1760150368.2498] device (tapb6521a4e-c0): carrier: link connected
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.258 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[a3a91aca-35a7-41b6-b075-3f838f1da52d]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.277 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[17720700-c63a-41a8-8468-395d1cb56eb0]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapb6521a4e-c1'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:96:cf:2e'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 28], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 671597, 'reachable_time': 19972, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 
'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 453121, 'error': None, 'target': 'ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.296 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[f20fc982-f53e-4b81-ab3a-b86795a65c05]: (4, ({'family': 10, 'prefixlen': 64, 'flags': 192, 'scope': 253, 'index': 2, 'attrs': [['IFA_ADDRESS', 'fe80::f816:3eff:fe96:cf2e'], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 671597, 'tstamp': 671597}], ['IFA_FLAGS', 192]], 'header': {'length': 72, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 453122, 'error': None, 'target': 'ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'},)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
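The RTM_NEWADDR above carries the veth's IPv6 link-local address, which is the EUI-64 expansion of its MAC fa:16:3e:96:cf:2e from the preceding RTM_NEWLINK; the derivation, checked in a few lines:

# Derive the EUI-64 link-local address for the MAC seen above.
def mac_to_link_local(mac: str) -> str:
    b = bytearray(int(x, 16) for x in mac.split(':'))
    b[0] ^= 0x02                                  # flip universal/local bit
    eui = bytes(b[:3]) + b'\xff\xfe' + bytes(b[3:])
    groups = [f'{eui[i] << 8 | eui[i + 1]:x}' for i in range(0, 8, 2)]
    return 'fe80::' + ':'.join(groups)

print(mac_to_link_local('fa:16:3e:96:cf:2e'))  # fe80::f816:3eff:fe96:cf2e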
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.323 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[8e6aa5d4-fa98-4d4c-bb3b-d1c56005dbd2]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapb6521a4e-c1'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:96:cf:2e'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 28], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 671597, 'reachable_time': 19972, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 
'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 0, 'sequence_number': 255, 'pid': 453123, 'error': None, 'target': 'ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:28 compute-0 nova_compute[356901]: 2025-10-11 02:39:28.326 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.379 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[a1628f1f-adf5-4b23-839c-ba67906d19d1]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.469 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[ca2bc050-da09-4c97-bd52-171aa28b45cc]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.470 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapb6521a4e-c0, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.470 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.471 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapb6521a4e-c0, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:28 compute-0 kernel: tapb6521a4e-c0: entered promiscuous mode
Oct 11 02:39:28 compute-0 NetworkManager[44908]: <info>  [1760150368.4744] manager: (tapb6521a4e-c0): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/51)
Oct 11 02:39:28 compute-0 nova_compute[356901]: 2025-10-11 02:39:28.476 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.480 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tapb6521a4e-c0, col_values=(('external_ids', {'iface-id': '0cf0c3d7-097b-4c42-9345-bd18acebd4e7'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:28 compute-0 ovn_controller[88370]: 2025-10-11T02:39:28Z|00094|binding|INFO|Releasing lport 0cf0c3d7-097b-4c42-9345-bd18acebd4e7 from this chassis (sb_readonly=0)
Oct 11 02:39:28 compute-0 nova_compute[356901]: 2025-10-11 02:39:28.482 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:28 compute-0 nova_compute[356901]: 2025-10-11 02:39:28.500 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.501 286362 DEBUG neutron.agent.linux.utils [-] Unable to access /var/lib/neutron/external/pids/b6521a4e-cfb9-4743-91c3-85402b5661d9.pid.haproxy; Error: [Errno 2] No such file or directory: '/var/lib/neutron/external/pids/b6521a4e-cfb9-4743-91c3-85402b5661d9.pid.haproxy' get_value_from_file /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:252
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.502 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[d05ff43b-83fb-4841-816b-59198383afc7]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.504 286362 DEBUG neutron.agent.ovn.metadata.driver [-] haproxy_cfg = 
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: global
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     log         /dev/log local0 debug
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     log-tag     haproxy-metadata-proxy-b6521a4e-cfb9-4743-91c3-85402b5661d9
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     user        root
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     group       root
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     maxconn     1024
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     pidfile     /var/lib/neutron/external/pids/b6521a4e-cfb9-4743-91c3-85402b5661d9.pid.haproxy
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     daemon
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: defaults
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     log global
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     mode http
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     option httplog
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     option dontlognull
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     option http-server-close
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     option forwardfor
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     retries                 3
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     timeout http-request    30s
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     timeout connect         30s
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     timeout client          32s
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     timeout server          32s
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     timeout http-keep-alive 30s
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: listen listener
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     bind 169.254.169.254:80
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     server metadata /var/lib/neutron/metadata_proxy
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:     http-request add-header X-OVN-Network-ID b6521a4e-cfb9-4743-91c3-85402b5661d9
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]:  create_config_file /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/driver.py:107
Oct 11 02:39:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:28.504 286362 DEBUG neutron.agent.linux.utils [-] Running command: ['sudo', 'neutron-rootwrap', '/etc/neutron/rootwrap.conf', 'ip', 'netns', 'exec', 'ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9', 'env', 'PROCESS_TAG=haproxy-b6521a4e-cfb9-4743-91c3-85402b5661d9', 'haproxy', '-f', '/var/lib/neutron/ovn-metadata-proxy/b6521a4e-cfb9-4743-91c3-85402b5661d9.conf'] create_process /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:84
Oct 11 02:39:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1853: 321 pgs: 321 active+clean; 304 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 48 KiB/s rd, 5.2 MiB/s wr, 71 op/s
Oct 11 02:39:28 compute-0 ceph-mon[191930]: pgmap v1853: 321 pgs: 321 active+clean; 304 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 48 KiB/s rd, 5.2 MiB/s wr, 71 op/s
Oct 11 02:39:29 compute-0 podman[453156]: 2025-10-11 02:39:29.004378216 +0000 UTC m=+0.097080137 container create aad9edf094ebeefe0b5f0255f6ba206af2d8a33000a12f8444029b4d954981f6 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3)
Oct 11 02:39:29 compute-0 podman[453156]: 2025-10-11 02:39:28.953973709 +0000 UTC m=+0.046675690 image pull 1061e4fafe13e0b9aa1ef2c904ba4ad70c44f3e87b1d831f16c6db34937f4022 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Oct 11 02:39:29 compute-0 systemd[1]: Started libpod-conmon-aad9edf094ebeefe0b5f0255f6ba206af2d8a33000a12f8444029b4d954981f6.scope.
Oct 11 02:39:29 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:39:29 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3ec573959d20c719212b0062191f8fefd2ca6ede5557eec44a1a114ecd9e4f7b/merged/var/lib/neutron supports timestamps until 2038 (0x7fffffff)
Oct 11 02:39:29 compute-0 podman[453156]: 2025-10-11 02:39:29.142550554 +0000 UTC m=+0.235252495 container init aad9edf094ebeefe0b5f0255f6ba206af2d8a33000a12f8444029b4d954981f6 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:39:29 compute-0 podman[453156]: 2025-10-11 02:39:29.161975161 +0000 UTC m=+0.254677072 container start aad9edf094ebeefe0b5f0255f6ba206af2d8a33000a12f8444029b4d954981f6 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, org.label-schema.license=GPLv2)
Oct 11 02:39:29 compute-0 neutron-haproxy-ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9[453170]: [NOTICE]   (453174) : New worker (453176) forked
Oct 11 02:39:29 compute-0 neutron-haproxy-ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9[453170]: [NOTICE]   (453174) : Loading success.
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.260 286362 INFO neutron.agent.ovn.metadata.agent [-] Port 4076fda2-be62-4c52-b073-8bf26574dee1 in datapath eb08ca1c-c05f-4da5-9518-fb3b2d958ee2 unbound from our chassis
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.262 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network eb08ca1c-c05f-4da5-9518-fb3b2d958ee2
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.280 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[67eee990-1e85-4e12-9727-66c1bcb96cb1]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.281 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Creating VETH tapeb08ca1c-c1 in ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2 namespace provision_datapath /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:665
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.284 422955 DEBUG neutron.privileged.agent.linux.ip_lib [-] Interface tapeb08ca1c-c0 not found in namespace None get_link_id /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:204
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.284 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[99bc60ba-afbe-45fa-813f-4b9f7503d29a]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.285 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[2ae9b45c-c1f6-4d58-bee3-1e6e9e2e4b78]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.315 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[e8e282f9-546b-4fdb-85ab-895e61d0fadd]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.344 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[929e481f-d68d-4344-bc27-e57608c0953a]: (4, ('net.ipv4.conf.all.promote_secondaries = 1\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.387 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[e7590021-1f8c-4a0d-a58b-6b78987c23ad]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.396 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[b2d33ffe-5d5b-442d-af16-c79b08ae1d82]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:29 compute-0 NetworkManager[44908]: <info>  [1760150369.3975] manager: (tapeb08ca1c-c0): new Veth device (/org/freedesktop/NetworkManager/Devices/52)
Oct 11 02:39:29 compute-0 systemd-udevd[453229]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.452 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[809b3b76-b1cc-422f-93f4-e1baa4ba59e8]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.456 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[fd219b4b-2007-4b0d-94df-79a8bc25352e]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:29 compute-0 podman[453188]: 2025-10-11 02:39:29.457956591 +0000 UTC m=+0.109208483 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_id=multipathd, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, container_name=multipathd, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:39:29 compute-0 podman[453190]: 2025-10-11 02:39:29.474132143 +0000 UTC m=+0.124347778 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, container_name=iscsid, managed_by=edpm_ansible, tcib_managed=true, config_id=iscsid, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
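The two podman health_status records above embed each container's full launch spec as a Python-literal dict in the config_data= field (single quotes, True/False), so json.loads will not parse it but ast.literal_eval will. A hedged sketch for recovering it from such a line; it assumes no stray braces inside string values, which holds for these records:

```python
import ast

def extract_config_data(line):
    """Pull the config_data={...} span out of a podman health_status record
    and parse it as a Python literal (it is logged via repr, not JSON)."""
    start = line.index("config_data=") + len("config_data=")
    depth = 0
    for i, ch in enumerate(line[start:], start):
        if ch == "{":
            depth += 1
        elif ch == "}":
            depth -= 1
            if depth == 0:  # matching close brace of the dict literal
                return ast.literal_eval(line[start:i + 1])
    raise ValueError("unbalanced config_data literal")

# Abbreviated stand-in for the multipathd health_status line above:
line = ("container health_status 1ac8f540dad8 (name=multipathd, "
        "config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, "
        "'net': 'host', 'privileged': True, 'volumes': ['/dev:/dev', '/sys:/sys']}, "
        "io.buildah.version=1.41.3)")
print(extract_config_data(line)["volumes"])
```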
Oct 11 02:39:29 compute-0 NetworkManager[44908]: <info>  [1760150369.4907] device (tapeb08ca1c-c0): carrier: link connected
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.499 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[d31c169d-46c1-477e-8edb-96c98dbbb805]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.521 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[98279a9a-4ea5-4bf8-97ef-9d8cf532114b]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapeb08ca1c-c1'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:c5:53:9c'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 29], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 671721, 'reachable_time': 25392, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 453250, 'error': None, 'target': 'ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.544 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[0cd5021a-464a-4e16-842a-84bfcd2033a6]: (4, ({'family': 10, 'prefixlen': 64, 'flags': 192, 'scope': 253, 'index': 2, 'attrs': [['IFA_ADDRESS', 'fe80::f816:3eff:fec5:539c'], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 671721, 'tstamp': 671721}], ['IFA_FLAGS', 192]], 'header': {'length': 72, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 453251, 'error': None, 'target': 'ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'},)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.568 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[3a9e165c-dc8e-45f5-8114-d3ad94f060c0]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapeb08ca1c-c1'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:c5:53:9c'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 29], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 671721, 'reachable_time': 25392, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 0, 'sequence_number': 255, 'pid': 453252, 'error': None, 'target': 'ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
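The two large replies above are RTM_NEWLINK messages for the namespace end of the veth pair (tapeb08ca1c-c1), rendered pyroute2-style as a message dict whose 'attrs' is a list of [name, value] pairs. A small helper for reading such dumps, shown against a pared-down stand-in for the payload logged at 02:39:29.521 (the helper name is illustrative):

```python
def ifla(msg, name, default=None):
    """Look up a top-level IFLA_* attribute in a pyroute2-style message dict,
    where 'attrs' is a list of [name, value] pairs as seen in the log."""
    for key, value in msg.get("attrs", []):
        if key == name:
            return value
    return default

# Minimal stand-in for the RTM_NEWLINK payload above:
msg = {
    "index": 2,
    "state": "up",
    "event": "RTM_NEWLINK",
    "attrs": [
        ["IFLA_IFNAME", "tapeb08ca1c-c1"],
        ["IFLA_ADDRESS", "fa:16:3e:c5:53:9c"],
        ["IFLA_OPERSTATE", "UP"],
        ["IFLA_CARRIER", 1],
    ],
}

print(ifla(msg, "IFLA_IFNAME"), ifla(msg, "IFLA_ADDRESS"), ifla(msg, "IFLA_CARRIER"))
```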
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.605 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[1a84d5d2-0723-4f08-9cb2-6458b9e0b9e2]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.679 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[1cf117e1-fbf8-433a-9e9f-e90edd6d6bc1]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.681 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapeb08ca1c-c0, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.681 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.681 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapeb08ca1c-c0, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.683 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:29 compute-0 NetworkManager[44908]: <info>  [1760150369.6850] manager: (tapeb08ca1c-c0): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/53)
Oct 11 02:39:29 compute-0 kernel: tapeb08ca1c-c0: entered promiscuous mode
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.688 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.689 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tapeb08ca1c-c0, col_values=(('external_ids', {'iface-id': '3233307f-6a7e-4ff6-b881-6d68b60996c3'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
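The three ovsdbapp transactions at 02:39:29.681-.689 amount to: remove tapeb08ca1c-c0 from br-ex if present (a no-op here, hence "Transaction caused no change"), add it to br-int, and tag the Interface with the OVN logical port's iface-id so ovn-controller can bind it. A hedged sketch of the equivalent ovs-vsctl calls, which mirror the command flags in the log:

```python
import subprocess

IFACE = "tapeb08ca1c-c0"
IFACE_ID = "3233307f-6a7e-4ff6-b881-6d68b60996c3"

def vsctl(*args):
    """Run ovs-vsctl and raise on failure (needs root and a running ovsdb)."""
    subprocess.run(["ovs-vsctl", *args], check=True)

# DelPortCommand(..., if_exists=True)  -> no-op when the port is absent
vsctl("--if-exists", "del-port", "br-ex", IFACE)
# AddPortCommand(..., may_exist=True)
vsctl("--may-exist", "add-port", "br-int", IFACE)
# DbSetCommand(table=Interface, ...)   -> lets ovn-controller bind the lport
vsctl("set", "Interface", IFACE, f"external_ids:iface-id={IFACE_ID}")
```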
Oct 11 02:39:29 compute-0 ovn_controller[88370]: 2025-10-11T02:39:29Z|00095|binding|INFO|Releasing lport 3233307f-6a7e-4ff6-b881-6d68b60996c3 from this chassis (sb_readonly=0)
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.690 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.705 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.706 286362 DEBUG neutron.agent.linux.utils [-] Unable to access /var/lib/neutron/external/pids/eb08ca1c-c05f-4da5-9518-fb3b2d958ee2.pid.haproxy; Error: [Errno 2] No such file or directory: '/var/lib/neutron/external/pids/eb08ca1c-c05f-4da5-9518-fb3b2d958ee2.pid.haproxy' get_value_from_file /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:252
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.707 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[1120b4c5-2dd4-44d3-b7cd-4d8a3b2e67dc]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.708 286362 DEBUG neutron.agent.ovn.metadata.driver [-] haproxy_cfg = 
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: global
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     log         /dev/log local0 debug
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     log-tag     haproxy-metadata-proxy-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     user        root
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     group       root
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     maxconn     1024
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     pidfile     /var/lib/neutron/external/pids/eb08ca1c-c05f-4da5-9518-fb3b2d958ee2.pid.haproxy
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     daemon
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: defaults
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     log global
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     mode http
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     option httplog
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     option dontlognull
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     option http-server-close
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     option forwardfor
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     retries                 3
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     timeout http-request    30s
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     timeout connect         30s
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     timeout client          32s
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     timeout server          32s
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     timeout http-keep-alive 30s
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: listen listener
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     bind 169.254.169.254:80
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     server metadata /var/lib/neutron/metadata_proxy
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:     http-request add-header X-OVN-Network-ID eb08ca1c-c05f-4da5-9518-fb3b2d958ee2
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]:  create_config_file /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/driver.py:107
Oct 11 02:39:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:29.709 286362 DEBUG neutron.agent.linux.utils [-] Running command: ['sudo', 'neutron-rootwrap', '/etc/neutron/rootwrap.conf', 'ip', 'netns', 'exec', 'ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2', 'env', 'PROCESS_TAG=haproxy-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2', 'haproxy', '-f', '/var/lib/neutron/ovn-metadata-proxy/eb08ca1c-c05f-4da5-9518-fb3b2d958ee2.conf'] create_process /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:84
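The rendered config above binds haproxy to 169.254.169.254:80 inside the ovnmeta- namespace, forwards requests to the agent's UNIX socket at /var/lib/neutron/metadata_proxy (haproxy treats a server address beginning with '/' as a UNIX socket), and stamps each request with X-OVN-Network-ID so the agent can resolve the requesting network; the rootwrap command then launches that haproxy inside the namespace. A rough liveness check from the hypervisor, illustrative only (requires root; a non-instance source address will typically get an error body rather than metadata, but any HTTP response proves the proxy is bound):

```python
import subprocess

NETNS = "ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2"

# Hit the metadata VIP from inside the proxy's namespace; even a 400/404
# response shows haproxy is up and listening on 169.254.169.254:80.
subprocess.run(
    ["ip", "netns", "exec", NETNS,
     "curl", "-sv", "http://169.254.169.254/openstack"],
    check=False,
)
```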
Oct 11 02:39:29 compute-0 podman[157119]: time="2025-10-11T02:39:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:39:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:39:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 49967 "" "Go-http-client/1.1"
Oct 11 02:39:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:39:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 10447 "" "Go-http-client/1.1"
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.954 2 DEBUG nova.compute.manager [req-cd5f321e-ee0d-4cfb-a360-2153ff37b138 req-4a9bb4af-25d6-4be6-bb59-9bb41f9124f4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Received event network-vif-plugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.954 2 DEBUG oslo_concurrency.lockutils [req-cd5f321e-ee0d-4cfb-a360-2153ff37b138 req-4a9bb4af-25d6-4be6-bb59-9bb41f9124f4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.954 2 DEBUG oslo_concurrency.lockutils [req-cd5f321e-ee0d-4cfb-a360-2153ff37b138 req-4a9bb4af-25d6-4be6-bb59-9bb41f9124f4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.955 2 DEBUG oslo_concurrency.lockutils [req-cd5f321e-ee0d-4cfb-a360-2153ff37b138 req-4a9bb4af-25d6-4be6-bb59-9bb41f9124f4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.955 2 DEBUG nova.compute.manager [req-cd5f321e-ee0d-4cfb-a360-2153ff37b138 req-4a9bb4af-25d6-4be6-bb59-9bb41f9124f4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] No waiting events found dispatching network-vif-plugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.955 2 WARNING nova.compute.manager [req-cd5f321e-ee0d-4cfb-a360-2153ff37b138 req-4a9bb4af-25d6-4be6-bb59-9bb41f9124f4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Received unexpected event network-vif-plugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 for instance with vm_state active and task_state None.
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.955 2 DEBUG nova.compute.manager [req-cd5f321e-ee0d-4cfb-a360-2153ff37b138 req-4a9bb4af-25d6-4be6-bb59-9bb41f9124f4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Received event network-vif-plugged-4076fda2-be62-4c52-b073-8bf26574dee1 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.955 2 DEBUG oslo_concurrency.lockutils [req-cd5f321e-ee0d-4cfb-a360-2153ff37b138 req-4a9bb4af-25d6-4be6-bb59-9bb41f9124f4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.956 2 DEBUG oslo_concurrency.lockutils [req-cd5f321e-ee0d-4cfb-a360-2153ff37b138 req-4a9bb4af-25d6-4be6-bb59-9bb41f9124f4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.956 2 DEBUG oslo_concurrency.lockutils [req-cd5f321e-ee0d-4cfb-a360-2153ff37b138 req-4a9bb4af-25d6-4be6-bb59-9bb41f9124f4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.956 2 DEBUG nova.compute.manager [req-cd5f321e-ee0d-4cfb-a360-2153ff37b138 req-4a9bb4af-25d6-4be6-bb59-9bb41f9124f4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Processing event network-vif-plugged-4076fda2-be62-4c52-b073-8bf26574dee1 _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10808
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.956 2 DEBUG nova.compute.manager [req-cd5f321e-ee0d-4cfb-a360-2153ff37b138 req-4a9bb4af-25d6-4be6-bb59-9bb41f9124f4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Received event network-vif-plugged-4076fda2-be62-4c52-b073-8bf26574dee1 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.956 2 DEBUG oslo_concurrency.lockutils [req-cd5f321e-ee0d-4cfb-a360-2153ff37b138 req-4a9bb4af-25d6-4be6-bb59-9bb41f9124f4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.957 2 DEBUG oslo_concurrency.lockutils [req-cd5f321e-ee0d-4cfb-a360-2153ff37b138 req-4a9bb4af-25d6-4be6-bb59-9bb41f9124f4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.957 2 DEBUG oslo_concurrency.lockutils [req-cd5f321e-ee0d-4cfb-a360-2153ff37b138 req-4a9bb4af-25d6-4be6-bb59-9bb41f9124f4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.957 2 DEBUG nova.compute.manager [req-cd5f321e-ee0d-4cfb-a360-2153ff37b138 req-4a9bb4af-25d6-4be6-bb59-9bb41f9124f4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] No waiting events found dispatching network-vif-plugged-4076fda2-be62-4c52-b073-8bf26574dee1 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.957 2 WARNING nova.compute.manager [req-cd5f321e-ee0d-4cfb-a360-2153ff37b138 req-4a9bb4af-25d6-4be6-bb59-9bb41f9124f4 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Received unexpected event network-vif-plugged-4076fda2-be62-4c52-b073-8bf26574dee1 for instance with vm_state building and task_state spawning.
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.958 2 DEBUG nova.compute.manager [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Instance event wait completed in 2 seconds for network-vif-plugged wait_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:577
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.964 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150369.964021, 49d4f343-d1b4-4594-96d2-0777a5ce8581 => Resumed> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.965 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] VM Resumed (Lifecycle Event)
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.966 2 DEBUG nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Guest created on hypervisor spawn /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4417
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.971 2 INFO nova.virt.libvirt.driver [-] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Instance spawned successfully.
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.972 2 DEBUG nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Attempting to register defaults for the following image properties: ['hw_cdrom_bus', 'hw_disk_bus', 'hw_input_bus', 'hw_pointer_model', 'hw_video_model', 'hw_vif_model'] _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:917
Oct 11 02:39:29 compute-0 nova_compute[356901]: 2025-10-11 02:39:29.993 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:30 compute-0 nova_compute[356901]: 2025-10-11 02:39:30.000 2 DEBUG nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Found default for hw_cdrom_bus of sata _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:30 compute-0 nova_compute[356901]: 2025-10-11 02:39:30.001 2 DEBUG nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Found default for hw_disk_bus of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:30 compute-0 nova_compute[356901]: 2025-10-11 02:39:30.001 2 DEBUG nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Found default for hw_input_bus of usb _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:30 compute-0 nova_compute[356901]: 2025-10-11 02:39:30.002 2 DEBUG nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Found default for hw_pointer_model of usbtablet _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:30 compute-0 nova_compute[356901]: 2025-10-11 02:39:30.002 2 DEBUG nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Found default for hw_video_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:30 compute-0 nova_compute[356901]: 2025-10-11 02:39:30.003 2 DEBUG nova.virt.libvirt.driver [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Found default for hw_vif_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:30 compute-0 nova_compute[356901]: 2025-10-11 02:39:30.007 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:30 compute-0 nova_compute[356901]: 2025-10-11 02:39:30.011 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Synchronizing instance power state after lifecycle event "Resumed"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:39:30 compute-0 nova_compute[356901]: 2025-10-11 02:39:30.033 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:39:30 compute-0 nova_compute[356901]: 2025-10-11 02:39:30.063 2 INFO nova.compute.manager [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Took 25.38 seconds to spawn the instance on the hypervisor.
Oct 11 02:39:30 compute-0 nova_compute[356901]: 2025-10-11 02:39:30.064 2 DEBUG nova.compute.manager [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:30 compute-0 nova_compute[356901]: 2025-10-11 02:39:30.148 2 INFO nova.compute.manager [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Took 26.81 seconds to build instance.
Oct 11 02:39:30 compute-0 nova_compute[356901]: 2025-10-11 02:39:30.164 2 DEBUG oslo_concurrency.lockutils [None req-e253a9fe-dde3-4532-b663-285caa1ea9e3 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lock "49d4f343-d1b4-4594-96d2-0777a5ce8581" "released" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: held 26.959s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
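The lines at 02:39:30.063 and 02:39:30.148 report 25.38 s to spawn on the hypervisor and 26.81 s end-to-end for instance 49d4f343-…, and the build lock was held for 26.959 s. A throwaway sketch for tabulating these durations across a whole journal dump (regex is illustrative, keyed to the exact wording of these INFO lines):

```python
import re
import sys

TOOK_RE = re.compile(
    r"\[instance: (?P<uuid>[0-9a-f-]+)\] Took (?P<secs>[\d.]+) seconds to "
    r"(?P<what>spawn the instance on the hypervisor|build instance)"
)

# Usage: journalctl -u <unit> | python3 took_times.py
for line in sys.stdin:
    m = TOOK_RE.search(line)
    if m:
        print(f"{m['uuid']}  {m['what']:40s} {float(m['secs']):7.2f}s")
```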
Oct 11 02:39:30 compute-0 podman[453285]: 2025-10-11 02:39:30.206259462 +0000 UTC m=+0.084773875 container create 6bd3c24d46294c44bf37984f9dfeac701f8f2c9971615da65f534c8dd48e82bf (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, io.buildah.version=1.41.3)
Oct 11 02:39:30 compute-0 systemd[1]: Started libpod-conmon-6bd3c24d46294c44bf37984f9dfeac701f8f2c9971615da65f534c8dd48e82bf.scope.
Oct 11 02:39:30 compute-0 podman[453285]: 2025-10-11 02:39:30.164752278 +0000 UTC m=+0.043266711 image pull 1061e4fafe13e0b9aa1ef2c904ba4ad70c44f3e87b1d831f16c6db34937f4022 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Oct 11 02:39:30 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:39:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/7e71b66d8877d7f23d01703ea80d07508b83ac063c4ff5aa8370fc1cdabd3c75/merged/var/lib/neutron supports timestamps until 2038 (0x7fffffff)
Oct 11 02:39:30 compute-0 podman[453285]: 2025-10-11 02:39:30.317174208 +0000 UTC m=+0.195688651 container init 6bd3c24d46294c44bf37984f9dfeac701f8f2c9971615da65f534c8dd48e82bf (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3)
Oct 11 02:39:30 compute-0 podman[453285]: 2025-10-11 02:39:30.326943964 +0000 UTC m=+0.205458377 container start 6bd3c24d46294c44bf37984f9dfeac701f8f2c9971615da65f534c8dd48e82bf (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:39:30 compute-0 neutron-haproxy-ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2[453300]: [NOTICE]   (453304) : New worker (453306) forked
Oct 11 02:39:30 compute-0 neutron-haproxy-ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2[453300]: [NOTICE]   (453304) : Loading success.
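The podman create/init/start sequence above shows the proxy running as a per-network sidecar container named neutron-haproxy-ovnmeta-<network-uuid>; the haproxy NOTICE lines confirm the worker forked and loaded. A hedged one-liner to confirm such a sidecar is running by its naming convention:

```python
import subprocess

NETWORK = "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2"

# Illustrative: list the per-network haproxy sidecar and its status.
subprocess.run(
    ["podman", "ps", "--filter", f"name=neutron-haproxy-ovnmeta-{NETWORK}",
     "--format", "{{.Names}} {{.Status}}"],
    check=False,
)
```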
Oct 11 02:39:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1854: 321 pgs: 321 active+clean; 304 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 175 KiB/s rd, 3.6 MiB/s wr, 87 op/s
Oct 11 02:39:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:39:31 compute-0 openstack_network_exporter[374316]: ERROR   02:39:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:39:31 compute-0 openstack_network_exporter[374316]: ERROR   02:39:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:39:31 compute-0 openstack_network_exporter[374316]: ERROR   02:39:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:39:31 compute-0 openstack_network_exporter[374316]: ERROR   02:39:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:39:31 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:39:31 compute-0 openstack_network_exporter[374316]: ERROR   02:39:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:39:31 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:39:31 compute-0 ceph-mon[191930]: pgmap v1854: 321 pgs: 321 active+clean; 304 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 175 KiB/s rd, 3.6 MiB/s wr, 87 op/s
Oct 11 02:39:32 compute-0 nova_compute[356901]: 2025-10-11 02:39:32.453 2 DEBUG nova.compute.manager [req-fe71dd51-0ead-432f-b264-e424c5010f7c req-3240c116-a095-4e0c-a019-1ee726ae2a52 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Received event network-changed-d7c4233c-f79b-4f32-b896-c36d4abb7d26 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:32 compute-0 nova_compute[356901]: 2025-10-11 02:39:32.453 2 DEBUG nova.compute.manager [req-fe71dd51-0ead-432f-b264-e424c5010f7c req-3240c116-a095-4e0c-a019-1ee726ae2a52 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Refreshing instance network info cache due to event network-changed-d7c4233c-f79b-4f32-b896-c36d4abb7d26. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:39:32 compute-0 nova_compute[356901]: 2025-10-11 02:39:32.454 2 DEBUG oslo_concurrency.lockutils [req-fe71dd51-0ead-432f-b264-e424c5010f7c req-3240c116-a095-4e0c-a019-1ee726ae2a52 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:39:32 compute-0 nova_compute[356901]: 2025-10-11 02:39:32.454 2 DEBUG oslo_concurrency.lockutils [req-fe71dd51-0ead-432f-b264-e424c5010f7c req-3240c116-a095-4e0c-a019-1ee726ae2a52 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:39:32 compute-0 nova_compute[356901]: 2025-10-11 02:39:32.454 2 DEBUG nova.network.neutron [req-fe71dd51-0ead-432f-b264-e424c5010f7c req-3240c116-a095-4e0c-a019-1ee726ae2a52 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Refreshing network info cache for port d7c4233c-f79b-4f32-b896-c36d4abb7d26 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:39:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1855: 321 pgs: 321 active+clean; 304 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 888 KiB/s rd, 1.9 MiB/s wr, 105 op/s
Oct 11 02:39:32 compute-0 ceph-mon[191930]: pgmap v1855: 321 pgs: 321 active+clean; 304 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 888 KiB/s rd, 1.9 MiB/s wr, 105 op/s
Oct 11 02:39:33 compute-0 nova_compute[356901]: 2025-10-11 02:39:33.328 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.645 2 DEBUG nova.compute.manager [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Received event network-vif-plugged-887c6cbc-2d8f-44c3-959f-4c732f5d4040 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.645 2 DEBUG oslo_concurrency.lockutils [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.646 2 DEBUG oslo_concurrency.lockutils [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.646 2 DEBUG oslo_concurrency.lockutils [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.646 2 DEBUG nova.compute.manager [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Processing event network-vif-plugged-887c6cbc-2d8f-44c3-959f-4c732f5d4040 _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10808
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.647 2 DEBUG nova.compute.manager [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Received event network-vif-plugged-887c6cbc-2d8f-44c3-959f-4c732f5d4040 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.647 2 DEBUG oslo_concurrency.lockutils [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.648 2 DEBUG oslo_concurrency.lockutils [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.648 2 DEBUG oslo_concurrency.lockutils [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.648 2 DEBUG nova.compute.manager [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] No waiting events found dispatching network-vif-plugged-887c6cbc-2d8f-44c3-959f-4c732f5d4040 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.649 2 WARNING nova.compute.manager [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Received unexpected event network-vif-plugged-887c6cbc-2d8f-44c3-959f-4c732f5d4040 for instance with vm_state building and task_state spawning.
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.649 2 DEBUG nova.compute.manager [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Received event network-vif-plugged-5cd25b0e-b4c9-408f-b456-59127a046cde external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.649 2 DEBUG oslo_concurrency.lockutils [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "830c7581-3555-41db-9818-0961fc151818-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.650 2 DEBUG oslo_concurrency.lockutils [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "830c7581-3555-41db-9818-0961fc151818-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.650 2 DEBUG oslo_concurrency.lockutils [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "830c7581-3555-41db-9818-0961fc151818-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.650 2 DEBUG nova.compute.manager [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Processing event network-vif-plugged-5cd25b0e-b4c9-408f-b456-59127a046cde _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10808
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.650 2 DEBUG nova.compute.manager [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Received event network-vif-plugged-5cd25b0e-b4c9-408f-b456-59127a046cde external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.651 2 DEBUG oslo_concurrency.lockutils [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "830c7581-3555-41db-9818-0961fc151818-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.651 2 DEBUG oslo_concurrency.lockutils [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "830c7581-3555-41db-9818-0961fc151818-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.651 2 DEBUG oslo_concurrency.lockutils [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "830c7581-3555-41db-9818-0961fc151818-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.652 2 DEBUG nova.compute.manager [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] No waiting events found dispatching network-vif-plugged-5cd25b0e-b4c9-408f-b456-59127a046cde pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.652 2 WARNING nova.compute.manager [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Received unexpected event network-vif-plugged-5cd25b0e-b4c9-408f-b456-59127a046cde for instance with vm_state building and task_state spawning.
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.652 2 DEBUG nova.compute.manager [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Received event network-changed-4076fda2-be62-4c52-b073-8bf26574dee1 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.652 2 DEBUG nova.compute.manager [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Refreshing instance network info cache due to event network-changed-4076fda2-be62-4c52-b073-8bf26574dee1. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.653 2 DEBUG oslo_concurrency.lockutils [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.653 2 DEBUG oslo_concurrency.lockutils [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.654 2 DEBUG nova.network.neutron [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Refreshing network info cache for port 4076fda2-be62-4c52-b073-8bf26574dee1 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.655 2 DEBUG nova.network.neutron [req-fe71dd51-0ead-432f-b264-e424c5010f7c req-3240c116-a095-4e0c-a019-1ee726ae2a52 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Updated VIF entry in instance network info cache for port d7c4233c-f79b-4f32-b896-c36d4abb7d26. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.656 2 DEBUG nova.network.neutron [req-fe71dd51-0ead-432f-b264-e424c5010f7c req-3240c116-a095-4e0c-a019-1ee726ae2a52 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Updating instance_info_cache with network_info: [{"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.187", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.657 2 DEBUG nova.compute.manager [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Instance event wait completed in 7 seconds for network-vif-plugged wait_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:577
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.657 2 DEBUG nova.compute.manager [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Instance event wait completed in 7 seconds for network-vif-plugged wait_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:577
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.679 2 DEBUG oslo_concurrency.lockutils [req-fe71dd51-0ead-432f-b264-e424c5010f7c req-3240c116-a095-4e0c-a019-1ee726ae2a52 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.680 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150374.6791024, ee9601c7-f562-449e-9f5c-5e1355f3c130 => Resumed> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.681 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] VM Resumed (Lifecycle Event)
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.685 2 DEBUG nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Guest created on hypervisor spawn /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4417
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.686 2 DEBUG nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Guest created on hypervisor spawn /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4417
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.693 2 INFO nova.virt.libvirt.driver [-] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Instance spawned successfully.
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.693 2 DEBUG nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Attempting to register defaults for the following image properties: ['hw_cdrom_bus', 'hw_disk_bus', 'hw_input_bus', 'hw_pointer_model', 'hw_video_model', 'hw_vif_model'] _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:917
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.699 2 INFO nova.virt.libvirt.driver [-] [instance: 830c7581-3555-41db-9818-0961fc151818] Instance spawned successfully.
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.699 2 DEBUG nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Attempting to register defaults for the following image properties: ['hw_cdrom_bus', 'hw_disk_bus', 'hw_input_bus', 'hw_pointer_model', 'hw_video_model', 'hw_vif_model'] _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:917
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.712 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.716 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Synchronizing instance power state after lifecycle event "Resumed"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:39:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1856: 321 pgs: 321 active+clean; 304 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 2.9 MiB/s rd, 58 KiB/s wr, 143 op/s
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.737 2 DEBUG nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Found default for hw_cdrom_bus of sata _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.738 2 DEBUG nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Found default for hw_disk_bus of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.738 2 DEBUG nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Found default for hw_input_bus of usb _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.739 2 DEBUG nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Found default for hw_pointer_model of usbtablet _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.739 2 DEBUG nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Found default for hw_video_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.740 2 DEBUG nova.virt.libvirt.driver [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Found default for hw_vif_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.744 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.744 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150374.6792884, 830c7581-3555-41db-9818-0961fc151818 => Resumed> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.745 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 830c7581-3555-41db-9818-0961fc151818] VM Resumed (Lifecycle Event)
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.748 2 DEBUG nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Found default for hw_cdrom_bus of sata _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.749 2 DEBUG nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Found default for hw_disk_bus of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.749 2 DEBUG nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Found default for hw_input_bus of usb _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.750 2 DEBUG nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Found default for hw_pointer_model of usbtablet _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.750 2 DEBUG nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Found default for hw_video_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.751 2 DEBUG nova.virt.libvirt.driver [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Found default for hw_vif_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.788 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 830c7581-3555-41db-9818-0961fc151818] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.794 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 830c7581-3555-41db-9818-0961fc151818] Synchronizing instance power state after lifecycle event "Resumed"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.938 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 830c7581-3555-41db-9818-0961fc151818] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.953 2 INFO nova.compute.manager [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Took 31.57 seconds to spawn the instance on the hypervisor.
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.953 2 DEBUG nova.compute.manager [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.960 2 INFO nova.compute.manager [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Took 33.38 seconds to spawn the instance on the hypervisor.
Oct 11 02:39:34 compute-0 nova_compute[356901]: 2025-10-11 02:39:34.961 2 DEBUG nova.compute.manager [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:35 compute-0 nova_compute[356901]: 2025-10-11 02:39:35.010 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:35 compute-0 nova_compute[356901]: 2025-10-11 02:39:35.046 2 INFO nova.compute.manager [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Took 33.98 seconds to build instance.
Oct 11 02:39:35 compute-0 nova_compute[356901]: 2025-10-11 02:39:35.049 2 INFO nova.compute.manager [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Took 35.30 seconds to build instance.
Oct 11 02:39:35 compute-0 nova_compute[356901]: 2025-10-11 02:39:35.060 2 DEBUG oslo_concurrency.lockutils [None req-aa805dfd-d11e-4643-aec5-38f381ea4e46 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lock "ee9601c7-f562-449e-9f5c-5e1355f3c130" "released" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: held 34.048s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:35 compute-0 nova_compute[356901]: 2025-10-11 02:39:35.077 2 DEBUG oslo_concurrency.lockutils [None req-8dd6ab92-c7c7-4560-868e-abef0d502bf9 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lock "830c7581-3555-41db-9818-0961fc151818" "released" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: held 35.420s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:35 compute-0 ceph-mon[191930]: pgmap v1856: 321 pgs: 321 active+clean; 304 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 2.9 MiB/s rd, 58 KiB/s wr, 143 op/s
Oct 11 02:39:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:39:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1857: 321 pgs: 321 active+clean; 304 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 3.6 MiB/s rd, 58 KiB/s wr, 164 op/s
Oct 11 02:39:36 compute-0 ceph-mon[191930]: pgmap v1857: 321 pgs: 321 active+clean; 304 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 3.6 MiB/s rd, 58 KiB/s wr, 164 op/s
Oct 11 02:39:37 compute-0 nova_compute[356901]: 2025-10-11 02:39:37.480 2 DEBUG nova.network.neutron [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Updated VIF entry in instance network info cache for port 4076fda2-be62-4c52-b073-8bf26574dee1. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:39:37 compute-0 nova_compute[356901]: 2025-10-11 02:39:37.481 2 DEBUG nova.network.neutron [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Updating instance_info_cache with network_info: [{"id": "4076fda2-be62-4c52-b073-8bf26574dee1", "address": "fa:16:3e:c5:9b:82", "network": {"id": "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2", "bridge": "br-int", "label": "tempest-AttachInterfacesUnderV243Test-139612684-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d89911bf2931487c98dc0f44a8b67bca", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap4076fda2-be", "ovs_interfaceid": "4076fda2-be62-4c52-b073-8bf26574dee1", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:39:37 compute-0 nova_compute[356901]: 2025-10-11 02:39:37.502 2 DEBUG oslo_concurrency.lockutils [req-2037e14a-d207-4c87-bf0f-b87f4898a740 req-1af8351b-7222-455b-a1d3-cea54b2627a3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:39:38 compute-0 nova_compute[356901]: 2025-10-11 02:39:38.330 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:38 compute-0 nova_compute[356901]: 2025-10-11 02:39:38.606 2 DEBUG nova.compute.manager [req-a963dca6-332d-4240-8b15-f09b5f73cd34 req-30e1c5f3-0a62-4a5c-8d25-6a1e4287b4c5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Received event network-changed-887c6cbc-2d8f-44c3-959f-4c732f5d4040 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:38 compute-0 nova_compute[356901]: 2025-10-11 02:39:38.607 2 DEBUG nova.compute.manager [req-a963dca6-332d-4240-8b15-f09b5f73cd34 req-30e1c5f3-0a62-4a5c-8d25-6a1e4287b4c5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Refreshing instance network info cache due to event network-changed-887c6cbc-2d8f-44c3-959f-4c732f5d4040. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:39:38 compute-0 nova_compute[356901]: 2025-10-11 02:39:38.607 2 DEBUG oslo_concurrency.lockutils [req-a963dca6-332d-4240-8b15-f09b5f73cd34 req-30e1c5f3-0a62-4a5c-8d25-6a1e4287b4c5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-ee9601c7-f562-449e-9f5c-5e1355f3c130" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:39:38 compute-0 nova_compute[356901]: 2025-10-11 02:39:38.607 2 DEBUG oslo_concurrency.lockutils [req-a963dca6-332d-4240-8b15-f09b5f73cd34 req-30e1c5f3-0a62-4a5c-8d25-6a1e4287b4c5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-ee9601c7-f562-449e-9f5c-5e1355f3c130" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:39:38 compute-0 nova_compute[356901]: 2025-10-11 02:39:38.607 2 DEBUG nova.network.neutron [req-a963dca6-332d-4240-8b15-f09b5f73cd34 req-30e1c5f3-0a62-4a5c-8d25-6a1e4287b4c5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Refreshing network info cache for port 887c6cbc-2d8f-44c3-959f-4c732f5d4040 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:39:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1858: 321 pgs: 321 active+clean; 304 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 4.7 MiB/s rd, 16 KiB/s wr, 193 op/s
Oct 11 02:39:39 compute-0 ceph-mon[191930]: pgmap v1858: 321 pgs: 321 active+clean; 304 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 4.7 MiB/s rd, 16 KiB/s wr, 193 op/s
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.013 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.229 2 DEBUG nova.network.neutron [req-a963dca6-332d-4240-8b15-f09b5f73cd34 req-30e1c5f3-0a62-4a5c-8d25-6a1e4287b4c5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Updated VIF entry in instance network info cache for port 887c6cbc-2d8f-44c3-959f-4c732f5d4040. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.230 2 DEBUG nova.network.neutron [req-a963dca6-332d-4240-8b15-f09b5f73cd34 req-30e1c5f3-0a62-4a5c-8d25-6a1e4287b4c5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Updating instance_info_cache with network_info: [{"id": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "address": "fa:16:3e:3f:e1:d3", "network": {"id": "3e4cd915-df9e-44c4-860d-c0ba25a21e79", "bridge": "br-int", "label": "tempest-ServersTestManualDisk-1152012084-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.9", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.207", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "5d5e8b42281d410bb45cb6c2e8e3fcbd", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap887c6cbc-2d", "ovs_interfaceid": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.250 2 DEBUG oslo_concurrency.lockutils [req-a963dca6-332d-4240-8b15-f09b5f73cd34 req-30e1c5f3-0a62-4a5c-8d25-6a1e4287b4c5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-ee9601c7-f562-449e-9f5c-5e1355f3c130" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.289 2 DEBUG oslo_concurrency.lockutils [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Acquiring lock "ee9601c7-f562-449e-9f5c-5e1355f3c130" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.290 2 DEBUG oslo_concurrency.lockutils [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lock "ee9601c7-f562-449e-9f5c-5e1355f3c130" acquired by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.291 2 DEBUG oslo_concurrency.lockutils [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Acquiring lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.291 2 DEBUG oslo_concurrency.lockutils [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" acquired by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.292 2 DEBUG oslo_concurrency.lockutils [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" "released" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.293 2 INFO nova.compute.manager [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Terminating instance
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.295 2 DEBUG nova.compute.manager [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Start destroying the instance on the hypervisor. _shutdown_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:3120
Oct 11 02:39:40 compute-0 kernel: tap887c6cbc-2d (unregistering): left promiscuous mode
Oct 11 02:39:40 compute-0 NetworkManager[44908]: <info>  [1760150380.3599] device (tap887c6cbc-2d): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Oct 11 02:39:40 compute-0 ovn_controller[88370]: 2025-10-11T02:39:40Z|00096|binding|INFO|Releasing lport 887c6cbc-2d8f-44c3-959f-4c732f5d4040 from this chassis (sb_readonly=0)
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.371 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:40 compute-0 ovn_controller[88370]: 2025-10-11T02:39:40Z|00097|binding|INFO|Setting lport 887c6cbc-2d8f-44c3-959f-4c732f5d4040 down in Southbound
Oct 11 02:39:40 compute-0 ovn_controller[88370]: 2025-10-11T02:39:40Z|00098|binding|INFO|Removing iface tap887c6cbc-2d ovn-installed in OVS
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.387 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.410 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:40 compute-0 systemd[1]: machine-qemu\x2d7\x2dinstance\x2d00000008.scope: Deactivated successfully.
Oct 11 02:39:40 compute-0 systemd[1]: machine-qemu\x2d7\x2dinstance\x2d00000008.scope: Consumed 7.266s CPU time.
Oct 11 02:39:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:40.444 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:3f:e1:d3 10.100.0.9'], port_security=['fa:16:3e:3f:e1:d3 10.100.0.9'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.9/28', 'neutron:device_id': 'ee9601c7-f562-449e-9f5c-5e1355f3c130', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-3e4cd915-df9e-44c4-860d-c0ba25a21e79', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': '5d5e8b42281d410bb45cb6c2e8e3fcbd', 'neutron:revision_number': '4', 'neutron:security_group_ids': '5f6ab5ce-82e9-464a-b706-7e411b991d5a', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:host_id': 'compute-0.ctlplane.example.com', 'neutron:port_fip': '192.168.122.207'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=e1fd6e13-0520-45f1-aea0-ba215ead7c6e, chassis=[], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=887c6cbc-2d8f-44c3-959f-4c732f5d4040) old=Port_Binding(up=[True], chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:39:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:40.446 286362 INFO neutron.agent.ovn.metadata.agent [-] Port 887c6cbc-2d8f-44c3-959f-4c732f5d4040 in datapath 3e4cd915-df9e-44c4-860d-c0ba25a21e79 unbound from our chassis
Oct 11 02:39:40 compute-0 systemd-machined[137586]: Machine qemu-7-instance-00000008 terminated.
Oct 11 02:39:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:40.448 286362 DEBUG neutron.agent.ovn.metadata.agent [-] No valid VIF ports were found for network 3e4cd915-df9e-44c4-860d-c0ba25a21e79, tearing the namespace down if needed _get_provision_params /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:628
Oct 11 02:39:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:40.451 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[058d95d9-5250-4ec8-846a-7f8581442b2a]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:40.452 286362 INFO neutron.agent.ovn.metadata.agent [-] Cleaning up ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79 namespace which is not needed anymore
Oct 11 02:39:40 compute-0 podman[453318]: 2025-10-11 02:39:40.5205546 +0000 UTC m=+0.121292529 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.tags=minimal rhel9, build-date=2025-08-20T13:12:41, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-type=git, version=9.6, container_name=openstack_network_exporter, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.buildah.version=1.33.7, name=ubi9-minimal, vendor=Red Hat, Inc., maintainer=Red Hat, Inc., io.openshift.expose-services=, com.redhat.component=ubi9-minimal-container, distribution-scope=public, url=https://catalog.redhat.com/en/search?searchType=containers, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., architecture=x86_64, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, release=1755695350)
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.544 2 INFO nova.virt.libvirt.driver [-] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Instance destroyed successfully.
Oct 11 02:39:40 compute-0 podman[453319]: 2025-10-11 02:39:40.545979164 +0000 UTC m=+0.146462416 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:39:40 compute-0 podman[453315]: 2025-10-11 02:39:40.547340459 +0000 UTC m=+0.141826164 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, managed_by=edpm_ansible)
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.546 2 DEBUG nova.objects.instance [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lazy-loading 'resources' on Instance uuid ee9601c7-f562-449e-9f5c-5e1355f3c130 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.592 2 DEBUG nova.virt.libvirt.vif [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] vif_type=ovs instance=Instance(access_ip_v4=1.1.1.1,access_ip_v6=::babe:dc0c:1602,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:39:00Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=<?>,disable_terminate=False,display_description='tempest-ServersTestManualDisk-server-1400384832',display_name='tempest-ServersTestManualDisk-server-1400384832',ec2_ids=<?>,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-serverstestmanualdisk-server-1400384832',id=8,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBBL6x/f8Zf/LDDCofajEq020TpmyjW9NO7vA7SAYbWJhZilNkemXvyab+jylvxZHOw0v7ime7uJ1WDC6srAuyI4NaiRkhZgxf6/8nUXEMtEGfOh0ic3nB3uEET9l6hAh+A==',key_name='tempest-keypair-919071895',keypairs=<?>,launch_index=0,launched_at=2025-10-11T02:39:34Z,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={hello='world'},migration_context=<?>,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=<?>,power_state=1,progress=0,project_id='5d5e8b42281d410bb45cb6c2e8e3fcbd',ramdisk_id='',reservation_id='r-8wgd9gbq',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_cdrom_bus='sata',image_hw_disk_bus='virtio',image_hw_input_bus='usb',image_hw_machine_type='q35',image_hw_pointer_model='usbtablet',image_hw_rng_model='virtio',image_hw_video_model='virtio',image_hw_vif_model='virtio',image_min_disk='1',image_min_ram='0',owner_project_name='tempest-ServersTestManualDisk-1790007234',owner_user_name='tempest-ServersTestManualDisk-1790007234-project-member'},tags=<?>,task_state='deleting',terminated_at=None,trusted_certs=<?>,updated_at=2025-10-11T02:39:35Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='5539243c06f64f0694000d9748ff55dd',uuid=ee9601c7-f562-449e-9f5c-5e1355f3c130,vcpu_model=<?>,vcpus=1,vm_mode=None,vm_state='active') vif={"id": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "address": "fa:16:3e:3f:e1:d3", "network": {"id": "3e4cd915-df9e-44c4-860d-c0ba25a21e79", "bridge": "br-int", "label": "tempest-ServersTestManualDisk-1152012084-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.9", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "5d5e8b42281d410bb45cb6c2e8e3fcbd", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap887c6cbc-2d", "ovs_interfaceid": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} unplug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:828
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.595 2 DEBUG nova.network.os_vif_util [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Converting VIF {"id": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "address": "fa:16:3e:3f:e1:d3", "network": {"id": "3e4cd915-df9e-44c4-860d-c0ba25a21e79", "bridge": "br-int", "label": "tempest-ServersTestManualDisk-1152012084-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.9", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "5d5e8b42281d410bb45cb6c2e8e3fcbd", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap887c6cbc-2d", "ovs_interfaceid": "887c6cbc-2d8f-44c3-959f-4c732f5d4040", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.600 2 DEBUG nova.network.os_vif_util [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:3f:e1:d3,bridge_name='br-int',has_traffic_filtering=True,id=887c6cbc-2d8f-44c3-959f-4c732f5d4040,network=Network(3e4cd915-df9e-44c4-860d-c0ba25a21e79),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap887c6cbc-2d') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.600 2 DEBUG os_vif [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Unplugging vif VIFOpenVSwitch(active=False,address=fa:16:3e:3f:e1:d3,bridge_name='br-int',has_traffic_filtering=True,id=887c6cbc-2d8f-44c3-959f-4c732f5d4040,network=Network(3e4cd915-df9e-44c4-860d-c0ba25a21e79),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap887c6cbc-2d') unplug /usr/lib/python3.9/site-packages/os_vif/__init__.py:109
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.602 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.602 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tap887c6cbc-2d, bridge=br-int, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.604 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.606 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.609 2 INFO os_vif [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Successfully unplugged vif VIFOpenVSwitch(active=False,address=fa:16:3e:3f:e1:d3,bridge_name='br-int',has_traffic_filtering=True,id=887c6cbc-2d8f-44c3-959f-4c732f5d4040,network=Network(3e4cd915-df9e-44c4-860d-c0ba25a21e79),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap887c6cbc-2d')
Oct 11 02:39:40 compute-0 neutron-haproxy-ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79[453095]: [NOTICE]   (453099) : haproxy version is 2.8.14-c23fe91
Oct 11 02:39:40 compute-0 neutron-haproxy-ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79[453095]: [NOTICE]   (453099) : path to executable is /usr/sbin/haproxy
Oct 11 02:39:40 compute-0 neutron-haproxy-ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79[453095]: [WARNING]  (453099) : Exiting Master process...
Oct 11 02:39:40 compute-0 neutron-haproxy-ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79[453095]: [ALERT]    (453099) : Current worker (453101) exited with code 143 (Terminated)
Oct 11 02:39:40 compute-0 neutron-haproxy-ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79[453095]: [WARNING]  (453099) : All workers exited. Exiting... (0)
Oct 11 02:39:40 compute-0 systemd[1]: libpod-5fc42075de49ca1453a62e5b97c96c1499a69701f03b6cbf9a2e4cf956b34a5c.scope: Deactivated successfully.
Oct 11 02:39:40 compute-0 podman[453409]: 2025-10-11 02:39:40.692859099 +0000 UTC m=+0.066682822 container died 5fc42075de49ca1453a62e5b97c96c1499a69701f03b6cbf9a2e4cf956b34a5c (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2)
Oct 11 02:39:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1859: 321 pgs: 321 active+clean; 304 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 5.0 MiB/s rd, 15 KiB/s wr, 195 op/s
Oct 11 02:39:40 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-5fc42075de49ca1453a62e5b97c96c1499a69701f03b6cbf9a2e4cf956b34a5c-userdata-shm.mount: Deactivated successfully.
Oct 11 02:39:40 compute-0 systemd[1]: var-lib-containers-storage-overlay-8fd524dc9ed703aec15207e956d562a2002171d78827e03b7deac45167c6950e-merged.mount: Deactivated successfully.
Oct 11 02:39:40 compute-0 podman[453409]: 2025-10-11 02:39:40.756367198 +0000 UTC m=+0.130190921 container cleanup 5fc42075de49ca1453a62e5b97c96c1499a69701f03b6cbf9a2e4cf956b34a5c (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 02:39:40 compute-0 systemd[1]: libpod-conmon-5fc42075de49ca1453a62e5b97c96c1499a69701f03b6cbf9a2e4cf956b34a5c.scope: Deactivated successfully.
Oct 11 02:39:40 compute-0 ceph-mon[191930]: pgmap v1859: 321 pgs: 321 active+clean; 304 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 5.0 MiB/s rd, 15 KiB/s wr, 195 op/s
Oct 11 02:39:40 compute-0 podman[453454]: 2025-10-11 02:39:40.864126612 +0000 UTC m=+0.069272380 container remove 5fc42075de49ca1453a62e5b97c96c1499a69701f03b6cbf9a2e4cf956b34a5c (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.build-date=20251009)
Oct 11 02:39:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:40.882 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[735a599c-67a4-4a48-b8fc-419182668f4c]: (4, ('Sat Oct 11 02:39:40 AM UTC 2025 Stopping container neutron-haproxy-ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79 (5fc42075de49ca1453a62e5b97c96c1499a69701f03b6cbf9a2e4cf956b34a5c)\n5fc42075de49ca1453a62e5b97c96c1499a69701f03b6cbf9a2e4cf956b34a5c\nSat Oct 11 02:39:40 AM UTC 2025 Deleting container neutron-haproxy-ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79 (5fc42075de49ca1453a62e5b97c96c1499a69701f03b6cbf9a2e4cf956b34a5c)\n5fc42075de49ca1453a62e5b97c96c1499a69701f03b6cbf9a2e4cf956b34a5c\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:40.888 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[c13a8060-6e79-4514-a7b0-5cb62572c0a7]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:40.892 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tap3e4cd915-d0, bridge=None, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:40 compute-0 kernel: tap3e4cd915-d0: left promiscuous mode
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.897 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:40.903 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[c9707ebe-a8f1-4228-9ccf-a8a2c756b2a9]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:40 compute-0 nova_compute[356901]: 2025-10-11 02:39:40.919 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:40.925 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[27dda2a9-8b95-4570-ae2a-ba00af63e447]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:40.928 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[9108a672-12b4-4516-b610-4f4f7e9d8986]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:40.946 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[1faa5b22-72a0-4b0a-9d52-22f1f73b5aa5]: (4, [{'family': 0, '__align': (), 'ifi_type': 772, 'index': 1, 'flags': 65609, 'change': 0, 'attrs': [['IFLA_IFNAME', 'lo'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UNKNOWN'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 65536], ['IFLA_MIN_MTU', 0], ['IFLA_MAX_MTU', 0], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 1], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 1], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 0], ['IFLA_CARRIER_UP_COUNT', 0], ['IFLA_CARRIER_DOWN_COUNT', 0], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', '00:00:00:00:00:00'], ['IFLA_BROADCAST', '00:00:00:00:00:00'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 1, 'nopolicy': 1, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 671464, 'reachable_time': 36972, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 65536, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 4294967295, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 4294967295, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 0, 'inoctets': 0, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 0, 'outoctets': 0, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 0, 'outmcastpkts': 0, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 0, 'outmcastoctets': 0, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 0, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 0, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1404, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 453468, 'error': None, 'target': 'ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:40 compute-0 systemd[1]: run-netns-ovnmeta\x2d3e4cd915\x2ddf9e\x2d44c4\x2d860d\x2dc0ba25a21e79.mount: Deactivated successfully.
Oct 11 02:39:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:40.968 286647 DEBUG neutron.privileged.agent.linux.ip_lib [-] Namespace ovnmeta-3e4cd915-df9e-44c4-860d-c0ba25a21e79 deleted. remove_netns /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:607
Oct 11 02:39:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:40.969 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[4d41fccc-10b1-402c-9b1c-ba39aaa7439c]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:39:41 compute-0 nova_compute[356901]: 2025-10-11 02:39:41.286 2 INFO nova.virt.libvirt.driver [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Deleting instance files /var/lib/nova/instances/ee9601c7-f562-449e-9f5c-5e1355f3c130_del
Oct 11 02:39:41 compute-0 nova_compute[356901]: 2025-10-11 02:39:41.288 2 INFO nova.virt.libvirt.driver [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Deletion of /var/lib/nova/instances/ee9601c7-f562-449e-9f5c-5e1355f3c130_del complete
Oct 11 02:39:41 compute-0 nova_compute[356901]: 2025-10-11 02:39:41.381 2 INFO nova.compute.manager [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Took 1.09 seconds to destroy the instance on the hypervisor.
Oct 11 02:39:41 compute-0 nova_compute[356901]: 2025-10-11 02:39:41.381 2 DEBUG oslo.service.loopingcall [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Waiting for function nova.compute.manager.ComputeManager._try_deallocate_network.<locals>._deallocate_network_with_retries to return. func /usr/lib/python3.9/site-packages/oslo_service/loopingcall.py:435
Oct 11 02:39:41 compute-0 nova_compute[356901]: 2025-10-11 02:39:41.382 2 DEBUG nova.compute.manager [-] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Deallocating network for instance _deallocate_network /usr/lib/python3.9/site-packages/nova/compute/manager.py:2259
Oct 11 02:39:41 compute-0 nova_compute[356901]: 2025-10-11 02:39:41.383 2 DEBUG nova.network.neutron [-] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] deallocate_for_instance() deallocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1803
Oct 11 02:39:42 compute-0 nova_compute[356901]: 2025-10-11 02:39:42.579 2 DEBUG nova.compute.manager [req-15a63c16-98e2-4b26-8307-3277352a91f8 req-7d71d9d3-2e70-48b0-9d32-0833024023aa 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Received event network-changed-5cd25b0e-b4c9-408f-b456-59127a046cde external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:42 compute-0 nova_compute[356901]: 2025-10-11 02:39:42.579 2 DEBUG nova.compute.manager [req-15a63c16-98e2-4b26-8307-3277352a91f8 req-7d71d9d3-2e70-48b0-9d32-0833024023aa 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Refreshing instance network info cache due to event network-changed-5cd25b0e-b4c9-408f-b456-59127a046cde. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:39:42 compute-0 nova_compute[356901]: 2025-10-11 02:39:42.579 2 DEBUG oslo_concurrency.lockutils [req-15a63c16-98e2-4b26-8307-3277352a91f8 req-7d71d9d3-2e70-48b0-9d32-0833024023aa 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-830c7581-3555-41db-9818-0961fc151818" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:39:42 compute-0 nova_compute[356901]: 2025-10-11 02:39:42.580 2 DEBUG oslo_concurrency.lockutils [req-15a63c16-98e2-4b26-8307-3277352a91f8 req-7d71d9d3-2e70-48b0-9d32-0833024023aa 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-830c7581-3555-41db-9818-0961fc151818" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:39:42 compute-0 nova_compute[356901]: 2025-10-11 02:39:42.580 2 DEBUG nova.network.neutron [req-15a63c16-98e2-4b26-8307-3277352a91f8 req-7d71d9d3-2e70-48b0-9d32-0833024023aa 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Refreshing network info cache for port 5cd25b0e-b4c9-408f-b456-59127a046cde _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:39:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1860: 321 pgs: 321 active+clean; 280 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 7.5 MiB/s rd, 597 B/s wr, 271 op/s
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.077 2 DEBUG oslo_concurrency.lockutils [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Acquiring lock "830c7581-3555-41db-9818-0961fc151818" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.078 2 DEBUG oslo_concurrency.lockutils [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lock "830c7581-3555-41db-9818-0961fc151818" acquired by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.078 2 DEBUG oslo_concurrency.lockutils [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Acquiring lock "830c7581-3555-41db-9818-0961fc151818-events" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.078 2 DEBUG oslo_concurrency.lockutils [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lock "830c7581-3555-41db-9818-0961fc151818-events" acquired by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.078 2 DEBUG oslo_concurrency.lockutils [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lock "830c7581-3555-41db-9818-0961fc151818-events" "released" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.079 2 INFO nova.compute.manager [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Terminating instance
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.080 2 DEBUG nova.compute.manager [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Start destroying the instance on the hypervisor. _shutdown_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:3120
Oct 11 02:39:43 compute-0 kernel: tap5cd25b0e-b4 (unregistering): left promiscuous mode
Oct 11 02:39:43 compute-0 NetworkManager[44908]: <info>  [1760150383.1521] device (tap5cd25b0e-b4): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Oct 11 02:39:43 compute-0 ovn_controller[88370]: 2025-10-11T02:39:43Z|00099|binding|INFO|Releasing lport 5cd25b0e-b4c9-408f-b456-59127a046cde from this chassis (sb_readonly=0)
Oct 11 02:39:43 compute-0 ovn_controller[88370]: 2025-10-11T02:39:43Z|00100|binding|INFO|Setting lport 5cd25b0e-b4c9-408f-b456-59127a046cde down in Southbound
Oct 11 02:39:43 compute-0 ovn_controller[88370]: 2025-10-11T02:39:43Z|00101|binding|INFO|Removing iface tap5cd25b0e-b4 ovn-installed in OVS
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.170 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:43 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:43.172 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:23:45:c8 10.100.0.14'], port_security=['fa:16:3e:23:45:c8 10.100.0.14'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.14/28', 'neutron:device_id': '830c7581-3555-41db-9818-0961fc151818', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-b6521a4e-cfb9-4743-91c3-85402b5661d9', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': '56e45b830ec844e4802f14cd3e25bda2', 'neutron:revision_number': '4', 'neutron:security_group_ids': 'bbc7e081-070a-4369-95d6-91ae54e98ae0', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:host_id': 'compute-0.ctlplane.example.com', 'neutron:port_fip': '192.168.122.235'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=35e0b6d0-7bf5-489c-89bb-c8a2aaebd2e8, chassis=[], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=5cd25b0e-b4c9-408f-b456-59127a046cde) old=Port_Binding(up=[True], chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:39:43 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:43.173 286362 INFO neutron.agent.ovn.metadata.agent [-] Port 5cd25b0e-b4c9-408f-b456-59127a046cde in datapath b6521a4e-cfb9-4743-91c3-85402b5661d9 unbound from our chassis
Oct 11 02:39:43 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:43.175 286362 DEBUG neutron.agent.ovn.metadata.agent [-] No valid VIF ports were found for network b6521a4e-cfb9-4743-91c3-85402b5661d9, tearing the namespace down if needed _get_provision_params /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:628
Oct 11 02:39:43 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:43.177 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[95abd077-63e2-4884-9e87-99cbd9514116]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:43 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:43.177 286362 INFO neutron.agent.ovn.metadata.agent [-] Cleaning up ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9 namespace which is not needed anymore
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.193 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:43 compute-0 systemd[1]: machine-qemu\x2d8\x2dinstance\x2d00000007.scope: Deactivated successfully.
Oct 11 02:39:43 compute-0 systemd[1]: machine-qemu\x2d8\x2dinstance\x2d00000007.scope: Consumed 9.770s CPU time.
Oct 11 02:39:43 compute-0 systemd-machined[137586]: Machine qemu-8-instance-00000007 terminated.
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.321 2 INFO nova.virt.libvirt.driver [-] [instance: 830c7581-3555-41db-9818-0961fc151818] Instance destroyed successfully.
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.322 2 DEBUG nova.objects.instance [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lazy-loading 'resources' on Instance uuid 830c7581-3555-41db-9818-0961fc151818 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.334 2 DEBUG nova.virt.libvirt.vif [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] vif_type=ovs instance=Instance(access_ip_v4=1.1.1.1,access_ip_v6=::babe:dc0c:1602,architecture=None,auto_disk_config=True,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:38:58Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=<?>,disable_terminate=False,display_description='tempest-ServersTestJSON-server-1595957609',display_name='tempest-ServersTestJSON-server-1595957609',ec2_ids=<?>,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-serverstestjson-server-1595957609',id=7,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBMzf2DYXJJIoJOC2qLbU8VUi6wgx/7Zr7U9fH8e3911FZRdaYVaxRqYR9UTydqtzyDp6Ms2EBS9n6gBBFdtM1Rrxxpe1Vohtnbt7VcSxvjKDQrCZDRyrT/SPrEwf5mXxSQ==',key_name='tempest-keypair-1620851907',keypairs=<?>,launch_index=0,launched_at=2025-10-11T02:39:34Z,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={hello='world'},migration_context=<?>,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=<?>,power_state=1,progress=0,project_id='56e45b830ec844e4802f14cd3e25bda2',ramdisk_id='',reservation_id='r-afe03zy1',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_cdrom_bus='sata',image_hw_disk_bus='virtio',image_hw_input_bus='usb',image_hw_machine_type='q35',image_hw_pointer_model='usbtablet',image_hw_rng_model='virtio',image_hw_video_model='virtio',image_hw_vif_model='virtio',image_min_disk='1',image_min_ram='0',owner_project_name='tempest-ServersTestJSON-2138603110',owner_user_name='tempest-ServersTestJSON-2138603110-project-member'},tags=<?>,task_state='deleting',terminated_at=None,trusted_certs=<?>,updated_at=2025-10-11T02:39:35Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='1b63c9bbae8845d99db73ca671aedcfc',uuid=830c7581-3555-41db-9818-0961fc151818,vcpu_model=<?>,vcpus=1,vm_mode=None,vm_state='active') vif={"id": "5cd25b0e-b4c9-408f-b456-59127a046cde", "address": "fa:16:3e:23:45:c8", "network": {"id": "b6521a4e-cfb9-4743-91c3-85402b5661d9", "bridge": "br-int", "label": "tempest-ServersTestJSON-1548228308-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "56e45b830ec844e4802f14cd3e25bda2", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5cd25b0e-b4", "ovs_interfaceid": "5cd25b0e-b4c9-408f-b456-59127a046cde", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} unplug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:828
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.335 2 DEBUG nova.network.os_vif_util [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Converting VIF {"id": "5cd25b0e-b4c9-408f-b456-59127a046cde", "address": "fa:16:3e:23:45:c8", "network": {"id": "b6521a4e-cfb9-4743-91c3-85402b5661d9", "bridge": "br-int", "label": "tempest-ServersTestJSON-1548228308-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "56e45b830ec844e4802f14cd3e25bda2", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5cd25b0e-b4", "ovs_interfaceid": "5cd25b0e-b4c9-408f-b456-59127a046cde", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.336 2 DEBUG nova.network.os_vif_util [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:23:45:c8,bridge_name='br-int',has_traffic_filtering=True,id=5cd25b0e-b4c9-408f-b456-59127a046cde,network=Network(b6521a4e-cfb9-4743-91c3-85402b5661d9),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap5cd25b0e-b4') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.336 2 DEBUG os_vif [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Unplugging vif VIFOpenVSwitch(active=False,address=fa:16:3e:23:45:c8,bridge_name='br-int',has_traffic_filtering=True,id=5cd25b0e-b4c9-408f-b456-59127a046cde,network=Network(b6521a4e-cfb9-4743-91c3-85402b5661d9),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap5cd25b0e-b4') unplug /usr/lib/python3.9/site-packages/os_vif/__init__.py:109
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.338 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.338 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tap5cd25b0e-b4, bridge=br-int, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:43 compute-0 neutron-haproxy-ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9[453170]: [NOTICE]   (453174) : haproxy version is 2.8.14-c23fe91
Oct 11 02:39:43 compute-0 neutron-haproxy-ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9[453170]: [NOTICE]   (453174) : path to executable is /usr/sbin/haproxy
Oct 11 02:39:43 compute-0 neutron-haproxy-ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9[453170]: [WARNING]  (453174) : Exiting Master process...
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.345 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:39:43 compute-0 neutron-haproxy-ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9[453170]: [ALERT]    (453174) : Current worker (453176) exited with code 143 (Terminated)
Oct 11 02:39:43 compute-0 neutron-haproxy-ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9[453170]: [WARNING]  (453174) : All workers exited. Exiting... (0)
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.350 2 INFO os_vif [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Successfully unplugged vif VIFOpenVSwitch(active=False,address=fa:16:3e:23:45:c8,bridge_name='br-int',has_traffic_filtering=True,id=5cd25b0e-b4c9-408f-b456-59127a046cde,network=Network(b6521a4e-cfb9-4743-91c3-85402b5661d9),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap5cd25b0e-b4')
Oct 11 02:39:43 compute-0 systemd[1]: libpod-aad9edf094ebeefe0b5f0255f6ba206af2d8a33000a12f8444029b4d954981f6.scope: Deactivated successfully.
Oct 11 02:39:43 compute-0 podman[453493]: 2025-10-11 02:39:43.356838986 +0000 UTC m=+0.070482211 container died aad9edf094ebeefe0b5f0255f6ba206af2d8a33000a12f8444029b4d954981f6 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, io.buildah.version=1.41.3)
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.387 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Acquiring lock "5279e85f-e35b-4ddd-8336-7f483712f743" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.387 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lock "5279e85f-e35b-4ddd-8336-7f483712f743" acquired by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:43 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-aad9edf094ebeefe0b5f0255f6ba206af2d8a33000a12f8444029b4d954981f6-userdata-shm.mount: Deactivated successfully.
Oct 11 02:39:43 compute-0 systemd[1]: var-lib-containers-storage-overlay-3ec573959d20c719212b0062191f8fefd2ca6ede5557eec44a1a114ecd9e4f7b-merged.mount: Deactivated successfully.
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.409 2 DEBUG nova.compute.manager [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Starting instance... _do_build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2402
Oct 11 02:39:43 compute-0 podman[453493]: 2025-10-11 02:39:43.414070821 +0000 UTC m=+0.127714016 container cleanup aad9edf094ebeefe0b5f0255f6ba206af2d8a33000a12f8444029b4d954981f6 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3)
Oct 11 02:39:43 compute-0 systemd[1]: libpod-conmon-aad9edf094ebeefe0b5f0255f6ba206af2d8a33000a12f8444029b4d954981f6.scope: Deactivated successfully.
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.505 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.505 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:43 compute-0 podman[453550]: 2025-10-11 02:39:43.516194168 +0000 UTC m=+0.059792403 container remove aad9edf094ebeefe0b5f0255f6ba206af2d8a33000a12f8444029b4d954981f6 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.517 2 DEBUG nova.virt.hardware [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Require both a host and instance NUMA topology to fit instance on host. numa_fit_instance_to_host /usr/lib/python3.9/site-packages/nova/virt/hardware.py:2368
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.518 2 INFO nova.compute.claims [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Claim successful on node compute-0.ctlplane.example.com
Oct 11 02:39:43 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:43.532 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[a45c18c0-046a-49d1-967a-14d34bbaf378]: (4, ('Sat Oct 11 02:39:43 AM UTC 2025 Stopping container neutron-haproxy-ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9 (aad9edf094ebeefe0b5f0255f6ba206af2d8a33000a12f8444029b4d954981f6)\naad9edf094ebeefe0b5f0255f6ba206af2d8a33000a12f8444029b4d954981f6\nSat Oct 11 02:39:43 AM UTC 2025 Deleting container neutron-haproxy-ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9 (aad9edf094ebeefe0b5f0255f6ba206af2d8a33000a12f8444029b4d954981f6)\naad9edf094ebeefe0b5f0255f6ba206af2d8a33000a12f8444029b4d954981f6\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:43 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:43.540 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[de595543-b201-488d-9d24-bb1f26754719]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:43 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:43.541 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapb6521a4e-c0, bridge=None, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:43 compute-0 kernel: tapb6521a4e-c0: left promiscuous mode
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.547 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.562 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.565 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:43 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:43.568 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[a011a3cf-3a19-4b68-a556-fb8feee4eb74]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:43 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:43.596 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[6c6e788d-3fe4-4feb-b510-41a63b947780]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:43 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:43.599 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[e438a1b7-fabe-423c-81b6-a1a1e36cdf6f]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:43 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:43.629 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[1c385633-e38d-41e6-98c5-26efa742f9a9]: (4, [{'family': 0, '__align': (), 'ifi_type': 772, 'index': 1, 'flags': 65609, 'change': 0, 'attrs': [['IFLA_IFNAME', 'lo'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UNKNOWN'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 65536], ['IFLA_MIN_MTU', 0], ['IFLA_MAX_MTU', 0], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 1], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 1], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 0], ['IFLA_CARRIER_UP_COUNT', 0], ['IFLA_CARRIER_DOWN_COUNT', 0], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', '00:00:00:00:00:00'], ['IFLA_BROADCAST', '00:00:00:00:00:00'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 1, 'nopolicy': 1, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 671587, 'reachable_time': 31642, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 65536, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 4294967295, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 4294967295, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 0, 'inoctets': 0, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 0, 'outoctets': 0, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 0, 'outmcastpkts': 0, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 0, 'outmcastoctets': 0, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 0, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 0, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1404, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 453566, 'error': None, 'target': 'ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:43 compute-0 systemd[1]: run-netns-ovnmeta\x2db6521a4e\x2dcfb9\x2d4743\x2d91c3\x2d85402b5661d9.mount: Deactivated successfully.
Oct 11 02:39:43 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:43.638 286647 DEBUG neutron.privileged.agent.linux.ip_lib [-] Namespace ovnmeta-b6521a4e-cfb9-4743-91c3-85402b5661d9 deleted. remove_netns /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:607
Oct 11 02:39:43 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:43.638 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[b2bc8afa-23c6-4d45-9e99-fd17eb719221]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.771 2 DEBUG oslo_concurrency.processutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:43 compute-0 ceph-mon[191930]: pgmap v1860: 321 pgs: 321 active+clean; 280 MiB data, 390 MiB used, 60 GiB / 60 GiB avail; 7.5 MiB/s rd, 597 B/s wr, 271 op/s
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.886 2 DEBUG nova.network.neutron [-] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Updating instance_info_cache with network_info: [] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.920 2 INFO nova.compute.manager [-] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Took 2.54 seconds to deallocate network for instance.
Oct 11 02:39:43 compute-0 nova_compute[356901]: 2025-10-11 02:39:43.979 2 DEBUG oslo_concurrency.lockutils [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.update_usage" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.013 2 INFO nova.virt.libvirt.driver [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Deleting instance files /var/lib/nova/instances/830c7581-3555-41db-9818-0961fc151818_del
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.014 2 INFO nova.virt.libvirt.driver [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Deletion of /var/lib/nova/instances/830c7581-3555-41db-9818-0961fc151818_del complete
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.077 2 INFO nova.compute.manager [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Took 1.00 seconds to destroy the instance on the hypervisor.
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.078 2 DEBUG oslo.service.loopingcall [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Waiting for function nova.compute.manager.ComputeManager._try_deallocate_network.<locals>._deallocate_network_with_retries to return. func /usr/lib/python3.9/site-packages/oslo_service/loopingcall.py:435
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.078 2 DEBUG nova.compute.manager [-] [instance: 830c7581-3555-41db-9818-0961fc151818] Deallocating network for instance _deallocate_network /usr/lib/python3.9/site-packages/nova/compute/manager.py:2259
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.078 2 DEBUG nova.network.neutron [-] [instance: 830c7581-3555-41db-9818-0961fc151818] deallocate_for_instance() deallocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1803
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.197 2 DEBUG nova.network.neutron [req-15a63c16-98e2-4b26-8307-3277352a91f8 req-7d71d9d3-2e70-48b0-9d32-0833024023aa 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Updated VIF entry in instance network info cache for port 5cd25b0e-b4c9-408f-b456-59127a046cde. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.198 2 DEBUG nova.network.neutron [req-15a63c16-98e2-4b26-8307-3277352a91f8 req-7d71d9d3-2e70-48b0-9d32-0833024023aa 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Updating instance_info_cache with network_info: [{"id": "5cd25b0e-b4c9-408f-b456-59127a046cde", "address": "fa:16:3e:23:45:c8", "network": {"id": "b6521a4e-cfb9-4743-91c3-85402b5661d9", "bridge": "br-int", "label": "tempest-ServersTestJSON-1548228308-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.235", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "56e45b830ec844e4802f14cd3e25bda2", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5cd25b0e-b4", "ovs_interfaceid": "5cd25b0e-b4c9-408f-b456-59127a046cde", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:39:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:39:44 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2420225895' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.232 2 DEBUG oslo_concurrency.lockutils [req-15a63c16-98e2-4b26-8307-3277352a91f8 req-7d71d9d3-2e70-48b0-9d32-0833024023aa 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-830c7581-3555-41db-9818-0961fc151818" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.246 2 DEBUG oslo_concurrency.processutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.476s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.255 2 DEBUG nova.compute.provider_tree [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.273 2 DEBUG nova.scheduler.client.report [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.308 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: held 0.803s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.309 2 DEBUG nova.compute.manager [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Start building networks asynchronously for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2799
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.312 2 DEBUG oslo_concurrency.lockutils [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: waited 0.333s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.382 2 DEBUG nova.compute.manager [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Allocating IP information in the background. _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1952
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.382 2 DEBUG nova.network.neutron [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] allocate_for_instance() allocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1156
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.419 2 INFO nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Ignoring supplied device name: /dev/vda. Libvirt can't honour user-supplied dev names
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.435 2 DEBUG nova.compute.manager [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Start building block device mappings for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2834
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.544 2 DEBUG oslo_concurrency.processutils [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.575 2 DEBUG nova.compute.manager [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Start spawning the instance on the hypervisor. _build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2608
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.578 2 DEBUG nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Creating instance directory _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4723
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.579 2 INFO nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Creating image(s)
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.613 2 DEBUG nova.storage.rbd_utils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] rbd image 5279e85f-e35b-4ddd-8336-7f483712f743_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.713 2 DEBUG nova.storage.rbd_utils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] rbd image 5279e85f-e35b-4ddd-8336-7f483712f743_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1861: 321 pgs: 321 active+clean; 248 MiB data, 384 MiB used, 60 GiB / 60 GiB avail; 6.8 MiB/s rd, 597 B/s wr, 257 op/s
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.758 2 DEBUG nova.storage.rbd_utils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] rbd image 5279e85f-e35b-4ddd-8336-7f483712f743_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.765 2 DEBUG oslo_concurrency.processutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.794 2 DEBUG nova.policy [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Policy check for network:attach_external_network failed with credentials {'is_admin': False, 'user_id': '7c06b99eac5242ddb9501f51d87567d2', 'user_domain_id': 'default', 'system_scope': None, 'domain_id': None, 'project_id': '4baea94e1c7d43e699eaac33512a8105', 'project_domain_id': 'default', 'roles': ['member', 'reader'], 'is_admin_project': True, 'service_user_id': None, 'service_user_domain_id': None, 'service_project_id': None, 'service_project_domain_id': None, 'service_roles': []} authorize /usr/lib/python3.9/site-packages/nova/policy.py:203
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.798 2 DEBUG nova.compute.manager [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Received event network-vif-unplugged-887c6cbc-2d8f-44c3-959f-4c732f5d4040 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.799 2 DEBUG oslo_concurrency.lockutils [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.799 2 DEBUG oslo_concurrency.lockutils [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.799 2 DEBUG oslo_concurrency.lockutils [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.800 2 DEBUG nova.compute.manager [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] No waiting events found dispatching network-vif-unplugged-887c6cbc-2d8f-44c3-959f-4c732f5d4040 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.800 2 WARNING nova.compute.manager [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Received unexpected event network-vif-unplugged-887c6cbc-2d8f-44c3-959f-4c732f5d4040 for instance with vm_state deleted and task_state None.
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.800 2 DEBUG nova.compute.manager [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Received event network-vif-plugged-887c6cbc-2d8f-44c3-959f-4c732f5d4040 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.801 2 DEBUG oslo_concurrency.lockutils [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.801 2 DEBUG oslo_concurrency.lockutils [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.801 2 DEBUG oslo_concurrency.lockutils [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "ee9601c7-f562-449e-9f5c-5e1355f3c130-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.802 2 DEBUG nova.compute.manager [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] No waiting events found dispatching network-vif-plugged-887c6cbc-2d8f-44c3-959f-4c732f5d4040 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.802 2 WARNING nova.compute.manager [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Received unexpected event network-vif-plugged-887c6cbc-2d8f-44c3-959f-4c732f5d4040 for instance with vm_state deleted and task_state None.
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.802 2 DEBUG nova.compute.manager [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Received event network-vif-unplugged-5cd25b0e-b4c9-408f-b456-59127a046cde external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.803 2 DEBUG oslo_concurrency.lockutils [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "830c7581-3555-41db-9818-0961fc151818-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.803 2 DEBUG oslo_concurrency.lockutils [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "830c7581-3555-41db-9818-0961fc151818-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.803 2 DEBUG oslo_concurrency.lockutils [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "830c7581-3555-41db-9818-0961fc151818-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.804 2 DEBUG nova.compute.manager [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] No waiting events found dispatching network-vif-unplugged-5cd25b0e-b4c9-408f-b456-59127a046cde pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.804 2 DEBUG nova.compute.manager [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Received event network-vif-unplugged-5cd25b0e-b4c9-408f-b456-59127a046cde for instance with task_state deleting. _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10826
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.804 2 DEBUG nova.compute.manager [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Received event network-vif-plugged-5cd25b0e-b4c9-408f-b456-59127a046cde external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.805 2 DEBUG oslo_concurrency.lockutils [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "830c7581-3555-41db-9818-0961fc151818-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.805 2 DEBUG oslo_concurrency.lockutils [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "830c7581-3555-41db-9818-0961fc151818-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.805 2 DEBUG oslo_concurrency.lockutils [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "830c7581-3555-41db-9818-0961fc151818-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.805 2 DEBUG nova.compute.manager [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] No waiting events found dispatching network-vif-plugged-5cd25b0e-b4c9-408f-b456-59127a046cde pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.806 2 WARNING nova.compute.manager [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Received unexpected event network-vif-plugged-5cd25b0e-b4c9-408f-b456-59127a046cde for instance with vm_state active and task_state deleting.
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.806 2 DEBUG nova.compute.manager [req-f2922bd2-6768-4246-bc9e-f80b293ed922 req-7fdec863-401b-4b47-9edd-27781ed1576d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Received event network-vif-deleted-887c6cbc-2d8f-44c3-959f-4c732f5d4040 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:44 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2420225895' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:44 compute-0 ceph-mon[191930]: pgmap v1861: 321 pgs: 321 active+clean; 248 MiB data, 384 MiB used, 60 GiB / 60 GiB avail; 6.8 MiB/s rd, 597 B/s wr, 257 op/s
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.832 2 DEBUG oslo_concurrency.processutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d --force-share --output=json" returned: 0 in 0.067s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.833 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Acquiring lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.834 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.834 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.869 2 DEBUG nova.storage.rbd_utils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] rbd image 5279e85f-e35b-4ddd-8336-7f483712f743_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:44 compute-0 nova_compute[356901]: 2025-10-11 02:39:44.877 2 DEBUG oslo_concurrency.processutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d 5279e85f-e35b-4ddd-8336-7f483712f743_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.018 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:39:45 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2549386209' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.121 2 DEBUG oslo_concurrency.processutils [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.578s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.131 2 DEBUG nova.compute.provider_tree [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.159 2 DEBUG nova.scheduler.client.report [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.186 2 DEBUG oslo_concurrency.lockutils [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: held 0.874s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.212 2 INFO nova.scheduler.client.report [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Deleted allocations for instance ee9601c7-f562-449e-9f5c-5e1355f3c130
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.255 2 DEBUG oslo_concurrency.processutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d 5279e85f-e35b-4ddd-8336-7f483712f743_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.378s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.301 2 DEBUG oslo_concurrency.lockutils [None req-08f6e837-c617-48de-bcad-a79a02032848 5539243c06f64f0694000d9748ff55dd 5d5e8b42281d410bb45cb6c2e8e3fcbd - - default default] Lock "ee9601c7-f562-449e-9f5c-5e1355f3c130" "released" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: held 5.012s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.362 2 DEBUG nova.storage.rbd_utils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] resizing rbd image 5279e85f-e35b-4ddd-8336-7f483712f743_disk to 1073741824 resize /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:288
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.519 2 DEBUG nova.objects.instance [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lazy-loading 'migration_context' on Instance uuid 5279e85f-e35b-4ddd-8336-7f483712f743 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.534 2 DEBUG nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Created local disks _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4857
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.534 2 DEBUG nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Ensure instance console log exists: /var/lib/nova/instances/5279e85f-e35b-4ddd-8336-7f483712f743/console.log _ensure_console_log_for_instance /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4609
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.535 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Acquiring lock "vgpu_resources" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.535 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lock "vgpu_resources" acquired by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.535 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lock "vgpu_resources" "released" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:45 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2549386209' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.911 2 DEBUG nova.network.neutron [-] [instance: 830c7581-3555-41db-9818-0961fc151818] Updating instance_info_cache with network_info: [] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.948 2 INFO nova.compute.manager [-] [instance: 830c7581-3555-41db-9818-0961fc151818] Took 1.87 seconds to deallocate network for instance.
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.996 2 DEBUG oslo_concurrency.lockutils [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.update_usage" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:45 compute-0 nova_compute[356901]: 2025-10-11 02:39:45.996 2 DEBUG oslo_concurrency.lockutils [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:46 compute-0 nova_compute[356901]: 2025-10-11 02:39:46.042 2 DEBUG nova.network.neutron [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Successfully created port: d690bf02-80b8-4bb8-808f-ccc93f22c545 _create_port_minimal /usr/lib/python3.9/site-packages/nova/network/neutron.py:548
Oct 11 02:39:46 compute-0 nova_compute[356901]: 2025-10-11 02:39:46.163 2 DEBUG oslo_concurrency.processutils [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:39:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:39:46 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2311792433' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:46 compute-0 nova_compute[356901]: 2025-10-11 02:39:46.634 2 DEBUG oslo_concurrency.processutils [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.471s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:46 compute-0 nova_compute[356901]: 2025-10-11 02:39:46.642 2 DEBUG nova.compute.provider_tree [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:39:46 compute-0 nova_compute[356901]: 2025-10-11 02:39:46.664 2 DEBUG nova.scheduler.client.report [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:39:46 compute-0 nova_compute[356901]: 2025-10-11 02:39:46.693 2 DEBUG oslo_concurrency.lockutils [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: held 0.697s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1862: 321 pgs: 321 active+clean; 240 MiB data, 359 MiB used, 60 GiB / 60 GiB avail; 4.8 MiB/s rd, 877 KiB/s wr, 207 op/s
Oct 11 02:39:46 compute-0 nova_compute[356901]: 2025-10-11 02:39:46.741 2 INFO nova.scheduler.client.report [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Deleted allocations for instance 830c7581-3555-41db-9818-0961fc151818
Oct 11 02:39:46 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2311792433' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:39:46 compute-0 ceph-mon[191930]: pgmap v1862: 321 pgs: 321 active+clean; 240 MiB data, 359 MiB used, 60 GiB / 60 GiB avail; 4.8 MiB/s rd, 877 KiB/s wr, 207 op/s
Oct 11 02:39:46 compute-0 nova_compute[356901]: 2025-10-11 02:39:46.836 2 DEBUG oslo_concurrency.lockutils [None req-ff75df3a-50b9-4d66-83d7-169ef408d490 1b63c9bbae8845d99db73ca671aedcfc 56e45b830ec844e4802f14cd3e25bda2 - - default default] Lock "830c7581-3555-41db-9818-0961fc151818" "released" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: held 3.759s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:46 compute-0 nova_compute[356901]: 2025-10-11 02:39:46.892 2 DEBUG nova.compute.manager [req-48b1ee69-ddce-4968-9321-5db5a1f8d3f6 req-0cd773f2-f6a6-4854-a45a-fbd2a2c843e3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 830c7581-3555-41db-9818-0961fc151818] Received event network-vif-deleted-5cd25b0e-b4c9-408f-b456-59127a046cde external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:48 compute-0 podman[453801]: 2025-10-11 02:39:48.253365119 +0000 UTC m=+0.132697745 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, build-date=2024-09-18T21:23:30, com.redhat.component=ubi9-container, release-0.7.12=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.4, config_id=edpm, io.openshift.tags=base rhel9, vcs-type=git, vendor=Red Hat, Inc., io.buildah.version=1.29.0, release=1214.1726694543, architecture=x86_64, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.k8s.display-name=Red Hat Universal Base Image 9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, maintainer=Red Hat, Inc., managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, name=ubi9, container_name=kepler, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543)
Oct 11 02:39:48 compute-0 nova_compute[356901]: 2025-10-11 02:39:48.296 2 DEBUG nova.network.neutron [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Successfully updated port: d690bf02-80b8-4bb8-808f-ccc93f22c545 _update_port /usr/lib/python3.9/site-packages/nova/network/neutron.py:586
Oct 11 02:39:48 compute-0 nova_compute[356901]: 2025-10-11 02:39:48.315 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Acquiring lock "refresh_cache-5279e85f-e35b-4ddd-8336-7f483712f743" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:39:48 compute-0 nova_compute[356901]: 2025-10-11 02:39:48.315 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Acquired lock "refresh_cache-5279e85f-e35b-4ddd-8336-7f483712f743" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:39:48 compute-0 nova_compute[356901]: 2025-10-11 02:39:48.316 2 DEBUG nova.network.neutron [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:39:48 compute-0 nova_compute[356901]: 2025-10-11 02:39:48.342 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:48 compute-0 nova_compute[356901]: 2025-10-11 02:39:48.445 2 DEBUG nova.compute.manager [req-ebfb143c-0241-4f11-959d-debe2c2aa8a8 req-4554faf0-a284-4417-911b-e2cb7cf08928 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Received event network-changed-d690bf02-80b8-4bb8-808f-ccc93f22c545 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:48 compute-0 nova_compute[356901]: 2025-10-11 02:39:48.446 2 DEBUG nova.compute.manager [req-ebfb143c-0241-4f11-959d-debe2c2aa8a8 req-4554faf0-a284-4417-911b-e2cb7cf08928 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Refreshing instance network info cache due to event network-changed-d690bf02-80b8-4bb8-808f-ccc93f22c545. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:39:48 compute-0 nova_compute[356901]: 2025-10-11 02:39:48.446 2 DEBUG oslo_concurrency.lockutils [req-ebfb143c-0241-4f11-959d-debe2c2aa8a8 req-4554faf0-a284-4417-911b-e2cb7cf08928 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-5279e85f-e35b-4ddd-8336-7f483712f743" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:39:48 compute-0 nova_compute[356901]: 2025-10-11 02:39:48.562 2 DEBUG nova.network.neutron [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Instance cache missing network info. _get_preexisting_port_ids /usr/lib/python3.9/site-packages/nova/network/neutron.py:3323
Oct 11 02:39:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1863: 321 pgs: 321 active+clean; 243 MiB data, 363 MiB used, 60 GiB / 60 GiB avail; 4.1 MiB/s rd, 1.5 MiB/s wr, 197 op/s
Oct 11 02:39:49 compute-0 ceph-mon[191930]: pgmap v1863: 321 pgs: 321 active+clean; 243 MiB data, 363 MiB used, 60 GiB / 60 GiB avail; 4.1 MiB/s rd, 1.5 MiB/s wr, 197 op/s
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.033 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.097 2 DEBUG nova.network.neutron [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Updating instance_info_cache with network_info: [{"id": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "address": "fa:16:3e:1e:a3:c2", "network": {"id": "42802124-ba47-4b6e-aa91-ecf257e5a54c", "bridge": "br-int", "label": "tempest-TestServerBasicOps-246725932-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.11", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "4baea94e1c7d43e699eaac33512a8105", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd690bf02-80", "ovs_interfaceid": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.137 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Releasing lock "refresh_cache-5279e85f-e35b-4ddd-8336-7f483712f743" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.138 2 DEBUG nova.compute.manager [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Instance network_info: |[{"id": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "address": "fa:16:3e:1e:a3:c2", "network": {"id": "42802124-ba47-4b6e-aa91-ecf257e5a54c", "bridge": "br-int", "label": "tempest-TestServerBasicOps-246725932-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.11", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "4baea94e1c7d43e699eaac33512a8105", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd690bf02-80", "ovs_interfaceid": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}]| _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1967
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.138 2 DEBUG oslo_concurrency.lockutils [req-ebfb143c-0241-4f11-959d-debe2c2aa8a8 req-4554faf0-a284-4417-911b-e2cb7cf08928 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-5279e85f-e35b-4ddd-8336-7f483712f743" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.138 2 DEBUG nova.network.neutron [req-ebfb143c-0241-4f11-959d-debe2c2aa8a8 req-4554faf0-a284-4417-911b-e2cb7cf08928 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Refreshing network info cache for port d690bf02-80b8-4bb8-808f-ccc93f22c545 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.141 2 DEBUG nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Start _get_guest_xml network_info=[{"id": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "address": "fa:16:3e:1e:a3:c2", "network": {"id": "42802124-ba47-4b6e-aa91-ecf257e5a54c", "bridge": "br-int", "label": "tempest-TestServerBasicOps-246725932-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.11", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "4baea94e1c7d43e699eaac33512a8105", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd690bf02-80", "ovs_interfaceid": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] disk_info={'disk_bus': 'virtio', 'cdrom_bus': 'sata', 'mapping': {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.config': {'bus': 'sata', 'dev': 'sda', 'type': 'cdrom'}}} image_meta=ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:38:04Z,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=0,min_ram=0,name='cirros-0.6.2-x86_64-disk.img',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:38:05Z,virtual_size=<?>,visibility=<?>) rescue=None block_device_info={'root_device_name': '/dev/vda', 'image': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'boot_index': 0, 'device_name': '/dev/vda', 'size': 0, 'encryption_format': None, 'image_id': '72f37f2e-4296-450e-9a12-10717f4ac7dc'}], 'ephemerals': [], 'block_device_mapping': [], 'swap': None} _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7549
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.149 2 WARNING nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.157 2 DEBUG nova.virt.libvirt.host [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V1... _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1653
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.157 2 DEBUG nova.virt.libvirt.host [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] CPU controller missing on host. _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1663
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.163 2 DEBUG nova.virt.libvirt.host [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V2... _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1672
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.164 2 DEBUG nova.virt.libvirt.host [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] CPU controller found on host. _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1679
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.165 2 DEBUG nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] CPU mode 'host-model' models '' was chosen, with extra flags: '' _get_guest_cpu_model_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:5396
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.165 2 DEBUG nova.virt.hardware [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Getting desirable topologies for flavor Flavor(created_at=2025-10-11T02:38:03Z,deleted=False,deleted_at=None,description=None,disabled=False,ephemeral_gb=0,extra_specs={hw_rng:allowed='True'},flavorid='6dff30d1-85df-4e9c-9163-a20ba47bb0c7',id=3,is_public=True,memory_mb=128,name='m1.nano',projects=<?>,root_gb=1,rxtx_factor=1.0,swap=0,updated_at=None,vcpu_weight=0,vcpus=1) and image_meta ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:38:04Z,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=0,min_ram=0,name='cirros-0.6.2-x86_64-disk.img',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:38:05Z,virtual_size=<?>,visibility=<?>), allow threads: True _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:563
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.166 2 DEBUG nova.virt.hardware [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Flavor limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:348
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.166 2 DEBUG nova.virt.hardware [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Image limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:352
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.166 2 DEBUG nova.virt.hardware [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Flavor pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:388
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.166 2 DEBUG nova.virt.hardware [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Image pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:392
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.166 2 DEBUG nova.virt.hardware [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Chose sockets=0, cores=0, threads=0; limits were sockets=65536, cores=65536, threads=65536 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:430
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.166 2 DEBUG nova.virt.hardware [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Topology preferred VirtCPUTopology(cores=0,sockets=0,threads=0), maximum VirtCPUTopology(cores=65536,sockets=65536,threads=65536) _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:569
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.167 2 DEBUG nova.virt.hardware [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Build topologies for 1 vcpu(s) 1:1:1 _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:471
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.167 2 DEBUG nova.virt.hardware [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Got 1 possible topologies _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:501
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.167 2 DEBUG nova.virt.hardware [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Possible topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:575
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.167 2 DEBUG nova.virt.hardware [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Sorted desired topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:577
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.171 2 DEBUG oslo_concurrency.processutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:39:50 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/479148849' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.675 2 DEBUG oslo_concurrency.processutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.504s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.721 2 DEBUG nova.storage.rbd_utils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] rbd image 5279e85f-e35b-4ddd-8336-7f483712f743_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:50 compute-0 nova_compute[356901]: 2025-10-11 02:39:50.732 2 DEBUG oslo_concurrency.processutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1864: 321 pgs: 321 active+clean; 258 MiB data, 363 MiB used, 60 GiB / 60 GiB avail; 3.0 MiB/s rd, 1.8 MiB/s wr, 176 op/s
Oct 11 02:39:50 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/479148849' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:50 compute-0 ceph-mon[191930]: pgmap v1864: 321 pgs: 321 active+clean; 258 MiB data, 363 MiB used, 60 GiB / 60 GiB avail; 3.0 MiB/s rd, 1.8 MiB/s wr, 176 op/s
Oct 11 02:39:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:39:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1677516336' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.269 2 DEBUG oslo_concurrency.processutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.537s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.273 2 DEBUG nova.virt.libvirt.vif [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:39:42Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='tempest-TestServerBasicOps-server-1008904616',display_name='tempest-TestServerBasicOps-server-1008904616',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-testserverbasicops-server-1008904616',id=10,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBOjuzQ3LnonURCqkkZftPTP2CVQZ094fsP/gjb0J12Q/oIXYRgm8hjl34CsIRDDXbEoycnqFNSnY3e5ccNwSkdNNg8FJr2lU+/xGbcHEKbP5d2E6j/rpPqS548dzP5mkuw==',key_name='tempest-TestServerBasicOps-125864562',keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={meta1='data1',meta2='data2',metaN='dataN'},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='4baea94e1c7d43e699eaac33512a8105',ramdisk_id='',reservation_id='r-u1w43ywp',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_hw_rng_model='virtio',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-TestServerBasicOps-633159161',owner_user_name='tempest-TestServerBasicOps-633159161-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:39:44Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='7c06b99eac5242ddb9501f51d87567d2',uuid=5279e85f-e35b-4ddd-8336-7f483712f743,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "address": "fa:16:3e:1e:a3:c2", "network": {"id": "42802124-ba47-4b6e-aa91-ecf257e5a54c", "bridge": "br-int", "label": "tempest-TestServerBasicOps-246725932-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.11", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "4baea94e1c7d43e699eaac33512a8105", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd690bf02-80", "ovs_interfaceid": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} virt_type=kvm get_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:563
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.274 2 DEBUG nova.network.os_vif_util [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Converting VIF {"id": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "address": "fa:16:3e:1e:a3:c2", "network": {"id": "42802124-ba47-4b6e-aa91-ecf257e5a54c", "bridge": "br-int", "label": "tempest-TestServerBasicOps-246725932-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.11", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "4baea94e1c7d43e699eaac33512a8105", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd690bf02-80", "ovs_interfaceid": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.277 2 DEBUG nova.network.os_vif_util [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:1e:a3:c2,bridge_name='br-int',has_traffic_filtering=True,id=d690bf02-80b8-4bb8-808f-ccc93f22c545,network=Network(42802124-ba47-4b6e-aa91-ecf257e5a54c),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd690bf02-80') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.281 2 DEBUG nova.objects.instance [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lazy-loading 'pci_devices' on Instance uuid 5279e85f-e35b-4ddd-8336-7f483712f743 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.312 2 DEBUG nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] End _get_guest_xml xml=<domain type="kvm">
Oct 11 02:39:51 compute-0 nova_compute[356901]:   <uuid>5279e85f-e35b-4ddd-8336-7f483712f743</uuid>
Oct 11 02:39:51 compute-0 nova_compute[356901]:   <name>instance-0000000a</name>
Oct 11 02:39:51 compute-0 nova_compute[356901]:   <memory>131072</memory>
Oct 11 02:39:51 compute-0 nova_compute[356901]:   <vcpu>1</vcpu>
Oct 11 02:39:51 compute-0 nova_compute[356901]:   <metadata>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.1">
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <nova:package version="27.5.2-0.20250829104910.6f8decf.el9"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <nova:name>tempest-TestServerBasicOps-server-1008904616</nova:name>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <nova:creationTime>2025-10-11 02:39:50</nova:creationTime>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <nova:flavor name="m1.nano">
Oct 11 02:39:51 compute-0 nova_compute[356901]:         <nova:memory>128</nova:memory>
Oct 11 02:39:51 compute-0 nova_compute[356901]:         <nova:disk>1</nova:disk>
Oct 11 02:39:51 compute-0 nova_compute[356901]:         <nova:swap>0</nova:swap>
Oct 11 02:39:51 compute-0 nova_compute[356901]:         <nova:ephemeral>0</nova:ephemeral>
Oct 11 02:39:51 compute-0 nova_compute[356901]:         <nova:vcpus>1</nova:vcpus>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       </nova:flavor>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <nova:owner>
Oct 11 02:39:51 compute-0 nova_compute[356901]:         <nova:user uuid="7c06b99eac5242ddb9501f51d87567d2">tempest-TestServerBasicOps-633159161-project-member</nova:user>
Oct 11 02:39:51 compute-0 nova_compute[356901]:         <nova:project uuid="4baea94e1c7d43e699eaac33512a8105">tempest-TestServerBasicOps-633159161</nova:project>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       </nova:owner>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <nova:root type="image" uuid="72f37f2e-4296-450e-9a12-10717f4ac7dc"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <nova:ports>
Oct 11 02:39:51 compute-0 nova_compute[356901]:         <nova:port uuid="d690bf02-80b8-4bb8-808f-ccc93f22c545">
Oct 11 02:39:51 compute-0 nova_compute[356901]:           <nova:ip type="fixed" address="10.100.0.11" ipVersion="4"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:         </nova:port>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       </nova:ports>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     </nova:instance>
Oct 11 02:39:51 compute-0 nova_compute[356901]:   </metadata>
Oct 11 02:39:51 compute-0 nova_compute[356901]:   <sysinfo type="smbios">
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <system>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <entry name="manufacturer">RDO</entry>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <entry name="product">OpenStack Compute</entry>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <entry name="version">27.5.2-0.20250829104910.6f8decf.el9</entry>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <entry name="serial">5279e85f-e35b-4ddd-8336-7f483712f743</entry>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <entry name="uuid">5279e85f-e35b-4ddd-8336-7f483712f743</entry>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <entry name="family">Virtual Machine</entry>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     </system>
Oct 11 02:39:51 compute-0 nova_compute[356901]:   </sysinfo>
Oct 11 02:39:51 compute-0 nova_compute[356901]:   <os>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <type arch="x86_64" machine="q35">hvm</type>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <boot dev="hd"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <smbios mode="sysinfo"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:   </os>
Oct 11 02:39:51 compute-0 nova_compute[356901]:   <features>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <acpi/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <apic/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <vmcoreinfo/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:   </features>
Oct 11 02:39:51 compute-0 nova_compute[356901]:   <clock offset="utc">
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <timer name="pit" tickpolicy="delay"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <timer name="rtc" tickpolicy="catchup"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <timer name="hpet" present="no"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:   </clock>
Oct 11 02:39:51 compute-0 nova_compute[356901]:   <cpu mode="host-model" match="exact">
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <topology sockets="1" cores="1" threads="1"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:39:51 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/5279e85f-e35b-4ddd-8336-7f483712f743_disk">
Oct 11 02:39:51 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       </source>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:39:51 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <target dev="vda" bus="virtio"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <disk type="network" device="cdrom">
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/5279e85f-e35b-4ddd-8336-7f483712f743_disk.config">
Oct 11 02:39:51 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       </source>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:39:51 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <target dev="sda" bus="sata"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <interface type="ethernet">
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <mac address="fa:16:3e:1e:a3:c2"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <driver name="vhost" rx_queue_size="512"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <mtu size="1442"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <target dev="tapd690bf02-80"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <serial type="pty">
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <log file="/var/lib/nova/instances/5279e85f-e35b-4ddd-8336-7f483712f743/console.log" append="off"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     </serial>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <graphics type="vnc" autoport="yes" listen="::0"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <video>
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     </video>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <input type="tablet" bus="usb"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <rng model="virtio">
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <backend model="random">/dev/urandom</backend>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <controller type="usb" index="0"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     <memballoon model="virtio">
Oct 11 02:39:51 compute-0 nova_compute[356901]:       <stats period="10"/>
Oct 11 02:39:51 compute-0 nova_compute[356901]:     </memballoon>
Oct 11 02:39:51 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:39:51 compute-0 nova_compute[356901]: </domain>
Oct 11 02:39:51 compute-0 nova_compute[356901]:  _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7555
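
The dump above is the complete libvirt domain XML nova generated for instance-0000000a: 128 MiB of RAM (the <memory> element is in KiB), one vCPU, an RBD-backed root disk on vda, the config drive attached as a SATA cdrom, and the tap interface that gets plugged below. A small self-contained sketch for pulling the devices out of such a dump with the standard library, assuming the <domain> element has been saved to a file:

    # Sketch: extract devices from a libvirt domain XML like the one
    # logged above. Assumes it was saved to instance-0000000a.xml.
    import xml.etree.ElementTree as ET

    root = ET.parse("instance-0000000a.xml").getroot()

    print("memory (KiB):", root.findtext("memory"))
    for disk in root.iter("disk"):
        src, tgt = disk.find("source"), disk.find("target")
        print("disk:", tgt.get("dev"), "<-", src.get("protocol"), src.get("name"))
    for iface in root.iter("interface"):
        print("nic:", iface.find("mac").get("address"),
              "->", iface.find("target").get("dev"))
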
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.314 2 DEBUG nova.compute.manager [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Preparing to wait for external event network-vif-plugged-d690bf02-80b8-4bb8-808f-ccc93f22c545 prepare_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:283
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.315 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Acquiring lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.316 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" acquired by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.317 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" "released" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
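
The three lockutils lines show the per-instance "-events" lock being taken and released around _create_or_get_event, which is how nova serializes registration of the network-vif-plugged event it is about to wait for. The same oslo.concurrency primitive can be used directly; a minimal sketch with an illustrative lock name:

    # Sketch: the oslo.concurrency locking pattern seen above.
    # The lock name is illustrative, not nova's exact key format.
    from oslo_concurrency import lockutils

    with lockutils.lock("instance-uuid-events"):
        # critical section: create or fetch the pending event record
        pass

    @lockutils.synchronized("instance-uuid-events")
    def create_or_get_event():
        pass
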
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.318 2 DEBUG nova.virt.libvirt.vif [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:39:42Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='tempest-TestServerBasicOps-server-1008904616',display_name='tempest-TestServerBasicOps-server-1008904616',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-testserverbasicops-server-1008904616',id=10,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBOjuzQ3LnonURCqkkZftPTP2CVQZ094fsP/gjb0J12Q/oIXYRgm8hjl34CsIRDDXbEoycnqFNSnY3e5ccNwSkdNNg8FJr2lU+/xGbcHEKbP5d2E6j/rpPqS548dzP5mkuw==',key_name='tempest-TestServerBasicOps-125864562',keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={meta1='data1',meta2='data2',metaN='dataN'},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=PciDeviceList,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='4baea94e1c7d43e699eaac33512a8105',ramdisk_id='',reservation_id='r-u1w43ywp',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_hw_rng_model='virtio',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-TestServerBasicOps-633159161',owner_user_name='tempest-TestServerBasicOps-633159161-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:39:44Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='7c06b99eac5242ddb9501f51d87567d2',uuid=5279e85f-e35b-4ddd-8336-7f483712f743,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "address": "fa:16:3e:1e:a3:c2", "network": {"id": "42802124-ba47-4b6e-aa91-ecf257e5a54c", "bridge": "br-int", "label": "tempest-TestServerBasicOps-246725932-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.11", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "4baea94e1c7d43e699eaac33512a8105", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd690bf02-80", "ovs_interfaceid": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} plug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:710
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.318 2 DEBUG nova.network.os_vif_util [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Converting VIF {"id": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "address": "fa:16:3e:1e:a3:c2", "network": {"id": "42802124-ba47-4b6e-aa91-ecf257e5a54c", "bridge": "br-int", "label": "tempest-TestServerBasicOps-246725932-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.11", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "4baea94e1c7d43e699eaac33512a8105", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd690bf02-80", "ovs_interfaceid": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.319 2 DEBUG nova.network.os_vif_util [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:1e:a3:c2,bridge_name='br-int',has_traffic_filtering=True,id=d690bf02-80b8-4bb8-808f-ccc93f22c545,network=Network(42802124-ba47-4b6e-aa91-ecf257e5a54c),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd690bf02-80') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.321 2 DEBUG os_vif [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Plugging vif VIFOpenVSwitch(active=False,address=fa:16:3e:1e:a3:c2,bridge_name='br-int',has_traffic_filtering=True,id=d690bf02-80b8-4bb8-808f-ccc93f22c545,network=Network(42802124-ba47-4b6e-aa91-ecf257e5a54c),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd690bf02-80') plug /usr/lib/python3.9/site-packages/os_vif/__init__.py:76
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.322 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.322 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddBridgeCommand(_result=None, name=br-int, may_exist=True, datapath_type=system) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.323 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.328 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.328 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapd690bf02-80, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.329 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Interface, record=tapd690bf02-80, col_values=(('external_ids', {'iface-id': 'd690bf02-80b8-4bb8-808f-ccc93f22c545', 'iface-status': 'active', 'attached-mac': 'fa:16:3e:1e:a3:c2', 'vm-uuid': '5279e85f-e35b-4ddd-8336-7f483712f743'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
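
The two transactions above are os-vif plugging the port: an idempotent add of tapd690bf02-80 to br-int, then stamping the Interface's external_ids with the Neutron port UUID, MAC, and instance UUID so ovn-controller can match and claim it. A rough command-line equivalent of the same transaction, expressed through subprocess (assumes ovs-vsctl is installed and can reach the local OVSDB):

    # Sketch: CLI equivalent of the AddPortCommand/DbSetCommand pair
    # above. Assumes ovs-vsctl is available on the host.
    import subprocess

    port = "tapd690bf02-80"
    subprocess.run(
        ["ovs-vsctl", "--may-exist", "add-port", "br-int", port,
         "--", "set", "Interface", port,
         "external_ids:iface-id=d690bf02-80b8-4bb8-808f-ccc93f22c545",
         "external_ids:iface-status=active",
         "external_ids:attached-mac=fa:16:3e:1e:a3:c2",
         "external_ids:vm-uuid=5279e85f-e35b-4ddd-8336-7f483712f743"],
        check=True,
    )
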
Oct 11 02:39:51 compute-0 NetworkManager[44908]: <info>  [1760150391.3332] manager: (tapd690bf02-80): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/54)
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.332 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.338 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.341 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.342 2 INFO os_vif [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Successfully plugged vif VIFOpenVSwitch(active=False,address=fa:16:3e:1e:a3:c2,bridge_name='br-int',has_traffic_filtering=True,id=d690bf02-80b8-4bb8-808f-ccc93f22c545,network=Network(42802124-ba47-4b6e-aa91-ecf257e5a54c),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd690bf02-80')
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.441 2 DEBUG nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] No BDM found with device name vda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.442 2 DEBUG nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] No BDM found with device name sda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.443 2 DEBUG nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] No VIF found with MAC fa:16:3e:1e:a3:c2, not building metadata _build_interface_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12092
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.444 2 INFO nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Using config drive
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.508 2 DEBUG nova.storage.rbd_utils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] rbd image 5279e85f-e35b-4ddd-8336-7f483712f743_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:51 compute-0 ovn_controller[88370]: 2025-10-11T02:39:51Z|00102|binding|INFO|Releasing lport 3233307f-6a7e-4ff6-b881-6d68b60996c3 from this chassis (sb_readonly=0)
Oct 11 02:39:51 compute-0 ovn_controller[88370]: 2025-10-11T02:39:51Z|00103|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:39:51 compute-0 ovn_controller[88370]: 2025-10-11T02:39:51Z|00104|binding|INFO|Releasing lport aa37c6ed-d2db-4ed4-b1c9-cfd071cfd96a from this chassis (sb_readonly=0)
Oct 11 02:39:51 compute-0 nova_compute[356901]: 2025-10-11 02:39:51.703 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:51 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1677516336' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:39:52 compute-0 nova_compute[356901]: 2025-10-11 02:39:52.626 2 DEBUG nova.network.neutron [req-ebfb143c-0241-4f11-959d-debe2c2aa8a8 req-4554faf0-a284-4417-911b-e2cb7cf08928 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Updated VIF entry in instance network info cache for port d690bf02-80b8-4bb8-808f-ccc93f22c545. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:39:52 compute-0 nova_compute[356901]: 2025-10-11 02:39:52.628 2 DEBUG nova.network.neutron [req-ebfb143c-0241-4f11-959d-debe2c2aa8a8 req-4554faf0-a284-4417-911b-e2cb7cf08928 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Updating instance_info_cache with network_info: [{"id": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "address": "fa:16:3e:1e:a3:c2", "network": {"id": "42802124-ba47-4b6e-aa91-ecf257e5a54c", "bridge": "br-int", "label": "tempest-TestServerBasicOps-246725932-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.11", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "4baea94e1c7d43e699eaac33512a8105", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd690bf02-80", "ovs_interfaceid": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:39:52 compute-0 nova_compute[356901]: 2025-10-11 02:39:52.652 2 DEBUG oslo_concurrency.lockutils [req-ebfb143c-0241-4f11-959d-debe2c2aa8a8 req-4554faf0-a284-4417-911b-e2cb7cf08928 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-5279e85f-e35b-4ddd-8336-7f483712f743" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:39:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1865: 321 pgs: 321 active+clean; 258 MiB data, 368 MiB used, 60 GiB / 60 GiB avail; 2.7 MiB/s rd, 1.8 MiB/s wr, 165 op/s
Oct 11 02:39:52 compute-0 ceph-mon[191930]: pgmap v1865: 321 pgs: 321 active+clean; 258 MiB data, 368 MiB used, 60 GiB / 60 GiB avail; 2.7 MiB/s rd, 1.8 MiB/s wr, 165 op/s
Oct 11 02:39:53 compute-0 nova_compute[356901]: 2025-10-11 02:39:53.009 2 INFO nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Creating config drive at /var/lib/nova/instances/5279e85f-e35b-4ddd-8336-7f483712f743/disk.config
Oct 11 02:39:53 compute-0 nova_compute[356901]: 2025-10-11 02:39:53.022 2 DEBUG oslo_concurrency.processutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Running cmd (subprocess): /usr/bin/mkisofs -o /var/lib/nova/instances/5279e85f-e35b-4ddd-8336-7f483712f743/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmp389br7v5 execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:53 compute-0 nova_compute[356901]: 2025-10-11 02:39:53.177 2 DEBUG oslo_concurrency.processutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] CMD "/usr/bin/mkisofs -o /var/lib/nova/instances/5279e85f-e35b-4ddd-8336-7f483712f743/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmp389br7v5" returned: 0 in 0.155s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
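
The mkisofs run builds the config-2 ISO9660 config drive from the staged temporary directory; the publisher string is a single argv element even though processutils prints the command with its spaces unquoted. A sketch of the same invocation with the arguments properly separated (the output path and staging directory below are placeholders):

    # Sketch: the logged config-drive build, as an argv list. Output
    # path and staging directory are placeholders.
    import subprocess

    subprocess.run(
        ["/usr/bin/mkisofs",
         "-o", "disk.config",
         "-ldots", "-allow-lowercase", "-allow-multidot", "-l",
         "-publisher", "OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9",
         "-quiet", "-J", "-r",
         "-V", "config-2",      # volume label the guest looks for
         "/tmp/staged-metadata"],
        check=True,
    )
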
Oct 11 02:39:53 compute-0 nova_compute[356901]: 2025-10-11 02:39:53.237 2 DEBUG nova.storage.rbd_utils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] rbd image 5279e85f-e35b-4ddd-8336-7f483712f743_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:39:53 compute-0 nova_compute[356901]: 2025-10-11 02:39:53.251 2 DEBUG oslo_concurrency.processutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/5279e85f-e35b-4ddd-8336-7f483712f743/disk.config 5279e85f-e35b-4ddd-8336-7f483712f743_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:39:53 compute-0 nova_compute[356901]: 2025-10-11 02:39:53.576 2 DEBUG oslo_concurrency.processutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/5279e85f-e35b-4ddd-8336-7f483712f743/disk.config 5279e85f-e35b-4ddd-8336-7f483712f743_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.325s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:39:53 compute-0 nova_compute[356901]: 2025-10-11 02:39:53.579 2 INFO nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Deleting local config drive /var/lib/nova/instances/5279e85f-e35b-4ddd-8336-7f483712f743/disk.config because it was imported into RBD.
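
After the import the config drive exists only as an RBD image in the vms pool, and the local copy is removed. A sketch for confirming the imported image with the rbd CLI, reusing the client identity and conf path from the log (assumes the rbd client and the matching keyring are present on the host):

    # Sketch: verify the imported config-drive image with the same
    # --id/--conf the log shows. Assumes rbd CLI plus keyring access.
    import subprocess

    image = "5279e85f-e35b-4ddd-8336-7f483712f743_disk.config"
    subprocess.run(
        ["rbd", "info", "--pool", "vms", image,
         "--id", "openstack", "--conf", "/etc/ceph/ceph.conf"],
        check=True,
    )
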
Oct 11 02:39:53 compute-0 kernel: tapd690bf02-80: entered promiscuous mode
Oct 11 02:39:53 compute-0 ovn_controller[88370]: 2025-10-11T02:39:53Z|00105|binding|INFO|Claiming lport d690bf02-80b8-4bb8-808f-ccc93f22c545 for this chassis.
Oct 11 02:39:53 compute-0 nova_compute[356901]: 2025-10-11 02:39:53.693 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:53 compute-0 NetworkManager[44908]: <info>  [1760150393.7002] manager: (tapd690bf02-80): new Tun device (/org/freedesktop/NetworkManager/Devices/55)
Oct 11 02:39:53 compute-0 ovn_controller[88370]: 2025-10-11T02:39:53Z|00106|binding|INFO|d690bf02-80b8-4bb8-808f-ccc93f22c545: Claiming fa:16:3e:1e:a3:c2 10.100.0.11
Oct 11 02:39:53 compute-0 ovn_controller[88370]: 2025-10-11T02:39:53Z|00107|binding|INFO|Setting lport d690bf02-80b8-4bb8-808f-ccc93f22c545 ovn-installed in OVS
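
With the external_ids in place and the tap device now real, ovn-controller claims the lport, programs its flows, and marks the interface ovn-installed. The resulting binding can be read back from the OVN southbound database; a sketch, assuming ovn-sbctl can reach the SB DB from wherever it runs (on this host it lives inside the ovn_controller container, so the bare command is an assumption):

    # Sketch: read back the Port_Binding row for the lport claimed above.
    # Assumes ovn-sbctl has connectivity to the southbound database.
    import subprocess

    subprocess.run(
        ["ovn-sbctl", "find", "Port_Binding",
         "logical_port=d690bf02-80b8-4bb8-808f-ccc93f22c545"],
        check=True,
    )
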
Oct 11 02:39:53 compute-0 nova_compute[356901]: 2025-10-11 02:39:53.730 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:53 compute-0 systemd-udevd[453981]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:39:53 compute-0 systemd-machined[137586]: New machine qemu-10-instance-0000000a.
Oct 11 02:39:53 compute-0 NetworkManager[44908]: <info>  [1760150393.7880] device (tapd690bf02-80): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 02:39:53 compute-0 NetworkManager[44908]: <info>  [1760150393.7921] device (tapd690bf02-80): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Oct 11 02:39:53 compute-0 systemd[1]: Started Virtual Machine qemu-10-instance-0000000a.
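
libvirt has now launched qemu and systemd-machined has registered it as machine qemu-10-instance-0000000a. A quick check of the running guest from the host; assumes machinectl is available and virsh can reach the system libvirt (libvirt runs containerized here, so the bare commands are an assumption):

    # Sketch: confirm the freshly started guest from the host. Assumes
    # machinectl and virsh are available and can reach libvirt.
    import subprocess

    subprocess.run(["machinectl", "list"], check=True)
    subprocess.run(["virsh", "domstate", "instance-0000000a"], check=True)
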
Oct 11 02:39:53 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:53.805 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:1e:a3:c2 10.100.0.11'], port_security=['fa:16:3e:1e:a3:c2 10.100.0.11'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.11/28', 'neutron:device_id': '5279e85f-e35b-4ddd-8336-7f483712f743', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-42802124-ba47-4b6e-aa91-ecf257e5a54c', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': '4baea94e1c7d43e699eaac33512a8105', 'neutron:revision_number': '2', 'neutron:security_group_ids': '296fb7dc-64e4-4a98-a3da-17dca5d61e60 e089678a-fa1c-4efb-93a6-d0762a85e1fe', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=691c8fe6-f576-4439-8b00-8821d031fd8a, chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=d690bf02-80b8-4bb8-808f-ccc93f22c545) old=Port_Binding(chassis=[]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:39:53 compute-0 ovn_controller[88370]: 2025-10-11T02:39:53Z|00108|binding|INFO|Setting lport d690bf02-80b8-4bb8-808f-ccc93f22c545 up in Southbound
Oct 11 02:39:53 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:53.806 286362 INFO neutron.agent.ovn.metadata.agent [-] Port d690bf02-80b8-4bb8-808f-ccc93f22c545 in datapath 42802124-ba47-4b6e-aa91-ecf257e5a54c bound to our chassis
Oct 11 02:39:53 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:53.809 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network 42802124-ba47-4b6e-aa91-ecf257e5a54c
Oct 11 02:39:53 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:53.824 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[82df396e-65aa-4c9a-a741-065c0bc18f22]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:53 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:53.829 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Creating VETH tap42802124-b1 in ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c namespace provision_datapath /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:665
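
Provisioning metadata for the datapath means building an ovnmeta- network namespace, wiring a veth pair into br-int (tap42802124-b0 on the bridge side, -b1 inside the namespace), and then spawning the per-network haproxy whose config is dumped further below. Once the lines that follow complete, the namespace side can be inspected directly (requires root; the namespace name is "ovnmeta-" plus the network UUID):

    # Sketch: inspect the metadata namespace created for this network.
    # Requires root on the compute host.
    import subprocess

    ns = "ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c"
    subprocess.run(["ip", "netns", "exec", ns, "ip", "addr"], check=True)
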
Oct 11 02:39:53 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:53.831 422955 DEBUG neutron.privileged.agent.linux.ip_lib [-] Interface tap42802124-b0 not found in namespace None get_link_id /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:204
Oct 11 02:39:53 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:53.831 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[69ee1c98-cd47-4588-8f34-84a935c7f431]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:53 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:53.834 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[7b4c637d-63e9-4942-8e8b-39ece5b74841]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:53 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:53.852 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[d5e6ebfd-9037-4a7a-8ee6-31ca499d1885]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:53 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:53.884 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[06453e2f-0722-4638-a93f-390dd65fc4a4]: (4, ('net.ipv4.conf.all.promote_secondaries = 1\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:53 compute-0 podman[453958]: 2025-10-11 02:39:53.898131282 +0000 UTC m=+0.146766298 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, config_id=edpm, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:39:53 compute-0 podman[453956]: 2025-10-11 02:39:53.90140094 +0000 UTC m=+0.154626068 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:39:53 compute-0 podman[453959]: 2025-10-11 02:39:53.913393048 +0000 UTC m=+0.153501492 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:39:53 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:53.921 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[a864f98a-c207-4f25-b9f6-137ce1a50ac7]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:53 compute-0 NetworkManager[44908]: <info>  [1760150393.9479] manager: (tap42802124-b0): new Veth device (/org/freedesktop/NetworkManager/Devices/56)
Oct 11 02:39:53 compute-0 systemd-udevd[453990]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:39:53 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:53.948 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[e56483cf-3fc6-4c39-bee4-2c04da93183d]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:53 compute-0 podman[453957]: 2025-10-11 02:39:53.967156696 +0000 UTC m=+0.217715913 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_managed=true, config_id=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, managed_by=edpm_ansible)
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.000 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[c0b2da70-0c6d-4e4e-9395-daf77525b31a]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.004 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[c7ea940b-eec4-4b70-80c0-4336368177c5]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:54 compute-0 NetworkManager[44908]: <info>  [1760150394.0320] device (tap42802124-b0): carrier: link connected
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.041 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[a7950464-7e85-498f-9fb9-e85211d52a9e]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.062 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[cebd7152-1880-41fb-94ae-71ee689a40ba]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tap42802124-b1'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:13:b9:1d'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 33], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 674175, 'reachable_time': 20915, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 454075, 'error': None, 'target': 'ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.098 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[c8838100-20bc-4f8e-9bc6-d81720b894d1]: (4, ({'family': 10, 'prefixlen': 64, 'flags': 192, 'scope': 253, 'index': 2, 'attrs': [['IFA_ADDRESS', 'fe80::f816:3eff:fe13:b91d'], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 674175, 'tstamp': 674175}], ['IFA_FLAGS', 192]], 'header': {'length': 72, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 454076, 'error': None, 'target': 'ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'},)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.125 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[90dca6a9-7014-4ab7-afb9-22bbe0e18593]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tap42802124-b1'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:13:b9:1d'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 33], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 674175, 'reachable_time': 20915, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 0, 'sequence_number': 255, 'pid': 454077, 'error': None, 'target': 'ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.183 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[e26fa883-d6fd-4142-919f-d7570b19297c]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.251 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[bebdfdc5-094a-4c89-ab4b-93959cb9f973]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.252 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tap42802124-b0, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.253 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:39:54 compute-0 ovn_controller[88370]: 2025-10-11T02:39:54Z|00109|binding|INFO|Releasing lport 3233307f-6a7e-4ff6-b881-6d68b60996c3 from this chassis (sb_readonly=0)
Oct 11 02:39:54 compute-0 ovn_controller[88370]: 2025-10-11T02:39:54Z|00110|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:39:54 compute-0 ovn_controller[88370]: 2025-10-11T02:39:54Z|00111|binding|INFO|Releasing lport aa37c6ed-d2db-4ed4-b1c9-cfd071cfd96a from this chassis (sb_readonly=0)
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.253 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tap42802124-b0, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:54 compute-0 NetworkManager[44908]: <info>  [1760150394.2570] manager: (tap42802124-b0): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/57)
Oct 11 02:39:54 compute-0 kernel: tap42802124-b0: entered promiscuous mode
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.260 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tap42802124-b0, col_values=(('external_ids', {'iface-id': '896fe5e8-8895-492a-9e5f-23d2477d5716'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.265 286362 DEBUG neutron.agent.linux.utils [-] Unable to access /var/lib/neutron/external/pids/42802124-ba47-4b6e-aa91-ecf257e5a54c.pid.haproxy; Error: [Errno 2] No such file or directory: '/var/lib/neutron/external/pids/42802124-ba47-4b6e-aa91-ecf257e5a54c.pid.haproxy' get_value_from_file /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:252
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.266 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[133b01cf-7823-4794-9f74-e130791f1997]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.267 286362 DEBUG neutron.agent.ovn.metadata.driver [-] haproxy_cfg = 
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: global
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     log         /dev/log local0 debug
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     log-tag     haproxy-metadata-proxy-42802124-ba47-4b6e-aa91-ecf257e5a54c
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     user        root
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     group       root
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     maxconn     1024
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     pidfile     /var/lib/neutron/external/pids/42802124-ba47-4b6e-aa91-ecf257e5a54c.pid.haproxy
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     daemon
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: defaults
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     log global
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     mode http
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     option httplog
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     option dontlognull
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     option http-server-close
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     option forwardfor
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     retries                 3
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     timeout http-request    30s
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     timeout connect         30s
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     timeout client          32s
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     timeout server          32s
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     timeout http-keep-alive 30s
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: listen listener
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     bind 169.254.169.254:80
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     server metadata /var/lib/neutron/metadata_proxy
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:     http-request add-header X-OVN-Network-ID 42802124-ba47-4b6e-aa91-ecf257e5a54c
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]:  create_config_file /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/driver.py:107
Oct 11 02:39:54 compute-0 nova_compute[356901]: 2025-10-11 02:39:54.267 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.267 286362 DEBUG neutron.agent.linux.utils [-] Running command: ['sudo', 'neutron-rootwrap', '/etc/neutron/rootwrap.conf', 'ip', 'netns', 'exec', 'ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c', 'env', 'PROCESS_TAG=haproxy-42802124-ba47-4b6e-aa91-ecf257e5a54c', 'haproxy', '-f', '/var/lib/neutron/ovn-metadata-proxy/42802124-ba47-4b6e-aa91-ecf257e5a54c.conf'] create_process /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:84
Oct 11 02:39:54 compute-0 ovn_controller[88370]: 2025-10-11T02:39:54Z|00112|binding|INFO|Releasing lport 896fe5e8-8895-492a-9e5f-23d2477d5716 from this chassis (sb_readonly=0)
Oct 11 02:39:54 compute-0 nova_compute[356901]: 2025-10-11 02:39:54.309 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:54 compute-0 nova_compute[356901]: 2025-10-11 02:39:54.319 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:54 compute-0 nova_compute[356901]: 2025-10-11 02:39:54.504 2 DEBUG nova.compute.manager [req-68c33765-33b2-43eb-abf5-6e06273cc965 req-1e301989-9047-4f34-b340-46741d024c3c 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Received event network-vif-plugged-d690bf02-80b8-4bb8-808f-ccc93f22c545 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:54 compute-0 nova_compute[356901]: 2025-10-11 02:39:54.505 2 DEBUG oslo_concurrency.lockutils [req-68c33765-33b2-43eb-abf5-6e06273cc965 req-1e301989-9047-4f34-b340-46741d024c3c 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:54 compute-0 nova_compute[356901]: 2025-10-11 02:39:54.506 2 DEBUG oslo_concurrency.lockutils [req-68c33765-33b2-43eb-abf5-6e06273cc965 req-1e301989-9047-4f34-b340-46741d024c3c 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:54 compute-0 nova_compute[356901]: 2025-10-11 02:39:54.507 2 DEBUG oslo_concurrency.lockutils [req-68c33765-33b2-43eb-abf5-6e06273cc965 req-1e301989-9047-4f34-b340-46741d024c3c 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:54 compute-0 nova_compute[356901]: 2025-10-11 02:39:54.507 2 DEBUG nova.compute.manager [req-68c33765-33b2-43eb-abf5-6e06273cc965 req-1e301989-9047-4f34-b340-46741d024c3c 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Processing event network-vif-plugged-d690bf02-80b8-4bb8-808f-ccc93f22c545 _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10808
Oct 11 02:39:54 compute-0 podman[454151]: 2025-10-11 02:39:54.739077165 +0000 UTC m=+0.085187925 container create 4286e5f3292bbeb655ac3c1bc515f36eda23a9dac6671a880e42a0335a35c9f3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.build-date=20251009)
Oct 11 02:39:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1866: 321 pgs: 321 active+clean; 258 MiB data, 368 MiB used, 60 GiB / 60 GiB avail; 57 KiB/s rd, 1.8 MiB/s wr, 69 op/s
Oct 11 02:39:54 compute-0 podman[454151]: 2025-10-11 02:39:54.692706182 +0000 UTC m=+0.038816972 image pull 1061e4fafe13e0b9aa1ef2c904ba4ad70c44f3e87b1d831f16c6db34937f4022 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Oct 11 02:39:54 compute-0 systemd[1]: Started libpod-conmon-4286e5f3292bbeb655ac3c1bc515f36eda23a9dac6671a880e42a0335a35c9f3.scope.
Oct 11 02:39:54 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:39:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/356334884cd4355d05eb94a623c582f2a6197aa2e36e3e9a4b41331a632e46e6/merged/var/lib/neutron supports timestamps until 2038 (0x7fffffff)
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.867 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.868 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:39:54.870 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:54 compute-0 podman[454151]: 2025-10-11 02:39:54.870852442 +0000 UTC m=+0.216963242 container init 4286e5f3292bbeb655ac3c1bc515f36eda23a9dac6671a880e42a0335a35c9f3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:39:54 compute-0 podman[454151]: 2025-10-11 02:39:54.881298664 +0000 UTC m=+0.227409424 container start 4286e5f3292bbeb655ac3c1bc515f36eda23a9dac6671a880e42a0335a35c9f3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3)
Oct 11 02:39:54 compute-0 neutron-haproxy-ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c[454166]: [NOTICE]   (454170) : New worker (454172) forked
Oct 11 02:39:54 compute-0 neutron-haproxy-ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c[454166]: [NOTICE]   (454170) : Loading success.
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.025 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.304 2 DEBUG nova.compute.manager [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Instance event wait completed in 0 seconds for network-vif-plugged wait_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:577
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.306 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150395.303554, 5279e85f-e35b-4ddd-8336-7f483712f743 => Started> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.307 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] VM Started (Lifecycle Event)
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.318 2 DEBUG nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Guest created on hypervisor spawn /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4417
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.324 2 INFO nova.virt.libvirt.driver [-] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Instance spawned successfully.
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.325 2 DEBUG nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Attempting to register defaults for the following image properties: ['hw_cdrom_bus', 'hw_disk_bus', 'hw_input_bus', 'hw_pointer_model', 'hw_video_model', 'hw_vif_model'] _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:917
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.351 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.367 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Synchronizing instance power state after lifecycle event "Started"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.374 2 DEBUG nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Found default for hw_cdrom_bus of sata _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.375 2 DEBUG nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Found default for hw_disk_bus of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.376 2 DEBUG nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Found default for hw_input_bus of usb _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.377 2 DEBUG nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Found default for hw_pointer_model of usbtablet _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.378 2 DEBUG nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Found default for hw_video_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.379 2 DEBUG nova.virt.libvirt.driver [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Found default for hw_vif_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.428 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.429 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150395.3036616, 5279e85f-e35b-4ddd-8336-7f483712f743 => Paused> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.430 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] VM Paused (Lifecycle Event)
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.469 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.477 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150395.3138103, 5279e85f-e35b-4ddd-8336-7f483712f743 => Resumed> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.478 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] VM Resumed (Lifecycle Event)
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.496 2 INFO nova.compute.manager [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Took 10.92 seconds to spawn the instance on the hypervisor.
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.497 2 DEBUG nova.compute.manager [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.511 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.517 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Synchronizing instance power state after lifecycle event "Resumed"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.540 2 DEBUG nova.virt.driver [-] Emitting event <LifecycleEvent: 1760150380.5394692, ee9601c7-f562-449e-9f5c-5e1355f3c130 => Stopped> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.540 2 INFO nova.compute.manager [-] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] VM Stopped (Lifecycle Event)
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.560 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.570 2 DEBUG nova.compute.manager [None req-a2c1ad8b-cd83-4ea9-b2f3-f54bc7d5af28 - - - - - -] [instance: ee9601c7-f562-449e-9f5c-5e1355f3c130] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.585 2 INFO nova.compute.manager [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Took 12.11 seconds to build instance.
Oct 11 02:39:55 compute-0 nova_compute[356901]: 2025-10-11 02:39:55.620 2 DEBUG oslo_concurrency.lockutils [None req-4d5721f6-7f68-4825-9d2e-b9182a46f33a 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lock "5279e85f-e35b-4ddd-8336-7f483712f743" "released" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: held 12.233s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:55 compute-0 ceph-mon[191930]: pgmap v1866: 321 pgs: 321 active+clean; 258 MiB data, 368 MiB used, 60 GiB / 60 GiB avail; 57 KiB/s rd, 1.8 MiB/s wr, 69 op/s
Oct 11 02:39:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:39:56 compute-0 nova_compute[356901]: 2025-10-11 02:39:56.334 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:39:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:39:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:39:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:39:56
Oct 11 02:39:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:39:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:39:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.meta', 'images', 'cephfs.cephfs.data', 'backups', 'cephfs.cephfs.meta', 'vms', 'volumes', 'default.rgw.log', 'default.rgw.control', '.mgr', '.rgw.root']
Oct 11 02:39:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:39:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:39:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:39:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:39:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:39:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1867: 321 pgs: 321 active+clean; 258 MiB data, 368 MiB used, 60 GiB / 60 GiB avail; 34 KiB/s rd, 1.8 MiB/s wr, 52 op/s
Oct 11 02:39:56 compute-0 nova_compute[356901]: 2025-10-11 02:39:56.792 2 DEBUG nova.compute.manager [req-596d2aee-6d91-4a3d-80b7-3baba9f6e22f req-6a9a80c2-f1d9-4f15-8df4-00fa2906b2a5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Received event network-vif-plugged-d690bf02-80b8-4bb8-808f-ccc93f22c545 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:39:56 compute-0 nova_compute[356901]: 2025-10-11 02:39:56.793 2 DEBUG oslo_concurrency.lockutils [req-596d2aee-6d91-4a3d-80b7-3baba9f6e22f req-6a9a80c2-f1d9-4f15-8df4-00fa2906b2a5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:39:56 compute-0 nova_compute[356901]: 2025-10-11 02:39:56.793 2 DEBUG oslo_concurrency.lockutils [req-596d2aee-6d91-4a3d-80b7-3baba9f6e22f req-6a9a80c2-f1d9-4f15-8df4-00fa2906b2a5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:39:56 compute-0 nova_compute[356901]: 2025-10-11 02:39:56.794 2 DEBUG oslo_concurrency.lockutils [req-596d2aee-6d91-4a3d-80b7-3baba9f6e22f req-6a9a80c2-f1d9-4f15-8df4-00fa2906b2a5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:39:56 compute-0 nova_compute[356901]: 2025-10-11 02:39:56.794 2 DEBUG nova.compute.manager [req-596d2aee-6d91-4a3d-80b7-3baba9f6e22f req-6a9a80c2-f1d9-4f15-8df4-00fa2906b2a5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] No waiting events found dispatching network-vif-plugged-d690bf02-80b8-4bb8-808f-ccc93f22c545 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:39:56 compute-0 nova_compute[356901]: 2025-10-11 02:39:56.795 2 WARNING nova.compute.manager [req-596d2aee-6d91-4a3d-80b7-3baba9f6e22f req-6a9a80c2-f1d9-4f15-8df4-00fa2906b2a5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Received unexpected event network-vif-plugged-d690bf02-80b8-4bb8-808f-ccc93f22c545 for instance with vm_state active and task_state None.
Oct 11 02:39:56 compute-0 ceph-mon[191930]: pgmap v1867: 321 pgs: 321 active+clean; 258 MiB data, 368 MiB used, 60 GiB / 60 GiB avail; 34 KiB/s rd, 1.8 MiB/s wr, 52 op/s
Oct 11 02:39:56 compute-0 nova_compute[356901]: 2025-10-11 02:39:56.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:39:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:39:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:39:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:39:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:39:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:39:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:39:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:39:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:39:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:39:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:39:58 compute-0 nova_compute[356901]: 2025-10-11 02:39:58.320 2 DEBUG nova.virt.driver [-] Emitting event <LifecycleEvent: 1760150383.3183992, 830c7581-3555-41db-9818-0961fc151818 => Stopped> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:39:58 compute-0 nova_compute[356901]: 2025-10-11 02:39:58.325 2 INFO nova.compute.manager [-] [instance: 830c7581-3555-41db-9818-0961fc151818] VM Stopped (Lifecycle Event)
Oct 11 02:39:58 compute-0 nova_compute[356901]: 2025-10-11 02:39:58.358 2 DEBUG nova.compute.manager [None req-4f3c1d5d-0ae2-4ea9-ad24-b23b12f57779 - - - - - -] [instance: 830c7581-3555-41db-9818-0961fc151818] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:39:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1868: 321 pgs: 321 active+clean; 258 MiB data, 368 MiB used, 60 GiB / 60 GiB avail; 475 KiB/s rd, 954 KiB/s wr, 56 op/s
Oct 11 02:39:58 compute-0 ceph-mon[191930]: pgmap v1868: 321 pgs: 321 active+clean; 258 MiB data, 368 MiB used, 60 GiB / 60 GiB avail; 475 KiB/s rd, 954 KiB/s wr, 56 op/s
Oct 11 02:39:59 compute-0 podman[157119]: time="2025-10-11T02:39:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:39:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:39:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 49967 "" "Go-http-client/1.1"
Oct 11 02:39:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:39:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 10450 "" "Go-http-client/1.1"
Oct 11 02:40:00 compute-0 nova_compute[356901]: 2025-10-11 02:40:00.027 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:00 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:00.099 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=12, options={'arp_ns_explicit_output': 'true', 'mac_prefix': 'fe:55:97', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'ce:9c:4f:b4:85:9b'}, ipsec=False) old=SB_Global(nb_cfg=11) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:40:00 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:00.100 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 6 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274
Oct 11 02:40:00 compute-0 nova_compute[356901]: 2025-10-11 02:40:00.109 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:00 compute-0 sudo[454182]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:40:00 compute-0 sudo[454182]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:00 compute-0 sudo[454182]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:00 compute-0 podman[454181]: 2025-10-11 02:40:00.249879685 +0000 UTC m=+0.128849942 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd)
Oct 11 02:40:00 compute-0 podman[454183]: 2025-10-11 02:40:00.251714088 +0000 UTC m=+0.115458923 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_managed=true, io.buildah.version=1.41.3, config_id=iscsid, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:40:00 compute-0 sudo[454241]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:40:00 compute-0 sudo[454241]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:00 compute-0 sudo[454241]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:00 compute-0 nova_compute[356901]: 2025-10-11 02:40:00.362 2 DEBUG nova.compute.manager [req-b776bc2d-d05d-43b8-9b4e-6c6d06c43802 req-c962eaa9-8434-4ac6-885b-acb00d0ae6de 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Received event network-changed-d690bf02-80b8-4bb8-808f-ccc93f22c545 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:40:00 compute-0 nova_compute[356901]: 2025-10-11 02:40:00.366 2 DEBUG nova.compute.manager [req-b776bc2d-d05d-43b8-9b4e-6c6d06c43802 req-c962eaa9-8434-4ac6-885b-acb00d0ae6de 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Refreshing instance network info cache due to event network-changed-d690bf02-80b8-4bb8-808f-ccc93f22c545. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:40:00 compute-0 nova_compute[356901]: 2025-10-11 02:40:00.368 2 DEBUG oslo_concurrency.lockutils [req-b776bc2d-d05d-43b8-9b4e-6c6d06c43802 req-c962eaa9-8434-4ac6-885b-acb00d0ae6de 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-5279e85f-e35b-4ddd-8336-7f483712f743" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:40:00 compute-0 nova_compute[356901]: 2025-10-11 02:40:00.369 2 DEBUG oslo_concurrency.lockutils [req-b776bc2d-d05d-43b8-9b4e-6c6d06c43802 req-c962eaa9-8434-4ac6-885b-acb00d0ae6de 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-5279e85f-e35b-4ddd-8336-7f483712f743" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:40:00 compute-0 nova_compute[356901]: 2025-10-11 02:40:00.370 2 DEBUG nova.network.neutron [req-b776bc2d-d05d-43b8-9b4e-6c6d06c43802 req-c962eaa9-8434-4ac6-885b-acb00d0ae6de 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Refreshing network info cache for port d690bf02-80b8-4bb8-808f-ccc93f22c545 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:40:00 compute-0 sudo[454267]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:40:00 compute-0 sudo[454267]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:00 compute-0 sudo[454267]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:00 compute-0 sudo[454292]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ls
Oct 11 02:40:00 compute-0 sudo[454292]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1869: 321 pgs: 321 active+clean; 258 MiB data, 368 MiB used, 60 GiB / 60 GiB avail; 779 KiB/s rd, 270 KiB/s wr, 52 op/s
Oct 11 02:40:00 compute-0 ceph-mon[191930]: pgmap v1869: 321 pgs: 321 active+clean; 258 MiB data, 368 MiB used, 60 GiB / 60 GiB avail; 779 KiB/s rd, 270 KiB/s wr, 52 op/s
Oct 11 02:40:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:40:01 compute-0 nova_compute[356901]: 2025-10-11 02:40:01.340 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:01 compute-0 podman[454387]: 2025-10-11 02:40:01.372072966 +0000 UTC m=+0.137278575 container exec ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:40:01 compute-0 openstack_network_exporter[374316]: ERROR   02:40:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:40:01 compute-0 openstack_network_exporter[374316]: ERROR   02:40:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:40:01 compute-0 openstack_network_exporter[374316]: ERROR   02:40:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:40:01 compute-0 openstack_network_exporter[374316]: ERROR   02:40:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:40:01 compute-0 openstack_network_exporter[374316]: ERROR   02:40:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:40:01 compute-0 podman[454387]: 2025-10-11 02:40:01.473079469 +0000 UTC m=+0.238285068 container exec_died ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:40:02 compute-0 sudo[454292]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:40:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:40:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:40:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1870: 321 pgs: 321 active+clean; 258 MiB data, 368 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 14 KiB/s wr, 65 op/s
Oct 11 02:40:02 compute-0 nova_compute[356901]: 2025-10-11 02:40:02.771 2 DEBUG nova.network.neutron [req-b776bc2d-d05d-43b8-9b4e-6c6d06c43802 req-c962eaa9-8434-4ac6-885b-acb00d0ae6de 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Updated VIF entry in instance network info cache for port d690bf02-80b8-4bb8-808f-ccc93f22c545. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:40:02 compute-0 nova_compute[356901]: 2025-10-11 02:40:02.773 2 DEBUG nova.network.neutron [req-b776bc2d-d05d-43b8-9b4e-6c6d06c43802 req-c962eaa9-8434-4ac6-885b-acb00d0ae6de 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Updating instance_info_cache with network_info: [{"id": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "address": "fa:16:3e:1e:a3:c2", "network": {"id": "42802124-ba47-4b6e-aa91-ecf257e5a54c", "bridge": "br-int", "label": "tempest-TestServerBasicOps-246725932-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.11", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.226", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "4baea94e1c7d43e699eaac33512a8105", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd690bf02-80", "ovs_interfaceid": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:40:02 compute-0 nova_compute[356901]: 2025-10-11 02:40:02.838 2 DEBUG oslo_concurrency.lockutils [req-b776bc2d-d05d-43b8-9b4e-6c6d06c43802 req-c962eaa9-8434-4ac6-885b-acb00d0ae6de 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-5279e85f-e35b-4ddd-8336-7f483712f743" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:40:02 compute-0 nova_compute[356901]: 2025-10-11 02:40:02.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:40:03 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:40:03 compute-0 sudo[454526]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:40:03 compute-0 sudo[454526]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:03 compute-0 sudo[454526]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:03 compute-0 sudo[454551]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:40:03 compute-0 sudo[454551]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:03 compute-0 sudo[454551]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:03 compute-0 sudo[454576]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:40:03 compute-0 sudo[454576]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:03 compute-0 sudo[454576]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:03 compute-0 sudo[454601]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:40:03 compute-0 sudo[454601]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:04 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:40:04 compute-0 ceph-mon[191930]: pgmap v1870: 321 pgs: 321 active+clean; 258 MiB data, 368 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 14 KiB/s wr, 65 op/s
Oct 11 02:40:04 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:40:04 compute-0 sudo[454601]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:40:04 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:40:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:40:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:40:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:40:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:40:04 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev ba078955-46e9-48e7-8767-36daddff58ac does not exist
Oct 11 02:40:04 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 6418c6d6-e358-4bfc-ba84-42b642228366 does not exist
Oct 11 02:40:04 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 3fb18f74-d432-45c5-8fef-7d3c32e13cfc does not exist
Oct 11 02:40:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:40:04 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:40:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:40:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:40:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:40:04 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:40:04 compute-0 sudo[454656]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:40:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1871: 321 pgs: 321 active+clean; 258 MiB data, 369 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 15 KiB/s wr, 80 op/s
Oct 11 02:40:04 compute-0 sudo[454656]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:04 compute-0 sudo[454656]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:04 compute-0 sudo[454681]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:40:04 compute-0 sudo[454681]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:04 compute-0 sudo[454681]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:04 compute-0 sudo[454706]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:40:04 compute-0 sudo[454706]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:04 compute-0 sudo[454706]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:05 compute-0 nova_compute[356901]: 2025-10-11 02:40:05.033 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:05 compute-0 sudo[454731]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:40:05 compute-0 sudo[454731]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:40:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:40:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:40:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:40:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:40:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:40:05 compute-0 ceph-mon[191930]: pgmap v1871: 321 pgs: 321 active+clean; 258 MiB data, 369 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 15 KiB/s wr, 80 op/s
Oct 11 02:40:05 compute-0 nova_compute[356901]: 2025-10-11 02:40:05.407 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:05 compute-0 podman[454792]: 2025-10-11 02:40:05.653152891 +0000 UTC m=+0.068666331 container create bb37eca845096fdada8be885a784c250d7463117286ebff0bc0e0fb812bcfc2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_taussig, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef)
Oct 11 02:40:05 compute-0 podman[454792]: 2025-10-11 02:40:05.627715308 +0000 UTC m=+0.043228768 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:40:05 compute-0 systemd[1]: Started libpod-conmon-bb37eca845096fdada8be885a784c250d7463117286ebff0bc0e0fb812bcfc2f.scope.
Oct 11 02:40:05 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:40:05 compute-0 podman[454792]: 2025-10-11 02:40:05.82013641 +0000 UTC m=+0.235649870 container init bb37eca845096fdada8be885a784c250d7463117286ebff0bc0e0fb812bcfc2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_taussig, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, ceph=True)
Oct 11 02:40:05 compute-0 podman[454792]: 2025-10-11 02:40:05.831414441 +0000 UTC m=+0.246927881 container start bb37eca845096fdada8be885a784c250d7463117286ebff0bc0e0fb812bcfc2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_taussig, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 02:40:05 compute-0 podman[454792]: 2025-10-11 02:40:05.836984055 +0000 UTC m=+0.252497485 container attach bb37eca845096fdada8be885a784c250d7463117286ebff0bc0e0fb812bcfc2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_taussig, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 02:40:05 compute-0 laughing_taussig[454808]: 167 167
Oct 11 02:40:05 compute-0 systemd[1]: libpod-bb37eca845096fdada8be885a784c250d7463117286ebff0bc0e0fb812bcfc2f.scope: Deactivated successfully.
Oct 11 02:40:05 compute-0 conmon[454808]: conmon bb37eca845096fdada8b <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-bb37eca845096fdada8be885a784c250d7463117286ebff0bc0e0fb812bcfc2f.scope/container/memory.events
Oct 11 02:40:05 compute-0 podman[454813]: 2025-10-11 02:40:05.907309881 +0000 UTC m=+0.046937116 container died bb37eca845096fdada8be885a784c250d7463117286ebff0bc0e0fb812bcfc2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_taussig, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:40:05 compute-0 systemd[1]: var-lib-containers-storage-overlay-e1780e609923057de4dc6f7373314799fe3a14931c07ea4da458392c21a52ff9-merged.mount: Deactivated successfully.
Oct 11 02:40:05 compute-0 podman[454813]: 2025-10-11 02:40:05.980278556 +0000 UTC m=+0.119905761 container remove bb37eca845096fdada8be885a784c250d7463117286ebff0bc0e0fb812bcfc2f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=laughing_taussig, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:40:05 compute-0 systemd[1]: libpod-conmon-bb37eca845096fdada8be885a784c250d7463117286ebff0bc0e0fb812bcfc2f.scope: Deactivated successfully.
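[annotation] The podman lines above trace one complete lifecycle of a short-lived cephadm helper container (laughing_taussig): image pull, create, init, start, attach, died, remove, with the conmon warning and systemd scope teardown interleaved. The following is a minimal sketch, not part of any tool shown in this log, of grouping such journal lines back into per-container event sequences; the helper name and regex are illustrative and tailored to the field order seen here (image first, then name).

    #!/usr/bin/env python3
    # Illustrative sketch: group podman journal lines like the ones above
    # into per-container event sequences. Reads journal text on stdin.
    import re
    import sys
    from collections import defaultdict

    EVENT_RE = re.compile(
        r"container (?P<event>create|init|start|attach|died|remove) "
        r"(?P<cid>[0-9a-f]{64}) \(image=[^,]+, name=(?P<name>[^,)]+)"
    )

    def lifecycles(lines):
        """Map container name -> ordered list of podman events."""
        events, names = defaultdict(list), {}
        for line in lines:
            m = EVENT_RE.search(line)
            if m:
                events[m.group("cid")].append(m.group("event"))
                names[m.group("cid")] = m.group("name")
        return {names[cid]: evs for cid, evs in events.items()}

    if __name__ == "__main__":
        for name, evs in lifecycles(sys.stdin).items():
            # e.g. laughing_taussig: create -> init -> start -> attach -> died -> remove
            print(f"{name}: {' -> '.join(evs)}")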
Oct 11 02:40:06 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:06.104 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '12'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:40:06 compute-0 podman[454831]: 2025-10-11 02:40:06.252997612 +0000 UTC m=+0.071416690 container create ab008921d81c779954887be9c4a34c3d7b2d8bf2b695ec8c21d92d8b6c256359 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_tharp, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:40:06 compute-0 podman[454831]: 2025-10-11 02:40:06.225182395 +0000 UTC m=+0.043601503 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:40:06 compute-0 systemd[1]: Started libpod-conmon-ab008921d81c779954887be9c4a34c3d7b2d8bf2b695ec8c21d92d8b6c256359.scope.
Oct 11 02:40:06 compute-0 nova_compute[356901]: 2025-10-11 02:40:06.346 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:06 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:40:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a1268ab63d76ffe6aec9ffc6bb33d80afd5ddc13b71a50470c7d19373fa0841e/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:40:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a1268ab63d76ffe6aec9ffc6bb33d80afd5ddc13b71a50470c7d19373fa0841e/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:40:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a1268ab63d76ffe6aec9ffc6bb33d80afd5ddc13b71a50470c7d19373fa0841e/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:40:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a1268ab63d76ffe6aec9ffc6bb33d80afd5ddc13b71a50470c7d19373fa0841e/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:40:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a1268ab63d76ffe6aec9ffc6bb33d80afd5ddc13b71a50470c7d19373fa0841e/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:40:06 compute-0 podman[454831]: 2025-10-11 02:40:06.402472414 +0000 UTC m=+0.220891522 container init ab008921d81c779954887be9c4a34c3d7b2d8bf2b695ec8c21d92d8b6c256359 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_tharp, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 02:40:06 compute-0 podman[454831]: 2025-10-11 02:40:06.42209344 +0000 UTC m=+0.240512508 container start ab008921d81c779954887be9c4a34c3d7b2d8bf2b695ec8c21d92d8b6c256359 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_tharp, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 02:40:06 compute-0 podman[454831]: 2025-10-11 02:40:06.426212387 +0000 UTC m=+0.244631505 container attach ab008921d81c779954887be9c4a34c3d7b2d8bf2b695ec8c21d92d8b6c256359 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_tharp, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS)
Oct 11 02:40:06 compute-0 ovn_controller[88370]: 2025-10-11T02:40:06Z|00012|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:8d:b8:dd 10.100.0.4
Oct 11 02:40:06 compute-0 ovn_controller[88370]: 2025-10-11T02:40:06Z|00013|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:8d:b8:dd 10.100.0.4
Oct 11 02:40:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1872: 321 pgs: 321 active+clean; 276 MiB data, 380 MiB used, 60 GiB / 60 GiB avail; 2.0 MiB/s rd, 1.5 MiB/s wr, 100 op/s
Oct 11 02:40:06 compute-0 ceph-mon[191930]: pgmap v1872: 321 pgs: 321 active+clean; 276 MiB data, 380 MiB used, 60 GiB / 60 GiB avail; 2.0 MiB/s rd, 1.5 MiB/s wr, 100 op/s
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0018959569536693365 of space, bias 1.0, pg target 0.568787086100801 quantized to 32 (current 32)
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0009191400908380543 of space, bias 1.0, pg target 0.2757420272514163 quantized to 32 (current 32)
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:40:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
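[annotation] The pg_autoscaler targets above are reproducible with simple arithmetic: each logged pg target equals usage_ratio * bias * 300, consistent with the default mon_target_pg_per_osd of 100 times the three OSDs this log enumerates later (both values are assumptions; neither appears in these lines). The result is then quantized to a power of two, evidently floored at per-pool minimums (1 for .mgr, 16 for the CephFS metadata pool); that quantization step is not reproduced in this worked check.

    # Worked check of the pg_autoscaler arithmetic logged above.
    # TARGET_PGS = mon_target_pg_per_osd * OSD count; 100 and 3 are assumed
    # (the default and the three OSDs listed later in this log), not values
    # read from these lines.
    TARGET_PGS = 100 * 3

    pools = {
        # pool: (usage_ratio, bias) exactly as logged
        "vms":                (0.0018959569536693365, 1.0),
        "images":             (0.0009191400908380543, 1.0),
        "cephfs.cephfs.meta": (5.087256625643029e-07, 4.0),
    }

    for pool, (ratio, bias) in pools.items():
        pg_target = ratio * bias * TARGET_PGS
        # Reproduces the logged targets, e.g. vms -> 0.568787086100801
        print(f"{pool}: pg target {pg_target}")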
Oct 11 02:40:07 compute-0 elated_tharp[454847]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:40:07 compute-0 elated_tharp[454847]: --> relative data size: 1.0
Oct 11 02:40:07 compute-0 elated_tharp[454847]: --> All data devices are unavailable
Oct 11 02:40:07 compute-0 systemd[1]: libpod-ab008921d81c779954887be9c4a34c3d7b2d8bf2b695ec8c21d92d8b6c256359.scope: Deactivated successfully.
Oct 11 02:40:07 compute-0 systemd[1]: libpod-ab008921d81c779954887be9c4a34c3d7b2d8bf2b695ec8c21d92d8b6c256359.scope: Consumed 1.172s CPU time.
Oct 11 02:40:07 compute-0 podman[454831]: 2025-10-11 02:40:07.751172913 +0000 UTC m=+1.569592011 container died ab008921d81c779954887be9c4a34c3d7b2d8bf2b695ec8c21d92d8b6c256359 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_tharp, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:40:07 compute-0 systemd[1]: var-lib-containers-storage-overlay-a1268ab63d76ffe6aec9ffc6bb33d80afd5ddc13b71a50470c7d19373fa0841e-merged.mount: Deactivated successfully.
Oct 11 02:40:07 compute-0 podman[454831]: 2025-10-11 02:40:07.816693157 +0000 UTC m=+1.635112235 container remove ab008921d81c779954887be9c4a34c3d7b2d8bf2b695ec8c21d92d8b6c256359 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_tharp, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 02:40:07 compute-0 systemd[1]: libpod-conmon-ab008921d81c779954887be9c4a34c3d7b2d8bf2b695ec8c21d92d8b6c256359.scope: Deactivated successfully.
Oct 11 02:40:07 compute-0 sudo[454731]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:07 compute-0 nova_compute[356901]: 2025-10-11 02:40:07.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:40:07 compute-0 nova_compute[356901]: 2025-10-11 02:40:07.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:40:07 compute-0 nova_compute[356901]: 2025-10-11 02:40:07.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:40:07 compute-0 ovn_controller[88370]: 2025-10-11T02:40:07Z|00014|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:c5:9b:82 10.100.0.14
Oct 11 02:40:07 compute-0 ovn_controller[88370]: 2025-10-11T02:40:07Z|00015|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:c5:9b:82 10.100.0.14
Oct 11 02:40:07 compute-0 sudo[454888]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:40:07 compute-0 sudo[454888]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:07 compute-0 sudo[454888]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:08 compute-0 sudo[454913]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:40:08 compute-0 sudo[454913]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:08 compute-0 sudo[454913]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:08 compute-0 sudo[454938]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:40:08 compute-0 sudo[454938]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:08 compute-0 sudo[454938]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:08 compute-0 sudo[454963]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:40:08 compute-0 sudo[454963]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:08 compute-0 nova_compute[356901]: 2025-10-11 02:40:08.494 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:40:08 compute-0 nova_compute[356901]: 2025-10-11 02:40:08.495 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:40:08 compute-0 nova_compute[356901]: 2025-10-11 02:40:08.495 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:40:08 compute-0 nova_compute[356901]: 2025-10-11 02:40:08.496 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:40:08 compute-0 podman[455025]: 2025-10-11 02:40:08.750136131 +0000 UTC m=+0.070239749 container create ff2f7349baeb969e73473ffb21fd094e91f6f66e7748935b383228786d3bd1f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_bartik, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:40:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1873: 321 pgs: 321 active+clean; 306 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 2.3 MiB/s rd, 3.0 MiB/s wr, 138 op/s
Oct 11 02:40:08 compute-0 systemd[1]: Started libpod-conmon-ff2f7349baeb969e73473ffb21fd094e91f6f66e7748935b383228786d3bd1f6.scope.
Oct 11 02:40:08 compute-0 podman[455025]: 2025-10-11 02:40:08.729797728 +0000 UTC m=+0.049901366 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:40:08 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:40:08 compute-0 podman[455025]: 2025-10-11 02:40:08.860220437 +0000 UTC m=+0.180324075 container init ff2f7349baeb969e73473ffb21fd094e91f6f66e7748935b383228786d3bd1f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_bartik, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3)
Oct 11 02:40:08 compute-0 ceph-mon[191930]: pgmap v1873: 321 pgs: 321 active+clean; 306 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 2.3 MiB/s rd, 3.0 MiB/s wr, 138 op/s
Oct 11 02:40:08 compute-0 podman[455025]: 2025-10-11 02:40:08.874221334 +0000 UTC m=+0.194324942 container start ff2f7349baeb969e73473ffb21fd094e91f6f66e7748935b383228786d3bd1f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_bartik, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507)
Oct 11 02:40:08 compute-0 podman[455025]: 2025-10-11 02:40:08.87874111 +0000 UTC m=+0.198844758 container attach ff2f7349baeb969e73473ffb21fd094e91f6f66e7748935b383228786d3bd1f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_bartik, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 02:40:08 compute-0 nervous_bartik[455041]: 167 167
Oct 11 02:40:08 compute-0 systemd[1]: libpod-ff2f7349baeb969e73473ffb21fd094e91f6f66e7748935b383228786d3bd1f6.scope: Deactivated successfully.
Oct 11 02:40:08 compute-0 podman[455025]: 2025-10-11 02:40:08.885048123 +0000 UTC m=+0.205151751 container died ff2f7349baeb969e73473ffb21fd094e91f6f66e7748935b383228786d3bd1f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_bartik, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:40:08 compute-0 systemd[1]: var-lib-containers-storage-overlay-d4a95f64d8ff1397dd1947b6e34c63214940126eb3c32a95d9f51a3d20179e4d-merged.mount: Deactivated successfully.
Oct 11 02:40:08 compute-0 podman[455025]: 2025-10-11 02:40:08.944695614 +0000 UTC m=+0.264799232 container remove ff2f7349baeb969e73473ffb21fd094e91f6f66e7748935b383228786d3bd1f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_bartik, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:40:08 compute-0 systemd[1]: libpod-conmon-ff2f7349baeb969e73473ffb21fd094e91f6f66e7748935b383228786d3bd1f6.scope: Deactivated successfully.
Oct 11 02:40:09 compute-0 podman[455066]: 2025-10-11 02:40:09.189490514 +0000 UTC m=+0.082141209 container create 0bc60c0fc5fcad371d03e6bf756415197c9dbdb98f5d0b3e8698470abe36334f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_cray, ceph=True, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:40:09 compute-0 podman[455066]: 2025-10-11 02:40:09.160274325 +0000 UTC m=+0.052925050 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:40:09 compute-0 systemd[1]: Started libpod-conmon-0bc60c0fc5fcad371d03e6bf756415197c9dbdb98f5d0b3e8698470abe36334f.scope.
Oct 11 02:40:09 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:40:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a9e11e272cd9178cc0809b14b814461bf09c9c86e51225d95bd2d8917ca70efa/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:40:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a9e11e272cd9178cc0809b14b814461bf09c9c86e51225d95bd2d8917ca70efa/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:40:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a9e11e272cd9178cc0809b14b814461bf09c9c86e51225d95bd2d8917ca70efa/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:40:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a9e11e272cd9178cc0809b14b814461bf09c9c86e51225d95bd2d8917ca70efa/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:40:09 compute-0 podman[455066]: 2025-10-11 02:40:09.305374136 +0000 UTC m=+0.198024861 container init 0bc60c0fc5fcad371d03e6bf756415197c9dbdb98f5d0b3e8698470abe36334f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_cray, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:40:09 compute-0 podman[455066]: 2025-10-11 02:40:09.322472514 +0000 UTC m=+0.215123219 container start 0bc60c0fc5fcad371d03e6bf756415197c9dbdb98f5d0b3e8698470abe36334f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_cray, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:40:09 compute-0 podman[455066]: 2025-10-11 02:40:09.326992149 +0000 UTC m=+0.219642884 container attach 0bc60c0fc5fcad371d03e6bf756415197c9dbdb98f5d0b3e8698470abe36334f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_cray, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0)
Oct 11 02:40:10 compute-0 nova_compute[356901]: 2025-10-11 02:40:10.036 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:10 compute-0 elastic_cray[455082]: {
Oct 11 02:40:10 compute-0 elastic_cray[455082]:     "0": [
Oct 11 02:40:10 compute-0 elastic_cray[455082]:         {
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "devices": [
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "/dev/loop3"
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             ],
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "lv_name": "ceph_lv0",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "lv_size": "21470642176",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "name": "ceph_lv0",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "tags": {
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.cluster_name": "ceph",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.crush_device_class": "",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.encrypted": "0",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.osd_id": "0",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.type": "block",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.vdo": "0"
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             },
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "type": "block",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "vg_name": "ceph_vg0"
Oct 11 02:40:10 compute-0 elastic_cray[455082]:         }
Oct 11 02:40:10 compute-0 elastic_cray[455082]:     ],
Oct 11 02:40:10 compute-0 elastic_cray[455082]:     "1": [
Oct 11 02:40:10 compute-0 elastic_cray[455082]:         {
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "devices": [
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "/dev/loop4"
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             ],
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "lv_name": "ceph_lv1",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "lv_size": "21470642176",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "name": "ceph_lv1",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "tags": {
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.cluster_name": "ceph",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.crush_device_class": "",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.encrypted": "0",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.osd_id": "1",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.type": "block",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.vdo": "0"
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             },
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "type": "block",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "vg_name": "ceph_vg1"
Oct 11 02:40:10 compute-0 elastic_cray[455082]:         }
Oct 11 02:40:10 compute-0 elastic_cray[455082]:     ],
Oct 11 02:40:10 compute-0 elastic_cray[455082]:     "2": [
Oct 11 02:40:10 compute-0 elastic_cray[455082]:         {
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "devices": [
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "/dev/loop5"
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             ],
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "lv_name": "ceph_lv2",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "lv_size": "21470642176",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "name": "ceph_lv2",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "tags": {
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.cluster_name": "ceph",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.crush_device_class": "",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.encrypted": "0",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.osd_id": "2",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.type": "block",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:                 "ceph.vdo": "0"
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             },
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "type": "block",
Oct 11 02:40:10 compute-0 elastic_cray[455082]:             "vg_name": "ceph_vg2"
Oct 11 02:40:10 compute-0 elastic_cray[455082]:         }
Oct 11 02:40:10 compute-0 elastic_cray[455082]:     ]
Oct 11 02:40:10 compute-0 elastic_cray[455082]: }
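[annotation] The elastic_cray container is the "ceph-volume lvm list --format json" run dispatched via cephadm at 02:40:08 (sudo line above); its stdout, captured line by line by the journal, is the JSON block just printed: three OSDs (osd.0 to osd.2) on LVs ceph_vg0-2/ceph_lv0-2 backed by /dev/loop3-5, all tagged with cluster fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da. A minimal parsing sketch, assuming the journal prefixes have been stripped so raw_json holds the bare JSON text; the function name is illustrative.

    import json

    def summarize_lvm_list(raw_json: str):
        """Summarize 'ceph-volume lvm list --format json' output like the
        JSON block above (raw_json = that text with journal prefixes
        stripped)."""
        for osd_id, lvs in json.loads(raw_json).items():
            for lv in lvs:
                # e.g. osd.0 block=/dev/ceph_vg0/ceph_lv0 on /dev/loop3
                #      fsid a9c7940d-c154-46ef-9c18-8ba55dddd3d6
                print(f"osd.{osd_id} {lv['type']}={lv['lv_path']} "
                      f"on {','.join(lv['devices'])} "
                      f"fsid {lv['tags']['ceph.osd_fsid']}")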
Oct 11 02:40:10 compute-0 systemd[1]: libpod-0bc60c0fc5fcad371d03e6bf756415197c9dbdb98f5d0b3e8698470abe36334f.scope: Deactivated successfully.
Oct 11 02:40:10 compute-0 podman[455091]: 2025-10-11 02:40:10.230568143 +0000 UTC m=+0.068949768 container died 0bc60c0fc5fcad371d03e6bf756415197c9dbdb98f5d0b3e8698470abe36334f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_cray, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:40:10 compute-0 systemd[1]: var-lib-containers-storage-overlay-a9e11e272cd9178cc0809b14b814461bf09c9c86e51225d95bd2d8917ca70efa-merged.mount: Deactivated successfully.
Oct 11 02:40:10 compute-0 podman[455091]: 2025-10-11 02:40:10.350681353 +0000 UTC m=+0.189062958 container remove 0bc60c0fc5fcad371d03e6bf756415197c9dbdb98f5d0b3e8698470abe36334f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_cray, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507)
Oct 11 02:40:10 compute-0 systemd[1]: libpod-conmon-0bc60c0fc5fcad371d03e6bf756415197c9dbdb98f5d0b3e8698470abe36334f.scope: Deactivated successfully.
Oct 11 02:40:10 compute-0 sudo[454963]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:10 compute-0 sudo[455106]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:40:10 compute-0 sudo[455106]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:10 compute-0 sudo[455106]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:10 compute-0 sudo[455131]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:40:10 compute-0 sudo[455131]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:10 compute-0 sudo[455131]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:10 compute-0 sudo[455175]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:40:10 compute-0 sudo[455175]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:10 compute-0 sudo[455175]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:10 compute-0 podman[455155]: 2025-10-11 02:40:10.736129084 +0000 UTC m=+0.094297992 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:40:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1874: 321 pgs: 321 active+clean; 316 MiB data, 412 MiB used, 60 GiB / 60 GiB avail; 2.0 MiB/s rd, 4.2 MiB/s wr, 149 op/s
Oct 11 02:40:10 compute-0 podman[455157]: 2025-10-11 02:40:10.770471295 +0000 UTC m=+0.124920603 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:40:10 compute-0 podman[455156]: 2025-10-11 02:40:10.78243536 +0000 UTC m=+0.135553473 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=1755695350, managed_by=edpm_ansible, version=9.6, distribution-scope=public, vendor=Red Hat, Inc., architecture=x86_64, name=ubi9-minimal, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, vcs-type=git, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, config_id=edpm, io.openshift.expose-services=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, container_name=openstack_network_exporter, io.buildah.version=1.33.7, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, com.redhat.component=ubi9-minimal-container, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, build-date=2025-08-20T13:12:41)
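[annotation] The three health_status entries above are podman healthcheck results for the edpm telemetry containers (ceilometer_agent_ipmi, node_exporter, openstack_network_exporter), each currently health_status=healthy with health_failing_streak=0. A small sketch for extracting those fields from journal lines of this shape; the regex is tailored to the field order seen above and is illustrative only.

    import re

    # Field order as seen in the three entries above: name, then
    # health_status, then health_failing_streak.
    HEALTH_RE = re.compile(
        r"container health_status .*?name=(?P<name>[^,]+), "
        r"health_status=(?P<status>[^,]+), health_failing_streak=(?P<streak>\d+)"
    )

    def health_summary(lines):
        """Yield (container_name, status, failing_streak) per matching line."""
        for line in lines:
            m = HEALTH_RE.search(line)
            if m:
                yield m.group("name"), m.group("status"), int(m.group("streak"))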
Oct 11 02:40:10 compute-0 sudo[455240]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:40:10 compute-0 sudo[455240]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:10 compute-0 nova_compute[356901]: 2025-10-11 02:40:10.847 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:40:10 compute-0 nova_compute[356901]: 2025-10-11 02:40:10.862 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:40:10 compute-0 nova_compute[356901]: 2025-10-11 02:40:10.863 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
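[annotation] The cache update at 02:40:10.847 embeds the instance's full network_info: one OVS port (tap64dfc81b-52) on br-int with fixed IP 192.168.0.236 and floating IP 192.168.122.201. A sketch of walking that structure once it has been parsed from JSON; the key names are exactly those in the logged blob, the function name is illustrative.

    def instance_addresses(network_info):
        """Extract (fixed_ip, [floating_ips]) pairs from a nova network_info
        list shaped like the one logged above."""
        pairs = []
        for vif in network_info:                       # one OVS port here
            for subnet in vif["network"]["subnets"]:   # cidr 192.168.0.0/24
                for ip in subnet["ips"]:
                    fixed = ip["address"]              # "192.168.0.236"
                    floats = [f["address"] for f in ip.get("floating_ips", [])]
                    pairs.append((fixed, floats))      # ["192.168.122.201"]
        return pairs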
Oct 11 02:40:10 compute-0 nova_compute[356901]: 2025-10-11 02:40:10.863 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:40:10 compute-0 nova_compute[356901]: 2025-10-11 02:40:10.864 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:40:10 compute-0 nova_compute[356901]: 2025-10-11 02:40:10.864 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:40:10 compute-0 nova_compute[356901]: 2025-10-11 02:40:10.864 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:40:10 compute-0 ceph-mon[191930]: pgmap v1874: 321 pgs: 321 active+clean; 316 MiB data, 412 MiB used, 60 GiB / 60 GiB avail; 2.0 MiB/s rd, 4.2 MiB/s wr, 149 op/s
Oct 11 02:40:11 compute-0 nova_compute[356901]: 2025-10-11 02:40:11.000 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:11 compute-0 unix_chkpwd[455298]: password check failed for user (root)
Oct 11 02:40:11 compute-0 sshd-session[455104]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.159  user=root
Oct 11 02:40:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:40:11 compute-0 podman[455306]: 2025-10-11 02:40:11.314569521 +0000 UTC m=+0.060254639 container create f0b7c9d3464424483cecb146ef8060637f8e83f623ee8582f64799cb331f7399 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_buck, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:40:11 compute-0 nova_compute[356901]: 2025-10-11 02:40:11.349 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:11 compute-0 systemd[1]: Started libpod-conmon-f0b7c9d3464424483cecb146ef8060637f8e83f623ee8582f64799cb331f7399.scope.
Oct 11 02:40:11 compute-0 podman[455306]: 2025-10-11 02:40:11.294585781 +0000 UTC m=+0.040270929 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:40:11 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:40:11 compute-0 podman[455306]: 2025-10-11 02:40:11.454524207 +0000 UTC m=+0.200209355 container init f0b7c9d3464424483cecb146ef8060637f8e83f623ee8582f64799cb331f7399 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_buck, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 02:40:11 compute-0 podman[455306]: 2025-10-11 02:40:11.465718491 +0000 UTC m=+0.211403609 container start f0b7c9d3464424483cecb146ef8060637f8e83f623ee8582f64799cb331f7399 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_buck, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 02:40:11 compute-0 podman[455306]: 2025-10-11 02:40:11.470290351 +0000 UTC m=+0.215975529 container attach f0b7c9d3464424483cecb146ef8060637f8e83f623ee8582f64799cb331f7399 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_buck, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 02:40:11 compute-0 happy_buck[455322]: 167 167
Oct 11 02:40:11 compute-0 systemd[1]: libpod-f0b7c9d3464424483cecb146ef8060637f8e83f623ee8582f64799cb331f7399.scope: Deactivated successfully.
Oct 11 02:40:11 compute-0 conmon[455322]: conmon f0b7c9d3464424483cec <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-f0b7c9d3464424483cecb146ef8060637f8e83f623ee8582f64799cb331f7399.scope/container/memory.events
Oct 11 02:40:11 compute-0 podman[455306]: 2025-10-11 02:40:11.478545297 +0000 UTC m=+0.224230405 container died f0b7c9d3464424483cecb146ef8060637f8e83f623ee8582f64799cb331f7399 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_buck, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 02:40:11 compute-0 systemd[1]: var-lib-containers-storage-overlay-a297808f26bd0597093faa8ccbbf91af30dd5e0b5ec2567beb42827d5f6832f3-merged.mount: Deactivated successfully.
Oct 11 02:40:11 compute-0 podman[455306]: 2025-10-11 02:40:11.526061177 +0000 UTC m=+0.271746315 container remove f0b7c9d3464424483cecb146ef8060637f8e83f623ee8582f64799cb331f7399 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=happy_buck, ceph=True, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 02:40:11 compute-0 systemd[1]: libpod-conmon-f0b7c9d3464424483cecb146ef8060637f8e83f623ee8582f64799cb331f7399.scope: Deactivated successfully.
Oct 11 02:40:11 compute-0 podman[455344]: 2025-10-11 02:40:11.811107193 +0000 UTC m=+0.087099735 container create b65acaa8b24e45c26a45f15a5623cb5fddd13d74f0d054c422fe718a3a06862e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nifty_booth, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:40:11 compute-0 nova_compute[356901]: 2025-10-11 02:40:11.859 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:40:11 compute-0 podman[455344]: 2025-10-11 02:40:11.783475744 +0000 UTC m=+0.059468296 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:40:11 compute-0 systemd[1]: Started libpod-conmon-b65acaa8b24e45c26a45f15a5623cb5fddd13d74f0d054c422fe718a3a06862e.scope.
Oct 11 02:40:11 compute-0 nova_compute[356901]: 2025-10-11 02:40:11.891 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:40:11 compute-0 nova_compute[356901]: 2025-10-11 02:40:11.922 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:40:11 compute-0 nova_compute[356901]: 2025-10-11 02:40:11.923 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:40:11 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:40:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6728be102de9b953ed90cb75dcd9d63ff5e7ee5a3bb4a55e4363b3ff63cb01e8/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:40:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6728be102de9b953ed90cb75dcd9d63ff5e7ee5a3bb4a55e4363b3ff63cb01e8/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:40:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6728be102de9b953ed90cb75dcd9d63ff5e7ee5a3bb4a55e4363b3ff63cb01e8/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:40:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6728be102de9b953ed90cb75dcd9d63ff5e7ee5a3bb4a55e4363b3ff63cb01e8/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:40:11 compute-0 nova_compute[356901]: 2025-10-11 02:40:11.949 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:11 compute-0 nova_compute[356901]: 2025-10-11 02:40:11.952 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:11 compute-0 nova_compute[356901]: 2025-10-11 02:40:11.952 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:11 compute-0 nova_compute[356901]: 2025-10-11 02:40:11.953 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:40:11 compute-0 nova_compute[356901]: 2025-10-11 02:40:11.954 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:40:11 compute-0 podman[455344]: 2025-10-11 02:40:11.964486833 +0000 UTC m=+0.240479395 container init b65acaa8b24e45c26a45f15a5623cb5fddd13d74f0d054c422fe718a3a06862e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nifty_booth, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 02:40:11 compute-0 podman[455344]: 2025-10-11 02:40:11.987517069 +0000 UTC m=+0.263509611 container start b65acaa8b24e45c26a45f15a5623cb5fddd13d74f0d054c422fe718a3a06862e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nifty_booth, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:40:11 compute-0 podman[455344]: 2025-10-11 02:40:11.992954881 +0000 UTC m=+0.268947453 container attach b65acaa8b24e45c26a45f15a5623cb5fddd13d74f0d054c422fe718a3a06862e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nifty_booth, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:40:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:40:12 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3670998328' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:40:12 compute-0 nova_compute[356901]: 2025-10-11 02:40:12.473 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.519s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:40:12 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3670998328' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:40:12 compute-0 nova_compute[356901]: 2025-10-11 02:40:12.627 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000a as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:40:12 compute-0 nova_compute[356901]: 2025-10-11 02:40:12.628 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000a as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:40:12 compute-0 nova_compute[356901]: 2025-10-11 02:40:12.636 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000009 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:40:12 compute-0 nova_compute[356901]: 2025-10-11 02:40:12.637 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000009 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:40:12 compute-0 nova_compute[356901]: 2025-10-11 02:40:12.643 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000006 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:40:12 compute-0 nova_compute[356901]: 2025-10-11 02:40:12.643 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000006 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:40:12 compute-0 nova_compute[356901]: 2025-10-11 02:40:12.650 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:40:12 compute-0 nova_compute[356901]: 2025-10-11 02:40:12.651 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:40:12 compute-0 nova_compute[356901]: 2025-10-11 02:40:12.652 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:40:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1875: 321 pgs: 321 active+clean; 324 MiB data, 419 MiB used, 60 GiB / 60 GiB avail; 1.8 MiB/s rd, 4.3 MiB/s wr, 165 op/s
Oct 11 02:40:13 compute-0 nifty_booth[455361]: {
Oct 11 02:40:13 compute-0 nifty_booth[455361]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:40:13 compute-0 nifty_booth[455361]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:40:13 compute-0 nifty_booth[455361]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:40:13 compute-0 nifty_booth[455361]:         "osd_id": 1,
Oct 11 02:40:13 compute-0 nifty_booth[455361]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:40:13 compute-0 nifty_booth[455361]:         "type": "bluestore"
Oct 11 02:40:13 compute-0 nifty_booth[455361]:     },
Oct 11 02:40:13 compute-0 nifty_booth[455361]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:40:13 compute-0 nifty_booth[455361]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:40:13 compute-0 nifty_booth[455361]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:40:13 compute-0 nifty_booth[455361]:         "osd_id": 2,
Oct 11 02:40:13 compute-0 nifty_booth[455361]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:40:13 compute-0 nifty_booth[455361]:         "type": "bluestore"
Oct 11 02:40:13 compute-0 nifty_booth[455361]:     },
Oct 11 02:40:13 compute-0 nifty_booth[455361]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:40:13 compute-0 nifty_booth[455361]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:40:13 compute-0 nifty_booth[455361]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:40:13 compute-0 nifty_booth[455361]:         "osd_id": 0,
Oct 11 02:40:13 compute-0 nifty_booth[455361]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:40:13 compute-0 nifty_booth[455361]:         "type": "bluestore"
Oct 11 02:40:13 compute-0 nifty_booth[455361]:     }
Oct 11 02:40:13 compute-0 nifty_booth[455361]: }
Oct 11 02:40:13 compute-0 nova_compute[356901]: 2025-10-11 02:40:13.250 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:40:13 compute-0 nova_compute[356901]: 2025-10-11 02:40:13.252 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3136MB free_disk=59.84402084350586GB free_vcpus=4 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:40:13 compute-0 nova_compute[356901]: 2025-10-11 02:40:13.252 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:13 compute-0 nova_compute[356901]: 2025-10-11 02:40:13.252 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:13 compute-0 systemd[1]: libpod-b65acaa8b24e45c26a45f15a5623cb5fddd13d74f0d054c422fe718a3a06862e.scope: Deactivated successfully.
Oct 11 02:40:13 compute-0 systemd[1]: libpod-b65acaa8b24e45c26a45f15a5623cb5fddd13d74f0d054c422fe718a3a06862e.scope: Consumed 1.207s CPU time.
Oct 11 02:40:13 compute-0 podman[455344]: 2025-10-11 02:40:13.275101409 +0000 UTC m=+1.551093951 container died b65acaa8b24e45c26a45f15a5623cb5fddd13d74f0d054c422fe718a3a06862e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nifty_booth, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:40:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-6728be102de9b953ed90cb75dcd9d63ff5e7ee5a3bb4a55e4363b3ff63cb01e8-merged.mount: Deactivated successfully.
Oct 11 02:40:13 compute-0 nova_compute[356901]: 2025-10-11 02:40:13.335 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:40:13 compute-0 nova_compute[356901]: 2025-10-11 02:40:13.335 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:40:13 compute-0 nova_compute[356901]: 2025-10-11 02:40:13.335 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 49d4f343-d1b4-4594-96d2-0777a5ce8581 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:40:13 compute-0 nova_compute[356901]: 2025-10-11 02:40:13.336 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 5279e85f-e35b-4ddd-8336-7f483712f743 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:40:13 compute-0 nova_compute[356901]: 2025-10-11 02:40:13.336 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 4 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:40:13 compute-0 nova_compute[356901]: 2025-10-11 02:40:13.336 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1408MB phys_disk=59GB used_disk=5GB total_vcpus=8 used_vcpus=4 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:40:13 compute-0 podman[455344]: 2025-10-11 02:40:13.3664158 +0000 UTC m=+1.642408332 container remove b65acaa8b24e45c26a45f15a5623cb5fddd13d74f0d054c422fe718a3a06862e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nifty_booth, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 02:40:13 compute-0 systemd[1]: libpod-conmon-b65acaa8b24e45c26a45f15a5623cb5fddd13d74f0d054c422fe718a3a06862e.scope: Deactivated successfully.
Oct 11 02:40:13 compute-0 sudo[455240]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:40:13 compute-0 nova_compute[356901]: 2025-10-11 02:40:13.424 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:40:13 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:40:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:40:13 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:40:13 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 82ac177b-661c-4780-971b-2767d4dbda2f does not exist
Oct 11 02:40:13 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev d39595ad-c46a-4854-aaa5-b6b108f77489 does not exist
Oct 11 02:40:13 compute-0 ceph-mon[191930]: pgmap v1875: 321 pgs: 321 active+clean; 324 MiB data, 419 MiB used, 60 GiB / 60 GiB avail; 1.8 MiB/s rd, 4.3 MiB/s wr, 165 op/s
Oct 11 02:40:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:40:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:40:13 compute-0 sudo[455430]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:40:13 compute-0 sudo[455430]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:13 compute-0 sudo[455430]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:13 compute-0 sshd-session[455104]: Failed password for root from 193.46.255.159 port 48076 ssh2
Oct 11 02:40:13 compute-0 sudo[455473]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:40:13 compute-0 sudo[455473]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:40:13 compute-0 sudo[455473]: pam_unix(sudo:session): session closed for user root
Oct 11 02:40:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:40:13 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2688291903' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:40:13 compute-0 nova_compute[356901]: 2025-10-11 02:40:13.916 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.492s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:40:13 compute-0 nova_compute[356901]: 2025-10-11 02:40:13.930 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:40:13 compute-0 nova_compute[356901]: 2025-10-11 02:40:13.953 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:40:13 compute-0 nova_compute[356901]: 2025-10-11 02:40:13.991 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:40:13 compute-0 nova_compute[356901]: 2025-10-11 02:40:13.992 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.740s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:14 compute-0 unix_chkpwd[455501]: password check failed for user (root)
Oct 11 02:40:14 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2688291903' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:40:14 compute-0 nova_compute[356901]: 2025-10-11 02:40:14.668 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Acquiring lock "0ec010f2-6758-466d-900f-c8c6ffe81844" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:14 compute-0 nova_compute[356901]: 2025-10-11 02:40:14.670 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lock "0ec010f2-6758-466d-900f-c8c6ffe81844" acquired by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: waited 0.003s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:14 compute-0 nova_compute[356901]: 2025-10-11 02:40:14.704 2 DEBUG nova.compute.manager [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Starting instance... _do_build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2402
Oct 11 02:40:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1876: 321 pgs: 321 active+clean; 324 MiB data, 419 MiB used, 60 GiB / 60 GiB avail; 928 KiB/s rd, 4.3 MiB/s wr, 136 op/s
Oct 11 02:40:14 compute-0 nova_compute[356901]: 2025-10-11 02:40:14.793 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:14 compute-0 nova_compute[356901]: 2025-10-11 02:40:14.794 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:14 compute-0 nova_compute[356901]: 2025-10-11 02:40:14.805 2 DEBUG nova.virt.hardware [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Require both a host and instance NUMA topology to fit instance on host. numa_fit_instance_to_host /usr/lib/python3.9/site-packages/nova/virt/hardware.py:2368
Oct 11 02:40:14 compute-0 nova_compute[356901]: 2025-10-11 02:40:14.806 2 INFO nova.compute.claims [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Claim successful on node compute-0.ctlplane.example.com
Oct 11 02:40:15 compute-0 nova_compute[356901]: 2025-10-11 02:40:15.141 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:15 compute-0 nova_compute[356901]: 2025-10-11 02:40:15.217 2 DEBUG oslo_concurrency.processutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:40:15 compute-0 ceph-mon[191930]: pgmap v1876: 321 pgs: 321 active+clean; 324 MiB data, 419 MiB used, 60 GiB / 60 GiB avail; 928 KiB/s rd, 4.3 MiB/s wr, 136 op/s
Oct 11 02:40:15 compute-0 sshd-session[455104]: Failed password for root from 193.46.255.159 port 48076 ssh2
Oct 11 02:40:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:40:15 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2182250002' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:40:15 compute-0 nova_compute[356901]: 2025-10-11 02:40:15.756 2 DEBUG oslo_concurrency.processutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.539s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:40:15 compute-0 nova_compute[356901]: 2025-10-11 02:40:15.770 2 DEBUG nova.compute.provider_tree [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:40:15 compute-0 nova_compute[356901]: 2025-10-11 02:40:15.790 2 DEBUG nova.scheduler.client.report [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:40:15 compute-0 nova_compute[356901]: 2025-10-11 02:40:15.824 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: held 1.030s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:15 compute-0 nova_compute[356901]: 2025-10-11 02:40:15.826 2 DEBUG nova.compute.manager [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Start building networks asynchronously for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2799
Oct 11 02:40:15 compute-0 nova_compute[356901]: 2025-10-11 02:40:15.890 2 DEBUG nova.compute.manager [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Allocating IP information in the background. _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1952
Oct 11 02:40:15 compute-0 nova_compute[356901]: 2025-10-11 02:40:15.890 2 DEBUG nova.network.neutron [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] allocate_for_instance() allocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1156
Oct 11 02:40:15 compute-0 nova_compute[356901]: 2025-10-11 02:40:15.910 2 INFO nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Ignoring supplied device name: /dev/vda. Libvirt can't honour user-supplied dev names
Oct 11 02:40:15 compute-0 nova_compute[356901]: 2025-10-11 02:40:15.932 2 DEBUG nova.compute.manager [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Start building block device mappings for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2834
Oct 11 02:40:16 compute-0 nova_compute[356901]: 2025-10-11 02:40:16.018 2 DEBUG nova.compute.manager [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Start spawning the instance on the hypervisor. _build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2608
Oct 11 02:40:16 compute-0 nova_compute[356901]: 2025-10-11 02:40:16.021 2 DEBUG nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Creating instance directory _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4723
Oct 11 02:40:16 compute-0 nova_compute[356901]: 2025-10-11 02:40:16.022 2 INFO nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Creating image(s)
Oct 11 02:40:16 compute-0 unix_chkpwd[455538]: password check failed for user (root)
Oct 11 02:40:16 compute-0 nova_compute[356901]: 2025-10-11 02:40:16.089 2 DEBUG nova.storage.rbd_utils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] rbd image 0ec010f2-6758-466d-900f-c8c6ffe81844_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:40:16 compute-0 nova_compute[356901]: 2025-10-11 02:40:16.169 2 DEBUG nova.storage.rbd_utils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] rbd image 0ec010f2-6758-466d-900f-c8c6ffe81844_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:40:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:40:16 compute-0 nova_compute[356901]: 2025-10-11 02:40:16.243 2 DEBUG nova.storage.rbd_utils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] rbd image 0ec010f2-6758-466d-900f-c8c6ffe81844_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:40:16 compute-0 nova_compute[356901]: 2025-10-11 02:40:16.258 2 DEBUG oslo_concurrency.processutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:40:16 compute-0 nova_compute[356901]: 2025-10-11 02:40:16.340 2 DEBUG oslo_concurrency.processutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d --force-share --output=json" returned: 0 in 0.082s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:40:16 compute-0 nova_compute[356901]: 2025-10-11 02:40:16.342 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Acquiring lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:16 compute-0 nova_compute[356901]: 2025-10-11 02:40:16.344 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:16 compute-0 nova_compute[356901]: 2025-10-11 02:40:16.345 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:16 compute-0 nova_compute[356901]: 2025-10-11 02:40:16.400 2 DEBUG nova.storage.rbd_utils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] rbd image 0ec010f2-6758-466d-900f-c8c6ffe81844_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:40:16 compute-0 nova_compute[356901]: 2025-10-11 02:40:16.409 2 DEBUG oslo_concurrency.processutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d 0ec010f2-6758-466d-900f-c8c6ffe81844_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:40:16 compute-0 nova_compute[356901]: 2025-10-11 02:40:16.444 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:16 compute-0 nova_compute[356901]: 2025-10-11 02:40:16.450 2 DEBUG nova.policy [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Policy check for network:attach_external_network failed with credentials {'is_admin': False, 'user_id': '7632dc55de9f4e5599b0d2b6ef832832', 'user_domain_id': 'default', 'system_scope': None, 'domain_id': None, 'project_id': 'd22dc9557f7a4dada059632a83957c8f', 'project_domain_id': 'default', 'roles': ['reader', 'member'], 'is_admin_project': True, 'service_user_id': None, 'service_user_domain_id': None, 'service_project_id': None, 'service_project_domain_id': None, 'service_roles': []} authorize /usr/lib/python3.9/site-packages/nova/policy.py:203
Oct 11 02:40:16 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2182250002' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:40:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1877: 321 pgs: 321 active+clean; 324 MiB data, 419 MiB used, 60 GiB / 60 GiB avail; 637 KiB/s rd, 4.3 MiB/s wr, 121 op/s
Oct 11 02:40:16 compute-0 nova_compute[356901]: 2025-10-11 02:40:16.845 2 DEBUG oslo_concurrency.processutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d 0ec010f2-6758-466d-900f-c8c6ffe81844_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.437s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:40:16 compute-0 nova_compute[356901]: 2025-10-11 02:40:16.977 2 DEBUG nova.storage.rbd_utils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] resizing rbd image 0ec010f2-6758-466d-900f-c8c6ffe81844_disk to 1073741824 resize /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:288
Oct 11 02:40:17 compute-0 nova_compute[356901]: 2025-10-11 02:40:17.209 2 DEBUG nova.objects.instance [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lazy-loading 'migration_context' on Instance uuid 0ec010f2-6758-466d-900f-c8c6ffe81844 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:40:17 compute-0 nova_compute[356901]: 2025-10-11 02:40:17.222 2 DEBUG nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Created local disks _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4857
Oct 11 02:40:17 compute-0 nova_compute[356901]: 2025-10-11 02:40:17.223 2 DEBUG nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Ensure instance console log exists: /var/lib/nova/instances/0ec010f2-6758-466d-900f-c8c6ffe81844/console.log _ensure_console_log_for_instance /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4609
Oct 11 02:40:17 compute-0 nova_compute[356901]: 2025-10-11 02:40:17.223 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Acquiring lock "vgpu_resources" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:17 compute-0 nova_compute[356901]: 2025-10-11 02:40:17.224 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lock "vgpu_resources" acquired by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:17 compute-0 nova_compute[356901]: 2025-10-11 02:40:17.224 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lock "vgpu_resources" "released" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:17 compute-0 nova_compute[356901]: 2025-10-11 02:40:17.445 2 DEBUG nova.network.neutron [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Successfully created port: 5aa70f06-b185-47e2-9107-2f51edc00ff2 _create_port_minimal /usr/lib/python3.9/site-packages/nova/network/neutron.py:548
Oct 11 02:40:17 compute-0 sshd-session[455104]: Failed password for root from 193.46.255.159 port 48076 ssh2
Oct 11 02:40:17 compute-0 ceph-mon[191930]: pgmap v1877: 321 pgs: 321 active+clean; 324 MiB data, 419 MiB used, 60 GiB / 60 GiB avail; 637 KiB/s rd, 4.3 MiB/s wr, 121 op/s
Oct 11 02:40:17 compute-0 sshd-session[455104]: Received disconnect from 193.46.255.159 port 48076:11:  [preauth]
Oct 11 02:40:17 compute-0 sshd-session[455104]: Disconnected from authenticating user root 193.46.255.159 port 48076 [preauth]
Oct 11 02:40:17 compute-0 sshd-session[455104]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.159  user=root
Oct 11 02:40:18 compute-0 unix_chkpwd[455693]: password check failed for user (root)
Oct 11 02:40:18 compute-0 sshd-session[455691]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.159  user=root
Oct 11 02:40:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1878: 321 pgs: 321 active+clean; 339 MiB data, 428 MiB used, 60 GiB / 60 GiB avail; 602 KiB/s rd, 3.5 MiB/s wr, 101 op/s
Oct 11 02:40:18 compute-0 nova_compute[356901]: 2025-10-11 02:40:18.800 2 DEBUG nova.network.neutron [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Successfully updated port: 5aa70f06-b185-47e2-9107-2f51edc00ff2 _update_port /usr/lib/python3.9/site-packages/nova/network/neutron.py:586
Oct 11 02:40:18 compute-0 nova_compute[356901]: 2025-10-11 02:40:18.824 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Acquiring lock "refresh_cache-0ec010f2-6758-466d-900f-c8c6ffe81844" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:40:18 compute-0 nova_compute[356901]: 2025-10-11 02:40:18.825 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Acquired lock "refresh_cache-0ec010f2-6758-466d-900f-c8c6ffe81844" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:40:18 compute-0 nova_compute[356901]: 2025-10-11 02:40:18.825 2 DEBUG nova.network.neutron [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:40:18 compute-0 ceph-mon[191930]: pgmap v1878: 321 pgs: 321 active+clean; 339 MiB data, 428 MiB used, 60 GiB / 60 GiB avail; 602 KiB/s rd, 3.5 MiB/s wr, 101 op/s
Oct 11 02:40:18 compute-0 nova_compute[356901]: 2025-10-11 02:40:18.943 2 DEBUG nova.compute.manager [req-9376b0ad-1f16-48a7-b803-4c6fffb246b3 req-0061cbb1-fbb2-4a6b-97b2-26c575929ca7 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Received event network-changed-5aa70f06-b185-47e2-9107-2f51edc00ff2 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:40:18 compute-0 nova_compute[356901]: 2025-10-11 02:40:18.944 2 DEBUG nova.compute.manager [req-9376b0ad-1f16-48a7-b803-4c6fffb246b3 req-0061cbb1-fbb2-4a6b-97b2-26c575929ca7 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Refreshing instance network info cache due to event network-changed-5aa70f06-b185-47e2-9107-2f51edc00ff2. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:40:18 compute-0 nova_compute[356901]: 2025-10-11 02:40:18.945 2 DEBUG oslo_concurrency.lockutils [req-9376b0ad-1f16-48a7-b803-4c6fffb246b3 req-0061cbb1-fbb2-4a6b-97b2-26c575929ca7 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-0ec010f2-6758-466d-900f-c8c6ffe81844" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:40:19 compute-0 nova_compute[356901]: 2025-10-11 02:40:19.014 2 DEBUG nova.network.neutron [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Instance cache missing network info. _get_preexisting_port_ids /usr/lib/python3.9/site-packages/nova/network/neutron.py:3323
Oct 11 02:40:19 compute-0 podman[455694]: 2025-10-11 02:40:19.260714198 +0000 UTC m=+0.137053244 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, version=9.4, io.buildah.version=1.29.0, release=1214.1726694543, io.openshift.expose-services=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2024-09-18T21:23:30, config_id=edpm, name=ubi9, release-0.7.12=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=base rhel9, vendor=Red Hat, Inc., architecture=x86_64, maintainer=Red Hat, Inc., container_name=kepler, com.redhat.component=ubi9-container, io.k8s.display-name=Red Hat Universal Base Image 9, managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-type=git)
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.040 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.230 2 DEBUG nova.network.neutron [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Updating instance_info_cache with network_info: [{"id": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "address": "fa:16:3e:05:79:4e", "network": {"id": "6f7e3ff0-592e-41ad-80ba-fdc1878bf07b", "bridge": "br-int", "label": "tempest-ServerAddressesTestJSON-1773621559-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.7", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d22dc9557f7a4dada059632a83957c8f", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5aa70f06-b1", "ovs_interfaceid": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.284 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Releasing lock "refresh_cache-0ec010f2-6758-466d-900f-c8c6ffe81844" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.286 2 DEBUG nova.compute.manager [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Instance network_info: |[{"id": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "address": "fa:16:3e:05:79:4e", "network": {"id": "6f7e3ff0-592e-41ad-80ba-fdc1878bf07b", "bridge": "br-int", "label": "tempest-ServerAddressesTestJSON-1773621559-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.7", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d22dc9557f7a4dada059632a83957c8f", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5aa70f06-b1", "ovs_interfaceid": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}]| _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1967
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.287 2 DEBUG oslo_concurrency.lockutils [req-9376b0ad-1f16-48a7-b803-4c6fffb246b3 req-0061cbb1-fbb2-4a6b-97b2-26c575929ca7 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-0ec010f2-6758-466d-900f-c8c6ffe81844" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.288 2 DEBUG nova.network.neutron [req-9376b0ad-1f16-48a7-b803-4c6fffb246b3 req-0061cbb1-fbb2-4a6b-97b2-26c575929ca7 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Refreshing network info cache for port 5aa70f06-b185-47e2-9107-2f51edc00ff2 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.293 2 DEBUG nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Start _get_guest_xml network_info=[{"id": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "address": "fa:16:3e:05:79:4e", "network": {"id": "6f7e3ff0-592e-41ad-80ba-fdc1878bf07b", "bridge": "br-int", "label": "tempest-ServerAddressesTestJSON-1773621559-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.7", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d22dc9557f7a4dada059632a83957c8f", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5aa70f06-b1", "ovs_interfaceid": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] disk_info={'disk_bus': 'virtio', 'cdrom_bus': 'sata', 'mapping': {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.config': {'bus': 'sata', 'dev': 'sda', 'type': 'cdrom'}}} image_meta=ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:38:04Z,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=0,min_ram=0,name='cirros-0.6.2-x86_64-disk.img',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:38:05Z,virtual_size=<?>,visibility=<?>) rescue=None block_device_info={'root_device_name': '/dev/vda', 'image': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'boot_index': 0, 'device_name': '/dev/vda', 'size': 0, 'encryption_format': None, 'image_id': '72f37f2e-4296-450e-9a12-10717f4ac7dc'}], 'ephemerals': [], 'block_device_mapping': [], 'swap': None} _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7549
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.319 2 WARNING nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.326 2 DEBUG nova.virt.libvirt.host [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V1... _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1653
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.327 2 DEBUG nova.virt.libvirt.host [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] CPU controller missing on host. _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1663
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.334 2 DEBUG nova.virt.libvirt.host [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V2... _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1672
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.335 2 DEBUG nova.virt.libvirt.host [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] CPU controller found on host. _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1679
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.336 2 DEBUG nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] CPU mode 'host-model' models '' was chosen, with extra flags: '' _get_guest_cpu_model_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:5396
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.337 2 DEBUG nova.virt.hardware [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Getting desirable topologies for flavor Flavor(created_at=2025-10-11T02:38:03Z,deleted=False,deleted_at=None,description=None,disabled=False,ephemeral_gb=0,extra_specs={hw_rng:allowed='True'},flavorid='6dff30d1-85df-4e9c-9163-a20ba47bb0c7',id=3,is_public=True,memory_mb=128,name='m1.nano',projects=<?>,root_gb=1,rxtx_factor=1.0,swap=0,updated_at=None,vcpu_weight=0,vcpus=1) and image_meta ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:38:04Z,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=0,min_ram=0,name='cirros-0.6.2-x86_64-disk.img',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:38:05Z,virtual_size=<?>,visibility=<?>), allow threads: True _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:563
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.338 2 DEBUG nova.virt.hardware [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Flavor limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:348
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.338 2 DEBUG nova.virt.hardware [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Image limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:352
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.339 2 DEBUG nova.virt.hardware [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Flavor pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:388
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.340 2 DEBUG nova.virt.hardware [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Image pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:392
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.340 2 DEBUG nova.virt.hardware [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Chose sockets=0, cores=0, threads=0; limits were sockets=65536, cores=65536, threads=65536 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:430
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.341 2 DEBUG nova.virt.hardware [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Topology preferred VirtCPUTopology(cores=0,sockets=0,threads=0), maximum VirtCPUTopology(cores=65536,sockets=65536,threads=65536) _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:569
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.342 2 DEBUG nova.virt.hardware [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Build topologies for 1 vcpu(s) 1:1:1 _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:471
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.342 2 DEBUG nova.virt.hardware [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Got 1 possible topologies _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:501
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.343 2 DEBUG nova.virt.hardware [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Possible topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:575
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.343 2 DEBUG nova.virt.hardware [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Sorted desired topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:577
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.347 2 DEBUG oslo_concurrency.processutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:40:20 compute-0 sshd-session[455691]: Failed password for root from 193.46.255.159 port 56358 ssh2
Oct 11 02:40:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1879: 321 pgs: 321 active+clean; 359 MiB data, 436 MiB used, 60 GiB / 60 GiB avail; 301 KiB/s rd, 2.7 MiB/s wr, 86 op/s
Oct 11 02:40:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:40:20 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1552471374' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.857 2 DEBUG oslo_concurrency.processutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.510s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:40:20 compute-0 ceph-mon[191930]: pgmap v1879: 321 pgs: 321 active+clean; 359 MiB data, 436 MiB used, 60 GiB / 60 GiB avail; 301 KiB/s rd, 2.7 MiB/s wr, 86 op/s
Oct 11 02:40:20 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1552471374' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.922 2 DEBUG nova.storage.rbd_utils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] rbd image 0ec010f2-6758-466d-900f-c8c6ffe81844_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:40:20 compute-0 nova_compute[356901]: 2025-10-11 02:40:20.932 2 DEBUG oslo_concurrency.processutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:40:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:40:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:40:21 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3077035616' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.442 2 DEBUG oslo_concurrency.processutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.510s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.447 2 DEBUG nova.virt.libvirt.vif [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:40:13Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='tempest-ServerAddressesTestJSON-server-1273435186',display_name='tempest-ServerAddressesTestJSON-server-1273435186',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-serveraddressestestjson-server-1273435186',id=11,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data=None,key_name=None,keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='d22dc9557f7a4dada059632a83957c8f',ramdisk_id='',reservation_id='r-jlhhx8n0',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='reader,member',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_hw_rng_model='virtio',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-ServerAddressesTestJSON-1658687559',owner_user_name='tempest-ServerAddressesTestJSON-1658687559-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:40:15Z,user_data=None,user_id='7632dc55de9f4e5599b0d2b6ef832832',uuid=0ec010f2-6758-466d-900f-c8c6ffe81844,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "address": "fa:16:3e:05:79:4e", "network": {"id": "6f7e3ff0-592e-41ad-80ba-fdc1878bf07b", "bridge": "br-int", "label": "tempest-ServerAddressesTestJSON-1773621559-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.7", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d22dc9557f7a4dada059632a83957c8f", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5aa70f06-b1", "ovs_interfaceid": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} virt_type=kvm get_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:563
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.449 2 DEBUG nova.network.os_vif_util [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Converting VIF {"id": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "address": "fa:16:3e:05:79:4e", "network": {"id": "6f7e3ff0-592e-41ad-80ba-fdc1878bf07b", "bridge": "br-int", "label": "tempest-ServerAddressesTestJSON-1773621559-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.7", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d22dc9557f7a4dada059632a83957c8f", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5aa70f06-b1", "ovs_interfaceid": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.451 2 DEBUG nova.network.os_vif_util [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:05:79:4e,bridge_name='br-int',has_traffic_filtering=True,id=5aa70f06-b185-47e2-9107-2f51edc00ff2,network=Network(6f7e3ff0-592e-41ad-80ba-fdc1878bf07b),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap5aa70f06-b1') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.454 2 DEBUG nova.objects.instance [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lazy-loading 'pci_devices' on Instance uuid 0ec010f2-6758-466d-900f-c8c6ffe81844 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.458 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.486 2 DEBUG nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] End _get_guest_xml xml=<domain type="kvm">
Oct 11 02:40:21 compute-0 nova_compute[356901]:   <uuid>0ec010f2-6758-466d-900f-c8c6ffe81844</uuid>
Oct 11 02:40:21 compute-0 nova_compute[356901]:   <name>instance-0000000b</name>
Oct 11 02:40:21 compute-0 nova_compute[356901]:   <memory>131072</memory>
Oct 11 02:40:21 compute-0 nova_compute[356901]:   <vcpu>1</vcpu>
Oct 11 02:40:21 compute-0 nova_compute[356901]:   <metadata>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.1">
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <nova:package version="27.5.2-0.20250829104910.6f8decf.el9"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <nova:name>tempest-ServerAddressesTestJSON-server-1273435186</nova:name>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <nova:creationTime>2025-10-11 02:40:20</nova:creationTime>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <nova:flavor name="m1.nano">
Oct 11 02:40:21 compute-0 nova_compute[356901]:         <nova:memory>128</nova:memory>
Oct 11 02:40:21 compute-0 nova_compute[356901]:         <nova:disk>1</nova:disk>
Oct 11 02:40:21 compute-0 nova_compute[356901]:         <nova:swap>0</nova:swap>
Oct 11 02:40:21 compute-0 nova_compute[356901]:         <nova:ephemeral>0</nova:ephemeral>
Oct 11 02:40:21 compute-0 nova_compute[356901]:         <nova:vcpus>1</nova:vcpus>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       </nova:flavor>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <nova:owner>
Oct 11 02:40:21 compute-0 nova_compute[356901]:         <nova:user uuid="7632dc55de9f4e5599b0d2b6ef832832">tempest-ServerAddressesTestJSON-1658687559-project-member</nova:user>
Oct 11 02:40:21 compute-0 nova_compute[356901]:         <nova:project uuid="d22dc9557f7a4dada059632a83957c8f">tempest-ServerAddressesTestJSON-1658687559</nova:project>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       </nova:owner>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <nova:root type="image" uuid="72f37f2e-4296-450e-9a12-10717f4ac7dc"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <nova:ports>
Oct 11 02:40:21 compute-0 nova_compute[356901]:         <nova:port uuid="5aa70f06-b185-47e2-9107-2f51edc00ff2">
Oct 11 02:40:21 compute-0 nova_compute[356901]:           <nova:ip type="fixed" address="10.100.0.7" ipVersion="4"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:         </nova:port>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       </nova:ports>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     </nova:instance>
Oct 11 02:40:21 compute-0 nova_compute[356901]:   </metadata>
Oct 11 02:40:21 compute-0 nova_compute[356901]:   <sysinfo type="smbios">
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <system>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <entry name="manufacturer">RDO</entry>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <entry name="product">OpenStack Compute</entry>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <entry name="version">27.5.2-0.20250829104910.6f8decf.el9</entry>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <entry name="serial">0ec010f2-6758-466d-900f-c8c6ffe81844</entry>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <entry name="uuid">0ec010f2-6758-466d-900f-c8c6ffe81844</entry>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <entry name="family">Virtual Machine</entry>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     </system>
Oct 11 02:40:21 compute-0 nova_compute[356901]:   </sysinfo>
Oct 11 02:40:21 compute-0 nova_compute[356901]:   <os>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <type arch="x86_64" machine="q35">hvm</type>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <boot dev="hd"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <smbios mode="sysinfo"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:   </os>
Oct 11 02:40:21 compute-0 nova_compute[356901]:   <features>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <acpi/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <apic/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <vmcoreinfo/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:   </features>
Oct 11 02:40:21 compute-0 nova_compute[356901]:   <clock offset="utc">
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <timer name="pit" tickpolicy="delay"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <timer name="rtc" tickpolicy="catchup"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <timer name="hpet" present="no"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:   </clock>
Oct 11 02:40:21 compute-0 nova_compute[356901]:   <cpu mode="host-model" match="exact">
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <topology sockets="1" cores="1" threads="1"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:40:21 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/0ec010f2-6758-466d-900f-c8c6ffe81844_disk">
Oct 11 02:40:21 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       </source>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:40:21 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <target dev="vda" bus="virtio"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <disk type="network" device="cdrom">
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/0ec010f2-6758-466d-900f-c8c6ffe81844_disk.config">
Oct 11 02:40:21 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       </source>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:40:21 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <target dev="sda" bus="sata"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <interface type="ethernet">
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <mac address="fa:16:3e:05:79:4e"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <driver name="vhost" rx_queue_size="512"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <mtu size="1442"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <target dev="tap5aa70f06-b1"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <serial type="pty">
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <log file="/var/lib/nova/instances/0ec010f2-6758-466d-900f-c8c6ffe81844/console.log" append="off"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     </serial>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <graphics type="vnc" autoport="yes" listen="::0"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <video>
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     </video>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <input type="tablet" bus="usb"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <rng model="virtio">
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <backend model="random">/dev/urandom</backend>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <controller type="usb" index="0"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     <memballoon model="virtio">
Oct 11 02:40:21 compute-0 nova_compute[356901]:       <stats period="10"/>
Oct 11 02:40:21 compute-0 nova_compute[356901]:     </memballoon>
Oct 11 02:40:21 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:40:21 compute-0 nova_compute[356901]: </domain>
Oct 11 02:40:21 compute-0 nova_compute[356901]:  _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7555
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.503 2 DEBUG nova.compute.manager [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Preparing to wait for external event network-vif-plugged-5aa70f06-b185-47e2-9107-2f51edc00ff2 prepare_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:283
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.504 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Acquiring lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.504 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" acquired by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.505 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" "released" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.506 2 DEBUG nova.virt.libvirt.vif [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:40:13Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='tempest-ServerAddressesTestJSON-server-1273435186',display_name='tempest-ServerAddressesTestJSON-server-1273435186',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-serveraddressestestjson-server-1273435186',id=11,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data=None,key_name=None,keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=PciDeviceList,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='d22dc9557f7a4dada059632a83957c8f',ramdisk_id='',reservation_id='r-jlhhx8n0',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='reader,member',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_hw_rng_model='virtio',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-ServerAddressesTestJSON-1658687559',owner_user_name='tempest-ServerAddressesTestJSON-1658687559-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:40:15Z,user_data=None,user_id='7632dc55de9f4e5599b0d2b6ef832832',uuid=0ec010f2-6758-466d-900f-c8c6ffe81844,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "address": "fa:16:3e:05:79:4e", "network": {"id": "6f7e3ff0-592e-41ad-80ba-fdc1878bf07b", "bridge": "br-int", "label": "tempest-ServerAddressesTestJSON-1773621559-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.7", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d22dc9557f7a4dada059632a83957c8f", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5aa70f06-b1", "ovs_interfaceid": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} plug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:710
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.506 2 DEBUG nova.network.os_vif_util [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Converting VIF {"id": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "address": "fa:16:3e:05:79:4e", "network": {"id": "6f7e3ff0-592e-41ad-80ba-fdc1878bf07b", "bridge": "br-int", "label": "tempest-ServerAddressesTestJSON-1773621559-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.7", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d22dc9557f7a4dada059632a83957c8f", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5aa70f06-b1", "ovs_interfaceid": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.507 2 DEBUG nova.network.os_vif_util [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:05:79:4e,bridge_name='br-int',has_traffic_filtering=True,id=5aa70f06-b185-47e2-9107-2f51edc00ff2,network=Network(6f7e3ff0-592e-41ad-80ba-fdc1878bf07b),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap5aa70f06-b1') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.511 2 DEBUG os_vif [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Plugging vif VIFOpenVSwitch(active=False,address=fa:16:3e:05:79:4e,bridge_name='br-int',has_traffic_filtering=True,id=5aa70f06-b185-47e2-9107-2f51edc00ff2,network=Network(6f7e3ff0-592e-41ad-80ba-fdc1878bf07b),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap5aa70f06-b1') plug /usr/lib/python3.9/site-packages/os_vif/__init__.py:76
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.512 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.513 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddBridgeCommand(_result=None, name=br-int, may_exist=True, datapath_type=system) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.514 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.520 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.521 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tap5aa70f06-b1, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.523 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Interface, record=tap5aa70f06-b1, col_values=(('external_ids', {'iface-id': '5aa70f06-b185-47e2-9107-2f51edc00ff2', 'iface-status': 'active', 'attached-mac': 'fa:16:3e:05:79:4e', 'vm-uuid': '0ec010f2-6758-466d-900f-c8c6ffe81844'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:21 compute-0 NetworkManager[44908]: <info>  [1760150421.5287] manager: (tap5aa70f06-b1): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/58)
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.528 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.534 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.540 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.541 2 INFO os_vif [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Successfully plugged vif VIFOpenVSwitch(active=False,address=fa:16:3e:05:79:4e,bridge_name='br-int',has_traffic_filtering=True,id=5aa70f06-b185-47e2-9107-2f51edc00ff2,network=Network(6f7e3ff0-592e-41ad-80ba-fdc1878bf07b),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap5aa70f06-b1')
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.629 2 DEBUG nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] No BDM found with device name vda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.630 2 DEBUG nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] No BDM found with device name sda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.630 2 DEBUG nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] No VIF found with MAC fa:16:3e:05:79:4e, not building metadata _build_interface_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12092
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.631 2 INFO nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Using config drive
Oct 11 02:40:21 compute-0 nova_compute[356901]: 2025-10-11 02:40:21.672 2 DEBUG nova.storage.rbd_utils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] rbd image 0ec010f2-6758-466d-900f-c8c6ffe81844_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:40:21 compute-0 unix_chkpwd[455796]: password check failed for user (root)
Oct 11 02:40:21 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3077035616' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:40:22 compute-0 nova_compute[356901]: 2025-10-11 02:40:22.003 2 INFO nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Creating config drive at /var/lib/nova/instances/0ec010f2-6758-466d-900f-c8c6ffe81844/disk.config
Oct 11 02:40:22 compute-0 nova_compute[356901]: 2025-10-11 02:40:22.014 2 DEBUG oslo_concurrency.processutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Running cmd (subprocess): /usr/bin/mkisofs -o /var/lib/nova/instances/0ec010f2-6758-466d-900f-c8c6ffe81844/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpnath5tsh execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:40:22 compute-0 nova_compute[356901]: 2025-10-11 02:40:22.180 2 DEBUG oslo_concurrency.processutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] CMD "/usr/bin/mkisofs -o /var/lib/nova/instances/0ec010f2-6758-466d-900f-c8c6ffe81844/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpnath5tsh" returned: 0 in 0.166s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:40:22 compute-0 nova_compute[356901]: 2025-10-11 02:40:22.253 2 DEBUG nova.storage.rbd_utils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] rbd image 0ec010f2-6758-466d-900f-c8c6ffe81844_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:40:22 compute-0 nova_compute[356901]: 2025-10-11 02:40:22.269 2 DEBUG oslo_concurrency.processutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/0ec010f2-6758-466d-900f-c8c6ffe81844/disk.config 0ec010f2-6758-466d-900f-c8c6ffe81844_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:40:22 compute-0 nova_compute[356901]: 2025-10-11 02:40:22.534 2 DEBUG oslo_concurrency.processutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/0ec010f2-6758-466d-900f-c8c6ffe81844/disk.config 0ec010f2-6758-466d-900f-c8c6ffe81844_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.264s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:40:22 compute-0 nova_compute[356901]: 2025-10-11 02:40:22.535 2 INFO nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Deleting local config drive /var/lib/nova/instances/0ec010f2-6758-466d-900f-c8c6ffe81844/disk.config because it was imported into RBD.
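The config-drive flow above (build the ISO locally, import it into the vms pool, delete the local copy) can be replayed by hand when debugging; a minimal sketch using the same commands the log records, with /tmp/cd-contents standing in for the temporary metadata directory and the -publisher string omitted:

    # build the config-drive ISO exactly as nova did (flags taken from the log above)
    mkisofs -o /var/lib/nova/instances/0ec010f2-6758-466d-900f-c8c6ffe81844/disk.config \
        -ldots -allow-lowercase -allow-multidot -l -quiet -J -r -V config-2 /tmp/cd-contents
    # import it into the vms pool under the name nova looks for, then drop the local copy
    rbd import --pool vms /var/lib/nova/instances/0ec010f2-6758-466d-900f-c8c6ffe81844/disk.config \
        0ec010f2-6758-466d-900f-c8c6ffe81844_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
    rm /var/lib/nova/instances/0ec010f2-6758-466d-900f-c8c6ffe81844/disk.config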
Oct 11 02:40:22 compute-0 kernel: tap5aa70f06-b1: entered promiscuous mode
Oct 11 02:40:22 compute-0 ovn_controller[88370]: 2025-10-11T02:40:22Z|00113|binding|INFO|Claiming lport 5aa70f06-b185-47e2-9107-2f51edc00ff2 for this chassis.
Oct 11 02:40:22 compute-0 ovn_controller[88370]: 2025-10-11T02:40:22Z|00114|binding|INFO|5aa70f06-b185-47e2-9107-2f51edc00ff2: Claiming fa:16:3e:05:79:4e 10.100.0.7
Oct 11 02:40:22 compute-0 nova_compute[356901]: 2025-10-11 02:40:22.648 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:22 compute-0 NetworkManager[44908]: <info>  [1760150422.6540] manager: (tap5aa70f06-b1): new Tun device (/org/freedesktop/NetworkManager/Devices/59)
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.654 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:05:79:4e 10.100.0.7'], port_security=['fa:16:3e:05:79:4e 10.100.0.7'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.7/28', 'neutron:device_id': '0ec010f2-6758-466d-900f-c8c6ffe81844', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': 'd22dc9557f7a4dada059632a83957c8f', 'neutron:revision_number': '2', 'neutron:security_group_ids': 'b3bb0626-da30-4fd8-8b0a-d558ae8653ae', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=b9e48fb9-a9ec-41d4-a970-8790332da1c3, chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=5aa70f06-b185-47e2-9107-2f51edc00ff2) old=Port_Binding(chassis=[]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.655 286362 INFO neutron.agent.ovn.metadata.agent [-] Port 5aa70f06-b185-47e2-9107-2f51edc00ff2 in datapath 6f7e3ff0-592e-41ad-80ba-fdc1878bf07b bound to our chassis
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.659 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network 6f7e3ff0-592e-41ad-80ba-fdc1878bf07b
Oct 11 02:40:22 compute-0 ovn_controller[88370]: 2025-10-11T02:40:22Z|00115|binding|INFO|Setting lport 5aa70f06-b185-47e2-9107-2f51edc00ff2 ovn-installed in OVS
Oct 11 02:40:22 compute-0 ovn_controller[88370]: 2025-10-11T02:40:22Z|00116|binding|INFO|Setting lport 5aa70f06-b185-47e2-9107-2f51edc00ff2 up in Southbound
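The claim sequence above (claim lport, then ovn-installed in OVS, then up in Southbound) can be cross-checked from the chassis; a sketch, assuming ovn-sbctl and ovs-vsctl are on the PATH:

    # chassis should now reference this chassis and up should read [true]
    ovn-sbctl find Port_Binding logical_port=5aa70f06-b185-47e2-9107-2f51edc00ff2
    # binding completion is mirrored as external_ids:ovn-installed on the OVS interface
    ovs-vsctl list Interface tap5aa70f06-b1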
Oct 11 02:40:22 compute-0 nova_compute[356901]: 2025-10-11 02:40:22.676 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:22 compute-0 nova_compute[356901]: 2025-10-11 02:40:22.685 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.684 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[1f3d32e5-fb34-4963-bcb6-5b1d625ab00c]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.687 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Creating VETH tap6f7e3ff0-51 in ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b namespace provision_datapath /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:665
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.692 422955 DEBUG neutron.privileged.agent.linux.ip_lib [-] Interface tap6f7e3ff0-50 not found in namespace None get_link_id /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:204
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.692 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[4dfd5625-b6a7-48a6-935b-11943acefb1f]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:22 compute-0 systemd-udevd[455848]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.695 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[5c333f01-38e3-4d7d-9283-8707a1785f54]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:22 compute-0 NetworkManager[44908]: <info>  [1760150422.7260] device (tap5aa70f06-b1): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 02:40:22 compute-0 systemd-machined[137586]: New machine qemu-11-instance-0000000b.
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.722 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[4e4eae75-1640-46a7-9b31-11e7889161c2]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:22 compute-0 systemd[1]: Started Virtual Machine qemu-11-instance-0000000b.
Oct 11 02:40:22 compute-0 NetworkManager[44908]: <info>  [1760150422.7517] device (tap5aa70f06-b1): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.754 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[2e94ba74-64b5-442d-bc88-120e33aecf50]: (4, ('net.ipv4.conf.all.promote_secondaries = 1\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1880: 321 pgs: 321 active+clean; 370 MiB data, 440 MiB used, 60 GiB / 60 GiB avail; 133 KiB/s rd, 1.9 MiB/s wr, 55 op/s
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.789 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[95aea6ed-4403-44c9-97d7-3cf6cec90489]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:22 compute-0 systemd-udevd[455854]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:40:22 compute-0 NetworkManager[44908]: <info>  [1760150422.8096] manager: (tap6f7e3ff0-50): new Veth device (/org/freedesktop/NetworkManager/Devices/60)
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.807 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[72b56610-f427-49d7-98f0-23066ba536f0]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.847 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[b21e7b58-b119-4199-a6ae-943fff870e13]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.851 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[56f7c3ae-9912-48e9-bc91-81c95e5931de]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:22 compute-0 NetworkManager[44908]: <info>  [1760150422.8778] device (tap6f7e3ff0-50): carrier: link connected
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.891 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[401a077e-2f7f-41fe-80fb-4ffa26e086ef]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:22 compute-0 ceph-mon[191930]: pgmap v1880: 321 pgs: 321 active+clean; 370 MiB data, 440 MiB used, 60 GiB / 60 GiB avail; 133 KiB/s rd, 1.9 MiB/s wr, 55 op/s
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.914 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[b4bccc49-f9f5-445a-899e-7e3db0faa9a4]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tap6f7e3ff0-51'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:3e:7d:d9'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 35], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 677060, 'reachable_time': 19928, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 455883, 'error': None, 'target': 'ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.935 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[74db7287-d5d1-41e8-b398-d83e0d068ed1]: (4, ({'family': 10, 'prefixlen': 64, 'flags': 192, 'scope': 253, 'index': 2, 'attrs': [['IFA_ADDRESS', 'fe80::f816:3eff:fe3e:7dd9'], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 677060, 'tstamp': 677060}], ['IFA_FLAGS', 192]], 'header': {'length': 72, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 455884, 'error': None, 'target': 'ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'},)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:22.957 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[c0f3b747-0887-4acb-aa23-65708acdd398]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tap6f7e3ff0-51'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:3e:7d:d9'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 35], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 677060, 'reachable_time': 19928, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 0, 'sequence_number': 255, 'pid': 455885, 'error': None, 'target': 'ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:23.004 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[0f1b10d8-e393-4b9e-8bab-043b08f84d65]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:23.103 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[f73c5769-967d-4614-b4f5-4e875ca512ca]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:23.106 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tap6f7e3ff0-50, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:23.106 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:23.107 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tap6f7e3ff0-50, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:23 compute-0 NetworkManager[44908]: <info>  [1760150423.1113] manager: (tap6f7e3ff0-50): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/61)
Oct 11 02:40:23 compute-0 kernel: tap6f7e3ff0-50: entered promiscuous mode
Oct 11 02:40:23 compute-0 nova_compute[356901]: 2025-10-11 02:40:23.112 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:23 compute-0 nova_compute[356901]: 2025-10-11 02:40:23.115 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:23.116 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tap6f7e3ff0-50, col_values=(('external_ids', {'iface-id': '4d5ff8cb-4868-4f22-b348-133ac4b5dde0'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
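Stripped of the privsep and ovsdbapp plumbing, the namespace provisioning recorded above amounts to roughly the following (a hand sketch, not the agent's literal calls; names and the iface-id are taken from the log lines above):

    ip netns add ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b              # one namespace per network
    ip link add tap6f7e3ff0-50 type veth peer name tap6f7e3ff0-51
    ip link set tap6f7e3ff0-51 netns ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b
    ovs-vsctl --may-exist add-port br-int tap6f7e3ff0-50                   # the root-namespace end plugs into br-int
    ovs-vsctl set Interface tap6f7e3ff0-50 external_ids:iface-id=4d5ff8cb-4868-4f22-b348-133ac4b5dde0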
Oct 11 02:40:23 compute-0 ovn_controller[88370]: 2025-10-11T02:40:23Z|00117|binding|INFO|Releasing lport 4d5ff8cb-4868-4f22-b348-133ac4b5dde0 from this chassis (sb_readonly=0)
Oct 11 02:40:23 compute-0 nova_compute[356901]: 2025-10-11 02:40:23.118 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:23 compute-0 nova_compute[356901]: 2025-10-11 02:40:23.123 2 DEBUG nova.network.neutron [req-9376b0ad-1f16-48a7-b803-4c6fffb246b3 req-0061cbb1-fbb2-4a6b-97b2-26c575929ca7 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Updated VIF entry in instance network info cache for port 5aa70f06-b185-47e2-9107-2f51edc00ff2. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:40:23 compute-0 nova_compute[356901]: 2025-10-11 02:40:23.123 2 DEBUG nova.network.neutron [req-9376b0ad-1f16-48a7-b803-4c6fffb246b3 req-0061cbb1-fbb2-4a6b-97b2-26c575929ca7 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Updating instance_info_cache with network_info: [{"id": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "address": "fa:16:3e:05:79:4e", "network": {"id": "6f7e3ff0-592e-41ad-80ba-fdc1878bf07b", "bridge": "br-int", "label": "tempest-ServerAddressesTestJSON-1773621559-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.7", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d22dc9557f7a4dada059632a83957c8f", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5aa70f06-b1", "ovs_interfaceid": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
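The instance_info_cache payload logged above is plain JSON once extracted; with the blob saved to a file (hypothetically nwinfo.json), the interesting fields fall out with jq:

    # prints the port MAC and fixed IP: fa:16:3e:05:79:4e and 10.100.0.7
    jq -r '.[0].address, .[0].network.subnets[0].ips[0].address' nwinfo.json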
Oct 11 02:40:23 compute-0 nova_compute[356901]: 2025-10-11 02:40:23.128 2 DEBUG nova.compute.manager [req-e4f4f865-76db-4ff6-a2ef-65d4d303bc41 req-c8c9ffc5-6b14-4479-b8d4-f19614dbd441 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Received event network-vif-plugged-5aa70f06-b185-47e2-9107-2f51edc00ff2 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:40:23 compute-0 nova_compute[356901]: 2025-10-11 02:40:23.128 2 DEBUG oslo_concurrency.lockutils [req-e4f4f865-76db-4ff6-a2ef-65d4d303bc41 req-c8c9ffc5-6b14-4479-b8d4-f19614dbd441 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:23 compute-0 nova_compute[356901]: 2025-10-11 02:40:23.129 2 DEBUG oslo_concurrency.lockutils [req-e4f4f865-76db-4ff6-a2ef-65d4d303bc41 req-c8c9ffc5-6b14-4479-b8d4-f19614dbd441 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:23 compute-0 nova_compute[356901]: 2025-10-11 02:40:23.129 2 DEBUG oslo_concurrency.lockutils [req-e4f4f865-76db-4ff6-a2ef-65d4d303bc41 req-c8c9ffc5-6b14-4479-b8d4-f19614dbd441 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:23 compute-0 nova_compute[356901]: 2025-10-11 02:40:23.129 2 DEBUG nova.compute.manager [req-e4f4f865-76db-4ff6-a2ef-65d4d303bc41 req-c8c9ffc5-6b14-4479-b8d4-f19614dbd441 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Processing event network-vif-plugged-5aa70f06-b185-47e2-9107-2f51edc00ff2 _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10808
Oct 11 02:40:23 compute-0 nova_compute[356901]: 2025-10-11 02:40:23.138 2 DEBUG oslo_concurrency.lockutils [req-9376b0ad-1f16-48a7-b803-4c6fffb246b3 req-0061cbb1-fbb2-4a6b-97b2-26c575929ca7 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-0ec010f2-6758-466d-900f-c8c6ffe81844" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:40:23 compute-0 nova_compute[356901]: 2025-10-11 02:40:23.142 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:23.143 286362 DEBUG neutron.agent.linux.utils [-] Unable to access /var/lib/neutron/external/pids/6f7e3ff0-592e-41ad-80ba-fdc1878bf07b.pid.haproxy; Error: [Errno 2] No such file or directory: '/var/lib/neutron/external/pids/6f7e3ff0-592e-41ad-80ba-fdc1878bf07b.pid.haproxy' get_value_from_file /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:252
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:23.147 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[a2c56a3b-c8a6-400b-9d92-30f5f59b98f4]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:23.148 286362 DEBUG neutron.agent.ovn.metadata.driver [-] haproxy_cfg = 
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]: global
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     log         /dev/log local0 debug
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     log-tag     haproxy-metadata-proxy-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     user        root
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     group       root
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     maxconn     1024
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     pidfile     /var/lib/neutron/external/pids/6f7e3ff0-592e-41ad-80ba-fdc1878bf07b.pid.haproxy
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     daemon
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]: defaults
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     log global
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     mode http
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     option httplog
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     option dontlognull
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     option http-server-close
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     option forwardfor
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     retries                 3
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     timeout http-request    30s
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     timeout connect         30s
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     timeout client          32s
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     timeout server          32s
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     timeout http-keep-alive 30s
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]: listen listener
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     bind 169.254.169.254:80
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     server metadata /var/lib/neutron/metadata_proxy
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:     http-request add-header X-OVN-Network-ID 6f7e3ff0-592e-41ad-80ba-fdc1878bf07b
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]:  create_config_file /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/driver.py:107
Oct 11 02:40:23 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:23.149 286362 DEBUG neutron.agent.linux.utils [-] Running command: ['sudo', 'neutron-rootwrap', '/etc/neutron/rootwrap.conf', 'ip', 'netns', 'exec', 'ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b', 'env', 'PROCESS_TAG=haproxy-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b', 'haproxy', '-f', '/var/lib/neutron/ovn-metadata-proxy/6f7e3ff0-592e-41ad-80ba-fdc1878bf07b.conf'] create_process /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:84
Oct 11 02:40:23 compute-0 sshd-session[455691]: Failed password for root from 193.46.255.159 port 56358 ssh2
Oct 11 02:40:23 compute-0 podman[455916]: 2025-10-11 02:40:23.70848229 +0000 UTC m=+0.098630260 container create 58508134d61b85c78884f22a32a79ea4f80d6b42ea547dbcee44ac5bdc85b7ef (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.schema-version=1.0)
Oct 11 02:40:23 compute-0 podman[455916]: 2025-10-11 02:40:23.668439094 +0000 UTC m=+0.058587104 image pull 1061e4fafe13e0b9aa1ef2c904ba4ad70c44f3e87b1d831f16c6db34937f4022 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Oct 11 02:40:23 compute-0 systemd[1]: Started libpod-conmon-58508134d61b85c78884f22a32a79ea4f80d6b42ea547dbcee44ac5bdc85b7ef.scope.
Oct 11 02:40:23 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:40:23 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4e3f0ddddfe73ab0523c25a5a46f87764bf75663f59bdb8d2c26d7f27c411d0c/merged/var/lib/neutron supports timestamps until 2038 (0x7fffffff)
Oct 11 02:40:23 compute-0 podman[455916]: 2025-10-11 02:40:23.848031544 +0000 UTC m=+0.238179594 container init 58508134d61b85c78884f22a32a79ea4f80d6b42ea547dbcee44ac5bdc85b7ef (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:40:23 compute-0 podman[455916]: 2025-10-11 02:40:23.856393488 +0000 UTC m=+0.246541468 container start 58508134d61b85c78884f22a32a79ea4f80d6b42ea547dbcee44ac5bdc85b7ef (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:40:23 compute-0 neutron-haproxy-ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b[455946]: [NOTICE]   (455969) : New worker (455974) forked
Oct 11 02:40:23 compute-0 neutron-haproxy-ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b[455946]: [NOTICE]   (455969) : Loading success.
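With haproxy loaded on 169.254.169.254:80 inside the namespace, the proxy can be smoke-tested from the host; a hedged example (the status code depends on whether the metadata service can resolve the caller, so expect an error code here rather than instance data):

    ip netns exec ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b \
        curl -s -o /dev/null -w '%{http_code}\n' http://169.254.169.254/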
Oct 11 02:40:24 compute-0 podman[455988]: 2025-10-11 02:40:24.220396369 +0000 UTC m=+0.108220081 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:40:24 compute-0 podman[455991]: 2025-10-11 02:40:24.248000335 +0000 UTC m=+0.129962015 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, managed_by=edpm_ansible)
Oct 11 02:40:24 compute-0 podman[455990]: 2025-10-11 02:40:24.262726992 +0000 UTC m=+0.145874336 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:40:24 compute-0 podman[455989]: 2025-10-11 02:40:24.284616149 +0000 UTC m=+0.158902778 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_managed=true)
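The four health_status entries above are podman's scheduled healthchecks firing, running the probe each container declares in its config_data; the same check can be run on demand, e.g.:

    podman healthcheck run ovn_controller   # exits 0 while the container reports healthy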
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.650 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150424.6504192, 0ec010f2-6758-466d-900f-c8c6ffe81844 => Started> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.651 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] VM Started (Lifecycle Event)
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.653 2 DEBUG nova.compute.manager [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Instance event wait completed in 0 seconds for network-vif-plugged wait_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:577
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.658 2 DEBUG nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Guest created on hypervisor spawn /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4417
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.663 2 INFO nova.virt.libvirt.driver [-] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Instance spawned successfully.
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.663 2 DEBUG nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Attempting to register defaults for the following image properties: ['hw_cdrom_bus', 'hw_disk_bus', 'hw_input_bus', 'hw_pointer_model', 'hw_video_model', 'hw_vif_model'] _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:917
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.678 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.691 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Synchronizing instance power state after lifecycle event "Started"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.696 2 DEBUG nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Found default for hw_cdrom_bus of sata _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.696 2 DEBUG nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Found default for hw_disk_bus of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.696 2 DEBUG nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Found default for hw_input_bus of usb _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.697 2 DEBUG nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Found default for hw_pointer_model of usbtablet _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.697 2 DEBUG nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Found default for hw_video_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.697 2 DEBUG nova.virt.libvirt.driver [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Found default for hw_vif_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.731 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.731 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150424.6505935, 0ec010f2-6758-466d-900f-c8c6ffe81844 => Paused> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.731 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] VM Paused (Lifecycle Event)
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.757 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:40:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1881: 321 pgs: 321 active+clean; 370 MiB data, 440 MiB used, 60 GiB / 60 GiB avail; 21 KiB/s rd, 1.8 MiB/s wr, 32 op/s
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.763 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150424.657768, 0ec010f2-6758-466d-900f-c8c6ffe81844 => Resumed> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.763 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] VM Resumed (Lifecycle Event)
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.779 2 INFO nova.compute.manager [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Took 8.76 seconds to spawn the instance on the hypervisor.
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.779 2 DEBUG nova.compute.manager [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.783 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.791 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Synchronizing instance power state after lifecycle event "Resumed"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.824 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.845 2 INFO nova.compute.manager [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Took 10.08 seconds to build instance.
Oct 11 02:40:24 compute-0 nova_compute[356901]: 2025-10-11 02:40:24.864 2 DEBUG oslo_concurrency.lockutils [None req-da662ab2-7e1b-4194-bc86-73a217529b38 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lock "0ec010f2-6758-466d-900f-c8c6ffe81844" "released" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: held 10.193s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:24 compute-0 ceph-mon[191930]: pgmap v1881: 321 pgs: 321 active+clean; 370 MiB data, 440 MiB used, 60 GiB / 60 GiB avail; 21 KiB/s rd, 1.8 MiB/s wr, 32 op/s
Oct 11 02:40:25 compute-0 nova_compute[356901]: 2025-10-11 02:40:25.044 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:25 compute-0 unix_chkpwd[456072]: password check failed for user (root)
Oct 11 02:40:25 compute-0 nova_compute[356901]: 2025-10-11 02:40:25.221 2 DEBUG nova.compute.manager [req-5c620010-55ad-41ca-bdfa-a5d3a24f8ba0 req-2e6aca70-6965-4fbb-a238-08c05a5c4ac8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Received event network-vif-plugged-5aa70f06-b185-47e2-9107-2f51edc00ff2 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:40:25 compute-0 nova_compute[356901]: 2025-10-11 02:40:25.222 2 DEBUG oslo_concurrency.lockutils [req-5c620010-55ad-41ca-bdfa-a5d3a24f8ba0 req-2e6aca70-6965-4fbb-a238-08c05a5c4ac8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:25 compute-0 nova_compute[356901]: 2025-10-11 02:40:25.223 2 DEBUG oslo_concurrency.lockutils [req-5c620010-55ad-41ca-bdfa-a5d3a24f8ba0 req-2e6aca70-6965-4fbb-a238-08c05a5c4ac8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:25 compute-0 nova_compute[356901]: 2025-10-11 02:40:25.223 2 DEBUG oslo_concurrency.lockutils [req-5c620010-55ad-41ca-bdfa-a5d3a24f8ba0 req-2e6aca70-6965-4fbb-a238-08c05a5c4ac8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:25 compute-0 nova_compute[356901]: 2025-10-11 02:40:25.223 2 DEBUG nova.compute.manager [req-5c620010-55ad-41ca-bdfa-a5d3a24f8ba0 req-2e6aca70-6965-4fbb-a238-08c05a5c4ac8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] No waiting events found dispatching network-vif-plugged-5aa70f06-b185-47e2-9107-2f51edc00ff2 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:40:25 compute-0 nova_compute[356901]: 2025-10-11 02:40:25.224 2 WARNING nova.compute.manager [req-5c620010-55ad-41ca-bdfa-a5d3a24f8ba0 req-2e6aca70-6965-4fbb-a238-08c05a5c4ac8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Received unexpected event network-vif-plugged-5aa70f06-b185-47e2-9107-2f51edc00ff2 for instance with vm_state active and task_state None.
Oct 11 02:40:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:40:26 compute-0 nova_compute[356901]: 2025-10-11 02:40:26.531 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:40:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:40:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:40:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:40:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:40:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:40:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1882: 321 pgs: 321 active+clean; 370 MiB data, 440 MiB used, 60 GiB / 60 GiB avail; 32 KiB/s rd, 1.8 MiB/s wr, 37 op/s
Oct 11 02:40:26 compute-0 ceph-mon[191930]: pgmap v1882: 321 pgs: 321 active+clean; 370 MiB data, 440 MiB used, 60 GiB / 60 GiB avail; 32 KiB/s rd, 1.8 MiB/s wr, 37 op/s
Oct 11 02:40:27 compute-0 sshd-session[455691]: Failed password for root from 193.46.255.159 port 56358 ssh2
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.577 2 DEBUG oslo_concurrency.lockutils [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Acquiring lock "0ec010f2-6758-466d-900f-c8c6ffe81844" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.578 2 DEBUG oslo_concurrency.lockutils [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lock "0ec010f2-6758-466d-900f-c8c6ffe81844" acquired by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.578 2 DEBUG oslo_concurrency.lockutils [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Acquiring lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.579 2 DEBUG oslo_concurrency.lockutils [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" acquired by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.579 2 DEBUG oslo_concurrency.lockutils [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" "released" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.580 2 INFO nova.compute.manager [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Terminating instance
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.581 2 DEBUG nova.compute.manager [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Start destroying the instance on the hypervisor. _shutdown_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:3120
Oct 11 02:40:27 compute-0 kernel: tap5aa70f06-b1 (unregistering): left promiscuous mode
Oct 11 02:40:27 compute-0 NetworkManager[44908]: <info>  [1760150427.6943] device (tap5aa70f06-b1): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Oct 11 02:40:27 compute-0 ovn_controller[88370]: 2025-10-11T02:40:27Z|00118|binding|INFO|Releasing lport 5aa70f06-b185-47e2-9107-2f51edc00ff2 from this chassis (sb_readonly=0)
Oct 11 02:40:27 compute-0 ovn_controller[88370]: 2025-10-11T02:40:27Z|00119|binding|INFO|Setting lport 5aa70f06-b185-47e2-9107-2f51edc00ff2 down in Southbound
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.705 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:27 compute-0 ovn_controller[88370]: 2025-10-11T02:40:27Z|00120|binding|INFO|Removing iface tap5aa70f06-b1 ovn-installed in OVS
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.709 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:40:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2355062069' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:40:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:27.713 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:05:79:4e 10.100.0.7'], port_security=['fa:16:3e:05:79:4e 10.100.0.7'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.7/28', 'neutron:device_id': '0ec010f2-6758-466d-900f-c8c6ffe81844', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': 'd22dc9557f7a4dada059632a83957c8f', 'neutron:revision_number': '4', 'neutron:security_group_ids': 'b3bb0626-da30-4fd8-8b0a-d558ae8653ae', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:host_id': 'compute-0.ctlplane.example.com'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=b9e48fb9-a9ec-41d4-a970-8790332da1c3, chassis=[], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=5aa70f06-b185-47e2-9107-2f51edc00ff2) old=Port_Binding(up=[True], chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:40:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:27.716 286362 INFO neutron.agent.ovn.metadata.agent [-] Port 5aa70f06-b185-47e2-9107-2f51edc00ff2 in datapath 6f7e3ff0-592e-41ad-80ba-fdc1878bf07b unbound from our chassis
Oct 11 02:40:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:40:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2355062069' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:40:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:27.719 286362 DEBUG neutron.agent.ovn.metadata.agent [-] No valid VIF ports were found for network 6f7e3ff0-592e-41ad-80ba-fdc1878bf07b, tearing the namespace down if needed _get_provision_params /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:628
Oct 11 02:40:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:27.722 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[e8826fdd-2ec9-4a62-98c1-1b0e54cf83c0]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:27.723 286362 INFO neutron.agent.ovn.metadata.agent [-] Cleaning up ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b namespace which is not needed anymore
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.733 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:27 compute-0 systemd[1]: machine-qemu\x2d11\x2dinstance\x2d0000000b.scope: Deactivated successfully.
Oct 11 02:40:27 compute-0 systemd[1]: machine-qemu\x2d11\x2dinstance\x2d0000000b.scope: Consumed 4.995s CPU time.
Oct 11 02:40:27 compute-0 systemd-machined[137586]: Machine qemu-11-instance-0000000b terminated.
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.828 2 INFO nova.virt.libvirt.driver [-] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Instance destroyed successfully.
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.830 2 DEBUG nova.objects.instance [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lazy-loading 'resources' on Instance uuid 0ec010f2-6758-466d-900f-c8c6ffe81844 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.844 2 DEBUG nova.virt.libvirt.vif [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:40:13Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=<?>,disable_terminate=False,display_description='tempest-ServerAddressesTestJSON-server-1273435186',display_name='tempest-ServerAddressesTestJSON-server-1273435186',ec2_ids=<?>,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-serveraddressestestjson-server-1273435186',id=11,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data=None,key_name=None,keypairs=<?>,launch_index=0,launched_at=2025-10-11T02:40:24Z,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=<?>,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=<?>,power_state=1,progress=0,project_id='d22dc9557f7a4dada059632a83957c8f',ramdisk_id='',reservation_id='r-jlhhx8n0',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='reader,member',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_cdrom_bus='sata',image_hw_disk_bus='virtio',image_hw_input_bus='usb',image_hw_machine_type='q35',image_hw_pointer_model='usbtablet',image_hw_rng_model='virtio',image_hw_video_model='virtio',image_hw_vif_model='virtio',image_min_disk='1',image_min_ram='0',owner_project_name='tempest-ServerAddressesTestJSON-1658687559',owner_user_name='tempest-ServerAddressesTestJSON-1658687559-project-member'},tags=<?>,task_state='deleting',terminated_at=None,trusted_certs=<?>,updated_at=2025-10-11T02:40:24Z,user_data=None,user_id='7632dc55de9f4e5599b0d2b6ef832832',uuid=0ec010f2-6758-466d-900f-c8c6ffe81844,vcpu_model=<?>,vcpus=1,vm_mode=None,vm_state='active') vif={"id": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "address": "fa:16:3e:05:79:4e", "network": {"id": "6f7e3ff0-592e-41ad-80ba-fdc1878bf07b", "bridge": "br-int", "label": "tempest-ServerAddressesTestJSON-1773621559-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.7", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d22dc9557f7a4dada059632a83957c8f", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5aa70f06-b1", "ovs_interfaceid": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} unplug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:828
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.844 2 DEBUG nova.network.os_vif_util [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Converting VIF {"id": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "address": "fa:16:3e:05:79:4e", "network": {"id": "6f7e3ff0-592e-41ad-80ba-fdc1878bf07b", "bridge": "br-int", "label": "tempest-ServerAddressesTestJSON-1773621559-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.7", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d22dc9557f7a4dada059632a83957c8f", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap5aa70f06-b1", "ovs_interfaceid": "5aa70f06-b185-47e2-9107-2f51edc00ff2", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.845 2 DEBUG nova.network.os_vif_util [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:05:79:4e,bridge_name='br-int',has_traffic_filtering=True,id=5aa70f06-b185-47e2-9107-2f51edc00ff2,network=Network(6f7e3ff0-592e-41ad-80ba-fdc1878bf07b),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap5aa70f06-b1') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.846 2 DEBUG os_vif [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Unplugging vif VIFOpenVSwitch(active=False,address=fa:16:3e:05:79:4e,bridge_name='br-int',has_traffic_filtering=True,id=5aa70f06-b185-47e2-9107-2f51edc00ff2,network=Network(6f7e3ff0-592e-41ad-80ba-fdc1878bf07b),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap5aa70f06-b1') unplug /usr/lib/python3.9/site-packages/os_vif/__init__.py:109
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.848 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.849 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tap5aa70f06-b1, bridge=br-int, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.851 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.853 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.854 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:27 compute-0 nova_compute[356901]: 2025-10-11 02:40:27.857 2 INFO os_vif [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Successfully unplugged vif VIFOpenVSwitch(active=False,address=fa:16:3e:05:79:4e,bridge_name='br-int',has_traffic_filtering=True,id=5aa70f06-b185-47e2-9107-2f51edc00ff2,network=Network(6f7e3ff0-592e-41ad-80ba-fdc1878bf07b),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap5aa70f06-b1')
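The DelPortCommand transaction above (port=tap5aa70f06-b1, bridge=br-int, if_exists=True) is ovsdbapp's programmatic form of an OVS port delete; a rough CLI-based sketch of the equivalent operation, assuming ovs-vsctl is on PATH and using the port and bridge names copied from the log:

    # Sketch only: --if-exists mirrors the if_exists=True flag in the
    # DelPortCommand logged above.
    import subprocess

    subprocess.run(
        ["ovs-vsctl", "--if-exists", "del-port", "br-int", "tap5aa70f06-b1"],
        check=True,
    )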
Oct 11 02:40:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2355062069' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:40:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2355062069' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:40:27 compute-0 neutron-haproxy-ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b[455946]: [NOTICE]   (455969) : haproxy version is 2.8.14-c23fe91
Oct 11 02:40:27 compute-0 neutron-haproxy-ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b[455946]: [NOTICE]   (455969) : path to executable is /usr/sbin/haproxy
Oct 11 02:40:27 compute-0 neutron-haproxy-ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b[455946]: [WARNING]  (455969) : Exiting Master process...
Oct 11 02:40:27 compute-0 neutron-haproxy-ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b[455946]: [WARNING]  (455969) : Exiting Master process...
Oct 11 02:40:27 compute-0 neutron-haproxy-ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b[455946]: [ALERT]    (455969) : Current worker (455974) exited with code 143 (Terminated)
Oct 11 02:40:27 compute-0 neutron-haproxy-ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b[455946]: [WARNING]  (455969) : All workers exited. Exiting... (0)
Oct 11 02:40:27 compute-0 systemd[1]: libpod-58508134d61b85c78884f22a32a79ea4f80d6b42ea547dbcee44ac5bdc85b7ef.scope: Deactivated successfully.
Oct 11 02:40:27 compute-0 podman[456115]: 2025-10-11 02:40:27.977339643 +0000 UTC m=+0.082159177 container died 58508134d61b85c78884f22a32a79ea4f80d6b42ea547dbcee44ac5bdc85b7ef (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true)
Oct 11 02:40:28 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-58508134d61b85c78884f22a32a79ea4f80d6b42ea547dbcee44ac5bdc85b7ef-userdata-shm.mount: Deactivated successfully.
Oct 11 02:40:28 compute-0 systemd[1]: var-lib-containers-storage-overlay-4e3f0ddddfe73ab0523c25a5a46f87764bf75663f59bdb8d2c26d7f27c411d0c-merged.mount: Deactivated successfully.
Oct 11 02:40:28 compute-0 podman[456115]: 2025-10-11 02:40:28.058139319 +0000 UTC m=+0.162958863 container cleanup 58508134d61b85c78884f22a32a79ea4f80d6b42ea547dbcee44ac5bdc85b7ef (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS)
Oct 11 02:40:28 compute-0 systemd[1]: libpod-conmon-58508134d61b85c78884f22a32a79ea4f80d6b42ea547dbcee44ac5bdc85b7ef.scope: Deactivated successfully.
Oct 11 02:40:28 compute-0 nova_compute[356901]: 2025-10-11 02:40:28.097 2 DEBUG nova.compute.manager [req-ac845db1-eb8b-457e-b94a-673a37675536 req-f733b414-ab29-4ad6-89c1-68da612cb6c6 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Received event network-vif-unplugged-5aa70f06-b185-47e2-9107-2f51edc00ff2 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:40:28 compute-0 nova_compute[356901]: 2025-10-11 02:40:28.098 2 DEBUG oslo_concurrency.lockutils [req-ac845db1-eb8b-457e-b94a-673a37675536 req-f733b414-ab29-4ad6-89c1-68da612cb6c6 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:28 compute-0 nova_compute[356901]: 2025-10-11 02:40:28.098 2 DEBUG oslo_concurrency.lockutils [req-ac845db1-eb8b-457e-b94a-673a37675536 req-f733b414-ab29-4ad6-89c1-68da612cb6c6 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:28 compute-0 nova_compute[356901]: 2025-10-11 02:40:28.099 2 DEBUG oslo_concurrency.lockutils [req-ac845db1-eb8b-457e-b94a-673a37675536 req-f733b414-ab29-4ad6-89c1-68da612cb6c6 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:28 compute-0 nova_compute[356901]: 2025-10-11 02:40:28.100 2 DEBUG nova.compute.manager [req-ac845db1-eb8b-457e-b94a-673a37675536 req-f733b414-ab29-4ad6-89c1-68da612cb6c6 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] No waiting events found dispatching network-vif-unplugged-5aa70f06-b185-47e2-9107-2f51edc00ff2 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:40:28 compute-0 nova_compute[356901]: 2025-10-11 02:40:28.100 2 DEBUG nova.compute.manager [req-ac845db1-eb8b-457e-b94a-673a37675536 req-f733b414-ab29-4ad6-89c1-68da612cb6c6 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Received event network-vif-unplugged-5aa70f06-b185-47e2-9107-2f51edc00ff2 for instance with task_state deleting. _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10826
Oct 11 02:40:28 compute-0 podman[456156]: 2025-10-11 02:40:28.17553261 +0000 UTC m=+0.079249816 container remove 58508134d61b85c78884f22a32a79ea4f80d6b42ea547dbcee44ac5bdc85b7ef (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
Oct 11 02:40:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:28.187 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[fc48686e-c802-435a-90f3-605c18eedd1a]: (4, ('Sat Oct 11 02:40:27 AM UTC 2025 Stopping container neutron-haproxy-ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b (58508134d61b85c78884f22a32a79ea4f80d6b42ea547dbcee44ac5bdc85b7ef)\n58508134d61b85c78884f22a32a79ea4f80d6b42ea547dbcee44ac5bdc85b7ef\nSat Oct 11 02:40:28 AM UTC 2025 Deleting container neutron-haproxy-ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b (58508134d61b85c78884f22a32a79ea4f80d6b42ea547dbcee44ac5bdc85b7ef)\n58508134d61b85c78884f22a32a79ea4f80d6b42ea547dbcee44ac5bdc85b7ef\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:28.191 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[d1540fa9-594f-48c7-89ff-3e24031c2d26]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:28.193 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tap6f7e3ff0-50, bridge=None, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:28 compute-0 kernel: tap6f7e3ff0-50: left promiscuous mode
Oct 11 02:40:28 compute-0 nova_compute[356901]: 2025-10-11 02:40:28.197 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:28 compute-0 nova_compute[356901]: 2025-10-11 02:40:28.214 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:28.221 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[95659354-86d6-44f0-97a7-582af1dc9885]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:28.248 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[5d8f6c3b-27b2-42fb-9186-38c31e52c976]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:28.254 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[ded51ce5-43f1-4a76-adfd-9c64df378dd4]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:28 compute-0 sshd-session[455691]: Received disconnect from 193.46.255.159 port 56358:11:  [preauth]
Oct 11 02:40:28 compute-0 sshd-session[455691]: Disconnected from authenticating user root 193.46.255.159 port 56358 [preauth]
Oct 11 02:40:28 compute-0 sshd-session[455691]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.159  user=root
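The failed-password records interleaved through this window (193.46.255.159 probing root) follow a fixed format and are easy to tally; a small sketch matching the exact shape used in this journal:

    # Sketch: extract user/IP/port from an sshd "Failed password" record
    # as it appears in this log.
    import re

    LINE = ("Oct 11 02:40:27 compute-0 sshd-session[455691]: "
            "Failed password for root from 193.46.255.159 port 56358 ssh2")
    PAT = re.compile(r"Failed password for (\S+) from (\S+) port (\d+)")

    m = PAT.search(LINE)
    if m:
        user, ip, port = m.group(1), m.group(2), int(m.group(3))
        print(user, ip, port)   # root 193.46.255.159 56358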
Oct 11 02:40:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:28.284 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[1a281369-9165-44e4-9c19-72b9e06e22f9]: (4, [{'family': 0, '__align': (), 'ifi_type': 772, 'index': 1, 'flags': 65609, 'change': 0, 'attrs': [['IFLA_IFNAME', 'lo'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UNKNOWN'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 65536], ['IFLA_MIN_MTU', 0], ['IFLA_MAX_MTU', 0], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 1], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 1], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 0], ['IFLA_CARRIER_UP_COUNT', 0], ['IFLA_CARRIER_DOWN_COUNT', 0], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', '00:00:00:00:00:00'], ['IFLA_BROADCAST', '00:00:00:00:00:00'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 1, 'nopolicy': 1, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 677051, 'reachable_time': 25827, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 65536, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 4294967295, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 4294967295, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 0, 'inoctets': 0, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 0, 'outoctets': 0, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 0, 'outmcastpkts': 0, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 0, 'outmcastoctets': 0, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 0, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 0, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1404, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 456171, 'error': None, 'target': 'ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:28 compute-0 systemd[1]: run-netns-ovnmeta\x2d6f7e3ff0\x2d592e\x2d41ad\x2d80ba\x2dfdc1878bf07b.mount: Deactivated successfully.
Oct 11 02:40:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:28.293 286647 DEBUG neutron.privileged.agent.linux.ip_lib [-] Namespace ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b deleted. remove_netns /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:607
Oct 11 02:40:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:28.294 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[41587f4a-c62d-4c08-9dd0-e1e4e5c92d19]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
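Neutron's remove_netns above is a privsep-wrapped network-namespace delete; stripped of the privsep plumbing, the bare operation looks roughly like this with pyroute2 (the library neutron's ip_lib builds on), using the namespace name copied from the log. Root privileges are required:

    # Illustrative only: delete a named network namespace with pyroute2.
    from pyroute2 import netns

    netns.remove('ovnmeta-6f7e3ff0-592e-41ad-80ba-fdc1878bf07b')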
Oct 11 02:40:28 compute-0 nova_compute[356901]: 2025-10-11 02:40:28.550 2 INFO nova.virt.libvirt.driver [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Deleting instance files /var/lib/nova/instances/0ec010f2-6758-466d-900f-c8c6ffe81844_del
Oct 11 02:40:28 compute-0 nova_compute[356901]: 2025-10-11 02:40:28.551 2 INFO nova.virt.libvirt.driver [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Deletion of /var/lib/nova/instances/0ec010f2-6758-466d-900f-c8c6ffe81844_del complete
Oct 11 02:40:28 compute-0 nova_compute[356901]: 2025-10-11 02:40:28.632 2 INFO nova.compute.manager [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Took 1.05 seconds to destroy the instance on the hypervisor.
Oct 11 02:40:28 compute-0 nova_compute[356901]: 2025-10-11 02:40:28.634 2 DEBUG oslo.service.loopingcall [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Waiting for function nova.compute.manager.ComputeManager._try_deallocate_network.<locals>._deallocate_network_with_retries to return. func /usr/lib/python3.9/site-packages/oslo_service/loopingcall.py:435
Oct 11 02:40:28 compute-0 nova_compute[356901]: 2025-10-11 02:40:28.634 2 DEBUG nova.compute.manager [-] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Deallocating network for instance _deallocate_network /usr/lib/python3.9/site-packages/nova/compute/manager.py:2259
Oct 11 02:40:28 compute-0 nova_compute[356901]: 2025-10-11 02:40:28.634 2 DEBUG nova.network.neutron [-] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] deallocate_for_instance() deallocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1803
Oct 11 02:40:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1883: 321 pgs: 321 active+clean; 370 MiB data, 440 MiB used, 60 GiB / 60 GiB avail; 712 KiB/s rd, 1.8 MiB/s wr, 59 op/s
Oct 11 02:40:28 compute-0 ceph-mon[191930]: pgmap v1883: 321 pgs: 321 active+clean; 370 MiB data, 440 MiB used, 60 GiB / 60 GiB avail; 712 KiB/s rd, 1.8 MiB/s wr, 59 op/s
Oct 11 02:40:29 compute-0 unix_chkpwd[456174]: password check failed for user (root)
Oct 11 02:40:29 compute-0 sshd-session[456172]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.159  user=root
Oct 11 02:40:29 compute-0 podman[157119]: time="2025-10-11T02:40:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:40:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:40:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 49967 "" "Go-http-client/1.1"
Oct 11 02:40:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:40:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 10468 "" "Go-http-client/1.1"
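The two GETs above hit podman's libpod REST API over its socket; the first (list all containers) is roughly what the CLI does for podman ps --all --format json, sketched here:

    # Rough CLI equivalent of the libpod "containers/json?all=true" GET above.
    import json
    import subprocess

    out = subprocess.check_output(["podman", "ps", "--all", "--format", "json"])
    print(len(json.loads(out)), "containers")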
Oct 11 02:40:30 compute-0 nova_compute[356901]: 2025-10-11 02:40:30.046 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:30 compute-0 nova_compute[356901]: 2025-10-11 02:40:30.093 2 DEBUG nova.network.neutron [-] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Updating instance_info_cache with network_info: [] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:40:30 compute-0 nova_compute[356901]: 2025-10-11 02:40:30.109 2 INFO nova.compute.manager [-] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Took 1.47 seconds to deallocate network for instance.
Oct 11 02:40:30 compute-0 nova_compute[356901]: 2025-10-11 02:40:30.188 2 DEBUG oslo_concurrency.lockutils [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.update_usage" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:30 compute-0 nova_compute[356901]: 2025-10-11 02:40:30.189 2 DEBUG oslo_concurrency.lockutils [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:30 compute-0 nova_compute[356901]: 2025-10-11 02:40:30.254 2 DEBUG nova.compute.manager [req-7791f57e-0219-48c8-9b6d-395b99ad7d8b req-956b9012-c25d-4081-9349-286d5c0ce4bf 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Received event network-vif-plugged-5aa70f06-b185-47e2-9107-2f51edc00ff2 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:40:30 compute-0 nova_compute[356901]: 2025-10-11 02:40:30.255 2 DEBUG oslo_concurrency.lockutils [req-7791f57e-0219-48c8-9b6d-395b99ad7d8b req-956b9012-c25d-4081-9349-286d5c0ce4bf 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:30 compute-0 nova_compute[356901]: 2025-10-11 02:40:30.256 2 DEBUG oslo_concurrency.lockutils [req-7791f57e-0219-48c8-9b6d-395b99ad7d8b req-956b9012-c25d-4081-9349-286d5c0ce4bf 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:30 compute-0 nova_compute[356901]: 2025-10-11 02:40:30.256 2 DEBUG oslo_concurrency.lockutils [req-7791f57e-0219-48c8-9b6d-395b99ad7d8b req-956b9012-c25d-4081-9349-286d5c0ce4bf 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "0ec010f2-6758-466d-900f-c8c6ffe81844-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:30 compute-0 nova_compute[356901]: 2025-10-11 02:40:30.257 2 DEBUG nova.compute.manager [req-7791f57e-0219-48c8-9b6d-395b99ad7d8b req-956b9012-c25d-4081-9349-286d5c0ce4bf 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] No waiting events found dispatching network-vif-plugged-5aa70f06-b185-47e2-9107-2f51edc00ff2 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:40:30 compute-0 nova_compute[356901]: 2025-10-11 02:40:30.257 2 WARNING nova.compute.manager [req-7791f57e-0219-48c8-9b6d-395b99ad7d8b req-956b9012-c25d-4081-9349-286d5c0ce4bf 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Received unexpected event network-vif-plugged-5aa70f06-b185-47e2-9107-2f51edc00ff2 for instance with vm_state deleted and task_state None.
Oct 11 02:40:30 compute-0 nova_compute[356901]: 2025-10-11 02:40:30.258 2 DEBUG nova.compute.manager [req-7791f57e-0219-48c8-9b6d-395b99ad7d8b req-956b9012-c25d-4081-9349-286d5c0ce4bf 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Received event network-vif-deleted-5aa70f06-b185-47e2-9107-2f51edc00ff2 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:40:30 compute-0 nova_compute[356901]: 2025-10-11 02:40:30.370 2 DEBUG oslo_concurrency.processutils [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:40:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1884: 321 pgs: 321 active+clean; 350 MiB data, 432 MiB used, 60 GiB / 60 GiB avail; 1.1 MiB/s rd, 1.1 MiB/s wr, 87 op/s
Oct 11 02:40:30 compute-0 ceph-mon[191930]: pgmap v1884: 321 pgs: 321 active+clean; 350 MiB data, 432 MiB used, 60 GiB / 60 GiB avail; 1.1 MiB/s rd, 1.1 MiB/s wr, 87 op/s
Oct 11 02:40:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:40:30 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1726042830' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:40:30 compute-0 nova_compute[356901]: 2025-10-11 02:40:30.998 2 DEBUG oslo_concurrency.processutils [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.628s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
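The probe logged above is an ordinary subprocess call; a minimal sketch that runs the same command nova's processutils wraps and reads the cluster totals out of the JSON it returns (field names per the ceph df JSON output):

    # Re-run the exact "ceph df" command from the log and parse its JSON.
    import json
    import subprocess

    out = subprocess.check_output(
        ["ceph", "df", "--format=json", "--id", "openstack",
         "--conf", "/etc/ceph/ceph.conf"]
    )
    stats = json.loads(out)["stats"]
    print(stats["total_bytes"], stats["total_avail_bytes"])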
Oct 11 02:40:31 compute-0 nova_compute[356901]: 2025-10-11 02:40:31.010 2 DEBUG nova.compute.provider_tree [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:40:31 compute-0 nova_compute[356901]: 2025-10-11 02:40:31.030 2 DEBUG nova.scheduler.client.report [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
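As a worked example of what placement does with the inventory above: effective capacity per resource class is (total - reserved) * allocation_ratio, so this provider advertises 32 VCPU, 7168 MB of RAM, and 52.2 GB of disk:

    # Effective capacity from the inventory dict logged above.
    inventory = {
        'VCPU': {'total': 8, 'reserved': 0, 'allocation_ratio': 4.0},
        'MEMORY_MB': {'total': 7680, 'reserved': 512, 'allocation_ratio': 1.0},
        'DISK_GB': {'total': 59, 'reserved': 1, 'allocation_ratio': 0.9},
    }
    for rc, inv in inventory.items():
        cap = (inv['total'] - inv['reserved']) * inv['allocation_ratio']
        print(rc, cap)   # VCPU 32.0, MEMORY_MB 7168.0, DISK_GB 52.2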
Oct 11 02:40:31 compute-0 sshd-session[456172]: Failed password for root from 193.46.255.159 port 60724 ssh2
Oct 11 02:40:31 compute-0 nova_compute[356901]: 2025-10-11 02:40:31.062 2 DEBUG oslo_concurrency.lockutils [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: held 0.872s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:31 compute-0 nova_compute[356901]: 2025-10-11 02:40:31.091 2 INFO nova.scheduler.client.report [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Deleted allocations for instance 0ec010f2-6758-466d-900f-c8c6ffe81844
Oct 11 02:40:31 compute-0 podman[456197]: 2025-10-11 02:40:31.225267967 +0000 UTC m=+0.111725213 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, container_name=multipathd, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, managed_by=edpm_ansible)
Oct 11 02:40:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:40:31 compute-0 podman[456198]: 2025-10-11 02:40:31.26467314 +0000 UTC m=+0.153265971 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, container_name=iscsid, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid)
Oct 11 02:40:31 compute-0 nova_compute[356901]: 2025-10-11 02:40:31.385 2 DEBUG oslo_concurrency.lockutils [None req-02a9986d-5bff-4a8e-b44b-ea1bbabfc898 7632dc55de9f4e5599b0d2b6ef832832 d22dc9557f7a4dada059632a83957c8f - - default default] Lock "0ec010f2-6758-466d-900f-c8c6ffe81844" "released" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: held 3.806s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:31 compute-0 openstack_network_exporter[374316]: ERROR   02:40:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:40:31 compute-0 openstack_network_exporter[374316]: ERROR   02:40:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:40:31 compute-0 openstack_network_exporter[374316]: ERROR   02:40:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:40:31 compute-0 openstack_network_exporter[374316]: ERROR   02:40:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:40:31 compute-0 openstack_network_exporter[374316]: ERROR   02:40:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:40:31 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1726042830' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:40:32 compute-0 unix_chkpwd[456232]: password check failed for user (root)
Oct 11 02:40:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1885: 321 pgs: 321 active+clean; 339 MiB data, 428 MiB used, 60 GiB / 60 GiB avail; 1.4 MiB/s rd, 411 KiB/s wr, 88 op/s
Oct 11 02:40:32 compute-0 nova_compute[356901]: 2025-10-11 02:40:32.852 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:32 compute-0 ceph-mon[191930]: pgmap v1885: 321 pgs: 321 active+clean; 339 MiB data, 428 MiB used, 60 GiB / 60 GiB avail; 1.4 MiB/s rd, 411 KiB/s wr, 88 op/s
Oct 11 02:40:33 compute-0 ovn_controller[88370]: 2025-10-11T02:40:33Z|00016|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:1e:a3:c2 10.100.0.11
Oct 11 02:40:33 compute-0 ovn_controller[88370]: 2025-10-11T02:40:33Z|00017|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:1e:a3:c2 10.100.0.11
Oct 11 02:40:34 compute-0 sshd-session[456172]: Failed password for root from 193.46.255.159 port 60724 ssh2
Oct 11 02:40:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1886: 321 pgs: 321 active+clean; 341 MiB data, 436 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 1.5 MiB/s wr, 119 op/s
Oct 11 02:40:34 compute-0 ceph-mon[191930]: pgmap v1886: 321 pgs: 321 active+clean; 341 MiB data, 436 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 1.5 MiB/s wr, 119 op/s
Oct 11 02:40:35 compute-0 nova_compute[356901]: 2025-10-11 02:40:35.064 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:35 compute-0 unix_chkpwd[456233]: password check failed for user (root)
Oct 11 02:40:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:40:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1887: 321 pgs: 321 active+clean; 356 MiB data, 444 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 2.1 MiB/s wr, 141 op/s
Oct 11 02:40:36 compute-0 ceph-mon[191930]: pgmap v1887: 321 pgs: 321 active+clean; 356 MiB data, 444 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 2.1 MiB/s wr, 141 op/s
Oct 11 02:40:37 compute-0 ovn_controller[88370]: 2025-10-11T02:40:37Z|00121|binding|INFO|Releasing lport 3233307f-6a7e-4ff6-b881-6d68b60996c3 from this chassis (sb_readonly=0)
Oct 11 02:40:37 compute-0 ovn_controller[88370]: 2025-10-11T02:40:37Z|00122|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:40:37 compute-0 ovn_controller[88370]: 2025-10-11T02:40:37Z|00123|binding|INFO|Releasing lport aa37c6ed-d2db-4ed4-b1c9-cfd071cfd96a from this chassis (sb_readonly=0)
Oct 11 02:40:37 compute-0 ovn_controller[88370]: 2025-10-11T02:40:37Z|00124|binding|INFO|Releasing lport 896fe5e8-8895-492a-9e5f-23d2477d5716 from this chassis (sb_readonly=0)
Oct 11 02:40:37 compute-0 nova_compute[356901]: 2025-10-11 02:40:37.158 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:37 compute-0 sshd-session[456172]: Failed password for root from 193.46.255.159 port 60724 ssh2
Oct 11 02:40:37 compute-0 nova_compute[356901]: 2025-10-11 02:40:37.855 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:37 compute-0 nova_compute[356901]: 2025-10-11 02:40:37.908 2 DEBUG nova.objects.instance [None req-2359aafb-fd5a-442d-b514-5787b5c759f2 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lazy-loading 'flavor' on Instance uuid 49d4f343-d1b4-4594-96d2-0777a5ce8581 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:40:37 compute-0 nova_compute[356901]: 2025-10-11 02:40:37.949 2 DEBUG oslo_concurrency.lockutils [None req-2359aafb-fd5a-442d-b514-5787b5c759f2 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Acquiring lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:40:37 compute-0 nova_compute[356901]: 2025-10-11 02:40:37.950 2 DEBUG oslo_concurrency.lockutils [None req-2359aafb-fd5a-442d-b514-5787b5c759f2 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Acquired lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:40:37 compute-0 nova_compute[356901]: 2025-10-11 02:40:37.988 2 DEBUG oslo_concurrency.lockutils [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Acquiring lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" by "nova.compute.manager.ComputeManager.reboot_instance.<locals>.do_reboot_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:37 compute-0 nova_compute[356901]: 2025-10-11 02:40:37.990 2 DEBUG oslo_concurrency.lockutils [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" acquired by "nova.compute.manager.ComputeManager.reboot_instance.<locals>.do_reboot_instance" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:37 compute-0 nova_compute[356901]: 2025-10-11 02:40:37.990 2 INFO nova.compute.manager [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Rebooting instance
Oct 11 02:40:38 compute-0 nova_compute[356901]: 2025-10-11 02:40:38.007 2 DEBUG oslo_concurrency.lockutils [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Acquiring lock "refresh_cache-f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:40:38 compute-0 nova_compute[356901]: 2025-10-11 02:40:38.007 2 DEBUG oslo_concurrency.lockutils [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Acquired lock "refresh_cache-f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:40:38 compute-0 nova_compute[356901]: 2025-10-11 02:40:38.007 2 DEBUG nova.network.neutron [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:40:38 compute-0 sshd-session[456172]: Received disconnect from 193.46.255.159 port 60724:11:  [preauth]
Oct 11 02:40:38 compute-0 sshd-session[456172]: Disconnected from authenticating user root 193.46.255.159 port 60724 [preauth]
Oct 11 02:40:38 compute-0 sshd-session[456172]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.159  user=root
Oct 11 02:40:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1888: 321 pgs: 321 active+clean; 357 MiB data, 444 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 2.1 MiB/s wr, 136 op/s
Oct 11 02:40:38 compute-0 ceph-mon[191930]: pgmap v1888: 321 pgs: 321 active+clean; 357 MiB data, 444 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 2.1 MiB/s wr, 136 op/s
Oct 11 02:40:39 compute-0 nova_compute[356901]: 2025-10-11 02:40:39.494 2 DEBUG nova.network.neutron [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Updating instance_info_cache with network_info: [{"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.187", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
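The instance_info_cache payload above is plain JSON; a sketch of walking one VIF entry shaped like it to list fixed and floating addresses (the dict below is a trimmed copy of the logged entry):

    # Extract fixed and floating IPs from a network_info VIF entry.
    vif = {
        "id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26",
        "network": {"subnets": [{"ips": [{
            "address": "10.100.0.4",
            "floating_ips": [{"address": "192.168.122.187"}],
        }]}]},
    }
    for subnet in vif["network"]["subnets"]:
        for ip in subnet["ips"]:
            print("fixed", ip["address"])
            for fip in ip.get("floating_ips", []):
                print("floating", fip["address"])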
Oct 11 02:40:39 compute-0 nova_compute[356901]: 2025-10-11 02:40:39.528 2 DEBUG oslo_concurrency.lockutils [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Releasing lock "refresh_cache-f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:40:39 compute-0 nova_compute[356901]: 2025-10-11 02:40:39.530 2 DEBUG nova.compute.manager [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:40:39 compute-0 kernel: tapd7c4233c-f7 (unregistering): left promiscuous mode
Oct 11 02:40:39 compute-0 NetworkManager[44908]: <info>  [1760150439.7790] device (tapd7c4233c-f7): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Oct 11 02:40:39 compute-0 nova_compute[356901]: 2025-10-11 02:40:39.796 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:39 compute-0 ovn_controller[88370]: 2025-10-11T02:40:39Z|00125|binding|INFO|Releasing lport d7c4233c-f79b-4f32-b896-c36d4abb7d26 from this chassis (sb_readonly=0)
Oct 11 02:40:39 compute-0 ovn_controller[88370]: 2025-10-11T02:40:39Z|00126|binding|INFO|Setting lport d7c4233c-f79b-4f32-b896-c36d4abb7d26 down in Southbound
Oct 11 02:40:39 compute-0 ovn_controller[88370]: 2025-10-11T02:40:39Z|00127|binding|INFO|Removing iface tapd7c4233c-f7 ovn-installed in OVS
Oct 11 02:40:39 compute-0 nova_compute[356901]: 2025-10-11 02:40:39.804 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:39 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:39.813 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:8d:b8:dd 10.100.0.4'], port_security=['fa:16:3e:8d:b8:dd 10.100.0.4'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.4/28', 'neutron:device_id': 'f5eb6746-7f42-4fa4-8e26-cda5cfa0c765', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-b4d521f7-7729-40fd-aa58-7126044eb166', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': 'dba4f6e51d33430ebf5566af53f6fbcc', 'neutron:revision_number': '4', 'neutron:security_group_ids': '82e011ad-d874-487b-b398-e13313bfa497', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:host_id': 'compute-0.ctlplane.example.com', 'neutron:port_fip': '192.168.122.187'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=283b08c2-109a-4649-a6db-2339ca56efb4, chassis=[], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=d7c4233c-f79b-4f32-b896-c36d4abb7d26) old=Port_Binding(up=[True], chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:40:39 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:39.815 286362 INFO neutron.agent.ovn.metadata.agent [-] Port d7c4233c-f79b-4f32-b896-c36d4abb7d26 in datapath b4d521f7-7729-40fd-aa58-7126044eb166 unbound from our chassis
Oct 11 02:40:39 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:39.817 286362 DEBUG neutron.agent.ovn.metadata.agent [-] No valid VIF ports were found for network b4d521f7-7729-40fd-aa58-7126044eb166, tearing the namespace down if needed _get_provision_params /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:628
Oct 11 02:40:39 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:39.819 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[bf0c0307-66ec-4f2f-b07f-e9eadafd85ef]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:39 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:39.819 286362 INFO neutron.agent.ovn.metadata.agent [-] Cleaning up ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166 namespace which is not needed anymore
Oct 11 02:40:39 compute-0 nova_compute[356901]: 2025-10-11 02:40:39.834 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:39 compute-0 systemd[1]: machine-qemu\x2d6\x2dinstance\x2d00000006.scope: Deactivated successfully.
Oct 11 02:40:39 compute-0 systemd[1]: machine-qemu\x2d6\x2dinstance\x2d00000006.scope: Consumed 43.677s CPU time.
Oct 11 02:40:39 compute-0 systemd-machined[137586]: Machine qemu-6-instance-00000006 terminated.
Oct 11 02:40:39 compute-0 nova_compute[356901]: 2025-10-11 02:40:39.891 2 DEBUG nova.network.neutron [None req-2359aafb-fd5a-442d-b514-5787b5c759f2 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:39.999 2 DEBUG nova.compute.manager [req-7cb15a41-230d-4698-95b3-84c029e264c8 req-1bc0df13-0e8a-4e61-b96b-cd01865b564f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Received event network-changed-4076fda2-be62-4c52-b073-8bf26574dee1 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.000 2 DEBUG nova.compute.manager [req-7cb15a41-230d-4698-95b3-84c029e264c8 req-1bc0df13-0e8a-4e61-b96b-cd01865b564f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Refreshing instance network info cache due to event network-changed-4076fda2-be62-4c52-b073-8bf26574dee1. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.000 2 DEBUG oslo_concurrency.lockutils [req-7cb15a41-230d-4698-95b3-84c029e264c8 req-1bc0df13-0e8a-4e61-b96b-cd01865b564f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:40:40 compute-0 neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166[453022]: [NOTICE]   (453027) : haproxy version is 2.8.14-c23fe91
Oct 11 02:40:40 compute-0 neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166[453022]: [NOTICE]   (453027) : path to executable is /usr/sbin/haproxy
Oct 11 02:40:40 compute-0 neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166[453022]: [WARNING]  (453027) : Exiting Master process...
Oct 11 02:40:40 compute-0 neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166[453022]: [ALERT]    (453027) : Current worker (453029) exited with code 143 (Terminated)
Oct 11 02:40:40 compute-0 neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166[453022]: [WARNING]  (453027) : All workers exited. Exiting... (0)
Oct 11 02:40:40 compute-0 systemd[1]: libpod-c0d88bfd9df1a49fd0c922f853153438c0fe1f0209bd8d1173c10be3e9662a0a.scope: Deactivated successfully.
Oct 11 02:40:40 compute-0 podman[456258]: 2025-10-11 02:40:40.063539316 +0000 UTC m=+0.082451723 container died c0d88bfd9df1a49fd0c922f853153438c0fe1f0209bd8d1173c10be3e9662a0a (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.068 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.084 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.096 2 INFO nova.virt.libvirt.driver [-] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Instance destroyed successfully.
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.097 2 DEBUG nova.objects.instance [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lazy-loading 'resources' on Instance uuid f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.098 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.112 2 DEBUG nova.virt.libvirt.vif [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:38:58Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=<?>,disable_terminate=False,display_description='tempest-ServerActionsTestJSON-server-482072585',display_name='tempest-ServerActionsTestJSON-server-482072585',ec2_ids=<?>,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-serveractionstestjson-server-482072585',id=6,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBEeyedFg6J90z3asuDBQl1/Bvzj806ldEmlyVo7UkMTJHBgXm6kW1TdMM5vQZaYHoLzJajtdp6cuAv6b+cT74TvAgDg4tZ7S8WdWrLaHLA9uudTCq+0DsKhebTJVvA2XxA==',key_name='tempest-keypair-177844218',keypairs=<?>,launch_index=0,launched_at=2025-10-11T02:39:27Z,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=<?>,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=<?>,power_state=1,progress=0,project_id='dba4f6e51d33430ebf5566af53f6fbcc',ramdisk_id='',reservation_id='r-xpsstq1e',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_cdrom_bus='sata',image_hw_disk_bus='virtio',image_hw_input_bus='usb',image_hw_machine_type='q35',image_hw_pointer_model='usbtablet',image_hw_rng_model='virtio',image_hw_video_model='virtio',image_hw_vif_model='virtio',image_min_disk='1',image_min_ram='0',owner_project_name='tempest-ServerActionsTestJSON-1563605323',owner_user_name='tempest-ServerActionsTestJSON-1563605323-project-member'},tags=<?>,task_state='reboot_started_hard',terminated_at=None,trusted_certs=<?>,updated_at=2025-10-11T02:40:39Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='11c81e88a90342bba2c2816e4e3cb191',uuid=f5eb6746-7f42-4fa4-8e26-cda5cfa0c765,vcpu_model=<?>,vcpus=1,vm_mode=None,vm_state='active') vif={"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.187", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} unplug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:828
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.113 2 DEBUG nova.network.os_vif_util [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Converting VIF {"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.187", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.114 2 DEBUG nova.network.os_vif_util [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Converted object VIFOpenVSwitch(active=True,address=fa:16:3e:8d:b8:dd,bridge_name='br-int',has_traffic_filtering=True,id=d7c4233c-f79b-4f32-b896-c36d4abb7d26,network=Network(b4d521f7-7729-40fd-aa58-7126044eb166),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd7c4233c-f7') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.115 2 DEBUG os_vif [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Unplugging vif VIFOpenVSwitch(active=True,address=fa:16:3e:8d:b8:dd,bridge_name='br-int',has_traffic_filtering=True,id=d7c4233c-f79b-4f32-b896-c36d4abb7d26,network=Network(b4d521f7-7729-40fd-aa58-7126044eb166),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd7c4233c-f7') unplug /usr/lib/python3.9/site-packages/os_vif/__init__.py:109
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.116 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.116 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapd7c4233c-f7, bridge=br-int, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.118 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.120 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.121 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.124 2 INFO os_vif [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Successfully unplugged vif VIFOpenVSwitch(active=True,address=fa:16:3e:8d:b8:dd,bridge_name='br-int',has_traffic_filtering=True,id=d7c4233c-f79b-4f32-b896-c36d4abb7d26,network=Network(b4d521f7-7729-40fd-aa58-7126044eb166),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd7c4233c-f7')
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.132 2 DEBUG nova.virt.libvirt.driver [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Start _get_guest_xml network_info=[{"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.187", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] disk_info={'disk_bus': 'virtio', 'cdrom_bus': 'sata', 'mapping': {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.config': {'bus': 'sata', 'dev': 'sda', 'type': 'cdrom'}}} image_meta=ImageMeta(checksum=<?>,container_format='bare',created_at=<?>,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=1,min_ram=0,name=<?>,owner=<?>,properties=ImageMetaProps,protected=<?>,size=<?>,status=<?>,tags=<?>,updated_at=<?>,virtual_size=<?>,visibility=<?>) rescue=None block_device_info={'root_device_name': '/dev/vda', 'image': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'boot_index': 0, 'device_name': '/dev/vda', 'size': 0, 'encryption_format': None, 'image_id': '72f37f2e-4296-450e-9a12-10717f4ac7dc'}], 'ephemerals': [], 'block_device_mapping': [], 'swap': None} _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7549
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.143 2 WARNING nova.virt.libvirt.driver [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:40:40 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-c0d88bfd9df1a49fd0c922f853153438c0fe1f0209bd8d1173c10be3e9662a0a-userdata-shm.mount: Deactivated successfully.
Oct 11 02:40:40 compute-0 systemd[1]: var-lib-containers-storage-overlay-105c2d3147e202160e1a750aa76d9e8280033a073759090e8a4c0f535ce66f87-merged.mount: Deactivated successfully.
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.155 2 DEBUG nova.virt.libvirt.host [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V1... _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1653
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.157 2 DEBUG nova.virt.libvirt.host [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CPU controller missing on host. _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1663
Oct 11 02:40:40 compute-0 podman[456258]: 2025-10-11 02:40:40.160217224 +0000 UTC m=+0.179129641 container cleanup c0d88bfd9df1a49fd0c922f853153438c0fe1f0209bd8d1173c10be3e9662a0a (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.162 2 DEBUG nova.virt.libvirt.host [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V2... _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1672
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.163 2 DEBUG nova.virt.libvirt.host [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CPU controller found on host. _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1679
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.163 2 DEBUG nova.virt.libvirt.driver [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CPU mode 'host-model' models '' was chosen, with extra flags: '' _get_guest_cpu_model_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:5396
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.164 2 DEBUG nova.virt.hardware [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Getting desirable topologies for flavor Flavor(created_at=2025-10-11T02:38:03Z,deleted=False,deleted_at=None,description=None,disabled=False,ephemeral_gb=0,extra_specs={hw_rng:allowed='True'},flavorid='6dff30d1-85df-4e9c-9163-a20ba47bb0c7',id=3,is_public=True,memory_mb=128,name='m1.nano',projects=<?>,root_gb=1,rxtx_factor=1.0,swap=0,updated_at=None,vcpu_weight=0,vcpus=1) and image_meta ImageMeta(checksum=<?>,container_format='bare',created_at=<?>,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=1,min_ram=0,name=<?>,owner=<?>,properties=ImageMetaProps,protected=<?>,size=<?>,status=<?>,tags=<?>,updated_at=<?>,virtual_size=<?>,visibility=<?>), allow threads: True _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:563
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.164 2 DEBUG nova.virt.hardware [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Flavor limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:348
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.165 2 DEBUG nova.virt.hardware [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Image limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:352
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.165 2 DEBUG nova.virt.hardware [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Flavor pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:388
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.165 2 DEBUG nova.virt.hardware [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Image pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:392
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.166 2 DEBUG nova.virt.hardware [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Chose sockets=0, cores=0, threads=0; limits were sockets=65536, cores=65536, threads=65536 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:430
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.166 2 DEBUG nova.virt.hardware [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Topology preferred VirtCPUTopology(cores=0,sockets=0,threads=0), maximum VirtCPUTopology(cores=65536,sockets=65536,threads=65536) _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:569
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.166 2 DEBUG nova.virt.hardware [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Build topologies for 1 vcpu(s) 1:1:1 _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:471
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.167 2 DEBUG nova.virt.hardware [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Got 1 possible topologies _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:501
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.167 2 DEBUG nova.virt.hardware [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Possible topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:575
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.167 2 DEBUG nova.virt.hardware [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Sorted desired topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:577
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.167 2 DEBUG nova.objects.instance [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lazy-loading 'vcpu_model' on Instance uuid f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:40:40 compute-0 systemd[1]: libpod-conmon-c0d88bfd9df1a49fd0c922f853153438c0fe1f0209bd8d1173c10be3e9662a0a.scope: Deactivated successfully.
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.193 2 DEBUG oslo_concurrency.processutils [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:40:40 compute-0 podman[456296]: 2025-10-11 02:40:40.317169771 +0000 UTC m=+0.115420701 container remove c0d88bfd9df1a49fd0c922f853153438c0fe1f0209bd8d1173c10be3e9662a0a (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:40:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:40.327 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[6a6ad443-2cae-4f82-b014-13cdef26476c]: (4, ('Sat Oct 11 02:40:39 AM UTC 2025 Stopping container neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166 (c0d88bfd9df1a49fd0c922f853153438c0fe1f0209bd8d1173c10be3e9662a0a)\nc0d88bfd9df1a49fd0c922f853153438c0fe1f0209bd8d1173c10be3e9662a0a\nSat Oct 11 02:40:40 AM UTC 2025 Deleting container neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166 (c0d88bfd9df1a49fd0c922f853153438c0fe1f0209bd8d1173c10be3e9662a0a)\nc0d88bfd9df1a49fd0c922f853153438c0fe1f0209bd8d1173c10be3e9662a0a\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:40.329 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[d1e0c99f-17eb-4240-b355-6d83eda6380b]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:40.331 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapb4d521f7-70, bridge=None, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.335 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:40 compute-0 kernel: tapb4d521f7-70: left promiscuous mode
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.354 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:40.358 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[af708aab-0f38-4a50-a0b1-c44745634314]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:40.389 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[4a73fceb-9c90-4f1a-b979-f7964b3944b7]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:40.392 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[db1254a2-c6a2-40f9-a6ec-2e54d35d3c19]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:40.416 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[73bfd0f0-3459-4d23-8fd6-65aabac04923]: (4, [{'family': 0, '__align': (), 'ifi_type': 772, 'index': 1, 'flags': 65609, 'change': 0, 'attrs': [['IFLA_IFNAME', 'lo'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UNKNOWN'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 65536], ['IFLA_MIN_MTU', 0], ['IFLA_MAX_MTU', 0], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 1], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 1], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 0], ['IFLA_CARRIER_UP_COUNT', 0], ['IFLA_CARRIER_DOWN_COUNT', 0], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', '00:00:00:00:00:00'], ['IFLA_BROADCAST', '00:00:00:00:00:00'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 1, 'nopolicy': 1, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 671348, 'reachable_time': 44071, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 65536, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 4294967295, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 4294967295, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 0, 'inoctets': 0, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 0, 'outoctets': 0, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 0, 'outmcastpkts': 0, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 0, 'outmcastoctets': 0, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 0, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 0, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1404, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 456330, 'error': None, 'target': 'ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:40.420 286647 DEBUG neutron.privileged.agent.linux.ip_lib [-] Namespace ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166 deleted. remove_netns /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:607
Oct 11 02:40:40 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:40.421 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[bbe6eacd-9d23-4c21-95aa-b5180f42f745]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:40 compute-0 systemd[1]: run-netns-ovnmeta\x2db4d521f7\x2d7729\x2d40fd\x2daa58\x2d7126044eb166.mount: Deactivated successfully.
Oct 11 02:40:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:40:40 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1812166980' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.670 2 DEBUG oslo_concurrency.processutils [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.477s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:40:40 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1812166980' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:40:40 compute-0 nova_compute[356901]: 2025-10-11 02:40:40.735 2 DEBUG oslo_concurrency.processutils [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:40:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1889: 321 pgs: 321 active+clean; 357 MiB data, 444 MiB used, 60 GiB / 60 GiB avail; 1.0 MiB/s rd, 2.2 MiB/s wr, 117 op/s
Oct 11 02:40:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:40:41 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/588876834' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:40:41 compute-0 podman[456371]: 2025-10-11 02:40:41.223488285 +0000 UTC m=+0.102997622 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, container_name=ceilometer_agent_ipmi)
Oct 11 02:40:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.246 2 DEBUG oslo_concurrency.processutils [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.511s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.248 2 DEBUG nova.virt.libvirt.vif [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:38:58Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=<?>,disable_terminate=False,display_description='tempest-ServerActionsTestJSON-server-482072585',display_name='tempest-ServerActionsTestJSON-server-482072585',ec2_ids=<?>,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-serveractionstestjson-server-482072585',id=6,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBEeyedFg6J90z3asuDBQl1/Bvzj806ldEmlyVo7UkMTJHBgXm6kW1TdMM5vQZaYHoLzJajtdp6cuAv6b+cT74TvAgDg4tZ7S8WdWrLaHLA9uudTCq+0DsKhebTJVvA2XxA==',key_name='tempest-keypair-177844218',keypairs=<?>,launch_index=0,launched_at=2025-10-11T02:39:27Z,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=<?>,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=<?>,power_state=1,progress=0,project_id='dba4f6e51d33430ebf5566af53f6fbcc',ramdisk_id='',reservation_id='r-xpsstq1e',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_cdrom_bus='sata',image_hw_disk_bus='virtio',image_hw_input_bus='usb',image_hw_machine_type='q35',image_hw_pointer_model='usbtablet',image_hw_rng_model='virtio',image_hw_video_model='virtio',image_hw_vif_model='virtio',image_min_disk='1',image_min_ram='0',owner_project_name='tempest-ServerActionsTestJSON-1563605323',owner_user_name='tempest-ServerActionsTestJSON-1563605323-project-member'},tags=<?>,task_state='reboot_started_hard',terminated_at=None,trusted_certs=<?>,updated_at=2025-10-11T02:40:39Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='11c81e88a90342bba2c2816e4e3cb191',uuid=f5eb6746-7f42-4fa4-8e26-cda5cfa0c765,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='active') vif={"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.187", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} virt_type=kvm get_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:563
Oct 11 02:40:41 compute-0 podman[456372]: 2025-10-11 02:40:41.24865392 +0000 UTC m=+0.127895301 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.tags=minimal rhel9, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, build-date=2025-08-20T13:12:41, container_name=openstack_network_exporter, architecture=x86_64, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, release=1755695350, config_id=edpm, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, version=9.6, com.redhat.component=ubi9-minimal-container, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, name=ubi9-minimal, io.buildah.version=1.33.7, url=https://catalog.redhat.com/en/search?searchType=containers, distribution-scope=public, io.openshift.expose-services=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-type=git, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.248 2 DEBUG nova.network.os_vif_util [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Converting VIF {"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.187", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.249 2 DEBUG nova.network.os_vif_util [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Converted object VIFOpenVSwitch(active=True,address=fa:16:3e:8d:b8:dd,bridge_name='br-int',has_traffic_filtering=True,id=d7c4233c-f79b-4f32-b896-c36d4abb7d26,network=Network(b4d521f7-7729-40fd-aa58-7126044eb166),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd7c4233c-f7') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.250 2 DEBUG nova.objects.instance [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lazy-loading 'pci_devices' on Instance uuid f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:40:41 compute-0 podman[456373]: 2025-10-11 02:40:41.25344158 +0000 UTC m=+0.111692603 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.268 2 DEBUG nova.virt.libvirt.driver [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] End _get_guest_xml xml=<domain type="kvm">
Oct 11 02:40:41 compute-0 nova_compute[356901]:   <uuid>f5eb6746-7f42-4fa4-8e26-cda5cfa0c765</uuid>
Oct 11 02:40:41 compute-0 nova_compute[356901]:   <name>instance-00000006</name>
Oct 11 02:40:41 compute-0 nova_compute[356901]:   <memory>131072</memory>
Oct 11 02:40:41 compute-0 nova_compute[356901]:   <vcpu>1</vcpu>
Oct 11 02:40:41 compute-0 nova_compute[356901]:   <metadata>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.1">
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <nova:package version="27.5.2-0.20250829104910.6f8decf.el9"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <nova:name>tempest-ServerActionsTestJSON-server-482072585</nova:name>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <nova:creationTime>2025-10-11 02:40:40</nova:creationTime>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <nova:flavor name="m1.nano">
Oct 11 02:40:41 compute-0 nova_compute[356901]:         <nova:memory>128</nova:memory>
Oct 11 02:40:41 compute-0 nova_compute[356901]:         <nova:disk>1</nova:disk>
Oct 11 02:40:41 compute-0 nova_compute[356901]:         <nova:swap>0</nova:swap>
Oct 11 02:40:41 compute-0 nova_compute[356901]:         <nova:ephemeral>0</nova:ephemeral>
Oct 11 02:40:41 compute-0 nova_compute[356901]:         <nova:vcpus>1</nova:vcpus>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       </nova:flavor>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <nova:owner>
Oct 11 02:40:41 compute-0 nova_compute[356901]:         <nova:user uuid="11c81e88a90342bba2c2816e4e3cb191">tempest-ServerActionsTestJSON-1563605323-project-member</nova:user>
Oct 11 02:40:41 compute-0 nova_compute[356901]:         <nova:project uuid="dba4f6e51d33430ebf5566af53f6fbcc">tempest-ServerActionsTestJSON-1563605323</nova:project>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       </nova:owner>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <nova:root type="image" uuid="72f37f2e-4296-450e-9a12-10717f4ac7dc"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <nova:ports>
Oct 11 02:40:41 compute-0 nova_compute[356901]:         <nova:port uuid="d7c4233c-f79b-4f32-b896-c36d4abb7d26">
Oct 11 02:40:41 compute-0 nova_compute[356901]:           <nova:ip type="fixed" address="10.100.0.4" ipVersion="4"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:         </nova:port>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       </nova:ports>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     </nova:instance>
Oct 11 02:40:41 compute-0 nova_compute[356901]:   </metadata>
Oct 11 02:40:41 compute-0 nova_compute[356901]:   <sysinfo type="smbios">
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <system>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <entry name="manufacturer">RDO</entry>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <entry name="product">OpenStack Compute</entry>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <entry name="version">27.5.2-0.20250829104910.6f8decf.el9</entry>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <entry name="serial">f5eb6746-7f42-4fa4-8e26-cda5cfa0c765</entry>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <entry name="uuid">f5eb6746-7f42-4fa4-8e26-cda5cfa0c765</entry>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <entry name="family">Virtual Machine</entry>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     </system>
Oct 11 02:40:41 compute-0 nova_compute[356901]:   </sysinfo>
Oct 11 02:40:41 compute-0 nova_compute[356901]:   <os>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <type arch="x86_64" machine="q35">hvm</type>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <boot dev="hd"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <smbios mode="sysinfo"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:   </os>
Oct 11 02:40:41 compute-0 nova_compute[356901]:   <features>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <acpi/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <apic/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <vmcoreinfo/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:   </features>
Oct 11 02:40:41 compute-0 nova_compute[356901]:   <clock offset="utc">
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <timer name="pit" tickpolicy="delay"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <timer name="rtc" tickpolicy="catchup"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <timer name="hpet" present="no"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:   </clock>
Oct 11 02:40:41 compute-0 nova_compute[356901]:   <cpu mode="host-model" match="exact">
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <topology sockets="1" cores="1" threads="1"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:40:41 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_disk">
Oct 11 02:40:41 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       </source>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:40:41 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <target dev="vda" bus="virtio"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <disk type="network" device="cdrom">
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_disk.config">
Oct 11 02:40:41 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       </source>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:40:41 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <target dev="sda" bus="sata"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <interface type="ethernet">
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <mac address="fa:16:3e:8d:b8:dd"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <driver name="vhost" rx_queue_size="512"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <mtu size="1442"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <target dev="tapd7c4233c-f7"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <serial type="pty">
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <log file="/var/lib/nova/instances/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/console.log" append="off"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     </serial>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <graphics type="vnc" autoport="yes" listen="::0"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <video>
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     </video>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <input type="tablet" bus="usb"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <input type="keyboard" bus="usb"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <rng model="virtio">
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <backend model="random">/dev/urandom</backend>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <controller type="usb" index="0"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     <memballoon model="virtio">
Oct 11 02:40:41 compute-0 nova_compute[356901]:       <stats period="10"/>
Oct 11 02:40:41 compute-0 nova_compute[356901]:     </memballoon>
Oct 11 02:40:41 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:40:41 compute-0 nova_compute[356901]: </domain>
Oct 11 02:40:41 compute-0 nova_compute[356901]:  _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7555
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.270 2 DEBUG nova.virt.libvirt.driver [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] skipping disk for instance-00000006 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.270 2 DEBUG nova.virt.libvirt.driver [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] skipping disk for instance-00000006 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.271 2 DEBUG nova.virt.libvirt.vif [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:38:58Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=<?>,disable_terminate=False,display_description='tempest-ServerActionsTestJSON-server-482072585',display_name='tempest-ServerActionsTestJSON-server-482072585',ec2_ids=<?>,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-serveractionstestjson-server-482072585',id=6,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBEeyedFg6J90z3asuDBQl1/Bvzj806ldEmlyVo7UkMTJHBgXm6kW1TdMM5vQZaYHoLzJajtdp6cuAv6b+cT74TvAgDg4tZ7S8WdWrLaHLA9uudTCq+0DsKhebTJVvA2XxA==',key_name='tempest-keypair-177844218',keypairs=<?>,launch_index=0,launched_at=2025-10-11T02:39:27Z,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=<?>,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=PciDeviceList,pci_requests=<?>,power_state=1,progress=0,project_id='dba4f6e51d33430ebf5566af53f6fbcc',ramdisk_id='',reservation_id='r-xpsstq1e',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_cdrom_bus='sata',image_hw_disk_bus='virtio',image_hw_input_bus='usb',image_hw_machine_type='q35',image_hw_pointer_model='usbtablet',image_hw_rng_model='virtio',image_hw_video_model='virtio',image_hw_vif_model='virtio',image_min_disk='1',image_min_ram='0',owner_project_name='tempest-ServerActionsTestJSON-1563605323',owner_user_name='tempest-ServerActionsTestJSON-1563605323-project-member'},tags=<?>,task_state='reboot_started_hard',terminated_at=None,trusted_certs=<?>,updated_at=2025-10-11T02:40:39Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='11c81e88a90342bba2c2816e4e3cb191',uuid=f5eb6746-7f42-4fa4-8e26-cda5cfa0c765,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='active') vif={"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.187", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} plug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:710
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.272 2 DEBUG nova.network.os_vif_util [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Converting VIF {"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.187", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.272 2 DEBUG nova.network.os_vif_util [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Converted object VIFOpenVSwitch(active=True,address=fa:16:3e:8d:b8:dd,bridge_name='br-int',has_traffic_filtering=True,id=d7c4233c-f79b-4f32-b896-c36d4abb7d26,network=Network(b4d521f7-7729-40fd-aa58-7126044eb166),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd7c4233c-f7') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.273 2 DEBUG os_vif [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Plugging vif VIFOpenVSwitch(active=True,address=fa:16:3e:8d:b8:dd,bridge_name='br-int',has_traffic_filtering=True,id=d7c4233c-f79b-4f32-b896-c36d4abb7d26,network=Network(b4d521f7-7729-40fd-aa58-7126044eb166),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd7c4233c-f7') plug /usr/lib/python3.9/site-packages/os_vif/__init__.py:76
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.274 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.274 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddBridgeCommand(_result=None, name=br-int, may_exist=True, datapath_type=system) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.275 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.279 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.279 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapd7c4233c-f7, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.280 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Interface, record=tapd7c4233c-f7, col_values=(('external_ids', {'iface-id': 'd7c4233c-f79b-4f32-b896-c36d4abb7d26', 'iface-status': 'active', 'attached-mac': 'fa:16:3e:8d:b8:dd', 'vm-uuid': 'f5eb6746-7f42-4fa4-8e26-cda5cfa0c765'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:41 compute-0 NetworkManager[44908]: <info>  [1760150441.2844] manager: (tapd7c4233c-f7): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/62)
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.284 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.291 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.292 2 INFO os_vif [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Successfully plugged vif VIFOpenVSwitch(active=True,address=fa:16:3e:8d:b8:dd,bridge_name='br-int',has_traffic_filtering=True,id=d7c4233c-f79b-4f32-b896-c36d4abb7d26,network=Network(b4d521f7-7729-40fd-aa58-7126044eb166),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd7c4233c-f7')
Oct 11 02:40:41 compute-0 kernel: tapd7c4233c-f7: entered promiscuous mode
Oct 11 02:40:41 compute-0 ovn_controller[88370]: 2025-10-11T02:40:41Z|00128|binding|INFO|Claiming lport d7c4233c-f79b-4f32-b896-c36d4abb7d26 for this chassis.
Oct 11 02:40:41 compute-0 ovn_controller[88370]: 2025-10-11T02:40:41Z|00129|binding|INFO|d7c4233c-f79b-4f32-b896-c36d4abb7d26: Claiming fa:16:3e:8d:b8:dd 10.100.0.4
Oct 11 02:40:41 compute-0 NetworkManager[44908]: <info>  [1760150441.4163] manager: (tapd7c4233c-f7): new Tun device (/org/freedesktop/NetworkManager/Devices/63)
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.414 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:41 compute-0 systemd-udevd[456239]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.423 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:8d:b8:dd 10.100.0.4'], port_security=['fa:16:3e:8d:b8:dd 10.100.0.4'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.4/28', 'neutron:device_id': 'f5eb6746-7f42-4fa4-8e26-cda5cfa0c765', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-b4d521f7-7729-40fd-aa58-7126044eb166', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': 'dba4f6e51d33430ebf5566af53f6fbcc', 'neutron:revision_number': '4', 'neutron:security_group_ids': '82e011ad-d874-487b-b398-e13313bfa497', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:host_id': 'compute-0.ctlplane.example.com', 'neutron:port_fip': '192.168.122.187'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=283b08c2-109a-4649-a6db-2339ca56efb4, chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=d7c4233c-f79b-4f32-b896-c36d4abb7d26) old=Port_Binding(chassis=[]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.424 286362 INFO neutron.agent.ovn.metadata.agent [-] Port d7c4233c-f79b-4f32-b896-c36d4abb7d26 in datapath b4d521f7-7729-40fd-aa58-7126044eb166 bound to our chassis
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.427 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network b4d521f7-7729-40fd-aa58-7126044eb166
Oct 11 02:40:41 compute-0 NetworkManager[44908]: <info>  [1760150441.4432] device (tapd7c4233c-f7): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 02:40:41 compute-0 NetworkManager[44908]: <info>  [1760150441.4440] device (tapd7c4233c-f7): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Oct 11 02:40:41 compute-0 ovn_controller[88370]: 2025-10-11T02:40:41Z|00130|binding|INFO|Setting lport d7c4233c-f79b-4f32-b896-c36d4abb7d26 ovn-installed in OVS
Oct 11 02:40:41 compute-0 ovn_controller[88370]: 2025-10-11T02:40:41Z|00131|binding|INFO|Setting lport d7c4233c-f79b-4f32-b896-c36d4abb7d26 up in Southbound
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.446 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[b0321efd-d907-4638-a544-8464e6d0bc74]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.447 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Creating VETH tapb4d521f7-71 in ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166 namespace provision_datapath /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:665
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.448 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.451 422955 DEBUG neutron.privileged.agent.linux.ip_lib [-] Interface tapb4d521f7-70 not found in namespace None get_link_id /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:204
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.451 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[714840f8-7449-4fdd-886c-e1c59316cee3]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.454 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.456 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[115efb9b-1188-4e2d-828e-3936a764599c]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:41 compute-0 systemd-machined[137586]: New machine qemu-12-instance-00000006.
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.474 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[aa3d10ae-23bd-454f-85f4-490bf49fddc7]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:41 compute-0 systemd[1]: Started Virtual Machine qemu-12-instance-00000006.
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.497 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[5ac8cac3-3825-40bf-8253-422614bb671c]: (4, ('net.ipv4.conf.all.promote_secondaries = 1\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.540 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[7735b06d-2a18-4dc5-a71c-8059414f7f28]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.547 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[93c9b47c-5124-406b-8852-8519d00deee7]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:41 compute-0 NetworkManager[44908]: <info>  [1760150441.5493] manager: (tapb4d521f7-70): new Veth device (/org/freedesktop/NetworkManager/Devices/64)
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.595 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[0bfd753a-a3fe-41a5-a52b-b021d81a676f]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.599 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[6bbb0c16-38b1-497f-88a7-b8e8838c17af]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:41 compute-0 NetworkManager[44908]: <info>  [1760150441.6283] device (tapb4d521f7-70): carrier: link connected
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.635 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[401096c6-af4b-42a5-8097-582df208e99a]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.658 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[37e9d2ed-5bdb-4bfc-bc7b-8a490a56edd8]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapb4d521f7-71'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:9e:43:57'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 2, 'rx_bytes': 110, 'tx_bytes': 180, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 2, 'rx_bytes': 110, 'tx_bytes': 180, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 39], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 678935, 'reachable_time': 18678, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 2, 'outoctets': 152, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 2, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 152, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 2, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 456478, 'error': None, 'target': 'ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.678 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[b4eb5e4e-e682-4fcf-97fe-4dd00af041a2]: (4, ({'family': 10, 'prefixlen': 64, 'flags': 192, 'scope': 253, 'index': 2, 'attrs': [['IFA_ADDRESS', 'fe80::f816:3eff:fe9e:4357'], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 678935, 'tstamp': 678935}], ['IFA_FLAGS', 192]], 'header': {'length': 72, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 456479, 'error': None, 'target': 'ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'},)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:41 compute-0 ceph-mon[191930]: pgmap v1889: 321 pgs: 321 active+clean; 357 MiB data, 444 MiB used, 60 GiB / 60 GiB avail; 1.0 MiB/s rd, 2.2 MiB/s wr, 117 op/s
Oct 11 02:40:41 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/588876834' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.706 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[eb71b7dc-fc65-4a09-bfe4-7fb7624fb728]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapb4d521f7-71'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:9e:43:57'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 2, 'rx_bytes': 110, 'tx_bytes': 180, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 2, 'rx_bytes': 110, 'tx_bytes': 180, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 39], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 678935, 'reachable_time': 18678, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 2, 'outoctets': 152, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 2, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 152, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 2, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 0, 'sequence_number': 255, 'pid': 456480, 'error': None, 'target': 'ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.753 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[3c6f7d67-fc3b-4edc-893c-a9e95e14b431]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.788 2 DEBUG nova.network.neutron [None req-2359aafb-fd5a-442d-b514-5787b5c759f2 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Updating instance_info_cache with network_info: [{"id": "4076fda2-be62-4c52-b073-8bf26574dee1", "address": "fa:16:3e:c5:9b:82", "network": {"id": "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2", "bridge": "br-int", "label": "tempest-AttachInterfacesUnderV243Test-139612684-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}, {"address": "10.100.0.7", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d89911bf2931487c98dc0f44a8b67bca", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap4076fda2-be", "ovs_interfaceid": "4076fda2-be62-4c52-b073-8bf26574dee1", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.810 2 DEBUG oslo_concurrency.lockutils [None req-2359aafb-fd5a-442d-b514-5787b5c759f2 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Releasing lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.811 2 DEBUG nova.compute.manager [None req-2359aafb-fd5a-442d-b514-5787b5c759f2 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Inject network info _inject_network_info /usr/lib/python3.9/site-packages/nova/compute/manager.py:7144
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.811 2 DEBUG nova.compute.manager [None req-2359aafb-fd5a-442d-b514-5787b5c759f2 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] network_info to inject: |[{"id": "4076fda2-be62-4c52-b073-8bf26574dee1", "address": "fa:16:3e:c5:9b:82", "network": {"id": "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2", "bridge": "br-int", "label": "tempest-AttachInterfacesUnderV243Test-139612684-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}, {"address": "10.100.0.7", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d89911bf2931487c98dc0f44a8b67bca", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap4076fda2-be", "ovs_interfaceid": "4076fda2-be62-4c52-b073-8bf26574dee1", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}]| _inject_network_info /usr/lib/python3.9/site-packages/nova/compute/manager.py:7145
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.813 2 DEBUG oslo_concurrency.lockutils [req-7cb15a41-230d-4698-95b3-84c029e264c8 req-1bc0df13-0e8a-4e61-b96b-cd01865b564f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.813 2 DEBUG nova.network.neutron [req-7cb15a41-230d-4698-95b3-84c029e264c8 req-1bc0df13-0e8a-4e61-b96b-cd01865b564f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Refreshing network info cache for port 4076fda2-be62-4c52-b073-8bf26574dee1 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.846 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[e4890462-856b-4006-9917-85dd2d805df8]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.848 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapb4d521f7-70, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.848 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.849 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapb4d521f7-70, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.851 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:41 compute-0 kernel: tapb4d521f7-70: entered promiscuous mode
Oct 11 02:40:41 compute-0 NetworkManager[44908]: <info>  [1760150441.8524] manager: (tapb4d521f7-70): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/65)
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.854 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.854 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tapb4d521f7-70, col_values=(('external_ids', {'iface-id': 'aa37c6ed-d2db-4ed4-b1c9-cfd071cfd96a'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.855 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:41 compute-0 ovn_controller[88370]: 2025-10-11T02:40:41Z|00132|binding|INFO|Releasing lport aa37c6ed-d2db-4ed4-b1c9-cfd071cfd96a from this chassis (sb_readonly=0)
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.857 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.858 286362 DEBUG neutron.agent.linux.utils [-] Unable to access /var/lib/neutron/external/pids/b4d521f7-7729-40fd-aa58-7126044eb166.pid.haproxy; Error: [Errno 2] No such file or directory: '/var/lib/neutron/external/pids/b4d521f7-7729-40fd-aa58-7126044eb166.pid.haproxy' get_value_from_file /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:252
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.859 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[a4d39317-da29-46f8-aefb-112fd3d414d7]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.861 286362 DEBUG neutron.agent.ovn.metadata.driver [-] haproxy_cfg = 
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: global
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     log         /dev/log local0 debug
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     log-tag     haproxy-metadata-proxy-b4d521f7-7729-40fd-aa58-7126044eb166
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     user        root
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     group       root
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     maxconn     1024
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     pidfile     /var/lib/neutron/external/pids/b4d521f7-7729-40fd-aa58-7126044eb166.pid.haproxy
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     daemon
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: defaults
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     log global
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     mode http
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     option httplog
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     option dontlognull
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     option http-server-close
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     option forwardfor
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     retries                 3
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     timeout http-request    30s
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     timeout connect         30s
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     timeout client          32s
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     timeout server          32s
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     timeout http-keep-alive 30s
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: listen listener
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     bind 169.254.169.254:80
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     server metadata /var/lib/neutron/metadata_proxy
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:     http-request add-header X-OVN-Network-ID b4d521f7-7729-40fd-aa58-7126044eb166
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]:  create_config_file /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/driver.py:107
Oct 11 02:40:41 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:41.862 286362 DEBUG neutron.agent.linux.utils [-] Running command: ['sudo', 'neutron-rootwrap', '/etc/neutron/rootwrap.conf', 'ip', 'netns', 'exec', 'ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166', 'env', 'PROCESS_TAG=haproxy-b4d521f7-7729-40fd-aa58-7126044eb166', 'haproxy', '-f', '/var/lib/neutron/ovn-metadata-proxy/b4d521f7-7729-40fd-aa58-7126044eb166.conf'] create_process /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:84
Oct 11 02:40:41 compute-0 nova_compute[356901]: 2025-10-11 02:40:41.874 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:42 compute-0 podman[456552]: 2025-10-11 02:40:42.363780022 +0000 UTC m=+0.088356624 container create eca91bf27f53a58ce460e280cfdd0da4c405777e4a1b81c336462d6a18a82c7a (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:40:42 compute-0 podman[456552]: 2025-10-11 02:40:42.318965378 +0000 UTC m=+0.043542010 image pull 1061e4fafe13e0b9aa1ef2c904ba4ad70c44f3e87b1d831f16c6db34937f4022 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Oct 11 02:40:42 compute-0 systemd[1]: Started libpod-conmon-eca91bf27f53a58ce460e280cfdd0da4c405777e4a1b81c336462d6a18a82c7a.scope.
Oct 11 02:40:42 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:40:42 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c0d34672f581e91d347ecc7586ed939fa1ae746dcc8cb29c4b4994d69a8c07a3/merged/var/lib/neutron supports timestamps until 2038 (0x7fffffff)
Oct 11 02:40:42 compute-0 podman[456552]: 2025-10-11 02:40:42.482201695 +0000 UTC m=+0.206778307 container init eca91bf27f53a58ce460e280cfdd0da4c405777e4a1b81c336462d6a18a82c7a (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:40:42 compute-0 podman[456552]: 2025-10-11 02:40:42.492839907 +0000 UTC m=+0.217416499 container start eca91bf27f53a58ce460e280cfdd0da4c405777e4a1b81c336462d6a18a82c7a (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:40:42 compute-0 neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166[456567]: [NOTICE]   (456571) : New worker (456573) forked
Oct 11 02:40:42 compute-0 neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166[456567]: [NOTICE]   (456571) : Loading success.
Oct 11 02:40:42 compute-0 nova_compute[356901]: 2025-10-11 02:40:42.743 2 DEBUG nova.virt.libvirt.host [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Removed pending event for f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 due to event _event_emit_delayed /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:438
Oct 11 02:40:42 compute-0 nova_compute[356901]: 2025-10-11 02:40:42.743 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150442.7430646, f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 => Resumed> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:40:42 compute-0 nova_compute[356901]: 2025-10-11 02:40:42.744 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] VM Resumed (Lifecycle Event)
Oct 11 02:40:42 compute-0 nova_compute[356901]: 2025-10-11 02:40:42.749 2 DEBUG nova.compute.manager [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Instance event wait completed in 0 seconds for  wait_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:577
Oct 11 02:40:42 compute-0 nova_compute[356901]: 2025-10-11 02:40:42.756 2 INFO nova.virt.libvirt.driver [-] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Instance rebooted successfully.
Oct 11 02:40:42 compute-0 nova_compute[356901]: 2025-10-11 02:40:42.756 2 DEBUG nova.compute.manager [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:40:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1890: 321 pgs: 321 active+clean; 357 MiB data, 444 MiB used, 60 GiB / 60 GiB avail; 658 KiB/s rd, 2.2 MiB/s wr, 88 op/s
Oct 11 02:40:42 compute-0 nova_compute[356901]: 2025-10-11 02:40:42.785 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:40:42 compute-0 nova_compute[356901]: 2025-10-11 02:40:42.792 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Synchronizing instance power state after lifecycle event "Resumed"; current vm_state: active, current task_state: reboot_started_hard, current DB power_state: 1, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:40:42 compute-0 nova_compute[356901]: 2025-10-11 02:40:42.823 2 DEBUG oslo_concurrency.lockutils [None req-6f7cb79f-944e-4c09-b947-16afc6e33397 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" "released" by "nova.compute.manager.ComputeManager.reboot_instance.<locals>.do_reboot_instance" :: held 4.834s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:42 compute-0 nova_compute[356901]: 2025-10-11 02:40:42.827 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150442.7505205, f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 => Started> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:40:42 compute-0 nova_compute[356901]: 2025-10-11 02:40:42.828 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] VM Started (Lifecycle Event)
Oct 11 02:40:42 compute-0 nova_compute[356901]: 2025-10-11 02:40:42.829 2 DEBUG nova.virt.driver [-] Emitting event <LifecycleEvent: 1760150427.8218946, 0ec010f2-6758-466d-900f-c8c6ffe81844 => Stopped> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:40:42 compute-0 nova_compute[356901]: 2025-10-11 02:40:42.830 2 INFO nova.compute.manager [-] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] VM Stopped (Lifecycle Event)
Oct 11 02:40:42 compute-0 nova_compute[356901]: 2025-10-11 02:40:42.861 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:40:42 compute-0 nova_compute[356901]: 2025-10-11 02:40:42.870 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Synchronizing instance power state after lifecycle event "Started"; current vm_state: active, current task_state: None, current DB power_state: 1, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:40:42 compute-0 ceph-mon[191930]: pgmap v1890: 321 pgs: 321 active+clean; 357 MiB data, 444 MiB used, 60 GiB / 60 GiB avail; 658 KiB/s rd, 2.2 MiB/s wr, 88 op/s
Oct 11 02:40:42 compute-0 nova_compute[356901]: 2025-10-11 02:40:42.913 2 DEBUG nova.compute.manager [None req-75cd3063-caab-424b-ac48-5404c353cc21 - - - - - -] [instance: 0ec010f2-6758-466d-900f-c8c6ffe81844] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:40:43 compute-0 nova_compute[356901]: 2025-10-11 02:40:43.665 2 DEBUG nova.objects.instance [None req-8987e209-16a8-4e72-9c01-d66f2faf8f92 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lazy-loading 'flavor' on Instance uuid 49d4f343-d1b4-4594-96d2-0777a5ce8581 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:40:43 compute-0 nova_compute[356901]: 2025-10-11 02:40:43.676 2 DEBUG nova.compute.manager [req-96622c66-9215-453d-9da9-a185e2bbfaac req-75588553-89e9-4cc6-923f-5a5115e037c9 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Received event network-vif-unplugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:40:43 compute-0 nova_compute[356901]: 2025-10-11 02:40:43.677 2 DEBUG oslo_concurrency.lockutils [req-96622c66-9215-453d-9da9-a185e2bbfaac req-75588553-89e9-4cc6-923f-5a5115e037c9 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:43 compute-0 nova_compute[356901]: 2025-10-11 02:40:43.677 2 DEBUG oslo_concurrency.lockutils [req-96622c66-9215-453d-9da9-a185e2bbfaac req-75588553-89e9-4cc6-923f-5a5115e037c9 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:43 compute-0 nova_compute[356901]: 2025-10-11 02:40:43.677 2 DEBUG oslo_concurrency.lockutils [req-96622c66-9215-453d-9da9-a185e2bbfaac req-75588553-89e9-4cc6-923f-5a5115e037c9 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:43 compute-0 nova_compute[356901]: 2025-10-11 02:40:43.678 2 DEBUG nova.compute.manager [req-96622c66-9215-453d-9da9-a185e2bbfaac req-75588553-89e9-4cc6-923f-5a5115e037c9 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] No waiting events found dispatching network-vif-unplugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:40:43 compute-0 nova_compute[356901]: 2025-10-11 02:40:43.678 2 WARNING nova.compute.manager [req-96622c66-9215-453d-9da9-a185e2bbfaac req-75588553-89e9-4cc6-923f-5a5115e037c9 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Received unexpected event network-vif-unplugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 for instance with vm_state active and task_state None.
Oct 11 02:40:43 compute-0 nova_compute[356901]: 2025-10-11 02:40:43.710 2 DEBUG oslo_concurrency.lockutils [None req-8987e209-16a8-4e72-9c01-d66f2faf8f92 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Acquiring lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:40:44 compute-0 nova_compute[356901]: 2025-10-11 02:40:44.135 2 DEBUG nova.network.neutron [req-7cb15a41-230d-4698-95b3-84c029e264c8 req-1bc0df13-0e8a-4e61-b96b-cd01865b564f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Updated VIF entry in instance network info cache for port 4076fda2-be62-4c52-b073-8bf26574dee1. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:40:44 compute-0 nova_compute[356901]: 2025-10-11 02:40:44.136 2 DEBUG nova.network.neutron [req-7cb15a41-230d-4698-95b3-84c029e264c8 req-1bc0df13-0e8a-4e61-b96b-cd01865b564f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Updating instance_info_cache with network_info: [{"id": "4076fda2-be62-4c52-b073-8bf26574dee1", "address": "fa:16:3e:c5:9b:82", "network": {"id": "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2", "bridge": "br-int", "label": "tempest-AttachInterfacesUnderV243Test-139612684-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}, {"address": "10.100.0.7", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d89911bf2931487c98dc0f44a8b67bca", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap4076fda2-be", "ovs_interfaceid": "4076fda2-be62-4c52-b073-8bf26574dee1", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:40:44 compute-0 nova_compute[356901]: 2025-10-11 02:40:44.165 2 DEBUG oslo_concurrency.lockutils [req-7cb15a41-230d-4698-95b3-84c029e264c8 req-1bc0df13-0e8a-4e61-b96b-cd01865b564f 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:40:44 compute-0 nova_compute[356901]: 2025-10-11 02:40:44.166 2 DEBUG oslo_concurrency.lockutils [None req-8987e209-16a8-4e72-9c01-d66f2faf8f92 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Acquired lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:40:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1891: 321 pgs: 321 active+clean; 357 MiB data, 444 MiB used, 60 GiB / 60 GiB avail; 387 KiB/s rd, 2.2 MiB/s wr, 73 op/s
Oct 11 02:40:44 compute-0 ceph-mon[191930]: pgmap v1891: 321 pgs: 321 active+clean; 357 MiB data, 444 MiB used, 60 GiB / 60 GiB avail; 387 KiB/s rd, 2.2 MiB/s wr, 73 op/s
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.071 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.847 2 DEBUG nova.compute.manager [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Received event network-vif-plugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.848 2 DEBUG oslo_concurrency.lockutils [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.849 2 DEBUG oslo_concurrency.lockutils [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.849 2 DEBUG oslo_concurrency.lockutils [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.850 2 DEBUG nova.compute.manager [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] No waiting events found dispatching network-vif-plugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.851 2 WARNING nova.compute.manager [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Received unexpected event network-vif-plugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 for instance with vm_state active and task_state None.
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.851 2 DEBUG nova.compute.manager [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Received event network-vif-plugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.851 2 DEBUG oslo_concurrency.lockutils [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.852 2 DEBUG oslo_concurrency.lockutils [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.852 2 DEBUG oslo_concurrency.lockutils [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.852 2 DEBUG nova.compute.manager [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] No waiting events found dispatching network-vif-plugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.853 2 WARNING nova.compute.manager [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Received unexpected event network-vif-plugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 for instance with vm_state active and task_state None.
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.853 2 DEBUG nova.compute.manager [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Received event network-vif-plugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.854 2 DEBUG oslo_concurrency.lockutils [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.855 2 DEBUG oslo_concurrency.lockutils [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.856 2 DEBUG oslo_concurrency.lockutils [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.857 2 DEBUG nova.compute.manager [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] No waiting events found dispatching network-vif-plugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:40:45 compute-0 nova_compute[356901]: 2025-10-11 02:40:45.858 2 WARNING nova.compute.manager [req-da25efba-9119-4029-b6ff-445ab502e08c req-1533fd2a-6884-47a0-8c70-ccc9d9de4de5 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Received unexpected event network-vif-plugged-d7c4233c-f79b-4f32-b896-c36d4abb7d26 for instance with vm_state active and task_state None.
Oct 11 02:40:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:40:46 compute-0 nova_compute[356901]: 2025-10-11 02:40:46.284 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:46 compute-0 nova_compute[356901]: 2025-10-11 02:40:46.348 2 DEBUG nova.network.neutron [None req-8987e209-16a8-4e72-9c01-d66f2faf8f92 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:40:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1892: 321 pgs: 321 active+clean; 357 MiB data, 444 MiB used, 60 GiB / 60 GiB avail; 1.0 MiB/s rd, 706 KiB/s wr, 63 op/s
Oct 11 02:40:46 compute-0 ceph-mon[191930]: pgmap v1892: 321 pgs: 321 active+clean; 357 MiB data, 444 MiB used, 60 GiB / 60 GiB avail; 1.0 MiB/s rd, 706 KiB/s wr, 63 op/s
Oct 11 02:40:47 compute-0 nova_compute[356901]: 2025-10-11 02:40:47.584 2 DEBUG nova.network.neutron [None req-8987e209-16a8-4e72-9c01-d66f2faf8f92 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Updating instance_info_cache with network_info: [{"id": "4076fda2-be62-4c52-b073-8bf26574dee1", "address": "fa:16:3e:c5:9b:82", "network": {"id": "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2", "bridge": "br-int", "label": "tempest-AttachInterfacesUnderV243Test-139612684-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d89911bf2931487c98dc0f44a8b67bca", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap4076fda2-be", "ovs_interfaceid": "4076fda2-be62-4c52-b073-8bf26574dee1", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:40:47 compute-0 nova_compute[356901]: 2025-10-11 02:40:47.608 2 DEBUG oslo_concurrency.lockutils [None req-8987e209-16a8-4e72-9c01-d66f2faf8f92 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Releasing lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:40:47 compute-0 nova_compute[356901]: 2025-10-11 02:40:47.608 2 DEBUG nova.compute.manager [None req-8987e209-16a8-4e72-9c01-d66f2faf8f92 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Inject network info _inject_network_info /usr/lib/python3.9/site-packages/nova/compute/manager.py:7144
Oct 11 02:40:47 compute-0 nova_compute[356901]: 2025-10-11 02:40:47.609 2 DEBUG nova.compute.manager [None req-8987e209-16a8-4e72-9c01-d66f2faf8f92 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] network_info to inject: |[{"id": "4076fda2-be62-4c52-b073-8bf26574dee1", "address": "fa:16:3e:c5:9b:82", "network": {"id": "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2", "bridge": "br-int", "label": "tempest-AttachInterfacesUnderV243Test-139612684-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d89911bf2931487c98dc0f44a8b67bca", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap4076fda2-be", "ovs_interfaceid": "4076fda2-be62-4c52-b073-8bf26574dee1", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}]| _inject_network_info /usr/lib/python3.9/site-packages/nova/compute/manager.py:7145
Oct 11 02:40:48 compute-0 nova_compute[356901]: 2025-10-11 02:40:48.028 2 DEBUG nova.compute.manager [req-9e108e24-4956-438e-99e9-ed0e57da2206 req-d77aac63-41ca-472c-93ad-bb15916a0085 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Received event network-changed-4076fda2-be62-4c52-b073-8bf26574dee1 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:40:48 compute-0 nova_compute[356901]: 2025-10-11 02:40:48.028 2 DEBUG nova.compute.manager [req-9e108e24-4956-438e-99e9-ed0e57da2206 req-d77aac63-41ca-472c-93ad-bb15916a0085 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Refreshing instance network info cache due to event network-changed-4076fda2-be62-4c52-b073-8bf26574dee1. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:40:48 compute-0 nova_compute[356901]: 2025-10-11 02:40:48.029 2 DEBUG oslo_concurrency.lockutils [req-9e108e24-4956-438e-99e9-ed0e57da2206 req-d77aac63-41ca-472c-93ad-bb15916a0085 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:40:48 compute-0 nova_compute[356901]: 2025-10-11 02:40:48.029 2 DEBUG oslo_concurrency.lockutils [req-9e108e24-4956-438e-99e9-ed0e57da2206 req-d77aac63-41ca-472c-93ad-bb15916a0085 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:40:48 compute-0 nova_compute[356901]: 2025-10-11 02:40:48.030 2 DEBUG nova.network.neutron [req-9e108e24-4956-438e-99e9-ed0e57da2206 req-d77aac63-41ca-472c-93ad-bb15916a0085 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Refreshing network info cache for port 4076fda2-be62-4c52-b073-8bf26574dee1 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:40:48 compute-0 nova_compute[356901]: 2025-10-11 02:40:48.767 2 DEBUG oslo_concurrency.lockutils [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Acquiring lock "49d4f343-d1b4-4594-96d2-0777a5ce8581" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:48 compute-0 nova_compute[356901]: 2025-10-11 02:40:48.767 2 DEBUG oslo_concurrency.lockutils [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lock "49d4f343-d1b4-4594-96d2-0777a5ce8581" acquired by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:48 compute-0 nova_compute[356901]: 2025-10-11 02:40:48.769 2 DEBUG oslo_concurrency.lockutils [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Acquiring lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:48 compute-0 nova_compute[356901]: 2025-10-11 02:40:48.770 2 DEBUG oslo_concurrency.lockutils [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" acquired by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:48 compute-0 nova_compute[356901]: 2025-10-11 02:40:48.771 2 DEBUG oslo_concurrency.lockutils [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" "released" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:48 compute-0 nova_compute[356901]: 2025-10-11 02:40:48.773 2 INFO nova.compute.manager [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Terminating instance
Oct 11 02:40:48 compute-0 nova_compute[356901]: 2025-10-11 02:40:48.775 2 DEBUG nova.compute.manager [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Start destroying the instance on the hypervisor. _shutdown_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:3120
Oct 11 02:40:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1893: 321 pgs: 321 active+clean; 357 MiB data, 444 MiB used, 60 GiB / 60 GiB avail; 1.1 MiB/s rd, 31 KiB/s wr, 50 op/s
Oct 11 02:40:48 compute-0 kernel: tap4076fda2-be (unregistering): left promiscuous mode
Oct 11 02:40:48 compute-0 NetworkManager[44908]: <info>  [1760150448.8857] device (tap4076fda2-be): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Oct 11 02:40:48 compute-0 ceph-mon[191930]: pgmap v1893: 321 pgs: 321 active+clean; 357 MiB data, 444 MiB used, 60 GiB / 60 GiB avail; 1.1 MiB/s rd, 31 KiB/s wr, 50 op/s
Oct 11 02:40:48 compute-0 ovn_controller[88370]: 2025-10-11T02:40:48Z|00133|binding|INFO|Releasing lport 4076fda2-be62-4c52-b073-8bf26574dee1 from this chassis (sb_readonly=0)
Oct 11 02:40:48 compute-0 ovn_controller[88370]: 2025-10-11T02:40:48Z|00134|binding|INFO|Setting lport 4076fda2-be62-4c52-b073-8bf26574dee1 down in Southbound
Oct 11 02:40:48 compute-0 ovn_controller[88370]: 2025-10-11T02:40:48Z|00135|binding|INFO|Removing iface tap4076fda2-be ovn-installed in OVS
Oct 11 02:40:48 compute-0 nova_compute[356901]: 2025-10-11 02:40:48.902 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:48 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:48.912 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:c5:9b:82 10.100.0.14'], port_security=['fa:16:3e:c5:9b:82 10.100.0.14'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.14/28', 'neutron:device_id': '49d4f343-d1b4-4594-96d2-0777a5ce8581', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': 'd89911bf2931487c98dc0f44a8b67bca', 'neutron:revision_number': '6', 'neutron:security_group_ids': '64f0fd08-8b1e-4dfa-b509-25fc417ccbb7', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:host_id': 'compute-0.ctlplane.example.com', 'neutron:port_fip': '192.168.122.245'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=7609ba36-9ccd-4785-a05c-b11167a233de, chassis=[], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=4076fda2-be62-4c52-b073-8bf26574dee1) old=Port_Binding(up=[True], chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:40:48 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:48.916 286362 INFO neutron.agent.ovn.metadata.agent [-] Port 4076fda2-be62-4c52-b073-8bf26574dee1 in datapath eb08ca1c-c05f-4da5-9518-fb3b2d958ee2 unbound from our chassis
Oct 11 02:40:48 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:48.919 286362 DEBUG neutron.agent.ovn.metadata.agent [-] No valid VIF ports were found for network eb08ca1c-c05f-4da5-9518-fb3b2d958ee2, tearing the namespace down if needed _get_provision_params /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:628
Oct 11 02:40:48 compute-0 nova_compute[356901]: 2025-10-11 02:40:48.921 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:48 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:48.921 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[84634ad8-7f6d-4925-872c-778d96c033c4]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:48 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:48.923 286362 INFO neutron.agent.ovn.metadata.agent [-] Cleaning up ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2 namespace which is not needed anymore
Oct 11 02:40:48 compute-0 systemd[1]: machine-qemu\x2d9\x2dinstance\x2d00000009.scope: Deactivated successfully.
Oct 11 02:40:48 compute-0 systemd[1]: machine-qemu\x2d9\x2dinstance\x2d00000009.scope: Consumed 43.838s CPU time.
Oct 11 02:40:48 compute-0 systemd-machined[137586]: Machine qemu-9-instance-00000009 terminated.
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.025 2 INFO nova.virt.libvirt.driver [-] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Instance destroyed successfully.
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.026 2 DEBUG nova.objects.instance [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lazy-loading 'resources' on Instance uuid 49d4f343-d1b4-4594-96d2-0777a5ce8581 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.042 2 DEBUG nova.virt.libvirt.vif [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:39:01Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=<?>,disable_terminate=False,display_description='tempest-AttachInterfacesUnderV243Test-server-402973055',display_name='tempest-AttachInterfacesUnderV243Test-server-402973055',ec2_ids=<?>,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-attachinterfacesunderv243test-server-402973055',id=9,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBKe0LMc8LnBTAISBwhdLZZycX6z/Wtxh5iIrBfzvih3YfC2DgfsCYmIjzIMA1Bmi2PftRsJD/817XJgtfkV0jIbJQ/nBV4X5kWCjFiLmsxPozdtF2YLrErDo+eZfs6cn/g==',key_name='tempest-keypair-1693383324',keypairs=<?>,launch_index=0,launched_at=2025-10-11T02:39:30Z,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=<?>,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=<?>,power_state=1,progress=0,project_id='d89911bf2931487c98dc0f44a8b67bca',ramdisk_id='',reservation_id='r-9ixcygjt',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='reader,member',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_cdrom_bus='sata',image_hw_disk_bus='virtio',image_hw_input_bus='usb',image_hw_machine_type='q35',image_hw_pointer_model='usbtablet',image_hw_rng_model='virtio',image_hw_video_model='virtio',image_hw_vif_model='virtio',image_min_disk='1',image_min_ram='0',owner_project_name='tempest-AttachInterfacesUnderV243Test-1568711783',owner_user_name='tempest-AttachInterfacesUnderV243Test-1568711783-project-member'},tags=<?>,task_state='deleting',terminated_at=None,trusted_certs=<?>,updated_at=2025-10-11T02:40:47Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='9a1414c7b75246f596af7745610a00a4',uuid=49d4f343-d1b4-4594-96d2-0777a5ce8581,vcpu_model=<?>,vcpus=1,vm_mode=None,vm_state='active') vif={"id": "4076fda2-be62-4c52-b073-8bf26574dee1", "address": "fa:16:3e:c5:9b:82", "network": {"id": "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2", "bridge": "br-int", "label": "tempest-AttachInterfacesUnderV243Test-139612684-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d89911bf2931487c98dc0f44a8b67bca", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap4076fda2-be", "ovs_interfaceid": "4076fda2-be62-4c52-b073-8bf26574dee1", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} unplug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:828
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.043 2 DEBUG nova.network.os_vif_util [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Converting VIF {"id": "4076fda2-be62-4c52-b073-8bf26574dee1", "address": "fa:16:3e:c5:9b:82", "network": {"id": "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2", "bridge": "br-int", "label": "tempest-AttachInterfacesUnderV243Test-139612684-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d89911bf2931487c98dc0f44a8b67bca", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap4076fda2-be", "ovs_interfaceid": "4076fda2-be62-4c52-b073-8bf26574dee1", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.044 2 DEBUG nova.network.os_vif_util [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Converted object VIFOpenVSwitch(active=True,address=fa:16:3e:c5:9b:82,bridge_name='br-int',has_traffic_filtering=True,id=4076fda2-be62-4c52-b073-8bf26574dee1,network=Network(eb08ca1c-c05f-4da5-9518-fb3b2d958ee2),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap4076fda2-be') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.044 2 DEBUG os_vif [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Unplugging vif VIFOpenVSwitch(active=True,address=fa:16:3e:c5:9b:82,bridge_name='br-int',has_traffic_filtering=True,id=4076fda2-be62-4c52-b073-8bf26574dee1,network=Network(eb08ca1c-c05f-4da5-9518-fb3b2d958ee2),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap4076fda2-be') unplug /usr/lib/python3.9/site-packages/os_vif/__init__.py:109
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.046 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.047 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tap4076fda2-be, bridge=br-int, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.050 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.052 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.058 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.061 2 INFO os_vif [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Successfully unplugged vif VIFOpenVSwitch(active=True,address=fa:16:3e:c5:9b:82,bridge_name='br-int',has_traffic_filtering=True,id=4076fda2-be62-4c52-b073-8bf26574dee1,network=Network(eb08ca1c-c05f-4da5-9518-fb3b2d958ee2),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap4076fda2-be')
Oct 11 02:40:49 compute-0 neutron-haproxy-ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2[453300]: [NOTICE]   (453304) : haproxy version is 2.8.14-c23fe91
Oct 11 02:40:49 compute-0 neutron-haproxy-ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2[453300]: [NOTICE]   (453304) : path to executable is /usr/sbin/haproxy
Oct 11 02:40:49 compute-0 neutron-haproxy-ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2[453300]: [WARNING]  (453304) : Exiting Master process...
Oct 11 02:40:49 compute-0 neutron-haproxy-ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2[453300]: [WARNING]  (453304) : Exiting Master process...
Oct 11 02:40:49 compute-0 neutron-haproxy-ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2[453300]: [ALERT]    (453304) : Current worker (453306) exited with code 143 (Terminated)
Oct 11 02:40:49 compute-0 neutron-haproxy-ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2[453300]: [WARNING]  (453304) : All workers exited. Exiting... (0)
Oct 11 02:40:49 compute-0 systemd[1]: libpod-6bd3c24d46294c44bf37984f9dfeac701f8f2c9971615da65f534c8dd48e82bf.scope: Deactivated successfully.
Oct 11 02:40:49 compute-0 podman[456615]: 2025-10-11 02:40:49.14741814 +0000 UTC m=+0.072930874 container died 6bd3c24d46294c44bf37984f9dfeac701f8f2c9971615da65f534c8dd48e82bf (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:40:49 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-6bd3c24d46294c44bf37984f9dfeac701f8f2c9971615da65f534c8dd48e82bf-userdata-shm.mount: Deactivated successfully.
Oct 11 02:40:49 compute-0 systemd[1]: var-lib-containers-storage-overlay-7e71b66d8877d7f23d01703ea80d07508b83ac063c4ff5aa8370fc1cdabd3c75-merged.mount: Deactivated successfully.
Oct 11 02:40:49 compute-0 podman[456615]: 2025-10-11 02:40:49.212610301 +0000 UTC m=+0.138123035 container cleanup 6bd3c24d46294c44bf37984f9dfeac701f8f2c9971615da65f534c8dd48e82bf (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_managed=true)
Oct 11 02:40:49 compute-0 systemd[1]: libpod-conmon-6bd3c24d46294c44bf37984f9dfeac701f8f2c9971615da65f534c8dd48e82bf.scope: Deactivated successfully.
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.303 2 DEBUG nova.network.neutron [req-9e108e24-4956-438e-99e9-ed0e57da2206 req-d77aac63-41ca-472c-93ad-bb15916a0085 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Updated VIF entry in instance network info cache for port 4076fda2-be62-4c52-b073-8bf26574dee1. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.304 2 DEBUG nova.network.neutron [req-9e108e24-4956-438e-99e9-ed0e57da2206 req-d77aac63-41ca-472c-93ad-bb15916a0085 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Updating instance_info_cache with network_info: [{"id": "4076fda2-be62-4c52-b073-8bf26574dee1", "address": "fa:16:3e:c5:9b:82", "network": {"id": "eb08ca1c-c05f-4da5-9518-fb3b2d958ee2", "bridge": "br-int", "label": "tempest-AttachInterfacesUnderV243Test-139612684-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.14", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.245", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "d89911bf2931487c98dc0f44a8b67bca", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap4076fda2-be", "ovs_interfaceid": "4076fda2-be62-4c52-b073-8bf26574dee1", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.325 2 DEBUG oslo_concurrency.lockutils [req-9e108e24-4956-438e-99e9-ed0e57da2206 req-d77aac63-41ca-472c-93ad-bb15916a0085 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-49d4f343-d1b4-4594-96d2-0777a5ce8581" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:40:49 compute-0 podman[456660]: 2025-10-11 02:40:49.368900354 +0000 UTC m=+0.109816453 container remove 6bd3c24d46294c44bf37984f9dfeac701f8f2c9971615da65f534c8dd48e82bf (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:40:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:49.386 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[b52e109f-89fc-4d4d-ad0e-c53486c4463f]: (4, ('Sat Oct 11 02:40:49 AM UTC 2025 Stopping container neutron-haproxy-ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2 (6bd3c24d46294c44bf37984f9dfeac701f8f2c9971615da65f534c8dd48e82bf)\n6bd3c24d46294c44bf37984f9dfeac701f8f2c9971615da65f534c8dd48e82bf\nSat Oct 11 02:40:49 AM UTC 2025 Deleting container neutron-haproxy-ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2 (6bd3c24d46294c44bf37984f9dfeac701f8f2c9971615da65f534c8dd48e82bf)\n6bd3c24d46294c44bf37984f9dfeac701f8f2c9971615da65f534c8dd48e82bf\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:49.388 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[6a339df9-5f99-4328-b4d8-9f8f7338eb23]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:49.389 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapeb08ca1c-c0, bridge=None, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:40:49 compute-0 kernel: tapeb08ca1c-c0: left promiscuous mode
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.392 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.411 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:49.416 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[864fbc3b-7067-41d3-8c6a-31383569f9ea]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:49.444 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[0bbfd18b-fc81-4e89-946c-ede6bf298e87]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:49.446 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[6fab2f99-6023-445e-afec-d90ec7f7fbfc]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:49.470 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[bc7f5f25-b296-4f61-a67e-bb7078dd1765]: (4, [{'family': 0, '__align': (), 'ifi_type': 772, 'index': 1, 'flags': 65609, 'change': 0, 'attrs': [['IFLA_IFNAME', 'lo'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UNKNOWN'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 65536], ['IFLA_MIN_MTU', 0], ['IFLA_MAX_MTU', 0], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 1], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 1], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 0], ['IFLA_CARRIER_UP_COUNT', 0], ['IFLA_CARRIER_DOWN_COUNT', 0], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', '00:00:00:00:00:00'], ['IFLA_BROADCAST', '00:00:00:00:00:00'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 1, 'nopolicy': 1, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 671710, 'reachable_time': 34371, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 65536, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 4294967295, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 4294967295, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 0, 'inoctets': 0, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 0, 'outoctets': 0, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 0, 'outmcastpkts': 0, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 0, 'outmcastoctets': 0, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 0, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 0, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1404, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 456680, 'error': None, 'target': 'ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:49.475 286647 DEBUG neutron.privileged.agent.linux.ip_lib [-] Namespace ovnmeta-eb08ca1c-c05f-4da5-9518-fb3b2d958ee2 deleted. remove_netns /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:607
Oct 11 02:40:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:49.475 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[121c4ef6-f122-41ee-8e99-d6c266bd9d72]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:40:49 compute-0 systemd[1]: run-netns-ovnmeta\x2deb08ca1c\x2dc05f\x2d4da5\x2d9518\x2dfb3b2d958ee2.mount: Deactivated successfully.
Oct 11 02:40:49 compute-0 podman[456671]: 2025-10-11 02:40:49.555146662 +0000 UTC m=+0.119307011 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.expose-services=, version=9.4, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm, name=ubi9, release=1214.1726694543, com.redhat.component=ubi9-container, distribution-scope=public, release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., architecture=x86_64, vcs-type=git, managed_by=edpm_ansible, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, maintainer=Red Hat, Inc., container_name=kepler, io.buildah.version=1.29.0, summary=Provides the latest release of Red Hat Universal Base Image 9., io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.tags=base rhel9)
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.742 2 INFO nova.virt.libvirt.driver [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Deleting instance files /var/lib/nova/instances/49d4f343-d1b4-4594-96d2-0777a5ce8581_del
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.743 2 INFO nova.virt.libvirt.driver [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Deletion of /var/lib/nova/instances/49d4f343-d1b4-4594-96d2-0777a5ce8581_del complete
Oct 11 02:40:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e137 do_prune osdmap full prune enabled
Oct 11 02:40:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e138 e138: 3 total, 3 up, 3 in
Oct 11 02:40:49 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e138: 3 total, 3 up, 3 in
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.921 2 INFO nova.compute.manager [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Took 1.15 seconds to destroy the instance on the hypervisor.
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.922 2 DEBUG oslo.service.loopingcall [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Waiting for function nova.compute.manager.ComputeManager._try_deallocate_network.<locals>._deallocate_network_with_retries to return. func /usr/lib/python3.9/site-packages/oslo_service/loopingcall.py:435
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.922 2 DEBUG nova.compute.manager [-] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Deallocating network for instance _deallocate_network /usr/lib/python3.9/site-packages/nova/compute/manager.py:2259
Oct 11 02:40:49 compute-0 nova_compute[356901]: 2025-10-11 02:40:49.923 2 DEBUG nova.network.neutron [-] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] deallocate_for_instance() deallocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1803
Oct 11 02:40:50 compute-0 nova_compute[356901]: 2025-10-11 02:40:50.075 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:50 compute-0 nova_compute[356901]: 2025-10-11 02:40:50.115 2 DEBUG nova.compute.manager [req-e2c8b7ee-93b3-4421-8695-00316d7bb06c req-791ebca6-a08f-4260-a5c5-8b4ef9fc5e80 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Received event network-vif-unplugged-4076fda2-be62-4c52-b073-8bf26574dee1 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:40:50 compute-0 nova_compute[356901]: 2025-10-11 02:40:50.115 2 DEBUG oslo_concurrency.lockutils [req-e2c8b7ee-93b3-4421-8695-00316d7bb06c req-791ebca6-a08f-4260-a5c5-8b4ef9fc5e80 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:50 compute-0 nova_compute[356901]: 2025-10-11 02:40:50.115 2 DEBUG oslo_concurrency.lockutils [req-e2c8b7ee-93b3-4421-8695-00316d7bb06c req-791ebca6-a08f-4260-a5c5-8b4ef9fc5e80 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:50 compute-0 nova_compute[356901]: 2025-10-11 02:40:50.116 2 DEBUG oslo_concurrency.lockutils [req-e2c8b7ee-93b3-4421-8695-00316d7bb06c req-791ebca6-a08f-4260-a5c5-8b4ef9fc5e80 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:50 compute-0 nova_compute[356901]: 2025-10-11 02:40:50.116 2 DEBUG nova.compute.manager [req-e2c8b7ee-93b3-4421-8695-00316d7bb06c req-791ebca6-a08f-4260-a5c5-8b4ef9fc5e80 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] No waiting events found dispatching network-vif-unplugged-4076fda2-be62-4c52-b073-8bf26574dee1 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:40:50 compute-0 nova_compute[356901]: 2025-10-11 02:40:50.116 2 DEBUG nova.compute.manager [req-e2c8b7ee-93b3-4421-8695-00316d7bb06c req-791ebca6-a08f-4260-a5c5-8b4ef9fc5e80 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Received event network-vif-unplugged-4076fda2-be62-4c52-b073-8bf26574dee1 for instance with task_state deleting. _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10826
Oct 11 02:40:50 compute-0 nova_compute[356901]: 2025-10-11 02:40:50.116 2 DEBUG nova.compute.manager [req-e2c8b7ee-93b3-4421-8695-00316d7bb06c req-791ebca6-a08f-4260-a5c5-8b4ef9fc5e80 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Received event network-vif-plugged-4076fda2-be62-4c52-b073-8bf26574dee1 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:40:50 compute-0 nova_compute[356901]: 2025-10-11 02:40:50.117 2 DEBUG oslo_concurrency.lockutils [req-e2c8b7ee-93b3-4421-8695-00316d7bb06c req-791ebca6-a08f-4260-a5c5-8b4ef9fc5e80 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:50 compute-0 nova_compute[356901]: 2025-10-11 02:40:50.117 2 DEBUG oslo_concurrency.lockutils [req-e2c8b7ee-93b3-4421-8695-00316d7bb06c req-791ebca6-a08f-4260-a5c5-8b4ef9fc5e80 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:50 compute-0 nova_compute[356901]: 2025-10-11 02:40:50.117 2 DEBUG oslo_concurrency.lockutils [req-e2c8b7ee-93b3-4421-8695-00316d7bb06c req-791ebca6-a08f-4260-a5c5-8b4ef9fc5e80 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "49d4f343-d1b4-4594-96d2-0777a5ce8581-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:50 compute-0 nova_compute[356901]: 2025-10-11 02:40:50.117 2 DEBUG nova.compute.manager [req-e2c8b7ee-93b3-4421-8695-00316d7bb06c req-791ebca6-a08f-4260-a5c5-8b4ef9fc5e80 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] No waiting events found dispatching network-vif-plugged-4076fda2-be62-4c52-b073-8bf26574dee1 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:40:50 compute-0 nova_compute[356901]: 2025-10-11 02:40:50.117 2 WARNING nova.compute.manager [req-e2c8b7ee-93b3-4421-8695-00316d7bb06c req-791ebca6-a08f-4260-a5c5-8b4ef9fc5e80 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Received unexpected event network-vif-plugged-4076fda2-be62-4c52-b073-8bf26574dee1 for instance with vm_state active and task_state deleting.
Oct 11 02:40:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1895: 321 pgs: 321 active+clean; 357 MiB data, 444 MiB used, 60 GiB / 60 GiB avail; 2.3 MiB/s rd, 4.5 KiB/s wr, 86 op/s
Oct 11 02:40:50 compute-0 ceph-mon[191930]: osdmap e138: 3 total, 3 up, 3 in
Oct 11 02:40:50 compute-0 ceph-mon[191930]: pgmap v1895: 321 pgs: 321 active+clean; 357 MiB data, 444 MiB used, 60 GiB / 60 GiB avail; 2.3 MiB/s rd, 4.5 KiB/s wr, 86 op/s
Oct 11 02:40:51 compute-0 nova_compute[356901]: 2025-10-11 02:40:51.206 2 DEBUG nova.network.neutron [-] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Updating instance_info_cache with network_info: [] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:40:51 compute-0 nova_compute[356901]: 2025-10-11 02:40:51.220 2 INFO nova.compute.manager [-] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Took 1.30 seconds to deallocate network for instance.
Oct 11 02:40:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e138 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:40:51 compute-0 nova_compute[356901]: 2025-10-11 02:40:51.267 2 DEBUG oslo_concurrency.lockutils [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.update_usage" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:51 compute-0 nova_compute[356901]: 2025-10-11 02:40:51.268 2 DEBUG oslo_concurrency.lockutils [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:51 compute-0 nova_compute[356901]: 2025-10-11 02:40:51.405 2 DEBUG oslo_concurrency.processutils [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:40:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:40:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2930191983' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:40:51 compute-0 nova_compute[356901]: 2025-10-11 02:40:51.870 2 DEBUG oslo_concurrency.processutils [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.465s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:40:51 compute-0 nova_compute[356901]: 2025-10-11 02:40:51.884 2 DEBUG nova.compute.provider_tree [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:40:51 compute-0 nova_compute[356901]: 2025-10-11 02:40:51.907 2 DEBUG nova.scheduler.client.report [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:40:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e138 do_prune osdmap full prune enabled
Oct 11 02:40:51 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2930191983' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:40:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e139 e139: 3 total, 3 up, 3 in
Oct 11 02:40:51 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e139: 3 total, 3 up, 3 in
Oct 11 02:40:51 compute-0 nova_compute[356901]: 2025-10-11 02:40:51.940 2 DEBUG oslo_concurrency.lockutils [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: held 0.673s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:51 compute-0 nova_compute[356901]: 2025-10-11 02:40:51.980 2 INFO nova.scheduler.client.report [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Deleted allocations for instance 49d4f343-d1b4-4594-96d2-0777a5ce8581
Oct 11 02:40:52 compute-0 nova_compute[356901]: 2025-10-11 02:40:52.080 2 DEBUG oslo_concurrency.lockutils [None req-03fa4139-33f7-4129-9afe-02ced3840156 9a1414c7b75246f596af7745610a00a4 d89911bf2931487c98dc0f44a8b67bca - - default default] Lock "49d4f343-d1b4-4594-96d2-0777a5ce8581" "released" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: held 3.313s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:52 compute-0 nova_compute[356901]: 2025-10-11 02:40:52.230 2 DEBUG nova.compute.manager [req-7e636358-a037-49f6-a6ac-04f02e6d7871 req-9cb3e340-c181-4b2d-9141-c91e9dd447c8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Received event network-vif-deleted-4076fda2-be62-4c52-b073-8bf26574dee1 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:40:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1897: 321 pgs: 321 active+clean; 329 MiB data, 431 MiB used, 60 GiB / 60 GiB avail; 2.8 MiB/s rd, 4.6 KiB/s wr, 130 op/s
Oct 11 02:40:52 compute-0 ceph-mon[191930]: osdmap e139: 3 total, 3 up, 3 in
Oct 11 02:40:52 compute-0 ceph-mon[191930]: pgmap v1897: 321 pgs: 321 active+clean; 329 MiB data, 431 MiB used, 60 GiB / 60 GiB avail; 2.8 MiB/s rd, 4.6 KiB/s wr, 130 op/s
Oct 11 02:40:54 compute-0 nova_compute[356901]: 2025-10-11 02:40:54.051 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1898: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 1.8 MiB/s rd, 6.5 KiB/s wr, 153 op/s
Oct 11 02:40:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:54.869 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:40:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:54.870 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:40:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:40:54.872 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:40:54 compute-0 ceph-mon[191930]: pgmap v1898: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 1.8 MiB/s rd, 6.5 KiB/s wr, 153 op/s
Oct 11 02:40:55 compute-0 nova_compute[356901]: 2025-10-11 02:40:55.078 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:55 compute-0 podman[456717]: 2025-10-11 02:40:55.239984923 +0000 UTC m=+0.120952843 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:40:55 compute-0 podman[456719]: 2025-10-11 02:40:55.247798779 +0000 UTC m=+0.122489366 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4, tcib_managed=true, config_id=edpm, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007)
Oct 11 02:40:55 compute-0 podman[456720]: 2025-10-11 02:40:55.252429137 +0000 UTC m=+0.108289843 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, container_name=ovn_metadata_agent, managed_by=edpm_ansible, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent)
Oct 11 02:40:55 compute-0 podman[456718]: 2025-10-11 02:40:55.277482539 +0000 UTC m=+0.153994256 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, org.label-schema.license=GPLv2)
Oct 11 02:40:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e139 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #87. Immutable memtables: 0.
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:40:56.259976) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 49] Flushing memtable with next log file: 87
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150456260076, "job": 49, "event": "flush_started", "num_memtables": 1, "num_entries": 1081, "num_deletes": 250, "total_data_size": 1491636, "memory_usage": 1517192, "flush_reason": "Manual Compaction"}
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 49] Level-0 flush table #88: started
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150456268961, "cf_name": "default", "job": 49, "event": "table_file_creation", "file_number": 88, "file_size": 926402, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 37954, "largest_seqno": 39034, "table_properties": {"data_size": 922175, "index_size": 1751, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1413, "raw_key_size": 11292, "raw_average_key_size": 20, "raw_value_size": 913000, "raw_average_value_size": 1687, "num_data_blocks": 79, "num_entries": 541, "num_filter_entries": 541, "num_deletions": 250, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760150361, "oldest_key_time": 1760150361, "file_creation_time": 1760150456, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 88, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 49] Flush lasted 9017 microseconds, and 4079 cpu microseconds.
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:40:56.269009) [db/flush_job.cc:967] [default] [JOB 49] Level-0 flush table #88: 926402 bytes OK
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:40:56.269029) [db/memtable_list.cc:519] [default] Level-0 commit table #88 started
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:40:56.270946) [db/memtable_list.cc:722] [default] Level-0 commit table #88: memtable #1 done
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:40:56.270958) EVENT_LOG_v1 {"time_micros": 1760150456270954, "job": 49, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:40:56.270975) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 49] Try to delete WAL files size 1486579, prev total WAL file size 1486579, number of live WAL files 2.
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000084.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:40:56.272738) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6D6772737461740031353034' seq:72057594037927935, type:22 .. '6D6772737461740031373535' seq:0, type:0; will stop at (end)
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 50] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 49 Base level 0, inputs: [88(904KB)], [86(10044KB)]
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150456272830, "job": 50, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [88], "files_L6": [86], "score": -1, "input_data_size": 11212008, "oldest_snapshot_seqno": -1}
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 50] Generated table #89: 5803 keys, 8450803 bytes, temperature: kUnknown
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150456361946, "cf_name": "default", "job": 50, "event": "table_file_creation", "file_number": 89, "file_size": 8450803, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 8413691, "index_size": 21514, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 14533, "raw_key_size": 146983, "raw_average_key_size": 25, "raw_value_size": 8310469, "raw_average_value_size": 1432, "num_data_blocks": 888, "num_entries": 5803, "num_filter_entries": 5803, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760150456, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 89, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:40:56.362445) [db/compaction/compaction_job.cc:1663] [default] [JOB 50] Compacted 1@0 + 1@6 files to L6 => 8450803 bytes
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:40:56.368153) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 125.7 rd, 94.7 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(0.9, 9.8 +0.0 blob) out(8.1 +0.0 blob), read-write-amplify(21.2) write-amplify(9.1) OK, records in: 6278, records dropped: 475 output_compression: NoCompression
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:40:56.368187) EVENT_LOG_v1 {"time_micros": 1760150456368171, "job": 50, "event": "compaction_finished", "compaction_time_micros": 89228, "compaction_time_cpu_micros": 40083, "output_level": 6, "num_output_files": 1, "total_output_size": 8450803, "num_input_records": 6278, "num_output_records": 5803, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000088.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150456368790, "job": 50, "event": "table_file_deletion", "file_number": 88}
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000086.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150456372451, "job": 50, "event": "table_file_deletion", "file_number": 86}
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:40:56.272425) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:40:56.373055) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:40:56.373066) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:40:56.373069) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:40:56.373072) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:40:56 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:40:56.373076) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:40:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:40:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:40:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:40:56
Oct 11 02:40:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:40:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:40:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['volumes', '.rgw.root', 'default.rgw.control', '.mgr', 'images', 'backups', 'cephfs.cephfs.data', 'default.rgw.meta', 'default.rgw.log', 'cephfs.cephfs.meta', 'vms']
Oct 11 02:40:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:40:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:40:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:40:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:40:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:40:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1899: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 1.2 MiB/s rd, 7.1 KiB/s wr, 141 op/s
Oct 11 02:40:57 compute-0 ovn_controller[88370]: 2025-10-11T02:40:57Z|00136|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:40:57 compute-0 ovn_controller[88370]: 2025-10-11T02:40:57Z|00137|binding|INFO|Releasing lport aa37c6ed-d2db-4ed4-b1c9-cfd071cfd96a from this chassis (sb_readonly=0)
Oct 11 02:40:57 compute-0 ovn_controller[88370]: 2025-10-11T02:40:57Z|00138|binding|INFO|Releasing lport 896fe5e8-8895-492a-9e5f-23d2477d5716 from this chassis (sb_readonly=0)
Oct 11 02:40:57 compute-0 nova_compute[356901]: 2025-10-11 02:40:57.247 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:57 compute-0 ceph-mon[191930]: pgmap v1899: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 1.2 MiB/s rd, 7.1 KiB/s wr, 141 op/s
Oct 11 02:40:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:40:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:40:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:40:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:40:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:40:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:40:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:40:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:40:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:40:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:40:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1900: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 65 KiB/s rd, 6.4 KiB/s wr, 91 op/s
Oct 11 02:40:58 compute-0 ceph-mon[191930]: pgmap v1900: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 65 KiB/s rd, 6.4 KiB/s wr, 91 op/s
Oct 11 02:40:58 compute-0 nova_compute[356901]: 2025-10-11 02:40:58.967 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:40:59 compute-0 nova_compute[356901]: 2025-10-11 02:40:59.057 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:40:59 compute-0 podman[157119]: time="2025-10-11T02:40:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:40:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:40:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 48733 "" "Go-http-client/1.1"
Oct 11 02:40:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:40:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9995 "" "Go-http-client/1.1"
Oct 11 02:41:00 compute-0 nova_compute[356901]: 2025-10-11 02:41:00.081 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1901: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 57 KiB/s rd, 5.7 KiB/s wr, 81 op/s
Oct 11 02:41:00 compute-0 ceph-mon[191930]: pgmap v1901: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 57 KiB/s rd, 5.7 KiB/s wr, 81 op/s
Oct 11 02:41:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e139 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 322961408
Oct 11 02:41:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e139 do_prune osdmap full prune enabled
Oct 11 02:41:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 e140: 3 total, 3 up, 3 in
Oct 11 02:41:01 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e140: 3 total, 3 up, 3 in
Oct 11 02:41:01 compute-0 openstack_network_exporter[374316]: ERROR   02:41:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:41:01 compute-0 openstack_network_exporter[374316]: ERROR   02:41:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:41:01 compute-0 openstack_network_exporter[374316]: ERROR   02:41:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:41:01 compute-0 openstack_network_exporter[374316]: ERROR   02:41:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:41:01 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:41:01 compute-0 openstack_network_exporter[374316]: ERROR   02:41:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:41:01 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:41:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:01.785 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=13, options={'arp_ns_explicit_output': 'true', 'mac_prefix': 'fe:55:97', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'ce:9c:4f:b4:85:9b'}, ipsec=False) old=SB_Global(nb_cfg=12) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:41:01 compute-0 nova_compute[356901]: 2025-10-11 02:41:01.786 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:01.787 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 8 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274
Oct 11 02:41:02 compute-0 podman[456800]: 2025-10-11 02:41:02.234971133 +0000 UTC m=+0.114753402 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=iscsid, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, container_name=iscsid, org.label-schema.build-date=20251009)
Oct 11 02:41:02 compute-0 podman[456799]: 2025-10-11 02:41:02.269147277 +0000 UTC m=+0.153625376 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, container_name=multipathd, org.label-schema.license=GPLv2)
Oct 11 02:41:02 compute-0 ceph-mon[191930]: osdmap e140: 3 total, 3 up, 3 in
Oct 11 02:41:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1903: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 4.0 KiB/s wr, 51 op/s
Oct 11 02:41:02 compute-0 nova_compute[356901]: 2025-10-11 02:41:02.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:41:03 compute-0 ceph-mon[191930]: pgmap v1903: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 4.0 KiB/s wr, 51 op/s
Oct 11 02:41:04 compute-0 nova_compute[356901]: 2025-10-11 02:41:04.021 2 DEBUG nova.virt.driver [-] Emitting event <LifecycleEvent: 1760150449.019577, 49d4f343-d1b4-4594-96d2-0777a5ce8581 => Stopped> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:41:04 compute-0 nova_compute[356901]: 2025-10-11 02:41:04.021 2 INFO nova.compute.manager [-] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] VM Stopped (Lifecycle Event)
Oct 11 02:41:04 compute-0 nova_compute[356901]: 2025-10-11 02:41:04.042 2 DEBUG nova.compute.manager [None req-5632757f-cdd0-40c1-9d0e-d644426e9d4c - - - - - -] [instance: 49d4f343-d1b4-4594-96d2-0777a5ce8581] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:41:04 compute-0 nova_compute[356901]: 2025-10-11 02:41:04.061 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:04 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:04.189 286629 DEBUG eventlet.wsgi.server [-] (286629) accepted '' server /usr/lib/python3.9/site-packages/eventlet/wsgi.py:1004
Oct 11 02:41:04 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:04.192 286629 DEBUG neutron.agent.ovn.metadata.server [-] Request: GET /latest/meta-data/public-ipv4 HTTP/1.0
Oct 11 02:41:04 compute-0 ovn_metadata_agent[286344]: Accept: */*
Oct 11 02:41:04 compute-0 ovn_metadata_agent[286344]: Connection: close
Oct 11 02:41:04 compute-0 ovn_metadata_agent[286344]: Content-Type: text/plain
Oct 11 02:41:04 compute-0 ovn_metadata_agent[286344]: Host: 169.254.169.254
Oct 11 02:41:04 compute-0 ovn_metadata_agent[286344]: User-Agent: curl/7.84.0
Oct 11 02:41:04 compute-0 ovn_metadata_agent[286344]: X-Forwarded-For: 10.100.0.11
Oct 11 02:41:04 compute-0 ovn_metadata_agent[286344]: X-Ovn-Network-Id: 42802124-ba47-4b6e-aa91-ecf257e5a54c __call__ /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/server.py:82
Oct 11 02:41:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1904: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 2.7 KiB/s rd, 613 B/s wr, 4 op/s
Oct 11 02:41:04 compute-0 ceph-mon[191930]: pgmap v1904: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 2.7 KiB/s rd, 613 B/s wr, 4 op/s
Oct 11 02:41:05 compute-0 nova_compute[356901]: 2025-10-11 02:41:05.086 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:05.544 286629 DEBUG neutron.agent.ovn.metadata.server [-] <Response [200]> _proxy_request /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/server.py:161
Oct 11 02:41:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:05.546 286629 INFO eventlet.wsgi.server [-] 10.100.0.11,<local> "GET /latest/meta-data/public-ipv4 HTTP/1.1" status: 200  len: 151 time: 1.3542593
Oct 11 02:41:05 compute-0 haproxy-metadata-proxy-42802124-ba47-4b6e-aa91-ecf257e5a54c[454172]: 10.100.0.11:50922 [11/Oct/2025:02:41:04.188] listener listener/metadata 0/0/0/1357/1357 200 135 - - ---- 1/1/0/0/0 0/0 "GET /latest/meta-data/public-ipv4 HTTP/1.1"
Oct 11 02:41:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:05.688 286629 DEBUG eventlet.wsgi.server [-] (286629) accepted '' server /usr/lib/python3.9/site-packages/eventlet/wsgi.py:1004
Oct 11 02:41:05 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:05.690 286629 DEBUG neutron.agent.ovn.metadata.server [-] Request: POST /openstack/2013-10-17/password HTTP/1.0
Oct 11 02:41:05 compute-0 ovn_metadata_agent[286344]: Accept: */*
Oct 11 02:41:05 compute-0 ovn_metadata_agent[286344]: Connection: close
Oct 11 02:41:05 compute-0 ovn_metadata_agent[286344]: Content-Length: 100
Oct 11 02:41:05 compute-0 ovn_metadata_agent[286344]: Content-Type: application/x-www-form-urlencoded
Oct 11 02:41:05 compute-0 ovn_metadata_agent[286344]: Host: 169.254.169.254
Oct 11 02:41:05 compute-0 ovn_metadata_agent[286344]: User-Agent: curl/7.84.0
Oct 11 02:41:05 compute-0 ovn_metadata_agent[286344]: X-Forwarded-For: 10.100.0.11
Oct 11 02:41:05 compute-0 ovn_metadata_agent[286344]: X-Ovn-Network-Id: 42802124-ba47-4b6e-aa91-ecf257e5a54c
Oct 11 02:41:05 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:41:05 compute-0 ovn_metadata_agent[286344]: testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest __call__ /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/server.py:82
Oct 11 02:41:05 compute-0 nova_compute[356901]: 2025-10-11 02:41:05.808 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:06 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:06.045 286629 DEBUG neutron.agent.ovn.metadata.server [-] <Response [200]> _proxy_request /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/server.py:161
Oct 11 02:41:06 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:06.046 286629 INFO eventlet.wsgi.server [-] 10.100.0.11,<local> "POST /openstack/2013-10-17/password HTTP/1.1" status: 200  len: 134 time: 0.3565185
Oct 11 02:41:06 compute-0 haproxy-metadata-proxy-42802124-ba47-4b6e-aa91-ecf257e5a54c[454172]: 10.100.0.11:50938 [11/Oct/2025:02:41:05.686] listener listener/metadata 0/0/0/359/359 200 118 - - ---- 1/1/0/0/0 0/0 "POST /openstack/2013-10-17/password HTTP/1.1"
Oct 11 02:41:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:41:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1905: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 818 B/s rd, 920 B/s wr, 0 op/s
Oct 11 02:41:06 compute-0 ceph-mon[191930]: pgmap v1905: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 818 B/s rd, 920 B/s wr, 0 op/s
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #90. Immutable memtables: 0.
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:06.899618) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 51] Flushing memtable with next log file: 90
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150466899726, "job": 51, "event": "flush_started", "num_memtables": 1, "num_entries": 359, "num_deletes": 251, "total_data_size": 196158, "memory_usage": 203000, "flush_reason": "Manual Compaction"}
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 51] Level-0 flush table #91: started
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150466905037, "cf_name": "default", "job": 51, "event": "table_file_creation", "file_number": 91, "file_size": 194505, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 39035, "largest_seqno": 39393, "table_properties": {"data_size": 192291, "index_size": 378, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 773, "raw_key_size": 5647, "raw_average_key_size": 18, "raw_value_size": 187810, "raw_average_value_size": 623, "num_data_blocks": 17, "num_entries": 301, "num_filter_entries": 301, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760150457, "oldest_key_time": 1760150457, "file_creation_time": 1760150466, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 91, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 51] Flush lasted 5442 microseconds, and 1762 cpu microseconds.
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:06.905079) [db/flush_job.cc:967] [default] [JOB 51] Level-0 flush table #91: 194505 bytes OK
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:06.905097) [db/memtable_list.cc:519] [default] Level-0 commit table #91 started
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:06.907134) [db/memtable_list.cc:722] [default] Level-0 commit table #91: memtable #1 done
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:06.907151) EVENT_LOG_v1 {"time_micros": 1760150466907146, "job": 51, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:06.907172) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 51] Try to delete WAL files size 193766, prev total WAL file size 193766, number of live WAL files 2.
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000087.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:06.908005) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730033353134' seq:72057594037927935, type:22 .. '7061786F730033373636' seq:0, type:0; will stop at (end)
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 52] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 51 Base level 0, inputs: [91(189KB)], [89(8252KB)]
Oct 11 02:41:06 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150466908135, "job": 52, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [91], "files_L6": [89], "score": -1, "input_data_size": 8645308, "oldest_snapshot_seqno": -1}
Oct 11 02:41:07 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 52] Generated table #92: 5590 keys, 6924287 bytes, temperature: kUnknown
Oct 11 02:41:07 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150467000391, "cf_name": "default", "job": 52, "event": "table_file_creation", "file_number": 92, "file_size": 6924287, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 6890115, "index_size": 19086, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 14021, "raw_key_size": 143256, "raw_average_key_size": 25, "raw_value_size": 6792099, "raw_average_value_size": 1215, "num_data_blocks": 777, "num_entries": 5590, "num_filter_entries": 5590, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760150466, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 92, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:41:07 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:41:07 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:07.001270) [db/compaction/compaction_job.cc:1663] [default] [JOB 52] Compacted 1@0 + 1@6 files to L6 => 6924287 bytes
Oct 11 02:41:07 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:07.003612) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 93.1 rd, 74.6 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(0.2, 8.1 +0.0 blob) out(6.6 +0.0 blob), read-write-amplify(80.0) write-amplify(35.6) OK, records in: 6104, records dropped: 514 output_compression: NoCompression
Oct 11 02:41:07 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:07.003629) EVENT_LOG_v1 {"time_micros": 1760150467003620, "job": 52, "event": "compaction_finished", "compaction_time_micros": 92873, "compaction_time_cpu_micros": 37859, "output_level": 6, "num_output_files": 1, "total_output_size": 6924287, "num_input_records": 6104, "num_output_records": 5590, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:41:07 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000091.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:41:07 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150467004070, "job": 52, "event": "table_file_deletion", "file_number": 91}
Oct 11 02:41:07 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000089.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:41:07 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150467006135, "job": 52, "event": "table_file_deletion", "file_number": 89}
Oct 11 02:41:07 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:06.907717) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:41:07 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:07.006402) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:41:07 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:07.006411) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:41:07 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:07.006413) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:41:07 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:07.006415) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:41:07 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:07.006417) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.002071212944422739 of space, bias 1.0, pg target 0.6213638833268217 quantized to 32 (current 32)
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0009191400908380543 of space, bias 1.0, pg target 0.2757420272514163 quantized to 32 (current 32)
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:41:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:41:07 compute-0 nova_compute[356901]: 2025-10-11 02:41:07.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.293 2 DEBUG oslo_concurrency.lockutils [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Acquiring lock "5279e85f-e35b-4ddd-8336-7f483712f743" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.295 2 DEBUG oslo_concurrency.lockutils [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lock "5279e85f-e35b-4ddd-8336-7f483712f743" acquired by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.295 2 DEBUG oslo_concurrency.lockutils [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Acquiring lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.296 2 DEBUG oslo_concurrency.lockutils [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" acquired by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.296 2 DEBUG oslo_concurrency.lockutils [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" "released" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.298 2 INFO nova.compute.manager [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Terminating instance
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.299 2 DEBUG nova.compute.manager [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Start destroying the instance on the hypervisor. _shutdown_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:3120
Oct 11 02:41:08 compute-0 kernel: tapd690bf02-80 (unregistering): left promiscuous mode
Oct 11 02:41:08 compute-0 NetworkManager[44908]: <info>  [1760150468.4011] device (tapd690bf02-80): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Oct 11 02:41:08 compute-0 ovn_controller[88370]: 2025-10-11T02:41:08Z|00139|binding|INFO|Releasing lport d690bf02-80b8-4bb8-808f-ccc93f22c545 from this chassis (sb_readonly=0)
Oct 11 02:41:08 compute-0 ovn_controller[88370]: 2025-10-11T02:41:08Z|00140|binding|INFO|Setting lport d690bf02-80b8-4bb8-808f-ccc93f22c545 down in Southbound
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.410 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:08 compute-0 ovn_controller[88370]: 2025-10-11T02:41:08Z|00141|binding|INFO|Removing iface tapd690bf02-80 ovn-installed in OVS
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.416 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:08 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:08.419 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:1e:a3:c2 10.100.0.11'], port_security=['fa:16:3e:1e:a3:c2 10.100.0.11'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.11/28', 'neutron:device_id': '5279e85f-e35b-4ddd-8336-7f483712f743', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-42802124-ba47-4b6e-aa91-ecf257e5a54c', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': '4baea94e1c7d43e699eaac33512a8105', 'neutron:revision_number': '4', 'neutron:security_group_ids': '296fb7dc-64e4-4a98-a3da-17dca5d61e60 e089678a-fa1c-4efb-93a6-d0762a85e1fe', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:host_id': 'compute-0.ctlplane.example.com', 'neutron:port_fip': '192.168.122.226'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=691c8fe6-f576-4439-8b00-8821d031fd8a, chassis=[], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=d690bf02-80b8-4bb8-808f-ccc93f22c545) old=Port_Binding(up=[True], chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:41:08 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:08.421 286362 INFO neutron.agent.ovn.metadata.agent [-] Port d690bf02-80b8-4bb8-808f-ccc93f22c545 in datapath 42802124-ba47-4b6e-aa91-ecf257e5a54c unbound from our chassis
Oct 11 02:41:08 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:08.431 286362 DEBUG neutron.agent.ovn.metadata.agent [-] No valid VIF ports were found for network 42802124-ba47-4b6e-aa91-ecf257e5a54c, tearing the namespace down if needed _get_provision_params /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:628
Oct 11 02:41:08 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:08.432 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[f4c62596-8bcb-466c-903a-15616efe8e46]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:08 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:08.433 286362 INFO neutron.agent.ovn.metadata.agent [-] Cleaning up ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c namespace which is not needed anymore
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.448 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:08 compute-0 systemd[1]: machine-qemu\x2d10\x2dinstance\x2d0000000a.scope: Deactivated successfully.
Oct 11 02:41:08 compute-0 systemd[1]: machine-qemu\x2d10\x2dinstance\x2d0000000a.scope: Consumed 44.678s CPU time.
Oct 11 02:41:08 compute-0 systemd-machined[137586]: Machine qemu-10-instance-0000000a terminated.
Oct 11 02:41:08 compute-0 neutron-haproxy-ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c[454166]: [NOTICE]   (454170) : haproxy version is 2.8.14-c23fe91
Oct 11 02:41:08 compute-0 neutron-haproxy-ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c[454166]: [NOTICE]   (454170) : path to executable is /usr/sbin/haproxy
Oct 11 02:41:08 compute-0 neutron-haproxy-ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c[454166]: [WARNING]  (454170) : Exiting Master process...
Oct 11 02:41:08 compute-0 neutron-haproxy-ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c[454166]: [ALERT]    (454170) : Current worker (454172) exited with code 143 (Terminated)
Oct 11 02:41:08 compute-0 neutron-haproxy-ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c[454166]: [WARNING]  (454170) : All workers exited. Exiting... (0)
Oct 11 02:41:08 compute-0 systemd[1]: libpod-4286e5f3292bbeb655ac3c1bc515f36eda23a9dac6671a880e42a0335a35c9f3.scope: Deactivated successfully.
Oct 11 02:41:08 compute-0 podman[456858]: 2025-10-11 02:41:08.759856394 +0000 UTC m=+0.123583525 container died 4286e5f3292bbeb655ac3c1bc515f36eda23a9dac6671a880e42a0335a35c9f3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.760 2 INFO nova.virt.libvirt.driver [-] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Instance destroyed successfully.
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.761 2 DEBUG nova.objects.instance [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lazy-loading 'resources' on Instance uuid 5279e85f-e35b-4ddd-8336-7f483712f743 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.767 2 DEBUG nova.compute.manager [req-a3a4379b-76a0-4faf-8bb1-6fc75999e75e req-68c44fe2-085d-4110-ab5d-6c45516ea474 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Received event network-vif-unplugged-d690bf02-80b8-4bb8-808f-ccc93f22c545 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.768 2 DEBUG oslo_concurrency.lockutils [req-a3a4379b-76a0-4faf-8bb1-6fc75999e75e req-68c44fe2-085d-4110-ab5d-6c45516ea474 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.769 2 DEBUG oslo_concurrency.lockutils [req-a3a4379b-76a0-4faf-8bb1-6fc75999e75e req-68c44fe2-085d-4110-ab5d-6c45516ea474 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.769 2 DEBUG oslo_concurrency.lockutils [req-a3a4379b-76a0-4faf-8bb1-6fc75999e75e req-68c44fe2-085d-4110-ab5d-6c45516ea474 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.770 2 DEBUG nova.compute.manager [req-a3a4379b-76a0-4faf-8bb1-6fc75999e75e req-68c44fe2-085d-4110-ab5d-6c45516ea474 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] No waiting events found dispatching network-vif-unplugged-d690bf02-80b8-4bb8-808f-ccc93f22c545 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.770 2 DEBUG nova.compute.manager [req-a3a4379b-76a0-4faf-8bb1-6fc75999e75e req-68c44fe2-085d-4110-ab5d-6c45516ea474 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Received event network-vif-unplugged-d690bf02-80b8-4bb8-808f-ccc93f22c545 for instance with task_state deleting. _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10826
Oct 11 02:41:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1906: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 5.6 KiB/s rd, 1023 B/s wr, 3 op/s
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.798 2 DEBUG nova.virt.libvirt.vif [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:39:42Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=<?>,disable_terminate=False,display_description='tempest-TestServerBasicOps-server-1008904616',display_name='tempest-TestServerBasicOps-server-1008904616',ec2_ids=<?>,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-testserverbasicops-server-1008904616',id=10,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBOjuzQ3LnonURCqkkZftPTP2CVQZ094fsP/gjb0J12Q/oIXYRgm8hjl34CsIRDDXbEoycnqFNSnY3e5ccNwSkdNNg8FJr2lU+/xGbcHEKbP5d2E6j/rpPqS548dzP5mkuw==',key_name='tempest-TestServerBasicOps-125864562',keypairs=<?>,launch_index=0,launched_at=2025-10-11T02:39:55Z,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={meta1='data1',meta2='data2',metaN='dataN'},migration_context=<?>,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=<?>,power_state=1,progress=0,project_id='4baea94e1c7d43e699eaac33512a8105',ramdisk_id='',reservation_id='r-u1w43ywp',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_cdrom_bus='sata',image_hw_disk_bus='virtio',image_hw_input_bus='usb',image_hw_machine_type='q35',image_hw_pointer_model='usbtablet',image_hw_rng_model='virtio',image_hw_video_model='virtio',image_hw_vif_model='virtio',image_min_disk='1',image_min_ram='0',owner_project_name='tempest-TestServerBasicOps-633159161',owner_user_name='tempest-TestServerBasicOps-633159161-project-member',password_0='testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest',password_1='',password_2='',password_3=''},tags=<?>,task_state='deleting',terminated_at=None,trusted_certs=<?>,updated_at=2025-10-11T02:41:05Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='7c06b99eac5242ddb9501f51d87567d2',uuid=5279e85f-e35b-4ddd-8336-7f483712f743,vcpu_model=<?>,vcpus=1,vm_mode=None,vm_state='active') vif={"id": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "address": "fa:16:3e:1e:a3:c2", "network": {"id": "42802124-ba47-4b6e-aa91-ecf257e5a54c", "bridge": "br-int", "label": "tempest-TestServerBasicOps-246725932-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.11", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.226", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "4baea94e1c7d43e699eaac33512a8105", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd690bf02-80", "ovs_interfaceid": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} unplug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:828
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.798 2 DEBUG nova.network.os_vif_util [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Converting VIF {"id": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "address": "fa:16:3e:1e:a3:c2", "network": {"id": "42802124-ba47-4b6e-aa91-ecf257e5a54c", "bridge": "br-int", "label": "tempest-TestServerBasicOps-246725932-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.11", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.226", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "4baea94e1c7d43e699eaac33512a8105", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd690bf02-80", "ovs_interfaceid": "d690bf02-80b8-4bb8-808f-ccc93f22c545", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.799 2 DEBUG nova.network.os_vif_util [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Converted object VIFOpenVSwitch(active=True,address=fa:16:3e:1e:a3:c2,bridge_name='br-int',has_traffic_filtering=True,id=d690bf02-80b8-4bb8-808f-ccc93f22c545,network=Network(42802124-ba47-4b6e-aa91-ecf257e5a54c),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd690bf02-80') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.800 2 DEBUG os_vif [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Unplugging vif VIFOpenVSwitch(active=True,address=fa:16:3e:1e:a3:c2,bridge_name='br-int',has_traffic_filtering=True,id=d690bf02-80b8-4bb8-808f-ccc93f22c545,network=Network(42802124-ba47-4b6e-aa91-ecf257e5a54c),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd690bf02-80') unplug /usr/lib/python3.9/site-packages/os_vif/__init__.py:109
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.804 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.806 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapd690bf02-80, bridge=br-int, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.808 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.811 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:08 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-4286e5f3292bbeb655ac3c1bc515f36eda23a9dac6671a880e42a0335a35c9f3-userdata-shm.mount: Deactivated successfully.
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.819 2 INFO os_vif [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Successfully unplugged vif VIFOpenVSwitch(active=True,address=fa:16:3e:1e:a3:c2,bridge_name='br-int',has_traffic_filtering=True,id=d690bf02-80b8-4bb8-808f-ccc93f22c545,network=Network(42802124-ba47-4b6e-aa91-ecf257e5a54c),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd690bf02-80')
Oct 11 02:41:08 compute-0 systemd[1]: var-lib-containers-storage-overlay-356334884cd4355d05eb94a623c582f2a6197aa2e36e3e9a4b41331a632e46e6-merged.mount: Deactivated successfully.
Oct 11 02:41:08 compute-0 podman[456858]: 2025-10-11 02:41:08.837140639 +0000 UTC m=+0.200867730 container cleanup 4286e5f3292bbeb655ac3c1bc515f36eda23a9dac6671a880e42a0335a35c9f3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3)
Oct 11 02:41:08 compute-0 systemd[1]: libpod-conmon-4286e5f3292bbeb655ac3c1bc515f36eda23a9dac6671a880e42a0335a35c9f3.scope: Deactivated successfully.
Oct 11 02:41:08 compute-0 ceph-mon[191930]: pgmap v1906: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 5.6 KiB/s rd, 1023 B/s wr, 3 op/s
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:41:08 compute-0 podman[456915]: 2025-10-11 02:41:08.951096957 +0000 UTC m=+0.073645345 container remove 4286e5f3292bbeb655ac3c1bc515f36eda23a9dac6671a880e42a0335a35c9f3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:41:08 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:08.964 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[f45f1d8a-b503-4ae9-8ce7-51b6688908fa]: (4, ('Sat Oct 11 02:41:08 AM UTC 2025 Stopping container neutron-haproxy-ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c (4286e5f3292bbeb655ac3c1bc515f36eda23a9dac6671a880e42a0335a35c9f3)\n4286e5f3292bbeb655ac3c1bc515f36eda23a9dac6671a880e42a0335a35c9f3\nSat Oct 11 02:41:08 AM UTC 2025 Deleting container neutron-haproxy-ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c (4286e5f3292bbeb655ac3c1bc515f36eda23a9dac6671a880e42a0335a35c9f3)\n4286e5f3292bbeb655ac3c1bc515f36eda23a9dac6671a880e42a0335a35c9f3\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:08 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:08.969 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[96b21a22-6b58-41ba-8bc5-2f836026bb9b]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:08 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:08.971 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tap42802124-b0, bridge=None, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.974 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:08 compute-0 kernel: tap42802124-b0: left promiscuous mode
Oct 11 02:41:08 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:08.983 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[66e025d4-2e22-4303-a00e-a6c7ffcd58fb]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:08 compute-0 nova_compute[356901]: 2025-10-11 02:41:08.984 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:09 compute-0 nova_compute[356901]: 2025-10-11 02:41:09.010 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:09 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:09.013 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[35d78e7d-c8b3-4616-a59a-cc950cf81509]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:09 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:09.017 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[04075d5a-7780-4313-a00f-3cb7607567b7]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:09 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:09.040 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[667a5afd-d12d-40f4-9562-1be290968209]: (4, [{'family': 0, '__align': (), 'ifi_type': 772, 'index': 1, 'flags': 65609, 'change': 0, 'attrs': [['IFLA_IFNAME', 'lo'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UNKNOWN'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 65536], ['IFLA_MIN_MTU', 0], ['IFLA_MAX_MTU', 0], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 1], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 1], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 0], ['IFLA_CARRIER_UP_COUNT', 0], ['IFLA_CARRIER_DOWN_COUNT', 0], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', '00:00:00:00:00:00'], ['IFLA_BROADCAST', '00:00:00:00:00:00'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 1, 'nopolicy': 1, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 674164, 'reachable_time': 42331, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 65536, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 4294967295, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 4294967295, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 0, 'inoctets': 0, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 0, 'outoctets': 0, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 0, 'outmcastpkts': 0, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 0, 'outmcastoctets': 0, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 0, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 0, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1404, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 456932, 'error': None, 'target': 'ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:09 compute-0 systemd[1]: run-netns-ovnmeta\x2d42802124\x2dba47\x2d4b6e\x2daa91\x2decf257e5a54c.mount: Deactivated successfully.
Oct 11 02:41:09 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:09.045 286647 DEBUG neutron.privileged.agent.linux.ip_lib [-] Namespace ovnmeta-42802124-ba47-4b6e-aa91-ecf257e5a54c deleted. remove_netns /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:607
Oct 11 02:41:09 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:09.046 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[32ebded1-76bf-44e8-910b-58c1526f185b]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:41:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3600.0 total, 600.0 interval
                                            Cumulative writes: 8663 writes, 39K keys, 8663 commit groups, 1.0 writes per commit group, ingest: 0.05 GB, 0.01 MB/s
                                            Cumulative WAL: 8663 writes, 8663 syncs, 1.00 writes per sync, written: 0.05 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 1371 writes, 6682 keys, 1371 commit groups, 1.0 writes per commit group, ingest: 8.77 MB, 0.01 MB/s
                                            Interval WAL: 1371 writes, 1371 syncs, 1.00 writes per sync, written: 0.01 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0     85.3      0.56              0.21        26    0.022       0      0       0.0       0.0
                                              L6      1/0    6.60 MB   0.0      0.2     0.0      0.2       0.2      0.0       0.0   3.9    134.0    109.6      1.70              0.84        25    0.068    128K    13K       0.0       0.0
                                             Sum      1/0    6.60 MB   0.0      0.2     0.0      0.2       0.2      0.1       0.0   4.9    100.7    103.6      2.26              1.05        51    0.044    128K    13K       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.1     0.0      0.1       0.1      0.0       0.0   7.4     70.2     68.3      0.96              0.29        14    0.069     42K   3569       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Low      0/0    0.00 KB   0.0      0.2     0.0      0.2       0.2      0.0       0.0   0.0    134.0    109.6      1.70              0.84        25    0.068    128K    13K       0.0       0.0
                                            High      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0     85.8      0.56              0.21        25    0.022       0      0       0.0       0.0
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0     12.0      0.00              0.00         1    0.004       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 3600.0 total, 600.0 interval
                                            Flush(GB): cumulative 0.047, interval 0.009
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.23 GB write, 0.07 MB/s write, 0.22 GB read, 0.06 MB/s read, 2.3 seconds
                                            Interval compaction: 0.06 GB write, 0.11 MB/s write, 0.07 GB read, 0.11 MB/s read, 1.0 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x55816e47f1f0#2 capacity: 304.00 MB usage: 27.45 MB table_size: 0 occupancy: 18446744073709551615 collections: 7 last_copies: 0 last_secs: 0.000238 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(1757,26.49 MB,8.71234%) FilterBlock(52,363.98 KB,0.116925%) IndexBlock(52,623.80 KB,0.200387%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
Oct 11 02:41:09 compute-0 nova_compute[356901]: 2025-10-11 02:41:09.534 2 INFO nova.virt.libvirt.driver [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Deleting instance files /var/lib/nova/instances/5279e85f-e35b-4ddd-8336-7f483712f743_del
Oct 11 02:41:09 compute-0 nova_compute[356901]: 2025-10-11 02:41:09.535 2 INFO nova.virt.libvirt.driver [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Deletion of /var/lib/nova/instances/5279e85f-e35b-4ddd-8336-7f483712f743_del complete
Oct 11 02:41:09 compute-0 nova_compute[356901]: 2025-10-11 02:41:09.603 2 INFO nova.compute.manager [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Took 1.30 seconds to destroy the instance on the hypervisor.
Oct 11 02:41:09 compute-0 nova_compute[356901]: 2025-10-11 02:41:09.604 2 DEBUG oslo.service.loopingcall [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Waiting for function nova.compute.manager.ComputeManager._try_deallocate_network.<locals>._deallocate_network_with_retries to return. func /usr/lib/python3.9/site-packages/oslo_service/loopingcall.py:435
Oct 11 02:41:09 compute-0 nova_compute[356901]: 2025-10-11 02:41:09.604 2 DEBUG nova.compute.manager [-] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Deallocating network for instance _deallocate_network /usr/lib/python3.9/site-packages/nova/compute/manager.py:2259
Oct 11 02:41:09 compute-0 nova_compute[356901]: 2025-10-11 02:41:09.605 2 DEBUG nova.network.neutron [-] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] deallocate_for_instance() deallocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1803
Oct 11 02:41:09 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:09.790 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '13'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:41:09 compute-0 nova_compute[356901]: 2025-10-11 02:41:09.891 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:41:09 compute-0 nova_compute[356901]: 2025-10-11 02:41:09.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:41:09 compute-0 nova_compute[356901]: 2025-10-11 02:41:09.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:41:10 compute-0 nova_compute[356901]: 2025-10-11 02:41:10.090 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:10 compute-0 nova_compute[356901]: 2025-10-11 02:41:10.108 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:41:10 compute-0 nova_compute[356901]: 2025-10-11 02:41:10.108 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:41:10 compute-0 nova_compute[356901]: 2025-10-11 02:41:10.109 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:41:10 compute-0 nova_compute[356901]: 2025-10-11 02:41:10.445 2 DEBUG nova.network.neutron [-] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Updating instance_info_cache with network_info: [] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:41:10 compute-0 nova_compute[356901]: 2025-10-11 02:41:10.470 2 INFO nova.compute.manager [-] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Took 0.87 seconds to deallocate network for instance.
Oct 11 02:41:10 compute-0 nova_compute[356901]: 2025-10-11 02:41:10.537 2 DEBUG oslo_concurrency.lockutils [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.update_usage" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:10 compute-0 nova_compute[356901]: 2025-10-11 02:41:10.538 2 DEBUG oslo_concurrency.lockutils [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:10 compute-0 nova_compute[356901]: 2025-10-11 02:41:10.589 2 DEBUG nova.compute.manager [req-aaad420b-8831-4e98-86cb-b3802ba883a1 req-5e670701-ba4b-494f-ad90-6aefb576570e 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Received event network-vif-deleted-d690bf02-80b8-4bb8-808f-ccc93f22c545 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:41:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1907: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 6.5 KiB/s rd, 7.4 KiB/s wr, 4 op/s
Oct 11 02:41:10 compute-0 nova_compute[356901]: 2025-10-11 02:41:10.891 2 DEBUG nova.compute.manager [req-84f6ee79-baa0-47dc-9bf0-81f8df15577f req-64b36863-1a10-4b95-9b11-cf9cc8a182e8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Received event network-vif-plugged-d690bf02-80b8-4bb8-808f-ccc93f22c545 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:41:10 compute-0 nova_compute[356901]: 2025-10-11 02:41:10.892 2 DEBUG oslo_concurrency.lockutils [req-84f6ee79-baa0-47dc-9bf0-81f8df15577f req-64b36863-1a10-4b95-9b11-cf9cc8a182e8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:10 compute-0 nova_compute[356901]: 2025-10-11 02:41:10.893 2 DEBUG oslo_concurrency.lockutils [req-84f6ee79-baa0-47dc-9bf0-81f8df15577f req-64b36863-1a10-4b95-9b11-cf9cc8a182e8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:10 compute-0 nova_compute[356901]: 2025-10-11 02:41:10.894 2 DEBUG oslo_concurrency.lockutils [req-84f6ee79-baa0-47dc-9bf0-81f8df15577f req-64b36863-1a10-4b95-9b11-cf9cc8a182e8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "5279e85f-e35b-4ddd-8336-7f483712f743-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
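
The three lockutils lines above are the standard oslo.concurrency pattern: a named in-process lock is acquired around a critical section, and the waited/held durations are logged on acquire and release. A minimal sketch of the same pattern, assuming oslo.concurrency is installed (the function body is a hypothetical stand-in):

    from oslo_concurrency import lockutils

    def pop_event():
        # The body runs only while the named lock is held; lockutils itself
        # emits the "Acquiring"/"acquired"/"released" DEBUG lines with the
        # waited/held durations seen in this journal.
        with lockutils.lock('5279e85f-e35b-4ddd-8336-7f483712f743-events'):
            pass  # mutate the per-instance event queue here
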
Oct 11 02:41:10 compute-0 nova_compute[356901]: 2025-10-11 02:41:10.894 2 DEBUG nova.compute.manager [req-84f6ee79-baa0-47dc-9bf0-81f8df15577f req-64b36863-1a10-4b95-9b11-cf9cc8a182e8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] No waiting events found dispatching network-vif-plugged-d690bf02-80b8-4bb8-808f-ccc93f22c545 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:41:10 compute-0 nova_compute[356901]: 2025-10-11 02:41:10.896 2 WARNING nova.compute.manager [req-84f6ee79-baa0-47dc-9bf0-81f8df15577f req-64b36863-1a10-4b95-9b11-cf9cc8a182e8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Received unexpected event network-vif-plugged-d690bf02-80b8-4bb8-808f-ccc93f22c545 for instance with vm_state deleted and task_state None.
Oct 11 02:41:10 compute-0 ceph-mon[191930]: pgmap v1907: 321 pgs: 321 active+clean; 277 MiB data, 398 MiB used, 60 GiB / 60 GiB avail; 6.5 KiB/s rd, 7.4 KiB/s wr, 4 op/s
Oct 11 02:41:10 compute-0 nova_compute[356901]: 2025-10-11 02:41:10.973 2 DEBUG oslo_concurrency.processutils [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:41:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:41:11 compute-0 nova_compute[356901]: 2025-10-11 02:41:11.325 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:41:11 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3829424127' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:41:11 compute-0 nova_compute[356901]: 2025-10-11 02:41:11.513 2 DEBUG oslo_concurrency.processutils [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.540s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
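
The 0.540 s "ceph df" probe above is issued through oslo.concurrency's processutils, and its JSON output feeds the disk inventory reported below. A minimal sketch of the call and of reading the cluster totals, assuming oslo.concurrency and a reachable Ceph cluster (error handling elided):

    import json

    from oslo_concurrency import processutils

    # Same command line as logged above; execute() raises on a non-zero
    # exit code and returns (stdout, stderr) on success.
    out, _err = processutils.execute(
        'ceph', 'df', '--format=json',
        '--id', 'openstack', '--conf', '/etc/ceph/ceph.conf')

    stats = json.loads(out)
    # Cluster-wide totals are reported in bytes under 'stats'.
    total_gib = stats['stats']['total_bytes'] / 1024 ** 3
    avail_gib = stats['stats']['total_avail_bytes'] / 1024 ** 3
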
Oct 11 02:41:11 compute-0 nova_compute[356901]: 2025-10-11 02:41:11.523 2 DEBUG nova.compute.provider_tree [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:41:11 compute-0 nova_compute[356901]: 2025-10-11 02:41:11.539 2 DEBUG nova.scheduler.client.report [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
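
Placement sizes the provider from exactly this inventory: the consumable capacity of each resource class is (total - reserved) * allocation_ratio. A worked check with the values reported above:

    # Worked example of the placement capacity formula applied to the
    # inventory logged above.
    inventory = {
        'VCPU':      {'total': 8,    'reserved': 0,   'allocation_ratio': 4.0},
        'MEMORY_MB': {'total': 7680, 'reserved': 512, 'allocation_ratio': 1.0},
        'DISK_GB':   {'total': 59,   'reserved': 1,   'allocation_ratio': 0.9},
    }
    for rc, inv in inventory.items():
        capacity = (inv['total'] - inv['reserved']) * inv['allocation_ratio']
        print(rc, capacity)   # VCPU 32.0, MEMORY_MB 7168.0, DISK_GB ~52.2
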
Oct 11 02:41:11 compute-0 nova_compute[356901]: 2025-10-11 02:41:11.560 2 DEBUG oslo_concurrency.lockutils [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: held 1.022s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:41:11 compute-0 nova_compute[356901]: 2025-10-11 02:41:11.602 2 INFO nova.scheduler.client.report [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Deleted allocations for instance 5279e85f-e35b-4ddd-8336-7f483712f743
Oct 11 02:41:11 compute-0 nova_compute[356901]: 2025-10-11 02:41:11.682 2 DEBUG oslo_concurrency.lockutils [None req-346d0475-491f-44dd-89aa-e5f441e7056b 7c06b99eac5242ddb9501f51d87567d2 4baea94e1c7d43e699eaac33512a8105 - - default default] Lock "5279e85f-e35b-4ddd-8336-7f483712f743" "released" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: held 3.387s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:41:11 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3829424127' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:41:12 compute-0 nova_compute[356901]: 2025-10-11 02:41:12.085 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Updating instance_info_cache with network_info: [{"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.187", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:41:12 compute-0 nova_compute[356901]: 2025-10-11 02:41:12.103 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:41:12 compute-0 nova_compute[356901]: 2025-10-11 02:41:12.103 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
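
The refreshed cache entry above is a JSON list of VIF dicts. A small sketch of walking that structure for the fixed and floating addresses, with the layout assumed from this journal entry (nw_info_json is a hypothetical variable holding the logged list):

    import json

    network_info = json.loads(nw_info_json)   # the list logged above
    for vif in network_info:
        for subnet in vif['network']['subnets']:
            for ip in subnet['ips']:
                print('fixed:', ip['address'])          # 10.100.0.4
                for fip in ip.get('floating_ips', []):
                    print('floating:', fip['address'])  # 192.168.122.187
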
Oct 11 02:41:12 compute-0 nova_compute[356901]: 2025-10-11 02:41:12.103 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:41:12 compute-0 nova_compute[356901]: 2025-10-11 02:41:12.104 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:41:12 compute-0 podman[456956]: 2025-10-11 02:41:12.212530825 +0000 UTC m=+0.098254116 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_id=edpm, org.label-schema.schema-version=1.0)
Oct 11 02:41:12 compute-0 podman[456958]: 2025-10-11 02:41:12.227438977 +0000 UTC m=+0.101086544 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:41:12 compute-0 podman[456957]: 2025-10-11 02:41:12.247034168 +0000 UTC m=+0.130185078 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, distribution-scope=public, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.buildah.version=1.33.7, io.openshift.expose-services=, maintainer=Red Hat, Inc., release=1755695350, version=9.6, com.redhat.component=ubi9-minimal-container, vcs-type=git, vendor=Red Hat, Inc., architecture=x86_64, name=ubi9-minimal, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, container_name=openstack_network_exporter, url=https://catalog.redhat.com/en/search?searchType=containers, build-date=2025-08-20T13:12:41, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., managed_by=edpm_ansible)
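
The three health_status=healthy events above are emitted by podman's periodic healthcheck runs. A sketch of querying the same state by hand through the CLI, using a container name from the log (the inspect field path is an assumption; older podman releases expose it as .State.Healthcheck.Status):

    import subprocess

    # Ask podman for the current health state of one of the containers
    # whose healthcheck events appear above.
    name = 'ceilometer_agent_ipmi'
    result = subprocess.run(
        ['podman', 'inspect', '--format', '{{.State.Health.Status}}', name],
        capture_output=True, text=True, check=True)
    print(result.stdout.strip())   # expected: healthy
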
Oct 11 02:41:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1908: 321 pgs: 321 active+clean; 247 MiB data, 380 MiB used, 60 GiB / 60 GiB avail; 8.5 KiB/s rd, 6.9 KiB/s wr, 6 op/s
Oct 11 02:41:12 compute-0 nova_compute[356901]: 2025-10-11 02:41:12.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:41:12 compute-0 ceph-mon[191930]: pgmap v1908: 321 pgs: 321 active+clean; 247 MiB data, 380 MiB used, 60 GiB / 60 GiB avail; 8.5 KiB/s rd, 6.9 KiB/s wr, 6 op/s
Oct 11 02:41:13 compute-0 sudo[457017]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:41:13 compute-0 sudo[457017]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:13 compute-0 sudo[457017]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:13 compute-0 nova_compute[356901]: 2025-10-11 02:41:13.809 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.868 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is greater than the number of worker threads available to execute them; polling can therefore be expected to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.869 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.869 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.870 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
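
Taken together with the worker-thread warning at 02:41:13.868, the registration lines above describe many pollsters being handed to a single-worker ThreadPoolExecutor, so the polling tasks queue up and run serially. A minimal sketch of that dispatch pattern (the pollster callables are hypothetical stand-ins):

    from concurrent.futures import ThreadPoolExecutor

    def poll(name):
        # Stand-in for one pollster's work.
        return f'{name}: polled'

    pollsters = ['network.incoming.bytes', 'network.outgoing.bytes']

    # One worker thread, as in the log: submissions are accepted eagerly
    # but execute one after another.
    with ThreadPoolExecutor(max_workers=1) as executor:
        for future in [executor.submit(poll, p) for p in pollsters]:
            print(future.result())
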
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.878 14 DEBUG ceilometer.compute.discovery [-] Querying metadata for instance f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 from Nova API get_server /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:176
Oct 11 02:41:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:13.880 14 DEBUG novaclient.v2.client [-] REQ: curl -g -i -X GET https://nova-internal.openstack.svc:8774/v2.1/servers/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 -H "Accept: application/json" -H "User-Agent: python-novaclient" -H "X-Auth-Token: {SHA256}d674387017edb5d8543811c363b3a2965950a94ddf4462840fede0e79ac258e9" -H "X-OpenStack-Nova-API-Version: 2.1" _http_log_request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:572
Oct 11 02:41:13 compute-0 nova_compute[356901]: 2025-10-11 02:41:13.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:41:13 compute-0 sudo[457042]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:41:13 compute-0 sudo[457042]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:13 compute-0 sudo[457042]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:13 compute-0 nova_compute[356901]: 2025-10-11 02:41:13.924 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:13 compute-0 nova_compute[356901]: 2025-10-11 02:41:13.925 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:13 compute-0 nova_compute[356901]: 2025-10-11 02:41:13.926 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:41:13 compute-0 nova_compute[356901]: 2025-10-11 02:41:13.926 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:41:13 compute-0 nova_compute[356901]: 2025-10-11 02:41:13.926 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:41:13 compute-0 sudo[457068]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:41:13 compute-0 sudo[457068]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:13 compute-0 sudo[457068]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:14 compute-0 sudo[457094]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:41:14 compute-0 sudo[457094]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:41:14 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/54989341' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:41:14 compute-0 nova_compute[356901]: 2025-10-11 02:41:14.430 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.504s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:41:14 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/54989341' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:41:14 compute-0 nova_compute[356901]: 2025-10-11 02:41:14.642 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000006 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:41:14 compute-0 nova_compute[356901]: 2025-10-11 02:41:14.643 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000006 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:41:14 compute-0 nova_compute[356901]: 2025-10-11 02:41:14.675 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:41:14 compute-0 nova_compute[356901]: 2025-10-11 02:41:14.676 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:41:14 compute-0 nova_compute[356901]: 2025-10-11 02:41:14.676 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:41:14 compute-0 sudo[457094]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:41:14 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:41:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:41:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:41:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:41:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1909: 321 pgs: 321 active+clean; 198 MiB data, 358 MiB used, 60 GiB / 60 GiB avail; 26 KiB/s rd, 7.3 KiB/s wr, 31 op/s
Oct 11 02:41:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:41:14 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 5a9017ef-b9ba-48df-a9e6-1c9ec04fb77e does not exist
Oct 11 02:41:14 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev fdf00332-7b52-45d3-8ece-b948714791d8 does not exist
Oct 11 02:41:14 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev cb79b810-31c4-437e-aed5-90fd61146846 does not exist
Oct 11 02:41:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:41:14 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:41:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:41:14 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:41:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:41:14 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:41:14 compute-0 sudo[457171]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:41:14 compute-0 sudo[457171]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:14 compute-0 sudo[457171]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:15 compute-0 sudo[457196]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:41:15 compute-0 sudo[457196]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:15 compute-0 sudo[457196]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:15 compute-0 nova_compute[356901]: 2025-10-11 02:41:15.092 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:15 compute-0 sudo[457221]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:41:15 compute-0 sudo[457221]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:15 compute-0 sudo[457221]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:15 compute-0 nova_compute[356901]: 2025-10-11 02:41:15.188 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:41:15 compute-0 nova_compute[356901]: 2025-10-11 02:41:15.190 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3519MB free_disk=59.88142776489258GB free_vcpus=6 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:41:15 compute-0 nova_compute[356901]: 2025-10-11 02:41:15.190 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:15 compute-0 nova_compute[356901]: 2025-10-11 02:41:15.191 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:15 compute-0 sudo[457246]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:41:15 compute-0 sudo[457246]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:15 compute-0 nova_compute[356901]: 2025-10-11 02:41:15.287 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:41:15 compute-0 nova_compute[356901]: 2025-10-11 02:41:15.288 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:41:15 compute-0 nova_compute[356901]: 2025-10-11 02:41:15.289 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 2 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:41:15 compute-0 nova_compute[356901]: 2025-10-11 02:41:15.289 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1152MB phys_disk=59GB used_disk=3GB total_vcpus=8 used_vcpus=2 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
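
The final view above is consistent with the two placement allocations listed a moment earlier plus the configured reservation: used_ram is the 512 MB host reservation plus the 512 MB and 128 MB instance allocations, and used_disk is the 2 GB and 1 GB allocations. A quick arithmetic check:

    # Worked check of the "Final resource view" figures logged above.
    reserved_ram_mb = 512            # MEMORY_MB 'reserved' from the inventory
    instance_ram_mb = [512, 128]     # the two placement allocations above
    instance_disk_gb = [2, 1]
    instance_vcpus = [1, 1]

    assert reserved_ram_mb + sum(instance_ram_mb) == 1152   # used_ram MB
    assert sum(instance_disk_gb) == 3                       # used_disk GB
    assert sum(instance_vcpus) == 2                         # used_vcpus
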
Oct 11 02:41:15 compute-0 nova_compute[356901]: 2025-10-11 02:41:15.356 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:41:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:41:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:41:15 compute-0 ceph-mon[191930]: pgmap v1909: 321 pgs: 321 active+clean; 198 MiB data, 358 MiB used, 60 GiB / 60 GiB avail; 26 KiB/s rd, 7.3 KiB/s wr, 31 op/s
Oct 11 02:41:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:41:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:41:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:41:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:41:15 compute-0 podman[457327]: 2025-10-11 02:41:15.76775736 +0000 UTC m=+0.079671882 container create 0824eba734cc19e03f4f44be4c6aa4cfe7ef2d3266bb5e24701a29cf069112bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_bassi, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:41:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:41:15 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2926609210' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:41:15 compute-0 systemd[1]: Started libpod-conmon-0824eba734cc19e03f4f44be4c6aa4cfe7ef2d3266bb5e24701a29cf069112bc.scope.
Oct 11 02:41:15 compute-0 podman[457327]: 2025-10-11 02:41:15.739078148 +0000 UTC m=+0.050992700 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:41:15 compute-0 nova_compute[356901]: 2025-10-11 02:41:15.834 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.478s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:41:15 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:41:15 compute-0 nova_compute[356901]: 2025-10-11 02:41:15.849 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:41:15 compute-0 podman[457327]: 2025-10-11 02:41:15.870980922 +0000 UTC m=+0.182895444 container init 0824eba734cc19e03f4f44be4c6aa4cfe7ef2d3266bb5e24701a29cf069112bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_bassi, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 02:41:15 compute-0 podman[457327]: 2025-10-11 02:41:15.883970511 +0000 UTC m=+0.195885003 container start 0824eba734cc19e03f4f44be4c6aa4cfe7ef2d3266bb5e24701a29cf069112bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_bassi, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.887 14 DEBUG novaclient.v2.client [-] RESP: [200] Connection: Keep-Alive Content-Length: 1977 Content-Type: application/json Date: Sat, 11 Oct 2025 02:41:13 GMT Keep-Alive: timeout=5, max=100 OpenStack-API-Version: compute 2.1 Server: Apache Vary: OpenStack-API-Version,X-OpenStack-Nova-API-Version X-OpenStack-Nova-API-Version: 2.1 x-compute-request-id: req-b656a810-7b1e-42e0-8abe-d362e6870ffc x-openstack-request-id: req-b656a810-7b1e-42e0-8abe-d362e6870ffc _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:613
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.888 14 DEBUG novaclient.v2.client [-] RESP BODY: {"server": {"id": "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765", "name": "tempest-ServerActionsTestJSON-server-482072585", "status": "ACTIVE", "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "user_id": "11c81e88a90342bba2c2816e4e3cb191", "metadata": {}, "hostId": "18f379f693a88c1aa68ae28e837180b86d8df27b9dde70a80ce56664", "image": {"id": "72f37f2e-4296-450e-9a12-10717f4ac7dc", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/images/72f37f2e-4296-450e-9a12-10717f4ac7dc"}]}, "flavor": {"id": "6dff30d1-85df-4e9c-9163-a20ba47bb0c7", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/flavors/6dff30d1-85df-4e9c-9163-a20ba47bb0c7"}]}, "created": "2025-10-11T02:38:58Z", "updated": "2025-10-11T02:40:42Z", "addresses": {"tempest-ServerActionsTestJSON-976896854-network": [{"version": 4, "addr": "10.100.0.4", "OS-EXT-IPS:type": "fixed", "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:8d:b8:dd"}, {"version": 4, "addr": "192.168.122.187", "OS-EXT-IPS:type": "floating", "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:8d:b8:dd"}]}, "accessIPv4": "", "accessIPv6": "", "links": [{"rel": "self", "href": "https://nova-internal.openstack.svc:8774/v2.1/servers/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765"}, {"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/servers/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765"}], "OS-DCF:diskConfig": "MANUAL", "progress": 0, "OS-EXT-AZ:availability_zone": "nova", "config_drive": "True", "key_name": "tempest-keypair-177844218", "OS-SRV-USG:launched_at": "2025-10-11T02:39:27.000000", "OS-SRV-USG:terminated_at": null, "security_groups": [{"name": "tempest-securitygroup--276511033"}], "OS-EXT-SRV-ATTR:host": "compute-0.ctlplane.example.com", "OS-EXT-SRV-ATTR:instance_name": "instance-00000006", "OS-EXT-SRV-ATTR:hypervisor_hostname": "compute-0.ctlplane.example.com", "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-EXT-STS:power_state": 1, "os-extended-volumes:volumes_attached": []}} _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:648
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.888 14 DEBUG novaclient.v2.client [-] GET call to compute for https://nova-internal.openstack.svc:8774/v2.1/servers/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 used request id req-b656a810-7b1e-42e0-8abe-d362e6870ffc request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:1073
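
The REQ/RESP pair above (request id req-b656a810-7b1e-42e0-8abe-d362e6870ffc) is ceilometer's metadata lookup against the Nova API. A minimal sketch of the same GET through keystoneauth1; the auth URL and credentials are placeholders, with only the Nova endpoint and microversion header taken from the log:

    from keystoneauth1 import session
    from keystoneauth1.identity import v3

    # Placeholder credentials; any valid Keystone v3 auth would do.
    auth = v3.Password(auth_url='https://keystone.example:5000/v3',
                       username='ceilometer', password='secret',
                       project_name='service',
                       user_domain_name='Default',
                       project_domain_name='Default')
    sess = session.Session(auth=auth)

    resp = sess.get(
        'https://nova-internal.openstack.svc:8774/v2.1/servers/'
        'f5eb6746-7f42-4fa4-8e26-cda5cfa0c765',
        headers={'X-OpenStack-Nova-API-Version': '2.1'})
    print(resp.json()['server']['status'])   # ACTIVE, per the body above
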
Oct 11 02:41:15 compute-0 podman[457327]: 2025-10-11 02:41:15.888701061 +0000 UTC m=+0.200615583 container attach 0824eba734cc19e03f4f44be4c6aa4cfe7ef2d3266bb5e24701a29cf069112bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_bassi, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.890 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': 'f5eb6746-7f42-4fa4-8e26-cda5cfa0c765', 'name': 'tempest-ServerActionsTestJSON-server-482072585', 'flavor': {'id': '6dff30d1-85df-4e9c-9163-a20ba47bb0c7', 'name': 'm1.nano', 'vcpus': 1, 'ram': 128, 'disk': 1, 'ephemeral': 0, 'swap': 0}, 'image': {'id': '72f37f2e-4296-450e-9a12-10717f4ac7dc'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000006', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': 'dba4f6e51d33430ebf5566af53f6fbcc', 'user_id': '11c81e88a90342bba2c2816e4e3cb191', 'hostId': '18f379f693a88c1aa68ae28e837180b86d8df27b9dde70a80ce56664', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:41:15 compute-0 quirky_bassi[457345]: 167 167
Oct 11 02:41:15 compute-0 systemd[1]: libpod-0824eba734cc19e03f4f44be4c6aa4cfe7ef2d3266bb5e24701a29cf069112bc.scope: Deactivated successfully.
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.894 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
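Each "instance data" record is a plain dict built by ceilometer's libvirt discovery; downstream pollsters read the id, flavor, and state fields directly. A small illustration (the dict is abridged from the log entry above; the helper function is hypothetical):

    # Hypothetical helper over the discover_libvirt_polling dict shape.
    def describe(instance: dict) -> str:
        flavor = instance["flavor"]
        return (f"{instance['name']} ({instance['id']}): "
                f"{flavor['vcpus']} vCPU, {flavor['ram']} MiB RAM, "
                f"{flavor['disk']}+{flavor['ephemeral']} GiB disk, "
                f"state={instance['OS-EXT-STS:vm_state']}")

    inst = {"id": "0cc56d17-ec3a-4408-bccb-91b29427379e", "name": "test_0",
            "flavor": {"name": "m1.small", "vcpus": 1, "ram": 512,
                       "disk": 1, "ephemeral": 1, "swap": 0},
            "OS-EXT-STS:vm_state": "running"}
    print(describe(inst))
    # test_0 (0cc56d17-...): 1 vCPU, 512 MiB RAM, 1+1 GiB disk, state=running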
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.894 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.894 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.894 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
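The "Checking if we need coordination" / "not configured in a source for polling that requires coordination" pair repeats before every pollster run: when multiple agents share a polling source, a hash ring decides which agent owns which resource, and with no ring configured (the [None] above) this agent simply polls everything itself. A conceptual sketch of that partitioning (invented names; real deployments use tooz for group membership):

    # Conceptual work-splitting by hashing; not ceilometer's actual code path.
    import hashlib

    def ring_owner(resource_id: str, members: list[str]) -> str:
        h = int(hashlib.md5(resource_id.encode()).hexdigest(), 16)
        return members[h % len(members)]   # stable bucket per resource

    members = ["agent-a", "agent-b"]
    me = "agent-a"
    for res in ["instance-1", "instance-2", "instance-3"]:
        if ring_owner(res, members) == me:
            print("this agent polls", res)
    # With no coordination group configured, the membership check is skipped.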
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.894 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.896 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:41:15.894907) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.904 14 DEBUG ceilometer.compute.virt.libvirt.inspector [-] No delta meter predecessor for f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 / tapd7c4233c-f7 inspect_vnics /usr/lib/python3.12/site-packages/ceilometer/compute/virt/libvirt/inspector.py:143
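"No delta meter predecessor" means the inspector has no earlier cumulative reading for this instance/vNIC pair, so the *.delta meters for it start at zero; subsequent polls subtract consecutive cumulative counters. A sketch of that bookkeeping, assuming a simple in-memory cache:

    # Hypothetical predecessor cache mirroring the delta-meter logic above.
    _prev: dict[tuple[str, str], int] = {}

    def delta(instance_id: str, vnic: str, cumulative: int) -> int:
        key = (instance_id, vnic)
        prev = _prev.get(key)
        _prev[key] = cumulative
        if prev is None:
            return 0                      # first observation: no predecessor
        return max(cumulative - prev, 0)  # clamp in case the counter reset

    print(delta("instance-1", "tap0", 110))   # 0: no predecessor yet
    print(delta("instance-1", "tap0", 360))   # 250 on the next cycle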
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.904 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/network.incoming.bytes volume: 110 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.909 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2856 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.909 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.910 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.910 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.910 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.910 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.910 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.910 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/network.outgoing.packets volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.911 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 24 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.911 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:41:15.910705) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.911 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.912 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.912 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.912 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.912 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.912 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.913 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.913 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.913 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:41:15.912675) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.914 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.914 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.914 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.914 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.914 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.914 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.914 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.915 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.915 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.915 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.916 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:41:15.914657) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.916 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.916 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.916 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.916 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.916 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:41:15.916491) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.933 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.933 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.capacity volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:15 compute-0 nova_compute[356901]: 2025-10-11 02:41:15.936 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
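The inventory record above fixes what the scheduler may place on this node: per resource class, usable capacity is (total - reserved) * allocation_ratio. Worked out for the values logged:

    # Placement capacity math applied to the logged inventory.
    inventory = {
        "VCPU":      {"total": 8,    "reserved": 0,   "allocation_ratio": 4.0},
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "DISK_GB":   {"total": 59,   "reserved": 1,   "allocation_ratio": 0.9},
    }
    for rc, inv in inventory.items():
        print(rc, (inv["total"] - inv["reserved"]) * inv["allocation_ratio"])
    # VCPU 32.0, MEMORY_MB 7168.0, DISK_GB 52.2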
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.953 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.953 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.953 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.954 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
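disk.device.capacity emits one sample per attached device, which is why the first instance logs two volume lines (its 1 GiB root disk plus a small second device, plausibly the config drive) and the second logs three (root, ephemeral, and a similar small device). The byte values translate as:

    # 1 GiB = 2**30 bytes; the small values are on the order of 0.5 MB.
    for v in (1073741824, 509952, 485376):
        print(v, "bytes =", round(v / 2**30, 6), "GiB")
    # 1073741824 -> 1.0 GiB; 509952 and 485376 -> ~0.0005 GiB each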
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.954 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.954 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.954 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.955 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.955 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.955 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:41:15.955144) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:15 compute-0 podman[457350]: 2025-10-11 02:41:15.956299609 +0000 UTC m=+0.046954378 container died 0824eba734cc19e03f4f44be4c6aa4cfe7ef2d3266bb5e24701a29cf069112bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_bassi, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef)
Oct 11 02:41:15 compute-0 nova_compute[356901]: 2025-10-11 02:41:15.964 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:41:15 compute-0 nova_compute[356901]: 2025-10-11 02:41:15.965 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.774s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
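The resource tracker serializes its update under the "compute_resources" lock; oslo.concurrency logs the acquisition and release together with the hold time (0.774s here). The pattern, in the form lockutils exposes (the decorated function is a stand-in, not nova's implementation):

    # Sketch of the oslo.concurrency lock behind the log messages above.
    from oslo_concurrency import lockutils

    @lockutils.synchronized("compute_resources")
    def update_available_resource():
        # Critical section: only one holder of "compute_resources" at a
        # time; lockutils DEBUG-logs acquire/release and the held duration.
        pass

    update_available_resource()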
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.982 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.read.bytes volume: 23816192 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:15.983 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.read.bytes volume: 2048 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:15 compute-0 systemd[1]: var-lib-containers-storage-overlay-a473903dfd4dccab91f5382285daee2c12417bf53c4f320441c1c3f1f4cd8c84-merged.mount: Deactivated successfully.
Oct 11 02:41:16 compute-0 podman[457350]: 2025-10-11 02:41:16.016624846 +0000 UTC m=+0.107279615 container remove 0824eba734cc19e03f4f44be4c6aa4cfe7ef2d3266bb5e24701a29cf069112bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_bassi, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS)
Oct 11 02:41:16 compute-0 systemd[1]: libpod-conmon-0824eba734cc19e03f4f44be4c6aa4cfe7ef2d3266bb5e24701a29cf069112bc.scope: Deactivated successfully.
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.028 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.041 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.042 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.042 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.043 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.043 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.043 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.043 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.045 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:41:16.044011) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.044 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.045 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.read.latency volume: 1167385612 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.045 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.read.latency volume: 1714896 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.046 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.046 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.046 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.047 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.047 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.047 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.047 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.047 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.048 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:41:16.047763) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.047 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.048 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.read.requests volume: 770 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.048 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.read.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.048 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.049 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.049 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.049 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.050 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.050 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.050 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.050 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.050 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:41:16.050494) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.050 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.051 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.052 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.usage volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.052 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.054 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.054 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.055 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.055 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.055 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.055 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.056 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.056 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.056 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.056 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.058 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:41:16.056204) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.057 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.059 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.059 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.060 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.060 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.060 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.061 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.061 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.061 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.062 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.062 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.062 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.062 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.063 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.064 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.064 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.065 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:41:16.061745) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.065 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.067 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.067 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.067 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.068 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:41:16.067344) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.093 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.115 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.116 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
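power.state volume 1 reports each instance as RUNNING: the meter reuses Nova's power-state codes. For reference, the mapping as defined in nova.compute.power_state:

    # Nova power_state codes; the two samples above (volume: 1) mean RUNNING.
    POWER_STATES = {0: "NOSTATE", 1: "RUNNING", 3: "PAUSED",
                    4: "SHUTDOWN", 6: "CRASHED", 7: "SUSPENDED"}
    print(POWER_STATES[1])  # RUNNING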
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.116 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.116 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.116 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.116 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.116 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.116 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.117 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.117 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.117 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.117 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.118 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.118 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.118 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.118 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.118 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.118 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.118 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.118 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.119 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.119 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.119 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.rate in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.119 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.119 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:41:16.116827) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.119 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.119 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:41:16.118627) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.119 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.rate heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.120 14 DEBUG ceilometer.compute.pollsters [-] LibvirtInspector does not provide data for IncomingBytesRatePollster get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:162
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.120 14 ERROR ceilometer.polling.manager [-] Prevent pollster network.incoming.bytes.rate from polling [<NovaLikeServer: tempest-ServerActionsTestJSON-server-482072585>] on source pollsters anymore!: ceilometer.polling.plugin_base.PollsterPermanentError: [<NovaLikeServer: tempest-ServerActionsTestJSON-server-482072585>]
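The ERROR above is ceilometer's deliberate circuit breaker: when the libvirt inspector can never supply a meter (here, precomputed rate data), the pollster raises PollsterPermanentError and the manager stops polling those resources on that source instead of failing every interval. Sketched in the shape plugin_base expects (the class below is illustrative, not the real IncomingBytesRatePollster):

    # Illustrative pollster raising the permanent-failure signal logged above.
    from ceilometer.polling import plugin_base

    class RateLikePollster(plugin_base.PollsterBase):
        @property
        def default_discovery(self):
            return "local_instances"

        def get_samples(self, manager, cache, resources):
            # The inspector has no rate data for these resources, so opt
            # out permanently rather than erroring on every cycle.
            raise plugin_base.PollsterPermanentError(resources)
            yield  # keeps get_samples a generator, per the base contract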
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.120 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.120 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.120 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.120 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.120 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.121 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.121 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.121 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.121 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.121 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.121 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.121 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/network.incoming.packets volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.122 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.122 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.122 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.122 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.122 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.122 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.122 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.122 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.123 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.123 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.123 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.123 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.123 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.123 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.124 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.124 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.124 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.124 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.124 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.124 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.124 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.125 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.125 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.125 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.rate (2025-10-11T02:41:16.119952) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.125 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:41:16.120902) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.125 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.125 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:41:16.121712) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.125 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:41:16.122789) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.125 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.125 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:41:16.124024) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.125 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.125 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:41:16.124826) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.125 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.126 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.126 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.126 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.126 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/disk.device.allocation volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.126 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:41:16.126056) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.126 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.127 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.127 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.127 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.127 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.127 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.127 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.127 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.128 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.128 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.128 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.128 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.128 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.129 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.129 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.129 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.129 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.129 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/cpu volume: 32090000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.129 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 53810000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.129 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.130 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.130 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:41:16.128007) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.130 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.130 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.130 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:41:16.129221) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.130 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.130 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.130 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/network.outgoing.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.130 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2412 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.131 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.131 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.131 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.131 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.132 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.131 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:41:16.130384) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.132 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.132 14 DEBUG ceilometer.compute.pollsters [-] f5eb6746-7f42-4fa4-8e26-cda5cfa0c765/memory.usage volume: 40.45703125 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.132 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.83984375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.132 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.132 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.132 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.rate in the context of pollsters
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.132 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.133 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.133 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.rate heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.133 14 DEBUG ceilometer.compute.pollsters [-] LibvirtInspector does not provide data for OutgoingBytesRatePollster get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:162
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.133 14 ERROR ceilometer.polling.manager [-] Prevent pollster network.outgoing.bytes.rate from polling [<NovaLikeServer: tempest-ServerActionsTestJSON-server-482072585>] on source pollsters anymore!: ceilometer.polling.plugin_base.PollsterPermanentError: [<NovaLikeServer: tempest-ServerActionsTestJSON-server-482072585>]
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.133 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:41:16.132070) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.133 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.rate (2025-10-11T02:41:16.133138) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.134 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.134 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.134 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.134 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.134 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.134 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.134 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.134 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.134 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.135 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.135 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.135 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.135 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.135 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.135 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.135 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.135 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.135 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.136 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.136 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.136 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.136 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.136 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.136 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.136 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:41:16.136 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:41:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:41:16 compute-0 podman[457371]: 2025-10-11 02:41:16.285675779 +0000 UTC m=+0.070895509 container create 25ec51299b980e0497110463c911baa3c19831b83334aee928bf7be38b0b05c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_bhaskara, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, io.buildah.version=1.39.3)
Oct 11 02:41:16 compute-0 podman[457371]: 2025-10-11 02:41:16.261310986 +0000 UTC m=+0.046530726 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:41:16 compute-0 systemd[1]: Started libpod-conmon-25ec51299b980e0497110463c911baa3c19831b83334aee928bf7be38b0b05c2.scope.
Oct 11 02:41:16 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:41:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ea13c9c04acf07207d1b25c856cba454846b8d59ab716da015f425c0a1f7dc18/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:41:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ea13c9c04acf07207d1b25c856cba454846b8d59ab716da015f425c0a1f7dc18/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:41:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ea13c9c04acf07207d1b25c856cba454846b8d59ab716da015f425c0a1f7dc18/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:41:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ea13c9c04acf07207d1b25c856cba454846b8d59ab716da015f425c0a1f7dc18/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:41:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ea13c9c04acf07207d1b25c856cba454846b8d59ab716da015f425c0a1f7dc18/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:41:16 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2926609210' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:41:16 compute-0 podman[457371]: 2025-10-11 02:41:16.463123071 +0000 UTC m=+0.248342851 container init 25ec51299b980e0497110463c911baa3c19831b83334aee928bf7be38b0b05c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_bhaskara, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507)
Oct 11 02:41:16 compute-0 podman[457371]: 2025-10-11 02:41:16.488519584 +0000 UTC m=+0.273739324 container start 25ec51299b980e0497110463c911baa3c19831b83334aee928bf7be38b0b05c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_bhaskara, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:41:16 compute-0 podman[457371]: 2025-10-11 02:41:16.494103198 +0000 UTC m=+0.279322938 container attach 25ec51299b980e0497110463c911baa3c19831b83334aee928bf7be38b0b05c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_bhaskara, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:41:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1910: 321 pgs: 321 active+clean; 198 MiB data, 358 MiB used, 60 GiB / 60 GiB avail; 26 KiB/s rd, 7.2 KiB/s wr, 31 op/s
Oct 11 02:41:17 compute-0 ceph-mon[191930]: pgmap v1910: 321 pgs: 321 active+clean; 198 MiB data, 358 MiB used, 60 GiB / 60 GiB avail; 26 KiB/s rd, 7.2 KiB/s wr, 31 op/s
Oct 11 02:41:17 compute-0 recursing_bhaskara[457386]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:41:17 compute-0 recursing_bhaskara[457386]: --> relative data size: 1.0
Oct 11 02:41:17 compute-0 recursing_bhaskara[457386]: --> All data devices are unavailable
Oct 11 02:41:17 compute-0 systemd[1]: libpod-25ec51299b980e0497110463c911baa3c19831b83334aee928bf7be38b0b05c2.scope: Deactivated successfully.
Oct 11 02:41:17 compute-0 systemd[1]: libpod-25ec51299b980e0497110463c911baa3c19831b83334aee928bf7be38b0b05c2.scope: Consumed 1.203s CPU time.
Oct 11 02:41:17 compute-0 podman[457415]: 2025-10-11 02:41:17.910709427 +0000 UTC m=+0.062199690 container died 25ec51299b980e0497110463c911baa3c19831b83334aee928bf7be38b0b05c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_bhaskara, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:41:17 compute-0 systemd[1]: var-lib-containers-storage-overlay-ea13c9c04acf07207d1b25c856cba454846b8d59ab716da015f425c0a1f7dc18-merged.mount: Deactivated successfully.
Oct 11 02:41:18 compute-0 podman[457415]: 2025-10-11 02:41:18.003367197 +0000 UTC m=+0.154857400 container remove 25ec51299b980e0497110463c911baa3c19831b83334aee928bf7be38b0b05c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_bhaskara, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 02:41:18 compute-0 systemd[1]: libpod-conmon-25ec51299b980e0497110463c911baa3c19831b83334aee928bf7be38b0b05c2.scope: Deactivated successfully.
Oct 11 02:41:18 compute-0 ovn_controller[88370]: 2025-10-11T02:41:18Z|00018|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:8d:b8:dd 10.100.0.4
Oct 11 02:41:18 compute-0 sudo[457246]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:18 compute-0 sudo[457428]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:41:18 compute-0 sudo[457428]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:18 compute-0 sudo[457428]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:18 compute-0 sudo[457453]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:41:18 compute-0 sudo[457453]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:18 compute-0 sudo[457453]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:18 compute-0 sudo[457478]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:41:18 compute-0 sudo[457478]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:18 compute-0 sudo[457478]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:18 compute-0 sudo[457503]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:41:18 compute-0 sudo[457503]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1911: 321 pgs: 321 active+clean; 198 MiB data, 358 MiB used, 60 GiB / 60 GiB avail; 255 KiB/s rd, 7.2 KiB/s wr, 46 op/s
Oct 11 02:41:18 compute-0 nova_compute[356901]: 2025-10-11 02:41:18.815 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:18 compute-0 ceph-mon[191930]: pgmap v1911: 321 pgs: 321 active+clean; 198 MiB data, 358 MiB used, 60 GiB / 60 GiB avail; 255 KiB/s rd, 7.2 KiB/s wr, 46 op/s
Oct 11 02:41:19 compute-0 podman[457566]: 2025-10-11 02:41:19.033346963 +0000 UTC m=+0.071608270 container create 0a079fdda63c076dd3cf3462357ad176e237935a07b3b7319f95bfbe7a181cde (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_snyder, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Oct 11 02:41:19 compute-0 podman[457566]: 2025-10-11 02:41:18.998551561 +0000 UTC m=+0.036812948 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:41:19 compute-0 systemd[1]: Started libpod-conmon-0a079fdda63c076dd3cf3462357ad176e237935a07b3b7319f95bfbe7a181cde.scope.
Oct 11 02:41:19 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:41:19 compute-0 podman[457566]: 2025-10-11 02:41:19.162403728 +0000 UTC m=+0.200665055 container init 0a079fdda63c076dd3cf3462357ad176e237935a07b3b7319f95bfbe7a181cde (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_snyder, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, ceph=True, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:41:19 compute-0 podman[457566]: 2025-10-11 02:41:19.176099907 +0000 UTC m=+0.214361204 container start 0a079fdda63c076dd3cf3462357ad176e237935a07b3b7319f95bfbe7a181cde (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_snyder, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:41:19 compute-0 podman[457566]: 2025-10-11 02:41:19.181536497 +0000 UTC m=+0.219797874 container attach 0a079fdda63c076dd3cf3462357ad176e237935a07b3b7319f95bfbe7a181cde (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_snyder, ceph=True, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:41:19 compute-0 adoring_snyder[457582]: 167 167
Oct 11 02:41:19 compute-0 systemd[1]: libpod-0a079fdda63c076dd3cf3462357ad176e237935a07b3b7319f95bfbe7a181cde.scope: Deactivated successfully.
Oct 11 02:41:19 compute-0 podman[457566]: 2025-10-11 02:41:19.190896225 +0000 UTC m=+0.229157512 container died 0a079fdda63c076dd3cf3462357ad176e237935a07b3b7319f95bfbe7a181cde (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_snyder, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:41:19 compute-0 systemd[1]: var-lib-containers-storage-overlay-c7b8d84b1ccf837cd30a1950acfc7f1896e13a11d8215cd705b7dcf02ce88a02-merged.mount: Deactivated successfully.
Oct 11 02:41:19 compute-0 podman[457566]: 2025-10-11 02:41:19.246494132 +0000 UTC m=+0.284755429 container remove 0a079fdda63c076dd3cf3462357ad176e237935a07b3b7319f95bfbe7a181cde (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_snyder, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:41:19 compute-0 systemd[1]: libpod-conmon-0a079fdda63c076dd3cf3462357ad176e237935a07b3b7319f95bfbe7a181cde.scope: Deactivated successfully.
Oct 11 02:41:19 compute-0 podman[457605]: 2025-10-11 02:41:19.535759754 +0000 UTC m=+0.091573962 container create 553e1814e6200c2dabbbbf8e659b35331659f7469b914d54684ed76aed6039bf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_wozniak, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:41:19 compute-0 podman[457605]: 2025-10-11 02:41:19.505908749 +0000 UTC m=+0.061722977 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:41:19 compute-0 systemd[1]: Started libpod-conmon-553e1814e6200c2dabbbbf8e659b35331659f7469b914d54684ed76aed6039bf.scope.
Oct 11 02:41:19 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:41:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5d36b7fbef76eb3938a285b52ca038e559841762a7ffe2a2984a921170b0e527/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:41:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5d36b7fbef76eb3938a285b52ca038e559841762a7ffe2a2984a921170b0e527/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:41:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5d36b7fbef76eb3938a285b52ca038e559841762a7ffe2a2984a921170b0e527/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:41:19 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5d36b7fbef76eb3938a285b52ca038e559841762a7ffe2a2984a921170b0e527/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:41:19 compute-0 podman[457605]: 2025-10-11 02:41:19.712531077 +0000 UTC m=+0.268345295 container init 553e1814e6200c2dabbbbf8e659b35331659f7469b914d54684ed76aed6039bf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_wozniak, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default)
Oct 11 02:41:19 compute-0 podman[457605]: 2025-10-11 02:41:19.727703407 +0000 UTC m=+0.283517595 container start 553e1814e6200c2dabbbbf8e659b35331659f7469b914d54684ed76aed6039bf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_wozniak, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:41:19 compute-0 podman[457605]: 2025-10-11 02:41:19.734001751 +0000 UTC m=+0.289815969 container attach 553e1814e6200c2dabbbbf8e659b35331659f7469b914d54684ed76aed6039bf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_wozniak, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:41:19 compute-0 podman[457622]: 2025-10-11 02:41:19.768396531 +0000 UTC m=+0.113668872 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, distribution-scope=public, summary=Provides the latest release of Red Hat Universal Base Image 9., managed_by=edpm_ansible, container_name=kepler, release-0.7.12=, architecture=x86_64, io.k8s.display-name=Red Hat Universal Base Image 9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, vendor=Red Hat, Inc., version=9.4, com.redhat.component=ubi9-container, io.buildah.version=1.29.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, io.openshift.expose-services=, maintainer=Red Hat, Inc., release=1214.1726694543, build-date=2024-09-18T21:23:30, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.openshift.tags=base rhel9)
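Annotation: the kepler health_status line above embeds the container's edpm definition in its config_data label as a Python-literal dict (single quotes, bare True), so json.loads would reject it. A minimal sketch of recovering it for inspection, assuming the label text has already been cut out of the journal line; the sample below is abridged from the line above.

    # Sketch: parse the config_data label from the health_status event.
    # ast.literal_eval handles the Python dict repr that json.loads cannot.
    import ast

    config_data = ("{'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', "
                   "'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], "
                   "'net': 'host', 'recreate': True}")

    cfg = ast.literal_eval(config_data)   # safe literal parsing, no eval()
    print(cfg['image'], cfg['ports'])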
Oct 11 02:41:19 compute-0 nova_compute[356901]: 2025-10-11 02:41:19.899 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_incomplete_migrations run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:41:19 compute-0 nova_compute[356901]: 2025-10-11 02:41:19.899 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances with incomplete migration  _cleanup_incomplete_migrations /usr/lib/python3.9/site-packages/nova/compute/manager.py:11183
Oct 11 02:41:20 compute-0 nova_compute[356901]: 2025-10-11 02:41:20.052 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:20 compute-0 nova_compute[356901]: 2025-10-11 02:41:20.054 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d" acquired by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:20 compute-0 nova_compute[356901]: 2025-10-11 02:41:20.069 2 DEBUG nova.compute.manager [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Starting instance... _do_build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2402
Oct 11 02:41:20 compute-0 nova_compute[356901]: 2025-10-11 02:41:20.096 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:20 compute-0 nova_compute[356901]: 2025-10-11 02:41:20.145 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:20 compute-0 nova_compute[356901]: 2025-10-11 02:41:20.146 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:20 compute-0 nova_compute[356901]: 2025-10-11 02:41:20.157 2 DEBUG nova.virt.hardware [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Require both a host and instance NUMA topology to fit instance on host. numa_fit_instance_to_host /usr/lib/python3.9/site-packages/nova/virt/hardware.py:2368
Oct 11 02:41:20 compute-0 nova_compute[356901]: 2025-10-11 02:41:20.159 2 INFO nova.compute.claims [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Claim successful on node compute-0.ctlplane.example.com
Oct 11 02:41:20 compute-0 ovn_controller[88370]: 2025-10-11T02:41:20Z|00142|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:41:20 compute-0 ovn_controller[88370]: 2025-10-11T02:41:20Z|00143|binding|INFO|Releasing lport aa37c6ed-d2db-4ed4-b1c9-cfd071cfd96a from this chassis (sb_readonly=0)
Oct 11 02:41:20 compute-0 nova_compute[356901]: 2025-10-11 02:41:20.326 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:20 compute-0 nova_compute[356901]: 2025-10-11 02:41:20.329 2 DEBUG oslo_concurrency.processutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]: {
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:     "0": [
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:         {
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "devices": [
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "/dev/loop3"
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             ],
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "lv_name": "ceph_lv0",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "lv_size": "21470642176",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "name": "ceph_lv0",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "tags": {
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.cluster_name": "ceph",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.crush_device_class": "",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.encrypted": "0",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.osd_id": "0",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.type": "block",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.vdo": "0"
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             },
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "type": "block",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "vg_name": "ceph_vg0"
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:         }
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:     ],
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:     "1": [
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:         {
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "devices": [
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "/dev/loop4"
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             ],
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "lv_name": "ceph_lv1",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "lv_size": "21470642176",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "name": "ceph_lv1",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "tags": {
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.cluster_name": "ceph",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.crush_device_class": "",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.encrypted": "0",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.osd_id": "1",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.type": "block",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.vdo": "0"
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             },
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "type": "block",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "vg_name": "ceph_vg1"
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:         }
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:     ],
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:     "2": [
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:         {
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "devices": [
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "/dev/loop5"
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             ],
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "lv_name": "ceph_lv2",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "lv_size": "21470642176",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "name": "ceph_lv2",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "tags": {
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.cluster_name": "ceph",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.crush_device_class": "",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.encrypted": "0",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.osd_id": "2",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.type": "block",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:                 "ceph.vdo": "0"
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             },
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "type": "block",
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:             "vg_name": "ceph_vg2"
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:         }
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]:     ]
Oct 11 02:41:20 compute-0 mystifying_wozniak[457623]: }
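Annotation: the JSON block above is ceph-volume lvm list output relayed through the short-lived cephadm helper container (mystifying_wozniak); the top-level keys are OSD ids and each value is a list of LV records. A minimal sketch of indexing it by OSD id, with the sample abridged to one of the three OSDs logged above.

    # Sketch: map OSD id -> backing device, LV path, and OSD fsid
    # from the ceph-volume lvm list JSON above (abridged sample).
    import json

    raw = '''{
      "0": [{"devices": ["/dev/loop3"],
             "lv_path": "/dev/ceph_vg0/ceph_lv0",
             "tags": {"ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6"}}]
    }'''

    for osd_id, lvs in json.loads(raw).items():
        for lv in lvs:
            print(osd_id, lv["devices"][0], lv["lv_path"],
                  lv["tags"]["ceph.osd_fsid"])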
Oct 11 02:41:20 compute-0 systemd[1]: libpod-553e1814e6200c2dabbbbf8e659b35331659f7469b914d54684ed76aed6039bf.scope: Deactivated successfully.
Oct 11 02:41:20 compute-0 podman[457670]: 2025-10-11 02:41:20.745181838 +0000 UTC m=+0.062095386 container died 553e1814e6200c2dabbbbf8e659b35331659f7469b914d54684ed76aed6039bf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_wozniak, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0)
Oct 11 02:41:20 compute-0 systemd[1]: var-lib-containers-storage-overlay-5d36b7fbef76eb3938a285b52ca038e559841762a7ffe2a2984a921170b0e527-merged.mount: Deactivated successfully.
Oct 11 02:41:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1912: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 321 KiB/s rd, 18 KiB/s wr, 51 op/s
Oct 11 02:41:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:41:20 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3293590786' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:41:20 compute-0 podman[457670]: 2025-10-11 02:41:20.833736665 +0000 UTC m=+0.150650193 container remove 553e1814e6200c2dabbbbf8e659b35331659f7469b914d54684ed76aed6039bf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_wozniak, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 02:41:20 compute-0 nova_compute[356901]: 2025-10-11 02:41:20.839 2 DEBUG oslo_concurrency.processutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.511s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:41:20 compute-0 systemd[1]: libpod-conmon-553e1814e6200c2dabbbbf8e659b35331659f7469b914d54684ed76aed6039bf.scope: Deactivated successfully.
Oct 11 02:41:20 compute-0 nova_compute[356901]: 2025-10-11 02:41:20.849 2 DEBUG nova.compute.provider_tree [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:41:20 compute-0 ceph-mon[191930]: pgmap v1912: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 321 KiB/s rd, 18 KiB/s wr, 51 op/s
Oct 11 02:41:20 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3293590786' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:41:20 compute-0 nova_compute[356901]: 2025-10-11 02:41:20.865 2 DEBUG nova.scheduler.client.report [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
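Annotation: the scheduler report line above carries the provider's full inventory. A worked sketch of the capacity these numbers imply, assuming placement's usual usable-capacity rule of (total - reserved) * allocation_ratio; the rule is an assumption here, the figures are taken from the log.

    # Sketch of the placement capacity arithmetic implied by the
    # inventory above (rule assumed: (total - reserved) * ratio).
    inventory = {
        'VCPU':      {'total': 8,    'reserved': 0,   'allocation_ratio': 4.0},
        'MEMORY_MB': {'total': 7680, 'reserved': 512, 'allocation_ratio': 1.0},
        'DISK_GB':   {'total': 59,   'reserved': 1,   'allocation_ratio': 0.9},
    }
    for rc, inv in inventory.items():
        cap = (inv['total'] - inv['reserved']) * inv['allocation_ratio']
        print(f"{rc}: {cap:g}")   # VCPU: 32, MEMORY_MB: 7168, DISK_GB: 52.2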
Oct 11 02:41:20 compute-0 sudo[457503]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:20 compute-0 nova_compute[356901]: 2025-10-11 02:41:20.964 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: held 0.819s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:41:20 compute-0 nova_compute[356901]: 2025-10-11 02:41:20.966 2 DEBUG nova.compute.manager [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Start building networks asynchronously for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2799
Oct 11 02:41:20 compute-0 sudo[457687]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:41:20 compute-0 sudo[457687]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:20 compute-0 sudo[457687]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:21 compute-0 sudo[457712]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:41:21 compute-0 sudo[457712]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:21 compute-0 sudo[457712]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.106 2 DEBUG nova.compute.manager [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Allocating IP information in the background. _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1952
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.107 2 DEBUG nova.network.neutron [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] allocate_for_instance() allocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1156
Oct 11 02:41:21 compute-0 sudo[457737]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:41:21 compute-0 sudo[457737]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:21 compute-0 sudo[457737]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:41:21 compute-0 sudo[457762]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:41:21 compute-0 sudo[457762]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.308 2 INFO nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Ignoring supplied device name: /dev/vda. Libvirt can't honour user-supplied dev names
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.328 2 DEBUG nova.compute.manager [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Start building block device mappings for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2834
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.353 2 DEBUG nova.policy [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Policy check for network:attach_external_network failed with credentials {'is_admin': False, 'user_id': 'bcba1b18a2ad479587a15fe415ae307a', 'user_domain_id': 'default', 'system_scope': None, 'domain_id': None, 'project_id': '86dfc4ba5e494748b86bc9b983426779', 'project_domain_id': 'default', 'roles': ['member', 'reader'], 'is_admin_project': True, 'service_user_id': None, 'service_user_domain_id': None, 'service_project_id': None, 'service_project_domain_id': None, 'service_roles': []} authorize /usr/lib/python3.9/site-packages/nova/policy.py:203
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.433 2 DEBUG nova.compute.manager [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Start spawning the instance on the hypervisor. _build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2608
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.435 2 DEBUG nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Creating instance directory _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4723
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.436 2 INFO nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Creating image(s)
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.477 2 DEBUG nova.storage.rbd_utils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] rbd image 2a3deab0-7a22-486d-86a2-2fc870c8ab2d_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.521 2 DEBUG nova.storage.rbd_utils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] rbd image 2a3deab0-7a22-486d-86a2-2fc870c8ab2d_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.560 2 DEBUG nova.storage.rbd_utils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] rbd image 2a3deab0-7a22-486d-86a2-2fc870c8ab2d_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.574 2 DEBUG oslo_concurrency.processutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.659 2 DEBUG oslo_concurrency.processutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d --force-share --output=json" returned: 0 in 0.085s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.660 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.660 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.661 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.686 2 DEBUG nova.storage.rbd_utils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] rbd image 2a3deab0-7a22-486d-86a2-2fc870c8ab2d_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:41:21 compute-0 nova_compute[356901]: 2025-10-11 02:41:21.694 2 DEBUG oslo_concurrency.processutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d 2a3deab0-7a22-486d-86a2-2fc870c8ab2d_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:41:21 compute-0 podman[457900]: 2025-10-11 02:41:21.831797699 +0000 UTC m=+0.087311013 container create 0433a21de309759b303d05ded5f1df0ca410b5386b1d71aea46ffd716ce67e71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_williamson, CEPH_REF=reef, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:41:21 compute-0 podman[457900]: 2025-10-11 02:41:21.798759496 +0000 UTC m=+0.054272850 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:41:21 compute-0 systemd[1]: Started libpod-conmon-0433a21de309759b303d05ded5f1df0ca410b5386b1d71aea46ffd716ce67e71.scope.
Oct 11 02:41:21 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:41:22 compute-0 podman[457900]: 2025-10-11 02:41:22.011313189 +0000 UTC m=+0.266826513 container init 0433a21de309759b303d05ded5f1df0ca410b5386b1d71aea46ffd716ce67e71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_williamson, io.buildah.version=1.39.3, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:41:22 compute-0 podman[457900]: 2025-10-11 02:41:22.022402615 +0000 UTC m=+0.277915919 container start 0433a21de309759b303d05ded5f1df0ca410b5386b1d71aea46ffd716ce67e71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_williamson, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 02:41:22 compute-0 musing_williamson[457934]: 167 167
Oct 11 02:41:22 compute-0 podman[457900]: 2025-10-11 02:41:22.030493879 +0000 UTC m=+0.286007203 container attach 0433a21de309759b303d05ded5f1df0ca410b5386b1d71aea46ffd716ce67e71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_williamson, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3)
Oct 11 02:41:22 compute-0 systemd[1]: libpod-0433a21de309759b303d05ded5f1df0ca410b5386b1d71aea46ffd716ce67e71.scope: Deactivated successfully.
Oct 11 02:41:22 compute-0 podman[457900]: 2025-10-11 02:41:22.032921506 +0000 UTC m=+0.288434810 container died 0433a21de309759b303d05ded5f1df0ca410b5386b1d71aea46ffd716ce67e71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_williamson, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:41:22 compute-0 systemd[1]: var-lib-containers-storage-overlay-cac2daf253a1041e5fcd0ee7495649f7d99d5b4f1d4d5e85ae98819d45636323-merged.mount: Deactivated successfully.
Oct 11 02:41:22 compute-0 podman[457900]: 2025-10-11 02:41:22.082470025 +0000 UTC m=+0.337983349 container remove 0433a21de309759b303d05ded5f1df0ca410b5386b1d71aea46ffd716ce67e71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_williamson, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:41:22 compute-0 systemd[1]: libpod-conmon-0433a21de309759b303d05ded5f1df0ca410b5386b1d71aea46ffd716ce67e71.scope: Deactivated successfully.
Oct 11 02:41:22 compute-0 nova_compute[356901]: 2025-10-11 02:41:22.106 2 DEBUG oslo_concurrency.processutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d 2a3deab0-7a22-486d-86a2-2fc870c8ab2d_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.412s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
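Annotation: the two subprocess round-trips traced above (qemu-img info on the cached base file, then rbd import into the vms pool) seed the instance disk in Ceph. A minimal sketch of the same sequence, with paths, pool, and cephx id taken from the log; it assumes both CLI tools are on PATH and the 'openstack' id can write to the pool.

    # Sketch of the qemu-img info + rbd import sequence logged above.
    import json
    import subprocess

    base = "/var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d"

    # inspect the cached base image (same flags as the logged command)
    info = json.loads(subprocess.check_output(
        ["qemu-img", "info", "--force-share", "--output=json", base]))
    print(info["format"], info["virtual-size"])

    # import it as the instance's RBD disk in the 'vms' pool
    subprocess.check_call(
        ["rbd", "import", "--pool", "vms", base,
         "2a3deab0-7a22-486d-86a2-2fc870c8ab2d_disk",
         "--image-format=2", "--id", "openstack",
         "--conf", "/etc/ceph/ceph.conf"])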
Oct 11 02:41:22 compute-0 nova_compute[356901]: 2025-10-11 02:41:22.191 2 DEBUG nova.network.neutron [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Successfully created port: e332b5d8-f31d-4e8a-99d2-7cc96428d93a _create_port_minimal /usr/lib/python3.9/site-packages/nova/network/neutron.py:548
Oct 11 02:41:22 compute-0 nova_compute[356901]: 2025-10-11 02:41:22.205 2 DEBUG nova.storage.rbd_utils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] resizing rbd image 2a3deab0-7a22-486d-86a2-2fc870c8ab2d_disk to 1073741824 resize /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:288
Oct 11 02:41:22 compute-0 podman[458008]: 2025-10-11 02:41:22.323173015 +0000 UTC m=+0.070184330 container create af5612f81734c420dd16111b65884d589774010258e8522106952080c765ed1f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_chatterjee, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:41:22 compute-0 nova_compute[356901]: 2025-10-11 02:41:22.375 2 DEBUG nova.objects.instance [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lazy-loading 'migration_context' on Instance uuid 2a3deab0-7a22-486d-86a2-2fc870c8ab2d obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:41:22 compute-0 systemd[1]: Started libpod-conmon-af5612f81734c420dd16111b65884d589774010258e8522106952080c765ed1f.scope.
Oct 11 02:41:22 compute-0 podman[458008]: 2025-10-11 02:41:22.299654365 +0000 UTC m=+0.046665760 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:41:22 compute-0 nova_compute[356901]: 2025-10-11 02:41:22.402 2 DEBUG nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Created local disks _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4857
Oct 11 02:41:22 compute-0 nova_compute[356901]: 2025-10-11 02:41:22.404 2 DEBUG nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Ensure instance console log exists: /var/lib/nova/instances/2a3deab0-7a22-486d-86a2-2fc870c8ab2d/console.log _ensure_console_log_for_instance /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4609
Oct 11 02:41:22 compute-0 nova_compute[356901]: 2025-10-11 02:41:22.404 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "vgpu_resources" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:22 compute-0 nova_compute[356901]: 2025-10-11 02:41:22.405 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "vgpu_resources" acquired by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:22 compute-0 nova_compute[356901]: 2025-10-11 02:41:22.406 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "vgpu_resources" "released" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
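Annotation: the "Acquiring lock ... acquired ... released" DEBUG triplets throughout this window (compute_resources, vgpu_resources, the image-cache hash) come from oslo.concurrency's lockutils. A minimal sketch of the pattern, assuming the in-process (non-external) lock API; lock names are taken from the log.

    # Sketch of the oslo.concurrency pattern behind the lock DEBUG triplets.
    from oslo_concurrency import lockutils

    @lockutils.synchronized('vgpu_resources')
    def allocate_mdevs():
        # critical section: runs under the named in-process lock
        pass

    # equivalent explicit form, as used for the resource-tracker claim
    with lockutils.lock('compute_resources'):
        pass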
Oct 11 02:41:22 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:41:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/81b1f545387fc7b469a78e988380b9f5a86d0b93ee0671ed114298b6d5b0c98e/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:41:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/81b1f545387fc7b469a78e988380b9f5a86d0b93ee0671ed114298b6d5b0c98e/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:41:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/81b1f545387fc7b469a78e988380b9f5a86d0b93ee0671ed114298b6d5b0c98e/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:41:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/81b1f545387fc7b469a78e988380b9f5a86d0b93ee0671ed114298b6d5b0c98e/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:41:22 compute-0 podman[458008]: 2025-10-11 02:41:22.473702624 +0000 UTC m=+0.220713959 container init af5612f81734c420dd16111b65884d589774010258e8522106952080c765ed1f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_chatterjee, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 02:41:22 compute-0 podman[458008]: 2025-10-11 02:41:22.488473032 +0000 UTC m=+0.235484367 container start af5612f81734c420dd16111b65884d589774010258e8522106952080c765ed1f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_chatterjee, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:41:22 compute-0 podman[458008]: 2025-10-11 02:41:22.493036958 +0000 UTC m=+0.240048293 container attach af5612f81734c420dd16111b65884d589774010258e8522106952080c765ed1f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_chatterjee, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:41:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1913: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 448 KiB/s rd, 13 KiB/s wr, 64 op/s
Oct 11 02:41:22 compute-0 ceph-mon[191930]: pgmap v1913: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 448 KiB/s rd, 13 KiB/s wr, 64 op/s
Oct 11 02:41:23 compute-0 nova_compute[356901]: 2025-10-11 02:41:23.151 2 DEBUG nova.network.neutron [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Successfully updated port: e332b5d8-f31d-4e8a-99d2-7cc96428d93a _update_port /usr/lib/python3.9/site-packages/nova/network/neutron.py:586
Oct 11 02:41:23 compute-0 nova_compute[356901]: 2025-10-11 02:41:23.168 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "refresh_cache-2a3deab0-7a22-486d-86a2-2fc870c8ab2d" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:41:23 compute-0 nova_compute[356901]: 2025-10-11 02:41:23.168 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquired lock "refresh_cache-2a3deab0-7a22-486d-86a2-2fc870c8ab2d" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:41:23 compute-0 nova_compute[356901]: 2025-10-11 02:41:23.169 2 DEBUG nova.network.neutron [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:41:23 compute-0 nova_compute[356901]: 2025-10-11 02:41:23.271 2 DEBUG nova.compute.manager [req-b4bd2899-cfe3-4b93-aa74-d4c97100c527 req-09aae1da-934c-4281-8e13-7346890d2439 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Received event network-changed-e332b5d8-f31d-4e8a-99d2-7cc96428d93a external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:41:23 compute-0 nova_compute[356901]: 2025-10-11 02:41:23.271 2 DEBUG nova.compute.manager [req-b4bd2899-cfe3-4b93-aa74-d4c97100c527 req-09aae1da-934c-4281-8e13-7346890d2439 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Refreshing instance network info cache due to event network-changed-e332b5d8-f31d-4e8a-99d2-7cc96428d93a. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:41:23 compute-0 nova_compute[356901]: 2025-10-11 02:41:23.271 2 DEBUG oslo_concurrency.lockutils [req-b4bd2899-cfe3-4b93-aa74-d4c97100c527 req-09aae1da-934c-4281-8e13-7346890d2439 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-2a3deab0-7a22-486d-86a2-2fc870c8ab2d" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:41:23 compute-0 nova_compute[356901]: 2025-10-11 02:41:23.358 2 DEBUG nova.network.neutron [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Instance cache missing network info. _get_preexisting_port_ids /usr/lib/python3.9/site-packages/nova/network/neutron.py:3323
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]: {
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:         "osd_id": 1,
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:         "type": "bluestore"
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:     },
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:         "osd_id": 2,
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:         "type": "bluestore"
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:     },
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:         "osd_id": 0,
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:         "type": "bluestore"
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]:     }
Oct 11 02:41:23 compute-0 lucid_chatterjee[458044]: }
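Annotation: this second JSON block is ceph-volume raw list output (lucid_chatterjee), keyed by osd_uuid rather than osd_id. A minimal sketch of joining it with the earlier lvm listing on the OSD fsid to get one per-OSD view; both samples are abridged to a single OSD from the log.

    # Sketch: join "raw list" (keyed by osd_uuid) with the earlier
    # "lvm list" (keyed by osd_id) on the OSD fsid. Abridged samples.
    import json

    raw_list = json.loads('''{
      "a9c7940d-c154-46ef-9c18-8ba55dddd3d6":
        {"device": "/dev/mapper/ceph_vg0-ceph_lv0", "osd_id": 0,
         "type": "bluestore"}
    }''')

    lvm_fsids = {"0": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6"}  # osd_id -> fsid

    for uuid, rec in raw_list.items():
        osd_id = str(rec["osd_id"])
        assert lvm_fsids.get(osd_id) == uuid   # the two listings agree
        print(osd_id, rec["device"], rec["type"])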
Oct 11 02:41:23 compute-0 systemd[1]: libpod-af5612f81734c420dd16111b65884d589774010258e8522106952080c765ed1f.scope: Deactivated successfully.
Oct 11 02:41:23 compute-0 podman[458008]: 2025-10-11 02:41:23.680718812 +0000 UTC m=+1.427730167 container died af5612f81734c420dd16111b65884d589774010258e8522106952080c765ed1f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_chatterjee, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS)
Oct 11 02:41:23 compute-0 systemd[1]: libpod-af5612f81734c420dd16111b65884d589774010258e8522106952080c765ed1f.scope: Consumed 1.184s CPU time.
Oct 11 02:41:23 compute-0 systemd[1]: var-lib-containers-storage-overlay-81b1f545387fc7b469a78e988380b9f5a86d0b93ee0671ed114298b6d5b0c98e-merged.mount: Deactivated successfully.
Oct 11 02:41:23 compute-0 nova_compute[356901]: 2025-10-11 02:41:23.753 2 DEBUG nova.virt.driver [-] Emitting event <LifecycleEvent: 1760150468.7520425, 5279e85f-e35b-4ddd-8336-7f483712f743 => Stopped> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:41:23 compute-0 nova_compute[356901]: 2025-10-11 02:41:23.754 2 INFO nova.compute.manager [-] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] VM Stopped (Lifecycle Event)
Oct 11 02:41:23 compute-0 podman[458008]: 2025-10-11 02:41:23.76136404 +0000 UTC m=+1.508375355 container remove af5612f81734c420dd16111b65884d589774010258e8522106952080c765ed1f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_chatterjee, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 02:41:23 compute-0 nova_compute[356901]: 2025-10-11 02:41:23.778 2 DEBUG nova.compute.manager [None req-0a5a532b-9137-4ff3-aff9-cdd3cb0deb05 - - - - - -] [instance: 5279e85f-e35b-4ddd-8336-7f483712f743] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:41:23 compute-0 systemd[1]: libpod-conmon-af5612f81734c420dd16111b65884d589774010258e8522106952080c765ed1f.scope: Deactivated successfully.
Oct 11 02:41:23 compute-0 sudo[457762]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:23 compute-0 nova_compute[356901]: 2025-10-11 02:41:23.819 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:41:23 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:41:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:41:23 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:41:23 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 0538b9b0-5d5c-4240-8d18-ec33fde4da20 does not exist
Oct 11 02:41:23 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev d6861224-fc02-4ffe-8b0d-cb02aa87b49c does not exist
Oct 11 02:41:23 compute-0 sudo[458089]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:41:23 compute-0 sudo[458089]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:23 compute-0 sudo[458089]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:24 compute-0 sudo[458114]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:41:24 compute-0 sudo[458114]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:41:24 compute-0 sudo[458114]: pam_unix(sudo:session): session closed for user root
Oct 11 02:41:24 compute-0 ovn_controller[88370]: 2025-10-11T02:41:24Z|00144|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:41:24 compute-0 ovn_controller[88370]: 2025-10-11T02:41:24Z|00145|binding|INFO|Releasing lport aa37c6ed-d2db-4ed4-b1c9-cfd071cfd96a from this chassis (sb_readonly=0)
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.206 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.476 2 DEBUG nova.network.neutron [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Updating instance_info_cache with network_info: [{"id": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "address": "fa:16:3e:c6:9a:0a", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape332b5d8-f3", "ovs_interfaceid": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.500 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Releasing lock "refresh_cache-2a3deab0-7a22-486d-86a2-2fc870c8ab2d" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.500 2 DEBUG nova.compute.manager [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Instance network_info: |[{"id": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "address": "fa:16:3e:c6:9a:0a", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape332b5d8-f3", "ovs_interfaceid": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}]| _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1967
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.501 2 DEBUG oslo_concurrency.lockutils [req-b4bd2899-cfe3-4b93-aa74-d4c97100c527 req-09aae1da-934c-4281-8e13-7346890d2439 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-2a3deab0-7a22-486d-86a2-2fc870c8ab2d" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.502 2 DEBUG nova.network.neutron [req-b4bd2899-cfe3-4b93-aa74-d4c97100c527 req-09aae1da-934c-4281-8e13-7346890d2439 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Refreshing network info cache for port e332b5d8-f31d-4e8a-99d2-7cc96428d93a _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.507 2 DEBUG nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Start _get_guest_xml network_info=[{"id": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "address": "fa:16:3e:c6:9a:0a", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape332b5d8-f3", "ovs_interfaceid": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] disk_info={'disk_bus': 'virtio', 'cdrom_bus': 'sata', 'mapping': {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.config': {'bus': 'sata', 'dev': 'sda', 'type': 'cdrom'}}} image_meta=ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:38:04Z,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=0,min_ram=0,name='cirros-0.6.2-x86_64-disk.img',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:38:05Z,virtual_size=<?>,visibility=<?>) rescue=None block_device_info={'root_device_name': '/dev/vda', 'image': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'boot_index': 0, 'device_name': '/dev/vda', 'size': 0, 'encryption_format': None, 'image_id': '72f37f2e-4296-450e-9a12-10717f4ac7dc'}], 'ephemerals': [], 'block_device_mapping': [], 'swap': None} _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7549
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.517 2 WARNING nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.524 2 DEBUG nova.virt.libvirt.host [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V1... _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1653
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.524 2 DEBUG nova.virt.libvirt.host [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CPU controller missing on host. _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1663
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.533 2 DEBUG nova.virt.libvirt.host [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V2... _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1672
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.534 2 DEBUG nova.virt.libvirt.host [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CPU controller found on host. _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1679
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.534 2 DEBUG nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CPU mode 'host-model' models '' was chosen, with extra flags: '' _get_guest_cpu_model_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:5396
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.534 2 DEBUG nova.virt.hardware [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Getting desirable topologies for flavor Flavor(created_at=2025-10-11T02:38:03Z,deleted=False,deleted_at=None,description=None,disabled=False,ephemeral_gb=0,extra_specs={hw_rng:allowed='True'},flavorid='6dff30d1-85df-4e9c-9163-a20ba47bb0c7',id=3,is_public=True,memory_mb=128,name='m1.nano',projects=<?>,root_gb=1,rxtx_factor=1.0,swap=0,updated_at=None,vcpu_weight=0,vcpus=1) and image_meta ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:38:04Z,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=0,min_ram=0,name='cirros-0.6.2-x86_64-disk.img',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:38:05Z,virtual_size=<?>,visibility=<?>), allow threads: True _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:563
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.535 2 DEBUG nova.virt.hardware [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Flavor limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:348
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.535 2 DEBUG nova.virt.hardware [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Image limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:352
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.535 2 DEBUG nova.virt.hardware [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Flavor pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:388
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.535 2 DEBUG nova.virt.hardware [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Image pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:392
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.535 2 DEBUG nova.virt.hardware [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Chose sockets=0, cores=0, threads=0; limits were sockets=65536, cores=65536, threads=65536 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:430
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.535 2 DEBUG nova.virt.hardware [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Topology preferred VirtCPUTopology(cores=0,sockets=0,threads=0), maximum VirtCPUTopology(cores=65536,sockets=65536,threads=65536) _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:569
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.536 2 DEBUG nova.virt.hardware [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Build topologies for 1 vcpu(s) 1:1:1 _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:471
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.536 2 DEBUG nova.virt.hardware [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Got 1 possible topologies _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:501
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.536 2 DEBUG nova.virt.hardware [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Possible topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:575
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.536 2 DEBUG nova.virt.hardware [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Sorted desired topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:577
Oct 11 02:41:24 compute-0 nova_compute[356901]: 2025-10-11 02:41:24.539 2 DEBUG oslo_concurrency.processutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:41:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1914: 321 pgs: 321 active+clean; 219 MiB data, 358 MiB used, 60 GiB / 60 GiB avail; 549 KiB/s rd, 582 KiB/s wr, 81 op/s
Oct 11 02:41:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:41:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:41:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:41:25 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/4021451633' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.067 2 DEBUG oslo_concurrency.processutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.528s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.105 2 DEBUG nova.storage.rbd_utils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] rbd image 2a3deab0-7a22-486d-86a2-2fc870c8ab2d_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.113 2 DEBUG oslo_concurrency.processutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.136 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:41:25 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3545033874' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.629 2 DEBUG oslo_concurrency.processutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.517s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.631 2 DEBUG nova.virt.libvirt.vif [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:41:19Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='tempest-TestNetworkBasicOps-server-983701941',display_name='tempest-TestNetworkBasicOps-server-983701941',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-testnetworkbasicops-server-983701941',id=12,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBMITvXHGkIM8LS83kfV77RgpBE+Sw7Cf/gWnJ4njPpAm2utZ405mb/3SnZv98p+/HwAaSeNUeKJwLq/o7HlE9jBBurf1QCYsMBy+p+t8FriZaItil7Hb0u4A6Vs88VckEQ==',key_name='tempest-TestNetworkBasicOps-1838815715',keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='86dfc4ba5e494748b86bc9b983426779',ramdisk_id='',reservation_id='r-43re01xt',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_hw_rng_model='virtio',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-TestNetworkBasicOps-494564743',owner_user_name='tempest-TestNetworkBasicOps-494564743-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:41:21Z,user_data=None,user_id='bcba1b18a2ad479587a15fe415ae307a',uuid=2a3deab0-7a22-486d-86a2-2fc870c8ab2d,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "address": "fa:16:3e:c6:9a:0a", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape332b5d8-f3", "ovs_interfaceid": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} virt_type=kvm get_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:563
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.631 2 DEBUG nova.network.os_vif_util [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Converting VIF {"id": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "address": "fa:16:3e:c6:9a:0a", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape332b5d8-f3", "ovs_interfaceid": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.632 2 DEBUG nova.network.os_vif_util [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:c6:9a:0a,bridge_name='br-int',has_traffic_filtering=True,id=e332b5d8-f31d-4e8a-99d2-7cc96428d93a,network=Network(c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tape332b5d8-f3') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.634 2 DEBUG nova.objects.instance [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lazy-loading 'pci_devices' on Instance uuid 2a3deab0-7a22-486d-86a2-2fc870c8ab2d obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.652 2 DEBUG nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] End _get_guest_xml xml=<domain type="kvm">
Oct 11 02:41:25 compute-0 nova_compute[356901]:   <uuid>2a3deab0-7a22-486d-86a2-2fc870c8ab2d</uuid>
Oct 11 02:41:25 compute-0 nova_compute[356901]:   <name>instance-0000000c</name>
Oct 11 02:41:25 compute-0 nova_compute[356901]:   <memory>131072</memory>
Oct 11 02:41:25 compute-0 nova_compute[356901]:   <vcpu>1</vcpu>
Oct 11 02:41:25 compute-0 nova_compute[356901]:   <metadata>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.1">
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <nova:package version="27.5.2-0.20250829104910.6f8decf.el9"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <nova:name>tempest-TestNetworkBasicOps-server-983701941</nova:name>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <nova:creationTime>2025-10-11 02:41:24</nova:creationTime>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <nova:flavor name="m1.nano">
Oct 11 02:41:25 compute-0 nova_compute[356901]:         <nova:memory>128</nova:memory>
Oct 11 02:41:25 compute-0 nova_compute[356901]:         <nova:disk>1</nova:disk>
Oct 11 02:41:25 compute-0 nova_compute[356901]:         <nova:swap>0</nova:swap>
Oct 11 02:41:25 compute-0 nova_compute[356901]:         <nova:ephemeral>0</nova:ephemeral>
Oct 11 02:41:25 compute-0 nova_compute[356901]:         <nova:vcpus>1</nova:vcpus>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       </nova:flavor>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <nova:owner>
Oct 11 02:41:25 compute-0 nova_compute[356901]:         <nova:user uuid="bcba1b18a2ad479587a15fe415ae307a">tempest-TestNetworkBasicOps-494564743-project-member</nova:user>
Oct 11 02:41:25 compute-0 nova_compute[356901]:         <nova:project uuid="86dfc4ba5e494748b86bc9b983426779">tempest-TestNetworkBasicOps-494564743</nova:project>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       </nova:owner>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <nova:root type="image" uuid="72f37f2e-4296-450e-9a12-10717f4ac7dc"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <nova:ports>
Oct 11 02:41:25 compute-0 nova_compute[356901]:         <nova:port uuid="e332b5d8-f31d-4e8a-99d2-7cc96428d93a">
Oct 11 02:41:25 compute-0 nova_compute[356901]:           <nova:ip type="fixed" address="10.100.0.4" ipVersion="4"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:         </nova:port>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       </nova:ports>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     </nova:instance>
Oct 11 02:41:25 compute-0 nova_compute[356901]:   </metadata>
Oct 11 02:41:25 compute-0 nova_compute[356901]:   <sysinfo type="smbios">
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <system>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <entry name="manufacturer">RDO</entry>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <entry name="product">OpenStack Compute</entry>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <entry name="version">27.5.2-0.20250829104910.6f8decf.el9</entry>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <entry name="serial">2a3deab0-7a22-486d-86a2-2fc870c8ab2d</entry>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <entry name="uuid">2a3deab0-7a22-486d-86a2-2fc870c8ab2d</entry>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <entry name="family">Virtual Machine</entry>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     </system>
Oct 11 02:41:25 compute-0 nova_compute[356901]:   </sysinfo>
Oct 11 02:41:25 compute-0 nova_compute[356901]:   <os>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <type arch="x86_64" machine="q35">hvm</type>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <boot dev="hd"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <smbios mode="sysinfo"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:   </os>
Oct 11 02:41:25 compute-0 nova_compute[356901]:   <features>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <acpi/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <apic/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <vmcoreinfo/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:   </features>
Oct 11 02:41:25 compute-0 nova_compute[356901]:   <clock offset="utc">
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <timer name="pit" tickpolicy="delay"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <timer name="rtc" tickpolicy="catchup"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <timer name="hpet" present="no"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:   </clock>
Oct 11 02:41:25 compute-0 nova_compute[356901]:   <cpu mode="host-model" match="exact">
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <topology sockets="1" cores="1" threads="1"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:41:25 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/2a3deab0-7a22-486d-86a2-2fc870c8ab2d_disk">
Oct 11 02:41:25 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       </source>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:41:25 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <target dev="vda" bus="virtio"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <disk type="network" device="cdrom">
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/2a3deab0-7a22-486d-86a2-2fc870c8ab2d_disk.config">
Oct 11 02:41:25 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       </source>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:41:25 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <target dev="sda" bus="sata"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <interface type="ethernet">
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <mac address="fa:16:3e:c6:9a:0a"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <driver name="vhost" rx_queue_size="512"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <mtu size="1442"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <target dev="tape332b5d8-f3"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <serial type="pty">
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <log file="/var/lib/nova/instances/2a3deab0-7a22-486d-86a2-2fc870c8ab2d/console.log" append="off"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     </serial>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <graphics type="vnc" autoport="yes" listen="::0"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <video>
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     </video>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <input type="tablet" bus="usb"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <rng model="virtio">
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <backend model="random">/dev/urandom</backend>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <controller type="usb" index="0"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     <memballoon model="virtio">
Oct 11 02:41:25 compute-0 nova_compute[356901]:       <stats period="10"/>
Oct 11 02:41:25 compute-0 nova_compute[356901]:     </memballoon>
Oct 11 02:41:25 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:41:25 compute-0 nova_compute[356901]: </domain>
Oct 11 02:41:25 compute-0 nova_compute[356901]:  _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7555
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.654 2 DEBUG nova.compute.manager [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Preparing to wait for external event network-vif-plugged-e332b5d8-f31d-4e8a-99d2-7cc96428d93a prepare_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:283
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.654 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.655 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" acquired by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.655 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" "released" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.656 2 DEBUG nova.virt.libvirt.vif [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:41:19Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='tempest-TestNetworkBasicOps-server-983701941',display_name='tempest-TestNetworkBasicOps-server-983701941',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-testnetworkbasicops-server-983701941',id=12,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBMITvXHGkIM8LS83kfV77RgpBE+Sw7Cf/gWnJ4njPpAm2utZ405mb/3SnZv98p+/HwAaSeNUeKJwLq/o7HlE9jBBurf1QCYsMBy+p+t8FriZaItil7Hb0u4A6Vs88VckEQ==',key_name='tempest-TestNetworkBasicOps-1838815715',keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=PciDeviceList,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='86dfc4ba5e494748b86bc9b983426779',ramdisk_id='',reservation_id='r-43re01xt',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_hw_rng_model='virtio',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-TestNetworkBasicOps-494564743',owner_user_name='tempest-TestNetworkBasicOps-494564743-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:41:21Z,user_data=None,user_id='bcba1b18a2ad479587a15fe415ae307a',uuid=2a3deab0-7a22-486d-86a2-2fc870c8ab2d,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "address": "fa:16:3e:c6:9a:0a", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape332b5d8-f3", "ovs_interfaceid": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} plug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:710
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.656 2 DEBUG nova.network.os_vif_util [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Converting VIF {"id": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "address": "fa:16:3e:c6:9a:0a", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape332b5d8-f3", "ovs_interfaceid": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.657 2 DEBUG nova.network.os_vif_util [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:c6:9a:0a,bridge_name='br-int',has_traffic_filtering=True,id=e332b5d8-f31d-4e8a-99d2-7cc96428d93a,network=Network(c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tape332b5d8-f3') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.657 2 DEBUG os_vif [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Plugging vif VIFOpenVSwitch(active=False,address=fa:16:3e:c6:9a:0a,bridge_name='br-int',has_traffic_filtering=True,id=e332b5d8-f31d-4e8a-99d2-7cc96428d93a,network=Network(c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tape332b5d8-f3') plug /usr/lib/python3.9/site-packages/os_vif/__init__.py:76
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.658 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.658 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddBridgeCommand(_result=None, name=br-int, may_exist=True, datapath_type=system) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.658 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.662 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.662 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tape332b5d8-f3, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.663 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Interface, record=tape332b5d8-f3, col_values=(('external_ids', {'iface-id': 'e332b5d8-f31d-4e8a-99d2-7cc96428d93a', 'iface-status': 'active', 'attached-mac': 'fa:16:3e:c6:9a:0a', 'vm-uuid': '2a3deab0-7a22-486d-86a2-2fc870c8ab2d'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:41:25 compute-0 NetworkManager[44908]: <info>  [1760150485.6658] manager: (tape332b5d8-f3): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/66)
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.669 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.679 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.681 2 INFO os_vif [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Successfully plugged vif VIFOpenVSwitch(active=False,address=fa:16:3e:c6:9a:0a,bridge_name='br-int',has_traffic_filtering=True,id=e332b5d8-f31d-4e8a-99d2-7cc96428d93a,network=Network(c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tape332b5d8-f3')
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.807 2 DEBUG nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] No BDM found with device name vda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.807 2 DEBUG nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] No BDM found with device name sda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.807 2 DEBUG nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] No VIF found with MAC fa:16:3e:c6:9a:0a, not building metadata _build_interface_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12092
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.808 2 INFO nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Using config drive
Oct 11 02:41:25 compute-0 ceph-mon[191930]: pgmap v1914: 321 pgs: 321 active+clean; 219 MiB data, 358 MiB used, 60 GiB / 60 GiB avail; 549 KiB/s rd, 582 KiB/s wr, 81 op/s
Oct 11 02:41:25 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/4021451633' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:41:25 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3545033874' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:41:25 compute-0 nova_compute[356901]: 2025-10-11 02:41:25.848 2 DEBUG nova.storage.rbd_utils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] rbd image 2a3deab0-7a22-486d-86a2-2fc870c8ab2d_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:41:26 compute-0 podman[458224]: 2025-10-11 02:41:26.212784569 +0000 UTC m=+0.089667238 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_metadata_agent, org.label-schema.build-date=20251009, container_name=ovn_metadata_agent, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:41:26 compute-0 podman[458223]: 2025-10-11 02:41:26.23849373 +0000 UTC m=+0.109463976 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, org.label-schema.license=GPLv2, config_id=edpm)
Oct 11 02:41:26 compute-0 podman[458221]: 2025-10-11 02:41:26.244872726 +0000 UTC m=+0.120932782 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:41:26 compute-0 podman[458222]: 2025-10-11 02:41:26.26385912 +0000 UTC m=+0.145185242 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_controller, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:41:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #93. Immutable memtables: 0.
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:26.276728) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 53] Flushing memtable with next log file: 93
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150486276849, "job": 53, "event": "flush_started", "num_memtables": 1, "num_entries": 440, "num_deletes": 250, "total_data_size": 323252, "memory_usage": 333144, "flush_reason": "Manual Compaction"}
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 53] Level-0 flush table #94: started
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150486283489, "cf_name": "default", "job": 53, "event": "table_file_creation", "file_number": 94, "file_size": 321161, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 39394, "largest_seqno": 39833, "table_properties": {"data_size": 318563, "index_size": 634, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 901, "raw_key_size": 5306, "raw_average_key_size": 15, "raw_value_size": 313469, "raw_average_value_size": 935, "num_data_blocks": 27, "num_entries": 335, "num_filter_entries": 335, "num_deletions": 250, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760150468, "oldest_key_time": 1760150468, "file_creation_time": 1760150486, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 94, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 53] Flush lasted 6788 microseconds, and 2501 cpu microseconds.
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:26.283527) [db/flush_job.cc:967] [default] [JOB 53] Level-0 flush table #94: 321161 bytes OK
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:26.283552) [db/memtable_list.cc:519] [default] Level-0 commit table #94 started
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:26.285817) [db/memtable_list.cc:722] [default] Level-0 commit table #94: memtable #1 done
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:26.285832) EVENT_LOG_v1 {"time_micros": 1760150486285826, "job": 53, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:26.285845) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 53] Try to delete WAL files size 320546, prev total WAL file size 320546, number of live WAL files 2.
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000090.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:26.286956) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6B760030' seq:72057594037927935, type:22 .. '6B7600323531' seq:0, type:0; will stop at (end)
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 54] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 53 Base level 0, inputs: [94(313KB)], [92(6761KB)]
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150486287040, "job": 54, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [94], "files_L6": [92], "score": -1, "input_data_size": 7245448, "oldest_snapshot_seqno": -1}
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 54] Generated table #95: 5414 keys, 6525429 bytes, temperature: kUnknown
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150486334779, "cf_name": "default", "job": 54, "event": "table_file_creation", "file_number": 95, "file_size": 6525429, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 6492381, "index_size": 18410, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 13573, "raw_key_size": 141414, "raw_average_key_size": 26, "raw_value_size": 6397299, "raw_average_value_size": 1181, "num_data_blocks": 729, "num_entries": 5414, "num_filter_entries": 5414, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760150486, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 95, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:26.335619) [db/compaction/compaction_job.cc:1663] [default] [JOB 54] Compacted 1@0 + 1@6 files to L6 => 6525429 bytes
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:26.337694) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 151.5 rd, 136.4 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(0.3, 6.6 +0.0 blob) out(6.2 +0.0 blob), read-write-amplify(42.9) write-amplify(20.3) OK, records in: 5925, records dropped: 511 output_compression: NoCompression
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:26.337712) EVENT_LOG_v1 {"time_micros": 1760150486337703, "job": 54, "event": "compaction_finished", "compaction_time_micros": 47826, "compaction_time_cpu_micros": 25110, "output_level": 6, "num_output_files": 1, "total_output_size": 6525429, "num_input_records": 5925, "num_output_records": 5414, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000094.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150486338166, "job": 54, "event": "table_file_deletion", "file_number": 94}
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000092.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150486340141, "job": 54, "event": "table_file_deletion", "file_number": 92}
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:26.286751) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:26.340480) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:26.340484) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:26.340485) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:26.340487) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:41:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:41:26.340488) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:41:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:41:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:41:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:41:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:41:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:41:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:41:26 compute-0 nova_compute[356901]: 2025-10-11 02:41:26.678 2 INFO nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Creating config drive at /var/lib/nova/instances/2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.config
Oct 11 02:41:26 compute-0 nova_compute[356901]: 2025-10-11 02:41:26.685 2 DEBUG oslo_concurrency.processutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Running cmd (subprocess): /usr/bin/mkisofs -o /var/lib/nova/instances/2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmp9ea3wij1 execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:41:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1915: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 541 KiB/s rd, 1.8 MiB/s wr, 71 op/s
Oct 11 02:41:26 compute-0 nova_compute[356901]: 2025-10-11 02:41:26.835 2 DEBUG oslo_concurrency.processutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CMD "/usr/bin/mkisofs -o /var/lib/nova/instances/2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmp9ea3wij1" returned: 0 in 0.150s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:41:26 compute-0 nova_compute[356901]: 2025-10-11 02:41:26.883 2 DEBUG nova.storage.rbd_utils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] rbd image 2a3deab0-7a22-486d-86a2-2fc870c8ab2d_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:41:26 compute-0 nova_compute[356901]: 2025-10-11 02:41:26.896 2 DEBUG oslo_concurrency.processutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.config 2a3deab0-7a22-486d-86a2-2fc870c8ab2d_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:26.998 2 DEBUG nova.network.neutron [req-b4bd2899-cfe3-4b93-aa74-d4c97100c527 req-09aae1da-934c-4281-8e13-7346890d2439 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Updated VIF entry in instance network info cache for port e332b5d8-f31d-4e8a-99d2-7cc96428d93a. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:27.001 2 DEBUG nova.network.neutron [req-b4bd2899-cfe3-4b93-aa74-d4c97100c527 req-09aae1da-934c-4281-8e13-7346890d2439 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Updating instance_info_cache with network_info: [{"id": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "address": "fa:16:3e:c6:9a:0a", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape332b5d8-f3", "ovs_interfaceid": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:27.026 2 DEBUG oslo_concurrency.lockutils [req-b4bd2899-cfe3-4b93-aa74-d4c97100c527 req-09aae1da-934c-4281-8e13-7346890d2439 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-2a3deab0-7a22-486d-86a2-2fc870c8ab2d" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:27.150 2 DEBUG oslo_concurrency.processutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.config 2a3deab0-7a22-486d-86a2-2fc870c8ab2d_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.253s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:27.151 2 INFO nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Deleting local config drive /var/lib/nova/instances/2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.config because it was imported into RBD.
Oct 11 02:41:27 compute-0 virtqemud[153560]: End of file while reading data: Input/output error
Oct 11 02:41:27 compute-0 kernel: tape332b5d8-f3: entered promiscuous mode
Oct 11 02:41:27 compute-0 NetworkManager[44908]: <info>  [1760150487.2574] manager: (tape332b5d8-f3): new Tun device (/org/freedesktop/NetworkManager/Devices/67)
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:27.261 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:27 compute-0 ovn_controller[88370]: 2025-10-11T02:41:27Z|00146|binding|INFO|Claiming lport e332b5d8-f31d-4e8a-99d2-7cc96428d93a for this chassis.
Oct 11 02:41:27 compute-0 ovn_controller[88370]: 2025-10-11T02:41:27Z|00147|binding|INFO|e332b5d8-f31d-4e8a-99d2-7cc96428d93a: Claiming fa:16:3e:c6:9a:0a 10.100.0.4
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.272 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:c6:9a:0a 10.100.0.4'], port_security=['fa:16:3e:c6:9a:0a 10.100.0.4'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.4/28', 'neutron:device_id': '2a3deab0-7a22-486d-86a2-2fc870c8ab2d', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': '86dfc4ba5e494748b86bc9b983426779', 'neutron:revision_number': '2', 'neutron:security_group_ids': '856a6c8c-5c19-45b0-83e8-c2918301c124', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=b0ebf874-dd0a-4bac-aa4a-3eee85fcb8ba, chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=e332b5d8-f31d-4e8a-99d2-7cc96428d93a) old=Port_Binding(chassis=[]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.277 286362 INFO neutron.agent.ovn.metadata.agent [-] Port e332b5d8-f31d-4e8a-99d2-7cc96428d93a in datapath c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635 bound to our chassis
Oct 11 02:41:27 compute-0 ceph-mon[191930]: pgmap v1915: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 541 KiB/s rd, 1.8 MiB/s wr, 71 op/s
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.285 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635
Oct 11 02:41:27 compute-0 ovn_controller[88370]: 2025-10-11T02:41:27Z|00148|binding|INFO|Setting lport e332b5d8-f31d-4e8a-99d2-7cc96428d93a ovn-installed in OVS
Oct 11 02:41:27 compute-0 ovn_controller[88370]: 2025-10-11T02:41:27Z|00149|binding|INFO|Setting lport e332b5d8-f31d-4e8a-99d2-7cc96428d93a up in Southbound
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.300 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[7c24634a-bd9f-4a82-8720-ab551bd2a4cb]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.301 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Creating VETH tapc35c5e7e-41 in ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635 namespace provision_datapath /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:665
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:27.302 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.305 422955 DEBUG neutron.privileged.agent.linux.ip_lib [-] Interface tapc35c5e7e-40 not found in namespace None get_link_id /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:204
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.306 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[7cd05adf-760e-461a-9f17-48b3d15432b6]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.307 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[18bc452b-49fb-4a99-ab98-042f9f8d6114]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:27.309 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.326 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[e40ee4d8-4b7e-40e8-a548-473324c23957]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:27 compute-0 systemd-machined[137586]: New machine qemu-13-instance-0000000c.
Oct 11 02:41:27 compute-0 systemd-udevd[458358]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:41:27 compute-0 systemd[1]: Started Virtual Machine qemu-13-instance-0000000c.
Oct 11 02:41:27 compute-0 NetworkManager[44908]: <info>  [1760150487.3606] device (tape332b5d8-f3): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.357 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[84f2d43d-e3cc-4e62-8819-98fe1b25de33]: (4, ('net.ipv4.conf.all.promote_secondaries = 1\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:27 compute-0 NetworkManager[44908]: <info>  [1760150487.3618] device (tape332b5d8-f3): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.395 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[452a6fd5-42dd-4e29-b3d1-42a2739a33a3]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.402 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[f98a5094-e281-4fc2-93dd-cf9d9960460a]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:27 compute-0 NetworkManager[44908]: <info>  [1760150487.4048] manager: (tapc35c5e7e-40): new Veth device (/org/freedesktop/NetworkManager/Devices/68)
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.445 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[f58b1d3c-60c2-42e0-89f5-5750422b7c38]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.448 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[e8ab34fc-8e98-4a6d-b145-ff09930c1ea4]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:27 compute-0 NetworkManager[44908]: <info>  [1760150487.4806] device (tapc35c5e7e-40): carrier: link connected
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.489 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[44c5c0fc-8c2c-4d46-8d12-fc30b96f8794]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.515 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[4ae4329c-814f-4466-9ee7-67b0ca5e8dd5]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapc35c5e7e-41'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:b1:e9:cd'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 43], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 683520, 'reachable_time': 17291, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 458388, 'error': None, 'target': 'ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.552 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[4efcdacb-00ba-40c8-807a-4b73d1650aa4]: (4, ({'family': 10, 'prefixlen': 64, 'flags': 192, 'scope': 253, 'index': 2, 'attrs': [['IFA_ADDRESS', 'fe80::f816:3eff:feb1:e9cd'], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 683520, 'tstamp': 683520}], ['IFA_FLAGS', 192]], 'header': {'length': 72, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 458389, 'error': None, 'target': 'ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'},)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.578 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[8e3f6d28-7070-4631-80f8-d8b4cadb0d0a]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapc35c5e7e-41'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:b1:e9:cd'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 43], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 683520, 'reachable_time': 17291, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 0, 'sequence_number': 255, 'pid': 458390, 'error': None, 'target': 'ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.632 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[cf24eaf2-bd9e-4221-b54d-f43cde97245b]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:27.657 2 DEBUG nova.compute.manager [req-8e787e36-62d4-4be8-8929-ed3a198d5dc6 req-d82a5750-607c-44b2-acf6-cea77d399482 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Received event network-vif-plugged-e332b5d8-f31d-4e8a-99d2-7cc96428d93a external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:27.657 2 DEBUG oslo_concurrency.lockutils [req-8e787e36-62d4-4be8-8929-ed3a198d5dc6 req-d82a5750-607c-44b2-acf6-cea77d399482 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:27.658 2 DEBUG oslo_concurrency.lockutils [req-8e787e36-62d4-4be8-8929-ed3a198d5dc6 req-d82a5750-607c-44b2-acf6-cea77d399482 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:27.661 2 DEBUG oslo_concurrency.lockutils [req-8e787e36-62d4-4be8-8929-ed3a198d5dc6 req-d82a5750-607c-44b2-acf6-cea77d399482 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.003s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:27.662 2 DEBUG nova.compute.manager [req-8e787e36-62d4-4be8-8929-ed3a198d5dc6 req-d82a5750-607c-44b2-acf6-cea77d399482 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Processing event network-vif-plugged-e332b5d8-f31d-4e8a-99d2-7cc96428d93a _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10808
Oct 11 02:41:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:41:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1628506977' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:41:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:41:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1628506977' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.731 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[b32361ee-9883-43e2-b2e2-6178a0654314]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.733 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapc35c5e7e-40, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.733 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.733 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapc35c5e7e-40, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:27.735 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:27 compute-0 kernel: tapc35c5e7e-40: entered promiscuous mode
Oct 11 02:41:27 compute-0 NetworkManager[44908]: <info>  [1760150487.7364] manager: (tapc35c5e7e-40): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/69)
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:27.740 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.745 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tapc35c5e7e-40, col_values=(('external_ids', {'iface-id': 'ffb676d8-51f5-4de3-a31a-71adc7412138'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:27.746 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:27 compute-0 ovn_controller[88370]: 2025-10-11T02:41:27Z|00150|binding|INFO|Releasing lport ffb676d8-51f5-4de3-a31a-71adc7412138 from this chassis (sb_readonly=0)
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:27.747 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.749 286362 DEBUG neutron.agent.linux.utils [-] Unable to access /var/lib/neutron/external/pids/c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635.pid.haproxy; Error: [Errno 2] No such file or directory: '/var/lib/neutron/external/pids/c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635.pid.haproxy' get_value_from_file /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:252
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.749 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[b212517f-b23c-4be6-9263-0ef75f660c62]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.750 286362 DEBUG neutron.agent.ovn.metadata.driver [-] haproxy_cfg = 
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: global
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     log         /dev/log local0 debug
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     log-tag     haproxy-metadata-proxy-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     user        root
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     group       root
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     maxconn     1024
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     pidfile     /var/lib/neutron/external/pids/c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635.pid.haproxy
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     daemon
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: defaults
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     log global
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     mode http
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     option httplog
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     option dontlognull
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     option http-server-close
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     option forwardfor
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     retries                 3
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     timeout http-request    30s
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     timeout connect         30s
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     timeout client          32s
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     timeout server          32s
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     timeout http-keep-alive 30s
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: listen listener
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     bind 169.254.169.254:80
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     server metadata /var/lib/neutron/metadata_proxy
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:     http-request add-header X-OVN-Network-ID c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]:  create_config_file /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/driver.py:107
Oct 11 02:41:27 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:27.751 286362 DEBUG neutron.agent.linux.utils [-] Running command: ['sudo', 'neutron-rootwrap', '/etc/neutron/rootwrap.conf', 'ip', 'netns', 'exec', 'ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635', 'env', 'PROCESS_TAG=haproxy-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635', 'haproxy', '-f', '/var/lib/neutron/ovn-metadata-proxy/c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635.conf'] create_process /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:84
Oct 11 02:41:27 compute-0 nova_compute[356901]: 2025-10-11 02:41:27.763 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:28 compute-0 podman[458461]: 2025-10-11 02:41:28.178847879 +0000 UTC m=+0.062817826 container create 13f91ee0043106e729c13bb04ee2289ca5c3e36406333ea3ecfe7dbd1fd7590e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true)
Oct 11 02:41:28 compute-0 systemd[1]: Started libpod-conmon-13f91ee0043106e729c13bb04ee2289ca5c3e36406333ea3ecfe7dbd1fd7590e.scope.
Oct 11 02:41:28 compute-0 podman[458461]: 2025-10-11 02:41:28.147900694 +0000 UTC m=+0.031870651 image pull 1061e4fafe13e0b9aa1ef2c904ba4ad70c44f3e87b1d831f16c6db34937f4022 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Oct 11 02:41:28 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:41:28 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5138affe420e75c815ea5d7a265622e95016109742cc4959933cbbd024cbf1f9/merged/var/lib/neutron supports timestamps until 2038 (0x7fffffff)
Oct 11 02:41:28 compute-0 podman[458461]: 2025-10-11 02:41:28.297745514 +0000 UTC m=+0.181715461 container init 13f91ee0043106e729c13bb04ee2289ca5c3e36406333ea3ecfe7dbd1fd7590e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635, tcib_managed=true, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:41:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1628506977' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:41:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1628506977' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:41:28 compute-0 podman[458461]: 2025-10-11 02:41:28.308673266 +0000 UTC m=+0.192643183 container start 13f91ee0043106e729c13bb04ee2289ca5c3e36406333ea3ecfe7dbd1fd7590e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 02:41:28 compute-0 neutron-haproxy-ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635[458476]: [NOTICE]   (458480) : New worker (458482) forked
Oct 11 02:41:28 compute-0 neutron-haproxy-ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635[458476]: [NOTICE]   (458480) : Loading success.
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.578 2 DEBUG nova.compute.manager [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Instance event wait completed in 0 seconds for network-vif-plugged wait_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:577
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.578 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150488.5763085, 2a3deab0-7a22-486d-86a2-2fc870c8ab2d => Started> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.579 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] VM Started (Lifecycle Event)
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.587 2 DEBUG nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Guest created on hypervisor spawn /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4417
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.595 2 INFO nova.virt.libvirt.driver [-] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Instance spawned successfully.
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.595 2 DEBUG nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Attempting to register defaults for the following image properties: ['hw_cdrom_bus', 'hw_disk_bus', 'hw_input_bus', 'hw_pointer_model', 'hw_video_model', 'hw_vif_model'] _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:917
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.599 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.605 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Synchronizing instance power state after lifecycle event "Started"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.616 2 DEBUG nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Found default for hw_cdrom_bus of sata _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.616 2 DEBUG nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Found default for hw_disk_bus of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.616 2 DEBUG nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Found default for hw_input_bus of usb _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.617 2 DEBUG nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Found default for hw_pointer_model of usbtablet _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.617 2 DEBUG nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Found default for hw_video_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.617 2 DEBUG nova.virt.libvirt.driver [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Found default for hw_vif_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.624 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.626 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150488.5764472, 2a3deab0-7a22-486d-86a2-2fc870c8ab2d => Paused> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.626 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] VM Paused (Lifecycle Event)
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.648 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.654 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150488.585441, 2a3deab0-7a22-486d-86a2-2fc870c8ab2d => Resumed> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.654 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] VM Resumed (Lifecycle Event)
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.664 2 INFO nova.compute.manager [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Took 7.23 seconds to spawn the instance on the hypervisor.
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.664 2 DEBUG nova.compute.manager [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.673 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.679 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Synchronizing instance power state after lifecycle event "Resumed"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.699 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] During sync_power_state the instance has a pending task (spawning). Skip.
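Both sync_power_state skips follow the same rule: while a task (here "spawning") is in flight, the sync must not act on a transient power state, even though the DB says 0 (NOSTATE) and the hypervisor says 1 (RUNNING). A condensed sketch of that guard (hypothetical helper, not Nova's code; constants match the log line above):

    NOSTATE, RUNNING = 0, 1  # DB power_state 0, VM power_state 1 in the log

    def sync_power_state(instance: dict, vm_power_state: int) -> int:
        """Return the power state to store, skipping while a task is pending."""
        if instance.get("task_state") is not None:
            print("During sync_power_state the instance has a pending task "
                  f"({instance['task_state']}). Skip.")
            return instance["db_power_state"]
        return vm_power_state

    assert sync_power_state(
        {"task_state": "spawning", "db_power_state": NOSTATE}, RUNNING) == NOSTATE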
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.726 2 INFO nova.compute.manager [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Took 8.61 seconds to build instance.
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.739 2 DEBUG oslo_concurrency.lockutils [None req-311978a7-33ca-4cac-8b6f-c311f8a6f10b bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d" "released" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: held 8.686s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
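The acquire/release bracket around the build ("held 8.686s") is oslo.concurrency's lockutils serializing all operations on one instance UUID. Assuming the oslo.concurrency package, the pattern reduces to roughly:

    from oslo_concurrency import lockutils

    # One lock per instance UUID: concurrent builds/terminations of the same
    # instance queue up here, while different instances proceed in parallel.
    # Acquire, wait, and hold times are logged as in the lines above.
    @lockutils.synchronized("2a3deab0-7a22-486d-86a2-2fc870c8ab2d")
    def _locked_do_build_and_run_instance():
        ...  # spawn on the hypervisor; the release line reports the hold time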
Oct 11 02:41:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1916: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 542 KiB/s rd, 1.8 MiB/s wr, 71 op/s
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._run_pending_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11145
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.922 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] There are 0 instances to clean _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11154
Oct 11 02:41:28 compute-0 nova_compute[356901]: 2025-10-11 02:41:28.923 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_expired_console_auth_tokens run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
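_run_pending_deletes and _cleanup_expired_console_auth_tokens are oslo.service periodic tasks: the manager registers them with a decorator and a worker invokes them on a spacing. A minimal sketch, assuming oslo.service and oslo.config (the spacing value is illustrative):

    from oslo_config import cfg
    from oslo_service import periodic_task

    class Manager(periodic_task.PeriodicTasks):
        def __init__(self):
            super().__init__(cfg.CONF)

        @periodic_task.periodic_task(spacing=300)  # seconds; illustrative
        def _run_pending_deletes(self, context):
            print("Cleaning up deleted instances")

    mgr = Manager()
    # In a real service a looping thread calls this on a timer.
    mgr.run_periodic_tasks(context=None)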
Oct 11 02:41:29 compute-0 ceph-mon[191930]: pgmap v1916: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 542 KiB/s rd, 1.8 MiB/s wr, 71 op/s
Oct 11 02:41:29 compute-0 podman[157119]: time="2025-10-11T02:41:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:41:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:41:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 48733 "" "Go-http-client/1.1"
Oct 11 02:41:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:41:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9989 "" "Go-http-client/1.1"
Oct 11 02:41:29 compute-0 nova_compute[356901]: 2025-10-11 02:41:29.882 2 DEBUG nova.compute.manager [req-b20a00f0-01cd-4835-b023-22b328c9d291 req-761a4edc-6128-4d04-a5db-bd4d6fe6d1c1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Received event network-vif-plugged-e332b5d8-f31d-4e8a-99d2-7cc96428d93a external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:41:29 compute-0 nova_compute[356901]: 2025-10-11 02:41:29.883 2 DEBUG oslo_concurrency.lockutils [req-b20a00f0-01cd-4835-b023-22b328c9d291 req-761a4edc-6128-4d04-a5db-bd4d6fe6d1c1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:29 compute-0 nova_compute[356901]: 2025-10-11 02:41:29.883 2 DEBUG oslo_concurrency.lockutils [req-b20a00f0-01cd-4835-b023-22b328c9d291 req-761a4edc-6128-4d04-a5db-bd4d6fe6d1c1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:29 compute-0 nova_compute[356901]: 2025-10-11 02:41:29.883 2 DEBUG oslo_concurrency.lockutils [req-b20a00f0-01cd-4835-b023-22b328c9d291 req-761a4edc-6128-4d04-a5db-bd4d6fe6d1c1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:41:29 compute-0 nova_compute[356901]: 2025-10-11 02:41:29.884 2 DEBUG nova.compute.manager [req-b20a00f0-01cd-4835-b023-22b328c9d291 req-761a4edc-6128-4d04-a5db-bd4d6fe6d1c1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] No waiting events found dispatching network-vif-plugged-e332b5d8-f31d-4e8a-99d2-7cc96428d93a pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:41:29 compute-0 nova_compute[356901]: 2025-10-11 02:41:29.884 2 WARNING nova.compute.manager [req-b20a00f0-01cd-4835-b023-22b328c9d291 req-761a4edc-6128-4d04-a5db-bd4d6fe6d1c1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Received unexpected event network-vif-plugged-e332b5d8-f31d-4e8a-99d2-7cc96428d93a for instance with vm_state active and task_state None.
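The WARNING is benign ordering noise: Neutron delivered network-vif-plugged after the instance had already gone active, so no waiter was registered under the "<uuid>-events" lock and the event fell through to the warning. The pop-or-warn logic reduces to something like this (stdlib-only sketch; the real version lives in nova.compute.manager.InstanceEvents):

    import threading
    from collections import defaultdict

    class InstanceEvents:
        def __init__(self):
            self._lock = threading.Lock()       # the "<uuid>-events" lock above
            self._waiters = defaultdict(dict)   # uuid -> {event_name: waiter}

        def pop_instance_event(self, uuid: str, event: str):
            with self._lock:
                waiter = self._waiters[uuid].pop(event, None)
            if waiter is None:
                print(f"No waiting events found dispatching {event}")
            return waiter

    ev = InstanceEvents()
    name = "network-vif-plugged-e332b5d8-f31d-4e8a-99d2-7cc96428d93a"
    if ev.pop_instance_event("2a3deab0-7a22-486d-86a2-2fc870c8ab2d", name) is None:
        print(f"Received unexpected event {name} for instance "
              "with vm_state active and task_state None.")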
Oct 11 02:41:30 compute-0 nova_compute[356901]: 2025-10-11 02:41:30.103 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:30 compute-0 nova_compute[356901]: 2025-10-11 02:41:30.666 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1917: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 315 KiB/s rd, 1.8 MiB/s wr, 61 op/s
Oct 11 02:41:30 compute-0 ceph-mon[191930]: pgmap v1917: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 315 KiB/s rd, 1.8 MiB/s wr, 61 op/s
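The recurring ceph pgmap lines are the cluster heartbeat: PG states, capacity, and client throughput, duplicated by ceph-mgr and ceph-mon. If you need them programmatically, a quick parser for this exact format (regex written against the lines above; other Ceph versions may differ):

    import re

    PGMAP = re.compile(
        r"pgmap v(?P<version>\d+): (?P<pgs>\d+) pgs: .*?; "
        r"(?P<data>\S+ \w+) data, (?P<used>\S+ \w+) used, "
        r"(?P<avail>\S+ \w+) / (?P<total>\S+ \w+) avail")

    line = ("pgmap v1917: 321 pgs: 321 active+clean; 246 MiB data, "
            "372 MiB used, 60 GiB / 60 GiB avail; 315 KiB/s rd, "
            "1.8 MiB/s wr, 61 op/s")
    m = PGMAP.search(line)
    print(m.group("version"), m.group("used"))   # -> 1917 372 MiB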
Oct 11 02:41:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:41:31 compute-0 openstack_network_exporter[374316]: ERROR   02:41:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:41:31 compute-0 openstack_network_exporter[374316]: ERROR   02:41:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:41:31 compute-0 openstack_network_exporter[374316]: ERROR   02:41:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:41:31 compute-0 openstack_network_exporter[374316]: ERROR   02:41:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:41:31 compute-0 openstack_network_exporter[374316]: ERROR   02:41:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
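The exporter errors above mean it could not find the unixctl sockets it drives via appctl: ovn-northd does not run on a compute node, and the dpif-netdev calls only apply to a userspace datapath, so the lookups fail. The existence check behind "no control socket files found" amounts to a glob for <daemon>.<pid>.ctl; a sketch assuming the conventional run directories:

    import glob
    import os

    def find_ctl_socket(daemon: str, rundir: str = "/var/run/openvswitch"):
        """Return a unixctl control socket path for daemon, or None.

        unixctl sockets are created as <rundir>/<daemon>.<pid>.ctl; an empty
        glob yields exactly the "no control socket files found" failure above.
        """
        matches = glob.glob(os.path.join(rundir, f"{daemon}.*.ctl"))
        return matches[0] if matches else None

    print(find_ctl_socket("ovn-northd", rundir="/var/run/ovn"))  # None here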
Oct 11 02:41:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1918: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 917 KiB/s rd, 1.8 MiB/s wr, 79 op/s
Oct 11 02:41:32 compute-0 ceph-mon[191930]: pgmap v1918: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 917 KiB/s rd, 1.8 MiB/s wr, 79 op/s
Oct 11 02:41:33 compute-0 podman[458491]: 2025-10-11 02:41:33.227633438 +0000 UTC m=+0.116910431 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=multipathd, container_name=multipathd)
Oct 11 02:41:33 compute-0 podman[458492]: 2025-10-11 02:41:33.246996553 +0000 UTC m=+0.140019549 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=iscsid, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:41:34 compute-0 nova_compute[356901]: 2025-10-11 02:41:34.435 2 DEBUG nova.compute.manager [req-b0df3f1b-6fcf-4ef4-9af2-6cf6f92284e0 req-41278947-7c30-4277-8ae4-a7ccd6b34278 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Received event network-changed-e332b5d8-f31d-4e8a-99d2-7cc96428d93a external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:41:34 compute-0 nova_compute[356901]: 2025-10-11 02:41:34.435 2 DEBUG nova.compute.manager [req-b0df3f1b-6fcf-4ef4-9af2-6cf6f92284e0 req-41278947-7c30-4277-8ae4-a7ccd6b34278 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Refreshing instance network info cache due to event network-changed-e332b5d8-f31d-4e8a-99d2-7cc96428d93a. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:41:34 compute-0 nova_compute[356901]: 2025-10-11 02:41:34.436 2 DEBUG oslo_concurrency.lockutils [req-b0df3f1b-6fcf-4ef4-9af2-6cf6f92284e0 req-41278947-7c30-4277-8ae4-a7ccd6b34278 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-2a3deab0-7a22-486d-86a2-2fc870c8ab2d" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:41:34 compute-0 nova_compute[356901]: 2025-10-11 02:41:34.436 2 DEBUG oslo_concurrency.lockutils [req-b0df3f1b-6fcf-4ef4-9af2-6cf6f92284e0 req-41278947-7c30-4277-8ae4-a7ccd6b34278 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-2a3deab0-7a22-486d-86a2-2fc870c8ab2d" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:41:34 compute-0 nova_compute[356901]: 2025-10-11 02:41:34.436 2 DEBUG nova.network.neutron [req-b0df3f1b-6fcf-4ef4-9af2-6cf6f92284e0 req-41278947-7c30-4277-8ae4-a7ccd6b34278 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Refreshing network info cache for port e332b5d8-f31d-4e8a-99d2-7cc96428d93a _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:41:34 compute-0 nova_compute[356901]: 2025-10-11 02:41:34.690 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1919: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.3 MiB/s rd, 1.8 MiB/s wr, 86 op/s
Oct 11 02:41:34 compute-0 ceph-mon[191930]: pgmap v1919: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.3 MiB/s rd, 1.8 MiB/s wr, 86 op/s
Oct 11 02:41:35 compute-0 nova_compute[356901]: 2025-10-11 02:41:35.106 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:35 compute-0 nova_compute[356901]: 2025-10-11 02:41:35.669 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:41:36 compute-0 nova_compute[356901]: 2025-10-11 02:41:36.591 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1920: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 1.2 MiB/s wr, 89 op/s
Oct 11 02:41:36 compute-0 ceph-mon[191930]: pgmap v1920: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 1.2 MiB/s wr, 89 op/s
Oct 11 02:41:37 compute-0 nova_compute[356901]: 2025-10-11 02:41:37.013 2 DEBUG nova.network.neutron [req-b0df3f1b-6fcf-4ef4-9af2-6cf6f92284e0 req-41278947-7c30-4277-8ae4-a7ccd6b34278 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Updated VIF entry in instance network info cache for port e332b5d8-f31d-4e8a-99d2-7cc96428d93a. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:41:37 compute-0 nova_compute[356901]: 2025-10-11 02:41:37.014 2 DEBUG nova.network.neutron [req-b0df3f1b-6fcf-4ef4-9af2-6cf6f92284e0 req-41278947-7c30-4277-8ae4-a7ccd6b34278 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Updating instance_info_cache with network_info: [{"id": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "address": "fa:16:3e:c6:9a:0a", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.233", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape332b5d8-f3", "ovs_interfaceid": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:41:37 compute-0 nova_compute[356901]: 2025-10-11 02:41:37.041 2 DEBUG oslo_concurrency.lockutils [req-b0df3f1b-6fcf-4ef4-9af2-6cf6f92284e0 req-41278947-7c30-4277-8ae4-a7ccd6b34278 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-2a3deab0-7a22-486d-86a2-2fc870c8ab2d" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
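The instance_info_cache update above is a JSON list of VIFs, each with nested subnets, fixed IPs, and attached floating IPs. Pulling the address pairs out is plain dictionary walking; a sketch against the structure logged above (blob trimmed to the relevant keys):

    import json

    def address_pairs(network_info: str):
        """Yield (fixed_ip, floating_ip) tuples from a network_info blob."""
        for vif in json.loads(network_info):
            for subnet in vif["network"]["subnets"]:
                for ip in subnet["ips"]:
                    for fip in ip.get("floating_ips", []):
                        yield ip["address"], fip["address"]

    blob = '''[{"id": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a",
      "network": {"subnets": [{"ips": [{"address": "10.100.0.4",
        "floating_ips": [{"address": "192.168.122.233"}]}]}]}}]'''
    print(list(address_pairs(blob)))   # [('10.100.0.4', '192.168.122.233')]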
Oct 11 02:41:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1921: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 21 KiB/s wr, 74 op/s
Oct 11 02:41:38 compute-0 ceph-mon[191930]: pgmap v1921: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 21 KiB/s wr, 74 op/s
Oct 11 02:41:39 compute-0 nova_compute[356901]: 2025-10-11 02:41:39.444 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:40 compute-0 nova_compute[356901]: 2025-10-11 02:41:40.108 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:40 compute-0 nova_compute[356901]: 2025-10-11 02:41:40.289 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:40 compute-0 nova_compute[356901]: 2025-10-11 02:41:40.671 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1922: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 22 KiB/s wr, 74 op/s
Oct 11 02:41:40 compute-0 ceph-mon[191930]: pgmap v1922: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 22 KiB/s wr, 74 op/s
Oct 11 02:41:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:41:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1923: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 9.4 KiB/s wr, 69 op/s
Oct 11 02:41:42 compute-0 ceph-mon[191930]: pgmap v1923: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 9.4 KiB/s wr, 69 op/s
Oct 11 02:41:43 compute-0 podman[458528]: 2025-10-11 02:41:43.234328717 +0000 UTC m=+0.114011920 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, tcib_managed=true, config_id=edpm, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.3, org.label-schema.build-date=20251009)
Oct 11 02:41:43 compute-0 podman[458529]: 2025-10-11 02:41:43.269884199 +0000 UTC m=+0.136900042 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, managed_by=edpm_ansible, url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, release=1755695350, architecture=x86_64, version=9.6, build-date=2025-08-20T13:12:41, distribution-scope=public, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, container_name=openstack_network_exporter, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.expose-services=, vcs-type=git, io.buildah.version=1.33.7, config_id=edpm)
Oct 11 02:41:43 compute-0 podman[458530]: 2025-10-11 02:41:43.287683781 +0000 UTC m=+0.152073492 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:41:44 compute-0 nova_compute[356901]: 2025-10-11 02:41:44.286 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1924: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.3 MiB/s rd, 9.1 KiB/s wr, 43 op/s
Oct 11 02:41:44 compute-0 ceph-mon[191930]: pgmap v1924: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.3 MiB/s rd, 9.1 KiB/s wr, 43 op/s
Oct 11 02:41:45 compute-0 nova_compute[356901]: 2025-10-11 02:41:45.111 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:45 compute-0 nova_compute[356901]: 2025-10-11 02:41:45.676 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:41:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1925: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 735 KiB/s rd, 4.0 KiB/s wr, 23 op/s
Oct 11 02:41:46 compute-0 ceph-mon[191930]: pgmap v1925: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 735 KiB/s rd, 4.0 KiB/s wr, 23 op/s
Oct 11 02:41:47 compute-0 nova_compute[356901]: 2025-10-11 02:41:47.691 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.239 2 DEBUG oslo_concurrency.lockutils [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Acquiring lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.240 2 DEBUG oslo_concurrency.lockutils [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" acquired by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.241 2 DEBUG oslo_concurrency.lockutils [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Acquiring lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.243 2 DEBUG oslo_concurrency.lockutils [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" acquired by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.244 2 DEBUG oslo_concurrency.lockutils [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765-events" "released" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.246 2 INFO nova.compute.manager [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Terminating instance
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.248 2 DEBUG nova.compute.manager [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Start destroying the instance on the hypervisor. _shutdown_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:3120
Oct 11 02:41:48 compute-0 kernel: tapd7c4233c-f7 (unregistering): left promiscuous mode
Oct 11 02:41:48 compute-0 NetworkManager[44908]: <info>  [1760150508.3684] device (tapd7c4233c-f7): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.391 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:48 compute-0 ovn_controller[88370]: 2025-10-11T02:41:48Z|00151|binding|INFO|Releasing lport d7c4233c-f79b-4f32-b896-c36d4abb7d26 from this chassis (sb_readonly=0)
Oct 11 02:41:48 compute-0 ovn_controller[88370]: 2025-10-11T02:41:48Z|00152|binding|INFO|Setting lport d7c4233c-f79b-4f32-b896-c36d4abb7d26 down in Southbound
Oct 11 02:41:48 compute-0 ovn_controller[88370]: 2025-10-11T02:41:48Z|00153|binding|INFO|Removing iface tapd7c4233c-f7 ovn-installed in OVS
Oct 11 02:41:48 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:48.400 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:8d:b8:dd 10.100.0.4'], port_security=['fa:16:3e:8d:b8:dd 10.100.0.4'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.4/28', 'neutron:device_id': 'f5eb6746-7f42-4fa4-8e26-cda5cfa0c765', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-b4d521f7-7729-40fd-aa58-7126044eb166', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': 'dba4f6e51d33430ebf5566af53f6fbcc', 'neutron:revision_number': '6', 'neutron:security_group_ids': '82e011ad-d874-487b-b398-e13313bfa497', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:host_id': 'compute-0.ctlplane.example.com', 'neutron:port_fip': '192.168.122.187'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=283b08c2-109a-4649-a6db-2339ca56efb4, chassis=[], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=d7c4233c-f79b-4f32-b896-c36d4abb7d26) old=Port_Binding(up=[True], chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:41:48 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:48.402 286362 INFO neutron.agent.ovn.metadata.agent [-] Port d7c4233c-f79b-4f32-b896-c36d4abb7d26 in datapath b4d521f7-7729-40fd-aa58-7126044eb166 unbound from our chassis
Oct 11 02:41:48 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:48.407 286362 DEBUG neutron.agent.ovn.metadata.agent [-] No valid VIF ports were found for network b4d521f7-7729-40fd-aa58-7126044eb166, tearing the namespace down if needed _get_provision_params /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:628
Oct 11 02:41:48 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:48.409 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[56793ec1-46c1-479a-97eb-8bab501c5f33]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.410 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:48 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:48.412 286362 INFO neutron.agent.ovn.metadata.agent [-] Cleaning up ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166 namespace which is not needed anymore
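Once the last VIF on the network is unbound, the metadata agent tears down the per-network ovnmeta- namespace (and, as the following lines show, the haproxy container serving it). The decision itself is small; an illustrative stdlib sketch (the real agent deletes the namespace via pyroute2 under oslo.privsep rather than shelling out to ip):

    import subprocess

    def teardown_if_unused(network_id: str, vif_ports: list) -> None:
        """Delete ovnmeta-<network_id> when no valid VIF ports remain."""
        if vif_ports:
            return  # still serving someone; keep the namespace
        ns = f"ovnmeta-{network_id}"
        print(f"Cleaning up {ns} namespace which is not needed anymore")
        subprocess.run(["ip", "netns", "delete", ns], check=False)

    teardown_if_unused("b4d521f7-7729-40fd-aa58-7126044eb166", vif_ports=[])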
Oct 11 02:41:48 compute-0 systemd[1]: machine-qemu\x2d12\x2dinstance\x2d00000006.scope: Deactivated successfully.
Oct 11 02:41:48 compute-0 systemd[1]: machine-qemu\x2d12\x2dinstance\x2d00000006.scope: Consumed 44.585s CPU time.
Oct 11 02:41:48 compute-0 systemd-machined[137586]: Machine qemu-12-instance-00000006 terminated.
Oct 11 02:41:48 compute-0 ovn_controller[88370]: 2025-10-11T02:41:48Z|00154|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:41:48 compute-0 ovn_controller[88370]: 2025-10-11T02:41:48Z|00155|binding|INFO|Releasing lport ffb676d8-51f5-4de3-a31a-71adc7412138 from this chassis (sb_readonly=0)
Oct 11 02:41:48 compute-0 ovn_controller[88370]: 2025-10-11T02:41:48Z|00156|binding|INFO|Releasing lport aa37c6ed-d2db-4ed4-b1c9-cfd071cfd96a from this chassis (sb_readonly=0)
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.627 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.707 2 INFO nova.virt.libvirt.driver [-] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Instance destroyed successfully.
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.708 2 DEBUG nova.objects.instance [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lazy-loading 'resources' on Instance uuid f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:41:48 compute-0 neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166[456567]: [NOTICE]   (456571) : haproxy version is 2.8.14-c23fe91
Oct 11 02:41:48 compute-0 neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166[456567]: [NOTICE]   (456571) : path to executable is /usr/sbin/haproxy
Oct 11 02:41:48 compute-0 neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166[456567]: [WARNING]  (456571) : Exiting Master process...
Oct 11 02:41:48 compute-0 neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166[456567]: [ALERT]    (456571) : Current worker (456573) exited with code 143 (Terminated)
Oct 11 02:41:48 compute-0 neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166[456567]: [WARNING]  (456571) : All workers exited. Exiting... (0)
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.728 2 DEBUG nova.virt.libvirt.vif [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:38:58Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=<?>,disable_terminate=False,display_description='tempest-ServerActionsTestJSON-server-482072585',display_name='tempest-ServerActionsTestJSON-server-482072585',ec2_ids=<?>,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-serveractionstestjson-server-482072585',id=6,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBEeyedFg6J90z3asuDBQl1/Bvzj806ldEmlyVo7UkMTJHBgXm6kW1TdMM5vQZaYHoLzJajtdp6cuAv6b+cT74TvAgDg4tZ7S8WdWrLaHLA9uudTCq+0DsKhebTJVvA2XxA==',key_name='tempest-keypair-177844218',keypairs=<?>,launch_index=0,launched_at=2025-10-11T02:39:27Z,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=<?>,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=<?>,power_state=1,progress=0,project_id='dba4f6e51d33430ebf5566af53f6fbcc',ramdisk_id='',reservation_id='r-xpsstq1e',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_cdrom_bus='sata',image_hw_disk_bus='virtio',image_hw_input_bus='usb',image_hw_machine_type='q35',image_hw_pointer_model='usbtablet',image_hw_rng_model='virtio',image_hw_video_model='virtio',image_hw_vif_model='virtio',image_min_disk='1',image_min_ram='0',owner_project_name='tempest-ServerActionsTestJSON-1563605323',owner_user_name='tempest-ServerActionsTestJSON-1563605323-project-member'},tags=<?>,task_state='deleting',terminated_at=None,trusted_certs=<?>,updated_at=2025-10-11T02:40:42Z,user_data='IyEvYmluL3NoCmVjaG8gIlByaW50aW5nIGNpcnJvcyB1c2VyIGF1dGhvcml6ZWQga2V5cyIKY2F0IH5jaXJyb3MvLnNzaC9hdXRob3JpemVkX2tleXMgfHwgdHJ1ZQo=',user_id='11c81e88a90342bba2c2816e4e3cb191',uuid=f5eb6746-7f42-4fa4-8e26-cda5cfa0c765,vcpu_model=<?>,vcpus=1,vm_mode=None,vm_state='active') vif={"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.187", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} unplug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:828
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.729 2 DEBUG nova.network.os_vif_util [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Converting VIF {"id": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "address": "fa:16:3e:8d:b8:dd", "network": {"id": "b4d521f7-7729-40fd-aa58-7126044eb166", "bridge": "br-int", "label": "tempest-ServerActionsTestJSON-976896854-network", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.187", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "dba4f6e51d33430ebf5566af53f6fbcc", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tapd7c4233c-f7", "ovs_interfaceid": "d7c4233c-f79b-4f32-b896-c36d4abb7d26", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.730 2 DEBUG nova.network.os_vif_util [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Converted object VIFOpenVSwitch(active=True,address=fa:16:3e:8d:b8:dd,bridge_name='br-int',has_traffic_filtering=True,id=d7c4233c-f79b-4f32-b896-c36d4abb7d26,network=Network(b4d521f7-7729-40fd-aa58-7126044eb166),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd7c4233c-f7') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.731 2 DEBUG os_vif [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Unplugging vif VIFOpenVSwitch(active=True,address=fa:16:3e:8d:b8:dd,bridge_name='br-int',has_traffic_filtering=True,id=d7c4233c-f79b-4f32-b896-c36d4abb7d26,network=Network(b4d521f7-7729-40fd-aa58-7126044eb166),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd7c4233c-f7') unplug /usr/lib/python3.9/site-packages/os_vif/__init__.py:109
Oct 11 02:41:48 compute-0 systemd[1]: libpod-eca91bf27f53a58ce460e280cfdd0da4c405777e4a1b81c336462d6a18a82c7a.scope: Deactivated successfully.
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.733 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:48 compute-0 conmon[456567]: conmon eca91bf27f53a58ce460 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-eca91bf27f53a58ce460e280cfdd0da4c405777e4a1b81c336462d6a18a82c7a.scope/container/memory.events
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.733 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapd7c4233c-f7, bridge=br-int, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
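The DelPortCommand in that transaction is os-vif going through ovsdbapp to drop the tap port from br-int in one OVSDB commit. Assuming the ovsdbapp package and a local ovsdb-server socket (the path is deployment-specific), the equivalent client code is roughly:

    from ovsdbapp.backend.ovs_idl import connection
    from ovsdbapp.schema.open_vswitch import impl_idl

    OVSDB = "unix:/var/run/openvswitch/db.sock"  # deployment-specific

    idl = connection.OvsdbIdl.from_server(OVSDB, "Open_vSwitch")
    api = impl_idl.OvsdbIdl(connection.Connection(idl, timeout=10))

    # Queues a DelPortCommand like the one in the txn logged above and
    # commits it; if_exists=True makes the delete idempotent.
    api.del_port("tapd7c4233c-f7", bridge="br-int", if_exists=True).execute(
        check_error=True)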
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.741 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:48 compute-0 podman[458612]: 2025-10-11 02:41:48.74363939 +0000 UTC m=+0.100895578 container died eca91bf27f53a58ce460e280cfdd0da4c405777e4a1b81c336462d6a18a82c7a (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.745 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.747 2 INFO os_vif [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Successfully unplugged vif VIFOpenVSwitch(active=True,address=fa:16:3e:8d:b8:dd,bridge_name='br-int',has_traffic_filtering=True,id=d7c4233c-f79b-4f32-b896-c36d4abb7d26,network=Network(b4d521f7-7729-40fd-aa58-7126044eb166),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tapd7c4233c-f7')
Oct 11 02:41:48 compute-0 systemd[1]: var-lib-containers-storage-overlay-c0d34672f581e91d347ecc7586ed939fa1ae746dcc8cb29c4b4994d69a8c07a3-merged.mount: Deactivated successfully.
Oct 11 02:41:48 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-eca91bf27f53a58ce460e280cfdd0da4c405777e4a1b81c336462d6a18a82c7a-userdata-shm.mount: Deactivated successfully.
Oct 11 02:41:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1926: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 2.3 KiB/s rd, 1023 B/s wr, 0 op/s
Oct 11 02:41:48 compute-0 podman[458612]: 2025-10-11 02:41:48.818958691 +0000 UTC m=+0.176214889 container cleanup eca91bf27f53a58ce460e280cfdd0da4c405777e4a1b81c336462d6a18a82c7a (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.license=GPLv2, io.buildah.version=1.41.3)
Oct 11 02:41:48 compute-0 systemd[1]: libpod-conmon-eca91bf27f53a58ce460e280cfdd0da4c405777e4a1b81c336462d6a18a82c7a.scope: Deactivated successfully.
Oct 11 02:41:48 compute-0 ceph-mon[191930]: pgmap v1926: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 2.3 KiB/s rd, 1023 B/s wr, 0 op/s
Oct 11 02:41:48 compute-0 podman[458670]: 2025-10-11 02:41:48.947448241 +0000 UTC m=+0.084286020 container remove eca91bf27f53a58ce460e280cfdd0da4c405777e4a1b81c336462d6a18a82c7a (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:41:48 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:48.969 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[1635225f-beca-4700-97c1-eeed9eb6068e]: (4, ('Sat Oct 11 02:41:48 AM UTC 2025 Stopping container neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166 (eca91bf27f53a58ce460e280cfdd0da4c405777e4a1b81c336462d6a18a82c7a)\neca91bf27f53a58ce460e280cfdd0da4c405777e4a1b81c336462d6a18a82c7a\nSat Oct 11 02:41:48 AM UTC 2025 Deleting container neutron-haproxy-ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166 (eca91bf27f53a58ce460e280cfdd0da4c405777e4a1b81c336462d6a18a82c7a)\neca91bf27f53a58ce460e280cfdd0da4c405777e4a1b81c336462d6a18a82c7a\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:48 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:48.973 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[e05d9b70-ed2d-4602-964a-4c145d12a2e5]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:48 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:48.974 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapb4d521f7-70, bridge=None, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:41:48 compute-0 nova_compute[356901]: 2025-10-11 02:41:48.977 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:48 compute-0 kernel: tapb4d521f7-70: left promiscuous mode
Oct 11 02:41:48 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:48.993 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[cccdfb69-f76c-4392-9bfc-031d54f63bb4]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:49 compute-0 nova_compute[356901]: 2025-10-11 02:41:49.009 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:49.013 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[babb0f24-211c-4c4c-9eb6-fa489f475f64]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:49.019 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[54141fa3-1866-42ec-b30d-9aaa9e569c52]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:49.054 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[302b1a2e-721f-4d2c-8dce-b2ee8a948604]: (4, [{'family': 0, '__align': (), 'ifi_type': 772, 'index': 1, 'flags': 65609, 'change': 0, 'attrs': [['IFLA_IFNAME', 'lo'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UNKNOWN'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 65536], ['IFLA_MIN_MTU', 0], ['IFLA_MAX_MTU', 0], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 1], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 1], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 0], ['IFLA_CARRIER_UP_COUNT', 0], ['IFLA_CARRIER_DOWN_COUNT', 0], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', '00:00:00:00:00:00'], ['IFLA_BROADCAST', '00:00:00:00:00:00'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 1, 'nopolicy': 1, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 678926, 'reachable_time': 29158, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 65536, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 4294967295, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 4294967295, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 0, 'inoctets': 0, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 0, 'outoctets': 0, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 0, 'outmcastpkts': 0, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 0, 'outmcastoctets': 0, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 0, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 0, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1404, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 458684, 'error': None, 'target': 'ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:41:49 compute-0 systemd[1]: run-netns-ovnmeta\x2db4d521f7\x2d7729\x2d40fd\x2daa58\x2d7126044eb166.mount: Deactivated successfully.
Oct 11 02:41:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:49.058 286647 DEBUG neutron.privileged.agent.linux.ip_lib [-] Namespace ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166 deleted. remove_netns /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:607
Oct 11 02:41:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:49.059 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[09dbb4ef-ee4c-467f-85d7-55b90348a466]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
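Note: the large privsep reply carrying the RTM_NEWLINK record for 'lo' is a pyroute2 netlink dump taken inside the ovnmeta- namespace just before remove_netns deletes it. A minimal pyroute2 sketch of those two calls; the namespace name is copied from this log, everything else is illustrative:

    # Sketch of the pyroute2 calls behind the privsep replies above.
    from pyroute2 import NetNS
    from pyroute2 import netns

    NS = 'ovnmeta-b4d521f7-7729-40fd-aa58-7126044eb166'

    with NetNS(NS) as ns:                # IPRoute-like handle bound to the netns
        for link in ns.get_links():      # yields RTM_NEWLINK messages like the dump above
            print(link.get_attr('IFLA_IFNAME'), link['state'])

    netns.remove(NS)                     # what neutron's remove_netns boils down to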
Oct 11 02:41:49 compute-0 nova_compute[356901]: 2025-10-11 02:41:49.388 2 INFO nova.virt.libvirt.driver [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Deleting instance files /var/lib/nova/instances/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_del
Oct 11 02:41:49 compute-0 nova_compute[356901]: 2025-10-11 02:41:49.389 2 INFO nova.virt.libvirt.driver [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Deletion of /var/lib/nova/instances/f5eb6746-7f42-4fa4-8e26-cda5cfa0c765_del complete
Oct 11 02:41:49 compute-0 nova_compute[356901]: 2025-10-11 02:41:49.434 2 INFO nova.compute.manager [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Took 1.18 seconds to destroy the instance on the hypervisor.
Oct 11 02:41:49 compute-0 nova_compute[356901]: 2025-10-11 02:41:49.435 2 DEBUG oslo.service.loopingcall [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Waiting for function nova.compute.manager.ComputeManager._try_deallocate_network.<locals>._deallocate_network_with_retries to return. func /usr/lib/python3.9/site-packages/oslo_service/loopingcall.py:435
Oct 11 02:41:49 compute-0 nova_compute[356901]: 2025-10-11 02:41:49.436 2 DEBUG nova.compute.manager [-] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Deallocating network for instance _deallocate_network /usr/lib/python3.9/site-packages/nova/compute/manager.py:2259
Oct 11 02:41:49 compute-0 nova_compute[356901]: 2025-10-11 02:41:49.436 2 DEBUG nova.network.neutron [-] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] deallocate_for_instance() deallocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1803
Oct 11 02:41:50 compute-0 nova_compute[356901]: 2025-10-11 02:41:50.115 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:50 compute-0 podman[458687]: 2025-10-11 02:41:50.237031441 +0000 UTC m=+0.123925555 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, summary=Provides the latest release of Red Hat Universal Base Image 9., architecture=x86_64, managed_by=edpm_ansible, release-0.7.12=, io.k8s.display-name=Red Hat Universal Base Image 9, name=ubi9, com.redhat.component=ubi9-container, release=1214.1726694543, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2024-09-18T21:23:30, distribution-scope=public, container_name=kepler, io.buildah.version=1.29.0, vcs-type=git, version=9.4, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.expose-services=, maintainer=Red Hat, Inc., vendor=Red Hat, Inc.)
Oct 11 02:41:50 compute-0 nova_compute[356901]: 2025-10-11 02:41:50.801 2 DEBUG nova.network.neutron [-] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Updating instance_info_cache with network_info: [] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:41:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1927: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 4.7 KiB/s rd, 1.1 KiB/s wr, 3 op/s
Oct 11 02:41:50 compute-0 nova_compute[356901]: 2025-10-11 02:41:50.857 2 INFO nova.compute.manager [-] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Took 1.42 seconds to deallocate network for instance.
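Note: the earlier "Waiting for function ... _deallocate_network_with_retries to return" line is oslo.service's looping-call machinery; nova keeps retrying neutron deallocation and only returns once the call signals completion, which took 1.42 s here. A sketch of that pattern with the fixed-interval variant (nova's real code wraps deallocate_for_instance in a back-off loop; the failing-twice body below is purely illustrative):

    # Sketch of the oslo.service retry pattern behind the looping-call line.
    from oslo_service import loopingcall

    attempts = {'n': 0}

    def _deallocate():
        attempts['n'] += 1
        if attempts['n'] < 3:                  # pretend two transient failures
            return                             # keep looping
        raise loopingcall.LoopingCallDone(retvalue=True)

    timer = loopingcall.FixedIntervalLoopingCall(_deallocate)
    ok = timer.start(interval=1).wait()        # blocks until LoopingCallDone
    print(ok)                                  # True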
Oct 11 02:41:50 compute-0 nova_compute[356901]: 2025-10-11 02:41:50.899 2 DEBUG oslo_concurrency.lockutils [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.update_usage" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:50 compute-0 nova_compute[356901]: 2025-10-11 02:41:50.901 2 DEBUG oslo_concurrency.lockutils [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:50 compute-0 ceph-mon[191930]: pgmap v1927: 321 pgs: 321 active+clean; 246 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 4.7 KiB/s rd, 1.1 KiB/s wr, 3 op/s
Oct 11 02:41:51 compute-0 nova_compute[356901]: 2025-10-11 02:41:51.009 2 DEBUG nova.compute.manager [req-3bae77bb-f036-4615-9898-fa52475a578b req-2c0c798c-0e51-4e7b-b88a-17e19988428a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Received event network-vif-deleted-d7c4233c-f79b-4f32-b896-c36d4abb7d26 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:41:51 compute-0 nova_compute[356901]: 2025-10-11 02:41:51.010 2 DEBUG oslo_concurrency.processutils [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:41:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:41:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:41:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3806320652' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:41:51 compute-0 nova_compute[356901]: 2025-10-11 02:41:51.508 2 DEBUG oslo_concurrency.processutils [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.498s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:41:51 compute-0 nova_compute[356901]: 2025-10-11 02:41:51.519 2 DEBUG nova.compute.provider_tree [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:41:51 compute-0 nova_compute[356901]: 2025-10-11 02:41:51.540 2 DEBUG nova.scheduler.client.report [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
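Note: placement derives schedulable capacity from that inventory as (total - reserved) * allocation_ratio per resource class, so this node offers 32 VCPU, 7168 MB of RAM and 52.2 GB of disk. Worked out with the exact values from the line above:

    # Worked example using the inventory dict logged above.
    inv = {
        'VCPU':      {'total': 8,    'reserved': 0,   'allocation_ratio': 4.0},
        'MEMORY_MB': {'total': 7680, 'reserved': 512, 'allocation_ratio': 1.0},
        'DISK_GB':   {'total': 59,   'reserved': 1,   'allocation_ratio': 0.9},
    }
    for rc, v in inv.items():
        usable = (v['total'] - v['reserved']) * v['allocation_ratio']
        print(rc, round(usable, 1))   # VCPU 32.0, MEMORY_MB 7168.0, DISK_GB 52.2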
Oct 11 02:41:51 compute-0 nova_compute[356901]: 2025-10-11 02:41:51.566 2 DEBUG oslo_concurrency.lockutils [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: held 0.665s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:41:51 compute-0 nova_compute[356901]: 2025-10-11 02:41:51.598 2 INFO nova.scheduler.client.report [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Deleted allocations for instance f5eb6746-7f42-4fa4-8e26-cda5cfa0c765
Oct 11 02:41:51 compute-0 nova_compute[356901]: 2025-10-11 02:41:51.695 2 DEBUG oslo_concurrency.lockutils [None req-895636d4-98db-40d3-b138-668d390d38fb 11c81e88a90342bba2c2816e4e3cb191 dba4f6e51d33430ebf5566af53f6fbcc - - default default] Lock "f5eb6746-7f42-4fa4-8e26-cda5cfa0c765" "released" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: held 3.455s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
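Note: the Acquiring/acquired/released triplets come from oslo.concurrency's lockutils; do_terminate_instance held the per-instance lock for the whole 3.455 s teardown, with update_usage taking the inner "compute_resources" lock. The pattern, sketched (lock names from this log, bodies illustrative):

    # Sketch of the oslo.concurrency locking pattern seen above.
    from oslo_concurrency import lockutils

    @lockutils.synchronized('compute_resources')
    def update_usage():
        pass    # resource-tracker critical section (illustrative)

    with lockutils.lock('f5eb6746-7f42-4fa4-8e26-cda5cfa0c765'):
        update_usage()    # same nesting as the log: instance lock outside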
Oct 11 02:41:51 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3806320652' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:41:52 compute-0 ovn_controller[88370]: 2025-10-11T02:41:52Z|00157|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:41:52 compute-0 ovn_controller[88370]: 2025-10-11T02:41:52Z|00158|binding|INFO|Releasing lport ffb676d8-51f5-4de3-a31a-71adc7412138 from this chassis (sb_readonly=0)
Oct 11 02:41:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1928: 321 pgs: 321 active+clean; 221 MiB data, 352 MiB used, 60 GiB / 60 GiB avail; 7.4 KiB/s rd, 938 B/s wr, 9 op/s
Oct 11 02:41:52 compute-0 nova_compute[356901]: 2025-10-11 02:41:52.824 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:52 compute-0 ceph-mon[191930]: pgmap v1928: 321 pgs: 321 active+clean; 221 MiB data, 352 MiB used, 60 GiB / 60 GiB avail; 7.4 KiB/s rd, 938 B/s wr, 9 op/s
Oct 11 02:41:53 compute-0 nova_compute[356901]: 2025-10-11 02:41:53.738 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1929: 321 pgs: 321 active+clean; 165 MiB data, 326 MiB used, 60 GiB / 60 GiB avail; 21 KiB/s rd, 1.2 KiB/s wr, 28 op/s
Oct 11 02:41:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:54.871 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:41:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:54.872 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:41:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:41:54.875 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.003s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:41:54 compute-0 ceph-mon[191930]: pgmap v1929: 321 pgs: 321 active+clean; 165 MiB data, 326 MiB used, 60 GiB / 60 GiB avail; 21 KiB/s rd, 1.2 KiB/s wr, 28 op/s
Oct 11 02:41:55 compute-0 nova_compute[356901]: 2025-10-11 02:41:55.119 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:41:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:41:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:41:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:41:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:41:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:41:56
Oct 11 02:41:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:41:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:41:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['volumes', 'default.rgw.log', 'cephfs.cephfs.meta', 'backups', '.rgw.root', 'default.rgw.meta', '.mgr', 'default.rgw.control', 'images', 'cephfs.cephfs.data', 'vms']
Oct 11 02:41:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:41:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:41:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:41:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1930: 321 pgs: 321 active+clean; 165 MiB data, 326 MiB used, 60 GiB / 60 GiB avail; 21 KiB/s rd, 1.2 KiB/s wr, 28 op/s
Oct 11 02:41:56 compute-0 ceph-mon[191930]: pgmap v1930: 321 pgs: 321 active+clean; 165 MiB data, 326 MiB used, 60 GiB / 60 GiB avail; 21 KiB/s rd, 1.2 KiB/s wr, 28 op/s
Oct 11 02:41:57 compute-0 podman[458728]: 2025-10-11 02:41:57.232706793 +0000 UTC m=+0.119576497 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:41:57 compute-0 podman[458731]: 2025-10-11 02:41:57.243555641 +0000 UTC m=+0.103732848 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent)
Oct 11 02:41:57 compute-0 podman[458730]: 2025-10-11 02:41:57.266741612 +0000 UTC m=+0.138559341 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_compute, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, config_id=edpm, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true)
Oct 11 02:41:57 compute-0 podman[458729]: 2025-10-11 02:41:57.296378328 +0000 UTC m=+0.170304511 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, org.label-schema.schema-version=1.0, container_name=ovn_controller, io.buildah.version=1.41.3)
Oct 11 02:41:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:41:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:41:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:41:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:41:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:41:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:41:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:41:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:41:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:41:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:41:57 compute-0 ceph-mgr[192233]: client.0 ms_handle_reset on v2:192.168.122.100:6800/1088804496
Oct 11 02:41:57 compute-0 ovn_controller[88370]: 2025-10-11T02:41:57Z|00159|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:41:57 compute-0 ovn_controller[88370]: 2025-10-11T02:41:57Z|00160|binding|INFO|Releasing lport ffb676d8-51f5-4de3-a31a-71adc7412138 from this chassis (sb_readonly=0)
Oct 11 02:41:57 compute-0 nova_compute[356901]: 2025-10-11 02:41:57.688 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:57 compute-0 nova_compute[356901]: 2025-10-11 02:41:57.933 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:41:58 compute-0 nova_compute[356901]: 2025-10-11 02:41:58.743 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:41:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1931: 321 pgs: 321 active+clean; 165 MiB data, 326 MiB used, 60 GiB / 60 GiB avail; 21 KiB/s rd, 1.2 KiB/s wr, 28 op/s
Oct 11 02:41:58 compute-0 ceph-mon[191930]: pgmap v1931: 321 pgs: 321 active+clean; 165 MiB data, 326 MiB used, 60 GiB / 60 GiB avail; 21 KiB/s rd, 1.2 KiB/s wr, 28 op/s
Oct 11 02:41:59 compute-0 podman[157119]: time="2025-10-11T02:41:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:41:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:41:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:41:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:41:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9528 "" "Go-http-client/1.1"
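Note: those GET lines are the libpod REST API answering over /run/podman/podman.sock, the socket the podman_exporter container mounts via CONTAINER_HOST above. The same query can be reproduced from Python; the endpoint and parameters are copied from this log, the connection helper is illustrative:

    # Query the libpod REST API over the unix socket, as the exporter does.
    import http.client
    import json
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        """HTTPConnection over an AF_UNIX socket instead of TCP."""
        def __init__(self, path):
            super().__init__('localhost')
            self._path = path
        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self._path)

    conn = UnixHTTPConnection('/run/podman/podman.sock')
    conn.request('GET', '/v4.9.3/libpod/containers/json?all=true&external=false')
    containers = json.loads(conn.getresponse().read())
    print(len(containers), 'containers')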
Oct 11 02:42:00 compute-0 nova_compute[356901]: 2025-10-11 02:42:00.122 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1932: 321 pgs: 321 active+clean; 165 MiB data, 326 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 1.2 KiB/s wr, 27 op/s
Oct 11 02:42:00 compute-0 ceph-mon[191930]: pgmap v1932: 321 pgs: 321 active+clean; 165 MiB data, 326 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 1.2 KiB/s wr, 27 op/s
Oct 11 02:42:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:42:01 compute-0 openstack_network_exporter[374316]: ERROR   02:42:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:42:01 compute-0 openstack_network_exporter[374316]: ERROR   02:42:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:42:01 compute-0 openstack_network_exporter[374316]: ERROR   02:42:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:42:01 compute-0 openstack_network_exporter[374316]: ERROR   02:42:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:42:01 compute-0 openstack_network_exporter[374316]: ERROR   02:42:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
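Note: these exporter errors mean it found no OVS/OVN unix control sockets to talk to. On a compute node the ovn-northd ones are expected to be missing (northd runs on the control plane), while the ovsdb-server and datapath failures suggest the exporter's rundir mounts do not line up with where openvswitch puts its *.ctl files. A quick check; the glob patterns are assumptions based on the standard rundir layout and the volume mounts logged for this container:

    # List the control sockets the exporter would probe for.
    import glob
    for pattern in ('/var/run/openvswitch/*.ctl', '/var/lib/openvswitch/ovn/*.ctl'):
        print(pattern, '->', glob.glob(pattern) or 'none found')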
Oct 11 02:42:02 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:02.188 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=14, options={'arp_ns_explicit_output': 'true', 'mac_prefix': 'fe:55:97', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'ce:9c:4f:b4:85:9b'}, ipsec=False) old=SB_Global(nb_cfg=13) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:42:02 compute-0 nova_compute[356901]: 2025-10-11 02:42:02.189 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:02 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:02.191 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 8 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274
Oct 11 02:42:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1933: 321 pgs: 321 active+clean; 165 MiB data, 326 MiB used, 60 GiB / 60 GiB avail; 29 KiB/s rd, 137 KiB/s wr, 29 op/s
Oct 11 02:42:02 compute-0 nova_compute[356901]: 2025-10-11 02:42:02.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:42:02 compute-0 ceph-mon[191930]: pgmap v1933: 321 pgs: 321 active+clean; 165 MiB data, 326 MiB used, 60 GiB / 60 GiB avail; 29 KiB/s rd, 137 KiB/s wr, 29 op/s
Oct 11 02:42:03 compute-0 nova_compute[356901]: 2025-10-11 02:42:03.703 2 DEBUG nova.virt.driver [-] Emitting event <LifecycleEvent: 1760150508.7020335, f5eb6746-7f42-4fa4-8e26-cda5cfa0c765 => Stopped> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:42:03 compute-0 nova_compute[356901]: 2025-10-11 02:42:03.704 2 INFO nova.compute.manager [-] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] VM Stopped (Lifecycle Event)
Oct 11 02:42:03 compute-0 nova_compute[356901]: 2025-10-11 02:42:03.732 2 DEBUG nova.compute.manager [None req-1bed408e-6cf2-4a0e-8009-fdb284e21483 - - - - - -] [instance: f5eb6746-7f42-4fa4-8e26-cda5cfa0c765] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:42:03 compute-0 nova_compute[356901]: 2025-10-11 02:42:03.746 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:04 compute-0 ovn_controller[88370]: 2025-10-11T02:42:04Z|00019|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:c6:9a:0a 10.100.0.4
Oct 11 02:42:04 compute-0 ovn_controller[88370]: 2025-10-11T02:42:04Z|00020|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:c6:9a:0a 10.100.0.4
Oct 11 02:42:04 compute-0 podman[458810]: 2025-10-11 02:42:04.236893373 +0000 UTC m=+0.110782270 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=iscsid)
Oct 11 02:42:04 compute-0 podman[458809]: 2025-10-11 02:42:04.242856624 +0000 UTC m=+0.136883030 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, config_id=multipathd, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, container_name=multipathd)
Oct 11 02:42:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1934: 321 pgs: 321 active+clean; 189 MiB data, 344 MiB used, 60 GiB / 60 GiB avail; 190 KiB/s rd, 1.7 MiB/s wr, 62 op/s
Oct 11 02:42:04 compute-0 ceph-mon[191930]: pgmap v1934: 321 pgs: 321 active+clean; 189 MiB data, 344 MiB used, 60 GiB / 60 GiB avail; 190 KiB/s rd, 1.7 MiB/s wr, 62 op/s
Oct 11 02:42:05 compute-0 nova_compute[356901]: 2025-10-11 02:42:05.127 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:05 compute-0 nova_compute[356901]: 2025-10-11 02:42:05.594 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:42:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1935: 321 pgs: 321 active+clean; 196 MiB data, 350 MiB used, 60 GiB / 60 GiB avail; 253 KiB/s rd, 2.1 MiB/s wr, 57 op/s
Oct 11 02:42:06 compute-0 ceph-mon[191930]: pgmap v1935: 321 pgs: 321 active+clean; 196 MiB data, 350 MiB used, 60 GiB / 60 GiB avail; 253 KiB/s rd, 2.1 MiB/s wr, 57 op/s
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0013077429063293611 of space, bias 1.0, pg target 0.3923228718988083 quantized to 32 (current 32)
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0009191400908380543 of space, bias 1.0, pg target 0.2757420272514163 quantized to 32 (current 32)
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:42:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
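Note: each "pg target" above is usage_fraction * bias * (num_osds * mon_target_pg_per_osd), then quantized to a power of two (hence cephfs.cephfs.meta heading from 32 toward 16). Assuming three OSDs and the default mon_target_pg_per_osd of 100 (both assumptions; neither number appears in this log), the budget is 300 PGs and the logged targets reproduce exactly:

    # Reproduce the pg_autoscaler targets above; fractions and biases are
    # copied from the log, the 300-PG budget is the stated assumption.
    PG_BUDGET = 3 * 100    # num_osds * mon_target_pg_per_osd

    pools = {              # usage fraction, bias
        '.mgr':               (7.185749983720779e-06, 1.0),
        'vms':                (0.0013077429063293611, 1.0),
        'cephfs.cephfs.meta': (5.087256625643029e-07, 4.0),
    }
    for name, (frac, bias) in pools.items():
        print(name, frac * bias * PG_BUDGET)
    # .mgr 0.0021557249951162337, vms 0.3923228718988083,
    # cephfs.cephfs.meta 0.0006104707950771635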
Oct 11 02:42:07 compute-0 nova_compute[356901]: 2025-10-11 02:42:07.744 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:07 compute-0 nova_compute[356901]: 2025-10-11 02:42:07.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:42:08 compute-0 nova_compute[356901]: 2025-10-11 02:42:08.750 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1936: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 268 KiB/s rd, 2.1 MiB/s wr, 59 op/s
Oct 11 02:42:08 compute-0 ceph-mon[191930]: pgmap v1936: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 268 KiB/s rd, 2.1 MiB/s wr, 59 op/s
Oct 11 02:42:09 compute-0 nova_compute[356901]: 2025-10-11 02:42:09.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:42:09 compute-0 nova_compute[356901]: 2025-10-11 02:42:09.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:42:09 compute-0 nova_compute[356901]: 2025-10-11 02:42:09.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:42:10 compute-0 nova_compute[356901]: 2025-10-11 02:42:10.130 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:10 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:10.194 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '14'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
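Note: this transaction is the follow-up to the "Delaying updating chassis table for 8 seconds" line at 02:42:02; the agent acknowledges nb_cfg 14 by stamping neutron:ovn-metadata-sb-cfg into its Chassis_Private row. In ovsdbapp terms that is a single db_set; a sketch, where sb_api stands for an already-connected southbound API object that this snippet does not construct:

    # Sketch of the ovsdbapp call behind the DbSetCommand above.
    def ack_nb_cfg(sb_api, chassis_uuid, nb_cfg):
        sb_api.db_set(
            'Chassis_Private', chassis_uuid,
            ('external_ids', {'neutron:ovn-metadata-sb-cfg': str(nb_cfg)}),
        ).execute(check_error=True)

    # e.g. ack_nb_cfg(sb_api, '47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6', 14)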
Oct 11 02:42:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1937: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 268 KiB/s rd, 2.1 MiB/s wr, 59 op/s
Oct 11 02:42:10 compute-0 nova_compute[356901]: 2025-10-11 02:42:10.891 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:42:10 compute-0 ceph-mon[191930]: pgmap v1937: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 268 KiB/s rd, 2.1 MiB/s wr, 59 op/s
Oct 11 02:42:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:42:11 compute-0 nova_compute[356901]: 2025-10-11 02:42:11.453 2 INFO nova.compute.manager [None req-ee6a5e06-3c6f-4aa9-8566-72304422622a bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Get console output
Oct 11 02:42:11 compute-0 nova_compute[356901]: 2025-10-11 02:42:11.467 2 INFO oslo.privsep.daemon [None req-ee6a5e06-3c6f-4aa9-8566-72304422622a bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Running privsep helper: ['sudo', 'nova-rootwrap', '/etc/nova/rootwrap.conf', 'privsep-helper', '--config-file', '/etc/nova/nova.conf', '--config-file', '/etc/nova/nova-compute.conf', '--config-dir', '/etc/nova/nova.conf.d', '--privsep_context', 'nova.privsep.sys_admin_pctxt', '--privsep_sock_path', '/tmp/tmp3d7a6d0x/privsep.sock']
Oct 11 02:42:11 compute-0 nova_compute[356901]: 2025-10-11 02:42:11.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:42:11 compute-0 nova_compute[356901]: 2025-10-11 02:42:11.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:42:11 compute-0 nova_compute[356901]: 2025-10-11 02:42:11.968 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944
Oct 11 02:42:12 compute-0 nova_compute[356901]: 2025-10-11 02:42:12.396 2 INFO oslo.privsep.daemon [None req-ee6a5e06-3c6f-4aa9-8566-72304422622a bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Spawned new privsep daemon via rootwrap
Oct 11 02:42:12 compute-0 nova_compute[356901]: 2025-10-11 02:42:12.220 6605 INFO oslo.privsep.daemon [-] privsep daemon starting
Oct 11 02:42:12 compute-0 nova_compute[356901]: 2025-10-11 02:42:12.227 6605 INFO oslo.privsep.daemon [-] privsep process running with uid/gid: 0/0
Oct 11 02:42:12 compute-0 nova_compute[356901]: 2025-10-11 02:42:12.232 6605 INFO oslo.privsep.daemon [-] privsep process running with capabilities (eff/prm/inh): CAP_CHOWN|CAP_DAC_OVERRIDE|CAP_DAC_READ_SEARCH|CAP_FOWNER|CAP_NET_ADMIN|CAP_SYS_ADMIN/CAP_CHOWN|CAP_DAC_OVERRIDE|CAP_DAC_READ_SEARCH|CAP_FOWNER|CAP_NET_ADMIN|CAP_SYS_ADMIN/none
Oct 11 02:42:12 compute-0 nova_compute[356901]: 2025-10-11 02:42:12.232 6605 INFO oslo.privsep.daemon [-] privsep daemon running as pid 6605
Oct 11 02:42:12 compute-0 nova_compute[356901]: 2025-10-11 02:42:12.508 6605 INFO nova.privsep.libvirt [-] Ignored error while reading from instance console pty: can't concat NoneType to bytes
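Note: the sequence from "Running privsep helper" to "privsep daemon running as pid 6605" is oslo.privsep bootstrapping: nova execs the helper through sudo/rootwrap, and the child reports the capability set of nova.privsep.sys_admin_pctxt before serving the console read (which here hit a benign NoneType error on an empty pty). Declaring such a context looks roughly like this; a sketch of the oslo.privsep API, nova's actual declaration may differ in detail:

    # Sketch: an oslo.privsep context with the capability set logged above.
    from oslo_privsep import capabilities as caps
    from oslo_privsep import priv_context

    sys_admin_pctxt = priv_context.PrivContext(
        'nova',
        cfg_section='nova_sys_admin',
        pypath=__name__ + '.sys_admin_pctxt',
        capabilities=[caps.CAP_CHOWN, caps.CAP_DAC_OVERRIDE,
                      caps.CAP_DAC_READ_SEARCH, caps.CAP_FOWNER,
                      caps.CAP_NET_ADMIN, caps.CAP_SYS_ADMIN],
    )

    @sys_admin_pctxt.entrypoint
    def read_pty(path):
        with open(path, 'rb') as f:    # runs inside the privileged daemon
            return f.read()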
Oct 11 02:42:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1938: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 268 KiB/s rd, 2.1 MiB/s wr, 59 op/s
Oct 11 02:42:12 compute-0 ceph-mon[191930]: pgmap v1938: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 268 KiB/s rd, 2.1 MiB/s wr, 59 op/s
Oct 11 02:42:13 compute-0 ovn_controller[88370]: 2025-10-11T02:42:13Z|00161|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:42:13 compute-0 ovn_controller[88370]: 2025-10-11T02:42:13Z|00162|binding|INFO|Releasing lport ffb676d8-51f5-4de3-a31a-71adc7412138 from this chassis (sb_readonly=0)
Oct 11 02:42:13 compute-0 nova_compute[356901]: 2025-10-11 02:42:13.313 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:13 compute-0 nova_compute[356901]: 2025-10-11 02:42:13.754 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:13 compute-0 nova_compute[356901]: 2025-10-11 02:42:13.965 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:42:14 compute-0 podman[458856]: 2025-10-11 02:42:14.244746139 +0000 UTC m=+0.113418220 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:42:14 compute-0 podman[458854]: 2025-10-11 02:42:14.25434931 +0000 UTC m=+0.134499518 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, container_name=ceilometer_agent_ipmi, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.3, managed_by=edpm_ansible)
Oct 11 02:42:14 compute-0 podman[458855]: 2025-10-11 02:42:14.27090265 +0000 UTC m=+0.146238233 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, distribution-scope=public, managed_by=edpm_ansible, architecture=x86_64, io.openshift.tags=minimal rhel9, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, maintainer=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, build-date=2025-08-20T13:12:41, release=1755695350, config_id=edpm, container_name=openstack_network_exporter, io.openshift.expose-services=, com.redhat.component=ubi9-minimal-container, vcs-type=git, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., io.buildah.version=1.33.7, version=9.6, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 02:42:14 compute-0 nova_compute[356901]: 2025-10-11 02:42:14.703 2 DEBUG nova.compute.manager [req-125cce30-fccd-4c0c-b9e7-b51d34c6b11d req-316afa76-d40a-4e4e-ae64-3b3ed347ecec 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Received event network-changed-e332b5d8-f31d-4e8a-99d2-7cc96428d93a external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:42:14 compute-0 nova_compute[356901]: 2025-10-11 02:42:14.704 2 DEBUG nova.compute.manager [req-125cce30-fccd-4c0c-b9e7-b51d34c6b11d req-316afa76-d40a-4e4e-ae64-3b3ed347ecec 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Refreshing instance network info cache due to event network-changed-e332b5d8-f31d-4e8a-99d2-7cc96428d93a. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:42:14 compute-0 nova_compute[356901]: 2025-10-11 02:42:14.706 2 DEBUG oslo_concurrency.lockutils [req-125cce30-fccd-4c0c-b9e7-b51d34c6b11d req-316afa76-d40a-4e4e-ae64-3b3ed347ecec 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-2a3deab0-7a22-486d-86a2-2fc870c8ab2d" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:42:14 compute-0 nova_compute[356901]: 2025-10-11 02:42:14.706 2 DEBUG oslo_concurrency.lockutils [req-125cce30-fccd-4c0c-b9e7-b51d34c6b11d req-316afa76-d40a-4e4e-ae64-3b3ed347ecec 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-2a3deab0-7a22-486d-86a2-2fc870c8ab2d" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:42:14 compute-0 nova_compute[356901]: 2025-10-11 02:42:14.707 2 DEBUG nova.network.neutron [req-125cce30-fccd-4c0c-b9e7-b51d34c6b11d req-316afa76-d40a-4e4e-ae64-3b3ed347ecec 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Refreshing network info cache for port e332b5d8-f31d-4e8a-99d2-7cc96428d93a _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
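The Acquiring/Acquired pair above is oslo.concurrency serializing network-info cache refreshes on a per-instance named lock, so concurrent network-changed events cannot race on the same cache entry. A minimal sketch of the same pattern (the refresh call itself is hypothetical):

    from oslo_concurrency import lockutils

    INSTANCE_UUID = "2a3deab0-7a22-486d-86a2-2fc870c8ab2d"

    # One named lock per instance, matching the "refresh_cache-<uuid>"
    # lock names in the log lines above.
    with lockutils.lock(f"refresh_cache-{INSTANCE_UUID}"):
        # refresh_network_info_cache(INSTANCE_UUID)  # hypothetical work
        pass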
Oct 11 02:42:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1939: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 256 KiB/s rd, 2.0 MiB/s wr, 55 op/s
Oct 11 02:42:14 compute-0 nova_compute[356901]: 2025-10-11 02:42:14.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:42:14 compute-0 nova_compute[356901]: 2025-10-11 02:42:14.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:42:14 compute-0 ceph-mon[191930]: pgmap v1939: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 256 KiB/s rd, 2.0 MiB/s wr, 55 op/s
Oct 11 02:42:14 compute-0 nova_compute[356901]: 2025-10-11 02:42:14.933 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:42:14 compute-0 nova_compute[356901]: 2025-10-11 02:42:14.934 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:42:14 compute-0 nova_compute[356901]: 2025-10-11 02:42:14.934 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:42:14 compute-0 nova_compute[356901]: 2025-10-11 02:42:14.935 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:42:14 compute-0 nova_compute[356901]: 2025-10-11 02:42:14.936 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:42:15 compute-0 nova_compute[356901]: 2025-10-11 02:42:15.134 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:42:15 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3415465380' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:42:15 compute-0 nova_compute[356901]: 2025-10-11 02:42:15.493 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.558s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
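The resource audit shells out to `ceph df` through oslo.concurrency's processutils, which is what logs the "Running cmd" / "returned: 0 in 0.558s" pair above. A minimal sketch of the same call, assuming a reachable cluster and the client.openstack keyring:

    import json
    from oslo_concurrency import processutils

    out, _err = processutils.execute(
        "ceph", "df", "--format=json",
        "--id", "openstack", "--conf", "/etc/ceph/ceph.conf",
    )
    # Field names per `ceph df --format=json` output.
    stats = json.loads(out)
    print(stats["stats"]["total_bytes"], stats["stats"]["total_avail_bytes"])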
Oct 11 02:42:15 compute-0 nova_compute[356901]: 2025-10-11 02:42:15.592 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000c as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:42:15 compute-0 nova_compute[356901]: 2025-10-11 02:42:15.592 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000c as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:42:15 compute-0 nova_compute[356901]: 2025-10-11 02:42:15.597 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:42:15 compute-0 nova_compute[356901]: 2025-10-11 02:42:15.597 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:42:15 compute-0 nova_compute[356901]: 2025-10-11 02:42:15.597 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:42:15 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3415465380' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:42:16 compute-0 nova_compute[356901]: 2025-10-11 02:42:16.132 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:42:16 compute-0 nova_compute[356901]: 2025-10-11 02:42:16.133 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3520MB free_disk=59.90969467163086GB free_vcpus=6 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
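The hypervisor resource view above embeds the host's PCI device list as JSON. A short sketch of grouping those entries by vendor, using two entries copied verbatim from the logged array:

    import json
    from collections import Counter

    pci_devices = json.loads('''[
      {"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0",
       "product_id": "1237", "vendor_id": "8086", "numa_node": null,
       "label": "label_8086_1237", "dev_type": "type-PCI"},
      {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0",
       "product_id": "1002", "vendor_id": "1af4", "numa_node": null,
       "label": "label_1af4_1002", "dev_type": "type-PCI"}
    ]''')
    # 8086 = Intel chipset functions, 1af4 = virtio devices on this KVM guest.
    print(Counter(dev["vendor_id"] for dev in pci_devices))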
Oct 11 02:42:16 compute-0 nova_compute[356901]: 2025-10-11 02:42:16.134 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:42:16 compute-0 nova_compute[356901]: 2025-10-11 02:42:16.134 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:42:16 compute-0 nova_compute[356901]: 2025-10-11 02:42:16.207 2 DEBUG nova.network.neutron [req-125cce30-fccd-4c0c-b9e7-b51d34c6b11d req-316afa76-d40a-4e4e-ae64-3b3ed347ecec 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Updated VIF entry in instance network info cache for port e332b5d8-f31d-4e8a-99d2-7cc96428d93a. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:42:16 compute-0 nova_compute[356901]: 2025-10-11 02:42:16.208 2 DEBUG nova.network.neutron [req-125cce30-fccd-4c0c-b9e7-b51d34c6b11d req-316afa76-d40a-4e4e-ae64-3b3ed347ecec 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Updating instance_info_cache with network_info: [{"id": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "address": "fa:16:3e:c6:9a:0a", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape332b5d8-f3", "ovs_interfaceid": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
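The instance_info_cache update above carries the full VIF model. A minimal sketch of pulling the fixed IPs and MTU out of such an entry, using a representative subset of the structure logged:

    # Representative subset of the network_info entry above.
    vif = {
        "id": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a",
        "address": "fa:16:3e:c6:9a:0a",
        "network": {
            "meta": {"mtu": 1442, "tunneled": True},
            "subnets": [{
                "cidr": "10.100.0.0/28",
                "ips": [{"address": "10.100.0.4", "type": "fixed"}],
            }],
        },
    }
    fixed = [ip["address"]
             for subnet in vif["network"]["subnets"]
             for ip in subnet["ips"] if ip["type"] == "fixed"]
    print(fixed, vif["network"]["meta"]["mtu"])   # ['10.100.0.4'] 1442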
Oct 11 02:42:16 compute-0 nova_compute[356901]: 2025-10-11 02:42:16.213 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:42:16 compute-0 nova_compute[356901]: 2025-10-11 02:42:16.214 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 2a3deab0-7a22-486d-86a2-2fc870c8ab2d actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:42:16 compute-0 nova_compute[356901]: 2025-10-11 02:42:16.214 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 2 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:42:16 compute-0 nova_compute[356901]: 2025-10-11 02:42:16.214 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1152MB phys_disk=59GB used_disk=3GB total_vcpus=8 used_vcpus=2 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:42:16 compute-0 nova_compute[356901]: 2025-10-11 02:42:16.228 2 DEBUG oslo_concurrency.lockutils [req-125cce30-fccd-4c0c-b9e7-b51d34c6b11d req-316afa76-d40a-4e4e-ae64-3b3ed347ecec 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-2a3deab0-7a22-486d-86a2-2fc870c8ab2d" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:42:16 compute-0 nova_compute[356901]: 2025-10-11 02:42:16.264 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:42:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:42:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:42:16 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1891148637' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:42:16 compute-0 nova_compute[356901]: 2025-10-11 02:42:16.792 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.528s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:42:16 compute-0 nova_compute[356901]: 2025-10-11 02:42:16.806 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:42:16 compute-0 nova_compute[356901]: 2025-10-11 02:42:16.826 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
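The inventory data above is what placement uses to size the provider: usable capacity per resource class is (total - reserved) * allocation_ratio. A worked sketch with the exact figures from the log line:

    inventory = {
        "VCPU":      {"total": 8,    "reserved": 0,   "allocation_ratio": 4.0},
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "DISK_GB":   {"total": 59,   "reserved": 1,   "allocation_ratio": 0.9},
    }
    for rc, inv in inventory.items():
        usable = int((inv["total"] - inv["reserved"]) * inv["allocation_ratio"])
        print(rc, usable)   # VCPU 32, MEMORY_MB 7168, DISK_GB 52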
Oct 11 02:42:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1940: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 92 KiB/s rd, 485 KiB/s wr, 17 op/s
Oct 11 02:42:16 compute-0 nova_compute[356901]: 2025-10-11 02:42:16.863 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:42:16 compute-0 nova_compute[356901]: 2025-10-11 02:42:16.864 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.730s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:42:16 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1891148637' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:42:16 compute-0 ceph-mon[191930]: pgmap v1940: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 92 KiB/s rd, 485 KiB/s wr, 17 op/s
Oct 11 02:42:18 compute-0 nova_compute[356901]: 2025-10-11 02:42:18.761 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1941: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 21 KiB/s rd, 19 KiB/s wr, 2 op/s
Oct 11 02:42:18 compute-0 ceph-mon[191930]: pgmap v1941: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 21 KiB/s rd, 19 KiB/s wr, 2 op/s
Oct 11 02:42:20 compute-0 nova_compute[356901]: 2025-10-11 02:42:20.139 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1942: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 5.0 KiB/s rd, 16 KiB/s wr, 0 op/s
Oct 11 02:42:20 compute-0 ceph-mon[191930]: pgmap v1942: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 5.0 KiB/s rd, 16 KiB/s wr, 0 op/s
Oct 11 02:42:21 compute-0 podman[458958]: 2025-10-11 02:42:21.259092954 +0000 UTC m=+0.136798598 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, distribution-scope=public, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, managed_by=edpm_ansible, build-date=2024-09-18T21:23:30, vcs-type=git, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.openshift.tags=base rhel9, release=1214.1726694543, release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.openshift.expose-services=, summary=Provides the latest release of Red Hat Universal Base Image 9., vendor=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.buildah.version=1.29.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., version=9.4, container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, architecture=x86_64, com.redhat.component=ubi9-container)
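The kepler config above publishes port 8888 on the host network; kepler serves Prometheus-format metrics over HTTP. A minimal sketch of scraping it locally (the /metrics path is assumed from kepler's standard exporter behavior, not from this log):

    import urllib.request

    with urllib.request.urlopen("http://localhost:8888/metrics", timeout=5) as resp:
        for line in resp.read().decode().splitlines():
            if line.startswith("kepler_"):
                print(line)   # first kepler metric sample
                break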
Oct 11 02:42:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:42:21 compute-0 nova_compute[356901]: 2025-10-11 02:42:21.746 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:42:21 compute-0 nova_compute[356901]: 2025-10-11 02:42:21.747 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98" acquired by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:42:21 compute-0 nova_compute[356901]: 2025-10-11 02:42:21.772 2 DEBUG nova.compute.manager [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Starting instance... _do_build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2402
Oct 11 02:42:21 compute-0 nova_compute[356901]: 2025-10-11 02:42:21.863 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:42:21 compute-0 nova_compute[356901]: 2025-10-11 02:42:21.864 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:42:21 compute-0 nova_compute[356901]: 2025-10-11 02:42:21.872 2 DEBUG nova.virt.hardware [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Require both a host and instance NUMA topology to fit instance on host. numa_fit_instance_to_host /usr/lib/python3.9/site-packages/nova/virt/hardware.py:2368
Oct 11 02:42:21 compute-0 nova_compute[356901]: 2025-10-11 02:42:21.872 2 INFO nova.compute.claims [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Claim successful on node compute-0.ctlplane.example.com
Oct 11 02:42:22 compute-0 nova_compute[356901]: 2025-10-11 02:42:22.022 2 DEBUG oslo_concurrency.processutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:42:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:42:22 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2104258141' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
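The handle_command/dispatch pair above is the monitor-side view of the same df request; clients can issue it directly over librados instead of the CLI. A minimal sketch with the python-rados bindings, connecting as the entity seen in the audit line (client.openstack):

    import json
    import rados

    cluster = rados.Rados(conffile="/etc/ceph/ceph.conf", name="client.openstack")
    cluster.connect()
    # mon_command takes a JSON command string plus an input buffer and
    # returns (retcode, output buffer, status string).
    ret, out, errs = cluster.mon_command(
        json.dumps({"prefix": "df", "format": "json"}), b"")
    assert ret == 0, errs
    print(json.loads(out)["stats"]["total_avail_bytes"])
    cluster.shutdown()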
Oct 11 02:42:22 compute-0 nova_compute[356901]: 2025-10-11 02:42:22.555 2 DEBUG oslo_concurrency.processutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.533s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:42:22 compute-0 nova_compute[356901]: 2025-10-11 02:42:22.567 2 DEBUG nova.compute.provider_tree [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:42:22 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2104258141' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:42:22 compute-0 nova_compute[356901]: 2025-10-11 02:42:22.585 2 DEBUG nova.scheduler.client.report [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:42:22 compute-0 nova_compute[356901]: 2025-10-11 02:42:22.608 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: held 0.744s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:42:22 compute-0 nova_compute[356901]: 2025-10-11 02:42:22.609 2 DEBUG nova.compute.manager [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Start building networks asynchronously for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2799
Oct 11 02:42:22 compute-0 nova_compute[356901]: 2025-10-11 02:42:22.670 2 DEBUG nova.compute.manager [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Allocating IP information in the background. _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1952
Oct 11 02:42:22 compute-0 nova_compute[356901]: 2025-10-11 02:42:22.671 2 DEBUG nova.network.neutron [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] allocate_for_instance() allocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1156
Oct 11 02:42:22 compute-0 nova_compute[356901]: 2025-10-11 02:42:22.700 2 INFO nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Ignoring supplied device name: /dev/vda. Libvirt can't honour user-supplied dev names
Oct 11 02:42:22 compute-0 nova_compute[356901]: 2025-10-11 02:42:22.721 2 DEBUG nova.compute.manager [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Start building block device mappings for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2834
Oct 11 02:42:22 compute-0 nova_compute[356901]: 2025-10-11 02:42:22.838 2 DEBUG nova.compute.manager [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Start spawning the instance on the hypervisor. _build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2608
Oct 11 02:42:22 compute-0 nova_compute[356901]: 2025-10-11 02:42:22.839 2 DEBUG nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Creating instance directory _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4723
Oct 11 02:42:22 compute-0 nova_compute[356901]: 2025-10-11 02:42:22.840 2 INFO nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Creating image(s)
Oct 11 02:42:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1943: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 5.0 KiB/s rd, 16 KiB/s wr, 0 op/s
Oct 11 02:42:22 compute-0 nova_compute[356901]: 2025-10-11 02:42:22.873 2 DEBUG nova.storage.rbd_utils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] rbd image 97d9494c-4ce4-4ff3-a0fa-d5cda135da98_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:42:22 compute-0 nova_compute[356901]: 2025-10-11 02:42:22.914 2 DEBUG nova.storage.rbd_utils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] rbd image 97d9494c-4ce4-4ff3-a0fa-d5cda135da98_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:42:22 compute-0 nova_compute[356901]: 2025-10-11 02:42:22.952 2 DEBUG nova.storage.rbd_utils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] rbd image 97d9494c-4ce4-4ff3-a0fa-d5cda135da98_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:42:22 compute-0 nova_compute[356901]: 2025-10-11 02:42:22.968 2 DEBUG oslo_concurrency.processutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:42:23 compute-0 nova_compute[356901]: 2025-10-11 02:42:23.042 2 DEBUG nova.policy [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Policy check for network:attach_external_network failed with credentials {'is_admin': False, 'user_id': 'bcba1b18a2ad479587a15fe415ae307a', 'user_domain_id': 'default', 'system_scope': None, 'domain_id': None, 'project_id': '86dfc4ba5e494748b86bc9b983426779', 'project_domain_id': 'default', 'roles': ['member', 'reader'], 'is_admin_project': True, 'service_user_id': None, 'service_user_domain_id': None, 'service_project_id': None, 'service_project_domain_id': None, 'service_roles': []} authorize /usr/lib/python3.9/site-packages/nova/policy.py:203
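The policy check above fails because the request credentials carry only the member and reader roles. A minimal oslo.policy sketch of the same kind of decision; the rule default registered here is illustrative only (nova registers its own defaults at startup):

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_default(policy.RuleDefault(
        "network:attach_external_network", "role:admin"))  # illustrative default
    creds = {"roles": ["member", "reader"],
             "project_id": "86dfc4ba5e494748b86bc9b983426779"}
    # False: the credentials lack the admin role the rule demands.
    print(enforcer.enforce("network:attach_external_network", {}, creds))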
Oct 11 02:42:23 compute-0 nova_compute[356901]: 2025-10-11 02:42:23.048 2 DEBUG oslo_concurrency.processutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d --force-share --output=json" returned: 0 in 0.080s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
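The qemu-img probe above is wrapped in oslo_concurrency.prlimit to cap the child's address space and CPU time before parsing untrusted image metadata. A minimal sketch reproducing the logged command and reading the JSON result:

    import json
    import subprocess

    base = "/var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d"
    out = subprocess.check_output(
        ["python3", "-m", "oslo_concurrency.prlimit",
         "--as=1073741824", "--cpu=30", "--",
         "env", "LC_ALL=C", "LANG=C",
         "qemu-img", "info", base, "--force-share", "--output=json"])
    info = json.loads(out)
    print(info["format"], info["virtual-size"])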
Oct 11 02:42:23 compute-0 nova_compute[356901]: 2025-10-11 02:42:23.050 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:42:23 compute-0 nova_compute[356901]: 2025-10-11 02:42:23.050 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:42:23 compute-0 nova_compute[356901]: 2025-10-11 02:42:23.051 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:42:23 compute-0 nova_compute[356901]: 2025-10-11 02:42:23.084 2 DEBUG nova.storage.rbd_utils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] rbd image 97d9494c-4ce4-4ff3-a0fa-d5cda135da98_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:42:23 compute-0 nova_compute[356901]: 2025-10-11 02:42:23.091 2 DEBUG oslo_concurrency.processutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d 97d9494c-4ce4-4ff3-a0fa-d5cda135da98_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:42:23 compute-0 nova_compute[356901]: 2025-10-11 02:42:23.506 2 DEBUG oslo_concurrency.processutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d 97d9494c-4ce4-4ff3-a0fa-d5cda135da98_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.415s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:42:23 compute-0 ceph-mon[191930]: pgmap v1943: 321 pgs: 321 active+clean; 198 MiB data, 351 MiB used, 60 GiB / 60 GiB avail; 5.0 KiB/s rd, 16 KiB/s wr, 0 op/s
Oct 11 02:42:23 compute-0 nova_compute[356901]: 2025-10-11 02:42:23.655 2 DEBUG nova.storage.rbd_utils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] resizing rbd image 97d9494c-4ce4-4ff3-a0fa-d5cda135da98_disk to 1073741824 resize /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:288
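After the `rbd import` into the vms pool, nova resizes the image to the flavor's root disk size (1073741824 bytes here). A minimal sketch of the same resize through the librbd Python bindings, assuming the same conf file and client.openstack credentials:

    import rados
    import rbd

    cluster = rados.Rados(conffile="/etc/ceph/ceph.conf", name="client.openstack")
    cluster.connect()
    ioctx = cluster.open_ioctx("vms")   # pool used by the rbd import above
    with rbd.Image(ioctx, "97d9494c-4ce4-4ff3-a0fa-d5cda135da98_disk") as img:
        img.resize(1 * 1024 ** 3)       # 1073741824 bytes, as logged
        print(img.size())
    ioctx.close()
    cluster.shutdown()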
Oct 11 02:42:23 compute-0 nova_compute[356901]: 2025-10-11 02:42:23.764 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:23 compute-0 nova_compute[356901]: 2025-10-11 02:42:23.913 2 DEBUG nova.objects.instance [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lazy-loading 'migration_context' on Instance uuid 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:42:23 compute-0 nova_compute[356901]: 2025-10-11 02:42:23.937 2 DEBUG nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Created local disks _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4857
Oct 11 02:42:23 compute-0 nova_compute[356901]: 2025-10-11 02:42:23.938 2 DEBUG nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Ensure instance console log exists: /var/lib/nova/instances/97d9494c-4ce4-4ff3-a0fa-d5cda135da98/console.log _ensure_console_log_for_instance /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4609
Oct 11 02:42:23 compute-0 nova_compute[356901]: 2025-10-11 02:42:23.939 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "vgpu_resources" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:42:23 compute-0 nova_compute[356901]: 2025-10-11 02:42:23.940 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "vgpu_resources" acquired by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:42:23 compute-0 nova_compute[356901]: 2025-10-11 02:42:23.941 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "vgpu_resources" "released" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:42:23 compute-0 nova_compute[356901]: 2025-10-11 02:42:23.947 2 DEBUG nova.network.neutron [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Successfully created port: 6c94515a-556d-4aeb-b39e-6e043f460cd8 _create_port_minimal /usr/lib/python3.9/site-packages/nova/network/neutron.py:548
Oct 11 02:42:24 compute-0 sudo[459166]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:42:24 compute-0 sudo[459166]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:24 compute-0 sudo[459166]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:24 compute-0 sudo[459191]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:42:24 compute-0 sudo[459191]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:24 compute-0 sudo[459191]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:24 compute-0 sudo[459216]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:42:24 compute-0 sudo[459216]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:24 compute-0 sudo[459216]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:24 compute-0 sudo[459241]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:42:24 compute-0 sudo[459241]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:24 compute-0 nova_compute[356901]: 2025-10-11 02:42:24.733 2 DEBUG nova.network.neutron [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Successfully updated port: 6c94515a-556d-4aeb-b39e-6e043f460cd8 _update_port /usr/lib/python3.9/site-packages/nova/network/neutron.py:586
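The port nova just created and updated can be inspected out-of-band through openstacksdk. A minimal sketch using the port ID from the log; 'mycloud' is a hypothetical clouds.yaml entry, not from this log:

    import openstack

    conn = openstack.connect(cloud="mycloud")
    port = conn.network.get_port("6c94515a-556d-4aeb-b39e-6e043f460cd8")
    print(port.status, port.mac_address,
          [ip["ip_address"] for ip in port.fixed_ips])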
Oct 11 02:42:24 compute-0 nova_compute[356901]: 2025-10-11 02:42:24.755 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "refresh_cache-97d9494c-4ce4-4ff3-a0fa-d5cda135da98" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:42:24 compute-0 nova_compute[356901]: 2025-10-11 02:42:24.757 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquired lock "refresh_cache-97d9494c-4ce4-4ff3-a0fa-d5cda135da98" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:42:24 compute-0 nova_compute[356901]: 2025-10-11 02:42:24.758 2 DEBUG nova.network.neutron [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:42:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1944: 321 pgs: 321 active+clean; 222 MiB data, 363 MiB used, 60 GiB / 60 GiB avail; 5.2 KiB/s rd, 1017 KiB/s wr, 2 op/s
Oct 11 02:42:24 compute-0 nova_compute[356901]: 2025-10-11 02:42:24.862 2 DEBUG nova.compute.manager [req-0a3b728f-d7c1-4581-86b8-974e032cceb3 req-5d49cef6-1887-4737-b37a-79499fb16f3d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Received event network-changed-6c94515a-556d-4aeb-b39e-6e043f460cd8 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:42:24 compute-0 nova_compute[356901]: 2025-10-11 02:42:24.863 2 DEBUG nova.compute.manager [req-0a3b728f-d7c1-4581-86b8-974e032cceb3 req-5d49cef6-1887-4737-b37a-79499fb16f3d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Refreshing instance network info cache due to event network-changed-6c94515a-556d-4aeb-b39e-6e043f460cd8. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:42:24 compute-0 nova_compute[356901]: 2025-10-11 02:42:24.864 2 DEBUG oslo_concurrency.lockutils [req-0a3b728f-d7c1-4581-86b8-974e032cceb3 req-5d49cef6-1887-4737-b37a-79499fb16f3d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-97d9494c-4ce4-4ff3-a0fa-d5cda135da98" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:42:24 compute-0 ceph-mon[191930]: pgmap v1944: 321 pgs: 321 active+clean; 222 MiB data, 363 MiB used, 60 GiB / 60 GiB avail; 5.2 KiB/s rd, 1017 KiB/s wr, 2 op/s
Oct 11 02:42:24 compute-0 nova_compute[356901]: 2025-10-11 02:42:24.952 2 DEBUG nova.network.neutron [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Instance cache missing network info. _get_preexisting_port_ids /usr/lib/python3.9/site-packages/nova/network/neutron.py:3323
Oct 11 02:42:25 compute-0 nova_compute[356901]: 2025-10-11 02:42:25.141 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:25 compute-0 sudo[459241]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:25 compute-0 sudo[459295]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:42:25 compute-0 sudo[459295]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:25 compute-0 sudo[459295]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:25 compute-0 sudo[459320]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:42:25 compute-0 sudo[459320]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:25 compute-0 sudo[459320]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:25 compute-0 sudo[459345]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:42:25 compute-0 sudo[459345]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:25 compute-0 sudo[459345]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:25 compute-0 sudo[459370]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 list-networks
Oct 11 02:42:25 compute-0 sudo[459370]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:26 compute-0 sudo[459370]: pam_unix(sudo:session): session closed for user root
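The sudo/pam lines above show cephadm's management pattern: the mgr drops a content-addressed copy of the cephadm binary under /var/lib/ceph/<fsid>/ and runs subcommands like gather-facts and list-networks as root. A minimal sketch of invoking the same gather-facts call, assuming it prints host facts as JSON (key names here are illustrative and hedged with .get):

    import json
    import subprocess

    FSID = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"
    BIN = (f"/var/lib/ceph/{FSID}/cephadm."
           "31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d")
    out = subprocess.check_output(
        ["sudo", "/bin/python3", BIN, "--timeout", "895", "gather-facts"])
    facts = json.loads(out)
    print(facts.get("hostname"), facts.get("memory_total_kb"))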
Oct 11 02:42:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:42:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:42:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:42:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:42:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:42:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:42:26 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:42:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:42:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:42:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:42:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:42:26 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 32dfb598-bc46-445f-ac0b-55a9141cd2ad does not exist
Oct 11 02:42:26 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 9135265d-a6f9-4867-9ffc-30a43f4cebf7 does not exist
Oct 11 02:42:26 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 1a627b84-b5a9-421a-a752-c68641cd2d58 does not exist
Oct 11 02:42:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:42:26 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:42:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:42:26 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:42:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:42:26 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:42:26 compute-0 sudo[459413]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:42:26 compute-0 sudo[459413]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:26 compute-0 sudo[459413]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:26 compute-0 sudo[459438]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:42:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:42:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:42:26 compute-0 sudo[459438]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:26 compute-0 sudo[459438]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:42:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:42:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:42:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:42:26 compute-0 sudo[459463]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:42:26 compute-0 sudo[459463]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:26 compute-0 sudo[459463]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1945: 321 pgs: 321 active+clean; 234 MiB data, 369 MiB used, 60 GiB / 60 GiB avail; 5.6 KiB/s rd, 1.5 MiB/s wr, 4 op/s
Oct 11 02:42:26 compute-0 sudo[459488]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:42:26 compute-0 sudo[459488]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.200 2 DEBUG nova.network.neutron [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Updating instance_info_cache with network_info: [{"id": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "address": "fa:16:3e:0f:4d:31", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.3", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap6c94515a-55", "ovs_interfaceid": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.227 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Releasing lock "refresh_cache-97d9494c-4ce4-4ff3-a0fa-d5cda135da98" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.227 2 DEBUG nova.compute.manager [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Instance network_info: |[{"id": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "address": "fa:16:3e:0f:4d:31", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.3", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap6c94515a-55", "ovs_interfaceid": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}]| _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1967
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.228 2 DEBUG oslo_concurrency.lockutils [req-0a3b728f-d7c1-4581-86b8-974e032cceb3 req-5d49cef6-1887-4737-b37a-79499fb16f3d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-97d9494c-4ce4-4ff3-a0fa-d5cda135da98" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.228 2 DEBUG nova.network.neutron [req-0a3b728f-d7c1-4581-86b8-974e032cceb3 req-5d49cef6-1887-4737-b37a-79499fb16f3d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Refreshing network info cache for port 6c94515a-556d-4aeb-b39e-6e043f460cd8 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.232 2 DEBUG nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Start _get_guest_xml network_info=[{"id": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "address": "fa:16:3e:0f:4d:31", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.3", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap6c94515a-55", "ovs_interfaceid": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] disk_info={'disk_bus': 'virtio', 'cdrom_bus': 'sata', 'mapping': {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.config': {'bus': 'sata', 'dev': 'sda', 'type': 'cdrom'}}} image_meta=ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:38:04Z,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=0,min_ram=0,name='cirros-0.6.2-x86_64-disk.img',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:38:05Z,virtual_size=<?>,visibility=<?>) rescue=None block_device_info={'root_device_name': '/dev/vda', 'image': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'boot_index': 0, 'device_name': '/dev/vda', 'size': 0, 'encryption_format': None, 'image_id': '72f37f2e-4296-450e-9a12-10717f4ac7dc'}], 'ephemerals': [], 'block_device_mapping': [], 'swap': None} _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7549
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.239 2 WARNING nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.248 2 DEBUG nova.virt.libvirt.host [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V1... _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1653
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.249 2 DEBUG nova.virt.libvirt.host [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CPU controller missing on host. _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1663
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.259 2 DEBUG nova.virt.libvirt.host [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V2... _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1672
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.260 2 DEBUG nova.virt.libvirt.host [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CPU controller found on host. _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1679
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.261 2 DEBUG nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CPU mode 'host-model' models '' was chosen, with extra flags: '' _get_guest_cpu_model_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:5396
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.261 2 DEBUG nova.virt.hardware [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Getting desirable topologies for flavor Flavor(created_at=2025-10-11T02:38:03Z,deleted=False,deleted_at=None,description=None,disabled=False,ephemeral_gb=0,extra_specs={hw_rng:allowed='True'},flavorid='6dff30d1-85df-4e9c-9163-a20ba47bb0c7',id=3,is_public=True,memory_mb=128,name='m1.nano',projects=<?>,root_gb=1,rxtx_factor=1.0,swap=0,updated_at=None,vcpu_weight=0,vcpus=1) and image_meta ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:38:04Z,direct_url=<?>,disk_format='qcow2',id=72f37f2e-4296-450e-9a12-10717f4ac7dc,min_disk=0,min_ram=0,name='cirros-0.6.2-x86_64-disk.img',owner='97026531b3404a11869cb85a059c4a0d',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:38:05Z,virtual_size=<?>,visibility=<?>), allow threads: True _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:563
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.262 2 DEBUG nova.virt.hardware [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Flavor limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:348
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.262 2 DEBUG nova.virt.hardware [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Image limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:352
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.262 2 DEBUG nova.virt.hardware [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Flavor pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:388
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.263 2 DEBUG nova.virt.hardware [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Image pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:392
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.263 2 DEBUG nova.virt.hardware [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Chose sockets=0, cores=0, threads=0; limits were sockets=65536, cores=65536, threads=65536 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:430
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.264 2 DEBUG nova.virt.hardware [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Topology preferred VirtCPUTopology(cores=0,sockets=0,threads=0), maximum VirtCPUTopology(cores=65536,sockets=65536,threads=65536) _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:569
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.264 2 DEBUG nova.virt.hardware [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Build topologies for 1 vcpu(s) 1:1:1 _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:471
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.265 2 DEBUG nova.virt.hardware [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Got 1 possible topologies _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:501
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.265 2 DEBUG nova.virt.hardware [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Possible topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:575
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.265 2 DEBUG nova.virt.hardware [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Sorted desired topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:577
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.269 2 DEBUG oslo_concurrency.processutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:42:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:42:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:42:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:42:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:42:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:42:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:42:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:42:27 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:42:27 compute-0 ceph-mon[191930]: pgmap v1945: 321 pgs: 321 active+clean; 234 MiB data, 369 MiB used, 60 GiB / 60 GiB avail; 5.6 KiB/s rd, 1.5 MiB/s wr, 4 op/s
Oct 11 02:42:27 compute-0 podman[459554]: 2025-10-11 02:42:27.375844019 +0000 UTC m=+0.066029897 container create 01d803e122e8fecffbb754b231a1ebeb5e26eb9eca6066ad92e909a49b868b63 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_haslett, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS)
Oct 11 02:42:27 compute-0 podman[459554]: 2025-10-11 02:42:27.351476832 +0000 UTC m=+0.041662740 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:42:27 compute-0 systemd[1]: Started libpod-conmon-01d803e122e8fecffbb754b231a1ebeb5e26eb9eca6066ad92e909a49b868b63.scope.
Oct 11 02:42:27 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:42:27 compute-0 podman[459554]: 2025-10-11 02:42:27.515385209 +0000 UTC m=+0.205571097 container init 01d803e122e8fecffbb754b231a1ebeb5e26eb9eca6066ad92e909a49b868b63 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_haslett, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, ceph=True)
Oct 11 02:42:27 compute-0 podman[459554]: 2025-10-11 02:42:27.527371521 +0000 UTC m=+0.217557389 container start 01d803e122e8fecffbb754b231a1ebeb5e26eb9eca6066ad92e909a49b868b63 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_haslett, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:42:27 compute-0 podman[459554]: 2025-10-11 02:42:27.53130638 +0000 UTC m=+0.221492248 container attach 01d803e122e8fecffbb754b231a1ebeb5e26eb9eca6066ad92e909a49b868b63 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_haslett, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 02:42:27 compute-0 elegant_haslett[459614]: 167 167
Oct 11 02:42:27 compute-0 systemd[1]: libpod-01d803e122e8fecffbb754b231a1ebeb5e26eb9eca6066ad92e909a49b868b63.scope: Deactivated successfully.
Oct 11 02:42:27 compute-0 podman[459554]: 2025-10-11 02:42:27.537977332 +0000 UTC m=+0.228163200 container died 01d803e122e8fecffbb754b231a1ebeb5e26eb9eca6066ad92e909a49b868b63 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_haslett, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True)
Oct 11 02:42:27 compute-0 podman[459583]: 2025-10-11 02:42:27.568060681 +0000 UTC m=+0.119351060 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3)
Oct 11 02:42:27 compute-0 systemd[1]: var-lib-containers-storage-overlay-df83670a6212fc72b68cb81c5faa9b966ca3b9c81aa4cd9ba89e22ec9700a4cb-merged.mount: Deactivated successfully.
Oct 11 02:42:27 compute-0 podman[459577]: 2025-10-11 02:42:27.577868648 +0000 UTC m=+0.124906708 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, org.label-schema.name=CentOS Stream 10 Base Image, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:42:27 compute-0 podman[459554]: 2025-10-11 02:42:27.584571291 +0000 UTC m=+0.274757159 container remove 01d803e122e8fecffbb754b231a1ebeb5e26eb9eca6066ad92e909a49b868b63 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elegant_haslett, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:42:27 compute-0 podman[459568]: 2025-10-11 02:42:27.586602442 +0000 UTC m=+0.147982016 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:42:27 compute-0 systemd[1]: libpod-conmon-01d803e122e8fecffbb754b231a1ebeb5e26eb9eca6066ad92e909a49b868b63.scope: Deactivated successfully.
Oct 11 02:42:27 compute-0 podman[459571]: 2025-10-11 02:42:27.63678561 +0000 UTC m=+0.196806462 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_managed=true)
Oct 11 02:42:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:42:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2139223255' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:42:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:42:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2139223255' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:42:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:42:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1353827115' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:42:27 compute-0 podman[459694]: 2025-10-11 02:42:27.805423549 +0000 UTC m=+0.070676348 container create ce9e2c667510a930e6e56811c70f7482894e631e83ecd97fd8cbcb389951b6c6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_germain, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.836 2 DEBUG oslo_concurrency.processutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.567s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:42:27 compute-0 podman[459694]: 2025-10-11 02:42:27.773103132 +0000 UTC m=+0.038356021 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:42:27 compute-0 systemd[1]: Started libpod-conmon-ce9e2c667510a930e6e56811c70f7482894e631e83ecd97fd8cbcb389951b6c6.scope.
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.886 2 DEBUG nova.storage.rbd_utils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] rbd image 97d9494c-4ce4-4ff3-a0fa-d5cda135da98_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:42:27 compute-0 nova_compute[356901]: 2025-10-11 02:42:27.897 2 DEBUG oslo_concurrency.processutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:42:27 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:42:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/158fac24e372cd2f32f43a3be10607dff6c3fae616a3db20db4dbe5678717233/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:42:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/158fac24e372cd2f32f43a3be10607dff6c3fae616a3db20db4dbe5678717233/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:42:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/158fac24e372cd2f32f43a3be10607dff6c3fae616a3db20db4dbe5678717233/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:42:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/158fac24e372cd2f32f43a3be10607dff6c3fae616a3db20db4dbe5678717233/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:42:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/158fac24e372cd2f32f43a3be10607dff6c3fae616a3db20db4dbe5678717233/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:42:27 compute-0 podman[459694]: 2025-10-11 02:42:27.939031979 +0000 UTC m=+0.204284808 container init ce9e2c667510a930e6e56811c70f7482894e631e83ecd97fd8cbcb389951b6c6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_germain, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:42:27 compute-0 podman[459694]: 2025-10-11 02:42:27.959193569 +0000 UTC m=+0.224446368 container start ce9e2c667510a930e6e56811c70f7482894e631e83ecd97fd8cbcb389951b6c6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_germain, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507)
Oct 11 02:42:27 compute-0 podman[459694]: 2025-10-11 02:42:27.964047215 +0000 UTC m=+0.229300004 container attach ce9e2c667510a930e6e56811c70f7482894e631e83ecd97fd8cbcb389951b6c6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_germain, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
Oct 11 02:42:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2139223255' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:42:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2139223255' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:42:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1353827115' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:42:28 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:42:28 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1732692437' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.391 2 DEBUG oslo_concurrency.processutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.494s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.394 2 DEBUG nova.virt.libvirt.vif [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:42:20Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='tempest-TestNetworkBasicOps-server-1362866622',display_name='tempest-TestNetworkBasicOps-server-1362866622',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-testnetworkbasicops-server-1362866622',id=13,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBBvOI8sBNl5TpioVKGpLN8dhq3coyN+dxFn+5vc5Z4DYhusOh+pMF8qIT/hioWLecBE4NbVqzpuQToM0paZi+FH/wtMu/qV3DwVgbNJMA/2dr3YQIFl6T0rS5QbQV7dDoQ==',key_name='tempest-TestNetworkBasicOps-845830948',keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='86dfc4ba5e494748b86bc9b983426779',ramdisk_id='',reservation_id='r-ww57d129',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_hw_rng_model='virtio',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-TestNetworkBasicOps-494564743',owner_user_name='tempest-TestNetworkBasicOps-494564743-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:42:22Z,user_data=None,user_id='bcba1b18a2ad479587a15fe415ae307a',uuid=97d9494c-4ce4-4ff3-a0fa-d5cda135da98,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "address": "fa:16:3e:0f:4d:31", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.3", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap6c94515a-55", "ovs_interfaceid": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} virt_type=kvm get_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:563
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.396 2 DEBUG nova.network.os_vif_util [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Converting VIF {"id": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "address": "fa:16:3e:0f:4d:31", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.3", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap6c94515a-55", "ovs_interfaceid": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.398 2 DEBUG nova.network.os_vif_util [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:0f:4d:31,bridge_name='br-int',has_traffic_filtering=True,id=6c94515a-556d-4aeb-b39e-6e043f460cd8,network=Network(c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap6c94515a-55') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.400 2 DEBUG nova.objects.instance [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lazy-loading 'pci_devices' on Instance uuid 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.428 2 DEBUG nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] End _get_guest_xml xml=<domain type="kvm">
Oct 11 02:42:28 compute-0 nova_compute[356901]:   <uuid>97d9494c-4ce4-4ff3-a0fa-d5cda135da98</uuid>
Oct 11 02:42:28 compute-0 nova_compute[356901]:   <name>instance-0000000d</name>
Oct 11 02:42:28 compute-0 nova_compute[356901]:   <memory>131072</memory>
Oct 11 02:42:28 compute-0 nova_compute[356901]:   <vcpu>1</vcpu>
Oct 11 02:42:28 compute-0 nova_compute[356901]:   <metadata>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.1">
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <nova:package version="27.5.2-0.20250829104910.6f8decf.el9"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <nova:name>tempest-TestNetworkBasicOps-server-1362866622</nova:name>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <nova:creationTime>2025-10-11 02:42:27</nova:creationTime>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <nova:flavor name="m1.nano">
Oct 11 02:42:28 compute-0 nova_compute[356901]:         <nova:memory>128</nova:memory>
Oct 11 02:42:28 compute-0 nova_compute[356901]:         <nova:disk>1</nova:disk>
Oct 11 02:42:28 compute-0 nova_compute[356901]:         <nova:swap>0</nova:swap>
Oct 11 02:42:28 compute-0 nova_compute[356901]:         <nova:ephemeral>0</nova:ephemeral>
Oct 11 02:42:28 compute-0 nova_compute[356901]:         <nova:vcpus>1</nova:vcpus>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       </nova:flavor>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <nova:owner>
Oct 11 02:42:28 compute-0 nova_compute[356901]:         <nova:user uuid="bcba1b18a2ad479587a15fe415ae307a">tempest-TestNetworkBasicOps-494564743-project-member</nova:user>
Oct 11 02:42:28 compute-0 nova_compute[356901]:         <nova:project uuid="86dfc4ba5e494748b86bc9b983426779">tempest-TestNetworkBasicOps-494564743</nova:project>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       </nova:owner>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <nova:root type="image" uuid="72f37f2e-4296-450e-9a12-10717f4ac7dc"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <nova:ports>
Oct 11 02:42:28 compute-0 nova_compute[356901]:         <nova:port uuid="6c94515a-556d-4aeb-b39e-6e043f460cd8">
Oct 11 02:42:28 compute-0 nova_compute[356901]:           <nova:ip type="fixed" address="10.100.0.3" ipVersion="4"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:         </nova:port>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       </nova:ports>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     </nova:instance>
Oct 11 02:42:28 compute-0 nova_compute[356901]:   </metadata>
Oct 11 02:42:28 compute-0 nova_compute[356901]:   <sysinfo type="smbios">
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <system>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <entry name="manufacturer">RDO</entry>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <entry name="product">OpenStack Compute</entry>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <entry name="version">27.5.2-0.20250829104910.6f8decf.el9</entry>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <entry name="serial">97d9494c-4ce4-4ff3-a0fa-d5cda135da98</entry>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <entry name="uuid">97d9494c-4ce4-4ff3-a0fa-d5cda135da98</entry>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <entry name="family">Virtual Machine</entry>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     </system>
Oct 11 02:42:28 compute-0 nova_compute[356901]:   </sysinfo>
Oct 11 02:42:28 compute-0 nova_compute[356901]:   <os>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <type arch="x86_64" machine="q35">hvm</type>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <boot dev="hd"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <smbios mode="sysinfo"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:   </os>
Oct 11 02:42:28 compute-0 nova_compute[356901]:   <features>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <acpi/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <apic/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <vmcoreinfo/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:   </features>
Oct 11 02:42:28 compute-0 nova_compute[356901]:   <clock offset="utc">
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <timer name="pit" tickpolicy="delay"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <timer name="rtc" tickpolicy="catchup"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <timer name="hpet" present="no"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:   </clock>
Oct 11 02:42:28 compute-0 nova_compute[356901]:   <cpu mode="host-model" match="exact">
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <topology sockets="1" cores="1" threads="1"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:42:28 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/97d9494c-4ce4-4ff3-a0fa-d5cda135da98_disk">
Oct 11 02:42:28 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       </source>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:42:28 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <target dev="vda" bus="virtio"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <disk type="network" device="cdrom">
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/97d9494c-4ce4-4ff3-a0fa-d5cda135da98_disk.config">
Oct 11 02:42:28 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       </source>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:42:28 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <target dev="sda" bus="sata"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <interface type="ethernet">
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <mac address="fa:16:3e:0f:4d:31"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <driver name="vhost" rx_queue_size="512"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <mtu size="1442"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <target dev="tap6c94515a-55"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <serial type="pty">
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <log file="/var/lib/nova/instances/97d9494c-4ce4-4ff3-a0fa-d5cda135da98/console.log" append="off"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     </serial>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <graphics type="vnc" autoport="yes" listen="::0"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <video>
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     </video>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <input type="tablet" bus="usb"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <rng model="virtio">
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <backend model="random">/dev/urandom</backend>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <controller type="usb" index="0"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     <memballoon model="virtio">
Oct 11 02:42:28 compute-0 nova_compute[356901]:       <stats period="10"/>
Oct 11 02:42:28 compute-0 nova_compute[356901]:     </memballoon>
Oct 11 02:42:28 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:42:28 compute-0 nova_compute[356901]: </domain>
Oct 11 02:42:28 compute-0 nova_compute[356901]:  _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7555
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.431 2 DEBUG nova.compute.manager [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Preparing to wait for external event network-vif-plugged-6c94515a-556d-4aeb-b39e-6e043f460cd8 prepare_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:283
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.432 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.433 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" acquired by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.433 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" "released" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.434 2 DEBUG nova.virt.libvirt.vif [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:42:20Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description='tempest-TestNetworkBasicOps-server-1362866622',display_name='tempest-TestNetworkBasicOps-server-1362866622',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-testnetworkbasicops-server-1362866622',id=13,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBBvOI8sBNl5TpioVKGpLN8dhq3coyN+dxFn+5vc5Z4DYhusOh+pMF8qIT/hioWLecBE4NbVqzpuQToM0paZi+FH/wtMu/qV3DwVgbNJMA/2dr3YQIFl6T0rS5QbQV7dDoQ==',key_name='tempest-TestNetworkBasicOps-845830948',keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=PciDeviceList,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='86dfc4ba5e494748b86bc9b983426779',ramdisk_id='',reservation_id='r-ww57d129',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_hw_rng_model='virtio',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-TestNetworkBasicOps-494564743',owner_user_name='tempest-TestNetworkBasicOps-494564743-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:42:22Z,user_data=None,user_id='bcba1b18a2ad479587a15fe415ae307a',uuid=97d9494c-4ce4-4ff3-a0fa-d5cda135da98,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "address": "fa:16:3e:0f:4d:31", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.3", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap6c94515a-55", "ovs_interfaceid": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} plug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:710
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.435 2 DEBUG nova.network.os_vif_util [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Converting VIF {"id": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "address": "fa:16:3e:0f:4d:31", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.3", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap6c94515a-55", "ovs_interfaceid": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.436 2 DEBUG nova.network.os_vif_util [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:0f:4d:31,bridge_name='br-int',has_traffic_filtering=True,id=6c94515a-556d-4aeb-b39e-6e043f460cd8,network=Network(c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap6c94515a-55') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.437 2 DEBUG os_vif [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Plugging vif VIFOpenVSwitch(active=False,address=fa:16:3e:0f:4d:31,bridge_name='br-int',has_traffic_filtering=True,id=6c94515a-556d-4aeb-b39e-6e043f460cd8,network=Network(c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap6c94515a-55') plug /usr/lib/python3.9/site-packages/os_vif/__init__.py:76
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.438 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.439 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddBridgeCommand(_result=None, name=br-int, may_exist=True, datapath_type=system) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.440 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.445 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.446 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tap6c94515a-55, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.447 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Interface, record=tap6c94515a-55, col_values=(('external_ids', {'iface-id': '6c94515a-556d-4aeb-b39e-6e043f460cd8', 'iface-status': 'active', 'attached-mac': 'fa:16:3e:0f:4d:31', 'vm-uuid': '97d9494c-4ce4-4ff3-a0fa-d5cda135da98'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.450 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:28 compute-0 NetworkManager[44908]: <info>  [1760150548.4516] manager: (tap6c94515a-55): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/70)
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.454 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.460 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.461 2 INFO os_vif [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Successfully plugged vif VIFOpenVSwitch(active=False,address=fa:16:3e:0f:4d:31,bridge_name='br-int',has_traffic_filtering=True,id=6c94515a-556d-4aeb-b39e-6e043f460cd8,network=Network(c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap6c94515a-55')
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.525 2 DEBUG nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] No BDM found with device name vda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.527 2 DEBUG nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] No BDM found with device name sda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.528 2 DEBUG nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] No VIF found with MAC fa:16:3e:0f:4d:31, not building metadata _build_interface_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12092
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.529 2 INFO nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Using config drive
Oct 11 02:42:28 compute-0 nova_compute[356901]: 2025-10-11 02:42:28.576 2 DEBUG nova.storage.rbd_utils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] rbd image 97d9494c-4ce4-4ff3-a0fa-d5cda135da98_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:42:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1946: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 22 KiB/s rd, 1.8 MiB/s wr, 27 op/s
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.061 2 INFO nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Creating config drive at /var/lib/nova/instances/97d9494c-4ce4-4ff3-a0fa-d5cda135da98/disk.config
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.073 2 DEBUG oslo_concurrency.processutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Running cmd (subprocess): /usr/bin/mkisofs -o /var/lib/nova/instances/97d9494c-4ce4-4ff3-a0fa-d5cda135da98/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmppward6ul execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.102 2 DEBUG nova.network.neutron [req-0a3b728f-d7c1-4581-86b8-974e032cceb3 req-5d49cef6-1887-4737-b37a-79499fb16f3d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Updated VIF entry in instance network info cache for port 6c94515a-556d-4aeb-b39e-6e043f460cd8. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.104 2 DEBUG nova.network.neutron [req-0a3b728f-d7c1-4581-86b8-974e032cceb3 req-5d49cef6-1887-4737-b37a-79499fb16f3d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Updating instance_info_cache with network_info: [{"id": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "address": "fa:16:3e:0f:4d:31", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.3", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap6c94515a-55", "ovs_interfaceid": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.122 2 DEBUG oslo_concurrency.lockutils [req-0a3b728f-d7c1-4581-86b8-974e032cceb3 req-5d49cef6-1887-4737-b37a-79499fb16f3d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-97d9494c-4ce4-4ff3-a0fa-d5cda135da98" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.210 2 DEBUG oslo_concurrency.processutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CMD "/usr/bin/mkisofs -o /var/lib/nova/instances/97d9494c-4ce4-4ff3-a0fa-d5cda135da98/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmppward6ul" returned: 0 in 0.138s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.248 2 DEBUG nova.storage.rbd_utils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] rbd image 97d9494c-4ce4-4ff3-a0fa-d5cda135da98_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.255 2 DEBUG oslo_concurrency.processutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/97d9494c-4ce4-4ff3-a0fa-d5cda135da98/disk.config 97d9494c-4ce4-4ff3-a0fa-d5cda135da98_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:42:29 compute-0 romantic_germain[459727]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:42:29 compute-0 romantic_germain[459727]: --> relative data size: 1.0
Oct 11 02:42:29 compute-0 romantic_germain[459727]: --> All data devices are unavailable
Oct 11 02:42:29 compute-0 systemd[1]: libpod-ce9e2c667510a930e6e56811c70f7482894e631e83ecd97fd8cbcb389951b6c6.scope: Deactivated successfully.
Oct 11 02:42:29 compute-0 systemd[1]: libpod-ce9e2c667510a930e6e56811c70f7482894e631e83ecd97fd8cbcb389951b6c6.scope: Consumed 1.271s CPU time.
Oct 11 02:42:29 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1732692437' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:42:29 compute-0 ceph-mon[191930]: pgmap v1946: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 22 KiB/s rd, 1.8 MiB/s wr, 27 op/s
Oct 11 02:42:29 compute-0 podman[459838]: 2025-10-11 02:42:29.420268669 +0000 UTC m=+0.046470817 container died ce9e2c667510a930e6e56811c70f7482894e631e83ecd97fd8cbcb389951b6c6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_germain, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:42:29 compute-0 systemd[1]: var-lib-containers-storage-overlay-158fac24e372cd2f32f43a3be10607dff6c3fae616a3db20db4dbe5678717233-merged.mount: Deactivated successfully.
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.498 2 DEBUG oslo_concurrency.processutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/97d9494c-4ce4-4ff3-a0fa-d5cda135da98/disk.config 97d9494c-4ce4-4ff3-a0fa-d5cda135da98_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.243s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.501 2 INFO nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Deleting local config drive /var/lib/nova/instances/97d9494c-4ce4-4ff3-a0fa-d5cda135da98/disk.config because it was imported into RBD.
Oct 11 02:42:29 compute-0 podman[459838]: 2025-10-11 02:42:29.522118339 +0000 UTC m=+0.148320467 container remove ce9e2c667510a930e6e56811c70f7482894e631e83ecd97fd8cbcb389951b6c6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_germain, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 02:42:29 compute-0 systemd[1]: libpod-conmon-ce9e2c667510a930e6e56811c70f7482894e631e83ecd97fd8cbcb389951b6c6.scope: Deactivated successfully.
Oct 11 02:42:29 compute-0 sudo[459488]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:29 compute-0 kernel: tap6c94515a-55: entered promiscuous mode
Oct 11 02:42:29 compute-0 NetworkManager[44908]: <info>  [1760150549.5990] manager: (tap6c94515a-55): new Tun device (/org/freedesktop/NetworkManager/Devices/71)
Oct 11 02:42:29 compute-0 ovn_controller[88370]: 2025-10-11T02:42:29Z|00163|binding|INFO|Claiming lport 6c94515a-556d-4aeb-b39e-6e043f460cd8 for this chassis.
Oct 11 02:42:29 compute-0 ovn_controller[88370]: 2025-10-11T02:42:29Z|00164|binding|INFO|6c94515a-556d-4aeb-b39e-6e043f460cd8: Claiming fa:16:3e:0f:4d:31 10.100.0.3
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.606 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:29.613 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:0f:4d:31 10.100.0.3'], port_security=['fa:16:3e:0f:4d:31 10.100.0.3'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.3/28', 'neutron:device_id': '97d9494c-4ce4-4ff3-a0fa-d5cda135da98', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': '86dfc4ba5e494748b86bc9b983426779', 'neutron:revision_number': '2', 'neutron:security_group_ids': '99941801-8eae-4ed7-9f1f-5e6556715ff8', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=b0ebf874-dd0a-4bac-aa4a-3eee85fcb8ba, chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], tunnel_key=4, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=6c94515a-556d-4aeb-b39e-6e043f460cd8) old=Port_Binding(chassis=[]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:42:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:29.616 286362 INFO neutron.agent.ovn.metadata.agent [-] Port 6c94515a-556d-4aeb-b39e-6e043f460cd8 in datapath c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635 bound to our chassis
Oct 11 02:42:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:29.621 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635
Oct 11 02:42:29 compute-0 ovn_controller[88370]: 2025-10-11T02:42:29Z|00165|binding|INFO|Setting lport 6c94515a-556d-4aeb-b39e-6e043f460cd8 ovn-installed in OVS
Oct 11 02:42:29 compute-0 ovn_controller[88370]: 2025-10-11T02:42:29Z|00166|binding|INFO|Setting lport 6c94515a-556d-4aeb-b39e-6e043f460cd8 up in Southbound
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.635 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:29.651 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[87e633fc-8998-4328-bdd0-aed294ddb655]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:42:29 compute-0 systemd-udevd[459878]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:42:29 compute-0 systemd-machined[137586]: New machine qemu-14-instance-0000000d.
Oct 11 02:42:29 compute-0 systemd[1]: Started Virtual Machine qemu-14-instance-0000000d.
Oct 11 02:42:29 compute-0 NetworkManager[44908]: <info>  [1760150549.6920] device (tap6c94515a-55): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 02:42:29 compute-0 NetworkManager[44908]: <info>  [1760150549.6929] device (tap6c94515a-55): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Oct 11 02:42:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:29.697 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[4fc66362-159c-4ede-866c-3791fe401173]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:42:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:29.706 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[8f0e6833-ff61-4495-97ac-e98355e80e83]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:42:29 compute-0 sudo[459866]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:42:29 compute-0 sudo[459866]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:29 compute-0 podman[157119]: time="2025-10-11T02:42:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:42:29 compute-0 sudo[459866]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:29.749 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[bb5b2c3c-5e28-46dd-ad2d-a39b04cfa753]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:42:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:42:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:42:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:42:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9550 "" "Go-http-client/1.1"
Oct 11 02:42:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:29.788 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[0e985f1b-c781-420b-abd7-b8be9d7af991]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapc35c5e7e-41'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:b1:e9:cd'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 10, 'tx_packets': 5, 'rx_bytes': 916, 'tx_bytes': 354, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 10, 'tx_packets': 5, 'rx_bytes': 916, 'tx_bytes': 354, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 43], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 683520, 'reachable_time': 17291, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 8, 'inoctets': 720, 'indelivers': 1, 'outforwdatagrams': 0, 'outpkts': 3, 'outoctets': 228, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 8, 'outmcastpkts': 3, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 720, 'outmcastoctets': 228, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 8, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 1, 'inerrors': 0, 'outmsgs': 3, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 459907, 'error': None, 'target': 'ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:42:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:29.810 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[129b3595-58db-4916-8911-02d704c36174]: (4, ({'family': 2, 'prefixlen': 32, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '169.254.169.254'], ['IFA_LOCAL', '169.254.169.254'], ['IFA_BROADCAST', '169.254.169.254'], ['IFA_LABEL', 'tapc35c5e7e-41'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 683540, 'tstamp': 683540}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 459918, 'error': None, 'target': 'ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'}, {'family': 2, 'prefixlen': 28, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '10.100.0.2'], ['IFA_LOCAL', '10.100.0.2'], ['IFA_BROADCAST', '10.100.0.15'], ['IFA_LABEL', 'tapc35c5e7e-41'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 683545, 'tstamp': 683545}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 459918, 'error': None, 'target': 'ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'})) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:42:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:29.812 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapc35c5e7e-40, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.814 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.816 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:29.816 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapc35c5e7e-40, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:42:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:29.817 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:42:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:29.817 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tapc35c5e7e-40, col_values=(('external_ids', {'iface-id': 'ffb676d8-51f5-4de3-a31a-71adc7412138'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:42:29 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:29.818 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:42:29 compute-0 sudo[459908]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:42:29 compute-0 sudo[459908]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:29 compute-0 sudo[459908]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:29 compute-0 sudo[459935]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:42:29 compute-0 sudo[459935]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:29 compute-0 sudo[459935]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.944 2 DEBUG nova.compute.manager [req-6bf7f15c-82b8-4f07-8e47-ffcea92f001a req-a946c21e-3582-435b-aaac-f854255f8553 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Received event network-vif-plugged-6c94515a-556d-4aeb-b39e-6e043f460cd8 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.945 2 DEBUG oslo_concurrency.lockutils [req-6bf7f15c-82b8-4f07-8e47-ffcea92f001a req-a946c21e-3582-435b-aaac-f854255f8553 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.946 2 DEBUG oslo_concurrency.lockutils [req-6bf7f15c-82b8-4f07-8e47-ffcea92f001a req-a946c21e-3582-435b-aaac-f854255f8553 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.946 2 DEBUG oslo_concurrency.lockutils [req-6bf7f15c-82b8-4f07-8e47-ffcea92f001a req-a946c21e-3582-435b-aaac-f854255f8553 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:42:29 compute-0 nova_compute[356901]: 2025-10-11 02:42:29.946 2 DEBUG nova.compute.manager [req-6bf7f15c-82b8-4f07-8e47-ffcea92f001a req-a946c21e-3582-435b-aaac-f854255f8553 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Processing event network-vif-plugged-6c94515a-556d-4aeb-b39e-6e043f460cd8 _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10808
Oct 11 02:42:30 compute-0 sudo[459960]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:42:30 compute-0 sudo[459960]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:30 compute-0 nova_compute[356901]: 2025-10-11 02:42:30.142 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:30 compute-0 podman[460065]: 2025-10-11 02:42:30.599497855 +0000 UTC m=+0.066596134 container create e22991af73593d23ae3a28175d71b52a319982819eaf4b51f7ea52a25f7f4a8e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_lederberg, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.build-date=20250507, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:42:30 compute-0 podman[460065]: 2025-10-11 02:42:30.572018874 +0000 UTC m=+0.039117183 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:42:30 compute-0 systemd[1]: Started libpod-conmon-e22991af73593d23ae3a28175d71b52a319982819eaf4b51f7ea52a25f7f4a8e.scope.
Oct 11 02:42:30 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:42:30 compute-0 podman[460065]: 2025-10-11 02:42:30.738761716 +0000 UTC m=+0.205860085 container init e22991af73593d23ae3a28175d71b52a319982819eaf4b51f7ea52a25f7f4a8e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_lederberg, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2)
Oct 11 02:42:30 compute-0 podman[460065]: 2025-10-11 02:42:30.747679776 +0000 UTC m=+0.214778085 container start e22991af73593d23ae3a28175d71b52a319982819eaf4b51f7ea52a25f7f4a8e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_lederberg, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:42:30 compute-0 heuristic_lederberg[460080]: 167 167
Oct 11 02:42:30 compute-0 podman[460065]: 2025-10-11 02:42:30.756104691 +0000 UTC m=+0.223203070 container attach e22991af73593d23ae3a28175d71b52a319982819eaf4b51f7ea52a25f7f4a8e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_lederberg, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS)
Oct 11 02:42:30 compute-0 systemd[1]: libpod-e22991af73593d23ae3a28175d71b52a319982819eaf4b51f7ea52a25f7f4a8e.scope: Deactivated successfully.
Oct 11 02:42:30 compute-0 podman[460065]: 2025-10-11 02:42:30.759080421 +0000 UTC m=+0.226178680 container died e22991af73593d23ae3a28175d71b52a319982819eaf4b51f7ea52a25f7f4a8e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_lederberg, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, ceph=True)
Oct 11 02:42:30 compute-0 systemd[1]: var-lib-containers-storage-overlay-bfd652082770b772b844a9c524efabe7d6f88c8a8923d24de618bf5b60a7708c-merged.mount: Deactivated successfully.
Oct 11 02:42:30 compute-0 podman[460065]: 2025-10-11 02:42:30.813149956 +0000 UTC m=+0.280248225 container remove e22991af73593d23ae3a28175d71b52a319982819eaf4b51f7ea52a25f7f4a8e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_lederberg, org.label-schema.build-date=20250507, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0)
Oct 11 02:42:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1947: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 17 KiB/s rd, 1.8 MiB/s wr, 27 op/s
Oct 11 02:42:30 compute-0 systemd[1]: libpod-conmon-e22991af73593d23ae3a28175d71b52a319982819eaf4b51f7ea52a25f7f4a8e.scope: Deactivated successfully.
Oct 11 02:42:30 compute-0 ceph-mon[191930]: pgmap v1947: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 17 KiB/s rd, 1.8 MiB/s wr, 27 op/s
Oct 11 02:42:30 compute-0 nova_compute[356901]: 2025-10-11 02:42:30.939 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150550.9388015, 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 => Started> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:42:30 compute-0 nova_compute[356901]: 2025-10-11 02:42:30.941 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] VM Started (Lifecycle Event)
Oct 11 02:42:30 compute-0 nova_compute[356901]: 2025-10-11 02:42:30.944 2 DEBUG nova.compute.manager [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Instance event wait completed in 0 seconds for network-vif-plugged wait_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:577
Oct 11 02:42:30 compute-0 nova_compute[356901]: 2025-10-11 02:42:30.949 2 DEBUG nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Guest created on hypervisor spawn /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4417
Oct 11 02:42:30 compute-0 nova_compute[356901]: 2025-10-11 02:42:30.957 2 INFO nova.virt.libvirt.driver [-] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Instance spawned successfully.
Oct 11 02:42:30 compute-0 nova_compute[356901]: 2025-10-11 02:42:30.958 2 DEBUG nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Attempting to register defaults for the following image properties: ['hw_cdrom_bus', 'hw_disk_bus', 'hw_input_bus', 'hw_pointer_model', 'hw_video_model', 'hw_vif_model'] _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:917
Oct 11 02:42:30 compute-0 nova_compute[356901]: 2025-10-11 02:42:30.965 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:42:30 compute-0 nova_compute[356901]: 2025-10-11 02:42:30.973 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Synchronizing instance power state after lifecycle event "Started"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:42:30 compute-0 nova_compute[356901]: 2025-10-11 02:42:30.992 2 DEBUG nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Found default for hw_cdrom_bus of sata _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:42:30 compute-0 nova_compute[356901]: 2025-10-11 02:42:30.993 2 DEBUG nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Found default for hw_disk_bus of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:42:30 compute-0 nova_compute[356901]: 2025-10-11 02:42:30.994 2 DEBUG nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Found default for hw_input_bus of usb _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:42:30 compute-0 nova_compute[356901]: 2025-10-11 02:42:30.995 2 DEBUG nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Found default for hw_pointer_model of usbtablet _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:42:30 compute-0 nova_compute[356901]: 2025-10-11 02:42:30.995 2 DEBUG nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Found default for hw_video_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:42:30 compute-0 nova_compute[356901]: 2025-10-11 02:42:30.996 2 DEBUG nova.virt.libvirt.driver [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Found default for hw_vif_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:42:31 compute-0 nova_compute[356901]: 2025-10-11 02:42:31.002 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:42:31 compute-0 nova_compute[356901]: 2025-10-11 02:42:31.002 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150550.9389696, 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 => Paused> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:42:31 compute-0 nova_compute[356901]: 2025-10-11 02:42:31.002 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] VM Paused (Lifecycle Event)
Oct 11 02:42:31 compute-0 nova_compute[356901]: 2025-10-11 02:42:31.039 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:42:31 compute-0 nova_compute[356901]: 2025-10-11 02:42:31.046 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150550.948854, 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 => Resumed> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:42:31 compute-0 nova_compute[356901]: 2025-10-11 02:42:31.047 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] VM Resumed (Lifecycle Event)
Oct 11 02:42:31 compute-0 nova_compute[356901]: 2025-10-11 02:42:31.060 2 INFO nova.compute.manager [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Took 8.22 seconds to spawn the instance on the hypervisor.
Oct 11 02:42:31 compute-0 nova_compute[356901]: 2025-10-11 02:42:31.061 2 DEBUG nova.compute.manager [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:42:31 compute-0 nova_compute[356901]: 2025-10-11 02:42:31.071 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:42:31 compute-0 nova_compute[356901]: 2025-10-11 02:42:31.077 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Synchronizing instance power state after lifecycle event "Resumed"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:42:31 compute-0 podman[460104]: 2025-10-11 02:42:31.08909374 +0000 UTC m=+0.084105534 container create 34239a9adbc68585a4d8d7b70a2ca31eca3fa834ff16ae30a9a41227fa754d7d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_benz, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:42:31 compute-0 nova_compute[356901]: 2025-10-11 02:42:31.112 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:42:31 compute-0 nova_compute[356901]: 2025-10-11 02:42:31.133 2 INFO nova.compute.manager [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Took 9.29 seconds to build instance.
Oct 11 02:42:31 compute-0 podman[460104]: 2025-10-11 02:42:31.058788703 +0000 UTC m=+0.053800507 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:42:31 compute-0 nova_compute[356901]: 2025-10-11 02:42:31.159 2 DEBUG oslo_concurrency.lockutils [None req-c05f11b5-bf83-4af5-b9f6-f21b177fe9db bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98" "released" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: held 9.412s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:42:31 compute-0 systemd[1]: Started libpod-conmon-34239a9adbc68585a4d8d7b70a2ca31eca3fa834ff16ae30a9a41227fa754d7d.scope.
Oct 11 02:42:31 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:42:31 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/edfc17a59df3a3f2090a43a61dcca1b876827322ca991525f9bfaa0fa693b16a/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:42:31 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/edfc17a59df3a3f2090a43a61dcca1b876827322ca991525f9bfaa0fa693b16a/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:42:31 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/edfc17a59df3a3f2090a43a61dcca1b876827322ca991525f9bfaa0fa693b16a/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:42:31 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/edfc17a59df3a3f2090a43a61dcca1b876827322ca991525f9bfaa0fa693b16a/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:42:31 compute-0 podman[460104]: 2025-10-11 02:42:31.232315521 +0000 UTC m=+0.227327405 container init 34239a9adbc68585a4d8d7b70a2ca31eca3fa834ff16ae30a9a41227fa754d7d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_benz, CEPH_REF=reef, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:42:31 compute-0 podman[460104]: 2025-10-11 02:42:31.247760898 +0000 UTC m=+0.242772702 container start 34239a9adbc68585a4d8d7b70a2ca31eca3fa834ff16ae30a9a41227fa754d7d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_benz, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS)
Oct 11 02:42:31 compute-0 podman[460104]: 2025-10-11 02:42:31.25313328 +0000 UTC m=+0.248145144 container attach 34239a9adbc68585a4d8d7b70a2ca31eca3fa834ff16ae30a9a41227fa754d7d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_benz, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 02:42:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
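The _set_new_cache_sizes line records the monitor's periodic cache autotuning: a total budget (cache_size) carved into what look like incremental-map, full-map, and key-value store allocations. Taking the values as the byte counts they appear to be, the split fits inside the budget:

    # Sanity-check the mon cache split logged above (values in bytes).
    cache_size = 1020054731
    inc_alloc = full_alloc = 348127232   # 332 MiB each
    kv_alloc = 318767104                 # 304 MiB
    total = inc_alloc + full_alloc + kv_alloc
    assert total <= cache_size
    print(f"{total / 2**20:.0f} MiB allocated of {cache_size / 2**20:.0f} MiB budget")
    # -> 968 MiB allocated of 973 MiB budget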
Oct 11 02:42:31 compute-0 openstack_network_exporter[374316]: ERROR   02:42:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:42:31 compute-0 openstack_network_exporter[374316]: ERROR   02:42:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:42:31 compute-0 openstack_network_exporter[374316]: ERROR   02:42:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:42:31 compute-0 openstack_network_exporter[374316]: ERROR   02:42:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:42:31 compute-0 openstack_network_exporter[374316]: ERROR   02:42:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
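The exporter errors above report missing prerequisites rather than faults: there are no appctl control sockets for ovn-northd or ovsdb-server (neither daemon runs on a compute node), and the dpif-netdev queries need a userspace datapath, while this host uses the kernel one. The socket probe is easy to reproduce; a minimal sketch, assuming the conventional runtime directories /var/run/openvswitch and /var/run/ovn:

    # Look for appctl control sockets named <rundir>/<daemon>.<pid>.ctl.
    import glob

    def find_ctl(daemon, rundirs=("/var/run/openvswitch", "/var/run/ovn")):
        hits = []
        for rundir in rundirs:
            hits += glob.glob(f"{rundir}/{daemon}.*.ctl")
        return hits

    for daemon in ("ovn-northd", "ovsdb-server", "ovs-vswitchd"):
        socks = find_ctl(daemon)
        print(daemon, "->", socks or "no control socket files found")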
Oct 11 02:42:32 compute-0 nova_compute[356901]: 2025-10-11 02:42:32.044 2 DEBUG nova.compute.manager [req-8de8fffc-c310-4646-bc75-d3e841fca6c8 req-567057c8-6a4e-4ba4-87cb-0e552d200fa8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Received event network-vif-plugged-6c94515a-556d-4aeb-b39e-6e043f460cd8 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:42:32 compute-0 nova_compute[356901]: 2025-10-11 02:42:32.044 2 DEBUG oslo_concurrency.lockutils [req-8de8fffc-c310-4646-bc75-d3e841fca6c8 req-567057c8-6a4e-4ba4-87cb-0e552d200fa8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:42:32 compute-0 nova_compute[356901]: 2025-10-11 02:42:32.045 2 DEBUG oslo_concurrency.lockutils [req-8de8fffc-c310-4646-bc75-d3e841fca6c8 req-567057c8-6a4e-4ba4-87cb-0e552d200fa8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:42:32 compute-0 nova_compute[356901]: 2025-10-11 02:42:32.045 2 DEBUG oslo_concurrency.lockutils [req-8de8fffc-c310-4646-bc75-d3e841fca6c8 req-567057c8-6a4e-4ba4-87cb-0e552d200fa8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:42:32 compute-0 nova_compute[356901]: 2025-10-11 02:42:32.045 2 DEBUG nova.compute.manager [req-8de8fffc-c310-4646-bc75-d3e841fca6c8 req-567057c8-6a4e-4ba4-87cb-0e552d200fa8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] No waiting events found dispatching network-vif-plugged-6c94515a-556d-4aeb-b39e-6e043f460cd8 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:42:32 compute-0 nova_compute[356901]: 2025-10-11 02:42:32.045 2 WARNING nova.compute.manager [req-8de8fffc-c310-4646-bc75-d3e841fca6c8 req-567057c8-6a4e-4ba4-87cb-0e552d200fa8 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Received unexpected event network-vif-plugged-6c94515a-556d-4aeb-b39e-6e043f460cd8 for instance with vm_state active and task_state None.
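The six nova_compute lines above are one complete external-event round trip: neutron reports network-vif-plugged for port 6c94515a-556d-4aeb-b39e-6e043f460cd8, nova takes the per-instance "-events" lock, finds no waiter registered for that event, and logs it as unexpected but harmless because the instance is already active with no task in flight. A toy sketch of that pop-or-warn shape (a simplification, not nova's actual code):

    # Pop a registered waiter for (instance, event); None means "unexpected".
    import threading

    class InstanceEvents:
        def __init__(self):
            self._waiters = {}             # (instance_uuid, event_name) -> callback
            self._lock = threading.Lock()  # nova serializes per instance

        def pop_instance_event(self, instance_uuid, event_name):
            with self._lock:
                return self._waiters.pop((instance_uuid, event_name), None)

    events = InstanceEvents()
    cb = events.pop_instance_event("97d9494c", "network-vif-plugged-6c94515a")
    if cb is None:
        print("WARNING: unexpected event for instance with vm_state active")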
Oct 11 02:42:32 compute-0 romantic_benz[460120]: {
Oct 11 02:42:32 compute-0 romantic_benz[460120]:     "0": [
Oct 11 02:42:32 compute-0 romantic_benz[460120]:         {
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "devices": [
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "/dev/loop3"
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             ],
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "lv_name": "ceph_lv0",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "lv_size": "21470642176",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "name": "ceph_lv0",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "tags": {
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.cluster_name": "ceph",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.crush_device_class": "",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.encrypted": "0",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.osd_id": "0",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.type": "block",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.vdo": "0"
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             },
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "type": "block",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "vg_name": "ceph_vg0"
Oct 11 02:42:32 compute-0 romantic_benz[460120]:         }
Oct 11 02:42:32 compute-0 romantic_benz[460120]:     ],
Oct 11 02:42:32 compute-0 romantic_benz[460120]:     "1": [
Oct 11 02:42:32 compute-0 romantic_benz[460120]:         {
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "devices": [
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "/dev/loop4"
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             ],
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "lv_name": "ceph_lv1",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "lv_size": "21470642176",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "name": "ceph_lv1",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "tags": {
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.cluster_name": "ceph",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.crush_device_class": "",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.encrypted": "0",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.osd_id": "1",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.type": "block",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.vdo": "0"
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             },
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "type": "block",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "vg_name": "ceph_vg1"
Oct 11 02:42:32 compute-0 romantic_benz[460120]:         }
Oct 11 02:42:32 compute-0 romantic_benz[460120]:     ],
Oct 11 02:42:32 compute-0 romantic_benz[460120]:     "2": [
Oct 11 02:42:32 compute-0 romantic_benz[460120]:         {
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "devices": [
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "/dev/loop5"
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             ],
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "lv_name": "ceph_lv2",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "lv_size": "21470642176",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "name": "ceph_lv2",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "tags": {
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.cluster_name": "ceph",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.crush_device_class": "",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.encrypted": "0",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.osd_id": "2",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.type": "block",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:                 "ceph.vdo": "0"
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             },
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "type": "block",
Oct 11 02:42:32 compute-0 romantic_benz[460120]:             "vg_name": "ceph_vg2"
Oct 11 02:42:32 compute-0 romantic_benz[460120]:         }
Oct 11 02:42:32 compute-0 romantic_benz[460120]:     ]
Oct 11 02:42:32 compute-0 romantic_benz[460120]: }
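The JSON block printed by the romantic_benz container is the output of ceph-volume lvm list --format json: a map of OSD id to the logical volumes backing it, with the ceph.* LV tags repeated in parsed form under "tags". A small consumer, assuming the block has been captured into a string named raw:

    # Summarize `ceph-volume lvm list --format json` output.
    import json

    def summarize(raw: str):
        for osd_id, lvs in sorted(json.loads(raw).items()):
            for lv in lvs:
                tags = lv["tags"]
                print(f"osd.{osd_id}: {lv['lv_path']} "
                      f"(fsid={tags['ceph.osd_fsid']}, "
                      f"devices={','.join(lv['devices'])})")

    # For the block above, summarize(raw) starts with:
    # osd.0: /dev/ceph_vg0/ceph_lv0 (fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6, devices=/dev/loop3)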
Oct 11 02:42:32 compute-0 systemd[1]: libpod-34239a9adbc68585a4d8d7b70a2ca31eca3fa834ff16ae30a9a41227fa754d7d.scope: Deactivated successfully.
Oct 11 02:42:32 compute-0 podman[460104]: 2025-10-11 02:42:32.138842562 +0000 UTC m=+1.133854366 container died 34239a9adbc68585a4d8d7b70a2ca31eca3fa834ff16ae30a9a41227fa754d7d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_benz, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 02:42:32 compute-0 systemd[1]: var-lib-containers-storage-overlay-edfc17a59df3a3f2090a43a61dcca1b876827322ca991525f9bfaa0fa693b16a-merged.mount: Deactivated successfully.
Oct 11 02:42:32 compute-0 podman[460104]: 2025-10-11 02:42:32.2250886 +0000 UTC m=+1.220100394 container remove 34239a9adbc68585a4d8d7b70a2ca31eca3fa834ff16ae30a9a41227fa754d7d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=romantic_benz, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 02:42:32 compute-0 systemd[1]: libpod-conmon-34239a9adbc68585a4d8d7b70a2ca31eca3fa834ff16ae30a9a41227fa754d7d.scope: Deactivated successfully.
Oct 11 02:42:32 compute-0 sudo[459960]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:32 compute-0 sudo[460142]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:42:32 compute-0 sudo[460142]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:32 compute-0 sudo[460142]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:32 compute-0 sudo[460167]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:42:32 compute-0 sudo[460167]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:32 compute-0 sudo[460167]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:32 compute-0 sudo[460192]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:42:32 compute-0 sudo[460192]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:32 compute-0 sudo[460192]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:32 compute-0 sudo[460217]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:42:32 compute-0 sudo[460217]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
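The sudo line above shows how the cephadm mgr module drives this host: it stages the cephadm script at /var/lib/ceph/<fsid>/cephadm.<digest> and runs it as root with an explicit --image and --timeout, here wrapping ceph-volume raw list. The digest suffix looks like a content checksum of the staged file; one way to test that reading (SHA-256 is an assumption, the log does not say which hash):

    # Check whether the staged file's suffix is its SHA-256 digest (assumed).
    import hashlib, pathlib

    path = pathlib.Path(
        "/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/"
        "cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d")
    digest = hashlib.sha256(path.read_bytes()).hexdigest()
    print("suffix matches sha256" if path.name.endswith(digest)
          else "suffix is not sha256")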
Oct 11 02:42:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1948: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 17 KiB/s rd, 1.8 MiB/s wr, 27 op/s
Oct 11 02:42:32 compute-0 ceph-mon[191930]: pgmap v1948: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 17 KiB/s rd, 1.8 MiB/s wr, 27 op/s
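The pgmap pair above (ceph-mgr emits it, the mon echoes it) repeats every second or two and is the cluster heartbeat in this log: map version, PG state counts, usage, and client throughput, here 321 of 321 PGs active+clean. A regex written against exactly this message shape pulls the fields apart:

    # Split a "pgmap vN: ..." message into its fields.
    import re

    msg = ("pgmap v1948: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, "
           "60 GiB / 60 GiB avail; 17 KiB/s rd, 1.8 MiB/s wr, 27 op/s")
    version, pgs, states, usage, rates = re.search(
        r"pgmap v(\d+): (\d+) pgs: (.*?); (.*?); (.*)", msg).groups()
    print(version, pgs, states, usage, rates, sep=" | ")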
Oct 11 02:42:33 compute-0 podman[460280]: 2025-10-11 02:42:33.133006864 +0000 UTC m=+0.062917284 container create 0596fa9a933c9c7fcb95f179acb94307236281b6833efe6938a8bae0485adfae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_bell, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:42:33 compute-0 systemd[1]: Started libpod-conmon-0596fa9a933c9c7fcb95f179acb94307236281b6833efe6938a8bae0485adfae.scope.
Oct 11 02:42:33 compute-0 podman[460280]: 2025-10-11 02:42:33.106115651 +0000 UTC m=+0.036026081 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:42:33 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:42:33 compute-0 podman[460280]: 2025-10-11 02:42:33.257043194 +0000 UTC m=+0.186953624 container init 0596fa9a933c9c7fcb95f179acb94307236281b6833efe6938a8bae0485adfae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_bell, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:42:33 compute-0 podman[460280]: 2025-10-11 02:42:33.266968905 +0000 UTC m=+0.196879305 container start 0596fa9a933c9c7fcb95f179acb94307236281b6833efe6938a8bae0485adfae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_bell, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:42:33 compute-0 podman[460280]: 2025-10-11 02:42:33.272146631 +0000 UTC m=+0.202057041 container attach 0596fa9a933c9c7fcb95f179acb94307236281b6833efe6938a8bae0485adfae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_bell, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef)
Oct 11 02:42:33 compute-0 competent_bell[460296]: 167 167
Oct 11 02:42:33 compute-0 systemd[1]: libpod-0596fa9a933c9c7fcb95f179acb94307236281b6833efe6938a8bae0485adfae.scope: Deactivated successfully.
Oct 11 02:42:33 compute-0 podman[460280]: 2025-10-11 02:42:33.276616016 +0000 UTC m=+0.206526426 container died 0596fa9a933c9c7fcb95f179acb94307236281b6833efe6938a8bae0485adfae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_bell, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:42:33 compute-0 systemd[1]: var-lib-containers-storage-overlay-2c824343209db8e86dddede03d27d535ecb679015b53afd1a39862880a247ff2-merged.mount: Deactivated successfully.
Oct 11 02:42:33 compute-0 podman[460280]: 2025-10-11 02:42:33.325024469 +0000 UTC m=+0.254934879 container remove 0596fa9a933c9c7fcb95f179acb94307236281b6833efe6938a8bae0485adfae (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_bell, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:42:33 compute-0 systemd[1]: libpod-conmon-0596fa9a933c9c7fcb95f179acb94307236281b6833efe6938a8bae0485adfae.scope: Deactivated successfully.
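The competent_bell container above lived for well under a second and printed only "167 167" before podman tore it down, which is consistent with cephadm probing the image for the numeric uid and gid that own the Ceph files (167 is the ceph user and group in these images). A hedged reconstruction of such a probe; the exact command cephadm runs is an assumption:

    # Ask the image which uid/gid owns /var/lib/ceph (assumed probe command).
    import subprocess

    image = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")
    out = subprocess.run(
        ["podman", "run", "--rm", "--entrypoint", "stat", image,
         "-c", "%u %g", "/var/lib/ceph"],
        capture_output=True, text=True, check=True).stdout
    print(out.strip())  # expected: 167 167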
Oct 11 02:42:33 compute-0 nova_compute[356901]: 2025-10-11 02:42:33.451 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:33 compute-0 podman[460319]: 2025-10-11 02:42:33.611136381 +0000 UTC m=+0.082123265 container create 4acbe6434e3626eec2b30304ef07eae4b2ad3ed65ea7fae3fa83f76cda461554 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_wing, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:42:33 compute-0 podman[460319]: 2025-10-11 02:42:33.58300796 +0000 UTC m=+0.053994914 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:42:33 compute-0 systemd[1]: Started libpod-conmon-4acbe6434e3626eec2b30304ef07eae4b2ad3ed65ea7fae3fa83f76cda461554.scope.
Oct 11 02:42:33 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:42:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5164fac7c8cf5710a100107eb30efd4c2337f5cd29ac6c7ac13375b2fceb0248/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:42:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5164fac7c8cf5710a100107eb30efd4c2337f5cd29ac6c7ac13375b2fceb0248/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:42:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5164fac7c8cf5710a100107eb30efd4c2337f5cd29ac6c7ac13375b2fceb0248/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:42:33 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5164fac7c8cf5710a100107eb30efd4c2337f5cd29ac6c7ac13375b2fceb0248/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:42:33 compute-0 podman[460319]: 2025-10-11 02:42:33.747501384 +0000 UTC m=+0.218488308 container init 4acbe6434e3626eec2b30304ef07eae4b2ad3ed65ea7fae3fa83f76cda461554 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_wing, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 02:42:33 compute-0 podman[460319]: 2025-10-11 02:42:33.77351335 +0000 UTC m=+0.244500234 container start 4acbe6434e3626eec2b30304ef07eae4b2ad3ed65ea7fae3fa83f76cda461554 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_wing, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 02:42:33 compute-0 podman[460319]: 2025-10-11 02:42:33.779277635 +0000 UTC m=+0.250264559 container attach 4acbe6434e3626eec2b30304ef07eae4b2ad3ed65ea7fae3fa83f76cda461554 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_wing, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 02:42:34 compute-0 nova_compute[356901]: 2025-10-11 02:42:34.288 2 DEBUG nova.compute.manager [req-afd09f0d-f5f0-4ce5-96c3-865564e97cfc req-489dfae7-6fb1-4f1f-ab22-2e94d78b8bf1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Received event network-changed-6c94515a-556d-4aeb-b39e-6e043f460cd8 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:42:34 compute-0 nova_compute[356901]: 2025-10-11 02:42:34.289 2 DEBUG nova.compute.manager [req-afd09f0d-f5f0-4ce5-96c3-865564e97cfc req-489dfae7-6fb1-4f1f-ab22-2e94d78b8bf1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Refreshing instance network info cache due to event network-changed-6c94515a-556d-4aeb-b39e-6e043f460cd8. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:42:34 compute-0 nova_compute[356901]: 2025-10-11 02:42:34.289 2 DEBUG oslo_concurrency.lockutils [req-afd09f0d-f5f0-4ce5-96c3-865564e97cfc req-489dfae7-6fb1-4f1f-ab22-2e94d78b8bf1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-97d9494c-4ce4-4ff3-a0fa-d5cda135da98" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:42:34 compute-0 nova_compute[356901]: 2025-10-11 02:42:34.289 2 DEBUG oslo_concurrency.lockutils [req-afd09f0d-f5f0-4ce5-96c3-865564e97cfc req-489dfae7-6fb1-4f1f-ab22-2e94d78b8bf1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-97d9494c-4ce4-4ff3-a0fa-d5cda135da98" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:42:34 compute-0 nova_compute[356901]: 2025-10-11 02:42:34.289 2 DEBUG nova.network.neutron [req-afd09f0d-f5f0-4ce5-96c3-865564e97cfc req-489dfae7-6fb1-4f1f-ab22-2e94d78b8bf1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Refreshing network info cache for port 6c94515a-556d-4aeb-b39e-6e043f460cd8 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:42:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1949: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 985 KiB/s rd, 1.8 MiB/s wr, 69 op/s
Oct 11 02:42:34 compute-0 ceph-mon[191930]: pgmap v1949: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 985 KiB/s rd, 1.8 MiB/s wr, 69 op/s
Oct 11 02:42:34 compute-0 vibrant_wing[460336]: {
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:         "osd_id": 1,
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:         "type": "bluestore"
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:     },
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:         "osd_id": 2,
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:         "type": "bluestore"
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:     },
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:         "osd_id": 0,
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:         "type": "bluestore"
Oct 11 02:42:34 compute-0 vibrant_wing[460336]:     }
Oct 11 02:42:34 compute-0 vibrant_wing[460336]: }
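The vibrant_wing JSON is the result of the ceph-volume raw list --format json call issued through sudo at 02:42:32: a map keyed by OSD UUID, each entry naming the bluestore device-mapper path and OSD id. It should agree with the LVM tags printed earlier; a cross-check, assuming both captures are held in strings raw_list and lvm_list:

    # Verify the raw listing agrees with the earlier LVM tags.
    import json

    def osds_from_raw(raw_list: str):
        return {v["osd_id"]: v["osd_uuid"] for v in json.loads(raw_list).values()}

    def osds_from_lvm(lvm_list: str):
        return {int(k): lvs[0]["tags"]["ceph.osd_fsid"]
                for k, lvs in json.loads(lvm_list).items()}

    # Both should yield:
    # {0: 'a9c7940d-c154-46ef-9c18-8ba55dddd3d6',
    #  1: '6af45214-b1a1-4565-9175-30c80d9ec207',
    #  2: '8fabd243-1f3b-4c55-a0cd-bf4f8313cb83'}
    # assert osds_from_raw(raw_list) == osds_from_lvm(lvm_list)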
Oct 11 02:42:35 compute-0 systemd[1]: libpod-4acbe6434e3626eec2b30304ef07eae4b2ad3ed65ea7fae3fa83f76cda461554.scope: Deactivated successfully.
Oct 11 02:42:35 compute-0 podman[460319]: 2025-10-11 02:42:35.007343449 +0000 UTC m=+1.478330333 container died 4acbe6434e3626eec2b30304ef07eae4b2ad3ed65ea7fae3fa83f76cda461554 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_wing, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:42:35 compute-0 systemd[1]: libpod-4acbe6434e3626eec2b30304ef07eae4b2ad3ed65ea7fae3fa83f76cda461554.scope: Consumed 1.191s CPU time.
Oct 11 02:42:35 compute-0 systemd[1]: var-lib-containers-storage-overlay-5164fac7c8cf5710a100107eb30efd4c2337f5cd29ac6c7ac13375b2fceb0248-merged.mount: Deactivated successfully.
Oct 11 02:42:35 compute-0 podman[460319]: 2025-10-11 02:42:35.098685671 +0000 UTC m=+1.569672555 container remove 4acbe6434e3626eec2b30304ef07eae4b2ad3ed65ea7fae3fa83f76cda461554 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_wing, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:42:35 compute-0 systemd[1]: libpod-conmon-4acbe6434e3626eec2b30304ef07eae4b2ad3ed65ea7fae3fa83f76cda461554.scope: Deactivated successfully.
Oct 11 02:42:35 compute-0 nova_compute[356901]: 2025-10-11 02:42:35.145 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:35 compute-0 sudo[460217]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:42:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:42:35 compute-0 podman[460372]: 2025-10-11 02:42:35.168845873 +0000 UTC m=+0.106535523 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=iscsid, container_name=iscsid)
Oct 11 02:42:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:42:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:42:35 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 430e303b-839b-4c19-b880-217ae64ddb6e does not exist
Oct 11 02:42:35 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 67d919b1-ec66-4f46-90d5-e2df4867f72e does not exist
Oct 11 02:42:35 compute-0 podman[460370]: 2025-10-11 02:42:35.193995003 +0000 UTC m=+0.131636611 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=multipathd, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, config_id=multipathd, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
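The two podman health_status lines above, like the ceilometer, node_exporter and kepler ones later in this window, are periodic health-check events: podman runs the configured healthcheck test inside the container and journals the result along with the container's full config_data. The useful fields sit in the parenthesized key=value blob; a few regexes, written against exactly this message shape, are enough to watch them:

    # Pull the health fields out of a podman "container health_status" line.
    import re

    def health_fields(entry: str):
        fields = {}
        for key in ("name", "health_status", "health_failing_streak"):
            m = re.search(rf"\b{key}=([^,)]+)", entry)
            if m:
                fields[key] = m.group(1)
        return fields

    # On the iscsid line above this returns:
    # {'name': 'iscsid', 'health_status': 'healthy', 'health_failing_streak': '0'}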
Oct 11 02:42:35 compute-0 sudo[460414]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:42:35 compute-0 sudo[460414]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:35 compute-0 sudo[460414]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:35 compute-0 sudo[460439]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:42:35 compute-0 sudo[460439]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:42:35 compute-0 sudo[460439]: pam_unix(sudo:session): session closed for user root
Oct 11 02:42:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:42:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:42:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:42:36 compute-0 nova_compute[356901]: 2025-10-11 02:42:36.489 2 DEBUG nova.network.neutron [req-afd09f0d-f5f0-4ce5-96c3-865564e97cfc req-489dfae7-6fb1-4f1f-ab22-2e94d78b8bf1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Updated VIF entry in instance network info cache for port 6c94515a-556d-4aeb-b39e-6e043f460cd8. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:42:36 compute-0 nova_compute[356901]: 2025-10-11 02:42:36.490 2 DEBUG nova.network.neutron [req-afd09f0d-f5f0-4ce5-96c3-865564e97cfc req-489dfae7-6fb1-4f1f-ab22-2e94d78b8bf1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Updating instance_info_cache with network_info: [{"id": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "address": "fa:16:3e:0f:4d:31", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.3", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.233", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap6c94515a-55", "ovs_interfaceid": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:42:36 compute-0 nova_compute[356901]: 2025-10-11 02:42:36.508 2 DEBUG oslo_concurrency.lockutils [req-afd09f0d-f5f0-4ce5-96c3-865564e97cfc req-489dfae7-6fb1-4f1f-ab22-2e94d78b8bf1 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-97d9494c-4ce4-4ff3-a0fa-d5cda135da98" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
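The cache update logged at 02:42:36.490 embeds the instance's full network_info as JSON: one OVN-bound OVS port (devname tap6c94515a-55) with fixed IP 10.100.0.3 and floating IP 192.168.122.233 on a 10.100.0.0/28 subnet, MTU 1442. Because the embedded structure is plain JSON, extracting addresses from a captured string nw_info is direct:

    # List fixed and floating IPs from a nova network_info JSON blob.
    import json

    def addresses(nw_info: str):
        for vif in json.loads(nw_info):
            for subnet in vif["network"]["subnets"]:
                for ip in subnet["ips"]:
                    floating = [f["address"] for f in ip.get("floating_ips", [])]
                    yield vif["id"], ip["address"], floating

    # for port, fixed, floating in addresses(nw_info):
    #     print(port, fixed, floating)
    # -> 6c94515a-556d-4aeb-b39e-6e043f460cd8 10.100.0.3 ['192.168.122.233']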
Oct 11 02:42:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1950: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.4 MiB/s rd, 828 KiB/s wr, 82 op/s
Oct 11 02:42:37 compute-0 ceph-mon[191930]: pgmap v1950: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.4 MiB/s rd, 828 KiB/s wr, 82 op/s
Oct 11 02:42:38 compute-0 nova_compute[356901]: 2025-10-11 02:42:38.455 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1951: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.4 MiB/s rd, 307 KiB/s wr, 81 op/s
Oct 11 02:42:39 compute-0 ceph-mon[191930]: pgmap v1951: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.4 MiB/s rd, 307 KiB/s wr, 81 op/s
Oct 11 02:42:40 compute-0 nova_compute[356901]: 2025-10-11 02:42:40.155 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1952: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 15 KiB/s wr, 74 op/s
Oct 11 02:42:40 compute-0 ceph-mon[191930]: pgmap v1952: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 15 KiB/s wr, 74 op/s
Oct 11 02:42:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:42:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1953: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 15 KiB/s wr, 74 op/s
Oct 11 02:42:42 compute-0 ceph-mon[191930]: pgmap v1953: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 15 KiB/s wr, 74 op/s
Oct 11 02:42:43 compute-0 nova_compute[356901]: 2025-10-11 02:42:43.458 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:43 compute-0 nova_compute[356901]: 2025-10-11 02:42:43.617 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:44 compute-0 podman[460464]: 2025-10-11 02:42:44.835444489 +0000 UTC m=+0.113306097 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_managed=true, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3)
Oct 11 02:42:44 compute-0 podman[460465]: 2025-10-11 02:42:44.83878597 +0000 UTC m=+0.106405948 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, build-date=2025-08-20T13:12:41, name=ubi9-minimal, vcs-type=git, distribution-scope=public, io.buildah.version=1.33.7, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, architecture=x86_64, io.openshift.expose-services=, release=1755695350, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, config_id=edpm, vendor=Red Hat, Inc., com.redhat.component=ubi9-minimal-container, io.openshift.tags=minimal rhel9, container_name=openstack_network_exporter, version=9.6)
Oct 11 02:42:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1954: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 15 KiB/s wr, 74 op/s
Oct 11 02:42:44 compute-0 podman[460466]: 2025-10-11 02:42:44.86127331 +0000 UTC m=+0.131678832 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:42:44 compute-0 ceph-mon[191930]: pgmap v1954: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 15 KiB/s wr, 74 op/s
Oct 11 02:42:45 compute-0 nova_compute[356901]: 2025-10-11 02:42:45.154 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:45 compute-0 nova_compute[356901]: 2025-10-11 02:42:45.620 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:42:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1955: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 992 KiB/s rd, 2.1 KiB/s wr, 32 op/s
Oct 11 02:42:46 compute-0 ceph-mon[191930]: pgmap v1955: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 992 KiB/s rd, 2.1 KiB/s wr, 32 op/s
Oct 11 02:42:48 compute-0 nova_compute[356901]: 2025-10-11 02:42:48.461 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1956: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 530 KiB/s rd, 1.3 KiB/s wr, 17 op/s
Oct 11 02:42:48 compute-0 ceph-mon[191930]: pgmap v1956: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 530 KiB/s rd, 1.3 KiB/s wr, 17 op/s
Oct 11 02:42:50 compute-0 nova_compute[356901]: 2025-10-11 02:42:50.157 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:50 compute-0 nova_compute[356901]: 2025-10-11 02:42:50.839 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1957: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 499 KiB/s rd, 682 B/s wr, 15 op/s
Oct 11 02:42:50 compute-0 ceph-mon[191930]: pgmap v1957: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 499 KiB/s rd, 682 B/s wr, 15 op/s
Oct 11 02:42:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:42:52 compute-0 podman[460526]: 2025-10-11 02:42:52.288579455 +0000 UTC m=+0.163862156 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.29.0, io.k8s.display-name=Red Hat Universal Base Image 9, maintainer=Red Hat, Inc., managed_by=edpm_ansible, distribution-scope=public, io.openshift.tags=base rhel9, build-date=2024-09-18T21:23:30, config_id=edpm, vcs-type=git, name=ubi9, vendor=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, release-0.7.12=, version=9.4, com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, architecture=x86_64, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, container_name=kepler)
Oct 11 02:42:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1958: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 682 B/s wr, 0 op/s
Oct 11 02:42:52 compute-0 ceph-mon[191930]: pgmap v1958: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 682 B/s wr, 0 op/s
Oct 11 02:42:53 compute-0 nova_compute[356901]: 2025-10-11 02:42:53.464 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1959: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 682 B/s wr, 0 op/s
Oct 11 02:42:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:54.872 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:42:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:54.872 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:42:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:42:54.873 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
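The three ovn_metadata_agent lines above are oslo.concurrency's standard lock instrumentation: announce the acquire, log how long the acquire waited, then log how long the lock was held on release. A self-contained approximation using only the standard library (timed_lock is a made-up name; the real implementation lives in oslo_concurrency.lockutils):

    import threading
    import time
    from contextlib import contextmanager

    _locks = {}

    @contextmanager
    def timed_lock(name):
        """Mimic lockutils' Acquiring / acquired-waited / released-held lines."""
        lock = _locks.setdefault(name, threading.Lock())
        print(f'Acquiring lock "{name}"')
        t0 = time.monotonic()
        lock.acquire()
        t1 = time.monotonic()
        print(f'Lock "{name}" acquired :: waited {t1 - t0:.3f}s')
        try:
            yield
        finally:
            held = time.monotonic() - t1
            lock.release()
            print(f'Lock "{name}" "released" :: held {held:.3f}s')

    with timed_lock("_check_child_processes"):
        time.sleep(0.001)  # stand-in for _check_child_processes' real work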
Oct 11 02:42:54 compute-0 ceph-mon[191930]: pgmap v1959: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 682 B/s wr, 0 op/s
Oct 11 02:42:55 compute-0 nova_compute[356901]: 2025-10-11 02:42:55.160 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:55 compute-0 nova_compute[356901]: 2025-10-11 02:42:55.641 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:42:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:42:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:42:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:42:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:42:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:42:56
Oct 11 02:42:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:42:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:42:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['cephfs.cephfs.data', '.mgr', 'cephfs.cephfs.meta', 'default.rgw.meta', 'vms', 'default.rgw.log', 'backups', 'volumes', 'images', 'default.rgw.control', '.rgw.root']
Oct 11 02:42:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:42:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:42:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:42:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1960: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:42:56 compute-0 ceph-mon[191930]: pgmap v1960: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:42:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:42:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:42:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:42:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:42:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:42:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:42:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:42:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:42:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:42:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:42:58 compute-0 podman[460548]: 2025-10-11 02:42:58.228181306 +0000 UTC m=+0.108227164 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.4, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_compute)
Oct 11 02:42:58 compute-0 podman[460546]: 2025-10-11 02:42:58.234992272 +0000 UTC m=+0.110866584 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:42:58 compute-0 podman[460549]: 2025-10-11 02:42:58.265051681 +0000 UTC m=+0.138932002 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, container_name=ovn_metadata_agent, org.label-schema.license=GPLv2)
Oct 11 02:42:58 compute-0 podman[460547]: 2025-10-11 02:42:58.283947912 +0000 UTC m=+0.172516648 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.build-date=20251009)
Oct 11 02:42:58 compute-0 nova_compute[356901]: 2025-10-11 02:42:58.470 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:42:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e140 do_prune osdmap full prune enabled
Oct 11 02:42:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 e141: 3 total, 3 up, 3 in
Oct 11 02:42:58 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e141: 3 total, 3 up, 3 in
Oct 11 02:42:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1962: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 307 B/s rd, 204 B/s wr, 0 op/s
Oct 11 02:42:59 compute-0 ceph-mon[191930]: osdmap e141: 3 total, 3 up, 3 in
Oct 11 02:42:59 compute-0 ceph-mon[191930]: pgmap v1962: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 307 B/s rd, 204 B/s wr, 0 op/s
Oct 11 02:42:59 compute-0 podman[157119]: time="2025-10-11T02:42:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:42:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:42:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:42:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:42:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9548 "" "Go-http-client/1.1"
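The two GET lines above are podman's API service logging requests from a Go client, most likely podman_exporter given the CONTAINER_HOST=unix:///run/podman/podman.sock it was started with earlier. Querying the same libpod endpoint needs nothing beyond the standard library, as sketched below; the socket path comes from that mount, the /v4.9.3 prefix just mirrors what this client sent, and running it requires an active podman API service plus permission on the socket:

    import http.client
    import json
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        """HTTPConnection that dials a Unix socket instead of TCP."""
        def __init__(self, path):
            super().__init__("localhost")
            self.unix_path = path

        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self.unix_path)

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    containers = json.loads(conn.getresponse().read())
    for c in containers:
        print(c["Names"], c["State"])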
Oct 11 02:43:00 compute-0 nova_compute[356901]: 2025-10-11 02:43:00.163 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1963: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 307 B/s rd, 204 B/s wr, 0 op/s
Oct 11 02:43:00 compute-0 nova_compute[356901]: 2025-10-11 02:43:00.865 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:43:00 compute-0 ceph-mon[191930]: pgmap v1963: 321 pgs: 321 active+clean; 244 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 307 B/s rd, 204 B/s wr, 0 op/s
Oct 11 02:43:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:43:01 compute-0 openstack_network_exporter[374316]: ERROR   02:43:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:43:01 compute-0 openstack_network_exporter[374316]: ERROR   02:43:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:43:01 compute-0 openstack_network_exporter[374316]: ERROR   02:43:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:43:01 compute-0 openstack_network_exporter[374316]: ERROR   02:43:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:43:01 compute-0 openstack_network_exporter[374316]: ERROR   02:43:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
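These exporter errors are the expected outcome of ovs-appctl-style calls when the target daemon's control socket is missing: ovn-northd normally runs with the control plane rather than on a compute node, so its socket is legitimately absent here. A quick way to see which control sockets a node actually exposes (the directories are the conventional defaults and may vary by deployment):

    import glob

    # Conventional runtime directories; adjust for your deployment.
    for pattern in ("/var/run/openvswitch/*.ctl", "/var/run/ovn/*.ctl"):
        print(pattern, "->", glob.glob(pattern) or "no control sockets")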
Oct 11 02:43:02 compute-0 systemd[1]: Starting dnf makecache...
Oct 11 02:43:02 compute-0 dnf[460626]: Metadata cache refreshed recently.
Oct 11 02:43:02 compute-0 systemd[1]: dnf-makecache.service: Deactivated successfully.
Oct 11 02:43:02 compute-0 systemd[1]: Finished dnf makecache.
Oct 11 02:43:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1964: 321 pgs: 321 active+clean; 260 MiB data, 388 MiB used, 60 GiB / 60 GiB avail; 13 KiB/s rd, 1.6 MiB/s wr, 18 op/s
Oct 11 02:43:02 compute-0 ceph-mon[191930]: pgmap v1964: 321 pgs: 321 active+clean; 260 MiB data, 388 MiB used, 60 GiB / 60 GiB avail; 13 KiB/s rd, 1.6 MiB/s wr, 18 op/s
Oct 11 02:43:03 compute-0 nova_compute[356901]: 2025-10-11 02:43:03.475 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:03 compute-0 nova_compute[356901]: 2025-10-11 02:43:03.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:43:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1965: 321 pgs: 321 active+clean; 265 MiB data, 393 MiB used, 60 GiB / 60 GiB avail; 13 KiB/s rd, 2.0 MiB/s wr, 19 op/s
Oct 11 02:43:04 compute-0 ceph-mon[191930]: pgmap v1965: 321 pgs: 321 active+clean; 265 MiB data, 393 MiB used, 60 GiB / 60 GiB avail; 13 KiB/s rd, 2.0 MiB/s wr, 19 op/s
Oct 11 02:43:05 compute-0 nova_compute[356901]: 2025-10-11 02:43:05.167 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:06 compute-0 podman[460628]: 2025-10-11 02:43:06.203904695 +0000 UTC m=+0.091206704 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=iscsid, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:43:06 compute-0 podman[460627]: 2025-10-11 02:43:06.249104894 +0000 UTC m=+0.139056012 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_id=multipathd, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
Oct 11 02:43:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:43:06 compute-0 nova_compute[356901]: 2025-10-11 02:43:06.832 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "8422017b-c868-4ba2-ab1f-61d3668ca145" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:06 compute-0 nova_compute[356901]: 2025-10-11 02:43:06.834 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145" acquired by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:06 compute-0 nova_compute[356901]: 2025-10-11 02:43:06.857 2 DEBUG nova.compute.manager [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Starting instance... _do_build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2402
Oct 11 02:43:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1966: 321 pgs: 321 active+clean; 272 MiB data, 395 MiB used, 60 GiB / 60 GiB avail; 42 KiB/s rd, 2.8 MiB/s wr, 27 op/s
Oct 11 02:43:06 compute-0 ceph-mon[191930]: pgmap v1966: 321 pgs: 321 active+clean; 272 MiB data, 395 MiB used, 60 GiB / 60 GiB avail; 42 KiB/s rd, 2.8 MiB/s wr, 27 op/s
Oct 11 02:43:06 compute-0 nova_compute[356901]: 2025-10-11 02:43:06.976 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:06 compute-0 nova_compute[356901]: 2025-10-11 02:43:06.978 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:06 compute-0 nova_compute[356901]: 2025-10-11 02:43:06.995 2 DEBUG nova.virt.hardware [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Require both a host and instance NUMA topology to fit instance on host. numa_fit_instance_to_host /usr/lib/python3.9/site-packages/nova/virt/hardware.py:2368
Oct 11 02:43:06 compute-0 nova_compute[356901]: 2025-10-11 02:43:06.996 2 INFO nova.compute.claims [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Claim successful on node compute-0.ctlplane.example.com
Oct 11 02:43:07 compute-0 ovn_controller[88370]: 2025-10-11T02:43:07Z|00021|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:0f:4d:31 10.100.0.3
Oct 11 02:43:07 compute-0 ovn_controller[88370]: 2025-10-11T02:43:07Z|00022|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:0f:4d:31 10.100.0.3
Oct 11 02:43:07 compute-0 nova_compute[356901]: 2025-10-11 02:43:07.203 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0017781233720778797 of space, bias 1.0, pg target 0.5334370116233639 quantized to 32 (current 32)
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00125203744627857 of space, bias 1.0, pg target 0.375611233883571 quantized to 32 (current 32)
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:43:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
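The pg_autoscaler lines above all follow one formula: the raw pg target is consistent with usage_ratio × bias × (OSD count × target PGs per OSD), using the 3 OSDs from osdmap e141 and the usual mon_target_pg_per_osd default of 100. That constant is inferred from the numbers, not stated in the log. Checking three of the pools:

    # Inferred relation: raw_pg_target = usage_ratio * bias * (osds * pg_per_osd)
    OSDS, PG_PER_OSD = 3, 100   # 3 up/in OSDs; 100 is the assumed default

    pools = {  # name: (usage_ratio, bias, pg target as logged)
        ".mgr":               (7.185749983720779e-06, 1.0, 0.0021557249951162337),
        "vms":                (0.0017781233720778797, 1.0, 0.5334370116233639),
        "cephfs.cephfs.meta": (5.087256625643029e-07, 4.0, 0.0006104707950771635),
    }

    for name, (ratio, bias, logged) in pools.items():
        computed = ratio * bias * OSDS * PG_PER_OSD
        print(f"{name}: computed={computed:.16g} logged={logged:.16g}")

All three computed values reproduce the logged targets exactly; the "quantized" figure is then apparently snapped to a power of two, subject to per-pool minimums, which is why tiny raw targets still show 32.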
Oct 11 02:43:07 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:43:07 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/960632260' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:43:07 compute-0 nova_compute[356901]: 2025-10-11 02:43:07.718 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.515s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
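The 0.5 s subprocess above is how nova's RBD image backend polls pool capacity. Reproducing it directly is straightforward (this needs the ceph CLI, a reachable cluster and the client.openstack keyring; the JSON field names are as emitted by recent Ceph releases):

    import json
    import subprocess

    # The exact command nova_compute logged above.
    cmd = ["ceph", "df", "--format=json",
           "--id", "openstack", "--conf", "/etc/ceph/ceph.conf"]
    out = subprocess.run(cmd, check=True, capture_output=True, text=True).stdout
    report = json.loads(out)
    for pool in report["pools"]:
        print(pool["name"], pool["stats"]["bytes_used"], "bytes used")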
Oct 11 02:43:07 compute-0 nova_compute[356901]: 2025-10-11 02:43:07.730 2 DEBUG nova.compute.provider_tree [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:43:07 compute-0 nova_compute[356901]: 2025-10-11 02:43:07.748 2 DEBUG nova.scheduler.client.report [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
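Placement derives schedulable capacity from that inventory as (total - reserved) × allocation_ratio per resource class, so the figures above work out as:

    inventory = {  # copied from the report.py line above
        "VCPU":      {"total": 8,    "reserved": 0,   "allocation_ratio": 4.0},
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "DISK_GB":   {"total": 59,   "reserved": 1,   "allocation_ratio": 0.9},
    }
    for rc, inv in inventory.items():
        print(rc, (inv["total"] - inv["reserved"]) * inv["allocation_ratio"])
    # VCPU 32.0, MEMORY_MB 7168.0, DISK_GB 52.2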
Oct 11 02:43:07 compute-0 nova_compute[356901]: 2025-10-11 02:43:07.772 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: held 0.795s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:43:07 compute-0 nova_compute[356901]: 2025-10-11 02:43:07.774 2 DEBUG nova.compute.manager [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Start building networks asynchronously for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2799
Oct 11 02:43:07 compute-0 nova_compute[356901]: 2025-10-11 02:43:07.819 2 DEBUG nova.compute.manager [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Allocating IP information in the background. _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1952
Oct 11 02:43:07 compute-0 nova_compute[356901]: 2025-10-11 02:43:07.820 2 DEBUG nova.network.neutron [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] allocate_for_instance() allocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1156
Oct 11 02:43:07 compute-0 nova_compute[356901]: 2025-10-11 02:43:07.839 2 INFO nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Ignoring supplied device name: /dev/vda. Libvirt can't honour user-supplied dev names
Oct 11 02:43:07 compute-0 nova_compute[356901]: 2025-10-11 02:43:07.857 2 DEBUG nova.compute.manager [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Start building block device mappings for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2834
Oct 11 02:43:07 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/960632260' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:43:07 compute-0 nova_compute[356901]: 2025-10-11 02:43:07.951 2 DEBUG nova.compute.manager [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Start spawning the instance on the hypervisor. _build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2608
Oct 11 02:43:07 compute-0 nova_compute[356901]: 2025-10-11 02:43:07.954 2 DEBUG nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Creating instance directory _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4723
Oct 11 02:43:07 compute-0 nova_compute[356901]: 2025-10-11 02:43:07.955 2 INFO nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Creating image(s)
Oct 11 02:43:08 compute-0 nova_compute[356901]: 2025-10-11 02:43:08.004 2 DEBUG nova.storage.rbd_utils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] rbd image 8422017b-c868-4ba2-ab1f-61d3668ca145_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:43:08 compute-0 nova_compute[356901]: 2025-10-11 02:43:08.059 2 DEBUG nova.storage.rbd_utils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] rbd image 8422017b-c868-4ba2-ab1f-61d3668ca145_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:43:08 compute-0 nova_compute[356901]: 2025-10-11 02:43:08.121 2 DEBUG nova.storage.rbd_utils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] rbd image 8422017b-c868-4ba2-ab1f-61d3668ca145_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:43:08 compute-0 nova_compute[356901]: 2025-10-11 02:43:08.135 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "61847d5d7446819c58bff23b092765d610117849" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:08 compute-0 nova_compute[356901]: 2025-10-11 02:43:08.137 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "61847d5d7446819c58bff23b092765d610117849" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 0.003s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:08 compute-0 nova_compute[356901]: 2025-10-11 02:43:08.144 2 DEBUG nova.policy [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Policy check for network:attach_external_network failed with credentials {'is_admin': False, 'user_id': 'f66a606299944d53a40f21e81c791d70', 'user_domain_id': 'default', 'system_scope': None, 'domain_id': None, 'project_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'project_domain_id': 'default', 'roles': ['member', 'reader'], 'is_admin_project': True, 'service_user_id': None, 'service_user_domain_id': None, 'service_project_id': None, 'service_project_domain_id': None, 'service_roles': []} authorize /usr/lib/python3.9/site-packages/nova/policy.py:203
Oct 11 02:43:08 compute-0 nova_compute[356901]: 2025-10-11 02:43:08.344 2 DEBUG nova.virt.libvirt.imagebackend [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Image locations are: [{'url': 'rbd://3c7617c3-7a20-523e-a9de-20c0d6ba41da/images/2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c/snap', 'metadata': {'store': 'default_backend'}}, {'url': 'rbd://3c7617c3-7a20-523e-a9de-20c0d6ba41da/images/2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c/snap', 'metadata': {}}] clone /usr/lib/python3.9/site-packages/nova/virt/libvirt/imagebackend.py:1085
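Each Glance location above is an rbd:// URL packing the cluster fsid, pool, image id and snapshot name into its path; nova's clone path inspects these to decide whether a copy-on-write clone is possible (here the image turned out to be qcow2, so nova falls back to fetch-and-convert a few lines later). Pulling the pieces apart with the standard library:

    from urllib.parse import urlparse

    # Location URL from the line above: rbd://<fsid>/<pool>/<image>/<snap>
    url = ("rbd://3c7617c3-7a20-523e-a9de-20c0d6ba41da/images/"
           "2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c/snap")

    parsed = urlparse(url)
    fsid = parsed.netloc
    pool, image, snapshot = parsed.path.strip("/").split("/")
    print(f"fsid={fsid} pool={pool} image={image} snapshot={snapshot}")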
Oct 11 02:43:08 compute-0 nova_compute[356901]: 2025-10-11 02:43:08.480 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:08 compute-0 nova_compute[356901]: 2025-10-11 02:43:08.861 2 DEBUG nova.network.neutron [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Successfully created port: e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 _create_port_minimal /usr/lib/python3.9/site-packages/nova/network/neutron.py:548
Oct 11 02:43:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1967: 321 pgs: 321 active+clean; 295 MiB data, 412 MiB used, 60 GiB / 60 GiB avail; 306 KiB/s rd, 4.5 MiB/s wr, 82 op/s
Oct 11 02:43:08 compute-0 nova_compute[356901]: 2025-10-11 02:43:08.899 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:43:08 compute-0 ceph-mon[191930]: pgmap v1967: 321 pgs: 321 active+clean; 295 MiB data, 412 MiB used, 60 GiB / 60 GiB avail; 306 KiB/s rd, 4.5 MiB/s wr, 82 op/s
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.436 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849.part --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.537 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849.part --force-share --output=json" returned: 0 in 0.101s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.538 2 DEBUG nova.virt.images [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] 2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c was qcow2, converting to raw fetch_to_raw /usr/lib/python3.9/site-packages/nova/virt/images.py:242
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.540 2 DEBUG nova.privsep.utils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Path '/var/lib/nova/instances' supports direct I/O supports_direct_io /usr/lib/python3.9/site-packages/nova/privsep/utils.py:63
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.541 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): qemu-img convert -t none -O raw -f qcow2 /var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849.part /var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849.converted execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.592 2 DEBUG nova.network.neutron [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Successfully updated port: e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 _update_port /usr/lib/python3.9/site-packages/nova/network/neutron.py:586
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.611 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.611 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquired lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.611 2 DEBUG nova.network.neutron [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.675 2 DEBUG nova.compute.manager [req-042a2dc7-7da4-4d7c-8518-65fa3228af8a req-08941a2e-6c6b-4bd4-b147-7034bd35eee2 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Received event network-changed-e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.675 2 DEBUG nova.compute.manager [req-042a2dc7-7da4-4d7c-8518-65fa3228af8a req-08941a2e-6c6b-4bd4-b147-7034bd35eee2 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Refreshing instance network info cache due to event network-changed-e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.676 2 DEBUG oslo_concurrency.lockutils [req-042a2dc7-7da4-4d7c-8518-65fa3228af8a req-08941a2e-6c6b-4bd4-b147-7034bd35eee2 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.820 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "qemu-img convert -t none -O raw -f qcow2 /var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849.part /var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849.converted" returned: 0 in 0.279s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.824 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849.converted --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.909 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849.converted --force-share --output=json" returned: 0 in 0.085s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.911 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "61847d5d7446819c58bff23b092765d610117849" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 1.774s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
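Lines 02:43:08.135 through 02:43:09.911 are nova's image-cache critical section: the lock name is the cached base file's name (a hash derived from the image), and inside it nova probes the download with qemu-img info, converts qcow2 to raw, then re-probes the result. A condensed sketch of that happy path, minus the oslo prlimit wrapper and with hypothetical paths:

    import json
    import subprocess

    def image_format(path):
        """qemu-img info, as nova runs it (sans the prlimit guard)."""
        out = subprocess.run(
            ["qemu-img", "info", path, "--force-share", "--output=json"],
            check=True, capture_output=True, text=True).stdout
        return json.loads(out)["format"]

    def fetch_to_raw(part, converted):
        """Condensed happy path of nova.virt.images.fetch_to_raw."""
        if image_format(part) == "qcow2":
            # -t none bypasses the host page cache, as in the logged command.
            subprocess.run(["qemu-img", "convert", "-t", "none", "-O", "raw",
                            "-f", "qcow2", part, converted], check=True)
            assert image_format(converted) == "raw"

    # Hypothetical invocation mirroring the _base cache layout seen above:
    # fetch_to_raw("/var/lib/nova/instances/_base/<sha1>.part",
    #              "/var/lib/nova/instances/_base/<sha1>.converted")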
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.948 2 DEBUG nova.storage.rbd_utils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] rbd image 8422017b-c868-4ba2-ab1f-61d3668ca145_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:43:09 compute-0 nova_compute[356901]: 2025-10-11 02:43:09.957 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849 8422017b-c868-4ba2-ab1f-61d3668ca145_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:43:10 compute-0 nova_compute[356901]: 2025-10-11 02:43:10.172 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:10 compute-0 nova_compute[356901]: 2025-10-11 02:43:10.349 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849 8422017b-c868-4ba2-ab1f-61d3668ca145_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.393s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:43:10 compute-0 nova_compute[356901]: 2025-10-11 02:43:10.396 2 DEBUG nova.network.neutron [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Instance cache missing network info. _get_preexisting_port_ids /usr/lib/python3.9/site-packages/nova/network/neutron.py:3323
Oct 11 02:43:10 compute-0 nova_compute[356901]: 2025-10-11 02:43:10.477 2 DEBUG nova.storage.rbd_utils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] resizing rbd image 8422017b-c868-4ba2-ab1f-61d3668ca145_disk to 1073741824 resize /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:288
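With the raw base file in place, the root disk becomes an RBD image: the logged rbd import pushes it into the vms pool, and the resize grows it to the flavor's 1 GiB (1073741824 bytes). nova performs the resize through the librbd binding rather than the CLI; the equivalent shell-outs, shown for illustration only:

    import subprocess

    base = "/var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849"
    disk = "8422017b-c868-4ba2-ab1f-61d3668ca145_disk"
    auth = ["--id", "openstack", "--conf", "/etc/ceph/ceph.conf"]

    # Exactly the import command nova logged above.
    subprocess.run(["rbd", "import", "--pool", "vms", base, disk,
                    "--image-format=2", *auth], check=True)
    # CLI equivalent of the rbd_utils.resize(..., 1073741824) logged above.
    subprocess.run(["rbd", "resize", f"vms/{disk}", "--size", "1G", *auth],
                   check=True)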
Oct 11 02:43:10 compute-0 nova_compute[356901]: 2025-10-11 02:43:10.668 2 DEBUG nova.objects.instance [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lazy-loading 'migration_context' on Instance uuid 8422017b-c868-4ba2-ab1f-61d3668ca145 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:43:10 compute-0 nova_compute[356901]: 2025-10-11 02:43:10.687 2 DEBUG nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Created local disks _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4857
Oct 11 02:43:10 compute-0 nova_compute[356901]: 2025-10-11 02:43:10.688 2 DEBUG nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Ensure instance console log exists: /var/lib/nova/instances/8422017b-c868-4ba2-ab1f-61d3668ca145/console.log _ensure_console_log_for_instance /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4609
Oct 11 02:43:10 compute-0 nova_compute[356901]: 2025-10-11 02:43:10.689 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "vgpu_resources" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:10 compute-0 nova_compute[356901]: 2025-10-11 02:43:10.689 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "vgpu_resources" acquired by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:10 compute-0 nova_compute[356901]: 2025-10-11 02:43:10.690 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "vgpu_resources" "released" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:43:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1968: 321 pgs: 321 active+clean; 295 MiB data, 412 MiB used, 60 GiB / 60 GiB avail; 258 KiB/s rd, 3.8 MiB/s wr, 69 op/s
Oct 11 02:43:10 compute-0 nova_compute[356901]: 2025-10-11 02:43:10.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:43:10 compute-0 nova_compute[356901]: 2025-10-11 02:43:10.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:43:10 compute-0 nova_compute[356901]: 2025-10-11 02:43:10.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:43:10 compute-0 ceph-mon[191930]: pgmap v1968: 321 pgs: 321 active+clean; 295 MiB data, 412 MiB used, 60 GiB / 60 GiB avail; 258 KiB/s rd, 3.8 MiB/s wr, 69 op/s
Oct 11 02:43:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:43:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3600.1 total, 600.0 interval
                                            Cumulative writes: 9523 writes, 36K keys, 9523 commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 9523 writes, 2506 syncs, 3.80 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 2049 writes, 7169 keys, 2049 commit groups, 1.0 writes per commit group, ingest: 7.34 MB, 0.01 MB/s
                                            Interval WAL: 2049 writes, 834 syncs, 2.46 writes per sync, written: 0.01 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
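The interval figures in the RocksDB dump are internally consistent and easy to re-derive: 2049 WAL writes over 834 syncs gives the reported 2.46 writes per sync, and 7.34 MB ingested over the 600 s interval rounds to the logged 0.01 MB/s. A quick check:

    interval_writes = 2049
    interval_syncs = 834
    interval_ingest_mb = 7.34
    interval_secs = 600.0

    print(round(interval_writes / interval_syncs, 2))    # 2.46, as logged
    print(round(interval_ingest_mb / interval_secs, 2))  # 0.01 MB/s, as logged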
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.822 2 DEBUG nova.network.neutron [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Updating instance_info_cache with network_info: [{"id": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "address": "fa:16:3e:2c:af:96", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.3.53", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape00931c0-3d", "ovs_interfaceid": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.847 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Releasing lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.847 2 DEBUG nova.compute.manager [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Instance network_info: |[{"id": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "address": "fa:16:3e:2c:af:96", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.3.53", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape00931c0-3d", "ovs_interfaceid": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}]| _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1967
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.848 2 DEBUG oslo_concurrency.lockutils [req-042a2dc7-7da4-4d7c-8518-65fa3228af8a req-08941a2e-6c6b-4bd4-b147-7034bd35eee2 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.849 2 DEBUG nova.network.neutron [req-042a2dc7-7da4-4d7c-8518-65fa3228af8a req-08941a2e-6c6b-4bd4-b147-7034bd35eee2 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Refreshing network info cache for port e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.855 2 DEBUG nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Start _get_guest_xml network_info=[{"id": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "address": "fa:16:3e:2c:af:96", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.3.53", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape00931c0-3d", "ovs_interfaceid": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] disk_info={'disk_bus': 'virtio', 'cdrom_bus': 'sata', 'mapping': {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.config': {'bus': 'sata', 'dev': 'sda', 'type': 'cdrom'}}} image_meta=ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:42:57Z,direct_url=<?>,disk_format='qcow2',id=2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c,min_disk=0,min_ram=0,name='tempest-scenario-img--676804077',owner='a05bbc8f872d4dd99972d2cb8136d608',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:42:58Z,virtual_size=<?>,visibility=<?>) rescue=None block_device_info={'root_device_name': '/dev/vda', 'image': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'boot_index': 0, 'device_name': '/dev/vda', 'size': 0, 'encryption_format': None, 'image_id': '2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c'}], 'ephemerals': [], 'block_device_mapping': [], 'swap': None} _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7549
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.881 2 WARNING nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.893 2 DEBUG nova.virt.libvirt.host [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V1... _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1653
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.894 2 DEBUG nova.virt.libvirt.host [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CPU controller missing on host. _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1663
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.901 2 DEBUG nova.virt.libvirt.host [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V2... _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1672
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.902 2 DEBUG nova.virt.libvirt.host [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CPU controller found on host. _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1679
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.903 2 DEBUG nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CPU mode 'host-model' models '' was chosen, with extra flags: '' _get_guest_cpu_model_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:5396
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.903 2 DEBUG nova.virt.hardware [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Getting desirable topologies for flavor Flavor(created_at=2025-10-11T02:38:03Z,deleted=False,deleted_at=None,description=None,disabled=False,ephemeral_gb=0,extra_specs={hw_rng:allowed='True'},flavorid='6dff30d1-85df-4e9c-9163-a20ba47bb0c7',id=3,is_public=True,memory_mb=128,name='m1.nano',projects=<?>,root_gb=1,rxtx_factor=1.0,swap=0,updated_at=None,vcpu_weight=0,vcpus=1) and image_meta ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:42:57Z,direct_url=<?>,disk_format='qcow2',id=2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c,min_disk=0,min_ram=0,name='tempest-scenario-img--676804077',owner='a05bbc8f872d4dd99972d2cb8136d608',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:42:58Z,virtual_size=<?>,visibility=<?>), allow threads: True _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:563
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.904 2 DEBUG nova.virt.hardware [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Flavor limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:348
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.905 2 DEBUG nova.virt.hardware [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Image limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:352
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.905 2 DEBUG nova.virt.hardware [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Flavor pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:388
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.905 2 DEBUG nova.virt.hardware [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Image pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:392
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.906 2 DEBUG nova.virt.hardware [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Chose sockets=0, cores=0, threads=0; limits were sockets=65536, cores=65536, threads=65536 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:430
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.906 2 DEBUG nova.virt.hardware [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Topology preferred VirtCPUTopology(cores=0,sockets=0,threads=0), maximum VirtCPUTopology(cores=65536,sockets=65536,threads=65536) _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:569
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.906 2 DEBUG nova.virt.hardware [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Build topologies for 1 vcpu(s) 1:1:1 _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:471
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.907 2 DEBUG nova.virt.hardware [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Got 1 possible topologies _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:501
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.907 2 DEBUG nova.virt.hardware [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Possible topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:575
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.908 2 DEBUG nova.virt.hardware [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Sorted desired topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:577
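The hardware.py lines walk from flavor/image constraints (all unset, hence the 0:0:0 preferences and 65536 maximums) to the single viable topology for one vCPU. The core of that search is just enumerating sockets*cores*threads factorizations of the vCPU count within the limits; a simplified re-implementation, not Nova's actual function:

    def possible_topologies(vcpus, max_sockets=65536, max_cores=65536,
                            max_threads=65536):
        # Yield every (sockets, cores, threads) whose product equals vcpus.
        for s in range(1, min(vcpus, max_sockets) + 1):
            for c in range(1, min(vcpus, max_cores) + 1):
                for t in range(1, min(vcpus, max_threads) + 1):
                    if s * c * t == vcpus:
                        yield (s, c, t)

    print(list(possible_topologies(1)))  # [(1, 1, 1)], matching the log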
Oct 11 02:43:11 compute-0 nova_compute[356901]: 2025-10-11 02:43:11.913 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:43:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:43:12 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1893303887' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:43:12 compute-0 nova_compute[356901]: 2025-10-11 02:43:12.476 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.563s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:43:12 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1893303887' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
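Before touching RBD, Nova shells out for the monitor map with oslo.concurrency's processutils; the paired "Running cmd"/"returned: 0 in 0.563s" lines bracket that call, and the ceph-mon audit lines show the same command arriving at the monitor. A minimal sketch of the same helper, using the client and conf path from the log:

    from oslo_concurrency import processutils

    stdout, stderr = processutils.execute(
        'ceph', 'mon', 'dump', '--format=json',
        '--id', 'openstack', '--conf', '/etc/ceph/ceph.conf')
    # processutils.execute raises ProcessExecutionError on a non-zero exit,
    # so reaching this line corresponds to the "returned: 0" DEBUG entry.
    print(stdout)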
Oct 11 02:43:12 compute-0 nova_compute[356901]: 2025-10-11 02:43:12.535 2 DEBUG nova.storage.rbd_utils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] rbd image 8422017b-c868-4ba2-ab1f-61d3668ca145_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:43:12 compute-0 nova_compute[356901]: 2025-10-11 02:43:12.550 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:43:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1969: 321 pgs: 321 active+clean; 333 MiB data, 436 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 5.0 MiB/s wr, 99 op/s
Oct 11 02:43:12 compute-0 nova_compute[356901]: 2025-10-11 02:43:12.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:43:12 compute-0 nova_compute[356901]: 2025-10-11 02:43:12.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:43:12 compute-0 nova_compute[356901]: 2025-10-11 02:43:12.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:43:12 compute-0 nova_compute[356901]: 2025-10-11 02:43:12.928 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Skipping network cache update for instance because it is Building. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9871
Oct 11 02:43:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:43:13 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1602883105' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.163 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.613s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.165 2 DEBUG nova.virt.libvirt.vif [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:43:05Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description=None,display_name='te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c',id=14,image_ref='2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data=None,key_name=None,keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={metering.server_group='44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='a05bbc8f872d4dd99972d2cb8136d608',ramdisk_id='',reservation_id='r-zwtwqn0d',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-PrometheusGabbiTest-674022988',owner_user_name='tempest-PrometheusGabbiTest-674022988-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:43:07Z,user_data='IyEvYmluL3NoCmVjaG8gJ0xvYWRpbmcgQ1BVJwpzZXQgLXYKY2F0IC9kZXYvdXJhbmRvbSA+IC9kZXYvbnVsbCAmIHNsZWVwIDMwMCA7IGtpbGwgJCEgCg==',user_id='f66a606299944d53a40f21e81c791d70',uuid=8422017b-c868-4ba2-ab1f-61d3668ca145,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "address": "fa:16:3e:2c:af:96", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.3.53", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape00931c0-3d", "ovs_interfaceid": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} virt_type=kvm get_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:563
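The user_data field in the Instance dump above is base64; decoding it reveals the tempest workload this guest will run, a five-minute CPU burn, which fits an autoscaling/Prometheus test. Decoding it directly:

    import base64

    user_data = ('IyEvYmluL3NoCmVjaG8gJ0xvYWRpbmcgQ1BVJwpzZXQgLXYKY2F0IC9kZXYv'
                 'dXJhbmRvbSA+IC9kZXYvbnVsbCAmIHNsZWVwIDMwMCA7IGtpbGwgJCEgCg==')
    print(base64.b64decode(user_data).decode())
    # Output:
    #   #!/bin/sh
    #   echo 'Loading CPU'
    #   set -v
    #   cat /dev/urandom > /dev/null & sleep 300 ; kill $!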
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.165 2 DEBUG nova.network.os_vif_util [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Converting VIF {"id": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "address": "fa:16:3e:2c:af:96", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.3.53", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape00931c0-3d", "ovs_interfaceid": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.166 2 DEBUG nova.network.os_vif_util [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:2c:af:96,bridge_name='br-int',has_traffic_filtering=True,id=e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6,network=Network(3563b4a1-477a-44a0-b01f-7d19d49c0308),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tape00931c0-3d') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.167 2 DEBUG nova.objects.instance [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lazy-loading 'pci_devices' on Instance uuid 8422017b-c868-4ba2-ab1f-61d3668ca145 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.182 2 DEBUG nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] End _get_guest_xml xml=<domain type="kvm">
Oct 11 02:43:13 compute-0 nova_compute[356901]:   <uuid>8422017b-c868-4ba2-ab1f-61d3668ca145</uuid>
Oct 11 02:43:13 compute-0 nova_compute[356901]:   <name>instance-0000000e</name>
Oct 11 02:43:13 compute-0 nova_compute[356901]:   <memory>131072</memory>
Oct 11 02:43:13 compute-0 nova_compute[356901]:   <vcpu>1</vcpu>
Oct 11 02:43:13 compute-0 nova_compute[356901]:   <metadata>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.1">
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <nova:package version="27.5.2-0.20250829104910.6f8decf.el9"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <nova:name>te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c</nova:name>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <nova:creationTime>2025-10-11 02:43:11</nova:creationTime>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <nova:flavor name="m1.nano">
Oct 11 02:43:13 compute-0 nova_compute[356901]:         <nova:memory>128</nova:memory>
Oct 11 02:43:13 compute-0 nova_compute[356901]:         <nova:disk>1</nova:disk>
Oct 11 02:43:13 compute-0 nova_compute[356901]:         <nova:swap>0</nova:swap>
Oct 11 02:43:13 compute-0 nova_compute[356901]:         <nova:ephemeral>0</nova:ephemeral>
Oct 11 02:43:13 compute-0 nova_compute[356901]:         <nova:vcpus>1</nova:vcpus>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       </nova:flavor>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <nova:owner>
Oct 11 02:43:13 compute-0 nova_compute[356901]:         <nova:user uuid="f66a606299944d53a40f21e81c791d70">tempest-PrometheusGabbiTest-674022988-project-member</nova:user>
Oct 11 02:43:13 compute-0 nova_compute[356901]:         <nova:project uuid="a05bbc8f872d4dd99972d2cb8136d608">tempest-PrometheusGabbiTest-674022988</nova:project>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       </nova:owner>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <nova:root type="image" uuid="2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <nova:ports>
Oct 11 02:43:13 compute-0 nova_compute[356901]:         <nova:port uuid="e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6">
Oct 11 02:43:13 compute-0 nova_compute[356901]:           <nova:ip type="fixed" address="10.100.3.53" ipVersion="4"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:         </nova:port>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       </nova:ports>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     </nova:instance>
Oct 11 02:43:13 compute-0 nova_compute[356901]:   </metadata>
Oct 11 02:43:13 compute-0 nova_compute[356901]:   <sysinfo type="smbios">
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <system>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <entry name="manufacturer">RDO</entry>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <entry name="product">OpenStack Compute</entry>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <entry name="version">27.5.2-0.20250829104910.6f8decf.el9</entry>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <entry name="serial">8422017b-c868-4ba2-ab1f-61d3668ca145</entry>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <entry name="uuid">8422017b-c868-4ba2-ab1f-61d3668ca145</entry>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <entry name="family">Virtual Machine</entry>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     </system>
Oct 11 02:43:13 compute-0 nova_compute[356901]:   </sysinfo>
Oct 11 02:43:13 compute-0 nova_compute[356901]:   <os>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <type arch="x86_64" machine="q35">hvm</type>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <boot dev="hd"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <smbios mode="sysinfo"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:   </os>
Oct 11 02:43:13 compute-0 nova_compute[356901]:   <features>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <acpi/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <apic/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <vmcoreinfo/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:   </features>
Oct 11 02:43:13 compute-0 nova_compute[356901]:   <clock offset="utc">
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <timer name="pit" tickpolicy="delay"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <timer name="rtc" tickpolicy="catchup"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <timer name="hpet" present="no"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:   </clock>
Oct 11 02:43:13 compute-0 nova_compute[356901]:   <cpu mode="host-model" match="exact">
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <topology sockets="1" cores="1" threads="1"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:43:13 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/8422017b-c868-4ba2-ab1f-61d3668ca145_disk">
Oct 11 02:43:13 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       </source>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:43:13 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <target dev="vda" bus="virtio"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <disk type="network" device="cdrom">
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/8422017b-c868-4ba2-ab1f-61d3668ca145_disk.config">
Oct 11 02:43:13 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       </source>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:43:13 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <target dev="sda" bus="sata"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <interface type="ethernet">
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <mac address="fa:16:3e:2c:af:96"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <driver name="vhost" rx_queue_size="512"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <mtu size="1442"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <target dev="tape00931c0-3d"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <serial type="pty">
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <log file="/var/lib/nova/instances/8422017b-c868-4ba2-ab1f-61d3668ca145/console.log" append="off"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     </serial>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <graphics type="vnc" autoport="yes" listen="::0"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <video>
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     </video>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <input type="tablet" bus="usb"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <rng model="virtio">
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <backend model="random">/dev/urandom</backend>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <controller type="usb" index="0"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     <memballoon model="virtio">
Oct 11 02:43:13 compute-0 nova_compute[356901]:       <stats period="10"/>
Oct 11 02:43:13 compute-0 nova_compute[356901]:     </memballoon>
Oct 11 02:43:13 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:43:13 compute-0 nova_compute[356901]: </domain>
Oct 11 02:43:13 compute-0 nova_compute[356901]:  _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7555
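The XML between "Start _get_guest_xml" and the line above is the domain document the driver hands to libvirt. As an illustration of how such a domain is typically defined and booted through the libvirt Python bindings (a generic sketch under those assumptions, not Nova's actual call path, which goes through nova.virt.libvirt.host):

    import libvirt

    with open('domain.xml') as f:           # e.g. the <domain> document above
        xml = f.read()
    conn = libvirt.open('qemu:///system')   # requires libvirtd and privileges
    dom = conn.defineXML(xml)               # persist the definition
    dom.create()                            # boot the defined domain
    print(dom.name(), dom.UUIDString())
    conn.close()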
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.182 2 DEBUG nova.compute.manager [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Preparing to wait for external event network-vif-plugged-e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 prepare_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:283
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.183 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.183 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" acquired by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.183 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" "released" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
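prepare_for_instance_event registers an event object under the per-instance "-events" lock before the VIF is plugged, so that when Neutron later delivers network-vif-plugged the waiting build thread is woken rather than the notification being lost. A simplified version of that register-then-wait protocol using the standard library (Nova itself uses eventlet events; all names here are illustrative):

    import threading

    _events = {}
    _lock = threading.Lock()

    def prepare(instance_uuid, name):
        with _lock:  # plays the role of the "...-events" lock above
            return _events.setdefault((instance_uuid, name), threading.Event())

    def deliver(instance_uuid, name):
        with _lock:
            ev = _events.pop((instance_uuid, name), None)
        if ev:
            ev.set()

    ev = prepare('8422017b', 'network-vif-plugged')
    # ... plug the VIF, then block until the callback arrives:
    # ev.wait(timeout=300)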
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.184 2 DEBUG nova.virt.libvirt.vif [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:43:05Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description=None,display_name='te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c',id=14,image_ref='2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data=None,key_name=None,keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={metering.server_group='44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=PciDeviceList,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='a05bbc8f872d4dd99972d2cb8136d608',ramdisk_id='',reservation_id='r-zwtwqn0d',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-PrometheusGabbiTest-674022988',owner_user_name='tempest-PrometheusGabbiTest-674022988-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:43:07Z,user_data='IyEvYmluL3NoCmVjaG8gJ0xvYWRpbmcgQ1BVJwpzZXQgLXYKY2F0IC9kZXYvdXJhbmRvbSA+IC9kZXYvbnVsbCAmIHNsZWVwIDMwMCA7IGtpbGwgJCEgCg==',user_id='f66a606299944d53a40f21e81c791d70',uuid=8422017b-c868-4ba2-ab1f-61d3668ca145,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "address": "fa:16:3e:2c:af:96", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.3.53", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape00931c0-3d", "ovs_interfaceid": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} plug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:710
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.185 2 DEBUG nova.network.os_vif_util [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Converting VIF {"id": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "address": "fa:16:3e:2c:af:96", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.3.53", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape00931c0-3d", "ovs_interfaceid": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.185 2 DEBUG nova.network.os_vif_util [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:2c:af:96,bridge_name='br-int',has_traffic_filtering=True,id=e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6,network=Network(3563b4a1-477a-44a0-b01f-7d19d49c0308),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tape00931c0-3d') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.186 2 DEBUG os_vif [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Plugging vif VIFOpenVSwitch(active=False,address=fa:16:3e:2c:af:96,bridge_name='br-int',has_traffic_filtering=True,id=e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6,network=Network(3563b4a1-477a-44a0-b01f-7d19d49c0308),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tape00931c0-3d') plug /usr/lib/python3.9/site-packages/os_vif/__init__.py:76
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.186 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.187 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddBridgeCommand(_result=None, name=br-int, may_exist=True, datapath_type=system) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.188 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.193 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.194 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tape00931c0-3d, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.194 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Interface, record=tape00931c0-3d, col_values=(('external_ids', {'iface-id': 'e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6', 'iface-status': 'active', 'attached-mac': 'fa:16:3e:2c:af:96', 'vm-uuid': '8422017b-c868-4ba2-ab1f-61d3668ca145'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.197 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:13 compute-0 NetworkManager[44908]: <info>  [1760150593.1982] manager: (tape00931c0-3d): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/72)
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.203 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.209 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.210 2 INFO os_vif [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Successfully plugged vif VIFOpenVSwitch(active=False,address=fa:16:3e:2c:af:96,bridge_name='br-int',has_traffic_filtering=True,id=e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6,network=Network(3563b4a1-477a-44a0-b01f-7d19d49c0308),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tape00931c0-3d')
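The AddBridgeCommand/AddPortCommand/DbSetCommand lines are os-vif driving ovsdbapp: one transaction idempotently ensures br-int exists (it "caused no change"), a second adds the tap port and stamps the Neutron port ID and MAC onto the Interface row so OVN can bind it. A hedged sketch of that second transaction through ovsdbapp's Open_vSwitch schema API (socket path assumed):

    from ovsdbapp.backend.ovs_idl import connection
    from ovsdbapp.schema.open_vswitch import impl_idl

    idl = connection.OvsdbIdl.from_server(
        'unix:/run/openvswitch/db.sock', 'Open_vSwitch')
    api = impl_idl.OvsdbIdl(connection.Connection(idl, timeout=5))

    with api.transaction(check_error=True) as txn:
        txn.add(api.add_port('br-int', 'tape00931c0-3d', may_exist=True))
        txn.add(api.db_set(
            'Interface', 'tape00931c0-3d',
            ('external_ids', {
                'iface-id': 'e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6',
                'iface-status': 'active',
                'attached-mac': 'fa:16:3e:2c:af:96',
                'vm-uuid': '8422017b-c868-4ba2-ab1f-61d3668ca145'})))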
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.281 2 DEBUG nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] No BDM found with device name vda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.283 2 DEBUG nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] No BDM found with device name sda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.283 2 DEBUG nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] No VIF found with MAC fa:16:3e:2c:af:96, not building metadata _build_interface_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12092
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.284 2 INFO nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Using config drive
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.324 2 DEBUG nova.storage.rbd_utils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] rbd image 8422017b-c868-4ba2-ab1f-61d3668ca145_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.358 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.358 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.358 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.358 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:43:13 compute-0 ceph-mon[191930]: pgmap v1969: 321 pgs: 321 active+clean; 333 MiB data, 436 MiB used, 60 GiB / 60 GiB avail; 1.7 MiB/s rd, 5.0 MiB/s wr, 99 op/s
Oct 11 02:43:13 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1602883105' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.702 2 INFO nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Creating config drive at /var/lib/nova/instances/8422017b-c868-4ba2-ab1f-61d3668ca145/disk.config
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.710 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): /usr/bin/mkisofs -o /var/lib/nova/instances/8422017b-c868-4ba2-ab1f-61d3668ca145/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpypdis27i execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.751 2 DEBUG nova.network.neutron [req-042a2dc7-7da4-4d7c-8518-65fa3228af8a req-08941a2e-6c6b-4bd4-b147-7034bd35eee2 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Updated VIF entry in instance network info cache for port e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.752 2 DEBUG nova.network.neutron [req-042a2dc7-7da4-4d7c-8518-65fa3228af8a req-08941a2e-6c6b-4bd4-b147-7034bd35eee2 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Updating instance_info_cache with network_info: [{"id": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "address": "fa:16:3e:2c:af:96", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.3.53", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape00931c0-3d", "ovs_interfaceid": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.773 2 DEBUG oslo_concurrency.lockutils [req-042a2dc7-7da4-4d7c-8518-65fa3228af8a req-08941a2e-6c6b-4bd4-b147-7034bd35eee2 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.856 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "/usr/bin/mkisofs -o /var/lib/nova/instances/8422017b-c868-4ba2-ab1f-61d3668ca145/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmpypdis27i" returned: 0 in 0.147s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
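The config drive is produced by handing a staged metadata directory to mkisofs with the Joliet/Rock Ridge flags shown, under the "config-2" volume label that cloud-init searches for. A minimal reconstruction of that invocation via the standard library (output and staging paths illustrative; Nova runs it through processutils as above):

    import subprocess

    subprocess.run(
        ['/usr/bin/mkisofs', '-o', '/tmp/disk.config',
         '-ldots', '-allow-lowercase', '-allow-multidot', '-l',
         '-publisher', 'OpenStack Compute', '-quiet',
         '-J', '-r', '-V', 'config-2',
         '/tmp/config_drive_staging'],  # dir holding openstack/latest/...
        check=True)                     # mirrors the "returned: 0" line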
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.868 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the polling cycle to take longer than expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.869 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.870 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.871 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.878 14 DEBUG ceilometer.compute.discovery [-] Querying metadata for instance 2a3deab0-7a22-486d-86a2-2fc870c8ab2d from Nova API get_server /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:176
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.879 14 DEBUG novaclient.v2.client [-] REQ: curl -g -i -X GET https://nova-internal.openstack.svc:8774/v2.1/servers/2a3deab0-7a22-486d-86a2-2fc870c8ab2d -H "Accept: application/json" -H "User-Agent: python-novaclient" -H "X-Auth-Token: {SHA256}d674387017edb5d8543811c363b3a2965950a94ddf4462840fede0e79ac258e9" -H "X-OpenStack-Nova-API-Version: 2.1" _http_log_request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:572
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.882 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:43:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
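The warning at 02:43:13.868 and the long run of "Registering pollster" lines above are one mechanism: the agent submits every pollster from the [pollsters] source to a shared ThreadPoolExecutor, and because the pool has fewer worker threads (here 1) than pollsters, submissions queue up and the polling cycle stretches out. A toy reproduction of that saturation pattern, assuming nothing about ceilometer's internals beyond what the log shows:

    # Sketch: more tasks than worker threads, so submissions queue and run
    # serially. 'poll' stands in for a pollster; the real manager lives in
    # ceilometer/polling/manager.py as the log paths show.
    import time
    from concurrent.futures import ThreadPoolExecutor

    def poll(name):
        time.sleep(0.1)          # pretend to collect one meter
        return name

    pollsters = [f'pollster-{i}' for i in range(20)]
    with ThreadPoolExecutor(max_workers=1) as executor:   # 1 thread, 20 tasks
        futures = [executor.submit(poll, p) for p in pollsters]
        results = [f.result() for f in futures]           # completes in order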
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.902 2 DEBUG nova.storage.rbd_utils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] rbd image 8422017b-c868-4ba2-ab1f-61d3668ca145_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:43:13 compute-0 nova_compute[356901]: 2025-10-11 02:43:13.915 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/8422017b-c868-4ba2-ab1f-61d3668ca145/disk.config 8422017b-c868-4ba2-ab1f-61d3668ca145_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.198 2 DEBUG oslo_concurrency.processutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/8422017b-c868-4ba2-ab1f-61d3668ca145/disk.config 8422017b-c868-4ba2-ab1f-61d3668ca145_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.283s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.199 2 INFO nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Deleting local config drive /var/lib/nova/instances/8422017b-c868-4ba2-ab1f-61d3668ca145/disk.config because it was imported into RBD.
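Lines 02:43:13.902 through 02:43:14.199 show the RBD-backed config-drive path: check whether the image already exists in the vms pool, shell out to rbd import when it does not, then delete the local ISO. A sketch of the CLI half with subprocess, reusing the arguments the log shows; the Ceph user and conf path come from the logged command, everything else is a placeholder:

    # Sketch: import a local file as an RBD image, then drop the local copy,
    # mirroring nova.storage.rbd_utils' use of the rbd CLI in the log.
    import os
    import subprocess

    def import_config_drive(local_path, image_name, pool='vms'):
        subprocess.run(
            ['rbd', 'import', '--pool', pool, local_path, image_name,
             '--image-format=2', '--id', 'openstack',
             '--conf', '/etc/ceph/ceph.conf'],
            check=True)
        os.unlink(local_path)   # the driver deletes the ISO once imported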
Oct 11 02:43:14 compute-0 kernel: tape00931c0-3d: entered promiscuous mode
Oct 11 02:43:14 compute-0 NetworkManager[44908]: <info>  [1760150594.2847] manager: (tape00931c0-3d): new Tun device (/org/freedesktop/NetworkManager/Devices/73)
Oct 11 02:43:14 compute-0 ovn_controller[88370]: 2025-10-11T02:43:14Z|00167|binding|INFO|Claiming lport e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 for this chassis.
Oct 11 02:43:14 compute-0 ovn_controller[88370]: 2025-10-11T02:43:14Z|00168|binding|INFO|e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6: Claiming fa:16:3e:2c:af:96 10.100.3.53
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.305 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.317 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.331 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:2c:af:96 10.100.3.53'], port_security=['fa:16:3e:2c:af:96 10.100.3.53'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.3.53/16', 'neutron:device_id': '8422017b-c868-4ba2-ab1f-61d3668ca145', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-3563b4a1-477a-44a0-b01f-7d19d49c0308', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'neutron:revision_number': '2', 'neutron:security_group_ids': 'd961c453-0bcb-43ec-b528-5018786739ee', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=4674209d-30ab-42f4-9114-728458c302a8, chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], tunnel_key=2, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6) old=Port_Binding(chassis=[]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.333 286362 INFO neutron.agent.ovn.metadata.agent [-] Port e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 in datapath 3563b4a1-477a-44a0-b01f-7d19d49c0308 bound to our chassis
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.335 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network 3563b4a1-477a-44a0-b01f-7d19d49c0308
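The PortBindingUpdatedEvent match above is ovsdbapp's row-event machinery: the metadata agent registers an event keyed on the Port_Binding table, and when the chassis column flips to this node it provisions the metadata namespace for that datapath. A rough sketch of such an event class; the constructor arguments follow the RowEvent convention printed in the log, but the class and handler body here are illustrative, not the agent's actual code:

    # Sketch of an ovsdbapp row event; cf. ovsdbapp/backend/ovs_idl/event.py.
    from ovsdbapp.backend.ovs_idl import event as row_event

    class PortBoundToChassisEvent(row_event.RowEvent):
        """Fire when a Port_Binding row is updated (e.g. chassis changed)."""

        def __init__(self):
            # events=('update',), table='Port_Binding', conditions=None,
            # matching the Matched UPDATE line above.
            super().__init__((self.ROW_UPDATE,), 'Port_Binding', None)

        def run(self, event, row, old):
            # In the real agent this is where metadata provisioning starts.
            print(f'Port {row.logical_port} bound; provisioning metadata')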
Oct 11 02:43:14 compute-0 systemd-machined[137586]: New machine qemu-15-instance-0000000e.
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.352 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[92dcf66b-f44c-41ff-8ed7-7124d4659816]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.354 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Creating VETH tap3563b4a1-41 in ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308 namespace provision_datapath /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:665
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.356 422955 DEBUG neutron.privileged.agent.linux.ip_lib [-] Interface tap3563b4a1-40 not found in namespace None get_link_id /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:204
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.356 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[28fd35ea-3070-4817-b162-2d652d3c0a8d]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.358 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[650bf42d-5fc2-4bff-8b77-1d889af916aa]: (4, False) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:14 compute-0 ovn_controller[88370]: 2025-10-11T02:43:14Z|00169|binding|INFO|Setting lport e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 ovn-installed in OVS
Oct 11 02:43:14 compute-0 ovn_controller[88370]: 2025-10-11T02:43:14Z|00170|binding|INFO|Setting lport e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 up in Southbound
Oct 11 02:43:14 compute-0 systemd[1]: Started Virtual Machine qemu-15-instance-0000000e.
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.364 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.379 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[6dbdc29b-44ad-4806-b02b-9ad194064d3c]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:14 compute-0 systemd-udevd[461002]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:43:14 compute-0 NetworkManager[44908]: <info>  [1760150594.4013] device (tape00931c0-3d): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 02:43:14 compute-0 NetworkManager[44908]: <info>  [1760150594.4022] device (tape00931c0-3d): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.415 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[053bf87d-b810-46d2-96d7-ba1b5a67321b]: (4, ('net.ipv4.conf.all.promote_secondaries = 1\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.468 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[dbd2e3bd-b9b7-4857-bad4-094b77956c59]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:14 compute-0 NetworkManager[44908]: <info>  [1760150594.4795] manager: (tap3563b4a1-40): new Veth device (/org/freedesktop/NetworkManager/Devices/74)
Oct 11 02:43:14 compute-0 systemd-udevd[461005]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.480 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[5cb88360-5e93-4627-a6c1-0e5c55dc78f3]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.520 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[89f1c9e2-aaa8-4155-a903-ff514da5fef4]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.523 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[ee31f55c-9824-4571-83bd-a753694e8ebc]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:14 compute-0 NetworkManager[44908]: <info>  [1760150594.5502] device (tap3563b4a1-40): carrier: link connected
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.556 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[6d00eb9f-6ce0-4f12-92dd-01d1af951977]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.578 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[a2dba09f-3d90-4d7c-9d51-4d3e90a2a872]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tap3563b4a1-41'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:25:cf:fd'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 47], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 694227, 'reachable_time': 22741, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 461034, 'error': None, 'target': 'ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.595 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[6a81c8a4-9af3-4aef-972f-5683a7c12983]: (4, ({'family': 10, 'prefixlen': 64, 'flags': 192, 'scope': 253, 'index': 2, 'attrs': [['IFA_ADDRESS', 'fe80::f816:3eff:fe25:cffd'], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 694227, 'tstamp': 694227}], ['IFA_FLAGS', 192]], 'header': {'length': 72, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 461035, 'error': None, 'target': 'ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'},)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.622 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[e7385e94-8a95-40bc-8468-144112d90515]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tap3563b4a1-41'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:25:cf:fd'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 110, 'tx_bytes': 90, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 47], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 694227, 'reachable_time': 22741, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 1, 'inoctets': 96, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 1, 'outoctets': 76, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 1, 'outmcastpkts': 1, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 96, 'outmcastoctets': 76, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 1, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 1, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 0, 'sequence_number': 255, 'pid': 461036, 'error': None, 'target': 'ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:14.649 14 DEBUG novaclient.v2.client [-] RESP: [200] Connection: Keep-Alive Content-Length: 1853 Content-Type: application/json Date: Sat, 11 Oct 2025 02:43:13 GMT Keep-Alive: timeout=5, max=100 OpenStack-API-Version: compute 2.1 Server: Apache Vary: OpenStack-API-Version,X-OpenStack-Nova-API-Version X-OpenStack-Nova-API-Version: 2.1 x-compute-request-id: req-4070f587-c907-49c1-b3dd-03169d52a4f1 x-openstack-request-id: req-4070f587-c907-49c1-b3dd-03169d52a4f1 _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:613
Oct 11 02:43:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:14.650 14 DEBUG novaclient.v2.client [-] RESP BODY: {"server": {"id": "2a3deab0-7a22-486d-86a2-2fc870c8ab2d", "name": "tempest-TestNetworkBasicOps-server-983701941", "status": "ACTIVE", "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "user_id": "bcba1b18a2ad479587a15fe415ae307a", "metadata": {}, "hostId": "205a3e9f71fd8445a0940e03ba24d3addf0d2e03c0605249523c8387", "image": {"id": "72f37f2e-4296-450e-9a12-10717f4ac7dc", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/images/72f37f2e-4296-450e-9a12-10717f4ac7dc"}]}, "flavor": {"id": "6dff30d1-85df-4e9c-9163-a20ba47bb0c7", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/flavors/6dff30d1-85df-4e9c-9163-a20ba47bb0c7"}]}, "created": "2025-10-11T02:41:19Z", "updated": "2025-10-11T02:41:28Z", "addresses": {"tempest-network-smoke--1333203908": [{"version": 4, "addr": "10.100.0.4", "OS-EXT-IPS:type": "fixed", "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:c6:9a:0a"}]}, "accessIPv4": "", "accessIPv6": "", "links": [{"rel": "self", "href": "https://nova-internal.openstack.svc:8774/v2.1/servers/2a3deab0-7a22-486d-86a2-2fc870c8ab2d"}, {"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/servers/2a3deab0-7a22-486d-86a2-2fc870c8ab2d"}], "OS-DCF:diskConfig": "MANUAL", "progress": 0, "OS-EXT-AZ:availability_zone": "nova", "config_drive": "True", "key_name": "tempest-TestNetworkBasicOps-1838815715", "OS-SRV-USG:launched_at": "2025-10-11T02:41:28.000000", "OS-SRV-USG:terminated_at": null, "security_groups": [{"name": "tempest-secgroup-smoke-1609620817"}], "OS-EXT-SRV-ATTR:host": "compute-0.ctlplane.example.com", "OS-EXT-SRV-ATTR:instance_name": "instance-0000000c", "OS-EXT-SRV-ATTR:hypervisor_hostname": "compute-0.ctlplane.example.com", "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-EXT-STS:power_state": 1, "os-extended-volumes:volumes_attached": []}} _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:648
Oct 11 02:43:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:14.650 14 DEBUG novaclient.v2.client [-] GET call to compute for https://nova-internal.openstack.svc:8774/v2.1/servers/2a3deab0-7a22-486d-86a2-2fc870c8ab2d used request id req-4070f587-c907-49c1-b3dd-03169d52a4f1 request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:1073
Oct 11 02:43:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:14.653 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '2a3deab0-7a22-486d-86a2-2fc870c8ab2d', 'name': 'tempest-TestNetworkBasicOps-server-983701941', 'flavor': {'id': '6dff30d1-85df-4e9c-9163-a20ba47bb0c7', 'name': 'm1.nano', 'vcpus': 1, 'ram': 128, 'disk': 1, 'ephemeral': 0, 'swap': 0}, 'image': {'id': '72f37f2e-4296-450e-9a12-10717f4ac7dc'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-0000000c', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '86dfc4ba5e494748b86bc9b983426779', 'user_id': 'bcba1b18a2ad479587a15fe415ae307a', 'hostId': '205a3e9f71fd8445a0940e03ba24d3addf0d2e03c0605249523c8387', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
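The REQ/RESP pair above is ceilometer's discovery step: one python-novaclient GET per instance, with keystoneauth logging the request as a curl line and hashing the token ({SHA256}...) before it reaches the log. A minimal equivalent with python-novaclient; the auth URL and credentials below are assumptions, only the endpoint version, microversion, and instance id mirror the log:

    # Sketch: fetch one server's metadata the way the discovery code does.
    # Credentials and auth_url are placeholders, not values from this cloud.
    from keystoneauth1 import session
    from keystoneauth1.identity import v3
    from novaclient import client as nova_client

    auth = v3.Password(auth_url='https://keystone.example.com/v3',
                       username='ceilometer', password='secret',
                       project_name='service',
                       user_domain_name='Default',
                       project_domain_name='Default')
    sess = session.Session(auth=auth)
    nova = nova_client.Client('2.1', session=sess)    # microversion from log
    server = nova.servers.get('2a3deab0-7a22-486d-86a2-2fc870c8ab2d')
    print(server.status, server.metadata)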
Oct 11 02:43:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:14.656 14 DEBUG ceilometer.compute.discovery [-] Querying metadata for instance 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 from Nova API get_server /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:176
Oct 11 02:43:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:14.657 14 DEBUG novaclient.v2.client [-] REQ: curl -g -i -X GET https://nova-internal.openstack.svc:8774/v2.1/servers/97d9494c-4ce4-4ff3-a0fa-d5cda135da98 -H "Accept: application/json" -H "User-Agent: python-novaclient" -H "X-Auth-Token: {SHA256}d674387017edb5d8543811c363b3a2965950a94ddf4462840fede0e79ac258e9" -H "X-OpenStack-Nova-API-Version: 2.1" _http_log_request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:572
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.672 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[7421b1ca-f46a-4f13-894d-83a031a1d0ce]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.766 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=15, options={'arp_ns_explicit_output': 'true', 'mac_prefix': 'fe:55:97', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'ce:9c:4f:b4:85:9b'}, ipsec=False) old=SB_Global(nb_cfg=14) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.768 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.774 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[fd286f3b-6952-4f66-b189-e94521119374]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.776 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tap3563b4a1-40, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.776 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.777 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tap3563b4a1-40, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.779 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:14 compute-0 NetworkManager[44908]: <info>  [1760150594.7799] manager: (tap3563b4a1-40): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/75)
Oct 11 02:43:14 compute-0 kernel: tap3563b4a1-40: entered promiscuous mode
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.784 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.786 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tap3563b4a1-40, col_values=(('external_ids', {'iface-id': 'bd6ddb48-868e-41a0-8ff2-0f3a1a9b4d81'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
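The three transactions above are the standard OVS plumbing for a metadata port: delete the tap from any stale bridge, add it to br-int, then set external_ids:iface-id so ovn-controller can match the interface to its logical port (the "Claiming lport" lines earlier). A hedged sketch of the same sequence through ovsdbapp's Open vSwitch API; the socket path is a common default, and error handling is omitted:

    # Sketch: plug an interface into br-int and tag it with iface-id,
    # mirroring the DelPortCommand/AddPortCommand/DbSetCommand in the log.
    from ovsdbapp.backend.ovs_idl import connection
    from ovsdbapp.schema.open_vswitch import impl_idl

    idl = connection.OvsdbIdl.from_server('unix:/run/openvswitch/db.sock',
                                          'Open_vSwitch')
    ovs = impl_idl.OvsdbIdl(connection.Connection(idl=idl, timeout=10))
    with ovs.transaction(check_error=True) as txn:
        txn.add(ovs.del_port('tap3563b4a1-40', bridge='br-ex', if_exists=True))
        txn.add(ovs.add_port('br-int', 'tap3563b4a1-40', may_exist=True))
        txn.add(ovs.db_set('Interface', 'tap3563b4a1-40',
                           ('external_ids',
                            {'iface-id': 'bd6ddb48-868e-41a0-8ff2-0f3a1a9b4d81'})))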
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.788 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:14 compute-0 ovn_controller[88370]: 2025-10-11T02:43:14Z|00171|binding|INFO|Releasing lport bd6ddb48-868e-41a0-8ff2-0f3a1a9b4d81 from this chassis (sb_readonly=0)
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.801 2 INFO nova.compute.manager [None req-113d25c3-c36e-46c5-8ed3-80fddb7e9611 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Get console output
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.810 6605 INFO nova.privsep.libvirt [-] Ignored error while reading from instance console pty: can't concat NoneType to bytes
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.815 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.815 286362 DEBUG neutron.agent.linux.utils [-] Unable to access /var/lib/neutron/external/pids/3563b4a1-477a-44a0-b01f-7d19d49c0308.pid.haproxy; Error: [Errno 2] No such file or directory: '/var/lib/neutron/external/pids/3563b4a1-477a-44a0-b01f-7d19d49c0308.pid.haproxy' get_value_from_file /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:252
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.818 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[d3986e3f-a964-4a1c-92b5-c690378f6e36]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.820 286362 DEBUG neutron.agent.ovn.metadata.driver [-] haproxy_cfg = 
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: global
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     log         /dev/log local0 debug
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     log-tag     haproxy-metadata-proxy-3563b4a1-477a-44a0-b01f-7d19d49c0308
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     user        root
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     group       root
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     maxconn     1024
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     pidfile     /var/lib/neutron/external/pids/3563b4a1-477a-44a0-b01f-7d19d49c0308.pid.haproxy
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     daemon
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: defaults
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     log global
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     mode http
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     option httplog
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     option dontlognull
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     option http-server-close
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     option forwardfor
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     retries                 3
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     timeout http-request    30s
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     timeout connect         30s
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     timeout client          32s
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     timeout server          32s
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     timeout http-keep-alive 30s
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: listen listener
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     bind 169.254.169.254:80
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     server metadata /var/lib/neutron/metadata_proxy
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:     http-request add-header X-OVN-Network-ID 3563b4a1-477a-44a0-b01f-7d19d49c0308
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]:  create_config_file /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/driver.py:107
Oct 11 02:43:14 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:14.820 286362 DEBUG neutron.agent.linux.utils [-] Running command: ['sudo', 'neutron-rootwrap', '/etc/neutron/rootwrap.conf', 'ip', 'netns', 'exec', 'ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308', 'env', 'PROCESS_TAG=haproxy-3563b4a1-477a-44a0-b01f-7d19d49c0308', 'haproxy', '-f', '/var/lib/neutron/ovn-metadata-proxy/3563b4a1-477a-44a0-b01f-7d19d49c0308.conf'] create_process /usr/lib/python3.9/site-packages/neutron/agent/linux/utils.py:84
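The rootwrap command above is the per-network metadata proxy recipe: render the haproxy config shown just before it, then exec haproxy inside the ovnmeta- namespace so it can bind 169.254.169.254:80 there. A bare-bones sketch of the same launch with the standard library, assuming the config file from the log already exists; running it for real requires root and the namespace in place:

    # Sketch: start haproxy inside a network namespace, as the agent does
    # via neutron-rootwrap. Paths mirror the logged command.
    import subprocess

    netns = 'ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308'
    cfg = ('/var/lib/neutron/ovn-metadata-proxy/'
           '3563b4a1-477a-44a0-b01f-7d19d49c0308.conf')
    subprocess.run(['ip', 'netns', 'exec', netns, 'haproxy', '-f', cfg],
                   check=True)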
Oct 11 02:43:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1970: 321 pgs: 321 active+clean; 344 MiB data, 443 MiB used, 60 GiB / 60 GiB avail; 2.0 MiB/s rd, 4.3 MiB/s wr, 98 op/s
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.892 2 DEBUG nova.compute.manager [req-f95df249-f46b-49d9-8367-a8c7c03e2f0b req-7025eff6-36d2-4395-a8eb-1af2b1d275ff 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Received event network-vif-plugged-e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.892 2 DEBUG oslo_concurrency.lockutils [req-f95df249-f46b-49d9-8367-a8c7c03e2f0b req-7025eff6-36d2-4395-a8eb-1af2b1d275ff 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.892 2 DEBUG oslo_concurrency.lockutils [req-f95df249-f46b-49d9-8367-a8c7c03e2f0b req-7025eff6-36d2-4395-a8eb-1af2b1d275ff 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.893 2 DEBUG oslo_concurrency.lockutils [req-f95df249-f46b-49d9-8367-a8c7c03e2f0b req-7025eff6-36d2-4395-a8eb-1af2b1d275ff 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.893 2 DEBUG nova.compute.manager [req-f95df249-f46b-49d9-8367-a8c7c03e2f0b req-7025eff6-36d2-4395-a8eb-1af2b1d275ff 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Processing event network-vif-plugged-e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10808
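The acquire/release triplet above is oslo.concurrency's named-lock pattern: the network-vif-plugged event is popped from the per-instance event table under an "<uuid>-events" lock so the thread waiting for the plug and the thread delivering it never race. A compressed illustration with the same library; the lock-name suffix is the only detail taken from the log, the event table is a stand-in:

    # Sketch: serialize access to a shared event table with
    # oslo_concurrency.lockutils, as the compute manager does per instance.
    from oslo_concurrency import lockutils

    events = {}   # (instance_uuid, event_name) -> payload

    def pop_instance_event(instance_uuid, event_name):
        with lockutils.lock(f'{instance_uuid}-events'):
            return events.pop((instance_uuid, event_name), None)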
Oct 11 02:43:14 compute-0 ceph-mon[191930]: pgmap v1970: 321 pgs: 321 active+clean; 344 MiB data, 443 MiB used, 60 GiB / 60 GiB avail; 2.0 MiB/s rd, 4.3 MiB/s wr, 98 op/s
Oct 11 02:43:14 compute-0 nova_compute[356901]: 2025-10-11 02:43:14.988 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
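The network_info blob above is the cached Neutron view Nova keeps per instance: a list of VIFs, each nesting subnets, fixed IPs, and any attached floating IPs. Pulling the addresses back out is plain dictionary walking; a short helper written against the structure exactly as logged (the field names are those in the line above):

    # Sketch: extract fixed and floating IPs from one cached VIF entry,
    # following the nesting shown in the logged network_info.
    def addresses(vif):
        for subnet in vif['network']['subnets']:
            for ip in subnet['ips']:
                yield ip['address'], 'fixed'
                for fip in ip.get('floating_ips', []):
                    yield fip['address'], 'floating'

    # For the entry above: list(addresses(network_info[0])) ->
    #   [('192.168.0.236', 'fixed'), ('192.168.122.201', 'floating')]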
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.018 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.018 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.101 2 DEBUG oslo_concurrency.lockutils [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.102 2 DEBUG oslo_concurrency.lockutils [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98" acquired by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.102 2 DEBUG oslo_concurrency.lockutils [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.102 2 DEBUG oslo_concurrency.lockutils [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" acquired by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.103 2 DEBUG oslo_concurrency.lockutils [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" "released" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.104 2 INFO nova.compute.manager [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Terminating instance
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.105 2 DEBUG nova.compute.manager [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Start destroying the instance on the hypervisor. _shutdown_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:3120
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.176 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:15 compute-0 kernel: tap6c94515a-55 (unregistering): left promiscuous mode
Oct 11 02:43:15 compute-0 NetworkManager[44908]: <info>  [1760150595.2134] device (tap6c94515a-55): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.227 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:15 compute-0 ovn_controller[88370]: 2025-10-11T02:43:15Z|00172|binding|INFO|Releasing lport 6c94515a-556d-4aeb-b39e-6e043f460cd8 from this chassis (sb_readonly=0)
Oct 11 02:43:15 compute-0 ovn_controller[88370]: 2025-10-11T02:43:15Z|00173|binding|INFO|Setting lport 6c94515a-556d-4aeb-b39e-6e043f460cd8 down in Southbound
Oct 11 02:43:15 compute-0 ovn_controller[88370]: 2025-10-11T02:43:15Z|00174|binding|INFO|Removing iface tap6c94515a-55 ovn-installed in OVS
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.230 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:15 compute-0 podman[461083]: 2025-10-11 02:43:15.232696728 +0000 UTC m=+0.118842385 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, name=ubi9-minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vcs-type=git, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., architecture=x86_64, build-date=2025-08-20T13:12:41, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, io.buildah.version=1.33.7, vendor=Red Hat, Inc., config_id=edpm, release=1755695350, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=openstack_network_exporter, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.component=ubi9-minimal-container, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, url=https://catalog.redhat.com/en/search?searchType=containers, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, version=9.6, managed_by=edpm_ansible)
Oct 11 02:43:15 compute-0 podman[461082]: 2025-10-11 02:43:15.239520578 +0000 UTC m=+0.120924591 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, tcib_managed=true)
Oct 11 02:43:15 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:15.239 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:0f:4d:31 10.100.0.3'], port_security=['fa:16:3e:0f:4d:31 10.100.0.3'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.3/28', 'neutron:device_id': '97d9494c-4ce4-4ff3-a0fa-d5cda135da98', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': '86dfc4ba5e494748b86bc9b983426779', 'neutron:revision_number': '4', 'neutron:security_group_ids': '99941801-8eae-4ed7-9f1f-5e6556715ff8', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:host_id': 'compute-0.ctlplane.example.com', 'neutron:port_fip': '192.168.122.233'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=b0ebf874-dd0a-4bac-aa4a-3eee85fcb8ba, chassis=[], tunnel_key=4, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=6c94515a-556d-4aeb-b39e-6e043f460cd8) old=Port_Binding(up=[True], chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:43:15 compute-0 podman[461084]: 2025-10-11 02:43:15.244790536 +0000 UTC m=+0.127883971 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
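The three podman entries above are periodic health_status events: each container's configured check (visible in config_data, e.g. '/openstack/healthcheck node_exporter') ran and reported healthy with a failing streak of 0. The same check can be replayed by hand with podman's healthcheck subcommand; a small sketch, assuming the container names from the log and local podman access:

    import subprocess

    # "podman healthcheck run NAME" executes the container's configured check
    # and exits 0 when it passes, matching the health_status=healthy events.
    def container_healthy(name: str) -> bool:
        result = subprocess.run(["podman", "healthcheck", "run", name],
                                capture_output=True, text=True)
        return result.returncode == 0

    for name in ("openstack_network_exporter", "ceilometer_agent_ipmi",
                 "node_exporter"):
        print(name, "healthy" if container_healthy(name) else "unhealthy")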
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.245 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:15 compute-0 rsyslogd[187706]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 02:43:15 compute-0 systemd[1]: machine-qemu\x2d14\x2dinstance\x2d0000000d.scope: Deactivated successfully.
Oct 11 02:43:15 compute-0 systemd[1]: machine-qemu\x2d14\x2dinstance\x2d0000000d.scope: Consumed 40.469s CPU time.
Oct 11 02:43:15 compute-0 systemd-machined[137586]: Machine qemu-14-instance-0000000d terminated.
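The scope name machine-qemu\x2d14\x2dinstance\x2d0000000d.scope uses systemd's unit-name escaping, in which characters such as "-" inside a name component are written as \xNN hex escapes. Decoding them makes the machine scopes greppable against the libvirt domain name (instance-0000000d) used elsewhere in this log; a small sketch:

    import re

    def unescape_unit(name: str) -> str:
        # Reverse systemd's \xNN escapes, e.g. \x2d back to "-".
        return re.sub(r"\\x([0-9a-fA-F]{2})",
                      lambda m: chr(int(m.group(1), 16)), name)

    print(unescape_unit(r"machine-qemu\x2d14\x2dinstance\x2d0000000d.scope"))
    # -> machine-qemu-14-instance-0000000d.scope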
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.340 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.348 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.358 2 INFO nova.virt.libvirt.driver [-] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Instance destroyed successfully.
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.360 2 DEBUG nova.objects.instance [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lazy-loading 'resources' on Instance uuid 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:43:15 compute-0 podman[461173]: 2025-10-11 02:43:15.363588471 +0000 UTC m=+0.066825838 container create 85289bc97b2aceb105bba40f2058284d6368e766d08e49b3131f65d5f6797ebc (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_managed=true)
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.375 2 DEBUG nova.virt.libvirt.vif [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:42:20Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=<?>,disable_terminate=False,display_description='tempest-TestNetworkBasicOps-server-1362866622',display_name='tempest-TestNetworkBasicOps-server-1362866622',ec2_ids=<?>,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-testnetworkbasicops-server-1362866622',id=13,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBBvOI8sBNl5TpioVKGpLN8dhq3coyN+dxFn+5vc5Z4DYhusOh+pMF8qIT/hioWLecBE4NbVqzpuQToM0paZi+FH/wtMu/qV3DwVgbNJMA/2dr3YQIFl6T0rS5QbQV7dDoQ==',key_name='tempest-TestNetworkBasicOps-845830948',keypairs=<?>,launch_index=0,launched_at=2025-10-11T02:42:31Z,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=<?>,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=<?>,power_state=1,progress=0,project_id='86dfc4ba5e494748b86bc9b983426779',ramdisk_id='',reservation_id='r-ww57d129',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_cdrom_bus='sata',image_hw_disk_bus='virtio',image_hw_input_bus='usb',image_hw_machine_type='q35',image_hw_pointer_model='usbtablet',image_hw_rng_model='virtio',image_hw_video_model='virtio',image_hw_vif_model='virtio',image_min_disk='1',image_min_ram='0',owner_project_name='tempest-TestNetworkBasicOps-494564743',owner_user_name='tempest-TestNetworkBasicOps-494564743-project-member'},tags=<?>,task_state='deleting',terminated_at=None,trusted_certs=<?>,updated_at=2025-10-11T02:42:31Z,user_data=None,user_id='bcba1b18a2ad479587a15fe415ae307a',uuid=97d9494c-4ce4-4ff3-a0fa-d5cda135da98,vcpu_model=<?>,vcpus=1,vm_mode=None,vm_state='active') vif={"id": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "address": "fa:16:3e:0f:4d:31", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.3", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.233", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap6c94515a-55", "ovs_interfaceid": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} unplug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:828
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.376 2 DEBUG nova.network.os_vif_util [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Converting VIF {"id": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "address": "fa:16:3e:0f:4d:31", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.3", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.233", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap6c94515a-55", "ovs_interfaceid": "6c94515a-556d-4aeb-b39e-6e043f460cd8", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.377 2 DEBUG nova.network.os_vif_util [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Converted object VIFOpenVSwitch(active=True,address=fa:16:3e:0f:4d:31,bridge_name='br-int',has_traffic_filtering=True,id=6c94515a-556d-4aeb-b39e-6e043f460cd8,network=Network(c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap6c94515a-55') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.378 2 DEBUG os_vif [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Unplugging vif VIFOpenVSwitch(active=True,address=fa:16:3e:0f:4d:31,bridge_name='br-int',has_traffic_filtering=True,id=6c94515a-556d-4aeb-b39e-6e043f460cd8,network=Network(c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap6c94515a-55') unplug /usr/lib/python3.9/site-packages/os_vif/__init__.py:109
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.380 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.380 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tap6c94515a-55, bridge=br-int, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.382 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.385 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.388 2 INFO os_vif [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Successfully unplugged vif VIFOpenVSwitch(active=True,address=fa:16:3e:0f:4d:31,bridge_name='br-int',has_traffic_filtering=True,id=6c94515a-556d-4aeb-b39e-6e043f460cd8,network=Network(c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap6c94515a-55')
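The unplug sequence from vif_type=ovs down to "Successfully unplugged vif" is nova calling the os-vif library: the nova VIF dict is converted to a VIFOpenVSwitch object (the os_vif_util lines above), then os_vif.unplug() hands it to the 'ovs' plugin, which removed the tap port from br-int via the DelPortCommand transaction logged at 02:43:15.380. A sketch of the same call outside nova; only a subset of the fields nova populates is shown (an assumption — a real unplug may need more of them), with values copied from the log:

    import os_vif
    from os_vif.objects import instance_info, vif

    os_vif.initialize()  # load the os-vif plugins (ovs, linux_bridge, ...)

    port = vif.VIFOpenVSwitch(
        id="6c94515a-556d-4aeb-b39e-6e043f460cd8",
        address="fa:16:3e:0f:4d:31",
        plugin="ovs",
        vif_name="tap6c94515a-55",
        bridge_name="br-int",
        has_traffic_filtering=True,
        preserve_on_delete=False)
    instance = instance_info.InstanceInfo(
        uuid="97d9494c-4ce4-4ff3-a0fa-d5cda135da98",
        name="instance-0000000d")

    os_vif.unplug(port, instance)  # delegates to the ovs plugin's unplug()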
Oct 11 02:43:15 compute-0 systemd[1]: Started libpod-conmon-85289bc97b2aceb105bba40f2058284d6368e766d08e49b3131f65d5f6797ebc.scope.
Oct 11 02:43:15 compute-0 podman[461173]: 2025-10-11 02:43:15.326276959 +0000 UTC m=+0.029514326 image pull 1061e4fafe13e0b9aa1ef2c904ba4ad70c44f3e87b1d831f16c6db34937f4022 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.425 14 DEBUG novaclient.v2.client [-] RESP: [200] Connection: Keep-Alive Content-Length: 1975 Content-Type: application/json Date: Sat, 11 Oct 2025 02:43:14 GMT Keep-Alive: timeout=5, max=99 OpenStack-API-Version: compute 2.1 Server: Apache Vary: OpenStack-API-Version,X-OpenStack-Nova-API-Version X-OpenStack-Nova-API-Version: 2.1 x-compute-request-id: req-e516d1b3-f7e7-40a8-8127-5f5fb6af12a5 x-openstack-request-id: req-e516d1b3-f7e7-40a8-8127-5f5fb6af12a5 _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:613
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.426 14 DEBUG novaclient.v2.client [-] RESP BODY: {"server": {"id": "97d9494c-4ce4-4ff3-a0fa-d5cda135da98", "name": "tempest-TestNetworkBasicOps-server-1362866622", "status": "ACTIVE", "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "user_id": "bcba1b18a2ad479587a15fe415ae307a", "metadata": {}, "hostId": "205a3e9f71fd8445a0940e03ba24d3addf0d2e03c0605249523c8387", "image": {"id": "72f37f2e-4296-450e-9a12-10717f4ac7dc", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/images/72f37f2e-4296-450e-9a12-10717f4ac7dc"}]}, "flavor": {"id": "6dff30d1-85df-4e9c-9163-a20ba47bb0c7", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/flavors/6dff30d1-85df-4e9c-9163-a20ba47bb0c7"}]}, "created": "2025-10-11T02:42:20Z", "updated": "2025-10-11T02:42:31Z", "addresses": {"tempest-network-smoke--1333203908": [{"version": 4, "addr": "10.100.0.3", "OS-EXT-IPS:type": "fixed", "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:0f:4d:31"}, {"version": 4, "addr": "192.168.122.233", "OS-EXT-IPS:type": "floating", "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:0f:4d:31"}]}, "accessIPv4": "", "accessIPv6": "", "links": [{"rel": "self", "href": "https://nova-internal.openstack.svc:8774/v2.1/servers/97d9494c-4ce4-4ff3-a0fa-d5cda135da98"}, {"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/servers/97d9494c-4ce4-4ff3-a0fa-d5cda135da98"}], "OS-DCF:diskConfig": "MANUAL", "progress": 0, "OS-EXT-AZ:availability_zone": "nova", "config_drive": "True", "key_name": "tempest-TestNetworkBasicOps-845830948", "OS-SRV-USG:launched_at": "2025-10-11T02:42:31.000000", "OS-SRV-USG:terminated_at": null, "security_groups": [{"name": "tempest-secgroup-smoke-1851838111"}], "OS-EXT-SRV-ATTR:host": "compute-0.ctlplane.example.com", "OS-EXT-SRV-ATTR:instance_name": "instance-0000000d", "OS-EXT-SRV-ATTR:hypervisor_hostname": "compute-0.ctlplane.example.com", "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-EXT-STS:power_state": 1, "os-extended-volumes:volumes_attached": []}} _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:648
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.426 14 DEBUG novaclient.v2.client [-] GET call to compute for https://nova-internal.openstack.svc:8774/v2.1/servers/97d9494c-4ce4-4ff3-a0fa-d5cda135da98 used request id req-e516d1b3-f7e7-40a8-8127-5f5fb6af12a5 request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:1073
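The ceilometer discovery REQ/RESP pairs above are plain python-novaclient calls against the internal compute endpoint, authenticated with a keystone token (the X-Auth-Token header is logged only as a SHA256 hash). The equivalent standalone call, with placeholder credentials and auth URL (assumptions; only the server UUID comes from the log):

    from keystoneauth1 import session
    from keystoneauth1.identity import v3
    from novaclient import client

    auth = v3.Password(auth_url="https://keystone-internal.openstack.svc:5000/v3",
                       username="ceilometer", password="secret",
                       project_name="service",
                       user_domain_name="Default",
                       project_domain_name="Default")
    nova = client.Client("2.1", session=session.Session(auth=auth))

    # Same GET /v2.1/servers/<uuid> as req-e516d1b3-f7e7-40a8-8127-5f5fb6af12a5.
    server = nova.servers.get("97d9494c-4ce4-4ff3-a0fa-d5cda135da98")
    print(server.status, getattr(server, "OS-EXT-STS:vm_state"))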
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.427 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '97d9494c-4ce4-4ff3-a0fa-d5cda135da98', 'name': 'tempest-TestNetworkBasicOps-server-1362866622', 'flavor': {'id': '6dff30d1-85df-4e9c-9163-a20ba47bb0c7', 'name': 'm1.nano', 'vcpus': 1, 'ram': 128, 'disk': 1, 'ephemeral': 0, 'swap': 0}, 'image': {'id': '72f37f2e-4296-450e-9a12-10717f4ac7dc'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-0000000d', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'shutdown', 'tenant_id': '86dfc4ba5e494748b86bc9b983426779', 'user_id': 'bcba1b18a2ad479587a15fe415ae307a', 'hostId': '205a3e9f71fd8445a0940e03ba24d3addf0d2e03c0605249523c8387', 'status': 'stopped', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.430 14 DEBUG ceilometer.compute.discovery [-] Querying metadata for instance 8422017b-c868-4ba2-ab1f-61d3668ca145 from Nova API get_server /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:176
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.432 14 DEBUG novaclient.v2.client [-] REQ: curl -g -i -X GET https://nova-internal.openstack.svc:8774/v2.1/servers/8422017b-c868-4ba2-ab1f-61d3668ca145 -H "Accept: application/json" -H "User-Agent: python-novaclient" -H "X-Auth-Token: {SHA256}d674387017edb5d8543811c363b3a2965950a94ddf4462840fede0e79ac258e9" -H "X-OpenStack-Nova-API-Version: 2.1" _http_log_request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:572
Oct 11 02:43:15 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:43:15 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/140f2c131d4939ae634803745ebdaf1b53146f7b51faed524507f0a6cf3bea3d/merged/var/lib/neutron supports timestamps until 2038 (0x7fffffff)
Oct 11 02:43:15 compute-0 podman[461173]: 2025-10-11 02:43:15.472716555 +0000 UTC m=+0.175953942 container init 85289bc97b2aceb105bba40f2058284d6368e766d08e49b3131f65d5f6797ebc (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:43:15 compute-0 podman[461173]: 2025-10-11 02:43:15.484511781 +0000 UTC m=+0.187749148 container start 85289bc97b2aceb105bba40f2058284d6368e766d08e49b3131f65d5f6797ebc (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:43:15 compute-0 neutron-haproxy-ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308[461211]: [NOTICE]   (461218) : New worker (461220) forked
Oct 11 02:43:15 compute-0 neutron-haproxy-ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308[461211]: [NOTICE]   (461218) : Loading success.
Oct 11 02:43:15 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:15.566 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 9 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274
Oct 11 02:43:15 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:15.567 286362 INFO neutron.agent.ovn.metadata.agent [-] Port 6c94515a-556d-4aeb-b39e-6e043f460cd8 in datapath c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635 unbound from our chassis
Oct 11 02:43:15 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:15.569 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635
Oct 11 02:43:15 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:15.586 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[7ba83baa-87ff-43a8-a072-475a24fbe08c]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:15 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:15.623 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[7148bb39-2b8b-452a-8752-3717d03f1bed]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:15 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:15.626 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[d84fe33f-12e6-4f07-8eb9-22f9973c40d3]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:15 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:15.668 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[5e0d2321-a215-43f7-8ed1-62721f2b6cf3]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:15 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:15.699 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[b774d02b-85d6-4104-bc9e-745c1e35c5cc]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tapc35c5e7e-41'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:b1:e9:cd'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 12, 'tx_packets': 7, 'rx_bytes': 1000, 'tx_bytes': 438, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 12, 'tx_packets': 7, 'rx_bytes': 1000, 'tx_bytes': 438, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 43], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 683520, 'reachable_time': 25585, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 8, 'inoctets': 720, 'indelivers': 1, 'outforwdatagrams': 0, 'outpkts': 3, 'outoctets': 228, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 8, 'outmcastpkts': 3, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 720, 'outmcastoctets': 228, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 8, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 1, 'inerrors': 0, 'outmsgs': 3, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 461234, 'error': None, 'target': 'ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:15 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:15.721 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[57bc0d33-9336-4c83-accf-6373ee03d480]: (4, ({'family': 2, 'prefixlen': 32, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '169.254.169.254'], ['IFA_LOCAL', '169.254.169.254'], ['IFA_BROADCAST', '169.254.169.254'], ['IFA_LABEL', 'tapc35c5e7e-41'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 683540, 'tstamp': 683540}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 461235, 'error': None, 'target': 'ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'}, {'family': 2, 'prefixlen': 28, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '10.100.0.2'], ['IFA_LOCAL', '10.100.0.2'], ['IFA_BROADCAST', '10.100.0.15'], ['IFA_LABEL', 'tapc35c5e7e-41'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 683545, 'tstamp': 683545}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 461235, 'error': None, 'target': 'ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'})) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
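The two privsep replies above are pyroute2-style netlink messages serialized as Python dicts: RTM_NEWLINK describes the tapc35c5e7e-41 veth inside the ovnmeta-c35c5e7e-... namespace, and RTM_NEWADDR shows its 169.254.169.254 metadata address plus 10.100.0.2/28 on the tenant subnet. Each message keeps its fields in an 'attrs' list of [NAME, value] pairs rather than a dict, so a tiny lookup helper is enough to mine them; the sample below is trimmed from the RTM_NEWADDR reply:

    def attr(msg, name, default=None):
        # netlink attributes are [NAME, value] pairs, not dict entries
        for key, value in msg.get("attrs", []):
            if key == name:
                return value
        return default

    newaddr = {"family": 2, "prefixlen": 32,
               "attrs": [["IFA_ADDRESS", "169.254.169.254"],
                         ["IFA_LABEL", "tapc35c5e7e-41"]]}
    print(attr(newaddr, "IFA_ADDRESS"))  # -> 169.254.169.254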
Oct 11 02:43:15 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:15.723 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapc35c5e7e-40, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.725 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.727 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:15 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:15.730 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tapc35c5e7e-40, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:43:15 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:15.730 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:43:15 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:15.731 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tapc35c5e7e-40, col_values=(('external_ids', {'iface-id': 'ffb676d8-51f5-4de3-a31a-71adc7412138'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:43:15 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:15.731 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
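The metadata agent reprovisions its datapath port with three idempotent ovsdbapp commands, each run here in its own transaction: DelPortCommand with if_exists=True, AddPortCommand with may_exist=True, and DbSetCommand for the iface-id external id; the "Transaction caused no change" lines mean the port was already in the desired state. A sketch of the same commands batched into a single transaction (the local ovsdb socket path is an assumption):

    from ovsdbapp.backend.ovs_idl import connection
    from ovsdbapp.schema.open_vswitch import impl_idl

    idl = connection.OvsdbIdl.from_server("unix:/run/openvswitch/db.sock",
                                          "Open_vSwitch")
    api = impl_idl.OvsdbIdl(connection.Connection(idl=idl, timeout=10))

    # Same three commands the agent logs, all if_exists/may_exist idempotent.
    with api.transaction(check_error=True) as txn:
        txn.add(api.del_port("tapc35c5e7e-40", bridge="br-ex", if_exists=True))
        txn.add(api.add_port("br-int", "tapc35c5e7e-40", may_exist=True))
        txn.add(api.db_set(
            "Interface", "tapc35c5e7e-40",
            ("external_ids",
             {"iface-id": "ffb676d8-51f5-4de3-a31a-71adc7412138"})))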
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.819 14 DEBUG novaclient.v2.client [-] RESP: [200] Connection: Keep-Alive Content-Length: 1691 Content-Type: application/json Date: Sat, 11 Oct 2025 02:43:15 GMT Keep-Alive: timeout=5, max=98 OpenStack-API-Version: compute 2.1 Server: Apache Vary: OpenStack-API-Version,X-OpenStack-Nova-API-Version X-OpenStack-Nova-API-Version: 2.1 x-compute-request-id: req-159109d4-b119-4901-b172-0cf9e57acc83 x-openstack-request-id: req-159109d4-b119-4901-b172-0cf9e57acc83 _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:613
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.819 14 DEBUG novaclient.v2.client [-] RESP BODY: {"server": {"id": "8422017b-c868-4ba2-ab1f-61d3668ca145", "name": "te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c", "status": "BUILD", "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "user_id": "f66a606299944d53a40f21e81c791d70", "metadata": {"metering.server_group": "44c4fdb3-6cdb-42b8-903d-5a2c79f0da20"}, "hostId": "cea8816d446065ba50379057f72b942db7e204c60c4530591bc7d0be", "image": {"id": "2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/images/2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c"}]}, "flavor": {"id": "6dff30d1-85df-4e9c-9163-a20ba47bb0c7", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/flavors/6dff30d1-85df-4e9c-9163-a20ba47bb0c7"}]}, "created": "2025-10-11T02:43:05Z", "updated": "2025-10-11T02:43:07Z", "addresses": {}, "accessIPv4": "", "accessIPv6": "", "links": [{"rel": "self", "href": "https://nova-internal.openstack.svc:8774/v2.1/servers/8422017b-c868-4ba2-ab1f-61d3668ca145"}, {"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/servers/8422017b-c868-4ba2-ab1f-61d3668ca145"}], "OS-DCF:diskConfig": "MANUAL", "progress": 0, "OS-EXT-AZ:availability_zone": "nova", "config_drive": "", "key_name": null, "OS-SRV-USG:launched_at": null, "OS-SRV-USG:terminated_at": null, "security_groups": [{"name": "default"}], "OS-EXT-SRV-ATTR:host": "compute-0.ctlplane.example.com", "OS-EXT-SRV-ATTR:instance_name": "instance-0000000e", "OS-EXT-SRV-ATTR:hypervisor_hostname": "compute-0.ctlplane.example.com", "OS-EXT-STS:task_state": "spawning", "OS-EXT-STS:vm_state": "building", "OS-EXT-STS:power_state": 0, "os-extended-volumes:volumes_attached": []}} _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:648
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.819 14 DEBUG novaclient.v2.client [-] GET call to compute for https://nova-internal.openstack.svc:8774/v2.1/servers/8422017b-c868-4ba2-ab1f-61d3668ca145 used request id req-159109d4-b119-4901-b172-0cf9e57acc83 request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:1073
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.822 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '8422017b-c868-4ba2-ab1f-61d3668ca145', 'name': 'te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c', 'flavor': {'id': '6dff30d1-85df-4e9c-9163-a20ba47bb0c7', 'name': 'm1.nano', 'vcpus': 1, 'ram': 128, 'disk': 1, 'ephemeral': 0, 'swap': 0}, 'image': {'id': '2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-0000000e', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'paused', 'tenant_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'user_id': 'f66a606299944d53a40f21e81c791d70', 'hostId': 'cea8816d446065ba50379057f72b942db7e204c60c4530591bc7d0be', 'status': 'paused', 'metadata': {'metering.server_group': '44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.826 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.826 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.826 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.826 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.827 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.827 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:43:15.826954) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.831 14 DEBUG ceilometer.compute.virt.libvirt.inspector [-] No delta meter predecessor for 2a3deab0-7a22-486d-86a2-2fc870c8ab2d / tape332b5d8-f3 inspect_vnics /usr/lib/python3.12/site-packages/ceilometer/compute/virt/libvirt/inspector.py:143
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.831 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/network.incoming.bytes volume: 20804 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
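"No delta meter predecessor" is not an error: network.incoming.bytes is a cumulative counter, and the very first reading for a vNIC has no earlier sample to diff against, so only the raw volume (20804 above) can be reported. From the second poll onward a cached predecessor turns the counter into a per-interval delta; a minimal sketch of that bookkeeping (the second value below is illustrative):

    _previous = {}

    def delta(key, cumulative):
        # Return None on the first reading (no predecessor), else the increment.
        prior = _previous.get(key)
        _previous[key] = cumulative
        return None if prior is None else cumulative - prior

    print(delta("tape332b5d8-f3", 20804))  # first poll -> None
    print(delta("tape332b5d8-f3", 21900))  # next poll  -> 1096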
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.833 14 DEBUG ceilometer.compute.pollsters [-] Instance 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 was shut off while getting sample of network.incoming.bytes: Failed to inspect data of instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>, domain state is SHUTOFF. get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:151
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.837 14 DEBUG ceilometer.compute.virt.libvirt.inspector [-] No delta meter predecessor for 8422017b-c868-4ba2-ab1f-61d3668ca145 / tape00931c0-3d inspect_vnics /usr/lib/python3.12/site-packages/ceilometer/compute/virt/libvirt/inspector.py:143
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.837 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.bytes volume: 110 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.842 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2856 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.843 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.843 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.843 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.843 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.843 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.843 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.843 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/network.outgoing.packets volume: 107 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.845 14 DEBUG ceilometer.compute.pollsters [-] Instance 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 was shut off while getting sample of network.outgoing.packets: Failed to inspect data of instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>, domain state is SHUTOFF. get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:151
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.845 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.845 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 24 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.846 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.846 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.846 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.846 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.846 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.846 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.846 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.845 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:43:15.843833) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.847 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:43:15.846640) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.848 14 DEBUG ceilometer.compute.pollsters [-] Instance 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 was shut off while getting sample of network.outgoing.packets.drop: Failed to inspect data of instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>, domain state is SHUTOFF. get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:151
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.848 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.848 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.849 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.849 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.849 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.849 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.849 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.849 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.849 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.850 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:43:15.849544) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.850 14 DEBUG ceilometer.compute.pollsters [-] Instance 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 was shut off while getting sample of network.outgoing.packets.error: Failed to inspect data of instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>, domain state is SHUTOFF. get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:151
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.851 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.851 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.851 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.851 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.851 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.851 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.852 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.852 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.852 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:43:15.852097) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.864 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.864 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:15 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.865 14 DEBUG ceilometer.compute.pollsters [-] Instance 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 was shut off while getting sample of disk.device.capacity: Failed to inspect data of instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>, domain state is SHUTOFF. get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:151
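Every "was shut off while getting sample" entry is the same race: instance-0000000d was destroyed at 02:43:15.358 but is still in the pollster's discovery list, so the libvirt inspector finds the domain in SHUTOFF and abandons the sample. Checking the domain state up front shows what the inspector sees; a read-only sketch using libvirt-python (domain name from the log):

    import libvirt

    conn = libvirt.openReadOnly("qemu:///system")
    dom = conn.lookupByName("instance-0000000d")
    state, reason = dom.state()  # returns (state, reason) integer codes
    if state == libvirt.VIR_DOMAIN_SHUTOFF:
        print("domain is shut off; skip stats collection")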
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.921 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.922 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.922 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.923 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.923 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
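The resource audit shells out to ceph exactly as logged. A standalone reproduction of that probe is sketched below; the 'stats'/'total_bytes'/'total_avail_bytes' keys are an assumption about the ceph df JSON schema rather than something shown in this log:

    # Re-run the `ceph df` probe Nova issues during update_available_resource.
    import json
    import subprocess

    out = subprocess.check_output([
        'ceph', 'df', '--format=json',
        '--id', 'openstack', '--conf', '/etc/ceph/ceph.conf',
    ])
    df = json.loads(out)
    stats = df.get('stats', {})  # assumed schema: cluster-wide totals
    print(stats.get('total_bytes'), stats.get('total_avail_bytes'))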
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.988 2 DEBUG nova.compute.manager [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Instance event wait completed in 0 seconds for network-vif-plugged wait_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:577
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.991 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150595.990025, 8422017b-c868-4ba2-ab1f-61d3668ca145 => Started> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.993 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] VM Started (Lifecycle Event)
Oct 11 02:43:15 compute-0 nova_compute[356901]: 2025-10-11 02:43:15.997 2 DEBUG nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Guest created on hypervisor spawn /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4417
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:15.999 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.000 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.capacity volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.006 2 INFO nova.virt.libvirt.driver [-] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Instance spawned successfully.
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.007 2 DEBUG nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Attempting to register defaults for the following image properties: ['hw_cdrom_bus', 'hw_disk_bus', 'hw_input_bus', 'hw_pointer_model', 'hw_video_model', 'hw_vif_model'] _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:917
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.023 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.023 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.024 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.025 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.025 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.025 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.026 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.026 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.026 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.027 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:43:16.026501) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.048 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.read.bytes volume: 30366208 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.051 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.read.bytes volume: 274750 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.051 2 INFO nova.virt.libvirt.driver [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Deleting instance files /var/lib/nova/instances/97d9494c-4ce4-4ff3-a0fa-d5cda135da98_del
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.053 14 DEBUG ceilometer.compute.pollsters [-] Instance 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 was shut off while getting sample of disk.device.read.bytes: Failed to inspect data of instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>, domain state is SHUTOFF. get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:151
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.053 2 INFO nova.virt.libvirt.driver [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Deletion of /var/lib/nova/instances/97d9494c-4ce4-4ff3-a0fa-d5cda135da98_del complete
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.060 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.069 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Synchronizing instance power state after lifecycle event "Started"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.075 2 DEBUG nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Found default for hw_cdrom_bus of sata _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.075 2 DEBUG nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Found default for hw_disk_bus of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.076 2 DEBUG nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Found default for hw_input_bus of usb _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.076 2 DEBUG nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Found default for hw_pointer_model of usbtablet _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.077 2 DEBUG nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Found default for hw_video_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.078 2 DEBUG nova.virt.libvirt.driver [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Found default for hw_vif_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.087 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.087 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.121 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.122 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150595.9902904, 8422017b-c868-4ba2-ab1f-61d3668ca145 => Paused> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.122 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] VM Paused (Lifecycle Event)
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.134 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.135 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.135 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.136 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.136 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.136 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.136 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.136 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.137 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.137 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.read.latency volume: 2204542667 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.137 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.read.latency volume: 173807011 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.137 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:43:16.137043) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.139 14 DEBUG ceilometer.compute.pollsters [-] Instance 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 was shut off while getting sample of disk.device.read.latency: Failed to inspect data of instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>, domain state is SHUTOFF. get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:151
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.139 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.139 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.139 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.139 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.140 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.140 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.140 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.140 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.140 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.140 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.140 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.140 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.read.requests volume: 1093 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.141 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.read.requests volume: 108 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.141 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:43:16.140860) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.142 14 DEBUG ceilometer.compute.pollsters [-] Instance 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 was shut off while getting sample of disk.device.read.requests: Failed to inspect data of instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>, domain state is SHUTOFF. get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:151
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.142 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.142 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.143 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.143 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.143 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.143 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.144 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.144 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.144 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.144 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.144 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.144 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.144 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:43:16.144399) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.144 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.146 14 DEBUG ceilometer.compute.pollsters [-] Instance 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 was shut off while getting sample of disk.device.usage: Failed to inspect data of instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>, domain state is SHUTOFF. get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:151
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.146 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.147 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.usage volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.147 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.147 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.147 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.148 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.148 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.148 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.148 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.148 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.148 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.148 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.write.bytes volume: 73097216 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.149 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:43:16.148776) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.149 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.150 14 DEBUG ceilometer.compute.pollsters [-] Instance 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 was shut off while getting sample of disk.device.write.bytes: Failed to inspect data of instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>, domain state is SHUTOFF. get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:151
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.150 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.151 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.151 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.151 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.152 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.152 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.152 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.152 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.152 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.153 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.153 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.153 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:43:16.153090) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.153 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.write.latency volume: 7846602865 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.153 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.155 14 DEBUG ceilometer.compute.pollsters [-] Instance 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 was shut off while getting sample of disk.device.write.latency: Failed to inspect data of instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>, domain state is SHUTOFF. get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:151
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.155 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.156 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.156 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.156 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.156 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.157 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.157 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.157 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.157 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.157 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.157 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.158 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:43:16.157746) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.158 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.168 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150595.9930491, 8422017b-c868-4ba2-ab1f-61d3668ca145 => Resumed> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.169 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] VM Resumed (Lifecycle Event)
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.173 2 INFO nova.compute.manager [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Took 8.22 seconds to spawn the instance on the hypervisor.
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.174 2 DEBUG nova.compute.manager [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.175 2 INFO nova.compute.manager [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Took 1.07 seconds to destroy the instance on the hypervisor.
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.175 2 DEBUG oslo.service.loopingcall [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Waiting for function nova.compute.manager.ComputeManager._try_deallocate_network.<locals>._deallocate_network_with_retries to return. func /usr/lib/python3.9/site-packages/oslo_service/loopingcall.py:435
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.176 2 DEBUG nova.compute.manager [-] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Deallocating network for instance _deallocate_network /usr/lib/python3.9/site-packages/nova/compute/manager.py:2259
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.176 2 DEBUG nova.network.neutron [-] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] deallocate_for_instance() deallocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1803
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.181 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: libvirt: QEMU Driver error : Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98'
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.183 14 DEBUG ceilometer.compute.pollsters [-] Exception while getting samples Error from libvirt while looking up instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>: [Error Code 42] Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98' get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:149
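Error code 42 is libvirt's VIR_ERR_NO_DOMAIN: Nova tore the guest down between ceilometer's discovery pass and the actual sampling. A minimal sketch of detecting that race with the libvirt Python bindings:

    # Distinguish "domain deleted under us" (code 42) from other libvirt
    # failures, as the pollster does when a poll races an instance delete.
    import libvirt

    conn = libvirt.open('qemu:///system')
    try:
        conn.lookupByUUIDString('97d9494c-4ce4-4ff3-a0fa-d5cda135da98')
    except libvirt.libvirtError as exc:
        if exc.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:  # == 42
            print('instance already gone; drop this sample')
        else:
            raise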
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.203 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.216 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.220 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.221 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.221 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.221 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.221 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.221 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.222 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.222 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.write.requests volume: 324 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.222 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: libvirt: QEMU Driver error : Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98'
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.223 14 DEBUG ceilometer.compute.pollsters [-] Exception while getting samples Error from libvirt while looking up instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>: [Error Code 42] Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98' get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:149
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.223 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.223 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:43:16.221989) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.223 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.223 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.224 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.224 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.224 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.224 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.225 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.225 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.225 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.225 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.225 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: libvirt: QEMU Driver error : Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98'
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.226 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Synchronizing instance power state after lifecycle event "Resumed"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.226 14 DEBUG ceilometer.compute.pollsters [-] Exception while getting samples Error from libvirt while looking up instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>: [Error Code 42] Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98' get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:149
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.226 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.226 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:43:16.225345) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.227 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.227 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.227 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.227 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.rate in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.227 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.228 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.228 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.rate heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.228 14 DEBUG ceilometer.compute.pollsters [-] LibvirtInspector does not provide data for IncomingBytesRatePollster get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:162
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.228 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.rate (2025-10-11T02:43:16.228149) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.228 14 ERROR ceilometer.polling.manager [-] Prevent pollster network.incoming.bytes.rate from polling [<NovaLikeServer: tempest-TestNetworkBasicOps-server-983701941>, <NovaLikeServer: tempest-TestNetworkBasicOps-server-1362866622>, <NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>] on source pollsters anymore!: ceilometer.polling.plugin_base.PollsterPermanentError: [<NovaLikeServer: tempest-TestNetworkBasicOps-server-983701941>, <NovaLikeServer: tempest-TestNetworkBasicOps-server-1362866622>, <NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>]
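That ERROR is ceilometer's permanent-blacklist path: when a pollster can never succeed for a set of resources (here, the libvirt inspector exposes no rate data), it raises PollsterPermanentError and the manager stops offering it those resources. A schematic pollster showing the contract; everything except PollsterPermanentError, whose import path appears in the log above, is illustrative:

    # Schematic pollster: raising PollsterPermanentError(resources) tells the
    # polling manager never to hand these resources to this pollster again.
    from ceilometer.polling import plugin_base

    class IncomingBytesRateSketch(plugin_base.PollsterBase):  # illustrative name
        @property
        def default_discovery(self):
            return 'local_instances'

        def get_samples(self, manager, cache, resources):
            # No rate data is available from this inspector, so polling these
            # resources can never succeed: blacklist them permanently.
            raise plugin_base.PollsterPermanentError(resources)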
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.229 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.229 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.229 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.229 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.229 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.229 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.230 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.230 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.230 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.230 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.230 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.230 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/network.incoming.packets volume: 124 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.231 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:43:16.229338) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: libvirt: QEMU Driver error : Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98'
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.231 14 DEBUG ceilometer.compute.pollsters [-] Exception while getting samples Error from libvirt while looking up instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>: [Error Code 42] Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98' get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:149
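Error code 42 is libvirt's VIR_ERR_NO_DOMAIN: instance-0000000d was still in the Nova-side discovery list, but its domain had already been destroyed on the hypervisor, a routine race while an instance is being deleted. A sketch of the tolerant lookup, assuming the libvirt-python bindings:

    import libvirt

    def lookup_domain(conn, uuid_str):
        try:
            return conn.lookupByUUIDString(uuid_str)
        except libvirt.libvirtError as exc:
            # VIR_ERR_NO_DOMAIN == 42: deleted between discovery and polling.
            if exc.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                return None
            raise

    conn = libvirt.open('qemu:///system')
    dom = lookup_domain(conn, '97d9494c-4ce4-4ff3-a0fa-d5cda135da98')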
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.231 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.231 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:43:16.230489) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.231 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.232 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.232 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.232 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.232 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.232 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.232 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.233 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.233 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:43:16.232761) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: libvirt: QEMU Driver error : Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98'
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.234 14 DEBUG ceilometer.compute.pollsters [-] Exception while getting samples Error from libvirt while looking up instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>: [Error Code 42] Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98' get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:149
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.234 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.234 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.234 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
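The *.delta meters above are produced by differencing a cumulative counter between consecutive polls, which is why a quiet polling interval reports volume 0. A minimal sketch of that computation (the cache layout is illustrative):

    _last = {}  # (instance_id, meter) -> previous cumulative reading

    def to_delta(instance_id, meter, value):
        key = (instance_id, meter)
        prev = _last.get(key)
        _last[key] = value
        if prev is None or value < prev:  # first poll, or counter reset
            return 0
        return value - prev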
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.235 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.235 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.235 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.235 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.235 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.236 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.236 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.236 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.236 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.236 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.237 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.237 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.236 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:43:16.235506) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.237 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:43:16.236987) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: libvirt: QEMU Driver error : Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98'
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.238 14 DEBUG ceilometer.compute.pollsters [-] Exception while getting samples Error from libvirt while looking up instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>: [Error Code 42] Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98' get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:149
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.238 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.238 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.238 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.239 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.239 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.239 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.239 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.239 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.239 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.240 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.240 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:43:16.239504) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: libvirt: QEMU Driver error : Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98'
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.241 14 DEBUG ceilometer.compute.pollsters [-] Exception while getting samples Error from libvirt while looking up instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>: [Error Code 42] Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98' get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:149
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.241 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.241 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.allocation volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.241 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.241 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.242 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.242 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.242 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.243 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.243 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.243 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.243 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.243 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.243 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:43:16.243311) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: libvirt: QEMU Driver error : Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98'
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.244 14 DEBUG ceilometer.compute.pollsters [-] Exception while getting samples Error from libvirt while looking up instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>: [Error Code 42] Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98' get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:149
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.244 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.244 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.244 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.244 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.245 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.245 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.245 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.245 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.245 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/cpu volume: 34630000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.245 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:43:16.245573) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: libvirt: QEMU Driver error : Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98'
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.246 14 DEBUG ceilometer.compute.pollsters [-] Exception while getting samples Error from libvirt while looking up instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>: [Error Code 42] Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98' get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:149
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.246 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/cpu volume: 150000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.246 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 55660000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.247 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.247 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.247 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.247 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.247 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.247 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.248 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:43:16.247851) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.247 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/network.outgoing.bytes volume: 15886 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: libvirt: QEMU Driver error : Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98'
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.249 14 DEBUG ceilometer.compute.pollsters [-] Exception while getting samples Error from libvirt while looking up instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>: [Error Code 42] Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98' get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:149
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.249 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.249 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2412 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.250 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.250 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.250 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.250 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.250 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.250 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.250 14 DEBUG ceilometer.compute.pollsters [-] 2a3deab0-7a22-486d-86a2-2fc870c8ab2d/memory.usage volume: 42.78125 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.251 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:43:16.250494) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: libvirt: QEMU Driver error : Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98'
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.251 14 DEBUG ceilometer.compute.pollsters [-] Exception while getting samples Error from libvirt while looking up instance <name=instance-0000000d, id=97d9494c-4ce4-4ff3-a0fa-d5cda135da98>: [Error Code 42] Domain not found: no domain with matching uuid '97d9494c-4ce4-4ff3-a0fa-d5cda135da98' get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:149
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.251 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/memory.usage volume: Unavailable _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.251 14 WARNING ceilometer.compute.pollsters [-] memory.usage statistic is not available for instance 8422017b-c868-4ba2-ab1f-61d3668ca145: ceilometer.compute.pollsters.NoVolumeException
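memory.usage is derived from the guest's balloon statistics, and instance 8422017b was spawned only seconds earlier, so the inspector has no value yet and the sample is skipped rather than reported as zero. A sketch of the underlying query, assuming libvirt-python; the available-minus-unused formula is an assumption that matches the MB-scale values logged above:

    import libvirt

    conn = libvirt.open('qemu:///system')
    dom = conn.lookupByUUIDString('0cc56d17-ec3a-4408-bccb-91b29427379e')
    stats = dom.memoryStats()  # balloon stats in KiB; may be sparse early on
    if 'available' in stats and 'unused' in stats:
        usage_mb = (stats['available'] - stats['unused']) / 1024.0
        print(usage_mb)  # e.g. 48.83984375 for the instance above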
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.252 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.83984375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.252 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.252 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.252 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.rate in the context of pollsters
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.252 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.252 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.253 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.rate heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.253 14 DEBUG ceilometer.compute.pollsters [-] LibvirtInspector does not provide data for OutgoingBytesRatePollster get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:162
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.253 14 ERROR ceilometer.polling.manager [-] Prevent pollster network.outgoing.bytes.rate from polling [<NovaLikeServer: tempest-TestNetworkBasicOps-server-983701941>, <NovaLikeServer: tempest-TestNetworkBasicOps-server-1362866622>, <NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>] on source pollsters anymore!: ceilometer.polling.plugin_base.PollsterPermanentError: [<NovaLikeServer: tempest-TestNetworkBasicOps-server-983701941>, <NovaLikeServer: tempest-TestNetworkBasicOps-server-1362866622>, <NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>]
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.254 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.254 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.254 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.254 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.255 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.255 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.255 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.rate (2025-10-11T02:43:16.252963) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.256 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.256 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.256 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.256 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.257 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.257 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.257 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.257 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.258 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.258 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.258 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.258 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.259 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.259 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.259 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.259 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.260 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.260 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.260 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:43:16.260 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.285 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:43:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.302 2 INFO nova.compute.manager [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Took 9.37 seconds to build instance.
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.326 2 DEBUG oslo_concurrency.lockutils [None req-6e17e89d-5658-4248-a8db-611deb3b2065 f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145" "released" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: held 9.492s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:43:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:43:16 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/370559405' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.411 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.488s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:43:16 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/370559405' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.494 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000c as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.495 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000c as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.499 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.500 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.504 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.505 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.505 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:43:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1971: 321 pgs: 321 active+clean; 323 MiB data, 430 MiB used, 60 GiB / 60 GiB avail; 2.0 MiB/s rd, 3.9 MiB/s wr, 105 op/s
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.959 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.960 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3403MB free_disk=59.84347915649414GB free_vcpus=5 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.961 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:16 compute-0 nova_compute[356901]: 2025-10-11 02:43:16.961 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
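The Acquiring/acquired/released triple is oslo.concurrency's standard lock tracing; the resource tracker serializes every view update behind a single "compute_resources" lock. A sketch of the pattern, using lockutils' prefixed synchronized decorator (the function body is illustrative):

    from oslo_concurrency import lockutils

    synchronized = lockutils.synchronized_with_prefix('nova-')

    @synchronized('compute_resources')
    def update_available_resource():
        # Mutate tracker state here; concurrent callers block until release,
        # producing the waited/held timings seen in the log.
        pass

    update_available_resource()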
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.077 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.077 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 2a3deab0-7a22-486d-86a2-2fc870c8ab2d actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.077 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.077 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 8422017b-c868-4ba2-ab1f-61d3668ca145 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.078 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 4 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.078 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1408MB phys_disk=59GB used_disk=5GB total_vcpus=8 used_vcpus=4 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
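The final view can be cross-checked against the placement allocations logged at 02:43:17.077; the extra 512 MB charged to the host is nova's default reserved_host_memory_mb, assumed here rather than read from this deployment's config:

    allocs = [
        {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1},  # 0cc56d17
        {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1},  # 2a3deab0
        {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1},  # 97d9494c
        {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1},  # 8422017b
    ]
    assert 512 + sum(a['MEMORY_MB'] for a in allocs) == 1408  # used_ram MB
    assert sum(a['DISK_GB'] for a in allocs) == 5             # used_disk GB
    assert sum(a['VCPU'] for a in allocs) == 4                # used_vcpus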
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.182 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
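The traced command can be reproduced with oslo.concurrency's processutils, the same wrapper nova is calling here; the JSON layout assumed below is the standard "ceph df --format=json" output and needs a reachable cluster plus the openstack keyring:

    import json
    from oslo_concurrency import processutils

    out, _err = processutils.execute(
        'ceph', 'df', '--format=json',
        '--id', 'openstack', '--conf', '/etc/ceph/ceph.conf')
    totals = json.loads(out)['stats']
    print(totals['total_bytes'], totals['total_avail_bytes'])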
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.226 2 DEBUG nova.compute.manager [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Received event network-vif-plugged-e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.227 2 DEBUG oslo_concurrency.lockutils [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.227 2 DEBUG oslo_concurrency.lockutils [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.228 2 DEBUG oslo_concurrency.lockutils [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
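[annotation] The acquire/release pair above is oslo.concurrency's named-lock pattern guarding the per-instance event queue; the "waited"/"held" durations bracket the critical section. A minimal sketch of the same pattern with lockutils (the lock body here is illustrative, not nova's):

    from oslo_concurrency import lockutils

    instance_uuid = '8422017b-c868-4ba2-ab1f-61d3668ca145'  # from the log above

    # lockutils.lock() is a context manager; entry corresponds to the
    # "acquired" line in the log and exit to the "released" line.
    with lockutils.lock(f'{instance_uuid}-events'):
        # _pop_event() would run here, popping any waiter registered for
        # the incoming network-vif-plugged event.
        pass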
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.228 2 DEBUG nova.compute.manager [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] No waiting events found dispatching network-vif-plugged-e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.228 2 WARNING nova.compute.manager [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Received unexpected event network-vif-plugged-e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 for instance with vm_state active and task_state None.
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.229 2 DEBUG nova.compute.manager [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Received event network-vif-unplugged-6c94515a-556d-4aeb-b39e-6e043f460cd8 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.229 2 DEBUG oslo_concurrency.lockutils [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.230 2 DEBUG oslo_concurrency.lockutils [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.230 2 DEBUG oslo_concurrency.lockutils [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.231 2 DEBUG nova.compute.manager [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] No waiting events found dispatching network-vif-unplugged-6c94515a-556d-4aeb-b39e-6e043f460cd8 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.231 2 DEBUG nova.compute.manager [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Received event network-vif-unplugged-6c94515a-556d-4aeb-b39e-6e043f460cd8 for instance with task_state deleting. _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10826
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.231 2 DEBUG nova.compute.manager [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Received event network-vif-plugged-6c94515a-556d-4aeb-b39e-6e043f460cd8 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.232 2 DEBUG oslo_concurrency.lockutils [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.232 2 DEBUG oslo_concurrency.lockutils [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.232 2 DEBUG oslo_concurrency.lockutils [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.232 2 DEBUG nova.compute.manager [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] No waiting events found dispatching network-vif-plugged-6c94515a-556d-4aeb-b39e-6e043f460cd8 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.233 2 WARNING nova.compute.manager [req-3b8496d5-7f11-4f90-a8e1-464205aebad1 req-9ae84ff0-ce34-4743-b981-8fc561dacd38 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Received unexpected event network-vif-plugged-6c94515a-556d-4aeb-b39e-6e043f460cd8 for instance with vm_state active and task_state deleting.
Oct 11 02:43:17 compute-0 ceph-mon[191930]: pgmap v1971: 321 pgs: 321 active+clean; 323 MiB data, 430 MiB used, 60 GiB / 60 GiB avail; 2.0 MiB/s rd, 3.9 MiB/s wr, 105 op/s
Oct 11 02:43:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:43:17 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1064333253' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.682 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.500s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
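[annotation] This is how nova sizes Ceph-backed DISK_GB: it shells out to ceph df with the openstack keyring and parses the JSON, which is why each resource-update pass produces a matching dispatch in the ceph-mon audit log above. A standalone sketch of the same call; the 'stats' key names follow the usual ceph df JSON layout and should be treated as an assumption:

    import json
    import subprocess

    # Same command string as the log line above.
    cmd = ['ceph', 'df', '--format=json', '--id', 'openstack',
           '--conf', '/etc/ceph/ceph.conf']
    out = subprocess.run(cmd, capture_output=True, text=True, check=True).stdout

    stats = json.loads(out)['stats']  # assumed keys: total_bytes, total_avail_bytes
    print('avail GiB:', stats['total_avail_bytes'] / 1024 ** 3)
    print('total GiB:', stats['total_bytes'] / 1024 ** 3)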
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.694 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.713 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
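[annotation] The inventory dict above fixes what the scheduler may place on this host: placement's effective capacity per resource class is (total - reserved) * allocation_ratio, which for the logged values works out to 32 schedulable VCPUs, 7168 MB of RAM, and 52.2 GB of disk:

    # Sketch: effective capacity implied by the logged inventory, using
    # placement's capacity formula (total - reserved) * allocation_ratio.
    inventory = {
        'VCPU':      {'total': 8,    'reserved': 0,   'allocation_ratio': 4.0},
        'MEMORY_MB': {'total': 7680, 'reserved': 512, 'allocation_ratio': 1.0},
        'DISK_GB':   {'total': 59,   'reserved': 1,   'allocation_ratio': 0.9},
    }

    for rc, inv in inventory.items():
        capacity = (inv['total'] - inv['reserved']) * inv['allocation_ratio']
        print(rc, round(capacity, 2))  # VCPU 32.0, MEMORY_MB 7168.0, DISK_GB 52.2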
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.754 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.754 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.793s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.755 2 DEBUG nova.network.neutron [-] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Updating instance_info_cache with network_info: [] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.777 2 INFO nova.compute.manager [-] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Took 1.60 seconds to deallocate network for instance.
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.846 2 DEBUG oslo_concurrency.lockutils [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.update_usage" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.847 2 DEBUG oslo_concurrency.lockutils [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.851 2 DEBUG nova.compute.manager [req-a9b631d9-7db7-424d-ab84-8c6d71f7b359 req-a4e4564f-5ae9-4ff8-95e1-bdcbcca40df3 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Received event network-vif-deleted-6c94515a-556d-4aeb-b39e-6e043f460cd8 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:43:17 compute-0 nova_compute[356901]: 2025-10-11 02:43:17.964 2 DEBUG oslo_concurrency.processutils [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:43:18 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1064333253' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:43:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:43:18 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/522804660' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:43:18 compute-0 nova_compute[356901]: 2025-10-11 02:43:18.532 2 DEBUG oslo_concurrency.processutils [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.568s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:43:18 compute-0 nova_compute[356901]: 2025-10-11 02:43:18.546 2 DEBUG nova.compute.provider_tree [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:43:18 compute-0 nova_compute[356901]: 2025-10-11 02:43:18.573 2 DEBUG nova.scheduler.client.report [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:43:18 compute-0 nova_compute[356901]: 2025-10-11 02:43:18.608 2 DEBUG oslo_concurrency.lockutils [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: held 0.761s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:43:18 compute-0 nova_compute[356901]: 2025-10-11 02:43:18.650 2 INFO nova.scheduler.client.report [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Deleted allocations for instance 97d9494c-4ce4-4ff3-a0fa-d5cda135da98
Oct 11 02:43:18 compute-0 nova_compute[356901]: 2025-10-11 02:43:18.724 2 DEBUG oslo_concurrency.lockutils [None req-204e165d-3be4-46c8-8683-f19bd5f0b123 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "97d9494c-4ce4-4ff3-a0fa-d5cda135da98" "released" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: held 3.622s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:43:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3600.1 total, 600.0 interval
                                            Cumulative writes: 10K writes, 41K keys, 10K commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 10K writes, 2796 syncs, 3.83 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 2104 writes, 7839 keys, 2104 commit groups, 1.0 writes per commit group, ingest: 8.42 MB, 0.01 MB/s
                                            Interval WAL: 2104 writes, 817 syncs, 2.58 writes per sync, written: 0.01 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
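[annotation] The RocksDB interval figures above are internally consistent: 2104 WAL writes over 817 syncs is the reported 2.58 writes per sync, with the stall counters staying at zero. A quick check:

    # Sketch: reproduce the "writes per sync" figure from the interval WAL
    # stats in the RocksDB dump above.
    interval_wal_writes = 2104
    interval_wal_syncs = 817

    print(round(interval_wal_writes / interval_wal_syncs, 2))  # -> 2.58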
Oct 11 02:43:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1972: 321 pgs: 321 active+clean; 265 MiB data, 406 MiB used, 60 GiB / 60 GiB avail; 2.4 MiB/s rd, 3.3 MiB/s wr, 141 op/s
Oct 11 02:43:19 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/522804660' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:43:19 compute-0 ceph-mon[191930]: pgmap v1972: 321 pgs: 321 active+clean; 265 MiB data, 406 MiB used, 60 GiB / 60 GiB avail; 2.4 MiB/s rd, 3.3 MiB/s wr, 141 op/s
Oct 11 02:43:20 compute-0 nova_compute[356901]: 2025-10-11 02:43:20.181 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:20 compute-0 nova_compute[356901]: 2025-10-11 02:43:20.383 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1973: 321 pgs: 321 active+clean; 265 MiB data, 406 MiB used, 60 GiB / 60 GiB avail; 2.2 MiB/s rd, 1.8 MiB/s wr, 93 op/s
Oct 11 02:43:20 compute-0 ceph-mon[191930]: pgmap v1973: 321 pgs: 321 active+clean; 265 MiB data, 406 MiB used, 60 GiB / 60 GiB avail; 2.2 MiB/s rd, 1.8 MiB/s wr, 93 op/s
Oct 11 02:43:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:43:21 compute-0 nova_compute[356901]: 2025-10-11 02:43:21.874 2 DEBUG oslo_concurrency.lockutils [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:21 compute-0 nova_compute[356901]: 2025-10-11 02:43:21.876 2 DEBUG oslo_concurrency.lockutils [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d" acquired by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: waited 0.003s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:21 compute-0 nova_compute[356901]: 2025-10-11 02:43:21.877 2 DEBUG oslo_concurrency.lockutils [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:21 compute-0 nova_compute[356901]: 2025-10-11 02:43:21.878 2 DEBUG oslo_concurrency.lockutils [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" acquired by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:21 compute-0 nova_compute[356901]: 2025-10-11 02:43:21.880 2 DEBUG oslo_concurrency.lockutils [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" "released" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: held 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:43:21 compute-0 nova_compute[356901]: 2025-10-11 02:43:21.883 2 INFO nova.compute.manager [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Terminating instance
Oct 11 02:43:21 compute-0 nova_compute[356901]: 2025-10-11 02:43:21.887 2 DEBUG nova.compute.manager [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Start destroying the instance on the hypervisor. _shutdown_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:3120
Oct 11 02:43:22 compute-0 kernel: tape332b5d8-f3 (unregistering): left promiscuous mode
Oct 11 02:43:22 compute-0 NetworkManager[44908]: <info>  [1760150602.0221] device (tape332b5d8-f3): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Oct 11 02:43:22 compute-0 ovn_controller[88370]: 2025-10-11T02:43:22Z|00175|binding|INFO|Releasing lport e332b5d8-f31d-4e8a-99d2-7cc96428d93a from this chassis (sb_readonly=0)
Oct 11 02:43:22 compute-0 ovn_controller[88370]: 2025-10-11T02:43:22Z|00176|binding|INFO|Setting lport e332b5d8-f31d-4e8a-99d2-7cc96428d93a down in Southbound
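[annotation] Releasing the lport means the Southbound Port_Binding row for e332b5d8-f31d-4e8a-99d2-7cc96428d93a loses its chassis and is marked down, which is exactly the update the metadata agent matches a few lines later (up=[False], chassis=[]). A hedged way to observe the same row from the host, assuming ovn-sbctl can reach the Southbound DB:

    import subprocess

    lport = 'e332b5d8-f31d-4e8a-99d2-7cc96428d93a'  # from the log above

    # "find" prints the matching Port_Binding row; after the release its
    # chassis column is empty and up is false.
    subprocess.run(['ovn-sbctl', 'find', 'Port_Binding',
                    f'logical_port={lport}'], check=True)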
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.039 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:22 compute-0 ovn_controller[88370]: 2025-10-11T02:43:22Z|00177|binding|INFO|Removing iface tape332b5d8-f3 ovn-installed in OVS
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.052 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:22.056 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:c6:9a:0a 10.100.0.4'], port_security=['fa:16:3e:c6:9a:0a 10.100.0.4'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.0.4/28', 'neutron:device_id': '2a3deab0-7a22-486d-86a2-2fc870c8ab2d', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': '86dfc4ba5e494748b86bc9b983426779', 'neutron:revision_number': '4', 'neutron:security_group_ids': '856a6c8c-5c19-45b0-83e8-c2918301c124', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:host_id': 'compute-0.ctlplane.example.com'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=b0ebf874-dd0a-4bac-aa4a-3eee85fcb8ba, chassis=[], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=e332b5d8-f31d-4e8a-99d2-7cc96428d93a) old=Port_Binding(up=[True], chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:43:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:22.059 286362 INFO neutron.agent.ovn.metadata.agent [-] Port e332b5d8-f31d-4e8a-99d2-7cc96428d93a in datapath c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635 unbound from our chassis
Oct 11 02:43:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:22.062 286362 DEBUG neutron.agent.ovn.metadata.agent [-] No valid VIF ports were found for network c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635, tearing the namespace down if needed _get_provision_params /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:628
Oct 11 02:43:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:22.064 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[37dbae9b-041a-475a-840e-90fba318b53a]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:22.065 286362 INFO neutron.agent.ovn.metadata.agent [-] Cleaning up ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635 namespace which is not needed anymore
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.074 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:22 compute-0 systemd[1]: machine-qemu\x2d13\x2dinstance\x2d0000000c.scope: Deactivated successfully.
Oct 11 02:43:22 compute-0 systemd[1]: machine-qemu\x2d13\x2dinstance\x2d0000000c.scope: Consumed 48.507s CPU time.
Oct 11 02:43:22 compute-0 systemd-machined[137586]: Machine qemu-13-instance-0000000c terminated.
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.145 2 INFO nova.virt.libvirt.driver [-] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Instance destroyed successfully.
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.146 2 DEBUG nova.objects.instance [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lazy-loading 'resources' on Instance uuid 2a3deab0-7a22-486d-86a2-2fc870c8ab2d obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.170 2 DEBUG nova.virt.libvirt.vif [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:41:19Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=<?>,disable_terminate=False,display_description='tempest-TestNetworkBasicOps-server-983701941',display_name='tempest-TestNetworkBasicOps-server-983701941',ec2_ids=<?>,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='tempest-testnetworkbasicops-server-983701941',id=12,image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data='ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBMITvXHGkIM8LS83kfV77RgpBE+Sw7Cf/gWnJ4njPpAm2utZ405mb/3SnZv98p+/HwAaSeNUeKJwLq/o7HlE9jBBurf1QCYsMBy+p+t8FriZaItil7Hb0u4A6Vs88VckEQ==',key_name='tempest-TestNetworkBasicOps-1838815715',keypairs=<?>,launch_index=0,launched_at=2025-10-11T02:41:28Z,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={},migration_context=<?>,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=<?>,power_state=1,progress=0,project_id='86dfc4ba5e494748b86bc9b983426779',ramdisk_id='',reservation_id='r-43re01xt',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='72f37f2e-4296-450e-9a12-10717f4ac7dc',image_container_format='bare',image_disk_format='qcow2',image_hw_cdrom_bus='sata',image_hw_disk_bus='virtio',image_hw_input_bus='usb',image_hw_machine_type='q35',image_hw_pointer_model='usbtablet',image_hw_rng_model='virtio',image_hw_video_model='virtio',image_hw_vif_model='virtio',image_min_disk='1',image_min_ram='0',owner_project_name='tempest-TestNetworkBasicOps-494564743',owner_user_name='tempest-TestNetworkBasicOps-494564743-project-member'},tags=<?>,task_state='deleting',terminated_at=None,trusted_certs=<?>,updated_at=2025-10-11T02:41:28Z,user_data=None,user_id='bcba1b18a2ad479587a15fe415ae307a',uuid=2a3deab0-7a22-486d-86a2-2fc870c8ab2d,vcpu_model=<?>,vcpus=1,vm_mode=None,vm_state='active') vif={"id": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "address": "fa:16:3e:c6:9a:0a", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape332b5d8-f3", "ovs_interfaceid": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} unplug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:828
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.171 2 DEBUG nova.network.os_vif_util [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Converting VIF {"id": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "address": "fa:16:3e:c6:9a:0a", "network": {"id": "c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635", "bridge": "br-int", "label": "tempest-network-smoke--1333203908", "subnets": [{"cidr": "10.100.0.0/28", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.0.4", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "86dfc4ba5e494748b86bc9b983426779", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape332b5d8-f3", "ovs_interfaceid": "e332b5d8-f31d-4e8a-99d2-7cc96428d93a", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.173 2 DEBUG nova.network.os_vif_util [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Converted object VIFOpenVSwitch(active=True,address=fa:16:3e:c6:9a:0a,bridge_name='br-int',has_traffic_filtering=True,id=e332b5d8-f31d-4e8a-99d2-7cc96428d93a,network=Network(c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tape332b5d8-f3') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.174 2 DEBUG os_vif [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Unplugging vif VIFOpenVSwitch(active=True,address=fa:16:3e:c6:9a:0a,bridge_name='br-int',has_traffic_filtering=True,id=e332b5d8-f31d-4e8a-99d2-7cc96428d93a,network=Network(c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tape332b5d8-f3') unplug /usr/lib/python3.9/site-packages/os_vif/__init__.py:109
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.177 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.178 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tape332b5d8-f3, bridge=br-int, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.183 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.185 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.188 2 INFO os_vif [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Successfully unplugged vif VIFOpenVSwitch(active=True,address=fa:16:3e:c6:9a:0a,bridge_name='br-int',has_traffic_filtering=True,id=e332b5d8-f31d-4e8a-99d2-7cc96428d93a,network=Network(c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tape332b5d8-f3')
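[annotation] The unplug itself is a single ovsdb transaction: the DelPortCommand with if_exists=True against br-int logged a few lines up. The CLI equivalent, sketched with the port and bridge names taken from the log:

    import subprocess

    # Same effect as the logged DelPortCommand(if_exists=True): remove the
    # tap port from br-int, succeeding silently if it is already gone.
    subprocess.run(['ovs-vsctl', '--if-exists', 'del-port',
                    'br-int', 'tape332b5d8-f3'], check=True)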
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.279 2 DEBUG nova.compute.manager [req-c2292ff1-1468-4961-93a0-c9b7a09cc96c req-ec16b792-58ce-4331-8230-b2aa0537122a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Received event network-vif-unplugged-e332b5d8-f31d-4e8a-99d2-7cc96428d93a external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.279 2 DEBUG oslo_concurrency.lockutils [req-c2292ff1-1468-4961-93a0-c9b7a09cc96c req-ec16b792-58ce-4331-8230-b2aa0537122a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.279 2 DEBUG oslo_concurrency.lockutils [req-c2292ff1-1468-4961-93a0-c9b7a09cc96c req-ec16b792-58ce-4331-8230-b2aa0537122a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.279 2 DEBUG oslo_concurrency.lockutils [req-c2292ff1-1468-4961-93a0-c9b7a09cc96c req-ec16b792-58ce-4331-8230-b2aa0537122a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.280 2 DEBUG nova.compute.manager [req-c2292ff1-1468-4961-93a0-c9b7a09cc96c req-ec16b792-58ce-4331-8230-b2aa0537122a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] No waiting events found dispatching network-vif-unplugged-e332b5d8-f31d-4e8a-99d2-7cc96428d93a pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.280 2 DEBUG nova.compute.manager [req-c2292ff1-1468-4961-93a0-c9b7a09cc96c req-ec16b792-58ce-4331-8230-b2aa0537122a 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Received event network-vif-unplugged-e332b5d8-f31d-4e8a-99d2-7cc96428d93a for instance with task_state deleting. _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10826
Oct 11 02:43:22 compute-0 neutron-haproxy-ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635[458476]: [NOTICE]   (458480) : haproxy version is 2.8.14-c23fe91
Oct 11 02:43:22 compute-0 neutron-haproxy-ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635[458476]: [NOTICE]   (458480) : path to executable is /usr/sbin/haproxy
Oct 11 02:43:22 compute-0 neutron-haproxy-ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635[458476]: [WARNING]  (458480) : Exiting Master process...
Oct 11 02:43:22 compute-0 neutron-haproxy-ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635[458476]: [ALERT]    (458480) : Current worker (458482) exited with code 143 (Terminated)
Oct 11 02:43:22 compute-0 neutron-haproxy-ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635[458476]: [WARNING]  (458480) : All workers exited. Exiting... (0)
Oct 11 02:43:22 compute-0 systemd[1]: libpod-13f91ee0043106e729c13bb04ee2289ca5c3e36406333ea3ecfe7dbd1fd7590e.scope: Deactivated successfully.
Oct 11 02:43:22 compute-0 podman[461344]: 2025-10-11 02:43:22.310572476 +0000 UTC m=+0.082086258 container died 13f91ee0043106e729c13bb04ee2289ca5c3e36406333ea3ecfe7dbd1fd7590e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:43:22 compute-0 systemd[1]: var-lib-containers-storage-overlay-5138affe420e75c815ea5d7a265622e95016109742cc4959933cbbd024cbf1f9-merged.mount: Deactivated successfully.
Oct 11 02:43:22 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-13f91ee0043106e729c13bb04ee2289ca5c3e36406333ea3ecfe7dbd1fd7590e-userdata-shm.mount: Deactivated successfully.
Oct 11 02:43:22 compute-0 podman[461344]: 2025-10-11 02:43:22.384538884 +0000 UTC m=+0.156052676 container cleanup 13f91ee0043106e729c13bb04ee2289ca5c3e36406333ea3ecfe7dbd1fd7590e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.schema-version=1.0)
Oct 11 02:43:22 compute-0 systemd[1]: libpod-conmon-13f91ee0043106e729c13bb04ee2289ca5c3e36406333ea3ecfe7dbd1fd7590e.scope: Deactivated successfully.
Oct 11 02:43:22 compute-0 podman[461388]: 2025-10-11 02:43:22.491397232 +0000 UTC m=+0.065343477 container remove 13f91ee0043106e729c13bb04ee2289ca5c3e36406333ea3ecfe7dbd1fd7590e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:43:22 compute-0 podman[461381]: 2025-10-11 02:43:22.498966477 +0000 UTC m=+0.096705115 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, architecture=x86_64, io.buildah.version=1.29.0, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., config_id=edpm, version=9.4, io.openshift.expose-services=, name=ubi9, com.redhat.component=ubi9-container, io.openshift.tags=base rhel9, maintainer=Red Hat, Inc., container_name=kepler, build-date=2024-09-18T21:23:30, io.k8s.display-name=Red Hat Universal Base Image 9, managed_by=edpm_ansible, distribution-scope=public, release-0.7.12=, vendor=Red Hat, Inc.)
Oct 11 02:43:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:22.507 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[64901454-d927-4538-9bdb-9cee7a6a277b]: (4, ('Sat Oct 11 02:43:22 AM UTC 2025 Stopping container neutron-haproxy-ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635 (13f91ee0043106e729c13bb04ee2289ca5c3e36406333ea3ecfe7dbd1fd7590e)\n13f91ee0043106e729c13bb04ee2289ca5c3e36406333ea3ecfe7dbd1fd7590e\nSat Oct 11 02:43:22 AM UTC 2025 Deleting container neutron-haproxy-ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635 (13f91ee0043106e729c13bb04ee2289ca5c3e36406333ea3ecfe7dbd1fd7590e)\n13f91ee0043106e729c13bb04ee2289ca5c3e36406333ea3ecfe7dbd1fd7590e\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
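[annotation] The privsep reply above carries the wrapper's stdout: once the network no longer hosts any VIFs, the haproxy metadata sidecar container is stopped, then deleted. The same two podman steps, sketched with the container name from the log:

    import subprocess

    name = 'neutron-haproxy-ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635'

    # "podman stop" terminates the container (the haproxy worker exits
    # with code 143 / SIGTERM above), then the stopped container is removed.
    subprocess.run(['podman', 'stop', name], check=True)
    subprocess.run(['podman', 'rm', name], check=True)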
Oct 11 02:43:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:22.509 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[5d6f3d4b-b0da-4bed-b51c-f2807b1d5f35]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:22.511 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tapc35c5e7e-40, bridge=None, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:43:22 compute-0 kernel: tapc35c5e7e-40: left promiscuous mode
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.515 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.531 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:22.534 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[ec29f61e-a2f9-4190-b76e-07623e0f4c50]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.533 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:22.560 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[8120d076-d147-44f1-be66-d06c04beaed1]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:22.562 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[eb4da24f-ff98-48de-87c1-330a6956b6c8]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:22.580 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[40fbdebf-7790-4d64-bdd3-f48282efb5e1]: (4, [{'family': 0, '__align': (), 'ifi_type': 772, 'index': 1, 'flags': 65609, 'change': 0, 'attrs': [['IFLA_IFNAME', 'lo'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UNKNOWN'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 65536], ['IFLA_MIN_MTU', 0], ['IFLA_MAX_MTU', 0], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 1], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 1], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 0], ['IFLA_CARRIER_UP_COUNT', 0], ['IFLA_CARRIER_DOWN_COUNT', 0], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', '00:00:00:00:00:00'], ['IFLA_BROADCAST', '00:00:00:00:00:00'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 1, 'nopolicy': 1, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 683511, 'reachable_time': 15421, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 65536, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 4294967295, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 4294967295, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 0, 'inoctets': 0, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 0, 'outoctets': 0, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 0, 'outmcastpkts': 0, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 0, 'outmcastoctets': 0, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 0, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 0, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1404, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 461416, 'error': None, 'target': 'ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:22 compute-0 systemd[1]: run-netns-ovnmeta\x2dc35c5e7e\x2d4510\x2d4b5a\x2db4bb\x2d7a6bc4d44635.mount: Deactivated successfully.
Oct 11 02:43:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:22.584 286647 DEBUG neutron.privileged.agent.linux.ip_lib [-] Namespace ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635 deleted. remove_netns /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:607
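[annotation] remove_netns is the last step of the teardown: with no VIFs left on the network, the ovnmeta- namespace itself is deleted, and systemd reports the matching run-netns mount unit deactivating just above. A minimal sketch with pyroute2, which neutron's privileged ip_lib wraps; this needs root/CAP_SYS_ADMIN:

    from pyroute2 import netns

    # Deleting the namespace unmounts /run/netns/<name>, matching the
    # "run-netns-ovnmeta...mount: Deactivated successfully" line above.
    netns.remove('ovnmeta-c35c5e7e-4510-4b5a-b4bb-7a6bc4d44635')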
Oct 11 02:43:22 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:22.584 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[d132d98c-1fb3-43cc-845f-1c073cc23fba]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.855 2 INFO nova.virt.libvirt.driver [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Deleting instance files /var/lib/nova/instances/2a3deab0-7a22-486d-86a2-2fc870c8ab2d_del
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.856 2 INFO nova.virt.libvirt.driver [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Deletion of /var/lib/nova/instances/2a3deab0-7a22-486d-86a2-2fc870c8ab2d_del complete
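[annotation] Nova's libvirt driver deletes instance files by first renaming the instance directory to <uuid>_del and then removing that, which is why both log lines above refer to the _del path. A sketch of the rename-then-delete idiom, with paths from the log; this is an approximation, not nova's actual implementation:

    import os
    import shutil

    inst_dir = '/var/lib/nova/instances/2a3deab0-7a22-486d-86a2-2fc870c8ab2d'
    target = inst_dir + '_del'

    # Renaming first makes the original path vanish atomically; the bulk
    # removal can then proceed (or be retried) on the _del copy.
    os.rename(inst_dir, target)
    shutil.rmtree(target, ignore_errors=True)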
Oct 11 02:43:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1974: 321 pgs: 321 active+clean; 265 MiB data, 406 MiB used, 60 GiB / 60 GiB avail; 3.5 MiB/s rd, 1.8 MiB/s wr, 136 op/s
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.909 2 INFO nova.compute.manager [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Took 1.02 seconds to destroy the instance on the hypervisor.
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.910 2 DEBUG oslo.service.loopingcall [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Waiting for function nova.compute.manager.ComputeManager._try_deallocate_network.<locals>._deallocate_network_with_retries to return. func /usr/lib/python3.9/site-packages/oslo_service/loopingcall.py:435
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.910 2 DEBUG nova.compute.manager [-] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Deallocating network for instance _deallocate_network /usr/lib/python3.9/site-packages/nova/compute/manager.py:2259
Oct 11 02:43:22 compute-0 nova_compute[356901]: 2025-10-11 02:43:22.910 2 DEBUG nova.network.neutron [-] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] deallocate_for_instance() deallocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1803
Oct 11 02:43:22 compute-0 ceph-mon[191930]: pgmap v1974: 321 pgs: 321 active+clean; 265 MiB data, 406 MiB used, 60 GiB / 60 GiB avail; 3.5 MiB/s rd, 1.8 MiB/s wr, 136 op/s
Oct 11 02:43:23 compute-0 nova_compute[356901]: 2025-10-11 02:43:23.778 2 DEBUG nova.network.neutron [-] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Updating instance_info_cache with network_info: [] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:43:23 compute-0 nova_compute[356901]: 2025-10-11 02:43:23.795 2 INFO nova.compute.manager [-] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Took 0.89 seconds to deallocate network for instance.
Oct 11 02:43:23 compute-0 nova_compute[356901]: 2025-10-11 02:43:23.844 2 DEBUG oslo_concurrency.lockutils [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.update_usage" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:23 compute-0 nova_compute[356901]: 2025-10-11 02:43:23.848 2 DEBUG oslo_concurrency.lockutils [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: waited 0.004s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:23 compute-0 nova_compute[356901]: 2025-10-11 02:43:23.862 2 DEBUG nova.compute.manager [req-ae48e8a9-f818-4a3b-9106-5ca58b45c134 req-272bc207-29e3-48d9-ad8e-41354a02b7a0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Received event network-vif-deleted-e332b5d8-f31d-4e8a-99d2-7cc96428d93a external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:43:23 compute-0 nova_compute[356901]: 2025-10-11 02:43:23.955 2 DEBUG oslo_concurrency.processutils [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:43:24 compute-0 nova_compute[356901]: 2025-10-11 02:43:24.421 2 DEBUG nova.compute.manager [req-5ab02d5d-b5a3-4b2f-86d3-81e2a13ecfc8 req-a36fc013-a404-47e9-8da8-df470957f6a0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Received event network-vif-plugged-e332b5d8-f31d-4e8a-99d2-7cc96428d93a external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:43:24 compute-0 nova_compute[356901]: 2025-10-11 02:43:24.423 2 DEBUG oslo_concurrency.lockutils [req-5ab02d5d-b5a3-4b2f-86d3-81e2a13ecfc8 req-a36fc013-a404-47e9-8da8-df470957f6a0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:24 compute-0 nova_compute[356901]: 2025-10-11 02:43:24.423 2 DEBUG oslo_concurrency.lockutils [req-5ab02d5d-b5a3-4b2f-86d3-81e2a13ecfc8 req-a36fc013-a404-47e9-8da8-df470957f6a0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:24 compute-0 nova_compute[356901]: 2025-10-11 02:43:24.424 2 DEBUG oslo_concurrency.lockutils [req-5ab02d5d-b5a3-4b2f-86d3-81e2a13ecfc8 req-a36fc013-a404-47e9-8da8-df470957f6a0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:43:24 compute-0 nova_compute[356901]: 2025-10-11 02:43:24.424 2 DEBUG nova.compute.manager [req-5ab02d5d-b5a3-4b2f-86d3-81e2a13ecfc8 req-a36fc013-a404-47e9-8da8-df470957f6a0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] No waiting events found dispatching network-vif-plugged-e332b5d8-f31d-4e8a-99d2-7cc96428d93a pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:43:24 compute-0 nova_compute[356901]: 2025-10-11 02:43:24.424 2 WARNING nova.compute.manager [req-5ab02d5d-b5a3-4b2f-86d3-81e2a13ecfc8 req-a36fc013-a404-47e9-8da8-df470957f6a0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Received unexpected event network-vif-plugged-e332b5d8-f31d-4e8a-99d2-7cc96428d93a for instance with vm_state deleted and task_state None.
Oct 11 02:43:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:43:24 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1211482180' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:43:24 compute-0 nova_compute[356901]: 2025-10-11 02:43:24.554 2 DEBUG oslo_concurrency.processutils [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.599s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:43:24 compute-0 nova_compute[356901]: 2025-10-11 02:43:24.566 2 DEBUG nova.compute.provider_tree [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:43:24 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:24.569 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '15'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:43:24 compute-0 nova_compute[356901]: 2025-10-11 02:43:24.582 2 DEBUG nova.scheduler.client.report [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:43:24 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1211482180' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:43:24 compute-0 nova_compute[356901]: 2025-10-11 02:43:24.605 2 DEBUG oslo_concurrency.lockutils [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: held 0.757s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:43:24 compute-0 nova_compute[356901]: 2025-10-11 02:43:24.632 2 INFO nova.scheduler.client.report [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Deleted allocations for instance 2a3deab0-7a22-486d-86a2-2fc870c8ab2d
Oct 11 02:43:24 compute-0 nova_compute[356901]: 2025-10-11 02:43:24.687 2 DEBUG oslo_concurrency.lockutils [None req-80251202-5e67-4415-bd1c-316719921628 bcba1b18a2ad479587a15fe415ae307a 86dfc4ba5e494748b86bc9b983426779 - - default default] Lock "2a3deab0-7a22-486d-86a2-2fc870c8ab2d" "released" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: held 2.811s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:43:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1975: 321 pgs: 321 active+clean; 220 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 2.3 MiB/s rd, 634 KiB/s wr, 130 op/s
Oct 11 02:43:25 compute-0 nova_compute[356901]: 2025-10-11 02:43:25.186 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:25 compute-0 ceph-mon[191930]: pgmap v1975: 321 pgs: 321 active+clean; 220 MiB data, 372 MiB used, 60 GiB / 60 GiB avail; 2.3 MiB/s rd, 634 KiB/s wr, 130 op/s
Oct 11 02:43:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:43:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3600.2 total, 600.0 interval
                                            Cumulative writes: 8926 writes, 35K keys, 8926 commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 8926 writes, 2237 syncs, 3.99 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 1761 writes, 6906 keys, 1761 commit groups, 1.0 writes per commit group, ingest: 6.83 MB, 0.01 MB/s
                                            Interval WAL: 1761 writes, 701 syncs, 2.51 writes per sync, written: 0.01 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:43:26 compute-0 ceph-mgr[192233]: [devicehealth INFO root] Check health
Oct 11 02:43:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:43:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:43:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:43:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:43:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:43:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:43:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:43:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1976: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail; 2.0 MiB/s rd, 29 KiB/s wr, 129 op/s
Oct 11 02:43:26 compute-0 ceph-mon[191930]: pgmap v1976: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail; 2.0 MiB/s rd, 29 KiB/s wr, 129 op/s
Oct 11 02:43:27 compute-0 nova_compute[356901]: 2025-10-11 02:43:27.183 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:43:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3263286613' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:43:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:43:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3263286613' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:43:27 compute-0 ovn_controller[88370]: 2025-10-11T02:43:27Z|00178|binding|INFO|Releasing lport bd6ddb48-868e-41a0-8ff2-0f3a1a9b4d81 from this chassis (sb_readonly=0)
Oct 11 02:43:27 compute-0 ovn_controller[88370]: 2025-10-11T02:43:27Z|00179|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:43:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3263286613' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:43:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3263286613' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:43:28 compute-0 nova_compute[356901]: 2025-10-11 02:43:28.046 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1977: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 29 KiB/s wr, 122 op/s
Oct 11 02:43:28 compute-0 ceph-mon[191930]: pgmap v1977: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 29 KiB/s wr, 122 op/s
Oct 11 02:43:29 compute-0 podman[461441]: 2025-10-11 02:43:29.235985214 +0000 UTC m=+0.109101345 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_id=edpm, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251007, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.name=CentOS Stream 10 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 02:43:29 compute-0 podman[461439]: 2025-10-11 02:43:29.244166564 +0000 UTC m=+0.134123967 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:43:29 compute-0 podman[461447]: 2025-10-11 02:43:29.271488382 +0000 UTC m=+0.124310285 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_id=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3)
Oct 11 02:43:29 compute-0 podman[461440]: 2025-10-11 02:43:29.279883613 +0000 UTC m=+0.158099611 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:43:29 compute-0 podman[157119]: time="2025-10-11T02:43:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:43:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:43:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:43:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:43:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9540 "" "Go-http-client/1.1"
Oct 11 02:43:30 compute-0 nova_compute[356901]: 2025-10-11 02:43:30.189 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:30 compute-0 nova_compute[356901]: 2025-10-11 02:43:30.355 2 DEBUG nova.virt.driver [-] Emitting event <LifecycleEvent: 1760150595.3532119, 97d9494c-4ce4-4ff3-a0fa-d5cda135da98 => Stopped> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:43:30 compute-0 nova_compute[356901]: 2025-10-11 02:43:30.356 2 INFO nova.compute.manager [-] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] VM Stopped (Lifecycle Event)
Oct 11 02:43:30 compute-0 nova_compute[356901]: 2025-10-11 02:43:30.386 2 DEBUG nova.compute.manager [None req-216831fb-f3f5-4a43-9768-a9c829dabf25 - - - - - -] [instance: 97d9494c-4ce4-4ff3-a0fa-d5cda135da98] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:43:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1978: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail; 1.6 MiB/s rd, 1.2 KiB/s wr, 79 op/s
Oct 11 02:43:30 compute-0 ceph-mon[191930]: pgmap v1978: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail; 1.6 MiB/s rd, 1.2 KiB/s wr, 79 op/s
Oct 11 02:43:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:43:31 compute-0 openstack_network_exporter[374316]: ERROR   02:43:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:43:31 compute-0 openstack_network_exporter[374316]: ERROR   02:43:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:43:31 compute-0 openstack_network_exporter[374316]: ERROR   02:43:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:43:31 compute-0 openstack_network_exporter[374316]: ERROR   02:43:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:43:31 compute-0 openstack_network_exporter[374316]: ERROR   02:43:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:43:32 compute-0 nova_compute[356901]: 2025-10-11 02:43:32.189 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1979: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail; 1.6 MiB/s rd, 1.2 KiB/s wr, 79 op/s
Oct 11 02:43:32 compute-0 ceph-mon[191930]: pgmap v1979: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail; 1.6 MiB/s rd, 1.2 KiB/s wr, 79 op/s
Oct 11 02:43:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1980: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail; 297 KiB/s rd, 1.2 KiB/s wr, 36 op/s
Oct 11 02:43:34 compute-0 ceph-mon[191930]: pgmap v1980: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail; 297 KiB/s rd, 1.2 KiB/s wr, 36 op/s
Oct 11 02:43:35 compute-0 nova_compute[356901]: 2025-10-11 02:43:35.191 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:35 compute-0 sudo[461520]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:43:35 compute-0 sudo[461520]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:35 compute-0 sudo[461520]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:35 compute-0 sudo[461545]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:43:35 compute-0 sudo[461545]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:35 compute-0 sudo[461545]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:35 compute-0 sudo[461570]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:43:35 compute-0 sudo[461570]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:35 compute-0 sudo[461570]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:35 compute-0 sudo[461595]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:43:35 compute-0 sudo[461595]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:43:36 compute-0 sudo[461595]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:43:36 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:43:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:43:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:43:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:43:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:43:36 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev c746139d-6555-42ea-a26d-c0c9427e6b82 does not exist
Oct 11 02:43:36 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 9b079d15-ad7a-4a2a-a41e-c4e04128a4be does not exist
Oct 11 02:43:36 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev a82990a5-c753-46d0-bd67-ac609e673ca5 does not exist
Oct 11 02:43:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:43:36 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:43:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:43:36 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:43:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:43:36 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:43:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:43:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:43:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:43:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:43:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:43:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:43:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1981: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail; 9.2 KiB/s rd, 852 B/s wr, 13 op/s
Oct 11 02:43:36 compute-0 sudo[461651]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:43:36 compute-0 sudo[461651]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:36 compute-0 sudo[461651]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:37 compute-0 sudo[461692]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:43:37 compute-0 sudo[461692]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:37 compute-0 podman[461675]: 2025-10-11 02:43:37.084743091 +0000 UTC m=+0.132857258 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_id=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_managed=true, container_name=multipathd, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']})
Oct 11 02:43:37 compute-0 sudo[461692]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:37 compute-0 podman[461676]: 2025-10-11 02:43:37.091959323 +0000 UTC m=+0.131257156 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=iscsid, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_id=iscsid, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:43:37 compute-0 nova_compute[356901]: 2025-10-11 02:43:37.142 2 DEBUG nova.virt.driver [-] Emitting event <LifecycleEvent: 1760150602.140853, 2a3deab0-7a22-486d-86a2-2fc870c8ab2d => Stopped> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:43:37 compute-0 nova_compute[356901]: 2025-10-11 02:43:37.142 2 INFO nova.compute.manager [-] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] VM Stopped (Lifecycle Event)
Oct 11 02:43:37 compute-0 sudo[461739]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:43:37 compute-0 sudo[461739]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:37 compute-0 sudo[461739]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:37 compute-0 nova_compute[356901]: 2025-10-11 02:43:37.173 2 DEBUG nova.compute.manager [None req-e6779a74-b5c2-4818-a04e-23462ef7f588 - - - - - -] [instance: 2a3deab0-7a22-486d-86a2-2fc870c8ab2d] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:43:37 compute-0 nova_compute[356901]: 2025-10-11 02:43:37.195 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:37 compute-0 sudo[461764]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:43:37 compute-0 sudo[461764]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:37 compute-0 podman[461830]: 2025-10-11 02:43:37.781346238 +0000 UTC m=+0.088569994 container create 906fbc8993462b1e387ef5d9de379459fa1627bf38491ef2588c3d31358028bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_greider, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, ceph=True, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 02:43:37 compute-0 ceph-mon[191930]: pgmap v1981: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail; 9.2 KiB/s rd, 852 B/s wr, 13 op/s
Oct 11 02:43:37 compute-0 podman[461830]: 2025-10-11 02:43:37.744339789 +0000 UTC m=+0.051563585 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:43:37 compute-0 systemd[1]: Started libpod-conmon-906fbc8993462b1e387ef5d9de379459fa1627bf38491ef2588c3d31358028bc.scope.
Oct 11 02:43:37 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:43:37 compute-0 podman[461830]: 2025-10-11 02:43:37.94370495 +0000 UTC m=+0.250928686 container init 906fbc8993462b1e387ef5d9de379459fa1627bf38491ef2588c3d31358028bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_greider, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 02:43:37 compute-0 podman[461830]: 2025-10-11 02:43:37.96169455 +0000 UTC m=+0.268918296 container start 906fbc8993462b1e387ef5d9de379459fa1627bf38491ef2588c3d31358028bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_greider, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 02:43:37 compute-0 podman[461830]: 2025-10-11 02:43:37.96857016 +0000 UTC m=+0.275793916 container attach 906fbc8993462b1e387ef5d9de379459fa1627bf38491ef2588c3d31358028bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_greider, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:43:37 compute-0 cool_greider[461846]: 167 167
Oct 11 02:43:37 compute-0 systemd[1]: libpod-906fbc8993462b1e387ef5d9de379459fa1627bf38491ef2588c3d31358028bc.scope: Deactivated successfully.
Oct 11 02:43:37 compute-0 conmon[461846]: conmon 906fbc8993462b1e387e <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-906fbc8993462b1e387ef5d9de379459fa1627bf38491ef2588c3d31358028bc.scope/container/memory.events
Oct 11 02:43:37 compute-0 podman[461830]: 2025-10-11 02:43:37.977490765 +0000 UTC m=+0.284714521 container died 906fbc8993462b1e387ef5d9de379459fa1627bf38491ef2588c3d31358028bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_greider, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:43:38 compute-0 systemd[1]: var-lib-containers-storage-overlay-78586dd142083abd89738d5c1874ff07643898d2b3d97ebb05d8c4d1c5350389-merged.mount: Deactivated successfully.
Oct 11 02:43:38 compute-0 podman[461830]: 2025-10-11 02:43:38.065062173 +0000 UTC m=+0.372285939 container remove 906fbc8993462b1e387ef5d9de379459fa1627bf38491ef2588c3d31358028bc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_greider, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:43:38 compute-0 systemd[1]: libpod-conmon-906fbc8993462b1e387ef5d9de379459fa1627bf38491ef2588c3d31358028bc.scope: Deactivated successfully.
Oct 11 02:43:38 compute-0 podman[461869]: 2025-10-11 02:43:38.368995104 +0000 UTC m=+0.070820926 container create 0f7eb50721d5f27a3f942b2519c227d08bc1f0db5bdfe504ae6adae8a8e521a2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_kapitsa, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Oct 11 02:43:38 compute-0 podman[461869]: 2025-10-11 02:43:38.339429419 +0000 UTC m=+0.041255241 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:43:38 compute-0 systemd[1]: Started libpod-conmon-0f7eb50721d5f27a3f942b2519c227d08bc1f0db5bdfe504ae6adae8a8e521a2.scope.
Oct 11 02:43:38 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:43:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f6eca451520e92a0aa6cc0fe2ace124a47226948e4bc165039019b2ae2e0c09b/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:43:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f6eca451520e92a0aa6cc0fe2ace124a47226948e4bc165039019b2ae2e0c09b/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:43:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f6eca451520e92a0aa6cc0fe2ace124a47226948e4bc165039019b2ae2e0c09b/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:43:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f6eca451520e92a0aa6cc0fe2ace124a47226948e4bc165039019b2ae2e0c09b/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:43:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f6eca451520e92a0aa6cc0fe2ace124a47226948e4bc165039019b2ae2e0c09b/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:43:38 compute-0 podman[461869]: 2025-10-11 02:43:38.600424478 +0000 UTC m=+0.302250360 container init 0f7eb50721d5f27a3f942b2519c227d08bc1f0db5bdfe504ae6adae8a8e521a2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_kapitsa, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:43:38 compute-0 podman[461869]: 2025-10-11 02:43:38.616335084 +0000 UTC m=+0.318160906 container start 0f7eb50721d5f27a3f942b2519c227d08bc1f0db5bdfe504ae6adae8a8e521a2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_kapitsa, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:43:38 compute-0 podman[461869]: 2025-10-11 02:43:38.62405957 +0000 UTC m=+0.325885392 container attach 0f7eb50721d5f27a3f942b2519c227d08bc1f0db5bdfe504ae6adae8a8e521a2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_kapitsa, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:43:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1982: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:43:38 compute-0 ceph-mon[191930]: pgmap v1982: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:43:39 compute-0 nova_compute[356901]: 2025-10-11 02:43:39.851 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:39 compute-0 crazy_kapitsa[461886]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:43:39 compute-0 crazy_kapitsa[461886]: --> relative data size: 1.0
Oct 11 02:43:39 compute-0 crazy_kapitsa[461886]: --> All data devices are unavailable
Oct 11 02:43:39 compute-0 systemd[1]: libpod-0f7eb50721d5f27a3f942b2519c227d08bc1f0db5bdfe504ae6adae8a8e521a2.scope: Deactivated successfully.
Oct 11 02:43:39 compute-0 systemd[1]: libpod-0f7eb50721d5f27a3f942b2519c227d08bc1f0db5bdfe504ae6adae8a8e521a2.scope: Consumed 1.226s CPU time.
Oct 11 02:43:39 compute-0 podman[461869]: 2025-10-11 02:43:39.938438344 +0000 UTC m=+1.640264146 container died 0f7eb50721d5f27a3f942b2519c227d08bc1f0db5bdfe504ae6adae8a8e521a2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_kapitsa, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:43:39 compute-0 systemd[1]: var-lib-containers-storage-overlay-f6eca451520e92a0aa6cc0fe2ace124a47226948e4bc165039019b2ae2e0c09b-merged.mount: Deactivated successfully.
Oct 11 02:43:40 compute-0 podman[461869]: 2025-10-11 02:43:40.035107567 +0000 UTC m=+1.736933349 container remove 0f7eb50721d5f27a3f942b2519c227d08bc1f0db5bdfe504ae6adae8a8e521a2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_kapitsa, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:43:40 compute-0 sudo[461764]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:40 compute-0 systemd[1]: libpod-conmon-0f7eb50721d5f27a3f942b2519c227d08bc1f0db5bdfe504ae6adae8a8e521a2.scope: Deactivated successfully.
Oct 11 02:43:40 compute-0 sudo[461928]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:43:40 compute-0 sudo[461928]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:40 compute-0 nova_compute[356901]: 2025-10-11 02:43:40.191 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:40 compute-0 sudo[461928]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:40 compute-0 sudo[461953]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:43:40 compute-0 sudo[461953]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:40 compute-0 sudo[461953]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:40 compute-0 sudo[461978]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:43:40 compute-0 sudo[461978]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:40 compute-0 sudo[461978]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:40 compute-0 sudo[462003]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:43:40 compute-0 sudo[462003]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1983: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:43:40 compute-0 ceph-mon[191930]: pgmap v1983: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:43:41 compute-0 podman[462065]: 2025-10-11 02:43:41.116541226 +0000 UTC m=+0.061767121 container create 6cd2617591390c786fad8ee06631d32147180d1f7cd9746150bdd8624209a260 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_goldstine, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 02:43:41 compute-0 systemd[1]: Started libpod-conmon-6cd2617591390c786fad8ee06631d32147180d1f7cd9746150bdd8624209a260.scope.
Oct 11 02:43:41 compute-0 podman[462065]: 2025-10-11 02:43:41.08417255 +0000 UTC m=+0.029398465 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:43:41 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:43:41 compute-0 podman[462065]: 2025-10-11 02:43:41.248692247 +0000 UTC m=+0.193918172 container init 6cd2617591390c786fad8ee06631d32147180d1f7cd9746150bdd8624209a260 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_goldstine, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:43:41 compute-0 podman[462065]: 2025-10-11 02:43:41.265731281 +0000 UTC m=+0.210957176 container start 6cd2617591390c786fad8ee06631d32147180d1f7cd9746150bdd8624209a260 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_goldstine, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:43:41 compute-0 podman[462065]: 2025-10-11 02:43:41.270748688 +0000 UTC m=+0.215974603 container attach 6cd2617591390c786fad8ee06631d32147180d1f7cd9746150bdd8624209a260 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_goldstine, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:43:41 compute-0 friendly_goldstine[462081]: 167 167
Oct 11 02:43:41 compute-0 systemd[1]: libpod-6cd2617591390c786fad8ee06631d32147180d1f7cd9746150bdd8624209a260.scope: Deactivated successfully.
Oct 11 02:43:41 compute-0 podman[462065]: 2025-10-11 02:43:41.278073101 +0000 UTC m=+0.223298996 container died 6cd2617591390c786fad8ee06631d32147180d1f7cd9746150bdd8624209a260 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_goldstine, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:43:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:43:41 compute-0 systemd[1]: var-lib-containers-storage-overlay-25496544331e054043aa83492c619c0729ece3d5a89f4cad37b7389bc35b701e-merged.mount: Deactivated successfully.
Oct 11 02:43:41 compute-0 podman[462065]: 2025-10-11 02:43:41.336990199 +0000 UTC m=+0.282216104 container remove 6cd2617591390c786fad8ee06631d32147180d1f7cd9746150bdd8624209a260 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_goldstine, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 02:43:41 compute-0 systemd[1]: libpod-conmon-6cd2617591390c786fad8ee06631d32147180d1f7cd9746150bdd8624209a260.scope: Deactivated successfully.
Oct 11 02:43:41 compute-0 podman[462103]: 2025-10-11 02:43:41.555095486 +0000 UTC m=+0.061921092 container create f33554486f8741156be32e4b72aa912111a2b274976b0ef3becf6190cd9cd212 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_boyd, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.schema-version=1.0)
Oct 11 02:43:41 compute-0 podman[462103]: 2025-10-11 02:43:41.533274977 +0000 UTC m=+0.040100603 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:43:41 compute-0 systemd[1]: Started libpod-conmon-f33554486f8741156be32e4b72aa912111a2b274976b0ef3becf6190cd9cd212.scope.
Oct 11 02:43:41 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:43:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0f1f657e24553e874aa7d930eab69212448a749dcbddd35157ceb5f4997fb6cd/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:43:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0f1f657e24553e874aa7d930eab69212448a749dcbddd35157ceb5f4997fb6cd/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:43:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0f1f657e24553e874aa7d930eab69212448a749dcbddd35157ceb5f4997fb6cd/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:43:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0f1f657e24553e874aa7d930eab69212448a749dcbddd35157ceb5f4997fb6cd/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:43:41 compute-0 podman[462103]: 2025-10-11 02:43:41.70268403 +0000 UTC m=+0.209509656 container init f33554486f8741156be32e4b72aa912111a2b274976b0ef3becf6190cd9cd212 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_boyd, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3)
Oct 11 02:43:41 compute-0 podman[462103]: 2025-10-11 02:43:41.722163971 +0000 UTC m=+0.228989597 container start f33554486f8741156be32e4b72aa912111a2b274976b0ef3becf6190cd9cd212 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_boyd, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:43:41 compute-0 podman[462103]: 2025-10-11 02:43:41.727392679 +0000 UTC m=+0.234218305 container attach f33554486f8741156be32e4b72aa912111a2b274976b0ef3becf6190cd9cd212 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_boyd, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:43:42 compute-0 nova_compute[356901]: 2025-10-11 02:43:42.200 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]: {
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:     "0": [
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:         {
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "devices": [
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "/dev/loop3"
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             ],
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "lv_name": "ceph_lv0",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "lv_size": "21470642176",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "name": "ceph_lv0",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "tags": {
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.cluster_name": "ceph",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.crush_device_class": "",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.encrypted": "0",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.osd_id": "0",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.type": "block",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.vdo": "0"
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             },
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "type": "block",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "vg_name": "ceph_vg0"
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:         }
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:     ],
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:     "1": [
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:         {
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "devices": [
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "/dev/loop4"
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             ],
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "lv_name": "ceph_lv1",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "lv_size": "21470642176",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "name": "ceph_lv1",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "tags": {
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.cluster_name": "ceph",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.crush_device_class": "",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.encrypted": "0",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.osd_id": "1",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.type": "block",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.vdo": "0"
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             },
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "type": "block",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "vg_name": "ceph_vg1"
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:         }
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:     ],
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:     "2": [
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:         {
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "devices": [
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "/dev/loop5"
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             ],
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "lv_name": "ceph_lv2",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "lv_size": "21470642176",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "name": "ceph_lv2",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "tags": {
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.cluster_name": "ceph",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.crush_device_class": "",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.encrypted": "0",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.osd_id": "2",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.type": "block",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:                 "ceph.vdo": "0"
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             },
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "type": "block",
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:             "vg_name": "ceph_vg2"
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:         }
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]:     ]
Oct 11 02:43:42 compute-0 sleepy_boyd[462119]: }
Oct 11 02:43:42 compute-0 systemd[1]: libpod-f33554486f8741156be32e4b72aa912111a2b274976b0ef3becf6190cd9cd212.scope: Deactivated successfully.
Oct 11 02:43:42 compute-0 podman[462103]: 2025-10-11 02:43:42.612997533 +0000 UTC m=+1.119823139 container died f33554486f8741156be32e4b72aa912111a2b274976b0ef3becf6190cd9cd212 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_boyd, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:43:42 compute-0 systemd[1]: var-lib-containers-storage-overlay-0f1f657e24553e874aa7d930eab69212448a749dcbddd35157ceb5f4997fb6cd-merged.mount: Deactivated successfully.
Oct 11 02:43:42 compute-0 podman[462103]: 2025-10-11 02:43:42.688489462 +0000 UTC m=+1.195315068 container remove f33554486f8741156be32e4b72aa912111a2b274976b0ef3becf6190cd9cd212 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_boyd, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 02:43:42 compute-0 systemd[1]: libpod-conmon-f33554486f8741156be32e4b72aa912111a2b274976b0ef3becf6190cd9cd212.scope: Deactivated successfully.
Oct 11 02:43:42 compute-0 sudo[462003]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:42 compute-0 sudo[462142]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:43:42 compute-0 sudo[462142]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:42 compute-0 sudo[462142]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1984: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:43:42 compute-0 ceph-mon[191930]: pgmap v1984: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:43:42 compute-0 sudo[462167]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:43:42 compute-0 sudo[462167]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:42 compute-0 sudo[462167]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:43 compute-0 sudo[462192]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:43:43 compute-0 sudo[462192]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:43 compute-0 sudo[462192]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:43 compute-0 sudo[462217]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:43:43 compute-0 sudo[462217]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:43 compute-0 nova_compute[356901]: 2025-10-11 02:43:43.689 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:43 compute-0 podman[462280]: 2025-10-11 02:43:43.819756034 +0000 UTC m=+0.068742192 container create 4aa721959bb1ceabd1be4315cfa5d4be1beac06865d1e9ea739c5a34c82850f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_ramanujan, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507)
Oct 11 02:43:43 compute-0 podman[462280]: 2025-10-11 02:43:43.788930489 +0000 UTC m=+0.037916637 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:43:43 compute-0 systemd[1]: Started libpod-conmon-4aa721959bb1ceabd1be4315cfa5d4be1beac06865d1e9ea739c5a34c82850f7.scope.
Oct 11 02:43:43 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:43:43 compute-0 podman[462280]: 2025-10-11 02:43:43.975761859 +0000 UTC m=+0.224748017 container init 4aa721959bb1ceabd1be4315cfa5d4be1beac06865d1e9ea739c5a34c82850f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_ramanujan, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:43:43 compute-0 podman[462280]: 2025-10-11 02:43:43.988124999 +0000 UTC m=+0.237111147 container start 4aa721959bb1ceabd1be4315cfa5d4be1beac06865d1e9ea739c5a34c82850f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_ramanujan, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3)
Oct 11 02:43:43 compute-0 podman[462280]: 2025-10-11 02:43:43.99245481 +0000 UTC m=+0.241440978 container attach 4aa721959bb1ceabd1be4315cfa5d4be1beac06865d1e9ea739c5a34c82850f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_ramanujan, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 02:43:43 compute-0 silly_ramanujan[462294]: 167 167
Oct 11 02:43:43 compute-0 systemd[1]: libpod-4aa721959bb1ceabd1be4315cfa5d4be1beac06865d1e9ea739c5a34c82850f7.scope: Deactivated successfully.
Oct 11 02:43:44 compute-0 podman[462299]: 2025-10-11 02:43:44.086096472 +0000 UTC m=+0.072409628 container died 4aa721959bb1ceabd1be4315cfa5d4be1beac06865d1e9ea739c5a34c82850f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_ramanujan, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=reef)
Oct 11 02:43:44 compute-0 systemd[1]: var-lib-containers-storage-overlay-777c863e0ae44722da039645b6af24587b11b4416b71d7bbf3bec526b34f1da5-merged.mount: Deactivated successfully.
Oct 11 02:43:44 compute-0 podman[462299]: 2025-10-11 02:43:44.154843672 +0000 UTC m=+0.141156808 container remove 4aa721959bb1ceabd1be4315cfa5d4be1beac06865d1e9ea739c5a34c82850f7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_ramanujan, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2)
Oct 11 02:43:44 compute-0 systemd[1]: libpod-conmon-4aa721959bb1ceabd1be4315cfa5d4be1beac06865d1e9ea739c5a34c82850f7.scope: Deactivated successfully.
Oct 11 02:43:44 compute-0 podman[462320]: 2025-10-11 02:43:44.443438522 +0000 UTC m=+0.096335342 container create 043ecd2bb5755c5dcd1e29b50afdcdec39608a4221a45a1b1a09cba054a96ac3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_keller, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:43:44 compute-0 podman[462320]: 2025-10-11 02:43:44.413585484 +0000 UTC m=+0.066482324 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:43:44 compute-0 systemd[1]: Started libpod-conmon-043ecd2bb5755c5dcd1e29b50afdcdec39608a4221a45a1b1a09cba054a96ac3.scope.
Oct 11 02:43:44 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:43:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/77d3392b0efd9e653428ade52e6003ecd8ad43a8f8330b0599fb3bbb6f17ca2a/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:43:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/77d3392b0efd9e653428ade52e6003ecd8ad43a8f8330b0599fb3bbb6f17ca2a/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:43:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/77d3392b0efd9e653428ade52e6003ecd8ad43a8f8330b0599fb3bbb6f17ca2a/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:43:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/77d3392b0efd9e653428ade52e6003ecd8ad43a8f8330b0599fb3bbb6f17ca2a/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:43:44 compute-0 podman[462320]: 2025-10-11 02:43:44.60399823 +0000 UTC m=+0.256895050 container init 043ecd2bb5755c5dcd1e29b50afdcdec39608a4221a45a1b1a09cba054a96ac3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_keller, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 02:43:44 compute-0 podman[462320]: 2025-10-11 02:43:44.615661295 +0000 UTC m=+0.268558115 container start 043ecd2bb5755c5dcd1e29b50afdcdec39608a4221a45a1b1a09cba054a96ac3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_keller, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 02:43:44 compute-0 podman[462320]: 2025-10-11 02:43:44.620351819 +0000 UTC m=+0.273248659 container attach 043ecd2bb5755c5dcd1e29b50afdcdec39608a4221a45a1b1a09cba054a96ac3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_keller, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS)
Oct 11 02:43:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1985: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:43:44 compute-0 ceph-mon[191930]: pgmap v1985: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:43:45 compute-0 nova_compute[356901]: 2025-10-11 02:43:45.195 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:45 compute-0 interesting_keller[462335]: {
Oct 11 02:43:45 compute-0 interesting_keller[462335]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:43:45 compute-0 interesting_keller[462335]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:43:45 compute-0 interesting_keller[462335]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:43:45 compute-0 interesting_keller[462335]:         "osd_id": 1,
Oct 11 02:43:45 compute-0 interesting_keller[462335]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:43:45 compute-0 interesting_keller[462335]:         "type": "bluestore"
Oct 11 02:43:45 compute-0 interesting_keller[462335]:     },
Oct 11 02:43:45 compute-0 interesting_keller[462335]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:43:45 compute-0 interesting_keller[462335]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:43:45 compute-0 interesting_keller[462335]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:43:45 compute-0 interesting_keller[462335]:         "osd_id": 2,
Oct 11 02:43:45 compute-0 interesting_keller[462335]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:43:45 compute-0 interesting_keller[462335]:         "type": "bluestore"
Oct 11 02:43:45 compute-0 interesting_keller[462335]:     },
Oct 11 02:43:45 compute-0 interesting_keller[462335]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:43:45 compute-0 interesting_keller[462335]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:43:45 compute-0 interesting_keller[462335]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:43:45 compute-0 interesting_keller[462335]:         "osd_id": 0,
Oct 11 02:43:45 compute-0 interesting_keller[462335]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:43:45 compute-0 interesting_keller[462335]:         "type": "bluestore"
Oct 11 02:43:45 compute-0 interesting_keller[462335]:     }
Oct 11 02:43:45 compute-0 interesting_keller[462335]: }
Oct 11 02:43:45 compute-0 systemd[1]: libpod-043ecd2bb5755c5dcd1e29b50afdcdec39608a4221a45a1b1a09cba054a96ac3.scope: Deactivated successfully.
Oct 11 02:43:45 compute-0 podman[462320]: 2025-10-11 02:43:45.885550634 +0000 UTC m=+1.538447454 container died 043ecd2bb5755c5dcd1e29b50afdcdec39608a4221a45a1b1a09cba054a96ac3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_keller, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 02:43:45 compute-0 systemd[1]: libpod-043ecd2bb5755c5dcd1e29b50afdcdec39608a4221a45a1b1a09cba054a96ac3.scope: Consumed 1.246s CPU time.
Oct 11 02:43:45 compute-0 systemd[1]: var-lib-containers-storage-overlay-77d3392b0efd9e653428ade52e6003ecd8ad43a8f8330b0599fb3bbb6f17ca2a-merged.mount: Deactivated successfully.
Oct 11 02:43:45 compute-0 podman[462320]: 2025-10-11 02:43:45.989387829 +0000 UTC m=+1.642284649 container remove 043ecd2bb5755c5dcd1e29b50afdcdec39608a4221a45a1b1a09cba054a96ac3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_keller, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 02:43:46 compute-0 systemd[1]: libpod-conmon-043ecd2bb5755c5dcd1e29b50afdcdec39608a4221a45a1b1a09cba054a96ac3.scope: Deactivated successfully.
Oct 11 02:43:46 compute-0 sudo[462217]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:43:46 compute-0 podman[462370]: 2025-10-11 02:43:46.047568662 +0000 UTC m=+0.115106358 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_id=edpm, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi)
Oct 11 02:43:46 compute-0 podman[462377]: 2025-10-11 02:43:46.049322375 +0000 UTC m=+0.109104725 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, release=1755695350, version=9.6, architecture=x86_64, url=https://catalog.redhat.com/en/search?searchType=containers, distribution-scope=public, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, build-date=2025-08-20T13:12:41, com.redhat.component=ubi9-minimal-container, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.buildah.version=1.33.7, io.openshift.expose-services=, maintainer=Red Hat, Inc., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, vcs-type=git, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., container_name=openstack_network_exporter, io.openshift.tags=minimal rhel9, managed_by=edpm_ansible)
Oct 11 02:43:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:43:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:43:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:43:46 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev a4c8f98b-5d89-4c27-8127-fe6f7b691c9b does not exist
Oct 11 02:43:46 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev d91b46e2-879f-47cd-b4d7-dda2691fd49c does not exist
Oct 11 02:43:46 compute-0 podman[462378]: 2025-10-11 02:43:46.073957784 +0000 UTC m=+0.138749390 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:43:46 compute-0 sudo[462438]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:43:46 compute-0 sudo[462438]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:46 compute-0 sudo[462438]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:46 compute-0 sudo[462463]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:43:46 compute-0 sudo[462463]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:43:46 compute-0 sudo[462463]: pam_unix(sudo:session): session closed for user root
Oct 11 02:43:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:43:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1986: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:43:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:43:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:43:47 compute-0 ceph-mon[191930]: pgmap v1986: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:43:47 compute-0 nova_compute[356901]: 2025-10-11 02:43:47.204 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:47 compute-0 ovn_controller[88370]: 2025-10-11T02:43:47Z|00180|binding|INFO|Releasing lport bd6ddb48-868e-41a0-8ff2-0f3a1a9b4d81 from this chassis (sb_readonly=0)
Oct 11 02:43:47 compute-0 ovn_controller[88370]: 2025-10-11T02:43:47Z|00181|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:43:47 compute-0 nova_compute[356901]: 2025-10-11 02:43:47.379 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1987: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:43:48 compute-0 ceph-mon[191930]: pgmap v1987: 321 pgs: 321 active+clean; 185 MiB data, 350 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:43:50 compute-0 nova_compute[356901]: 2025-10-11 02:43:50.197 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1988: 321 pgs: 321 active+clean; 197 MiB data, 367 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 1.4 MiB/s wr, 19 op/s
Oct 11 02:43:50 compute-0 ceph-mon[191930]: pgmap v1988: 321 pgs: 321 active+clean; 197 MiB data, 367 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 1.4 MiB/s wr, 19 op/s
Oct 11 02:43:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:43:51 compute-0 ovn_controller[88370]: 2025-10-11T02:43:51Z|00023|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:2c:af:96 10.100.3.53
Oct 11 02:43:51 compute-0 ovn_controller[88370]: 2025-10-11T02:43:51Z|00024|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:2c:af:96 10.100.3.53
Oct 11 02:43:52 compute-0 nova_compute[356901]: 2025-10-11 02:43:52.213 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1989: 321 pgs: 321 active+clean; 204 MiB data, 370 MiB used, 60 GiB / 60 GiB avail; 61 KiB/s rd, 1.6 MiB/s wr, 26 op/s
Oct 11 02:43:52 compute-0 ceph-mon[191930]: pgmap v1989: 321 pgs: 321 active+clean; 204 MiB data, 370 MiB used, 60 GiB / 60 GiB avail; 61 KiB/s rd, 1.6 MiB/s wr, 26 op/s
Oct 11 02:43:53 compute-0 podman[462490]: 2025-10-11 02:43:53.205970206 +0000 UTC m=+0.103667625 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release-0.7.12=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.component=ubi9-container, io.buildah.version=1.29.0, managed_by=edpm_ansible, io.k8s.display-name=Red Hat Universal Base Image 9, summary=Provides the latest release of Red Hat Universal Base Image 9., build-date=2024-09-18T21:23:30, config_id=edpm, io.openshift.expose-services=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, io.openshift.tags=base rhel9, architecture=x86_64, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, name=ubi9, vcs-type=git, maintainer=Red Hat, Inc., container_name=kepler, release=1214.1726694543, vendor=Red Hat, Inc., version=9.4)
Oct 11 02:43:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:54.873 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:43:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:54.874 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:43:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:43:54.874 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:43:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1990: 321 pgs: 321 active+clean; 216 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 162 KiB/s rd, 2.1 MiB/s wr, 46 op/s
Oct 11 02:43:55 compute-0 nova_compute[356901]: 2025-10-11 02:43:55.201 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:55 compute-0 ceph-mon[191930]: pgmap v1990: 321 pgs: 321 active+clean; 216 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 162 KiB/s rd, 2.1 MiB/s wr, 46 op/s
Oct 11 02:43:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:43:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:43:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:43:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:43:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:43:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:43:56
Oct 11 02:43:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:43:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:43:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.meta', '.rgw.root', 'default.rgw.control', 'images', 'cephfs.cephfs.meta', 'volumes', 'cephfs.cephfs.data', 'backups', 'vms', 'default.rgw.log', '.mgr']
Oct 11 02:43:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:43:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:43:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:43:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1991: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 174 KiB/s rd, 2.1 MiB/s wr, 54 op/s
Oct 11 02:43:57 compute-0 nova_compute[356901]: 2025-10-11 02:43:57.218 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:43:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:43:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:43:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:43:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:43:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:43:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:43:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:43:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:43:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:43:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:43:57 compute-0 ceph-mon[191930]: pgmap v1991: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 174 KiB/s rd, 2.1 MiB/s wr, 54 op/s
Oct 11 02:43:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1992: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 174 KiB/s rd, 2.1 MiB/s wr, 54 op/s
Oct 11 02:43:59 compute-0 podman[157119]: time="2025-10-11T02:43:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:43:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:43:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:43:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:43:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9548 "" "Go-http-client/1.1"
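The two GETs above are the podman_exporter polling the libpod REST API over the local Podman socket. The same query can be reproduced by hand; a sketch using requests-unixsocket (an assumption, any HTTP client that can speak to a unix socket works; the socket path matches the CONTAINER_HOST value visible in the exporter's config_data below):

    import requests_unixsocket  # pip install requests-unixsocket

    session = requests_unixsocket.Session()
    base = "http+unix://%2Frun%2Fpodman%2Fpodman.sock/v4.9.3/libpod"

    # Mirrors "GET /v4.9.3/libpod/containers/json?all=true" from the log.
    containers = session.get(base + "/containers/json?all=true").json()
    print(len(containers), "containers")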
Oct 11 02:43:59 compute-0 ceph-mon[191930]: pgmap v1992: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 174 KiB/s rd, 2.1 MiB/s wr, 54 op/s
Oct 11 02:44:00 compute-0 nova_compute[356901]: 2025-10-11 02:44:00.204 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:00 compute-0 podman[462509]: 2025-10-11 02:44:00.233436677 +0000 UTC m=+0.124090734 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:44:00 compute-0 podman[462511]: 2025-10-11 02:44:00.239044068 +0000 UTC m=+0.123286789 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm)
Oct 11 02:44:00 compute-0 podman[462512]: 2025-10-11 02:44:00.25175563 +0000 UTC m=+0.117577456 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:44:00 compute-0 podman[462510]: 2025-10-11 02:44:00.269458249 +0000 UTC m=+0.144909516 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:44:00 compute-0 nova_compute[356901]: 2025-10-11 02:44:00.755 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:44:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1993: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 174 KiB/s rd, 2.1 MiB/s wr, 54 op/s
Oct 11 02:44:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:44:01 compute-0 openstack_network_exporter[374316]: ERROR   02:44:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:44:01 compute-0 openstack_network_exporter[374316]: ERROR   02:44:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:44:01 compute-0 openstack_network_exporter[374316]: ERROR   02:44:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:44:01 compute-0 openstack_network_exporter[374316]: ERROR   02:44:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:44:01 compute-0 openstack_network_exporter[374316]: ERROR   02:44:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
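These exporter errors repeat on every scrape and are environmental rather than a fault: the dpif-netdev/pmd-* appctl commands only exist for the userspace (DPDK) datapath, and ovn-northd runs on controller nodes, so neither control socket is present on this kernel-datapath compute. A sketch of the precondition the exporter is tripping over (the rundir paths are the usual defaults and may differ per deployment):

    import glob

    def control_sockets(daemon, rundir):
        # ovs-appctl locates a daemon via <rundir>/<daemon>.<pid>.ctl
        return glob.glob(f"{rundir}/{daemon}.*.ctl")

    print(control_sockets("ovn-northd", "/var/run/ovn"))            # [] here
    print(control_sockets("ovs-vswitchd", "/var/run/openvswitch"))  # present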
Oct 11 02:44:01 compute-0 ceph-mon[191930]: pgmap v1993: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 174 KiB/s rd, 2.1 MiB/s wr, 54 op/s
Oct 11 02:44:02 compute-0 nova_compute[356901]: 2025-10-11 02:44:02.221 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1994: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 144 KiB/s rd, 733 KiB/s wr, 34 op/s
Oct 11 02:44:04 compute-0 ceph-mon[191930]: pgmap v1994: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 144 KiB/s rd, 733 KiB/s wr, 34 op/s
Oct 11 02:44:04 compute-0 nova_compute[356901]: 2025-10-11 02:44:04.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:44:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1995: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 113 KiB/s rd, 531 KiB/s wr, 28 op/s
Oct 11 02:44:05 compute-0 nova_compute[356901]: 2025-10-11 02:44:05.207 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:06 compute-0 ceph-mon[191930]: pgmap v1995: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 113 KiB/s rd, 531 KiB/s wr, 28 op/s
Oct 11 02:44:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:44:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1996: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 11 KiB/s rd, 62 KiB/s wr, 8 op/s
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0013086967669466692 of space, bias 1.0, pg target 0.3926090300840008 quantized to 32 (current 32)
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00125203744627857 of space, bias 1.0, pg target 0.375611233883571 quantized to 32 (current 32)
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:44:07 compute-0 nova_compute[356901]: 2025-10-11 02:44:07.241 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:44:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
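Each _maybe_adjust line above is the same formula evaluated per pool: the pool's share of raw capacity times its bias, scaled by the cluster's PG budget, then quantized to a power of two (subject to pg_num_min and a change threshold, which is why most pools simply stay at their current 32). The budget factor of 300 is an inference consistent with the logged numbers: 3 OSDs times the default mon_target_pg_per_osd of 100. A worked check against two of the lines:

    def pg_target(usage_ratio, bias, n_osds=3, target_pg_per_osd=100):
        # capacity share x bias x cluster PG budget
        return usage_ratio * bias * n_osds * target_pg_per_osd

    # 'vms' (bias 1.0): ~0.3926090300840008, the logged pg target
    print(pg_target(0.0013086967669466692, 1.0))
    # 'cephfs.cephfs.meta' (bias 4.0): ~0.0006104707950771635, as logged
    print(pg_target(5.087256625643029e-07, 4.0))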
Oct 11 02:44:08 compute-0 ceph-mon[191930]: pgmap v1996: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 11 KiB/s rd, 62 KiB/s wr, 8 op/s
Oct 11 02:44:08 compute-0 podman[462593]: 2025-10-11 02:44:08.19148669 +0000 UTC m=+0.090720870 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, config_id=multipathd, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3, tcib_managed=true, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:44:08 compute-0 podman[462594]: 2025-10-11 02:44:08.230836126 +0000 UTC m=+0.120301747 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, config_id=iscsid, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']})
Oct 11 02:44:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1997: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 4.0 KiB/s wr, 0 op/s
Oct 11 02:44:09 compute-0 nova_compute[356901]: 2025-10-11 02:44:09.898 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:44:10 compute-0 ceph-mon[191930]: pgmap v1997: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 4.0 KiB/s wr, 0 op/s
Oct 11 02:44:10 compute-0 nova_compute[356901]: 2025-10-11 02:44:10.209 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1998: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 5.0 KiB/s wr, 0 op/s
Oct 11 02:44:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:44:12 compute-0 ceph-mon[191930]: pgmap v1998: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 5.0 KiB/s wr, 0 op/s
Oct 11 02:44:12 compute-0 nova_compute[356901]: 2025-10-11 02:44:12.244 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:12 compute-0 nova_compute[356901]: 2025-10-11 02:44:12.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:44:12 compute-0 nova_compute[356901]: 2025-10-11 02:44:12.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:44:12 compute-0 nova_compute[356901]: 2025-10-11 02:44:12.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:44:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v1999: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:44:12 compute-0 nova_compute[356901]: 2025-10-11 02:44:12.943 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944
Oct 11 02:44:12 compute-0 nova_compute[356901]: 2025-10-11 02:44:12.944 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:44:12 compute-0 nova_compute[356901]: 2025-10-11 02:44:12.945 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:44:12 compute-0 nova_compute[356901]: 2025-10-11 02:44:12.946 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:44:14 compute-0 ceph-mon[191930]: pgmap v1999: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:44:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2000: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:44:15 compute-0 nova_compute[356901]: 2025-10-11 02:44:15.213 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:16 compute-0 ceph-mon[191930]: pgmap v2000: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:44:16 compute-0 podman[462638]: 2025-10-11 02:44:16.208349854 +0000 UTC m=+0.088157992 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:44:16 compute-0 podman[462633]: 2025-10-11 02:44:16.222474074 +0000 UTC m=+0.098038787 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vendor=Red Hat, Inc., container_name=openstack_network_exporter, distribution-scope=public, name=ubi9-minimal, url=https://catalog.redhat.com/en/search?searchType=containers, build-date=2025-08-20T13:12:41, version=9.6, com.redhat.component=ubi9-minimal-container, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, architecture=x86_64, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, io.buildah.version=1.33.7, config_id=edpm, io.openshift.expose-services=, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, release=1755695350)
Oct 11 02:44:16 compute-0 podman[462632]: 2025-10-11 02:44:16.223154912 +0000 UTC m=+0.116295555 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, managed_by=edpm_ansible)
Oct 11 02:44:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:44:16 compute-0 nova_compute[356901]: 2025-10-11 02:44:16.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:44:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2001: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:44:16 compute-0 nova_compute[356901]: 2025-10-11 02:44:16.932 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:44:16 compute-0 nova_compute[356901]: 2025-10-11 02:44:16.932 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:44:16 compute-0 nova_compute[356901]: 2025-10-11 02:44:16.933 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:44:16 compute-0 nova_compute[356901]: 2025-10-11 02:44:16.933 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:44:16 compute-0 nova_compute[356901]: 2025-10-11 02:44:16.933 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:44:17 compute-0 nova_compute[356901]: 2025-10-11 02:44:17.248 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:44:17 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/345237278' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:44:17 compute-0 nova_compute[356901]: 2025-10-11 02:44:17.468 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.535s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
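The resource audit shells out to the ceph CLI rather than holding a bound client, which is why the mon's audit channel logs a matching df dispatch each cycle. The same probe, sketched with the client id and conf path copied from the logged command (the "stats" keys are ceph df's standard JSON layout):

    import json, subprocess

    out = subprocess.check_output(
        ["ceph", "df", "--format=json", "--id", "openstack",
         "--conf", "/etc/ceph/ceph.conf"])
    stats = json.loads(out)["stats"]
    # total_avail_bytes backs the free_disk figure in the resource view below
    print(stats["total_avail_bytes"] / 1024 ** 3)   # ~59.9 GiB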
Oct 11 02:44:17 compute-0 nova_compute[356901]: 2025-10-11 02:44:17.567 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:44:17 compute-0 nova_compute[356901]: 2025-10-11 02:44:17.567 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:44:17 compute-0 nova_compute[356901]: 2025-10-11 02:44:17.579 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:44:17 compute-0 nova_compute[356901]: 2025-10-11 02:44:17.579 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:44:17 compute-0 nova_compute[356901]: 2025-10-11 02:44:17.580 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.019 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.020 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3435MB free_disk=59.90976333618164GB free_vcpus=6 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.020 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.020 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:44:18 compute-0 ovn_controller[88370]: 2025-10-11T02:44:18Z|00182|memory_trim|INFO|Detected inactivity (last active 30018 ms ago): trimming memory
Oct 11 02:44:18 compute-0 ceph-mon[191930]: pgmap v2001: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:44:18 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/345237278' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.110 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.110 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 8422017b-c868-4ba2-ab1f-61d3668ca145 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.111 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 2 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.111 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1152MB phys_disk=59GB used_disk=3GB total_vcpus=8 used_vcpus=2 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.137 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing inventories for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:804
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.154 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating ProviderTree inventory for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 from _refresh_and_get_inventory using data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} _refresh_and_get_inventory /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:768
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.154 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating inventory in ProviderTree for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with inventory: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:176
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.164 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing aggregate associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, aggregates: None _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:813
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.187 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing trait associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, traits: COMPUTE_VOLUME_EXTEND,COMPUTE_NET_VIF_MODEL_VMXNET3,HW_CPU_X86_SSSE3,COMPUTE_RESCUE_BFV,COMPUTE_SOCKET_PCI_NUMA_AFFINITY,COMPUTE_NODE,HW_CPU_X86_SVM,COMPUTE_STORAGE_BUS_SCSI,HW_CPU_X86_FMA3,COMPUTE_GRAPHICS_MODEL_NONE,COMPUTE_NET_VIF_MODEL_RTL8139,HW_CPU_X86_SSE4A,COMPUTE_IMAGE_TYPE_QCOW2,HW_CPU_X86_BMI2,HW_CPU_X86_SSE42,HW_CPU_X86_AVX2,COMPUTE_IMAGE_TYPE_RAW,COMPUTE_VIOMMU_MODEL_VIRTIO,HW_CPU_X86_AESNI,COMPUTE_STORAGE_BUS_FDC,COMPUTE_GRAPHICS_MODEL_VIRTIO,HW_CPU_X86_AMD_SVM,COMPUTE_NET_VIF_MODEL_NE2K_PCI,COMPUTE_ACCELERATORS,HW_CPU_X86_SSE2,COMPUTE_GRAPHICS_MODEL_VGA,HW_CPU_X86_ABM,HW_CPU_X86_AVX,COMPUTE_NET_VIF_MODEL_E1000,COMPUTE_STORAGE_BUS_USB,COMPUTE_NET_ATTACH_INTERFACE,HW_CPU_X86_MMX,COMPUTE_SECURITY_TPM_2_0,COMPUTE_IMAGE_TYPE_ISO,HW_CPU_X86_SSE41,COMPUTE_IMAGE_TYPE_AKI,COMPUTE_IMAGE_TYPE_AMI,COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG,COMPUTE_DEVICE_TAGGING,COMPUTE_SECURITY_UEFI_SECURE_BOOT,COMPUTE_TRUSTED_CERTS,COMPUTE_NET_VIF_MODEL_VIRTIO,COMPUTE_VIOMMU_MODEL_INTEL,COMPUTE_STORAGE_BUS_SATA,HW_CPU_X86_SSE,COMPUTE_STORAGE_BUS_VIRTIO,COMPUTE_NET_VIF_MODEL_PCNET,COMPUTE_GRAPHICS_MODEL_CIRRUS,HW_CPU_X86_SHA,HW_CPU_X86_BMI,COMPUTE_NET_VIF_MODEL_E1000E,COMPUTE_NET_VIF_MODEL_SPAPR_VLAN,COMPUTE_VOLUME_ATTACH_WITH_TAG,COMPUTE_GRAPHICS_MODEL_BOCHS,COMPUTE_VIOMMU_MODEL_AUTO,COMPUTE_IMAGE_TYPE_ARI,HW_CPU_X86_CLMUL,COMPUTE_STORAGE_BUS_IDE,COMPUTE_VOLUME_MULTI_ATTACH,HW_CPU_X86_F16C,COMPUTE_SECURITY_TPM_1_2 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:825
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.255 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:44:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:44:18 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2657292003' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.765 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.510s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.777 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.798 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
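The inventory dict logged twice above is what placement turns into schedulable capacity: for each resource class, capacity is (total - reserved) * allocation_ratio, which is why 8 physical vcpus at a 4.0 ratio comfortably cover the 2 currently allocated. A worked pass over the logged values:

    # Values copied verbatim from the inventory logged above.
    inventory = {
        "VCPU":      {"total": 8,    "reserved": 0,   "allocation_ratio": 4.0},
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "DISK_GB":   {"total": 59,   "reserved": 1,   "allocation_ratio": 0.9},
    }
    for rc, inv in inventory.items():
        capacity = (inv["total"] - inv["reserved"]) * inv["allocation_ratio"]
        print(rc, capacity)   # VCPU 32.0, MEMORY_MB 7168.0, DISK_GB 52.2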
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.835 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:44:18 compute-0 nova_compute[356901]: 2025-10-11 02:44:18.836 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.816s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:44:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2002: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:44:19 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2657292003' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:44:19 compute-0 nova_compute[356901]: 2025-10-11 02:44:19.839 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:44:19 compute-0 nova_compute[356901]: 2025-10-11 02:44:19.878 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:44:20 compute-0 ceph-mon[191930]: pgmap v2002: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:44:20 compute-0 nova_compute[356901]: 2025-10-11 02:44:20.217 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2003: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:44:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:44:22 compute-0 ceph-mon[191930]: pgmap v2003: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:44:22 compute-0 nova_compute[356901]: 2025-10-11 02:44:22.252 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2004: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 85 B/s wr, 0 op/s
Oct 11 02:44:24 compute-0 ceph-mon[191930]: pgmap v2004: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 85 B/s wr, 0 op/s
Oct 11 02:44:24 compute-0 podman[462740]: 2025-10-11 02:44:24.257451969 +0000 UTC m=+0.142836929 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, managed_by=edpm_ansible, name=ubi9, vcs-type=git, vendor=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, architecture=x86_64, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release-0.7.12=, version=9.4, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9, build-date=2024-09-18T21:23:30, container_name=kepler, io.buildah.version=1.29.0, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, maintainer=Red Hat, Inc., com.redhat.component=ubi9-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.openshift.tags=base rhel9, config_id=edpm)
Oct 11 02:44:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2005: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 85 B/s wr, 0 op/s
Oct 11 02:44:25 compute-0 nova_compute[356901]: 2025-10-11 02:44:25.223 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:26 compute-0 ceph-mon[191930]: pgmap v2005: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 85 B/s wr, 0 op/s
Oct 11 02:44:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:44:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:44:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:44:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:44:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:44:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:44:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:44:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2006: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 767 B/s wr, 0 op/s
Oct 11 02:44:27 compute-0 nova_compute[356901]: 2025-10-11 02:44:27.257 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:44:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3130484644' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:44:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:44:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3130484644' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:44:28 compute-0 ceph-mon[191930]: pgmap v2006: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 767 B/s wr, 0 op/s
Oct 11 02:44:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3130484644' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:44:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3130484644' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:44:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2007: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 767 B/s wr, 0 op/s
Oct 11 02:44:29 compute-0 podman[157119]: time="2025-10-11T02:44:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:44:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:44:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:44:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:44:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9535 "" "Go-http-client/1.1"
Oct 11 02:44:30 compute-0 ceph-mon[191930]: pgmap v2007: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 767 B/s wr, 0 op/s
Oct 11 02:44:30 compute-0 nova_compute[356901]: 2025-10-11 02:44:30.227 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2008: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 767 B/s wr, 0 op/s
Oct 11 02:44:31 compute-0 podman[462760]: 2025-10-11 02:44:31.237887248 +0000 UTC m=+0.118916961 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:44:31 compute-0 podman[462763]: 2025-10-11 02:44:31.255428129 +0000 UTC m=+0.114895730 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009)
Oct 11 02:44:31 compute-0 podman[462762]: 2025-10-11 02:44:31.27578807 +0000 UTC m=+0.145767466 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:44:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:44:31 compute-0 podman[462761]: 2025-10-11 02:44:31.320888283 +0000 UTC m=+0.190116973 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, container_name=ovn_controller, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, io.buildah.version=1.41.3)
Oct 11 02:44:31 compute-0 openstack_network_exporter[374316]: ERROR   02:44:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:44:31 compute-0 openstack_network_exporter[374316]: ERROR   02:44:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:44:31 compute-0 openstack_network_exporter[374316]: ERROR   02:44:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:44:31 compute-0 openstack_network_exporter[374316]: ERROR   02:44:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:44:31 compute-0 openstack_network_exporter[374316]: ERROR   02:44:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:44:32 compute-0 ceph-mon[191930]: pgmap v2008: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 767 B/s wr, 0 op/s
Oct 11 02:44:32 compute-0 nova_compute[356901]: 2025-10-11 02:44:32.261 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2009: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 767 B/s wr, 0 op/s
Oct 11 02:44:34 compute-0 ceph-mon[191930]: pgmap v2009: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 767 B/s wr, 0 op/s
Oct 11 02:44:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2010: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 682 B/s wr, 0 op/s
Oct 11 02:44:35 compute-0 nova_compute[356901]: 2025-10-11 02:44:35.228 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:36 compute-0 ceph-mon[191930]: pgmap v2010: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 682 B/s wr, 0 op/s
Oct 11 02:44:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:44:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2011: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 682 B/s wr, 0 op/s
Oct 11 02:44:37 compute-0 nova_compute[356901]: 2025-10-11 02:44:37.266 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:38 compute-0 ceph-mon[191930]: pgmap v2011: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 682 B/s wr, 0 op/s
Oct 11 02:44:38 compute-0 podman[462844]: 2025-10-11 02:44:38.381429101 +0000 UTC m=+0.099065264 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=multipathd, managed_by=edpm_ansible, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0)
Oct 11 02:44:38 compute-0 podman[462845]: 2025-10-11 02:44:38.399053379 +0000 UTC m=+0.112995040 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_id=iscsid, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=iscsid, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 02:44:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2012: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:44:40 compute-0 nova_compute[356901]: 2025-10-11 02:44:40.230 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:40 compute-0 ceph-mon[191930]: pgmap v2012: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:44:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2013: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:44:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:44:42 compute-0 ceph-mon[191930]: pgmap v2013: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:44:42 compute-0 nova_compute[356901]: 2025-10-11 02:44:42.272 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2014: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:44:44 compute-0 ceph-mon[191930]: pgmap v2014: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:44:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2015: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:44:45 compute-0 nova_compute[356901]: 2025-10-11 02:44:45.233 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:46 compute-0 ceph-mon[191930]: pgmap v2015: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:44:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:44:46 compute-0 sudo[462883]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:44:46 compute-0 sudo[462883]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:46 compute-0 sudo[462883]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:46 compute-0 sudo[462926]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:44:46 compute-0 sudo[462926]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:46 compute-0 podman[462908]: 2025-10-11 02:44:46.563518849 +0000 UTC m=+0.117567485 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.33.7, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., build-date=2025-08-20T13:12:41, com.redhat.component=ubi9-minimal-container, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., release=1755695350, version=9.6, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, managed_by=edpm_ansible, vcs-type=git, container_name=openstack_network_exporter, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, architecture=x86_64, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, url=https://catalog.redhat.com/en/search?searchType=containers, distribution-scope=public, io.openshift.expose-services=, vendor=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b)
Oct 11 02:44:46 compute-0 sudo[462926]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:46 compute-0 podman[462907]: 2025-10-11 02:44:46.569954933 +0000 UTC m=+0.132935834 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, config_id=edpm, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:44:46 compute-0 podman[462909]: 2025-10-11 02:44:46.5707006 +0000 UTC m=+0.111840927 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:44:46 compute-0 sudo[462992]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:44:46 compute-0 sudo[462992]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:46 compute-0 sudo[462992]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:46 compute-0 sudo[463018]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:44:46 compute-0 sudo[463018]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2016: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:44:47 compute-0 nova_compute[356901]: 2025-10-11 02:44:47.276 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #96. Immutable memtables: 0.
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:44:47.308488) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 55] Flushing memtable with next log file: 96
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150687308520, "job": 55, "event": "flush_started", "num_memtables": 1, "num_entries": 1861, "num_deletes": 251, "total_data_size": 3034959, "memory_usage": 3084744, "flush_reason": "Manual Compaction"}
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 55] Level-0 flush table #97: started
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150687328145, "cf_name": "default", "job": 55, "event": "table_file_creation", "file_number": 97, "file_size": 2983055, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 39834, "largest_seqno": 41694, "table_properties": {"data_size": 2974517, "index_size": 5288, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 2181, "raw_key_size": 17280, "raw_average_key_size": 20, "raw_value_size": 2957548, "raw_average_value_size": 3431, "num_data_blocks": 235, "num_entries": 862, "num_filter_entries": 862, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760150487, "oldest_key_time": 1760150487, "file_creation_time": 1760150687, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 97, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 55] Flush lasted 19715 microseconds, and 9042 cpu microseconds.
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:44:47.328201) [db/flush_job.cc:967] [default] [JOB 55] Level-0 flush table #97: 2983055 bytes OK
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:44:47.328222) [db/memtable_list.cc:519] [default] Level-0 commit table #97 started
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:44:47.329909) [db/memtable_list.cc:722] [default] Level-0 commit table #97: memtable #1 done
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:44:47.329924) EVENT_LOG_v1 {"time_micros": 1760150687329920, "job": 55, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:44:47.329942) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 55] Try to delete WAL files size 3027093, prev total WAL file size 3027093, number of live WAL files 2.
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000093.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:44:47.331323) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730033373635' seq:72057594037927935, type:22 .. '7061786F730034303137' seq:0, type:0; will stop at (end)
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 56] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 55 Base level 0, inputs: [97(2913KB)], [95(6372KB)]
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150687331375, "job": 56, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [97], "files_L6": [95], "score": -1, "input_data_size": 9508484, "oldest_snapshot_seqno": -1}
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 56] Generated table #98: 5758 keys, 7769343 bytes, temperature: kUnknown
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150687373951, "cf_name": "default", "job": 56, "event": "table_file_creation", "file_number": 98, "file_size": 7769343, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 7732856, "index_size": 21017, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 14405, "raw_key_size": 149159, "raw_average_key_size": 25, "raw_value_size": 7630570, "raw_average_value_size": 1325, "num_data_blocks": 836, "num_entries": 5758, "num_filter_entries": 5758, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760150687, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 98, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:44:47.374128) [db/compaction/compaction_job.cc:1663] [default] [JOB 56] Compacted 1@0 + 1@6 files to L6 => 7769343 bytes
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:44:47.375673) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 223.1 rd, 182.3 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(2.8, 6.2 +0.0 blob) out(7.4 +0.0 blob), read-write-amplify(5.8) write-amplify(2.6) OK, records in: 6276, records dropped: 518 output_compression: NoCompression
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:44:47.375688) EVENT_LOG_v1 {"time_micros": 1760150687375681, "job": 56, "event": "compaction_finished", "compaction_time_micros": 42628, "compaction_time_cpu_micros": 25688, "output_level": 6, "num_output_files": 1, "total_output_size": 7769343, "num_input_records": 6276, "num_output_records": 5758, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000097.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150687377175, "job": 56, "event": "table_file_deletion", "file_number": 97}
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000095.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150687378536, "job": 56, "event": "table_file_deletion", "file_number": 95}
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:44:47.331169) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:44:47.378680) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:44:47.378684) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:44:47.378685) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:44:47.378687) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:44:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:44:47.378689) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:44:47 compute-0 sudo[463018]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:47 compute-0 sudo[463076]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:44:47 compute-0 sudo[463076]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:47 compute-0 sudo[463076]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:47 compute-0 sudo[463101]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:44:47 compute-0 sudo[463101]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:47 compute-0 sudo[463101]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:47 compute-0 sudo[463126]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:44:47 compute-0 sudo[463126]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:47 compute-0 sudo[463126]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:47 compute-0 sudo[463151]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- inventory --format=json-pretty --filter-for-batch
Oct 11 02:44:47 compute-0 sudo[463151]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:48 compute-0 ceph-mon[191930]: pgmap v2016: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:44:48 compute-0 podman[463216]: 2025-10-11 02:44:48.593463093 +0000 UTC m=+0.081096987 container create 5a39eb9aececaa677ada3400ebbe6131c4118e37ebe8db101ca8b6402267f77a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_mahavira, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:44:48 compute-0 podman[463216]: 2025-10-11 02:44:48.556941546 +0000 UTC m=+0.044575440 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:44:48 compute-0 systemd[1]: Started libpod-conmon-5a39eb9aececaa677ada3400ebbe6131c4118e37ebe8db101ca8b6402267f77a.scope.
Oct 11 02:44:48 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:44:48 compute-0 podman[463216]: 2025-10-11 02:44:48.733285247 +0000 UTC m=+0.220919111 container init 5a39eb9aececaa677ada3400ebbe6131c4118e37ebe8db101ca8b6402267f77a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_mahavira, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:44:48 compute-0 podman[463216]: 2025-10-11 02:44:48.753824353 +0000 UTC m=+0.241458207 container start 5a39eb9aececaa677ada3400ebbe6131c4118e37ebe8db101ca8b6402267f77a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_mahavira, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:44:48 compute-0 podman[463216]: 2025-10-11 02:44:48.758646799 +0000 UTC m=+0.246280673 container attach 5a39eb9aececaa677ada3400ebbe6131c4118e37ebe8db101ca8b6402267f77a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_mahavira, CEPH_REF=reef, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:44:48 compute-0 confident_mahavira[463233]: 167 167
Oct 11 02:44:48 compute-0 systemd[1]: libpod-5a39eb9aececaa677ada3400ebbe6131c4118e37ebe8db101ca8b6402267f77a.scope: Deactivated successfully.
Oct 11 02:44:48 compute-0 podman[463216]: 2025-10-11 02:44:48.768204314 +0000 UTC m=+0.255838218 container died 5a39eb9aececaa677ada3400ebbe6131c4118e37ebe8db101ca8b6402267f77a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_mahavira, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, OSD_FLAVOR=default)
Oct 11 02:44:48 compute-0 systemd[1]: var-lib-containers-storage-overlay-4f1f3c737f8e4b4c2f5ae37861ff36ca6a35009e8f407f9fabc187d465133820-merged.mount: Deactivated successfully.
Oct 11 02:44:48 compute-0 podman[463216]: 2025-10-11 02:44:48.845447833 +0000 UTC m=+0.333081727 container remove 5a39eb9aececaa677ada3400ebbe6131c4118e37ebe8db101ca8b6402267f77a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=confident_mahavira, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 02:44:48 compute-0 systemd[1]: libpod-conmon-5a39eb9aececaa677ada3400ebbe6131c4118e37ebe8db101ca8b6402267f77a.scope: Deactivated successfully.
Oct 11 02:44:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2017: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:44:49 compute-0 podman[463256]: 2025-10-11 02:44:49.09972678 +0000 UTC m=+0.065753606 container create 2489a9627957abef945d3616adac8549ad1525d4183f7eb1b0d53b780ca15953 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_cori, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:44:49 compute-0 podman[463256]: 2025-10-11 02:44:49.068699289 +0000 UTC m=+0.034726115 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:44:49 compute-0 systemd[1]: Started libpod-conmon-2489a9627957abef945d3616adac8549ad1525d4183f7eb1b0d53b780ca15953.scope.
Oct 11 02:44:49 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:44:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/467d4cd3f3f60f5271e030344e7837fa655a667f6a7211093af2900a66e6aaad/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:44:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/467d4cd3f3f60f5271e030344e7837fa655a667f6a7211093af2900a66e6aaad/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:44:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/467d4cd3f3f60f5271e030344e7837fa655a667f6a7211093af2900a66e6aaad/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:44:49 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/467d4cd3f3f60f5271e030344e7837fa655a667f6a7211093af2900a66e6aaad/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:44:49 compute-0 podman[463256]: 2025-10-11 02:44:49.245469826 +0000 UTC m=+0.211496642 container init 2489a9627957abef945d3616adac8549ad1525d4183f7eb1b0d53b780ca15953 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_cori, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:44:49 compute-0 podman[463256]: 2025-10-11 02:44:49.264726272 +0000 UTC m=+0.230753068 container start 2489a9627957abef945d3616adac8549ad1525d4183f7eb1b0d53b780ca15953 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_cori, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:44:49 compute-0 podman[463256]: 2025-10-11 02:44:49.269371294 +0000 UTC m=+0.235398090 container attach 2489a9627957abef945d3616adac8549ad1525d4183f7eb1b0d53b780ca15953 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_cori, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:44:50 compute-0 nova_compute[356901]: 2025-10-11 02:44:50.239 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:50 compute-0 ceph-mon[191930]: pgmap v2017: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:44:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2018: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 2.1 KiB/s rd, 0 B/s wr, 3 op/s
Oct 11 02:44:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:44:51 compute-0 sweet_cori[463271]: [
Oct 11 02:44:51 compute-0 sweet_cori[463271]:     {
Oct 11 02:44:51 compute-0 sweet_cori[463271]:         "available": false,
Oct 11 02:44:51 compute-0 sweet_cori[463271]:         "ceph_device": false,
Oct 11 02:44:51 compute-0 sweet_cori[463271]:         "device_id": "QEMU_DVD-ROM_QM00001",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:         "lsm_data": {},
Oct 11 02:44:51 compute-0 sweet_cori[463271]:         "lvs": [],
Oct 11 02:44:51 compute-0 sweet_cori[463271]:         "path": "/dev/sr0",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:         "rejected_reasons": [
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "Has a FileSystem",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "Insufficient space (<5GB)"
Oct 11 02:44:51 compute-0 sweet_cori[463271]:         ],
Oct 11 02:44:51 compute-0 sweet_cori[463271]:         "sys_api": {
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "actuators": null,
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "device_nodes": "sr0",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "devname": "sr0",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "human_readable_size": "482.00 KB",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "id_bus": "ata",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "model": "QEMU DVD-ROM",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "nr_requests": "2",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "parent": "/dev/sr0",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "partitions": {},
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "path": "/dev/sr0",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "removable": "1",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "rev": "2.5+",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "ro": "0",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "rotational": "0",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "sas_address": "",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "sas_device_handle": "",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "scheduler_mode": "mq-deadline",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "sectors": 0,
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "sectorsize": "2048",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "size": 493568.0,
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "support_discard": "2048",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "type": "disk",
Oct 11 02:44:51 compute-0 sweet_cori[463271]:             "vendor": "QEMU"
Oct 11 02:44:51 compute-0 sweet_cori[463271]:         }
Oct 11 02:44:51 compute-0 sweet_cori[463271]:     }
Oct 11 02:44:51 compute-0 sweet_cori[463271]: ]
Oct 11 02:44:51 compute-0 systemd[1]: libpod-2489a9627957abef945d3616adac8549ad1525d4183f7eb1b0d53b780ca15953.scope: Deactivated successfully.
Oct 11 02:44:51 compute-0 systemd[1]: libpod-2489a9627957abef945d3616adac8549ad1525d4183f7eb1b0d53b780ca15953.scope: Consumed 2.644s CPU time.
Oct 11 02:44:51 compute-0 podman[465740]: 2025-10-11 02:44:51.989021699 +0000 UTC m=+0.060253831 container died 2489a9627957abef945d3616adac8549ad1525d4183f7eb1b0d53b780ca15953 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_cori, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 02:44:52 compute-0 systemd[1]: var-lib-containers-storage-overlay-467d4cd3f3f60f5271e030344e7837fa655a667f6a7211093af2900a66e6aaad-merged.mount: Deactivated successfully.
Oct 11 02:44:52 compute-0 podman[465740]: 2025-10-11 02:44:52.062067812 +0000 UTC m=+0.133299904 container remove 2489a9627957abef945d3616adac8549ad1525d4183f7eb1b0d53b780ca15953 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_cori, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:44:52 compute-0 systemd[1]: libpod-conmon-2489a9627957abef945d3616adac8549ad1525d4183f7eb1b0d53b780ca15953.scope: Deactivated successfully.
Oct 11 02:44:52 compute-0 sudo[463151]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:44:52 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:44:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:44:52 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:44:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:44:52 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:44:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:44:52 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:44:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:44:52 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:44:52 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 5ffef22c-1ae9-46c3-b636-a169b0fe53d9 does not exist
Oct 11 02:44:52 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev a48e9185-a773-4933-82a5-76061d57df39 does not exist
Oct 11 02:44:52 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev f40d23be-21e5-4808-9fc9-8979ad398f84 does not exist
Oct 11 02:44:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:44:52 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:44:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:44:52 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:44:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:44:52 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:44:52 compute-0 nova_compute[356901]: 2025-10-11 02:44:52.285 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:52 compute-0 sudo[465753]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:44:52 compute-0 sudo[465753]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:52 compute-0 sudo[465753]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:52 compute-0 ceph-mon[191930]: pgmap v2018: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 2.1 KiB/s rd, 0 B/s wr, 3 op/s
Oct 11 02:44:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:44:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:44:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:44:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:44:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:44:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:44:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:44:52 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:44:52 compute-0 sudo[465778]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:44:52 compute-0 sudo[465778]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:52 compute-0 sudo[465778]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:52 compute-0 sudo[465803]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:44:52 compute-0 sudo[465803]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:52 compute-0 sudo[465803]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:52 compute-0 sudo[465828]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:44:52 compute-0 sudo[465828]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2019: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 10 KiB/s rd, 0 B/s wr, 17 op/s
Oct 11 02:44:53 compute-0 podman[465892]: 2025-10-11 02:44:53.122517258 +0000 UTC m=+0.062403382 container create 39b8cbaab9fd49b6d7a2a6e76dc44412cebdcec6eaa9f50d682cd408f16ac37a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_grothendieck, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:44:53 compute-0 systemd[1]: Started libpod-conmon-39b8cbaab9fd49b6d7a2a6e76dc44412cebdcec6eaa9f50d682cd408f16ac37a.scope.
Oct 11 02:44:53 compute-0 podman[465892]: 2025-10-11 02:44:53.098407037 +0000 UTC m=+0.038293181 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:44:53 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:44:53 compute-0 podman[465892]: 2025-10-11 02:44:53.251112341 +0000 UTC m=+0.190998555 container init 39b8cbaab9fd49b6d7a2a6e76dc44412cebdcec6eaa9f50d682cd408f16ac37a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_grothendieck, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3)
Oct 11 02:44:53 compute-0 podman[465892]: 2025-10-11 02:44:53.265785483 +0000 UTC m=+0.205671597 container start 39b8cbaab9fd49b6d7a2a6e76dc44412cebdcec6eaa9f50d682cd408f16ac37a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_grothendieck, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 02:44:53 compute-0 systemd[1]: libpod-39b8cbaab9fd49b6d7a2a6e76dc44412cebdcec6eaa9f50d682cd408f16ac37a.scope: Deactivated successfully.
Oct 11 02:44:53 compute-0 inspiring_grothendieck[465908]: 167 167
Oct 11 02:44:53 compute-0 conmon[465908]: conmon 39b8cbaab9fd49b6d7a2 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-39b8cbaab9fd49b6d7a2a6e76dc44412cebdcec6eaa9f50d682cd408f16ac37a.scope/container/memory.events
Oct 11 02:44:53 compute-0 podman[465892]: 2025-10-11 02:44:53.278013753 +0000 UTC m=+0.217899887 container attach 39b8cbaab9fd49b6d7a2a6e76dc44412cebdcec6eaa9f50d682cd408f16ac37a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_grothendieck, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:44:53 compute-0 podman[465913]: 2025-10-11 02:44:53.357394314 +0000 UTC m=+0.053690690 container died 39b8cbaab9fd49b6d7a2a6e76dc44412cebdcec6eaa9f50d682cd408f16ac37a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_grothendieck, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507)
Oct 11 02:44:53 compute-0 systemd[1]: var-lib-containers-storage-overlay-549858a9eb15b45ff0d6a878081f2bb6ef506735d9cd18067362d93f6112bacd-merged.mount: Deactivated successfully.
Oct 11 02:44:53 compute-0 podman[465913]: 2025-10-11 02:44:53.409831553 +0000 UTC m=+0.106127919 container remove 39b8cbaab9fd49b6d7a2a6e76dc44412cebdcec6eaa9f50d682cd408f16ac37a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_grothendieck, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2)
Oct 11 02:44:53 compute-0 systemd[1]: libpod-conmon-39b8cbaab9fd49b6d7a2a6e76dc44412cebdcec6eaa9f50d682cd408f16ac37a.scope: Deactivated successfully.
Oct 11 02:44:53 compute-0 podman[465933]: 2025-10-11 02:44:53.690197159 +0000 UTC m=+0.072444543 container create 8f2aa3eadb420fb269b6decf04502449da3e124ff6ba1196003a08e4ed475e1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_elion, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:44:53 compute-0 systemd[1]: Started libpod-conmon-8f2aa3eadb420fb269b6decf04502449da3e124ff6ba1196003a08e4ed475e1b.scope.
Oct 11 02:44:53 compute-0 podman[465933]: 2025-10-11 02:44:53.661803034 +0000 UTC m=+0.044050418 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:44:53 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:44:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/04dfc5850a4cbbcbcbe209ab989f4ba7c20ae6209826c1f3d5a7ae8f3d8dce28/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:44:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/04dfc5850a4cbbcbcbe209ab989f4ba7c20ae6209826c1f3d5a7ae8f3d8dce28/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:44:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/04dfc5850a4cbbcbcbe209ab989f4ba7c20ae6209826c1f3d5a7ae8f3d8dce28/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:44:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/04dfc5850a4cbbcbcbe209ab989f4ba7c20ae6209826c1f3d5a7ae8f3d8dce28/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:44:53 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/04dfc5850a4cbbcbcbe209ab989f4ba7c20ae6209826c1f3d5a7ae8f3d8dce28/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:44:53 compute-0 podman[465933]: 2025-10-11 02:44:53.827067607 +0000 UTC m=+0.209314971 container init 8f2aa3eadb420fb269b6decf04502449da3e124ff6ba1196003a08e4ed475e1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_elion, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 02:44:53 compute-0 podman[465933]: 2025-10-11 02:44:53.850735413 +0000 UTC m=+0.232982777 container start 8f2aa3eadb420fb269b6decf04502449da3e124ff6ba1196003a08e4ed475e1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_elion, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:44:53 compute-0 podman[465933]: 2025-10-11 02:44:53.855183021 +0000 UTC m=+0.237430405 container attach 8f2aa3eadb420fb269b6decf04502449da3e124ff6ba1196003a08e4ed475e1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_elion, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3)
Oct 11 02:44:54 compute-0 ceph-mon[191930]: pgmap v2019: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 10 KiB/s rd, 0 B/s wr, 17 op/s
Oct 11 02:44:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:44:54.874 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:44:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:44:54.877 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.003s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:44:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:44:54.878 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:44:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2020: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 24 KiB/s rd, 2.3 KiB/s wr, 41 op/s
Oct 11 02:44:55 compute-0 eager_elion[465947]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:44:55 compute-0 eager_elion[465947]: --> relative data size: 1.0
Oct 11 02:44:55 compute-0 eager_elion[465947]: --> All data devices are unavailable
Oct 11 02:44:55 compute-0 systemd[1]: libpod-8f2aa3eadb420fb269b6decf04502449da3e124ff6ba1196003a08e4ed475e1b.scope: Deactivated successfully.
Oct 11 02:44:55 compute-0 systemd[1]: libpod-8f2aa3eadb420fb269b6decf04502449da3e124ff6ba1196003a08e4ed475e1b.scope: Consumed 1.235s CPU time.
Oct 11 02:44:55 compute-0 podman[465933]: 2025-10-11 02:44:55.156501054 +0000 UTC m=+1.538748498 container died 8f2aa3eadb420fb269b6decf04502449da3e124ff6ba1196003a08e4ed475e1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_elion, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:44:55 compute-0 systemd[1]: var-lib-containers-storage-overlay-04dfc5850a4cbbcbcbe209ab989f4ba7c20ae6209826c1f3d5a7ae8f3d8dce28-merged.mount: Deactivated successfully.
Oct 11 02:44:55 compute-0 nova_compute[356901]: 2025-10-11 02:44:55.243 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:55 compute-0 podman[465977]: 2025-10-11 02:44:55.265378984 +0000 UTC m=+0.147603757 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.component=ubi9-container, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, release=1214.1726694543, io.buildah.version=1.29.0, io.k8s.display-name=Red Hat Universal Base Image 9, name=ubi9, summary=Provides the latest release of Red Hat Universal Base Image 9., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.4, distribution-scope=public, managed_by=edpm_ansible, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, release-0.7.12=, io.openshift.tags=base rhel9, architecture=x86_64, build-date=2024-09-18T21:23:30, container_name=kepler, maintainer=Red Hat, Inc., vcs-type=git, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, config_id=edpm, io.openshift.expose-services=)
Oct 11 02:44:55 compute-0 podman[465933]: 2025-10-11 02:44:55.276544308 +0000 UTC m=+1.658791672 container remove 8f2aa3eadb420fb269b6decf04502449da3e124ff6ba1196003a08e4ed475e1b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=eager_elion, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:44:55 compute-0 systemd[1]: libpod-conmon-8f2aa3eadb420fb269b6decf04502449da3e124ff6ba1196003a08e4ed475e1b.scope: Deactivated successfully.
Oct 11 02:44:55 compute-0 sudo[465828]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:55 compute-0 sudo[466008]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:44:55 compute-0 sudo[466008]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:55 compute-0 sudo[466008]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:55 compute-0 sudo[466033]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:44:55 compute-0 sudo[466033]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:55 compute-0 sudo[466033]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:55 compute-0 sudo[466058]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:44:55 compute-0 sudo[466058]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:55 compute-0 sudo[466058]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:55 compute-0 sudo[466083]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:44:55 compute-0 sudo[466083]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:56 compute-0 podman[466150]: 2025-10-11 02:44:56.283464885 +0000 UTC m=+0.076301005 container create 296e3d965ee75e59c93c3b01a8006ca0c515e3b96b4e62bfef3e3b0ceed3bfbd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_hofstadter, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:44:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:44:56 compute-0 systemd[1]: Started libpod-conmon-296e3d965ee75e59c93c3b01a8006ca0c515e3b96b4e62bfef3e3b0ceed3bfbd.scope.
Oct 11 02:44:56 compute-0 podman[466150]: 2025-10-11 02:44:56.251629246 +0000 UTC m=+0.044465446 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:44:56 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:44:56 compute-0 ceph-mon[191930]: pgmap v2020: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 24 KiB/s rd, 2.3 KiB/s wr, 41 op/s
Oct 11 02:44:56 compute-0 podman[466150]: 2025-10-11 02:44:56.398723173 +0000 UTC m=+0.191559383 container init 296e3d965ee75e59c93c3b01a8006ca0c515e3b96b4e62bfef3e3b0ceed3bfbd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_hofstadter, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:44:56 compute-0 podman[466150]: 2025-10-11 02:44:56.416543474 +0000 UTC m=+0.209379614 container start 296e3d965ee75e59c93c3b01a8006ca0c515e3b96b4e62bfef3e3b0ceed3bfbd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_hofstadter, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 02:44:56 compute-0 podman[466150]: 2025-10-11 02:44:56.422636043 +0000 UTC m=+0.215472263 container attach 296e3d965ee75e59c93c3b01a8006ca0c515e3b96b4e62bfef3e3b0ceed3bfbd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_hofstadter, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:44:56 compute-0 systemd[1]: libpod-296e3d965ee75e59c93c3b01a8006ca0c515e3b96b4e62bfef3e3b0ceed3bfbd.scope: Deactivated successfully.
Oct 11 02:44:56 compute-0 inspiring_hofstadter[466166]: 167 167
Oct 11 02:44:56 compute-0 conmon[466166]: conmon 296e3d965ee75e59c93c <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-296e3d965ee75e59c93c3b01a8006ca0c515e3b96b4e62bfef3e3b0ceed3bfbd.scope/container/memory.events
Oct 11 02:44:56 compute-0 podman[466150]: 2025-10-11 02:44:56.42614126 +0000 UTC m=+0.218977410 container died 296e3d965ee75e59c93c3b01a8006ca0c515e3b96b4e62bfef3e3b0ceed3bfbd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_hofstadter, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507)
Oct 11 02:44:56 compute-0 systemd[1]: var-lib-containers-storage-overlay-1fb84a8c35d9a2293100c3d5d9f4c5b9fb0f954b725dabeba5445a85911b6d80-merged.mount: Deactivated successfully.
Oct 11 02:44:56 compute-0 podman[466150]: 2025-10-11 02:44:56.48450633 +0000 UTC m=+0.277342450 container remove 296e3d965ee75e59c93c3b01a8006ca0c515e3b96b4e62bfef3e3b0ceed3bfbd (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=inspiring_hofstadter, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 02:44:56 compute-0 systemd[1]: libpod-conmon-296e3d965ee75e59c93c3b01a8006ca0c515e3b96b4e62bfef3e3b0ceed3bfbd.scope: Deactivated successfully.
Oct 11 02:44:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:44:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:44:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:44:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:44:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:44:56
Oct 11 02:44:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:44:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:44:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['backups', 'vms', 'volumes', 'default.rgw.control', '.rgw.root', 'default.rgw.meta', 'default.rgw.log', 'cephfs.cephfs.data', 'cephfs.cephfs.meta', 'images', '.mgr']
Oct 11 02:44:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:44:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:44:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:44:56 compute-0 podman[466187]: 2025-10-11 02:44:56.793330454 +0000 UTC m=+0.091448702 container create adf0e8473c7c83fe8e9eaf6ce2255db06d42ae54c3aeec5c13b6638849307ffc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_sinoussi, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:44:56 compute-0 podman[466187]: 2025-10-11 02:44:56.75812794 +0000 UTC m=+0.056246208 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:44:56 compute-0 systemd[1]: Started libpod-conmon-adf0e8473c7c83fe8e9eaf6ce2255db06d42ae54c3aeec5c13b6638849307ffc.scope.
Oct 11 02:44:56 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:44:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2021: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 2.3 KiB/s wr, 60 op/s
Oct 11 02:44:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/863dd8483022fb6b95a5af62caa60f41e9032a263617bf576f4c77641809df82/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:44:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/863dd8483022fb6b95a5af62caa60f41e9032a263617bf576f4c77641809df82/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:44:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/863dd8483022fb6b95a5af62caa60f41e9032a263617bf576f4c77641809df82/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:44:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/863dd8483022fb6b95a5af62caa60f41e9032a263617bf576f4c77641809df82/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:44:56 compute-0 podman[466187]: 2025-10-11 02:44:56.968679561 +0000 UTC m=+0.266797829 container init adf0e8473c7c83fe8e9eaf6ce2255db06d42ae54c3aeec5c13b6638849307ffc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_sinoussi, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:44:56 compute-0 podman[466187]: 2025-10-11 02:44:56.981342208 +0000 UTC m=+0.279460456 container start adf0e8473c7c83fe8e9eaf6ce2255db06d42ae54c3aeec5c13b6638849307ffc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_sinoussi, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:44:56 compute-0 podman[466187]: 2025-10-11 02:44:56.98514281 +0000 UTC m=+0.283261078 container attach adf0e8473c7c83fe8e9eaf6ce2255db06d42ae54c3aeec5c13b6638849307ffc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_sinoussi, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:44:57 compute-0 nova_compute[356901]: 2025-10-11 02:44:57.288 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:44:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:44:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:44:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:44:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:44:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:44:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:44:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:44:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:44:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:44:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]: {
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:     "0": [
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:         {
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "devices": [
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "/dev/loop3"
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             ],
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "lv_name": "ceph_lv0",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "lv_size": "21470642176",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "name": "ceph_lv0",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "tags": {
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.cluster_name": "ceph",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.crush_device_class": "",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.encrypted": "0",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.osd_id": "0",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.type": "block",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.vdo": "0"
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             },
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "type": "block",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "vg_name": "ceph_vg0"
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:         }
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:     ],
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:     "1": [
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:         {
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "devices": [
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "/dev/loop4"
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             ],
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "lv_name": "ceph_lv1",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "lv_size": "21470642176",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "name": "ceph_lv1",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "tags": {
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.cluster_name": "ceph",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.crush_device_class": "",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.encrypted": "0",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.osd_id": "1",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.type": "block",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.vdo": "0"
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             },
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "type": "block",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "vg_name": "ceph_vg1"
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:         }
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:     ],
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:     "2": [
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:         {
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "devices": [
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "/dev/loop5"
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             ],
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "lv_name": "ceph_lv2",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "lv_size": "21470642176",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "name": "ceph_lv2",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "tags": {
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.cluster_name": "ceph",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.crush_device_class": "",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.encrypted": "0",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.osd_id": "2",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.type": "block",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:                 "ceph.vdo": "0"
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             },
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "type": "block",
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:             "vg_name": "ceph_vg2"
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:         }
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]:     ]
Oct 11 02:44:57 compute-0 fervent_sinoussi[466203]: }
Oct 11 02:44:57 compute-0 systemd[1]: libpod-adf0e8473c7c83fe8e9eaf6ce2255db06d42ae54c3aeec5c13b6638849307ffc.scope: Deactivated successfully.
Oct 11 02:44:57 compute-0 podman[466187]: 2025-10-11 02:44:57.943455459 +0000 UTC m=+1.241573727 container died adf0e8473c7c83fe8e9eaf6ce2255db06d42ae54c3aeec5c13b6638849307ffc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_sinoussi, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Oct 11 02:44:57 compute-0 systemd[1]: var-lib-containers-storage-overlay-863dd8483022fb6b95a5af62caa60f41e9032a263617bf576f4c77641809df82-merged.mount: Deactivated successfully.
Oct 11 02:44:58 compute-0 podman[466187]: 2025-10-11 02:44:58.013921727 +0000 UTC m=+1.312039975 container remove adf0e8473c7c83fe8e9eaf6ce2255db06d42ae54c3aeec5c13b6638849307ffc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_sinoussi, CEPH_REF=reef, ceph=True, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 02:44:58 compute-0 systemd[1]: libpod-conmon-adf0e8473c7c83fe8e9eaf6ce2255db06d42ae54c3aeec5c13b6638849307ffc.scope: Deactivated successfully.
Oct 11 02:44:58 compute-0 sudo[466083]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:58 compute-0 sudo[466223]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:44:58 compute-0 sudo[466223]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:58 compute-0 sudo[466223]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:58 compute-0 sudo[466248]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:44:58 compute-0 sudo[466248]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:58 compute-0 sudo[466248]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:58 compute-0 ceph-mon[191930]: pgmap v2021: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 2.3 KiB/s wr, 60 op/s
Oct 11 02:44:58 compute-0 sudo[466273]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:44:58 compute-0 sudo[466273]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:58 compute-0 sudo[466273]: pam_unix(sudo:session): session closed for user root
Oct 11 02:44:58 compute-0 sudo[466298]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:44:58 compute-0 sudo[466298]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:44:58 compute-0 nova_compute[356901]: 2025-10-11 02:44:58.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:44:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2022: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 44 KiB/s rd, 2.3 KiB/s wr, 73 op/s
Oct 11 02:44:59 compute-0 podman[466363]: 2025-10-11 02:44:59.014540995 +0000 UTC m=+0.080743157 container create 242bc587dc5f4ca0f28dcd971d01d77480602ea477271a71179ebb7fca2f9a60 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_mclaren, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:44:59 compute-0 podman[466363]: 2025-10-11 02:44:58.970140112 +0000 UTC m=+0.036342254 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:44:59 compute-0 systemd[1]: Started libpod-conmon-242bc587dc5f4ca0f28dcd971d01d77480602ea477271a71179ebb7fca2f9a60.scope.
Oct 11 02:44:59 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:44:59 compute-0 podman[466363]: 2025-10-11 02:44:59.203306982 +0000 UTC m=+0.269509194 container init 242bc587dc5f4ca0f28dcd971d01d77480602ea477271a71179ebb7fca2f9a60 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_mclaren, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:44:59 compute-0 podman[466363]: 2025-10-11 02:44:59.227464186 +0000 UTC m=+0.293666338 container start 242bc587dc5f4ca0f28dcd971d01d77480602ea477271a71179ebb7fca2f9a60 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_mclaren, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2)
Oct 11 02:44:59 compute-0 podman[466363]: 2025-10-11 02:44:59.234873926 +0000 UTC m=+0.301076078 container attach 242bc587dc5f4ca0f28dcd971d01d77480602ea477271a71179ebb7fca2f9a60 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_mclaren, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:44:59 compute-0 sharp_mclaren[466378]: 167 167
Oct 11 02:44:59 compute-0 systemd[1]: libpod-242bc587dc5f4ca0f28dcd971d01d77480602ea477271a71179ebb7fca2f9a60.scope: Deactivated successfully.
Oct 11 02:44:59 compute-0 podman[466363]: 2025-10-11 02:44:59.24185487 +0000 UTC m=+0.308057062 container died 242bc587dc5f4ca0f28dcd971d01d77480602ea477271a71179ebb7fca2f9a60 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_mclaren, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0)
Oct 11 02:44:59 compute-0 systemd[1]: var-lib-containers-storage-overlay-44fc16fce5db48d3b7e4c77da5c9bb868fdaee7310b6d11539588b8407e1ae3e-merged.mount: Deactivated successfully.
Oct 11 02:44:59 compute-0 podman[466363]: 2025-10-11 02:44:59.332361205 +0000 UTC m=+0.398563337 container remove 242bc587dc5f4ca0f28dcd971d01d77480602ea477271a71179ebb7fca2f9a60 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_mclaren, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
Oct 11 02:44:59 compute-0 systemd[1]: libpod-conmon-242bc587dc5f4ca0f28dcd971d01d77480602ea477271a71179ebb7fca2f9a60.scope: Deactivated successfully.
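
That short-lived container (sharp_mclaren) ran the full podman lifecycle — create, init, start, attach, died, remove — in roughly 300 ms, and its only output was "167 167", which matches the ceph uid/gid shipped in upstream ceph images; this is most likely cephadm probing file ownership before the real ceph-volume run. A hedged way to watch these helper containers come and go, with the image filter value copied from this log:

    import subprocess

    def watch_ceph_helpers():
        # `podman events` streams lifecycle events as they happen;
        # --filter image=... narrows to the pinned ceph image above.
        proc = subprocess.Popen(
            ["podman", "events", "--filter",
             "image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad"
             "459fa19f1336506074267a4b47c1bd914a00fec0"],
            stdout=subprocess.PIPE, text=True)
        for line in proc.stdout:
            print(line.rstrip())
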
Oct 11 02:44:59 compute-0 podman[466402]: 2025-10-11 02:44:59.562048999 +0000 UTC m=+0.056054355 container create c4017fa3d2f9e169b9962063e57b7fe392701ed72b435b64d6179e5b025e9310 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_gates, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:44:59 compute-0 systemd[1]: Started libpod-conmon-c4017fa3d2f9e169b9962063e57b7fe392701ed72b435b64d6179e5b025e9310.scope.
Oct 11 02:44:59 compute-0 podman[466402]: 2025-10-11 02:44:59.539868957 +0000 UTC m=+0.033874323 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:44:59 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:44:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5d51d76b77f8d0e64651f0bd26dd5368477887dd639f71c6428a8726ef1f7c9e/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:44:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5d51d76b77f8d0e64651f0bd26dd5368477887dd639f71c6428a8726ef1f7c9e/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:44:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5d51d76b77f8d0e64651f0bd26dd5368477887dd639f71c6428a8726ef1f7c9e/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:44:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5d51d76b77f8d0e64651f0bd26dd5368477887dd639f71c6428a8726ef1f7c9e/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:44:59 compute-0 podman[466402]: 2025-10-11 02:44:59.696976618 +0000 UTC m=+0.190981984 container init c4017fa3d2f9e169b9962063e57b7fe392701ed72b435b64d6179e5b025e9310 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_gates, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:44:59 compute-0 podman[466402]: 2025-10-11 02:44:59.716952683 +0000 UTC m=+0.210958019 container start c4017fa3d2f9e169b9962063e57b7fe392701ed72b435b64d6179e5b025e9310 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_gates, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:44:59 compute-0 podman[466402]: 2025-10-11 02:44:59.721891154 +0000 UTC m=+0.215896520 container attach c4017fa3d2f9e169b9962063e57b7fe392701ed72b435b64d6179e5b025e9310 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_gates, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 02:44:59 compute-0 podman[157119]: time="2025-10-11T02:44:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:44:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:44:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 49069 "" "Go-http-client/1.1"
Oct 11 02:44:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:44:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9962 "" "Go-http-client/1.1"
Oct 11 02:45:00 compute-0 nova_compute[356901]: 2025-10-11 02:45:00.244 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:00 compute-0 ceph-mon[191930]: pgmap v2022: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 44 KiB/s rd, 2.3 KiB/s wr, 73 op/s
Oct 11 02:45:00 compute-0 festive_gates[466419]: {
Oct 11 02:45:00 compute-0 festive_gates[466419]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:45:00 compute-0 festive_gates[466419]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:45:00 compute-0 festive_gates[466419]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:45:00 compute-0 festive_gates[466419]:         "osd_id": 1,
Oct 11 02:45:00 compute-0 festive_gates[466419]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:45:00 compute-0 festive_gates[466419]:         "type": "bluestore"
Oct 11 02:45:00 compute-0 festive_gates[466419]:     },
Oct 11 02:45:00 compute-0 festive_gates[466419]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:45:00 compute-0 festive_gates[466419]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:45:00 compute-0 festive_gates[466419]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:45:00 compute-0 festive_gates[466419]:         "osd_id": 2,
Oct 11 02:45:00 compute-0 festive_gates[466419]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:45:00 compute-0 festive_gates[466419]:         "type": "bluestore"
Oct 11 02:45:00 compute-0 festive_gates[466419]:     },
Oct 11 02:45:00 compute-0 festive_gates[466419]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:45:00 compute-0 festive_gates[466419]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:45:00 compute-0 festive_gates[466419]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:45:00 compute-0 festive_gates[466419]:         "osd_id": 0,
Oct 11 02:45:00 compute-0 festive_gates[466419]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:45:00 compute-0 festive_gates[466419]:         "type": "bluestore"
Oct 11 02:45:00 compute-0 festive_gates[466419]:     }
Oct 11 02:45:00 compute-0 festive_gates[466419]: }
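
The JSON block that festive_gates just printed is the result of the `raw list` call: three bluestore OSDs on LVM devices, all tagged with this cluster's fsid. Mapping OSD ids to devices from that output — the structure is exactly as logged, and RAW_LIST_OUTPUT stands in for the captured text:

    import json

    raw = json.loads(RAW_LIST_OUTPUT)  # the {...} block above
    for osd in sorted(raw.values(), key=lambda o: o["osd_id"]):
        print(f"osd.{osd['osd_id']}: {osd['device']} ({osd['type']})")
    # Expected from this log:
    # osd.0: /dev/mapper/ceph_vg0-ceph_lv0 (bluestore)
    # osd.1: /dev/mapper/ceph_vg1-ceph_lv1 (bluestore)
    # osd.2: /dev/mapper/ceph_vg2-ceph_lv2 (bluestore)
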
Oct 11 02:45:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2023: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 44 KiB/s rd, 2.3 KiB/s wr, 73 op/s
Oct 11 02:45:00 compute-0 systemd[1]: libpod-c4017fa3d2f9e169b9962063e57b7fe392701ed72b435b64d6179e5b025e9310.scope: Deactivated successfully.
Oct 11 02:45:00 compute-0 systemd[1]: libpod-c4017fa3d2f9e169b9962063e57b7fe392701ed72b435b64d6179e5b025e9310.scope: Consumed 1.192s CPU time.
Oct 11 02:45:00 compute-0 podman[466402]: 2025-10-11 02:45:00.938583483 +0000 UTC m=+1.432588819 container died c4017fa3d2f9e169b9962063e57b7fe392701ed72b435b64d6179e5b025e9310 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_gates, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2)
Oct 11 02:45:00 compute-0 systemd[1]: var-lib-containers-storage-overlay-5d51d76b77f8d0e64651f0bd26dd5368477887dd639f71c6428a8726ef1f7c9e-merged.mount: Deactivated successfully.
Oct 11 02:45:01 compute-0 podman[466402]: 2025-10-11 02:45:01.024443142 +0000 UTC m=+1.518448488 container remove c4017fa3d2f9e169b9962063e57b7fe392701ed72b435b64d6179e5b025e9310 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_gates, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:45:01 compute-0 systemd[1]: libpod-conmon-c4017fa3d2f9e169b9962063e57b7fe392701ed72b435b64d6179e5b025e9310.scope: Deactivated successfully.
Oct 11 02:45:01 compute-0 sudo[466298]: pam_unix(sudo:session): session closed for user root
Oct 11 02:45:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:45:01 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:45:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:45:01 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:45:01 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 3ad721af-375c-4bb0-908e-b85b9a825c47 does not exist
Oct 11 02:45:01 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev abd44c6f-860f-4a2a-a9a7-04ff1aad25d8 does not exist
Oct 11 02:45:01 compute-0 sudo[466462]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:45:01 compute-0 sudo[466462]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:45:01 compute-0 sudo[466462]: pam_unix(sudo:session): session closed for user root
Oct 11 02:45:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:45:01 compute-0 sudo[466487]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:45:01 compute-0 sudo[466487]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:45:01 compute-0 sudo[466487]: pam_unix(sudo:session): session closed for user root
Oct 11 02:45:01 compute-0 openstack_network_exporter[374316]: ERROR   02:45:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:45:01 compute-0 openstack_network_exporter[374316]: ERROR   02:45:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:45:01 compute-0 openstack_network_exporter[374316]: ERROR   02:45:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:45:01 compute-0 openstack_network_exporter[374316]: ERROR   02:45:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:45:01 compute-0 openstack_network_exporter[374316]: ERROR   02:45:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:45:01 compute-0 podman[466513]: 2025-10-11 02:45:01.507415833 +0000 UTC m=+0.107231938 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, managed_by=edpm_ansible, org.label-schema.build-date=20251007, tcib_managed=true, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
Oct 11 02:45:01 compute-0 podman[466511]: 2025-10-11 02:45:01.508657244 +0000 UTC m=+0.118360300 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:45:01 compute-0 podman[466514]: 2025-10-11 02:45:01.537567645 +0000 UTC m=+0.133224772 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_id=ovn_metadata_agent, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_metadata_agent, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:45:01 compute-0 podman[466512]: 2025-10-11 02:45:01.569649178 +0000 UTC m=+0.182244141 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_controller)
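
The four health_status=healthy lines above are podman executing each container's configured healthcheck (the 'test': '/openstack/healthcheck' entries in the embedded config_data). The same checks can be run on demand; the container names below are the ones in this log, and the exit-code convention (0 healthy, nonzero unhealthy) is podman's:

    import subprocess

    for name in ("ceilometer_agent_compute", "podman_exporter",
                 "ovn_metadata_agent", "ovn_controller"):
        # `podman healthcheck run` executes the container's own
        # healthcheck command and reports the result via the exit code.
        rc = subprocess.run(["podman", "healthcheck", "run", name]).returncode
        print(name, "healthy" if rc == 0 else f"unhealthy (rc={rc})")
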
Oct 11 02:45:02 compute-0 ceph-mon[191930]: pgmap v2023: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 44 KiB/s rd, 2.3 KiB/s wr, 73 op/s
Oct 11 02:45:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:45:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:45:02 compute-0 nova_compute[356901]: 2025-10-11 02:45:02.299 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2024: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 42 KiB/s rd, 2.3 KiB/s wr, 69 op/s
Oct 11 02:45:04 compute-0 ceph-mon[191930]: pgmap v2024: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 42 KiB/s rd, 2.3 KiB/s wr, 69 op/s
Oct 11 02:45:04 compute-0 nova_compute[356901]: 2025-10-11 02:45:04.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:45:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2025: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 33 KiB/s rd, 2.3 KiB/s wr, 56 op/s
Oct 11 02:45:05 compute-0 nova_compute[356901]: 2025-10-11 02:45:05.248 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:06 compute-0 ceph-mon[191930]: pgmap v2025: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 33 KiB/s rd, 2.3 KiB/s wr, 56 op/s
Oct 11 02:45:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:45:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2026: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 0 B/s wr, 32 op/s
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0013088875390701309 of space, bias 1.0, pg target 0.39266626172103924 quantized to 32 (current 32)
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00125203744627857 of space, bias 1.0, pg target 0.375611233883571 quantized to 32 (current 32)
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:45:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
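
The pg_autoscaler numbers above are internally consistent: each pool's pg target is its share of used space times its bias times a cluster-wide PG budget. Assuming the default mon_target_pg_per_osd=100 and the 3 OSDs inventoried earlier (a 300-PG budget), the logged targets are reproduced before quantization to a power of two:

    TARGET_PGS = 100 * 3  # assumed: mon_target_pg_per_osd * OSD count

    pools = {  # usage ratio and bias, copied from the log
        ".mgr":               (7.185749983720779e-06, 1.0),
        "vms":                (0.0013088875390701309, 1.0),
        "images":             (0.00125203744627857,   1.0),
        "cephfs.cephfs.meta": (5.087256625643029e-07, 4.0),
    }
    for name, (usage, bias) in pools.items():
        print(name, usage * bias * TARGET_PGS)
    # .mgr               ~0.0021557249951162337 -> quantized to 1
    # vms                ~0.39266626172103924   -> quantized to 32
    # images             ~0.375611233883571     -> quantized to 32
    # cephfs.cephfs.meta ~0.0006104707950771635 -> quantized to 16
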
Oct 11 02:45:07 compute-0 nova_compute[356901]: 2025-10-11 02:45:07.303 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:08 compute-0 ceph-mon[191930]: pgmap v2026: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 0 B/s wr, 32 op/s
Oct 11 02:45:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2027: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 7.6 KiB/s rd, 0 B/s wr, 12 op/s
Oct 11 02:45:09 compute-0 podman[466591]: 2025-10-11 02:45:09.223466109 +0000 UTC m=+0.119554140 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, config_id=multipathd, container_name=multipathd, org.label-schema.license=GPLv2, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS)
Oct 11 02:45:09 compute-0 podman[466592]: 2025-10-11 02:45:09.266596831 +0000 UTC m=+0.144134500 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.schema-version=1.0, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=iscsid, io.buildah.version=1.41.3)
Oct 11 02:45:10 compute-0 ceph-mon[191930]: pgmap v2027: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail; 7.6 KiB/s rd, 0 B/s wr, 12 op/s
Oct 11 02:45:10 compute-0 nova_compute[356901]: 2025-10-11 02:45:10.252 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2028: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:45:11 compute-0 nova_compute[356901]: 2025-10-11 02:45:11.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:45:12 compute-0 ceph-mon[191930]: pgmap v2028: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:12 compute-0 nova_compute[356901]: 2025-10-11 02:45:12.308 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2029: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.869 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to be longer than the expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.870 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.872 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.879 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.881 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '8422017b-c868-4ba2-ab1f-61d3668ca145', 'name': 'te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c', 'flavor': {'id': '6dff30d1-85df-4e9c-9163-a20ba47bb0c7', 'name': 'm1.nano', 'vcpus': 1, 'ram': 128, 'disk': 1, 'ephemeral': 0, 'swap': 0}, 'image': {'id': '2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-0000000e', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'user_id': 'f66a606299944d53a40f21e81c791d70', 'hostId': 'cea8816d446065ba50379057f72b942db7e204c60c4530591bc7d0be', 'status': 'active', 'metadata': {'metering.server_group': '44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.885 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.886 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.887 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.887 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.887 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.888 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.889 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:45:13.888020) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.887 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.890 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.891 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.892 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.893 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.893 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.894 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 nova_compute[356901]: 2025-10-11 02:45:13.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:45:13 compute-0 nova_compute[356901]: 2025-10-11 02:45:13.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
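
_reclaim_queued_deletes is the periodic task that permanently deletes SOFT_DELETED instances once they have aged past reclaim_instance_interval seconds; with the option at its default of 0 the task short-circuits, which is exactly the skip nova logs here. A hedged sketch of that guard, with illustrative names rather than nova's actual code:

    def reclaim_queued_deletes(reclaim_instance_interval=0):
        # The default of 0 disables soft-delete reclaim entirely.
        if reclaim_instance_interval <= 0:
            return "CONF.reclaim_instance_interval <= 0, skipping..."
        # Otherwise nova would look up SOFT_DELETED instances older
        # than the interval and destroy them for real.
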
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.898 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.898 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.bytes volume: 1652 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.899 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{'inspect_vnics': {'8422017b-c868-4ba2-ab1f-61d3668ca145': (7061.667075818, [InterfaceStats(name='tape00931c0-3d', mac='fa:16:3e:2c:af:96', fref=None, parameters={'interfaceid': None, 'bridge': None}, rx_bytes=1652, tx_bytes=1620, rx_packets=11, tx_packets=16, rx_drop=0, tx_drop=0, rx_errors=0, tx_errors=0, rx_bytes_delta=1542, tx_bytes_delta=1620)])}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.902 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{'inspect_vnics': {'8422017b-c868-4ba2-ab1f-61d3668ca145': (7061.667075818, [InterfaceStats(name='tape00931c0-3d', mac='fa:16:3e:2c:af:96', fref=None, parameters={'interfaceid': None, 'bridge': None}, rx_bytes=1652, tx_bytes=1620, rx_packets=11, tx_packets=16, rx_drop=0, tx_drop=0, rx_errors=0, tx_errors=0, rx_bytes_delta=1542, tx_bytes_delta=1620)])}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.903 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{'inspect_vnics': {'8422017b-c868-4ba2-ab1f-61d3668ca145': (7061.667075818, [InterfaceStats(name='tape00931c0-3d', mac='fa:16:3e:2c:af:96', fref=None, parameters={'interfaceid': None, 'bridge': None}, rx_bytes=1652, tx_bytes=1620, rx_packets=11, tx_packets=16, rx_drop=0, tx_drop=0, rx_errors=0, tx_errors=0, rx_bytes_delta=1542, tx_bytes_delta=1620)])}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.906 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{'inspect_vnics': {'8422017b-c868-4ba2-ab1f-61d3668ca145': (7061.667075818, [InterfaceStats(name='tape00931c0-3d', mac='fa:16:3e:2c:af:96', fref=None, parameters={'interfaceid': None, 'bridge': None}, rx_bytes=1652, tx_bytes=1620, rx_packets=11, tx_packets=16, rx_drop=0, tx_drop=0, rx_errors=0, tx_errors=0, rx_bytes_delta=1542, tx_bytes_delta=1620)])}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c>, <NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.908 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2856 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.909 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.909 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.909 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.909 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.909 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.909 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.909 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets volume: 16 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.910 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 24 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.910 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.910 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.910 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.911 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.911 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.911 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.911 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.912 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.912 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.912 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.913 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.913 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.913 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.913 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.913 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.913 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.914 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.914 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.914 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.914 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.914 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.914 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.912 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:45:13.909719) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.917 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:45:13.911639) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.917 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:45:13.913380) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.918 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:45:13.914924) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.936 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.936 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.capacity volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.963 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.963 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.964 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.964 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.965 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.965 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.965 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.965 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.965 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:13.966 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:45:13.965680) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.005 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.bytes volume: 28634112 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.005 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.bytes volume: 246078 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.075 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.076 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.077 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.077 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.078 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.078 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.078 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.078 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.078 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.079 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.latency volume: 1980743477 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.079 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.latency volume: 119778612 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.080 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.080 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.081 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.082 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.082 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.082 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.083 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.083 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.083 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.084 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.requests volume: 1024 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.085 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.requests volume: 107 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.085 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.085 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.084 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:45:14.078881) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.086 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.086 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:45:14.083813) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.087 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.087 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.087 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.087 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.087 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.088 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.088 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.088 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:45:14.087941) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.088 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.usage volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.089 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.089 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.090 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.090 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.091 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.091 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.091 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.091 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.091 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.092 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.bytes volume: 72822784 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.092 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.093 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:45:14.091810) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.093 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.093 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.093 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.094 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.094 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.094 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.094 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.094 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.094 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.095 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.latency volume: 7591043388 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.095 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.095 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.095 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.096 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.097 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.097 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.097 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.097 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.097 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.098 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.097 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:45:14.094877) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.098 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:45:14.097964) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.129 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.156 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.156 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.156 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.156 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.157 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.157 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.157 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.157 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.requests volume: 309 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.157 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.158 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.158 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.159 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.159 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:45:14.157434) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.159 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.160 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.160 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.160 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.160 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.160 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.160 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.bytes.delta volume: 1542 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.161 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.161 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.161 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.162 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate; no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.162 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.162 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.162 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.162 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.162 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.163 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.163 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.163 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.163 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.163 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.164 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.164 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets volume: 11 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.164 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.165 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.165 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.165 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.165 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.165 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.166 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.166 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.bytes.delta volume: 1620 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.165 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:45:14.160612) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.166 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceph-mon[191930]: pgmap v2029: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.167 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:45:14.162549) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.167 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.167 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.167 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.167 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.167 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.168 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.167 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:45:14.164080) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.168 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:45:14.165989) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.168 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.168 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.169 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.169 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.169 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.169 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.169 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.169 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:45:14.168023) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.170 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.170 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.170 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.170 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.171 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.171 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.171 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.171 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.171 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.allocation volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.172 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.172 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.173 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.172 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:45:14.169590) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.173 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.173 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.174 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.174 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.174 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.174 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.174 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.173 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:45:14.171391) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.174 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.175 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.175 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.175 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.175 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.175 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.175 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.175 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/cpu volume: 114550000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.176 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 57480000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.176 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
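
The cpu samples above are cumulative guest CPU time in nanoseconds (114550000000 ns is roughly 114.55 s consumed since the instance started), so a utilization percentage only falls out of the delta between two polls. A hedged sketch of that rate-of-change transform, which is normally applied downstream of the poller rather than in this agent:

    # Derive a utilization percentage from two cumulative `cpu` samples
    # (nanoseconds of guest CPU time); the divisor spreads the delta over
    # the poll interval and the instance's vCPU count.
    def cpu_util_percent(prev_ns, curr_ns, elapsed_s, vcpus):
        used_s = (curr_ns - prev_ns) / 1e9
        return 100.0 * used_s / (elapsed_s * vcpus)

    # An instance that burned 3e9 ns over a 30 s poll interval on 1 vCPU:
    print(cpu_util_percent(114_547_000_000, 114_550_000_000, 30.0, 1))  # 10.0
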
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.176 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.177 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.177 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.177 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.177 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.177 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.bytes volume: 1620 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.177 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2412 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.178 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.178 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.178 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.178 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.178 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.178 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.178 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/memory.usage volume: 43.3671875 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.179 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.83984375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.179 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.179 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.179 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.180 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.181 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.181 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.181 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.182 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.182 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.182 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.183 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.183 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:45:14.174423) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.183 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.184 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:45:14.175861) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.184 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.184 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:45:14.177342) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.185 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.185 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:45:14.178778) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.185 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.186 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.186 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.186 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.187 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.187 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.187 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.187 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.188 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.188 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.188 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.189 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.189 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.189 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:45:14.190 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:45:14 compute-0 systemd[1]: virtsecretd.service: Deactivated successfully.
Oct 11 02:45:14 compute-0 nova_compute[356901]: 2025-10-11 02:45:14.894 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:45:14 compute-0 nova_compute[356901]: 2025-10-11 02:45:14.894 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:45:14 compute-0 nova_compute[356901]: 2025-10-11 02:45:14.895 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:45:14 compute-0 nova_compute[356901]: 2025-10-11 02:45:14.895 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:45:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2030: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:15 compute-0 nova_compute[356901]: 2025-10-11 02:45:15.256 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:15 compute-0 nova_compute[356901]: 2025-10-11 02:45:15.413 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:45:15 compute-0 nova_compute[356901]: 2025-10-11 02:45:15.414 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:45:15 compute-0 nova_compute[356901]: 2025-10-11 02:45:15.415 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:45:15 compute-0 nova_compute[356901]: 2025-10-11 02:45:15.416 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:45:16 compute-0 ceph-mon[191930]: pgmap v2030: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:45:16 compute-0 nova_compute[356901]: 2025-10-11 02:45:16.735 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
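
The payload nova caches above is a JSON list of VIFs, each carrying its subnets, fixed IPs, and any attached floating IPs. A short sketch of walking that structure, trimmed here to just the fields it touches:

    import json

    # Trimmed from the instance_info_cache payload logged above.
    cached = '''[{"devname": "tap64dfc81b-52",
      "network": {"subnets": [{"ips": [{"address": "192.168.0.236",
        "floating_ips": [{"address": "192.168.122.201"}]}]}]}}]'''

    for vif in json.loads(cached):
        for subnet in vif["network"]["subnets"]:
            for ip in subnet["ips"]:
                floats = [f["address"] for f in ip.get("floating_ips", [])]
                print(vif["devname"], ip["address"], "->", floats)
    # tap64dfc81b-52 192.168.0.236 -> ['192.168.122.201']
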
Oct 11 02:45:16 compute-0 nova_compute[356901]: 2025-10-11 02:45:16.762 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:45:16 compute-0 nova_compute[356901]: 2025-10-11 02:45:16.763 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:45:16 compute-0 nova_compute[356901]: 2025-10-11 02:45:16.764 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:45:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2031: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:17 compute-0 systemd[1]: virtproxyd.service: Deactivated successfully.
Oct 11 02:45:17 compute-0 podman[466630]: 2025-10-11 02:45:17.204152095 +0000 UTC m=+0.095361607 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:45:17 compute-0 podman[466631]: 2025-10-11 02:45:17.228479177 +0000 UTC m=+0.104731203 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=openstack_network_exporter, io.buildah.version=1.33.7, release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., build-date=2025-08-20T13:12:41, name=ubi9-minimal, io.openshift.tags=minimal rhel9, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, managed_by=edpm_ansible, maintainer=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, version=9.6, distribution-scope=public, vendor=Red Hat, Inc., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, com.redhat.component=ubi9-minimal-container, config_id=edpm, io.openshift.expose-services=, vcs-type=git)
Oct 11 02:45:17 compute-0 podman[466637]: 2025-10-11 02:45:17.233775893 +0000 UTC m=+0.098043780 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
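
The node_exporter invocation above whitelists systemd units with --collector.systemd.unit-include; the value is a regular expression that the collector anchors at both ends (treated here as ^(...)$, an assumption consistent with its documented behavior). A quick check of which local units would survive the filter:

    import re

    # The include pattern from the node_exporter command line above,
    # assumed to be fully anchored by the collector.
    unit_include = re.compile(r"^(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\.service$")

    for unit in ["virtsecretd.service", "ovs-vswitchd.service",
                 "rsyslog.service", "sshd.service"]:
        print(unit, bool(unit_include.match(unit)))
    # virtsecretd.service True / ovs-vswitchd.service True
    # rsyslog.service True / sshd.service False
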
Oct 11 02:45:17 compute-0 nova_compute[356901]: 2025-10-11 02:45:17.313 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:17 compute-0 nova_compute[356901]: 2025-10-11 02:45:17.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:45:17 compute-0 nova_compute[356901]: 2025-10-11 02:45:17.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:45:17 compute-0 nova_compute[356901]: 2025-10-11 02:45:17.921 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:45:17 compute-0 nova_compute[356901]: 2025-10-11 02:45:17.921 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:45:17 compute-0 nova_compute[356901]: 2025-10-11 02:45:17.921 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
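
The three lockutils lines above show nova serializing resource-tracker work under a named "compute_resources" lock and reporting how long it waited for and then held it. A stdlib-only sketch of that wait/held bookkeeping follows; oslo.concurrency implements this internally, and this is not its API:

    import threading
    import time
    from contextlib import contextmanager

    _locks = {"compute_resources": threading.Lock()}

    @contextmanager
    def timed_lock(name):
        t0 = time.monotonic()
        with _locks[name]:
            waited = time.monotonic() - t0  # time spent queueing
            t1 = time.monotonic()
            try:
                yield
            finally:
                held = time.monotonic() - t1  # time the body ran
                print(f'Lock "{name}" waited {waited:.3f}s, held {held:.3f}s')

    with timed_lock("compute_resources"):
        pass  # e.g. the _update_available_resource body
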
Oct 11 02:45:17 compute-0 nova_compute[356901]: 2025-10-11 02:45:17.921 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:45:17 compute-0 nova_compute[356901]: 2025-10-11 02:45:17.921 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:45:18 compute-0 ceph-mon[191930]: pgmap v2031: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:45:18 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3710148126' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:45:18 compute-0 nova_compute[356901]: 2025-10-11 02:45:18.413 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.492s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
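
nova sizes its RBD-backed storage by shelling out to the exact command logged above. A minimal reproduction with subprocess; it assumes the host has /etc/ceph/ceph.conf plus a client.openstack keyring, and that the "stats" field names below match the cluster's ceph df JSON output:

    import json
    import subprocess

    out = subprocess.run(
        ["ceph", "df", "--format=json", "--id", "openstack",
         "--conf", "/etc/ceph/ceph.conf"],
        check=True, capture_output=True, text=True, timeout=30,
    ).stdout
    stats = json.loads(out)["stats"]
    print(stats["total_bytes"], stats["total_avail_bytes"])
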
Oct 11 02:45:18 compute-0 nova_compute[356901]: 2025-10-11 02:45:18.533 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:45:18 compute-0 nova_compute[356901]: 2025-10-11 02:45:18.534 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:45:18 compute-0 nova_compute[356901]: 2025-10-11 02:45:18.542 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:45:18 compute-0 nova_compute[356901]: 2025-10-11 02:45:18.542 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:45:18 compute-0 nova_compute[356901]: 2025-10-11 02:45:18.542 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:45:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2032: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:18 compute-0 nova_compute[356901]: 2025-10-11 02:45:18.993 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:45:18 compute-0 nova_compute[356901]: 2025-10-11 02:45:18.995 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3451MB free_disk=59.90976333618164GB free_vcpus=6 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:45:18 compute-0 nova_compute[356901]: 2025-10-11 02:45:18.995 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:45:18 compute-0 nova_compute[356901]: 2025-10-11 02:45:18.995 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:45:19 compute-0 nova_compute[356901]: 2025-10-11 02:45:19.076 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:45:19 compute-0 nova_compute[356901]: 2025-10-11 02:45:19.076 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 8422017b-c868-4ba2-ab1f-61d3668ca145 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:45:19 compute-0 nova_compute[356901]: 2025-10-11 02:45:19.076 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 2 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:45:19 compute-0 nova_compute[356901]: 2025-10-11 02:45:19.076 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1152MB phys_disk=59GB used_disk=3GB total_vcpus=8 used_vcpus=2 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:45:19 compute-0 nova_compute[356901]: 2025-10-11 02:45:19.132 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:45:19 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3710148126' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:45:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:45:19 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1086262496' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:45:19 compute-0 nova_compute[356901]: 2025-10-11 02:45:19.601 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.469s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:45:19 compute-0 nova_compute[356901]: 2025-10-11 02:45:19.613 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:45:19 compute-0 nova_compute[356901]: 2025-10-11 02:45:19.633 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
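
Placement derives usable capacity from that inventory as (total - reserved) * allocation_ratio per resource class, so the numbers above work out to 32 schedulable VCPUs, 7168 MB of RAM, and 52.2 GB of disk:

    # Worked directly from the inventory data in the log line above.
    inventory = {
        "VCPU": {"total": 8, "reserved": 0, "allocation_ratio": 4.0},
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "DISK_GB": {"total": 59, "reserved": 1, "allocation_ratio": 0.9},
    }
    for rc, inv in inventory.items():
        print(rc, (inv["total"] - inv["reserved"]) * inv["allocation_ratio"])
    # VCPU 32.0 / MEMORY_MB 7168.0 / DISK_GB 52.2
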
Oct 11 02:45:19 compute-0 nova_compute[356901]: 2025-10-11 02:45:19.635 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:45:19 compute-0 nova_compute[356901]: 2025-10-11 02:45:19.636 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.641s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:45:20 compute-0 ceph-mon[191930]: pgmap v2032: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:20 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1086262496' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:45:20 compute-0 nova_compute[356901]: 2025-10-11 02:45:20.256 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2033: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:45:22 compute-0 ceph-mon[191930]: pgmap v2033: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:22 compute-0 nova_compute[356901]: 2025-10-11 02:45:22.318 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2034: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:24 compute-0 ceph-mon[191930]: pgmap v2034: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2035: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:25 compute-0 nova_compute[356901]: 2025-10-11 02:45:25.260 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:26 compute-0 podman[466735]: 2025-10-11 02:45:26.251040731 +0000 UTC m=+0.136210334 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.component=ubi9-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1214.1726694543, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release-0.7.12=, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, architecture=x86_64, maintainer=Red Hat, Inc., vcs-type=git, version=9.4, io.openshift.tags=base rhel9, build-date=2024-09-18T21:23:30, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, managed_by=edpm_ansible, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, container_name=kepler, name=ubi9, io.k8s.display-name=Red Hat Universal Base Image 9, io.buildah.version=1.29.0, config_id=edpm, vendor=Red Hat, Inc., io.openshift.expose-services=)
Oct 11 02:45:26 compute-0 ceph-mon[191930]: pgmap v2035: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:45:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:45:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:45:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:45:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:45:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:45:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:45:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2036: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:27 compute-0 nova_compute[356901]: 2025-10-11 02:45:27.321 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:45:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3432338448' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:45:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:45:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3432338448' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:45:28 compute-0 ceph-mon[191930]: pgmap v2036: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3432338448' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:45:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3432338448' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
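[annotation] The audit entries above show client.openstack (at 192.168.122.10) polling cluster capacity and the volumes pool quota — the periodic Cinder/Nova storage-stats poll. A minimal sketch of issuing the same mon commands through the python-rados binding, assuming /etc/ceph/ceph.conf and the client.openstack keyring are readable on this host; only the command payloads are taken from the log, the rest is illustrative:

    import json
    import rados

    # Connect as the same Ceph identity the audit log records (client.openstack).
    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf', rados_id='openstack')
    cluster.connect()
    try:
        # {"prefix":"df","format":"json"} -- cluster-wide usage, as dispatched above.
        ret, out, errs = cluster.mon_command(
            json.dumps({'prefix': 'df', 'format': 'json'}), b'', timeout=5)
        print(ret, json.loads(out)['stats'])
        # {"prefix":"osd pool get-quota","pool":"volumes","format":"json"}
        ret, out, errs = cluster.mon_command(
            json.dumps({'prefix': 'osd pool get-quota',
                        'pool': 'volumes', 'format': 'json'}), b'', timeout=5)
        print(ret, out)
    finally:
        cluster.shutdown()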
Oct 11 02:45:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2037: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:29 compute-0 podman[157119]: time="2025-10-11T02:45:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:45:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:45:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:45:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:45:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9547 "" "Go-http-client/1.1"
Oct 11 02:45:30 compute-0 nova_compute[356901]: 2025-10-11 02:45:30.262 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:30 compute-0 ceph-mon[191930]: pgmap v2037: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2038: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:45:31 compute-0 openstack_network_exporter[374316]: ERROR   02:45:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:45:31 compute-0 openstack_network_exporter[374316]: ERROR   02:45:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:45:31 compute-0 openstack_network_exporter[374316]: ERROR   02:45:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:45:31 compute-0 openstack_network_exporter[374316]: ERROR   02:45:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:45:31 compute-0 openstack_network_exporter[374316]: ERROR   02:45:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
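[annotation] The four ERROR lines above are expected noise on a compute node: openstack_network_exporter probes ovn-northd and ovsdb-server through their appctl control sockets, but this host runs only ovn-controller/ovs-vswitchd, and the dpif-netdev/* queries answer only when a userspace (DPDK) datapath exists. appctl-style discovery looks for <rundir>/<daemon>.<pid>.ctl; a rough sketch of that lookup, where the run directories and the .ctl naming are assumptions based on OVS/OVN defaults:

    import glob
    import os

    # A daemon's control socket lives at <rundir>/<name>.<pid>.ctl; no match
    # yields the "no control socket files found" errors logged above.
    def find_ctl(name, rundirs=('/var/run/openvswitch', '/var/run/ovn')):
        for rundir in rundirs:
            hits = glob.glob(os.path.join(rundir, '%s.*.ctl' % name))
            if hits:
                return hits[0]
        return None

    for daemon in ('ovn-northd', 'ovsdb-server', 'ovs-vswitchd'):
        print(daemon, find_ctl(daemon) or 'no control socket files found')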
Oct 11 02:45:32 compute-0 podman[466754]: 2025-10-11 02:45:32.238810582 +0000 UTC m=+0.119843110 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:45:32 compute-0 podman[466757]: 2025-10-11 02:45:32.254574988 +0000 UTC m=+0.108904674 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, managed_by=edpm_ansible)
Oct 11 02:45:32 compute-0 podman[466756]: 2025-10-11 02:45:32.263471748 +0000 UTC m=+0.118365801 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, config_id=edpm, org.label-schema.vendor=CentOS)
Oct 11 02:45:32 compute-0 ceph-mon[191930]: pgmap v2038: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:32 compute-0 podman[466755]: 2025-10-11 02:45:32.315207061 +0000 UTC m=+0.187796164 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:45:32 compute-0 nova_compute[356901]: 2025-10-11 02:45:32.324 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2039: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:34 compute-0 ceph-mon[191930]: pgmap v2039: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2040: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:35 compute-0 nova_compute[356901]: 2025-10-11 02:45:35.267 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:45:36 compute-0 ceph-mon[191930]: pgmap v2040: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._run_image_cache_manager_pass run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.897 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "storage-registry-lock" by "nova.virt.storage_users.register_storage_use.<locals>.do_register_storage_use" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.898 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "storage-registry-lock" acquired by "nova.virt.storage_users.register_storage_use.<locals>.do_register_storage_use" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.898 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "storage-registry-lock" "released" by "nova.virt.storage_users.register_storage_use.<locals>.do_register_storage_use" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.899 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "storage-registry-lock" by "nova.virt.storage_users.get_storage_users.<locals>.do_get_storage_users" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.901 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "storage-registry-lock" acquired by "nova.virt.storage_users.get_storage_users.<locals>.do_get_storage_users" :: waited 0.003s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.901 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "storage-registry-lock" "released" by "nova.virt.storage_users.get_storage_users.<locals>.do_get_storage_users" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
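[annotation] The Acquiring/acquired/"released" triplets above come from oslo.concurrency's lockutils: nova wraps the storage-registry read and update in a named lock so concurrent periodic tasks do not race, and lockutils emits the waited/held timings at DEBUG. A minimal sketch of the same pattern; the lock name is taken from the log, the function body is purely illustrative:

    from oslo_concurrency import lockutils

    # Same named-lock pattern the log shows around register_storage_use.
    @lockutils.synchronized('storage-registry-lock')
    def do_register_storage_use(storage_path, hostname):
        # illustrative: record that this host uses the shared storage path
        print('registering', hostname, 'for', storage_path)

    do_register_storage_use('/var/lib/nova/instances', 'compute-0')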
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.926 2 DEBUG nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Adding ephemeral_1_0706d66 into backend ephemeral images _store_ephemeral_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/imagecache.py:100
Oct 11 02:45:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2041: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.959 2 DEBUG nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Verify base images _age_and_verify_cached_images /usr/lib/python3.9/site-packages/nova/virt/libvirt/imagecache.py:314
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.960 2 DEBUG nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Image id 2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c yields fingerprint 61847d5d7446819c58bff23b092765d610117849 _age_and_verify_cached_images /usr/lib/python3.9/site-packages/nova/virt/libvirt/imagecache.py:319
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.960 2 INFO nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] image 2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c at (/var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849): checking
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.961 2 DEBUG nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] image 2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c at (/var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849): image is in use _mark_in_use /usr/lib/python3.9/site-packages/nova/virt/libvirt/imagecache.py:279
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.963 2 DEBUG nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Image id  yields fingerprint da39a3ee5e6b4b0d3255bfef95601890afd80709 _age_and_verify_cached_images /usr/lib/python3.9/site-packages/nova/virt/libvirt/imagecache.py:319
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.964 2 DEBUG nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Image id a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7 yields fingerprint c2a4b3f256e07592b38b9a83d173b78feaa2ba6d _age_and_verify_cached_images /usr/lib/python3.9/site-packages/nova/virt/libvirt/imagecache.py:319
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.964 2 INFO nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] image a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7 at (/var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d): checking
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.965 2 DEBUG nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] image a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7 at (/var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d): image is in use _mark_in_use /usr/lib/python3.9/site-packages/nova/virt/libvirt/imagecache.py:279
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.968 2 DEBUG nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] 0cc56d17-ec3a-4408-bccb-91b29427379e is a valid instance name _list_backing_images /usr/lib/python3.9/site-packages/nova/virt/libvirt/imagecache.py:126
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.969 2 DEBUG nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] 8422017b-c868-4ba2-ab1f-61d3668ca145 is a valid instance name _list_backing_images /usr/lib/python3.9/site-packages/nova/virt/libvirt/imagecache.py:126
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.970 2 WARNING nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Unknown base file: /var/lib/nova/instances/_base/1d8c0e72e3c59f2e2987fd026cc0e3a116837b53
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.970 2 WARNING nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Unknown base file: /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.971 2 INFO nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Active base files: /var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849 /var/lib/nova/instances/_base/c2a4b3f256e07592b38b9a83d173b78feaa2ba6d
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.971 2 INFO nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Removable base files: /var/lib/nova/instances/_base/1d8c0e72e3c59f2e2987fd026cc0e3a116837b53 /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.972 2 INFO nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Base, swap or ephemeral file too young to remove: /var/lib/nova/instances/_base/1d8c0e72e3c59f2e2987fd026cc0e3a116837b53
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.972 2 INFO nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Base, swap or ephemeral file too young to remove: /var/lib/nova/instances/_base/b7b5d6d3ee08acc22fdf9b7f2e268903bf2ce21d
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.973 2 DEBUG nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Verification complete _age_and_verify_cached_images /usr/lib/python3.9/site-packages/nova/virt/libvirt/imagecache.py:350
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.973 2 DEBUG nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Verify swap images _age_and_verify_swap_images /usr/lib/python3.9/site-packages/nova/virt/libvirt/imagecache.py:299
Oct 11 02:45:36 compute-0 nova_compute[356901]: 2025-10-11 02:45:36.974 2 DEBUG nova.virt.libvirt.imagecache [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Verify ephemeral images _age_and_verify_ephemeral_images /usr/lib/python3.9/site-packages/nova/virt/libvirt/imagecache.py:284
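[annotation] The image-cache pass above fingerprints each image id with SHA-1 and looks for /var/lib/nova/instances/_base/<hexdigest>; base files with no matching in-use image become "Unknown"/"Removable" and are aged out once old enough. Note the tell-tale "Image id  yields fingerprint da39a3ee..." line: da39a3ee5e6b4b0d3255bfef95601890afd80709 is the SHA-1 of the empty string, i.e. an instance with no image id (such as boot-from-volume). A quick check needing only hashlib:

    import hashlib

    # Nova's base-file name is sha1(image_id); the ids below are the ones
    # logged above, including the empty id.
    for image_id in ('2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c',
                     '',
                     'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'):
        print(repr(image_id),
              hashlib.sha1(image_id.encode('utf-8')).hexdigest())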
Oct 11 02:45:37 compute-0 nova_compute[356901]: 2025-10-11 02:45:37.328 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:38 compute-0 ceph-mon[191930]: pgmap v2041: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2042: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:40 compute-0 podman[466834]: 2025-10-11 02:45:40.198049272 +0000 UTC m=+0.097597784 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, container_name=multipathd, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=multipathd)
Oct 11 02:45:40 compute-0 podman[466835]: 2025-10-11 02:45:40.222642888 +0000 UTC m=+0.117442041 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, org.label-schema.name=CentOS Stream 9 Base Image, container_name=iscsid, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS)
Oct 11 02:45:40 compute-0 nova_compute[356901]: 2025-10-11 02:45:40.268 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:40 compute-0 ceph-mon[191930]: pgmap v2042: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2043: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:45:42 compute-0 nova_compute[356901]: 2025-10-11 02:45:42.334 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:42 compute-0 ceph-mon[191930]: pgmap v2043: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2044: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:44 compute-0 ceph-mon[191930]: pgmap v2044: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2045: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:45 compute-0 nova_compute[356901]: 2025-10-11 02:45:45.270 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:45:46 compute-0 ceph-mon[191930]: pgmap v2045: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2046: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:47 compute-0 nova_compute[356901]: 2025-10-11 02:45:47.338 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:48 compute-0 podman[466871]: 2025-10-11 02:45:48.209090806 +0000 UTC m=+0.103542877 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_managed=true)
Oct 11 02:45:48 compute-0 podman[466872]: 2025-10-11 02:45:48.217376149 +0000 UTC m=+0.106807708 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=openstack_network_exporter, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., name=ubi9-minimal, build-date=2025-08-20T13:12:41, config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., release=1755695350, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, vendor=Red Hat, Inc., io.openshift.expose-services=, distribution-scope=public, vcs-type=git, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, com.redhat.component=ubi9-minimal-container, managed_by=edpm_ansible, version=9.6, io.buildah.version=1.33.7, url=https://catalog.redhat.com/en/search?searchType=containers, io.openshift.tags=minimal rhel9, architecture=x86_64, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:45:48 compute-0 podman[466873]: 2025-10-11 02:45:48.224445757 +0000 UTC m=+0.103327375 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:45:48 compute-0 ceph-mon[191930]: pgmap v2046: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:48 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2047: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:50 compute-0 nova_compute[356901]: 2025-10-11 02:45:50.274 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:50 compute-0 ceph-mon[191930]: pgmap v2047: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:50 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2048: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:45:52 compute-0 nova_compute[356901]: 2025-10-11 02:45:52.343 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:52 compute-0 ceph-mon[191930]: pgmap v2048: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:52 compute-0 nova_compute[356901]: 2025-10-11 02:45:52.715 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:45:52 compute-0 nova_compute[356901]: 2025-10-11 02:45:52.718 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f" acquired by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: waited 0.003s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:45:52 compute-0 nova_compute[356901]: 2025-10-11 02:45:52.735 2 DEBUG nova.compute.manager [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Starting instance... _do_build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2402
Oct 11 02:45:52 compute-0 nova_compute[356901]: 2025-10-11 02:45:52.820 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:45:52 compute-0 nova_compute[356901]: 2025-10-11 02:45:52.822 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:45:52 compute-0 nova_compute[356901]: 2025-10-11 02:45:52.833 2 DEBUG nova.virt.hardware [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Require both a host and instance NUMA topology to fit instance on host. numa_fit_instance_to_host /usr/lib/python3.9/site-packages/nova/virt/hardware.py:2368
Oct 11 02:45:52 compute-0 nova_compute[356901]: 2025-10-11 02:45:52.833 2 INFO nova.compute.claims [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Claim successful on node compute-0.ctlplane.example.com
Oct 11 02:45:52 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2049: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:52 compute-0 nova_compute[356901]: 2025-10-11 02:45:52.995 2 DEBUG oslo_concurrency.processutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:45:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:45:53 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3570884071' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.483 2 DEBUG oslo_concurrency.processutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.489s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.495 2 DEBUG nova.compute.provider_tree [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:45:53 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3570884071' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.511 2 DEBUG nova.scheduler.client.report [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.534 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.instance_claim" :: held 0.712s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
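[annotation] The inventory dict above is what the resource tracker reports to Placement during the claim; usable capacity per resource class is (total - reserved) * allocation_ratio. Worked numbers for this host, as a sketch:

    # Usable capacity as Placement computes it: (total - reserved) * ratio.
    inventory = {
        'VCPU':      {'total': 8,    'reserved': 0,   'allocation_ratio': 4.0},
        'MEMORY_MB': {'total': 7680, 'reserved': 512, 'allocation_ratio': 1.0},
        'DISK_GB':   {'total': 59,   'reserved': 1,   'allocation_ratio': 0.9},
    }
    for rc, inv in inventory.items():
        usable = (inv['total'] - inv['reserved']) * inv['allocation_ratio']
        print(rc, usable)   # VCPU 32.0, MEMORY_MB 7168.0, DISK_GB 52.2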
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.536 2 DEBUG nova.compute.manager [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Start building networks asynchronously for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2799
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.583 2 DEBUG nova.compute.manager [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Allocating IP information in the background. _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1952
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.583 2 DEBUG nova.network.neutron [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] allocate_for_instance() allocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1156
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.603 2 INFO nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Ignoring supplied device name: /dev/vda. Libvirt can't honour user-supplied dev names
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.626 2 DEBUG nova.compute.manager [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Start building block device mappings for instance. _build_resources /usr/lib/python3.9/site-packages/nova/compute/manager.py:2834
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.728 2 DEBUG nova.compute.manager [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Start spawning the instance on the hypervisor. _build_and_run_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:2608
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.732 2 DEBUG nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Creating instance directory _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4723
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.733 2 INFO nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Creating image(s)
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.779 2 DEBUG nova.storage.rbd_utils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] rbd image f98d09d7-6aa0-4405-bfa0-be1f78d3911f_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.827 2 DEBUG nova.storage.rbd_utils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] rbd image f98d09d7-6aa0-4405-bfa0-be1f78d3911f_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.873 2 DEBUG nova.storage.rbd_utils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] rbd image f98d09d7-6aa0-4405-bfa0-be1f78d3911f_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.882 2 DEBUG oslo_concurrency.processutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): /usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849 --force-share --output=json execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.968 2 DEBUG oslo_concurrency.processutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "/usr/bin/python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 -- env LC_ALL=C LANG=C qemu-img info /var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849 --force-share --output=json" returned: 0 in 0.085s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
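[annotation] The wrapper in the command above is oslo.concurrency's prlimit helper: qemu-img is capped at 1 GiB of address space (--as=1073741824) and 30 s of CPU (--cpu=30) so a malformed or hostile image cannot wedge the compute service. Passing a ProcessLimits object to execute() generates exactly that wrapper; a sketch assuming the base-file path from the log is readable:

    from oslo_concurrency import processutils

    # Spawns: python3 -m oslo_concurrency.prlimit --as=1073741824 --cpu=30 --
    #         env LC_ALL=C LANG=C qemu-img info <path> --force-share --output=json
    limits = processutils.ProcessLimits(address_space=1024 ** 3, cpu_time=30)
    out, _err = processutils.execute(
        'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
        '/var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849',
        '--force-share', '--output=json',
        prlimit=limits)
    print(out)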
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.969 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "61847d5d7446819c58bff23b092765d610117849" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.970 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "61847d5d7446819c58bff23b092765d610117849" acquired by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:45:53 compute-0 nova_compute[356901]: 2025-10-11 02:45:53.971 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "61847d5d7446819c58bff23b092765d610117849" "released" by "nova.virt.libvirt.imagebackend.Image.cache.<locals>.fetch_func_sync" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:45:54 compute-0 nova_compute[356901]: 2025-10-11 02:45:54.008 2 DEBUG nova.storage.rbd_utils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] rbd image f98d09d7-6aa0-4405-bfa0-be1f78d3911f_disk does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:45:54 compute-0 nova_compute[356901]: 2025-10-11 02:45:54.021 2 DEBUG oslo_concurrency.processutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849 f98d09d7-6aa0-4405-bfa0-be1f78d3911f_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:45:54 compute-0 nova_compute[356901]: 2025-10-11 02:45:54.415 2 DEBUG oslo_concurrency.processutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/_base/61847d5d7446819c58bff23b092765d610117849 f98d09d7-6aa0-4405-bfa0-be1f78d3911f_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.394s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:45:54 compute-0 nova_compute[356901]: 2025-10-11 02:45:54.468 2 DEBUG nova.policy [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Policy check for network:attach_external_network failed with credentials {'is_admin': False, 'user_id': 'f66a606299944d53a40f21e81c791d70', 'user_domain_id': 'default', 'system_scope': None, 'domain_id': None, 'project_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'project_domain_id': 'default', 'roles': ['member', 'reader'], 'is_admin_project': True, 'service_user_id': None, 'service_user_domain_id': None, 'service_project_id': None, 'service_project_domain_id': None, 'service_roles': []} authorize /usr/lib/python3.9/site-packages/nova/policy.py:203
Oct 11 02:45:54 compute-0 ceph-mon[191930]: pgmap v2049: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:54 compute-0 nova_compute[356901]: 2025-10-11 02:45:54.538 2 DEBUG nova.storage.rbd_utils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] resizing rbd image f98d09d7-6aa0-4405-bfa0-be1f78d3911f_disk to 1073741824 resize /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:288
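[annotation] With the RBD image backend, "Creating image(s)" above becomes an rbd import of the cached base file into the vms pool followed by a resize to the flavor's disk size (1073741824 bytes = 1 GiB here). The resize side through the python rbd binding, as a sketch: pool, image name and size are taken from the log, the connection settings are assumptions:

    import rados
    import rbd

    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf', rados_id='openstack')
    cluster.connect()
    ioctx = cluster.open_ioctx('vms')
    try:
        # Grow the freshly imported disk to the flavor size, matching the
        # "resizing rbd image ... to 1073741824" entry above.
        with rbd.Image(ioctx, 'f98d09d7-6aa0-4405-bfa0-be1f78d3911f_disk') as image:
            image.resize(1073741824)
            print(image.size())
    finally:
        ioctx.close()
        cluster.shutdown()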
Oct 11 02:45:54 compute-0 nova_compute[356901]: 2025-10-11 02:45:54.683 2 DEBUG nova.objects.instance [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lazy-loading 'migration_context' on Instance uuid f98d09d7-6aa0-4405-bfa0-be1f78d3911f obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:45:54 compute-0 nova_compute[356901]: 2025-10-11 02:45:54.702 2 DEBUG nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Created local disks _create_image /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4857
Oct 11 02:45:54 compute-0 nova_compute[356901]: 2025-10-11 02:45:54.703 2 DEBUG nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Ensure instance console log exists: /var/lib/nova/instances/f98d09d7-6aa0-4405-bfa0-be1f78d3911f/console.log _ensure_console_log_for_instance /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4609
Oct 11 02:45:54 compute-0 nova_compute[356901]: 2025-10-11 02:45:54.704 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "vgpu_resources" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:45:54 compute-0 nova_compute[356901]: 2025-10-11 02:45:54.705 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "vgpu_resources" acquired by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:45:54 compute-0 nova_compute[356901]: 2025-10-11 02:45:54.705 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "vgpu_resources" "released" by "nova.virt.libvirt.driver.LibvirtDriver._allocate_mdevs" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:45:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:45:54.875 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:45:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:45:54.875 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:45:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:45:54.876 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:45:54 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2050: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:55 compute-0 nova_compute[356901]: 2025-10-11 02:45:55.275 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:45:56 compute-0 ceph-mon[191930]: pgmap v2050: 321 pgs: 321 active+clean; 218 MiB data, 375 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:45:56 compute-0 nova_compute[356901]: 2025-10-11 02:45:56.535 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:45:56.531 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=16, options={'arp_ns_explicit_output': 'true', 'mac_prefix': 'fe:55:97', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'ce:9c:4f:b4:85:9b'}, ipsec=False) old=SB_Global(nb_cfg=15) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:45:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:45:56.532 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 6 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274
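The "Matched UPDATE: SbGlobalUpdateEvent" line above comes from ovsdbapp's row-event machinery matching an update of the single SB_Global row. A minimal sketch of such an event class, with the handler body assumed:

    from ovsdbapp.backend.ovs_idl import event as row_event

    class SbGlobalUpdateEvent(row_event.RowEvent):
        """Fire on any update to the (single) SB_Global row."""

        def __init__(self):
            super().__init__((self.ROW_UPDATE,), 'SB_Global', None)

        def run(self, event, row, old):
            # The metadata agent reacts here; per the log it debounces by
            # delaying the Chassis table update for a few seconds.
            pass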
Oct 11 02:45:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:45:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:45:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:45:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:45:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:45:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:45:56 compute-0 nova_compute[356901]: 2025-10-11 02:45:56.644 2 DEBUG nova.network.neutron [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Successfully created port: 0c37c119-6647-42bb-a22f-ca741242ef30 _create_port_minimal /usr/lib/python3.9/site-packages/nova/network/neutron.py:548
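Nova's _create_port_minimal, logged above, asks Neutron for a bare port on the boot network. Roughly the same request via openstacksdk — the cloud name is an assumption, the network UUID is the one this port lands on later in the log:

    import openstack

    conn = openstack.connect(cloud='overcloud')  # assumed clouds.yaml entry
    port = conn.network.create_port(
        network_id='3563b4a1-477a-44a0-b01f-7d19d49c0308',  # from the log
    )
    print(port.id)  # Neutron chose 0c37c119-6647-42bb-a22f-ca741242ef30 here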
Oct 11 02:45:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:45:56
Oct 11 02:45:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:45:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:45:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.log', 'cephfs.cephfs.data', '.rgw.root', 'default.rgw.control', 'images', 'vms', 'default.rgw.meta', 'cephfs.cephfs.meta', '.mgr', 'backups', 'volumes']
Oct 11 02:45:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:45:56 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2051: 321 pgs: 321 active+clean; 239 MiB data, 382 MiB used, 60 GiB / 60 GiB avail; 601 KiB/s wr, 1 op/s
Oct 11 02:45:57 compute-0 podman[467121]: 2025-10-11 02:45:57.220869827 +0000 UTC m=+0.114047908 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.tags=base rhel9, release-0.7.12=, config_id=edpm, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-type=git, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, name=ubi9, com.redhat.component=ubi9-container, io.openshift.expose-services=, maintainer=Red Hat, Inc., release=1214.1726694543, vendor=Red Hat, Inc., build-date=2024-09-18T21:23:30, summary=Provides the latest release of Red Hat Universal Base Image 9., architecture=x86_64, distribution-scope=public, container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.29.0, managed_by=edpm_ansible, version=9.4, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']})
Oct 11 02:45:57 compute-0 nova_compute[356901]: 2025-10-11 02:45:57.348 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:45:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:45:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:45:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:45:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:45:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:45:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:45:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:45:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:45:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:45:57 compute-0 nova_compute[356901]: 2025-10-11 02:45:57.641 2 DEBUG nova.network.neutron [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Successfully updated port: 0c37c119-6647-42bb-a22f-ca741242ef30 _update_port /usr/lib/python3.9/site-packages/nova/network/neutron.py:586
Oct 11 02:45:57 compute-0 nova_compute[356901]: 2025-10-11 02:45:57.657 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "refresh_cache-f98d09d7-6aa0-4405-bfa0-be1f78d3911f" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:45:57 compute-0 nova_compute[356901]: 2025-10-11 02:45:57.658 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquired lock "refresh_cache-f98d09d7-6aa0-4405-bfa0-be1f78d3911f" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:45:57 compute-0 nova_compute[356901]: 2025-10-11 02:45:57.658 2 DEBUG nova.network.neutron [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Building network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2010
Oct 11 02:45:57 compute-0 nova_compute[356901]: 2025-10-11 02:45:57.739 2 DEBUG nova.compute.manager [req-947f5b29-dc92-4fd6-be24-25b18709df72 req-87ff9f60-e878-4b45-ab96-c1440700f517 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Received event network-changed-0c37c119-6647-42bb-a22f-ca741242ef30 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:45:57 compute-0 nova_compute[356901]: 2025-10-11 02:45:57.740 2 DEBUG nova.compute.manager [req-947f5b29-dc92-4fd6-be24-25b18709df72 req-87ff9f60-e878-4b45-ab96-c1440700f517 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Refreshing instance network info cache due to event network-changed-0c37c119-6647-42bb-a22f-ca741242ef30. external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11053
Oct 11 02:45:57 compute-0 nova_compute[356901]: 2025-10-11 02:45:57.740 2 DEBUG oslo_concurrency.lockutils [req-947f5b29-dc92-4fd6-be24-25b18709df72 req-87ff9f60-e878-4b45-ab96-c1440700f517 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "refresh_cache-f98d09d7-6aa0-4405-bfa0-be1f78d3911f" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:45:57 compute-0 nova_compute[356901]: 2025-10-11 02:45:57.958 2 DEBUG nova.network.neutron [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Instance cache missing network info. _get_preexisting_port_ids /usr/lib/python3.9/site-packages/nova/network/neutron.py:3323
Oct 11 02:45:58 compute-0 ceph-mon[191930]: pgmap v2051: 321 pgs: 321 active+clean; 239 MiB data, 382 MiB used, 60 GiB / 60 GiB avail; 601 KiB/s wr, 1 op/s
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.710 2 DEBUG nova.network.neutron [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Updating instance_info_cache with network_info: [{"id": "0c37c119-6647-42bb-a22f-ca741242ef30", "address": "fa:16:3e:ee:94:7e", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.2.253", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap0c37c119-66", "ovs_interfaceid": "0c37c119-6647-42bb-a22f-ca741242ef30", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.727 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Releasing lock "refresh_cache-f98d09d7-6aa0-4405-bfa0-be1f78d3911f" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.727 2 DEBUG nova.compute.manager [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Instance network_info: |[{"id": "0c37c119-6647-42bb-a22f-ca741242ef30", "address": "fa:16:3e:ee:94:7e", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.2.253", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap0c37c119-66", "ovs_interfaceid": "0c37c119-6647-42bb-a22f-ca741242ef30", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}]| _allocate_network_async /usr/lib/python3.9/site-packages/nova/compute/manager.py:1967
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.728 2 DEBUG oslo_concurrency.lockutils [req-947f5b29-dc92-4fd6-be24-25b18709df72 req-87ff9f60-e878-4b45-ab96-c1440700f517 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquired lock "refresh_cache-f98d09d7-6aa0-4405-bfa0-be1f78d3911f" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.729 2 DEBUG nova.network.neutron [req-947f5b29-dc92-4fd6-be24-25b18709df72 req-87ff9f60-e878-4b45-ab96-c1440700f517 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Refreshing network info cache for port 0c37c119-6647-42bb-a22f-ca741242ef30 _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2007
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.731 2 DEBUG nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Start _get_guest_xml network_info=[{"id": "0c37c119-6647-42bb-a22f-ca741242ef30", "address": "fa:16:3e:ee:94:7e", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.2.253", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap0c37c119-66", "ovs_interfaceid": "0c37c119-6647-42bb-a22f-ca741242ef30", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] disk_info={'disk_bus': 'virtio', 'cdrom_bus': 'sata', 'mapping': {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.config': {'bus': 'sata', 'dev': 'sda', 'type': 'cdrom'}}} image_meta=ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:42:57Z,direct_url=<?>,disk_format='qcow2',id=2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c,min_disk=0,min_ram=0,name='tempest-scenario-img--676804077',owner='a05bbc8f872d4dd99972d2cb8136d608',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:42:58Z,virtual_size=<?>,visibility=<?>) rescue=None block_device_info={'root_device_name': '/dev/vda', 'image': [{'encrypted': False, 'device_type': 'disk', 'guest_format': None, 'disk_bus': 'virtio', 'encryption_secret_uuid': None, 'encryption_options': None, 'boot_index': 0, 'device_name': '/dev/vda', 'size': 0, 'encryption_format': None, 'image_id': '2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c'}], 'ephemerals': [], 'block_device_mapping': [], 'swap': None} _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7549
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.740 2 WARNING nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.749 2 DEBUG nova.virt.libvirt.host [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V1... _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1653
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.750 2 DEBUG nova.virt.libvirt.host [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CPU controller missing on host. _has_cgroupsv1_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1663
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.755 2 DEBUG nova.virt.libvirt.host [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Searching host: 'compute-0.ctlplane.example.com' for CPU controller through CGroups V2... _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1672
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.756 2 DEBUG nova.virt.libvirt.host [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CPU controller found on host. _has_cgroupsv2_cpu_controller /usr/lib/python3.9/site-packages/nova/virt/libvirt/host.py:1679
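The v1 probe fails and the v2 probe succeeds above: on a unified-hierarchy host the cpu controller is advertised in cgroup.controllers rather than as a separate mount. A stand-alone check along the same lines:

    # Sketch of the cgroups-v2 probe: the host has the "cpu" controller
    # iff it is listed in the unified hierarchy's cgroup.controllers.
    CGROUP_V2_CONTROLLERS = '/sys/fs/cgroup/cgroup.controllers'

    def has_cgroupsv2_cpu_controller() -> bool:
        try:
            with open(CGROUP_V2_CONTROLLERS) as f:
                return 'cpu' in f.read().split()
        except FileNotFoundError:
            return False  # no unified (v2) hierarchy mounted

    print(has_cgroupsv2_cpu_controller())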
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.757 2 DEBUG nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CPU mode 'host-model' models '' was chosen, with extra flags: '' _get_guest_cpu_model_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:5396
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.757 2 DEBUG nova.virt.hardware [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Getting desirable topologies for flavor Flavor(created_at=2025-10-11T02:38:03Z,deleted=False,deleted_at=None,description=None,disabled=False,ephemeral_gb=0,extra_specs={hw_rng:allowed='True'},flavorid='6dff30d1-85df-4e9c-9163-a20ba47bb0c7',id=3,is_public=True,memory_mb=128,name='m1.nano',projects=<?>,root_gb=1,rxtx_factor=1.0,swap=0,updated_at=None,vcpu_weight=0,vcpus=1) and image_meta ImageMeta(checksum='c8fc807773e5354afe61636071771906',container_format='bare',created_at=2025-10-11T02:42:57Z,direct_url=<?>,disk_format='qcow2',id=2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c,min_disk=0,min_ram=0,name='tempest-scenario-img--676804077',owner='a05bbc8f872d4dd99972d2cb8136d608',properties=ImageMetaProps,protected=<?>,size=21430272,status='active',tags=<?>,updated_at=2025-10-11T02:42:58Z,virtual_size=<?>,visibility=<?>), allow threads: True _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:563
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.758 2 DEBUG nova.virt.hardware [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Flavor limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:348
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.758 2 DEBUG nova.virt.hardware [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Image limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:352
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.758 2 DEBUG nova.virt.hardware [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Flavor pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:388
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.759 2 DEBUG nova.virt.hardware [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Image pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:392
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.759 2 DEBUG nova.virt.hardware [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Chose sockets=0, cores=0, threads=0; limits were sockets=65536, cores=65536, threads=65536 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:430
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.759 2 DEBUG nova.virt.hardware [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Topology preferred VirtCPUTopology(cores=0,sockets=0,threads=0), maximum VirtCPUTopology(cores=65536,sockets=65536,threads=65536) _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:569
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.760 2 DEBUG nova.virt.hardware [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Build topologies for 1 vcpu(s) 1:1:1 _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:471
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.760 2 DEBUG nova.virt.hardware [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Got 1 possible topologies _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:501
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.761 2 DEBUG nova.virt.hardware [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Possible topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:575
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.761 2 DEBUG nova.virt.hardware [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Sorted desired topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:577
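In the topology lines above, 0:0:0 means "no preference" from flavor or image, so the search space is every sockets x cores x threads factorisation of the vCPU count under the 65536 limits; for 1 vCPU only (1,1,1) qualifies. An illustrative reimplementation of that search:

    import itertools

    def possible_topologies(vcpus, max_sockets=65536, max_cores=65536,
                            max_threads=65536):
        """Yield (sockets, cores, threads) triples whose product is vcpus."""
        for s, c, t in itertools.product(
                range(1, min(vcpus, max_sockets) + 1),
                range(1, min(vcpus, max_cores) + 1),
                range(1, min(vcpus, max_threads) + 1)):
            if s * c * t == vcpus:
                yield s, c, t

    print(list(possible_topologies(1)))  # [(1, 1, 1)] -- matches the log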
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.764 2 DEBUG oslo_concurrency.processutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:45:58 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2052: 321 pgs: 321 active+clean; 264 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 17 KiB/s rd, 1.8 MiB/s wr, 27 op/s
Oct 11 02:45:58 compute-0 nova_compute[356901]: 2025-10-11 02:45:58.976 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:45:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:45:59 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2239136597' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.264 2 DEBUG oslo_concurrency.processutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.500s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
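Before touching RBD the driver shells out for the monitor map, as logged above (and audited by ceph-mon). The same call, reproduced and parsed — key names follow the monmap JSON format:

    import json
    import subprocess

    out = subprocess.check_output([
        'ceph', 'mon', 'dump', '--format=json',
        '--id', 'openstack', '--conf', '/etc/ceph/ceph.conf',
    ])
    monmap = json.loads(out)
    for mon in monmap['mons']:
        # e.g. the 192.168.122.100:6789 monitor used in the guest XML below
        print(mon['name'], mon.get('addr'))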
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.310 2 DEBUG nova.storage.rbd_utils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] rbd image f98d09d7-6aa0-4405-bfa0-be1f78d3911f_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.321 2 DEBUG oslo_concurrency.processutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:45:59 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2239136597' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:45:59 compute-0 podman[157119]: time="2025-10-11T02:45:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:45:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:45:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:45:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json"} v 0) v1
Oct 11 02:45:59 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3493601268' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:45:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:45:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9542 "" "Go-http-client/1.1"
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.806 2 DEBUG oslo_concurrency.processutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "ceph mon dump --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.485s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.813 2 DEBUG nova.virt.libvirt.vif [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:45:51Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description=None,display_name='te-0512306-asg-am4iabdjybzp-yj44h76hdzhi-bejrsw3xgi4q',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='te-0512306-asg-am4iabdjybzp-yj44h76hdzhi-bejrsw3xgi4q',id=15,image_ref='2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data=None,key_name=None,keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={metering.server_group='44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='a05bbc8f872d4dd99972d2cb8136d608',ramdisk_id='',reservation_id='r-15xf1e0g',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-PrometheusGabbiTest-674022988',owner_user_name='tempest-PrometheusGabbiTest-674022988-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:45:53Z,user_data='IyEvYmluL3NoCmVjaG8gJ0xvYWRpbmcgQ1BVJwpzZXQgLXYKY2F0IC9kZXYvdXJhbmRvbSA+IC9kZXYvbnVsbCAmIHNsZWVwIDMwMCA7IGtpbGwgJCEgCg==',user_id='f66a606299944d53a40f21e81c791d70',uuid=f98d09d7-6aa0-4405-bfa0-be1f78d3911f,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "0c37c119-6647-42bb-a22f-ca741242ef30", "address": "fa:16:3e:ee:94:7e", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.2.253", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap0c37c119-66", "ovs_interfaceid": "0c37c119-6647-42bb-a22f-ca741242ef30", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} virt_type=kvm get_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:563
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.814 2 DEBUG nova.network.os_vif_util [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Converting VIF {"id": "0c37c119-6647-42bb-a22f-ca741242ef30", "address": "fa:16:3e:ee:94:7e", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.2.253", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap0c37c119-66", "ovs_interfaceid": "0c37c119-6647-42bb-a22f-ca741242ef30", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.815 2 DEBUG nova.network.os_vif_util [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:ee:94:7e,bridge_name='br-int',has_traffic_filtering=True,id=0c37c119-6647-42bb-a22f-ca741242ef30,network=Network(3563b4a1-477a-44a0-b01f-7d19d49c0308),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap0c37c119-66') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.817 2 DEBUG nova.objects.instance [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lazy-loading 'pci_devices' on Instance uuid f98d09d7-6aa0-4405-bfa0-be1f78d3911f obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.850 2 DEBUG nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] End _get_guest_xml xml=<domain type="kvm">
Oct 11 02:45:59 compute-0 nova_compute[356901]:   <uuid>f98d09d7-6aa0-4405-bfa0-be1f78d3911f</uuid>
Oct 11 02:45:59 compute-0 nova_compute[356901]:   <name>instance-0000000f</name>
Oct 11 02:45:59 compute-0 nova_compute[356901]:   <memory>131072</memory>
Oct 11 02:45:59 compute-0 nova_compute[356901]:   <vcpu>1</vcpu>
Oct 11 02:45:59 compute-0 nova_compute[356901]:   <metadata>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.1">
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <nova:package version="27.5.2-0.20250829104910.6f8decf.el9"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <nova:name>te-0512306-asg-am4iabdjybzp-yj44h76hdzhi-bejrsw3xgi4q</nova:name>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <nova:creationTime>2025-10-11 02:45:58</nova:creationTime>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <nova:flavor name="m1.nano">
Oct 11 02:45:59 compute-0 nova_compute[356901]:         <nova:memory>128</nova:memory>
Oct 11 02:45:59 compute-0 nova_compute[356901]:         <nova:disk>1</nova:disk>
Oct 11 02:45:59 compute-0 nova_compute[356901]:         <nova:swap>0</nova:swap>
Oct 11 02:45:59 compute-0 nova_compute[356901]:         <nova:ephemeral>0</nova:ephemeral>
Oct 11 02:45:59 compute-0 nova_compute[356901]:         <nova:vcpus>1</nova:vcpus>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       </nova:flavor>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <nova:owner>
Oct 11 02:45:59 compute-0 nova_compute[356901]:         <nova:user uuid="f66a606299944d53a40f21e81c791d70">tempest-PrometheusGabbiTest-674022988-project-member</nova:user>
Oct 11 02:45:59 compute-0 nova_compute[356901]:         <nova:project uuid="a05bbc8f872d4dd99972d2cb8136d608">tempest-PrometheusGabbiTest-674022988</nova:project>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       </nova:owner>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <nova:root type="image" uuid="2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <nova:ports>
Oct 11 02:45:59 compute-0 nova_compute[356901]:         <nova:port uuid="0c37c119-6647-42bb-a22f-ca741242ef30">
Oct 11 02:45:59 compute-0 nova_compute[356901]:           <nova:ip type="fixed" address="10.100.2.253" ipVersion="4"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:         </nova:port>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       </nova:ports>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     </nova:instance>
Oct 11 02:45:59 compute-0 nova_compute[356901]:   </metadata>
Oct 11 02:45:59 compute-0 nova_compute[356901]:   <sysinfo type="smbios">
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <system>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <entry name="manufacturer">RDO</entry>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <entry name="product">OpenStack Compute</entry>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <entry name="version">27.5.2-0.20250829104910.6f8decf.el9</entry>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <entry name="serial">f98d09d7-6aa0-4405-bfa0-be1f78d3911f</entry>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <entry name="uuid">f98d09d7-6aa0-4405-bfa0-be1f78d3911f</entry>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <entry name="family">Virtual Machine</entry>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     </system>
Oct 11 02:45:59 compute-0 nova_compute[356901]:   </sysinfo>
Oct 11 02:45:59 compute-0 nova_compute[356901]:   <os>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <type arch="x86_64" machine="q35">hvm</type>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <boot dev="hd"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <smbios mode="sysinfo"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:   </os>
Oct 11 02:45:59 compute-0 nova_compute[356901]:   <features>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <acpi/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <apic/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <vmcoreinfo/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:   </features>
Oct 11 02:45:59 compute-0 nova_compute[356901]:   <clock offset="utc">
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <timer name="pit" tickpolicy="delay"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <timer name="rtc" tickpolicy="catchup"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <timer name="hpet" present="no"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:   </clock>
Oct 11 02:45:59 compute-0 nova_compute[356901]:   <cpu mode="host-model" match="exact">
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <topology sockets="1" cores="1" threads="1"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:   </cpu>
Oct 11 02:45:59 compute-0 nova_compute[356901]:   <devices>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <disk type="network" device="disk">
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/f98d09d7-6aa0-4405-bfa0-be1f78d3911f_disk">
Oct 11 02:45:59 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       </source>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:45:59 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <target dev="vda" bus="virtio"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <disk type="network" device="cdrom">
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <driver type="raw" cache="none"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <source protocol="rbd" name="vms/f98d09d7-6aa0-4405-bfa0-be1f78d3911f_disk.config">
Oct 11 02:45:59 compute-0 nova_compute[356901]:         <host name="192.168.122.100" port="6789"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       </source>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <auth username="openstack">
Oct 11 02:45:59 compute-0 nova_compute[356901]:         <secret type="ceph" uuid="3c7617c3-7a20-523e-a9de-20c0d6ba41da"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       </auth>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <target dev="sda" bus="sata"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     </disk>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <interface type="ethernet">
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <mac address="fa:16:3e:ee:94:7e"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <driver name="vhost" rx_queue_size="512"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <mtu size="1442"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <target dev="tap0c37c119-66"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     </interface>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <serial type="pty">
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <log file="/var/lib/nova/instances/f98d09d7-6aa0-4405-bfa0-be1f78d3911f/console.log" append="off"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     </serial>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <graphics type="vnc" autoport="yes" listen="::0"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <video>
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <model type="virtio"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     </video>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <input type="tablet" bus="usb"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <rng model="virtio">
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <backend model="random">/dev/urandom</backend>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     </rng>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="pci" model="pcie-root-port"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <controller type="usb" index="0"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     <memballoon model="virtio">
Oct 11 02:45:59 compute-0 nova_compute[356901]:       <stats period="10"/>
Oct 11 02:45:59 compute-0 nova_compute[356901]:     </memballoon>
Oct 11 02:45:59 compute-0 nova_compute[356901]:   </devices>
Oct 11 02:45:59 compute-0 nova_compute[356901]: </domain>
Oct 11 02:45:59 compute-0 nova_compute[356901]:  _get_guest_xml /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:7555
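Once _get_guest_xml returns the <domain> document above, the driver hands it to libvirt. A minimal libvirt-python sketch of that handoff — not nova's exact call sequence; the URI and file name are assumptions:

    import libvirt

    xml = open('instance-0000000f.xml').read()  # the <domain> document above
    conn = libvirt.open('qemu:///system')
    try:
        dom = conn.defineXML(xml)  # persist the domain config
        dom.create()               # and power it on
        print(dom.name(), dom.UUIDString())
    finally:
        conn.close()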
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.852 2 DEBUG nova.compute.manager [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Preparing to wait for external event network-vif-plugged-0c37c119-6647-42bb-a22f-ca741242ef30 prepare_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:283
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.852 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.853 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" acquired by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.853 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" "released" by "nova.compute.manager.InstanceEvents.prepare_for_instance_event.<locals>._create_or_get_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.854 2 DEBUG nova.virt.libvirt.vif [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',created_at=2025-10-11T02:45:51Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=None,disable_terminate=False,display_description=None,display_name='te-0512306-asg-am4iabdjybzp-yj44h76hdzhi-bejrsw3xgi4q',ec2_ids=EC2Ids,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='te-0512306-asg-am4iabdjybzp-yj44h76hdzhi-bejrsw3xgi4q',id=15,image_ref='2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data=None,key_name=None,keypairs=KeyPairList,launch_index=0,launched_at=None,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={metering.server_group='44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'},migration_context=None,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=PciDeviceList,pci_requests=InstancePCIRequests,power_state=0,progress=0,project_id='a05bbc8f872d4dd99972d2cb8136d608',ramdisk_id='',reservation_id='r-15xf1e0g',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c',image_container_format='bare',image_disk_format='qcow2',image_hw_machine_type='q35',image_min_disk='1',image_min_ram='0',network_allocated='True',owner_project_name='tempest-PrometheusGabbiTest-674022988',owner_user_name='tempest-PrometheusGabbiTest-674022988-project-member'},tags=TagList,task_state='spawning',terminated_at=None,trusted_certs=None,updated_at=2025-10-11T02:45:53Z,user_data='IyEvYmluL3NoCmVjaG8gJ0xvYWRpbmcgQ1BVJwpzZXQgLXYKY2F0IC9kZXYvdXJhbmRvbSA+IC9kZXYvbnVsbCAmIHNsZWVwIDMwMCA7IGtpbGwgJCEgCg==',user_id='f66a606299944d53a40f21e81c791d70',uuid=f98d09d7-6aa0-4405-bfa0-be1f78d3911f,vcpu_model=VirtCPUModel,vcpus=1,vm_mode=None,vm_state='building') vif={"id": "0c37c119-6647-42bb-a22f-ca741242ef30", "address": "fa:16:3e:ee:94:7e", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.2.253", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap0c37c119-66", "ovs_interfaceid": "0c37c119-6647-42bb-a22f-ca741242ef30", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} plug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:710
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.855 2 DEBUG nova.network.os_vif_util [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Converting VIF {"id": "0c37c119-6647-42bb-a22f-ca741242ef30", "address": "fa:16:3e:ee:94:7e", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.2.253", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap0c37c119-66", "ovs_interfaceid": "0c37c119-6647-42bb-a22f-ca741242ef30", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.855 2 DEBUG nova.network.os_vif_util [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Converted object VIFOpenVSwitch(active=False,address=fa:16:3e:ee:94:7e,bridge_name='br-int',has_traffic_filtering=True,id=0c37c119-6647-42bb-a22f-ca741242ef30,network=Network(3563b4a1-477a-44a0-b01f-7d19d49c0308),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap0c37c119-66') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.856 2 DEBUG os_vif [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Plugging vif VIFOpenVSwitch(active=False,address=fa:16:3e:ee:94:7e,bridge_name='br-int',has_traffic_filtering=True,id=0c37c119-6647-42bb-a22f-ca741242ef30,network=Network(3563b4a1-477a-44a0-b01f-7d19d49c0308),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap0c37c119-66') plug /usr/lib/python3.9/site-packages/os_vif/__init__.py:76
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.857 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.858 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddBridgeCommand(_result=None, name=br-int, may_exist=True, datapath_type=system) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.859 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.863 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.863 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tap0c37c119-66, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.864 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=1): DbSetCommand(_result=None, table=Interface, record=tap0c37c119-66, col_values=(('external_ids', {'iface-id': '0c37c119-6647-42bb-a22f-ca741242ef30', 'iface-status': 'active', 'attached-mac': 'fa:16:3e:ee:94:7e', 'vm-uuid': 'f98d09d7-6aa0-4405-bfa0-be1f78d3911f'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.867 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:59 compute-0 NetworkManager[44908]: <info>  [1760150759.8686] manager: (tap0c37c119-66): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/76)
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.870 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.878 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.885 2 INFO os_vif [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Successfully plugged vif VIFOpenVSwitch(active=False,address=fa:16:3e:ee:94:7e,bridge_name='br-int',has_traffic_filtering=True,id=0c37c119-6647-42bb-a22f-ca741242ef30,network=Network(3563b4a1-477a-44a0-b01f-7d19d49c0308),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap0c37c119-66')
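The AddPortCommand/DbSetCommand transaction above is what "plugging" the VIF amounts to on the OVS side: attach the tap to br-int and tag its Interface row so ovn-controller can bind the Neutron port. A hedged sketch of the same two commands through ovsdbapp's Open_vSwitch API (the socket path is an assumption):

    from ovsdbapp.backend.ovs_idl import connection
    from ovsdbapp.schema.open_vswitch import impl_idl

    OVSDB = 'unix:/run/openvswitch/db.sock'  # assumed local ovsdb socket
    idl = connection.OvsdbIdl.from_server(OVSDB, 'Open_vSwitch')
    api = impl_idl.OvsdbIdl(connection.Connection(idl, timeout=5))

    # One transaction: add the tap port to br-int, then set the external_ids
    # that tie the Interface row to Neutron port 0c37c119-...
    with api.transaction(check_error=True) as txn:
        txn.add(api.add_port('br-int', 'tap0c37c119-66', may_exist=True))
        txn.add(api.db_set('Interface', 'tap0c37c119-66',
                           ('external_ids', {
                               'iface-id': '0c37c119-6647-42bb-a22f-ca741242ef30',
                               'attached-mac': 'fa:16:3e:ee:94:7e',
                           })))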
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.951 2 DEBUG nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] No BDM found with device name vda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.951 2 DEBUG nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] No BDM found with device name sda, not building metadata. _build_disk_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12116
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.952 2 DEBUG nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] No VIF found with MAC fa:16:3e:ee:94:7e, not building metadata _build_interface_metadata /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:12092
Oct 11 02:45:59 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.952 2 INFO nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Using config drive
Oct 11 02:46:00 compute-0 nova_compute[356901]: 2025-10-11 02:45:59.999 2 DEBUG nova.storage.rbd_utils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] rbd image f98d09d7-6aa0-4405-bfa0-be1f78d3911f_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:46:00 compute-0 nova_compute[356901]: 2025-10-11 02:46:00.279 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:00 compute-0 ceph-mon[191930]: pgmap v2052: 321 pgs: 321 active+clean; 264 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 17 KiB/s rd, 1.8 MiB/s wr, 27 op/s
Oct 11 02:46:00 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3493601268' entity='client.openstack' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
Oct 11 02:46:00 compute-0 nova_compute[356901]: 2025-10-11 02:46:00.627 2 INFO nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Creating config drive at /var/lib/nova/instances/f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.config
Oct 11 02:46:00 compute-0 nova_compute[356901]: 2025-10-11 02:46:00.640 2 DEBUG oslo_concurrency.processutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): /usr/bin/mkisofs -o /var/lib/nova/instances/f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmppwxrzqfs execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:46:00 compute-0 nova_compute[356901]: 2025-10-11 02:46:00.804 2 DEBUG oslo_concurrency.processutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "/usr/bin/mkisofs -o /var/lib/nova/instances/f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.config -ldots -allow-lowercase -allow-multidot -l -publisher OpenStack Compute 27.5.2-0.20250829104910.6f8decf.el9 -quiet -J -r -V config-2 /tmp/tmppwxrzqfs" returned: 0 in 0.164s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:46:00 compute-0 nova_compute[356901]: 2025-10-11 02:46:00.874 2 DEBUG nova.storage.rbd_utils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] rbd image f98d09d7-6aa0-4405-bfa0-be1f78d3911f_disk.config does not exist __init__ /usr/lib/python3.9/site-packages/nova/storage/rbd_utils.py:80
Oct 11 02:46:00 compute-0 nova_compute[356901]: 2025-10-11 02:46:00.886 2 DEBUG oslo_concurrency.processutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): rbd import --pool vms /var/lib/nova/instances/f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.config f98d09d7-6aa0-4405-bfa0-be1f78d3911f_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:46:00 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2053: 321 pgs: 321 active+clean; 264 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 17 KiB/s rd, 1.8 MiB/s wr, 27 op/s
Oct 11 02:46:01 compute-0 nova_compute[356901]: 2025-10-11 02:46:01.205 2 DEBUG oslo_concurrency.processutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "rbd import --pool vms /var/lib/nova/instances/f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.config f98d09d7-6aa0-4405-bfa0-be1f78d3911f_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.318s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:46:01 compute-0 nova_compute[356901]: 2025-10-11 02:46:01.206 2 INFO nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Deleting local config drive /var/lib/nova/instances/f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.config because it was imported into RBD.
Oct 11 02:46:01 compute-0 systemd[1]: Starting libvirt secret daemon...
Oct 11 02:46:01 compute-0 systemd[1]: Started libvirt secret daemon.
Oct 11 02:46:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:46:01 compute-0 kernel: tap0c37c119-66: entered promiscuous mode
Oct 11 02:46:01 compute-0 ovn_controller[88370]: 2025-10-11T02:46:01Z|00183|binding|INFO|Claiming lport 0c37c119-6647-42bb-a22f-ca741242ef30 for this chassis.
Oct 11 02:46:01 compute-0 ovn_controller[88370]: 2025-10-11T02:46:01Z|00184|binding|INFO|0c37c119-6647-42bb-a22f-ca741242ef30: Claiming fa:16:3e:ee:94:7e 10.100.2.253
Oct 11 02:46:01 compute-0 NetworkManager[44908]: <info>  [1760150761.3720] manager: (tap0c37c119-66): new Tun device (/org/freedesktop/NetworkManager/Devices/77)
Oct 11 02:46:01 compute-0 nova_compute[356901]: 2025-10-11 02:46:01.372 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:01.385 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:ee:94:7e 10.100.2.253'], port_security=['fa:16:3e:ee:94:7e 10.100.2.253'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.2.253/16', 'neutron:device_id': 'f98d09d7-6aa0-4405-bfa0-be1f78d3911f', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-3563b4a1-477a-44a0-b01f-7d19d49c0308', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'neutron:revision_number': '2', 'neutron:security_group_ids': 'd961c453-0bcb-43ec-b528-5018786739ee', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=4674209d-30ab-42f4-9114-728458c302a8, chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=0c37c119-6647-42bb-a22f-ca741242ef30) old=Port_Binding(chassis=[]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:46:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:01.388 286362 INFO neutron.agent.ovn.metadata.agent [-] Port 0c37c119-6647-42bb-a22f-ca741242ef30 in datapath 3563b4a1-477a-44a0-b01f-7d19d49c0308 bound to our chassis
Oct 11 02:46:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:01.395 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network 3563b4a1-477a-44a0-b01f-7d19d49c0308
Oct 11 02:46:01 compute-0 ovn_controller[88370]: 2025-10-11T02:46:01Z|00185|binding|INFO|Setting lport 0c37c119-6647-42bb-a22f-ca741242ef30 ovn-installed in OVS
Oct 11 02:46:01 compute-0 ovn_controller[88370]: 2025-10-11T02:46:01Z|00186|binding|INFO|Setting lport 0c37c119-6647-42bb-a22f-ca741242ef30 up in Southbound
Oct 11 02:46:01 compute-0 nova_compute[356901]: 2025-10-11 02:46:01.408 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:01 compute-0 openstack_network_exporter[374316]: ERROR   02:46:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:46:01 compute-0 openstack_network_exporter[374316]: ERROR   02:46:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:46:01 compute-0 openstack_network_exporter[374316]: ERROR   02:46:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:46:01 compute-0 openstack_network_exporter[374316]: ERROR   02:46:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:46:01 compute-0 openstack_network_exporter[374316]: ERROR   02:46:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:46:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:01.430 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[bbf57b03-5749-4164-9a45-f14f9f6c00e1]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:46:01 compute-0 systemd-machined[137586]: New machine qemu-16-instance-0000000f.
Oct 11 02:46:01 compute-0 systemd[1]: Started Virtual Machine qemu-16-instance-0000000f.
Oct 11 02:46:01 compute-0 systemd-udevd[467302]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 02:46:01 compute-0 NetworkManager[44908]: <info>  [1760150761.4807] device (tap0c37c119-66): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 02:46:01 compute-0 NetworkManager[44908]: <info>  [1760150761.4815] device (tap0c37c119-66): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Oct 11 02:46:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:01.503 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[d7287968-1430-4775-98dd-bfd2365a6900]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:46:01 compute-0 nova_compute[356901]: 2025-10-11 02:46:01.505 2 DEBUG nova.network.neutron [req-947f5b29-dc92-4fd6-be24-25b18709df72 req-87ff9f60-e878-4b45-ab96-c1440700f517 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Updated VIF entry in instance network info cache for port 0c37c119-6647-42bb-a22f-ca741242ef30. _build_network_info_model /usr/lib/python3.9/site-packages/nova/network/neutron.py:3482
Oct 11 02:46:01 compute-0 nova_compute[356901]: 2025-10-11 02:46:01.505 2 DEBUG nova.network.neutron [req-947f5b29-dc92-4fd6-be24-25b18709df72 req-87ff9f60-e878-4b45-ab96-c1440700f517 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Updating instance_info_cache with network_info: [{"id": "0c37c119-6647-42bb-a22f-ca741242ef30", "address": "fa:16:3e:ee:94:7e", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.2.253", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap0c37c119-66", "ovs_interfaceid": "0c37c119-6647-42bb-a22f-ca741242ef30", "qbh_params": null, "qbg_params": null, "active": false, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:46:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:01.510 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[e1ecc099-e3a8-4501-aa15-d30da6b60489]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:46:01 compute-0 sudo[467295]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:46:01 compute-0 sudo[467295]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:01 compute-0 sudo[467295]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:01 compute-0 nova_compute[356901]: 2025-10-11 02:46:01.529 2 DEBUG oslo_concurrency.lockutils [req-947f5b29-dc92-4fd6-be24-25b18709df72 req-87ff9f60-e878-4b45-ab96-c1440700f517 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Releasing lock "refresh_cache-f98d09d7-6aa0-4405-bfa0-be1f78d3911f" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:46:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:01.543 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[62444756-35f5-4c3d-b39d-45d80a4a36f7]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:46:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:01.570 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[60524b96-bb57-4d40-8421-53c020a8661e]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tap3563b4a1-41'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:25:cf:fd'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 10, 'tx_packets': 5, 'rx_bytes': 916, 'tx_bytes': 354, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 10, 'tx_packets': 5, 'rx_bytes': 916, 'tx_bytes': 354, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 47], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 694227, 'reachable_time': 22741, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 8, 'inoctets': 720, 'indelivers': 1, 'outforwdatagrams': 0, 'outpkts': 3, 'outoctets': 228, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 8, 'outmcastpkts': 3, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 720, 'outmcastoctets': 228, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 8, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 1, 'inerrors': 0, 'outmsgs': 3, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 467337, 'error': None, 'target': 'ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:46:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:01.593 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[f75cce9e-553a-4ef0-9511-91b8d4fba783]: (4, ({'family': 2, 'prefixlen': 32, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '169.254.169.254'], ['IFA_LOCAL', '169.254.169.254'], ['IFA_BROADCAST', '169.254.169.254'], ['IFA_LABEL', 'tap3563b4a1-41'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 694243, 'tstamp': 694243}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 467357, 'error': None, 'target': 'ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'}, {'family': 2, 'prefixlen': 16, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '10.100.0.2'], ['IFA_LOCAL', '10.100.0.2'], ['IFA_BROADCAST', '10.100.255.255'], ['IFA_LABEL', 'tap3563b4a1-41'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 694248, 'tstamp': 694248}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 467357, 'error': None, 'target': 'ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'})) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:46:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:01.595 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tap3563b4a1-40, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:46:01 compute-0 nova_compute[356901]: 2025-10-11 02:46:01.597 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:01.598 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tap3563b4a1-40, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:46:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:01.598 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:46:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:01.599 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tap3563b4a1-40, col_values=(('external_ids', {'iface-id': 'bd6ddb48-868e-41a0-8ff2-0f3a1a9b4d81'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:46:01 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:01.599 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:46:01 compute-0 sudo[467332]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:46:01 compute-0 sudo[467332]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:01 compute-0 sudo[467332]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:01 compute-0 sudo[467360]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:46:01 compute-0 sudo[467360]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:01 compute-0 sudo[467360]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:01 compute-0 sudo[467385]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:46:01 compute-0 sudo[467385]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:02 compute-0 systemd[1]: Starting libvirt proxy daemon...
Oct 11 02:46:02 compute-0 systemd[1]: Started libvirt proxy daemon.
Oct 11 02:46:02 compute-0 sudo[467385]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:46:02 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:46:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:46:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:46:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:46:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:46:02 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 426efce3-78ff-40f7-897f-799a7826e260 does not exist
Oct 11 02:46:02 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 5425615a-6c46-40ad-a38a-959caa094c77 does not exist
Oct 11 02:46:02 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 534c5d87-9202-4c40-8f7c-2ffb2972a5b1 does not exist
Oct 11 02:46:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:46:02 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:46:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:46:02 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:46:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:46:02 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:46:02 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:02.535 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '16'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:46:02 compute-0 sudo[467498]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:46:02 compute-0 sudo[467498]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:02 compute-0 ceph-mon[191930]: pgmap v2053: 321 pgs: 321 active+clean; 264 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 17 KiB/s rd, 1.8 MiB/s wr, 27 op/s
Oct 11 02:46:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:46:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:46:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:46:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:46:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:46:02 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:46:02 compute-0 sudo[467498]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.633 2 DEBUG nova.compute.manager [req-3367169f-b01b-48d5-be88-59f7cf4b1e5e req-0f87d3b9-1067-4975-9fdb-a55fe12c8b95 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Received event network-vif-plugged-0c37c119-6647-42bb-a22f-ca741242ef30 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.633 2 DEBUG oslo_concurrency.lockutils [req-3367169f-b01b-48d5-be88-59f7cf4b1e5e req-0f87d3b9-1067-4975-9fdb-a55fe12c8b95 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.633 2 DEBUG oslo_concurrency.lockutils [req-3367169f-b01b-48d5-be88-59f7cf4b1e5e req-0f87d3b9-1067-4975-9fdb-a55fe12c8b95 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.634 2 DEBUG oslo_concurrency.lockutils [req-3367169f-b01b-48d5-be88-59f7cf4b1e5e req-0f87d3b9-1067-4975-9fdb-a55fe12c8b95 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.634 2 DEBUG nova.compute.manager [req-3367169f-b01b-48d5-be88-59f7cf4b1e5e req-0f87d3b9-1067-4975-9fdb-a55fe12c8b95 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Processing event network-vif-plugged-0c37c119-6647-42bb-a22f-ca741242ef30 _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10808
Oct 11 02:46:02 compute-0 sudo[467545]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:46:02 compute-0 sudo[467545]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:02 compute-0 sudo[467545]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:02 compute-0 podman[467525]: 2025-10-11 02:46:02.679958088 +0000 UTC m=+0.089382622 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_managed=true, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_metadata_agent, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:46:02 compute-0 podman[467522]: 2025-10-11 02:46:02.696535304 +0000 UTC m=+0.110402333 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:46:02 compute-0 podman[467524]: 2025-10-11 02:46:02.718190503 +0000 UTC m=+0.129442960 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, config_id=edpm, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']})
Oct 11 02:46:02 compute-0 sudo[467615]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:46:02 compute-0 podman[467523]: 2025-10-11 02:46:02.756962835 +0000 UTC m=+0.179923657 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:46:02 compute-0 sudo[467615]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:02 compute-0 sudo[467615]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:02 compute-0 sudo[467654]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:46:02 compute-0 sudo[467654]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.905 2 DEBUG nova.compute.manager [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Instance event wait completed in 0 seconds for network-vif-plugged wait_for_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:577
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.906 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150762.9049706, f98d09d7-6aa0-4405-bfa0-be1f78d3911f => Started> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.906 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] VM Started (Lifecycle Event)
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.925 2 DEBUG nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Guest created on hypervisor spawn /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:4417
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.932 2 INFO nova.virt.libvirt.driver [-] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Instance spawned successfully.
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.932 2 DEBUG nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Attempting to register defaults for the following image properties: ['hw_cdrom_bus', 'hw_disk_bus', 'hw_input_bus', 'hw_pointer_model', 'hw_video_model', 'hw_vif_model'] _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:917
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.962 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:46:02 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2054: 321 pgs: 321 active+clean; 264 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 18 KiB/s rd, 1.8 MiB/s wr, 27 op/s
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.979 2 DEBUG nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Found default for hw_cdrom_bus of sata _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.980 2 DEBUG nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Found default for hw_disk_bus of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.980 2 DEBUG nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Found default for hw_input_bus of usb _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.981 2 DEBUG nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Found default for hw_pointer_model of usbtablet _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.981 2 DEBUG nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Found default for hw_video_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.982 2 DEBUG nova.virt.libvirt.driver [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Found default for hw_vif_model of virtio _register_undefined_instance_details /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:946
Oct 11 02:46:02 compute-0 nova_compute[356901]: 2025-10-11 02:46:02.991 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Synchronizing instance power state after lifecycle event "Started"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:46:03 compute-0 nova_compute[356901]: 2025-10-11 02:46:03.012 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] During sync_power_state the instance has a pending task (spawning). Skip.
Oct 11 02:46:03 compute-0 nova_compute[356901]: 2025-10-11 02:46:03.013 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150762.9125414, f98d09d7-6aa0-4405-bfa0-be1f78d3911f => Paused> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:46:03 compute-0 nova_compute[356901]: 2025-10-11 02:46:03.013 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] VM Paused (Lifecycle Event)
Oct 11 02:46:03 compute-0 nova_compute[356901]: 2025-10-11 02:46:03.036 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:46:03 compute-0 nova_compute[356901]: 2025-10-11 02:46:03.039 2 INFO nova.compute.manager [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Took 9.31 seconds to spawn the instance on the hypervisor.
Oct 11 02:46:03 compute-0 nova_compute[356901]: 2025-10-11 02:46:03.040 2 DEBUG nova.compute.manager [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:46:03 compute-0 nova_compute[356901]: 2025-10-11 02:46:03.052 2 DEBUG nova.virt.driver [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] Emitting event <LifecycleEvent: 1760150762.9126277, f98d09d7-6aa0-4405-bfa0-be1f78d3911f => Resumed> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:46:03 compute-0 nova_compute[356901]: 2025-10-11 02:46:03.053 2 INFO nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] VM Resumed (Lifecycle Event)
Oct 11 02:46:03 compute-0 nova_compute[356901]: 2025-10-11 02:46:03.071 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:46:03 compute-0 nova_compute[356901]: 2025-10-11 02:46:03.086 2 DEBUG nova.compute.manager [None req-5fc76a04-23ed-42ba-9ba0-9053dbeb5e9a - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Synchronizing instance power state after lifecycle event "Resumed"; current vm_state: building, current task_state: spawning, current DB power_state: 0, VM power_state: 1 handle_lifecycle_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:1396
Oct 11 02:46:03 compute-0 nova_compute[356901]: 2025-10-11 02:46:03.125 2 INFO nova.compute.manager [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Took 10.34 seconds to build instance.
Oct 11 02:46:03 compute-0 nova_compute[356901]: 2025-10-11 02:46:03.142 2 DEBUG oslo_concurrency.lockutils [None req-5aaaef8e-15b8-4212-9d33-fddfe6941c0d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f" "released" by "nova.compute.manager.ComputeManager.build_and_run_instance.<locals>._locked_do_build_and_run_instance" :: held 10.423s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:46:03 compute-0 podman[467716]: 2025-10-11 02:46:03.420521041 +0000 UTC m=+0.097726426 container create f0eaf3f9546d9c87b1759fddf6c458d1a1f1118714368e42e1eabd7db9efc6d5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_ritchie, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 02:46:03 compute-0 podman[467716]: 2025-10-11 02:46:03.380683236 +0000 UTC m=+0.057888671 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:46:03 compute-0 systemd[1]: Started libpod-conmon-f0eaf3f9546d9c87b1759fddf6c458d1a1f1118714368e42e1eabd7db9efc6d5.scope.
Oct 11 02:46:03 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:46:03 compute-0 podman[467716]: 2025-10-11 02:46:03.588204965 +0000 UTC m=+0.265410390 container init f0eaf3f9546d9c87b1759fddf6c458d1a1f1118714368e42e1eabd7db9efc6d5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_ritchie, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef)
Oct 11 02:46:03 compute-0 podman[467716]: 2025-10-11 02:46:03.604033291 +0000 UTC m=+0.281238716 container start f0eaf3f9546d9c87b1759fddf6c458d1a1f1118714368e42e1eabd7db9efc6d5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_ritchie, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=reef, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:46:03 compute-0 podman[467716]: 2025-10-11 02:46:03.610103277 +0000 UTC m=+0.287308722 container attach f0eaf3f9546d9c87b1759fddf6c458d1a1f1118714368e42e1eabd7db9efc6d5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_ritchie, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, io.buildah.version=1.39.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:46:03 compute-0 compassionate_ritchie[467731]: 167 167
Oct 11 02:46:03 compute-0 systemd[1]: libpod-f0eaf3f9546d9c87b1759fddf6c458d1a1f1118714368e42e1eabd7db9efc6d5.scope: Deactivated successfully.
Oct 11 02:46:03 compute-0 podman[467736]: 2025-10-11 02:46:03.68436045 +0000 UTC m=+0.047818456 container died f0eaf3f9546d9c87b1759fddf6c458d1a1f1118714368e42e1eabd7db9efc6d5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_ritchie, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:46:03 compute-0 systemd[1]: var-lib-containers-storage-overlay-5a119813da5bf8fa84339ce232edfcc837903a31586e971bafadb0b92417e2e8-merged.mount: Deactivated successfully.
Oct 11 02:46:03 compute-0 podman[467736]: 2025-10-11 02:46:03.77044106 +0000 UTC m=+0.133899056 container remove f0eaf3f9546d9c87b1759fddf6c458d1a1f1118714368e42e1eabd7db9efc6d5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_ritchie, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:46:03 compute-0 systemd[1]: libpod-conmon-f0eaf3f9546d9c87b1759fddf6c458d1a1f1118714368e42e1eabd7db9efc6d5.scope: Deactivated successfully.
Oct 11 02:46:04 compute-0 podman[467758]: 2025-10-11 02:46:04.08591781 +0000 UTC m=+0.094724239 container create 29e77e16558ba99f50425b65c41995224d041c54dd681ab6805de83e7ac2aa15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_easley, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:46:04 compute-0 systemd[1]: Started libpod-conmon-29e77e16558ba99f50425b65c41995224d041c54dd681ab6805de83e7ac2aa15.scope.
Oct 11 02:46:04 compute-0 podman[467758]: 2025-10-11 02:46:04.055687464 +0000 UTC m=+0.064493903 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:46:04 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:46:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dd9b2f05537e758b0f726c91072026302df3bad8ca969940b9b5f9fecc9f45d8/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:46:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dd9b2f05537e758b0f726c91072026302df3bad8ca969940b9b5f9fecc9f45d8/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:46:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dd9b2f05537e758b0f726c91072026302df3bad8ca969940b9b5f9fecc9f45d8/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:46:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dd9b2f05537e758b0f726c91072026302df3bad8ca969940b9b5f9fecc9f45d8/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:46:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/dd9b2f05537e758b0f726c91072026302df3bad8ca969940b9b5f9fecc9f45d8/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:46:04 compute-0 podman[467758]: 2025-10-11 02:46:04.242989261 +0000 UTC m=+0.251795690 container init 29e77e16558ba99f50425b65c41995224d041c54dd681ab6805de83e7ac2aa15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_easley, io.buildah.version=1.39.3, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:46:04 compute-0 podman[467758]: 2025-10-11 02:46:04.255609238 +0000 UTC m=+0.264415657 container start 29e77e16558ba99f50425b65c41995224d041c54dd681ab6805de83e7ac2aa15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_easley, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507)
Oct 11 02:46:04 compute-0 podman[467758]: 2025-10-11 02:46:04.260876844 +0000 UTC m=+0.269683253 container attach 29e77e16558ba99f50425b65c41995224d041c54dd681ab6805de83e7ac2aa15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_easley, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 02:46:04 compute-0 ceph-mon[191930]: pgmap v2054: 321 pgs: 321 active+clean; 264 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 18 KiB/s rd, 1.8 MiB/s wr, 27 op/s
Oct 11 02:46:04 compute-0 nova_compute[356901]: 2025-10-11 02:46:04.868 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:04 compute-0 nova_compute[356901]: 2025-10-11 02:46:04.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:46:04 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2055: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 24 KiB/s rd, 1.8 MiB/s wr, 35 op/s
Oct 11 02:46:05 compute-0 nova_compute[356901]: 2025-10-11 02:46:05.282 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:05 compute-0 interesting_easley[467774]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:46:05 compute-0 interesting_easley[467774]: --> relative data size: 1.0
Oct 11 02:46:05 compute-0 interesting_easley[467774]: --> All data devices are unavailable
Oct 11 02:46:05 compute-0 systemd[1]: libpod-29e77e16558ba99f50425b65c41995224d041c54dd681ab6805de83e7ac2aa15.scope: Deactivated successfully.
Oct 11 02:46:05 compute-0 systemd[1]: libpod-29e77e16558ba99f50425b65c41995224d041c54dd681ab6805de83e7ac2aa15.scope: Consumed 1.139s CPU time.
Oct 11 02:46:05 compute-0 podman[467758]: 2025-10-11 02:46:05.473099375 +0000 UTC m=+1.481905794 container died 29e77e16558ba99f50425b65c41995224d041c54dd681ab6805de83e7ac2aa15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_easley, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:46:05 compute-0 systemd[1]: var-lib-containers-storage-overlay-dd9b2f05537e758b0f726c91072026302df3bad8ca969940b9b5f9fecc9f45d8-merged.mount: Deactivated successfully.
Oct 11 02:46:05 compute-0 podman[467758]: 2025-10-11 02:46:05.560644554 +0000 UTC m=+1.569450963 container remove 29e77e16558ba99f50425b65c41995224d041c54dd681ab6805de83e7ac2aa15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_easley, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:46:05 compute-0 nova_compute[356901]: 2025-10-11 02:46:05.564 2 DEBUG nova.compute.manager [req-a3a4e41f-e7bf-4804-9e22-94ed8db6431a req-6cc3f5ef-cfb3-4107-b748-9a3e59e121b0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Received event network-vif-plugged-0c37c119-6647-42bb-a22f-ca741242ef30 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:46:05 compute-0 nova_compute[356901]: 2025-10-11 02:46:05.565 2 DEBUG oslo_concurrency.lockutils [req-a3a4e41f-e7bf-4804-9e22-94ed8db6431a req-6cc3f5ef-cfb3-4107-b748-9a3e59e121b0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:46:05 compute-0 nova_compute[356901]: 2025-10-11 02:46:05.566 2 DEBUG oslo_concurrency.lockutils [req-a3a4e41f-e7bf-4804-9e22-94ed8db6431a req-6cc3f5ef-cfb3-4107-b748-9a3e59e121b0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:46:05 compute-0 nova_compute[356901]: 2025-10-11 02:46:05.566 2 DEBUG oslo_concurrency.lockutils [req-a3a4e41f-e7bf-4804-9e22-94ed8db6431a req-6cc3f5ef-cfb3-4107-b748-9a3e59e121b0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:46:05 compute-0 nova_compute[356901]: 2025-10-11 02:46:05.566 2 DEBUG nova.compute.manager [req-a3a4e41f-e7bf-4804-9e22-94ed8db6431a req-6cc3f5ef-cfb3-4107-b748-9a3e59e121b0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] No waiting events found dispatching network-vif-plugged-0c37c119-6647-42bb-a22f-ca741242ef30 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:46:05 compute-0 nova_compute[356901]: 2025-10-11 02:46:05.567 2 WARNING nova.compute.manager [req-a3a4e41f-e7bf-4804-9e22-94ed8db6431a req-6cc3f5ef-cfb3-4107-b748-9a3e59e121b0 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Received unexpected event network-vif-plugged-0c37c119-6647-42bb-a22f-ca741242ef30 for instance with vm_state active and task_state None.
Oct 11 02:46:05 compute-0 systemd[1]: libpod-conmon-29e77e16558ba99f50425b65c41995224d041c54dd681ab6805de83e7ac2aa15.scope: Deactivated successfully.
Oct 11 02:46:05 compute-0 sudo[467654]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:05 compute-0 sudo[467815]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:46:05 compute-0 sudo[467815]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:05 compute-0 sudo[467815]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:05 compute-0 sudo[467840]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:46:05 compute-0 sudo[467840]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:05 compute-0 sudo[467840]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:05 compute-0 sudo[467865]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:46:05 compute-0 sudo[467865]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:05 compute-0 sudo[467865]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:06 compute-0 sudo[467890]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:46:06 compute-0 sudo[467890]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:46:06 compute-0 podman[467953]: 2025-10-11 02:46:06.527895565 +0000 UTC m=+0.061985491 container create a12cdbd647241ad23e54030659b832132738da92ef12c93c29218374d73a6756 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_antonelli, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 02:46:06 compute-0 systemd[1]: Started libpod-conmon-a12cdbd647241ad23e54030659b832132738da92ef12c93c29218374d73a6756.scope.
Oct 11 02:46:06 compute-0 podman[467953]: 2025-10-11 02:46:06.505333925 +0000 UTC m=+0.039423901 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:46:06 compute-0 ceph-mon[191930]: pgmap v2055: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 24 KiB/s rd, 1.8 MiB/s wr, 35 op/s
Oct 11 02:46:06 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:46:06 compute-0 podman[467953]: 2025-10-11 02:46:06.633896992 +0000 UTC m=+0.167987008 container init a12cdbd647241ad23e54030659b832132738da92ef12c93c29218374d73a6756 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_antonelli, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 02:46:06 compute-0 podman[467953]: 2025-10-11 02:46:06.642664161 +0000 UTC m=+0.176754097 container start a12cdbd647241ad23e54030659b832132738da92ef12c93c29218374d73a6756 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_antonelli, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 02:46:06 compute-0 podman[467953]: 2025-10-11 02:46:06.648497064 +0000 UTC m=+0.182587070 container attach a12cdbd647241ad23e54030659b832132738da92ef12c93c29218374d73a6756 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_antonelli, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Oct 11 02:46:06 compute-0 strange_antonelli[467969]: 167 167
Oct 11 02:46:06 compute-0 systemd[1]: libpod-a12cdbd647241ad23e54030659b832132738da92ef12c93c29218374d73a6756.scope: Deactivated successfully.
Oct 11 02:46:06 compute-0 conmon[467969]: conmon a12cdbd647241ad23e54 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-a12cdbd647241ad23e54030659b832132738da92ef12c93c29218374d73a6756.scope/container/memory.events
Oct 11 02:46:06 compute-0 podman[467953]: 2025-10-11 02:46:06.651260388 +0000 UTC m=+0.185350314 container died a12cdbd647241ad23e54030659b832132738da92ef12c93c29218374d73a6756 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_antonelli, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default)
Oct 11 02:46:06 compute-0 systemd[1]: var-lib-containers-storage-overlay-c830cda463bda31482c793ec973603e425e425ecda535dc216f6facdcfed1408-merged.mount: Deactivated successfully.
Oct 11 02:46:06 compute-0 podman[467953]: 2025-10-11 02:46:06.709478212 +0000 UTC m=+0.243568158 container remove a12cdbd647241ad23e54030659b832132738da92ef12c93c29218374d73a6756 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_antonelli, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:46:06 compute-0 systemd[1]: libpod-conmon-a12cdbd647241ad23e54030659b832132738da92ef12c93c29218374d73a6756.scope: Deactivated successfully.
Oct 11 02:46:06 compute-0 podman[467992]: 2025-10-11 02:46:06.952280789 +0000 UTC m=+0.056519833 container create 52e74b3f8a7b17439220208b9d213bc32d6f4de67d3b2f9d1932e0e33d856f7c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_agnesi, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:46:06 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2056: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 474 KiB/s rd, 1.8 MiB/s wr, 51 op/s
Oct 11 02:46:07 compute-0 podman[467992]: 2025-10-11 02:46:06.925488126 +0000 UTC m=+0.029727210 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:46:07 compute-0 systemd[1]: Started libpod-conmon-52e74b3f8a7b17439220208b9d213bc32d6f4de67d3b2f9d1932e0e33d856f7c.scope.
Oct 11 02:46:07 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:46:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0a44977c5655a462e966fbd231ae335b340971a52fb23cd6d4d8401532e6aa26/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:46:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0a44977c5655a462e966fbd231ae335b340971a52fb23cd6d4d8401532e6aa26/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:46:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0a44977c5655a462e966fbd231ae335b340971a52fb23cd6d4d8401532e6aa26/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:46:07 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0a44977c5655a462e966fbd231ae335b340971a52fb23cd6d4d8401532e6aa26/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:46:07 compute-0 podman[467992]: 2025-10-11 02:46:07.154769456 +0000 UTC m=+0.259008570 container init 52e74b3f8a7b17439220208b9d213bc32d6f4de67d3b2f9d1932e0e33d856f7c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_agnesi, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 02:46:07 compute-0 podman[467992]: 2025-10-11 02:46:07.172972082 +0000 UTC m=+0.277211126 container start 52e74b3f8a7b17439220208b9d213bc32d6f4de67d3b2f9d1932e0e33d856f7c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_agnesi, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:46:07 compute-0 podman[467992]: 2025-10-11 02:46:07.189757431 +0000 UTC m=+0.293996505 container attach 52e74b3f8a7b17439220208b9d213bc32d6f4de67d3b2f9d1932e0e33d856f7c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_agnesi, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2)
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0016576189807579605 of space, bias 1.0, pg target 0.49728569422738816 quantized to 32 (current 32)
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00125203744627857 of space, bias 1.0, pg target 0.375611233883571 quantized to 32 (current 32)
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:46:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]: {
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:     "0": [
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:         {
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "devices": [
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "/dev/loop3"
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             ],
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "lv_name": "ceph_lv0",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "lv_size": "21470642176",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "name": "ceph_lv0",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "tags": {
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.cluster_name": "ceph",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.crush_device_class": "",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.encrypted": "0",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.osd_id": "0",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.type": "block",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.vdo": "0"
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             },
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "type": "block",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "vg_name": "ceph_vg0"
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:         }
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:     ],
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:     "1": [
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:         {
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "devices": [
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "/dev/loop4"
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             ],
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "lv_name": "ceph_lv1",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "lv_size": "21470642176",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "name": "ceph_lv1",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "tags": {
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.cluster_name": "ceph",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.crush_device_class": "",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.encrypted": "0",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.osd_id": "1",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.type": "block",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.vdo": "0"
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             },
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "type": "block",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "vg_name": "ceph_vg1"
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:         }
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:     ],
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:     "2": [
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:         {
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "devices": [
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "/dev/loop5"
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             ],
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "lv_name": "ceph_lv2",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "lv_size": "21470642176",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "name": "ceph_lv2",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "tags": {
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.cluster_name": "ceph",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.crush_device_class": "",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.encrypted": "0",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.osd_id": "2",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.type": "block",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:                 "ceph.vdo": "0"
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             },
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "type": "block",
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:             "vg_name": "ceph_vg2"
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:         }
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]:     ]
Oct 11 02:46:08 compute-0 nervous_agnesi[468007]: }
Oct 11 02:46:08 compute-0 systemd[1]: libpod-52e74b3f8a7b17439220208b9d213bc32d6f4de67d3b2f9d1932e0e33d856f7c.scope: Deactivated successfully.
Oct 11 02:46:08 compute-0 conmon[468007]: conmon 52e74b3f8a7b17439220 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-52e74b3f8a7b17439220208b9d213bc32d6f4de67d3b2f9d1932e0e33d856f7c.scope/container/memory.events
Oct 11 02:46:08 compute-0 podman[467992]: 2025-10-11 02:46:08.077076527 +0000 UTC m=+1.181315581 container died 52e74b3f8a7b17439220208b9d213bc32d6f4de67d3b2f9d1932e0e33d856f7c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_agnesi, io.buildah.version=1.39.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, OSD_FLAVOR=default)
Oct 11 02:46:08 compute-0 systemd[1]: var-lib-containers-storage-overlay-0a44977c5655a462e966fbd231ae335b340971a52fb23cd6d4d8401532e6aa26-merged.mount: Deactivated successfully.
Oct 11 02:46:08 compute-0 podman[467992]: 2025-10-11 02:46:08.15945241 +0000 UTC m=+1.263691454 container remove 52e74b3f8a7b17439220208b9d213bc32d6f4de67d3b2f9d1932e0e33d856f7c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_agnesi, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:46:08 compute-0 systemd[1]: libpod-conmon-52e74b3f8a7b17439220208b9d213bc32d6f4de67d3b2f9d1932e0e33d856f7c.scope: Deactivated successfully.
Oct 11 02:46:08 compute-0 sudo[467890]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:08 compute-0 sudo[468029]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:46:08 compute-0 sudo[468029]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:08 compute-0 sudo[468029]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:08 compute-0 sudo[468054]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:46:08 compute-0 sudo[468054]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:08 compute-0 sudo[468054]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:08 compute-0 sudo[468079]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:46:08 compute-0 sudo[468079]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:08 compute-0 sudo[468079]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:08 compute-0 sudo[468104]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:46:08 compute-0 sudo[468104]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:08 compute-0 ceph-mon[191930]: pgmap v2056: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 474 KiB/s rd, 1.8 MiB/s wr, 51 op/s
Oct 11 02:46:08 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2057: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 1.8 MiB/s rd, 1.2 MiB/s wr, 95 op/s
Oct 11 02:46:09 compute-0 podman[468165]: 2025-10-11 02:46:09.071714808 +0000 UTC m=+0.060196779 container create e928648813b820a22087e95152c4571aa568904d31fc685c1f03a9ed99a43a31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_lederberg, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:46:09 compute-0 systemd[1]: Started libpod-conmon-e928648813b820a22087e95152c4571aa568904d31fc685c1f03a9ed99a43a31.scope.
Oct 11 02:46:09 compute-0 podman[468165]: 2025-10-11 02:46:09.048734622 +0000 UTC m=+0.037216643 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:46:09 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:46:09 compute-0 podman[468165]: 2025-10-11 02:46:09.17494455 +0000 UTC m=+0.163426551 container init e928648813b820a22087e95152c4571aa568904d31fc685c1f03a9ed99a43a31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_lederberg, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:46:09 compute-0 podman[468165]: 2025-10-11 02:46:09.185609603 +0000 UTC m=+0.174091584 container start e928648813b820a22087e95152c4571aa568904d31fc685c1f03a9ed99a43a31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_lederberg, io.buildah.version=1.39.3, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:46:09 compute-0 podman[468165]: 2025-10-11 02:46:09.189599523 +0000 UTC m=+0.178081524 container attach e928648813b820a22087e95152c4571aa568904d31fc685c1f03a9ed99a43a31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_lederberg, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:46:09 compute-0 serene_lederberg[468181]: 167 167
Oct 11 02:46:09 compute-0 systemd[1]: libpod-e928648813b820a22087e95152c4571aa568904d31fc685c1f03a9ed99a43a31.scope: Deactivated successfully.
Oct 11 02:46:09 compute-0 conmon[468181]: conmon e928648813b820a22087 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-e928648813b820a22087e95152c4571aa568904d31fc685c1f03a9ed99a43a31.scope/container/memory.events
Oct 11 02:46:09 compute-0 podman[468165]: 2025-10-11 02:46:09.198449782 +0000 UTC m=+0.186931763 container died e928648813b820a22087e95152c4571aa568904d31fc685c1f03a9ed99a43a31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_lederberg, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, ceph=True, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 02:46:09 compute-0 systemd[1]: var-lib-containers-storage-overlay-f4fd1807864741665eac2aaaa0a97e69f003bc628d6f1395fc0fed14ae812203-merged.mount: Deactivated successfully.
Oct 11 02:46:09 compute-0 podman[468165]: 2025-10-11 02:46:09.267035115 +0000 UTC m=+0.255517106 container remove e928648813b820a22087e95152c4571aa568904d31fc685c1f03a9ed99a43a31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=serene_lederberg, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef)
Oct 11 02:46:09 compute-0 systemd[1]: libpod-conmon-e928648813b820a22087e95152c4571aa568904d31fc685c1f03a9ed99a43a31.scope: Deactivated successfully.
Oct 11 02:46:09 compute-0 podman[468205]: 2025-10-11 02:46:09.541315814 +0000 UTC m=+0.073740328 container create 11a26ec4ab0ecc8428d833bbf8ef1d04dbd276a9fb77c77e3c4688850ba35411 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_proskuriakova, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 02:46:09 compute-0 podman[468205]: 2025-10-11 02:46:09.517500148 +0000 UTC m=+0.049924682 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:46:09 compute-0 systemd[1]: Started libpod-conmon-11a26ec4ab0ecc8428d833bbf8ef1d04dbd276a9fb77c77e3c4688850ba35411.scope.
Oct 11 02:46:09 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:46:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cc875c5a2b40526e1bc396314b7cbad61d2057eb71efee8b2ea2f5cc4980ef6d/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:46:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cc875c5a2b40526e1bc396314b7cbad61d2057eb71efee8b2ea2f5cc4980ef6d/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:46:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cc875c5a2b40526e1bc396314b7cbad61d2057eb71efee8b2ea2f5cc4980ef6d/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:46:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cc875c5a2b40526e1bc396314b7cbad61d2057eb71efee8b2ea2f5cc4980ef6d/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:46:09 compute-0 podman[468205]: 2025-10-11 02:46:09.697741568 +0000 UTC m=+0.230166122 container init 11a26ec4ab0ecc8428d833bbf8ef1d04dbd276a9fb77c77e3c4688850ba35411 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_proskuriakova, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS)
Oct 11 02:46:09 compute-0 podman[468205]: 2025-10-11 02:46:09.722709498 +0000 UTC m=+0.255134012 container start 11a26ec4ab0ecc8428d833bbf8ef1d04dbd276a9fb77c77e3c4688850ba35411 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_proskuriakova, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 02:46:09 compute-0 podman[468205]: 2025-10-11 02:46:09.73175478 +0000 UTC m=+0.264179374 container attach 11a26ec4ab0ecc8428d833bbf8ef1d04dbd276a9fb77c77e3c4688850ba35411 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_proskuriakova, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:46:09 compute-0 nova_compute[356901]: 2025-10-11 02:46:09.874 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:10 compute-0 nova_compute[356901]: 2025-10-11 02:46:10.285 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:10 compute-0 ceph-mon[191930]: pgmap v2057: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 1.8 MiB/s rd, 1.2 MiB/s wr, 95 op/s
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]: {
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:         "osd_id": 1,
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:         "type": "bluestore"
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:     },
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:         "osd_id": 2,
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:         "type": "bluestore"
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:     },
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:         "osd_id": 0,
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:         "type": "bluestore"
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]:     }
Oct 11 02:46:10 compute-0 sharp_proskuriakova[468220]: }
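
The JSON block above, emitted by the short-lived ceph container, is shaped like `ceph-volume lvm list --format json` output: a map keyed by OSD UUID, with each entry naming the OSD id, its backing LVM device, and the cluster fsid. A small sketch for reading it back; the file name osd_inventory.json is hypothetical:

    import json

    # Map each OSD to its backing LVM device using the inventory JSON
    # captured above (assumed saved to the hypothetical osd_inventory.json).
    with open("osd_inventory.json") as f:
        inventory = json.load(f)

    for osd_uuid, info in sorted(inventory.items(),
                                 key=lambda kv: kv[1]["osd_id"]):
        print(f"osd.{info['osd_id']}: {info['device']} "
              f"(type={info['type']}, fsid={info['ceph_fsid']})")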
Oct 11 02:46:10 compute-0 systemd[1]: libpod-11a26ec4ab0ecc8428d833bbf8ef1d04dbd276a9fb77c77e3c4688850ba35411.scope: Deactivated successfully.
Oct 11 02:46:10 compute-0 podman[468205]: 2025-10-11 02:46:10.876158012 +0000 UTC m=+1.408582546 container died 11a26ec4ab0ecc8428d833bbf8ef1d04dbd276a9fb77c77e3c4688850ba35411 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_proskuriakova, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef)
Oct 11 02:46:10 compute-0 systemd[1]: libpod-11a26ec4ab0ecc8428d833bbf8ef1d04dbd276a9fb77c77e3c4688850ba35411.scope: Consumed 1.144s CPU time.
Oct 11 02:46:10 compute-0 systemd[1]: var-lib-containers-storage-overlay-cc875c5a2b40526e1bc396314b7cbad61d2057eb71efee8b2ea2f5cc4980ef6d-merged.mount: Deactivated successfully.
Oct 11 02:46:10 compute-0 podman[468205]: 2025-10-11 02:46:10.958303393 +0000 UTC m=+1.490727907 container remove 11a26ec4ab0ecc8428d833bbf8ef1d04dbd276a9fb77c77e3c4688850ba35411 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_proskuriakova, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 02:46:10 compute-0 systemd[1]: libpod-conmon-11a26ec4ab0ecc8428d833bbf8ef1d04dbd276a9fb77c77e3c4688850ba35411.scope: Deactivated successfully.
Oct 11 02:46:10 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2058: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 14 KiB/s wr, 73 op/s
Oct 11 02:46:10 compute-0 sudo[468104]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:46:11 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:46:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:46:11 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:46:11 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 99223930-4161-4fa7-a570-732b29c48e9d does not exist
Oct 11 02:46:11 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev d2717ea9-c03f-40a5-a0cb-45be4f177f94 does not exist
Oct 11 02:46:11 compute-0 podman[468255]: 2025-10-11 02:46:11.062423837 +0000 UTC m=+0.124555899 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, container_name=multipathd, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:46:11 compute-0 podman[468263]: 2025-10-11 02:46:11.07073061 +0000 UTC m=+0.133282107 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, container_name=iscsid, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS)
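
The health_status=healthy entries above are podman's periodic healthchecks executing the 'test' command recorded in each container's config_data. The same checks can be triggered on demand; a sketch assuming podman is on PATH and these container names exist on the host:

    import subprocess

    # Trigger the same checks podman runs on its healthcheck timer for the
    # two containers above (requires podman and these container names).
    for name in ("multipathd", "iscsid"):
        r = subprocess.run(["podman", "healthcheck", "run", name])
        print(name, "healthy" if r.returncode == 0 else "unhealthy")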
Oct 11 02:46:11 compute-0 sudo[468299]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:46:11 compute-0 sudo[468299]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:11 compute-0 sudo[468299]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:11 compute-0 sudo[468329]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:46:11 compute-0 sudo[468329]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:46:11 compute-0 sudo[468329]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:46:11 compute-0 nova_compute[356901]: 2025-10-11 02:46:11.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:46:12 compute-0 ceph-mon[191930]: pgmap v2058: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 14 KiB/s wr, 73 op/s
Oct 11 02:46:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:46:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:46:12 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2059: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 14 KiB/s wr, 73 op/s
Oct 11 02:46:13 compute-0 nova_compute[356901]: 2025-10-11 02:46:13.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:46:13 compute-0 nova_compute[356901]: 2025-10-11 02:46:13.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:46:14 compute-0 ceph-mon[191930]: pgmap v2059: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 14 KiB/s wr, 73 op/s
Oct 11 02:46:14 compute-0 nova_compute[356901]: 2025-10-11 02:46:14.878 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:14 compute-0 nova_compute[356901]: 2025-10-11 02:46:14.893 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:46:14 compute-0 nova_compute[356901]: 2025-10-11 02:46:14.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:46:14 compute-0 nova_compute[356901]: 2025-10-11 02:46:14.895 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:46:14 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2060: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 14 KiB/s wr, 73 op/s
Oct 11 02:46:15 compute-0 nova_compute[356901]: 2025-10-11 02:46:15.289 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:15 compute-0 nova_compute[356901]: 2025-10-11 02:46:15.468 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:46:15 compute-0 nova_compute[356901]: 2025-10-11 02:46:15.468 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:46:15 compute-0 nova_compute[356901]: 2025-10-11 02:46:15.469 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:46:16 compute-0 ceph-mon[191930]: pgmap v2060: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 14 KiB/s wr, 73 op/s
Oct 11 02:46:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:46:16 compute-0 nova_compute[356901]: 2025-10-11 02:46:16.814 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Updating instance_info_cache with network_info: [{"id": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "address": "fa:16:3e:2c:af:96", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.3.53", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape00931c0-3d", "ovs_interfaceid": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
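
The instance_info_cache payload above is a JSON list of VIFs; the fixed addresses sit under network -> subnets -> ips. A sketch that walks that structure, assuming the blob has been saved to a hypothetical network_info.json:

    import json

    # Print each port's MAC and fixed IPs from a Nova network_info blob
    # (the list logged above, assumed saved to a hypothetical
    # network_info.json).
    with open("network_info.json") as f:
        vifs = json.load(f)

    for vif in vifs:
        ips = [ip["address"]
               for subnet in vif["network"]["subnets"]
               for ip in subnet["ips"]]
        print(vif["id"], vif["address"], ips)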
Oct 11 02:46:16 compute-0 nova_compute[356901]: 2025-10-11 02:46:16.838 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:46:16 compute-0 nova_compute[356901]: 2025-10-11 02:46:16.839 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:46:16 compute-0 nova_compute[356901]: 2025-10-11 02:46:16.841 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:46:16 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2061: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 255 B/s wr, 65 op/s
Oct 11 02:46:18 compute-0 ceph-mon[191930]: pgmap v2061: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 1.9 MiB/s rd, 255 B/s wr, 65 op/s
Oct 11 02:46:18 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2062: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 49 op/s
Oct 11 02:46:19 compute-0 podman[468355]: 2025-10-11 02:46:19.25928821 +0000 UTC m=+0.105399729 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-type=git, architecture=x86_64, com.redhat.component=ubi9-minimal-container, managed_by=edpm_ansible, url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., io.openshift.expose-services=, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, version=9.6, io.buildah.version=1.33.7, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, distribution-scope=public, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, container_name=openstack_network_exporter, name=ubi9-minimal, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9.)
Oct 11 02:46:19 compute-0 podman[468354]: 2025-10-11 02:46:19.263944563 +0000 UTC m=+0.147547796 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, config_id=edpm, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']})
Oct 11 02:46:19 compute-0 podman[468361]: 2025-10-11 02:46:19.26400584 +0000 UTC m=+0.122888333 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:46:19 compute-0 nova_compute[356901]: 2025-10-11 02:46:19.883 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:19 compute-0 nova_compute[356901]: 2025-10-11 02:46:19.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:46:19 compute-0 nova_compute[356901]: 2025-10-11 02:46:19.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:46:19 compute-0 nova_compute[356901]: 2025-10-11 02:46:19.927 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:46:19 compute-0 nova_compute[356901]: 2025-10-11 02:46:19.928 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:46:19 compute-0 nova_compute[356901]: 2025-10-11 02:46:19.928 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:46:19 compute-0 nova_compute[356901]: 2025-10-11 02:46:19.928 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:46:19 compute-0 nova_compute[356901]: 2025-10-11 02:46:19.929 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:46:20 compute-0 ceph-mon[191930]: pgmap v2062: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 1.5 MiB/s rd, 49 op/s
Oct 11 02:46:20 compute-0 nova_compute[356901]: 2025-10-11 02:46:20.289 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:46:20 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3809082672' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:46:20 compute-0 nova_compute[356901]: 2025-10-11 02:46:20.431 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.502s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
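
The two nova_compute lines above bracket an external `ceph df --format=json` call made through oslo.concurrency's processutils (the mon logs the dispatch at 02:46:20, and the command returns 0 about half a second later). A standard-library approximation of the same call, with error handling reduced to check=True:

    import json
    import subprocess

    # Re-run the exact command the resource tracker logged above and pull
    # out the cluster-wide totals Nova uses for disk accounting.
    out = subprocess.run(
        ["ceph", "df", "--format=json", "--id", "openstack",
         "--conf", "/etc/ceph/ceph.conf"],
        check=True, capture_output=True, text=True,
    ).stdout
    stats = json.loads(out)["stats"]
    print("avail bytes:", stats["total_avail_bytes"],
          "of", stats["total_bytes"])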
Oct 11 02:46:20 compute-0 nova_compute[356901]: 2025-10-11 02:46:20.531 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:46:20 compute-0 nova_compute[356901]: 2025-10-11 02:46:20.532 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:46:20 compute-0 nova_compute[356901]: 2025-10-11 02:46:20.541 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:46:20 compute-0 nova_compute[356901]: 2025-10-11 02:46:20.542 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:46:20 compute-0 nova_compute[356901]: 2025-10-11 02:46:20.549 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:46:20 compute-0 nova_compute[356901]: 2025-10-11 02:46:20.549 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:46:20 compute-0 nova_compute[356901]: 2025-10-11 02:46:20.550 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:46:20 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2063: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 108 KiB/s rd, 3 op/s
Oct 11 02:46:20 compute-0 nova_compute[356901]: 2025-10-11 02:46:20.992 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:46:20 compute-0 nova_compute[356901]: 2025-10-11 02:46:20.995 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3288MB free_disk=59.88884353637695GB free_vcpus=5 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:46:20 compute-0 nova_compute[356901]: 2025-10-11 02:46:20.996 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:46:20 compute-0 nova_compute[356901]: 2025-10-11 02:46:20.997 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:46:21 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3809082672' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:46:21 compute-0 nova_compute[356901]: 2025-10-11 02:46:21.221 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:46:21 compute-0 nova_compute[356901]: 2025-10-11 02:46:21.222 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 8422017b-c868-4ba2-ab1f-61d3668ca145 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:46:21 compute-0 nova_compute[356901]: 2025-10-11 02:46:21.223 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance f98d09d7-6aa0-4405-bfa0-be1f78d3911f actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:46:21 compute-0 nova_compute[356901]: 2025-10-11 02:46:21.224 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 3 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:46:21 compute-0 nova_compute[356901]: 2025-10-11 02:46:21.225 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1280MB phys_disk=59GB used_disk=4GB total_vcpus=8 used_vcpus=3 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:46:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:46:21 compute-0 nova_compute[356901]: 2025-10-11 02:46:21.393 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:46:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:46:21 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/460282986' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:46:21 compute-0 nova_compute[356901]: 2025-10-11 02:46:21.908 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.515s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:46:21 compute-0 nova_compute[356901]: 2025-10-11 02:46:21.924 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:46:21 compute-0 nova_compute[356901]: 2025-10-11 02:46:21.947 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
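
The inventory dict in the line above is what the resource tracker reports to Placement; for each resource class the schedulable capacity works out to (total - reserved) * allocation_ratio. A worked check against the logged values (a sketch, not Nova code):

    # Schedulable capacity per resource class: (total - reserved) * ratio.
    inventory = {
        "VCPU": {"total": 8, "reserved": 0, "allocation_ratio": 4.0},
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "DISK_GB": {"total": 59, "reserved": 1, "allocation_ratio": 0.9},
    }
    for rc, inv in inventory.items():
        cap = (inv["total"] - inv["reserved"]) * inv["allocation_ratio"]
        print(f"{rc}: {cap:g} schedulable")
    # -> VCPU: 32, MEMORY_MB: 7168, DISK_GB: 52.2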
Oct 11 02:46:22 compute-0 nova_compute[356901]: 2025-10-11 02:46:22.046 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:46:22 compute-0 nova_compute[356901]: 2025-10-11 02:46:22.049 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 1.052s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:46:22 compute-0 ceph-mon[191930]: pgmap v2063: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 108 KiB/s rd, 3 op/s
Oct 11 02:46:22 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/460282986' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:46:22 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2064: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:46:24 compute-0 ceph-mon[191930]: pgmap v2064: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:46:24 compute-0 nova_compute[356901]: 2025-10-11 02:46:24.886 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:24 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2065: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:46:25 compute-0 nova_compute[356901]: 2025-10-11 02:46:25.047 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:46:25 compute-0 nova_compute[356901]: 2025-10-11 02:46:25.291 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:25 compute-0 nova_compute[356901]: 2025-10-11 02:46:25.898 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_incomplete_migrations run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:46:25 compute-0 nova_compute[356901]: 2025-10-11 02:46:25.899 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances with incomplete migration  _cleanup_incomplete_migrations /usr/lib/python3.9/site-packages/nova/compute/manager.py:11183
Oct 11 02:46:26 compute-0 ceph-mon[191930]: pgmap v2065: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:46:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:46:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:46:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:46:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:46:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:46:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:46:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:46:26 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2066: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:46:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:46:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3889661126' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:46:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:46:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3889661126' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:46:28 compute-0 podman[468459]: 2025-10-11 02:46:28.218772191 +0000 UTC m=+0.106461376 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.29.0, managed_by=edpm_ansible, version=9.4, maintainer=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, container_name=kepler, io.k8s.display-name=Red Hat Universal Base Image 9, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.expose-services=, release=1214.1726694543, distribution-scope=public, release-0.7.12=, build-date=2024-09-18T21:23:30, com.redhat.component=ubi9-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, config_id=edpm, name=ubi9, vcs-type=git, architecture=x86_64, vendor=Red Hat, Inc., summary=Provides the latest release of Red Hat Universal Base Image 9.)
Oct 11 02:46:28 compute-0 ceph-mon[191930]: pgmap v2066: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:46:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3889661126' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:46:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3889661126' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:46:28 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2067: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:46:29 compute-0 podman[157119]: time="2025-10-11T02:46:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:46:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:46:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:46:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:46:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9537 "" "Go-http-client/1.1"
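
The two GET requests above hit podman's libpod REST API over its unix socket; the podman_exporter config_data later in this journal points it at unix:///run/podman/podman.sock, which is the path assumed here. A stdlib-only sketch of the same containers/json call (requires privileges to open the socket):

    import http.client
    import json
    import socket

    # Talk to podman's libpod REST API over its unix socket, mirroring the
    # GET /v4.9.3/libpod/containers/json call logged above.
    class UnixHTTPConnection(http.client.HTTPConnection):
        def __init__(self, socket_path):
            super().__init__("localhost")
            self.socket_path = socket_path

        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self.socket_path)

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    containers = json.loads(conn.getresponse().read())
    print(len(containers), "containers")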
Oct 11 02:46:29 compute-0 nova_compute[356901]: 2025-10-11 02:46:29.892 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:30 compute-0 ceph-mon[191930]: pgmap v2067: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:46:30 compute-0 nova_compute[356901]: 2025-10-11 02:46:30.293 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:30 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2068: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:46:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:46:31 compute-0 ovn_controller[88370]: 2025-10-11T02:46:31Z|00187|memory_trim|INFO|Detected inactivity (last active 30008 ms ago): trimming memory
Oct 11 02:46:31 compute-0 openstack_network_exporter[374316]: ERROR   02:46:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:46:31 compute-0 openstack_network_exporter[374316]: ERROR   02:46:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:46:31 compute-0 openstack_network_exporter[374316]: ERROR   02:46:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:46:31 compute-0 openstack_network_exporter[374316]: ERROR   02:46:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:46:31 compute-0 openstack_network_exporter[374316]: ERROR   02:46:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:46:32 compute-0 ceph-mon[191930]: pgmap v2068: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:46:32 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2069: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:46:33 compute-0 podman[468481]: 2025-10-11 02:46:33.21573793 +0000 UTC m=+0.107935225 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_managed=true)
Oct 11 02:46:33 compute-0 podman[468479]: 2025-10-11 02:46:33.227059459 +0000 UTC m=+0.125878344 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:46:33 compute-0 podman[468482]: 2025-10-11 02:46:33.238608646 +0000 UTC m=+0.127455656 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']})
Oct 11 02:46:33 compute-0 podman[468480]: 2025-10-11 02:46:33.283082285 +0000 UTC m=+0.183150931 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, container_name=ovn_controller)
Oct 11 02:46:34 compute-0 ceph-mon[191930]: pgmap v2069: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:46:34 compute-0 nova_compute[356901]: 2025-10-11 02:46:34.897 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:34 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2070: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:46:35 compute-0 nova_compute[356901]: 2025-10-11 02:46:35.295 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:36 compute-0 ceph-mon[191930]: pgmap v2070: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:46:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #99. Immutable memtables: 0.
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:46:36.356616) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 57] Flushing memtable with next log file: 99
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150796356658, "job": 57, "event": "flush_started", "num_memtables": 1, "num_entries": 1136, "num_deletes": 257, "total_data_size": 1685753, "memory_usage": 1709536, "flush_reason": "Manual Compaction"}
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 57] Level-0 flush table #100: started
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150796375374, "cf_name": "default", "job": 57, "event": "table_file_creation", "file_number": 100, "file_size": 1647769, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 41695, "largest_seqno": 42830, "table_properties": {"data_size": 1642271, "index_size": 2893, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1541, "raw_key_size": 11575, "raw_average_key_size": 19, "raw_value_size": 1631240, "raw_average_value_size": 2732, "num_data_blocks": 130, "num_entries": 597, "num_filter_entries": 597, "num_deletions": 257, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760150688, "oldest_key_time": 1760150688, "file_creation_time": 1760150796, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 100, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 57] Flush lasted 18850 microseconds, and 10637 cpu microseconds.
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:46:36.375462) [db/flush_job.cc:967] [default] [JOB 57] Level-0 flush table #100: 1647769 bytes OK
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:46:36.375492) [db/memtable_list.cc:519] [default] Level-0 commit table #100 started
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:46:36.378041) [db/memtable_list.cc:722] [default] Level-0 commit table #100: memtable #1 done
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:46:36.378063) EVENT_LOG_v1 {"time_micros": 1760150796378056, "job": 57, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:46:36.378089) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 57] Try to delete WAL files size 1680472, prev total WAL file size 1680472, number of live WAL files 2.
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000096.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:46:36.379526) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6C6F676D0031353131' seq:72057594037927935, type:22 .. '6C6F676D0031373634' seq:0, type:0; will stop at (end)
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 58] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 57 Base level 0, inputs: [100(1609KB)], [98(7587KB)]
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150796379568, "job": 58, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [100], "files_L6": [98], "score": -1, "input_data_size": 9417112, "oldest_snapshot_seqno": -1}
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 58] Generated table #101: 5829 keys, 9312898 bytes, temperature: kUnknown
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150796443195, "cf_name": "default", "job": 58, "event": "table_file_creation", "file_number": 101, "file_size": 9312898, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 9273718, "index_size": 23518, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 14597, "raw_key_size": 151556, "raw_average_key_size": 26, "raw_value_size": 9168020, "raw_average_value_size": 1572, "num_data_blocks": 943, "num_entries": 5829, "num_filter_entries": 5829, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760150796, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 101, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:46:36.443638) [db/compaction/compaction_job.cc:1663] [default] [JOB 58] Compacted 1@0 + 1@6 files to L6 => 9312898 bytes
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:46:36.445554) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 147.6 rd, 145.9 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(1.6, 7.4 +0.0 blob) out(8.9 +0.0 blob), read-write-amplify(11.4) write-amplify(5.7) OK, records in: 6355, records dropped: 526 output_compression: NoCompression
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:46:36.445577) EVENT_LOG_v1 {"time_micros": 1760150796445565, "job": 58, "event": "compaction_finished", "compaction_time_micros": 63809, "compaction_time_cpu_micros": 38857, "output_level": 6, "num_output_files": 1, "total_output_size": 9312898, "num_input_records": 6355, "num_output_records": 5829, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000100.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150796446098, "job": 58, "event": "table_file_deletion", "file_number": 100}
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000098.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150796448170, "job": 58, "event": "table_file_deletion", "file_number": 98}
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:46:36.379359) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:46:36.448580) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:46:36.448586) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:46:36.448588) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:46:36.448590) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:46:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:46:36.448592) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:46:36 compute-0 nova_compute[356901]: 2025-10-11 02:46:36.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_expired_console_auth_tokens run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:46:36 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2071: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 3.3 KiB/s rd, 0 op/s
Oct 11 02:46:37 compute-0 ovn_controller[88370]: 2025-10-11T02:46:37Z|00025|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:ee:94:7e 10.100.2.253
Oct 11 02:46:37 compute-0 ovn_controller[88370]: 2025-10-11T02:46:37Z|00026|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:ee:94:7e 10.100.2.253
Oct 11 02:46:38 compute-0 ceph-mon[191930]: pgmap v2071: 321 pgs: 321 active+clean; 265 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 3.3 KiB/s rd, 0 op/s
Oct 11 02:46:38 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2072: 321 pgs: 321 active+clean; 281 MiB data, 418 MiB used, 60 GiB / 60 GiB avail; 116 KiB/s rd, 1.4 MiB/s wr, 29 op/s
Oct 11 02:46:39 compute-0 nova_compute[356901]: 2025-10-11 02:46:39.902 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:40 compute-0 nova_compute[356901]: 2025-10-11 02:46:40.298 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:40 compute-0 ceph-mon[191930]: pgmap v2072: 321 pgs: 321 active+clean; 281 MiB data, 418 MiB used, 60 GiB / 60 GiB avail; 116 KiB/s rd, 1.4 MiB/s wr, 29 op/s
Oct 11 02:46:40 compute-0 nova_compute[356901]: 2025-10-11 02:46:40.916 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._run_pending_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:46:40 compute-0 nova_compute[356901]: 2025-10-11 02:46:40.917 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11145
Oct 11 02:46:40 compute-0 nova_compute[356901]: 2025-10-11 02:46:40.972 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] There are 0 instances to clean _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11154
Oct 11 02:46:40 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2073: 321 pgs: 321 active+clean; 290 MiB data, 425 MiB used, 60 GiB / 60 GiB avail; 260 KiB/s rd, 2.1 MiB/s wr, 48 op/s
Oct 11 02:46:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:46:42 compute-0 podman[468562]: 2025-10-11 02:46:42.232988077 +0000 UTC m=+0.119278157 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, container_name=multipathd, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:46:42 compute-0 podman[468563]: 2025-10-11 02:46:42.252600179 +0000 UTC m=+0.133292912 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, config_id=iscsid, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid)
Oct 11 02:46:42 compute-0 ceph-mon[191930]: pgmap v2073: 321 pgs: 321 active+clean; 290 MiB data, 425 MiB used, 60 GiB / 60 GiB avail; 260 KiB/s rd, 2.1 MiB/s wr, 48 op/s
Oct 11 02:46:42 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2074: 321 pgs: 321 active+clean; 295 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 297 KiB/s rd, 2.1 MiB/s wr, 59 op/s
Oct 11 02:46:44 compute-0 ceph-mon[191930]: pgmap v2074: 321 pgs: 321 active+clean; 295 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 297 KiB/s rd, 2.1 MiB/s wr, 59 op/s
Oct 11 02:46:44 compute-0 nova_compute[356901]: 2025-10-11 02:46:44.906 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:44 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2075: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 297 KiB/s rd, 2.1 MiB/s wr, 59 op/s
Oct 11 02:46:45 compute-0 nova_compute[356901]: 2025-10-11 02:46:45.300 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:46:46 compute-0 ceph-mon[191930]: pgmap v2075: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 297 KiB/s rd, 2.1 MiB/s wr, 59 op/s
Oct 11 02:46:46 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2076: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 297 KiB/s rd, 2.1 MiB/s wr, 59 op/s
Oct 11 02:46:48 compute-0 ceph-mon[191930]: pgmap v2076: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 297 KiB/s rd, 2.1 MiB/s wr, 59 op/s
Oct 11 02:46:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2077: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 293 KiB/s rd, 2.1 MiB/s wr, 58 op/s
Oct 11 02:46:49 compute-0 nova_compute[356901]: 2025-10-11 02:46:49.911 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:50 compute-0 podman[468600]: 2025-10-11 02:46:50.223575647 +0000 UTC m=+0.111628202 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, distribution-scope=public, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.expose-services=, name=ubi9-minimal, com.redhat.component=ubi9-minimal-container, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, container_name=openstack_network_exporter, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., build-date=2025-08-20T13:12:41, managed_by=edpm_ansible, config_id=edpm, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, url=https://catalog.redhat.com/en/search?searchType=containers, version=9.6, io.buildah.version=1.33.7, io.openshift.tags=minimal rhel9, architecture=x86_64)
Oct 11 02:46:50 compute-0 podman[468601]: 2025-10-11 02:46:50.26378189 +0000 UTC m=+0.137764673 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:46:50 compute-0 podman[468599]: 2025-10-11 02:46:50.277979377 +0000 UTC m=+0.162469001 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.schema-version=1.0, config_id=edpm, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 02:46:50 compute-0 nova_compute[356901]: 2025-10-11 02:46:50.302 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:50 compute-0 ceph-mon[191930]: pgmap v2077: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 293 KiB/s rd, 2.1 MiB/s wr, 58 op/s
Oct 11 02:46:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2078: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 180 KiB/s rd, 709 KiB/s wr, 30 op/s
Oct 11 02:46:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:46:52 compute-0 ceph-mon[191930]: pgmap v2078: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 180 KiB/s rd, 709 KiB/s wr, 30 op/s
Oct 11 02:46:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2079: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 76 KiB/s wr, 11 op/s
Oct 11 02:46:54 compute-0 ceph-mon[191930]: pgmap v2079: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 76 KiB/s wr, 11 op/s
Oct 11 02:46:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:54.876 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:46:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:54.876 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:46:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:46:54.877 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:46:54 compute-0 nova_compute[356901]: 2025-10-11 02:46:54.915 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2080: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 8.0 KiB/s wr, 0 op/s
Oct 11 02:46:55 compute-0 nova_compute[356901]: 2025-10-11 02:46:55.306 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:46:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:46:56 compute-0 ceph-mon[191930]: pgmap v2080: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 8.0 KiB/s wr, 0 op/s
Oct 11 02:46:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:46:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:46:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:46:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:46:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:46:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:46:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:46:56
Oct 11 02:46:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:46:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:46:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.log', 'backups', '.rgw.root', 'volumes', 'cephfs.cephfs.meta', 'default.rgw.meta', 'vms', 'cephfs.cephfs.data', '.mgr', 'images', 'default.rgw.control']
Oct 11 02:46:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:46:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2081: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 6.0 KiB/s wr, 0 op/s
Oct 11 02:46:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:46:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:46:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:46:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:46:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:46:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:46:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:46:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:46:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:46:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:46:58 compute-0 ceph-mon[191930]: pgmap v2081: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 6.0 KiB/s wr, 0 op/s
Oct 11 02:46:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2082: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.1 KiB/s wr, 0 op/s
Oct 11 02:46:59 compute-0 podman[468656]: 2025-10-11 02:46:59.212139607 +0000 UTC m=+0.111739724 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, io.k8s.display-name=Red Hat Universal Base Image 9, maintainer=Red Hat, Inc., summary=Provides the latest release of Red Hat Universal Base Image 9., version=9.4, io.openshift.expose-services=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, container_name=kepler, release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_id=edpm, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, io.buildah.version=1.29.0, release=1214.1726694543, build-date=2024-09-18T21:23:30, distribution-scope=public, com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, architecture=x86_64, vendor=Red Hat, Inc., io.openshift.tags=base rhel9, vcs-type=git)
Oct 11 02:46:59 compute-0 podman[157119]: time="2025-10-11T02:46:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:46:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:46:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:46:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:46:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9544 "" "Go-http-client/1.1"
Oct 11 02:46:59 compute-0 nova_compute[356901]: 2025-10-11 02:46:59.920 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:00 compute-0 nova_compute[356901]: 2025-10-11 02:47:00.310 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:00 compute-0 ceph-mon[191930]: pgmap v2082: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 6.1 KiB/s wr, 0 op/s
Oct 11 02:47:00 compute-0 nova_compute[356901]: 2025-10-11 02:47:00.953 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:47:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2083: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.1 KiB/s wr, 0 op/s
Oct 11 02:47:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:47:01 compute-0 openstack_network_exporter[374316]: ERROR   02:47:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:47:01 compute-0 openstack_network_exporter[374316]: ERROR   02:47:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:47:01 compute-0 openstack_network_exporter[374316]: ERROR   02:47:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:47:01 compute-0 openstack_network_exporter[374316]: ERROR   02:47:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:47:01 compute-0 openstack_network_exporter[374316]: ERROR   02:47:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:47:02 compute-0 ceph-mon[191930]: pgmap v2083: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.1 KiB/s wr, 0 op/s
Oct 11 02:47:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2084: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.1 KiB/s wr, 0 op/s
Oct 11 02:47:04 compute-0 podman[468679]: 2025-10-11 02:47:04.217084775 +0000 UTC m=+0.085429666 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.vendor=CentOS)
Oct 11 02:47:04 compute-0 podman[468674]: 2025-10-11 02:47:04.238800228 +0000 UTC m=+0.132266715 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:47:04 compute-0 podman[468683]: 2025-10-11 02:47:04.271185838 +0000 UTC m=+0.130365989 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_metadata_agent, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, io.buildah.version=1.41.3)
Oct 11 02:47:04 compute-0 podman[468675]: 2025-10-11 02:47:04.296338295 +0000 UTC m=+0.165324958 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:47:04 compute-0 ceph-mon[191930]: pgmap v2084: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.1 KiB/s wr, 0 op/s
Oct 11 02:47:04 compute-0 nova_compute[356901]: 2025-10-11 02:47:04.924 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2085: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.1 KiB/s wr, 0 op/s
Oct 11 02:47:05 compute-0 nova_compute[356901]: 2025-10-11 02:47:05.313 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:05 compute-0 nova_compute[356901]: 2025-10-11 02:47:05.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:47:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:47:06 compute-0 ceph-mon[191930]: pgmap v2085: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.1 KiB/s wr, 0 op/s
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2086: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.1 KiB/s wr, 0 op/s
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0020665708227518395 of space, bias 1.0, pg target 0.6199712468255518 quantized to 32 (current 32)
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00125203744627857 of space, bias 1.0, pg target 0.375611233883571 quantized to 32 (current 32)
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:47:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:47:08 compute-0 ceph-mon[191930]: pgmap v2086: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.1 KiB/s wr, 0 op/s
Oct 11 02:47:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2087: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.1 KiB/s wr, 0 op/s
Oct 11 02:47:09 compute-0 nova_compute[356901]: 2025-10-11 02:47:09.931 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:10 compute-0 nova_compute[356901]: 2025-10-11 02:47:10.281 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_power_states run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:47:10 compute-0 nova_compute[356901]: 2025-10-11 02:47:10.315 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:10 compute-0 nova_compute[356901]: 2025-10-11 02:47:10.324 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Triggering sync for uuid 0cc56d17-ec3a-4408-bccb-91b29427379e _sync_power_states /usr/lib/python3.9/site-packages/nova/compute/manager.py:10268
Oct 11 02:47:10 compute-0 nova_compute[356901]: 2025-10-11 02:47:10.325 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Triggering sync for uuid 8422017b-c868-4ba2-ab1f-61d3668ca145 _sync_power_states /usr/lib/python3.9/site-packages/nova/compute/manager.py:10268
Oct 11 02:47:10 compute-0 nova_compute[356901]: 2025-10-11 02:47:10.326 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Triggering sync for uuid f98d09d7-6aa0-4405-bfa0-be1f78d3911f _sync_power_states /usr/lib/python3.9/site-packages/nova/compute/manager.py:10268
Oct 11 02:47:10 compute-0 nova_compute[356901]: 2025-10-11 02:47:10.327 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "0cc56d17-ec3a-4408-bccb-91b29427379e" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:47:10 compute-0 nova_compute[356901]: 2025-10-11 02:47:10.328 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "0cc56d17-ec3a-4408-bccb-91b29427379e" acquired by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:47:10 compute-0 nova_compute[356901]: 2025-10-11 02:47:10.329 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "8422017b-c868-4ba2-ab1f-61d3668ca145" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:47:10 compute-0 nova_compute[356901]: 2025-10-11 02:47:10.331 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145" acquired by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:47:10 compute-0 nova_compute[356901]: 2025-10-11 02:47:10.332 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:47:10 compute-0 nova_compute[356901]: 2025-10-11 02:47:10.332 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f" acquired by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:47:10 compute-0 nova_compute[356901]: 2025-10-11 02:47:10.372 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145" "released" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: held 0.041s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:47:10 compute-0 nova_compute[356901]: 2025-10-11 02:47:10.374 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "0cc56d17-ec3a-4408-bccb-91b29427379e" "released" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: held 0.046s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:47:10 compute-0 nova_compute[356901]: 2025-10-11 02:47:10.378 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f" "released" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: held 0.046s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:47:10 compute-0 ceph-mon[191930]: pgmap v2087: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 1.1 KiB/s wr, 0 op/s
Oct 11 02:47:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2088: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:47:11 compute-0 sudo[468755]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:47:11 compute-0 sudo[468755]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:11 compute-0 sudo[468755]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:47:11 compute-0 sudo[468780]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:47:11 compute-0 sudo[468780]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:11 compute-0 sudo[468780]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:11 compute-0 sudo[468805]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:47:11 compute-0 sudo[468805]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:11 compute-0 sudo[468805]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:11 compute-0 sudo[468830]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host
Oct 11 02:47:11 compute-0 sudo[468830]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:11 compute-0 sudo[468830]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:47:11 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:47:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:47:11 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:47:12 compute-0 sudo[468874]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:47:12 compute-0 sudo[468874]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:12 compute-0 sudo[468874]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:12 compute-0 sudo[468899]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:47:12 compute-0 sudo[468899]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:12 compute-0 sudo[468899]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:12 compute-0 sudo[468924]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:47:12 compute-0 sudo[468924]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:12 compute-0 sudo[468924]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:12 compute-0 sudo[468949]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:47:12 compute-0 sudo[468949]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:12 compute-0 podman[468974]: 2025-10-11 02:47:12.437194681 +0000 UTC m=+0.098300734 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=iscsid, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, managed_by=edpm_ansible)
Oct 11 02:47:12 compute-0 podman[468973]: 2025-10-11 02:47:12.467682077 +0000 UTC m=+0.129257949 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_id=multipathd, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, managed_by=edpm_ansible)
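Both health_status=healthy events come from podman's healthcheck machinery; the config_data label carries the kolla-style container definition, whose healthcheck test simply runs /openstack/healthcheck inside the container. The same state can be read back with podman inspect; a sketch, assuming the podman CLI is on PATH (the key is State.Health on recent podman releases, State.Healthcheck on older ones):

    #!/usr/bin/env python3
    # Read a container's health state the same way the events above report it.
    import json, subprocess

    def health(name):
        data = json.loads(subprocess.run(
            ["podman", "inspect", name],
            capture_output=True, text=True, check=True).stdout)[0]
        state = data["State"]
        # key name varies across podman versions
        return state.get("Health") or state.get("Healthcheck")

    print(health("iscsid"))   # e.g. {"Status": "healthy", "FailingStreak": 0, ...}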
Oct 11 02:47:12 compute-0 sudo[468949]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:12 compute-0 ceph-mon[191930]: pgmap v2088: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:47:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:47:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:47:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:47:12 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:47:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:47:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:47:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:47:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:47:12 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 2ac1eeb8-12f0-414b-99d1-56da018bb1a6 does not exist
Oct 11 02:47:12 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev d45af2bf-5691-4e1f-852d-867e356f5314 does not exist
Oct 11 02:47:12 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 60c59c51-39f3-49ad-a478-503b1d1bcfee does not exist
Oct 11 02:47:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:47:12 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:47:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:47:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:47:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:47:12 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
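The two commands audited here, config generate-minimal-conf and auth get client.admin, are evidently what the mgr is using to assemble a minimal ceph.conf plus keyring for the hosts it manages. The same pair can be issued by hand with the ceph CLI; a sketch assuming a reachable cluster and admin credentials:

    #!/usr/bin/env python3
    # Issue the two mon_commands logged above via the ceph CLI.
    import subprocess

    def ceph(*args):
        return subprocess.run(["ceph", *args],
                              capture_output=True, text=True, check=True).stdout

    minimal_conf = ceph("config", "generate-minimal-conf")  # [global] fsid + mon_host
    admin_keyring = ceph("auth", "get", "client.admin")
    print(minimal_conf)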
Oct 11 02:47:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2089: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s wr, 0 op/s
Oct 11 02:47:13 compute-0 sudo[469040]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:47:13 compute-0 sudo[469040]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:13 compute-0 sudo[469040]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:13 compute-0 sudo[469065]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:47:13 compute-0 sudo[469065]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:13 compute-0 sudo[469065]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:13 compute-0 sudo[469090]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:47:13 compute-0 sudo[469090]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:13 compute-0 sudo[469090]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:13 compute-0 sudo[469115]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
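This is the OSD-creation step itself: cephadm execs its copied binary with ceph-volume lvm batch over three pre-created logical volumes, feeding configuration as JSON on stdin (--config-json -). A sketch of the equivalent direct invocation; every command element is copied from the log line above, only the stdin JSON body is a placeholder (cephadm sends the minimal conf and bootstrap-osd keyring it fetched moments earlier), and the run is left commented out because it would actually create OSDs:

    #!/usr/bin/env python3
    # Reconstruction of the ceph-volume call logged above.
    import json, subprocess

    FSID = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"
    IMAGE = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")
    CEPHADM = (f"/var/lib/ceph/{FSID}/cephadm."
               "31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d")

    cmd = ["sudo", "/bin/python3", CEPHADM,
           "--env", "CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group",
           "--image", IMAGE, "--timeout", "895",
           "ceph-volume", "--fsid", FSID, "--config-json", "-", "--",
           "lvm", "batch", "--no-auto",
           "/dev/ceph_vg0/ceph_lv0", "/dev/ceph_vg1/ceph_lv1",
           "/dev/ceph_vg2/ceph_lv2", "--yes", "--no-systemd"]
    cfg = {"config": "# minimal ceph.conf here\n",
           "keyring": "# bootstrap-osd keyring here\n"}   # placeholder content
    # subprocess.run(cmd, input=json.dumps(cfg), text=True, check=True)
    print(" ".join(cmd))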
Oct 11 02:47:13 compute-0 sudo[469115]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:13 compute-0 podman[469177]: 2025-10-11 02:47:13.862483428 +0000 UTC m=+0.064329404 container create cca8bc3583dbbc7387ff361c92f4deb8b6816290bc136503da2dd55cea2b4a86 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_hoover, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, ceph=True)
Oct 11 02:47:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:13.870 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, the polling process can be expected to take longer than intended. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:47:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:13.871 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:47:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:13.871 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:47:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:13.871 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [...] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc698e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276 -- repeated 25 times between 02:47:13.872 and 02:47:13.875, identical apart from the stevedore.extension.Extension object addresses
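What the manager is describing in these registrations is a simple pattern: each pollster becomes a task submitted to a shared ThreadPoolExecutor, with per-cycle cache, history, and discovery-cache dictionaries passed along. A generic sketch of that pattern; the names are illustrative, not ceilometer's actual classes:

    #!/usr/bin/env python3
    # Pollster-per-task execution on a shared thread pool, as the log describes.
    from concurrent.futures import ThreadPoolExecutor

    def run_pollster(name, cache, history, discovery_cache):
        # each task can reuse the shared discovery result for its cycle
        instances = discovery_cache.setdefault("local_instances",
                                               ["instance-0000000e"])
        return f"{name}: polled {len(instances)} instance(s)"

    cache, history, discovery_cache = {}, {}, {}
    with ThreadPoolExecutor(max_workers=1) as pool:   # "[1] threads", per the log
        futures = [pool.submit(run_pollster, n, cache, history, discovery_cache)
                   for n in ("network.incoming.bytes", "network.outgoing.packets")]
        for f in futures:
            print(f.result())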
Oct 11 02:47:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:13.877 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '8422017b-c868-4ba2-ab1f-61d3668ca145', 'name': 'te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c', 'flavor': {'id': '6dff30d1-85df-4e9c-9163-a20ba47bb0c7', 'name': 'm1.nano', 'vcpus': 1, 'ram': 128, 'disk': 1, 'ephemeral': 0, 'swap': 0}, 'image': {'id': '2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-0000000e', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'user_id': 'f66a606299944d53a40f21e81c791d70', 'hostId': 'cea8816d446065ba50379057f72b942db7e204c60c4530591bc7d0be', 'status': 'active', 'metadata': {'metering.server_group': '44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:47:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:13.879 14 DEBUG ceilometer.compute.discovery [-] Querying metadata for instance f98d09d7-6aa0-4405-bfa0-be1f78d3911f from Nova API get_server /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:176
Oct 11 02:47:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:13.880 14 DEBUG novaclient.v2.client [-] REQ: curl -g -i -X GET https://nova-internal.openstack.svc:8774/v2.1/servers/f98d09d7-6aa0-4405-bfa0-be1f78d3911f -H "Accept: application/json" -H "User-Agent: python-novaclient" -H "X-Auth-Token: {SHA256}d674387017edb5d8543811c363b3a2965950a94ddf4462840fede0e79ac258e9" -H "X-OpenStack-Nova-API-Version: 2.1" _http_log_request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:572
Oct 11 02:47:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:47:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:47:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:47:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:47:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:47:13 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:47:13 compute-0 podman[469177]: 2025-10-11 02:47:13.839064594 +0000 UTC m=+0.040910580 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:47:13 compute-0 systemd[1]: Started libpod-conmon-cca8bc3583dbbc7387ff361c92f4deb8b6816290bc136503da2dd55cea2b4a86.scope.
Oct 11 02:47:13 compute-0 nova_compute[356901]: 2025-10-11 02:47:13.949 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:47:13 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:47:14 compute-0 podman[469177]: 2025-10-11 02:47:14.00814385 +0000 UTC m=+0.209989876 container init cca8bc3583dbbc7387ff361c92f4deb8b6816290bc136503da2dd55cea2b4a86 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_hoover, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:47:14 compute-0 podman[469177]: 2025-10-11 02:47:14.017149138 +0000 UTC m=+0.218995114 container start cca8bc3583dbbc7387ff361c92f4deb8b6816290bc136503da2dd55cea2b4a86 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_hoover, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 02:47:14 compute-0 podman[469177]: 2025-10-11 02:47:14.021395676 +0000 UTC m=+0.223241672 container attach cca8bc3583dbbc7387ff361c92f4deb8b6816290bc136503da2dd55cea2b4a86 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_hoover, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:47:14 compute-0 magical_hoover[469194]: 167 167
Oct 11 02:47:14 compute-0 systemd[1]: libpod-cca8bc3583dbbc7387ff361c92f4deb8b6816290bc136503da2dd55cea2b4a86.scope: Deactivated successfully.
Oct 11 02:47:14 compute-0 conmon[469194]: conmon cca8bc3583dbbc7387ff <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-cca8bc3583dbbc7387ff361c92f4deb8b6816290bc136503da2dd55cea2b4a86.scope/container/memory.events
Oct 11 02:47:14 compute-0 podman[469177]: 2025-10-11 02:47:14.027705713 +0000 UTC m=+0.229551699 container died cca8bc3583dbbc7387ff361c92f4deb8b6816290bc136503da2dd55cea2b4a86 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_hoover, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:47:14 compute-0 systemd[1]: var-lib-containers-storage-overlay-787cc027c561ff9263d89312172677bbf56b9ffd3ea79174a267fb04dc63efb1-merged.mount: Deactivated successfully.
Oct 11 02:47:14 compute-0 podman[469177]: 2025-10-11 02:47:14.092830961 +0000 UTC m=+0.294676947 container remove cca8bc3583dbbc7387ff361c92f4deb8b6816290bc136503da2dd55cea2b4a86 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_hoover, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:47:14 compute-0 systemd[1]: libpod-conmon-cca8bc3583dbbc7387ff361c92f4deb8b6816290bc136503da2dd55cea2b4a86.scope: Deactivated successfully.
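The one-shot magical_hoover container above exists only to print "167 167": cephadm launches the OSD image once to learn the uid and gid its daemons should run as, and 167:167 matches the fixed ceph user and group on RHEL-family images. A sketch reproducing the probe; the stat target path is an assumption (any path owned by ceph:ceph inside the image would do):

    #!/usr/bin/env python3
    # One-shot uid/gid probe against the same image the log shows.
    import subprocess

    IMAGE = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")
    out = subprocess.run(
        ["podman", "run", "--rm", "--entrypoint", "stat", IMAGE,
         "-c", "%u %g", "/var/lib/ceph"],
        capture_output=True, text=True, check=True).stdout
    print(out.strip())    # expected: "167 167"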
Oct 11 02:47:14 compute-0 podman[469219]: 2025-10-11 02:47:14.318332054 +0000 UTC m=+0.068793857 container create 26c46cf070236e6ffc90a6087a26562a5f1fe2e371f377d51958e451eb4efd03 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_goldstine, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:47:14 compute-0 systemd[1]: Started libpod-conmon-26c46cf070236e6ffc90a6087a26562a5f1fe2e371f377d51958e451eb4efd03.scope.
Oct 11 02:47:14 compute-0 podman[469219]: 2025-10-11 02:47:14.296646952 +0000 UTC m=+0.047108765 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:47:14 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:47:14 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/be98ce5e6070768df775738eaa3abc67192c7e34a62f6fc64e129243845da176/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:47:14 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/be98ce5e6070768df775738eaa3abc67192c7e34a62f6fc64e129243845da176/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:47:14 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/be98ce5e6070768df775738eaa3abc67192c7e34a62f6fc64e129243845da176/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:47:14 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/be98ce5e6070768df775738eaa3abc67192c7e34a62f6fc64e129243845da176/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:47:14 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/be98ce5e6070768df775738eaa3abc67192c7e34a62f6fc64e129243845da176/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
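These kernel notices fire each time an XFS filesystem created without the bigtime feature is bind-mounted into the container's mount namespace: inode timestamps on such filesystems top out at 0x7fffffff seconds, the classic 32-bit time_t ceiling. A quick check of what that limit is as a date:

    #!/usr/bin/env python3
    # 0x7fffffff seconds after the epoch is the limit the kernel warns about.
    from datetime import datetime, timezone
    print(datetime.fromtimestamp(0x7FFFFFFF, tz=timezone.utc))
    # -> 2038-01-19 03:14:07+00:00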
Oct 11 02:47:14 compute-0 podman[469219]: 2025-10-11 02:47:14.421448247 +0000 UTC m=+0.171910070 container init 26c46cf070236e6ffc90a6087a26562a5f1fe2e371f377d51958e451eb4efd03 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_goldstine, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:47:14 compute-0 podman[469219]: 2025-10-11 02:47:14.434188923 +0000 UTC m=+0.184650726 container start 26c46cf070236e6ffc90a6087a26562a5f1fe2e371f377d51958e451eb4efd03 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_goldstine, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:47:14 compute-0 podman[469219]: 2025-10-11 02:47:14.438908411 +0000 UTC m=+0.189370214 container attach 26c46cf070236e6ffc90a6087a26562a5f1fe2e371f377d51958e451eb4efd03 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_goldstine, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.497 14 DEBUG novaclient.v2.client [-] RESP: [200] Connection: Keep-Alive Content-Length: 1832 Content-Type: application/json Date: Sat, 11 Oct 2025 02:47:13 GMT Keep-Alive: timeout=5, max=100 OpenStack-API-Version: compute 2.1 Server: Apache Vary: OpenStack-API-Version,X-OpenStack-Nova-API-Version X-OpenStack-Nova-API-Version: 2.1 x-compute-request-id: req-b0cd2ba6-62e2-40b2-9add-c16741fb44ff x-openstack-request-id: req-b0cd2ba6-62e2-40b2-9add-c16741fb44ff _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:613
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.498 14 DEBUG novaclient.v2.client [-] RESP BODY: {"server": {"id": "f98d09d7-6aa0-4405-bfa0-be1f78d3911f", "name": "te-0512306-asg-am4iabdjybzp-yj44h76hdzhi-bejrsw3xgi4q", "status": "ACTIVE", "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "user_id": "f66a606299944d53a40f21e81c791d70", "metadata": {"metering.server_group": "44c4fdb3-6cdb-42b8-903d-5a2c79f0da20"}, "hostId": "cea8816d446065ba50379057f72b942db7e204c60c4530591bc7d0be", "image": {"id": "2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/images/2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c"}]}, "flavor": {"id": "6dff30d1-85df-4e9c-9163-a20ba47bb0c7", "links": [{"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/flavors/6dff30d1-85df-4e9c-9163-a20ba47bb0c7"}]}, "created": "2025-10-11T02:45:51Z", "updated": "2025-10-11T02:46:03Z", "addresses": {"": [{"version": 4, "addr": "10.100.2.253", "OS-EXT-IPS:type": "fixed", "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:ee:94:7e"}]}, "accessIPv4": "", "accessIPv6": "", "links": [{"rel": "self", "href": "https://nova-internal.openstack.svc:8774/v2.1/servers/f98d09d7-6aa0-4405-bfa0-be1f78d3911f"}, {"rel": "bookmark", "href": "https://nova-internal.openstack.svc:8774/servers/f98d09d7-6aa0-4405-bfa0-be1f78d3911f"}], "OS-DCF:diskConfig": "MANUAL", "progress": 0, "OS-EXT-AZ:availability_zone": "nova", "config_drive": "True", "key_name": null, "OS-SRV-USG:launched_at": "2025-10-11T02:46:03.000000", "OS-SRV-USG:terminated_at": null, "security_groups": [{"name": "default"}], "OS-EXT-SRV-ATTR:host": "compute-0.ctlplane.example.com", "OS-EXT-SRV-ATTR:instance_name": "instance-0000000f", "OS-EXT-SRV-ATTR:hypervisor_hostname": "compute-0.ctlplane.example.com", "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-EXT-STS:power_state": 1, "os-extended-volumes:volumes_attached": []}} _http_log_response /usr/lib/python3.12/site-packages/keystoneauth1/session.py:648
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.498 14 DEBUG novaclient.v2.client [-] GET call to compute for https://nova-internal.openstack.svc:8774/v2.1/servers/f98d09d7-6aa0-4405-bfa0-be1f78d3911f used request id req-b0cd2ba6-62e2-40b2-9add-c16741fb44ff request /usr/lib/python3.12/site-packages/keystoneauth1/session.py:1073
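keystoneauth logs every API call as a replayable curl line, with the real token replaced by its SHA-256 digest ({SHA256}...). A sketch of the same GET issued directly with requests; the token value here is a placeholder, since only its hash appears in the log:

    #!/usr/bin/env python3
    # Replay the GET logged above. TOKEN is a placeholder; keystoneauth logs
    # only "{SHA256}<hex digest of the real token>", computed as shown.
    import hashlib
    import requests

    TOKEN = "gAAAAA...placeholder-token"
    URL = ("https://nova-internal.openstack.svc:8774/v2.1/servers/"
           "f98d09d7-6aa0-4405-bfa0-be1f78d3911f")

    print("{SHA256}" + hashlib.sha256(TOKEN.encode()).hexdigest())
    resp = requests.get(URL, headers={
        "Accept": "application/json",
        "User-Agent": "python-novaclient",
        "X-Auth-Token": TOKEN,
        "X-OpenStack-Nova-API-Version": "2.1",
    })
    print(resp.status_code, resp.json()["server"]["status"])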
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.500 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': 'f98d09d7-6aa0-4405-bfa0-be1f78d3911f', 'name': 'te-0512306-asg-am4iabdjybzp-yj44h76hdzhi-bejrsw3xgi4q', 'flavor': {'id': '6dff30d1-85df-4e9c-9163-a20ba47bb0c7', 'name': 'm1.nano', 'vcpus': 1, 'ram': 128, 'disk': 1, 'ephemeral': 0, 'swap': 0}, 'image': {'id': '2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-0000000f', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'user_id': 'f66a606299944d53a40f21e81c791d70', 'hostId': 'cea8816d446065ba50379057f72b942db7e204c60c4530591bc7d0be', 'status': 'active', 'metadata': {'metering.server_group': '44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.503 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.503 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.503 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.503 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.503 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.504 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:47:14.503891) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.509 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.bytes volume: 1820 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.514 14 DEBUG ceilometer.compute.virt.libvirt.inspector [-] No delta meter predecessor for f98d09d7-6aa0-4405-bfa0-be1f78d3911f / tap0c37c119-66 inspect_vnics /usr/lib/python3.12/site-packages/ceilometer/compute/virt/libvirt/inspector.py:143
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.514 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.bytes volume: 1646 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.518 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2856 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.519 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.519 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.519 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.519 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.519 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.519 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.519 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets volume: 16 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.520 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.packets volume: 16 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.520 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:47:14.519647) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.520 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 24 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.520 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.520 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.521 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.521 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.521 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.521 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.521 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.521 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.521 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.522 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.522 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.522 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.522 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.522 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:47:14.521207) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.522 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.522 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.522 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.523 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.523 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:47:14.522825) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.523 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.523 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.524 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.524 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.524 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.524 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.524 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.525 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:47:14.524440) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.538 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.538 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.capacity volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.553 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.553 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.capacity volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.573 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.573 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.574 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.574 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.574 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.574 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.574 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.574 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.575 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.576 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:47:14.575034) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.603 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.bytes volume: 28634112 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.604 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.bytes volume: 246078 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.629 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.bytes volume: 30145536 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.630 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.bytes volume: 246078 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.664 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.664 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.665 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.665 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.666 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.666 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.666 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.666 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.666 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.667 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.latency volume: 1980743477 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.667 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:47:14.666660) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.667 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.latency volume: 119778612 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.667 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.latency volume: 1934915770 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.668 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.latency volume: 164304713 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.668 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.668 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.669 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.669 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.669 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.670 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.670 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.670 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.670 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.670 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:47:14.670450) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.670 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.requests volume: 1024 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.671 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.requests volume: 107 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.671 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.requests volume: 1092 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.671 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.requests volume: 107 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.672 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.672 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.672 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.673 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.673 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.673 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.673 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.673 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.673 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.673 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.674 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.usage volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.674 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:47:14.673783) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.674 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.675 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.usage volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.675 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.675 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.675 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.676 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.676 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.676 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.676 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.676 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.677 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.677 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.bytes volume: 72822784 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.677 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.677 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.bytes volume: 72802304 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.678 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.678 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:47:14.677085) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.678 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.679 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.679 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.680 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.680 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.680 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.680 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.680 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.680 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.680 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.latency volume: 7591043388 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.681 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.681 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:47:14.680648) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.681 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.latency volume: 7798279393 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.681 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.682 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.682 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.682 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.683 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.683 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.683 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.683 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.683 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.683 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.684 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:47:14.683866) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.702 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.722 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.744 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.745 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.745 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.746 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.746 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.746 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.747 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.748 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.requests volume: 309 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.748 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:47:14.747328) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.749 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.749 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.requests volume: 272 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.750 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.750 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.751 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.751 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.752 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.753 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.753 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.754 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.754 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.754 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.755 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:47:14.754837) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.755 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.bytes.delta volume: 168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.756 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.756 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.757 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.757 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.759 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.rate in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.759 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.760 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.760 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.rate heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.760 14 DEBUG ceilometer.compute.pollsters [-] LibvirtInspector does not provide data for IncomingBytesRatePollster get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:162
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.760 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.rate (2025-10-11T02:47:14.760496) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.761 14 ERROR ceilometer.polling.manager [-] Prevent pollster network.incoming.bytes.rate from polling [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-yj44h76hdzhi-bejrsw3xgi4q>] on source pollsters anymore!: ceilometer.polling.plugin_base.PollsterPermanentError: [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-yj44h76hdzhi-bejrsw3xgi4q>]
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.762 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.762 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.762 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.763 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.763 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.763 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:47:14.763350) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.764 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.764 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.764 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.765 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.765 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.765 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.765 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets volume: 15 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.765 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:47:14.765618) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.766 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.packets volume: 12 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.766 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.767 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.767 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.767 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.768 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.768 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.768 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.769 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:47:14.768629) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.769 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.769 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.770 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.770 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.770 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.771 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.771 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.771 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.772 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.772 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:47:14.772067) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.773 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.773 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.773 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.773 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.774 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.774 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.774 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.774 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:47:14.774265) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.775 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.775 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.776 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.776 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.776 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.777 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.777 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.777 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.777 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.777 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:47:14.777602) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.778 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.allocation volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.778 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.779 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.allocation volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.779 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.780 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.780 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.780 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.781 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.781 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.781 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.781 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.782 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.782 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:47:14.782178) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.782 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.783 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.783 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.784 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.784 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.784 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.784 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.785 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.785 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.785 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/cpu volume: 234090000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.785 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:47:14.785333) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.785 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/cpu volume: 68800000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.786 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 59280000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.786 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.787 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.787 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.787 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.788 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.788 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.788 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.bytes volume: 1620 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.788 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:47:14.788215) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.789 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.bytes volume: 1620 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.789 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2412 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.790 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.790 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.790 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.791 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.791 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.791 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.791 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/memory.usage volume: 43.3671875 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.791 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:47:14.791426) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.792 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/memory.usage volume: 43.42578125 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.792 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.83984375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.793 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.793 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.794 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.rate in the context of pollsters
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.794 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.794 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.794 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.rate heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.795 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.rate (2025-10-11T02:47:14.794715) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.795 14 DEBUG ceilometer.compute.pollsters [-] LibvirtInspector does not provide data for OutgoingBytesRatePollster get_samples /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:162
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.795 14 ERROR ceilometer.polling.manager [-] Prevent pollster network.outgoing.bytes.rate from polling [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-yj44h76hdzhi-bejrsw3xgi4q>] on source pollsters anymore!: ceilometer.polling.plugin_base.PollsterPermanentError: [<NovaLikeServer: te-0512306-asg-am4iabdjybzp-yj44h76hdzhi-bejrsw3xgi4q>]
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.796 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.796 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.796 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.796 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.796 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.796 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.796 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.796 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.796 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.797 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.797 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.797 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.797 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.797 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.797 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.797 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.797 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.797 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.797 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.797 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.797 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.798 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.798 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.798 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.798 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:47:14.798 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:47:14 compute-0 nova_compute[356901]: 2025-10-11 02:47:14.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:47:14 compute-0 nova_compute[356901]: 2025-10-11 02:47:14.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:47:14 compute-0 nova_compute[356901]: 2025-10-11 02:47:14.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:47:14 compute-0 nova_compute[356901]: 2025-10-11 02:47:14.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:47:14 compute-0 nova_compute[356901]: 2025-10-11 02:47:14.937 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:14 compute-0 ceph-mon[191930]: pgmap v2089: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s wr, 0 op/s
Oct 11 02:47:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2090: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s wr, 0 op/s
Oct 11 02:47:15 compute-0 nova_compute[356901]: 2025-10-11 02:47:15.193 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:47:15 compute-0 nova_compute[356901]: 2025-10-11 02:47:15.194 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:47:15 compute-0 nova_compute[356901]: 2025-10-11 02:47:15.194 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:47:15 compute-0 nova_compute[356901]: 2025-10-11 02:47:15.194 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:47:15 compute-0 nova_compute[356901]: 2025-10-11 02:47:15.316 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:15 compute-0 agitated_goldstine[469235]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:47:15 compute-0 agitated_goldstine[469235]: --> relative data size: 1.0
Oct 11 02:47:15 compute-0 agitated_goldstine[469235]: --> All data devices are unavailable
Oct 11 02:47:15 compute-0 systemd[1]: libpod-26c46cf070236e6ffc90a6087a26562a5f1fe2e371f377d51958e451eb4efd03.scope: Deactivated successfully.
Oct 11 02:47:15 compute-0 systemd[1]: libpod-26c46cf070236e6ffc90a6087a26562a5f1fe2e371f377d51958e451eb4efd03.scope: Consumed 1.144s CPU time.
Oct 11 02:47:15 compute-0 conmon[469235]: conmon 26c46cf070236e6ffc90 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-26c46cf070236e6ffc90a6087a26562a5f1fe2e371f377d51958e451eb4efd03.scope/container/memory.events
Oct 11 02:47:15 compute-0 podman[469264]: 2025-10-11 02:47:15.762805988 +0000 UTC m=+0.047680225 container died 26c46cf070236e6ffc90a6087a26562a5f1fe2e371f377d51958e451eb4efd03 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_goldstine, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 02:47:15 compute-0 systemd[1]: var-lib-containers-storage-overlay-be98ce5e6070768df775738eaa3abc67192c7e34a62f6fc64e129243845da176-merged.mount: Deactivated successfully.
Oct 11 02:47:15 compute-0 podman[469264]: 2025-10-11 02:47:15.84539274 +0000 UTC m=+0.130266957 container remove 26c46cf070236e6ffc90a6087a26562a5f1fe2e371f377d51958e451eb4efd03 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_goldstine, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0)
Oct 11 02:47:15 compute-0 systemd[1]: libpod-conmon-26c46cf070236e6ffc90a6087a26562a5f1fe2e371f377d51958e451eb4efd03.scope: Deactivated successfully.
Oct 11 02:47:15 compute-0 sudo[469115]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:15 compute-0 ceph-mon[191930]: pgmap v2090: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s wr, 0 op/s
Oct 11 02:47:16 compute-0 sudo[469276]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:47:16 compute-0 sudo[469276]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:16 compute-0 sudo[469276]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:16 compute-0 sudo[469301]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:47:16 compute-0 sudo[469301]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:16 compute-0 sudo[469301]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:16 compute-0 sudo[469326]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:47:16 compute-0 sudo[469326]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:16 compute-0 sudo[469326]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:16 compute-0 sudo[469351]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:47:16 compute-0 sudo[469351]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:47:16 compute-0 podman[469415]: 2025-10-11 02:47:16.878411881 +0000 UTC m=+0.100536966 container create 5d97d73a9bb99dcbac84b5c2edc5445589fcd7ec2e702fd3039b8cf44046b315 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_bouman, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True)
Oct 11 02:47:16 compute-0 podman[469415]: 2025-10-11 02:47:16.836663897 +0000 UTC m=+0.058789012 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:47:16 compute-0 systemd[1]: Started libpod-conmon-5d97d73a9bb99dcbac84b5c2edc5445589fcd7ec2e702fd3039b8cf44046b315.scope.
Oct 11 02:47:16 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:47:17 compute-0 podman[469415]: 2025-10-11 02:47:17.009422081 +0000 UTC m=+0.231547166 container init 5d97d73a9bb99dcbac84b5c2edc5445589fcd7ec2e702fd3039b8cf44046b315 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_bouman, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:47:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2091: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:47:17 compute-0 podman[469415]: 2025-10-11 02:47:17.03201946 +0000 UTC m=+0.254144535 container start 5d97d73a9bb99dcbac84b5c2edc5445589fcd7ec2e702fd3039b8cf44046b315 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_bouman, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507)
Oct 11 02:47:17 compute-0 podman[469415]: 2025-10-11 02:47:17.038129684 +0000 UTC m=+0.260254759 container attach 5d97d73a9bb99dcbac84b5c2edc5445589fcd7ec2e702fd3039b8cf44046b315 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_bouman, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 02:47:17 compute-0 thirsty_bouman[469432]: 167 167
Oct 11 02:47:17 compute-0 systemd[1]: libpod-5d97d73a9bb99dcbac84b5c2edc5445589fcd7ec2e702fd3039b8cf44046b315.scope: Deactivated successfully.
Oct 11 02:47:17 compute-0 conmon[469432]: conmon 5d97d73a9bb99dcbac84 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-5d97d73a9bb99dcbac84b5c2edc5445589fcd7ec2e702fd3039b8cf44046b315.scope/container/memory.events
Oct 11 02:47:17 compute-0 podman[469415]: 2025-10-11 02:47:17.046857536 +0000 UTC m=+0.268982631 container died 5d97d73a9bb99dcbac84b5c2edc5445589fcd7ec2e702fd3039b8cf44046b315 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_bouman, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, io.buildah.version=1.39.3)
Oct 11 02:47:17 compute-0 systemd[1]: var-lib-containers-storage-overlay-ba461dff05e1088efef60162e00b4ba5fbb871ca554579f885c0b8bada08bb95-merged.mount: Deactivated successfully.
Oct 11 02:47:17 compute-0 podman[469415]: 2025-10-11 02:47:17.111036586 +0000 UTC m=+0.333161661 container remove 5d97d73a9bb99dcbac84b5c2edc5445589fcd7ec2e702fd3039b8cf44046b315 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_bouman, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:47:17 compute-0 systemd[1]: libpod-conmon-5d97d73a9bb99dcbac84b5c2edc5445589fcd7ec2e702fd3039b8cf44046b315.scope: Deactivated successfully.
Oct 11 02:47:17 compute-0 podman[469455]: 2025-10-11 02:47:17.342848316 +0000 UTC m=+0.061072854 container create 4cb2edf4cb303f097acbea3cf7eac648cc9d3bffe901329f671db71160b48325 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_fermi, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default)
Oct 11 02:47:17 compute-0 systemd[1]: Started libpod-conmon-4cb2edf4cb303f097acbea3cf7eac648cc9d3bffe901329f671db71160b48325.scope.
Oct 11 02:47:17 compute-0 podman[469455]: 2025-10-11 02:47:17.320594123 +0000 UTC m=+0.038818671 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:47:17 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:47:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/89a7795ebd64490b7fdfb24ef22abb24a305fffce4239241da19f9633d3b0188/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:47:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/89a7795ebd64490b7fdfb24ef22abb24a305fffce4239241da19f9633d3b0188/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:47:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/89a7795ebd64490b7fdfb24ef22abb24a305fffce4239241da19f9633d3b0188/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:47:17 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/89a7795ebd64490b7fdfb24ef22abb24a305fffce4239241da19f9633d3b0188/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:47:17 compute-0 podman[469455]: 2025-10-11 02:47:17.470006055 +0000 UTC m=+0.188230593 container init 4cb2edf4cb303f097acbea3cf7eac648cc9d3bffe901329f671db71160b48325 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_fermi, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3)
Oct 11 02:47:17 compute-0 podman[469455]: 2025-10-11 02:47:17.485789468 +0000 UTC m=+0.204013996 container start 4cb2edf4cb303f097acbea3cf7eac648cc9d3bffe901329f671db71160b48325 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_fermi, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:47:17 compute-0 podman[469455]: 2025-10-11 02:47:17.491773669 +0000 UTC m=+0.209998207 container attach 4cb2edf4cb303f097acbea3cf7eac648cc9d3bffe901329f671db71160b48325 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_fermi, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:47:18 compute-0 ceph-mon[191930]: pgmap v2091: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:47:18 compute-0 boring_fermi[469471]: {
Oct 11 02:47:18 compute-0 boring_fermi[469471]:     "0": [
Oct 11 02:47:18 compute-0 boring_fermi[469471]:         {
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "devices": [
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "/dev/loop3"
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             ],
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "lv_name": "ceph_lv0",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "lv_size": "21470642176",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "name": "ceph_lv0",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "tags": {
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.cluster_name": "ceph",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.crush_device_class": "",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.encrypted": "0",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.osd_id": "0",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.type": "block",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.vdo": "0"
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             },
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "type": "block",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "vg_name": "ceph_vg0"
Oct 11 02:47:18 compute-0 boring_fermi[469471]:         }
Oct 11 02:47:18 compute-0 boring_fermi[469471]:     ],
Oct 11 02:47:18 compute-0 boring_fermi[469471]:     "1": [
Oct 11 02:47:18 compute-0 boring_fermi[469471]:         {
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "devices": [
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "/dev/loop4"
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             ],
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "lv_name": "ceph_lv1",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "lv_size": "21470642176",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "name": "ceph_lv1",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "tags": {
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.cluster_name": "ceph",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.crush_device_class": "",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.encrypted": "0",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.osd_id": "1",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.type": "block",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.vdo": "0"
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             },
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "type": "block",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "vg_name": "ceph_vg1"
Oct 11 02:47:18 compute-0 boring_fermi[469471]:         }
Oct 11 02:47:18 compute-0 boring_fermi[469471]:     ],
Oct 11 02:47:18 compute-0 boring_fermi[469471]:     "2": [
Oct 11 02:47:18 compute-0 boring_fermi[469471]:         {
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "devices": [
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "/dev/loop5"
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             ],
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "lv_name": "ceph_lv2",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "lv_size": "21470642176",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "name": "ceph_lv2",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "tags": {
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.cluster_name": "ceph",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.crush_device_class": "",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.encrypted": "0",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.osd_id": "2",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.type": "block",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:                 "ceph.vdo": "0"
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             },
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "type": "block",
Oct 11 02:47:18 compute-0 boring_fermi[469471]:             "vg_name": "ceph_vg2"
Oct 11 02:47:18 compute-0 boring_fermi[469471]:         }
Oct 11 02:47:18 compute-0 boring_fermi[469471]:     ]
Oct 11 02:47:18 compute-0 boring_fermi[469471]: }
Oct 11 02:47:18 compute-0 systemd[1]: libpod-4cb2edf4cb303f097acbea3cf7eac648cc9d3bffe901329f671db71160b48325.scope: Deactivated successfully.
Oct 11 02:47:18 compute-0 podman[469480]: 2025-10-11 02:47:18.49131764 +0000 UTC m=+0.074858190 container died 4cb2edf4cb303f097acbea3cf7eac648cc9d3bffe901329f671db71160b48325 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_fermi, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:47:18 compute-0 nova_compute[356901]: 2025-10-11 02:47:18.523 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:47:18 compute-0 systemd[1]: var-lib-containers-storage-overlay-89a7795ebd64490b7fdfb24ef22abb24a305fffce4239241da19f9633d3b0188-merged.mount: Deactivated successfully.
Oct 11 02:47:18 compute-0 podman[469480]: 2025-10-11 02:47:18.595701836 +0000 UTC m=+0.179242376 container remove 4cb2edf4cb303f097acbea3cf7eac648cc9d3bffe901329f671db71160b48325 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=boring_fermi, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2)
Oct 11 02:47:18 compute-0 systemd[1]: libpod-conmon-4cb2edf4cb303f097acbea3cf7eac648cc9d3bffe901329f671db71160b48325.scope: Deactivated successfully.
Oct 11 02:47:18 compute-0 sudo[469351]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:18 compute-0 nova_compute[356901]: 2025-10-11 02:47:18.722 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:47:18 compute-0 nova_compute[356901]: 2025-10-11 02:47:18.724 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:47:18 compute-0 nova_compute[356901]: 2025-10-11 02:47:18.725 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:47:18 compute-0 nova_compute[356901]: 2025-10-11 02:47:18.726 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:47:18 compute-0 nova_compute[356901]: 2025-10-11 02:47:18.726 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:47:18 compute-0 sudo[469493]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:47:18 compute-0 sudo[469493]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:18 compute-0 sudo[469493]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:18 compute-0 sudo[469518]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:47:18 compute-0 sudo[469518]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:18 compute-0 sudo[469518]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:18 compute-0 sudo[469543]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:47:18 compute-0 sudo[469543]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:19 compute-0 sudo[469543]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2092: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:47:19 compute-0 sudo[469568]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:47:19 compute-0 sudo[469568]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:19 compute-0 podman[469633]: 2025-10-11 02:47:19.618467278 +0000 UTC m=+0.064664361 container create 2e02fec1d7bb5566dc1df586f06f9a2dc309e8941171478cf3f4aea3fc3dc46e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_greider, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:47:19 compute-0 systemd[1]: Started libpod-conmon-2e02fec1d7bb5566dc1df586f06f9a2dc309e8941171478cf3f4aea3fc3dc46e.scope.
Oct 11 02:47:19 compute-0 podman[469633]: 2025-10-11 02:47:19.588783227 +0000 UTC m=+0.034980380 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:47:19 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:47:19 compute-0 podman[469633]: 2025-10-11 02:47:19.747426489 +0000 UTC m=+0.193623582 container init 2e02fec1d7bb5566dc1df586f06f9a2dc309e8941171478cf3f4aea3fc3dc46e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_greider, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2)
Oct 11 02:47:19 compute-0 podman[469633]: 2025-10-11 02:47:19.759669246 +0000 UTC m=+0.205866349 container start 2e02fec1d7bb5566dc1df586f06f9a2dc309e8941171478cf3f4aea3fc3dc46e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_greider, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507)
Oct 11 02:47:19 compute-0 podman[469633]: 2025-10-11 02:47:19.765789519 +0000 UTC m=+0.211986612 container attach 2e02fec1d7bb5566dc1df586f06f9a2dc309e8941171478cf3f4aea3fc3dc46e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_greider, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2)
Oct 11 02:47:19 compute-0 musing_greider[469649]: 167 167
Oct 11 02:47:19 compute-0 systemd[1]: libpod-2e02fec1d7bb5566dc1df586f06f9a2dc309e8941171478cf3f4aea3fc3dc46e.scope: Deactivated successfully.
Oct 11 02:47:19 compute-0 conmon[469649]: conmon 2e02fec1d7bb5566dc1d <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-2e02fec1d7bb5566dc1df586f06f9a2dc309e8941171478cf3f4aea3fc3dc46e.scope/container/memory.events
Oct 11 02:47:19 compute-0 podman[469633]: 2025-10-11 02:47:19.771357803 +0000 UTC m=+0.217554866 container died 2e02fec1d7bb5566dc1df586f06f9a2dc309e8941171478cf3f4aea3fc3dc46e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_greider, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, org.label-schema.vendor=CentOS)
Oct 11 02:47:19 compute-0 systemd[1]: var-lib-containers-storage-overlay-5a1b1d1b440bc95c727979a6951c56b00fb4ba86f52908d75903eef5472e0853-merged.mount: Deactivated successfully.
Oct 11 02:47:19 compute-0 podman[469633]: 2025-10-11 02:47:19.830968058 +0000 UTC m=+0.277165121 container remove 2e02fec1d7bb5566dc1df586f06f9a2dc309e8941171478cf3f4aea3fc3dc46e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_greider, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2)
Oct 11 02:47:19 compute-0 systemd[1]: libpod-conmon-2e02fec1d7bb5566dc1df586f06f9a2dc309e8941171478cf3f4aea3fc3dc46e.scope: Deactivated successfully.
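The span from the container create at 02:47:19.618 to the conmon scope teardown above is one complete podman lifecycle for the helper container musing_greider: create, init, start, attach, died, remove, all inside roughly 250 ms. cephadm runs each inspection step in a throwaway container like this, which is also why conmon then warns about a cgroup memory.events file that is already gone. The same sequence can be replayed from podman's event log (a sketch, assuming the podman CLI on the host):

    import subprocess

    # Dump recent lifecycle events for the named container; --stream=false
    # returns instead of following. These are standard podman-events options.
    subprocess.run([
        "podman", "events", "--stream=false", "--since", "5m",
        "--filter", "container=musing_greider",
    ], check=True)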
Oct 11 02:47:19 compute-0 nova_compute[356901]: 2025-10-11 02:47:19.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:47:19 compute-0 nova_compute[356901]: 2025-10-11 02:47:19.941 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:20 compute-0 podman[469672]: 2025-10-11 02:47:20.085431762 +0000 UTC m=+0.089710643 container create d9bc4ef7efd4e0880791cecacf8aa9ce2d9343c56b975e200c17874cdde6bbfb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_cohen, org.label-schema.license=GPLv2, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:47:20 compute-0 ceph-mon[191930]: pgmap v2092: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:47:20 compute-0 podman[469672]: 2025-10-11 02:47:20.052208601 +0000 UTC m=+0.056487452 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:47:20 compute-0 systemd[1]: Started libpod-conmon-d9bc4ef7efd4e0880791cecacf8aa9ce2d9343c56b975e200c17874cdde6bbfb.scope.
Oct 11 02:47:20 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:47:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ace175fe88cde9daf298d87b990134e099bb3ce51d79b871885321d57c668214/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:47:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ace175fe88cde9daf298d87b990134e099bb3ce51d79b871885321d57c668214/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:47:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ace175fe88cde9daf298d87b990134e099bb3ce51d79b871885321d57c668214/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:47:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ace175fe88cde9daf298d87b990134e099bb3ce51d79b871885321d57c668214/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:47:20 compute-0 podman[469672]: 2025-10-11 02:47:20.246739916 +0000 UTC m=+0.251018817 container init d9bc4ef7efd4e0880791cecacf8aa9ce2d9343c56b975e200c17874cdde6bbfb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_cohen, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS)
Oct 11 02:47:20 compute-0 podman[469672]: 2025-10-11 02:47:20.269006167 +0000 UTC m=+0.273285048 container start d9bc4ef7efd4e0880791cecacf8aa9ce2d9343c56b975e200c17874cdde6bbfb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_cohen, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:47:20 compute-0 podman[469672]: 2025-10-11 02:47:20.27594195 +0000 UTC m=+0.280220871 container attach d9bc4ef7efd4e0880791cecacf8aa9ce2d9343c56b975e200c17874cdde6bbfb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_cohen, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:47:20 compute-0 nova_compute[356901]: 2025-10-11 02:47:20.319 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2093: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:47:21 compute-0 podman[469699]: 2025-10-11 02:47:21.222787024 +0000 UTC m=+0.106089432 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, tcib_managed=true, config_id=edpm, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.schema-version=1.0)
Oct 11 02:47:21 compute-0 podman[469701]: 2025-10-11 02:47:21.233910899 +0000 UTC m=+0.117152185 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:47:21 compute-0 podman[469700]: 2025-10-11 02:47:21.266530502 +0000 UTC m=+0.145160593 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., distribution-scope=public, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, release=1755695350, vendor=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, architecture=x86_64, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, com.redhat.component=ubi9-minimal-container, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, version=9.6, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, name=ubi9-minimal, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-type=git, io.buildah.version=1.33.7, managed_by=edpm_ansible, config_id=edpm, url=https://catalog.redhat.com/en/search?searchType=containers, container_name=openstack_network_exporter, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9.)
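The three health_status=healthy events above are podman's periodic healthchecks for the edpm_ansible-managed telemetry containers; each container's config_data embeds its probe under 'healthcheck': {'test': ..., 'mount': ...}, and the mounted /openstack/healthcheck script is what actually runs. The same probe can be fired on demand (a sketch, assuming the podman CLI and these container names on the host):

    import subprocess

    # `podman healthcheck run` executes the container's configured check and
    # exits 0 when it passes, matching the health_status=healthy events above.
    for name in ("ceilometer_agent_ipmi", "node_exporter",
                 "openstack_network_exporter"):
        rc = subprocess.run(["podman", "healthcheck", "run", name]).returncode
        print(name, "healthy" if rc == 0 else f"unhealthy (rc={rc})")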
Oct 11 02:47:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:47:21 compute-0 friendly_cohen[469686]: {
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:         "osd_id": 1,
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:         "type": "bluestore"
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:     },
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:         "osd_id": 2,
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:         "type": "bluestore"
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:     },
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:         "osd_id": 0,
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:         "type": "bluestore"
Oct 11 02:47:21 compute-0 friendly_cohen[469686]:     }
Oct 11 02:47:21 compute-0 friendly_cohen[469686]: }
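The JSON above is the `ceph-volume raw list --format json` output requested by the cephadm call logged at 02:47:19: three bluestore OSDs (ids 0 to 2), one per ceph_vgN/ceph_lvN logical volume, all belonging to cluster fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da. A minimal sketch of running the same inventory and mapping OSD ids to devices (assumes ceph-volume is callable directly; under cephadm it normally runs inside the ceph container, as it does here):

    import json
    import subprocess

    out = subprocess.run(
        ["ceph-volume", "raw", "list", "--format", "json"],
        capture_output=True, text=True, check=True,
    ).stdout
    # Top-level keys are osd_uuid values, exactly as in the log output above.
    for osd_uuid, osd in sorted(json.loads(out).items(),
                                key=lambda kv: kv[1]["osd_id"]):
        print(f"osd.{osd['osd_id']}  {osd['device']}  "
              f"({osd['type']}, uuid {osd_uuid})")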
Oct 11 02:47:21 compute-0 systemd[1]: libpod-d9bc4ef7efd4e0880791cecacf8aa9ce2d9343c56b975e200c17874cdde6bbfb.scope: Deactivated successfully.
Oct 11 02:47:21 compute-0 systemd[1]: libpod-d9bc4ef7efd4e0880791cecacf8aa9ce2d9343c56b975e200c17874cdde6bbfb.scope: Consumed 1.107s CPU time.
Oct 11 02:47:21 compute-0 conmon[469686]: conmon d9bc4ef7efd4e0880791 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-d9bc4ef7efd4e0880791cecacf8aa9ce2d9343c56b975e200c17874cdde6bbfb.scope/container/memory.events
Oct 11 02:47:21 compute-0 podman[469672]: 2025-10-11 02:47:21.41767309 +0000 UTC m=+1.421951941 container died d9bc4ef7efd4e0880791cecacf8aa9ce2d9343c56b975e200c17874cdde6bbfb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_cohen, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:47:21 compute-0 systemd[1]: var-lib-containers-storage-overlay-ace175fe88cde9daf298d87b990134e099bb3ce51d79b871885321d57c668214-merged.mount: Deactivated successfully.
Oct 11 02:47:21 compute-0 podman[469672]: 2025-10-11 02:47:21.51345808 +0000 UTC m=+1.517736931 container remove d9bc4ef7efd4e0880791cecacf8aa9ce2d9343c56b975e200c17874cdde6bbfb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=friendly_cohen, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True)
Oct 11 02:47:21 compute-0 systemd[1]: libpod-conmon-d9bc4ef7efd4e0880791cecacf8aa9ce2d9343c56b975e200c17874cdde6bbfb.scope: Deactivated successfully.
Oct 11 02:47:21 compute-0 sudo[469568]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:47:21 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:47:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:47:21 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:47:21 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev c51d11b0-d796-4aca-a823-a14549285973 does not exist
Oct 11 02:47:21 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 0843732c-9519-4251-9661-f17a2e04d536 does not exist
Oct 11 02:47:21 compute-0 sudo[469790]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:47:21 compute-0 sudo[469790]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:21 compute-0 sudo[469790]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:21 compute-0 sudo[469815]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:47:21 compute-0 sudo[469815]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:47:21 compute-0 sudo[469815]: pam_unix(sudo:session): session closed for user root
Oct 11 02:47:21 compute-0 nova_compute[356901]: 2025-10-11 02:47:21.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:47:21 compute-0 nova_compute[356901]: 2025-10-11 02:47:21.981 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:47:21 compute-0 nova_compute[356901]: 2025-10-11 02:47:21.982 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:47:21 compute-0 nova_compute[356901]: 2025-10-11 02:47:21.982 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:47:21 compute-0 nova_compute[356901]: 2025-10-11 02:47:21.983 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:47:21 compute-0 nova_compute[356901]: 2025-10-11 02:47:21.983 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:47:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:47:22 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1328943949' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:47:22 compute-0 nova_compute[356901]: 2025-10-11 02:47:22.537 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.553s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
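Nova's resource audit shells out to `ceph df` as client.openstack (dispatched by the mon a few lines up) and gets its answer in 0.553 s; the 60 GiB totals in the neighbouring pgmap lines come from the same cluster statistics. A sketch of the same query and the fields the audit is after (the exact JSON key names are an assumption from current Ceph releases, not something this log confirms):

    import json
    import subprocess

    df = json.loads(subprocess.run(
        ["ceph", "df", "--format=json", "--id", "openstack",
         "--conf", "/etc/ceph/ceph.conf"],
        capture_output=True, text=True, check=True,
    ).stdout)
    stats = df["stats"]  # assumed keys: total_bytes, total_avail_bytes
    gib = 1024 ** 3
    print(f"total {stats['total_bytes'] / gib:.1f} GiB, "
          f"avail {stats['total_avail_bytes'] / gib:.1f} GiB")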
Oct 11 02:47:22 compute-0 ceph-mon[191930]: pgmap v2093: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:47:22 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:47:22 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:47:22 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1328943949' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:47:22 compute-0 nova_compute[356901]: 2025-10-11 02:47:22.941 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:47:22 compute-0 nova_compute[356901]: 2025-10-11 02:47:22.942 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:47:22 compute-0 nova_compute[356901]: 2025-10-11 02:47:22.949 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:47:22 compute-0 nova_compute[356901]: 2025-10-11 02:47:22.949 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:47:22 compute-0 nova_compute[356901]: 2025-10-11 02:47:22.957 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:47:22 compute-0 nova_compute[356901]: 2025-10-11 02:47:22.957 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:47:22 compute-0 nova_compute[356901]: 2025-10-11 02:47:22.958 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:47:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2094: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s wr, 0 op/s
Oct 11 02:47:23 compute-0 nova_compute[356901]: 2025-10-11 02:47:23.444 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:47:23 compute-0 nova_compute[356901]: 2025-10-11 02:47:23.446 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3219MB free_disk=59.86431121826172GB free_vcpus=5 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:47:23 compute-0 nova_compute[356901]: 2025-10-11 02:47:23.446 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:47:23 compute-0 nova_compute[356901]: 2025-10-11 02:47:23.447 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:47:23 compute-0 nova_compute[356901]: 2025-10-11 02:47:23.891 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:47:23 compute-0 nova_compute[356901]: 2025-10-11 02:47:23.892 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 8422017b-c868-4ba2-ab1f-61d3668ca145 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:47:23 compute-0 nova_compute[356901]: 2025-10-11 02:47:23.892 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance f98d09d7-6aa0-4405-bfa0-be1f78d3911f actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:47:23 compute-0 nova_compute[356901]: 2025-10-11 02:47:23.892 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 3 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:47:23 compute-0 nova_compute[356901]: 2025-10-11 02:47:23.893 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1280MB phys_disk=59GB used_disk=4GB total_vcpus=8 used_vcpus=3 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:47:23 compute-0 nova_compute[356901]: 2025-10-11 02:47:23.974 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:47:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:47:24 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3351123226' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:47:24 compute-0 nova_compute[356901]: 2025-10-11 02:47:24.446 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.472s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:47:24 compute-0 nova_compute[356901]: 2025-10-11 02:47:24.458 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:47:24 compute-0 nova_compute[356901]: 2025-10-11 02:47:24.580 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
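The inventory in the line above also fixes the provider's allocation ratios. Placement derives schedulable capacity as (total - reserved) * allocation_ratio, which is how a host with 8 physical vCPUs (3 already allocated, per the resource view at 02:47:23) advertises room for far more. A worked sketch with the logged numbers:

    # Standard placement capacity arithmetic applied to the logged inventory.
    inventory = {
        "VCPU":      {"total": 8,    "reserved": 0,   "allocation_ratio": 4.0},
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "DISK_GB":   {"total": 59,   "reserved": 1,   "allocation_ratio": 0.9},
    }
    for rc, inv in inventory.items():
        cap = (inv["total"] - inv["reserved"]) * inv["allocation_ratio"]
        print(f"{rc}: {cap:g}")  # VCPU: 32, MEMORY_MB: 7168, DISK_GB: 52.2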
Oct 11 02:47:24 compute-0 nova_compute[356901]: 2025-10-11 02:47:24.581 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:47:24 compute-0 nova_compute[356901]: 2025-10-11 02:47:24.582 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 1.135s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:47:24 compute-0 ceph-mon[191930]: pgmap v2094: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s wr, 0 op/s
Oct 11 02:47:24 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3351123226' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:47:24 compute-0 nova_compute[356901]: 2025-10-11 02:47:24.944 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2095: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:47:25 compute-0 nova_compute[356901]: 2025-10-11 02:47:25.322 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:47:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:47:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:47:26 compute-0 ceph-mon[191930]: pgmap v2095: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:47:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:47:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:47:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:47:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:47:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2096: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:47:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:47:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2604606846' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:47:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:47:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2604606846' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:47:28 compute-0 ceph-mon[191930]: pgmap v2096: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:47:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2604606846' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:47:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2604606846' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:47:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2097: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:29 compute-0 podman[157119]: time="2025-10-11T02:47:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:47:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:47:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:47:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:47:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9532 "" "Go-http-client/1.1"
Oct 11 02:47:29 compute-0 nova_compute[356901]: 2025-10-11 02:47:29.950 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:30 compute-0 podman[469884]: 2025-10-11 02:47:30.235352638 +0000 UTC m=+0.133285386 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, maintainer=Red Hat, Inc., io.buildah.version=1.29.0, io.openshift.tags=base rhel9, com.redhat.component=ubi9-container, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=kepler, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.expose-services=, release=1214.1726694543, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, version=9.4, architecture=x86_64, name=ubi9, vcs-type=git, build-date=2024-09-18T21:23:30, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, release-0.7.12=, config_id=edpm)
Oct 11 02:47:30 compute-0 nova_compute[356901]: 2025-10-11 02:47:30.326 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:30 compute-0 ceph-mon[191930]: pgmap v2097: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2098: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:47:31 compute-0 openstack_network_exporter[374316]: ERROR   02:47:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:47:31 compute-0 openstack_network_exporter[374316]: ERROR   02:47:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:47:31 compute-0 openstack_network_exporter[374316]: ERROR   02:47:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:47:31 compute-0 openstack_network_exporter[374316]: ERROR   02:47:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:47:31 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:47:31 compute-0 openstack_network_exporter[374316]: ERROR   02:47:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:47:31 compute-0 openstack_network_exporter[374316]: 
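The error burst above is openstack_network_exporter probing daemons this node does not expose: appctl-style calls need a per-daemon *.ctl control socket, ovn-northd only runs on controller nodes, and the dpif-netdev PMD queries only apply to a userspace (DPDK) datapath, while this host's ports use datapath_type=system. A sketch of checking for those sockets directly (the paths are conventional and an assumption here; inside the exporter container they are remapped per the volume list logged at 02:47:21):

    import glob

    # Each OVS/OVN daemon normally creates <rundir>/<daemon>.<pid>.ctl.
    for pattern in ("/var/run/openvswitch/ovsdb-server.*.ctl",
                    "/var/run/openvswitch/ovs-vswitchd.*.ctl",
                    "/var/lib/openvswitch/ovn/ovn-northd.*.ctl"):
        hits = glob.glob(pattern)
        print(pattern, "->", ", ".join(hits) if hits else "missing")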
Oct 11 02:47:32 compute-0 ceph-mon[191930]: pgmap v2098: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2099: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:34 compute-0 ceph-mon[191930]: pgmap v2099: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:34 compute-0 nova_compute[356901]: 2025-10-11 02:47:34.955 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2100: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:35 compute-0 podman[469901]: 2025-10-11 02:47:35.220089389 +0000 UTC m=+0.111482359 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:47:35 compute-0 podman[469903]: 2025-10-11 02:47:35.266845616 +0000 UTC m=+0.140943321 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.schema-version=1.0)
Oct 11 02:47:35 compute-0 podman[469910]: 2025-10-11 02:47:35.271812711 +0000 UTC m=+0.134200854 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_metadata_agent)
Oct 11 02:47:35 compute-0 podman[469902]: 2025-10-11 02:47:35.302309112 +0000 UTC m=+0.185955026 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_managed=true, config_id=ovn_controller, container_name=ovn_controller)
Oct 11 02:47:35 compute-0 nova_compute[356901]: 2025-10-11 02:47:35.326 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
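Note: the recurring "[POLLIN] on fd 24 __log_wakeup" lines are ovsdbapp's OVS IDL poll loop reporting that the OVSDB socket became readable. A minimal sketch of that loop using the same ovs.poller module, assuming a plain TCP OVSDB endpoint rather than this deployment's actual connection:

    import select
    import socket

    import ovs.poller  # same module whose __log_wakeup emits the DEBUG line

    sock = socket.create_connection(("127.0.0.1", 6640))  # hypothetical endpoint
    sock.setblocking(False)

    while True:
        poller = ovs.poller.Poller()
        poller.fd_wait(sock.fileno(), select.POLLIN)  # wake when fd is readable
        poller.timer_wait(5000)                       # or after 5 s regardless
        poller.block()  # logs "[POLLIN] on fd N __log_wakeup" at DEBUG on wakeup
        try:
            if not sock.recv(4096):
                break          # peer closed the connection
        except BlockingIOError:
            pass               # timer wakeup, nothing to read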
Oct 11 02:47:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
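Note: as a sanity check on the _set_new_cache_sizes line, the three allocations should roughly partition the monitor's cache budget, with a few MiB of slack left by the autotuner's rounding. Worked out from the logged values:

    # values copied from the log line above (bytes)
    cache_size = 1020054731              # ~972.8 MiB total budget
    inc_alloc = full_alloc = 348127232   # 332 MiB each for the inc/full osdmap caches
    kv_alloc = 318767104                 # 304 MiB for the rocksdb cache

    total = inc_alloc + full_alloc + kv_alloc
    print(total, cache_size - total)     # 1015021568, ~4.8 MiB of slack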
Oct 11 02:47:36 compute-0 ceph-mon[191930]: pgmap v2100: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2101: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:38 compute-0 ceph-mon[191930]: pgmap v2101: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2102: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:39 compute-0 nova_compute[356901]: 2025-10-11 02:47:39.961 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:40 compute-0 nova_compute[356901]: 2025-10-11 02:47:40.331 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:40 compute-0 ceph-mon[191930]: pgmap v2102: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2103: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:47:42 compute-0 ceph-mon[191930]: pgmap v2103: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2104: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s wr, 0 op/s
Oct 11 02:47:43 compute-0 podman[469983]: 2025-10-11 02:47:43.23553506 +0000 UTC m=+0.117451014 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, org.label-schema.build-date=20251009, container_name=multipathd, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Oct 11 02:47:43 compute-0 podman[469984]: 2025-10-11 02:47:43.269366969 +0000 UTC m=+0.140296502 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, container_name=iscsid, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:47:44 compute-0 ceph-mon[191930]: pgmap v2104: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s wr, 0 op/s
Oct 11 02:47:44 compute-0 nova_compute[356901]: 2025-10-11 02:47:44.965 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2105: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s wr, 0 op/s
Oct 11 02:47:45 compute-0 nova_compute[356901]: 2025-10-11 02:47:45.333 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:47:46 compute-0 ceph-mon[191930]: pgmap v2105: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s wr, 0 op/s
Oct 11 02:47:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2106: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s wr, 0 op/s
Oct 11 02:47:48 compute-0 ceph-mon[191930]: pgmap v2106: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s wr, 0 op/s
Oct 11 02:47:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2107: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s wr, 0 op/s
Oct 11 02:47:49 compute-0 nova_compute[356901]: 2025-10-11 02:47:49.969 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:50 compute-0 nova_compute[356901]: 2025-10-11 02:47:50.337 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:50 compute-0 ceph-mon[191930]: pgmap v2107: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s wr, 0 op/s
Oct 11 02:47:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2108: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s wr, 0 op/s
Oct 11 02:47:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:47:52 compute-0 podman[470020]: 2025-10-11 02:47:52.228183741 +0000 UTC m=+0.120683928 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_id=edpm, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:47:52 compute-0 podman[470022]: 2025-10-11 02:47:52.234688241 +0000 UTC m=+0.101561249 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:47:52 compute-0 podman[470021]: 2025-10-11 02:47:52.25313934 +0000 UTC m=+0.129644610 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.expose-services=, release=1755695350, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=minimal rhel9, com.redhat.component=ubi9-minimal-container, io.buildah.version=1.33.7, version=9.6, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, distribution-scope=public, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., container_name=openstack_network_exporter, name=ubi9-minimal, managed_by=edpm_ansible, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-type=git, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, config_id=edpm, url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc.)
Oct 11 02:47:52 compute-0 ceph-mon[191930]: pgmap v2108: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s wr, 0 op/s
Oct 11 02:47:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2109: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s wr, 0 op/s
Oct 11 02:47:54 compute-0 ceph-mon[191930]: pgmap v2109: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 3.7 KiB/s wr, 0 op/s
Oct 11 02:47:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:47:54.877 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:47:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:47:54.878 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:47:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:47:54.879 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
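Note: this Acquiring/acquired/released triplet is the standard DEBUG output of oslo's named-lock decorator, which the metadata agent's process monitor wraps its check in. The same pattern in a few lines (toy function, same library):

    from oslo_concurrency import lockutils

    @lockutils.synchronized("_check_child_processes")
    def check_child_processes():
        # runs with the named lock held; at DEBUG, oslo logs the same
        # "Acquiring" / "acquired ... waited" / "released ... held" lines
        pass

    check_child_processes()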
Oct 11 02:47:54 compute-0 nova_compute[356901]: 2025-10-11 02:47:54.974 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2110: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:55 compute-0 nova_compute[356901]: 2025-10-11 02:47:55.338 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:47:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:47:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:47:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:47:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:47:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:47:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:47:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:47:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:47:56
Oct 11 02:47:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:47:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:47:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['.mgr', '.rgw.root', 'images', 'default.rgw.meta', 'default.rgw.log', 'volumes', 'default.rgw.control', 'cephfs.cephfs.data', 'vms', 'backups', 'cephfs.cephfs.meta']
Oct 11 02:47:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
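Note: "prepared 0/10 changes" means the upmap balancer walked its pools and found nothing above the misplaced threshold to move, so no plan was executed. The same state can be read back from the CLI; a sketch, assuming an admin keyring is available:

    import json
    import subprocess

    status = json.loads(subprocess.check_output(
        ["ceph", "balancer", "status", "--format", "json"]))
    print(status["active"], status["mode"])  # expect: True upmap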
Oct 11 02:47:56 compute-0 ceph-mon[191930]: pgmap v2110: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2111: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:47:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:47:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:47:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:47:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:47:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:47:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:47:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:47:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:47:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:47:58 compute-0 ceph-mon[191930]: pgmap v2111: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2112: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:47:59 compute-0 podman[157119]: time="2025-10-11T02:47:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:47:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:47:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:47:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:47:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9551 "" "Go-http-client/1.1"
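Note: these access-log lines are prometheus-podman-exporter scraping the libpod REST API through /run/podman/podman.sock. The same GET can be reproduced from Python with an HTTP connection over the unix socket; a self-contained sketch:

    import http.client
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        """HTTPConnection that dials an AF_UNIX socket path."""
        def __init__(self, socket_path):
            super().__init__("localhost")
            self.socket_path = socket_path

        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self.socket_path)

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    resp = conn.getresponse()
    print(resp.status, len(resp.read()))  # 200 and the JSON list of containers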
Oct 11 02:47:59 compute-0 nova_compute[356901]: 2025-10-11 02:47:59.979 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:00 compute-0 nova_compute[356901]: 2025-10-11 02:48:00.343 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:00 compute-0 ceph-mon[191930]: pgmap v2112: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2113: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:01 compute-0 podman[470080]: 2025-10-11 02:48:01.234885834 +0000 UTC m=+0.129944649 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vcs-type=git, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, version=9.4, com.redhat.component=ubi9-container, architecture=x86_64, config_id=edpm, release=1214.1726694543, io.openshift.tags=base rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release-0.7.12=, io.k8s.display-name=Red Hat Universal Base Image 9, vendor=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, maintainer=Red Hat, Inc., summary=Provides the latest release of Red Hat Universal Base Image 9., build-date=2024-09-18T21:23:30, io.openshift.expose-services=, managed_by=edpm_ansible, io.buildah.version=1.29.0, container_name=kepler, distribution-scope=public)
Oct 11 02:48:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:48:01 compute-0 systemd[1]: virtsecretd.service: Deactivated successfully.
Oct 11 02:48:01 compute-0 openstack_network_exporter[374316]: ERROR   02:48:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:48:01 compute-0 openstack_network_exporter[374316]: ERROR   02:48:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:48:01 compute-0 openstack_network_exporter[374316]: ERROR   02:48:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:48:01 compute-0 openstack_network_exporter[374316]: ERROR   02:48:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:48:01 compute-0 openstack_network_exporter[374316]: ERROR   02:48:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
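Note: these exporter errors repeat on every scrape and are expected on a compute node: openstack_network_exporter probes the daemons through their appctl control sockets, ovn-northd only runs on controllers, and the dpif-netdev calls only answer on a userspace (DPDK) datapath. A quick check of which control sockets actually exist, using the usual default paths:

    import glob

    for pattern in ("/var/run/openvswitch/*.ctl", "/var/run/ovn/*.ctl"):
        print(pattern, glob.glob(pattern))
    # no ovn-northd*.ctl here => "no control socket files found for ovn-northd"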
Oct 11 02:48:02 compute-0 systemd[1]: virtproxyd.service: Deactivated successfully.
Oct 11 02:48:02 compute-0 ceph-mon[191930]: pgmap v2113: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2114: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:03 compute-0 nova_compute[356901]: 2025-10-11 02:48:03.581 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:48:04 compute-0 ceph-mon[191930]: pgmap v2114: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:04 compute-0 nova_compute[356901]: 2025-10-11 02:48:04.985 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2115: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:05 compute-0 nova_compute[356901]: 2025-10-11 02:48:05.346 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:05 compute-0 nova_compute[356901]: 2025-10-11 02:48:05.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:48:06 compute-0 podman[470109]: 2025-10-11 02:48:06.210402945 +0000 UTC m=+0.079811463 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 02:48:06 compute-0 podman[470102]: 2025-10-11 02:48:06.262716094 +0000 UTC m=+0.150398836 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:48:06 compute-0 podman[470104]: 2025-10-11 02:48:06.271690106 +0000 UTC m=+0.130688239 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=edpm, container_name=ceilometer_agent_compute, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible)
Oct 11 02:48:06 compute-0 podman[470103]: 2025-10-11 02:48:06.277781394 +0000 UTC m=+0.153301310 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=ovn_controller, org.label-schema.build-date=20251009, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, container_name=ovn_controller)
Oct 11 02:48:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:48:06 compute-0 ceph-mon[191930]: pgmap v2115: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2116: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:07 compute-0 unix_chkpwd[470185]: password check failed for user (root)
Oct 11 02:48:07 compute-0 sshd-session[470183]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=80.94.93.119  user=root
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0020665708227518395 of space, bias 1.0, pg target 0.6199712468255518 quantized to 32 (current 32)
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00125203744627857 of space, bias 1.0, pg target 0.375611233883571 quantized to 32 (current 32)
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:48:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
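Note: the pg_autoscaler targets above are internally consistent with pg_target = usage_fraction * bias * (pgs_per_osd_target * num_osds), where the last factor is 300; that fits this cluster if it has 3 OSDs at the default mon_target_pg_per_osd of 100 (an assumption, neither value is logged here). Reproducing four of the logged values:

    # usage fraction and bias copied from the pg_autoscaler lines above
    pools = {
        ".mgr":               (7.185749983720779e-06, 1.0),
        "vms":                (0.0020665708227518395, 1.0),
        "images":             (0.00125203744627857,   1.0),
        "cephfs.cephfs.meta": (5.087256625643029e-07, 4.0),
    }
    for name, (usage, bias) in pools.items():
        print(name, usage * bias * 300)   # matches each "pg target" in the log

The raw target is then quantized to a power of two, subject to per-pool minimums, giving the "quantized to N" figures.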
Oct 11 02:48:08 compute-0 ceph-mon[191930]: pgmap v2116: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2117: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:09 compute-0 sshd-session[470183]: Failed password for root from 80.94.93.119 port 15556 ssh2
Oct 11 02:48:09 compute-0 nova_compute[356901]: 2025-10-11 02:48:09.992 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:10 compute-0 unix_chkpwd[470186]: password check failed for user (root)
Oct 11 02:48:10 compute-0 nova_compute[356901]: 2025-10-11 02:48:10.348 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:10 compute-0 ceph-mon[191930]: pgmap v2117: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2118: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:48:11 compute-0 sshd-session[470183]: Failed password for root from 80.94.93.119 port 15556 ssh2
Oct 11 02:48:12 compute-0 ceph-mon[191930]: pgmap v2118: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2119: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:13 compute-0 unix_chkpwd[470187]: password check failed for user (root)
Oct 11 02:48:13 compute-0 nova_compute[356901]: 2025-10-11 02:48:13.899 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:48:14 compute-0 podman[470188]: 2025-10-11 02:48:14.183125837 +0000 UTC m=+0.080795752 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_id=multipathd, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3)
Oct 11 02:48:14 compute-0 podman[470189]: 2025-10-11 02:48:14.195965622 +0000 UTC m=+0.084673846 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_id=iscsid, container_name=iscsid, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.build-date=20251009)
Oct 11 02:48:14 compute-0 nova_compute[356901]: 2025-10-11 02:48:14.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:48:14 compute-0 ceph-mon[191930]: pgmap v2119: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:14 compute-0 nova_compute[356901]: 2025-10-11 02:48:14.997 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2120: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:15 compute-0 nova_compute[356901]: 2025-10-11 02:48:15.352 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:15 compute-0 sshd-session[470183]: Failed password for root from 80.94.93.119 port 15556 ssh2
Oct 11 02:48:15 compute-0 nova_compute[356901]: 2025-10-11 02:48:15.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:48:15 compute-0 nova_compute[356901]: 2025-10-11 02:48:15.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
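Note: the "Running periodic task ..." lines all come from oslo_service's periodic task runner, and _reclaim_queued_deletes shows the usual gate-and-skip pattern. A toy manager with the same library (not nova's actual class):

    from oslo_config import cfg
    from oslo_service import periodic_task

    CONF = cfg.CONF

    class Manager(periodic_task.PeriodicTasks):
        def __init__(self):
            super().__init__(CONF)

        @periodic_task.periodic_task(spacing=60)
        def _reclaim_queued_deletes(self, context):
            reclaim_interval = 0  # stands in for CONF.reclaim_instance_interval
            if reclaim_interval <= 0:
                return  # nova logs "... <= 0, skipping..." at this point

    # a service loop would call Manager().run_periodic_tasks(context) on a timer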
Oct 11 02:48:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:48:16 compute-0 sshd-session[470183]: Received disconnect from 80.94.93.119 port 15556:11:  [preauth]
Oct 11 02:48:16 compute-0 sshd-session[470183]: Disconnected from authenticating user root 80.94.93.119 port 15556 [preauth]
Oct 11 02:48:16 compute-0 sshd-session[470183]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=80.94.93.119  user=root
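Note: interleaved with the service chatter, sshd-session is rejecting a password brute-force against root from 80.94.93.119 (three failures on port 15556 above, then a reconnect on port 23672). A small filter to tally such attempts per source address from a saved journal, e.g. journalctl output piped to stdin:

    import collections
    import re
    import sys

    pat = re.compile(r"Failed password for (?:invalid user )?(\S+) from (\S+) port")
    hits = collections.Counter()
    for line in sys.stdin:
        m = pat.search(line)
        if m:
            hits[(m.group(2), m.group(1))] += 1   # key: (source IP, target user)
    for (ip, user), n in hits.most_common():
        print(f"{n:4d}  {ip}  user={user}")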
Oct 11 02:48:16 compute-0 nova_compute[356901]: 2025-10-11 02:48:16.893 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:48:16 compute-0 nova_compute[356901]: 2025-10-11 02:48:16.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:48:16 compute-0 nova_compute[356901]: 2025-10-11 02:48:16.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:48:16 compute-0 ceph-mon[191930]: pgmap v2120: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2121: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:17 compute-0 nova_compute[356901]: 2025-10-11 02:48:17.122 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:48:17 compute-0 nova_compute[356901]: 2025-10-11 02:48:17.123 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:48:17 compute-0 nova_compute[356901]: 2025-10-11 02:48:17.124 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:48:17 compute-0 unix_chkpwd[470225]: password check failed for user (root)
Oct 11 02:48:17 compute-0 sshd-session[470223]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=80.94.93.119  user=root
Oct 11 02:48:18 compute-0 nova_compute[356901]: 2025-10-11 02:48:18.024 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Updating instance_info_cache with network_info: [{"id": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "address": "fa:16:3e:2c:af:96", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.3.53", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape00931c0-3d", "ovs_interfaceid": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:48:18 compute-0 nova_compute[356901]: 2025-10-11 02:48:18.043 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:48:18 compute-0 nova_compute[356901]: 2025-10-11 02:48:18.044 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
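Note: the network_info payload cached above is plain JSON once extracted from the log line. Pulling out the fields that usually matter (VIF id, MAC, fixed IP, bound driver) from a trimmed copy of that payload:

    import json

    # trimmed copy of the network_info entry logged above (one VIF)
    network_info = json.loads('''[{"id": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6",
      "address": "fa:16:3e:2c:af:96",
      "network": {"subnets": [{"ips": [{"address": "10.100.3.53"}]}]},
      "details": {"bound_drivers": {"0": "ovn"}}}]''')

    vif = network_info[0]
    print(vif["id"], vif["address"],
          vif["network"]["subnets"][0]["ips"][0]["address"],
          vif["details"]["bound_drivers"]["0"])  # ... fa:16:3e:2c:af:96 10.100.3.53 ovn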
Oct 11 02:48:18 compute-0 ceph-mon[191930]: pgmap v2121: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2122: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:19 compute-0 sshd-session[470223]: Failed password for root from 80.94.93.119 port 23672 ssh2
Oct 11 02:48:19 compute-0 unix_chkpwd[470226]: password check failed for user (root)
Oct 11 02:48:20 compute-0 nova_compute[356901]: 2025-10-11 02:48:20.004 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:20 compute-0 nova_compute[356901]: 2025-10-11 02:48:20.354 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:20 compute-0 nova_compute[356901]: 2025-10-11 02:48:20.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:48:20 compute-0 ceph-mon[191930]: pgmap v2122: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2123: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:48:21 compute-0 sshd-session[470223]: Failed password for root from 80.94.93.119 port 23672 ssh2
Oct 11 02:48:21 compute-0 nova_compute[356901]: 2025-10-11 02:48:21.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:48:21 compute-0 nova_compute[356901]: 2025-10-11 02:48:21.932 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:48:21 compute-0 nova_compute[356901]: 2025-10-11 02:48:21.933 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:48:21 compute-0 nova_compute[356901]: 2025-10-11 02:48:21.934 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:48:21 compute-0 nova_compute[356901]: 2025-10-11 02:48:21.934 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:48:21 compute-0 nova_compute[356901]: 2025-10-11 02:48:21.935 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:48:21 compute-0 sudo[470228]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:48:21 compute-0 sudo[470228]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:21 compute-0 sudo[470228]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:22 compute-0 sudo[470254]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:48:22 compute-0 sudo[470254]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:22 compute-0 sudo[470254]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:22 compute-0 sudo[470289]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:48:22 compute-0 sudo[470289]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:22 compute-0 sudo[470289]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:22 compute-0 sudo[470323]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:48:22 compute-0 sudo[470323]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:22 compute-0 podman[470348]: 2025-10-11 02:48:22.407220926 +0000 UTC m=+0.100034144 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, container_name=openstack_network_exporter, version=9.6, architecture=x86_64, config_id=edpm, distribution-scope=public, name=ubi9-minimal, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, com.redhat.component=ubi9-minimal-container, managed_by=edpm_ansible, vcs-type=git, release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.openshift.expose-services=, io.buildah.version=1.33.7, maintainer=Red Hat, Inc., build-date=2025-08-20T13:12:41, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.tags=minimal rhel9, url=https://catalog.redhat.com/en/search?searchType=containers, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:48:22 compute-0 podman[470349]: 2025-10-11 02:48:22.428755536 +0000 UTC m=+0.119312278 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:48:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:48:22 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3466108449' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:48:22 compute-0 podman[470347]: 2025-10-11 02:48:22.445934838 +0000 UTC m=+0.132227706 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:48:22 compute-0 nova_compute[356901]: 2025-10-11 02:48:22.480 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.545s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:48:22 compute-0 unix_chkpwd[470420]: password check failed for user (root)
Oct 11 02:48:22 compute-0 nova_compute[356901]: 2025-10-11 02:48:22.593 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:48:22 compute-0 nova_compute[356901]: 2025-10-11 02:48:22.594 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:48:22 compute-0 nova_compute[356901]: 2025-10-11 02:48:22.600 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:48:22 compute-0 nova_compute[356901]: 2025-10-11 02:48:22.600 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:48:22 compute-0 nova_compute[356901]: 2025-10-11 02:48:22.607 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:48:22 compute-0 nova_compute[356901]: 2025-10-11 02:48:22.607 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:48:22 compute-0 nova_compute[356901]: 2025-10-11 02:48:22.607 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:48:22 compute-0 sudo[470323]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"} v 0) v1
Oct 11 02:48:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"}]: dispatch
Oct 11 02:48:22 compute-0 ceph-mon[191930]: pgmap v2123: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:22 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3466108449' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:48:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:48:23 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:48:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:48:23 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:48:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:48:23 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:48:23 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 162a6cce-ec5a-4727-9adc-cc781f03222d does not exist
Oct 11 02:48:23 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 76f87f34-3da8-4d30-af5e-c0a96ae85670 does not exist
Oct 11 02:48:23 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev bbfcfb74-c2a6-41e8-8c7a-ec9a07f87c8d does not exist
Oct 11 02:48:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:48:23 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:48:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:48:23 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:48:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:48:23 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:48:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2124: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:23 compute-0 sudo[470438]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:48:23 compute-0 sudo[470438]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:23 compute-0 sudo[470438]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:23 compute-0 nova_compute[356901]: 2025-10-11 02:48:23.168 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:48:23 compute-0 nova_compute[356901]: 2025-10-11 02:48:23.172 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3226MB free_disk=59.86431121826172GB free_vcpus=5 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:48:23 compute-0 nova_compute[356901]: 2025-10-11 02:48:23.172 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:48:23 compute-0 nova_compute[356901]: 2025-10-11 02:48:23.173 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:48:23 compute-0 sudo[470463]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:48:23 compute-0 sudo[470463]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:23 compute-0 sudo[470463]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:23 compute-0 nova_compute[356901]: 2025-10-11 02:48:23.258 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:48:23 compute-0 nova_compute[356901]: 2025-10-11 02:48:23.258 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 8422017b-c868-4ba2-ab1f-61d3668ca145 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:48:23 compute-0 nova_compute[356901]: 2025-10-11 02:48:23.258 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance f98d09d7-6aa0-4405-bfa0-be1f78d3911f actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:48:23 compute-0 nova_compute[356901]: 2025-10-11 02:48:23.258 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 3 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:48:23 compute-0 nova_compute[356901]: 2025-10-11 02:48:23.259 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1280MB phys_disk=59GB used_disk=4GB total_vcpus=8 used_vcpus=3 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:48:23 compute-0 nova_compute[356901]: 2025-10-11 02:48:23.316 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:48:23 compute-0 sudo[470488]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:48:23 compute-0 sudo[470488]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:23 compute-0 sudo[470488]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:23 compute-0 sudo[470514]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:48:23 compute-0 sudo[470514]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:48:23 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3555191644' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:48:23 compute-0 nova_compute[356901]: 2025-10-11 02:48:23.809 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.493s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:48:23 compute-0 nova_compute[356901]: 2025-10-11 02:48:23.818 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:48:23 compute-0 nova_compute[356901]: 2025-10-11 02:48:23.832 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:48:23 compute-0 nova_compute[356901]: 2025-10-11 02:48:23.833 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:48:23 compute-0 nova_compute[356901]: 2025-10-11 02:48:23.833 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.661s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:48:23 compute-0 podman[470597]: 2025-10-11 02:48:23.970483147 +0000 UTC m=+0.063332432 container create 694d330fbbecf82c72eb87a117bac016bad3a313e615910f49f01d78ac190491 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_jennings, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=reef)
Oct 11 02:48:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"}]: dispatch
Oct 11 02:48:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:48:23 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:48:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:48:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:48:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:48:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:48:24 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3555191644' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:48:24 compute-0 podman[470597]: 2025-10-11 02:48:23.94698976 +0000 UTC m=+0.039839045 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:48:24 compute-0 systemd[1]: Started libpod-conmon-694d330fbbecf82c72eb87a117bac016bad3a313e615910f49f01d78ac190491.scope.
Oct 11 02:48:24 compute-0 sshd-session[470223]: Failed password for root from 80.94.93.119 port 23672 ssh2
Oct 11 02:48:24 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:48:24 compute-0 podman[470597]: 2025-10-11 02:48:24.114881328 +0000 UTC m=+0.207730693 container init 694d330fbbecf82c72eb87a117bac016bad3a313e615910f49f01d78ac190491 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_jennings, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507)
Oct 11 02:48:24 compute-0 podman[470597]: 2025-10-11 02:48:24.131961355 +0000 UTC m=+0.224810660 container start 694d330fbbecf82c72eb87a117bac016bad3a313e615910f49f01d78ac190491 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_jennings, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 02:48:24 compute-0 podman[470597]: 2025-10-11 02:48:24.137655855 +0000 UTC m=+0.230505160 container attach 694d330fbbecf82c72eb87a117bac016bad3a313e615910f49f01d78ac190491 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_jennings, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:48:24 compute-0 nervous_jennings[470615]: 167 167
Oct 11 02:48:24 compute-0 systemd[1]: libpod-694d330fbbecf82c72eb87a117bac016bad3a313e615910f49f01d78ac190491.scope: Deactivated successfully.
Oct 11 02:48:24 compute-0 podman[470597]: 2025-10-11 02:48:24.142941623 +0000 UTC m=+0.235790938 container died 694d330fbbecf82c72eb87a117bac016bad3a313e615910f49f01d78ac190491 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_jennings, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:48:24 compute-0 systemd[1]: var-lib-containers-storage-overlay-a5a0cf14cc4e9f77f7be5c08340f295e25f6d33f2b608739005ac955bbf40f58-merged.mount: Deactivated successfully.
Oct 11 02:48:24 compute-0 sshd-session[470223]: Received disconnect from 80.94.93.119 port 23672:11:  [preauth]
Oct 11 02:48:24 compute-0 sshd-session[470223]: Disconnected from authenticating user root 80.94.93.119 port 23672 [preauth]
Oct 11 02:48:24 compute-0 sshd-session[470223]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=80.94.93.119  user=root
Oct 11 02:48:24 compute-0 podman[470597]: 2025-10-11 02:48:24.236934101 +0000 UTC m=+0.329783396 container remove 694d330fbbecf82c72eb87a117bac016bad3a313e615910f49f01d78ac190491 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_jennings, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:48:24 compute-0 systemd[1]: libpod-conmon-694d330fbbecf82c72eb87a117bac016bad3a313e615910f49f01d78ac190491.scope: Deactivated successfully.
Oct 11 02:48:24 compute-0 podman[470642]: 2025-10-11 02:48:24.489667496 +0000 UTC m=+0.072640130 container create 9641140cf1abc10468e6e39e9a376589d5d82d62c1819ebd51c31bd71bbf64e1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_swanson, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:48:24 compute-0 podman[470642]: 2025-10-11 02:48:24.463715638 +0000 UTC m=+0.046688292 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:48:24 compute-0 systemd[1]: Started libpod-conmon-9641140cf1abc10468e6e39e9a376589d5d82d62c1819ebd51c31bd71bbf64e1.scope.
Oct 11 02:48:24 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:48:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/da8a34788c5618424285e1c19f7d326ba82f834c0330c48b562258844abc074e/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:48:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/da8a34788c5618424285e1c19f7d326ba82f834c0330c48b562258844abc074e/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:48:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/da8a34788c5618424285e1c19f7d326ba82f834c0330c48b562258844abc074e/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:48:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/da8a34788c5618424285e1c19f7d326ba82f834c0330c48b562258844abc074e/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:48:24 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/da8a34788c5618424285e1c19f7d326ba82f834c0330c48b562258844abc074e/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:48:24 compute-0 podman[470642]: 2025-10-11 02:48:24.65173321 +0000 UTC m=+0.234705864 container init 9641140cf1abc10468e6e39e9a376589d5d82d62c1819ebd51c31bd71bbf64e1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_swanson, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:48:24 compute-0 podman[470642]: 2025-10-11 02:48:24.673010196 +0000 UTC m=+0.255982830 container start 9641140cf1abc10468e6e39e9a376589d5d82d62c1819ebd51c31bd71bbf64e1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_swanson, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 02:48:24 compute-0 podman[470642]: 2025-10-11 02:48:24.679213144 +0000 UTC m=+0.262185798 container attach 9641140cf1abc10468e6e39e9a376589d5d82d62c1819ebd51c31bd71bbf64e1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_swanson, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:48:25 compute-0 ceph-mon[191930]: pgmap v2124: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:25 compute-0 nova_compute[356901]: 2025-10-11 02:48:25.011 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:25 compute-0 unix_chkpwd[470664]: password check failed for user (root)
Oct 11 02:48:25 compute-0 sshd-session[470635]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=80.94.93.119  user=root
Oct 11 02:48:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2125: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:25 compute-0 nova_compute[356901]: 2025-10-11 02:48:25.357 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:25 compute-0 strange_swanson[470659]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:48:25 compute-0 strange_swanson[470659]: --> relative data size: 1.0
Oct 11 02:48:25 compute-0 strange_swanson[470659]: --> All data devices are unavailable
Oct 11 02:48:25 compute-0 systemd[1]: libpod-9641140cf1abc10468e6e39e9a376589d5d82d62c1819ebd51c31bd71bbf64e1.scope: Deactivated successfully.
Oct 11 02:48:25 compute-0 systemd[1]: libpod-9641140cf1abc10468e6e39e9a376589d5d82d62c1819ebd51c31bd71bbf64e1.scope: Consumed 1.082s CPU time.
Oct 11 02:48:25 compute-0 podman[470642]: 2025-10-11 02:48:25.839375987 +0000 UTC m=+1.422348671 container died 9641140cf1abc10468e6e39e9a376589d5d82d62c1819ebd51c31bd71bbf64e1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_swanson, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:48:25 compute-0 systemd[1]: var-lib-containers-storage-overlay-da8a34788c5618424285e1c19f7d326ba82f834c0330c48b562258844abc074e-merged.mount: Deactivated successfully.
Oct 11 02:48:25 compute-0 podman[470642]: 2025-10-11 02:48:25.950280623 +0000 UTC m=+1.533253267 container remove 9641140cf1abc10468e6e39e9a376589d5d82d62c1819ebd51c31bd71bbf64e1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=strange_swanson, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:48:25 compute-0 systemd[1]: libpod-conmon-9641140cf1abc10468e6e39e9a376589d5d82d62c1819ebd51c31bd71bbf64e1.scope: Deactivated successfully.
Oct 11 02:48:25 compute-0 sudo[470514]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:26 compute-0 ceph-mon[191930]: pgmap v2125: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:26 compute-0 sudo[470702]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:48:26 compute-0 sudo[470702]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:26 compute-0 sudo[470702]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:26 compute-0 sudo[470727]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:48:26 compute-0 sudo[470727]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:26 compute-0 sudo[470727]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:26 compute-0 sudo[470752]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:48:26 compute-0 sudo[470752]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:26 compute-0 sudo[470752]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:48:26 compute-0 sudo[470777]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:48:26 compute-0 sudo[470777]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:48:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:48:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:48:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:48:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:48:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:48:26 compute-0 podman[470838]: 2025-10-11 02:48:26.952968639 +0000 UTC m=+0.062763295 container create 1ca7350717bd0d862900de45e6dd0b5b36da2ef8f5f77867a246a694a35af19c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_chebyshev, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507)
Oct 11 02:48:27 compute-0 systemd[1]: Started libpod-conmon-1ca7350717bd0d862900de45e6dd0b5b36da2ef8f5f77867a246a694a35af19c.scope.
Oct 11 02:48:27 compute-0 podman[470838]: 2025-10-11 02:48:26.93069695 +0000 UTC m=+0.040491656 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:48:27 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:48:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2126: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:27 compute-0 podman[470838]: 2025-10-11 02:48:27.070875621 +0000 UTC m=+0.180670277 container init 1ca7350717bd0d862900de45e6dd0b5b36da2ef8f5f77867a246a694a35af19c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_chebyshev, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:48:27 compute-0 podman[470838]: 2025-10-11 02:48:27.081590403 +0000 UTC m=+0.191385069 container start 1ca7350717bd0d862900de45e6dd0b5b36da2ef8f5f77867a246a694a35af19c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_chebyshev, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:48:27 compute-0 podman[470838]: 2025-10-11 02:48:27.087533265 +0000 UTC m=+0.197327941 container attach 1ca7350717bd0d862900de45e6dd0b5b36da2ef8f5f77867a246a694a35af19c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_chebyshev, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:48:27 compute-0 hopeful_chebyshev[470854]: 167 167
Oct 11 02:48:27 compute-0 systemd[1]: libpod-1ca7350717bd0d862900de45e6dd0b5b36da2ef8f5f77867a246a694a35af19c.scope: Deactivated successfully.
Oct 11 02:48:27 compute-0 podman[470838]: 2025-10-11 02:48:27.092503519 +0000 UTC m=+0.202298185 container died 1ca7350717bd0d862900de45e6dd0b5b36da2ef8f5f77867a246a694a35af19c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_chebyshev, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:48:27 compute-0 systemd[1]: var-lib-containers-storage-overlay-be68b5352ad747548ea802f355f9b25a7286ddb827211904fd5e7ff925a094eb-merged.mount: Deactivated successfully.
Oct 11 02:48:27 compute-0 podman[470838]: 2025-10-11 02:48:27.158870588 +0000 UTC m=+0.268665244 container remove 1ca7350717bd0d862900de45e6dd0b5b36da2ef8f5f77867a246a694a35af19c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hopeful_chebyshev, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:48:27 compute-0 systemd[1]: libpod-conmon-1ca7350717bd0d862900de45e6dd0b5b36da2ef8f5f77867a246a694a35af19c.scope: Deactivated successfully.
Oct 11 02:48:27 compute-0 sshd-session[470635]: Failed password for root from 80.94.93.119 port 48210 ssh2
Oct 11 02:48:27 compute-0 podman[470876]: 2025-10-11 02:48:27.468098237 +0000 UTC m=+0.105914631 container create f17fa7b212617df68f664007158dbfa5c94a79f5d553c92079c17c9300e193e1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_cerf, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:48:27 compute-0 podman[470876]: 2025-10-11 02:48:27.420766124 +0000 UTC m=+0.058582598 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:48:27 compute-0 systemd[1]: Started libpod-conmon-f17fa7b212617df68f664007158dbfa5c94a79f5d553c92079c17c9300e193e1.scope.
Oct 11 02:48:27 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:48:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1d29720b6c54b10802b928bbedea57e685cdda65230ee80330f709f9e313df08/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:48:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1d29720b6c54b10802b928bbedea57e685cdda65230ee80330f709f9e313df08/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:48:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1d29720b6c54b10802b928bbedea57e685cdda65230ee80330f709f9e313df08/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:48:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1d29720b6c54b10802b928bbedea57e685cdda65230ee80330f709f9e313df08/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:48:27 compute-0 podman[470876]: 2025-10-11 02:48:27.638674279 +0000 UTC m=+0.276490683 container init f17fa7b212617df68f664007158dbfa5c94a79f5d553c92079c17c9300e193e1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_cerf, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 02:48:27 compute-0 podman[470876]: 2025-10-11 02:48:27.657551927 +0000 UTC m=+0.295368311 container start f17fa7b212617df68f664007158dbfa5c94a79f5d553c92079c17c9300e193e1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_cerf, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:48:27 compute-0 podman[470876]: 2025-10-11 02:48:27.661963814 +0000 UTC m=+0.299780218 container attach f17fa7b212617df68f664007158dbfa5c94a79f5d553c92079c17c9300e193e1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_cerf, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, ceph=True, org.label-schema.license=GPLv2)
Oct 11 02:48:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:48:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/4241080170' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:48:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:48:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/4241080170' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:48:28 compute-0 ceph-mon[191930]: pgmap v2126: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/4241080170' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:48:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/4241080170' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:48:28 compute-0 unix_chkpwd[470896]: password check failed for user (root)
Oct 11 02:48:28 compute-0 agitated_cerf[470891]: {
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:     "0": [
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:         {
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "devices": [
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "/dev/loop3"
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             ],
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "lv_name": "ceph_lv0",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "lv_size": "21470642176",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "name": "ceph_lv0",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "tags": {
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.cluster_name": "ceph",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.crush_device_class": "",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.encrypted": "0",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.osd_id": "0",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.type": "block",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.vdo": "0"
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             },
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "type": "block",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "vg_name": "ceph_vg0"
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:         }
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:     ],
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:     "1": [
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:         {
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "devices": [
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "/dev/loop4"
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             ],
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "lv_name": "ceph_lv1",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "lv_size": "21470642176",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "name": "ceph_lv1",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "tags": {
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.cluster_name": "ceph",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.crush_device_class": "",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.encrypted": "0",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.osd_id": "1",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.type": "block",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.vdo": "0"
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             },
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "type": "block",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "vg_name": "ceph_vg1"
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:         }
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:     ],
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:     "2": [
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:         {
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "devices": [
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "/dev/loop5"
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             ],
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "lv_name": "ceph_lv2",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "lv_size": "21470642176",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "name": "ceph_lv2",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "tags": {
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.cluster_name": "ceph",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.crush_device_class": "",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.encrypted": "0",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.osd_id": "2",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.type": "block",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:                 "ceph.vdo": "0"
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             },
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "type": "block",
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:             "vg_name": "ceph_vg2"
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:         }
Oct 11 02:48:28 compute-0 agitated_cerf[470891]:     ]
Oct 11 02:48:28 compute-0 agitated_cerf[470891]: }
Oct 11 02:48:28 compute-0 systemd[1]: libpod-f17fa7b212617df68f664007158dbfa5c94a79f5d553c92079c17c9300e193e1.scope: Deactivated successfully.
Oct 11 02:48:28 compute-0 podman[470876]: 2025-10-11 02:48:28.536460231 +0000 UTC m=+1.174276635 container died f17fa7b212617df68f664007158dbfa5c94a79f5d553c92079c17c9300e193e1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_cerf, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 02:48:28 compute-0 systemd[1]: var-lib-containers-storage-overlay-1d29720b6c54b10802b928bbedea57e685cdda65230ee80330f709f9e313df08-merged.mount: Deactivated successfully.
Oct 11 02:48:28 compute-0 podman[470876]: 2025-10-11 02:48:28.625020535 +0000 UTC m=+1.262836919 container remove f17fa7b212617df68f664007158dbfa5c94a79f5d553c92079c17c9300e193e1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_cerf, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0)
Oct 11 02:48:28 compute-0 systemd[1]: libpod-conmon-f17fa7b212617df68f664007158dbfa5c94a79f5d553c92079c17c9300e193e1.scope: Deactivated successfully.
Oct 11 02:48:28 compute-0 sudo[470777]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:28 compute-0 sudo[470913]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:48:28 compute-0 sudo[470913]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:28 compute-0 sudo[470913]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:28 compute-0 nova_compute[356901]: 2025-10-11 02:48:28.830 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:48:28 compute-0 sudo[470938]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:48:28 compute-0 sudo[470938]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:28 compute-0 sudo[470938]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:29 compute-0 sudo[470963]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:48:29 compute-0 sudo[470963]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:29 compute-0 sudo[470963]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2127: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:29 compute-0 sudo[470988]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:48:29 compute-0 sudo[470988]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:29 compute-0 sshd-session[470635]: Failed password for root from 80.94.93.119 port 48210 ssh2
Oct 11 02:48:29 compute-0 podman[471051]: 2025-10-11 02:48:29.685207161 +0000 UTC m=+0.077426302 container create 56a7e41211b1805c4fd4d11b5f6bb1ad2d064ed8cef3e8692d338d1d5061850f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_dhawan, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, ceph=True, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:48:29 compute-0 podman[157119]: time="2025-10-11T02:48:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:48:29 compute-0 systemd[1]: Started libpod-conmon-56a7e41211b1805c4fd4d11b5f6bb1ad2d064ed8cef3e8692d338d1d5061850f.scope.
Oct 11 02:48:29 compute-0 podman[471051]: 2025-10-11 02:48:29.658541908 +0000 UTC m=+0.050761059 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:48:29 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:48:29 compute-0 podman[471051]: 2025-10-11 02:48:29.847612713 +0000 UTC m=+0.239831905 container init 56a7e41211b1805c4fd4d11b5f6bb1ad2d064ed8cef3e8692d338d1d5061850f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_dhawan, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 02:48:29 compute-0 podman[471051]: 2025-10-11 02:48:29.861167698 +0000 UTC m=+0.253386849 container start 56a7e41211b1805c4fd4d11b5f6bb1ad2d064ed8cef3e8692d338d1d5061850f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_dhawan, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:48:29 compute-0 podman[471051]: 2025-10-11 02:48:29.866696109 +0000 UTC m=+0.258915250 container attach 56a7e41211b1805c4fd4d11b5f6bb1ad2d064ed8cef3e8692d338d1d5061850f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_dhawan, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:48:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:48:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 48873 "" "Go-http-client/1.1"
Oct 11 02:48:29 compute-0 nervous_dhawan[471067]: 167 167
Oct 11 02:48:29 compute-0 systemd[1]: libpod-56a7e41211b1805c4fd4d11b5f6bb1ad2d064ed8cef3e8692d338d1d5061850f.scope: Deactivated successfully.
Oct 11 02:48:29 compute-0 unix_chkpwd[471072]: password check failed for user (root)
Oct 11 02:48:29 compute-0 conmon[471067]: conmon 56a7e41211b1805c4fd4 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-56a7e41211b1805c4fd4d11b5f6bb1ad2d064ed8cef3e8692d338d1d5061850f.scope/container/memory.events
Oct 11 02:48:29 compute-0 podman[471051]: 2025-10-11 02:48:29.91695612 +0000 UTC m=+0.309175261 container died 56a7e41211b1805c4fd4d11b5f6bb1ad2d064ed8cef3e8692d338d1d5061850f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_dhawan, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:48:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:48:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9855 "" "Go-http-client/1.1"
Oct 11 02:48:29 compute-0 systemd[1]: var-lib-containers-storage-overlay-24133988c1ef75acb53d0b529d73dabc91379290b20d3ff5e21a21baceee7b16-merged.mount: Deactivated successfully.
Oct 11 02:48:29 compute-0 podman[471051]: 2025-10-11 02:48:29.978133968 +0000 UTC m=+0.370353109 container remove 56a7e41211b1805c4fd4d11b5f6bb1ad2d064ed8cef3e8692d338d1d5061850f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nervous_dhawan, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default)
Oct 11 02:48:30 compute-0 systemd[1]: libpod-conmon-56a7e41211b1805c4fd4d11b5f6bb1ad2d064ed8cef3e8692d338d1d5061850f.scope: Deactivated successfully.
Oct 11 02:48:30 compute-0 nova_compute[356901]: 2025-10-11 02:48:30.020 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:30 compute-0 ceph-mon[191930]: pgmap v2127: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:30 compute-0 podman[471091]: 2025-10-11 02:48:30.290964252 +0000 UTC m=+0.075643001 container create a912aaf2ca031273aa080ae02257ddf1c26f797ac2118b9dc344972555fa2b18 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_volhard, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 02:48:30 compute-0 podman[471091]: 2025-10-11 02:48:30.263112794 +0000 UTC m=+0.047791543 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:48:30 compute-0 nova_compute[356901]: 2025-10-11 02:48:30.357 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:30 compute-0 systemd[1]: Started libpod-conmon-a912aaf2ca031273aa080ae02257ddf1c26f797ac2118b9dc344972555fa2b18.scope.
Oct 11 02:48:30 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:48:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a3d053439f17378b7ce22009781dd33a335340158864b517d435fe07333ac008/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:48:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a3d053439f17378b7ce22009781dd33a335340158864b517d435fe07333ac008/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:48:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a3d053439f17378b7ce22009781dd33a335340158864b517d435fe07333ac008/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:48:30 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a3d053439f17378b7ce22009781dd33a335340158864b517d435fe07333ac008/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:48:30 compute-0 podman[471091]: 2025-10-11 02:48:30.440967821 +0000 UTC m=+0.225646550 container init a912aaf2ca031273aa080ae02257ddf1c26f797ac2118b9dc344972555fa2b18 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_volhard, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:48:30 compute-0 podman[471091]: 2025-10-11 02:48:30.467896099 +0000 UTC m=+0.252574798 container start a912aaf2ca031273aa080ae02257ddf1c26f797ac2118b9dc344972555fa2b18 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_volhard, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:48:30 compute-0 podman[471091]: 2025-10-11 02:48:30.472294743 +0000 UTC m=+0.256973452 container attach a912aaf2ca031273aa080ae02257ddf1c26f797ac2118b9dc344972555fa2b18 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_volhard, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Oct 11 02:48:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2128: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:48:31 compute-0 sshd-session[470635]: Failed password for root from 80.94.93.119 port 48210 ssh2
Oct 11 02:48:31 compute-0 openstack_network_exporter[374316]: ERROR   02:48:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:48:31 compute-0 openstack_network_exporter[374316]: ERROR   02:48:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:48:31 compute-0 openstack_network_exporter[374316]: ERROR   02:48:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:48:31 compute-0 openstack_network_exporter[374316]: ERROR   02:48:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:48:31 compute-0 openstack_network_exporter[374316]: ERROR   02:48:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:48:31 compute-0 lucid_volhard[471106]: {
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:         "osd_id": 1,
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:         "type": "bluestore"
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:     },
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:         "osd_id": 2,
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:         "type": "bluestore"
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:     },
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:         "osd_id": 0,
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:         "type": "bluestore"
Oct 11 02:48:31 compute-0 lucid_volhard[471106]:     }
Oct 11 02:48:31 compute-0 lucid_volhard[471106]: }
Oct 11 02:48:31 compute-0 systemd[1]: libpod-a912aaf2ca031273aa080ae02257ddf1c26f797ac2118b9dc344972555fa2b18.scope: Deactivated successfully.
Oct 11 02:48:31 compute-0 systemd[1]: libpod-a912aaf2ca031273aa080ae02257ddf1c26f797ac2118b9dc344972555fa2b18.scope: Consumed 1.062s CPU time.
Oct 11 02:48:31 compute-0 podman[471091]: 2025-10-11 02:48:31.554350396 +0000 UTC m=+1.339029125 container died a912aaf2ca031273aa080ae02257ddf1c26f797ac2118b9dc344972555fa2b18 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_volhard, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:48:31 compute-0 sshd-session[470635]: Received disconnect from 80.94.93.119 port 48210:11:  [preauth]
Oct 11 02:48:31 compute-0 sshd-session[470635]: Disconnected from authenticating user root 80.94.93.119 port 48210 [preauth]
Oct 11 02:48:31 compute-0 sshd-session[470635]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=80.94.93.119  user=root
Oct 11 02:48:31 compute-0 systemd[1]: var-lib-containers-storage-overlay-a3d053439f17378b7ce22009781dd33a335340158864b517d435fe07333ac008-merged.mount: Deactivated successfully.
Oct 11 02:48:31 compute-0 podman[471091]: 2025-10-11 02:48:31.647983252 +0000 UTC m=+1.432661961 container remove a912aaf2ca031273aa080ae02257ddf1c26f797ac2118b9dc344972555fa2b18 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_volhard, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 02:48:31 compute-0 systemd[1]: libpod-conmon-a912aaf2ca031273aa080ae02257ddf1c26f797ac2118b9dc344972555fa2b18.scope: Deactivated successfully.
Oct 11 02:48:31 compute-0 sudo[470988]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:48:31 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:48:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:48:31 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:48:31 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 781b934e-52f5-41a6-8f15-4df087600e59 does not exist
Oct 11 02:48:31 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 5faeda86-1390-4781-9302-24eff41a6e06 does not exist
Oct 11 02:48:31 compute-0 podman[471139]: 2025-10-11 02:48:31.724198172 +0000 UTC m=+0.126488575 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.display-name=Red Hat Universal Base Image 9, distribution-scope=public, name=ubi9, vendor=Red Hat, Inc., io.openshift.tags=base rhel9, summary=Provides the latest release of Red Hat Universal Base Image 9., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.openshift.expose-services=, vcs-type=git, release-0.7.12=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-container, version=9.4, container_name=kepler, io.buildah.version=1.29.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., managed_by=edpm_ansible, config_id=edpm, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, build-date=2024-09-18T21:23:30, architecture=x86_64, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, release=1214.1726694543)
Oct 11 02:48:31 compute-0 sudo[471169]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:48:31 compute-0 sudo[471169]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:31 compute-0 sudo[471169]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:31 compute-0 sudo[471194]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:48:31 compute-0 sudo[471194]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:48:31 compute-0 sudo[471194]: pam_unix(sudo:session): session closed for user root
Oct 11 02:48:32 compute-0 ceph-mon[191930]: pgmap v2128: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:32 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:48:32 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:48:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2129: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:34 compute-0 ceph-mon[191930]: pgmap v2129: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #102. Immutable memtables: 0.
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:48:34.744824) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 59] Flushing memtable with next log file: 102
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150914744865, "job": 59, "event": "flush_started", "num_memtables": 1, "num_entries": 1179, "num_deletes": 251, "total_data_size": 1821076, "memory_usage": 1856752, "flush_reason": "Manual Compaction"}
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 59] Level-0 flush table #103: started
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150914757824, "cf_name": "default", "job": 59, "event": "table_file_creation", "file_number": 103, "file_size": 1782015, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 42831, "largest_seqno": 44009, "table_properties": {"data_size": 1776300, "index_size": 3110, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1541, "raw_key_size": 12022, "raw_average_key_size": 19, "raw_value_size": 1764901, "raw_average_value_size": 2907, "num_data_blocks": 140, "num_entries": 607, "num_filter_entries": 607, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760150797, "oldest_key_time": 1760150797, "file_creation_time": 1760150914, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 103, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 59] Flush lasted 13052 microseconds, and 5022 cpu microseconds.
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:48:34.757876) [db/flush_job.cc:967] [default] [JOB 59] Level-0 flush table #103: 1782015 bytes OK
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:48:34.757893) [db/memtable_list.cc:519] [default] Level-0 commit table #103 started
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:48:34.759218) [db/memtable_list.cc:722] [default] Level-0 commit table #103: memtable #1 done
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:48:34.759268) EVENT_LOG_v1 {"time_micros": 1760150914759264, "job": 59, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:48:34.759285) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 59] Try to delete WAL files size 1815668, prev total WAL file size 1815668, number of live WAL files 2.
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000099.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:48:34.759974) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730034303136' seq:72057594037927935, type:22 .. '7061786F730034323638' seq:0, type:0; will stop at (end)
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 60] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 59 Base level 0, inputs: [103(1740KB)], [101(9094KB)]
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150914760002, "job": 60, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [103], "files_L6": [101], "score": -1, "input_data_size": 11094913, "oldest_snapshot_seqno": -1}
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 60] Generated table #104: 5922 keys, 9399181 bytes, temperature: kUnknown
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150914803296, "cf_name": "default", "job": 60, "event": "table_file_creation", "file_number": 104, "file_size": 9399181, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 9359270, "index_size": 23993, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 14853, "raw_key_size": 154151, "raw_average_key_size": 26, "raw_value_size": 9251847, "raw_average_value_size": 1562, "num_data_blocks": 957, "num_entries": 5922, "num_filter_entries": 5922, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760150914, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 104, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:48:34.803610) [db/compaction/compaction_job.cc:1663] [default] [JOB 60] Compacted 1@0 + 1@6 files to L6 => 9399181 bytes
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:48:34.805328) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 255.7 rd, 216.6 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(1.7, 8.9 +0.0 blob) out(9.0 +0.0 blob), read-write-amplify(11.5) write-amplify(5.3) OK, records in: 6436, records dropped: 514 output_compression: NoCompression
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:48:34.805351) EVENT_LOG_v1 {"time_micros": 1760150914805336, "job": 60, "event": "compaction_finished", "compaction_time_micros": 43393, "compaction_time_cpu_micros": 22188, "output_level": 6, "num_output_files": 1, "total_output_size": 9399181, "num_input_records": 6436, "num_output_records": 5922, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000103.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150914805740, "job": 60, "event": "table_file_deletion", "file_number": 103}
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000101.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150914807219, "job": 60, "event": "table_file_deletion", "file_number": 101}
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:48:34.759890) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:48:34.807404) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:48:34.807409) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:48:34.807410) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:48:34.807412) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:48:34 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:48:34.807413) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:48:35 compute-0 nova_compute[356901]: 2025-10-11 02:48:35.025 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2130: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:35 compute-0 nova_compute[356901]: 2025-10-11 02:48:35.361 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:48:36 compute-0 ceph-mon[191930]: pgmap v2130: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2131: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:37 compute-0 podman[471222]: 2025-10-11 02:48:37.210117562 +0000 UTC m=+0.084352905 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_metadata_agent, managed_by=edpm_ansible, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_metadata_agent)
Oct 11 02:48:37 compute-0 podman[471219]: 2025-10-11 02:48:37.218961918 +0000 UTC m=+0.105310886 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:48:37 compute-0 podman[471221]: 2025-10-11 02:48:37.239745118 +0000 UTC m=+0.114162273 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, managed_by=edpm_ansible, io.buildah.version=1.41.4)
Oct 11 02:48:37 compute-0 podman[471220]: 2025-10-11 02:48:37.266673506 +0000 UTC m=+0.157286664 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, container_name=ovn_controller, managed_by=edpm_ansible, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:48:38 compute-0 ceph-mon[191930]: pgmap v2131: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2132: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:40 compute-0 nova_compute[356901]: 2025-10-11 02:48:40.029 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:40 compute-0 nova_compute[356901]: 2025-10-11 02:48:40.365 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:40 compute-0 ceph-mon[191930]: pgmap v2132: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2133: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:48:42 compute-0 ceph-mon[191930]: pgmap v2133: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2134: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:44 compute-0 podman[471301]: 2025-10-11 02:48:44.794811018 +0000 UTC m=+0.094102328 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, container_name=iscsid)
Oct 11 02:48:44 compute-0 ceph-mon[191930]: pgmap v2134: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:44 compute-0 podman[471300]: 2025-10-11 02:48:44.81583319 +0000 UTC m=+0.123375763 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=multipathd, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0)
Oct 11 02:48:45 compute-0 nova_compute[356901]: 2025-10-11 02:48:45.035 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2135: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:45 compute-0 nova_compute[356901]: 2025-10-11 02:48:45.370 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:48:46 compute-0 ceph-mon[191930]: pgmap v2135: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2136: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:48 compute-0 ceph-mon[191930]: pgmap v2136: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2137: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:50 compute-0 nova_compute[356901]: 2025-10-11 02:48:50.041 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:50 compute-0 nova_compute[356901]: 2025-10-11 02:48:50.373 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:50 compute-0 ceph-mon[191930]: pgmap v2137: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2138: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:48:52 compute-0 ceph-mon[191930]: pgmap v2138: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:48:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2139: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 5.0 KiB/s rd, 85 B/s wr, 1 op/s
Oct 11 02:48:53 compute-0 podman[471337]: 2025-10-11 02:48:53.188335173 +0000 UTC m=+0.088298630 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:48:53 compute-0 podman[471339]: 2025-10-11 02:48:53.202499514 +0000 UTC m=+0.087982105 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:48:53 compute-0 podman[471338]: 2025-10-11 02:48:53.22590802 +0000 UTC m=+0.105388709 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, release=1755695350, name=ubi9-minimal, io.openshift.expose-services=, vendor=Red Hat, Inc., build-date=2025-08-20T13:12:41, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, version=9.6, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-type=git, config_id=edpm, maintainer=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=minimal rhel9, com.redhat.component=ubi9-minimal-container, io.buildah.version=1.33.7, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, managed_by=edpm_ansible, container_name=openstack_network_exporter)
Oct 11 02:48:54 compute-0 ceph-mon[191930]: pgmap v2139: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 5.0 KiB/s rd, 85 B/s wr, 1 op/s
Oct 11 02:48:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:48:54.879 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:48:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:48:54.880 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:48:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:48:54.881 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:48:55 compute-0 nova_compute[356901]: 2025-10-11 02:48:55.046 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2140: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 16 KiB/s rd, 85 B/s wr, 3 op/s
Oct 11 02:48:55 compute-0 nova_compute[356901]: 2025-10-11 02:48:55.378 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:48:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:48:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:48:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:48:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:48:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:48:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:48:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:48:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:48:56
Oct 11 02:48:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:48:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:48:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['.mgr', 'cephfs.cephfs.meta', 'cephfs.cephfs.data', 'vms', '.rgw.root', 'volumes', 'images', 'backups', 'default.rgw.log', 'default.rgw.meta', 'default.rgw.control']
Oct 11 02:48:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:48:56 compute-0 ceph-mon[191930]: pgmap v2140: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 16 KiB/s rd, 85 B/s wr, 3 op/s
Oct 11 02:48:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2141: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 43 KiB/s rd, 170 B/s wr, 4 op/s
Oct 11 02:48:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:48:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:48:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:48:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:48:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:48:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:48:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:48:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:48:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:48:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:48:58 compute-0 ceph-mon[191930]: pgmap v2141: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 43 KiB/s rd, 170 B/s wr, 4 op/s
Oct 11 02:48:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2142: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 43 KiB/s rd, 255 B/s wr, 4 op/s
Oct 11 02:48:59 compute-0 podman[157119]: time="2025-10-11T02:48:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:48:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:48:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:48:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:48:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9556 "" "Go-http-client/1.1"
Oct 11 02:49:00 compute-0 nova_compute[356901]: 2025-10-11 02:49:00.052 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:00 compute-0 nova_compute[356901]: 2025-10-11 02:49:00.378 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:00 compute-0 ceph-mon[191930]: pgmap v2142: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 43 KiB/s rd, 255 B/s wr, 4 op/s
Oct 11 02:49:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2143: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 43 KiB/s rd, 8.6 KiB/s wr, 5 op/s
Oct 11 02:49:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:49:01 compute-0 openstack_network_exporter[374316]: ERROR   02:49:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:49:01 compute-0 openstack_network_exporter[374316]: ERROR   02:49:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:49:01 compute-0 openstack_network_exporter[374316]: ERROR   02:49:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:49:01 compute-0 openstack_network_exporter[374316]: ERROR   02:49:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:49:01 compute-0 openstack_network_exporter[374316]: ERROR   02:49:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:49:02 compute-0 podman[471397]: 2025-10-11 02:49:02.248843688 +0000 UTC m=+0.137736117 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.expose-services=, architecture=x86_64, io.buildah.version=1.29.0, vendor=Red Hat, Inc., version=9.4, name=ubi9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.openshift.tags=base rhel9, vcs-type=git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1214.1726694543, release-0.7.12=, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.component=ubi9-container, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., container_name=kepler, managed_by=edpm_ansible, build-date=2024-09-18T21:23:30, distribution-scope=public, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 02:49:02 compute-0 nova_compute[356901]: 2025-10-11 02:49:02.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:49:02 compute-0 ceph-mon[191930]: pgmap v2143: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 43 KiB/s rd, 8.6 KiB/s wr, 5 op/s
Oct 11 02:49:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2144: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 43 KiB/s rd, 8.6 KiB/s wr, 5 op/s
Oct 11 02:49:04 compute-0 ceph-mon[191930]: pgmap v2144: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 43 KiB/s rd, 8.6 KiB/s wr, 5 op/s
Oct 11 02:49:05 compute-0 nova_compute[356901]: 2025-10-11 02:49:05.056 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2145: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 38 KiB/s rd, 8.5 KiB/s wr, 3 op/s
Oct 11 02:49:05 compute-0 nova_compute[356901]: 2025-10-11 02:49:05.382 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:49:06 compute-0 ceph-mon[191930]: pgmap v2145: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 38 KiB/s rd, 8.5 KiB/s wr, 3 op/s
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2146: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 27 KiB/s rd, 8.5 KiB/s wr, 1 op/s
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.002068160590447353 of space, bias 1.0, pg target 0.620448177134206 quantized to 32 (current 32)
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00125203744627857 of space, bias 1.0, pg target 0.375611233883571 quantized to 32 (current 32)
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:49:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:49:07 compute-0 nova_compute[356901]: 2025-10-11 02:49:07.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:49:08 compute-0 podman[471420]: 2025-10-11 02:49:08.236415238 +0000 UTC m=+0.104235000 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.build-date=20251007, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, config_id=edpm)
Oct 11 02:49:08 compute-0 podman[471418]: 2025-10-11 02:49:08.245192559 +0000 UTC m=+0.126771533 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:49:08 compute-0 podman[471424]: 2025-10-11 02:49:08.281556577 +0000 UTC m=+0.139414338 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009, container_name=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:49:08 compute-0 podman[471419]: 2025-10-11 02:49:08.315687541 +0000 UTC m=+0.188292605 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, org.label-schema.vendor=CentOS, config_id=ovn_controller, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_managed=true)
Oct 11 02:49:08 compute-0 ceph-mon[191930]: pgmap v2146: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 27 KiB/s rd, 8.5 KiB/s wr, 1 op/s
Oct 11 02:49:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2147: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 8.4 KiB/s wr, 0 op/s
Oct 11 02:49:10 compute-0 nova_compute[356901]: 2025-10-11 02:49:10.060 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:10 compute-0 nova_compute[356901]: 2025-10-11 02:49:10.385 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:10 compute-0 ceph-mon[191930]: pgmap v2147: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 0 B/s rd, 8.4 KiB/s wr, 0 op/s
Oct 11 02:49:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2148: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s rd, 8.3 KiB/s wr, 0 op/s
Oct 11 02:49:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:49:12 compute-0 ceph-mon[191930]: pgmap v2148: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s rd, 8.3 KiB/s wr, 0 op/s
Oct 11 02:49:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2149: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s rd, 0 op/s
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.871 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads to execute them. Therefore, one can expect the process to be longer than the expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.872 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.873 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.879 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc68fe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.883 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '8422017b-c868-4ba2-ab1f-61d3668ca145', 'name': 'te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c', 'flavor': {'id': '6dff30d1-85df-4e9c-9163-a20ba47bb0c7', 'name': 'm1.nano', 'vcpus': 1, 'ram': 128, 'disk': 1, 'ephemeral': 0, 'swap': 0}, 'image': {'id': '2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-0000000e', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'user_id': 'f66a606299944d53a40f21e81c791d70', 'hostId': 'cea8816d446065ba50379057f72b942db7e204c60c4530591bc7d0be', 'status': 'active', 'metadata': {'metering.server_group': '44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.887 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': 'f98d09d7-6aa0-4405-bfa0-be1f78d3911f', 'name': 'te-0512306-asg-am4iabdjybzp-yj44h76hdzhi-bejrsw3xgi4q', 'flavor': {'id': '6dff30d1-85df-4e9c-9163-a20ba47bb0c7', 'name': 'm1.nano', 'vcpus': 1, 'ram': 128, 'disk': 1, 'ephemeral': 0, 'swap': 0}, 'image': {'id': '2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-0000000f', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'user_id': 'f66a606299944d53a40f21e81c791d70', 'hostId': 'cea8816d446065ba50379057f72b942db7e204c60c4530591bc7d0be', 'status': 'active', 'metadata': {'metering.server_group': '44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.891 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.891 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.892 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.892 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
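The coordination check above is a no-op here (group name [None], no hashrings), so every pollster runs unpartitioned on this agent. When a source does require coordination, membership in a hash ring decides which agent polls which resource; a toy version of that membership test (not tooz's actual ring implementation):

```python
import hashlib

def owns(resource_id: str, my_node: str, all_nodes: list[str]) -> bool:
    # Toy hashring membership test: an agent in a coordinated source only
    # polls resources that hash onto its own node. Real deployments use a
    # proper consistent-hash ring (tooz); this only shows the idea.
    nodes = sorted(all_nodes)
    idx = int(hashlib.md5(resource_id.encode()).hexdigest(), 16) % len(nodes)
    return nodes[idx] == my_node

# With a single uncoordinated agent, every resource is "owned" locally:
print(owns('8422017b-c868-4ba2-ab1f-61d3668ca145', 'compute-0', ['compute-0']))
```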
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.892 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.893 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:49:13.892563) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
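The heartbeat pair above shows the division of labor: the polling thread (14) queues a heartbeat for the meter, and the status thread (12) records the timestamp. Reduced to its essentials, that is a timestamped map keyed by pollster name; a sketch under the assumption of a plain dict store (only the method names come from the log):

```python
from datetime import datetime, timezone

heartbeats = {}  # pollster name -> timestamp of its last poll

def heartbeat(name):
    # Logged as "Pollster heartbeat update: <name>" by the polling thread.
    _update_status(name, datetime.now(timezone.utc))

def _update_status(name, ts):
    # Logged as "Updated heartbeat for <name> (<ts>)" by the status thread.
    heartbeats[name] = ts
```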
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.900 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.bytes volume: 1820 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.905 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.bytes volume: 2276 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.910 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2856 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.911 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
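The three volumes above are cumulative per-vNIC byte counters, one sample per instance. With the libvirt Python bindings the same number comes from `virDomain.interfaceStats()`, whose 8-tuple starts with rx_bytes; a hedged sketch (the connection URI and tap device name are assumptions, the domain name is from the discovery record above):

```python
# Sketch: read the cumulative rx-byte counter behind a
# network.incoming.bytes sample via libvirt.
import libvirt

conn = libvirt.open('qemu:///system')            # URI assumed
dom = conn.lookupByName('instance-0000000e')     # name from the log
# Returns (rx_bytes, rx_packets, rx_errs, rx_drop,
#          tx_bytes, tx_packets, tx_errs, tx_drop)
rx_bytes = dom.interfaceStats('tap0')[0]         # device name assumed
print('network.incoming.bytes volume:', rx_bytes)
conn.close()
```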
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.912 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.912 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.912 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.912 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.913 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.913 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets volume: 31 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.913 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:49:13.912932) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.914 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.packets volume: 16 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.914 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 24 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.915 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.915 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.915 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.916 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.916 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.916 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.916 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.917 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.917 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.918 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.919 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.919 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.919 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.919 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.919 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.920 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.920 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.921 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.922 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.922 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.923 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:49:13.916558) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.922 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.923 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.923 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:49:13.919941) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.923 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.924 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:49:13.923763) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.923 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.939 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.939 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.capacity volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.961 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.961 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.capacity volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.985 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.985 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.985 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.986 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
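disk.device.capacity emits one sample per attached block device, which is why each instance shows several lines: the repeated 1073741824 values are exactly the 1 GiB flavor disks, test_0's extra 1 GiB entry matches its ephemeral disk, and the small ~500 KB devices are most likely config drives. Per device, libvirt reports this figure as the first element of `virDomain.blockInfo()` (sketch; URI and device name are assumptions):

```python
# Sketch: per-device capacity as libvirt reports it, matching the
# 1073741824-byte (1 GiB) disk.device.capacity samples above.
import libvirt

conn = libvirt.open('qemu:///system')            # URI assumed
dom = conn.lookupByName('instance-00000001')     # test_0, from the log
capacity, allocation, physical = dom.blockInfo('vda')  # device assumed
print('disk.device.capacity volume:', capacity)
conn.close()
```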
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.986 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.986 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.987 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.987 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.987 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:13.988 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:49:13.987187) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.013 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.bytes volume: 29657600 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.014 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.bytes volume: 299326 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.046 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.bytes volume: 30145536 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.047 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.bytes volume: 246078 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.086 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.087 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.087 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.088 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.088 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.088 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.088 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.088 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.089 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.089 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.latency volume: 2082910661 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.089 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.latency volume: 143173838 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.089 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.latency volume: 1934915770 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.090 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.latency volume: 164304713 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.090 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.090 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.091 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.091 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
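The disk.device.read.latency volumes run into the billions because they are cumulative device read times in nanoseconds, not per-request figures. libvirt exposes them through `virDomain.blockStatsFlags()`, whose result dict carries rd_total_times in ns (sketch, same assumed URI and device as above):

```python
# Sketch: cumulative read time in nanoseconds, the counter behind the
# ~2e9 disk.device.read.latency volumes above.
import libvirt

conn = libvirt.open('qemu:///system')        # URI assumed
dom = conn.lookupByName('instance-00000001')
stats = dom.blockStatsFlags('vda')           # dict of extended block stats
print('disk.device.read.latency volume:', stats['rd_total_times'])
conn.close()
```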
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.091 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.092 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.092 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.092 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.092 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.092 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:49:14.089026) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.092 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.requests volume: 1067 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.093 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.requests volume: 120 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.093 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.requests volume: 1092 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.093 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.requests volume: 107 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.094 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.094 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.094 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.095 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.095 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.095 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.095 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.096 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.096 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.096 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.096 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.usage volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.096 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:49:14.092665) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.097 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.097 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:49:14.096133) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.097 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.usage volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.097 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.097 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.098 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.098 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.099 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.099 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.099 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.099 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.099 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.099 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.bytes volume: 73015296 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.100 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.100 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.bytes volume: 72847360 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.100 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.101 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.101 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.101 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.102 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.102 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.102 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.102 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.102 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.102 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.102 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.latency volume: 7671012150 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.103 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.103 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.latency volume: 7938162731 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.103 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.104 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.104 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.104 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.105 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.105 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.105 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.105 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.106 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.106 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.107 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:49:14.099626) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.107 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:49:14.102817) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.107 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:49:14.106112) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.131 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.160 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.180 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.181 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
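All three power.state samples are 1 because the volume is libvirt's raw domain state code and every guest here is running (VIR_DOMAIN_RUNNING == 1). The mapping is a direct read of `virDomain.state()` (sketch; URI assumed, domain name from the log):

```python
# Sketch: power.state is the libvirt domain state code; 1 means
# VIR_DOMAIN_RUNNING, matching every sample above.
import libvirt

conn = libvirt.open('qemu:///system')        # URI assumed
dom = conn.lookupByName('instance-0000000e')
state, reason = dom.state()
print('power.state volume:', state)          # 1 for a running guest
conn.close()
```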
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.181 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.182 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.182 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.182 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.182 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.183 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.requests volume: 316 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.183 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.184 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.requests volume: 279 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.185 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.185 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.186 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.187 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:49:14.182705) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.187 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.189 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.189 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.189 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.190 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.190 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.190 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.191 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.191 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.bytes.delta volume: 630 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.192 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.193 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
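Unlike the cumulative network.incoming.bytes meter polled at the start of this cycle, the .delta variant reports only the change since the previous cycle, so a zero volume means the counter did not move. A minimal cache-and-subtract sketch (the cache layout is an assumption):

```python
_last = {}  # (instance_id, meter) -> previous cumulative reading

def delta_sample(instance_id, meter, current):
    # First readings and counter resets (e.g. a guest reboot) yield 0,
    # matching the zero deltas in the samples above.
    key = (instance_id, meter)
    prev = _last.get(key)
    _last[key] = current
    if prev is None or current < prev:
        return 0
    return current - prev

# e.g. 1646 -> 2276 cumulative bytes across cycles gives the 630-byte
# delta logged above for f98d09d7 (the 1646 is inferred, not logged).
delta_sample('f98d09d7-...', 'network.incoming.bytes', 1646)
print(delta_sample('f98d09d7-...', 'network.incoming.bytes', 2276))
```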
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.193 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.194 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
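The rate pollster is skipped here because, as the message says, its discovery pass found no new resources this cycle. When a rate meter does produce samples, the value is the counter delta divided by the elapsed polling window; a sketch of that arithmetic (timestamps in the example are illustrative):

```python
from datetime import datetime

def rate_sample(prev_value, prev_ts, cur_value, cur_ts):
    # bytes/sec across the polling window; counter resets clamp to 0.
    elapsed = (cur_ts - prev_ts).total_seconds()
    if elapsed <= 0:
        return 0.0
    return max(cur_value - prev_value, 0) / elapsed

# e.g. 630 new bytes over a 10-second window -> 63.0 bytes/s
print(rate_sample(1646, datetime(2025, 10, 11, 2, 49, 4),
                  2276, datetime(2025, 10, 11, 2, 49, 14)))
```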
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.194 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.194 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.195 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:49:14.190685) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.195 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.195 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.195 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.196 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:49:14.195669) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.197 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.198 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.198 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.198 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.199 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.199 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.199 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets volume: 15 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.200 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:49:14.199403) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.200 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.packets volume: 27 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.201 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.202 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
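Each _stats_to_sample debug line has a fixed shape, <instance-uuid>/<meter> volume: <value>, so per-instance samples can be recovered from a captured journal with a small parser. A sketch under that assumption (the message format is an implementation detail and may change between releases):

    import re

    # Pattern inferred from the "_stats_to_sample" lines above.
    SAMPLE_RE = re.compile(
        r"(?P<uuid>[0-9a-f-]{36})/(?P<meter>[\w.]+) volume: (?P<value>[\d.]+)"
    )

    def parse_samples(lines):
        """Yield (instance_uuid, meter, value) tuples from journal lines."""
        for line in lines:
            m = SAMPLE_RE.search(line)
            if m:
                yield m["uuid"], m["meter"], float(m["value"])

    log = [
        "... 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets volume: 15 ...",
        "... f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.packets volume: 27 ...",
    ]
    for uuid, meter, value in parse_samples(log):
        print(uuid, meter, value)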
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.202 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.202 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.203 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.203 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.203 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.203 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.bytes.delta volume: 630 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.203 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:49:14.203366) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.204 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.204 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.205 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.206 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.206 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.206 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.207 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.207 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.208 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.209 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:49:14.207342) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.209 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.209 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.210 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.210 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.210 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.210 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.211 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.211 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.213 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.213 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:49:14.210573) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.213 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.213 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.213 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.213 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.214 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.214 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.214 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.allocation volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.215 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.215 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:49:14.214068) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.215 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.allocation volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.216 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.216 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.216 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.217 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
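disk.device.allocation is reported in bytes, so the repeated 1073741824 readings above are exactly 1 GiB (2**30 bytes) per device, while the smaller values (509952 and 485376 bytes) are sub-MiB devices. A purely illustrative conversion:

    # disk.device.allocation samples are byte counts; 1073741824 B == 1 GiB.
    def to_gib(n_bytes: int) -> float:
        return n_bytes / 2**30

    print(to_gib(1073741824))        # 1.0
    print(round(to_gib(509952), 6))  # ~0.000475 GiB (a few hundred KiB)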
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.217 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.217 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.218 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.218 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.218 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.218 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:49:14.218239) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.218 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.218 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.219 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.219 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.220 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.220 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.220 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.220 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.220 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.221 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/cpu volume: 331530000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.221 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/cpu volume: 186910000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.221 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:49:14.220906) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.221 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 61070000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.222 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
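The cpu meter is cumulative guest CPU time in nanoseconds, so a reading such as 331530000000 is roughly 331.5 s of CPU time since the instance started; a utilization figure needs two consecutive samples. A sketch (the vCPU count is a hypothetical input that would come from the flavor, not from this log):

    def cpu_util_percent(prev_ns, curr_ns, interval_s, vcpus):
        """Approximate CPU utilization between two cumulative 'cpu' samples.

        prev_ns/curr_ns: cumulative CPU time in nanoseconds (the meter's unit).
        interval_s: wall-clock seconds between the two polls.
        vcpus: number of virtual CPUs (hypothetical here; taken from the flavor).
        """
        delta_s = (curr_ns - prev_ns) / 1e9
        return 100.0 * delta_s / (interval_s * vcpus)

    # e.g. 3e9 ns of CPU time over a 300 s cycle on 1 vCPU -> 1% utilization
    print(cpu_util_percent(331_530_000_000, 334_530_000_000, 300, vcpus=1))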
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.222 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.222 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.223 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.223 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.223 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.223 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.bytes volume: 2250 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.224 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.bytes volume: 1620 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.224 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2412 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.225 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:49:14.223676) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.225 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.225 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.225 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.226 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.226 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.226 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.226 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:49:14.226453) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.226 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/memory.usage volume: 43.12890625 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.227 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/memory.usage volume: 43.42578125 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.227 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.83984375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.228 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
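memory.usage is reported in megabytes (MiB in practice, since libvirt reports memory statistics in KiB), which is why values such as 43.12890625 above are exact multiples of a 4 KiB page:

    # memory.usage samples are in MiB; the long fractions are exact page counts.
    def mib_to_kib(mib: float) -> int:
        return round(mib * 1024)

    print(mib_to_kib(43.12890625))  # 44164 KiB, i.e. 11041 4-KiB pages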
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.228 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.228 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.229 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.229 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.229 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.229 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.230 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.230 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.230 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.230 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.230 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.231 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.231 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.231 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.231 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.231 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.232 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.232 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.232 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.232 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.233 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.233 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.233 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.233 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.234 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.234 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.234 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:49:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:49:14.234 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
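The cycle brackets every meter between a "Polling pollster X" and a "Finished polling pollster X" INFO line, so per-pollster latency can be recovered by pairing those messages. A sketch using only the timestamp and message text visible above:

    import re
    from datetime import datetime

    START = re.compile(r"(?P<ts>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+).*Polling pollster (?P<name>[\w.]+)")
    END = re.compile(r"(?P<ts>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+).*Finished polling pollster (?P<name>[\w.]+)")

    def pollster_durations(lines):
        """Map pollster name -> seconds between its Polling/Finished INFO lines."""
        started, durations = {}, {}
        for line in lines:
            if m := START.search(line):
                started[m["name"]] = datetime.strptime(m["ts"], "%Y-%m-%d %H:%M:%S.%f")
            elif (m := END.search(line)) and m["name"] in started:
                end = datetime.strptime(m["ts"], "%Y-%m-%d %H:%M:%S.%f")
                durations[m["name"]] = (end - started.pop(m["name"])).total_seconds()
        return durations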
Oct 11 02:49:14 compute-0 nova_compute[356901]: 2025-10-11 02:49:14.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:49:14 compute-0 ceph-mon[191930]: pgmap v2149: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s rd, 0 op/s
Oct 11 02:49:15 compute-0 nova_compute[356901]: 2025-10-11 02:49:15.068 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2150: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s rd, 0 op/s
Oct 11 02:49:15 compute-0 podman[471503]: 2025-10-11 02:49:15.249470854 +0000 UTC m=+0.136256404 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, config_id=multipathd, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009, container_name=multipathd, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.license=GPLv2)
Oct 11 02:49:15 compute-0 podman[471504]: 2025-10-11 02:49:15.268398295 +0000 UTC m=+0.155038839 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, container_name=iscsid, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, managed_by=edpm_ansible, tcib_managed=true, org.label-schema.build-date=20251009)
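The podman health_status events above come from each container's configured healthcheck ('test': '/openstack/healthcheck' in config_data), and the same check can be triggered on demand. A sketch that shells out to podman's healthcheck run subcommand, assuming only that it is available on the host (it exits 0 when the check passes):

    import subprocess

    def container_healthy(name: str) -> bool:
        """Run the container's own healthcheck; exit code 0 means healthy."""
        result = subprocess.run(
            ["podman", "healthcheck", "run", name],
            capture_output=True, text=True,
        )
        return result.returncode == 0

    for name in ("multipathd", "iscsid"):
        print(name, "healthy" if container_healthy(name) else "unhealthy")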
Oct 11 02:49:15 compute-0 nova_compute[356901]: 2025-10-11 02:49:15.389 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:15 compute-0 nova_compute[356901]: 2025-10-11 02:49:15.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:49:15 compute-0 nova_compute[356901]: 2025-10-11 02:49:15.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:49:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #105. Immutable memtables: 0.
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:49:16.405701) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 61] Flushing memtable with next log file: 105
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150956406047, "job": 61, "event": "flush_started", "num_memtables": 1, "num_entries": 542, "num_deletes": 250, "total_data_size": 596253, "memory_usage": 607000, "flush_reason": "Manual Compaction"}
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 61] Level-0 flush table #106: started
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150956414496, "cf_name": "default", "job": 61, "event": "table_file_creation", "file_number": 106, "file_size": 393688, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 44010, "largest_seqno": 44551, "table_properties": {"data_size": 391028, "index_size": 696, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 901, "raw_key_size": 6981, "raw_average_key_size": 20, "raw_value_size": 385671, "raw_average_value_size": 1117, "num_data_blocks": 32, "num_entries": 345, "num_filter_entries": 345, "num_deletions": 250, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760150915, "oldest_key_time": 1760150915, "file_creation_time": 1760150956, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 106, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 61] Flush lasted 8892 microseconds, and 4689 cpu microseconds.
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:49:16.414592) [db/flush_job.cc:967] [default] [JOB 61] Level-0 flush table #106: 393688 bytes OK
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:49:16.414627) [db/memtable_list.cc:519] [default] Level-0 commit table #106 started
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:49:16.417507) [db/memtable_list.cc:722] [default] Level-0 commit table #106: memtable #1 done
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:49:16.417532) EVENT_LOG_v1 {"time_micros": 1760150956417523, "job": 61, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:49:16.417556) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 61] Try to delete WAL files size 593217, prev total WAL file size 593217, number of live WAL files 2.
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000102.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:49:16.419914) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6D6772737461740031373534' seq:72057594037927935, type:22 .. '6D6772737461740032303035' seq:0, type:0; will stop at (end)
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 62] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 61 Base level 0, inputs: [106(384KB)], [104(9178KB)]
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150956420003, "job": 62, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [106], "files_L6": [104], "score": -1, "input_data_size": 9792869, "oldest_snapshot_seqno": -1}
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 62] Generated table #107: 5775 keys, 6700658 bytes, temperature: kUnknown
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150956481829, "cf_name": "default", "job": 62, "event": "table_file_creation", "file_number": 107, "file_size": 6700658, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 6666051, "index_size": 19073, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 14469, "raw_key_size": 151247, "raw_average_key_size": 26, "raw_value_size": 6565433, "raw_average_value_size": 1136, "num_data_blocks": 753, "num_entries": 5775, "num_filter_entries": 5775, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760150956, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 107, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:49:16.482125) [db/compaction/compaction_job.cc:1663] [default] [JOB 62] Compacted 1@0 + 1@6 files to L6 => 6700658 bytes
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:49:16.484147) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 158.1 rd, 108.2 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(0.4, 9.0 +0.0 blob) out(6.4 +0.0 blob), read-write-amplify(41.9) write-amplify(17.0) OK, records in: 6267, records dropped: 492 output_compression: NoCompression
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:49:16.484168) EVENT_LOG_v1 {"time_micros": 1760150956484158, "job": 62, "event": "compaction_finished", "compaction_time_micros": 61937, "compaction_time_cpu_micros": 47029, "output_level": 6, "num_output_files": 1, "total_output_size": 6700658, "num_input_records": 6267, "num_output_records": 5775, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000106.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150956484459, "job": 62, "event": "table_file_deletion", "file_number": 106}
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000104.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760150956486528, "job": 62, "event": "table_file_deletion", "file_number": 104}
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:49:16.418815) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:49:16.486772) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:49:16.486779) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:49:16.486781) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:49:16.486783) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:49:16 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:49:16.486785) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:49:16 compute-0 nova_compute[356901]: 2025-10-11 02:49:16.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:49:17 compute-0 ceph-mon[191930]: pgmap v2150: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s rd, 0 op/s
Oct 11 02:49:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2151: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s rd, 7.3 KiB/s wr, 0 op/s
Oct 11 02:49:18 compute-0 nova_compute[356901]: 2025-10-11 02:49:18.891 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:49:18 compute-0 nova_compute[356901]: 2025-10-11 02:49:18.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:49:18 compute-0 nova_compute[356901]: 2025-10-11 02:49:18.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:49:19 compute-0 ceph-mon[191930]: pgmap v2151: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s rd, 7.3 KiB/s wr, 0 op/s
Oct 11 02:49:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2152: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s rd, 7.3 KiB/s wr, 0 op/s
Oct 11 02:49:19 compute-0 nova_compute[356901]: 2025-10-11 02:49:19.512 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-f98d09d7-6aa0-4405-bfa0-be1f78d3911f" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:49:19 compute-0 nova_compute[356901]: 2025-10-11 02:49:19.513 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-f98d09d7-6aa0-4405-bfa0-be1f78d3911f" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:49:19 compute-0 nova_compute[356901]: 2025-10-11 02:49:19.514 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:49:20 compute-0 nova_compute[356901]: 2025-10-11 02:49:20.076 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:20 compute-0 nova_compute[356901]: 2025-10-11 02:49:20.392 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:21 compute-0 ceph-mon[191930]: pgmap v2152: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s rd, 7.3 KiB/s wr, 0 op/s
Oct 11 02:49:21 compute-0 nova_compute[356901]: 2025-10-11 02:49:21.059 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Updating instance_info_cache with network_info: [{"id": "0c37c119-6647-42bb-a22f-ca741242ef30", "address": "fa:16:3e:ee:94:7e", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.2.253", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap0c37c119-66", "ovs_interfaceid": "0c37c119-6647-42bb-a22f-ca741242ef30", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
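The instance_info_cache payload is plain JSON, so the port's MAC, device name, and fixed IPs can be read straight out of it once parsed. A sketch over a trimmed copy of the structure logged above:

    import json

    # Structure copied from the instance_info_cache update above (trimmed).
    network_info = json.loads("""
    [{"id": "0c37c119-6647-42bb-a22f-ca741242ef30",
      "address": "fa:16:3e:ee:94:7e",
      "network": {"subnets": [{"cidr": "10.100.0.0/16",
                               "ips": [{"address": "10.100.2.253", "type": "fixed"}]}]},
      "devname": "tap0c37c119-66"}]
    """)

    for vif in network_info:
        ips = [ip["address"]
               for subnet in vif["network"]["subnets"]
               for ip in subnet["ips"]]
        print(vif["devname"], vif["address"], ips)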
Oct 11 02:49:21 compute-0 nova_compute[356901]: 2025-10-11 02:49:21.078 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-f98d09d7-6aa0-4405-bfa0-be1f78d3911f" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:49:21 compute-0 nova_compute[356901]: 2025-10-11 02:49:21.079 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:49:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2153: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s rd, 7.3 KiB/s wr, 0 op/s
Oct 11 02:49:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:49:21 compute-0 nova_compute[356901]: 2025-10-11 02:49:21.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:49:21 compute-0 nova_compute[356901]: 2025-10-11 02:49:21.926 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:49:21 compute-0 nova_compute[356901]: 2025-10-11 02:49:21.927 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:49:21 compute-0 nova_compute[356901]: 2025-10-11 02:49:21.928 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:49:21 compute-0 nova_compute[356901]: 2025-10-11 02:49:21.929 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:49:21 compute-0 nova_compute[356901]: 2025-10-11 02:49:21.930 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:49:22 compute-0 ceph-mon[191930]: pgmap v2153: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 682 B/s rd, 7.3 KiB/s wr, 0 op/s
Oct 11 02:49:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:49:22 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3776013941' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:49:22 compute-0 nova_compute[356901]: 2025-10-11 02:49:22.448 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.519s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:49:22 compute-0 nova_compute[356901]: 2025-10-11 02:49:22.567 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:49:22 compute-0 nova_compute[356901]: 2025-10-11 02:49:22.568 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:49:22 compute-0 nova_compute[356901]: 2025-10-11 02:49:22.578 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:49:22 compute-0 nova_compute[356901]: 2025-10-11 02:49:22.579 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:49:22 compute-0 nova_compute[356901]: 2025-10-11 02:49:22.589 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:49:22 compute-0 nova_compute[356901]: 2025-10-11 02:49:22.590 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:49:22 compute-0 nova_compute[356901]: 2025-10-11 02:49:22.591 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:49:23 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3776013941' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:49:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2154: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 7.3 KiB/s wr, 0 op/s
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.150 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.152 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3231MB free_disk=59.864131927490234GB free_vcpus=5 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.152 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.153 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.225 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.226 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 8422017b-c868-4ba2-ab1f-61d3668ca145 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.226 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance f98d09d7-6aa0-4405-bfa0-be1f78d3911f actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.227 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 3 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.227 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1280MB phys_disk=59GB used_disk=4GB total_vcpus=8 used_vcpus=3 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.242 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing inventories for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:804
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.260 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating ProviderTree inventory for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 from _refresh_and_get_inventory using data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} _refresh_and_get_inventory /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:768
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.261 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating inventory in ProviderTree for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with inventory: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:176
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.277 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing aggregate associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, aggregates: None _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:813
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.311 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing trait associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, traits: COMPUTE_VOLUME_EXTEND,COMPUTE_NET_VIF_MODEL_VMXNET3,HW_CPU_X86_SSSE3,COMPUTE_RESCUE_BFV,COMPUTE_SOCKET_PCI_NUMA_AFFINITY,COMPUTE_NODE,HW_CPU_X86_SVM,COMPUTE_STORAGE_BUS_SCSI,HW_CPU_X86_FMA3,COMPUTE_GRAPHICS_MODEL_NONE,COMPUTE_NET_VIF_MODEL_RTL8139,HW_CPU_X86_SSE4A,COMPUTE_IMAGE_TYPE_QCOW2,HW_CPU_X86_BMI2,HW_CPU_X86_SSE42,HW_CPU_X86_AVX2,COMPUTE_IMAGE_TYPE_RAW,COMPUTE_VIOMMU_MODEL_VIRTIO,HW_CPU_X86_AESNI,COMPUTE_STORAGE_BUS_FDC,COMPUTE_GRAPHICS_MODEL_VIRTIO,HW_CPU_X86_AMD_SVM,COMPUTE_NET_VIF_MODEL_NE2K_PCI,COMPUTE_ACCELERATORS,HW_CPU_X86_SSE2,COMPUTE_GRAPHICS_MODEL_VGA,HW_CPU_X86_ABM,HW_CPU_X86_AVX,COMPUTE_NET_VIF_MODEL_E1000,COMPUTE_STORAGE_BUS_USB,COMPUTE_NET_ATTACH_INTERFACE,HW_CPU_X86_MMX,COMPUTE_SECURITY_TPM_2_0,COMPUTE_IMAGE_TYPE_ISO,HW_CPU_X86_SSE41,COMPUTE_IMAGE_TYPE_AKI,COMPUTE_IMAGE_TYPE_AMI,COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG,COMPUTE_DEVICE_TAGGING,COMPUTE_SECURITY_UEFI_SECURE_BOOT,COMPUTE_TRUSTED_CERTS,COMPUTE_NET_VIF_MODEL_VIRTIO,COMPUTE_VIOMMU_MODEL_INTEL,COMPUTE_STORAGE_BUS_SATA,HW_CPU_X86_SSE,COMPUTE_STORAGE_BUS_VIRTIO,COMPUTE_NET_VIF_MODEL_PCNET,COMPUTE_GRAPHICS_MODEL_CIRRUS,HW_CPU_X86_SHA,HW_CPU_X86_BMI,COMPUTE_NET_VIF_MODEL_E1000E,COMPUTE_NET_VIF_MODEL_SPAPR_VLAN,COMPUTE_VOLUME_ATTACH_WITH_TAG,COMPUTE_GRAPHICS_MODEL_BOCHS,COMPUTE_VIOMMU_MODEL_AUTO,COMPUTE_IMAGE_TYPE_ARI,HW_CPU_X86_CLMUL,COMPUTE_STORAGE_BUS_IDE,COMPUTE_VOLUME_MULTI_ATTACH,HW_CPU_X86_F16C,COMPUTE_SECURITY_TPM_1_2 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:825
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.370 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:49:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:49:23 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2565734401' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.871 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.500s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.889 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.919 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.922 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:49:23 compute-0 nova_compute[356901]: 2025-10-11 02:49:23.922 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.769s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:49:24 compute-0 ceph-mon[191930]: pgmap v2154: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 7.3 KiB/s wr, 0 op/s
Oct 11 02:49:24 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2565734401' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:49:24 compute-0 podman[471596]: 2025-10-11 02:49:24.228947845 +0000 UTC m=+0.096039873 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:49:24 compute-0 podman[471588]: 2025-10-11 02:49:24.23095761 +0000 UTC m=+0.124059669 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, config_id=edpm, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:49:24 compute-0 podman[471589]: 2025-10-11 02:49:24.242642594 +0000 UTC m=+0.117348974 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, version=9.6, io.buildah.version=1.33.7, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., architecture=x86_64, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, build-date=2025-08-20T13:12:41, com.redhat.component=ubi9-minimal-container, io.openshift.tags=minimal rhel9, release=1755695350, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, container_name=openstack_network_exporter, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, distribution-scope=public, vendor=Red Hat, Inc., managed_by=edpm_ansible, name=ubi9-minimal, url=https://catalog.redhat.com/en/search?searchType=containers)
Oct 11 02:49:24 compute-0 nova_compute[356901]: 2025-10-11 02:49:24.924 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:49:25 compute-0 nova_compute[356901]: 2025-10-11 02:49:25.081 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2155: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 7.3 KiB/s wr, 0 op/s
Oct 11 02:49:25 compute-0 nova_compute[356901]: 2025-10-11 02:49:25.395 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:26 compute-0 ceph-mon[191930]: pgmap v2155: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 7.3 KiB/s wr, 0 op/s
Oct 11 02:49:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:49:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:49:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:49:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:49:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:49:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:49:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:49:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2156: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 7.3 KiB/s wr, 0 op/s
Oct 11 02:49:28 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:49:28 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1797795798' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:49:28 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:49:28 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1797795798' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:49:28 compute-0 ceph-mon[191930]: pgmap v2156: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 7.3 KiB/s wr, 0 op/s
Oct 11 02:49:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1797795798' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:49:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1797795798' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:49:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2157: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:49:29 compute-0 podman[157119]: time="2025-10-11T02:49:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:49:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:49:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:49:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:49:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9547 "" "Go-http-client/1.1"
Oct 11 02:49:30 compute-0 nova_compute[356901]: 2025-10-11 02:49:30.086 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:30 compute-0 ceph-mon[191930]: pgmap v2157: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:49:30 compute-0 nova_compute[356901]: 2025-10-11 02:49:30.396 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2158: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:49:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:49:31 compute-0 openstack_network_exporter[374316]: ERROR   02:49:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:49:31 compute-0 openstack_network_exporter[374316]: ERROR   02:49:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:49:31 compute-0 openstack_network_exporter[374316]: ERROR   02:49:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:49:31 compute-0 openstack_network_exporter[374316]: ERROR   02:49:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:49:31 compute-0 openstack_network_exporter[374316]: ERROR   02:49:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:49:32 compute-0 sudo[471648]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:49:32 compute-0 sudo[471648]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:32 compute-0 sudo[471648]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:32 compute-0 ceph-mon[191930]: pgmap v2158: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:49:32 compute-0 sudo[471673]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:49:32 compute-0 sudo[471673]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:32 compute-0 sudo[471673]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:32 compute-0 sudo[471698]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:49:32 compute-0 sudo[471698]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:32 compute-0 sudo[471698]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:32 compute-0 sudo[471729]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:49:32 compute-0 podman[471722]: 2025-10-11 02:49:32.516088529 +0000 UTC m=+0.136116603 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, version=9.4, architecture=x86_64, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.openshift.tags=base rhel9, release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., com.redhat.component=ubi9-container, io.openshift.expose-services=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, managed_by=edpm_ansible, build-date=2024-09-18T21:23:30, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., name=ubi9, release-0.7.12=, config_id=edpm, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.buildah.version=1.29.0, vcs-type=git)
Oct 11 02:49:32 compute-0 sudo[471729]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2159: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:49:33 compute-0 sudo[471729]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:49:33 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:49:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:49:33 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:49:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:49:33 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:49:33 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 01a8bbd1-b6e1-434b-9af4-b9f71d2c4e02 does not exist
Oct 11 02:49:33 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev c9b806f3-bc66-4882-a965-14f6f7992961 does not exist
Oct 11 02:49:33 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 5adaea52-797a-4b89-afa2-6a87dcb8f026 does not exist
Oct 11 02:49:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:49:33 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:49:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:49:33 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:49:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:49:33 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:49:33 compute-0 sudo[471797]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:49:33 compute-0 sudo[471797]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:33 compute-0 sudo[471797]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:33 compute-0 sudo[471822]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:49:33 compute-0 sudo[471822]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:33 compute-0 sudo[471822]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:33 compute-0 sudo[471847]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:49:33 compute-0 sudo[471847]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:33 compute-0 sudo[471847]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:33 compute-0 sudo[471872]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:49:33 compute-0 sudo[471872]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:34 compute-0 ceph-mon[191930]: pgmap v2159: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:49:34 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:49:34 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:49:34 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:49:34 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:49:34 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:49:34 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:49:34 compute-0 podman[471935]: 2025-10-11 02:49:34.44915744 +0000 UTC m=+0.075692566 container create 01962c3be98f8a9171d5297c6dc8ac3db37b5b299f2d4fbe9436f1c005211447 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mayer, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 02:49:34 compute-0 podman[471935]: 2025-10-11 02:49:34.42101491 +0000 UTC m=+0.047550076 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:49:34 compute-0 systemd[1]: Started libpod-conmon-01962c3be98f8a9171d5297c6dc8ac3db37b5b299f2d4fbe9436f1c005211447.scope.
Oct 11 02:49:34 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:49:34 compute-0 podman[471935]: 2025-10-11 02:49:34.590899476 +0000 UTC m=+0.217434682 container init 01962c3be98f8a9171d5297c6dc8ac3db37b5b299f2d4fbe9436f1c005211447 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mayer, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 02:49:34 compute-0 podman[471935]: 2025-10-11 02:49:34.602947046 +0000 UTC m=+0.229482202 container start 01962c3be98f8a9171d5297c6dc8ac3db37b5b299f2d4fbe9436f1c005211447 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mayer, OSD_FLAVOR=default, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:49:34 compute-0 podman[471935]: 2025-10-11 02:49:34.608529247 +0000 UTC m=+0.235064413 container attach 01962c3be98f8a9171d5297c6dc8ac3db37b5b299f2d4fbe9436f1c005211447 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mayer, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 02:49:34 compute-0 elastic_mayer[471951]: 167 167
Oct 11 02:49:34 compute-0 systemd[1]: libpod-01962c3be98f8a9171d5297c6dc8ac3db37b5b299f2d4fbe9436f1c005211447.scope: Deactivated successfully.
Oct 11 02:49:34 compute-0 podman[471935]: 2025-10-11 02:49:34.616951127 +0000 UTC m=+0.243486283 container died 01962c3be98f8a9171d5297c6dc8ac3db37b5b299f2d4fbe9436f1c005211447 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mayer, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:49:34 compute-0 systemd[1]: var-lib-containers-storage-overlay-5fd1b05ed685ffbcd374c968b6f8390ab1d4246fb510c7fa243b0100bbf7ca38-merged.mount: Deactivated successfully.
Oct 11 02:49:34 compute-0 podman[471935]: 2025-10-11 02:49:34.678289302 +0000 UTC m=+0.304824438 container remove 01962c3be98f8a9171d5297c6dc8ac3db37b5b299f2d4fbe9436f1c005211447 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mayer, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:49:34 compute-0 systemd[1]: libpod-conmon-01962c3be98f8a9171d5297c6dc8ac3db37b5b299f2d4fbe9436f1c005211447.scope: Deactivated successfully.
Oct 11 02:49:34 compute-0 podman[471974]: 2025-10-11 02:49:34.939552243 +0000 UTC m=+0.073477290 container create 8b81dca258d3812c3b11eda31041044dc34ec7720fa2cde80534fdf5a1caba41 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_morse, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:49:35 compute-0 podman[471974]: 2025-10-11 02:49:34.909373186 +0000 UTC m=+0.043298273 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:49:35 compute-0 systemd[1]: Started libpod-conmon-8b81dca258d3812c3b11eda31041044dc34ec7720fa2cde80534fdf5a1caba41.scope.
Oct 11 02:49:35 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:49:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ce9f2d9de4615c9badabdca1153d9c495eb9fe511e94552023a23be7c3ecda5f/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:49:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ce9f2d9de4615c9badabdca1153d9c495eb9fe511e94552023a23be7c3ecda5f/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:49:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ce9f2d9de4615c9badabdca1153d9c495eb9fe511e94552023a23be7c3ecda5f/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:49:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ce9f2d9de4615c9badabdca1153d9c495eb9fe511e94552023a23be7c3ecda5f/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:49:35 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/ce9f2d9de4615c9badabdca1153d9c495eb9fe511e94552023a23be7c3ecda5f/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:49:35 compute-0 nova_compute[356901]: 2025-10-11 02:49:35.090 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:35 compute-0 podman[471974]: 2025-10-11 02:49:35.095592546 +0000 UTC m=+0.229517613 container init 8b81dca258d3812c3b11eda31041044dc34ec7720fa2cde80534fdf5a1caba41 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_morse, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 02:49:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2160: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 8.0 KiB/s wr, 1 op/s
Oct 11 02:49:35 compute-0 podman[471974]: 2025-10-11 02:49:35.116357611 +0000 UTC m=+0.250282658 container start 8b81dca258d3812c3b11eda31041044dc34ec7720fa2cde80534fdf5a1caba41 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_morse, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:49:35 compute-0 podman[471974]: 2025-10-11 02:49:35.120586692 +0000 UTC m=+0.254511919 container attach 8b81dca258d3812c3b11eda31041044dc34ec7720fa2cde80534fdf5a1caba41 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_morse, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:49:35 compute-0 nova_compute[356901]: 2025-10-11 02:49:35.400 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:36 compute-0 ceph-mon[191930]: pgmap v2160: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 8.0 KiB/s wr, 1 op/s
Oct 11 02:49:36 compute-0 sleepy_morse[471990]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:49:36 compute-0 sleepy_morse[471990]: --> relative data size: 1.0
Oct 11 02:49:36 compute-0 sleepy_morse[471990]: --> All data devices are unavailable
Oct 11 02:49:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:49:36 compute-0 systemd[1]: libpod-8b81dca258d3812c3b11eda31041044dc34ec7720fa2cde80534fdf5a1caba41.scope: Deactivated successfully.
Oct 11 02:49:36 compute-0 podman[471974]: 2025-10-11 02:49:36.416713422 +0000 UTC m=+1.550638529 container died 8b81dca258d3812c3b11eda31041044dc34ec7720fa2cde80534fdf5a1caba41 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_morse, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0)
Oct 11 02:49:36 compute-0 systemd[1]: libpod-8b81dca258d3812c3b11eda31041044dc34ec7720fa2cde80534fdf5a1caba41.scope: Consumed 1.233s CPU time.
Oct 11 02:49:36 compute-0 systemd[1]: var-lib-containers-storage-overlay-ce9f2d9de4615c9badabdca1153d9c495eb9fe511e94552023a23be7c3ecda5f-merged.mount: Deactivated successfully.
Oct 11 02:49:36 compute-0 podman[471974]: 2025-10-11 02:49:36.522990005 +0000 UTC m=+1.656915072 container remove 8b81dca258d3812c3b11eda31041044dc34ec7720fa2cde80534fdf5a1caba41 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_morse, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.build-date=20250507, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 02:49:36 compute-0 systemd[1]: libpod-conmon-8b81dca258d3812c3b11eda31041044dc34ec7720fa2cde80534fdf5a1caba41.scope: Deactivated successfully.
Oct 11 02:49:36 compute-0 sudo[471872]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:36 compute-0 sudo[472030]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:49:36 compute-0 sudo[472030]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:36 compute-0 sudo[472030]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:36 compute-0 sudo[472055]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:49:36 compute-0 sudo[472055]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:36 compute-0 sudo[472055]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:36 compute-0 sudo[472080]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:49:36 compute-0 sudo[472080]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:36 compute-0 sudo[472080]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:37 compute-0 sudo[472105]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:49:37 compute-0 sudo[472105]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2161: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:49:37 compute-0 podman[472168]: 2025-10-11 02:49:37.631323805 +0000 UTC m=+0.066654684 container create d3957bbf0a68f07235f93c9fbcbb80b1d963903850e85a995931efcf281a9d43 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_mendeleev, org.label-schema.build-date=20250507, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:49:37 compute-0 podman[472168]: 2025-10-11 02:49:37.605676955 +0000 UTC m=+0.041007864 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:49:37 compute-0 systemd[1]: Started libpod-conmon-d3957bbf0a68f07235f93c9fbcbb80b1d963903850e85a995931efcf281a9d43.scope.
Oct 11 02:49:37 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:49:37 compute-0 podman[472168]: 2025-10-11 02:49:37.790957156 +0000 UTC m=+0.226288105 container init d3957bbf0a68f07235f93c9fbcbb80b1d963903850e85a995931efcf281a9d43 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_mendeleev, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default)
Oct 11 02:49:37 compute-0 podman[472168]: 2025-10-11 02:49:37.812398976 +0000 UTC m=+0.247729855 container start d3957bbf0a68f07235f93c9fbcbb80b1d963903850e85a995931efcf281a9d43 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_mendeleev, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507)
Oct 11 02:49:37 compute-0 podman[472168]: 2025-10-11 02:49:37.818382718 +0000 UTC m=+0.253713677 container attach d3957bbf0a68f07235f93c9fbcbb80b1d963903850e85a995931efcf281a9d43 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_mendeleev, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:49:37 compute-0 systemd[1]: libpod-d3957bbf0a68f07235f93c9fbcbb80b1d963903850e85a995931efcf281a9d43.scope: Deactivated successfully.
Oct 11 02:49:37 compute-0 blissful_mendeleev[472184]: 167 167
Oct 11 02:49:37 compute-0 conmon[472184]: conmon d3957bbf0a68f07235f9 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-d3957bbf0a68f07235f93c9fbcbb80b1d963903850e85a995931efcf281a9d43.scope/container/memory.events
Oct 11 02:49:37 compute-0 podman[472168]: 2025-10-11 02:49:37.823367339 +0000 UTC m=+0.258698278 container died d3957bbf0a68f07235f93c9fbcbb80b1d963903850e85a995931efcf281a9d43 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_mendeleev, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507)
Oct 11 02:49:37 compute-0 systemd[1]: var-lib-containers-storage-overlay-a68c147f2dc7c10d13ea292934ac0cdb6e16eef8b2bc7bf944c9ce54b79204a4-merged.mount: Deactivated successfully.
Oct 11 02:49:37 compute-0 podman[472168]: 2025-10-11 02:49:37.901012986 +0000 UTC m=+0.336343895 container remove d3957bbf0a68f07235f93c9fbcbb80b1d963903850e85a995931efcf281a9d43 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_mendeleev, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 02:49:37 compute-0 systemd[1]: libpod-conmon-d3957bbf0a68f07235f93c9fbcbb80b1d963903850e85a995931efcf281a9d43.scope: Deactivated successfully.
Oct 11 02:49:38 compute-0 podman[472207]: 2025-10-11 02:49:38.175436454 +0000 UTC m=+0.081130390 container create 25203e9815e7b128a2a96bd1d7914f152bf309834f69dd8c3a8cca8b2d5ef378 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_franklin, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default)
Oct 11 02:49:38 compute-0 ceph-mon[191930]: pgmap v2161: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:49:38 compute-0 podman[472207]: 2025-10-11 02:49:38.140479638 +0000 UTC m=+0.046173604 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:49:38 compute-0 systemd[1]: Started libpod-conmon-25203e9815e7b128a2a96bd1d7914f152bf309834f69dd8c3a8cca8b2d5ef378.scope.
Oct 11 02:49:38 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:49:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4e1dd3dcda5718bd31aee9ed21e8df5d8836032b38e163904e9126da83706401/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:49:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4e1dd3dcda5718bd31aee9ed21e8df5d8836032b38e163904e9126da83706401/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:49:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4e1dd3dcda5718bd31aee9ed21e8df5d8836032b38e163904e9126da83706401/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:49:38 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4e1dd3dcda5718bd31aee9ed21e8df5d8836032b38e163904e9126da83706401/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:49:38 compute-0 podman[472207]: 2025-10-11 02:49:38.342944466 +0000 UTC m=+0.248638422 container init 25203e9815e7b128a2a96bd1d7914f152bf309834f69dd8c3a8cca8b2d5ef378 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_franklin, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 02:49:38 compute-0 podman[472207]: 2025-10-11 02:49:38.365131626 +0000 UTC m=+0.270825542 container start 25203e9815e7b128a2a96bd1d7914f152bf309834f69dd8c3a8cca8b2d5ef378 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_franklin, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, OSD_FLAVOR=default)
Oct 11 02:49:38 compute-0 podman[472207]: 2025-10-11 02:49:38.370544338 +0000 UTC m=+0.276238314 container attach 25203e9815e7b128a2a96bd1d7914f152bf309834f69dd8c3a8cca8b2d5ef378 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_franklin, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:49:38 compute-0 podman[472223]: 2025-10-11 02:49:38.398885849 +0000 UTC m=+0.105350895 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:49:38 compute-0 podman[472226]: 2025-10-11 02:49:38.457939905 +0000 UTC m=+0.158554196 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.4, org.label-schema.vendor=CentOS, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251007, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute)
Oct 11 02:49:38 compute-0 podman[472270]: 2025-10-11 02:49:38.508336828 +0000 UTC m=+0.076375202 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.license=GPLv2, container_name=ovn_metadata_agent, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:49:38 compute-0 podman[472268]: 2025-10-11 02:49:38.556780709 +0000 UTC m=+0.128682034 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, org.label-schema.build-date=20251009, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:49:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2162: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:49:39 compute-0 sweet_franklin[472224]: {
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:     "0": [
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:         {
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "devices": [
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "/dev/loop3"
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             ],
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "lv_name": "ceph_lv0",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "lv_size": "21470642176",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "name": "ceph_lv0",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "tags": {
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.cluster_name": "ceph",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.crush_device_class": "",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.encrypted": "0",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.osd_id": "0",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.type": "block",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.vdo": "0"
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             },
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "type": "block",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "vg_name": "ceph_vg0"
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:         }
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:     ],
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:     "1": [
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:         {
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "devices": [
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "/dev/loop4"
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             ],
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "lv_name": "ceph_lv1",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "lv_size": "21470642176",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "name": "ceph_lv1",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "tags": {
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.cluster_name": "ceph",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.crush_device_class": "",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.encrypted": "0",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.osd_id": "1",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.type": "block",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.vdo": "0"
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             },
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "type": "block",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "vg_name": "ceph_vg1"
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:         }
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:     ],
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:     "2": [
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:         {
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "devices": [
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "/dev/loop5"
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             ],
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "lv_name": "ceph_lv2",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "lv_size": "21470642176",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "name": "ceph_lv2",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "tags": {
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.cluster_name": "ceph",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.crush_device_class": "",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.encrypted": "0",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.osd_id": "2",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.type": "block",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:                 "ceph.vdo": "0"
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             },
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "type": "block",
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:             "vg_name": "ceph_vg2"
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:         }
Oct 11 02:49:39 compute-0 sweet_franklin[472224]:     ]
Oct 11 02:49:39 compute-0 sweet_franklin[472224]: }
Oct 11 02:49:39 compute-0 systemd[1]: libpod-25203e9815e7b128a2a96bd1d7914f152bf309834f69dd8c3a8cca8b2d5ef378.scope: Deactivated successfully.
Oct 11 02:49:39 compute-0 podman[472314]: 2025-10-11 02:49:39.284995127 +0000 UTC m=+0.057421430 container died 25203e9815e7b128a2a96bd1d7914f152bf309834f69dd8c3a8cca8b2d5ef378 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_franklin, CEPH_REF=reef, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:49:39 compute-0 systemd[1]: var-lib-containers-storage-overlay-4e1dd3dcda5718bd31aee9ed21e8df5d8836032b38e163904e9126da83706401-merged.mount: Deactivated successfully.
Oct 11 02:49:39 compute-0 podman[472314]: 2025-10-11 02:49:39.385270026 +0000 UTC m=+0.157696249 container remove 25203e9815e7b128a2a96bd1d7914f152bf309834f69dd8c3a8cca8b2d5ef378 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_franklin, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:49:39 compute-0 systemd[1]: libpod-conmon-25203e9815e7b128a2a96bd1d7914f152bf309834f69dd8c3a8cca8b2d5ef378.scope: Deactivated successfully.
Oct 11 02:49:39 compute-0 sudo[472105]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:39 compute-0 sudo[472327]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:49:39 compute-0 sudo[472327]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:39 compute-0 sudo[472327]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:39 compute-0 sudo[472352]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:49:39 compute-0 sudo[472352]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:39 compute-0 sudo[472352]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:39 compute-0 sudo[472377]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:49:39 compute-0 sudo[472377]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:39 compute-0 sudo[472377]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:40 compute-0 sudo[472402]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:49:40 compute-0 sudo[472402]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:40 compute-0 nova_compute[356901]: 2025-10-11 02:49:40.097 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:40 compute-0 ceph-mon[191930]: pgmap v2162: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:49:40 compute-0 nova_compute[356901]: 2025-10-11 02:49:40.403 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:40 compute-0 podman[472465]: 2025-10-11 02:49:40.588338876 +0000 UTC m=+0.053579701 container create f0418e5d1bff644b18a1816274a3a46bbbef9fcaee630c2decfff850f495ec9b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_blackburn, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:49:40 compute-0 systemd[1]: Started libpod-conmon-f0418e5d1bff644b18a1816274a3a46bbbef9fcaee630c2decfff850f495ec9b.scope.
Oct 11 02:49:40 compute-0 podman[472465]: 2025-10-11 02:49:40.564825247 +0000 UTC m=+0.030066062 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:49:40 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:49:40 compute-0 podman[472465]: 2025-10-11 02:49:40.704622691 +0000 UTC m=+0.169863496 container init f0418e5d1bff644b18a1816274a3a46bbbef9fcaee630c2decfff850f495ec9b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_blackburn, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:49:40 compute-0 podman[472465]: 2025-10-11 02:49:40.713849303 +0000 UTC m=+0.179090108 container start f0418e5d1bff644b18a1816274a3a46bbbef9fcaee630c2decfff850f495ec9b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_blackburn, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:49:40 compute-0 podman[472465]: 2025-10-11 02:49:40.718464585 +0000 UTC m=+0.183705390 container attach f0418e5d1bff644b18a1816274a3a46bbbef9fcaee630c2decfff850f495ec9b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_blackburn, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:49:40 compute-0 fervent_blackburn[472480]: 167 167
Oct 11 02:49:40 compute-0 systemd[1]: libpod-f0418e5d1bff644b18a1816274a3a46bbbef9fcaee630c2decfff850f495ec9b.scope: Deactivated successfully.
Oct 11 02:49:40 compute-0 conmon[472480]: conmon f0418e5d1bff644b18a1 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-f0418e5d1bff644b18a1816274a3a46bbbef9fcaee630c2decfff850f495ec9b.scope/container/memory.events
Oct 11 02:49:40 compute-0 podman[472485]: 2025-10-11 02:49:40.772402493 +0000 UTC m=+0.031304827 container died f0418e5d1bff644b18a1816274a3a46bbbef9fcaee630c2decfff850f495ec9b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_blackburn, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 02:49:40 compute-0 systemd[1]: var-lib-containers-storage-overlay-6068c587ec547ce1fde403d289e74cabb61f983cd144bfb57dd0cad74ac6791e-merged.mount: Deactivated successfully.
Oct 11 02:49:40 compute-0 podman[472485]: 2025-10-11 02:49:40.835690319 +0000 UTC m=+0.094592633 container remove f0418e5d1bff644b18a1816274a3a46bbbef9fcaee630c2decfff850f495ec9b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=fervent_blackburn, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 02:49:40 compute-0 systemd[1]: libpod-conmon-f0418e5d1bff644b18a1816274a3a46bbbef9fcaee630c2decfff850f495ec9b.scope: Deactivated successfully.
Oct 11 02:49:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2163: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:49:41 compute-0 podman[472506]: 2025-10-11 02:49:41.195795115 +0000 UTC m=+0.085921631 container create 36dce3cc92f0acbec3a77cc908ec312343eb7cb26232fe6e099da0a0c81fe157 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_turing, CEPH_REF=reef, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:49:41 compute-0 podman[472506]: 2025-10-11 02:49:41.15876477 +0000 UTC m=+0.048891376 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:49:41 compute-0 systemd[1]: Started libpod-conmon-36dce3cc92f0acbec3a77cc908ec312343eb7cb26232fe6e099da0a0c81fe157.scope.
Oct 11 02:49:41 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:49:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/259bf6a98a3173741142cec197c8e3590f37d2f67af031c084b2f723e8520d07/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:49:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/259bf6a98a3173741142cec197c8e3590f37d2f67af031c084b2f723e8520d07/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:49:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/259bf6a98a3173741142cec197c8e3590f37d2f67af031c084b2f723e8520d07/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:49:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/259bf6a98a3173741142cec197c8e3590f37d2f67af031c084b2f723e8520d07/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:49:41 compute-0 podman[472506]: 2025-10-11 02:49:41.37179168 +0000 UTC m=+0.261918226 container init 36dce3cc92f0acbec3a77cc908ec312343eb7cb26232fe6e099da0a0c81fe157 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_turing, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:49:41 compute-0 podman[472506]: 2025-10-11 02:49:41.393317015 +0000 UTC m=+0.283443531 container start 36dce3cc92f0acbec3a77cc908ec312343eb7cb26232fe6e099da0a0c81fe157 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_turing, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 02:49:41 compute-0 podman[472506]: 2025-10-11 02:49:41.397557837 +0000 UTC m=+0.287684353 container attach 36dce3cc92f0acbec3a77cc908ec312343eb7cb26232fe6e099da0a0c81fe157 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_turing, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:49:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:49:42 compute-0 ceph-mon[191930]: pgmap v2163: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:49:42 compute-0 hungry_turing[472523]: {
Oct 11 02:49:42 compute-0 hungry_turing[472523]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:49:42 compute-0 hungry_turing[472523]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:49:42 compute-0 hungry_turing[472523]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:49:42 compute-0 hungry_turing[472523]:         "osd_id": 1,
Oct 11 02:49:42 compute-0 hungry_turing[472523]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:49:42 compute-0 hungry_turing[472523]:         "type": "bluestore"
Oct 11 02:49:42 compute-0 hungry_turing[472523]:     },
Oct 11 02:49:42 compute-0 hungry_turing[472523]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:49:42 compute-0 hungry_turing[472523]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:49:42 compute-0 hungry_turing[472523]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:49:42 compute-0 hungry_turing[472523]:         "osd_id": 2,
Oct 11 02:49:42 compute-0 hungry_turing[472523]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:49:42 compute-0 hungry_turing[472523]:         "type": "bluestore"
Oct 11 02:49:42 compute-0 hungry_turing[472523]:     },
Oct 11 02:49:42 compute-0 hungry_turing[472523]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:49:42 compute-0 hungry_turing[472523]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:49:42 compute-0 hungry_turing[472523]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:49:42 compute-0 hungry_turing[472523]:         "osd_id": 0,
Oct 11 02:49:42 compute-0 hungry_turing[472523]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:49:42 compute-0 hungry_turing[472523]:         "type": "bluestore"
Oct 11 02:49:42 compute-0 hungry_turing[472523]:     }
Oct 11 02:49:42 compute-0 hungry_turing[472523]: }
Oct 11 02:49:42 compute-0 systemd[1]: libpod-36dce3cc92f0acbec3a77cc908ec312343eb7cb26232fe6e099da0a0c81fe157.scope: Deactivated successfully.
Oct 11 02:49:42 compute-0 systemd[1]: libpod-36dce3cc92f0acbec3a77cc908ec312343eb7cb26232fe6e099da0a0c81fe157.scope: Consumed 1.168s CPU time.
Oct 11 02:49:42 compute-0 podman[472506]: 2025-10-11 02:49:42.568865867 +0000 UTC m=+1.458992423 container died 36dce3cc92f0acbec3a77cc908ec312343eb7cb26232fe6e099da0a0c81fe157 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_turing, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:49:42 compute-0 systemd[1]: var-lib-containers-storage-overlay-259bf6a98a3173741142cec197c8e3590f37d2f67af031c084b2f723e8520d07-merged.mount: Deactivated successfully.
Oct 11 02:49:42 compute-0 podman[472506]: 2025-10-11 02:49:42.690880342 +0000 UTC m=+1.581006858 container remove 36dce3cc92f0acbec3a77cc908ec312343eb7cb26232fe6e099da0a0c81fe157 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_turing, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS)
Oct 11 02:49:42 compute-0 systemd[1]: libpod-conmon-36dce3cc92f0acbec3a77cc908ec312343eb7cb26232fe6e099da0a0c81fe157.scope: Deactivated successfully.
Oct 11 02:49:42 compute-0 sudo[472402]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:49:42 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:49:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:49:42 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:49:42 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 39690efe-4ab1-48e6-89d0-5b185fe69007 does not exist
Oct 11 02:49:42 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev be77f476-63e7-4202-9bb0-61f73d452b47 does not exist
Oct 11 02:49:42 compute-0 sudo[472569]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:49:42 compute-0 sudo[472569]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:42 compute-0 sudo[472569]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:43 compute-0 sudo[472594]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:49:43 compute-0 sudo[472594]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:49:43 compute-0 sudo[472594]: pam_unix(sudo:session): session closed for user root
Oct 11 02:49:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2164: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:49:43 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:49:43 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:49:44 compute-0 ceph-mon[191930]: pgmap v2164: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:49:45 compute-0 nova_compute[356901]: 2025-10-11 02:49:45.107 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2165: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:49:45 compute-0 nova_compute[356901]: 2025-10-11 02:49:45.406 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:46 compute-0 podman[472620]: 2025-10-11 02:49:46.273425616 +0000 UTC m=+0.149320663 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid)
Oct 11 02:49:46 compute-0 podman[472619]: 2025-10-11 02:49:46.295598364 +0000 UTC m=+0.175238677 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_managed=true, config_id=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:49:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:49:46 compute-0 ceph-mon[191930]: pgmap v2165: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:49:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2166: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s wr, 0 op/s
Oct 11 02:49:48 compute-0 ceph-mon[191930]: pgmap v2166: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s wr, 0 op/s
Oct 11 02:49:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2167: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:49:50 compute-0 nova_compute[356901]: 2025-10-11 02:49:50.113 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:50 compute-0 nova_compute[356901]: 2025-10-11 02:49:50.408 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:50 compute-0 ceph-mon[191930]: pgmap v2167: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:49:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2168: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:49:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:49:52 compute-0 ceph-mon[191930]: pgmap v2168: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:49:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2169: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:49:54 compute-0 ceph-mon[191930]: pgmap v2169: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:49:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:49:54.881 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:49:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:49:54.882 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:49:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:49:54.882 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
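
These three lines are one complete pass of the metadata agent's child-process check as traced by oslo.concurrency: the "inner" wrapper acquires the named lock after a 0.001s wait, runs the monitor body, and releases the lock 0.001s later. That trace is what lockutils.synchronized emits when debug logging is enabled; a self-contained sketch of the pattern, with a placeholder body rather than Neutron's actual monitor code:

from oslo_concurrency import lockutils

@lockutils.synchronized('_check_child_processes')
def check_child_processes():
    # Placeholder: Neutron's ProcessMonitor scans its child processes
    # here while holding the lock; the wrapper then releases it.
    pass

check_child_processes()  # produces acquire/release debug lines like the ones above
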
Oct 11 02:49:55 compute-0 nova_compute[356901]: 2025-10-11 02:49:55.117 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2170: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:49:55 compute-0 podman[472656]: 2025-10-11 02:49:55.249765509 +0000 UTC m=+0.114674023 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:49:55 compute-0 podman[472655]: 2025-10-11 02:49:55.256034917 +0000 UTC m=+0.127985088 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, distribution-scope=public, managed_by=edpm_ansible, name=ubi9-minimal, architecture=x86_64, config_id=edpm, url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., build-date=2025-08-20T13:12:41, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.buildah.version=1.33.7, container_name=openstack_network_exporter, vcs-type=git, io.openshift.expose-services=, version=9.6, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, maintainer=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, com.redhat.component=ubi9-minimal-container)
Oct 11 02:49:55 compute-0 podman[472654]: 2025-10-11 02:49:55.285740419 +0000 UTC m=+0.160146469 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_managed=true, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3)
Oct 11 02:49:55 compute-0 nova_compute[356901]: 2025-10-11 02:49:55.411 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:49:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:49:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:49:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:49:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:49:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:49:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:49:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:49:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:49:56
Oct 11 02:49:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:49:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:49:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['backups', '.rgw.root', '.mgr', 'cephfs.cephfs.meta', 'volumes', 'default.rgw.meta', 'images', 'default.rgw.control', 'cephfs.cephfs.data', 'vms', 'default.rgw.log']
Oct 11 02:49:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
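
The balancer pass above ran in upmap mode with a misplaced-PG ceiling of 0.05 and prepared 0 of up to 10 changes: with all 321 PGs active+clean and evenly mapped across the listed pools, there was nothing to optimize. The ceiling in PG terms is plain arithmetic on the logged figures (treating the per-pass cap of 10 as the mgr's default optimizer limit is an assumption):

# Arithmetic on the figures logged above.
total_pgs = 321          # "321 pgs" from the pgmap lines
max_misplaced = 0.05     # "max misplaced 0.050000"
print(f"misplaced budget: {total_pgs * max_misplaced:.2f} PGs")  # ~16 PGs
# "prepared 0/10 changes": at most 10 upmap changes may be queued per pass
# (assumed default cap); none were queued because the mapping is already even.
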
Oct 11 02:49:56 compute-0 ceph-mon[191930]: pgmap v2170: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:49:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2171: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:49:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:49:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:49:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:49:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:49:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:49:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:49:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:49:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:49:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:49:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:49:58 compute-0 ceph-mon[191930]: pgmap v2171: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:49:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2172: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:49:59 compute-0 podman[157119]: time="2025-10-11T02:49:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:49:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:49:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:49:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:49:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9544 "" "Go-http-client/1.1"
Oct 11 02:50:00 compute-0 nova_compute[356901]: 2025-10-11 02:50:00.122 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:00 compute-0 nova_compute[356901]: 2025-10-11 02:50:00.413 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:00 compute-0 ceph-mon[191930]: pgmap v2172: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2173: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:50:01 compute-0 openstack_network_exporter[374316]: ERROR   02:50:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:50:01 compute-0 openstack_network_exporter[374316]: ERROR   02:50:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:50:01 compute-0 openstack_network_exporter[374316]: ERROR   02:50:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:50:01 compute-0 openstack_network_exporter[374316]: ERROR   02:50:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:50:01 compute-0 openstack_network_exporter[374316]: ERROR   02:50:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
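
The exporter errors above are expected noise on a compute-only node: openstack-network-exporter issues ovs-appctl style RPCs over unix control sockets, and here it cannot find an ovsdb-server control socket under the paths it searches, while ovn-northd does not run on this host at all (it is a control-plane daemon). The dpif-netdev calls fail likewise, most likely because the node uses the kernel datapath rather than a userspace (netdev) one. A quick check of which control sockets are actually present, with the conventional socket directories assumed rather than taken from this log:

import glob

# Conventional OVS/OVN control-socket locations (assumed defaults).
for pattern in ("/run/openvswitch/*.ctl", "/run/ovn/*.ctl"):
    found = glob.glob(pattern)
    print(pattern, "->", found if found else "none")
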
Oct 11 02:50:02 compute-0 ceph-mon[191930]: pgmap v2173: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2174: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:03 compute-0 podman[472713]: 2025-10-11 02:50:03.254702328 +0000 UTC m=+0.138831874 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, distribution-scope=public, config_id=edpm, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, com.redhat.component=ubi9-container, io.buildah.version=1.29.0, io.openshift.expose-services=, architecture=x86_64, container_name=kepler, io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, summary=Provides the latest release of Red Hat Universal Base Image 9., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, version=9.4, build-date=2024-09-18T21:23:30, maintainer=Red Hat, Inc., managed_by=edpm_ansible, name=ubi9, release=1214.1726694543)
Oct 11 02:50:04 compute-0 nova_compute[356901]: 2025-10-11 02:50:04.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:50:04 compute-0 ceph-mon[191930]: pgmap v2174: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:05 compute-0 nova_compute[356901]: 2025-10-11 02:50:05.127 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2175: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:05 compute-0 nova_compute[356901]: 2025-10-11 02:50:05.416 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:50:06 compute-0 ceph-mon[191930]: pgmap v2175: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2176: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0020696231767272253 of space, bias 1.0, pg target 0.6208869530181677 quantized to 32 (current 32)
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00125203744627857 of space, bias 1.0, pg target 0.375611233883571 quantized to 32 (current 32)
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:50:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
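
Each pg_autoscaler line above pairs a pool's share of raw capacity with a PG target, and the logged targets are exactly capacity_ratio x bias x (target PGs per OSD x OSD count). Neither the per-OSD target nor the OSD count appears in this log; assuming the stock mon_target_pg_per_osd of 100 and 3 OSDs (a combined factor of 300) reproduces every figure:

import math

TARGET_PG_PER_OSD = 100   # assumed Ceph default; not shown in this log
N_OSDS = 3                # assumed from the 60 GiB cluster; not shown either

# (capacity ratio, bias, logged pg target) copied verbatim from the lines above
pools = {
    ".mgr":               (7.185749983720779e-06,  1.0, 0.0021557249951162337),
    "vms":                (0.0020696231767272253,  1.0, 0.6208869530181677),
    "images":             (0.00125203744627857,    1.0, 0.375611233883571),
    "cephfs.cephfs.meta": (5.087256625643029e-07,  4.0, 0.0006104707950771635),
    "default.rgw.meta":   (1.2718141564107572e-07, 4.0, 0.00015261769876929088),
}
for name, (ratio, bias, logged) in pools.items():
    target = ratio * bias * TARGET_PG_PER_OSD * N_OSDS
    assert math.isclose(target, logged)
    print(f"{name}: raw pg target {target:.6g}")
# Ceph then rounds the raw target to a power of two and applies pg_num_min and
# hysteresis, which yields the "quantized to N (current M)" part of each line.
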
Oct 11 02:50:08 compute-0 nova_compute[356901]: 2025-10-11 02:50:08.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:50:08 compute-0 ceph-mon[191930]: pgmap v2176: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2177: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:09 compute-0 podman[472734]: 2025-10-11 02:50:09.205163793 +0000 UTC m=+0.096182246 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:50:09 compute-0 podman[472736]: 2025-10-11 02:50:09.2221423 +0000 UTC m=+0.100966466 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.vendor=CentOS)
Oct 11 02:50:09 compute-0 podman[472740]: 2025-10-11 02:50:09.222891949 +0000 UTC m=+0.083809870 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']})
Oct 11 02:50:09 compute-0 podman[472735]: 2025-10-11 02:50:09.30538414 +0000 UTC m=+0.182429623 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:50:10 compute-0 nova_compute[356901]: 2025-10-11 02:50:10.131 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:10 compute-0 nova_compute[356901]: 2025-10-11 02:50:10.420 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:10 compute-0 ceph-mon[191930]: pgmap v2177: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2178: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:50:12 compute-0 ceph-mon[191930]: pgmap v2178: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2179: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:14 compute-0 nova_compute[356901]: 2025-10-11 02:50:14.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:50:14 compute-0 ceph-mon[191930]: pgmap v2179: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2180: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:15 compute-0 nova_compute[356901]: 2025-10-11 02:50:15.138 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:15 compute-0 nova_compute[356901]: 2025-10-11 02:50:15.424 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:15 compute-0 nova_compute[356901]: 2025-10-11 02:50:15.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:50:15 compute-0 nova_compute[356901]: 2025-10-11 02:50:15.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:50:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:50:16 compute-0 nova_compute[356901]: 2025-10-11 02:50:16.899 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:50:17 compute-0 ceph-mon[191930]: pgmap v2180: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2181: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:17 compute-0 podman[472816]: 2025-10-11 02:50:17.270078207 +0000 UTC m=+0.153465450 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=multipathd, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3)
Oct 11 02:50:17 compute-0 podman[472817]: 2025-10-11 02:50:17.281110243 +0000 UTC m=+0.158312682 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, config_id=iscsid, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid)
Oct 11 02:50:18 compute-0 nova_compute[356901]: 2025-10-11 02:50:18.894 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:50:19 compute-0 ceph-mon[191930]: pgmap v2181: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2182: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:20 compute-0 nova_compute[356901]: 2025-10-11 02:50:20.144 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:20 compute-0 nova_compute[356901]: 2025-10-11 02:50:20.427 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:20 compute-0 nova_compute[356901]: 2025-10-11 02:50:20.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:50:20 compute-0 nova_compute[356901]: 2025-10-11 02:50:20.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:50:20 compute-0 nova_compute[356901]: 2025-10-11 02:50:20.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:50:21 compute-0 ceph-mon[191930]: pgmap v2182: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2183: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:50:21 compute-0 nova_compute[356901]: 2025-10-11 02:50:21.595 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:50:21 compute-0 nova_compute[356901]: 2025-10-11 02:50:21.598 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:50:21 compute-0 nova_compute[356901]: 2025-10-11 02:50:21.599 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:50:21 compute-0 nova_compute[356901]: 2025-10-11 02:50:21.600 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:50:22 compute-0 nova_compute[356901]: 2025-10-11 02:50:22.992 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:50:23 compute-0 nova_compute[356901]: 2025-10-11 02:50:23.010 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:50:23 compute-0 nova_compute[356901]: 2025-10-11 02:50:23.011 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
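
The cache refresh above logs the instance's full network_info: a single OVN-bound OVS port on the "private" network with a fixed address and one floating IP. Walking that structure is straightforward; the sketch below trims the logged JSON to the fields it touches (values copied verbatim from the log, shape as logged rather than a formal Nova contract):

# Trimmed copy of the network_info logged above.
network_info = [{
    "id": "64dfc81b-528a-4adc-9787-66719d2f9f93",
    "address": "fa:16:3e:cc:aa:96",
    "network": {
        "label": "private",
        "subnets": [{
            "cidr": "192.168.0.0/24",
            "ips": [{
                "address": "192.168.0.236",
                "floating_ips": [{"address": "192.168.122.201"}],
            }],
        }],
    },
}]

for vif in network_info:
    for subnet in vif["network"]["subnets"]:
        for ip in subnet["ips"]:
            floats = [f["address"] for f in ip["floating_ips"]]
            print(vif["network"]["label"], ip["address"], "->", floats)
# prints: private 192.168.0.236 -> ['192.168.122.201']
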
Oct 11 02:50:23 compute-0 nova_compute[356901]: 2025-10-11 02:50:23.012 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:50:23 compute-0 nova_compute[356901]: 2025-10-11 02:50:23.033 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:50:23 compute-0 nova_compute[356901]: 2025-10-11 02:50:23.034 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:50:23 compute-0 nova_compute[356901]: 2025-10-11 02:50:23.034 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:50:23 compute-0 nova_compute[356901]: 2025-10-11 02:50:23.035 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:50:23 compute-0 nova_compute[356901]: 2025-10-11 02:50:23.035 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:50:23 compute-0 ceph-mon[191930]: pgmap v2183: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2184: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:23 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:50:23 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2597508248' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:50:23 compute-0 nova_compute[356901]: 2025-10-11 02:50:23.553 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.518s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:50:23 compute-0 nova_compute[356901]: 2025-10-11 02:50:23.666 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:50:23 compute-0 nova_compute[356901]: 2025-10-11 02:50:23.667 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:50:23 compute-0 nova_compute[356901]: 2025-10-11 02:50:23.673 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:50:23 compute-0 nova_compute[356901]: 2025-10-11 02:50:23.674 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:50:23 compute-0 nova_compute[356901]: 2025-10-11 02:50:23.680 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:50:23 compute-0 nova_compute[356901]: 2025-10-11 02:50:23.680 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:50:23 compute-0 nova_compute[356901]: 2025-10-11 02:50:23.681 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:50:24 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2597508248' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:50:24 compute-0 nova_compute[356901]: 2025-10-11 02:50:24.136 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:50:24 compute-0 nova_compute[356901]: 2025-10-11 02:50:24.138 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3246MB free_disk=59.86412811279297GB free_vcpus=5 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:50:24 compute-0 nova_compute[356901]: 2025-10-11 02:50:24.138 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:50:24 compute-0 nova_compute[356901]: 2025-10-11 02:50:24.139 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:50:24 compute-0 nova_compute[356901]: 2025-10-11 02:50:24.253 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:50:24 compute-0 nova_compute[356901]: 2025-10-11 02:50:24.254 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 8422017b-c868-4ba2-ab1f-61d3668ca145 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:50:24 compute-0 nova_compute[356901]: 2025-10-11 02:50:24.256 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance f98d09d7-6aa0-4405-bfa0-be1f78d3911f actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:50:24 compute-0 nova_compute[356901]: 2025-10-11 02:50:24.257 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 3 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:50:24 compute-0 nova_compute[356901]: 2025-10-11 02:50:24.257 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1280MB phys_disk=59GB used_disk=4GB total_vcpus=8 used_vcpus=3 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:50:24 compute-0 nova_compute[356901]: 2025-10-11 02:50:24.366 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:50:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:50:24 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/399676102' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:50:24 compute-0 nova_compute[356901]: 2025-10-11 02:50:24.857 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.491s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:50:24 compute-0 nova_compute[356901]: 2025-10-11 02:50:24.873 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:50:24 compute-0 nova_compute[356901]: 2025-10-11 02:50:24.899 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:50:24 compute-0 nova_compute[356901]: 2025-10-11 02:50:24.902 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:50:24 compute-0 nova_compute[356901]: 2025-10-11 02:50:24.903 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.765s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
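The lines above are one full pass of nova's resource tracker: the "compute_resources" lock is acquired, per-instance placement allocations are reconciled, the final view is reported (8 vcpus, 3 used; with the VCPU allocation_ratio of 4.0 reported to placement, those 8 cores advertise 32 schedulable VCPUs), Ceph-backed disk capacity is probed with `ceph df`, and the lock is released after 0.765s. A minimal sketch of that pattern, reusing the exact `ceph df` invocation from the log; the function shape and byte math are illustrative assumptions, not nova's actual code:

import json
import subprocess

from oslo_concurrency import lockutils


def update_available_resource():
    # Serialize with other resource-tracker callers, as the log's
    # "Lock 'compute_resources' acquired/released" pair shows.
    with lockutils.lock("compute_resources"):
        # Same command logged by oslo_concurrency.processutils above.
        out = subprocess.check_output([
            "ceph", "df", "--format=json",
            "--id", "openstack", "--conf", "/etc/ceph/ceph.conf",
        ])
        stats = json.loads(out)["stats"]
        # Derive free capacity in GiB from the cluster-wide byte counters.
        return stats["total_avail_bytes"] // 1024 ** 3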
Oct 11 02:50:25 compute-0 ceph-mon[191930]: pgmap v2184: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:25 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/399676102' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:50:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2185: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:25 compute-0 nova_compute[356901]: 2025-10-11 02:50:25.150 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:25 compute-0 nova_compute[356901]: 2025-10-11 02:50:25.435 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:25 compute-0 nova_compute[356901]: 2025-10-11 02:50:25.789 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:50:26 compute-0 ceph-mon[191930]: pgmap v2185: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:26 compute-0 podman[472901]: 2025-10-11 02:50:26.236219638 +0000 UTC m=+0.107504168 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:50:26 compute-0 podman[472900]: 2025-10-11 02:50:26.243616034 +0000 UTC m=+0.121614205 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=openstack_network_exporter, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.component=ubi9-minimal-container, io.openshift.expose-services=, distribution-scope=public, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, build-date=2025-08-20T13:12:41, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., io.openshift.tags=minimal rhel9, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, url=https://catalog.redhat.com/en/search?searchType=containers, vcs-type=git, config_id=edpm, managed_by=edpm_ansible, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, architecture=x86_64, name=ubi9-minimal, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, version=9.6, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:50:26 compute-0 podman[472899]: 2025-10-11 02:50:26.282472034 +0000 UTC m=+0.164033821 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, config_id=edpm, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
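The three podman health_status events above are the periodic healthchecks declared in each container's config_data ('test': '/openstack/healthcheck ...') coming back healthy with a failing streak of 0. The same probes can be run on demand; container names are taken from the log:

import subprocess

for name in ("node_exporter", "openstack_network_exporter",
             "ceilometer_agent_ipmi"):
    # podman healthcheck run exits 0 when healthy, 1 when the check fails.
    result = subprocess.run(["podman", "healthcheck", "run", name])
    print(name, "->", "healthy" if result.returncode == 0 else "unhealthy")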
Oct 11 02:50:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:50:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:50:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:50:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:50:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:50:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:50:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:50:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2186: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:50:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1775601961' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:50:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:50:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1775601961' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:50:28 compute-0 ceph-mon[191930]: pgmap v2186: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1775601961' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:50:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1775601961' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:50:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2187: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:29 compute-0 podman[157119]: time="2025-10-11T02:50:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:50:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:50:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:50:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:50:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9558 "" "Go-http-client/1.1"
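The two HTTP lines above show a Go client (most likely the podman exporter that polls on :9882, given its CONTAINER_HOST setting later in the log) hitting podman's libpod REST API over its Unix socket, first listing all containers and then sampling stats. A hedged equivalent, assuming the default socket path /run/podman/podman.sock (the "d" host is just the placeholder curl needs for socket requests):

import json
import subprocess

out = subprocess.check_output([
    "curl", "-s", "--unix-socket", "/run/podman/podman.sock",
    "http://d/v4.9.3/libpod/containers/json?all=true",
])
for ctr in json.loads(out):
    print(ctr["Names"], ctr["State"])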
Oct 11 02:50:30 compute-0 nova_compute[356901]: 2025-10-11 02:50:30.156 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:30 compute-0 ceph-mon[191930]: pgmap v2187: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:30 compute-0 nova_compute[356901]: 2025-10-11 02:50:30.443 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:30 compute-0 nova_compute[356901]: 2025-10-11 02:50:30.893 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:50:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2188: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:50:31 compute-0 openstack_network_exporter[374316]: ERROR   02:50:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:50:31 compute-0 openstack_network_exporter[374316]: ERROR   02:50:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:50:31 compute-0 openstack_network_exporter[374316]: ERROR   02:50:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:50:31 compute-0 openstack_network_exporter[374316]: ERROR   02:50:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:50:31 compute-0 openstack_network_exporter[374316]: ERROR   02:50:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
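The exporter errors above mean openstack_network_exporter found no control sockets for ovn-northd or ovsdb-server and no userspace (dpif-netdev) datapath to query; on a node that runs only ovn-controller with the kernel datapath, this is typically expected noise rather than a fault. A quick check for the sockets it is looking for, with the conventional runtime directories assumed:

from pathlib import Path

for rundir in ("/var/run/ovn", "/run/openvswitch"):
    ctls = sorted(Path(rundir).glob("*.ctl"))
    print(rundir, "->", [p.name for p in ctls] or "no control sockets")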
Oct 11 02:50:32 compute-0 ceph-mon[191930]: pgmap v2188: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2189: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:34 compute-0 podman[472957]: 2025-10-11 02:50:34.201763064 +0000 UTC m=+0.098421381 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.component=ubi9-container, vcs-type=git, config_id=edpm, container_name=kepler, io.openshift.tags=base rhel9, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, distribution-scope=public, summary=Provides the latest release of Red Hat Universal Base Image 9., build-date=2024-09-18T21:23:30, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.buildah.version=1.29.0, name=ubi9, managed_by=edpm_ansible, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, architecture=x86_64, release=1214.1726694543, version=9.4, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release-0.7.12=)
Oct 11 02:50:34 compute-0 ceph-mon[191930]: pgmap v2189: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2190: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:35 compute-0 nova_compute[356901]: 2025-10-11 02:50:35.161 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:35 compute-0 nova_compute[356901]: 2025-10-11 02:50:35.451 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:36 compute-0 ceph-mon[191930]: pgmap v2190: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:50:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2191: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:38 compute-0 ceph-mon[191930]: pgmap v2191: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2192: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:40 compute-0 nova_compute[356901]: 2025-10-11 02:50:40.166 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:40 compute-0 podman[472977]: 2025-10-11 02:50:40.212529044 +0000 UTC m=+0.103026648 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:50:40 compute-0 podman[472979]: 2025-10-11 02:50:40.213108139 +0000 UTC m=+0.091512382 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:50:40 compute-0 podman[472985]: 2025-10-11 02:50:40.225956782 +0000 UTC m=+0.090376188 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_metadata_agent, managed_by=edpm_ansible)
Oct 11 02:50:40 compute-0 podman[472978]: 2025-10-11 02:50:40.276963436 +0000 UTC m=+0.163174823 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:50:40 compute-0 ceph-mon[191930]: pgmap v2192: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:40 compute-0 nova_compute[356901]: 2025-10-11 02:50:40.454 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2193: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:50:42 compute-0 ceph-mon[191930]: pgmap v2193: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2194: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:43 compute-0 sudo[473055]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:50:43 compute-0 sudo[473055]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:43 compute-0 sudo[473055]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:43 compute-0 sudo[473080]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:50:43 compute-0 sudo[473080]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:43 compute-0 sudo[473080]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:43 compute-0 sudo[473105]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:50:43 compute-0 sudo[473105]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:43 compute-0 sudo[473105]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:43 compute-0 sudo[473130]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ls
Oct 11 02:50:43 compute-0 sudo[473130]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:44 compute-0 ceph-mon[191930]: pgmap v2194: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:44 compute-0 podman[473225]: 2025-10-11 02:50:44.502451375 +0000 UTC m=+0.132467182 container exec ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:50:44 compute-0 podman[473225]: 2025-10-11 02:50:44.603589265 +0000 UTC m=+0.233605012 container exec_died ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, OSD_FLAVOR=default)
Oct 11 02:50:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2195: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:45 compute-0 nova_compute[356901]: 2025-10-11 02:50:45.170 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:45 compute-0 nova_compute[356901]: 2025-10-11 02:50:45.457 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:45 compute-0 sudo[473130]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:50:45 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:50:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:50:45 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:50:45 compute-0 sudo[473377]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:50:45 compute-0 sudo[473377]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:45 compute-0 sudo[473377]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:45 compute-0 sudo[473402]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:50:45 compute-0 sudo[473402]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:45 compute-0 sudo[473402]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:45 compute-0 sudo[473427]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:50:45 compute-0 sudo[473427]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:45 compute-0 sudo[473427]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:46 compute-0 sudo[473452]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:50:46 compute-0 sudo[473452]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:50:46 compute-0 ceph-mon[191930]: pgmap v2195: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:46 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:50:46 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:50:46 compute-0 sudo[473452]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:50:46 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:50:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:50:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:50:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:50:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:50:46 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 64e12fa3-3dfd-4f7b-9fb8-ec0ca01cd491 does not exist
Oct 11 02:50:46 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 3f2265d3-11d1-4130-8a30-9c36b8921cfc does not exist
Oct 11 02:50:46 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 32664c50-0ce3-4e1d-a6ad-9f013c07b34f does not exist
Oct 11 02:50:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:50:46 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:50:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:50:46 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:50:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:50:46 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
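The burst of mon_command dispatches above is cephadm's mgr module preparing for OSD work: regenerating a minimal ceph.conf, fetching the client.admin and client.bootstrap-osd keys, and checking the OSD tree for destroyed entries. These queries can be reproduced from any host with an admin keyring (assumed here); plain ceph CLI equivalents of the dispatched prefixes:

import subprocess

for cmd in (
    ["ceph", "config", "generate-minimal-conf"],
    ["ceph", "auth", "get", "client.bootstrap-osd"],
    ["ceph", "osd", "tree", "destroyed", "--format", "json"],
):
    subprocess.run(cmd, check=True)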
Oct 11 02:50:46 compute-0 sudo[473507]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:50:46 compute-0 sudo[473507]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:46 compute-0 sudo[473507]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:47 compute-0 sudo[473532]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:50:47 compute-0 sudo[473532]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:47 compute-0 sudo[473532]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:47 compute-0 sudo[473557]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:50:47 compute-0 sudo[473557]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2196: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:47 compute-0 sudo[473557]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:47 compute-0 sudo[473582]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:50:47 compute-0 sudo[473582]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:50:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:50:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:50:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:50:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:50:47 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:50:47 compute-0 podman[473646]: 2025-10-11 02:50:47.795510413 +0000 UTC m=+0.093868928 container create aa113f9cb87de6771c497927feb36450f0fbfb82a1bb6130ecf1a54d1b65f3de (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_blackburn, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef)
Oct 11 02:50:47 compute-0 systemd[1]: Started libpod-conmon-aa113f9cb87de6771c497927feb36450f0fbfb82a1bb6130ecf1a54d1b65f3de.scope.
Oct 11 02:50:47 compute-0 podman[473646]: 2025-10-11 02:50:47.768722617 +0000 UTC m=+0.067081172 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:50:47 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:50:47 compute-0 podman[473646]: 2025-10-11 02:50:47.919289424 +0000 UTC m=+0.217647949 container init aa113f9cb87de6771c497927feb36450f0fbfb82a1bb6130ecf1a54d1b65f3de (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_blackburn, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, ceph=True, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 02:50:47 compute-0 podman[473646]: 2025-10-11 02:50:47.929569699 +0000 UTC m=+0.227928194 container start aa113f9cb87de6771c497927feb36450f0fbfb82a1bb6130ecf1a54d1b65f3de (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_blackburn, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef)
Oct 11 02:50:47 compute-0 podman[473646]: 2025-10-11 02:50:47.935642286 +0000 UTC m=+0.234000811 container attach aa113f9cb87de6771c497927feb36450f0fbfb82a1bb6130ecf1a54d1b65f3de (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_blackburn, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 02:50:47 compute-0 festive_blackburn[473668]: 167 167
Oct 11 02:50:47 compute-0 systemd[1]: libpod-aa113f9cb87de6771c497927feb36450f0fbfb82a1bb6130ecf1a54d1b65f3de.scope: Deactivated successfully.
Oct 11 02:50:47 compute-0 conmon[473668]: conmon aa113f9cb87de6771c49 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-aa113f9cb87de6771c497927feb36450f0fbfb82a1bb6130ecf1a54d1b65f3de.scope/container/memory.events
Oct 11 02:50:47 compute-0 podman[473646]: 2025-10-11 02:50:47.941935045 +0000 UTC m=+0.240293560 container died aa113f9cb87de6771c497927feb36450f0fbfb82a1bb6130ecf1a54d1b65f3de (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_blackburn, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 02:50:47 compute-0 podman[473659]: 2025-10-11 02:50:47.956191247 +0000 UTC m=+0.100327376 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_id=multipathd, container_name=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']})
Oct 11 02:50:47 compute-0 systemd[1]: var-lib-containers-storage-overlay-430b1e0c9860e3ffe9b184cf0f195f22dc82c9b32a7824506d11fe0d20c5429a-merged.mount: Deactivated successfully.
Oct 11 02:50:48 compute-0 podman[473646]: 2025-10-11 02:50:48.006803431 +0000 UTC m=+0.305161946 container remove aa113f9cb87de6771c497927feb36450f0fbfb82a1bb6130ecf1a54d1b65f3de (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=festive_blackburn, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:50:48 compute-0 podman[473662]: 2025-10-11 02:50:48.013530154 +0000 UTC m=+0.141327650 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=iscsid, container_name=iscsid, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:50:48 compute-0 systemd[1]: libpod-conmon-aa113f9cb87de6771c497927feb36450f0fbfb82a1bb6130ecf1a54d1b65f3de.scope: Deactivated successfully.
Oct 11 02:50:48 compute-0 podman[473720]: 2025-10-11 02:50:48.250856945 +0000 UTC m=+0.065185196 container create a740dd98ed5fa6911b39bad9dd269af44c0015a20d332fa6a81ef9eb145861f4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_galois, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 02:50:48 compute-0 systemd[1]: Started libpod-conmon-a740dd98ed5fa6911b39bad9dd269af44c0015a20d332fa6a81ef9eb145861f4.scope.
Oct 11 02:50:48 compute-0 podman[473720]: 2025-10-11 02:50:48.22801979 +0000 UTC m=+0.042348071 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:50:48 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:50:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cdec60b8c613d85ad165a2f4a335d8c7c6b4240adf064793639d1bd9175b316d/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:50:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cdec60b8c613d85ad165a2f4a335d8c7c6b4240adf064793639d1bd9175b316d/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:50:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cdec60b8c613d85ad165a2f4a335d8c7c6b4240adf064793639d1bd9175b316d/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:50:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cdec60b8c613d85ad165a2f4a335d8c7c6b4240adf064793639d1bd9175b316d/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:50:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cdec60b8c613d85ad165a2f4a335d8c7c6b4240adf064793639d1bd9175b316d/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:50:48 compute-0 podman[473720]: 2025-10-11 02:50:48.385300191 +0000 UTC m=+0.199628502 container init a740dd98ed5fa6911b39bad9dd269af44c0015a20d332fa6a81ef9eb145861f4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_galois, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:50:48 compute-0 podman[473720]: 2025-10-11 02:50:48.408974379 +0000 UTC m=+0.223302640 container start a740dd98ed5fa6911b39bad9dd269af44c0015a20d332fa6a81ef9eb145861f4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_galois, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef)
Oct 11 02:50:48 compute-0 podman[473720]: 2025-10-11 02:50:48.415777982 +0000 UTC m=+0.230106433 container attach a740dd98ed5fa6911b39bad9dd269af44c0015a20d332fa6a81ef9eb145861f4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_galois, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:50:48 compute-0 ceph-mon[191930]: pgmap v2196: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2197: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:49 compute-0 sharp_galois[473736]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:50:49 compute-0 sharp_galois[473736]: --> relative data size: 1.0
Oct 11 02:50:49 compute-0 sharp_galois[473736]: --> All data devices are unavailable
Oct 11 02:50:49 compute-0 systemd[1]: libpod-a740dd98ed5fa6911b39bad9dd269af44c0015a20d332fa6a81ef9eb145861f4.scope: Deactivated successfully.
Oct 11 02:50:49 compute-0 systemd[1]: libpod-a740dd98ed5fa6911b39bad9dd269af44c0015a20d332fa6a81ef9eb145861f4.scope: Consumed 1.339s CPU time.
Oct 11 02:50:49 compute-0 podman[473720]: 2025-10-11 02:50:49.826881236 +0000 UTC m=+1.641209527 container died a740dd98ed5fa6911b39bad9dd269af44c0015a20d332fa6a81ef9eb145861f4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_galois, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:50:49 compute-0 systemd[1]: var-lib-containers-storage-overlay-cdec60b8c613d85ad165a2f4a335d8c7c6b4240adf064793639d1bd9175b316d-merged.mount: Deactivated successfully.
Oct 11 02:50:49 compute-0 podman[473720]: 2025-10-11 02:50:49.944074426 +0000 UTC m=+1.758402697 container remove a740dd98ed5fa6911b39bad9dd269af44c0015a20d332fa6a81ef9eb145861f4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_galois, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507)
Oct 11 02:50:50 compute-0 systemd[1]: libpod-conmon-a740dd98ed5fa6911b39bad9dd269af44c0015a20d332fa6a81ef9eb145861f4.scope: Deactivated successfully.
Oct 11 02:50:50 compute-0 sudo[473582]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:50 compute-0 sudo[473776]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:50:50 compute-0 sudo[473776]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:50 compute-0 nova_compute[356901]: 2025-10-11 02:50:50.177 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:50 compute-0 sudo[473776]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:50 compute-0 sudo[473801]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:50:50 compute-0 sudo[473801]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:50 compute-0 sudo[473801]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:50 compute-0 nova_compute[356901]: 2025-10-11 02:50:50.460 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:50 compute-0 sudo[473826]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:50:50 compute-0 sudo[473826]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:50 compute-0 sudo[473826]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:50 compute-0 sudo[473851]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:50:50 compute-0 sudo[473851]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
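The audited sudo command above is how cephadm shells out to ceph-volume inside the Ceph container. A sketch replaying that exact invocation from Python; the cephadm path, image digest and fsid are copied verbatim from the log line, and --timeout 895 is an argument consumed by cephadm itself, not a subprocess timeout. Running it requires the same root privileges the log shows being acquired via sudo.

import json
import subprocess

FSID = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"
CEPHADM = (f"/var/lib/ceph/{FSID}/cephadm."
           "31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d")
IMAGE = ("quay.io/ceph/ceph@sha256:"
         "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")

# Same argv as the audited COMMAND= line, wrapped in sudo.
proc = subprocess.run(
    ["sudo", "/bin/python3", CEPHADM, "--image", IMAGE, "--timeout", "895",
     "ceph-volume", "--fsid", FSID, "--", "lvm", "list", "--format", "json"],
    capture_output=True, text=True, check=True)
osds = json.loads(proc.stdout)   # keyed by OSD id, as in the capture below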
Oct 11 02:50:50 compute-0 ceph-mon[191930]: pgmap v2197: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2198: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:51 compute-0 podman[473919]: 2025-10-11 02:50:51.245038519 +0000 UTC m=+0.083871817 container create 453a4afd21c4eb6e5216185669ad208bbfbac6116d588dfc22aebf761896bb23 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_euler, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default)
Oct 11 02:50:51 compute-0 podman[473919]: 2025-10-11 02:50:51.218780345 +0000 UTC m=+0.057613643 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:50:51 compute-0 systemd[1]: Started libpod-conmon-453a4afd21c4eb6e5216185669ad208bbfbac6116d588dfc22aebf761896bb23.scope.
Oct 11 02:50:51 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:50:51 compute-0 podman[473919]: 2025-10-11 02:50:51.382442993 +0000 UTC m=+0.221276331 container init 453a4afd21c4eb6e5216185669ad208bbfbac6116d588dfc22aebf761896bb23 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_euler, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 02:50:51 compute-0 podman[473919]: 2025-10-11 02:50:51.395024711 +0000 UTC m=+0.233858009 container start 453a4afd21c4eb6e5216185669ad208bbfbac6116d588dfc22aebf761896bb23 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_euler, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 02:50:51 compute-0 podman[473919]: 2025-10-11 02:50:51.399759871 +0000 UTC m=+0.238593209 container attach 453a4afd21c4eb6e5216185669ad208bbfbac6116d588dfc22aebf761896bb23 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_euler, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:50:51 compute-0 agitated_euler[473933]: 167 167
Oct 11 02:50:51 compute-0 systemd[1]: libpod-453a4afd21c4eb6e5216185669ad208bbfbac6116d588dfc22aebf761896bb23.scope: Deactivated successfully.
Oct 11 02:50:51 compute-0 podman[473919]: 2025-10-11 02:50:51.410554859 +0000 UTC m=+0.249388157 container died 453a4afd21c4eb6e5216185669ad208bbfbac6116d588dfc22aebf761896bb23 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_euler, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:50:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:50:51 compute-0 systemd[1]: var-lib-containers-storage-overlay-31b888bbca23dbafeb5ab92cd3e210760c2d2ee73f0b839fe7b2399056483f30-merged.mount: Deactivated successfully.
Oct 11 02:50:51 compute-0 podman[473919]: 2025-10-11 02:50:51.477504447 +0000 UTC m=+0.316337745 container remove 453a4afd21c4eb6e5216185669ad208bbfbac6116d588dfc22aebf761896bb23 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=agitated_euler, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:50:51 compute-0 systemd[1]: libpod-conmon-453a4afd21c4eb6e5216185669ad208bbfbac6116d588dfc22aebf761896bb23.scope: Deactivated successfully.
Oct 11 02:50:51 compute-0 podman[473958]: 2025-10-11 02:50:51.77461321 +0000 UTC m=+0.095010151 container create d3a2e8d364e758b16de20fe853add93b1e99898e478ab996441f124fbebe8dcf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=distracted_dirac, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:50:51 compute-0 podman[473958]: 2025-10-11 02:50:51.747220622 +0000 UTC m=+0.067617583 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:50:51 compute-0 systemd[1]: Started libpod-conmon-d3a2e8d364e758b16de20fe853add93b1e99898e478ab996441f124fbebe8dcf.scope.
Oct 11 02:50:51 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:50:51 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b36a86370eaaa7383c774974a427bc34c94c2c19799ff34efe681e3d3022cfe4/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:50:51 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b36a86370eaaa7383c774974a427bc34c94c2c19799ff34efe681e3d3022cfe4/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:50:51 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b36a86370eaaa7383c774974a427bc34c94c2c19799ff34efe681e3d3022cfe4/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:50:51 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b36a86370eaaa7383c774974a427bc34c94c2c19799ff34efe681e3d3022cfe4/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:50:51 compute-0 podman[473958]: 2025-10-11 02:50:51.983921244 +0000 UTC m=+0.304318175 container init d3a2e8d364e758b16de20fe853add93b1e99898e478ab996441f124fbebe8dcf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=distracted_dirac, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default)
Oct 11 02:50:51 compute-0 podman[473958]: 2025-10-11 02:50:51.998308929 +0000 UTC m=+0.318705850 container start d3a2e8d364e758b16de20fe853add93b1e99898e478ab996441f124fbebe8dcf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=distracted_dirac, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 02:50:52 compute-0 podman[473958]: 2025-10-11 02:50:52.002984156 +0000 UTC m=+0.323381077 container attach d3a2e8d364e758b16de20fe853add93b1e99898e478ab996441f124fbebe8dcf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=distracted_dirac, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default)
Oct 11 02:50:52 compute-0 ceph-mon[191930]: pgmap v2198: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:52 compute-0 distracted_dirac[473974]: {
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:     "0": [
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:         {
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "devices": [
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "/dev/loop3"
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             ],
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "lv_name": "ceph_lv0",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "lv_size": "21470642176",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "name": "ceph_lv0",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "tags": {
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.cluster_name": "ceph",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.crush_device_class": "",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.encrypted": "0",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.osd_id": "0",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.type": "block",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.vdo": "0"
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             },
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "type": "block",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "vg_name": "ceph_vg0"
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:         }
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:     ],
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:     "1": [
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:         {
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "devices": [
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "/dev/loop4"
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             ],
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "lv_name": "ceph_lv1",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "lv_size": "21470642176",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "name": "ceph_lv1",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "tags": {
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.cluster_name": "ceph",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.crush_device_class": "",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.encrypted": "0",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.osd_id": "1",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.type": "block",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.vdo": "0"
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             },
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "type": "block",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "vg_name": "ceph_vg1"
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:         }
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:     ],
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:     "2": [
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:         {
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "devices": [
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "/dev/loop5"
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             ],
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "lv_name": "ceph_lv2",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "lv_size": "21470642176",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "name": "ceph_lv2",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "tags": {
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.cluster_name": "ceph",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.crush_device_class": "",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.encrypted": "0",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.osd_id": "2",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.type": "block",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:                 "ceph.vdo": "0"
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             },
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "type": "block",
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:             "vg_name": "ceph_vg2"
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:         }
Oct 11 02:50:52 compute-0 distracted_dirac[473974]:     ]
Oct 11 02:50:52 compute-0 distracted_dirac[473974]: }
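The JSON block emitted by distracted_dirac is the output of `ceph-volume lvm list --format json`: a map of OSD id to a list of LV records. A small sketch condensing that payload to one row per OSD; `payload` stands for the captured stdout above.

import json

def summarize_lvm_list(payload: str) -> dict:
    """Map osd_id -> (lv_path, osd_fsid, devices) from ceph-volume output."""
    out = {}
    for osd_id, lvs in json.loads(payload).items():
        for lv in lvs:                      # one block LV per OSD here
            if lv.get("type") != "block":
                continue
            out[int(osd_id)] = (lv["lv_path"],
                                lv["tags"]["ceph.osd_fsid"],
                                lv["devices"])
    return out

# With the capture above this yields e.g.
# {0: ('/dev/ceph_vg0/ceph_lv0', 'a9c7940d-...', ['/dev/loop3']), ...}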
Oct 11 02:50:52 compute-0 systemd[1]: libpod-d3a2e8d364e758b16de20fe853add93b1e99898e478ab996441f124fbebe8dcf.scope: Deactivated successfully.
Oct 11 02:50:52 compute-0 podman[473958]: 2025-10-11 02:50:52.925700528 +0000 UTC m=+1.246097479 container died d3a2e8d364e758b16de20fe853add93b1e99898e478ab996441f124fbebe8dcf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=distracted_dirac, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:50:52 compute-0 systemd[1]: var-lib-containers-storage-overlay-b36a86370eaaa7383c774974a427bc34c94c2c19799ff34efe681e3d3022cfe4-merged.mount: Deactivated successfully.
Oct 11 02:50:53 compute-0 podman[473958]: 2025-10-11 02:50:53.022556281 +0000 UTC m=+1.342953211 container remove d3a2e8d364e758b16de20fe853add93b1e99898e478ab996441f124fbebe8dcf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=distracted_dirac, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.vendor=CentOS)
Oct 11 02:50:53 compute-0 systemd[1]: libpod-conmon-d3a2e8d364e758b16de20fe853add93b1e99898e478ab996441f124fbebe8dcf.scope: Deactivated successfully.
Oct 11 02:50:53 compute-0 sudo[473851]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2199: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:53 compute-0 sudo[473994]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:50:53 compute-0 sudo[473994]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:53 compute-0 sudo[473994]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:53 compute-0 sudo[474019]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:50:53 compute-0 sudo[474019]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:53 compute-0 sudo[474019]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:53 compute-0 sudo[474044]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:50:53 compute-0 sudo[474044]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:53 compute-0 sudo[474044]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:53 compute-0 sudo[474069]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:50:53 compute-0 sudo[474069]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:54 compute-0 podman[474134]: 2025-10-11 02:50:54.221856503 +0000 UTC m=+0.073111205 container create d92085b05aa296bcf7c80079cddbc90c0c2071b5b2644b028f4604eb514dbaad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_ramanujan, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, ceph=True, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 02:50:54 compute-0 podman[474134]: 2025-10-11 02:50:54.195925176 +0000 UTC m=+0.047179918 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:50:54 compute-0 systemd[1]: Started libpod-conmon-d92085b05aa296bcf7c80079cddbc90c0c2071b5b2644b028f4604eb514dbaad.scope.
Oct 11 02:50:54 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:50:54 compute-0 podman[474134]: 2025-10-11 02:50:54.367003099 +0000 UTC m=+0.218257801 container init d92085b05aa296bcf7c80079cddbc90c0c2071b5b2644b028f4604eb514dbaad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_ramanujan, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:50:54 compute-0 podman[474134]: 2025-10-11 02:50:54.38506023 +0000 UTC m=+0.236314922 container start d92085b05aa296bcf7c80079cddbc90c0c2071b5b2644b028f4604eb514dbaad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_ramanujan, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507)
Oct 11 02:50:54 compute-0 podman[474134]: 2025-10-11 02:50:54.391094302 +0000 UTC m=+0.242349004 container attach d92085b05aa296bcf7c80079cddbc90c0c2071b5b2644b028f4604eb514dbaad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_ramanujan, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 02:50:54 compute-0 suspicious_ramanujan[474149]: 167 167
Oct 11 02:50:54 compute-0 systemd[1]: libpod-d92085b05aa296bcf7c80079cddbc90c0c2071b5b2644b028f4604eb514dbaad.scope: Deactivated successfully.
Oct 11 02:50:54 compute-0 podman[474134]: 2025-10-11 02:50:54.396892195 +0000 UTC m=+0.248146877 container died d92085b05aa296bcf7c80079cddbc90c0c2071b5b2644b028f4604eb514dbaad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_ramanujan, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 02:50:54 compute-0 systemd[1]: var-lib-containers-storage-overlay-5ab4f13c35053b60fdbaa3b552f5d2cd9f2c05fd160adc5773adc457b5f9b991-merged.mount: Deactivated successfully.
Oct 11 02:50:54 compute-0 podman[474134]: 2025-10-11 02:50:54.455078732 +0000 UTC m=+0.306333404 container remove d92085b05aa296bcf7c80079cddbc90c0c2071b5b2644b028f4604eb514dbaad (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_ramanujan, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 02:50:54 compute-0 systemd[1]: libpod-conmon-d92085b05aa296bcf7c80079cddbc90c0c2071b5b2644b028f4604eb514dbaad.scope: Deactivated successfully.
Oct 11 02:50:54 compute-0 podman[474173]: 2025-10-11 02:50:54.73911667 +0000 UTC m=+0.094016989 container create 19abc1ee854e8d8594df3742a28994dfd8d19bde732757d9487c1d62a049e8ca (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_volhard, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:50:54 compute-0 podman[474173]: 2025-10-11 02:50:54.694172505 +0000 UTC m=+0.049072884 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:50:54 compute-0 ceph-mon[191930]: pgmap v2199: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:54 compute-0 systemd[1]: Started libpod-conmon-19abc1ee854e8d8594df3742a28994dfd8d19bde732757d9487c1d62a049e8ca.scope.
Oct 11 02:50:54 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:50:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:50:54.882 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:50:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:50:54.884 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:50:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:50:54.884 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:50:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f39f89da4be54e9e94ebdcd21c26e237b0cbe71c7554bfc504a5b88d620cc870/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:50:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f39f89da4be54e9e94ebdcd21c26e237b0cbe71c7554bfc504a5b88d620cc870/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:50:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f39f89da4be54e9e94ebdcd21c26e237b0cbe71c7554bfc504a5b88d620cc870/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:50:54 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f39f89da4be54e9e94ebdcd21c26e237b0cbe71c7554bfc504a5b88d620cc870/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:50:54 compute-0 podman[474173]: 2025-10-11 02:50:54.913374103 +0000 UTC m=+0.268274482 container init 19abc1ee854e8d8594df3742a28994dfd8d19bde732757d9487c1d62a049e8ca (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_volhard, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True)
Oct 11 02:50:54 compute-0 podman[474173]: 2025-10-11 02:50:54.941571777 +0000 UTC m=+0.296472076 container start 19abc1ee854e8d8594df3742a28994dfd8d19bde732757d9487c1d62a049e8ca (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_volhard, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Oct 11 02:50:54 compute-0 podman[474173]: 2025-10-11 02:50:54.946683458 +0000 UTC m=+0.301583787 container attach 19abc1ee854e8d8594df3742a28994dfd8d19bde732757d9487c1d62a049e8ca (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_volhard, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:50:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2200: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:55 compute-0 nova_compute[356901]: 2025-10-11 02:50:55.186 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:55 compute-0 nova_compute[356901]: 2025-10-11 02:50:55.462 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]: {
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:         "osd_id": 1,
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:         "type": "bluestore"
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:     },
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:         "osd_id": 2,
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:         "type": "bluestore"
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:     },
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:         "osd_id": 0,
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:         "type": "bluestore"
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]:     }
Oct 11 02:50:55 compute-0 sleepy_volhard[474189]: }
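sleepy_volhard's output is the matching `ceph-volume raw list --format json`, keyed by OSD UUID rather than OSD id. A sketch cross-checking the two views against each other; both payloads are the JSON captures shown in this log.

import json

def check_consistency(lvm_payload: str, raw_payload: str) -> bool:
    """True if every raw-list bluestore record matches its lvm-list LV."""
    by_id = {int(i): lvs[0]["tags"]["ceph.osd_fsid"]
             for i, lvs in json.loads(lvm_payload).items()}
    for osd_uuid, rec in json.loads(raw_payload).items():
        if rec["type"] != "bluestore":
            return False
        if by_id.get(rec["osd_id"]) != osd_uuid:
            return False       # uuid/id mismatch between the two views
    return True

# For the captures above: osd 0 -> a9c7940d..., 1 -> 6af45214...,
# 2 -> 8fabd243..., so the check passes.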
Oct 11 02:50:56 compute-0 systemd[1]: libpod-19abc1ee854e8d8594df3742a28994dfd8d19bde732757d9487c1d62a049e8ca.scope: Deactivated successfully.
Oct 11 02:50:56 compute-0 podman[474173]: 2025-10-11 02:50:56.007722447 +0000 UTC m=+1.362622736 container died 19abc1ee854e8d8594df3742a28994dfd8d19bde732757d9487c1d62a049e8ca (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_volhard, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default)
Oct 11 02:50:56 compute-0 systemd[1]: libpod-19abc1ee854e8d8594df3742a28994dfd8d19bde732757d9487c1d62a049e8ca.scope: Consumed 1.070s CPU time.
Oct 11 02:50:56 compute-0 systemd[1]: var-lib-containers-storage-overlay-f39f89da4be54e9e94ebdcd21c26e237b0cbe71c7554bfc504a5b88d620cc870-merged.mount: Deactivated successfully.
Oct 11 02:50:56 compute-0 podman[474173]: 2025-10-11 02:50:56.077391204 +0000 UTC m=+1.432291493 container remove 19abc1ee854e8d8594df3742a28994dfd8d19bde732757d9487c1d62a049e8ca (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sleepy_volhard, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:50:56 compute-0 systemd[1]: libpod-conmon-19abc1ee854e8d8594df3742a28994dfd8d19bde732757d9487c1d62a049e8ca.scope: Deactivated successfully.
Oct 11 02:50:56 compute-0 sudo[474069]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:50:56 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:50:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:50:56 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:50:56 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 97be3dc4-cd17-4fbd-b0c2-6acdfe0f195a does not exist
Oct 11 02:50:56 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 80ad0e2b-e847-42d3-b2c0-c8f6c8a71010 does not exist
Oct 11 02:50:56 compute-0 sudo[474234]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:50:56 compute-0 sudo[474234]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:56 compute-0 sudo[474234]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:56 compute-0 sudo[474259]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:50:56 compute-0 sudo[474259]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:50:56 compute-0 sudo[474259]: pam_unix(sudo:session): session closed for user root
Oct 11 02:50:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:50:56 compute-0 podman[474285]: 2025-10-11 02:50:56.449298653 +0000 UTC m=+0.079846306 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:50:56 compute-0 podman[474283]: 2025-10-11 02:50:56.454373189 +0000 UTC m=+0.091839210 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:50:56 compute-0 podman[474284]: 2025-10-11 02:50:56.479611815 +0000 UTC m=+0.117078376 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, name=ubi9-minimal, version=9.6, com.redhat.component=ubi9-minimal-container, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, config_id=edpm, release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., architecture=x86_64, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, vendor=Red Hat, Inc., io.openshift.tags=minimal rhel9, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, build-date=2025-08-20T13:12:41, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, io.openshift.expose-services=, maintainer=Red Hat, Inc., managed_by=edpm_ansible, url=https://catalog.redhat.com/en/search?searchType=containers, container_name=openstack_network_exporter)
Oct 11 02:50:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:50:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:50:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:50:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:50:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:50:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:50:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:50:56
Oct 11 02:50:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:50:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:50:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['volumes', 'backups', 'images', 'default.rgw.control', '.mgr', '.rgw.root', 'cephfs.cephfs.data', 'default.rgw.meta', 'vms', 'default.rgw.log', 'cephfs.cephfs.meta']
Oct 11 02:50:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
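The balancer pass above evaluated the listed pools in upmap mode and prepared 0/10 changes, i.e. the PG distribution is already within the 0.05 max-misplaced threshold. A minimal sketch, assuming a ceph CLI with an admin keyring on this host, for checking the same state interactively:

    #!/usr/bin/env python3
    import json, subprocess

    def ceph(*args):
        # generic ceph mon command with JSON output
        out = subprocess.run(["ceph", *args, "--format", "json"],
                             capture_output=True, text=True, check=True)
        return out.stdout

    status = json.loads(ceph("balancer", "status"))
    print("mode:", status.get("mode"), "active:", status.get("active"))

    # "prepared 0/10 changes" usually means the current distribution already
    # scores at (or near) optimum; `ceph balancer eval` prints that score.
    print(subprocess.run(["ceph", "balancer", "eval"],
                         capture_output=True, text=True).stdout.strip())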
Oct 11 02:50:56 compute-0 ceph-mon[191930]: pgmap v2200: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:56 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:50:56 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:50:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2201: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:50:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:50:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:50:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:50:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:50:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:50:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:50:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:50:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:50:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:50:58 compute-0 ceph-mon[191930]: pgmap v2201: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2202: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:50:59 compute-0 podman[157119]: time="2025-10-11T02:50:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:50:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:50:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:50:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:50:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9540 "" "Go-http-client/1.1"
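The two access-log lines above are HTTP calls against podman's libpod REST API over its unix socket. A sketch of the same container-list call from Python's standard library, assuming the socket path /run/podman/podman.sock (the path mounted into podman_exporter elsewhere in this log):

    #!/usr/bin/env python3
    import http.client, json, socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        """http.client over a unix socket (libpod speaks plain HTTP on it)."""
        def __init__(self, path):
            super().__init__("localhost")
            self._path = path
        def connect(self):
            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            s.connect(self._path)
            self.sock = s

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    # Same endpoint as the access-log line above: list all containers.
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    for c in json.loads(conn.getresponse().read()):
        print(c["Names"][0], c["State"])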
Oct 11 02:51:00 compute-0 nova_compute[356901]: 2025-10-11 02:51:00.191 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:00 compute-0 nova_compute[356901]: 2025-10-11 02:51:00.465 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:00 compute-0 ceph-mon[191930]: pgmap v2202: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2203: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:01 compute-0 openstack_network_exporter[374316]: ERROR   02:51:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:51:01 compute-0 openstack_network_exporter[374316]: ERROR   02:51:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:51:01 compute-0 openstack_network_exporter[374316]: ERROR   02:51:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:51:01 compute-0 openstack_network_exporter[374316]: ERROR   02:51:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:51:01 compute-0 openstack_network_exporter[374316]: ERROR   02:51:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
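The exporter locates each daemon through its <daemon>.<pid>.ctl control socket under the OVS/OVN run directories; ovn-northd and the standalone OVS DB server normally run on the control plane, not on a compute node, so these errors are expected noise here. A sketch that reproduces the discovery step, with host-side paths and glob patterns inferred from the usual socket naming and the volume mounts shown earlier in this log:

    #!/usr/bin/env python3
    import glob

    # host-side run directories, per the container volume mounts above;
    # OVS/OVN daemons create <daemon>.<pid>.ctl control sockets there
    checks = {
        "ovn-northd":   "/var/lib/openvswitch/ovn/ovn-northd.*.ctl",
        "ovsdb-server": "/var/run/openvswitch/ovsdb-server.*.ctl",
        "ovs-vswitchd": "/var/run/openvswitch/ovs-vswitchd.*.ctl",
    }
    for name, pattern in checks.items():
        hits = glob.glob(pattern)
        print(f"{name}: {hits or 'no control socket files found'}")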
Oct 11 02:51:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
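The _set_new_cache_sizes line shows the monitor splitting its memory budget between the incremental/full osdmap caches and the rocksdb block cache. A back-of-envelope check of those numbers (field meanings are read off the log, not from the mon source):

    #!/usr/bin/env python3
    # Values copied from the _set_new_cache_sizes line above
    cache_size = 1020054731  # total budget, ~0.95 GiB
    allocs = {
        "inc_alloc":  348127232,  # incremental osdmap cache
        "full_alloc": 348127232,  # full osdmap cache
        "kv_alloc":   318767104,  # rocksdb block cache; 304 MiB, matching the
                                  # "capacity: 304.00 MB" line in the rocksdb
                                  # stats dump further down
    }
    for name, v in allocs.items():
        print(f"{name}: {v / 2**20:6.1f} MiB ({v / cache_size:.1%} of budget)")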
Oct 11 02:51:02 compute-0 ceph-mon[191930]: pgmap v2203: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2204: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:04 compute-0 ceph-mon[191930]: pgmap v2204: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2205: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:05 compute-0 nova_compute[356901]: 2025-10-11 02:51:05.197 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:05 compute-0 podman[474340]: 2025-10-11 02:51:05.218031752 +0000 UTC m=+0.110042132 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., io.openshift.tags=base rhel9, architecture=x86_64, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, version=9.4, container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.29.0, managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., distribution-scope=public, release-0.7.12=, com.redhat.component=ubi9-container, config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1214.1726694543, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.expose-services=, name=ubi9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543)
Oct 11 02:51:05 compute-0 nova_compute[356901]: 2025-10-11 02:51:05.469 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:05 compute-0 nova_compute[356901]: 2025-10-11 02:51:05.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:51:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:51:06 compute-0 ceph-mon[191930]: pgmap v2205: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2206: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0020696231767272253 of space, bias 1.0, pg target 0.6208869530181677 quantized to 32 (current 32)
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00125203744627857 of space, bias 1.0, pg target 0.375611233883571 quantized to 32 (current 32)
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:51:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
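The pg_autoscaler lines above all follow the same arithmetic: pg target = space ratio * bias * a constant that works out to 300 for every pool (consistent with mon_target_pg_per_osd=100 on 3 OSDs, though that split is an assumption), then quantization to a power of two with a per-pool floor. A sketch that reproduces three of the logged values:

    #!/usr/bin/env python3
    # Inputs below are copied from the pg_autoscaler lines above.
    def pg_target(space_ratio, bias, factor=300):
        # factor of 300 inferred from the logged numbers; see note above
        return space_ratio * bias * factor

    def quantize(target, floor):
        # power-of-two quantization with a per-pool floor; the floors used
        # here (1, 16, 32) are read off the "quantized to" values above
        n = 1
        while n < target:
            n *= 2
        return max(n, floor)

    print(quantize(pg_target(7.185749983720779e-06, 1.0), 1))   # .mgr -> 1
    print(quantize(pg_target(0.0020696231767272253, 1.0), 32))  # vms -> 32
    print(quantize(pg_target(5.087256625643029e-07, 4.0), 16))  # cephfs.cephfs.meta -> 16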
Oct 11 02:51:08 compute-0 ceph-mon[191930]: pgmap v2206: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2207: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:51:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 4200.0 total, 600.0 interval
                                            Cumulative writes: 9998 writes, 45K keys, 9998 commit groups, 1.0 writes per commit group, ingest: 0.06 GB, 0.01 MB/s
                                            Cumulative WAL: 9998 writes, 9998 syncs, 1.00 writes per sync, written: 0.06 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 1335 writes, 6045 keys, 1335 commit groups, 1.0 writes per commit group, ingest: 8.71 MB, 0.01 MB/s
                                            Interval WAL: 1335 writes, 1335 syncs, 1.00 writes per sync, written: 0.01 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.1      0.1       0.0   1.0      0.0     86.9      0.63              0.24        31    0.020       0      0       0.0       0.0
                                              L6      1/0    6.39 MB   0.0      0.3     0.1      0.2       0.2      0.0       0.0   4.1    139.1    114.4      1.96              0.99        30    0.065    159K    16K       0.0       0.0
                                             Sum      1/0    6.39 MB   0.0      0.3     0.1      0.2       0.3      0.1       0.0   5.1    105.3    107.7      2.59              1.24        61    0.042    159K    16K       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   6.6    137.3    136.6      0.33              0.19        10    0.033     31K   2561       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Low      0/0    0.00 KB   0.0      0.3     0.1      0.2       0.2      0.0       0.0   0.0    139.1    114.4      1.96              0.99        30    0.065    159K    16K       0.0       0.0
                                            High      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.1      0.1       0.0   0.0      0.0     87.5      0.63              0.24        30    0.021       0      0       0.0       0.0
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0     12.0      0.00              0.00         1    0.004       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 4200.0 total, 600.0 interval
                                            Flush(GB): cumulative 0.054, interval 0.007
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.27 GB write, 0.07 MB/s write, 0.27 GB read, 0.06 MB/s read, 2.6 seconds
                                            Interval compaction: 0.04 GB write, 0.07 MB/s write, 0.04 GB read, 0.07 MB/s read, 0.3 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x55816e47f1f0#2 capacity: 304.00 MB usage: 32.37 MB table_size: 0 occupancy: 18446744073709551615 collections: 8 last_copies: 0 last_secs: 0.000306 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2076,31.19 MB,10.2611%) FilterBlock(62,452.05 KB,0.145214%) IndexBlock(62,750.14 KB,0.240973%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
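A quick cross-check of the cumulative figures in the dump above: write amplification is (flush + compaction writes) divided by ingested bytes, and the small gap to the Sum row's W-Amp of 5.1 is rounding in the dump itself:

    #!/usr/bin/env python3
    # Cumulative figures from the rocksdb dump above
    ingest_gb  = 0.06   # "Cumulative writes: ... ingest: 0.06 GB"
    flush_gb   = 0.054  # "Flush(GB): cumulative 0.054"
    compact_gb = 0.27   # "Cumulative compaction: 0.27 GB write"
    print(f"write amplification ~= {(flush_gb + compact_gb) / ingest_gb:.1f}x")  # ~5.4x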
Oct 11 02:51:10 compute-0 nova_compute[356901]: 2025-10-11 02:51:10.202 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:10 compute-0 nova_compute[356901]: 2025-10-11 02:51:10.471 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:10 compute-0 nova_compute[356901]: 2025-10-11 02:51:10.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:51:10 compute-0 ceph-mon[191930]: pgmap v2207: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2208: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:11 compute-0 podman[474362]: 2025-10-11 02:51:11.23894543 +0000 UTC m=+0.114640399 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_managed=true, container_name=ceilometer_agent_compute)
Oct 11 02:51:11 compute-0 podman[474363]: 2025-10-11 02:51:11.260169792 +0000 UTC m=+0.128823961 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=ovn_metadata_agent, org.label-schema.schema-version=1.0, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible)
Oct 11 02:51:11 compute-0 podman[474360]: 2025-10-11 02:51:11.26482852 +0000 UTC m=+0.144853074 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:51:11 compute-0 podman[474361]: 2025-10-11 02:51:11.275982437 +0000 UTC m=+0.146547939 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_id=ovn_controller, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009)
Oct 11 02:51:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:51:12 compute-0 ceph-mon[191930]: pgmap v2208: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2209: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.872 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is greater than the number of worker threads available to execute them. Therefore, one can expect polling to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.872 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.872 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.873 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.879 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.879 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '8422017b-c868-4ba2-ab1f-61d3668ca145', 'name': 'te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c', 'flavor': {'id': '6dff30d1-85df-4e9c-9163-a20ba47bb0c7', 'name': 'm1.nano', 'vcpus': 1, 'ram': 128, 'disk': 1, 'ephemeral': 0, 'swap': 0}, 'image': {'id': '2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-0000000e', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'user_id': 'f66a606299944d53a40f21e81c791d70', 'hostId': 'cea8816d446065ba50379057f72b942db7e204c60c4530591bc7d0be', 'status': 'active', 'metadata': {'metering.server_group': '44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.879 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc69220>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.882 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': 'f98d09d7-6aa0-4405-bfa0-be1f78d3911f', 'name': 'te-0512306-asg-am4iabdjybzp-yj44h76hdzhi-bejrsw3xgi4q', 'flavor': {'id': '6dff30d1-85df-4e9c-9163-a20ba47bb0c7', 'name': 'm1.nano', 'vcpus': 1, 'ram': 128, 'disk': 1, 'ephemeral': 0, 'swap': 0}, 'image': {'id': '2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-0000000f', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'user_id': 'f66a606299944d53a40f21e81c791d70', 'hostId': 'cea8816d446065ba50379057f72b942db7e204c60c4530591bc7d0be', 'status': 'active', 'metadata': {'metering.server_group': '44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.884 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.884 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.884 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.884 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.884 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.885 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:51:13.884934) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.890 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.bytes volume: 1820 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.893 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.bytes volume: 2276 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.898 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2856 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.899 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
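The network.incoming.bytes samples above (1820/2276/2856 bytes for the three discovered instances) ultimately come from libvirt's per-interface counters. A stand-in sketch, not ceilometer's actual code path, assuming libvirt-python and read access to qemu:///system:

    #!/usr/bin/env python3
    import libvirt
    from xml.etree import ElementTree

    conn = libvirt.openReadOnly("qemu:///system")
    for dom in conn.listAllDomains(libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE):
        xml = ElementTree.fromstring(dom.XMLDesc())
        for tgt in xml.findall(".//devices/interface/target"):
            dev = tgt.get("dev")
            # interfaceStats returns (rx_bytes, rx_packets, rx_errs, rx_drop,
            #                         tx_bytes, tx_packets, tx_errs, tx_drop)
            stats = dom.interfaceStats(dev)
            print(dom.name(), dev, "rx_bytes:", stats[0])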
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.900 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.900 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.900 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.900 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.901 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.901 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets volume: 31 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.901 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.packets volume: 16 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.902 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:51:13.900920) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.902 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 24 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.903 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.903 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.904 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.904 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.904 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.904 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.905 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.906 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.906 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.906 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.907 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.907 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.907 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.907 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.907 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.907 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.908 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.908 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.907 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:51:13.904932) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.908 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.908 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.908 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.908 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.908 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.909 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.909 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:51:13.907503) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.910 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:51:13.909042) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.922 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.923 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.capacity volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.937 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.938 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.capacity volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.958 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.958 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.958 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.959 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.959 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.959 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.959 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.959 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.960 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.960 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:51:13.960002) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.981 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.bytes volume: 29657600 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:13.982 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.bytes volume: 299326 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.012 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.bytes volume: 30145536 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.012 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.bytes volume: 246078 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.057 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.058 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.058 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.058 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.059 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.059 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.059 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.059 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.059 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.059 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.latency volume: 2082910661 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.060 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.latency volume: 143173838 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.060 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.latency volume: 1934915770 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.060 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.latency volume: 164304713 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.060 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:51:14.059722) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.060 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.061 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.061 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.061 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.061 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.061 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.061 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.061 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.062 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.062 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.requests volume: 1067 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.062 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.requests volume: 120 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.062 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.requests volume: 1092 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.062 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.requests volume: 107 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.062 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.063 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.063 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.063 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.063 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.063 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.063 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.063 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.064 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.064 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.064 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.usage volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.064 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.064 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.usage volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.064 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.064 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.065 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.065 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.065 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.066 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.066 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.066 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.066 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.066 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.bytes volume: 73129984 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.066 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.066 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.bytes volume: 72847360 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.067 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.067 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.067 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.067 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.067 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.068 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.068 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.068 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.068 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.068 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.068 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.latency volume: 8003595076 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.068 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.068 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.latency volume: 7938162731 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.068 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.069 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.069 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.069 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.069 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.070 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.070 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.070 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.070 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.070 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.070 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:51:14.062055) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.071 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:51:14.063986) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.073 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:51:14.066262) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.074 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:51:14.068367) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.076 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:51:14.070271) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.096 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.144 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.185 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.185 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.186 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.186 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.186 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.186 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.186 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.186 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.requests volume: 334 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.187 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.187 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.requests volume: 279 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.187 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.187 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.188 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.188 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.188 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.189 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.189 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.189 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.189 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.189 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.189 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.189 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.190 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.190 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.190 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.190 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.191 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.191 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.191 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.191 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.191 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.192 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.192 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.192 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.193 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.193 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.193 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.193 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets volume: 15 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.193 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.packets volume: 27 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.193 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.194 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.194 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.194 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.194 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.194 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.194 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.194 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.195 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.195 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.195 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.195 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.196 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.196 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.196 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.196 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.196 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.197 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.197 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.197 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.197 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.197 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.197 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.197 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.198 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.198 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.199 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.199 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.199 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.199 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.199 14 DEBUG ceilometer.polling.manager [-] Polster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.199 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.200 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.allocation volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.200 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.200 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.allocation volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.200 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.201 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.201 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.201 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.202 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.202 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.202 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.202 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.202 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.202 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.202 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.203 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.203 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
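The block above is one complete pollster cycle: discovery, a coordination check, a heartbeat update, one sample per instance, and a finish line. A minimal, self-contained sketch of that shape (all helper names here are illustrative stand-ins, not ceilometer's API; the real logic lives in ceilometer/polling/manager.py):

def discover(method):                      # "Executing discovery process ..."
    return ["8422017b", "f98d09d7", "0cc56d17"]

def needs_coordination(group):             # "Checking if we need coordination ..."
    return group is not None

def run_pollster(name, group=None):
    resources = discover("local_instances")
    if needs_coordination(group):
        pass                               # would filter resources by hashring here
    print("Pollster heartbeat update:", name)
    for r in resources:                    # one "<uuid>/<meter> volume: ..." line each
        print(f"{r}/{name} volume: 0")
    print("Finished polling pollster", name)

run_pollster("network.incoming.packets.error")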
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.203 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.203 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.203 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.203 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.204 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.204 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/cpu volume: 333440000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.204 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/cpu volume: 305750000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.204 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 62980000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.205 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
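The cpu meter is cumulative guest CPU time in nanoseconds, so the three volumes above convert directly to seconds:

for ns in (333440000000, 305750000000, 62980000000):
    print(ns / 1e9, "seconds of CPU time")   # 333.44, 305.75, 62.98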
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.205 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.205 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.205 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.205 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.205 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.205 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.bytes volume: 2250 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.205 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.bytes volume: 1620 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.206 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2412 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.206 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.206 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.206 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.207 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.207 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.207 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.207 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/memory.usage volume: 42.40625 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.207 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/memory.usage volume: 43.42578125 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.207 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.83984375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.203 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:51:14.186577) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.208 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
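memory.usage is reported in MB, and the fractional values above are consistent with libvirt's KiB counters divided by 1024 (each maps back to a whole KiB figure):

for mb in (42.40625, 43.42578125, 48.83984375):
    print(int(mb * 1024), "KiB")   # 43424, 44468, 50012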
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.208 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.208 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
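Rate pollsters such as network.outgoing.bytes.rate are skipped when discovery turns up nothing new this cycle. A sketch of that guard (the per-cycle bookkeeping here is an assumption, not ceilometer's exact structure):

def maybe_run(name, discovered, seen_this_cycle):
    new = [r for r in discovered if r not in seen_this_cycle]
    if not new:
        print(f"Skip pollster {name}, no new resources found this cycle")
        return
    print(f"polling {name} for {len(new)} resources")

maybe_run("network.outgoing.bytes.rate", ["a", "b"], {"a", "b"})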
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.209 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.209 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.210 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.211 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.211 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.211 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.212 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.212 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.213 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.214 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.214 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.215 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.215 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.216 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.216 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.217 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.218 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.218 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.219 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.219 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.220 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.220 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:51:14.189448) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.221 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:51:14.191753) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.222 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.222 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:51:14.193192) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.222 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:51:14.194784) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.223 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.223 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.223 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:51:14.196405) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.224 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:51:14.197602) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.223 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.224 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.226 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:51:14.199679) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.226 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:51:14.202440) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.227 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:51:14.203988) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.227 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:51:14.205582) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:51:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:51:14.228 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:51:14.207138) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
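Interleaved with the worker-14 lines above, process 12 stamps a timestamp per pollster as each heartbeat event arrives. Schematically (illustrative structure; the real code is _update_status in ceilometer/polling/manager.py):

from datetime import datetime, timezone

heartbeats = {}

def update_status(pollster_name):
    heartbeats[pollster_name] = datetime.now(timezone.utc)
    print("Updated heartbeat for %s (%s)"
          % (pollster_name, heartbeats[pollster_name].isoformat()))

update_status("disk.device.write.requests")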
Oct 11 02:51:14 compute-0 ceph-mon[191930]: pgmap v2209: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2210: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:15 compute-0 nova_compute[356901]: 2025-10-11 02:51:15.205 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:15 compute-0 nova_compute[356901]: 2025-10-11 02:51:15.478 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:15 compute-0 nova_compute[356901]: 2025-10-11 02:51:15.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:51:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:51:16 compute-0 nova_compute[356901]: 2025-10-11 02:51:16.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:51:16 compute-0 nova_compute[356901]: 2025-10-11 02:51:16.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
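That skip is a plain configuration guard; schematically (not nova's literal code):

class Conf:
    reclaim_instance_interval = 0   # value implied by the log line above

def _reclaim_queued_deletes(conf):
    if conf.reclaim_instance_interval <= 0:
        print("CONF.reclaim_instance_interval <= 0, skipping...")
        return
    # otherwise: reap SOFT_DELETED instances older than the interval

_reclaim_queued_deletes(Conf)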
Oct 11 02:51:16 compute-0 ceph-mon[191930]: pgmap v2210: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2211: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:18 compute-0 podman[474441]: 2025-10-11 02:51:18.231688607 +0000 UTC m=+0.110696998 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, config_id=iscsid, container_name=iscsid, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:51:18 compute-0 podman[474440]: 2025-10-11 02:51:18.255727727 +0000 UTC m=+0.139131486 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_id=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, container_name=multipathd, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 02:51:18 compute-0 nova_compute[356901]: 2025-10-11 02:51:18.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:51:18 compute-0 ceph-mon[191930]: pgmap v2211: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2212: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:19 compute-0 nova_compute[356901]: 2025-10-11 02:51:19.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:51:20 compute-0 nova_compute[356901]: 2025-10-11 02:51:20.212 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:20 compute-0 nova_compute[356901]: 2025-10-11 02:51:20.482 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:20 compute-0 ceph-mon[191930]: pgmap v2212: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2213: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:51:22 compute-0 nova_compute[356901]: 2025-10-11 02:51:22.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:51:22 compute-0 nova_compute[356901]: 2025-10-11 02:51:22.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:51:23 compute-0 ceph-mon[191930]: pgmap v2213: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:23 compute-0 nova_compute[356901]: 2025-10-11 02:51:23.119 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:51:23 compute-0 nova_compute[356901]: 2025-10-11 02:51:23.120 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:51:23 compute-0 nova_compute[356901]: 2025-10-11 02:51:23.120 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:51:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2214: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:24 compute-0 nova_compute[356901]: 2025-10-11 02:51:24.003 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Updating instance_info_cache with network_info: [{"id": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "address": "fa:16:3e:2c:af:96", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.3.53", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape00931c0-3d", "ovs_interfaceid": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:51:24 compute-0 nova_compute[356901]: 2025-10-11 02:51:24.017 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:51:24 compute-0 nova_compute[356901]: 2025-10-11 02:51:24.018 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:51:24 compute-0 nova_compute[356901]: 2025-10-11 02:51:24.018 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
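The heal task wraps each instance's refresh in a named lock, as the Acquiring/Acquired/Releasing lines show. A minimal sketch using the same oslo.concurrency primitive (requires oslo.concurrency; the fetch callback is a stand-in for the neutron query):

from oslo_concurrency import lockutils

cache = {}

def heal_info_cache(instance_uuid, fetch_nw_info):
    with lockutils.lock("refresh_cache-%s" % instance_uuid):
        cache[instance_uuid] = fetch_nw_info(instance_uuid)  # neutron query

heal_info_cache("8422017b-c868-4ba2-ab1f-61d3668ca145",
                lambda u: [{"id": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6"}])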
Oct 11 02:51:24 compute-0 nova_compute[356901]: 2025-10-11 02:51:24.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:51:24 compute-0 nova_compute[356901]: 2025-10-11 02:51:24.923 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:51:24 compute-0 nova_compute[356901]: 2025-10-11 02:51:24.924 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:51:24 compute-0 nova_compute[356901]: 2025-10-11 02:51:24.925 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:51:24 compute-0 nova_compute[356901]: 2025-10-11 02:51:24.925 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:51:24 compute-0 nova_compute[356901]: 2025-10-11 02:51:24.926 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:51:25 compute-0 ceph-mon[191930]: pgmap v2214: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2215: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:25 compute-0 nova_compute[356901]: 2025-10-11 02:51:25.216 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:51:25 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/454707184' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:51:25 compute-0 nova_compute[356901]: 2025-10-11 02:51:25.423 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.497s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
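nova sizes the RBD-backed disk pool by shelling out to ceph df, as logged above. A standalone equivalent that reads the same JSON (assumes a reachable cluster and the same client.openstack keyring):

import json, subprocess

out = subprocess.check_output(
    ["ceph", "df", "--format=json", "--id", "openstack",
     "--conf", "/etc/ceph/ceph.conf"])
stats = json.loads(out)["stats"]
print(stats["total_avail_bytes"] / 2**30, "GiB available")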
Oct 11 02:51:25 compute-0 nova_compute[356901]: 2025-10-11 02:51:25.486 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:25 compute-0 nova_compute[356901]: 2025-10-11 02:51:25.530 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:51:25 compute-0 nova_compute[356901]: 2025-10-11 02:51:25.532 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:51:25 compute-0 nova_compute[356901]: 2025-10-11 02:51:25.541 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:51:25 compute-0 nova_compute[356901]: 2025-10-11 02:51:25.542 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:51:25 compute-0 nova_compute[356901]: 2025-10-11 02:51:25.549 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:51:25 compute-0 nova_compute[356901]: 2025-10-11 02:51:25.550 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:51:25 compute-0 nova_compute[356901]: 2025-10-11 02:51:25.551 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:51:26 compute-0 nova_compute[356901]: 2025-10-11 02:51:26.019 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:51:26 compute-0 nova_compute[356901]: 2025-10-11 02:51:26.022 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3227MB free_disk=59.86412811279297GB free_vcpus=5 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:51:26 compute-0 nova_compute[356901]: 2025-10-11 02:51:26.022 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:51:26 compute-0 nova_compute[356901]: 2025-10-11 02:51:26.023 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:51:26 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/454707184' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:51:26 compute-0 nova_compute[356901]: 2025-10-11 02:51:26.155 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:51:26 compute-0 nova_compute[356901]: 2025-10-11 02:51:26.156 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 8422017b-c868-4ba2-ab1f-61d3668ca145 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:51:26 compute-0 nova_compute[356901]: 2025-10-11 02:51:26.157 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance f98d09d7-6aa0-4405-bfa0-be1f78d3911f actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:51:26 compute-0 nova_compute[356901]: 2025-10-11 02:51:26.157 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 3 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:51:26 compute-0 nova_compute[356901]: 2025-10-11 02:51:26.158 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1280MB phys_disk=59GB used_disk=4GB total_vcpus=8 used_vcpus=3 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
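The final view is consistent with the three placement allocations listed just above, with used_ram apparently including the 512 MB host memory reservation from the inventory reported below:

instances = {"0cc56d17": dict(DISK_GB=2, MEMORY_MB=512, VCPU=1),
             "8422017b": dict(DISK_GB=1, MEMORY_MB=128, VCPU=1),
             "f98d09d7": dict(DISK_GB=1, MEMORY_MB=128, VCPU=1)}
ram = sum(i["MEMORY_MB"] for i in instances.values())   # 768
print(ram + 512)                                        # 1280 MB -> used_ram
print(sum(i["DISK_GB"] for i in instances.values()))    # 4 GB    -> used_disk
print(sum(i["VCPU"] for i in instances.values()))       # 3       -> used_vcpus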
Oct 11 02:51:26 compute-0 nova_compute[356901]: 2025-10-11 02:51:26.312 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:51:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:51:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:51:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:51:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:51:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:51:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:51:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:51:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:51:26 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1334608099' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:51:26 compute-0 nova_compute[356901]: 2025-10-11 02:51:26.824 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.511s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:51:26 compute-0 nova_compute[356901]: 2025-10-11 02:51:26.837 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:51:26 compute-0 nova_compute[356901]: 2025-10-11 02:51:26.884 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
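Placement turns this inventory into schedulable capacity as (total - reserved) * allocation_ratio:

inv = {"VCPU":      dict(total=8,    reserved=0,   allocation_ratio=4.0),
       "MEMORY_MB": dict(total=7680, reserved=512, allocation_ratio=1.0),
       "DISK_GB":   dict(total=59,   reserved=1,   allocation_ratio=0.9)}
for rc, v in inv.items():
    print(rc, (v["total"] - v["reserved"]) * v["allocation_ratio"])
# VCPU 32.0, MEMORY_MB 7168.0, DISK_GB 52.2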
Oct 11 02:51:26 compute-0 nova_compute[356901]: 2025-10-11 02:51:26.887 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:51:26 compute-0 nova_compute[356901]: 2025-10-11 02:51:26.888 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.865s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:51:27 compute-0 ceph-mon[191930]: pgmap v2215: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1334608099' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:51:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2216: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:27 compute-0 podman[474526]: 2025-10-11 02:51:27.233651887 +0000 UTC m=+0.109899117 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:51:27 compute-0 podman[474524]: 2025-10-11 02:51:27.234350034 +0000 UTC m=+0.120146439 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_managed=true, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.license=GPLv2)
Oct 11 02:51:27 compute-0 podman[474525]: 2025-10-11 02:51:27.235513988 +0000 UTC m=+0.116982008 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=openstack_network_exporter, io.buildah.version=1.33.7, vendor=Red Hat, Inc., build-date=2025-08-20T13:12:41, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, release=1755695350, version=9.6, com.redhat.component=ubi9-minimal-container, maintainer=Red Hat, Inc., distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, managed_by=edpm_ansible, url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, config_id=edpm, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-type=git, io.openshift.expose-services=, io.openshift.tags=minimal rhel9, name=ubi9-minimal, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 02:51:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:51:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3714905409' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:51:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:51:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3714905409' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:51:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3714905409' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:51:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3714905409' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
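The {"prefix": ...} payloads above are the monitor command wire format; the same two calls can be issued directly with python-rados (assumes the client.openstack keyring is readable):

import json, rados

with rados.Rados(conffile="/etc/ceph/ceph.conf",
                 name="client.openstack") as cluster:
    for cmd in ({"prefix": "df", "format": "json"},
                {"prefix": "osd pool get-quota", "pool": "volumes",
                 "format": "json"}):
        ret, out, errs = cluster.mon_command(json.dumps(cmd), b"")
        print(cmd["prefix"], "->", ret, out[:80])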
Oct 11 02:51:29 compute-0 ceph-mon[191930]: pgmap v2216: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2217: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:29 compute-0 podman[157119]: time="2025-10-11T02:51:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:51:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:51:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:51:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:51:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9545 "" "Go-http-client/1.1"
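Those GET lines are the libpod REST API being polled over podman's Unix socket; the same query can be reproduced with the standard library (the socket path is podman's usual default, an assumption here):

import http.client
import socket

class UnixHTTPConnection(http.client.HTTPConnection):
    """HTTP over a Unix domain socket, enough to talk to the podman API."""
    def __init__(self, path):
        super().__init__("localhost")
        self._path = path
    def connect(self):
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s.connect(self._path)
        self.sock = s

conn = UnixHTTPConnection("/run/podman/podman.sock")   # assumed socket path
conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
print(conn.getresponse().status)                       # expect 200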
Oct 11 02:51:30 compute-0 nova_compute[356901]: 2025-10-11 02:51:30.224 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:30 compute-0 nova_compute[356901]: 2025-10-11 02:51:30.493 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:31 compute-0 ceph-mon[191930]: pgmap v2217: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2218: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:31 compute-0 openstack_network_exporter[374316]: ERROR   02:51:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:51:31 compute-0 openstack_network_exporter[374316]: ERROR   02:51:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:51:31 compute-0 openstack_network_exporter[374316]: ERROR   02:51:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:51:31 compute-0 openstack_network_exporter[374316]: ERROR   02:51:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:51:31 compute-0 openstack_network_exporter[374316]: ERROR   02:51:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:51:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:51:33 compute-0 ceph-mon[191930]: pgmap v2218: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2219: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:34 compute-0 ceph-mon[191930]: pgmap v2219: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2220: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:35 compute-0 nova_compute[356901]: 2025-10-11 02:51:35.229 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:35 compute-0 nova_compute[356901]: 2025-10-11 02:51:35.498 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:36 compute-0 ceph-mon[191930]: pgmap v2220: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:36 compute-0 podman[474584]: 2025-10-11 02:51:36.274031399 +0000 UTC m=+0.152542629 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, release-0.7.12=, summary=Provides the latest release of Red Hat Universal Base Image 9., name=ubi9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, architecture=x86_64, vendor=Red Hat, Inc., config_id=edpm, io.openshift.tags=base rhel9, io.k8s.display-name=Red Hat Universal Base Image 9, release=1214.1726694543, version=9.4, build-date=2024-09-18T21:23:30, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.component=ubi9-container, maintainer=Red Hat, Inc., container_name=kepler, distribution-scope=public, io.buildah.version=1.29.0, io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543)
Oct 11 02:51:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:51:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2221: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:38 compute-0 ceph-mon[191930]: pgmap v2221: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:38 compute-0 nova_compute[356901]: 2025-10-11 02:51:38.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_incomplete_migrations run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:51:38 compute-0 nova_compute[356901]: 2025-10-11 02:51:38.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances with incomplete migration  _cleanup_incomplete_migrations /usr/lib/python3.9/site-packages/nova/compute/manager.py:11183
Oct 11 02:51:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2222: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:40 compute-0 nova_compute[356901]: 2025-10-11 02:51:40.235 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:40 compute-0 ceph-mon[191930]: pgmap v2222: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:51:40 compute-0 nova_compute[356901]: 2025-10-11 02:51:40.502 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2223: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 12 KiB/s rd, 1 op/s
Oct 11 02:51:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:51:42 compute-0 podman[474604]: 2025-10-11 02:51:42.240341917 +0000 UTC m=+0.122310407 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:51:42 compute-0 podman[474606]: 2025-10-11 02:51:42.243598332 +0000 UTC m=+0.117432017 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.4, container_name=ceilometer_agent_compute, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251007, tcib_managed=true, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 02:51:42 compute-0 podman[474607]: 2025-10-11 02:51:42.259060559 +0000 UTC m=+0.124272403 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, tcib_managed=true, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, container_name=ovn_metadata_agent, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:51:42 compute-0 ceph-mon[191930]: pgmap v2223: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 12 KiB/s rd, 1 op/s
Oct 11 02:51:42 compute-0 podman[474605]: 2025-10-11 02:51:42.314693323 +0000 UTC m=+0.184754169 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_controller, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, container_name=ovn_controller)
Oct 11 02:51:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2224: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 31 KiB/s rd, 3 op/s
Oct 11 02:51:44 compute-0 ceph-mon[191930]: pgmap v2224: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 31 KiB/s rd, 3 op/s
Oct 11 02:51:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2225: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 170 B/s wr, 4 op/s
Oct 11 02:51:45 compute-0 nova_compute[356901]: 2025-10-11 02:51:45.241 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:45 compute-0 nova_compute[356901]: 2025-10-11 02:51:45.507 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:46 compute-0 ceph-mon[191930]: pgmap v2225: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 170 B/s wr, 4 op/s
Oct 11 02:51:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:51:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2226: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 852 B/s wr, 4 op/s
Oct 11 02:51:47 compute-0 nova_compute[356901]: 2025-10-11 02:51:47.917 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._run_pending_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:51:47 compute-0 nova_compute[356901]: 2025-10-11 02:51:47.918 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11145
Oct 11 02:51:47 compute-0 nova_compute[356901]: 2025-10-11 02:51:47.941 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] There are 0 instances to clean _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11154
Oct 11 02:51:48 compute-0 ceph-mon[191930]: pgmap v2226: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 852 B/s wr, 4 op/s
Oct 11 02:51:48 compute-0 nova_compute[356901]: 2025-10-11 02:51:48.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_expired_console_auth_tokens run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:51:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2227: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 8.6 KiB/s wr, 5 op/s
Oct 11 02:51:49 compute-0 podman[474683]: 2025-10-11 02:51:49.221165495 +0000 UTC m=+0.119563136 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=multipathd, managed_by=edpm_ansible)
Oct 11 02:51:49 compute-0 podman[474684]: 2025-10-11 02:51:49.250953312 +0000 UTC m=+0.139232614 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_id=iscsid, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=iscsid, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:51:50 compute-0 nova_compute[356901]: 2025-10-11 02:51:50.248 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:50 compute-0 ceph-mon[191930]: pgmap v2227: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 8.6 KiB/s wr, 5 op/s
Oct 11 02:51:50 compute-0 nova_compute[356901]: 2025-10-11 02:51:50.510 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2228: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 8.6 KiB/s wr, 5 op/s
Oct 11 02:51:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:51:52 compute-0 ceph-mon[191930]: pgmap v2228: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 35 KiB/s rd, 8.6 KiB/s wr, 5 op/s
Oct 11 02:51:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2229: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 22 KiB/s rd, 8.6 KiB/s wr, 3 op/s
Oct 11 02:51:54 compute-0 ceph-mon[191930]: pgmap v2229: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 22 KiB/s rd, 8.6 KiB/s wr, 3 op/s
Oct 11 02:51:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:51:54.884 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:51:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:51:54.885 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:51:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:51:54.886 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:51:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2230: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 4.0 KiB/s rd, 8.6 KiB/s wr, 1 op/s
Oct 11 02:51:55 compute-0 nova_compute[356901]: 2025-10-11 02:51:55.253 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:55 compute-0 nova_compute[356901]: 2025-10-11 02:51:55.513 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:51:56 compute-0 ceph-mon[191930]: pgmap v2230: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 4.0 KiB/s rd, 8.6 KiB/s wr, 1 op/s
Oct 11 02:51:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:51:56 compute-0 sudo[474720]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:51:56 compute-0 sudo[474720]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:51:56 compute-0 sudo[474720]: pam_unix(sudo:session): session closed for user root
Oct 11 02:51:56 compute-0 sudo[474745]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:51:56 compute-0 sudo[474745]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:51:56 compute-0 sudo[474745]: pam_unix(sudo:session): session closed for user root
Oct 11 02:51:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:51:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:51:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:51:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:51:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:51:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:51:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:51:56
Oct 11 02:51:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:51:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:51:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['cephfs.cephfs.data', '.rgw.root', 'images', 'default.rgw.log', '.mgr', 'volumes', 'vms', 'backups', 'default.rgw.meta', 'default.rgw.control', 'cephfs.cephfs.meta']
Oct 11 02:51:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:51:56 compute-0 sudo[474770]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:51:56 compute-0 sudo[474770]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:51:56 compute-0 sudo[474770]: pam_unix(sudo:session): session closed for user root
Oct 11 02:51:56 compute-0 sudo[474795]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:51:56 compute-0 sudo[474795]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:51:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2231: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s rd, 8.4 KiB/s wr, 0 op/s
Oct 11 02:51:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:51:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:51:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:51:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:51:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:51:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:51:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:51:57 compute-0 sudo[474795]: pam_unix(sudo:session): session closed for user root
Oct 11 02:51:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:51:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:51:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:51:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:51:57 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:51:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:51:57 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:51:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:51:57 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:51:57 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 02bd3a56-d412-4018-a70a-9d9570ec3345 does not exist
Oct 11 02:51:57 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 2828d12e-568a-42d1-aa2a-51546922c54e does not exist
Oct 11 02:51:57 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 11e882ac-f433-4bd9-a1f5-b7c291d5ce17 does not exist
Oct 11 02:51:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:51:57 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:51:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:51:57 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:51:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:51:57 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:51:57 compute-0 sudo[474850]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:51:57 compute-0 sudo[474850]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:51:57 compute-0 sudo[474850]: pam_unix(sudo:session): session closed for user root
Oct 11 02:51:57 compute-0 sudo[474896]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:51:57 compute-0 sudo[474896]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:51:57 compute-0 sudo[474896]: pam_unix(sudo:session): session closed for user root
Oct 11 02:51:57 compute-0 podman[474875]: 2025-10-11 02:51:57.861875499 +0000 UTC m=+0.125516538 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=openstack_network_exporter, io.buildah.version=1.33.7, build-date=2025-08-20T13:12:41, distribution-scope=public, io.openshift.expose-services=, vcs-type=git, com.redhat.component=ubi9-minimal-container, url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., release=1755695350, io.openshift.tags=minimal rhel9, version=9.6, architecture=x86_64, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, managed_by=edpm_ansible, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., name=ubi9-minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b)
Oct 11 02:51:57 compute-0 podman[474876]: 2025-10-11 02:51:57.861374643 +0000 UTC m=+0.121609082 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:51:57 compute-0 podman[474874]: 2025-10-11 02:51:57.882018826 +0000 UTC m=+0.160825284 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_id=edpm, tcib_managed=true, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:51:57 compute-0 sudo[474962]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:51:57 compute-0 sudo[474962]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:51:57 compute-0 sudo[474962]: pam_unix(sudo:session): session closed for user root
Oct 11 02:51:58 compute-0 sudo[474988]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:51:58 compute-0 sudo[474988]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:51:58 compute-0 ceph-mon[191930]: pgmap v2231: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s rd, 8.4 KiB/s wr, 0 op/s
Oct 11 02:51:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:51:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:51:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:51:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:51:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:51:58 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:51:58 compute-0 podman[475051]: 2025-10-11 02:51:58.533325876 +0000 UTC m=+0.074772561 container create d5bb7e10d3a666fbf31140c910d904166486de4b72b5d151eb6f8ec9c761fa96 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_snyder, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 02:51:58 compute-0 systemd[1]: Started libpod-conmon-d5bb7e10d3a666fbf31140c910d904166486de4b72b5d151eb6f8ec9c761fa96.scope.
Oct 11 02:51:58 compute-0 podman[475051]: 2025-10-11 02:51:58.501860421 +0000 UTC m=+0.043307206 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:51:58 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:51:58 compute-0 podman[475051]: 2025-10-11 02:51:58.646711043 +0000 UTC m=+0.188157748 container init d5bb7e10d3a666fbf31140c910d904166486de4b72b5d151eb6f8ec9c761fa96 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_snyder, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2)
Oct 11 02:51:58 compute-0 podman[475051]: 2025-10-11 02:51:58.660462636 +0000 UTC m=+0.201909311 container start d5bb7e10d3a666fbf31140c910d904166486de4b72b5d151eb6f8ec9c761fa96 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_snyder, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 02:51:58 compute-0 podman[475051]: 2025-10-11 02:51:58.665077034 +0000 UTC m=+0.206523709 container attach d5bb7e10d3a666fbf31140c910d904166486de4b72b5d151eb6f8ec9c761fa96 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_snyder, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:51:58 compute-0 brave_snyder[475067]: 167 167
Oct 11 02:51:58 compute-0 systemd[1]: libpod-d5bb7e10d3a666fbf31140c910d904166486de4b72b5d151eb6f8ec9c761fa96.scope: Deactivated successfully.
Oct 11 02:51:58 compute-0 podman[475051]: 2025-10-11 02:51:58.673412506 +0000 UTC m=+0.214859231 container died d5bb7e10d3a666fbf31140c910d904166486de4b72b5d151eb6f8ec9c761fa96 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_snyder, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:51:58 compute-0 systemd[1]: var-lib-containers-storage-overlay-918c19bc46701cbcc14ec936612f6e2ce4920f87baf8fc8004ae19ddf0246517-merged.mount: Deactivated successfully.
Oct 11 02:51:58 compute-0 podman[475051]: 2025-10-11 02:51:58.735708515 +0000 UTC m=+0.277155190 container remove d5bb7e10d3a666fbf31140c910d904166486de4b72b5d151eb6f8ec9c761fa96 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_snyder, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_REF=reef, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:51:58 compute-0 systemd[1]: libpod-conmon-d5bb7e10d3a666fbf31140c910d904166486de4b72b5d151eb6f8ec9c761fa96.scope: Deactivated successfully.
Oct 11 02:51:58 compute-0 podman[475090]: 2025-10-11 02:51:58.944291578 +0000 UTC m=+0.059235553 container create b51d529b1684561e321492dbb8bf16415972c0eb5e4dd552cecfb78339af874b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_sinoussi, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 02:51:59 compute-0 systemd[1]: Started libpod-conmon-b51d529b1684561e321492dbb8bf16415972c0eb5e4dd552cecfb78339af874b.scope.
Oct 11 02:51:59 compute-0 podman[475090]: 2025-10-11 02:51:58.918935308 +0000 UTC m=+0.033879313 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:51:59 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:51:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6f01e24da7d1c305d21f696e7688bc75d99dfbca8eea75dad3fcb1f1be407f33/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:51:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6f01e24da7d1c305d21f696e7688bc75d99dfbca8eea75dad3fcb1f1be407f33/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:51:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6f01e24da7d1c305d21f696e7688bc75d99dfbca8eea75dad3fcb1f1be407f33/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:51:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6f01e24da7d1c305d21f696e7688bc75d99dfbca8eea75dad3fcb1f1be407f33/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:51:59 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6f01e24da7d1c305d21f696e7688bc75d99dfbca8eea75dad3fcb1f1be407f33/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:51:59 compute-0 podman[475090]: 2025-10-11 02:51:59.072986678 +0000 UTC m=+0.187930653 container init b51d529b1684561e321492dbb8bf16415972c0eb5e4dd552cecfb78339af874b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_sinoussi, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS)
Oct 11 02:51:59 compute-0 podman[475090]: 2025-10-11 02:51:59.092407544 +0000 UTC m=+0.207351499 container start b51d529b1684561e321492dbb8bf16415972c0eb5e4dd552cecfb78339af874b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_sinoussi, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:51:59 compute-0 podman[475090]: 2025-10-11 02:51:59.097400131 +0000 UTC m=+0.212344086 container attach b51d529b1684561e321492dbb8bf16415972c0eb5e4dd552cecfb78339af874b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_sinoussi, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 02:51:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2232: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 7.7 KiB/s wr, 0 op/s
Oct 11 02:51:59 compute-0 podman[157119]: time="2025-10-11T02:51:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:51:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:51:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 49205 "" "Go-http-client/1.1"
Oct 11 02:51:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:51:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9976 "" "Go-http-client/1.1"
Oct 11 02:52:00 compute-0 cool_sinoussi[475105]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:52:00 compute-0 cool_sinoussi[475105]: --> relative data size: 1.0
Oct 11 02:52:00 compute-0 cool_sinoussi[475105]: --> All data devices are unavailable
Oct 11 02:52:00 compute-0 nova_compute[356901]: 2025-10-11 02:52:00.257 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:00 compute-0 systemd[1]: libpod-b51d529b1684561e321492dbb8bf16415972c0eb5e4dd552cecfb78339af874b.scope: Deactivated successfully.
Oct 11 02:52:00 compute-0 systemd[1]: libpod-b51d529b1684561e321492dbb8bf16415972c0eb5e4dd552cecfb78339af874b.scope: Consumed 1.129s CPU time.
Oct 11 02:52:00 compute-0 conmon[475105]: conmon b51d529b1684561e3214 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-b51d529b1684561e321492dbb8bf16415972c0eb5e4dd552cecfb78339af874b.scope/container/memory.events
Oct 11 02:52:00 compute-0 podman[475090]: 2025-10-11 02:52:00.298677822 +0000 UTC m=+1.413621807 container died b51d529b1684561e321492dbb8bf16415972c0eb5e4dd552cecfb78339af874b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_sinoussi, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:52:00 compute-0 systemd[1]: var-lib-containers-storage-overlay-6f01e24da7d1c305d21f696e7688bc75d99dfbca8eea75dad3fcb1f1be407f33-merged.mount: Deactivated successfully.
Oct 11 02:52:00 compute-0 podman[475090]: 2025-10-11 02:52:00.396012786 +0000 UTC m=+1.510956771 container remove b51d529b1684561e321492dbb8bf16415972c0eb5e4dd552cecfb78339af874b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cool_sinoussi, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef)
Oct 11 02:52:00 compute-0 systemd[1]: libpod-conmon-b51d529b1684561e321492dbb8bf16415972c0eb5e4dd552cecfb78339af874b.scope: Deactivated successfully.
Oct 11 02:52:00 compute-0 ceph-mon[191930]: pgmap v2232: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 7.7 KiB/s wr, 0 op/s
Oct 11 02:52:00 compute-0 sudo[474988]: pam_unix(sudo:session): session closed for user root
Oct 11 02:52:00 compute-0 nova_compute[356901]: 2025-10-11 02:52:00.516 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:00 compute-0 sudo[475147]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:52:00 compute-0 sudo[475147]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:52:00 compute-0 sudo[475147]: pam_unix(sudo:session): session closed for user root
Oct 11 02:52:00 compute-0 sudo[475172]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:52:00 compute-0 sudo[475172]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:52:00 compute-0 sudo[475172]: pam_unix(sudo:session): session closed for user root
Oct 11 02:52:00 compute-0 sudo[475197]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:52:00 compute-0 sudo[475197]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:52:00 compute-0 sudo[475197]: pam_unix(sudo:session): session closed for user root
Oct 11 02:52:00 compute-0 sudo[475222]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:52:00 compute-0 sudo[475222]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
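
This sudo sequence is cephadm's host-management pattern as recorded above: a /bin/true no-op to verify passwordless sudo, `which python3` to locate an interpreter, then the copied cephadm binary run under that interpreter with the actual subcommand. Reconstructed below with subprocess for illustration; the argument list is taken verbatim from the logged COMMAND, but in reality the mgr drives this over an SSH connection rather than locally.

    # Sketch: the probe-then-run sequence visible in the sudo lines above
    # (normally executed over SSH by the cephadm mgr module).
    import subprocess

    subprocess.run(["sudo", "/bin/true"], check=True)  # can we sudo at all?
    python3 = subprocess.run(["sudo", "/bin/which", "python3"],
                             check=True, capture_output=True, text=True).stdout.strip()
    out = subprocess.run(
        ["sudo", python3,
         "/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d",
         "--image", "quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0",
         "--timeout", "895",
         "ceph-volume", "--fsid", "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
         "--", "lvm", "list", "--format", "json"],
        capture_output=True, text=True).stdout
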
Oct 11 02:52:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2233: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 341 B/s wr, 0 op/s
Oct 11 02:52:01 compute-0 openstack_network_exporter[374316]: ERROR   02:52:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:52:01 compute-0 openstack_network_exporter[374316]: ERROR   02:52:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:52:01 compute-0 openstack_network_exporter[374316]: ERROR   02:52:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:52:01 compute-0 openstack_network_exporter[374316]: ERROR   02:52:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:52:01 compute-0 openstack_network_exporter[374316]: ERROR   02:52:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
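
The exporter's errors repeat on every scrape because it drives ovs-appctl-style calls against daemon control sockets that do not exist on this host (and the dpif-netdev/pmd-*-show commands additionally require a userspace datapath). A sketch of the socket discovery it is failing at, assuming the conventional <rundir>/<daemon>.<pid>.ctl naming; the run directories are the usual OVS/OVN defaults, not paths read from this log.

    # Sketch: look for OVS/OVN daemon control sockets (<daemon>.<pid>.ctl),
    # the discovery step the exporter errors above are failing at.
    import glob

    for rundir, daemon in [("/var/run/openvswitch", "ovs-vswitchd"),
                           ("/var/run/ovn", "ovn-northd")]:
        hits = glob.glob(f"{rundir}/{daemon}.*.ctl")
        print(daemon, "->", hits or "no control socket files found")
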
Oct 11 02:52:01 compute-0 podman[475285]: 2025-10-11 02:52:01.418824235 +0000 UTC m=+0.059863535 container create 5d3f3134fb25ca228432fedd1c119c82431d285199b99d715f905bfd3c5e0652 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_dubinsky, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:52:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:52:01 compute-0 systemd[1]: Started libpod-conmon-5d3f3134fb25ca228432fedd1c119c82431d285199b99d715f905bfd3c5e0652.scope.
Oct 11 02:52:01 compute-0 podman[475285]: 2025-10-11 02:52:01.389165843 +0000 UTC m=+0.030205163 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:52:01 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:52:01 compute-0 podman[475285]: 2025-10-11 02:52:01.563704143 +0000 UTC m=+0.204743503 container init 5d3f3134fb25ca228432fedd1c119c82431d285199b99d715f905bfd3c5e0652 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_dubinsky, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:52:01 compute-0 podman[475285]: 2025-10-11 02:52:01.573904443 +0000 UTC m=+0.214943763 container start 5d3f3134fb25ca228432fedd1c119c82431d285199b99d715f905bfd3c5e0652 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_dubinsky, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 02:52:01 compute-0 podman[475285]: 2025-10-11 02:52:01.579366148 +0000 UTC m=+0.220405488 container attach 5d3f3134fb25ca228432fedd1c119c82431d285199b99d715f905bfd3c5e0652 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_dubinsky, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:52:01 compute-0 vigorous_dubinsky[475300]: 167 167
Oct 11 02:52:01 compute-0 systemd[1]: libpod-5d3f3134fb25ca228432fedd1c119c82431d285199b99d715f905bfd3c5e0652.scope: Deactivated successfully.
Oct 11 02:52:01 compute-0 podman[475285]: 2025-10-11 02:52:01.585606439 +0000 UTC m=+0.226645779 container died 5d3f3134fb25ca228432fedd1c119c82431d285199b99d715f905bfd3c5e0652 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_dubinsky, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True)
Oct 11 02:52:01 compute-0 systemd[1]: var-lib-containers-storage-overlay-d0cc1033242f7a83dcf7ee5566bd3b9f9a283c90985cb40391da9ffe7c48ad81-merged.mount: Deactivated successfully.
Oct 11 02:52:01 compute-0 podman[475285]: 2025-10-11 02:52:01.658386126 +0000 UTC m=+0.299425436 container remove 5d3f3134fb25ca228432fedd1c119c82431d285199b99d715f905bfd3c5e0652 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vigorous_dubinsky, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:52:01 compute-0 systemd[1]: libpod-conmon-5d3f3134fb25ca228432fedd1c119c82431d285199b99d715f905bfd3c5e0652.scope: Deactivated successfully.
Oct 11 02:52:01 compute-0 podman[475328]: 2025-10-11 02:52:01.886047433 +0000 UTC m=+0.068520392 container create 272d58cffdfce5871972b77dea8145a3bb50bd43dcb7bb7764dde67a412d8b55 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_gould, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS)
Oct 11 02:52:01 compute-0 podman[475328]: 2025-10-11 02:52:01.857799248 +0000 UTC m=+0.040272227 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:52:01 compute-0 systemd[1]: Started libpod-conmon-272d58cffdfce5871972b77dea8145a3bb50bd43dcb7bb7764dde67a412d8b55.scope.
Oct 11 02:52:02 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:52:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/eabc132124cd7d232430ce5d21dbf9cd8e24b5672c610417a18b8695cdf8bc09/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:52:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/eabc132124cd7d232430ce5d21dbf9cd8e24b5672c610417a18b8695cdf8bc09/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:52:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/eabc132124cd7d232430ce5d21dbf9cd8e24b5672c610417a18b8695cdf8bc09/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:52:02 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/eabc132124cd7d232430ce5d21dbf9cd8e24b5672c610417a18b8695cdf8bc09/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
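
The kernel's "supports timestamps until 2038 (0x7fffffff)" lines refer to the 32-bit signed time_t limit on these XFS mounts (filesystems created without the XFS bigtime feature): 0x7fffffff seconds after the epoch is 2038-01-19 03:14:07 UTC, as the quick check below confirms.

    # Quick check of the kernel's 0x7fffffff timestamp limit quoted above.
    from datetime import datetime, timezone
    print(datetime.fromtimestamp(0x7FFFFFFF, tz=timezone.utc))
    # -> 2038-01-19 03:14:07+00:00
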
Oct 11 02:52:02 compute-0 podman[475328]: 2025-10-11 02:52:02.050188055 +0000 UTC m=+0.232661064 container init 272d58cffdfce5871972b77dea8145a3bb50bd43dcb7bb7764dde67a412d8b55 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_gould, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 02:52:02 compute-0 podman[475328]: 2025-10-11 02:52:02.071084151 +0000 UTC m=+0.253557080 container start 272d58cffdfce5871972b77dea8145a3bb50bd43dcb7bb7764dde67a412d8b55 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_gould, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507)
Oct 11 02:52:02 compute-0 podman[475328]: 2025-10-11 02:52:02.075658293 +0000 UTC m=+0.258131302 container attach 272d58cffdfce5871972b77dea8145a3bb50bd43dcb7bb7764dde67a412d8b55 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_gould, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:52:02 compute-0 ceph-mon[191930]: pgmap v2233: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 341 B/s wr, 0 op/s
Oct 11 02:52:02 compute-0 pensive_gould[475344]: {
Oct 11 02:52:02 compute-0 pensive_gould[475344]:     "0": [
Oct 11 02:52:02 compute-0 pensive_gould[475344]:         {
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "devices": [
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "/dev/loop3"
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             ],
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "lv_name": "ceph_lv0",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "lv_size": "21470642176",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "name": "ceph_lv0",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "tags": {
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.cluster_name": "ceph",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.crush_device_class": "",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.encrypted": "0",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.osd_id": "0",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.type": "block",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.vdo": "0"
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             },
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "type": "block",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "vg_name": "ceph_vg0"
Oct 11 02:52:02 compute-0 pensive_gould[475344]:         }
Oct 11 02:52:02 compute-0 pensive_gould[475344]:     ],
Oct 11 02:52:02 compute-0 pensive_gould[475344]:     "1": [
Oct 11 02:52:02 compute-0 pensive_gould[475344]:         {
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "devices": [
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "/dev/loop4"
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             ],
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "lv_name": "ceph_lv1",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "lv_size": "21470642176",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "name": "ceph_lv1",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "tags": {
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.cluster_name": "ceph",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.crush_device_class": "",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.encrypted": "0",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.osd_id": "1",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.type": "block",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.vdo": "0"
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             },
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "type": "block",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "vg_name": "ceph_vg1"
Oct 11 02:52:02 compute-0 pensive_gould[475344]:         }
Oct 11 02:52:02 compute-0 pensive_gould[475344]:     ],
Oct 11 02:52:02 compute-0 pensive_gould[475344]:     "2": [
Oct 11 02:52:02 compute-0 pensive_gould[475344]:         {
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "devices": [
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "/dev/loop5"
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             ],
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "lv_name": "ceph_lv2",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "lv_size": "21470642176",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "name": "ceph_lv2",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "tags": {
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.cluster_name": "ceph",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.crush_device_class": "",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.encrypted": "0",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.osd_id": "2",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.type": "block",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:                 "ceph.vdo": "0"
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             },
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "type": "block",
Oct 11 02:52:02 compute-0 pensive_gould[475344]:             "vg_name": "ceph_vg2"
Oct 11 02:52:02 compute-0 pensive_gould[475344]:         }
Oct 11 02:52:02 compute-0 pensive_gould[475344]:     ]
Oct 11 02:52:02 compute-0 pensive_gould[475344]: }
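
The JSON block printed by pensive_gould is the output of the `ceph-volume ... lvm list --format json` command issued at 02:52:00: a map of OSD id to logical volumes, with the OSD identity duplicated into the ceph.* LV tags. Below, a sketch of reducing it to an OSD-to-device table, with `lvm_list_json` standing for the JSON text above. The capacity sum it prints also explains the pgmap lines: 3 x 21470642176 bytes is about 60 GiB, and the same byte total (64411926528) reappears below in the pg_autoscaler's effective_target_ratio lines.

    # Sketch: reduce the `ceph-volume lvm list --format json` output above to
    # an OSD -> device table, and check total capacity against the pgmap line.
    import json

    report = json.loads(lvm_list_json)  # the JSON block printed by pensive_gould
    for osd_id, lvs in sorted(report.items()):
        for lv in lvs:
            print(osd_id, lv["lv_path"], lv["devices"], lv["tags"]["ceph.osd_fsid"])

    total = sum(int(lv["lv_size"]) for lvs in report.values() for lv in lvs)
    print(round(total / 2**30, 1), "GiB")  # 3 x 21470642176 B ~= 60 GiB, as in pgmap
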
Oct 11 02:52:02 compute-0 systemd[1]: libpod-272d58cffdfce5871972b77dea8145a3bb50bd43dcb7bb7764dde67a412d8b55.scope: Deactivated successfully.
Oct 11 02:52:02 compute-0 podman[475353]: 2025-10-11 02:52:02.984090931 +0000 UTC m=+0.052652229 container died 272d58cffdfce5871972b77dea8145a3bb50bd43dcb7bb7764dde67a412d8b55 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_gould, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2)
Oct 11 02:52:03 compute-0 systemd[1]: var-lib-containers-storage-overlay-eabc132124cd7d232430ce5d21dbf9cd8e24b5672c610417a18b8695cdf8bc09-merged.mount: Deactivated successfully.
Oct 11 02:52:03 compute-0 podman[475353]: 2025-10-11 02:52:03.069838917 +0000 UTC m=+0.138400055 container remove 272d58cffdfce5871972b77dea8145a3bb50bd43dcb7bb7764dde67a412d8b55 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=pensive_gould, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:52:03 compute-0 systemd[1]: libpod-conmon-272d58cffdfce5871972b77dea8145a3bb50bd43dcb7bb7764dde67a412d8b55.scope: Deactivated successfully.
Oct 11 02:52:03 compute-0 sudo[475222]: pam_unix(sudo:session): session closed for user root
Oct 11 02:52:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2234: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 7.3 KiB/s wr, 0 op/s
Oct 11 02:52:03 compute-0 sudo[475367]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:52:03 compute-0 sudo[475367]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:52:03 compute-0 sudo[475367]: pam_unix(sudo:session): session closed for user root
Oct 11 02:52:03 compute-0 sudo[475392]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:52:03 compute-0 sudo[475392]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:52:03 compute-0 sudo[475392]: pam_unix(sudo:session): session closed for user root
Oct 11 02:52:03 compute-0 sudo[475417]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:52:03 compute-0 sudo[475417]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:52:03 compute-0 sudo[475417]: pam_unix(sudo:session): session closed for user root
Oct 11 02:52:03 compute-0 sudo[475442]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:52:03 compute-0 sudo[475442]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:52:04 compute-0 podman[475504]: 2025-10-11 02:52:04.215368696 +0000 UTC m=+0.074088105 container create ddefb28e2e4beb4488fb763c371191d2cab3ddbc23c619dfb2bc420761ffc77b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_williams, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:52:04 compute-0 podman[475504]: 2025-10-11 02:52:04.178453785 +0000 UTC m=+0.037173144 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:52:04 compute-0 systemd[1]: Started libpod-conmon-ddefb28e2e4beb4488fb763c371191d2cab3ddbc23c619dfb2bc420761ffc77b.scope.
Oct 11 02:52:04 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:52:04 compute-0 podman[475504]: 2025-10-11 02:52:04.328944603 +0000 UTC m=+0.187663992 container init ddefb28e2e4beb4488fb763c371191d2cab3ddbc23c619dfb2bc420761ffc77b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_williams, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS)
Oct 11 02:52:04 compute-0 podman[475504]: 2025-10-11 02:52:04.341508625 +0000 UTC m=+0.200227984 container start ddefb28e2e4beb4488fb763c371191d2cab3ddbc23c619dfb2bc420761ffc77b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_williams, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True)
Oct 11 02:52:04 compute-0 podman[475504]: 2025-10-11 02:52:04.346217483 +0000 UTC m=+0.204936842 container attach ddefb28e2e4beb4488fb763c371191d2cab3ddbc23c619dfb2bc420761ffc77b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_williams, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:52:04 compute-0 naughty_williams[475520]: 167 167
Oct 11 02:52:04 compute-0 systemd[1]: libpod-ddefb28e2e4beb4488fb763c371191d2cab3ddbc23c619dfb2bc420761ffc77b.scope: Deactivated successfully.
Oct 11 02:52:04 compute-0 podman[475504]: 2025-10-11 02:52:04.353577781 +0000 UTC m=+0.212297140 container died ddefb28e2e4beb4488fb763c371191d2cab3ddbc23c619dfb2bc420761ffc77b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_williams, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3)
Oct 11 02:52:04 compute-0 systemd[1]: var-lib-containers-storage-overlay-5ac9c26f79957b242a9e6c6dfd47deda42bd02dc4a95f411aca013e0d23c2a52-merged.mount: Deactivated successfully.
Oct 11 02:52:04 compute-0 podman[475504]: 2025-10-11 02:52:04.398927225 +0000 UTC m=+0.257646584 container remove ddefb28e2e4beb4488fb763c371191d2cab3ddbc23c619dfb2bc420761ffc77b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_williams, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3)
Oct 11 02:52:04 compute-0 systemd[1]: libpod-conmon-ddefb28e2e4beb4488fb763c371191d2cab3ddbc23c619dfb2bc420761ffc77b.scope: Deactivated successfully.
Oct 11 02:52:04 compute-0 ceph-mon[191930]: pgmap v2234: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 7.3 KiB/s wr, 0 op/s
Oct 11 02:52:04 compute-0 podman[475543]: 2025-10-11 02:52:04.626186106 +0000 UTC m=+0.070493967 container create 873d16ba959aef52176ba4b892293e9d96d55a3a65fdc433e492ce1e773bdd0e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_saha, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0)
Oct 11 02:52:04 compute-0 systemd[1]: Started libpod-conmon-873d16ba959aef52176ba4b892293e9d96d55a3a65fdc433e492ce1e773bdd0e.scope.
Oct 11 02:52:04 compute-0 podman[475543]: 2025-10-11 02:52:04.604949147 +0000 UTC m=+0.049257018 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:52:04 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:52:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5f6245d6d87bf788f2e6305dcde6fe56f21ee98cc8bdfab3d0477f10905cf756/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:52:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5f6245d6d87bf788f2e6305dcde6fe56f21ee98cc8bdfab3d0477f10905cf756/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:52:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5f6245d6d87bf788f2e6305dcde6fe56f21ee98cc8bdfab3d0477f10905cf756/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:52:04 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/5f6245d6d87bf788f2e6305dcde6fe56f21ee98cc8bdfab3d0477f10905cf756/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:52:04 compute-0 podman[475543]: 2025-10-11 02:52:04.780393879 +0000 UTC m=+0.224701800 container init 873d16ba959aef52176ba4b892293e9d96d55a3a65fdc433e492ce1e773bdd0e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_saha, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:52:04 compute-0 podman[475543]: 2025-10-11 02:52:04.792433338 +0000 UTC m=+0.236741219 container start 873d16ba959aef52176ba4b892293e9d96d55a3a65fdc433e492ce1e773bdd0e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_saha, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Oct 11 02:52:04 compute-0 podman[475543]: 2025-10-11 02:52:04.800141779 +0000 UTC m=+0.244449710 container attach 873d16ba959aef52176ba4b892293e9d96d55a3a65fdc433e492ce1e773bdd0e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_saha, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507)
Oct 11 02:52:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2235: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 7.3 KiB/s wr, 0 op/s
Oct 11 02:52:05 compute-0 nova_compute[356901]: 2025-10-11 02:52:05.264 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:05 compute-0 nova_compute[356901]: 2025-10-11 02:52:05.519 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:05 compute-0 adoring_saha[475560]: {
Oct 11 02:52:05 compute-0 adoring_saha[475560]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:52:05 compute-0 adoring_saha[475560]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:52:05 compute-0 adoring_saha[475560]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:52:05 compute-0 adoring_saha[475560]:         "osd_id": 1,
Oct 11 02:52:05 compute-0 adoring_saha[475560]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:52:05 compute-0 adoring_saha[475560]:         "type": "bluestore"
Oct 11 02:52:05 compute-0 adoring_saha[475560]:     },
Oct 11 02:52:05 compute-0 adoring_saha[475560]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:52:05 compute-0 adoring_saha[475560]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:52:05 compute-0 adoring_saha[475560]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:52:05 compute-0 adoring_saha[475560]:         "osd_id": 2,
Oct 11 02:52:05 compute-0 adoring_saha[475560]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:52:05 compute-0 adoring_saha[475560]:         "type": "bluestore"
Oct 11 02:52:05 compute-0 adoring_saha[475560]:     },
Oct 11 02:52:05 compute-0 adoring_saha[475560]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:52:05 compute-0 adoring_saha[475560]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:52:05 compute-0 adoring_saha[475560]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:52:05 compute-0 adoring_saha[475560]:         "osd_id": 0,
Oct 11 02:52:05 compute-0 adoring_saha[475560]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:52:05 compute-0 adoring_saha[475560]:         "type": "bluestore"
Oct 11 02:52:05 compute-0 adoring_saha[475560]:     }
Oct 11 02:52:05 compute-0 adoring_saha[475560]: }
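
adoring_saha's JSON is the companion `ceph-volume ... raw list --format json` from the 02:52:03 command, keyed by osd_uuid instead of OSD id; each osd_uuid matches a ceph.osd_fsid tag in the LVM report above. A sketch of that cross-check, with `raw_list_json` and `lvm_list_json` standing in for the two JSON blocks in this log:

    # Sketch: cross-check `raw list` (keyed by osd_uuid) against `lvm list`
    # (keyed by osd id) using the ceph.osd_fsid LV tag, as in the output above.
    import json

    raw = json.loads(raw_list_json)    # adoring_saha's JSON
    lvm = json.loads(lvm_list_json)    # pensive_gould's JSON
    by_fsid = {lv["tags"]["ceph.osd_fsid"]: osd
               for osd, lvs in lvm.items() for lv in lvs}
    for osd_uuid, entry in raw.items():
        assert by_fsid[osd_uuid] == str(entry["osd_id"])
        print(entry["osd_id"], entry["device"], entry["type"])  # all bluestore here
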
Oct 11 02:52:05 compute-0 systemd[1]: libpod-873d16ba959aef52176ba4b892293e9d96d55a3a65fdc433e492ce1e773bdd0e.scope: Deactivated successfully.
Oct 11 02:52:05 compute-0 podman[475543]: 2025-10-11 02:52:05.842916544 +0000 UTC m=+1.287224435 container died 873d16ba959aef52176ba4b892293e9d96d55a3a65fdc433e492ce1e773bdd0e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_saha, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2)
Oct 11 02:52:05 compute-0 systemd[1]: libpod-873d16ba959aef52176ba4b892293e9d96d55a3a65fdc433e492ce1e773bdd0e.scope: Consumed 1.044s CPU time.
Oct 11 02:52:05 compute-0 systemd[1]: var-lib-containers-storage-overlay-5f6245d6d87bf788f2e6305dcde6fe56f21ee98cc8bdfab3d0477f10905cf756-merged.mount: Deactivated successfully.
Oct 11 02:52:05 compute-0 podman[475543]: 2025-10-11 02:52:05.942336522 +0000 UTC m=+1.386644383 container remove 873d16ba959aef52176ba4b892293e9d96d55a3a65fdc433e492ce1e773bdd0e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=adoring_saha, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 02:52:05 compute-0 systemd[1]: libpod-conmon-873d16ba959aef52176ba4b892293e9d96d55a3a65fdc433e492ce1e773bdd0e.scope: Deactivated successfully.
Oct 11 02:52:05 compute-0 sudo[475442]: pam_unix(sudo:session): session closed for user root
Oct 11 02:52:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:52:06 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:52:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:52:06 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:52:06 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev ed046610-8a63-4cc2-829f-99537a4f0c88 does not exist
Oct 11 02:52:06 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 0d13affc-d5b4-4a94-908e-742a4db230e5 does not exist
Oct 11 02:52:06 compute-0 sudo[475608]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:52:06 compute-0 sudo[475608]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:52:06 compute-0 sudo[475608]: pam_unix(sudo:session): session closed for user root
Oct 11 02:52:06 compute-0 sudo[475633]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:52:06 compute-0 sudo[475633]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:52:06 compute-0 sudo[475633]: pam_unix(sudo:session): session closed for user root
Oct 11 02:52:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:52:07 compute-0 ceph-mon[191930]: pgmap v2235: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 7.3 KiB/s wr, 0 op/s
Oct 11 02:52:07 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:52:07 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2236: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 7.3 KiB/s wr, 0 op/s
Oct 11 02:52:07 compute-0 podman[475658]: 2025-10-11 02:52:07.270074886 +0000 UTC m=+0.146911529 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vendor=Red Hat, Inc., release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, container_name=kepler, distribution-scope=public, architecture=x86_64, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, release=1214.1726694543, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., io.openshift.tags=base rhel9, com.redhat.component=ubi9-container, io.buildah.version=1.29.0, version=9.4, build-date=2024-09-18T21:23:30, name=ubi9, io.k8s.display-name=Red Hat Universal Base Image 9, summary=Provides the latest release of Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, io.openshift.expose-services=)
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0020726119399947906 of space, bias 1.0, pg target 0.6217835819984372 quantized to 32 (current 32)
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00125203744627857 of space, bias 1.0, pg target 0.375611233883571 quantized to 32 (current 32)
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:52:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
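
The _maybe_adjust lines all follow one formula: pg target = capacity ratio x bias x rate, quantized to a power of two and floored at the pool's pg_num_min. The logged values are consistent with rate = 300, i.e. these 3 OSDs times the default mon_target_pg_per_osd of 100 (for example 7.185749983720779e-06 x 300 ~= 0.0021557, matching the '.mgr' line), and with floors of 32 (module default), 1 for .mgr, and 16 for the cephfs metadata pool. Those constants are my reading of the autoscaler's defaults, checked against the numbers above rather than read from this cluster.

    # Sketch of the arithmetic behind the pg_autoscaler lines above. The 300
    # (3 OSDs x mon_target_pg_per_osd=100) and the pg_num_min floors are
    # assumptions that reproduce the logged numbers.
    import math

    def pg_target(usage_ratio, bias, rate=300, pg_num_min=32):
        raw = usage_ratio * bias * rate
        quantized = 2 ** round(math.log2(raw)) if raw > 0 else 1
        return raw, max(pg_num_min, quantized)

    print(pg_target(7.185749983720779e-06, 1.0, pg_num_min=1))   # .mgr  -> ~0.00216, 1
    print(pg_target(0.0020726119399947906, 1.0))                 # vms   -> ~0.622, 32
    print(pg_target(5.087256625643029e-07, 4.0, pg_num_min=16))  # cephfs meta -> 16
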
Oct 11 02:52:07 compute-0 nova_compute[356901]: 2025-10-11 02:52:07.913 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:52:09 compute-0 ceph-mon[191930]: pgmap v2236: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1.3 KiB/s rd, 7.3 KiB/s wr, 0 op/s
Oct 11 02:52:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2237: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1023 B/s rd, 7.3 KiB/s wr, 0 op/s
Oct 11 02:52:10 compute-0 nova_compute[356901]: 2025-10-11 02:52:10.273 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:10 compute-0 nova_compute[356901]: 2025-10-11 02:52:10.522 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:11 compute-0 ceph-mon[191930]: pgmap v2237: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1023 B/s rd, 7.3 KiB/s wr, 0 op/s
Oct 11 02:52:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2238: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 7.3 KiB/s wr, 0 op/s
Oct 11 02:52:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:52:11 compute-0 nova_compute[356901]: 2025-10-11 02:52:11.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:52:13 compute-0 ceph-mon[191930]: pgmap v2238: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 7.3 KiB/s wr, 0 op/s
Oct 11 02:52:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2239: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 7.0 KiB/s wr, 0 op/s
Oct 11 02:52:13 compute-0 podman[475677]: 2025-10-11 02:52:13.224905998 +0000 UTC m=+0.105707503 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 02:52:13 compute-0 podman[475679]: 2025-10-11 02:52:13.244626691 +0000 UTC m=+0.115593206 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, tcib_managed=true)
Oct 11 02:52:13 compute-0 podman[475680]: 2025-10-11 02:52:13.268074279 +0000 UTC m=+0.128098277 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, container_name=ovn_metadata_agent, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, managed_by=edpm_ansible)
Oct 11 02:52:13 compute-0 podman[475678]: 2025-10-11 02:52:13.280827071 +0000 UTC m=+0.154814158 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_managed=true)
Oct 11 02:52:15 compute-0 ceph-mon[191930]: pgmap v2239: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 7.0 KiB/s wr, 0 op/s
Oct 11 02:52:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2240: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:15 compute-0 nova_compute[356901]: 2025-10-11 02:52:15.278 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:15 compute-0 nova_compute[356901]: 2025-10-11 02:52:15.526 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:52:16 compute-0 nova_compute[356901]: 2025-10-11 02:52:16.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:52:17 compute-0 ceph-mon[191930]: pgmap v2240: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2241: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:18 compute-0 nova_compute[356901]: 2025-10-11 02:52:18.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:52:18 compute-0 nova_compute[356901]: 2025-10-11 02:52:18.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:52:18 compute-0 nova_compute[356901]: 2025-10-11 02:52:18.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:52:19 compute-0 ceph-mon[191930]: pgmap v2241: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2242: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:20 compute-0 podman[475762]: 2025-10-11 02:52:20.222649853 +0000 UTC m=+0.108002358 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, container_name=multipathd, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.schema-version=1.0)
Oct 11 02:52:20 compute-0 podman[475763]: 2025-10-11 02:52:20.262883801 +0000 UTC m=+0.138215072 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=iscsid, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, org.label-schema.license=GPLv2)
Oct 11 02:52:20 compute-0 nova_compute[356901]: 2025-10-11 02:52:20.284 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:20 compute-0 nova_compute[356901]: 2025-10-11 02:52:20.529 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:21 compute-0 ceph-mon[191930]: pgmap v2242: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2243: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:52:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:52:21 compute-0 nova_compute[356901]: 2025-10-11 02:52:21.893 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:52:22 compute-0 ceph-mon[191930]: pgmap v2243: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 1023 B/s wr, 0 op/s
Oct 11 02:52:22 compute-0 nova_compute[356901]: 2025-10-11 02:52:22.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:52:22 compute-0 nova_compute[356901]: 2025-10-11 02:52:22.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:52:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2244: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:52:23 compute-0 nova_compute[356901]: 2025-10-11 02:52:23.295 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-f98d09d7-6aa0-4405-bfa0-be1f78d3911f" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:52:23 compute-0 nova_compute[356901]: 2025-10-11 02:52:23.296 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-f98d09d7-6aa0-4405-bfa0-be1f78d3911f" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:52:23 compute-0 nova_compute[356901]: 2025-10-11 02:52:23.296 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:52:24 compute-0 ceph-mon[191930]: pgmap v2244: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:52:25 compute-0 nova_compute[356901]: 2025-10-11 02:52:25.141 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Updating instance_info_cache with network_info: [{"id": "0c37c119-6647-42bb-a22f-ca741242ef30", "address": "fa:16:3e:ee:94:7e", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.2.253", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap0c37c119-66", "ovs_interfaceid": "0c37c119-6647-42bb-a22f-ca741242ef30", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:52:25 compute-0 nova_compute[356901]: 2025-10-11 02:52:25.157 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-f98d09d7-6aa0-4405-bfa0-be1f78d3911f" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:52:25 compute-0 nova_compute[356901]: 2025-10-11 02:52:25.157 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:52:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2245: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:52:25 compute-0 nova_compute[356901]: 2025-10-11 02:52:25.290 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:25 compute-0 nova_compute[356901]: 2025-10-11 02:52:25.532 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:25 compute-0 nova_compute[356901]: 2025-10-11 02:52:25.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:52:25 compute-0 nova_compute[356901]: 2025-10-11 02:52:25.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:52:25 compute-0 nova_compute[356901]: 2025-10-11 02:52:25.932 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:52:25 compute-0 nova_compute[356901]: 2025-10-11 02:52:25.933 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:52:25 compute-0 nova_compute[356901]: 2025-10-11 02:52:25.934 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:52:25 compute-0 nova_compute[356901]: 2025-10-11 02:52:25.934 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:52:25 compute-0 nova_compute[356901]: 2025-10-11 02:52:25.935 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:52:26 compute-0 ceph-mon[191930]: pgmap v2245: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #108. Immutable memtables: 0.
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:52:26.317964) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 63] Flushing memtable with next log file: 108
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151146317995, "job": 63, "event": "flush_started", "num_memtables": 1, "num_entries": 1748, "num_deletes": 251, "total_data_size": 2881741, "memory_usage": 2927488, "flush_reason": "Manual Compaction"}
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 63] Level-0 flush table #109: started
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151146333602, "cf_name": "default", "job": 63, "event": "table_file_creation", "file_number": 109, "file_size": 2820848, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 44552, "largest_seqno": 46299, "table_properties": {"data_size": 2812759, "index_size": 4968, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 2053, "raw_key_size": 16313, "raw_average_key_size": 19, "raw_value_size": 2796699, "raw_average_value_size": 3427, "num_data_blocks": 221, "num_entries": 816, "num_filter_entries": 816, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760150956, "oldest_key_time": 1760150956, "file_creation_time": 1760151146, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 109, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 63] Flush lasted 15688 microseconds, and 6572 cpu microseconds.
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:52:26.333652) [db/flush_job.cc:967] [default] [JOB 63] Level-0 flush table #109: 2820848 bytes OK
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:52:26.333670) [db/memtable_list.cc:519] [default] Level-0 commit table #109 started
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:52:26.336024) [db/memtable_list.cc:722] [default] Level-0 commit table #109: memtable #1 done
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:52:26.336036) EVENT_LOG_v1 {"time_micros": 1760151146336032, "job": 63, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:52:26.336053) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 63] Try to delete WAL files size 2874270, prev total WAL file size 2874270, number of live WAL files 2.
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000105.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:52:26.336969) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730034323637' seq:72057594037927935, type:22 .. '7061786F730034353139' seq:0, type:0; will stop at (end)
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 64] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 63 Base level 0, inputs: [109(2754KB)], [107(6543KB)]
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151146337011, "job": 64, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [109], "files_L6": [107], "score": -1, "input_data_size": 9521506, "oldest_snapshot_seqno": -1}
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 64] Generated table #110: 6077 keys, 7821718 bytes, temperature: kUnknown
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151146372785, "cf_name": "default", "job": 64, "event": "table_file_creation", "file_number": 110, "file_size": 7821718, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 7783929, "index_size": 21487, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 15237, "raw_key_size": 158133, "raw_average_key_size": 26, "raw_value_size": 7676848, "raw_average_value_size": 1263, "num_data_blocks": 852, "num_entries": 6077, "num_filter_entries": 6077, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760151146, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 110, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:52:26.373005) [db/compaction/compaction_job.cc:1663] [default] [JOB 64] Compacted 1@0 + 1@6 files to L6 => 7821718 bytes
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:52:26.374879) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 265.5 rd, 218.1 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(2.7, 6.4 +0.0 blob) out(7.5 +0.0 blob), read-write-amplify(6.1) write-amplify(2.8) OK, records in: 6591, records dropped: 514 output_compression: NoCompression
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:52:26.374894) EVENT_LOG_v1 {"time_micros": 1760151146374886, "job": 64, "event": "compaction_finished", "compaction_time_micros": 35860, "compaction_time_cpu_micros": 19971, "output_level": 6, "num_output_files": 1, "total_output_size": 7821718, "num_input_records": 6591, "num_output_records": 6077, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000109.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151146375419, "job": 64, "event": "table_file_deletion", "file_number": 109}
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000107.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151146376424, "job": 64, "event": "table_file_deletion", "file_number": 107}
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:52:26.336888) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:52:26.376569) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:52:26.376574) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:52:26.376576) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:52:26.376577) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:52:26 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:52:26.376579) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:52:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:52:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:52:26 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2778000401' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:52:26 compute-0 nova_compute[356901]: 2025-10-11 02:52:26.488 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.553s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:52:26 compute-0 nova_compute[356901]: 2025-10-11 02:52:26.593 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:52:26 compute-0 nova_compute[356901]: 2025-10-11 02:52:26.594 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:52:26 compute-0 nova_compute[356901]: 2025-10-11 02:52:26.604 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:52:26 compute-0 nova_compute[356901]: 2025-10-11 02:52:26.605 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:52:26 compute-0 nova_compute[356901]: 2025-10-11 02:52:26.614 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:52:26 compute-0 nova_compute[356901]: 2025-10-11 02:52:26.615 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:52:26 compute-0 nova_compute[356901]: 2025-10-11 02:52:26.615 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:52:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:52:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:52:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:52:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:52:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:52:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:52:27 compute-0 nova_compute[356901]: 2025-10-11 02:52:27.048 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:52:27 compute-0 nova_compute[356901]: 2025-10-11 02:52:27.049 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3248MB free_disk=59.86394500732422GB free_vcpus=5 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:52:27 compute-0 nova_compute[356901]: 2025-10-11 02:52:27.050 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:52:27 compute-0 nova_compute[356901]: 2025-10-11 02:52:27.050 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:52:27 compute-0 nova_compute[356901]: 2025-10-11 02:52:27.127 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:52:27 compute-0 nova_compute[356901]: 2025-10-11 02:52:27.128 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 8422017b-c868-4ba2-ab1f-61d3668ca145 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:52:27 compute-0 nova_compute[356901]: 2025-10-11 02:52:27.128 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance f98d09d7-6aa0-4405-bfa0-be1f78d3911f actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:52:27 compute-0 nova_compute[356901]: 2025-10-11 02:52:27.129 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 3 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:52:27 compute-0 nova_compute[356901]: 2025-10-11 02:52:27.129 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1280MB phys_disk=59GB used_disk=4GB total_vcpus=8 used_vcpus=3 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:52:27 compute-0 nova_compute[356901]: 2025-10-11 02:52:27.186 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:52:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2246: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:52:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2778000401' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:52:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:52:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2398970951' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:52:27 compute-0 nova_compute[356901]: 2025-10-11 02:52:27.668 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.482s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:52:27 compute-0 nova_compute[356901]: 2025-10-11 02:52:27.677 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:52:27 compute-0 nova_compute[356901]: 2025-10-11 02:52:27.693 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:52:27 compute-0 nova_compute[356901]: 2025-10-11 02:52:27.694 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:52:27 compute-0 nova_compute[356901]: 2025-10-11 02:52:27.695 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.644s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:52:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:52:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/4048875254' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:52:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:52:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/4048875254' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:52:28 compute-0 podman[475847]: 2025-10-11 02:52:28.200093738 +0000 UTC m=+0.081873116 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:52:28 compute-0 podman[475844]: 2025-10-11 02:52:28.20612019 +0000 UTC m=+0.105155139 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_id=edpm, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.build-date=20251009, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:52:28 compute-0 podman[475845]: 2025-10-11 02:52:28.221573145 +0000 UTC m=+0.113257265 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=openstack_network_exporter, io.openshift.expose-services=, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, url=https://catalog.redhat.com/en/search?searchType=containers, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, com.redhat.component=ubi9-minimal-container, config_id=edpm, managed_by=edpm_ansible, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.buildah.version=1.33.7, maintainer=Red Hat, Inc., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., io.openshift.tags=minimal rhel9, version=9.6, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, architecture=x86_64, build-date=2025-08-20T13:12:41, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, release=1755695350)
Oct 11 02:52:28 compute-0 ceph-mon[191930]: pgmap v2246: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:52:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2398970951' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:52:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/4048875254' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:52:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/4048875254' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:52:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2247: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:52:29 compute-0 podman[157119]: time="2025-10-11T02:52:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:52:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:52:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:52:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:52:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9551 "" "Go-http-client/1.1"
Oct 11 02:52:30 compute-0 nova_compute[356901]: 2025-10-11 02:52:30.295 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:30 compute-0 ceph-mon[191930]: pgmap v2247: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.0 KiB/s wr, 1 op/s
Oct 11 02:52:30 compute-0 nova_compute[356901]: 2025-10-11 02:52:30.536 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2248: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.3 KiB/s wr, 1 op/s
Oct 11 02:52:31 compute-0 openstack_network_exporter[374316]: ERROR   02:52:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:52:31 compute-0 openstack_network_exporter[374316]: ERROR   02:52:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:52:31 compute-0 openstack_network_exporter[374316]: ERROR   02:52:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:52:31 compute-0 openstack_network_exporter[374316]: ERROR   02:52:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:52:31 compute-0 openstack_network_exporter[374316]: ERROR   02:52:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:52:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:52:32 compute-0 ceph-mon[191930]: pgmap v2248: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.3 KiB/s wr, 1 op/s
Oct 11 02:52:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2249: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 8.3 KiB/s wr, 1 op/s
Oct 11 02:52:34 compute-0 ceph-mon[191930]: pgmap v2249: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 8.3 KiB/s wr, 1 op/s
Oct 11 02:52:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2250: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:52:35 compute-0 nova_compute[356901]: 2025-10-11 02:52:35.301 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:35 compute-0 nova_compute[356901]: 2025-10-11 02:52:35.538 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:36 compute-0 ceph-mon[191930]: pgmap v2250: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:52:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:52:36 compute-0 nova_compute[356901]: 2025-10-11 02:52:36.690 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:52:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2251: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:52:38 compute-0 podman[475904]: 2025-10-11 02:52:38.254306384 +0000 UTC m=+0.135720031 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, release-0.7.12=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, container_name=kepler, name=ubi9, architecture=x86_64, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9., vendor=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9, managed_by=edpm_ansible, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.openshift.expose-services=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm, io.buildah.version=1.29.0, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.component=ubi9-container, version=9.4, io.openshift.tags=base rhel9, maintainer=Red Hat, Inc., release=1214.1726694543, vcs-type=git)
Oct 11 02:52:38 compute-0 ceph-mon[191930]: pgmap v2251: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:52:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2252: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:52:40 compute-0 nova_compute[356901]: 2025-10-11 02:52:40.308 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:40 compute-0 ceph-mon[191930]: pgmap v2252: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:52:40 compute-0 nova_compute[356901]: 2025-10-11 02:52:40.541 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2253: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:52:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:52:42 compute-0 ceph-mon[191930]: pgmap v2253: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 341 B/s wr, 0 op/s
Oct 11 02:52:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2254: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:44 compute-0 podman[475923]: 2025-10-11 02:52:44.223516751 +0000 UTC m=+0.111704727 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:52:44 compute-0 podman[475926]: 2025-10-11 02:52:44.235797243 +0000 UTC m=+0.114391560 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent)
Oct 11 02:52:44 compute-0 podman[475925]: 2025-10-11 02:52:44.241884961 +0000 UTC m=+0.127667943 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_id=edpm, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image)
Oct 11 02:52:44 compute-0 podman[475924]: 2025-10-11 02:52:44.283440555 +0000 UTC m=+0.160322703 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_controller, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3)
Oct 11 02:52:44 compute-0 ceph-mon[191930]: pgmap v2254: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2255: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:45 compute-0 nova_compute[356901]: 2025-10-11 02:52:45.312 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:45 compute-0 nova_compute[356901]: 2025-10-11 02:52:45.544 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:52:46 compute-0 ceph-mon[191930]: pgmap v2255: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2256: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:48 compute-0 ceph-mon[191930]: pgmap v2256: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2257: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:50 compute-0 nova_compute[356901]: 2025-10-11 02:52:50.318 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:50 compute-0 nova_compute[356901]: 2025-10-11 02:52:50.549 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:51 compute-0 ceph-mon[191930]: pgmap v2257: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2258: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:51 compute-0 podman[476010]: 2025-10-11 02:52:51.257882973 +0000 UTC m=+0.125243684 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, config_id=iscsid, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid)
Oct 11 02:52:51 compute-0 podman[476009]: 2025-10-11 02:52:51.266863761 +0000 UTC m=+0.149821706 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, container_name=multipathd, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=multipathd, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 02:52:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:52:52 compute-0 ceph-mon[191930]: pgmap v2258: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2259: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:54 compute-0 ceph-mon[191930]: pgmap v2259: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:52:54.885 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:52:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:52:54.887 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:52:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:52:54.889 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:52:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2260: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:55 compute-0 nova_compute[356901]: 2025-10-11 02:52:55.323 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:55 compute-0 nova_compute[356901]: 2025-10-11 02:52:55.552 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:52:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:52:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:52:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:52:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:52:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:52:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:52:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:52:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:52:56
Oct 11 02:52:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:52:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:52:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.control', 'vms', '.rgw.root', 'default.rgw.log', '.mgr', 'cephfs.cephfs.data', 'backups', 'images', 'default.rgw.meta', 'volumes', 'cephfs.cephfs.meta']
Oct 11 02:52:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:52:56 compute-0 ceph-mon[191930]: pgmap v2260: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2261: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:52:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:52:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:52:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:52:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:52:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:52:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:52:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:52:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:52:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:52:58 compute-0 ceph-mon[191930]: pgmap v2261: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:59 compute-0 podman[476047]: 2025-10-11 02:52:59.229578429 +0000 UTC m=+0.124469776 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, config_id=edpm, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_ipmi)
Oct 11 02:52:59 compute-0 podman[476048]: 2025-10-11 02:52:59.243921637 +0000 UTC m=+0.115129994 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-type=git, container_name=openstack_network_exporter, managed_by=edpm_ansible, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.6, url=https://catalog.redhat.com/en/search?searchType=containers, build-date=2025-08-20T13:12:41, io.openshift.tags=minimal rhel9, name=ubi9-minimal, architecture=x86_64, com.redhat.component=ubi9-minimal-container, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vendor=Red Hat, Inc., io.buildah.version=1.33.7, release=1755695350, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.expose-services=, maintainer=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, config_id=edpm, distribution-scope=public)
Oct 11 02:52:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2262: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:52:59 compute-0 podman[476050]: 2025-10-11 02:52:59.265107074 +0000 UTC m=+0.129513624 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:52:59 compute-0 podman[157119]: time="2025-10-11T02:52:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:52:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:52:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:52:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:52:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9551 "" "Go-http-client/1.1"
Oct 11 02:53:00 compute-0 nova_compute[356901]: 2025-10-11 02:53:00.330 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:00 compute-0 nova_compute[356901]: 2025-10-11 02:53:00.555 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:00 compute-0 ceph-mon[191930]: pgmap v2262: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2263: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:01 compute-0 openstack_network_exporter[374316]: ERROR   02:53:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:53:01 compute-0 openstack_network_exporter[374316]: ERROR   02:53:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:53:01 compute-0 openstack_network_exporter[374316]: ERROR   02:53:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:53:01 compute-0 openstack_network_exporter[374316]: ERROR   02:53:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:53:01 compute-0 openstack_network_exporter[374316]: ERROR   02:53:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:53:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:53:03 compute-0 ceph-mon[191930]: pgmap v2263: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2264: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:04 compute-0 ceph-mon[191930]: pgmap v2264: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2265: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:05 compute-0 nova_compute[356901]: 2025-10-11 02:53:05.335 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:05 compute-0 nova_compute[356901]: 2025-10-11 02:53:05.560 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:06 compute-0 sudo[476106]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:53:06 compute-0 sudo[476106]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:53:06 compute-0 sudo[476106]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:06 compute-0 sudo[476131]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:53:06 compute-0 sudo[476131]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:06 compute-0 sudo[476131]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:06 compute-0 sudo[476156]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:53:06 compute-0 sudo[476156]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:06 compute-0 sudo[476156]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:06 compute-0 ceph-mon[191930]: pgmap v2265: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:06 compute-0 sudo[476181]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:53:06 compute-0 sudo[476181]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2266: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.002072675530702611 of space, bias 1.0, pg target 0.6218026592107834 quantized to 32 (current 32)
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00125203744627857 of space, bias 1.0, pg target 0.375611233883571 quantized to 32 (current 32)
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:53:07 compute-0 sudo[476181]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:07 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:53:07 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:53:07 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:53:07 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:53:07 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:53:07 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev b24df1d7-b74e-417f-8b76-f19543c9e9c7 does not exist
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev a8ff8738-0447-4cb6-af8a-d709c91d9d79 does not exist
Oct 11 02:53:07 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev d5cc65e7-b01a-4996-8684-cff424df91b0 does not exist
Oct 11 02:53:07 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:53:07 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:53:07 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:53:07 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:53:07 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:53:07 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:53:07 compute-0 sudo[476236]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:53:07 compute-0 sudo[476236]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:07 compute-0 sudo[476236]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:07 compute-0 sudo[476261]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:53:07 compute-0 sudo[476261]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:07 compute-0 sudo[476261]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:07 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:53:07 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:53:07 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:53:07 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:53:07 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:53:07 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:53:08 compute-0 sudo[476286]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:53:08 compute-0 sudo[476286]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:08 compute-0 sudo[476286]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:08 compute-0 sudo[476311]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:53:08 compute-0 sudo[476311]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:08 compute-0 podman[476375]: 2025-10-11 02:53:08.630650411 +0000 UTC m=+0.050953886 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:53:08 compute-0 podman[476375]: 2025-10-11 02:53:08.737646338 +0000 UTC m=+0.157949773 container create be36f052932e4b53f46db7aee21bcf1f235f70e9b1cf424e840834a463f571c6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_germain, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:53:08 compute-0 systemd[1]: Started libpod-conmon-be36f052932e4b53f46db7aee21bcf1f235f70e9b1cf424e840834a463f571c6.scope.
Oct 11 02:53:09 compute-0 podman[476389]: 2025-10-11 02:53:09.006931082 +0000 UTC m=+0.200272126 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, release-0.7.12=, config_id=edpm, build-date=2024-09-18T21:23:30, maintainer=Red Hat, Inc., io.openshift.expose-services=, vendor=Red Hat, Inc., architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.29.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-container, managed_by=edpm_ansible, container_name=kepler, release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., io.k8s.display-name=Red Hat Universal Base Image 9, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, distribution-scope=public, io.openshift.tags=base rhel9, name=ubi9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, version=9.4, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:53:09 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:53:09 compute-0 podman[476375]: 2025-10-11 02:53:09.04515114 +0000 UTC m=+0.465454595 container init be36f052932e4b53f46db7aee21bcf1f235f70e9b1cf424e840834a463f571c6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_germain, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:53:09 compute-0 podman[476375]: 2025-10-11 02:53:09.064457682 +0000 UTC m=+0.484761087 container start be36f052932e4b53f46db7aee21bcf1f235f70e9b1cf424e840834a463f571c6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_germain, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:53:09 compute-0 nice_germain[476406]: 167 167
Oct 11 02:53:09 compute-0 systemd[1]: libpod-be36f052932e4b53f46db7aee21bcf1f235f70e9b1cf424e840834a463f571c6.scope: Deactivated successfully.
Oct 11 02:53:09 compute-0 podman[476375]: 2025-10-11 02:53:09.168204384 +0000 UTC m=+0.588508169 container attach be36f052932e4b53f46db7aee21bcf1f235f70e9b1cf424e840834a463f571c6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_germain, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:53:09 compute-0 podman[476375]: 2025-10-11 02:53:09.170204682 +0000 UTC m=+0.590508107 container died be36f052932e4b53f46db7aee21bcf1f235f70e9b1cf424e840834a463f571c6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_germain, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:53:09 compute-0 ceph-mon[191930]: pgmap v2266: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2267: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:09 compute-0 nova_compute[356901]: 2025-10-11 02:53:09.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:53:09 compute-0 systemd[1]: var-lib-containers-storage-overlay-95f1c1fc3ec0e402ad483c76ab525ccec8326e5cd7cd890921d02619e5f98b69-merged.mount: Deactivated successfully.
Oct 11 02:53:10 compute-0 nova_compute[356901]: 2025-10-11 02:53:10.341 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:10 compute-0 ceph-mon[191930]: pgmap v2267: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:10 compute-0 nova_compute[356901]: 2025-10-11 02:53:10.562 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:10 compute-0 podman[476375]: 2025-10-11 02:53:10.689180459 +0000 UTC m=+2.109483854 container remove be36f052932e4b53f46db7aee21bcf1f235f70e9b1cf424e840834a463f571c6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_germain, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, ceph=True)
Oct 11 02:53:10 compute-0 systemd[1]: libpod-conmon-be36f052932e4b53f46db7aee21bcf1f235f70e9b1cf424e840834a463f571c6.scope: Deactivated successfully.
Oct 11 02:53:11 compute-0 podman[476434]: 2025-10-11 02:53:10.923465288 +0000 UTC m=+0.056540589 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:53:11 compute-0 podman[476434]: 2025-10-11 02:53:11.102301209 +0000 UTC m=+0.235376420 container create 6c24edc924f022f6bed7972033dfcfd5fc9b9150cab93a1cf92006900a0057ea (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_noether, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:53:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2268: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:11 compute-0 systemd[1]: Started libpod-conmon-6c24edc924f022f6bed7972033dfcfd5fc9b9150cab93a1cf92006900a0057ea.scope.
Oct 11 02:53:11 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:53:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/621dfc54d0524e1115e1285c98f1cfb81f3566c6cd40788fed094a43820f11e2/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:53:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/621dfc54d0524e1115e1285c98f1cfb81f3566c6cd40788fed094a43820f11e2/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:53:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/621dfc54d0524e1115e1285c98f1cfb81f3566c6cd40788fed094a43820f11e2/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:53:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/621dfc54d0524e1115e1285c98f1cfb81f3566c6cd40788fed094a43820f11e2/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:53:11 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/621dfc54d0524e1115e1285c98f1cfb81f3566c6cd40788fed094a43820f11e2/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:53:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:53:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:53:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 4200.1 total, 600.0 interval
                                            Cumulative writes: 10K writes, 38K keys, 10K commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 10K writes, 2784 syncs, 3.64 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 615 writes, 1777 keys, 615 commit groups, 1.0 writes per commit group, ingest: 1.43 MB, 0.00 MB/s
                                            Interval WAL: 615 writes, 278 syncs, 2.21 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:53:11 compute-0 podman[476434]: 2025-10-11 02:53:11.633117162 +0000 UTC m=+0.766192383 container init 6c24edc924f022f6bed7972033dfcfd5fc9b9150cab93a1cf92006900a0057ea (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_noether, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS)
Oct 11 02:53:11 compute-0 podman[476434]: 2025-10-11 02:53:11.653963999 +0000 UTC m=+0.787039210 container start 6c24edc924f022f6bed7972033dfcfd5fc9b9150cab93a1cf92006900a0057ea (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_noether, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:53:11 compute-0 podman[476434]: 2025-10-11 02:53:11.846761929 +0000 UTC m=+0.979837140 container attach 6c24edc924f022f6bed7972033dfcfd5fc9b9150cab93a1cf92006900a0057ea (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_noether, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 02:53:12 compute-0 recursing_noether[476450]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:53:12 compute-0 recursing_noether[476450]: --> relative data size: 1.0
Oct 11 02:53:12 compute-0 recursing_noether[476450]: --> All data devices are unavailable
Oct 11 02:53:12 compute-0 systemd[1]: libpod-6c24edc924f022f6bed7972033dfcfd5fc9b9150cab93a1cf92006900a0057ea.scope: Deactivated successfully.
Oct 11 02:53:12 compute-0 systemd[1]: libpod-6c24edc924f022f6bed7972033dfcfd5fc9b9150cab93a1cf92006900a0057ea.scope: Consumed 1.241s CPU time.
Oct 11 02:53:13 compute-0 podman[476480]: 2025-10-11 02:53:13.083305882 +0000 UTC m=+0.061976723 container died 6c24edc924f022f6bed7972033dfcfd5fc9b9150cab93a1cf92006900a0057ea (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_noether, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
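The short-lived `recursing_noether` container above is a one-shot ceph-volume run: it was passed 3 LVM data devices, found all of them unavailable, and exited, after which systemd tore down its scope and overlay mount. To reproduce that availability check by hand, something like the sketch below could be used; it assumes `ceph-volume inventory --format json` is runnable on the host and that its report carries `path` and `available` fields, and it is illustrative rather than ceph-volume's internal logic.

```python
import json
import subprocess

# Hedged sketch: approximate the availability check behind the
# "All data devices are unavailable" message above. Assumes
# `ceph-volume inventory --format json` works on this host; the
# filtering below is illustrative, not ceph-volume's internals.
def available_data_devices() -> list[str]:
    out = subprocess.run(
        ["ceph-volume", "inventory", "--format", "json"],
        capture_output=True, check=True, text=True,
    ).stdout
    return [d["path"] for d in json.loads(out) if d.get("available")]

if __name__ == "__main__":
    devices = available_data_devices()
    print(devices or "All data devices are unavailable")
```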
Oct 11 02:53:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2269: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:13 compute-0 ceph-mon[191930]: pgmap v2268: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-621dfc54d0524e1115e1285c98f1cfb81f3566c6cd40788fed094a43820f11e2-merged.mount: Deactivated successfully.
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.872 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is greater than the number of worker threads available to execute them; therefore, the polling process can be expected to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.873 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.873 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.874 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.875 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.876 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc695b0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
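The burst of near-identical DEBUG lines above is ceilometer binding every stevedore Extension from the [pollsters] source to one shared ThreadPoolExecutor, each paired with fresh cache, pollster-history, and discovery-cache dictionaries. The pattern, reduced to its essentials (illustrative names, not ceilometer's code):

```python
from concurrent.futures import ThreadPoolExecutor

# Minimal sketch of the registration pattern in the log: each pollster is
# submitted to one shared executor together with fresh cache dictionaries.
# `poll_fns` stands in for the pollster callables; not ceilometer code.
def register_pollsters(poll_fns, max_workers=1):
    executor = ThreadPoolExecutor(max_workers=max_workers)
    cache, history, discovery_cache = {}, {}, {}
    return [
        executor.submit(fn, cache, history, discovery_cache)
        for fn in poll_fns
    ]
```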
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.882 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '8422017b-c868-4ba2-ab1f-61d3668ca145', 'name': 'te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c', 'flavor': {'id': '6dff30d1-85df-4e9c-9163-a20ba47bb0c7', 'name': 'm1.nano', 'vcpus': 1, 'ram': 128, 'disk': 1, 'ephemeral': 0, 'swap': 0}, 'image': {'id': '2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-0000000e', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'user_id': 'f66a606299944d53a40f21e81c791d70', 'hostId': 'cea8816d446065ba50379057f72b942db7e204c60c4530591bc7d0be', 'status': 'active', 'metadata': {'metering.server_group': '44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.886 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': 'f98d09d7-6aa0-4405-bfa0-be1f78d3911f', 'name': 'te-0512306-asg-am4iabdjybzp-yj44h76hdzhi-bejrsw3xgi4q', 'flavor': {'id': '6dff30d1-85df-4e9c-9163-a20ba47bb0c7', 'name': 'm1.nano', 'vcpus': 1, 'ram': 128, 'disk': 1, 'ephemeral': 0, 'swap': 0}, 'image': {'id': '2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-0000000f', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'user_id': 'f66a606299944d53a40f21e81c791d70', 'hostId': 'cea8816d446065ba50379057f72b942db7e204c60c4530591bc7d0be', 'status': 'active', 'metadata': {'metering.server_group': '44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.891 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
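Each `instance data:` line above is a plain dict describing one libvirt-discovered guest; the pollsters key their samples on a handful of those fields. A hypothetical helper for such a record (the shape is taken from the log, not from a published schema):

```python
# Illustrative only: the discovery records logged above are plain dicts.
# This helper pulls the fields the pollsters key their samples on; the
# record shape matches the log, not a published ceilometer schema.
def summarize_instance(record: dict) -> str:
    flavor = record["flavor"]
    return (f"{record['id']} ({record['name']}): "
            f"{flavor['name']}, {flavor['vcpus']} vCPU, "
            f"{flavor['ram']} MiB RAM, state={record['OS-EXT-STS:vm_state']}")

rec = {
    "id": "0cc56d17-ec3a-4408-bccb-91b29427379e",
    "name": "test_0",
    "flavor": {"name": "m1.small", "vcpus": 1, "ram": 512},
    "OS-EXT-STS:vm_state": "running",
}
print(summarize_instance(rec))
```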
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.892 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.892 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.893 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.893 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.894 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:53:13.893452) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:13 compute-0 nova_compute[356901]: 2025-10-11 02:53:13.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.903 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.bytes volume: 2450 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.910 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.bytes volume: 2276 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.916 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2856 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.918 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
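The run just completed traces one full pollster cycle: discovery, a coordination check that is skipped because no coordination group is configured, a heartbeat update, then one sample per instance and device. Condensed into a sketch with illustrative function names:

```python
# Condensed sketch of the per-pollster cycle visible in the log:
# discover instances, skip coordination when no group is configured,
# record a heartbeat, then emit one sample per instance/device.
# Names are illustrative, not ceilometer's internals.
def run_pollster(name, discover, poll, heartbeat, coordination_group=None):
    instances = discover()          # "Executing discovery process for pollsters ..."
    if coordination_group is not None:
        raise NotImplementedError("coordination path not shown in this log")
    heartbeat(name)                 # "Pollster heartbeat update: <name>"
    # one sample per instance (and per device, inside poll())
    return [sample for inst in instances for sample in poll(inst)]
```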
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.918 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.918 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.918 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.918 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.919 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.919 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets volume: 31 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.919 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.packets volume: 31 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.920 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:53:13.919047) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.921 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 24 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.922 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.922 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.922 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.922 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.923 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.923 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.923 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:53:13.923357) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.924 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.924 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.925 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.925 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.926 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.926 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.926 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.926 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.927 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.927 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.927 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:53:13.926951) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.928 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.928 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.929 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.929 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.930 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.930 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.930 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.930 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.931 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:53:13.930763) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.953 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.954 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.capacity volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.987 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:13.988 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.capacity volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.014 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.015 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.015 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.016 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.016 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.016 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.016 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.016 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.016 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.017 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:53:14.016700) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.057 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.bytes volume: 29657600 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.057 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.bytes volume: 299326 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.089 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.bytes volume: 31070720 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.090 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.bytes volume: 299326 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.131 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.131 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.132 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.132 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.133 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.133 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.133 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.133 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.134 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.134 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.latency volume: 2082910661 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.135 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.latency volume: 143173838 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.134 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:53:14.134280) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.135 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.latency volume: 2034310761 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.136 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.latency volume: 186412257 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.136 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.136 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.137 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.137 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
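The read-latency volumes above appear to be cumulative counters; libvirt block stats report total read time in nanoseconds, and treating them that way (an assumption of this note) lets you derive an average per-request latency by dividing by the matching disk.device.read.requests count:

```python
# Hedged sketch: the disk.device.read.latency volumes above look like
# cumulative counters (libvirt block stats report total read time in
# nanoseconds). Dividing by the matching disk.device.read.requests count
# gives an average per-request latency; the unit is an assumption here.
def avg_read_latency_ms(total_latency_ns: int, read_requests: int) -> float:
    return (total_latency_ns / read_requests) / 1e6 if read_requests else 0.0

# e.g. instance 8422017b...: 2082910661 over 1067 read requests
print(f"{avg_read_latency_ms(2082910661, 1067):.2f} ms/request")  # ~1.95
```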
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.137 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.138 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.138 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.138 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.138 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.139 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.requests volume: 1067 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.139 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:53:14.138847) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.139 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.requests volume: 120 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.139 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.requests volume: 1136 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.140 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.requests volume: 120 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.140 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.141 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.141 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.142 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.142 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.142 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.142 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.143 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.143 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.143 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.143 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:53:14.143384) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.144 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.usage volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.144 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.144 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.usage volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.145 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.145 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.145 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.146 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.146 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.147 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.147 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.147 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.147 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.148 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.bytes volume: 73129984 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.148 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:53:14.147924) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.148 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.149 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.bytes volume: 73154560 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.149 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.149 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.150 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.150 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.151 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
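[annotation] The block above is the polling manager's standard shape for one meter: discovery via [local_instances], a coordination check against the (empty) hashrings, a heartbeat update, one _stats_to_sample DEBUG line per instance disk device, and a closing INFO line. Each sample line carries the instance UUID, the meter name, and the cumulative volume. A minimal sketch for pulling those fields out of such lines (the regex and helper are illustrative, not part of ceilometer):

    import re

    # Matches e.g. "8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.bytes volume: 73129984"
    SAMPLE_RE = re.compile(
        r"(?P<uuid>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})"
        r"/(?P<meter>[\w.]+) volume: (?P<volume>[\d.]+)"
    )

    def parse_sample(line):
        """Return (instance_uuid, meter, volume) for a _stats_to_sample line, else None."""
        m = SAMPLE_RE.search(line)
        if m is None:
            return None
        return m.group("uuid"), m.group("meter"), float(m.group("volume"))

Note the repeated UUIDs with different volumes (73129984 then 0 for 8422017b-...): a per-device pollster emits one sample per attached disk, so an instance with two disks produces two lines for the same meter.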
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.151 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.151 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.151 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.152 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.152 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.152 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.latency volume: 8003595076 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.152 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:53:14.152315) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.153 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.153 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.latency volume: 8498421759 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.153 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.154 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.154 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.154 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.155 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.155 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.156 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.156 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.156 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.156 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.157 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:53:14.156758) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.175 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.196 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.213 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.214 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
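[annotation] power.state samples report the domain state as an integer; volume: 1 for all three instances corresponds to the running state (1 is VIR_DOMAIN_RUNNING in libvirt and RUNNING in Nova's power_state enum). A small decoding helper, assuming libvirt's virDomainState values are the source of these samples:

    # libvirt virDomainState constants
    POWER_STATES = {
        0: "nostate", 1: "running", 2: "blocked", 3: "paused",
        4: "shutdown", 5: "shutoff", 6: "crashed", 7: "pmsuspended",
    }

    def describe_power_state(volume):
        return POWER_STATES.get(int(volume), "unknown")   # 1 -> "running"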
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.214 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.215 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.215 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.215 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.215 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.216 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.requests volume: 334 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.216 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:53:14.215751) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.216 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.217 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.requests volume: 304 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.217 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.218 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.218 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.218 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.219 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.219 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.219 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.220 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.220 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.220 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.220 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.bytes.delta volume: 630 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.220 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:53:14.220494) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.221 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.221 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.222 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.222 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.222 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
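[annotation] Unlike the cumulative meters above, network.incoming.bytes.rate is a rate-type meter: per the log it is skipped because its discovery returned no resources not already covered this cycle (network.outgoing.bytes.rate is skipped the same way further down), and rate meters in any case derive their value from successive cumulative readings. The underlying computation is just a delta over elapsed time; a generic sketch, illustrative rather than ceilometer's actual code:

    def rate_from_samples(prev, curr):
        """Rate between two cumulative samples given as (value, unix_timestamp) pairs."""
        (v0, t0), (v1, t1) = prev, curr
        if t1 <= t0:
            return None  # duplicate sample or clock skew; no rate can be derived
        return (v1 - v0) / (t1 - t0)

    # rate_from_samples((73129984, 1760150594.0), (73230000, 1760150894.0)) -> bytes/s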
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.222 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.222 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.222 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.222 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.223 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.223 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.224 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.224 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.224 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.224 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.224 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.224 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets volume: 30 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.225 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.packets volume: 27 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.225 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.225 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.225 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.225 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.226 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.226 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.226 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.226 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:53:14.223050) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.226 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:53:14.224655) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.226 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:53:14.226188) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.226 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.227 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.bytes.delta volume: 630 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.227 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.227 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.227 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.227 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.228 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.228 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.228 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.228 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.228 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.228 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.229 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.229 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:53:14.228128) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.229 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.229 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.229 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.229 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.230 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.230 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:53:14.229608) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.231 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.231 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.231 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.231 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.231 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.231 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.231 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.231 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.allocation volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.232 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.232 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.allocation volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.232 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.232 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.233 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.233 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
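[annotation] The disk.device.allocation volumes are raw bytes: 1073741824 is exactly 1 GiB (1024^3), and 509952 bytes is 498 KiB, so each instance shows a 1 GiB device plus a much smaller second device (likely a config drive), with 0cc56d17-... carrying two 1 GiB devices. A quick formatter for reading these by eye (hypothetical helper, not part of the agent):

    def human_bytes(n):
        """Render a byte count in binary units, as these samples are usually read."""
        for unit in ("B", "KiB", "MiB", "GiB", "TiB"):
            if n < 1024:
                return f"{n:.1f} {unit}"
            n /= 1024
        return f"{n:.1f} PiB"

    # human_bytes(1073741824) -> "1.0 GiB"; human_bytes(509952) -> "498.0 KiB"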
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.233 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.233 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.233 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.234 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.234 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:53:14.231599) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.234 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.234 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.234 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.235 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.235 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.235 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.235 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.236 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:53:14.234476) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.236 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.236 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.236 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.236 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/cpu volume: 335380000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.236 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/cpu volume: 333010000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.237 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 64880000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.237 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
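[annotation] The cpu meter is cumulative CPU time in nanoseconds, so 335380000000 is about 335.4 s of CPU consumed by instance 8422017b-... since it started. Utilisation has to be derived from two consecutive polls; a sketch of the usual conversion (illustrative; vcpus must be supplied by the caller):

    def cpu_util_percent(prev_ns, curr_ns, interval_s, vcpus=1):
        """Approximate %CPU between two cumulative cpu samples (nanoseconds)."""
        delta_ns = curr_ns - prev_ns
        return 100.0 * delta_ns / (interval_s * 1e9 * vcpus)

    # e.g. a 3.0e9 ns delta over a 300 s interval on 1 vCPU -> 1.0 (% CPU)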
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.237 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.237 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.237 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.237 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.238 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.238 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.bytes volume: 2250 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.238 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.bytes volume: 2250 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.238 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2412 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.238 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.239 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.239 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.239 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.239 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.239 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.239 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/memory.usage volume: 42.40625 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.239 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/memory.usage volume: 42.25 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.240 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:53:14.236452) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.240 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:53:14.238037) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.240 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:53:14.239606) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.240 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.83984375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.240 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
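[annotation] memory.usage is documented in MB (in practice mebibytes, taken from the hypervisor's memory stats), which is why the samples are small floats rather than byte counts: 42.40625 here is 42.40625 MiB, i.e. exactly 43424 KiB. A one-line conversion of the sample above (the 512 MiB flavor size in the second line is hypothetical):

    kib = 42.40625 * 1024        # -> 43424.0 KiB
    pct = 100 * 42.40625 / 512   # ~8.3% if the flavor had 512 MiB of RAM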
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.241 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.241 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.241 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.241 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.242 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.242 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.242 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.242 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.242 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.242 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.242 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.242 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.242 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.242 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.242 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.242 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.242 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.242 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.243 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.243 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.243 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.243 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.243 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.243 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.243 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.243 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.243 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:53:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:53:14.243 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
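[annotation] The burst of "Finished processing pollster [...]" lines marks the end of the polling task: one line per meter, including the two rate meters that were skipped. Tallying them per cycle is a cheap way to verify every configured meter completed; a small sketch over a journal export (helper names are illustrative):

    from collections import Counter

    MARKER = "Finished processing pollster ["

    def finished_meters(lines):
        """Tally completed pollsters from ceilometer agent log lines."""
        tally = Counter()
        for line in lines:
            if MARKER in line:
                meter = line.split(MARKER, 1)[1].split("]", 1)[0]
                tally[meter] += 1
        return tally

    # finished_meters(open("compute-0.log")) -> Counter({'cpu': 1, 'memory.usage': 1, ...})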
Oct 11 02:53:14 compute-0 podman[476480]: 2025-10-11 02:53:14.259776212 +0000 UTC m=+1.238446972 container remove 6c24edc924f022f6bed7972033dfcfd5fc9b9150cab93a1cf92006900a0057ea (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_noether, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507)
Oct 11 02:53:14 compute-0 systemd[1]: libpod-conmon-6c24edc924f022f6bed7972033dfcfd5fc9b9150cab93a1cf92006900a0057ea.scope: Deactivated successfully.
Oct 11 02:53:14 compute-0 sudo[476311]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:14 compute-0 podman[476498]: 2025-10-11 02:53:14.391473974 +0000 UTC m=+0.085526351 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
Oct 11 02:53:14 compute-0 podman[476496]: 2025-10-11 02:53:14.407478492 +0000 UTC m=+0.101884825 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
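[editor's note] The health_status=healthy events above are podman's periodic healthcheck runs; the check command and its mount are the 'healthcheck' entry inside each container's config_data. A minimal Python sketch for querying the same state by hand, assuming the container name from this log and a working `podman inspect`; the JSON key is "Health" on recent podman releases and "Healthcheck" on older ones, so both are tried:

    import json
    import subprocess

    def container_health(name: str) -> str:
        # `podman inspect <name>` prints a one-element JSON array.
        out = subprocess.run(["podman", "inspect", name],
                             check=True, capture_output=True, text=True).stdout
        state = json.loads(out)[0]["State"]
        # Key name varies across podman versions (assumption, hedged above).
        health = state.get("Health") or state.get("Healthcheck") or {}
        return health.get("Status", "unknown")

    print(container_health("ovn_metadata_agent"))  # expected: "healthy"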
Oct 11 02:53:14 compute-0 sudo[476519]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:53:14 compute-0 sudo[476519]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:14 compute-0 sudo[476519]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:14 compute-0 podman[476497]: 2025-10-11 02:53:14.434327245 +0000 UTC m=+0.130187022 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, org.label-schema.vendor=CentOS)
Oct 11 02:53:14 compute-0 sudo[476584]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:53:14 compute-0 sudo[476584]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:14 compute-0 sudo[476584]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:14 compute-0 podman[476576]: 2025-10-11 02:53:14.551565985 +0000 UTC m=+0.124166281 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_controller)
Oct 11 02:53:14 compute-0 sudo[476623]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:53:14 compute-0 sudo[476623]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:14 compute-0 sudo[476623]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:14 compute-0 sudo[476653]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:53:14 compute-0 sudo[476653]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:15 compute-0 ceph-mon[191930]: pgmap v2269: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2270: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:15 compute-0 podman[476719]: 2025-10-11 02:53:15.212364971 +0000 UTC m=+0.049290586 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:53:15 compute-0 nova_compute[356901]: 2025-10-11 02:53:15.345 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:15 compute-0 podman[476719]: 2025-10-11 02:53:15.451763183 +0000 UTC m=+0.288688768 container create 92d884c8a7c6f1e2d52d8b102d5c772b90a0cf35472899ae7c8f19bac2563845 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_mayer, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507)
Oct 11 02:53:15 compute-0 nova_compute[356901]: 2025-10-11 02:53:15.567 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:15 compute-0 systemd[1]: Started libpod-conmon-92d884c8a7c6f1e2d52d8b102d5c772b90a0cf35472899ae7c8f19bac2563845.scope.
Oct 11 02:53:15 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:53:15 compute-0 podman[476719]: 2025-10-11 02:53:15.90343295 +0000 UTC m=+0.740358565 container init 92d884c8a7c6f1e2d52d8b102d5c772b90a0cf35472899ae7c8f19bac2563845 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_mayer, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2)
Oct 11 02:53:15 compute-0 podman[476719]: 2025-10-11 02:53:15.926708317 +0000 UTC m=+0.763633912 container start 92d884c8a7c6f1e2d52d8b102d5c772b90a0cf35472899ae7c8f19bac2563845 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_mayer, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:53:15 compute-0 angry_mayer[476735]: 167 167
Oct 11 02:53:15 compute-0 systemd[1]: libpod-92d884c8a7c6f1e2d52d8b102d5c772b90a0cf35472899ae7c8f19bac2563845.scope: Deactivated successfully.
Oct 11 02:53:16 compute-0 podman[476719]: 2025-10-11 02:53:16.205691968 +0000 UTC m=+1.042617583 container attach 92d884c8a7c6f1e2d52d8b102d5c772b90a0cf35472899ae7c8f19bac2563845 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_mayer, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 02:53:16 compute-0 podman[476719]: 2025-10-11 02:53:16.208019287 +0000 UTC m=+1.044944912 container died 92d884c8a7c6f1e2d52d8b102d5c772b90a0cf35472899ae7c8f19bac2563845 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_mayer, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:53:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:53:16 compute-0 ceph-mon[191930]: pgmap v2270: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:16 compute-0 systemd[1]: var-lib-containers-storage-overlay-74f00689830e0bccb024b37e7e08c4e402fcc162b52b2f6114e48d9a63b383fb-merged.mount: Deactivated successfully.
Oct 11 02:53:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2271: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:17 compute-0 podman[476719]: 2025-10-11 02:53:17.333127469 +0000 UTC m=+2.170053074 container remove 92d884c8a7c6f1e2d52d8b102d5c772b90a0cf35472899ae7c8f19bac2563845 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=angry_mayer, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:53:17 compute-0 systemd[1]: libpod-conmon-92d884c8a7c6f1e2d52d8b102d5c772b90a0cf35472899ae7c8f19bac2563845.scope: Deactivated successfully.
Oct 11 02:53:17 compute-0 podman[476758]: 2025-10-11 02:53:17.560118603 +0000 UTC m=+0.042455316 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:53:17 compute-0 podman[476758]: 2025-10-11 02:53:17.798499726 +0000 UTC m=+0.280836359 container create a9ddf91c18f08083d6170710d67bd516e0bacad10a82effbf89d23dfbb6c5231 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_chatelet, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef)
Oct 11 02:53:17 compute-0 nova_compute[356901]: 2025-10-11 02:53:17.898 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:53:18 compute-0 systemd[1]: Started libpod-conmon-a9ddf91c18f08083d6170710d67bd516e0bacad10a82effbf89d23dfbb6c5231.scope.
Oct 11 02:53:18 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:53:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a04a4b091db2adcfffd99b30c5f9f580ad1b4b430bae11881314184ef857d5cb/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:53:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a04a4b091db2adcfffd99b30c5f9f580ad1b4b430bae11881314184ef857d5cb/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:53:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a04a4b091db2adcfffd99b30c5f9f580ad1b4b430bae11881314184ef857d5cb/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:53:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/a04a4b091db2adcfffd99b30c5f9f580ad1b4b430bae11881314184ef857d5cb/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
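[editor's note] The kernel's "supports timestamps until 2038 (0x7fffffff)" notices on the overlay remounts above refer to the 32-bit time_t ceiling; converting the quoted hex value shows the exact cutoff. A one-line check in Python:

    from datetime import datetime, timezone

    # 0x7fffffff seconds after the Unix epoch, i.e. 2**31 - 1.
    print(datetime.fromtimestamp(0x7FFFFFFF, tz=timezone.utc))
    # 2038-01-19 03:14:07+00:00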
Oct 11 02:53:18 compute-0 ceph-mon[191930]: pgmap v2271: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:18 compute-0 podman[476758]: 2025-10-11 02:53:18.69417459 +0000 UTC m=+1.176511303 container init a9ddf91c18f08083d6170710d67bd516e0bacad10a82effbf89d23dfbb6c5231 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_chatelet, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, ceph=True, OSD_FLAVOR=default)
Oct 11 02:53:18 compute-0 podman[476758]: 2025-10-11 02:53:18.717068976 +0000 UTC m=+1.199405609 container start a9ddf91c18f08083d6170710d67bd516e0bacad10a82effbf89d23dfbb6c5231 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_chatelet, ceph=True, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:53:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:53:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 4200.1 total, 600.0 interval
                                            Cumulative writes: 11K writes, 44K keys, 11K commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 11K writes, 3098 syncs, 3.70 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 775 writes, 2877 keys, 775 commit groups, 1.0 writes per commit group, ingest: 3.62 MB, 0.01 MB/s
                                            Interval WAL: 775 writes, 302 syncs, 2.57 writes per sync, written: 0.00 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
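[editor's note] The "writes per sync" figure in the WAL lines of the dump above is simply writes divided by syncs; recomputing it from the interval numbers reproduces the logged 2.57. A quick sketch, with both operands copied from the dump:

    # Interval WAL: 775 writes, 302 syncs (values from the stats dump above).
    interval_writes, interval_syncs = 775, 302
    print(f"{interval_writes / interval_syncs:.2f} writes per sync")  # 2.57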
Oct 11 02:53:18 compute-0 nova_compute[356901]: 2025-10-11 02:53:18.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:53:18 compute-0 nova_compute[356901]: 2025-10-11 02:53:18.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:53:18 compute-0 nova_compute[356901]: 2025-10-11 02:53:18.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:53:18 compute-0 podman[476758]: 2025-10-11 02:53:18.917393824 +0000 UTC m=+1.399730657 container attach a9ddf91c18f08083d6170710d67bd516e0bacad10a82effbf89d23dfbb6c5231 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_chatelet, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:53:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2272: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:19 compute-0 magical_chatelet[476774]: {
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:     "0": [
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:         {
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "devices": [
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "/dev/loop3"
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             ],
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "lv_name": "ceph_lv0",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "lv_size": "21470642176",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "name": "ceph_lv0",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "tags": {
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.cluster_name": "ceph",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.crush_device_class": "",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.encrypted": "0",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.osd_id": "0",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.type": "block",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.vdo": "0"
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             },
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "type": "block",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "vg_name": "ceph_vg0"
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:         }
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:     ],
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:     "1": [
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:         {
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "devices": [
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "/dev/loop4"
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             ],
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "lv_name": "ceph_lv1",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "lv_size": "21470642176",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "name": "ceph_lv1",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "tags": {
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.cluster_name": "ceph",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.crush_device_class": "",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.encrypted": "0",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.osd_id": "1",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.type": "block",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.vdo": "0"
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             },
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "type": "block",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "vg_name": "ceph_vg1"
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:         }
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:     ],
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:     "2": [
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:         {
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "devices": [
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "/dev/loop5"
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             ],
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "lv_name": "ceph_lv2",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "lv_size": "21470642176",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "name": "ceph_lv2",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "tags": {
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.cluster_name": "ceph",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.crush_device_class": "",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.encrypted": "0",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.osd_id": "2",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.type": "block",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:                 "ceph.vdo": "0"
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             },
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "type": "block",
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:             "vg_name": "ceph_vg2"
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:         }
Oct 11 02:53:19 compute-0 magical_chatelet[476774]:     ]
Oct 11 02:53:19 compute-0 magical_chatelet[476774]: }
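[editor's note] The JSON block emitted by magical_chatelet is the output of the `ceph-volume ... lvm list --format json` call that cephadm launched via sudo a few seconds earlier: a map from OSD id to its LVM-backed block device. A short Python sketch that condenses such a payload into one line per OSD, assuming the output was captured to a file named lvm_list.json (hypothetical; the key names mirror the JSON above):

    import json

    with open("lvm_list.json") as fh:   # hypothetical capture of the output
        osds = json.load(fh)

    for osd_id, entries in sorted(osds.items()):
        for lv in entries:
            print(f"osd.{osd_id}: lv={lv['lv_path']} "
                  f"devices={','.join(lv['devices'])} "
                  f"osd_fsid={lv['tags']['ceph.osd_fsid']}")
    # osd.0: lv=/dev/ceph_vg0/ceph_lv0 devices=/dev/loop3
    #        osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6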
Oct 11 02:53:19 compute-0 systemd[1]: libpod-a9ddf91c18f08083d6170710d67bd516e0bacad10a82effbf89d23dfbb6c5231.scope: Deactivated successfully.
Oct 11 02:53:19 compute-0 podman[476786]: 2025-10-11 02:53:19.770040552 +0000 UTC m=+0.067052316 container died a9ddf91c18f08083d6170710d67bd516e0bacad10a82effbf89d23dfbb6c5231 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_chatelet, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.license=GPLv2)
Oct 11 02:53:20 compute-0 nova_compute[356901]: 2025-10-11 02:53:20.350 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:20 compute-0 nova_compute[356901]: 2025-10-11 02:53:20.569 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:20 compute-0 systemd[1]: var-lib-containers-storage-overlay-a04a4b091db2adcfffd99b30c5f9f580ad1b4b430bae11881314184ef857d5cb-merged.mount: Deactivated successfully.
Oct 11 02:53:20 compute-0 ceph-mon[191930]: pgmap v2272: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2273: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:53:21 compute-0 podman[476786]: 2025-10-11 02:53:21.650218117 +0000 UTC m=+1.947229841 container remove a9ddf91c18f08083d6170710d67bd516e0bacad10a82effbf89d23dfbb6c5231 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_chatelet, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef)
Oct 11 02:53:21 compute-0 systemd[1]: libpod-conmon-a9ddf91c18f08083d6170710d67bd516e0bacad10a82effbf89d23dfbb6c5231.scope: Deactivated successfully.
Oct 11 02:53:21 compute-0 sudo[476653]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:21 compute-0 sudo[476813]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:53:21 compute-0 sudo[476813]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:21 compute-0 podman[476801]: 2025-10-11 02:53:21.875811764 +0000 UTC m=+0.148179748 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=iscsid, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
Oct 11 02:53:21 compute-0 sudo[476813]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:21 compute-0 podman[476800]: 2025-10-11 02:53:21.876267996 +0000 UTC m=+0.150328783 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:53:21 compute-0 sudo[476865]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:53:21 compute-0 sudo[476865]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:21 compute-0 sudo[476865]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:22 compute-0 sudo[476891]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:53:22 compute-0 sudo[476891]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:22 compute-0 sudo[476891]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:22 compute-0 sudo[476916]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:53:22 compute-0 sudo[476916]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:22 compute-0 nova_compute[356901]: 2025-10-11 02:53:22.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:53:22 compute-0 podman[476979]: 2025-10-11 02:53:22.828858306 +0000 UTC m=+0.065998633 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:53:23 compute-0 podman[476979]: 2025-10-11 02:53:23.084098412 +0000 UTC m=+0.321238739 container create c724b916a17382c699385f2a6d54956dce560c89c7bf8a5176f6060f380655c7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_moser, org.label-schema.license=GPLv2, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:53:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2274: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:23 compute-0 systemd[1]: Started libpod-conmon-c724b916a17382c699385f2a6d54956dce560c89c7bf8a5176f6060f380655c7.scope.
Oct 11 02:53:23 compute-0 ceph-mon[191930]: pgmap v2273: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:23 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:53:23 compute-0 podman[476979]: 2025-10-11 02:53:23.519506827 +0000 UTC m=+0.756647214 container init c724b916a17382c699385f2a6d54956dce560c89c7bf8a5176f6060f380655c7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_moser, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:53:23 compute-0 podman[476979]: 2025-10-11 02:53:23.537065538 +0000 UTC m=+0.774205835 container start c724b916a17382c699385f2a6d54956dce560c89c7bf8a5176f6060f380655c7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_moser, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0)
Oct 11 02:53:23 compute-0 recursing_moser[476996]: 167 167
Oct 11 02:53:23 compute-0 systemd[1]: libpod-c724b916a17382c699385f2a6d54956dce560c89c7bf8a5176f6060f380655c7.scope: Deactivated successfully.
Oct 11 02:53:23 compute-0 podman[476979]: 2025-10-11 02:53:23.67855628 +0000 UTC m=+0.915696617 container attach c724b916a17382c699385f2a6d54956dce560c89c7bf8a5176f6060f380655c7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_moser, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:53:23 compute-0 podman[476979]: 2025-10-11 02:53:23.679419177 +0000 UTC m=+0.916559494 container died c724b916a17382c699385f2a6d54956dce560c89c7bf8a5176f6060f380655c7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_moser, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:53:24 compute-0 systemd[1]: var-lib-containers-storage-overlay-ced75939f7cb9934858f7a7ae681476515ad3bf05900781f384e0670ec6bd835-merged.mount: Deactivated successfully.
Oct 11 02:53:24 compute-0 nova_compute[356901]: 2025-10-11 02:53:24.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:53:24 compute-0 nova_compute[356901]: 2025-10-11 02:53:24.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:53:24 compute-0 nova_compute[356901]: 2025-10-11 02:53:24.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:53:25 compute-0 ceph-mon[191930]: pgmap v2274: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2275: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:25 compute-0 nova_compute[356901]: 2025-10-11 02:53:25.361 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:25 compute-0 nova_compute[356901]: 2025-10-11 02:53:25.573 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:25 compute-0 nova_compute[356901]: 2025-10-11 02:53:25.693 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:53:25 compute-0 nova_compute[356901]: 2025-10-11 02:53:25.694 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:53:25 compute-0 nova_compute[356901]: 2025-10-11 02:53:25.695 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:53:25 compute-0 nova_compute[356901]: 2025-10-11 02:53:25.697 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:53:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:53:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 4200.2 total, 600.0 interval
                                            Cumulative writes: 9424 writes, 37K keys, 9424 commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 9424 writes, 2439 syncs, 3.86 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 498 writes, 1584 keys, 498 commit groups, 1.0 writes per commit group, ingest: 2.24 MB, 0.00 MB/s
                                            Interval WAL: 498 writes, 202 syncs, 2.47 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:53:26 compute-0 podman[476979]: 2025-10-11 02:53:26.100704628 +0000 UTC m=+3.337844955 container remove c724b916a17382c699385f2a6d54956dce560c89c7bf8a5176f6060f380655c7 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=recursing_moser, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:53:26 compute-0 ceph-mgr[192233]: [devicehealth INFO root] Check health
Oct 11 02:53:26 compute-0 systemd[1]: libpod-conmon-c724b916a17382c699385f2a6d54956dce560c89c7bf8a5176f6060f380655c7.scope: Deactivated successfully.
Oct 11 02:53:26 compute-0 ceph-mon[191930]: pgmap v2275: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:26 compute-0 podman[477019]: 2025-10-11 02:53:26.375106809 +0000 UTC m=+0.051795868 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:53:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:53:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:53:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:53:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:53:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:53:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:53:26 compute-0 podman[477019]: 2025-10-11 02:53:26.679071161 +0000 UTC m=+0.355760130 container create f29d578f0079f66b5bae541694d103895340a7262126f62e57178bbd238058dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_carson, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 02:53:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:53:27 compute-0 systemd[1]: Started libpod-conmon-f29d578f0079f66b5bae541694d103895340a7262126f62e57178bbd238058dc.scope.
Oct 11 02:53:27 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:53:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/14dd035f66da0fdc5dfc725811333f2d644e642608dd93c2898bf5565027d7c5/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:53:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/14dd035f66da0fdc5dfc725811333f2d644e642608dd93c2898bf5565027d7c5/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:53:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/14dd035f66da0fdc5dfc725811333f2d644e642608dd93c2898bf5565027d7c5/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:53:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/14dd035f66da0fdc5dfc725811333f2d644e642608dd93c2898bf5565027d7c5/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:53:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2276: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:27 compute-0 podman[477019]: 2025-10-11 02:53:27.397918503 +0000 UTC m=+1.074607492 container init f29d578f0079f66b5bae541694d103895340a7262126f62e57178bbd238058dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_carson, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507)
Oct 11 02:53:27 compute-0 podman[477019]: 2025-10-11 02:53:27.421019696 +0000 UTC m=+1.097708665 container start f29d578f0079f66b5bae541694d103895340a7262126f62e57178bbd238058dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_carson, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:53:27 compute-0 podman[477019]: 2025-10-11 02:53:27.663541164 +0000 UTC m=+1.340230163 container attach f29d578f0079f66b5bae541694d103895340a7262126f62e57178bbd238058dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_carson, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, ceph=True, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:53:27 compute-0 nova_compute[356901]: 2025-10-11 02:53:27.692 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:53:27 compute-0 nova_compute[356901]: 2025-10-11 02:53:27.713 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:53:27 compute-0 nova_compute[356901]: 2025-10-11 02:53:27.714 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:53:27 compute-0 nova_compute[356901]: 2025-10-11 02:53:27.714 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:53:27 compute-0 nova_compute[356901]: 2025-10-11 02:53:27.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:53:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:53:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2672875961' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:53:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:53:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2672875961' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:53:27 compute-0 nova_compute[356901]: 2025-10-11 02:53:27.932 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:53:27 compute-0 nova_compute[356901]: 2025-10-11 02:53:27.934 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:53:27 compute-0 nova_compute[356901]: 2025-10-11 02:53:27.934 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:53:27 compute-0 nova_compute[356901]: 2025-10-11 02:53:27.937 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:53:27 compute-0 nova_compute[356901]: 2025-10-11 02:53:27.937 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:53:28 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:53:28 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/4219061123' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:53:28 compute-0 nova_compute[356901]: 2025-10-11 02:53:28.412 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.475s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:53:28 compute-0 naughty_carson[477036]: {
Oct 11 02:53:28 compute-0 naughty_carson[477036]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:53:28 compute-0 naughty_carson[477036]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:53:28 compute-0 naughty_carson[477036]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:53:28 compute-0 naughty_carson[477036]:         "osd_id": 1,
Oct 11 02:53:28 compute-0 naughty_carson[477036]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:53:28 compute-0 naughty_carson[477036]:         "type": "bluestore"
Oct 11 02:53:28 compute-0 naughty_carson[477036]:     },
Oct 11 02:53:28 compute-0 naughty_carson[477036]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:53:28 compute-0 naughty_carson[477036]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:53:28 compute-0 naughty_carson[477036]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:53:28 compute-0 naughty_carson[477036]:         "osd_id": 2,
Oct 11 02:53:28 compute-0 naughty_carson[477036]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:53:28 compute-0 naughty_carson[477036]:         "type": "bluestore"
Oct 11 02:53:28 compute-0 naughty_carson[477036]:     },
Oct 11 02:53:28 compute-0 naughty_carson[477036]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:53:28 compute-0 naughty_carson[477036]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:53:28 compute-0 naughty_carson[477036]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:53:28 compute-0 naughty_carson[477036]:         "osd_id": 0,
Oct 11 02:53:28 compute-0 naughty_carson[477036]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:53:28 compute-0 naughty_carson[477036]:         "type": "bluestore"
Oct 11 02:53:28 compute-0 naughty_carson[477036]:     }
Oct 11 02:53:28 compute-0 naughty_carson[477036]: }
Oct 11 02:53:28 compute-0 nova_compute[356901]: 2025-10-11 02:53:28.526 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:53:28 compute-0 nova_compute[356901]: 2025-10-11 02:53:28.526 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:53:28 compute-0 nova_compute[356901]: 2025-10-11 02:53:28.531 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:53:28 compute-0 nova_compute[356901]: 2025-10-11 02:53:28.531 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:53:28 compute-0 nova_compute[356901]: 2025-10-11 02:53:28.537 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:53:28 compute-0 nova_compute[356901]: 2025-10-11 02:53:28.537 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:53:28 compute-0 nova_compute[356901]: 2025-10-11 02:53:28.537 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:53:28 compute-0 systemd[1]: libpod-f29d578f0079f66b5bae541694d103895340a7262126f62e57178bbd238058dc.scope: Deactivated successfully.
Oct 11 02:53:28 compute-0 systemd[1]: libpod-f29d578f0079f66b5bae541694d103895340a7262126f62e57178bbd238058dc.scope: Consumed 1.128s CPU time.
Oct 11 02:53:28 compute-0 podman[477091]: 2025-10-11 02:53:28.643911082 +0000 UTC m=+0.059498726 container died f29d578f0079f66b5bae541694d103895340a7262126f62e57178bbd238058dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_carson, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef)
Oct 11 02:53:28 compute-0 systemd[1]: var-lib-containers-storage-overlay-14dd035f66da0fdc5dfc725811333f2d644e642608dd93c2898bf5565027d7c5-merged.mount: Deactivated successfully.
Oct 11 02:53:28 compute-0 podman[477091]: 2025-10-11 02:53:28.74538996 +0000 UTC m=+0.160977574 container remove f29d578f0079f66b5bae541694d103895340a7262126f62e57178bbd238058dc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=naughty_carson, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:53:28 compute-0 systemd[1]: libpod-conmon-f29d578f0079f66b5bae541694d103895340a7262126f62e57178bbd238058dc.scope: Deactivated successfully.
Oct 11 02:53:28 compute-0 sudo[476916]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:28 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:53:28 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:53:28 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:53:28 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:53:28 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 293cf389-452f-44fa-86e5-60e9bbf845c1 does not exist
Oct 11 02:53:28 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 30ce87f4-b62e-4e71-8d9d-65c877403d1e does not exist
Oct 11 02:53:28 compute-0 ceph-mon[191930]: pgmap v2276: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2672875961' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:53:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2672875961' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:53:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/4219061123' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:53:28 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:53:28 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:53:28 compute-0 sudo[477107]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:53:28 compute-0 sudo[477107]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:28 compute-0 sudo[477107]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:28 compute-0 nova_compute[356901]: 2025-10-11 02:53:28.982 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:53:28 compute-0 nova_compute[356901]: 2025-10-11 02:53:28.984 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3177MB free_disk=59.86394500732422GB free_vcpus=5 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:53:28 compute-0 nova_compute[356901]: 2025-10-11 02:53:28.985 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:53:28 compute-0 nova_compute[356901]: 2025-10-11 02:53:28.985 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:53:29 compute-0 sudo[477132]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:53:29 compute-0 sudo[477132]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:53:29 compute-0 sudo[477132]: pam_unix(sudo:session): session closed for user root
Oct 11 02:53:29 compute-0 nova_compute[356901]: 2025-10-11 02:53:29.095 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:53:29 compute-0 nova_compute[356901]: 2025-10-11 02:53:29.095 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 8422017b-c868-4ba2-ab1f-61d3668ca145 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:53:29 compute-0 nova_compute[356901]: 2025-10-11 02:53:29.096 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance f98d09d7-6aa0-4405-bfa0-be1f78d3911f actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:53:29 compute-0 nova_compute[356901]: 2025-10-11 02:53:29.096 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 3 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:53:29 compute-0 nova_compute[356901]: 2025-10-11 02:53:29.096 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1280MB phys_disk=59GB used_disk=4GB total_vcpus=8 used_vcpus=3 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:53:29 compute-0 nova_compute[356901]: 2025-10-11 02:53:29.167 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:53:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2277: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:53:29 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3919187649' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:53:29 compute-0 nova_compute[356901]: 2025-10-11 02:53:29.646 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.479s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:53:29 compute-0 nova_compute[356901]: 2025-10-11 02:53:29.654 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:53:29 compute-0 nova_compute[356901]: 2025-10-11 02:53:29.670 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:53:29 compute-0 nova_compute[356901]: 2025-10-11 02:53:29.671 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:53:29 compute-0 nova_compute[356901]: 2025-10-11 02:53:29.671 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.686s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:53:29 compute-0 podman[157119]: time="2025-10-11T02:53:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:53:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:53:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:53:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:53:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9543 "" "Go-http-client/1.1"
Oct 11 02:53:29 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3919187649' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:53:30 compute-0 podman[477179]: 2025-10-11 02:53:30.225993693 +0000 UTC m=+0.110193811 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_ipmi, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']})
Oct 11 02:53:30 compute-0 podman[477181]: 2025-10-11 02:53:30.239883103 +0000 UTC m=+0.117367177 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:53:30 compute-0 podman[477180]: 2025-10-11 02:53:30.273199813 +0000 UTC m=+0.158179228 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.expose-services=, container_name=openstack_network_exporter, distribution-scope=public, build-date=2025-08-20T13:12:41, config_id=edpm, maintainer=Red Hat, Inc., name=ubi9-minimal, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.tags=minimal rhel9, url=https://catalog.redhat.com/en/search?searchType=containers, vcs-type=git, vendor=Red Hat, Inc., version=9.6, com.redhat.component=ubi9-minimal-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, architecture=x86_64, io.buildah.version=1.33.7, release=1755695350)
Oct 11 02:53:30 compute-0 nova_compute[356901]: 2025-10-11 02:53:30.367 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:30 compute-0 nova_compute[356901]: 2025-10-11 02:53:30.575 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:30 compute-0 ceph-mon[191930]: pgmap v2277: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2278: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:31 compute-0 openstack_network_exporter[374316]: ERROR   02:53:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:53:31 compute-0 openstack_network_exporter[374316]: ERROR   02:53:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:53:31 compute-0 openstack_network_exporter[374316]: ERROR   02:53:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:53:31 compute-0 openstack_network_exporter[374316]: ERROR   02:53:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:53:31 compute-0 openstack_network_exporter[374316]: ERROR   02:53:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:53:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:53:33 compute-0 ceph-mon[191930]: pgmap v2278: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2279: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:34 compute-0 ceph-mon[191930]: pgmap v2279: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2280: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:35 compute-0 nova_compute[356901]: 2025-10-11 02:53:35.372 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:35 compute-0 nova_compute[356901]: 2025-10-11 02:53:35.578 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:53:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2281: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:37 compute-0 ceph-mon[191930]: pgmap v2280: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:39 compute-0 ceph-mon[191930]: pgmap v2281: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:39 compute-0 podman[477240]: 2025-10-11 02:53:39.265171277 +0000 UTC m=+0.141554692 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, io.buildah.version=1.29.0, release=1214.1726694543, com.redhat.component=ubi9-container, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, container_name=kepler, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, summary=Provides the latest release of Red Hat Universal Base Image 9., config_id=edpm, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, vcs-type=git, architecture=x86_64, release-0.7.12=, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9, vendor=Red Hat, Inc., version=9.4, io.openshift.tags=base rhel9, maintainer=Red Hat, Inc., managed_by=edpm_ansible, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']})
Oct 11 02:53:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2282: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:40 compute-0 nova_compute[356901]: 2025-10-11 02:53:40.376 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:40 compute-0 nova_compute[356901]: 2025-10-11 02:53:40.580 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:40 compute-0 ceph-mon[191930]: pgmap v2282: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2283: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:53:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2284: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:43 compute-0 ceph-mon[191930]: pgmap v2283: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:44 compute-0 ceph-mon[191930]: pgmap v2284: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:44 compute-0 podman[477259]: 2025-10-11 02:53:44.831535824 +0000 UTC m=+0.120228591 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:53:44 compute-0 podman[477262]: 2025-10-11 02:53:44.854005697 +0000 UTC m=+0.130826513 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:53:44 compute-0 podman[477261]: 2025-10-11 02:53:44.866579917 +0000 UTC m=+0.154582455 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_managed=true, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_compute, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']})
Oct 11 02:53:44 compute-0 podman[477260]: 2025-10-11 02:53:44.878703035 +0000 UTC m=+0.147611647 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_id=ovn_controller, container_name=ovn_controller, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:53:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2285: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:45 compute-0 nova_compute[356901]: 2025-10-11 02:53:45.380 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:45 compute-0 nova_compute[356901]: 2025-10-11 02:53:45.583 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:53:47 compute-0 ceph-mon[191930]: pgmap v2285: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2286: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:48 compute-0 ceph-mon[191930]: pgmap v2286: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2287: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:50 compute-0 nova_compute[356901]: 2025-10-11 02:53:50.383 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:50 compute-0 ceph-mon[191930]: pgmap v2287: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:50 compute-0 nova_compute[356901]: 2025-10-11 02:53:50.585 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2288: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:53:52 compute-0 podman[477342]: 2025-10-11 02:53:52.246673101 +0000 UTC m=+0.126503827 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_id=multipathd)
Oct 11 02:53:52 compute-0 podman[477343]: 2025-10-11 02:53:52.27100073 +0000 UTC m=+0.144953135 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, container_name=iscsid, io.buildah.version=1.41.3, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:53:53 compute-0 ceph-mon[191930]: pgmap v2288: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2289: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:54 compute-0 ceph-mon[191930]: pgmap v2289: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:53:54.887 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:53:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:53:54.889 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:53:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:53:54.890 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:53:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2290: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:55 compute-0 nova_compute[356901]: 2025-10-11 02:53:55.391 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:55 compute-0 nova_compute[356901]: 2025-10-11 02:53:55.588 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:53:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:53:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:53:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:53:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:53:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:53:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:53:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:53:56
Oct 11 02:53:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:53:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:53:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['.mgr', 'default.rgw.log', 'vms', 'cephfs.cephfs.data', '.rgw.root', 'cephfs.cephfs.meta', 'backups', 'default.rgw.meta', 'default.rgw.control', 'images', 'volumes']
Oct 11 02:53:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:53:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:53:56 compute-0 ceph-mon[191930]: pgmap v2290: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2291: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:53:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:53:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:53:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:53:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:53:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:53:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:53:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:53:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:53:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
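[editor's note] The rbd_support module periodically reloads mirror-snapshot and trash-purge schedules per pool; the empty start_after= fields mean each pool is scanned from the beginning. A minimal sketch (rbd CLI assumed; pool names copied from the log) that lists whatever schedules those handlers would pick up:

    import subprocess

    for pool in ("vms", "volumes", "backups", "images"):
        out = subprocess.run(
            ["rbd", "mirror", "snapshot", "schedule", "ls",
             "--pool", pool, "--recursive"],
            capture_output=True, text=True)
        print(pool, out.stdout.strip() or "<no schedules>")
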
Oct 11 02:53:59 compute-0 ceph-mon[191930]: pgmap v2291: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2292: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:53:59 compute-0 podman[157119]: time="2025-10-11T02:53:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:53:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:53:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:53:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:53:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9551 "" "Go-http-client/1.1"
Oct 11 02:54:00 compute-0 nova_compute[356901]: 2025-10-11 02:54:00.395 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:00 compute-0 nova_compute[356901]: 2025-10-11 02:54:00.591 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:00 compute-0 ceph-mon[191930]: pgmap v2292: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:01 compute-0 podman[477378]: 2025-10-11 02:54:01.210816361 +0000 UTC m=+0.097248634 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, build-date=2025-08-20T13:12:41, container_name=openstack_network_exporter, version=9.6, architecture=x86_64, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, name=ubi9-minimal, io.openshift.expose-services=, maintainer=Red Hat, Inc., managed_by=edpm_ansible, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vcs-type=git, com.redhat.component=ubi9-minimal-container, io.openshift.tags=minimal rhel9, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc., io.buildah.version=1.33.7, release=1755695350, config_id=edpm, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal)
Oct 11 02:54:01 compute-0 podman[477379]: 2025-10-11 02:54:01.215570304 +0000 UTC m=+0.088481532 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:54:01 compute-0 podman[477377]: 2025-10-11 02:54:01.223684077 +0000 UTC m=+0.106797783 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_id=edpm, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true)
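[editor's note] Each health_status event above comes from podman running the container's configured healthcheck ('test': '/openstack/healthcheck ...') and recording the result (health_status=healthy, health_failing_streak=0). A minimal sketch (container name copied from the log) that triggers the same check on demand:

    import subprocess

    rc = subprocess.run(["podman", "healthcheck", "run",
                         "ceilometer_agent_ipmi"]).returncode
    # podman exits 0 when the check passes, matching health_status=healthy
    print("healthy" if rc == 0 else f"unhealthy (rc={rc})")
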
Oct 11 02:54:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2293: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:01 compute-0 openstack_network_exporter[374316]: ERROR   02:54:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:54:01 compute-0 openstack_network_exporter[374316]: ERROR   02:54:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:54:01 compute-0 openstack_network_exporter[374316]: ERROR   02:54:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:54:01 compute-0 openstack_network_exporter[374316]: ERROR   02:54:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:54:01 compute-0 openstack_network_exporter[374316]: ERROR   02:54:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
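[editor's note] The exporter probes daemons through ovs-appctl-style control sockets, which exist as <daemon>.<pid>.ctl files under each daemon's run directory; on a compute node ovn-northd does not run, so its socket is legitimately absent and this error repeats on every scrape. A minimal sketch (conventional default run-directory paths assumed) of the same existence check:

    import glob

    for rundir, daemon in [("/var/run/openvswitch", "ovsdb-server"),
                           ("/var/run/ovn", "ovn-northd")]:
        hits = glob.glob(f"{rundir}/{daemon}.*.ctl")
        print(daemon, "->", hits or "no control socket files found")
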
Oct 11 02:54:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:54:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2294: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:03 compute-0 ceph-mon[191930]: pgmap v2293: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:04 compute-0 ceph-mon[191930]: pgmap v2294: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2295: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:05 compute-0 nova_compute[356901]: 2025-10-11 02:54:05.403 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:05 compute-0 nova_compute[356901]: 2025-10-11 02:54:05.596 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2296: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:07 compute-0 ceph-mon[191930]: pgmap v2295: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.002072675530702611 of space, bias 1.0, pg target 0.6218026592107834 quantized to 32 (current 32)
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00125203744627857 of space, bias 1.0, pg target 0.375611233883571 quantized to 32 (current 32)
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:54:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
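[editor's note] The "pg target" in each autoscaler line is reproducible from the numbers in the line itself: target = capacity_ratio x bias x 300, where the factor 300 is consistent with mon_target_pg_per_osd=100 across the 3 OSDs backing this 60 GiB cluster (an inference, not logged). A worked check:

    # reproduces the logged "pg target" values from the logged inputs
    pools = {
        ".mgr":               (7.185749983720779e-06, 1.0),
        "vms":                (0.002072675530702611, 1.0),
        "cephfs.cephfs.meta": (5.087256625643029e-07, 4.0),
    }
    for name, (ratio, bias) in pools.items():
        print(name, ratio * bias * 300)

The tiny targets are then quantized to a power of two, which is why every pool keeps its current pg_num except cephfs.cephfs.meta, whose target quantizes to 16 against a current 32.
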
Oct 11 02:54:08 compute-0 ceph-mon[191930]: pgmap v2296: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2297: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:10 compute-0 podman[477440]: 2025-10-11 02:54:10.242927739 +0000 UTC m=+0.121077693 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.component=ubi9-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, vcs-type=git, container_name=kepler, architecture=x86_64, io.buildah.version=1.29.0, io.openshift.tags=base rhel9, version=9.4, config_id=edpm, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, name=ubi9, distribution-scope=public, maintainer=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, release=1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., release-0.7.12=, summary=Provides the latest release of Red Hat Universal Base Image 9., build-date=2024-09-18T21:23:30, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543)
Oct 11 02:54:10 compute-0 nova_compute[356901]: 2025-10-11 02:54:10.409 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:10 compute-0 nova_compute[356901]: 2025-10-11 02:54:10.602 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2298: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:11 compute-0 ceph-mon[191930]: pgmap v2297: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:54:13 compute-0 ceph-mon[191930]: pgmap v2298: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2299: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:13 compute-0 nova_compute[356901]: 2025-10-11 02:54:13.674 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:54:15 compute-0 podman[477462]: 2025-10-11 02:54:15.222207435 +0000 UTC m=+0.094693641 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 02:54:15 compute-0 podman[477459]: 2025-10-11 02:54:15.237779193 +0000 UTC m=+0.114566085 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:54:15 compute-0 podman[477461]: 2025-10-11 02:54:15.238082041 +0000 UTC m=+0.111073447 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, config_id=edpm, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, org.label-schema.vendor=CentOS, managed_by=edpm_ansible)
Oct 11 02:54:15 compute-0 podman[477460]: 2025-10-11 02:54:15.246203635 +0000 UTC m=+0.133200126 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, config_id=ovn_controller, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:54:15 compute-0 ceph-mon[191930]: pgmap v2299: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2300: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:15 compute-0 nova_compute[356901]: 2025-10-11 02:54:15.412 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:15 compute-0 nova_compute[356901]: 2025-10-11 02:54:15.606 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:15 compute-0 nova_compute[356901]: 2025-10-11 02:54:15.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:54:16 compute-0 ceph-mon[191930]: pgmap v2300: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:54:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2301: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:18 compute-0 ceph-mon[191930]: pgmap v2301: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2302: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:19 compute-0 nova_compute[356901]: 2025-10-11 02:54:19.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:54:19 compute-0 nova_compute[356901]: 2025-10-11 02:54:19.898 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:54:20 compute-0 nova_compute[356901]: 2025-10-11 02:54:20.417 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:20 compute-0 nova_compute[356901]: 2025-10-11 02:54:20.610 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:20 compute-0 ceph-mon[191930]: pgmap v2302: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:20 compute-0 nova_compute[356901]: 2025-10-11 02:54:20.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:54:20 compute-0 nova_compute[356901]: 2025-10-11 02:54:20.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
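[editor's note] These "Running periodic task" lines come from oslo.service's periodic-task machinery: decorated ComputeManager methods run on their configured spacing, each preceded by a DEBUG line, and _reclaim_queued_deletes then returns early because reclaim_instance_interval is unset (<= 0). A minimal sketch of the pattern (class name and spacing illustrative):

    from oslo_config import cfg
    from oslo_service import periodic_task

    class Manager(periodic_task.PeriodicTasks):
        @periodic_task.periodic_task(spacing=60)
        def _poll_rebooting_instances(self, context):
            pass  # each invocation is preceded by the DEBUG line above

    Manager(cfg.CONF).run_periodic_tasks(context=None)
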
Oct 11 02:54:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2303: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:54:22 compute-0 ceph-mon[191930]: pgmap v2303: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:23 compute-0 podman[477542]: 2025-10-11 02:54:23.2520375 +0000 UTC m=+0.129870377 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, container_name=iscsid, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, tcib_managed=true)
Oct 11 02:54:23 compute-0 podman[477541]: 2025-10-11 02:54:23.253615814 +0000 UTC m=+0.134736207 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, config_id=multipathd, org.label-schema.license=GPLv2, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=multipathd, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:54:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2304: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:24 compute-0 nova_compute[356901]: 2025-10-11 02:54:24.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:54:25 compute-0 ceph-mon[191930]: pgmap v2304: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2305: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:25 compute-0 nova_compute[356901]: 2025-10-11 02:54:25.423 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:25 compute-0 nova_compute[356901]: 2025-10-11 02:54:25.614 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:26 compute-0 ceph-mon[191930]: pgmap v2305: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:54:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:54:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:54:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:54:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:54:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:54:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:54:26 compute-0 nova_compute[356901]: 2025-10-11 02:54:26.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:54:26 compute-0 nova_compute[356901]: 2025-10-11 02:54:26.908 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:54:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2306: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:27 compute-0 nova_compute[356901]: 2025-10-11 02:54:27.704 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:54:27 compute-0 nova_compute[356901]: 2025-10-11 02:54:27.705 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:54:27 compute-0 nova_compute[356901]: 2025-10-11 02:54:27.705 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:54:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:54:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3460530041' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:54:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:54:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3460530041' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:54:28 compute-0 ceph-mon[191930]: pgmap v2306: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3460530041' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:54:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3460530041' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:54:29 compute-0 sudo[477582]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:54:29 compute-0 sudo[477582]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:54:29 compute-0 sudo[477582]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2307: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:29 compute-0 sudo[477607]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:54:29 compute-0 sudo[477607]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:54:29 compute-0 sudo[477607]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:29 compute-0 sudo[477632]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:54:29 compute-0 sudo[477632]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:54:29 compute-0 sudo[477632]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:29 compute-0 sudo[477657]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:54:29 compute-0 sudo[477657]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:54:29 compute-0 nova_compute[356901]: 2025-10-11 02:54:29.709 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Updating instance_info_cache with network_info: [{"id": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "address": "fa:16:3e:2c:af:96", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.3.53", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape00931c0-3d", "ovs_interfaceid": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:54:29 compute-0 podman[157119]: time="2025-10-11T02:54:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:54:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:54:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:54:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:54:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9558 "" "Go-http-client/1.1"
Oct 11 02:54:29 compute-0 nova_compute[356901]: 2025-10-11 02:54:29.764 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-8422017b-c868-4ba2-ab1f-61d3668ca145" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:54:29 compute-0 nova_compute[356901]: 2025-10-11 02:54:29.764 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:54:29 compute-0 nova_compute[356901]: 2025-10-11 02:54:29.765 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:54:29 compute-0 nova_compute[356901]: 2025-10-11 02:54:29.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:54:29 compute-0 nova_compute[356901]: 2025-10-11 02:54:29.942 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:54:29 compute-0 nova_compute[356901]: 2025-10-11 02:54:29.943 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:54:29 compute-0 nova_compute[356901]: 2025-10-11 02:54:29.943 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:54:29 compute-0 nova_compute[356901]: 2025-10-11 02:54:29.944 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:54:29 compute-0 nova_compute[356901]: 2025-10-11 02:54:29.944 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:54:30 compute-0 sudo[477657]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:30 compute-0 nova_compute[356901]: 2025-10-11 02:54:30.430 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:54:30 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/834237351' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:54:30 compute-0 nova_compute[356901]: 2025-10-11 02:54:30.480 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.536s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
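[editor's note] The resource tracker audits Ceph capacity by shelling out to the ceph CLI, exactly as logged above (0.536 s round trip). A minimal sketch of the same probe, assuming the standard ceph df JSON layout with a top-level "stats" object:

    import json, subprocess

    df = json.loads(subprocess.check_output(
        ["ceph", "df", "--format=json", "--id", "openstack",
         "--conf", "/etc/ceph/ceph.conf"]))
    print(df["stats"]["total_bytes"], df["stats"]["total_avail_bytes"])
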
Oct 11 02:54:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:54:30 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:54:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:54:30 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:54:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:54:30 compute-0 nova_compute[356901]: 2025-10-11 02:54:30.617 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:30 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:54:30 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 2b8b0962-e91e-4a09-9028-04bb388919a1 does not exist
Oct 11 02:54:30 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev a1bb3542-548a-4102-ae65-2270d7ef47b4 does not exist
Oct 11 02:54:30 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 43cb4813-b0ce-48ca-a1e6-292b7f79bf2f does not exist
Oct 11 02:54:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:54:30 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:54:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:54:30 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:54:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:54:30 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:54:30 compute-0 sudo[477734]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:54:30 compute-0 sudo[477734]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:54:30 compute-0 sudo[477734]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:30 compute-0 nova_compute[356901]: 2025-10-11 02:54:30.882 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:54:30 compute-0 nova_compute[356901]: 2025-10-11 02:54:30.882 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:54:30 compute-0 nova_compute[356901]: 2025-10-11 02:54:30.889 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:54:30 compute-0 nova_compute[356901]: 2025-10-11 02:54:30.889 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:54:30 compute-0 nova_compute[356901]: 2025-10-11 02:54:30.899 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:54:30 compute-0 nova_compute[356901]: 2025-10-11 02:54:30.899 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:54:30 compute-0 nova_compute[356901]: 2025-10-11 02:54:30.900 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:54:30 compute-0 sudo[477759]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:54:30 compute-0 sudo[477759]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:54:30 compute-0 sudo[477759]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:31 compute-0 sudo[477784]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:54:31 compute-0 sudo[477784]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:54:31 compute-0 sudo[477784]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:31 compute-0 sudo[477809]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:54:31 compute-0 sudo[477809]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:54:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2308: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:31 compute-0 nova_compute[356901]: 2025-10-11 02:54:31.384 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:54:31 compute-0 nova_compute[356901]: 2025-10-11 02:54:31.386 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3211MB free_disk=59.86394500732422GB free_vcpus=5 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:54:31 compute-0 nova_compute[356901]: 2025-10-11 02:54:31.386 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:54:31 compute-0 nova_compute[356901]: 2025-10-11 02:54:31.386 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:54:31 compute-0 openstack_network_exporter[374316]: ERROR   02:54:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:54:31 compute-0 openstack_network_exporter[374316]: ERROR   02:54:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:54:31 compute-0 openstack_network_exporter[374316]: ERROR   02:54:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:54:31 compute-0 openstack_network_exporter[374316]: ERROR   02:54:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:54:31 compute-0 ceph-mon[191930]: pgmap v2307: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:31 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/834237351' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:54:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:54:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:54:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:54:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:54:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:54:31 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:54:31 compute-0 openstack_network_exporter[374316]: ERROR   02:54:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:54:31 compute-0 nova_compute[356901]: 2025-10-11 02:54:31.579 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:54:31 compute-0 nova_compute[356901]: 2025-10-11 02:54:31.580 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 8422017b-c868-4ba2-ab1f-61d3668ca145 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:54:31 compute-0 nova_compute[356901]: 2025-10-11 02:54:31.580 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance f98d09d7-6aa0-4405-bfa0-be1f78d3911f actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:54:31 compute-0 nova_compute[356901]: 2025-10-11 02:54:31.580 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 3 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:54:31 compute-0 nova_compute[356901]: 2025-10-11 02:54:31.581 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1280MB phys_disk=59GB used_disk=4GB total_vcpus=8 used_vcpus=3 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:54:31 compute-0 nova_compute[356901]: 2025-10-11 02:54:31.603 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing inventories for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:804
Oct 11 02:54:31 compute-0 nova_compute[356901]: 2025-10-11 02:54:31.621 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating ProviderTree inventory for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 from _refresh_and_get_inventory using data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} _refresh_and_get_inventory /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:768
Oct 11 02:54:31 compute-0 nova_compute[356901]: 2025-10-11 02:54:31.621 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating inventory in ProviderTree for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with inventory: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:176
Oct 11 02:54:31 compute-0 nova_compute[356901]: 2025-10-11 02:54:31.642 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing aggregate associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, aggregates: None _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:813
Oct 11 02:54:31 compute-0 nova_compute[356901]: 2025-10-11 02:54:31.666 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing trait associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, traits: COMPUTE_VOLUME_EXTEND,COMPUTE_NET_VIF_MODEL_VMXNET3,HW_CPU_X86_SSSE3,COMPUTE_RESCUE_BFV,COMPUTE_SOCKET_PCI_NUMA_AFFINITY,COMPUTE_NODE,HW_CPU_X86_SVM,COMPUTE_STORAGE_BUS_SCSI,HW_CPU_X86_FMA3,COMPUTE_GRAPHICS_MODEL_NONE,COMPUTE_NET_VIF_MODEL_RTL8139,HW_CPU_X86_SSE4A,COMPUTE_IMAGE_TYPE_QCOW2,HW_CPU_X86_BMI2,HW_CPU_X86_SSE42,HW_CPU_X86_AVX2,COMPUTE_IMAGE_TYPE_RAW,COMPUTE_VIOMMU_MODEL_VIRTIO,HW_CPU_X86_AESNI,COMPUTE_STORAGE_BUS_FDC,COMPUTE_GRAPHICS_MODEL_VIRTIO,HW_CPU_X86_AMD_SVM,COMPUTE_NET_VIF_MODEL_NE2K_PCI,COMPUTE_ACCELERATORS,HW_CPU_X86_SSE2,COMPUTE_GRAPHICS_MODEL_VGA,HW_CPU_X86_ABM,HW_CPU_X86_AVX,COMPUTE_NET_VIF_MODEL_E1000,COMPUTE_STORAGE_BUS_USB,COMPUTE_NET_ATTACH_INTERFACE,HW_CPU_X86_MMX,COMPUTE_SECURITY_TPM_2_0,COMPUTE_IMAGE_TYPE_ISO,HW_CPU_X86_SSE41,COMPUTE_IMAGE_TYPE_AKI,COMPUTE_IMAGE_TYPE_AMI,COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG,COMPUTE_DEVICE_TAGGING,COMPUTE_SECURITY_UEFI_SECURE_BOOT,COMPUTE_TRUSTED_CERTS,COMPUTE_NET_VIF_MODEL_VIRTIO,COMPUTE_VIOMMU_MODEL_INTEL,COMPUTE_STORAGE_BUS_SATA,HW_CPU_X86_SSE,COMPUTE_STORAGE_BUS_VIRTIO,COMPUTE_NET_VIF_MODEL_PCNET,COMPUTE_GRAPHICS_MODEL_CIRRUS,HW_CPU_X86_SHA,HW_CPU_X86_BMI,COMPUTE_NET_VIF_MODEL_E1000E,COMPUTE_NET_VIF_MODEL_SPAPR_VLAN,COMPUTE_VOLUME_ATTACH_WITH_TAG,COMPUTE_GRAPHICS_MODEL_BOCHS,COMPUTE_VIOMMU_MODEL_AUTO,COMPUTE_IMAGE_TYPE_ARI,HW_CPU_X86_CLMUL,COMPUTE_STORAGE_BUS_IDE,COMPUTE_VOLUME_MULTI_ATTACH,HW_CPU_X86_F16C,COMPUTE_SECURITY_TPM_1_2 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:825
Oct 11 02:54:31 compute-0 podman[477873]: 2025-10-11 02:54:31.600901465 +0000 UTC m=+0.040586509 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:54:31 compute-0 nova_compute[356901]: 2025-10-11 02:54:31.735 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:54:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:54:32 compute-0 podman[477873]: 2025-10-11 02:54:32.038714104 +0000 UTC m=+0.478399168 container create 14cb85a0a3600c50cb031ff8b712cf34d4b565551b7f2bfbd8ec470b2ef5a2fb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_kirch, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:54:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:54:32 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2926335684' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:54:32 compute-0 nova_compute[356901]: 2025-10-11 02:54:32.229 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.494s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:54:32 compute-0 nova_compute[356901]: 2025-10-11 02:54:32.246 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:54:32 compute-0 systemd[1]: Started libpod-conmon-14cb85a0a3600c50cb031ff8b712cf34d4b565551b7f2bfbd8ec470b2ef5a2fb.scope.
Oct 11 02:54:32 compute-0 nova_compute[356901]: 2025-10-11 02:54:32.283 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:54:32 compute-0 nova_compute[356901]: 2025-10-11 02:54:32.285 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:54:32 compute-0 nova_compute[356901]: 2025-10-11 02:54:32.286 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.900s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:54:32 compute-0 podman[477908]: 2025-10-11 02:54:32.293802329 +0000 UTC m=+0.180893086 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 02:54:32 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:54:32 compute-0 podman[477907]: 2025-10-11 02:54:32.309872076 +0000 UTC m=+0.201054557 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, build-date=2025-08-20T13:12:41, io.openshift.tags=minimal rhel9, managed_by=edpm_ansible, io.openshift.expose-services=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, config_id=edpm, name=ubi9-minimal, com.redhat.component=ubi9-minimal-container, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=openstack_network_exporter, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.buildah.version=1.33.7, release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, version=9.6, architecture=x86_64, vendor=Red Hat, Inc., vcs-type=git, maintainer=Red Hat, Inc.)
Oct 11 02:54:32 compute-0 podman[477873]: 2025-10-11 02:54:32.854922692 +0000 UTC m=+1.294607716 container init 14cb85a0a3600c50cb031ff8b712cf34d4b565551b7f2bfbd8ec470b2ef5a2fb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_kirch, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:54:32 compute-0 podman[477873]: 2025-10-11 02:54:32.871443396 +0000 UTC m=+1.311128420 container start 14cb85a0a3600c50cb031ff8b712cf34d4b565551b7f2bfbd8ec470b2ef5a2fb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_kirch, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:54:32 compute-0 reverent_kirch[477948]: 167 167
Oct 11 02:54:32 compute-0 systemd[1]: libpod-14cb85a0a3600c50cb031ff8b712cf34d4b565551b7f2bfbd8ec470b2ef5a2fb.scope: Deactivated successfully.
Oct 11 02:54:33 compute-0 podman[477873]: 2025-10-11 02:54:33.102739484 +0000 UTC m=+1.542424538 container attach 14cb85a0a3600c50cb031ff8b712cf34d4b565551b7f2bfbd8ec470b2ef5a2fb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_kirch, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:54:33 compute-0 podman[477873]: 2025-10-11 02:54:33.104421574 +0000 UTC m=+1.544106598 container died 14cb85a0a3600c50cb031ff8b712cf34d4b565551b7f2bfbd8ec470b2ef5a2fb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_kirch, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:54:33 compute-0 ceph-mon[191930]: pgmap v2308: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:33 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2926335684' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:54:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2309: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:33 compute-0 systemd[1]: var-lib-containers-storage-overlay-bc6b5562a450c3b09cf01b2d0ac299373e1af3940be68d9389ef4375c2e30dd7-merged.mount: Deactivated successfully.
Oct 11 02:54:33 compute-0 podman[477873]: 2025-10-11 02:54:33.685017888 +0000 UTC m=+2.124702922 container remove 14cb85a0a3600c50cb031ff8b712cf34d4b565551b7f2bfbd8ec470b2ef5a2fb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=reverent_kirch, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Oct 11 02:54:33 compute-0 systemd[1]: libpod-conmon-14cb85a0a3600c50cb031ff8b712cf34d4b565551b7f2bfbd8ec470b2ef5a2fb.scope: Deactivated successfully.
Oct 11 02:54:33 compute-0 podman[477906]: 2025-10-11 02:54:33.753969255 +0000 UTC m=+1.651191556 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.license=GPLv2, config_id=edpm, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS)
Oct 11 02:54:33 compute-0 podman[477994]: 2025-10-11 02:54:33.906795899 +0000 UTC m=+0.059545088 container create ecc0e727e5cfb84de0c05d057d26f9c2773e2d47e134c096558743298abda046 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=kind_dijkstra, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:54:33 compute-0 systemd[1]: Started libpod-conmon-ecc0e727e5cfb84de0c05d057d26f9c2773e2d47e134c096558743298abda046.scope.
Oct 11 02:54:33 compute-0 podman[477994]: 2025-10-11 02:54:33.886101686 +0000 UTC m=+0.038850895 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:54:34 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:54:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/804eed0e9786a61af470e71c7ab3d5e002a173f0f917df7f19aea768c5d9ab1f/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:54:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/804eed0e9786a61af470e71c7ab3d5e002a173f0f917df7f19aea768c5d9ab1f/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:54:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/804eed0e9786a61af470e71c7ab3d5e002a173f0f917df7f19aea768c5d9ab1f/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:54:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/804eed0e9786a61af470e71c7ab3d5e002a173f0f917df7f19aea768c5d9ab1f/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:54:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/804eed0e9786a61af470e71c7ab3d5e002a173f0f917df7f19aea768c5d9ab1f/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:54:34 compute-0 podman[477994]: 2025-10-11 02:54:34.03322759 +0000 UTC m=+0.185976819 container init ecc0e727e5cfb84de0c05d057d26f9c2773e2d47e134c096558743298abda046 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=kind_dijkstra, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:54:34 compute-0 podman[477994]: 2025-10-11 02:54:34.050130207 +0000 UTC m=+0.202879396 container start ecc0e727e5cfb84de0c05d057d26f9c2773e2d47e134c096558743298abda046 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=kind_dijkstra, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:54:34 compute-0 podman[477994]: 2025-10-11 02:54:34.056915871 +0000 UTC m=+0.209665100 container attach ecc0e727e5cfb84de0c05d057d26f9c2773e2d47e134c096558743298abda046 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=kind_dijkstra, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 02:54:34 compute-0 ceph-mon[191930]: pgmap v2309: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:35 compute-0 kind_dijkstra[478010]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:54:35 compute-0 kind_dijkstra[478010]: --> relative data size: 1.0
Oct 11 02:54:35 compute-0 kind_dijkstra[478010]: --> All data devices are unavailable
Oct 11 02:54:35 compute-0 systemd[1]: libpod-ecc0e727e5cfb84de0c05d057d26f9c2773e2d47e134c096558743298abda046.scope: Deactivated successfully.
Oct 11 02:54:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2310: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:35 compute-0 systemd[1]: libpod-ecc0e727e5cfb84de0c05d057d26f9c2773e2d47e134c096558743298abda046.scope: Consumed 1.161s CPU time.
Oct 11 02:54:35 compute-0 podman[478039]: 2025-10-11 02:54:35.39168847 +0000 UTC m=+0.051692260 container died ecc0e727e5cfb84de0c05d057d26f9c2773e2d47e134c096558743298abda046 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=kind_dijkstra, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0)
Oct 11 02:54:35 compute-0 systemd[1]: var-lib-containers-storage-overlay-804eed0e9786a61af470e71c7ab3d5e002a173f0f917df7f19aea768c5d9ab1f-merged.mount: Deactivated successfully.
Oct 11 02:54:35 compute-0 nova_compute[356901]: 2025-10-11 02:54:35.435 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:35 compute-0 podman[478039]: 2025-10-11 02:54:35.483043691 +0000 UTC m=+0.143047441 container remove ecc0e727e5cfb84de0c05d057d26f9c2773e2d47e134c096558743298abda046 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=kind_dijkstra, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True)
Oct 11 02:54:35 compute-0 systemd[1]: libpod-conmon-ecc0e727e5cfb84de0c05d057d26f9c2773e2d47e134c096558743298abda046.scope: Deactivated successfully.
Oct 11 02:54:35 compute-0 sudo[477809]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:35 compute-0 nova_compute[356901]: 2025-10-11 02:54:35.620 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:35 compute-0 sudo[478052]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:54:35 compute-0 sudo[478052]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:54:35 compute-0 sudo[478052]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:35 compute-0 sudo[478077]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:54:35 compute-0 sudo[478077]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:54:35 compute-0 sudo[478077]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:35 compute-0 sudo[478102]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:54:35 compute-0 sudo[478102]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:54:35 compute-0 sudo[478102]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:35 compute-0 sudo[478127]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:54:35 compute-0 sudo[478127]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:54:36 compute-0 ceph-mon[191930]: pgmap v2310: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:36 compute-0 podman[478190]: 2025-10-11 02:54:36.557927458 +0000 UTC m=+0.078300555 container create 5e5b34b21a4447857f205213cdd47843c3c9d51a8e7758185cb7507a123279c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_easley, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507)
Oct 11 02:54:36 compute-0 systemd[1]: Started libpod-conmon-5e5b34b21a4447857f205213cdd47843c3c9d51a8e7758185cb7507a123279c1.scope.
Oct 11 02:54:36 compute-0 podman[478190]: 2025-10-11 02:54:36.530105671 +0000 UTC m=+0.050478758 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:54:36 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:54:36 compute-0 podman[478190]: 2025-10-11 02:54:36.678889333 +0000 UTC m=+0.199262440 container init 5e5b34b21a4447857f205213cdd47843c3c9d51a8e7758185cb7507a123279c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_easley, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, OSD_FLAVOR=default)
Oct 11 02:54:36 compute-0 podman[478190]: 2025-10-11 02:54:36.689307494 +0000 UTC m=+0.209680561 container start 5e5b34b21a4447857f205213cdd47843c3c9d51a8e7758185cb7507a123279c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_easley, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:54:36 compute-0 podman[478190]: 2025-10-11 02:54:36.693630741 +0000 UTC m=+0.214003828 container attach 5e5b34b21a4447857f205213cdd47843c3c9d51a8e7758185cb7507a123279c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_easley, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True)
Oct 11 02:54:36 compute-0 unruffled_easley[478206]: 167 167
Oct 11 02:54:36 compute-0 systemd[1]: libpod-5e5b34b21a4447857f205213cdd47843c3c9d51a8e7758185cb7507a123279c1.scope: Deactivated successfully.
Oct 11 02:54:36 compute-0 podman[478190]: 2025-10-11 02:54:36.697105338 +0000 UTC m=+0.217478435 container died 5e5b34b21a4447857f205213cdd47843c3c9d51a8e7758185cb7507a123279c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_easley, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507)
Oct 11 02:54:36 compute-0 systemd[1]: var-lib-containers-storage-overlay-63c7cd1a8c72a5d650b997c4893a8c9729be470b3f0fbae322c205ad96a14a82-merged.mount: Deactivated successfully.
Oct 11 02:54:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #111. Immutable memtables: 0.
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:54:36.758997) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 65] Flushing memtable with next log file: 111
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151276759023, "job": 65, "event": "flush_started", "num_memtables": 1, "num_entries": 1247, "num_deletes": 256, "total_data_size": 1896407, "memory_usage": 1921880, "flush_reason": "Manual Compaction"}
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 65] Level-0 flush table #112: started
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151276772080, "cf_name": "default", "job": 65, "event": "table_file_creation", "file_number": 112, "file_size": 1878693, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 46300, "largest_seqno": 47546, "table_properties": {"data_size": 1872662, "index_size": 3362, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1605, "raw_key_size": 12394, "raw_average_key_size": 19, "raw_value_size": 1860578, "raw_average_value_size": 2925, "num_data_blocks": 151, "num_entries": 636, "num_filter_entries": 636, "num_deletions": 256, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760151147, "oldest_key_time": 1760151147, "file_creation_time": 1760151276, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 112, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 65] Flush lasted 13123 microseconds, and 4590 cpu microseconds.
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:54:36.772122) [db/flush_job.cc:967] [default] [JOB 65] Level-0 flush table #112: 1878693 bytes OK
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:54:36.772137) [db/memtable_list.cc:519] [default] Level-0 commit table #112 started
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:54:36.773737) [db/memtable_list.cc:722] [default] Level-0 commit table #112: memtable #1 done
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:54:36.773751) EVENT_LOG_v1 {"time_micros": 1760151276773746, "job": 65, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:54:36.773766) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 65] Try to delete WAL files size 1890734, prev total WAL file size 1890734, number of live WAL files 2.
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000108.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:54:36.774504) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6C6F676D0031373633' seq:72057594037927935, type:22 .. '6C6F676D0032303135' seq:0, type:0; will stop at (end)
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 66] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 65 Base level 0, inputs: [112(1834KB)], [110(7638KB)]
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151276774548, "job": 66, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [112], "files_L6": [110], "score": -1, "input_data_size": 9700411, "oldest_snapshot_seqno": -1}
Oct 11 02:54:36 compute-0 podman[478190]: 2025-10-11 02:54:36.779885689 +0000 UTC m=+0.300258756 container remove 5e5b34b21a4447857f205213cdd47843c3c9d51a8e7758185cb7507a123279c1 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=unruffled_easley, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 02:54:36 compute-0 systemd[1]: libpod-conmon-5e5b34b21a4447857f205213cdd47843c3c9d51a8e7758185cb7507a123279c1.scope: Deactivated successfully.
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 66] Generated table #113: 6189 keys, 9593688 bytes, temperature: kUnknown
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151276829329, "cf_name": "default", "job": 66, "event": "table_file_creation", "file_number": 113, "file_size": 9593688, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 9552705, "index_size": 24425, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 15493, "raw_key_size": 161383, "raw_average_key_size": 26, "raw_value_size": 9441089, "raw_average_value_size": 1525, "num_data_blocks": 977, "num_entries": 6189, "num_filter_entries": 6189, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760151276, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 113, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:54:36.829747) [db/compaction/compaction_job.cc:1663] [default] [JOB 66] Compacted 1@0 + 1@6 files to L6 => 9593688 bytes
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:54:36.832486) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 176.4 rd, 174.4 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(1.8, 7.5 +0.0 blob) out(9.1 +0.0 blob), read-write-amplify(10.3) write-amplify(5.1) OK, records in: 6713, records dropped: 524 output_compression: NoCompression
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:54:36.832504) EVENT_LOG_v1 {"time_micros": 1760151276832495, "job": 66, "event": "compaction_finished", "compaction_time_micros": 54998, "compaction_time_cpu_micros": 23953, "output_level": 6, "num_output_files": 1, "total_output_size": 9593688, "num_input_records": 6713, "num_output_records": 6189, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000112.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151276833078, "job": 66, "event": "table_file_deletion", "file_number": 112}
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000110.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151276834683, "job": 66, "event": "table_file_deletion", "file_number": 110}
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:54:36.774394) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:54:36.834812) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:54:36.834819) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:54:36.834821) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:54:36.834822) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:54:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:54:36.834824) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:54:37 compute-0 podman[478230]: 2025-10-11 02:54:37.039009295 +0000 UTC m=+0.063797932 container create 2c64cce88f493eca3d0c7ed4c75822d76f5888b9edb60b4957242ad0ad5f6eee (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_euclid, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 02:54:37 compute-0 systemd[1]: Started libpod-conmon-2c64cce88f493eca3d0c7ed4c75822d76f5888b9edb60b4957242ad0ad5f6eee.scope.
Oct 11 02:54:37 compute-0 podman[478230]: 2025-10-11 02:54:37.01509085 +0000 UTC m=+0.039879527 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:54:37 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:54:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9ed46e0b2989362175002b68c95f8d996d40da95f69873a927c77946a270010d/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:54:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9ed46e0b2989362175002b68c95f8d996d40da95f69873a927c77946a270010d/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:54:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9ed46e0b2989362175002b68c95f8d996d40da95f69873a927c77946a270010d/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:54:37 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/9ed46e0b2989362175002b68c95f8d996d40da95f69873a927c77946a270010d/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:54:37 compute-0 podman[478230]: 2025-10-11 02:54:37.166095065 +0000 UTC m=+0.190883712 container init 2c64cce88f493eca3d0c7ed4c75822d76f5888b9edb60b4957242ad0ad5f6eee (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_euclid, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 02:54:37 compute-0 podman[478230]: 2025-10-11 02:54:37.175734049 +0000 UTC m=+0.200522696 container start 2c64cce88f493eca3d0c7ed4c75822d76f5888b9edb60b4957242ad0ad5f6eee (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_euclid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:54:37 compute-0 podman[478230]: 2025-10-11 02:54:37.182058396 +0000 UTC m=+0.206847073 container attach 2c64cce88f493eca3d0c7ed4c75822d76f5888b9edb60b4957242ad0ad5f6eee (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_euclid, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
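The create/init/start/attach events above (and the died/remove pair further down) trace one short-lived cephadm helper container through its whole lifecycle. A sketch, assuming the podman CLI on the host, that streams the same lifecycle events as JSON; event key names may differ across podman versions:

    # Stream podman lifecycle events as JSON; the filter value mirrors the
    # container name seen in this log and is illustrative only.
    import json
    import subprocess

    proc = subprocess.Popen(
        ["podman", "events", "--format", "json",
         "--filter", "container=compassionate_euclid"],
        stdout=subprocess.PIPE, text=True)
    for line in proc.stdout:              # one JSON object per event
        ev = json.loads(line)
        print(ev.get("Status"), ev.get("Name"), ev.get("Time"))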
Oct 11 02:54:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2311: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]: {
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:     "0": [
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:         {
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "devices": [
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "/dev/loop3"
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             ],
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "lv_name": "ceph_lv0",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "lv_size": "21470642176",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "name": "ceph_lv0",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "tags": {
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.cluster_name": "ceph",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.crush_device_class": "",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.encrypted": "0",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.osd_id": "0",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.type": "block",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.vdo": "0"
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             },
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "type": "block",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "vg_name": "ceph_vg0"
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:         }
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:     ],
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:     "1": [
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:         {
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "devices": [
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "/dev/loop4"
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             ],
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "lv_name": "ceph_lv1",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "lv_size": "21470642176",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "name": "ceph_lv1",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "tags": {
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.cluster_name": "ceph",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.crush_device_class": "",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.encrypted": "0",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.osd_id": "1",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.type": "block",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.vdo": "0"
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             },
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "type": "block",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "vg_name": "ceph_vg1"
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:         }
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:     ],
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:     "2": [
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:         {
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "devices": [
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "/dev/loop5"
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             ],
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "lv_name": "ceph_lv2",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "lv_size": "21470642176",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "name": "ceph_lv2",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "tags": {
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.cluster_name": "ceph",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.crush_device_class": "",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.encrypted": "0",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.osd_id": "2",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.type": "block",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:                 "ceph.vdo": "0"
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             },
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "type": "block",
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:             "vg_name": "ceph_vg2"
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:         }
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]:     ]
Oct 11 02:54:37 compute-0 compassionate_euclid[478245]: }
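The JSON block above is ceph-volume lvm list --format json output, keyed by OSD id and relayed through the container's attached stdout. A minimal sketch, assuming the payload was saved to a hypothetical file lvm_list.json, that reduces it to an OSD-to-device summary:

    # Summarize ceph-volume lvm list JSON as logged above; key names match the log.
    import json

    with open("lvm_list.json") as f:
        osds = json.load(f)

    for osd_id, lvs in sorted(osds.items(), key=lambda kv: int(kv[0])):
        for lv in lvs:
            tags = lv["tags"]
            print(f"osd.{osd_id}: {lv['lv_path']} on {','.join(lv['devices'])} "
                  f"(osd_fsid={tags['ceph.osd_fsid']}, encrypted={tags['ceph.encrypted']})")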
Oct 11 02:54:37 compute-0 systemd[1]: libpod-2c64cce88f493eca3d0c7ed4c75822d76f5888b9edb60b4957242ad0ad5f6eee.scope: Deactivated successfully.
Oct 11 02:54:38 compute-0 podman[478254]: 2025-10-11 02:54:38.053398659 +0000 UTC m=+0.036320765 container died 2c64cce88f493eca3d0c7ed4c75822d76f5888b9edb60b4957242ad0ad5f6eee (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_euclid, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:54:38 compute-0 systemd[1]: var-lib-containers-storage-overlay-9ed46e0b2989362175002b68c95f8d996d40da95f69873a927c77946a270010d-merged.mount: Deactivated successfully.
Oct 11 02:54:38 compute-0 podman[478254]: 2025-10-11 02:54:38.136963107 +0000 UTC m=+0.119885193 container remove 2c64cce88f493eca3d0c7ed4c75822d76f5888b9edb60b4957242ad0ad5f6eee (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_euclid, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2)
Oct 11 02:54:38 compute-0 systemd[1]: libpod-conmon-2c64cce88f493eca3d0c7ed4c75822d76f5888b9edb60b4957242ad0ad5f6eee.scope: Deactivated successfully.
Oct 11 02:54:38 compute-0 sudo[478127]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:38 compute-0 sudo[478269]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:54:38 compute-0 sudo[478269]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:54:38 compute-0 sudo[478269]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:38 compute-0 sudo[478294]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:54:38 compute-0 sudo[478294]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:54:38 compute-0 sudo[478294]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:38 compute-0 sudo[478319]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:54:38 compute-0 sudo[478319]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:54:38 compute-0 sudo[478319]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:38 compute-0 sudo[478344]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:54:38 compute-0 sudo[478344]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
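The sudo COMMAND above shows how the cephadm mgr module shells out: the copy of cephadm cached under /var/lib/ceph/<fsid>/ runs with --image and --timeout, and everything after the bare -- is handed to ceph-volume inside a throwaway container. A sketch re-issuing the same call (values copied from the log line; assumes the JSON comes back on stdout):

    import json
    import subprocess

    fsid = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"
    cephadm = (f"/var/lib/ceph/{fsid}/cephadm."
               "31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d")
    image = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")

    out = subprocess.run(
        ["sudo", "/bin/python3", cephadm, "--image", image, "--timeout", "895",
         "ceph-volume", "--fsid", fsid, "--", "raw", "list", "--format", "json"],
        capture_output=True, text=True, check=True).stdout
    print(sorted(json.loads(out)))   # osd_uuid keys, as in the JSON further down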
Oct 11 02:54:38 compute-0 ceph-mon[191930]: pgmap v2311: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:39 compute-0 podman[478406]: 2025-10-11 02:54:39.12079341 +0000 UTC m=+0.074467536 container create 64845b254fee56cb86ac20c6ca4d6fdaa1de89fe866b06a29c6783c534c1d5c5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_jang, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
Oct 11 02:54:39 compute-0 podman[478406]: 2025-10-11 02:54:39.084981597 +0000 UTC m=+0.038655723 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:54:39 compute-0 systemd[1]: Started libpod-conmon-64845b254fee56cb86ac20c6ca4d6fdaa1de89fe866b06a29c6783c534c1d5c5.scope.
Oct 11 02:54:39 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:54:39 compute-0 podman[478406]: 2025-10-11 02:54:39.264984909 +0000 UTC m=+0.218659035 container init 64845b254fee56cb86ac20c6ca4d6fdaa1de89fe866b06a29c6783c534c1d5c5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_jang, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, ceph=True, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:54:39 compute-0 podman[478406]: 2025-10-11 02:54:39.285072646 +0000 UTC m=+0.238746772 container start 64845b254fee56cb86ac20c6ca4d6fdaa1de89fe866b06a29c6783c534c1d5c5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_jang, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2)
Oct 11 02:54:39 compute-0 podman[478406]: 2025-10-11 02:54:39.292454736 +0000 UTC m=+0.246128912 container attach 64845b254fee56cb86ac20c6ca4d6fdaa1de89fe866b06a29c6783c534c1d5c5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_jang, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:54:39 compute-0 lucid_jang[478422]: 167 167
Oct 11 02:54:39 compute-0 systemd[1]: libpod-64845b254fee56cb86ac20c6ca4d6fdaa1de89fe866b06a29c6783c534c1d5c5.scope: Deactivated successfully.
Oct 11 02:54:39 compute-0 podman[478406]: 2025-10-11 02:54:39.298552909 +0000 UTC m=+0.252227035 container died 64845b254fee56cb86ac20c6ca4d6fdaa1de89fe866b06a29c6783c534c1d5c5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_jang, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:54:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2312: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:39 compute-0 systemd[1]: var-lib-containers-storage-overlay-f0c5f3de9ad17ea3bf1c38376b206498af486f66e5ee0109167d050b72c7b3df-merged.mount: Deactivated successfully.
Oct 11 02:54:39 compute-0 podman[478406]: 2025-10-11 02:54:39.376659872 +0000 UTC m=+0.330333998 container remove 64845b254fee56cb86ac20c6ca4d6fdaa1de89fe866b06a29c6783c534c1d5c5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_jang, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:54:39 compute-0 systemd[1]: libpod-conmon-64845b254fee56cb86ac20c6ca4d6fdaa1de89fe866b06a29c6783c534c1d5c5.scope: Deactivated successfully.
Oct 11 02:54:39 compute-0 podman[478445]: 2025-10-11 02:54:39.650378646 +0000 UTC m=+0.085413359 container create a1cbe0a0acb99f2fb3e2cb12dffdbd8fde07f299f8bc5a3a05d432cd6c36fa0c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_babbage, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:54:39 compute-0 podman[478445]: 2025-10-11 02:54:39.614120986 +0000 UTC m=+0.049155699 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:54:39 compute-0 systemd[1]: Started libpod-conmon-a1cbe0a0acb99f2fb3e2cb12dffdbd8fde07f299f8bc5a3a05d432cd6c36fa0c.scope.
Oct 11 02:54:39 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:54:39 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/21e1858c54e7ff1fb0b4d50edd78e7652412376caa2cffb45a108d781d7203cb/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:54:39 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/21e1858c54e7ff1fb0b4d50edd78e7652412376caa2cffb45a108d781d7203cb/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:54:39 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/21e1858c54e7ff1fb0b4d50edd78e7652412376caa2cffb45a108d781d7203cb/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:54:39 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/21e1858c54e7ff1fb0b4d50edd78e7652412376caa2cffb45a108d781d7203cb/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:54:39 compute-0 podman[478445]: 2025-10-11 02:54:39.791602368 +0000 UTC m=+0.226637081 container init a1cbe0a0acb99f2fb3e2cb12dffdbd8fde07f299f8bc5a3a05d432cd6c36fa0c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_babbage, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 02:54:39 compute-0 podman[478445]: 2025-10-11 02:54:39.808668275 +0000 UTC m=+0.243702948 container start a1cbe0a0acb99f2fb3e2cb12dffdbd8fde07f299f8bc5a3a05d432cd6c36fa0c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_babbage, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 02:54:39 compute-0 podman[478445]: 2025-10-11 02:54:39.813996402 +0000 UTC m=+0.249031115 container attach a1cbe0a0acb99f2fb3e2cb12dffdbd8fde07f299f8bc5a3a05d432cd6c36fa0c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_babbage, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:54:40 compute-0 nova_compute[356901]: 2025-10-11 02:54:40.443 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:40 compute-0 nova_compute[356901]: 2025-10-11 02:54:40.623 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:40 compute-0 ceph-mon[191930]: pgmap v2312: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:40 compute-0 sharp_babbage[478461]: {
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:         "osd_id": 1,
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:         "type": "bluestore"
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:     },
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:         "osd_id": 2,
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:         "type": "bluestore"
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:     },
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:         "osd_id": 0,
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:         "type": "bluestore"
Oct 11 02:54:40 compute-0 sharp_babbage[478461]:     }
Oct 11 02:54:40 compute-0 sharp_babbage[478461]: }
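ceph-volume raw list keys its entries by osd_uuid where lvm list keyed them by OSD id; the two listings describe the same three bluestore OSDs. A hedged cross-check, assuming both payloads were captured to the hypothetical files named below:

    # Verify raw list and lvm list agree on osd_id <-> osd_uuid; field names
    # are as in the log, file names are assumptions.
    import json

    with open("raw_list.json") as f:
        raw = json.load(f)            # keyed by osd_uuid
    with open("lvm_list.json") as f:
        lvm = json.load(f)            # keyed by osd_id

    for osd_id, lvs in lvm.items():
        uuid = lvs[0]["tags"]["ceph.osd_fsid"]
        entry = raw[uuid]
        assert entry["osd_id"] == int(osd_id) and entry["type"] == "bluestore"
        print(f"osd.{osd_id} uuid={uuid} device={entry['device']}")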
Oct 11 02:54:40 compute-0 systemd[1]: libpod-a1cbe0a0acb99f2fb3e2cb12dffdbd8fde07f299f8bc5a3a05d432cd6c36fa0c.scope: Deactivated successfully.
Oct 11 02:54:40 compute-0 systemd[1]: libpod-a1cbe0a0acb99f2fb3e2cb12dffdbd8fde07f299f8bc5a3a05d432cd6c36fa0c.scope: Consumed 1.166s CPU time.
Oct 11 02:54:41 compute-0 podman[478495]: 2025-10-11 02:54:41.098808524 +0000 UTC m=+0.066801620 container died a1cbe0a0acb99f2fb3e2cb12dffdbd8fde07f299f8bc5a3a05d432cd6c36fa0c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_babbage, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2, ceph=True)
Oct 11 02:54:41 compute-0 systemd[1]: var-lib-containers-storage-overlay-21e1858c54e7ff1fb0b4d50edd78e7652412376caa2cffb45a108d781d7203cb-merged.mount: Deactivated successfully.
Oct 11 02:54:41 compute-0 podman[478494]: 2025-10-11 02:54:41.18100369 +0000 UTC m=+0.147513688 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, name=ubi9, build-date=2024-09-18T21:23:30, com.redhat.component=ubi9-container, config_id=edpm, managed_by=edpm_ansible, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9, architecture=x86_64, vendor=Red Hat, Inc., version=9.4, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=1214.1726694543, maintainer=Red Hat, Inc., container_name=kepler, release-0.7.12=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, distribution-scope=public, io.buildah.version=1.29.0, io.openshift.expose-services=, io.openshift.tags=base rhel9, summary=Provides the latest release of Red Hat Universal Base Image 9., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543)
Oct 11 02:54:41 compute-0 podman[478495]: 2025-10-11 02:54:41.204523041 +0000 UTC m=+0.172516037 container remove a1cbe0a0acb99f2fb3e2cb12dffdbd8fde07f299f8bc5a3a05d432cd6c36fa0c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_babbage, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:54:41 compute-0 systemd[1]: libpod-conmon-a1cbe0a0acb99f2fb3e2cb12dffdbd8fde07f299f8bc5a3a05d432cd6c36fa0c.scope: Deactivated successfully.
Oct 11 02:54:41 compute-0 sudo[478344]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:54:41 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:54:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:54:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2313: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:41 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:54:41 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 36ef63f1-ddb7-403d-86cd-f3614870834e does not exist
Oct 11 02:54:41 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev a2073e84-88e0-44f5-b962-65db16426039 does not exist
Oct 11 02:54:41 compute-0 sudo[478527]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:54:41 compute-0 sudo[478527]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:54:41 compute-0 sudo[478527]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:41 compute-0 sudo[478552]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:54:41 compute-0 sudo[478552]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:54:41 compute-0 sudo[478552]: pam_unix(sudo:session): session closed for user root
Oct 11 02:54:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
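_set_new_cache_sizes reports its allocations in raw bytes. Converted, the figures above come to roughly 0.95 GiB of total cache, 332 MiB each for incremental and full osdmaps, and 304 MiB for the kv cache:

    # Byte counts copied from the mon line above.
    for name, b in [("cache_size", 1020054731), ("inc_alloc", 348127232),
                    ("full_alloc", 348127232), ("kv_alloc", 318767104)]:
        print(f"{name}: {b / 2**20:.0f} MiB ({b / 2**30:.2f} GiB)")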
Oct 11 02:54:42 compute-0 nova_compute[356901]: 2025-10-11 02:54:42.282 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:54:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:54:42 compute-0 ceph-mon[191930]: pgmap v2313: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:54:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2314: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:44 compute-0 ceph-mon[191930]: pgmap v2314: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2315: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:45 compute-0 nova_compute[356901]: 2025-10-11 02:54:45.450 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:45 compute-0 nova_compute[356901]: 2025-10-11 02:54:45.625 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:46 compute-0 podman[478580]: 2025-10-11 02:54:46.225476963 +0000 UTC m=+0.088863775 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_id=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, tcib_managed=true)
Oct 11 02:54:46 compute-0 podman[478577]: 2025-10-11 02:54:46.229722475 +0000 UTC m=+0.111650231 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:54:46 compute-0 podman[478579]: 2025-10-11 02:54:46.248422489 +0000 UTC m=+0.109535505 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:54:46 compute-0 podman[478578]: 2025-10-11 02:54:46.272501514 +0000 UTC m=+0.151353177 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_controller, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team)
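Each health_status event above embeds the config_data its container was created from. Purely as an illustration (this is not how edpm_ansible actually builds the command), a sketch mapping the common keys of such a dict onto podman run flags:

    # Illustrative mapping from a config_data dict (as logged above) to podman
    # run flags; the helper and its mapping are assumptions, not edpm code.
    def podman_args(name, cfg):
        args = ["podman", "run", "-d", "--name", name]
        if cfg.get("net"):
            args += ["--net", cfg["net"]]
        if cfg.get("privileged"):
            args += ["--privileged"]
        if cfg.get("restart"):
            args += ["--restart", cfg["restart"]]
        if cfg.get("user"):
            args += ["--user", cfg["user"]]
        for k, v in cfg.get("environment", {}).items():
            args += ["-e", f"{k}={v}"]
        for vol in cfg.get("volumes", []):
            args += ["-v", vol]
        if "healthcheck" in cfg:
            args += ["--health-cmd", cfg["healthcheck"]["test"]]
        return args + [cfg["image"]]

    print(" ".join(podman_args("ovn_controller", {
        "image": "quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified",
        "net": "host", "privileged": True, "restart": "always", "user": "root"})))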
Oct 11 02:54:46 compute-0 ceph-mon[191930]: pgmap v2315: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:54:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2316: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:48 compute-0 ceph-mon[191930]: pgmap v2316: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2317: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:50 compute-0 ceph-mon[191930]: pgmap v2317: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:50 compute-0 nova_compute[356901]: 2025-10-11 02:54:50.457 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:50 compute-0 nova_compute[356901]: 2025-10-11 02:54:50.629 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2318: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:54:52 compute-0 ceph-mon[191930]: pgmap v2318: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:54:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2319: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 2.7 KiB/s rd, 0 B/s wr, 4 op/s
Oct 11 02:54:54 compute-0 podman[478661]: 2025-10-11 02:54:54.230148276 +0000 UTC m=+0.105703235 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, container_name=iscsid, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']})
Oct 11 02:54:54 compute-0 podman[478660]: 2025-10-11 02:54:54.250890993 +0000 UTC m=+0.128947039 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3)
Oct 11 02:54:54 compute-0 ceph-mon[191930]: pgmap v2319: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 2.7 KiB/s rd, 0 B/s wr, 4 op/s
Oct 11 02:54:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:54:54.889 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:54:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:54:54.889 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:54:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:54:54.890 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:54:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2320: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.2 KiB/s rd, 0 B/s wr, 15 op/s
Oct 11 02:54:55 compute-0 nova_compute[356901]: 2025-10-11 02:54:55.462 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:55 compute-0 nova_compute[356901]: 2025-10-11 02:54:55.632 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:54:56 compute-0 ceph-mon[191930]: pgmap v2320: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 9.2 KiB/s rd, 0 B/s wr, 15 op/s
Oct 11 02:54:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:54:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:54:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:54:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:54:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:54:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:54:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:54:56
Oct 11 02:54:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:54:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:54:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.log', 'volumes', '.rgw.root', 'cephfs.cephfs.meta', 'vms', 'images', 'default.rgw.meta', 'default.rgw.control', 'backups', '.mgr', 'cephfs.cephfs.data']
Oct 11 02:54:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
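The balancer pass above ran in upmap mode with a max-misplaced ratio of 0.05 and prepared 0/10 changes, which is expected when every PG is already active+clean. The same state can be read back from the CLI; a sketch assuming an admin keyring on the host (JSON field names may vary by release):

    import json
    import subprocess

    status = json.loads(subprocess.run(
        ["ceph", "balancer", "status", "--format", "json"],
        capture_output=True, text=True, check=True).stdout)
    print(status.get("mode"), status.get("optimize_result"))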
Oct 11 02:54:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:54:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2321: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 20 KiB/s rd, 0 B/s wr, 34 op/s
Oct 11 02:54:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:54:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:54:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:54:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:54:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:54:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:54:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:54:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:54:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:54:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:54:58 compute-0 ceph-mon[191930]: pgmap v2321: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 20 KiB/s rd, 0 B/s wr, 34 op/s
Oct 11 02:54:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2322: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:54:59 compute-0 podman[157119]: time="2025-10-11T02:54:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:54:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:54:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:54:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:54:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9554 "" "Go-http-client/1.1"
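The two GETs above are podman_exporter polling the podman system service's libpod REST API over the unix socket configured earlier (CONTAINER_HOST=unix:///run/podman/podman.sock). A minimal stdlib sketch of the first query:

    # Same libpod query over the unix socket, stdlib only; the socket path is
    # taken from the podman_exporter config logged earlier.
    import http.client
    import json
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        def __init__(self, path):
            super().__init__("localhost")
            self._path = path

        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self._path)

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    for c in json.loads(conn.getresponse().read()):
        print(c["Names"], c["State"])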
Oct 11 02:55:00 compute-0 nova_compute[356901]: 2025-10-11 02:55:00.469 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:00 compute-0 ceph-mon[191930]: pgmap v2322: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:55:00 compute-0 nova_compute[356901]: 2025-10-11 02:55:00.635 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2323: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:55:01 compute-0 openstack_network_exporter[374316]: ERROR   02:55:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:55:01 compute-0 openstack_network_exporter[374316]: ERROR   02:55:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:55:01 compute-0 openstack_network_exporter[374316]: ERROR   02:55:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:55:01 compute-0 openstack_network_exporter[374316]: ERROR   02:55:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:55:01 compute-0 openstack_network_exporter[374316]: ERROR   02:55:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
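
The exporter errors are expected on a compute node: ovn-northd runs on the controllers, so no control socket exists here, and the dpif-netdev appctl calls fail without a userspace datapath. A quick existence check against the socket directories this exporter mounts (paths taken from the container volumes in the health_status line below):

    # Sketch: see which OVS/OVN control sockets actually exist on this host.
    # Host directories match the exporter's mounts: /var/run/openvswitch
    # (container /run/openvswitch) and /var/lib/openvswitch/ovn (/run/ovn).
    import glob

    for pattern in ("/var/run/openvswitch/*.ctl", "/var/lib/openvswitch/ovn/*.ctl"):
        hits = glob.glob(pattern)
        print(pattern, "->", hits or "no control socket files found")
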
Oct 11 02:55:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:55:02 compute-0 ceph-mon[191930]: pgmap v2323: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:55:03 compute-0 podman[478698]: 2025-10-11 02:55:03.25244319 +0000 UTC m=+0.136248458 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.component=ubi9-minimal-container, config_id=edpm, container_name=openstack_network_exporter, version=9.6, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.tags=minimal rhel9, release=1755695350, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, architecture=x86_64, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://catalog.redhat.com/en/search?searchType=containers, maintainer=Red Hat, Inc., distribution-scope=public, managed_by=edpm_ansible, vcs-type=git, io.openshift.expose-services=, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, build-date=2025-08-20T13:12:41, io.buildah.version=1.33.7, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9.)
Oct 11 02:55:03 compute-0 podman[478699]: 2025-10-11 02:55:03.297665373 +0000 UTC m=+0.173598425 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:55:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2324: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:55:04 compute-0 podman[478739]: 2025-10-11 02:55:04.239831603 +0000 UTC m=+0.123224028 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, config_id=edpm, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team)
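
Each health_status line embeds the container's config_data as a Python-literal dict (single quotes, True/False), so ast.literal_eval recovers it where a JSON parser would fail. A sketch on an abridged copy of the node_exporter entry above:

    # Sketch: recover the config_data dict embedded in a health_status line.
    # config_data uses Python literal syntax, not JSON, so json.loads would
    # reject it; ast.literal_eval handles it safely.
    import ast

    config_data = ("{'image': 'quay.io/prometheus/node-exporter:v1.5.0', "
                   "'restart': 'always', 'recreate': True, 'privileged': True, "
                   "'ports': ['9100:9100'], 'net': 'host'}")  # abridged from the log
    cfg = ast.literal_eval(config_data)
    assert cfg["ports"] == ["9100:9100"] and cfg["recreate"] is True
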
Oct 11 02:55:04 compute-0 ceph-mon[191930]: pgmap v2324: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 02:55:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2325: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 33 KiB/s rd, 0 B/s wr, 54 op/s
Oct 11 02:55:05 compute-0 nova_compute[356901]: 2025-10-11 02:55:05.474 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:05 compute-0 nova_compute[356901]: 2025-10-11 02:55:05.640 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:06 compute-0 ceph-mon[191930]: pgmap v2325: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 33 KiB/s rd, 0 B/s wr, 54 op/s
Oct 11 02:55:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2326: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 26 KiB/s rd, 0 B/s wr, 44 op/s
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.002072675530702611 of space, bias 1.0, pg target 0.6218026592107834 quantized to 32 (current 32)
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00125203744627857 of space, bias 1.0, pg target 0.375611233883571 quantized to 32 (current 32)
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:55:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
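
Every pg_autoscaler line follows one formula: raw pg target = capacity_ratio x (osd_count x mon_target_pg_per_osd) x bias, which is then quantized to a power of two subject to each pool's pg_num_min and a change threshold. The multiplier that reproduces the logged targets is 300, consistent with 3 OSDs at the default mon_target_pg_per_osd of 100 (inferred from the numbers, not stated in the log):

    # Sketch of the autoscaler arithmetic seen above; the x300 budget is an
    # inference (3 OSDs x mon_target_pg_per_osd=100). Quantization details
    # (pg_num_min, change thresholds) are deliberately simplified away.
    PG_BUDGET = 3 * 100  # osd_count * mon_target_pg_per_osd (assumed)

    pools = {
        "vms":                (0.002072675530702611, 1.0),
        "cephfs.cephfs.meta": (5.087256625643029e-07, 4.0),
    }
    for name, (capacity_ratio, bias) in pools.items():
        target = capacity_ratio * PG_BUDGET * bias
        print(f"{name}: pg target {target}")  # matches the logged pg targets
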
Oct 11 02:55:08 compute-0 ceph-mon[191930]: pgmap v2326: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 26 KiB/s rd, 0 B/s wr, 44 op/s
Oct 11 02:55:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2327: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 15 KiB/s rd, 0 B/s wr, 25 op/s
Oct 11 02:55:10 compute-0 nova_compute[356901]: 2025-10-11 02:55:10.480 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:10 compute-0 ceph-mon[191930]: pgmap v2327: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail; 15 KiB/s rd, 0 B/s wr, 25 op/s
Oct 11 02:55:10 compute-0 nova_compute[356901]: 2025-10-11 02:55:10.643 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2328: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:55:11 compute-0 nova_compute[356901]: 2025-10-11 02:55:11.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
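
That line is oslo.service's periodic-task machinery invoking ComputeManager._instance_usage_audit. The underlying pattern, sketched with an illustrative manager class (only the decorator and method names come from oslo_service.periodic_task; oslo packages must be installed):

    # Sketch of the oslo.service pattern behind "Running periodic task ...".
    # The Manager class and task body here are illustrative, not nova's code.
    from oslo_config import cfg
    from oslo_service import periodic_task

    class Manager(periodic_task.PeriodicTasks):
        @periodic_task.periodic_task(spacing=60, run_immediately=True)
        def _instance_usage_audit(self, context):
            print("auditing instance usage")

    mgr = Manager(cfg.CONF)
    mgr.run_periodic_tasks(context=None)  # the caller named in the log line
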
Oct 11 02:55:12 compute-0 podman[478759]: 2025-10-11 02:55:12.255515032 +0000 UTC m=+0.133123960 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-type=git, com.redhat.component=ubi9-container, distribution-scope=public, name=ubi9, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of Red Hat Universal Base Image 9., config_id=edpm, maintainer=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., version=9.4, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9, release=1214.1726694543, release-0.7.12=, io.buildah.version=1.29.0, io.openshift.expose-services=, io.openshift.tags=base rhel9, architecture=x86_64, managed_by=edpm_ansible, build-date=2024-09-18T21:23:30, container_name=kepler)
Oct 11 02:55:12 compute-0 ceph-mon[191930]: pgmap v2328: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2329: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.873 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads available to execute them. Therefore, one can expect the polling process to take longer than expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.874 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
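
The two messages describe a bounded executor: source [pollsters] has more pollsters than its single worker thread, so the cycle serializes. A toy reproduction of that scheduling effect, unrelated to ceilometer's code beyond the ThreadPoolExecutor its logs name:

    # Toy sketch: more tasks than workers => the cycle takes ~tasks/workers units.
    import time
    from concurrent.futures import ThreadPoolExecutor

    def poll(meter: str) -> str:
        time.sleep(0.1)  # stand-in for one pollster run
        return meter

    meters = [f"meter-{i}" for i in range(5)]
    start = time.monotonic()
    with ThreadPoolExecutor(max_workers=1) as ex:  # [1] thread, as in the log
        list(ex.map(poll, meters))
    print(f"5 pollsters, 1 worker: {time.monotonic() - start:.2f}s (~0.5s)")
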
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.874 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.875 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.879 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.879 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.879 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.879 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.879 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.882 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.882 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.886 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '8422017b-c868-4ba2-ab1f-61d3668ca145', 'name': 'te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c', 'flavor': {'id': '6dff30d1-85df-4e9c-9163-a20ba47bb0c7', 'name': 'm1.nano', 'vcpus': 1, 'ram': 128, 'disk': 1, 'ephemeral': 0, 'swap': 0}, 'image': {'id': '2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-0000000e', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'user_id': 'f66a606299944d53a40f21e81c791d70', 'hostId': 'cea8816d446065ba50379057f72b942db7e204c60c4530591bc7d0be', 'status': 'active', 'metadata': {'metering.server_group': '44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.892 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': 'f98d09d7-6aa0-4405-bfa0-be1f78d3911f', 'name': 'te-0512306-asg-am4iabdjybzp-yj44h76hdzhi-bejrsw3xgi4q', 'flavor': {'id': '6dff30d1-85df-4e9c-9163-a20ba47bb0c7', 'name': 'm1.nano', 'vcpus': 1, 'ram': 128, 'disk': 1, 'ephemeral': 0, 'swap': 0}, 'image': {'id': '2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-0000000f', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'user_id': 'f66a606299944d53a40f21e81c791d70', 'hostId': 'cea8816d446065ba50379057f72b942db7e204c60c4530591bc7d0be', 'status': 'active', 'metadata': {'metering.server_group': '44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.897 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.897 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.898 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.898 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.899 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:55:13.898558) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.898 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.909 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.bytes volume: 2450 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.917 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.bytes volume: 2276 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.924 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2856 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.926 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.926 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.926 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.926 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.927 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.927 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.928 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:55:13.927191) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.928 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets volume: 31 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.929 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.packets volume: 31 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.930 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 24 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.931 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.932 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.932 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.932 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.932 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.933 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.933 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.934 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.934 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.935 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.936 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:55:13.932808) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.936 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.936 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.936 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.937 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.937 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.937 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.938 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.938 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.939 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.939 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.940 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:55:13.937345) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.940 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.940 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.940 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.941 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:55:13.941071) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.941 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.968 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:13.969 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.capacity volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.003 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.004 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.capacity volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.042 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.043 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.044 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.045 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
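
The capacity samples line up with the flavors discovered earlier: every instance reports 1073741824 bytes, exactly the 1 GiB root disk of m1.nano and m1.small; each also reports a much smaller second device (about 498 KiB, unnamed in the log), and test_0 adds a second 1 GiB device matching its ephemeral=1. The arithmetic, for the record:

    # Capacity volumes are raw bytes: 1 GiB root disks appear as 2**30.
    assert 1073741824 == 2**30  # flavor disk=1 (GiB) for all three instances
    print(509952 / 1024)        # 498.0 KiB, the small second device
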
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.045 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.045 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.046 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.046 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.046 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.047 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:55:14.046441) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.089 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.bytes volume: 29657600 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.090 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.bytes volume: 299326 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.141 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.bytes volume: 31070720 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.142 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.bytes volume: 299326 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.214 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.215 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.216 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.217 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.217 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.218 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.218 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.218 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.218 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.219 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.latency volume: 2082910661 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.219 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:55:14.218639) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.219 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.latency volume: 143173838 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.220 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.latency volume: 2034310761 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.220 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.latency volume: 186412257 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.221 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.221 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.222 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.223 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.223 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.224 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.224 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.224 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.224 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.225 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:55:14.224704) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.225 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.requests volume: 1067 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.225 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.read.requests volume: 120 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.226 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.requests volume: 1136 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.226 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.read.requests volume: 120 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.227 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.227 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.228 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.229 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.229 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.229 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.229 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.230 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.230 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.230 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.231 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.usage volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.232 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.232 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.usage volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.233 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:55:14.230120) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.233 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.234 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.234 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.235 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.235 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.236 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.236 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.236 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.237 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.237 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.bytes volume: 73129984 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.238 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.239 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:55:14.236923) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.240 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.bytes volume: 73154560 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.241 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.242 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.242 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.243 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.244 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.244 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.244 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.245 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.245 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.245 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.245 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.latency volume: 8003595076 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.246 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:55:14.245514) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.246 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.247 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.latency volume: 8498421759 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.247 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.248 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.248 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.249 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.250 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.250 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.251 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.251 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.251 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.251 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.252 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:55:14.251890) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.283 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.324 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.359 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.360 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.361 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.361 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.361 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.361 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.361 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.361 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.requests volume: 334 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.362 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.362 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.requests volume: 304 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.362 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.363 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.363 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.364 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.365 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.364 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:55:14.361635) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.365 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.365 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.365 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.365 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.365 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.366 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.366 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.366 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.367 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.367 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.367 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.368 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.368 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:55:14.365835) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.368 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.368 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.368 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.369 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.369 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.371 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.371 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.371 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.372 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.372 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.372 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets volume: 30 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.372 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:55:14.368951) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.373 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.packets volume: 27 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.373 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.374 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.375 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.375 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.375 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.376 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.376 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.376 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:55:14.372315) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.376 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.376 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:55:14.376553) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.377 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.377 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.378 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.378 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.378 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.379 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.379 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.379 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.379 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:55:14.379556) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.380 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.381 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.381 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.381 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.381 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.382 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.382 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.382 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:55:14.382025) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.383 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.383 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.384 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.384 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.384 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.385 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.385 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.385 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.386 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.386 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:55:14.385697) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.386 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/disk.device.allocation volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.387 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.387 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/disk.device.allocation volume: 509952 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.388 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.388 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.388 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.389 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.390 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.390 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.390 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.390 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.390 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.391 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.391 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:55:14.390823) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.391 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.392 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.393 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.393 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.394 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.395 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.396 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.397 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.397 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:55:14.397154) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.398 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/cpu volume: 337260000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.399 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/cpu volume: 334820000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.400 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 66710000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.401 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.401 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.402 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.402 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.402 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.403 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.403 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:55:14.402989) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.403 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/network.outgoing.bytes volume: 2250 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.404 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/network.outgoing.bytes volume: 2250 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.405 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2412 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.406 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.406 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.406 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.407 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.407 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.407 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.407 14 DEBUG ceilometer.compute.pollsters [-] 8422017b-c868-4ba2-ab1f-61d3668ca145/memory.usage volume: 42.40625 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.408 14 DEBUG ceilometer.compute.pollsters [-] f98d09d7-6aa0-4405-bfa0-be1f78d3911f/memory.usage volume: 42.26171875 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.409 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.83984375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.409 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.410 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.410 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:55:14.407500) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.410 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.412 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.412 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.412 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.413 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.413 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.413 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.413 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.414 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.414 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.414 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.414 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.414 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.414 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.415 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.415 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.415 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.415 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.415 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.416 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.416 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.416 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.416 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.416 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.416 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.417 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:55:14.417 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:55:14 compute-0 ceph-mon[191930]: pgmap v2329: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2330: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:15 compute-0 nova_compute[356901]: 2025-10-11 02:55:15.484 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:15 compute-0 nova_compute[356901]: 2025-10-11 02:55:15.648 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:16 compute-0 ceph-mon[191930]: pgmap v2330: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:55:17 compute-0 podman[478777]: 2025-10-11 02:55:17.239672118 +0000 UTC m=+0.114916672 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:55:17 compute-0 podman[478780]: 2025-10-11 02:55:17.253022431 +0000 UTC m=+0.106125675 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, org.label-schema.license=GPLv2, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0)
Oct 11 02:55:17 compute-0 podman[478779]: 2025-10-11 02:55:17.254933759 +0000 UTC m=+0.120611934 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm)
Oct 11 02:55:17 compute-0 podman[478778]: 2025-10-11 02:55:17.302906655 +0000 UTC m=+0.170435644 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, config_id=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.vendor=CentOS)
Oct 11 02:55:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2331: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:17 compute-0 nova_compute[356901]: 2025-10-11 02:55:17.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:55:18 compute-0 ceph-mon[191930]: pgmap v2331: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2332: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:20 compute-0 nova_compute[356901]: 2025-10-11 02:55:20.490 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:20 compute-0 ceph-mon[191930]: pgmap v2332: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:20 compute-0 nova_compute[356901]: 2025-10-11 02:55:20.651 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:20 compute-0 nova_compute[356901]: 2025-10-11 02:55:20.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:55:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2333: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:55:21 compute-0 nova_compute[356901]: 2025-10-11 02:55:21.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:55:21 compute-0 nova_compute[356901]: 2025-10-11 02:55:21.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:55:21 compute-0 nova_compute[356901]: 2025-10-11 02:55:21.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:55:22 compute-0 ceph-mon[191930]: pgmap v2333: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2334: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:24 compute-0 ceph-mon[191930]: pgmap v2334: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:25 compute-0 podman[478860]: 2025-10-11 02:55:25.221376948 +0000 UTC m=+0.109876695 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, container_name=multipathd, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:55:25 compute-0 podman[478861]: 2025-10-11 02:55:25.237981028 +0000 UTC m=+0.123346296 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, config_id=iscsid, container_name=iscsid, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:55:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2335: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:25 compute-0 nova_compute[356901]: 2025-10-11 02:55:25.497 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:25 compute-0 nova_compute[356901]: 2025-10-11 02:55:25.655 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:55:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:55:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:55:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:55:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:55:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:55:26 compute-0 ceph-mon[191930]: pgmap v2335: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:55:26 compute-0 nova_compute[356901]: 2025-10-11 02:55:26.894 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:55:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2336: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:55:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/288248883' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:55:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:55:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/288248883' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:55:27 compute-0 nova_compute[356901]: 2025-10-11 02:55:27.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:55:28 compute-0 ceph-mon[191930]: pgmap v2336: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/288248883' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:55:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/288248883' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:55:28 compute-0 nova_compute[356901]: 2025-10-11 02:55:28.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:55:28 compute-0 nova_compute[356901]: 2025-10-11 02:55:28.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:55:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2337: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:29 compute-0 nova_compute[356901]: 2025-10-11 02:55:29.702 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-f98d09d7-6aa0-4405-bfa0-be1f78d3911f" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:55:29 compute-0 nova_compute[356901]: 2025-10-11 02:55:29.702 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-f98d09d7-6aa0-4405-bfa0-be1f78d3911f" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:55:29 compute-0 nova_compute[356901]: 2025-10-11 02:55:29.703 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:55:29 compute-0 podman[157119]: time="2025-10-11T02:55:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:55:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:55:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:55:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:55:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9550 "" "Go-http-client/1.1"
Oct 11 02:55:30 compute-0 nova_compute[356901]: 2025-10-11 02:55:30.503 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:30 compute-0 nova_compute[356901]: 2025-10-11 02:55:30.660 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:30 compute-0 ceph-mon[191930]: pgmap v2337: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2338: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:31 compute-0 openstack_network_exporter[374316]: ERROR   02:55:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:55:31 compute-0 openstack_network_exporter[374316]: ERROR   02:55:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:55:31 compute-0 openstack_network_exporter[374316]: ERROR   02:55:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:55:31 compute-0 openstack_network_exporter[374316]: ERROR   02:55:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:55:31 compute-0 openstack_network_exporter[374316]: ERROR   02:55:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:55:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:55:31 compute-0 nova_compute[356901]: 2025-10-11 02:55:31.891 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Updating instance_info_cache with network_info: [{"id": "0c37c119-6647-42bb-a22f-ca741242ef30", "address": "fa:16:3e:ee:94:7e", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.2.253", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap0c37c119-66", "ovs_interfaceid": "0c37c119-6647-42bb-a22f-ca741242ef30", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:55:32 compute-0 nova_compute[356901]: 2025-10-11 02:55:32.117 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-f98d09d7-6aa0-4405-bfa0-be1f78d3911f" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:55:32 compute-0 nova_compute[356901]: 2025-10-11 02:55:32.118 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:55:32 compute-0 nova_compute[356901]: 2025-10-11 02:55:32.120 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:55:32 compute-0 nova_compute[356901]: 2025-10-11 02:55:32.315 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:55:32 compute-0 nova_compute[356901]: 2025-10-11 02:55:32.316 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:55:32 compute-0 nova_compute[356901]: 2025-10-11 02:55:32.317 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:55:32 compute-0 nova_compute[356901]: 2025-10-11 02:55:32.318 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:55:32 compute-0 nova_compute[356901]: 2025-10-11 02:55:32.319 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:55:32 compute-0 ceph-mon[191930]: pgmap v2338: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:55:32 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3892426716' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:55:32 compute-0 nova_compute[356901]: 2025-10-11 02:55:32.831 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.512s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:55:32 compute-0 nova_compute[356901]: 2025-10-11 02:55:32.946 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:55:32 compute-0 nova_compute[356901]: 2025-10-11 02:55:32.948 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000e as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:55:32 compute-0 nova_compute[356901]: 2025-10-11 02:55:32.961 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:55:32 compute-0 nova_compute[356901]: 2025-10-11 02:55:32.962 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-0000000f as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:55:32 compute-0 nova_compute[356901]: 2025-10-11 02:55:32.970 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:55:32 compute-0 nova_compute[356901]: 2025-10-11 02:55:32.971 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:55:32 compute-0 nova_compute[356901]: 2025-10-11 02:55:32.972 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:55:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2339: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:33 compute-0 nova_compute[356901]: 2025-10-11 02:55:33.547 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:55:33 compute-0 nova_compute[356901]: 2025-10-11 02:55:33.550 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3199MB free_disk=59.86394500732422GB free_vcpus=5 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:55:33 compute-0 nova_compute[356901]: 2025-10-11 02:55:33.551 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:55:33 compute-0 nova_compute[356901]: 2025-10-11 02:55:33.552 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:55:33 compute-0 nova_compute[356901]: 2025-10-11 02:55:33.659 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:55:33 compute-0 nova_compute[356901]: 2025-10-11 02:55:33.659 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 8422017b-c868-4ba2-ab1f-61d3668ca145 actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:55:33 compute-0 nova_compute[356901]: 2025-10-11 02:55:33.659 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance f98d09d7-6aa0-4405-bfa0-be1f78d3911f actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 1, 'MEMORY_MB': 128, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:55:33 compute-0 nova_compute[356901]: 2025-10-11 02:55:33.659 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 3 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:55:33 compute-0 nova_compute[356901]: 2025-10-11 02:55:33.660 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1280MB phys_disk=59GB used_disk=4GB total_vcpus=8 used_vcpus=3 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:55:33 compute-0 nova_compute[356901]: 2025-10-11 02:55:33.736 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:55:33 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3892426716' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:55:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:55:34 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2107807282' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:55:34 compute-0 podman[478941]: 2025-10-11 02:55:34.241078218 +0000 UTC m=+0.126510008 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, url=https://catalog.redhat.com/en/search?searchType=containers, container_name=openstack_network_exporter, version=9.6, name=ubi9-minimal, release=1755695350, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vendor=Red Hat, Inc., distribution-scope=public, maintainer=Red Hat, Inc., architecture=x86_64, io.openshift.expose-services=, io.openshift.tags=minimal rhel9, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2025-08-20T13:12:41, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, com.redhat.component=ubi9-minimal-container, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.buildah.version=1.33.7, managed_by=edpm_ansible, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm)
Oct 11 02:55:34 compute-0 podman[478942]: 2025-10-11 02:55:34.249299417 +0000 UTC m=+0.125840242 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:55:34 compute-0 nova_compute[356901]: 2025-10-11 02:55:34.260 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.525s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:55:34 compute-0 nova_compute[356901]: 2025-10-11 02:55:34.270 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:55:34 compute-0 nova_compute[356901]: 2025-10-11 02:55:34.314 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:55:34 compute-0 nova_compute[356901]: 2025-10-11 02:55:34.317 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:55:34 compute-0 nova_compute[356901]: 2025-10-11 02:55:34.317 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.766s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:55:34 compute-0 podman[478983]: 2025-10-11 02:55:34.380133663 +0000 UTC m=+0.075632216 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 02:55:34 compute-0 ceph-mon[191930]: pgmap v2339: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:34 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2107807282' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:55:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2340: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:35 compute-0 nova_compute[356901]: 2025-10-11 02:55:35.509 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:35 compute-0 nova_compute[356901]: 2025-10-11 02:55:35.663 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:55:36 compute-0 ceph-mon[191930]: pgmap v2340: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2341: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:38 compute-0 ceph-mon[191930]: pgmap v2341: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2342: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:40 compute-0 nova_compute[356901]: 2025-10-11 02:55:40.513 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:40 compute-0 nova_compute[356901]: 2025-10-11 02:55:40.666 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:40 compute-0 ceph-mon[191930]: pgmap v2342: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2343: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:41 compute-0 sudo[479002]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:55:41 compute-0 sudo[479002]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:55:41 compute-0 sudo[479002]: pam_unix(sudo:session): session closed for user root
Oct 11 02:55:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:55:41 compute-0 sudo[479027]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:55:41 compute-0 sudo[479027]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:55:41 compute-0 sudo[479027]: pam_unix(sudo:session): session closed for user root
Oct 11 02:55:41 compute-0 sudo[479052]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:55:41 compute-0 sudo[479052]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:55:41 compute-0 sudo[479052]: pam_unix(sudo:session): session closed for user root
Oct 11 02:55:42 compute-0 sudo[479077]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:55:42 compute-0 sudo[479077]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:55:42 compute-0 sudo[479077]: pam_unix(sudo:session): session closed for user root
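
The sudo triplet above (/bin/true, which python3, then a long-hashed cephadm file) is the pattern the cephadm mgr module drives over SSH as ceph-admin: probe that passwordless sudo works, locate an interpreter, then execute the versioned copy of the cephadm binary under /var/lib/ceph/<fsid>/ with a timeout. Run locally, the gather-facts step looks roughly like this (paths copied from the log; that stdout parses as a JSON document of host facts is an assumption):

    import json, subprocess

    fsid = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"
    cephadm = (f"/var/lib/ceph/{fsid}/cephadm."
               "31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d")

    # Same probe sequence as the audit trail above.
    subprocess.run(["sudo", "/bin/true"], check=True)
    python3 = subprocess.run(["sudo", "/bin/which", "python3"],
                             capture_output=True, text=True,
                             check=True).stdout.strip()
    facts = subprocess.run(["sudo", python3, cephadm,
                            "--timeout", "895", "gather-facts"],
                           capture_output=True, text=True, check=True)
    print(sorted(json.loads(facts.stdout)))  # keys of the host-facts JSON
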
Oct 11 02:55:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:55:42 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:55:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:55:42 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:55:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:55:42 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:55:42 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 85f0fcef-3de5-4d67-bb31-66fe99733237 does not exist
Oct 11 02:55:42 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 085915fa-1526-40c1-bdc1-b03b66a7863f does not exist
Oct 11 02:55:42 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 4b27ad60-a020-4e11-8521-e61d51919417 does not exist
Oct 11 02:55:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:55:42 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:55:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:55:42 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:55:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:55:42 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:55:42 compute-0 ceph-mon[191930]: pgmap v2343: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:55:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:55:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:55:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:55:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:55:42 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
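
This burst of mon commands is the cephadm module assembling what a new OSD needs before it touches any disks: a minimal ceph.conf (config generate-minimal-conf), the admin and bootstrap-osd keyrings (auth get), and a scan of the OSD tree for destroyed entries whose ids could be recycled. The same commands can be replayed from the ceph CLI; a sketch in which each call maps onto one handle_command/audit pair above:

    import subprocess

    def mon_cmd(*args):
        return subprocess.run(["ceph", *args], capture_output=True,
                              text=True, check=True).stdout

    minimal_conf  = mon_cmd("config", "generate-minimal-conf")
    bootstrap_key = mon_cmd("auth", "get", "client.bootstrap-osd")
    destroyed     = mon_cmd("osd", "tree", "destroyed", "--format", "json")
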
Oct 11 02:55:42 compute-0 sudo[479132]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:55:42 compute-0 sudo[479132]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:55:42 compute-0 sudo[479132]: pam_unix(sudo:session): session closed for user root
Oct 11 02:55:43 compute-0 sudo[479158]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:55:43 compute-0 sudo[479158]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:55:43 compute-0 sudo[479158]: pam_unix(sudo:session): session closed for user root
Oct 11 02:55:43 compute-0 podman[479156]: 2025-10-11 02:55:43.07438133 +0000 UTC m=+0.127470801 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.tags=base rhel9, release=1214.1726694543, vcs-type=git, name=ubi9, vendor=Red Hat, Inc., distribution-scope=public, managed_by=edpm_ansible, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm, io.openshift.expose-services=, com.redhat.component=ubi9-container, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, version=9.4, maintainer=Red Hat, Inc., build-date=2024-09-18T21:23:30, container_name=kepler, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release-0.7.12=, summary=Provides the latest release of Red Hat Universal Base Image 9., io.buildah.version=1.29.0)
Oct 11 02:55:43 compute-0 sudo[479196]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:55:43 compute-0 sudo[479196]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:55:43 compute-0 sudo[479196]: pam_unix(sudo:session): session closed for user root
Oct 11 02:55:43 compute-0 sudo[479224]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:55:43 compute-0 sudo[479224]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
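
This is the OSD creation step itself. cephadm pipes a config/keyring blob on stdin (the trailing '-' after --config-json), exports CEPH_VOLUME_OSDSPEC_AFFINITY so the resulting OSDs are tagged with the drive-group name, and everything after '--' passes through to ceph-volume inside the container: lvm batch over three pre-created LVs, --no-auto to take them as explicit data devices, --no-systemd because cephadm runs the daemons under its own units. A hedged reconstruction (config_json is a placeholder for the blob the mgr actually pipes; image and paths are copied from the log):

    import subprocess

    fsid = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"
    cephadm = (f"/var/lib/ceph/{fsid}/cephadm."
               "31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d")
    image = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")
    lvs = ["/dev/ceph_vg0/ceph_lv0", "/dev/ceph_vg1/ceph_lv1",
           "/dev/ceph_vg2/ceph_lv2"]
    config_json = '{"config": "...", "keyring": "..."}'  # stand-in blob

    subprocess.run(
        ["sudo", "python3", cephadm,
         "--env", "CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group",
         "--image", image, "--timeout", "895",
         "ceph-volume", "--fsid", fsid, "--config-json", "-", "--",
         "lvm", "batch", "--no-auto", *lvs, "--yes", "--no-systemd"],
        input=config_json, text=True, check=True)
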
Oct 11 02:55:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2344: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:43 compute-0 podman[479289]: 2025-10-11 02:55:43.748367853 +0000 UTC m=+0.066892122 container create 7079d1a5a406e30c1b43b8e157696168eeb3363ff1827a1a1e9f79ecbfb2ef31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_torvalds, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Oct 11 02:55:43 compute-0 systemd[1]: Started libpod-conmon-7079d1a5a406e30c1b43b8e157696168eeb3363ff1827a1a1e9f79ecbfb2ef31.scope.
Oct 11 02:55:43 compute-0 podman[479289]: 2025-10-11 02:55:43.716856957 +0000 UTC m=+0.035381266 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:55:43 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:55:43 compute-0 podman[479289]: 2025-10-11 02:55:43.876005715 +0000 UTC m=+0.194530074 container init 7079d1a5a406e30c1b43b8e157696168eeb3363ff1827a1a1e9f79ecbfb2ef31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_torvalds, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:55:43 compute-0 podman[479289]: 2025-10-11 02:55:43.88639661 +0000 UTC m=+0.204920879 container start 7079d1a5a406e30c1b43b8e157696168eeb3363ff1827a1a1e9f79ecbfb2ef31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_torvalds, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:55:43 compute-0 trusting_torvalds[479305]: 167 167
Oct 11 02:55:43 compute-0 systemd[1]: libpod-7079d1a5a406e30c1b43b8e157696168eeb3363ff1827a1a1e9f79ecbfb2ef31.scope: Deactivated successfully.
Oct 11 02:55:43 compute-0 conmon[479305]: conmon 7079d1a5a406e30c1b43 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-7079d1a5a406e30c1b43b8e157696168eeb3363ff1827a1a1e9f79ecbfb2ef31.scope/container/memory.events
Oct 11 02:55:43 compute-0 podman[479289]: 2025-10-11 02:55:43.894814302 +0000 UTC m=+0.213338611 container attach 7079d1a5a406e30c1b43b8e157696168eeb3363ff1827a1a1e9f79ecbfb2ef31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_torvalds, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:55:43 compute-0 podman[479289]: 2025-10-11 02:55:43.899999699 +0000 UTC m=+0.218524018 container died 7079d1a5a406e30c1b43b8e157696168eeb3363ff1827a1a1e9f79ecbfb2ef31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_torvalds, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:55:43 compute-0 systemd[1]: var-lib-containers-storage-overlay-689ca661c5c3df3d025587932495b2a5a05f75cc1709bba4a018b07c3bdb9f5c-merged.mount: Deactivated successfully.
Oct 11 02:55:43 compute-0 podman[479289]: 2025-10-11 02:55:43.96240156 +0000 UTC m=+0.280925829 container remove 7079d1a5a406e30c1b43b8e157696168eeb3363ff1827a1a1e9f79ecbfb2ef31 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_torvalds, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Oct 11 02:55:43 compute-0 systemd[1]: libpod-conmon-7079d1a5a406e30c1b43b8e157696168eeb3363ff1827a1a1e9f79ecbfb2ef31.scope: Deactivated successfully.
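
The throwaway trusting_torvalds container (create, start, attach, died, remove within a quarter of a second) is a cephadm probe: it runs the ceph image once just to learn the uid/gid of the ceph user inside the image, which is the "167 167" the container printed. The conmon warning about memory.events is benign; the container exited before conmon could open its cgroup files. A plausible reconstruction of the probe (the stat command and the path checked are assumptions inferred from the printed output):

    import subprocess

    image = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")

    # One-shot run with stat as the entrypoint; prints "167 167" here.
    out = subprocess.run(
        ["podman", "run", "--rm", "--entrypoint", "stat", image,
         "-c", "%u %g", "/var/lib/ceph"],
        capture_output=True, text=True, check=True).stdout.split()
    uid, gid = map(int, out)
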
Oct 11 02:55:44 compute-0 podman[479328]: 2025-10-11 02:55:44.265326699 +0000 UTC m=+0.073118988 container create 192fe8628876abc195e9f876e88c869f034d817ac87ec1b6841984bfc56dfcce (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_shirley, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 02:55:44 compute-0 podman[479328]: 2025-10-11 02:55:44.236114647 +0000 UTC m=+0.043906916 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:55:44 compute-0 systemd[1]: Started libpod-conmon-192fe8628876abc195e9f876e88c869f034d817ac87ec1b6841984bfc56dfcce.scope.
Oct 11 02:55:44 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:55:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3757a0760cc41a466684460f2ad609ae5d9ffd77d4d1ff4a9fed84ce923d79c3/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:55:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3757a0760cc41a466684460f2ad609ae5d9ffd77d4d1ff4a9fed84ce923d79c3/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:55:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3757a0760cc41a466684460f2ad609ae5d9ffd77d4d1ff4a9fed84ce923d79c3/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:55:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3757a0760cc41a466684460f2ad609ae5d9ffd77d4d1ff4a9fed84ce923d79c3/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:55:44 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3757a0760cc41a466684460f2ad609ae5d9ffd77d4d1ff4a9fed84ce923d79c3/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
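
The xfs "timestamps until 2038" lines appear whenever a writable remount touches an xfs filesystem formatted without bigtime support: inode timestamps are 32-bit, so they run out at 0x7fffffff seconds past the epoch. The cutoff the kernel quotes is easy to verify:

    from datetime import datetime, timezone

    # 0x7fffffff = largest signed 32-bit time_t, the limit in the log line.
    limit = datetime.fromtimestamp(0x7FFFFFFF, tz=timezone.utc)
    print(limit.isoformat())  # 2038-01-19T03:14:07+00:00
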
Oct 11 02:55:44 compute-0 podman[479328]: 2025-10-11 02:55:44.411198529 +0000 UTC m=+0.218990808 container init 192fe8628876abc195e9f876e88c869f034d817ac87ec1b6841984bfc56dfcce (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_shirley, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, io.buildah.version=1.39.3)
Oct 11 02:55:44 compute-0 podman[479328]: 2025-10-11 02:55:44.439209562 +0000 UTC m=+0.247001821 container start 192fe8628876abc195e9f876e88c869f034d817ac87ec1b6841984bfc56dfcce (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_shirley, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:55:44 compute-0 podman[479328]: 2025-10-11 02:55:44.444477784 +0000 UTC m=+0.252270093 container attach 192fe8628876abc195e9f876e88c869f034d817ac87ec1b6841984bfc56dfcce (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_shirley, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:55:44 compute-0 ceph-mon[191930]: pgmap v2344: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2345: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:45 compute-0 nova_compute[356901]: 2025-10-11 02:55:45.521 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:45 compute-0 nova_compute[356901]: 2025-10-11 02:55:45.669 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:45 compute-0 lucid_shirley[479344]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:55:45 compute-0 lucid_shirley[479344]: --> relative data size: 1.0
Oct 11 02:55:45 compute-0 lucid_shirley[479344]: --> All data devices are unavailable
Oct 11 02:55:45 compute-0 systemd[1]: libpod-192fe8628876abc195e9f876e88c869f034d817ac87ec1b6841984bfc56dfcce.scope: Deactivated successfully.
Oct 11 02:55:45 compute-0 systemd[1]: libpod-192fe8628876abc195e9f876e88c869f034d817ac87ec1b6841984bfc56dfcce.scope: Consumed 1.341s CPU time.
Oct 11 02:55:45 compute-0 podman[479328]: 2025-10-11 02:55:45.879017686 +0000 UTC m=+1.686809975 container died 192fe8628876abc195e9f876e88c869f034d817ac87ec1b6841984bfc56dfcce (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_shirley, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 02:55:45 compute-0 systemd[1]: var-lib-containers-storage-overlay-3757a0760cc41a466684460f2ad609ae5d9ffd77d4d1ff4a9fed84ce923d79c3-merged.mount: Deactivated successfully.
Oct 11 02:55:45 compute-0 podman[479328]: 2025-10-11 02:55:45.98873588 +0000 UTC m=+1.796528139 container remove 192fe8628876abc195e9f876e88c869f034d817ac87ec1b6841984bfc56dfcce (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_shirley, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:55:46 compute-0 systemd[1]: libpod-conmon-192fe8628876abc195e9f876e88c869f034d817ac87ec1b6841984bfc56dfcce.scope: Deactivated successfully.
Oct 11 02:55:46 compute-0 sudo[479224]: pam_unix(sudo:session): session closed for user root
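
The batch run ends with "passed data devices: 0 physical, 3 LVM" and "All data devices are unavailable", after which the container and the sudo session wind down cleanly. That message is not a failure here: all three LVs already carry ceph.osd_id tags from the earlier deployment, so ceph-volume counts them as consumed and the batch call becomes an idempotent no-op. One way to confirm why the devices were skipped is to inspect the LVM tags directly (lvs offers a JSON reporter via --reportformat json):

    import json, subprocess

    report = json.loads(subprocess.run(
        ["lvs", "-o", "lv_name,lv_tags", "--reportformat", "json"],
        capture_output=True, text=True, check=True).stdout)

    for lv in report["report"][0]["lv"]:
        # An LV tagged with ceph.osd_id is exactly what lvm batch
        # refuses to re-consume as a fresh data device.
        if "ceph.osd_id=" in lv["lv_tags"]:
            print(lv["lv_name"], "already holds an OSD")
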
Oct 11 02:55:46 compute-0 sudo[479384]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:55:46 compute-0 sudo[479384]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:55:46 compute-0 sudo[479384]: pam_unix(sudo:session): session closed for user root
Oct 11 02:55:46 compute-0 sudo[479409]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:55:46 compute-0 sudo[479409]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:55:46 compute-0 sudo[479409]: pam_unix(sudo:session): session closed for user root
Oct 11 02:55:46 compute-0 sudo[479434]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:55:46 compute-0 sudo[479434]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:55:46 compute-0 sudo[479434]: pam_unix(sudo:session): session closed for user root
Oct 11 02:55:46 compute-0 sudo[479459]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:55:46 compute-0 sudo[479459]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:55:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:55:46 compute-0 ceph-mon[191930]: pgmap v2345: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:47 compute-0 podman[479523]: 2025-10-11 02:55:47.138900583 +0000 UTC m=+0.058993565 container create 070da41262a6552711d09e4122fdb469e6c52bd893276154fc8fc75858b00964 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_vaughan, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Oct 11 02:55:47 compute-0 systemd[1]: Started libpod-conmon-070da41262a6552711d09e4122fdb469e6c52bd893276154fc8fc75858b00964.scope.
Oct 11 02:55:47 compute-0 podman[479523]: 2025-10-11 02:55:47.118820421 +0000 UTC m=+0.038913393 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:55:47 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:55:47 compute-0 podman[479523]: 2025-10-11 02:55:47.242705012 +0000 UTC m=+0.162798054 container init 070da41262a6552711d09e4122fdb469e6c52bd893276154fc8fc75858b00964 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_vaughan, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default)
Oct 11 02:55:47 compute-0 podman[479523]: 2025-10-11 02:55:47.252739663 +0000 UTC m=+0.172832645 container start 070da41262a6552711d09e4122fdb469e6c52bd893276154fc8fc75858b00964 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_vaughan, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True)
Oct 11 02:55:47 compute-0 podman[479523]: 2025-10-11 02:55:47.259333183 +0000 UTC m=+0.179426175 container attach 070da41262a6552711d09e4122fdb469e6c52bd893276154fc8fc75858b00964 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_vaughan, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:55:47 compute-0 sweet_vaughan[479539]: 167 167
Oct 11 02:55:47 compute-0 systemd[1]: libpod-070da41262a6552711d09e4122fdb469e6c52bd893276154fc8fc75858b00964.scope: Deactivated successfully.
Oct 11 02:55:47 compute-0 podman[479523]: 2025-10-11 02:55:47.261702982 +0000 UTC m=+0.181795934 container died 070da41262a6552711d09e4122fdb469e6c52bd893276154fc8fc75858b00964 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_vaughan, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:55:47 compute-0 systemd[1]: var-lib-containers-storage-overlay-40507f12ab326166a0c696f4a5d1ed8458fd22a7f0effd2416d229a2d0bda168-merged.mount: Deactivated successfully.
Oct 11 02:55:47 compute-0 podman[479523]: 2025-10-11 02:55:47.339653012 +0000 UTC m=+0.259745994 container remove 070da41262a6552711d09e4122fdb469e6c52bd893276154fc8fc75858b00964 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sweet_vaughan, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0)
Oct 11 02:55:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2346: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:47 compute-0 systemd[1]: libpod-conmon-070da41262a6552711d09e4122fdb469e6c52bd893276154fc8fc75858b00964.scope: Deactivated successfully.
Oct 11 02:55:47 compute-0 podman[479554]: 2025-10-11 02:55:47.401035075 +0000 UTC m=+0.098462672 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.schema-version=1.0, config_id=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, managed_by=edpm_ansible)
Oct 11 02:55:47 compute-0 podman[479546]: 2025-10-11 02:55:47.410060689 +0000 UTC m=+0.108721479 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:55:47 compute-0 podman[479553]: 2025-10-11 02:55:47.444224142 +0000 UTC m=+0.141348969 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 02:55:47 compute-0 podman[479601]: 2025-10-11 02:55:47.517921419 +0000 UTC m=+0.115522433 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 02:55:47 compute-0 podman[479643]: 2025-10-11 02:55:47.580211852 +0000 UTC m=+0.075819759 container create 4f4b514eeeab961bff8b6b5f4609d714b9d9e502c21397826da66416ca5c9b3c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_fermi, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3)
Oct 11 02:55:47 compute-0 podman[479643]: 2025-10-11 02:55:47.551095936 +0000 UTC m=+0.046703883 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:55:47 compute-0 systemd[1]: Started libpod-conmon-4f4b514eeeab961bff8b6b5f4609d714b9d9e502c21397826da66416ca5c9b3c.scope.
Oct 11 02:55:47 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:55:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cb9a4c909435a94c59000d384e280a4b2638d81441ad915b058c26b572369589/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:55:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cb9a4c909435a94c59000d384e280a4b2638d81441ad915b058c26b572369589/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:55:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cb9a4c909435a94c59000d384e280a4b2638d81441ad915b058c26b572369589/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:55:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/cb9a4c909435a94c59000d384e280a4b2638d81441ad915b058c26b572369589/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:55:47 compute-0 podman[479643]: 2025-10-11 02:55:47.743664358 +0000 UTC m=+0.239272285 container init 4f4b514eeeab961bff8b6b5f4609d714b9d9e502c21397826da66416ca5c9b3c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_fermi, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:55:47 compute-0 podman[479643]: 2025-10-11 02:55:47.770785242 +0000 UTC m=+0.266393139 container start 4f4b514eeeab961bff8b6b5f4609d714b9d9e502c21397826da66416ca5c9b3c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_fermi, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 02:55:47 compute-0 podman[479643]: 2025-10-11 02:55:47.775621195 +0000 UTC m=+0.271229092 container attach 4f4b514eeeab961bff8b6b5f4609d714b9d9e502c21397826da66416ca5c9b3c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_fermi, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default)
Oct 11 02:55:48 compute-0 elated_fermi[479663]: {
Oct 11 02:55:48 compute-0 elated_fermi[479663]:     "0": [
Oct 11 02:55:48 compute-0 elated_fermi[479663]:         {
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "devices": [
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "/dev/loop3"
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             ],
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "lv_name": "ceph_lv0",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "lv_size": "21470642176",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "name": "ceph_lv0",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "tags": {
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.cluster_name": "ceph",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.crush_device_class": "",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.encrypted": "0",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.osd_id": "0",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.type": "block",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.vdo": "0"
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             },
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "type": "block",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "vg_name": "ceph_vg0"
Oct 11 02:55:48 compute-0 elated_fermi[479663]:         }
Oct 11 02:55:48 compute-0 elated_fermi[479663]:     ],
Oct 11 02:55:48 compute-0 elated_fermi[479663]:     "1": [
Oct 11 02:55:48 compute-0 elated_fermi[479663]:         {
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "devices": [
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "/dev/loop4"
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             ],
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "lv_name": "ceph_lv1",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "lv_size": "21470642176",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "name": "ceph_lv1",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "tags": {
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.cluster_name": "ceph",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.crush_device_class": "",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.encrypted": "0",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.osd_id": "1",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.type": "block",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.vdo": "0"
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             },
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "type": "block",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "vg_name": "ceph_vg1"
Oct 11 02:55:48 compute-0 elated_fermi[479663]:         }
Oct 11 02:55:48 compute-0 elated_fermi[479663]:     ],
Oct 11 02:55:48 compute-0 elated_fermi[479663]:     "2": [
Oct 11 02:55:48 compute-0 elated_fermi[479663]:         {
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "devices": [
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "/dev/loop5"
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             ],
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "lv_name": "ceph_lv2",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "lv_size": "21470642176",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "name": "ceph_lv2",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "tags": {
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.cluster_name": "ceph",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.crush_device_class": "",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.encrypted": "0",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.osd_id": "2",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.type": "block",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:                 "ceph.vdo": "0"
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             },
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "type": "block",
Oct 11 02:55:48 compute-0 elated_fermi[479663]:             "vg_name": "ceph_vg2"
Oct 11 02:55:48 compute-0 elated_fermi[479663]:         }
Oct 11 02:55:48 compute-0 elated_fermi[479663]:     ]
Oct 11 02:55:48 compute-0 elated_fermi[479663]: }
Oct 11 02:55:48 compute-0 systemd[1]: libpod-4f4b514eeeab961bff8b6b5f4609d714b9d9e502c21397826da66416ca5c9b3c.scope: Deactivated successfully.
Oct 11 02:55:48 compute-0 podman[479643]: 2025-10-11 02:55:48.655776988 +0000 UTC m=+1.151384935 container died 4f4b514eeeab961bff8b6b5f4609d714b9d9e502c21397826da66416ca5c9b3c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_fermi, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, ceph=True, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:55:48 compute-0 systemd[1]: var-lib-containers-storage-overlay-cb9a4c909435a94c59000d384e280a4b2638d81441ad915b058c26b572369589-merged.mount: Deactivated successfully.
Oct 11 02:55:48 compute-0 podman[479643]: 2025-10-11 02:55:48.764502706 +0000 UTC m=+1.260110613 container remove 4f4b514eeeab961bff8b6b5f4609d714b9d9e502c21397826da66416ca5c9b3c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elated_fermi, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 02:55:48 compute-0 systemd[1]: libpod-conmon-4f4b514eeeab961bff8b6b5f4609d714b9d9e502c21397826da66416ca5c9b3c.scope: Deactivated successfully.
Oct 11 02:55:48 compute-0 sudo[479459]: pam_unix(sudo:session): session closed for user root
Oct 11 02:55:48 compute-0 ceph-mon[191930]: pgmap v2346: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:48 compute-0 sudo[479687]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:55:48 compute-0 sudo[479687]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:55:48 compute-0 sudo[479687]: pam_unix(sudo:session): session closed for user root
Oct 11 02:55:49 compute-0 sudo[479712]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:55:49 compute-0 sudo[479712]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:55:49 compute-0 sudo[479712]: pam_unix(sudo:session): session closed for user root
Oct 11 02:55:49 compute-0 sudo[479737]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:55:49 compute-0 sudo[479737]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:55:49 compute-0 sudo[479737]: pam_unix(sudo:session): session closed for user root
Oct 11 02:55:49 compute-0 sudo[479762]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:55:49 compute-0 sudo[479762]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.348 2 DEBUG oslo_concurrency.lockutils [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "8422017b-c868-4ba2-ab1f-61d3668ca145" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.351 2 DEBUG oslo_concurrency.lockutils [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145" acquired by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: waited 0.003s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.352 2 DEBUG oslo_concurrency.lockutils [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.352 2 DEBUG oslo_concurrency.lockutils [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" acquired by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.353 2 DEBUG oslo_concurrency.lockutils [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" "released" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.354 2 INFO nova.compute.manager [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Terminating instance
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.355 2 DEBUG nova.compute.manager [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Start destroying the instance on the hypervisor. _shutdown_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:3120
Oct 11 02:55:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2347: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:49 compute-0 kernel: tape00931c0-3d (unregistering): left promiscuous mode
Oct 11 02:55:49 compute-0 NetworkManager[44908]: <info>  [1760151349.4444] device (tape00931c0-3d): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Oct 11 02:55:49 compute-0 ovn_controller[88370]: 2025-10-11T02:55:49Z|00188|binding|INFO|Releasing lport e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 from this chassis (sb_readonly=0)
Oct 11 02:55:49 compute-0 ovn_controller[88370]: 2025-10-11T02:55:49Z|00189|binding|INFO|Setting lport e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 down in Southbound
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.463 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:49 compute-0 ovn_controller[88370]: 2025-10-11T02:55:49Z|00190|binding|INFO|Removing iface tape00931c0-3d ovn-installed in OVS
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.469 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:49.476 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:2c:af:96 10.100.3.53'], port_security=['fa:16:3e:2c:af:96 10.100.3.53'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.3.53/16', 'neutron:device_id': '8422017b-c868-4ba2-ab1f-61d3668ca145', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-3563b4a1-477a-44a0-b01f-7d19d49c0308', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'neutron:revision_number': '4', 'neutron:security_group_ids': 'd961c453-0bcb-43ec-b528-5018786739ee', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:host_id': 'compute-0.ctlplane.example.com'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=4674209d-30ab-42f4-9114-728458c302a8, chassis=[], tunnel_key=2, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6) old=Port_Binding(up=[True], chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:55:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:49.478 286362 INFO neutron.agent.ovn.metadata.agent [-] Port e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 in datapath 3563b4a1-477a-44a0-b01f-7d19d49c0308 unbound from our chassis
Oct 11 02:55:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:49.479 286362 INFO neutron.agent.ovn.metadata.agent [-] Provisioning metadata for network 3563b4a1-477a-44a0-b01f-7d19d49c0308
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.494 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:49.507 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[f569b9f6-aecd-4ff9-afe4-37a7e2c57f3c]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:55:49 compute-0 systemd[1]: machine-qemu\x2d15\x2dinstance\x2d0000000e.scope: Deactivated successfully.
Oct 11 02:55:49 compute-0 systemd[1]: machine-qemu\x2d15\x2dinstance\x2d0000000e.scope: Consumed 7min 9.698s CPU time.
Oct 11 02:55:49 compute-0 systemd-machined[137586]: Machine qemu-15-instance-0000000e terminated.
Oct 11 02:55:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:49.555 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[d96e2692-fb55-4bb7-9a5a-87bb72ba4f2c]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:55:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:49.560 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[a1d47963-99d6-427f-a46a-67185ba60c07]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.582 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.590 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.602 2 INFO nova.virt.libvirt.driver [-] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Instance destroyed successfully.
Oct 11 02:55:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:49.602 422988 DEBUG oslo.privsep.daemon [-] privsep: reply[43b4b722-73f6-4aa9-9d8b-0d8672c7c052]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.603 2 DEBUG nova.objects.instance [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lazy-loading 'resources' on Instance uuid 8422017b-c868-4ba2-ab1f-61d3668ca145 obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:55:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:49.625 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[540fea68-08cf-42bf-8dd4-f64d6479201a]: (4, [{'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [['IFLA_IFNAME', 'tap3563b4a1-41'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UP'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 1500], ['IFLA_MIN_MTU', 68], ['IFLA_MAX_MTU', 65535], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 8], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 8], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 2], ['IFLA_CARRIER_UP_COUNT', 1], ['IFLA_CARRIER_DOWN_COUNT', 1], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', 'fa:16:3e:25:cf:fd'], ['IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'], ['IFLA_STATS64', {'rx_packets': 42, 'tx_packets': 7, 'rx_bytes': 2260, 'tx_bytes': 438, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 42, 'tx_packets': 7, 'rx_bytes': 2260, 'tx_bytes': 438, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', 'veth']]}], ['IFLA_LINK_NETNSID', 0], ['IFLA_LINK', 47], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 694227, 'reachable_time': 42074, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 8, 'inoctets': 720, 'indelivers': 1, 'outforwdatagrams': 0, 'outpkts': 3, 'outoctets': 228, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 8, 'outmcastpkts': 3, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 720, 'outmcastoctets': 228, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 8, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 1, 'inerrors': 0, 'outmsgs': 3, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1448, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 479826, 'error': None, 'target': 'ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.624 2 DEBUG nova.virt.libvirt.vif [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:43:05Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=<?>,disable_terminate=False,display_description=None,display_name='te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c',ec2_ids=<?>,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='te-0512306-asg-am4iabdjybzp-pcvex4npiyvj-cf7cxs52a76c',id=14,image_ref='2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data=None,key_name=None,keypairs=<?>,launch_index=0,launched_at=2025-10-11T02:43:16Z,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={metering.server_group='44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'},migration_context=<?>,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=<?>,power_state=1,progress=0,project_id='a05bbc8f872d4dd99972d2cb8136d608',ramdisk_id='',reservation_id='r-zwtwqn0d',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c',image_container_format='bare',image_disk_format='qcow2',image_hw_cdrom_bus='sata',image_hw_disk_bus='virtio',image_hw_input_bus='usb',image_hw_machine_type='q35',image_hw_pointer_model='usbtablet',image_hw_video_model='virtio',image_hw_vif_model='virtio',image_min_disk='1',image_min_ram='0',owner_project_name='tempest-PrometheusGabbiTest-674022988',owner_user_name='tempest-PrometheusGabbiTest-674022988-project-member'},tags=<?>,task_state='deleting',terminated_at=None,trusted_certs=<?>,updated_at=2025-10-11T02:43:16Z,user_data='IyEvYmluL3NoCmVjaG8gJ0xvYWRpbmcgQ1BVJwpzZXQgLXYKY2F0IC9kZXYvdXJhbmRvbSA+IC9kZXYvbnVsbCAmIHNsZWVwIDMwMCA7IGtpbGwgJCEgCg==',user_id='f66a606299944d53a40f21e81c791d70',uuid=8422017b-c868-4ba2-ab1f-61d3668ca145,vcpu_model=<?>,vcpus=1,vm_mode=None,vm_state='active') vif={"id": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "address": "fa:16:3e:2c:af:96", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.3.53", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape00931c0-3d", "ovs_interfaceid": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} unplug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:828
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.624 2 DEBUG nova.network.os_vif_util [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Converting VIF {"id": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "address": "fa:16:3e:2c:af:96", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.3.53", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tape00931c0-3d", "ovs_interfaceid": "e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.625 2 DEBUG nova.network.os_vif_util [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Converted object VIFOpenVSwitch(active=True,address=fa:16:3e:2c:af:96,bridge_name='br-int',has_traffic_filtering=True,id=e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6,network=Network(3563b4a1-477a-44a0-b01f-7d19d49c0308),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tape00931c0-3d') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.625 2 DEBUG os_vif [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Unplugging vif VIFOpenVSwitch(active=True,address=fa:16:3e:2c:af:96,bridge_name='br-int',has_traffic_filtering=True,id=e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6,network=Network(3563b4a1-477a-44a0-b01f-7d19d49c0308),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tape00931c0-3d') unplug /usr/lib/python3.9/site-packages/os_vif/__init__.py:109
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.627 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.627 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tape00931c0-3d, bridge=br-int, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.629 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.632 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.632 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.635 2 INFO os_vif [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Successfully unplugged vif VIFOpenVSwitch(active=True,address=fa:16:3e:2c:af:96,bridge_name='br-int',has_traffic_filtering=True,id=e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6,network=Network(3563b4a1-477a-44a0-b01f-7d19d49c0308),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tape00931c0-3d')
Oct 11 02:55:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:49.649 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[928d5ef4-4bd5-4070-bd1b-36e8082abe15]: (4, ({'family': 2, 'prefixlen': 32, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '169.254.169.254'], ['IFA_LOCAL', '169.254.169.254'], ['IFA_BROADCAST', '169.254.169.254'], ['IFA_LABEL', 'tap3563b4a1-41'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 694243, 'tstamp': 694243}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 479830, 'error': None, 'target': 'ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'}, {'family': 2, 'prefixlen': 16, 'flags': 128, 'scope': 0, 'index': 2, 'attrs': [['IFA_ADDRESS', '10.100.0.2'], ['IFA_LOCAL', '10.100.0.2'], ['IFA_BROADCAST', '10.100.255.255'], ['IFA_LABEL', 'tap3563b4a1-41'], ['IFA_FLAGS', 128], ['IFA_CACHEINFO', {'ifa_preferred': 4294967295, 'ifa_valid': 4294967295, 'cstamp': 694248, 'tstamp': 694248}]], 'header': {'length': 96, 'type': 20, 'flags': 2, 'sequence_number': 255, 'pid': 479830, 'error': None, 'target': 'ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308', 'stats': (0, 0, 0)}, 'event': 'RTM_NEWADDR'})) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:55:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:49.651 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tap3563b4a1-40, bridge=br-ex, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:55:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:49.654 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): AddPortCommand(_result=None, bridge=br-int, port=tap3563b4a1-40, may_exist=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:55:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:49.654 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:55:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:49.655 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Interface, record=tap3563b4a1-40, col_values=(('external_ids', {'iface-id': 'bd6ddb48-868e-41a0-8ff2-0f3a1a9b4d81'}),)) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:55:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:49.655 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Transaction caused no change do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:129
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.663 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.701 2 DEBUG nova.compute.manager [req-f995b33e-5695-4054-a0bd-b7ee22b0da8d req-eaa6a3fc-e256-434c-a2fa-9f301e43bbab 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Received event network-vif-unplugged-e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.702 2 DEBUG oslo_concurrency.lockutils [req-f995b33e-5695-4054-a0bd-b7ee22b0da8d req-eaa6a3fc-e256-434c-a2fa-9f301e43bbab 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.702 2 DEBUG oslo_concurrency.lockutils [req-f995b33e-5695-4054-a0bd-b7ee22b0da8d req-eaa6a3fc-e256-434c-a2fa-9f301e43bbab 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.702 2 DEBUG oslo_concurrency.lockutils [req-f995b33e-5695-4054-a0bd-b7ee22b0da8d req-eaa6a3fc-e256-434c-a2fa-9f301e43bbab 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.703 2 DEBUG nova.compute.manager [req-f995b33e-5695-4054-a0bd-b7ee22b0da8d req-eaa6a3fc-e256-434c-a2fa-9f301e43bbab 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] No waiting events found dispatching network-vif-unplugged-e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.704 2 DEBUG nova.compute.manager [req-f995b33e-5695-4054-a0bd-b7ee22b0da8d req-eaa6a3fc-e256-434c-a2fa-9f301e43bbab 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Received event network-vif-unplugged-e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 for instance with task_state deleting. _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10826
Oct 11 02:55:49 compute-0 podman[479868]: 2025-10-11 02:55:49.88327972 +0000 UTC m=+0.067648163 container create 204c0946a1012cc10e74587bb8cd867de90187f8d2afe238372a7162f8edc1bf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_ride, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2)
Oct 11 02:55:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:49.909 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=17, options={'arp_ns_explicit_output': 'true', 'mac_prefix': 'fe:55:97', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'ce:9c:4f:b4:85:9b'}, ipsec=False) old=SB_Global(nb_cfg=16) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:55:49 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:49.910 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 7 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274
Oct 11 02:55:49 compute-0 nova_compute[356901]: 2025-10-11 02:55:49.912 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:49 compute-0 podman[479868]: 2025-10-11 02:55:49.858540777 +0000 UTC m=+0.042909240 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:55:49 compute-0 systemd[1]: Started libpod-conmon-204c0946a1012cc10e74587bb8cd867de90187f8d2afe238372a7162f8edc1bf.scope.
Oct 11 02:55:50 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:55:50 compute-0 podman[479868]: 2025-10-11 02:55:50.022492976 +0000 UTC m=+0.206861439 container init 204c0946a1012cc10e74587bb8cd867de90187f8d2afe238372a7162f8edc1bf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_ride, org.label-schema.build-date=20250507, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:55:50 compute-0 podman[479868]: 2025-10-11 02:55:50.039520834 +0000 UTC m=+0.223889317 container start 204c0946a1012cc10e74587bb8cd867de90187f8d2afe238372a7162f8edc1bf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_ride, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3)
Oct 11 02:55:50 compute-0 podman[479868]: 2025-10-11 02:55:50.045796124 +0000 UTC m=+0.230164587 container attach 204c0946a1012cc10e74587bb8cd867de90187f8d2afe238372a7162f8edc1bf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_ride, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, ceph=True)
Oct 11 02:55:50 compute-0 gallant_ride[479884]: 167 167
Oct 11 02:55:50 compute-0 systemd[1]: libpod-204c0946a1012cc10e74587bb8cd867de90187f8d2afe238372a7162f8edc1bf.scope: Deactivated successfully.
Oct 11 02:55:50 compute-0 conmon[479884]: conmon 204c0946a1012cc10e74 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-204c0946a1012cc10e74587bb8cd867de90187f8d2afe238372a7162f8edc1bf.scope/container/memory.events
Oct 11 02:55:50 compute-0 podman[479890]: 2025-10-11 02:55:50.148448436 +0000 UTC m=+0.063843639 container died 204c0946a1012cc10e74587bb8cd867de90187f8d2afe238372a7162f8edc1bf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_ride, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:55:50 compute-0 systemd[1]: var-lib-containers-storage-overlay-438c4a83d484d8a0546e00e91b370b127af24f34f04f5a7ce12b93130245e452-merged.mount: Deactivated successfully.
Oct 11 02:55:50 compute-0 podman[479890]: 2025-10-11 02:55:50.214823023 +0000 UTC m=+0.130218206 container remove 204c0946a1012cc10e74587bb8cd867de90187f8d2afe238372a7162f8edc1bf (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_ride, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True)
Oct 11 02:55:50 compute-0 systemd[1]: libpod-conmon-204c0946a1012cc10e74587bb8cd867de90187f8d2afe238372a7162f8edc1bf.scope: Deactivated successfully.
Oct 11 02:55:50 compute-0 nova_compute[356901]: 2025-10-11 02:55:50.339 2 INFO nova.virt.libvirt.driver [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Deleting instance files /var/lib/nova/instances/8422017b-c868-4ba2-ab1f-61d3668ca145_del
Oct 11 02:55:50 compute-0 nova_compute[356901]: 2025-10-11 02:55:50.341 2 INFO nova.virt.libvirt.driver [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Deletion of /var/lib/nova/instances/8422017b-c868-4ba2-ab1f-61d3668ca145_del complete
Oct 11 02:55:50 compute-0 nova_compute[356901]: 2025-10-11 02:55:50.399 2 INFO nova.compute.manager [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Took 1.04 seconds to destroy the instance on the hypervisor.
Oct 11 02:55:50 compute-0 nova_compute[356901]: 2025-10-11 02:55:50.400 2 DEBUG oslo.service.loopingcall [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Waiting for function nova.compute.manager.ComputeManager._try_deallocate_network.<locals>._deallocate_network_with_retries to return. func /usr/lib/python3.9/site-packages/oslo_service/loopingcall.py:435
Oct 11 02:55:50 compute-0 nova_compute[356901]: 2025-10-11 02:55:50.401 2 DEBUG nova.compute.manager [-] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Deallocating network for instance _deallocate_network /usr/lib/python3.9/site-packages/nova/compute/manager.py:2259
Oct 11 02:55:50 compute-0 nova_compute[356901]: 2025-10-11 02:55:50.401 2 DEBUG nova.network.neutron [-] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] deallocate_for_instance() deallocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1803
Oct 11 02:55:50 compute-0 podman[479912]: 2025-10-11 02:55:50.515056261 +0000 UTC m=+0.077174740 container create 06feb23a65dc86fda0ede3a2e13ef784fa8d8e2b9157b2ac7ecbc8e0643c7620 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_yalow, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 02:55:50 compute-0 systemd[1]: Started libpod-conmon-06feb23a65dc86fda0ede3a2e13ef784fa8d8e2b9157b2ac7ecbc8e0643c7620.scope.
Oct 11 02:55:50 compute-0 podman[479912]: 2025-10-11 02:55:50.492084476 +0000 UTC m=+0.054202965 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:55:50 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:55:50 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/be978647b54c43ae2aad18ef20d25e0962da40acafada903b43077bb94bbe675/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:55:50 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/be978647b54c43ae2aad18ef20d25e0962da40acafada903b43077bb94bbe675/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:55:50 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/be978647b54c43ae2aad18ef20d25e0962da40acafada903b43077bb94bbe675/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:55:50 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/be978647b54c43ae2aad18ef20d25e0962da40acafada903b43077bb94bbe675/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:55:50 compute-0 podman[479912]: 2025-10-11 02:55:50.647453651 +0000 UTC m=+0.209572170 container init 06feb23a65dc86fda0ede3a2e13ef784fa8d8e2b9157b2ac7ecbc8e0643c7620 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_yalow, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS)
Oct 11 02:55:50 compute-0 podman[479912]: 2025-10-11 02:55:50.662916605 +0000 UTC m=+0.225035094 container start 06feb23a65dc86fda0ede3a2e13ef784fa8d8e2b9157b2ac7ecbc8e0643c7620 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_yalow, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:55:50 compute-0 podman[479912]: 2025-10-11 02:55:50.671001175 +0000 UTC m=+0.233119694 container attach 06feb23a65dc86fda0ede3a2e13ef784fa8d8e2b9157b2ac7ecbc8e0643c7620 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_yalow, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:55:50 compute-0 nova_compute[356901]: 2025-10-11 02:55:50.671 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:50 compute-0 ceph-mon[191930]: pgmap v2347: 321 pgs: 321 active+clean; 298 MiB data, 426 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:55:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2348: 321 pgs: 321 active+clean; 248 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 9.2 KiB/s rd, 852 B/s wr, 13 op/s
Oct 11 02:55:51 compute-0 nova_compute[356901]: 2025-10-11 02:55:51.466 2 DEBUG nova.network.neutron [-] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Updating instance_info_cache with network_info: [] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:55:51 compute-0 nova_compute[356901]: 2025-10-11 02:55:51.487 2 INFO nova.compute.manager [-] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Took 1.09 seconds to deallocate network for instance.
Oct 11 02:55:51 compute-0 nova_compute[356901]: 2025-10-11 02:55:51.522 2 DEBUG nova.compute.manager [req-6531bac8-b194-48bc-ad59-7ac9bcf6bedf req-f3bdd04c-3469-419b-b823-4aef79e38d1b 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Received event network-vif-deleted-e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:55:51 compute-0 nova_compute[356901]: 2025-10-11 02:55:51.537 2 DEBUG oslo_concurrency.lockutils [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.update_usage" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:55:51 compute-0 nova_compute[356901]: 2025-10-11 02:55:51.537 2 DEBUG oslo_concurrency.lockutils [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:55:51 compute-0 nova_compute[356901]: 2025-10-11 02:55:51.630 2 DEBUG oslo_concurrency.processutils [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:55:51 compute-0 nova_compute[356901]: 2025-10-11 02:55:51.765 2 DEBUG nova.compute.manager [req-a746b143-ada1-401a-a786-10e80faa20b2 req-8aaaa79b-bbb0-4551-a90a-0b0b7bec6355 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Received event network-vif-plugged-e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:55:51 compute-0 nova_compute[356901]: 2025-10-11 02:55:51.766 2 DEBUG oslo_concurrency.lockutils [req-a746b143-ada1-401a-a786-10e80faa20b2 req-8aaaa79b-bbb0-4551-a90a-0b0b7bec6355 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:55:51 compute-0 nova_compute[356901]: 2025-10-11 02:55:51.766 2 DEBUG oslo_concurrency.lockutils [req-a746b143-ada1-401a-a786-10e80faa20b2 req-8aaaa79b-bbb0-4551-a90a-0b0b7bec6355 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:55:51 compute-0 nova_compute[356901]: 2025-10-11 02:55:51.766 2 DEBUG oslo_concurrency.lockutils [req-a746b143-ada1-401a-a786-10e80faa20b2 req-8aaaa79b-bbb0-4551-a90a-0b0b7bec6355 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:55:51 compute-0 nova_compute[356901]: 2025-10-11 02:55:51.766 2 DEBUG nova.compute.manager [req-a746b143-ada1-401a-a786-10e80faa20b2 req-8aaaa79b-bbb0-4551-a90a-0b0b7bec6355 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] No waiting events found dispatching network-vif-plugged-e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:55:51 compute-0 nova_compute[356901]: 2025-10-11 02:55:51.767 2 WARNING nova.compute.manager [req-a746b143-ada1-401a-a786-10e80faa20b2 req-8aaaa79b-bbb0-4551-a90a-0b0b7bec6355 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Received unexpected event network-vif-plugged-e00931c0-3dd7-422f-a0d5-21fcf5bf1ee6 for instance with vm_state deleted and task_state None.
Oct 11 02:55:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:55:51 compute-0 silly_yalow[479927]: {
Oct 11 02:55:51 compute-0 silly_yalow[479927]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:55:51 compute-0 silly_yalow[479927]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:55:51 compute-0 silly_yalow[479927]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:55:51 compute-0 silly_yalow[479927]:         "osd_id": 1,
Oct 11 02:55:51 compute-0 silly_yalow[479927]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:55:51 compute-0 silly_yalow[479927]:         "type": "bluestore"
Oct 11 02:55:51 compute-0 silly_yalow[479927]:     },
Oct 11 02:55:51 compute-0 silly_yalow[479927]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:55:51 compute-0 silly_yalow[479927]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:55:51 compute-0 silly_yalow[479927]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:55:51 compute-0 silly_yalow[479927]:         "osd_id": 2,
Oct 11 02:55:51 compute-0 silly_yalow[479927]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:55:51 compute-0 silly_yalow[479927]:         "type": "bluestore"
Oct 11 02:55:51 compute-0 silly_yalow[479927]:     },
Oct 11 02:55:51 compute-0 silly_yalow[479927]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:55:51 compute-0 silly_yalow[479927]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:55:51 compute-0 silly_yalow[479927]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:55:51 compute-0 silly_yalow[479927]:         "osd_id": 0,
Oct 11 02:55:51 compute-0 silly_yalow[479927]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:55:51 compute-0 silly_yalow[479927]:         "type": "bluestore"
Oct 11 02:55:51 compute-0 silly_yalow[479927]:     }
Oct 11 02:55:51 compute-0 silly_yalow[479927]: }
Oct 11 02:55:51 compute-0 systemd[1]: libpod-06feb23a65dc86fda0ede3a2e13ef784fa8d8e2b9157b2ac7ecbc8e0643c7620.scope: Deactivated successfully.
Oct 11 02:55:51 compute-0 systemd[1]: libpod-06feb23a65dc86fda0ede3a2e13ef784fa8d8e2b9157b2ac7ecbc8e0643c7620.scope: Consumed 1.229s CPU time.
Oct 11 02:55:51 compute-0 podman[479912]: 2025-10-11 02:55:51.902521395 +0000 UTC m=+1.464639884 container died 06feb23a65dc86fda0ede3a2e13ef784fa8d8e2b9157b2ac7ecbc8e0643c7620 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_yalow, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:55:51 compute-0 systemd[1]: var-lib-containers-storage-overlay-be978647b54c43ae2aad18ef20d25e0962da40acafada903b43077bb94bbe675-merged.mount: Deactivated successfully.
Oct 11 02:55:51 compute-0 podman[479912]: 2025-10-11 02:55:51.974280322 +0000 UTC m=+1.536398801 container remove 06feb23a65dc86fda0ede3a2e13ef784fa8d8e2b9157b2ac7ecbc8e0643c7620 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_yalow, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:55:51 compute-0 systemd[1]: libpod-conmon-06feb23a65dc86fda0ede3a2e13ef784fa8d8e2b9157b2ac7ecbc8e0643c7620.scope: Deactivated successfully.
Oct 11 02:55:52 compute-0 sudo[479762]: pam_unix(sudo:session): session closed for user root
Oct 11 02:55:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:55:52 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:55:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:55:52 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:55:52 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev aabe415b-b3c8-4ed4-b27d-e49de5179614 does not exist
Oct 11 02:55:52 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 6987288a-879b-4ed0-8975-b74a71c96fde does not exist
Oct 11 02:55:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:55:52 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/615181269' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:55:52 compute-0 nova_compute[356901]: 2025-10-11 02:55:52.110 2 DEBUG oslo_concurrency.processutils [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.480s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:55:52 compute-0 sudo[479992]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:55:52 compute-0 nova_compute[356901]: 2025-10-11 02:55:52.121 2 DEBUG nova.compute.provider_tree [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:55:52 compute-0 sudo[479992]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:55:52 compute-0 sudo[479992]: pam_unix(sudo:session): session closed for user root
Oct 11 02:55:52 compute-0 nova_compute[356901]: 2025-10-11 02:55:52.137 2 DEBUG nova.scheduler.client.report [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:55:52 compute-0 nova_compute[356901]: 2025-10-11 02:55:52.158 2 DEBUG oslo_concurrency.lockutils [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: held 0.621s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:55:52 compute-0 nova_compute[356901]: 2025-10-11 02:55:52.186 2 INFO nova.scheduler.client.report [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Deleted allocations for instance 8422017b-c868-4ba2-ab1f-61d3668ca145
Oct 11 02:55:52 compute-0 sudo[480019]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:55:52 compute-0 sudo[480019]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:55:52 compute-0 sudo[480019]: pam_unix(sudo:session): session closed for user root
Oct 11 02:55:52 compute-0 nova_compute[356901]: 2025-10-11 02:55:52.256 2 DEBUG oslo_concurrency.lockutils [None req-56bad3c2-5b53-4268-9fc0-06c2e5606e1d f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "8422017b-c868-4ba2-ab1f-61d3668ca145" "released" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: held 2.905s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:55:53 compute-0 ceph-mon[191930]: pgmap v2348: 321 pgs: 321 active+clean; 248 MiB data, 397 MiB used, 60 GiB / 60 GiB avail; 9.2 KiB/s rd, 852 B/s wr, 13 op/s
Oct 11 02:55:53 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:55:53 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:55:53 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/615181269' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:55:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2349: 321 pgs: 321 active+clean; 239 MiB data, 394 MiB used, 60 GiB / 60 GiB avail; 18 KiB/s rd, 1.2 KiB/s wr, 25 op/s
Oct 11 02:55:54 compute-0 nova_compute[356901]: 2025-10-11 02:55:54.631 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:54.889 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:54.890 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:55:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:54.892 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:55:55 compute-0 ceph-mon[191930]: pgmap v2349: 321 pgs: 321 active+clean; 239 MiB data, 394 MiB used, 60 GiB / 60 GiB avail; 18 KiB/s rd, 1.2 KiB/s wr, 25 op/s
Oct 11 02:55:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2350: 321 pgs: 321 active+clean; 218 MiB data, 382 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 1.2 KiB/s wr, 27 op/s
Oct 11 02:55:55 compute-0 nova_compute[356901]: 2025-10-11 02:55:55.675 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:56 compute-0 podman[480044]: 2025-10-11 02:55:56.25945623 +0000 UTC m=+0.137038382 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, container_name=multipathd, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_id=multipathd, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']})
Oct 11 02:55:56 compute-0 podman[480045]: 2025-10-11 02:55:56.26199811 +0000 UTC m=+0.136986458 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid, container_name=iscsid, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']})
Oct 11 02:55:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:55:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:55:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:55:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:55:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:55:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:55:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:55:56
Oct 11 02:55:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:55:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:55:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.meta', 'cephfs.cephfs.meta', 'vms', 'images', 'cephfs.cephfs.data', '.rgw.root', 'default.rgw.control', 'default.rgw.log', '.mgr', 'backups', 'volumes']
Oct 11 02:55:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:55:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:55:56 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:55:56.913 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '17'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:55:57 compute-0 ceph-mon[191930]: pgmap v2350: 321 pgs: 321 active+clean; 218 MiB data, 382 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 1.2 KiB/s wr, 27 op/s
Oct 11 02:55:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2351: 321 pgs: 321 active+clean; 218 MiB data, 382 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 1.2 KiB/s wr, 27 op/s
Oct 11 02:55:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:55:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:55:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:55:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:55:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:55:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:55:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:55:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:55:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:55:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:55:59 compute-0 ceph-mon[191930]: pgmap v2351: 321 pgs: 321 active+clean; 218 MiB data, 382 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 1.2 KiB/s wr, 27 op/s
Oct 11 02:55:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2352: 321 pgs: 321 active+clean; 218 MiB data, 382 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 1.2 KiB/s wr, 27 op/s
Oct 11 02:55:59 compute-0 nova_compute[356901]: 2025-10-11 02:55:59.635 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:55:59 compute-0 podman[157119]: time="2025-10-11T02:55:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:55:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:55:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47501 "" "Go-http-client/1.1"
Oct 11 02:55:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:55:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9561 "" "Go-http-client/1.1"
Oct 11 02:56:00 compute-0 nova_compute[356901]: 2025-10-11 02:56:00.679 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:01 compute-0 ceph-mon[191930]: pgmap v2352: 321 pgs: 321 active+clean; 218 MiB data, 382 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 1.2 KiB/s wr, 27 op/s
Oct 11 02:56:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2353: 321 pgs: 321 active+clean; 218 MiB data, 382 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 1.2 KiB/s wr, 27 op/s
Oct 11 02:56:01 compute-0 openstack_network_exporter[374316]: ERROR   02:56:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:56:01 compute-0 openstack_network_exporter[374316]: ERROR   02:56:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:56:01 compute-0 openstack_network_exporter[374316]: ERROR   02:56:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:56:01 compute-0 openstack_network_exporter[374316]: ERROR   02:56:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:56:01 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:56:01 compute-0 openstack_network_exporter[374316]: ERROR   02:56:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:56:01 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:56:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.171 2 DEBUG oslo_concurrency.lockutils [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.172 2 DEBUG oslo_concurrency.lockutils [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f" acquired by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.173 2 DEBUG oslo_concurrency.lockutils [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.174 2 DEBUG oslo_concurrency.lockutils [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" acquired by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.175 2 DEBUG oslo_concurrency.lockutils [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" "released" by "nova.compute.manager.InstanceEvents.clear_events_for_instance.<locals>._clear_events" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.177 2 INFO nova.compute.manager [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Terminating instance
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.179 2 DEBUG nova.compute.manager [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Start destroying the instance on the hypervisor. _shutdown_instance /usr/lib/python3.9/site-packages/nova/compute/manager.py:3120
Oct 11 02:56:02 compute-0 kernel: tap0c37c119-66 (unregistering): left promiscuous mode
Oct 11 02:56:02 compute-0 NetworkManager[44908]: <info>  [1760151362.3128] device (tap0c37c119-66): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Oct 11 02:56:02 compute-0 ovn_controller[88370]: 2025-10-11T02:56:02Z|00191|binding|INFO|Releasing lport 0c37c119-6647-42bb-a22f-ca741242ef30 from this chassis (sb_readonly=0)
Oct 11 02:56:02 compute-0 ovn_controller[88370]: 2025-10-11T02:56:02Z|00192|binding|INFO|Setting lport 0c37c119-6647-42bb-a22f-ca741242ef30 down in Southbound
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.338 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:02 compute-0 ovn_controller[88370]: 2025-10-11T02:56:02Z|00193|binding|INFO|Removing iface tap0c37c119-66 ovn-installed in OVS
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.343 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.373 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:02 compute-0 systemd[1]: machine-qemu\x2d16\x2dinstance\x2d0000000f.scope: Deactivated successfully.
Oct 11 02:56:02 compute-0 systemd[1]: machine-qemu\x2d16\x2dinstance\x2d0000000f.scope: Consumed 6min 48.912s CPU time.
Oct 11 02:56:02 compute-0 systemd-machined[137586]: Machine qemu-16-instance-0000000f terminated.
Oct 11 02:56:02 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:56:02.437 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: PortBindingUpdatedEvent(events=('update',), table='Port_Binding', conditions=None, old_conditions=None), priority=20 to row=Port_Binding(mac=['fa:16:3e:ee:94:7e 10.100.2.253'], port_security=['fa:16:3e:ee:94:7e 10.100.2.253'], type=, nat_addresses=[], virtual_parent=[], up=[False], options={'requested-chassis': 'compute-0.ctlplane.example.com'}, parent_port=[], requested_additional_chassis=[], ha_chassis_group=[], external_ids={'neutron:cidrs': '10.100.2.253/16', 'neutron:device_id': 'f98d09d7-6aa0-4405-bfa0-be1f78d3911f', 'neutron:device_owner': 'compute:nova', 'neutron:mtu': '', 'neutron:network_name': 'neutron-3563b4a1-477a-44a0-b01f-7d19d49c0308', 'neutron:port_capabilities': '', 'neutron:port_name': '', 'neutron:project_id': 'a05bbc8f872d4dd99972d2cb8136d608', 'neutron:revision_number': '4', 'neutron:security_group_ids': 'd961c453-0bcb-43ec-b528-5018786739ee', 'neutron:subnet_pool_addr_scope4': '', 'neutron:subnet_pool_addr_scope6': '', 'neutron:vnic_type': 'normal', 'neutron:host_id': 'compute-0.ctlplane.example.com'}, additional_chassis=[], tag=[], additional_encap=[], encap=[], mirror_rules=[], datapath=4674209d-30ab-42f4-9114-728458c302a8, chassis=[], tunnel_key=3, gateway_chassis=[], requested_chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>], logical_port=0c37c119-6647-42bb-a22f-ca741242ef30) old=Port_Binding(up=[True], chassis=[<ovs.db.idl.Row object at 0x7f7f71082fd0>]) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 02:56:02 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:56:02.440 286362 INFO neutron.agent.ovn.metadata.agent [-] Port 0c37c119-6647-42bb-a22f-ca741242ef30 in datapath 3563b4a1-477a-44a0-b01f-7d19d49c0308 unbound from our chassis
Oct 11 02:56:02 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:56:02.442 286362 DEBUG neutron.agent.ovn.metadata.agent [-] No valid VIF ports were found for network 3563b4a1-477a-44a0-b01f-7d19d49c0308, tearing the namespace down if needed _get_provision_params /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:628
Oct 11 02:56:02 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:56:02.445 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[b4cea401-e25e-4534-9bf6-c7f6ed0546d4]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:56:02 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:56:02.446 286362 INFO neutron.agent.ovn.metadata.agent [-] Cleaning up ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308 namespace which is not needed anymore
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.623 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.637 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.644 2 INFO nova.virt.libvirt.driver [-] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Instance destroyed successfully.
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.646 2 DEBUG nova.objects.instance [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lazy-loading 'resources' on Instance uuid f98d09d7-6aa0-4405-bfa0-be1f78d3911f obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.685 2 DEBUG nova.virt.libvirt.vif [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] vif_type=ovs instance=Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=False,availability_zone='nova',cell_name=None,cleaned=False,config_drive='True',created_at=2025-10-11T02:45:51Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,device_metadata=<?>,disable_terminate=False,display_description=None,display_name='te-0512306-asg-am4iabdjybzp-yj44h76hdzhi-bejrsw3xgi4q',ec2_ids=<?>,ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,flavor=Flavor(3),hidden=False,host='compute-0.ctlplane.example.com',hostname='te-0512306-asg-am4iabdjybzp-yj44h76hdzhi-bejrsw3xgi4q',id=15,image_ref='2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c',info_cache=InstanceInfoCache,instance_type_id=3,kernel_id='',key_data=None,key_name=None,keypairs=<?>,launch_index=0,launched_at=2025-10-11T02:46:03Z,launched_on='compute-0.ctlplane.example.com',locked=False,locked_by=None,memory_mb=128,metadata={metering.server_group='44c4fdb3-6cdb-42b8-903d-5a2c79f0da20'},migration_context=<?>,new_flavor=None,node='compute-0.ctlplane.example.com',numa_topology=None,old_flavor=None,os_type=None,pci_devices=<?>,pci_requests=<?>,power_state=1,progress=0,project_id='a05bbc8f872d4dd99972d2cb8136d608',ramdisk_id='',reservation_id='r-15xf1e0g',resources=None,root_device_name='/dev/vda',root_gb=1,security_groups=SecurityGroupList,services=<?>,shutdown_terminate=False,system_metadata={boot_roles='member,reader',image_base_image_ref='2d5a0a43-33dc-47bc-aa6f-9d076f91ec6c',image_container_format='bare',image_disk_format='qcow2',image_hw_cdrom_bus='sata',image_hw_disk_bus='virtio',image_hw_input_bus='usb',image_hw_machine_type='q35',image_hw_pointer_model='usbtablet',image_hw_video_model='virtio',image_hw_vif_model='virtio',image_min_disk='1',image_min_ram='0',owner_project_name='tempest-PrometheusGabbiTest-674022988',owner_user_name='tempest-PrometheusGabbiTest-674022988-project-member'},tags=<?>,task_state='deleting',terminated_at=None,trusted_certs=<?>,updated_at=2025-10-11T02:46:03Z,user_data='IyEvYmluL3NoCmVjaG8gJ0xvYWRpbmcgQ1BVJwpzZXQgLXYKY2F0IC9kZXYvdXJhbmRvbSA+IC9kZXYvbnVsbCAmIHNsZWVwIDMwMCA7IGtpbGwgJCEgCg==',user_id='f66a606299944d53a40f21e81c791d70',uuid=f98d09d7-6aa0-4405-bfa0-be1f78d3911f,vcpu_model=<?>,vcpus=1,vm_mode=None,vm_state='active') vif={"id": "0c37c119-6647-42bb-a22f-ca741242ef30", "address": "fa:16:3e:ee:94:7e", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.2.253", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap0c37c119-66", "ovs_interfaceid": "0c37c119-6647-42bb-a22f-ca741242ef30", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} unplug /usr/lib/python3.9/site-packages/nova/virt/libvirt/vif.py:828
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.687 2 DEBUG nova.network.os_vif_util [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Converting VIF {"id": "0c37c119-6647-42bb-a22f-ca741242ef30", "address": "fa:16:3e:ee:94:7e", "network": {"id": "3563b4a1-477a-44a0-b01f-7d19d49c0308", "bridge": "br-int", "label": "", "subnets": [{"cidr": "10.100.0.0/16", "dns": [], "gateway": {"address": "10.100.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "10.100.2.253", "type": "fixed", "version": 4, "meta": {}, "floating_ips": []}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "a05bbc8f872d4dd99972d2cb8136d608", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap0c37c119-66", "ovs_interfaceid": "0c37c119-6647-42bb-a22f-ca741242ef30", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}} nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:511
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.690 2 DEBUG nova.network.os_vif_util [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Converted object VIFOpenVSwitch(active=True,address=fa:16:3e:ee:94:7e,bridge_name='br-int',has_traffic_filtering=True,id=0c37c119-6647-42bb-a22f-ca741242ef30,network=Network(3563b4a1-477a-44a0-b01f-7d19d49c0308),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap0c37c119-66') nova_to_osvif_vif /usr/lib/python3.9/site-packages/nova/network/os_vif_util.py:548
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.692 2 DEBUG os_vif [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Unplugging vif VIFOpenVSwitch(active=True,address=fa:16:3e:ee:94:7e,bridge_name='br-int',has_traffic_filtering=True,id=0c37c119-6647-42bb-a22f-ca741242ef30,network=Network(3563b4a1-477a-44a0-b01f-7d19d49c0308),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap0c37c119-66') unplug /usr/lib/python3.9/site-packages/os_vif/__init__.py:109
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.702 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 20 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.704 2 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tap0c37c119-66, bridge=br-int, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.707 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.711 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 0-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.714 2 INFO os_vif [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Successfully unplugged vif VIFOpenVSwitch(active=True,address=fa:16:3e:ee:94:7e,bridge_name='br-int',has_traffic_filtering=True,id=0c37c119-6647-42bb-a22f-ca741242ef30,network=Network(3563b4a1-477a-44a0-b01f-7d19d49c0308),plugin='ovs',port_profile=VIFPortProfileOpenVSwitch,preserve_on_delete=False,vif_name='tap0c37c119-66')
Oct 11 02:56:02 compute-0 neutron-haproxy-ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308[461211]: [NOTICE]   (461218) : haproxy version is 2.8.14-c23fe91
Oct 11 02:56:02 compute-0 neutron-haproxy-ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308[461211]: [NOTICE]   (461218) : path to executable is /usr/sbin/haproxy
Oct 11 02:56:02 compute-0 neutron-haproxy-ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308[461211]: [WARNING]  (461218) : Exiting Master process...
Oct 11 02:56:02 compute-0 neutron-haproxy-ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308[461211]: [ALERT]    (461218) : Current worker (461220) exited with code 143 (Terminated)
Oct 11 02:56:02 compute-0 neutron-haproxy-ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308[461211]: [WARNING]  (461218) : All workers exited. Exiting... (0)
Oct 11 02:56:02 compute-0 systemd[1]: libpod-85289bc97b2aceb105bba40f2058284d6368e766d08e49b3131f65d5f6797ebc.scope: Deactivated successfully.
Oct 11 02:56:02 compute-0 podman[480111]: 2025-10-11 02:56:02.753024334 +0000 UTC m=+0.105631477 container died 85289bc97b2aceb105bba40f2058284d6368e766d08e49b3131f65d5f6797ebc (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.779 2 DEBUG nova.compute.manager [req-90a1ef4e-76d7-482b-8d6d-e33ee413b1a7 req-dfe8d3e2-5396-4e27-8ed2-520f08e36d0b 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Received event network-vif-unplugged-0c37c119-6647-42bb-a22f-ca741242ef30 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.780 2 DEBUG oslo_concurrency.lockutils [req-90a1ef4e-76d7-482b-8d6d-e33ee413b1a7 req-dfe8d3e2-5396-4e27-8ed2-520f08e36d0b 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.780 2 DEBUG oslo_concurrency.lockutils [req-90a1ef4e-76d7-482b-8d6d-e33ee413b1a7 req-dfe8d3e2-5396-4e27-8ed2-520f08e36d0b 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.781 2 DEBUG oslo_concurrency.lockutils [req-90a1ef4e-76d7-482b-8d6d-e33ee413b1a7 req-dfe8d3e2-5396-4e27-8ed2-520f08e36d0b 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.781 2 DEBUG nova.compute.manager [req-90a1ef4e-76d7-482b-8d6d-e33ee413b1a7 req-dfe8d3e2-5396-4e27-8ed2-520f08e36d0b 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] No waiting events found dispatching network-vif-unplugged-0c37c119-6647-42bb-a22f-ca741242ef30 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.781 2 DEBUG nova.compute.manager [req-90a1ef4e-76d7-482b-8d6d-e33ee413b1a7 req-dfe8d3e2-5396-4e27-8ed2-520f08e36d0b 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Received event network-vif-unplugged-0c37c119-6647-42bb-a22f-ca741242ef30 for instance with task_state deleting. _process_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:10826
Oct 11 02:56:02 compute-0 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-85289bc97b2aceb105bba40f2058284d6368e766d08e49b3131f65d5f6797ebc-userdata-shm.mount: Deactivated successfully.
Oct 11 02:56:02 compute-0 systemd[1]: var-lib-containers-storage-overlay-140f2c131d4939ae634803745ebdaf1b53146f7b51faed524507f0a6cf3bea3d-merged.mount: Deactivated successfully.
Oct 11 02:56:02 compute-0 podman[480111]: 2025-10-11 02:56:02.832508942 +0000 UTC m=+0.185116086 container cleanup 85289bc97b2aceb105bba40f2058284d6368e766d08e49b3131f65d5f6797ebc (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.schema-version=1.0)
Oct 11 02:56:02 compute-0 systemd[1]: libpod-conmon-85289bc97b2aceb105bba40f2058284d6368e766d08e49b3131f65d5f6797ebc.scope: Deactivated successfully.
Oct 11 02:56:02 compute-0 podman[480165]: 2025-10-11 02:56:02.960051402 +0000 UTC m=+0.078953641 container remove 85289bc97b2aceb105bba40f2058284d6368e766d08e49b3131f65d5f6797ebc (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:56:02 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:56:02.971 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[6da6bb5f-5e90-4ca8-b187-529fa870ce43]: (4, ('Sat Oct 11 02:56:02 AM UTC 2025 Stopping container neutron-haproxy-ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308 (85289bc97b2aceb105bba40f2058284d6368e766d08e49b3131f65d5f6797ebc)\n85289bc97b2aceb105bba40f2058284d6368e766d08e49b3131f65d5f6797ebc\nSat Oct 11 02:56:02 AM UTC 2025 Deleting container neutron-haproxy-ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308 (85289bc97b2aceb105bba40f2058284d6368e766d08e49b3131f65d5f6797ebc)\n85289bc97b2aceb105bba40f2058284d6368e766d08e49b3131f65d5f6797ebc\n', '', 0)) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:56:02 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:56:02.976 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[eaaf34ee-bc74-4fb7-8328-236e78df15c1]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:56:02 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:56:02.979 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DelPortCommand(_result=None, port=tap3563b4a1-40, bridge=None, if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
Oct 11 02:56:02 compute-0 nova_compute[356901]: 2025-10-11 02:56:02.981 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:03 compute-0 nova_compute[356901]: 2025-10-11 02:56:03.008 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:03 compute-0 kernel: tap3563b4a1-40: left promiscuous mode
Oct 11 02:56:03 compute-0 nova_compute[356901]: 2025-10-11 02:56:03.012 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:03 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:56:03.018 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[4fcb63be-b949-4c90-83a0-5e809b3eb432]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:56:03 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:56:03.036 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[ccccd64e-c184-4878-9b21-4c60d9131f19]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:56:03 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:56:03.037 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[4cfe42cb-ef99-4f30-8521-92c6ce464f73]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:56:03 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:56:03.065 422955 DEBUG oslo.privsep.daemon [-] privsep: reply[343c0816-8de7-4c65-9f72-3c8cefc377ff]: (4, [{'family': 0, '__align': (), 'ifi_type': 772, 'index': 1, 'flags': 65609, 'change': 0, 'attrs': [['IFLA_IFNAME', 'lo'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UNKNOWN'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 65536], ['IFLA_MIN_MTU', 0], ['IFLA_MAX_MTU', 0], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 1], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 1], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 0], ['IFLA_CARRIER_UP_COUNT', 0], ['IFLA_CARRIER_DOWN_COUNT', 0], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', '00:00:00:00:00:00'], ['IFLA_BROADCAST', '00:00:00:00:00:00'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 1, 'nopolicy': 1, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 694218, 'reachable_time': 36967, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 65536, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 4294967295, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 4294967295, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 0, 'inoctets': 0, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 0, 'outoctets': 0, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 0, 'outmcastpkts': 0, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 0, 'outmcastoctets': 0, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 0, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 0, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1404, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 480179, 'error': None, 'target': 'ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:56:03 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:56:03.068 286647 DEBUG neutron.privileged.agent.linux.ip_lib [-] Namespace ovnmeta-3563b4a1-477a-44a0-b01f-7d19d49c0308 deleted. remove_netns /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:607
Oct 11 02:56:03 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:56:03.068 286647 DEBUG oslo.privsep.daemon [-] privsep: reply[f73e1dcd-4b19-47ca-8702-8d8402e6d438]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501
Oct 11 02:56:03 compute-0 systemd[1]: run-netns-ovnmeta\x2d3563b4a1\x2d477a\x2d44a0\x2db01f\x2d7d19d49c0308.mount: Deactivated successfully.
Oct 11 02:56:03 compute-0 ceph-mon[191930]: pgmap v2353: 321 pgs: 321 active+clean; 218 MiB data, 382 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 1.2 KiB/s wr, 27 op/s
Oct 11 02:56:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2354: 321 pgs: 321 active+clean; 218 MiB data, 382 MiB used, 60 GiB / 60 GiB avail; 10 KiB/s rd, 341 B/s wr, 14 op/s
Oct 11 02:56:03 compute-0 nova_compute[356901]: 2025-10-11 02:56:03.471 2 INFO nova.virt.libvirt.driver [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Deleting instance files /var/lib/nova/instances/f98d09d7-6aa0-4405-bfa0-be1f78d3911f_del
Oct 11 02:56:03 compute-0 nova_compute[356901]: 2025-10-11 02:56:03.472 2 INFO nova.virt.libvirt.driver [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Deletion of /var/lib/nova/instances/f98d09d7-6aa0-4405-bfa0-be1f78d3911f_del complete
Oct 11 02:56:03 compute-0 nova_compute[356901]: 2025-10-11 02:56:03.628 2 INFO nova.compute.manager [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Took 1.45 seconds to destroy the instance on the hypervisor.
Oct 11 02:56:03 compute-0 nova_compute[356901]: 2025-10-11 02:56:03.629 2 DEBUG oslo.service.loopingcall [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Waiting for function nova.compute.manager.ComputeManager._try_deallocate_network.<locals>._deallocate_network_with_retries to return. func /usr/lib/python3.9/site-packages/oslo_service/loopingcall.py:435
Oct 11 02:56:03 compute-0 nova_compute[356901]: 2025-10-11 02:56:03.630 2 DEBUG nova.compute.manager [-] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Deallocating network for instance _deallocate_network /usr/lib/python3.9/site-packages/nova/compute/manager.py:2259
Oct 11 02:56:03 compute-0 nova_compute[356901]: 2025-10-11 02:56:03.631 2 DEBUG nova.network.neutron [-] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] deallocate_for_instance() deallocate_for_instance /usr/lib/python3.9/site-packages/nova/network/neutron.py:1803
Oct 11 02:56:04 compute-0 nova_compute[356901]: 2025-10-11 02:56:04.601 2 DEBUG nova.virt.driver [-] Emitting event <LifecycleEvent: 1760151349.5989177, 8422017b-c868-4ba2-ab1f-61d3668ca145 => Stopped> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:56:04 compute-0 nova_compute[356901]: 2025-10-11 02:56:04.602 2 INFO nova.compute.manager [-] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] VM Stopped (Lifecycle Event)
Oct 11 02:56:04 compute-0 nova_compute[356901]: 2025-10-11 02:56:04.701 2 DEBUG nova.compute.manager [None req-3196b157-a978-437d-af4b-5cbf44bfbb28 - - - - - -] [instance: 8422017b-c868-4ba2-ab1f-61d3668ca145] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:56:04 compute-0 nova_compute[356901]: 2025-10-11 02:56:04.965 2 DEBUG nova.compute.manager [req-db3f7021-2dab-4a5a-a4c3-ad0712db11bf req-4c629031-2c2f-4636-92b9-f668b096e63d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Received event network-vif-plugged-0c37c119-6647-42bb-a22f-ca741242ef30 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:56:04 compute-0 nova_compute[356901]: 2025-10-11 02:56:04.965 2 DEBUG oslo_concurrency.lockutils [req-db3f7021-2dab-4a5a-a4c3-ad0712db11bf req-4c629031-2c2f-4636-92b9-f668b096e63d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Acquiring lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:56:04 compute-0 nova_compute[356901]: 2025-10-11 02:56:04.966 2 DEBUG oslo_concurrency.lockutils [req-db3f7021-2dab-4a5a-a4c3-ad0712db11bf req-4c629031-2c2f-4636-92b9-f668b096e63d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" acquired by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:56:04 compute-0 nova_compute[356901]: 2025-10-11 02:56:04.966 2 DEBUG oslo_concurrency.lockutils [req-db3f7021-2dab-4a5a-a4c3-ad0712db11bf req-4c629031-2c2f-4636-92b9-f668b096e63d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f-events" "released" by "nova.compute.manager.InstanceEvents.pop_instance_event.<locals>._pop_event" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:56:04 compute-0 nova_compute[356901]: 2025-10-11 02:56:04.966 2 DEBUG nova.compute.manager [req-db3f7021-2dab-4a5a-a4c3-ad0712db11bf req-4c629031-2c2f-4636-92b9-f668b096e63d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] No waiting events found dispatching network-vif-plugged-0c37c119-6647-42bb-a22f-ca741242ef30 pop_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:320
Oct 11 02:56:04 compute-0 nova_compute[356901]: 2025-10-11 02:56:04.966 2 WARNING nova.compute.manager [req-db3f7021-2dab-4a5a-a4c3-ad0712db11bf req-4c629031-2c2f-4636-92b9-f668b096e63d 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Received unexpected event network-vif-plugged-0c37c119-6647-42bb-a22f-ca741242ef30 for instance with vm_state active and task_state deleting.
Oct 11 02:56:05 compute-0 nova_compute[356901]: 2025-10-11 02:56:05.060 2 DEBUG nova.network.neutron [-] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Updating instance_info_cache with network_info: [] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:56:05 compute-0 ceph-mon[191930]: pgmap v2354: 321 pgs: 321 active+clean; 218 MiB data, 382 MiB used, 60 GiB / 60 GiB avail; 10 KiB/s rd, 341 B/s wr, 14 op/s
Oct 11 02:56:05 compute-0 nova_compute[356901]: 2025-10-11 02:56:05.168 2 INFO nova.compute.manager [-] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Took 1.54 seconds to deallocate network for instance.
Oct 11 02:56:05 compute-0 podman[480183]: 2025-10-11 02:56:05.233758814 +0000 UTC m=+0.104543307 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:56:05 compute-0 podman[480181]: 2025-10-11 02:56:05.238525197 +0000 UTC m=+0.130605247 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_id=edpm, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3)
Oct 11 02:56:05 compute-0 podman[480182]: 2025-10-11 02:56:05.242472459 +0000 UTC m=+0.129681958 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., release=1755695350, build-date=2025-08-20T13:12:41, vcs-type=git, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, url=https://catalog.redhat.com/en/search?searchType=containers, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vendor=Red Hat, Inc., architecture=x86_64, io.buildah.version=1.33.7, io.openshift.expose-services=, container_name=openstack_network_exporter, distribution-scope=public, config_id=edpm, managed_by=edpm_ansible, com.redhat.component=ubi9-minimal-container, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.openshift.tags=minimal rhel9, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, version=9.6, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']})
Oct 11 02:56:05 compute-0 nova_compute[356901]: 2025-10-11 02:56:05.262 2 DEBUG oslo_concurrency.lockutils [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.update_usage" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:56:05 compute-0 nova_compute[356901]: 2025-10-11 02:56:05.262 2 DEBUG oslo_concurrency.lockutils [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:56:05 compute-0 nova_compute[356901]: 2025-10-11 02:56:05.340 2 DEBUG oslo_concurrency.processutils [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:56:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2355: 321 pgs: 321 active+clean; 203 MiB data, 373 MiB used, 60 GiB / 60 GiB avail; 15 KiB/s rd, 0 B/s wr, 20 op/s
Oct 11 02:56:05 compute-0 nova_compute[356901]: 2025-10-11 02:56:05.683 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:56:05 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/861215492' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:56:05 compute-0 nova_compute[356901]: 2025-10-11 02:56:05.847 2 DEBUG oslo_concurrency.processutils [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.506s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:56:05 compute-0 nova_compute[356901]: 2025-10-11 02:56:05.859 2 DEBUG nova.compute.provider_tree [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:56:05 compute-0 nova_compute[356901]: 2025-10-11 02:56:05.888 2 DEBUG nova.scheduler.client.report [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:56:05 compute-0 nova_compute[356901]: 2025-10-11 02:56:05.951 2 DEBUG oslo_concurrency.lockutils [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.update_usage" :: held 0.688s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:56:05 compute-0 nova_compute[356901]: 2025-10-11 02:56:05.991 2 INFO nova.scheduler.client.report [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Deleted allocations for instance f98d09d7-6aa0-4405-bfa0-be1f78d3911f
Oct 11 02:56:06 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/861215492' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:56:06 compute-0 nova_compute[356901]: 2025-10-11 02:56:06.149 2 DEBUG oslo_concurrency.lockutils [None req-a21f06ca-d390-45a1-9717-530a6ef3d9da f66a606299944d53a40f21e81c791d70 a05bbc8f872d4dd99972d2cb8136d608 - - default default] Lock "f98d09d7-6aa0-4405-bfa0-be1f78d3911f" "released" by "nova.compute.manager.ComputeManager.terminate_instance.<locals>.do_terminate_instance" :: held 3.976s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:56:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:56:07 compute-0 nova_compute[356901]: 2025-10-11 02:56:07.077 2 DEBUG nova.compute.manager [req-88f378e8-80bb-448f-bac2-56fb850a06c1 req-7cbf66b4-10ea-4191-9927-26cfa0ad2828 6b6159e942c54c138dca8080d5cc45bc d8a6f89a1d3d4ca192dfb06bf68e599e - - default default] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Received event network-vif-deleted-0c37c119-6647-42bb-a22f-ca741242ef30 external_instance_event /usr/lib/python3.9/site-packages/nova/compute/manager.py:11048
Oct 11 02:56:07 compute-0 ceph-mon[191930]: pgmap v2355: 321 pgs: 321 active+clean; 203 MiB data, 373 MiB used, 60 GiB / 60 GiB avail; 15 KiB/s rd, 0 B/s wr, 20 op/s
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2356: 321 pgs: 321 active+clean; 173 MiB data, 356 MiB used, 60 GiB / 60 GiB avail; 17 KiB/s rd, 852 B/s wr, 24 op/s
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0009279156085172885 of space, bias 1.0, pg target 0.27837468255518655 quantized to 32 (current 32)
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.00125203744627857 of space, bias 1.0, pg target 0.375611233883571 quantized to 32 (current 32)
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:56:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:56:07 compute-0 nova_compute[356901]: 2025-10-11 02:56:07.708 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:09 compute-0 ceph-mon[191930]: pgmap v2356: 321 pgs: 321 active+clean; 173 MiB data, 356 MiB used, 60 GiB / 60 GiB avail; 17 KiB/s rd, 852 B/s wr, 24 op/s
Oct 11 02:56:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2357: 321 pgs: 321 active+clean; 139 MiB data, 344 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 1.2 KiB/s wr, 27 op/s
Oct 11 02:56:10 compute-0 nova_compute[356901]: 2025-10-11 02:56:10.687 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:11 compute-0 ceph-mon[191930]: pgmap v2357: 321 pgs: 321 active+clean; 139 MiB data, 344 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 1.2 KiB/s wr, 27 op/s
Oct 11 02:56:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2358: 321 pgs: 321 active+clean; 139 MiB data, 344 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 1.2 KiB/s wr, 27 op/s
Oct 11 02:56:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:56:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e141 do_prune osdmap full prune enabled
Oct 11 02:56:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e142 e142: 3 total, 3 up, 3 in
Oct 11 02:56:12 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e142: 3 total, 3 up, 3 in
Oct 11 02:56:12 compute-0 nova_compute[356901]: 2025-10-11 02:56:12.714 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:13 compute-0 ceph-mon[191930]: pgmap v2358: 321 pgs: 321 active+clean; 139 MiB data, 344 MiB used, 60 GiB / 60 GiB avail; 19 KiB/s rd, 1.2 KiB/s wr, 27 op/s
Oct 11 02:56:13 compute-0 ceph-mon[191930]: osdmap e142: 3 total, 3 up, 3 in
Oct 11 02:56:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2360: 321 pgs: 321 active+clean; 134 MiB data, 329 MiB used, 60 GiB / 60 GiB avail; 22 KiB/s rd, 1.4 KiB/s wr, 32 op/s
Oct 11 02:56:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e142 do_prune osdmap full prune enabled
Oct 11 02:56:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e143 e143: 3 total, 3 up, 3 in
Oct 11 02:56:14 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e143: 3 total, 3 up, 3 in
Oct 11 02:56:14 compute-0 podman[480265]: 2025-10-11 02:56:14.263691649 +0000 UTC m=+0.144525569 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, version=9.4, architecture=x86_64, summary=Provides the latest release of Red Hat Universal Base Image 9., build-date=2024-09-18T21:23:30, io.openshift.expose-services=, io.openshift.tags=base rhel9, vcs-type=git, vendor=Red Hat, Inc., config_id=edpm, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, container_name=kepler, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, com.redhat.component=ubi9-container, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.buildah.version=1.29.0, maintainer=Red Hat, Inc., release=1214.1726694543, release-0.7.12=, io.k8s.display-name=Red Hat Universal Base Image 9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:56:15 compute-0 nova_compute[356901]: 2025-10-11 02:56:15.095 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:56:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e143 do_prune osdmap full prune enabled
Oct 11 02:56:15 compute-0 ceph-mon[191930]: pgmap v2360: 321 pgs: 321 active+clean; 134 MiB data, 329 MiB used, 60 GiB / 60 GiB avail; 22 KiB/s rd, 1.4 KiB/s wr, 32 op/s
Oct 11 02:56:15 compute-0 ceph-mon[191930]: osdmap e143: 3 total, 3 up, 3 in
Oct 11 02:56:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e144 e144: 3 total, 3 up, 3 in
Oct 11 02:56:15 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e144: 3 total, 3 up, 3 in
Oct 11 02:56:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2363: 321 pgs: 321 active+clean; 134 MiB data, 329 MiB used, 60 GiB / 60 GiB avail; 2.2 KiB/s rd, 682 B/s wr, 4 op/s
Oct 11 02:56:15 compute-0 nova_compute[356901]: 2025-10-11 02:56:15.692 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:16 compute-0 ceph-mon[191930]: osdmap e144: 3 total, 3 up, 3 in
Oct 11 02:56:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e144 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:56:17 compute-0 ceph-mon[191930]: pgmap v2363: 321 pgs: 321 active+clean; 134 MiB data, 329 MiB used, 60 GiB / 60 GiB avail; 2.2 KiB/s rd, 682 B/s wr, 4 op/s
Oct 11 02:56:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2364: 321 pgs: 321 active+clean; 118 MiB data, 317 MiB used, 60 GiB / 60 GiB avail; 33 KiB/s rd, 759 KiB/s wr, 46 op/s
Oct 11 02:56:17 compute-0 nova_compute[356901]: 2025-10-11 02:56:17.638 2 DEBUG nova.virt.driver [-] Emitting event <LifecycleEvent: 1760151362.635928, f98d09d7-6aa0-4405-bfa0-be1f78d3911f => Stopped> emit_event /usr/lib/python3.9/site-packages/nova/virt/driver.py:1653
Oct 11 02:56:17 compute-0 nova_compute[356901]: 2025-10-11 02:56:17.638 2 INFO nova.compute.manager [-] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] VM Stopped (Lifecycle Event)
Oct 11 02:56:17 compute-0 nova_compute[356901]: 2025-10-11 02:56:17.720 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:17 compute-0 nova_compute[356901]: 2025-10-11 02:56:17.736 2 DEBUG nova.compute.manager [None req-c11cbadf-279d-47c6-8f7f-fa7d21f18968 - - - - - -] [instance: f98d09d7-6aa0-4405-bfa0-be1f78d3911f] Checking state _get_power_state /usr/lib/python3.9/site-packages/nova/compute/manager.py:1762
Oct 11 02:56:17 compute-0 nova_compute[356901]: 2025-10-11 02:56:17.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:56:18 compute-0 podman[480285]: 2025-10-11 02:56:18.232005288 +0000 UTC m=+0.107928347 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:56:18 compute-0 podman[480291]: 2025-10-11 02:56:18.239935366 +0000 UTC m=+0.103367940 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:56:18 compute-0 ceph-mon[191930]: pgmap v2364: 321 pgs: 321 active+clean; 118 MiB data, 317 MiB used, 60 GiB / 60 GiB avail; 33 KiB/s rd, 759 KiB/s wr, 46 op/s
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #114. Immutable memtables: 0.
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:56:18.259675) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 67] Flushing memtable with next log file: 114
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151378259712, "job": 67, "event": "flush_started", "num_memtables": 1, "num_entries": 1072, "num_deletes": 251, "total_data_size": 1588587, "memory_usage": 1615488, "flush_reason": "Manual Compaction"}
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 67] Level-0 flush table #115: started
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151378273545, "cf_name": "default", "job": 67, "event": "table_file_creation", "file_number": 115, "file_size": 1562532, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 47547, "largest_seqno": 48618, "table_properties": {"data_size": 1557230, "index_size": 2762, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1477, "raw_key_size": 11296, "raw_average_key_size": 19, "raw_value_size": 1546614, "raw_average_value_size": 2718, "num_data_blocks": 124, "num_entries": 569, "num_filter_entries": 569, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760151277, "oldest_key_time": 1760151277, "file_creation_time": 1760151378, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 115, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 67] Flush lasted 13929 microseconds, and 5664 cpu microseconds.
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:56:18 compute-0 podman[480287]: 2025-10-11 02:56:18.274865373 +0000 UTC m=+0.139216855 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image)
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:56:18.273604) [db/flush_job.cc:967] [default] [JOB 67] Level-0 flush table #115: 1562532 bytes OK
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:56:18.273623) [db/memtable_list.cc:519] [default] Level-0 commit table #115 started
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:56:18.276479) [db/memtable_list.cc:722] [default] Level-0 commit table #115: memtable #1 done
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:56:18.276497) EVENT_LOG_v1 {"time_micros": 1760151378276491, "job": 67, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:56:18.276587) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 67] Try to delete WAL files size 1583580, prev total WAL file size 1583580, number of live WAL files 2.
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000111.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:56:18.277960) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730034353138' seq:72057594037927935, type:22 .. '7061786F730034373730' seq:0, type:0; will stop at (end)
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 68] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 67 Base level 0, inputs: [115(1525KB)], [113(9368KB)]
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151378278084, "job": 68, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [115], "files_L6": [113], "score": -1, "input_data_size": 11156220, "oldest_snapshot_seqno": -1}
Oct 11 02:56:18 compute-0 podman[480286]: 2025-10-11 02:56:18.305033798 +0000 UTC m=+0.183375106 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller)
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 68] Generated table #116: 6240 keys, 9381564 bytes, temperature: kUnknown
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151378369269, "cf_name": "default", "job": 68, "event": "table_file_creation", "file_number": 116, "file_size": 9381564, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 9340358, "index_size": 24520, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 15621, "raw_key_size": 163144, "raw_average_key_size": 26, "raw_value_size": 9227864, "raw_average_value_size": 1478, "num_data_blocks": 975, "num_entries": 6240, "num_filter_entries": 6240, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760151378, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 116, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:56:18.369914) [db/compaction/compaction_job.cc:1663] [default] [JOB 68] Compacted 1@0 + 1@6 files to L6 => 9381564 bytes
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:56:18.372170) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 121.8 rd, 102.4 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(1.5, 9.1 +0.0 blob) out(8.9 +0.0 blob), read-write-amplify(13.1) write-amplify(6.0) OK, records in: 6758, records dropped: 518 output_compression: NoCompression
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:56:18.372196) EVENT_LOG_v1 {"time_micros": 1760151378372183, "job": 68, "event": "compaction_finished", "compaction_time_micros": 91575, "compaction_time_cpu_micros": 34746, "output_level": 6, "num_output_files": 1, "total_output_size": 9381564, "num_input_records": 6758, "num_output_records": 6240, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000115.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151378373973, "job": 68, "event": "table_file_deletion", "file_number": 115}
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000113.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151378377550, "job": 68, "event": "table_file_deletion", "file_number": 113}
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:56:18.277635) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:56:18.377686) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:56:18.377692) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:56:18.377695) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:56:18.377697) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:56:18 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:56:18.377700) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:56:18 compute-0 ovn_controller[88370]: 2025-10-11T02:56:18Z|00194|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:56:19 compute-0 nova_compute[356901]: 2025-10-11 02:56:19.005 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2365: 321 pgs: 321 active+clean; 118 MiB data, 317 MiB used, 60 GiB / 60 GiB avail; 69 KiB/s rd, 2.8 MiB/s wr, 96 op/s
Oct 11 02:56:20 compute-0 ceph-mon[191930]: pgmap v2365: 321 pgs: 321 active+clean; 118 MiB data, 317 MiB used, 60 GiB / 60 GiB avail; 69 KiB/s rd, 2.8 MiB/s wr, 96 op/s
Oct 11 02:56:20 compute-0 nova_compute[356901]: 2025-10-11 02:56:20.695 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:20 compute-0 nova_compute[356901]: 2025-10-11 02:56:20.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:56:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2366: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 61 KiB/s rd, 2.6 MiB/s wr, 85 op/s
Oct 11 02:56:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e144 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:56:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e144 do_prune osdmap full prune enabled
Oct 11 02:56:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 e145: 3 total, 3 up, 3 in
Oct 11 02:56:21 compute-0 ceph-mon[191930]: log_channel(cluster) log [DBG] : osdmap e145: 3 total, 3 up, 3 in
Oct 11 02:56:21 compute-0 nova_compute[356901]: 2025-10-11 02:56:21.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:56:22 compute-0 nova_compute[356901]: 2025-10-11 02:56:22.724 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:22 compute-0 ceph-mon[191930]: pgmap v2366: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 61 KiB/s rd, 2.6 MiB/s wr, 85 op/s
Oct 11 02:56:22 compute-0 ceph-mon[191930]: osdmap e145: 3 total, 3 up, 3 in
Oct 11 02:56:23 compute-0 ovn_controller[88370]: 2025-10-11T02:56:23Z|00195|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:56:23 compute-0 nova_compute[356901]: 2025-10-11 02:56:23.219 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2368: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 59 KiB/s rd, 2.5 MiB/s wr, 81 op/s
Oct 11 02:56:23 compute-0 nova_compute[356901]: 2025-10-11 02:56:23.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:56:23 compute-0 nova_compute[356901]: 2025-10-11 02:56:23.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:56:24 compute-0 ceph-mon[191930]: pgmap v2368: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 59 KiB/s rd, 2.5 MiB/s wr, 81 op/s
Oct 11 02:56:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2369: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 48 KiB/s rd, 2.0 MiB/s wr, 66 op/s
Oct 11 02:56:25 compute-0 nova_compute[356901]: 2025-10-11 02:56:25.698 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:56:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:56:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:56:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:56:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:56:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:56:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:56:26 compute-0 ceph-mon[191930]: pgmap v2369: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 48 KiB/s rd, 2.0 MiB/s wr, 66 op/s
Oct 11 02:56:27 compute-0 podman[480368]: 2025-10-11 02:56:27.239684586 +0000 UTC m=+0.119243195 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, container_name=multipathd, org.label-schema.schema-version=1.0, config_id=multipathd, managed_by=edpm_ansible)
Oct 11 02:56:27 compute-0 podman[480369]: 2025-10-11 02:56:27.260185754 +0000 UTC m=+0.134711470 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, container_name=iscsid, org.label-schema.vendor=CentOS)
Oct 11 02:56:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2370: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 1.6 MiB/s wr, 41 op/s
Oct 11 02:56:27 compute-0 nova_compute[356901]: 2025-10-11 02:56:27.730 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:56:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1934651986' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:56:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:56:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1934651986' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:56:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1934651986' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:56:27 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1934651986' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:56:28 compute-0 ceph-mon[191930]: pgmap v2370: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 30 KiB/s rd, 1.6 MiB/s wr, 41 op/s
Oct 11 02:56:28 compute-0 nova_compute[356901]: 2025-10-11 02:56:28.893 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:56:28 compute-0 nova_compute[356901]: 2025-10-11 02:56:28.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:56:28 compute-0 nova_compute[356901]: 2025-10-11 02:56:28.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:56:28 compute-0 nova_compute[356901]: 2025-10-11 02:56:28.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:56:29 compute-0 nova_compute[356901]: 2025-10-11 02:56:29.115 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:56:29 compute-0 nova_compute[356901]: 2025-10-11 02:56:29.116 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:56:29 compute-0 nova_compute[356901]: 2025-10-11 02:56:29.116 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:56:29 compute-0 nova_compute[356901]: 2025-10-11 02:56:29.117 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:56:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2371: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:29 compute-0 podman[157119]: time="2025-10-11T02:56:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:56:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:56:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:56:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:56:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9091 "" "Go-http-client/1.1"
Oct 11 02:56:30 compute-0 nova_compute[356901]: 2025-10-11 02:56:30.701 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:30 compute-0 ceph-mon[191930]: pgmap v2371: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:31 compute-0 ovn_controller[88370]: 2025-10-11T02:56:31Z|00196|binding|INFO|Releasing lport f0f8488b-423f-46a5-8a6a-984c2ae3438e from this chassis (sb_readonly=0)
Oct 11 02:56:31 compute-0 nova_compute[356901]: 2025-10-11 02:56:31.205 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2372: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:31 compute-0 openstack_network_exporter[374316]: ERROR   02:56:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:56:31 compute-0 openstack_network_exporter[374316]: ERROR   02:56:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:56:31 compute-0 openstack_network_exporter[374316]: ERROR   02:56:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:56:31 compute-0 openstack_network_exporter[374316]: ERROR   02:56:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:56:31 compute-0 openstack_network_exporter[374316]: ERROR   02:56:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:56:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:56:32 compute-0 nova_compute[356901]: 2025-10-11 02:56:32.004 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:56:32 compute-0 nova_compute[356901]: 2025-10-11 02:56:32.033 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:56:32 compute-0 nova_compute[356901]: 2025-10-11 02:56:32.034 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:56:32 compute-0 nova_compute[356901]: 2025-10-11 02:56:32.034 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:56:32 compute-0 nova_compute[356901]: 2025-10-11 02:56:32.735 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:32 compute-0 ceph-mon[191930]: pgmap v2372: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:32 compute-0 nova_compute[356901]: 2025-10-11 02:56:32.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:56:33 compute-0 nova_compute[356901]: 2025-10-11 02:56:33.005 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:56:33 compute-0 nova_compute[356901]: 2025-10-11 02:56:33.006 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:56:33 compute-0 nova_compute[356901]: 2025-10-11 02:56:33.007 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:56:33 compute-0 nova_compute[356901]: 2025-10-11 02:56:33.007 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:56:33 compute-0 nova_compute[356901]: 2025-10-11 02:56:33.008 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:56:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2373: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:56:33 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1505367567' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:56:33 compute-0 nova_compute[356901]: 2025-10-11 02:56:33.517 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.509s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:56:33 compute-0 nova_compute[356901]: 2025-10-11 02:56:33.610 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:56:33 compute-0 nova_compute[356901]: 2025-10-11 02:56:33.612 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:56:33 compute-0 nova_compute[356901]: 2025-10-11 02:56:33.613 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:56:33 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1505367567' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:56:33 compute-0 nova_compute[356901]: 2025-10-11 02:56:33.997 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:56:33 compute-0 nova_compute[356901]: 2025-10-11 02:56:33.999 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3656MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:56:33 compute-0 nova_compute[356901]: 2025-10-11 02:56:33.999 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:56:33 compute-0 nova_compute[356901]: 2025-10-11 02:56:33.999 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:56:34 compute-0 nova_compute[356901]: 2025-10-11 02:56:34.388 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:56:34 compute-0 nova_compute[356901]: 2025-10-11 02:56:34.388 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:56:34 compute-0 nova_compute[356901]: 2025-10-11 02:56:34.388 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:56:34 compute-0 nova_compute[356901]: 2025-10-11 02:56:34.521 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:56:34 compute-0 ceph-mon[191930]: pgmap v2373: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:56:34 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/487865444' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:56:34 compute-0 nova_compute[356901]: 2025-10-11 02:56:34.998 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.477s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:56:35 compute-0 nova_compute[356901]: 2025-10-11 02:56:35.009 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:56:35 compute-0 nova_compute[356901]: 2025-10-11 02:56:35.035 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:56:35 compute-0 nova_compute[356901]: 2025-10-11 02:56:35.146 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:56:35 compute-0 nova_compute[356901]: 2025-10-11 02:56:35.146 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 1.147s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:56:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2374: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:35 compute-0 nova_compute[356901]: 2025-10-11 02:56:35.706 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:35 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/487865444' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:56:36 compute-0 podman[480455]: 2025-10-11 02:56:36.23291934 +0000 UTC m=+0.125923871 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, config_id=edpm, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:56:36 compute-0 podman[480457]: 2025-10-11 02:56:36.266465985 +0000 UTC m=+0.138974427 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 02:56:36 compute-0 podman[480456]: 2025-10-11 02:56:36.265953327 +0000 UTC m=+0.142885127 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, url=https://catalog.redhat.com/en/search?searchType=containers, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, com.redhat.component=ubi9-minimal-container, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., build-date=2025-08-20T13:12:41, io.buildah.version=1.33.7, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, name=ubi9-minimal, io.openshift.expose-services=, release=1755695350, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, version=9.6, distribution-scope=public, vendor=Red Hat, Inc., architecture=x86_64, managed_by=edpm_ansible, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., config_id=edpm, container_name=openstack_network_exporter, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:56:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:56:36 compute-0 ceph-mon[191930]: pgmap v2374: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2375: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:37 compute-0 nova_compute[356901]: 2025-10-11 02:56:37.741 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:38 compute-0 ceph-mon[191930]: pgmap v2375: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2376: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:40 compute-0 nova_compute[356901]: 2025-10-11 02:56:40.708 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:40 compute-0 ceph-mon[191930]: pgmap v2376: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2377: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:56:42 compute-0 nova_compute[356901]: 2025-10-11 02:56:42.747 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:43 compute-0 ceph-mon[191930]: pgmap v2377: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2378: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:44 compute-0 podman[480517]: 2025-10-11 02:56:44.815498966 +0000 UTC m=+0.110641338 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.tags=base rhel9, name=ubi9, release-0.7.12=, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.29.0, managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., distribution-scope=public, release=1214.1726694543, vcs-type=git, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., com.redhat.component=ubi9-container, container_name=kepler, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.expose-services=, version=9.4, config_id=edpm, maintainer=Red Hat, Inc., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, architecture=x86_64)
Oct 11 02:56:45 compute-0 ceph-mon[191930]: pgmap v2378: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2379: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:45 compute-0 nova_compute[356901]: 2025-10-11 02:56:45.713 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:56:47 compute-0 nova_compute[356901]: 2025-10-11 02:56:47.142 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:56:47 compute-0 ceph-mon[191930]: pgmap v2379: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2380: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:47 compute-0 nova_compute[356901]: 2025-10-11 02:56:47.753 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:49 compute-0 ceph-mon[191930]: pgmap v2380: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:49 compute-0 podman[480538]: 2025-10-11 02:56:49.236738207 +0000 UTC m=+0.108802972 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, io.buildah.version=1.41.4)
Oct 11 02:56:49 compute-0 podman[480539]: 2025-10-11 02:56:49.261637222 +0000 UTC m=+0.117861853 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, config_id=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:56:49 compute-0 podman[480536]: 2025-10-11 02:56:49.273907641 +0000 UTC m=+0.152012863 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:56:49 compute-0 podman[480537]: 2025-10-11 02:56:49.306731063 +0000 UTC m=+0.174715395 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller)
Oct 11 02:56:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2381: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:50 compute-0 nova_compute[356901]: 2025-10-11 02:56:50.717 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:51 compute-0 ceph-mon[191930]: pgmap v2381: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2382: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:56:52 compute-0 sudo[480622]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:56:52 compute-0 sudo[480622]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:56:52 compute-0 sudo[480622]: pam_unix(sudo:session): session closed for user root
Oct 11 02:56:52 compute-0 sudo[480647]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:56:52 compute-0 sudo[480647]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:56:52 compute-0 sudo[480647]: pam_unix(sudo:session): session closed for user root
Oct 11 02:56:52 compute-0 sudo[480672]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:56:52 compute-0 sudo[480672]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:56:52 compute-0 sudo[480672]: pam_unix(sudo:session): session closed for user root
Oct 11 02:56:52 compute-0 nova_compute[356901]: 2025-10-11 02:56:52.758 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:52 compute-0 sudo[480697]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:56:52 compute-0 sudo[480697]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:56:52 compute-0 nova_compute[356901]: 2025-10-11 02:56:52.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_incomplete_migrations run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:56:52 compute-0 nova_compute[356901]: 2025-10-11 02:56:52.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances with incomplete migration  _cleanup_incomplete_migrations /usr/lib/python3.9/site-packages/nova/compute/manager.py:11183
Oct 11 02:56:53 compute-0 ceph-mon[191930]: pgmap v2382: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:53 compute-0 sudo[480697]: pam_unix(sudo:session): session closed for user root
Oct 11 02:56:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2383: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:56:53 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:56:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:56:53 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:56:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:56:53 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:56:53 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev bcded37a-66ee-429e-a4a7-f34ebef0a22c does not exist
Oct 11 02:56:53 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev f8c3e295-33f9-4b4d-8028-430136a0d0f9 does not exist
Oct 11 02:56:53 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev dad76d8b-ef47-4708-8494-b5b54bc18996 does not exist
Oct 11 02:56:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:56:53 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:56:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:56:53 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:56:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:56:53 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:56:53 compute-0 sudo[480752]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:56:53 compute-0 sudo[480752]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:56:53 compute-0 sudo[480752]: pam_unix(sudo:session): session closed for user root
Oct 11 02:56:53 compute-0 sudo[480777]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:56:53 compute-0 sudo[480777]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:56:53 compute-0 sudo[480777]: pam_unix(sudo:session): session closed for user root
Oct 11 02:56:53 compute-0 sudo[480802]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:56:53 compute-0 sudo[480802]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:56:53 compute-0 sudo[480802]: pam_unix(sudo:session): session closed for user root
Oct 11 02:56:54 compute-0 sudo[480827]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:56:54 compute-0 sudo[480827]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:56:54 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:56:54 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:56:54 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:56:54 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:56:54 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:56:54 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:56:54 compute-0 podman[480892]: 2025-10-11 02:56:54.647082604 +0000 UTC m=+0.086760159 container create c8e233847638d1a6285e86f0d91a14c0703cd99e9bce4ef9da7da4512ec425c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_mcnulty, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_REF=reef, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0)
Oct 11 02:56:54 compute-0 podman[480892]: 2025-10-11 02:56:54.610882412 +0000 UTC m=+0.050560027 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:56:54 compute-0 systemd[1]: Started libpod-conmon-c8e233847638d1a6285e86f0d91a14c0703cd99e9bce4ef9da7da4512ec425c2.scope.
Oct 11 02:56:54 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:56:54 compute-0 podman[480892]: 2025-10-11 02:56:54.783654953 +0000 UTC m=+0.223332548 container init c8e233847638d1a6285e86f0d91a14c0703cd99e9bce4ef9da7da4512ec425c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_mcnulty, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.license=GPLv2)
Oct 11 02:56:54 compute-0 podman[480892]: 2025-10-11 02:56:54.803496453 +0000 UTC m=+0.243174008 container start c8e233847638d1a6285e86f0d91a14c0703cd99e9bce4ef9da7da4512ec425c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_mcnulty, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:56:54 compute-0 podman[480892]: 2025-10-11 02:56:54.810353891 +0000 UTC m=+0.250031436 container attach c8e233847638d1a6285e86f0d91a14c0703cd99e9bce4ef9da7da4512ec425c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_mcnulty, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:56:54 compute-0 systemd[1]: libpod-c8e233847638d1a6285e86f0d91a14c0703cd99e9bce4ef9da7da4512ec425c2.scope: Deactivated successfully.
Oct 11 02:56:54 compute-0 heuristic_mcnulty[480908]: 167 167
Oct 11 02:56:54 compute-0 podman[480892]: 2025-10-11 02:56:54.819633808 +0000 UTC m=+0.259311353 container died c8e233847638d1a6285e86f0d91a14c0703cd99e9bce4ef9da7da4512ec425c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_mcnulty, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:56:54 compute-0 systemd[1]: var-lib-containers-storage-overlay-21b674b892477cb9bdcf1e02a272683d805fdaa2ee9ef851e7d2d07f7d9dc92f-merged.mount: Deactivated successfully.
Oct 11 02:56:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:56:54.890 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:56:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:56:54.892 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:56:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:56:54.893 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:56:54 compute-0 podman[480892]: 2025-10-11 02:56:54.896825867 +0000 UTC m=+0.336503392 container remove c8e233847638d1a6285e86f0d91a14c0703cd99e9bce4ef9da7da4512ec425c2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_mcnulty, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 02:56:54 compute-0 systemd[1]: libpod-conmon-c8e233847638d1a6285e86f0d91a14c0703cd99e9bce4ef9da7da4512ec425c2.scope: Deactivated successfully.
Oct 11 02:56:55 compute-0 ceph-mon[191930]: pgmap v2383: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:55 compute-0 podman[480930]: 2025-10-11 02:56:55.24564811 +0000 UTC m=+0.100055214 container create f4b4219d8141b28a96a00f413df67693a5f5466afbeb59cc1aac5737bb582f05 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_tesla, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:56:55 compute-0 podman[480930]: 2025-10-11 02:56:55.205633016 +0000 UTC m=+0.060040100 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:56:55 compute-0 systemd[1]: Started libpod-conmon-f4b4219d8141b28a96a00f413df67693a5f5466afbeb59cc1aac5737bb582f05.scope.
Oct 11 02:56:55 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:56:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b9a2c3394d5d44af081a9a0c3c3986f3ac440af1b03631600dc153b91efb1314/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:56:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b9a2c3394d5d44af081a9a0c3c3986f3ac440af1b03631600dc153b91efb1314/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:56:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b9a2c3394d5d44af081a9a0c3c3986f3ac440af1b03631600dc153b91efb1314/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:56:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b9a2c3394d5d44af081a9a0c3c3986f3ac440af1b03631600dc153b91efb1314/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:56:55 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b9a2c3394d5d44af081a9a0c3c3986f3ac440af1b03631600dc153b91efb1314/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:56:55 compute-0 podman[480930]: 2025-10-11 02:56:55.41085419 +0000 UTC m=+0.265261334 container init f4b4219d8141b28a96a00f413df67693a5f5466afbeb59cc1aac5737bb582f05 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_tesla, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 02:56:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2384: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:55 compute-0 podman[480930]: 2025-10-11 02:56:55.432985369 +0000 UTC m=+0.287392463 container start f4b4219d8141b28a96a00f413df67693a5f5466afbeb59cc1aac5737bb582f05 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_tesla, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2)
Oct 11 02:56:55 compute-0 podman[480930]: 2025-10-11 02:56:55.439005126 +0000 UTC m=+0.293412270 container attach f4b4219d8141b28a96a00f413df67693a5f5466afbeb59cc1aac5737bb582f05 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_tesla, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:56:55 compute-0 nova_compute[356901]: 2025-10-11 02:56:55.720 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:55 compute-0 nova_compute[356901]: 2025-10-11 02:56:55.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_expired_console_auth_tokens run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:56:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:56:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:56:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:56:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:56:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:56:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:56:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:56:56
Oct 11 02:56:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:56:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:56:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['vms', 'cephfs.cephfs.data', 'volumes', 'default.rgw.log', '.rgw.root', 'backups', 'default.rgw.control', 'default.rgw.meta', '.mgr', 'cephfs.cephfs.meta', 'images']
Oct 11 02:56:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
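This balancer block is the mgr's periodic optimize pass: it builds plan auto_2025-10-11_02:56:56 in upmap mode with a 5% max-misplaced budget, scans the eleven pools listed, and "prepared 0/10 changes" means none of the up-to-10 candidate upmap adjustments were needed, i.e. the cluster is already balanced. A hedged sketch of checking the same state from the CLI (assumes the ceph CLI with an admin keyring on this host, and that balancer status honors --format json as most mon/mgr commands do):

    import json, subprocess

    # Query the balancer state the log lines above describe.
    out = subprocess.run(
        ["ceph", "balancer", "status", "--format", "json"],
        check=True, capture_output=True, text=True,
    ).stdout
    status = json.loads(out)
    print(status.get("mode"), "active:", status.get("active"))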
Oct 11 02:56:56 compute-0 lucid_tesla[480946]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:56:56 compute-0 lucid_tesla[480946]: --> relative data size: 1.0
Oct 11 02:56:56 compute-0 lucid_tesla[480946]: --> All data devices are unavailable
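lucid_tesla is cephadm running a ceph-volume "lvm batch --report" style probe during its device refresh: zero physical disks and three LVM devices were passed, and "All data devices are unavailable" here means the three LVs are already consumed by existing OSDs, so no new OSDs are proposed. A speculative sketch of reproducing that report by hand; the wrapper path and fsid are copied from the sudo invocations below, while everything after "--" is an assumption about the batch invocation, not shown in this log:

    import subprocess

    # Hypothetical re-run of the report lucid_tesla produced; the batch
    # subcommand and flags are assumed, only the wrapper/fsid come from the log.
    cmd = [
        "sudo", "/bin/python3",
        "/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/"
        "cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d",
        "ceph-volume", "--fsid", "3c7617c3-7a20-523e-a9de-20c0d6ba41da", "--",
        "lvm", "batch", "--report", "--format", "json",
        "/dev/ceph_vg0/ceph_lv0", "/dev/ceph_vg1/ceph_lv1", "/dev/ceph_vg2/ceph_lv2",
    ]
    print(subprocess.run(cmd, capture_output=True, text=True).stdout)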
Oct 11 02:56:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:56:56 compute-0 systemd[1]: libpod-f4b4219d8141b28a96a00f413df67693a5f5466afbeb59cc1aac5737bb582f05.scope: Deactivated successfully.
Oct 11 02:56:56 compute-0 systemd[1]: libpod-f4b4219d8141b28a96a00f413df67693a5f5466afbeb59cc1aac5737bb582f05.scope: Consumed 1.285s CPU time.
Oct 11 02:56:56 compute-0 podman[480930]: 2025-10-11 02:56:56.801832062 +0000 UTC m=+1.656239136 container died f4b4219d8141b28a96a00f413df67693a5f5466afbeb59cc1aac5737bb582f05 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_tesla, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 02:56:56 compute-0 systemd[1]: var-lib-containers-storage-overlay-b9a2c3394d5d44af081a9a0c3c3986f3ac440af1b03631600dc153b91efb1314-merged.mount: Deactivated successfully.
Oct 11 02:56:56 compute-0 podman[480930]: 2025-10-11 02:56:56.897864687 +0000 UTC m=+1.752271781 container remove f4b4219d8141b28a96a00f413df67693a5f5466afbeb59cc1aac5737bb582f05 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_tesla, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, ceph=True, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:56:56 compute-0 systemd[1]: libpod-conmon-f4b4219d8141b28a96a00f413df67693a5f5466afbeb59cc1aac5737bb582f05.scope: Deactivated successfully.
Oct 11 02:56:56 compute-0 nova_compute[356901]: 2025-10-11 02:56:56.927 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._run_pending_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:56:56 compute-0 nova_compute[356901]: 2025-10-11 02:56:56.929 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11145
Oct 11 02:56:56 compute-0 nova_compute[356901]: 2025-10-11 02:56:56.946 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] There are 0 instances to clean _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11154
Oct 11 02:56:56 compute-0 sudo[480827]: pam_unix(sudo:session): session closed for user root
Oct 11 02:56:57 compute-0 sudo[480988]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:56:57 compute-0 sudo[480988]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:56:57 compute-0 sudo[480988]: pam_unix(sudo:session): session closed for user root
Oct 11 02:56:57 compute-0 sudo[481013]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:56:57 compute-0 sudo[481013]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:56:57 compute-0 sudo[481013]: pam_unix(sudo:session): session closed for user root
Oct 11 02:56:57 compute-0 ceph-mon[191930]: pgmap v2384: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:57 compute-0 sudo[481038]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:56:57 compute-0 sudo[481038]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:56:57 compute-0 sudo[481038]: pam_unix(sudo:session): session closed for user root
Oct 11 02:56:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2385: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:57 compute-0 sudo[481075]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:56:57 compute-0 sudo[481075]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:56:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:56:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:56:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:56:57 compute-0 podman[481063]: 2025-10-11 02:56:57.543852436 +0000 UTC m=+0.121674305 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=iscsid)
Oct 11 02:56:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:56:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:56:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:56:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:56:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:56:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:56:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:56:57 compute-0 podman[481062]: 2025-10-11 02:56:57.561461721 +0000 UTC m=+0.152934731 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, container_name=multipathd)
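Both health_status=healthy events come from podman's healthcheck timers: per the embedded config_data, each container bind-mounts /var/lib/openstack/healthchecks/<name> at /openstack and runs /openstack/healthcheck as its test command. The same check can be triggered on demand; a small sketch, assuming the podman CLI as used throughout this log:

    import subprocess

    # 'podman healthcheck run' executes the container's configured test
    # (here /openstack/healthcheck) and exits 0 when it passes.
    for name in ("iscsid", "multipathd"):
        rc = subprocess.run(["podman", "healthcheck", "run", name]).returncode
        print(name, "healthy" if rc == 0 else f"unhealthy (rc={rc})")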
Oct 11 02:56:57 compute-0 nova_compute[356901]: 2025-10-11 02:56:57.763 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:56:58 compute-0 podman[481166]: 2025-10-11 02:56:58.103179335 +0000 UTC m=+0.064318606 container create d1335065de72412e87b1fb35d6935b904e0202a993e68c6e422a10a9f307ca1f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_lalande, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:56:58 compute-0 podman[481166]: 2025-10-11 02:56:58.079789002 +0000 UTC m=+0.040928313 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:56:58 compute-0 systemd[1]: Started libpod-conmon-d1335065de72412e87b1fb35d6935b904e0202a993e68c6e422a10a9f307ca1f.scope.
Oct 11 02:56:58 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:56:58 compute-0 podman[481166]: 2025-10-11 02:56:58.248848117 +0000 UTC m=+0.209987408 container init d1335065de72412e87b1fb35d6935b904e0202a993e68c6e422a10a9f307ca1f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_lalande, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, ceph=True)
Oct 11 02:56:58 compute-0 podman[481166]: 2025-10-11 02:56:58.260062118 +0000 UTC m=+0.221201389 container start d1335065de72412e87b1fb35d6935b904e0202a993e68c6e422a10a9f307ca1f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_lalande, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 02:56:58 compute-0 ceph-mon[191930]: pgmap v2385: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:58 compute-0 podman[481166]: 2025-10-11 02:56:58.265591238 +0000 UTC m=+0.226730549 container attach d1335065de72412e87b1fb35d6935b904e0202a993e68c6e422a10a9f307ca1f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_lalande, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2)
Oct 11 02:56:58 compute-0 thirsty_lalande[481182]: 167 167
Oct 11 02:56:58 compute-0 systemd[1]: libpod-d1335065de72412e87b1fb35d6935b904e0202a993e68c6e422a10a9f307ca1f.scope: Deactivated successfully.
Oct 11 02:56:58 compute-0 podman[481166]: 2025-10-11 02:56:58.275322519 +0000 UTC m=+0.236461810 container died d1335065de72412e87b1fb35d6935b904e0202a993e68c6e422a10a9f307ca1f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_lalande, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:56:58 compute-0 systemd[1]: var-lib-containers-storage-overlay-a5846ae1574a6d26ad4e7a7d4917ceee562352e6197b3a489d092a0a79530266-merged.mount: Deactivated successfully.
Oct 11 02:56:58 compute-0 podman[481166]: 2025-10-11 02:56:58.338357859 +0000 UTC m=+0.299497140 container remove d1335065de72412e87b1fb35d6935b904e0202a993e68c6e422a10a9f307ca1f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=thirsty_lalande, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:56:58 compute-0 systemd[1]: libpod-conmon-d1335065de72412e87b1fb35d6935b904e0202a993e68c6e422a10a9f307ca1f.scope: Deactivated successfully.
Oct 11 02:56:58 compute-0 podman[481205]: 2025-10-11 02:56:58.572507836 +0000 UTC m=+0.060592480 container create 301f6d93755f7cff6074c2d190f49b9d38985742cc111ddceb913997840b4e4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=great_hugle, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:56:58 compute-0 systemd[1]: Started libpod-conmon-301f6d93755f7cff6074c2d190f49b9d38985742cc111ddceb913997840b4e4c.scope.
Oct 11 02:56:58 compute-0 podman[481205]: 2025-10-11 02:56:58.550769496 +0000 UTC m=+0.038854180 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:56:58 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:56:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1a93361750aae4c1d2b9e7966afdfaebb6c4c440bed96f740771dc4f589a409f/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:56:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1a93361750aae4c1d2b9e7966afdfaebb6c4c440bed96f740771dc4f589a409f/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:56:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1a93361750aae4c1d2b9e7966afdfaebb6c4c440bed96f740771dc4f589a409f/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:56:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1a93361750aae4c1d2b9e7966afdfaebb6c4c440bed96f740771dc4f589a409f/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:56:58 compute-0 podman[481205]: 2025-10-11 02:56:58.722927341 +0000 UTC m=+0.211012085 container init 301f6d93755f7cff6074c2d190f49b9d38985742cc111ddceb913997840b4e4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=great_hugle, ceph=True, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 02:56:58 compute-0 podman[481205]: 2025-10-11 02:56:58.742380862 +0000 UTC m=+0.230465546 container start 301f6d93755f7cff6074c2d190f49b9d38985742cc111ddceb913997840b4e4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=great_hugle, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:56:58 compute-0 podman[481205]: 2025-10-11 02:56:58.749167795 +0000 UTC m=+0.237252469 container attach 301f6d93755f7cff6074c2d190f49b9d38985742cc111ddceb913997840b4e4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=great_hugle, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0)
Oct 11 02:56:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2386: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:56:59 compute-0 great_hugle[481222]: {
Oct 11 02:56:59 compute-0 great_hugle[481222]:     "0": [
Oct 11 02:56:59 compute-0 great_hugle[481222]:         {
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "devices": [
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "/dev/loop3"
Oct 11 02:56:59 compute-0 great_hugle[481222]:             ],
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "lv_name": "ceph_lv0",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "lv_size": "21470642176",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "name": "ceph_lv0",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "tags": {
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.cluster_name": "ceph",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.crush_device_class": "",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.encrypted": "0",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.osd_id": "0",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.type": "block",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.vdo": "0"
Oct 11 02:56:59 compute-0 great_hugle[481222]:             },
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "type": "block",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "vg_name": "ceph_vg0"
Oct 11 02:56:59 compute-0 great_hugle[481222]:         }
Oct 11 02:56:59 compute-0 great_hugle[481222]:     ],
Oct 11 02:56:59 compute-0 great_hugle[481222]:     "1": [
Oct 11 02:56:59 compute-0 great_hugle[481222]:         {
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "devices": [
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "/dev/loop4"
Oct 11 02:56:59 compute-0 great_hugle[481222]:             ],
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "lv_name": "ceph_lv1",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "lv_size": "21470642176",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "name": "ceph_lv1",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "tags": {
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.cluster_name": "ceph",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.crush_device_class": "",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.encrypted": "0",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.osd_id": "1",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.type": "block",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.vdo": "0"
Oct 11 02:56:59 compute-0 great_hugle[481222]:             },
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "type": "block",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "vg_name": "ceph_vg1"
Oct 11 02:56:59 compute-0 great_hugle[481222]:         }
Oct 11 02:56:59 compute-0 great_hugle[481222]:     ],
Oct 11 02:56:59 compute-0 great_hugle[481222]:     "2": [
Oct 11 02:56:59 compute-0 great_hugle[481222]:         {
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "devices": [
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "/dev/loop5"
Oct 11 02:56:59 compute-0 great_hugle[481222]:             ],
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "lv_name": "ceph_lv2",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "lv_size": "21470642176",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "name": "ceph_lv2",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "tags": {
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.cluster_name": "ceph",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.crush_device_class": "",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.encrypted": "0",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.osd_id": "2",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.type": "block",
Oct 11 02:56:59 compute-0 great_hugle[481222]:                 "ceph.vdo": "0"
Oct 11 02:56:59 compute-0 great_hugle[481222]:             },
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "type": "block",
Oct 11 02:56:59 compute-0 great_hugle[481222]:             "vg_name": "ceph_vg2"
Oct 11 02:56:59 compute-0 great_hugle[481222]:         }
Oct 11 02:56:59 compute-0 great_hugle[481222]:     ]
Oct 11 02:56:59 compute-0 great_hugle[481222]: }
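great_hugle is the "ceph-volume lvm list --format json" call issued via sudo above: its stdout maps each OSD id to its logical volumes, with the authoritative metadata (cluster fsid, OSD fsid, encryption flag) carried in the ceph.* lv_tags. A minimal parsing sketch, assuming the JSON block above was captured to a file named lvm_list.json (a hypothetical name):

    import json

    # Summarize 'ceph-volume lvm list --format json' output like the block above.
    with open("lvm_list.json") as f:
        osds = json.load(f)

    for osd_id, lvs in sorted(osds.items(), key=lambda kv: int(kv[0])):
        for lv in lvs:
            tags = lv["tags"]
            print(f"osd.{osd_id}: {lv['lv_path']} on {','.join(lv['devices'])} "
                  f"fsid={tags['ceph.osd_fsid']} encrypted={tags['ceph.encrypted']}")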
Oct 11 02:56:59 compute-0 systemd[1]: libpod-301f6d93755f7cff6074c2d190f49b9d38985742cc111ddceb913997840b4e4c.scope: Deactivated successfully.
Oct 11 02:56:59 compute-0 podman[157119]: time="2025-10-11T02:56:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:56:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:56:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47834 "" "Go-http-client/1.1"
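The podman[157119] pair is the podman system service's API access log: a client lists all containers through the libpod REST endpoint and gets a 200 with a 47834-byte body. A hedged stdlib-only sketch of the same GET; /run/podman/podman.sock is the conventional rootful socket path and is an assumption, since the log does not show where the service listens:

    import http.client, json, socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        """Just enough HTTP-over-UNIX-socket for the libpod call logged above."""
        def __init__(self, path):
            super().__init__("localhost")
            self.unix_path = path

        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self.unix_path)

    conn = UnixHTTPConnection("/run/podman/podman.sock")  # assumed socket path
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    containers = json.loads(conn.getresponse().read())
    print(len(containers), "containers")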
Oct 11 02:56:59 compute-0 podman[481232]: 2025-10-11 02:56:59.786613195 +0000 UTC m=+0.104273366 container died 301f6d93755f7cff6074c2d190f49b9d38985742cc111ddceb913997840b4e4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=great_hugle, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:56:59 compute-0 systemd[1]: var-lib-containers-storage-overlay-1a93361750aae4c1d2b9e7966afdfaebb6c4c440bed96f740771dc4f589a409f-merged.mount: Deactivated successfully.
Oct 11 02:56:59 compute-0 podman[481232]: 2025-10-11 02:56:59.979432981 +0000 UTC m=+0.297093132 container remove 301f6d93755f7cff6074c2d190f49b9d38985742cc111ddceb913997840b4e4c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=great_hugle, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.license=GPLv2)
Oct 11 02:56:59 compute-0 systemd[1]: libpod-conmon-301f6d93755f7cff6074c2d190f49b9d38985742cc111ddceb913997840b4e4c.scope: Deactivated successfully.
Oct 11 02:56:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:56:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9086 "" "Go-http-client/1.1"
Oct 11 02:57:00 compute-0 sudo[481075]: pam_unix(sudo:session): session closed for user root
Oct 11 02:57:00 compute-0 sudo[481247]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:57:00 compute-0 sudo[481247]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:57:00 compute-0 sudo[481247]: pam_unix(sudo:session): session closed for user root
Oct 11 02:57:00 compute-0 sudo[481272]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:57:00 compute-0 sudo[481272]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:57:00 compute-0 sudo[481272]: pam_unix(sudo:session): session closed for user root
Oct 11 02:57:00 compute-0 ceph-mon[191930]: pgmap v2386: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:00 compute-0 sudo[481297]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:57:00 compute-0 sudo[481297]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:57:00 compute-0 sudo[481297]: pam_unix(sudo:session): session closed for user root
Oct 11 02:57:00 compute-0 sudo[481322]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:57:00 compute-0 sudo[481322]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:57:00 compute-0 nova_compute[356901]: 2025-10-11 02:57:00.724 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:01 compute-0 podman[481386]: 2025-10-11 02:57:01.185465682 +0000 UTC m=+0.084620950 container create ec140ae404f30cf7092efa23c4dc47443fb75ed4723b5e42a9def759ebf6cbb0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_blackburn, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:57:01 compute-0 podman[481386]: 2025-10-11 02:57:01.146704131 +0000 UTC m=+0.045859429 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:57:01 compute-0 systemd[1]: Started libpod-conmon-ec140ae404f30cf7092efa23c4dc47443fb75ed4723b5e42a9def759ebf6cbb0.scope.
Oct 11 02:57:01 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:57:01 compute-0 podman[481386]: 2025-10-11 02:57:01.365964615 +0000 UTC m=+0.265119863 container init ec140ae404f30cf7092efa23c4dc47443fb75ed4723b5e42a9def759ebf6cbb0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_blackburn, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:57:01 compute-0 podman[481386]: 2025-10-11 02:57:01.378748822 +0000 UTC m=+0.277904100 container start ec140ae404f30cf7092efa23c4dc47443fb75ed4723b5e42a9def759ebf6cbb0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_blackburn, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.39.3, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:57:01 compute-0 podman[481386]: 2025-10-11 02:57:01.38682172 +0000 UTC m=+0.285976958 container attach ec140ae404f30cf7092efa23c4dc47443fb75ed4723b5e42a9def759ebf6cbb0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_blackburn, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2)
Oct 11 02:57:01 compute-0 intelligent_blackburn[481402]: 167 167
Oct 11 02:57:01 compute-0 systemd[1]: libpod-ec140ae404f30cf7092efa23c4dc47443fb75ed4723b5e42a9def759ebf6cbb0.scope: Deactivated successfully.
Oct 11 02:57:01 compute-0 podman[481386]: 2025-10-11 02:57:01.391147731 +0000 UTC m=+0.290303009 container died ec140ae404f30cf7092efa23c4dc47443fb75ed4723b5e42a9def759ebf6cbb0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_blackburn, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:57:01 compute-0 openstack_network_exporter[374316]: ERROR   02:57:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:57:01 compute-0 openstack_network_exporter[374316]: ERROR   02:57:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:57:01 compute-0 openstack_network_exporter[374316]: ERROR   02:57:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:57:01 compute-0 openstack_network_exporter[374316]: ERROR   02:57:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:57:01 compute-0 openstack_network_exporter[374316]: ERROR   02:57:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:57:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2387: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:01 compute-0 systemd[1]: var-lib-containers-storage-overlay-470209c85387ecc4c6251014e4d7a0e7798d148c68ccc1d21ac2aa09b037b28a-merged.mount: Deactivated successfully.
Oct 11 02:57:01 compute-0 podman[481386]: 2025-10-11 02:57:01.460724175 +0000 UTC m=+0.359879443 container remove ec140ae404f30cf7092efa23c4dc47443fb75ed4723b5e42a9def759ebf6cbb0 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_blackburn, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:57:01 compute-0 systemd[1]: libpod-conmon-ec140ae404f30cf7092efa23c4dc47443fb75ed4723b5e42a9def759ebf6cbb0.scope: Deactivated successfully.
Oct 11 02:57:01 compute-0 podman[481427]: 2025-10-11 02:57:01.705317527 +0000 UTC m=+0.064626919 container create 07e6cff65c1d4d5e11355a65e50d77ea6708510c3761a23ca41a7ba49d87d328 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_newton, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:57:01 compute-0 systemd[1]: Started libpod-conmon-07e6cff65c1d4d5e11355a65e50d77ea6708510c3761a23ca41a7ba49d87d328.scope.
Oct 11 02:57:01 compute-0 podman[481427]: 2025-10-11 02:57:01.683989206 +0000 UTC m=+0.043298618 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:57:01 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:57:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e7948f69855f3aa20b0aedde0fd1c24c9fd911027ade4f107abfac626fc56251/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:57:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e7948f69855f3aa20b0aedde0fd1c24c9fd911027ade4f107abfac626fc56251/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:57:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e7948f69855f3aa20b0aedde0fd1c24c9fd911027ade4f107abfac626fc56251/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:57:01 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/e7948f69855f3aa20b0aedde0fd1c24c9fd911027ade4f107abfac626fc56251/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:57:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:57:01 compute-0 podman[481427]: 2025-10-11 02:57:01.817788609 +0000 UTC m=+0.177098011 container init 07e6cff65c1d4d5e11355a65e50d77ea6708510c3761a23ca41a7ba49d87d328 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_newton, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 02:57:01 compute-0 podman[481427]: 2025-10-11 02:57:01.841770536 +0000 UTC m=+0.201079918 container start 07e6cff65c1d4d5e11355a65e50d77ea6708510c3761a23ca41a7ba49d87d328 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_newton, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.vendor=CentOS)
Oct 11 02:57:01 compute-0 podman[481427]: 2025-10-11 02:57:01.846027061 +0000 UTC m=+0.205336453 container attach 07e6cff65c1d4d5e11355a65e50d77ea6708510c3761a23ca41a7ba49d87d328 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_newton, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 02:57:02 compute-0 ovn_controller[88370]: 2025-10-11T02:57:02Z|00197|memory_trim|INFO|Detected inactivity (last active 30001 ms ago): trimming memory
Oct 11 02:57:02 compute-0 ceph-mon[191930]: pgmap v2387: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:02 compute-0 nova_compute[356901]: 2025-10-11 02:57:02.766 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:02 compute-0 nice_newton[481443]: {
Oct 11 02:57:02 compute-0 nice_newton[481443]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:57:02 compute-0 nice_newton[481443]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:57:02 compute-0 nice_newton[481443]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:57:02 compute-0 nice_newton[481443]:         "osd_id": 1,
Oct 11 02:57:02 compute-0 nice_newton[481443]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:57:02 compute-0 nice_newton[481443]:         "type": "bluestore"
Oct 11 02:57:02 compute-0 nice_newton[481443]:     },
Oct 11 02:57:02 compute-0 nice_newton[481443]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:57:02 compute-0 nice_newton[481443]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:57:02 compute-0 nice_newton[481443]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:57:02 compute-0 nice_newton[481443]:         "osd_id": 2,
Oct 11 02:57:02 compute-0 nice_newton[481443]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:57:02 compute-0 nice_newton[481443]:         "type": "bluestore"
Oct 11 02:57:02 compute-0 nice_newton[481443]:     },
Oct 11 02:57:02 compute-0 nice_newton[481443]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:57:02 compute-0 nice_newton[481443]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:57:02 compute-0 nice_newton[481443]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:57:02 compute-0 nice_newton[481443]:         "osd_id": 0,
Oct 11 02:57:02 compute-0 nice_newton[481443]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:57:02 compute-0 nice_newton[481443]:         "type": "bluestore"
Oct 11 02:57:02 compute-0 nice_newton[481443]:     }
Oct 11 02:57:02 compute-0 nice_newton[481443]: }
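nice_newton is the matching "ceph-volume raw list --format json" call: unlike lvm list it keys on osd_uuid and reports the device-mapper path and the bluestore type. The two listings can be cross-checked by OSD fsid; a sketch assuming both JSON blocks were saved to the (hypothetical) files below:

    import json

    # Join the two ceph-volume views shown in this log by OSD fsid/uuid.
    lvm = json.load(open("lvm_list.json"))
    raw = json.load(open("raw_list.json"))

    by_fsid = {lv["tags"]["ceph.osd_fsid"]: lv
               for lvs in lvm.values() for lv in lvs}
    for osd_uuid, info in sorted(raw.items(), key=lambda kv: kv[1]["osd_id"]):
        lv = by_fsid.get(osd_uuid)
        print(f"osd.{info['osd_id']} ({info['type']}): {info['device']}"
              f" <- {lv['lv_path'] if lv else 'no matching LV'}")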
Oct 11 02:57:03 compute-0 systemd[1]: libpod-07e6cff65c1d4d5e11355a65e50d77ea6708510c3761a23ca41a7ba49d87d328.scope: Deactivated successfully.
Oct 11 02:57:03 compute-0 podman[481427]: 2025-10-11 02:57:03.043426163 +0000 UTC m=+1.402735595 container died 07e6cff65c1d4d5e11355a65e50d77ea6708510c3761a23ca41a7ba49d87d328 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_newton, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_REF=reef)
Oct 11 02:57:03 compute-0 systemd[1]: libpod-07e6cff65c1d4d5e11355a65e50d77ea6708510c3761a23ca41a7ba49d87d328.scope: Consumed 1.196s CPU time.
Oct 11 02:57:03 compute-0 systemd[1]: var-lib-containers-storage-overlay-e7948f69855f3aa20b0aedde0fd1c24c9fd911027ade4f107abfac626fc56251-merged.mount: Deactivated successfully.
Oct 11 02:57:03 compute-0 podman[481427]: 2025-10-11 02:57:03.146810701 +0000 UTC m=+1.506120093 container remove 07e6cff65c1d4d5e11355a65e50d77ea6708510c3761a23ca41a7ba49d87d328 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_newton, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:57:03 compute-0 systemd[1]: libpod-conmon-07e6cff65c1d4d5e11355a65e50d77ea6708510c3761a23ca41a7ba49d87d328.scope: Deactivated successfully.
Oct 11 02:57:03 compute-0 sudo[481322]: pam_unix(sudo:session): session closed for user root
Oct 11 02:57:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:57:03 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:57:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:57:03 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:57:03 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev f11da0cb-946e-4b26-8b61-2e4465f14916 does not exist
Oct 11 02:57:03 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev f5c49399-7f6a-4708-adb8-b25541c30e09 does not exist
Oct 11 02:57:03 compute-0 sudo[481488]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:57:03 compute-0 sudo[481488]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:57:03 compute-0 sudo[481488]: pam_unix(sudo:session): session closed for user root
Oct 11 02:57:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2388: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:03 compute-0 sudo[481513]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:57:03 compute-0 sudo[481513]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:57:03 compute-0 sudo[481513]: pam_unix(sudo:session): session closed for user root
Oct 11 02:57:04 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:57:04 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:57:05 compute-0 ceph-mon[191930]: pgmap v2388: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2389: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:05 compute-0 nova_compute[356901]: 2025-10-11 02:57:05.727 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:06 compute-0 ceph-mon[191930]: pgmap v2389: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:57:07 compute-0 podman[481539]: 2025-10-11 02:57:07.204856357 +0000 UTC m=+0.089455065 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.expose-services=, build-date=2025-08-20T13:12:41, name=ubi9-minimal, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.openshift.tags=minimal rhel9, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350, managed_by=edpm_ansible, vcs-type=git, io.buildah.version=1.33.7, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, maintainer=Red Hat, Inc., architecture=x86_64, config_id=edpm, distribution-scope=public, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., version=9.6, com.redhat.component=ubi9-minimal-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=openstack_network_exporter)
Oct 11 02:57:07 compute-0 podman[481538]: 2025-10-11 02:57:07.253319903 +0000 UTC m=+0.130169747 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:57:07 compute-0 podman[481540]: 2025-10-11 02:57:07.262113543 +0000 UTC m=+0.128986693 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
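
The three health_status events above are podman executing each container's configured healthcheck (the 'healthcheck' test listed in config_data) and logging the verdict alongside the container labels. A small sketch of extracting container_name and health_status from journal lines of this shape (the regex and the shortened sample line are illustrative, not a podman API):

    import re

    # Pull name=... and health_status=... out of podman's
    # "container health_status" events, as seen in the lines above.
    PATTERN = re.compile(
        r"container health_status .*?name=(?P<name>[^,]+)"
        r".*?health_status=(?P<status>[^,)]+)"
    )

    def parse_health(line):
        m = PATTERN.search(line)
        return (m.group("name"), m.group("status")) if m else None

    sample = ("Oct 11 02:57:07 compute-0 podman[481540]: container health_status "
              "7ab5da42c653 (image=quay.io/prometheus/node-exporter:v1.5.0, "
              "name=node_exporter, health_status=healthy, health_failing_streak=0)")
    print(parse_health(sample))  # ('node_exporter', 'healthy')
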
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2390: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0009191400908380543 of space, bias 1.0, pg target 0.2757420272514163 quantized to 32 (current 32)
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:57:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
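
Each pg_autoscaler pass above computes, per pool, the fraction of raw capacity in use, scales it by the pool's bias and the cluster's PG budget, and rounds the result to a power of two. The logged numbers are consistent with a budget of 300 PGs, i.e. the default 100 target PGs per OSD across the three OSDs inventoried earlier: for 'vms', 0.0005513950275118838 x 300 = 0.16541850825356513, exactly the logged pg target. A sketch of that arithmetic (the budget value and the simple power-of-two rounding are assumptions inferred from these lines; the real autoscaler adds per-pool floors and other rules, which is why 'vms' still lands on 32):

    # PG budget assumed from the log: 100 target PGs/OSD x 3 OSDs.
    PG_BUDGET = 300

    def pg_target(usage_fraction, bias=1.0):
        # "using X of space, bias B" -> raw PG target, as logged above.
        return usage_fraction * bias * PG_BUDGET

    def quantize_pow2(target, floor=1):
        # Round up to a power of two, subject to a per-pool floor; the
        # autoscaler's actual min/max rules per pool type are richer.
        n = floor
        while n < target:
            n *= 2
        return n

    print(pg_target(0.0005513950275118838))            # ~0.1654185 ('vms')
    print(pg_target(5.087256625643029e-07, bias=4.0))  # ~0.0006105 (cephfs meta)
    print(quantize_pow2(0.0021557249951162337))        # 1, matching pool '.mgr'
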
Oct 11 02:57:07 compute-0 nova_compute[356901]: 2025-10-11 02:57:07.770 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:08 compute-0 ceph-mon[191930]: pgmap v2390: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2391: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:10 compute-0 ceph-mon[191930]: pgmap v2391: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:10 compute-0 nova_compute[356901]: 2025-10-11 02:57:10.730 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2392: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:57:12 compute-0 ceph-mon[191930]: pgmap v2392: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:12 compute-0 nova_compute[356901]: 2025-10-11 02:57:12.776 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2393: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.875 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is larger than the number of worker threads available to execute them; the polling process can therefore be expected to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.877 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.878 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.879 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.882 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.882 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.885 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.885 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.885 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.886 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.886 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.887 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.887 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.888 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.887 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.888 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:57:13.887991) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.888 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.889 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.889 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.890 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.890 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.890 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.890 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.891 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.891 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.891 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{'inspect_vnics': {}}], pollster history [{'network.incoming.bytes': [<NovaLikeServer: test_0>]}], and discovery cache [{'local_instances': [<NovaLikeServer: test_0>]}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.895 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2856 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.896 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.896 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.896 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.897 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.897 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.897 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.897 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 24 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.897 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.897 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.897 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.897 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.898 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.898 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.898 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.898 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.898 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.898 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.899 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.899 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.899 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.899 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.899 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.899 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.899 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.900 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.900 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.900 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.898 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:57:13.897146) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.901 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:57:13.898310) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.901 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:57:13.899255) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.902 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:57:13.900137) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:13 compute-0 nova_compute[356901]: 2025-10-11 02:57:13.915 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.931 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.932 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.932 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.934 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.934 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.934 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.934 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.935 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.935 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:13.936 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:57:13.935144) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.000 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.000 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.001 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.001 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.001 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.001 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.001 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.001 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.001 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.002 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.002 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.002 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.002 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.002 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.002 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.003 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.003 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.003 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.003 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.003 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.004 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:57:14.001898) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.004 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:57:14.003148) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.004 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.005 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.006 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.006 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.006 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.006 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.007 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.007 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.007 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:57:14.006998) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.008 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.008 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.009 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
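Each meter above follows the same trace: discovery, a coordination check (a no-op here because no coordination group is configured), a heartbeat update, one _stats_to_sample volume per disk device of the instance, and a "Finished polling" marker. A condensed, purely illustrative rendering of that control flow (names are assumptions, not ceilometer's API):

    def run_pollster(name, coordination_group, resources, heartbeats, get_volume):
        # "Checking if we need coordination": with no group name there is no
        # hashring to consult, so every discovered resource is polled locally.
        if coordination_group is not None:
            raise NotImplementedError("hashring partitioning not sketched here")
        heartbeats.update(name)  # "Pollster heartbeat update: <name>"
        # One sample per resource/device, matching the repeated
        # "_stats_to_sample ... volume:" lines for multi-disk instances.
        return [(name, r, get_volume(name, r)) for r in resources]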
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.009 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.009 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.009 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.009 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.010 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.010 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.010 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.011 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.012 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.012 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.012 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.012 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.012 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.013 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.013 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.013 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.014 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.014 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.015 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.015 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.015 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:57:14.010091) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.015 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:57:14.013109) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.015 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.016 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.016 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:57:14.016174) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.016 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.056 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.057 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
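The power.state volume of 1 corresponds to libvirt's code for a running domain. The virDomainState values, for reference when reading these samples:

    # libvirt virDomainState codes (public enum); power.state samples carry
    # the numeric code, so volume 1 here means the guest is running.
    LIBVIRT_DOMAIN_STATE = {
        0: "nostate", 1: "running", 2: "blocked", 3: "paused",
        4: "shutdown", 5: "shutoff", 6: "crashed", 7: "pmsuspended",
    }
    assert LIBVIRT_DOMAIN_STATE[1] == "running"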
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.057 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.057 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.057 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.057 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.058 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.058 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.058 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.058 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:57:14.058055) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.059 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.059 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.059 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.059 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.059 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.059 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.060 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.060 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.060 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.060 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.060 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:57:14.059948) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.060 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
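Unlike the delta meters, the *.rate pollsters are skipped this cycle because discovery returned no resources they have not already handled. An illustrative short-circuit in the same spirit (hypothetical helper, not ceilometer's actual check):

    def maybe_run(pollster_name, discovered, already_polled, run):
        # Mirrors "Skip pollster <name>, no new resources found this cycle":
        # bail out before heartbeat/sampling when nothing new was discovered.
        new = [r for r in discovered if r not in already_polled]
        if not new:
            print(f"Skip pollster {pollster_name}, no new resources found this cycle")
            return []
        return run(new)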
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.061 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.061 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.061 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.061 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.061 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.062 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.062 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:57:14.061627) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.062 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.062 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.062 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.062 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.063 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.063 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.063 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:57:14.063071) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.063 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.064 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.064 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.064 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.064 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.064 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.064 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.065 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.065 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.065 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.065 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.065 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.065 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.066 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.066 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.066 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.066 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.066 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.066 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:57:14.064496) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.067 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.067 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.067 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:57:14.065698) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.067 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.067 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.067 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.068 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.068 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.068 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.068 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.068 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.069 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.069 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
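The three disk.device.allocation volumes are per-device byte counts for instance 0cc56d17-ec3a-4408-bccb-91b29427379e: two fully allocated 1 GiB disks plus one small device (likely a config drive; these DEBUG lines omit device names). A quick unit check:

    # 2**30 bytes = 1 GiB; 485376 bytes = 474 KiB exactly.
    for volume in (1073741824, 1073741824, 485376):
        print(volume, "bytes ->", volume / 2**30, "GiB")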
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.069 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:57:14.067010) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.070 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.070 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:57:14.068448) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.070 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.070 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.070 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.070 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.070 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.071 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.071 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.071 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.071 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:57:14.070581) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.072 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.072 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.072 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.072 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 68680000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.072 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
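The cpu meter is cumulative guest CPU time in nanoseconds, so the sample is easy to sanity-check by hand:

    cpu_ns = 68_680_000_000   # volume from the sample above
    print(cpu_ns / 1e9)       # 68.68 seconds of CPU time since the guest started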
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.073 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.073 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.073 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.073 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.073 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.073 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2412 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.074 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.074 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.074 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.074 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.074 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:57:14.072358) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.075 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.075 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.075 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:57:14.073670) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.075 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.80859375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.075 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
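memory.usage is reported in MB, and the long fraction is what a KiB-granular counter (as libvirt exposes) looks like after division by 1024. A worked check, assuming an underlying value of 49980 KiB:

    kib = 49980            # assumed raw counter in KiB
    print(kib / 1024)      # 48.80859375 MB, matching the sample above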
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.075 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.076 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.076 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:57:14.075111) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.076 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.076 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.077 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.077 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.077 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.078 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.078 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.078 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.078 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.079 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.079 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.079 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.079 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.080 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.080 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.080 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.080 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.080 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.080 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.081 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.081 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.081 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.081 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.081 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.081 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:57:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:57:14.081 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
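The run of "Finished processing pollster" lines closes out one complete polling cycle for this task's 26 meters. When auditing such cycles after the fact, a small journal post-processing helper (illustrative only, not part of ceilometer) can tally which pollsters completed:

    import re

    FINISHED = re.compile(r"Finished processing pollster \[(?P<name>[^\]]+)\]")

    def finished_pollsters(lines):
        # Collect meter names from "Finished processing pollster [...]" lines.
        return [m.group("name") for line in lines if (m := FINISHED.search(line))]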
Oct 11 02:57:14 compute-0 ceph-mon[191930]: pgmap v2393: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:15 compute-0 podman[481598]: 2025-10-11 02:57:15.25403627 +0000 UTC m=+0.141473136 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vcs-type=git, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.29.0, maintainer=Red Hat, Inc., build-date=2024-09-18T21:23:30, io.openshift.tags=base rhel9, release-0.7.12=, vendor=Red Hat, Inc., com.redhat.component=ubi9-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, container_name=kepler, config_id=edpm, distribution-scope=public, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, architecture=x86_64, io.k8s.display-name=Red Hat Universal Base Image 9, name=ubi9, summary=Provides the latest release of Red Hat Universal Base Image 9., version=9.4, release=1214.1726694543, io.openshift.expose-services=)
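podman's periodic healthcheck events embed the container's full config_data, which makes these single-line records bulky but self-describing. For monitoring, the three fields that matter are name, health_status, and health_failing_streak; a small extraction helper (illustrative post-processing, not a podman API):

    import re

    HEALTH = re.compile(r"name=(?P<name>[^,]+), health_status=(?P<status>[^,]+), "
                        r"health_failing_streak=(?P<streak>\d+)")

    def parse_health(line):
        # Returns (container_name, status, failing_streak) or None.
        m = HEALTH.search(line)
        return (m.group("name"), m.group("status"), int(m.group("streak"))) if m else None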
Oct 11 02:57:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2394: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:15 compute-0 nova_compute[356901]: 2025-10-11 02:57:15.734 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:16 compute-0 ceph-mon[191930]: pgmap v2394: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
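The mon's cache autotuner logs its targets in raw bytes; converted, they are easier to compare against the daemon's memory budget:

    # Values from the _set_new_cache_sizes line above.
    for label, b in (("cache_size", 1020054731), ("inc_alloc", 348127232),
                     ("full_alloc", 348127232), ("kv_alloc", 318767104)):
        print(f"{label}: {b / 2**20:.0f} MiB")   # ~973, 332, 332, 304 MiB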
Oct 11 02:57:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2395: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:17 compute-0 nova_compute[356901]: 2025-10-11 02:57:17.781 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:17 compute-0 nova_compute[356901]: 2025-10-11 02:57:17.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
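ComputeManager._poll_volume_usage and the other tasks logged nearby are driven by oslo.service's periodic_task machinery. A minimal standalone sketch of that mechanism; the 60-second spacing and the method body are assumptions for illustration:

    from oslo_config import cfg
    from oslo_service import periodic_task

    class Manager(periodic_task.PeriodicTasks):
        @periodic_task.periodic_task(spacing=60)
        def _poll_volume_usage(self, context):
            pass  # nova's real task aggregates per-volume I/O counters

    mgr = Manager(cfg.CONF)
    # The service loop then calls mgr.run_periodic_tasks(context) on a timer,
    # producing "Running periodic task ..." log lines like the one above.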
Oct 11 02:57:18 compute-0 ceph-mon[191930]: pgmap v2395: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2396: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:20 compute-0 podman[481617]: 2025-10-11 02:57:20.224524563 +0000 UTC m=+0.111732118 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:57:20 compute-0 podman[481619]: 2025-10-11 02:57:20.277608297 +0000 UTC m=+0.145172382 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_id=edpm, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']})
Oct 11 02:57:20 compute-0 podman[481621]: 2025-10-11 02:57:20.277998084 +0000 UTC m=+0.136506126 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, managed_by=edpm_ansible, container_name=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009)
Oct 11 02:57:20 compute-0 podman[481618]: 2025-10-11 02:57:20.298892043 +0000 UTC m=+0.175897995 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:57:20 compute-0 ceph-mon[191930]: pgmap v2396: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:20 compute-0 nova_compute[356901]: 2025-10-11 02:57:20.739 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:20 compute-0 nova_compute[356901]: 2025-10-11 02:57:20.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:57:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2397: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:57:22 compute-0 ceph-mon[191930]: pgmap v2397: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:22 compute-0 nova_compute[356901]: 2025-10-11 02:57:22.786 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2398: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:23 compute-0 nova_compute[356901]: 2025-10-11 02:57:23.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:57:24 compute-0 ceph-mon[191930]: pgmap v2398: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2399: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:25 compute-0 nova_compute[356901]: 2025-10-11 02:57:25.742 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:25 compute-0 nova_compute[356901]: 2025-10-11 02:57:25.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:57:25 compute-0 nova_compute[356901]: 2025-10-11 02:57:25.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:57:26 compute-0 ceph-mon[191930]: pgmap v2399: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:57:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:57:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:57:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:57:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:57:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:57:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:57:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2400: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:27 compute-0 nova_compute[356901]: 2025-10-11 02:57:27.791 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:57:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3808740793' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:57:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:57:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/3808740793' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:57:28 compute-0 podman[481703]: 2025-10-11 02:57:28.250489675 +0000 UTC m=+0.139748771 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, container_name=multipathd, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 02:57:28 compute-0 podman[481704]: 2025-10-11 02:57:28.266471943 +0000 UTC m=+0.146957067 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:57:28 compute-0 ceph-mon[191930]: pgmap v2400: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3808740793' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:57:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/3808740793' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:57:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2401: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:29 compute-0 podman[157119]: time="2025-10-11T02:57:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:57:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:57:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:57:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:57:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9081 "" "Go-http-client/1.1"
Oct 11 02:57:29 compute-0 nova_compute[356901]: 2025-10-11 02:57:29.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:57:29 compute-0 nova_compute[356901]: 2025-10-11 02:57:29.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:57:29 compute-0 nova_compute[356901]: 2025-10-11 02:57:29.895 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:57:29 compute-0 nova_compute[356901]: 2025-10-11 02:57:29.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:57:30 compute-0 ceph-mon[191930]: pgmap v2401: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:30 compute-0 nova_compute[356901]: 2025-10-11 02:57:30.745 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:30 compute-0 nova_compute[356901]: 2025-10-11 02:57:30.907 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:57:30 compute-0 nova_compute[356901]: 2025-10-11 02:57:30.908 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:57:30 compute-0 nova_compute[356901]: 2025-10-11 02:57:30.909 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:57:30 compute-0 nova_compute[356901]: 2025-10-11 02:57:30.910 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:57:31 compute-0 openstack_network_exporter[374316]: ERROR   02:57:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:57:31 compute-0 openstack_network_exporter[374316]: ERROR   02:57:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:57:31 compute-0 openstack_network_exporter[374316]: ERROR   02:57:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:57:31 compute-0 openstack_network_exporter[374316]: ERROR   02:57:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:57:31 compute-0 openstack_network_exporter[374316]: ERROR   02:57:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:57:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2402: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:57:32 compute-0 sshd-session[481738]: Accepted publickey for zuul from 192.168.122.10 port 59922 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 02:57:32 compute-0 systemd-logind[804]: New session 65 of user zuul.
Oct 11 02:57:32 compute-0 systemd[1]: Started Session 65 of User zuul.
Oct 11 02:57:32 compute-0 sshd-session[481738]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 02:57:32 compute-0 sudo[481742]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt'
Oct 11 02:57:32 compute-0 sudo[481742]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:57:32 compute-0 ceph-mon[191930]: pgmap v2402: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:32 compute-0 nova_compute[356901]: 2025-10-11 02:57:32.796 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:33 compute-0 nova_compute[356901]: 2025-10-11 02:57:33.220 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:57:33 compute-0 nova_compute[356901]: 2025-10-11 02:57:33.241 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:57:33 compute-0 nova_compute[356901]: 2025-10-11 02:57:33.242 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:57:33 compute-0 nova_compute[356901]: 2025-10-11 02:57:33.243 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:57:33 compute-0 nova_compute[356901]: 2025-10-11 02:57:33.280 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_power_states run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:57:33 compute-0 nova_compute[356901]: 2025-10-11 02:57:33.309 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Triggering sync for uuid 0cc56d17-ec3a-4408-bccb-91b29427379e _sync_power_states /usr/lib/python3.9/site-packages/nova/compute/manager.py:10268
Oct 11 02:57:33 compute-0 nova_compute[356901]: 2025-10-11 02:57:33.310 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "0cc56d17-ec3a-4408-bccb-91b29427379e" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:57:33 compute-0 nova_compute[356901]: 2025-10-11 02:57:33.311 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "0cc56d17-ec3a-4408-bccb-91b29427379e" acquired by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:57:33 compute-0 nova_compute[356901]: 2025-10-11 02:57:33.340 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "0cc56d17-ec3a-4408-bccb-91b29427379e" "released" by "nova.compute.manager.ComputeManager._sync_power_states.<locals>._sync.<locals>.query_driver_power_state_and_sync" :: held 0.029s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:57:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2403: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:33 compute-0 nova_compute[356901]: 2025-10-11 02:57:33.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:57:33 compute-0 nova_compute[356901]: 2025-10-11 02:57:33.923 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:57:33 compute-0 nova_compute[356901]: 2025-10-11 02:57:33.923 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:57:33 compute-0 nova_compute[356901]: 2025-10-11 02:57:33.924 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:57:33 compute-0 nova_compute[356901]: 2025-10-11 02:57:33.924 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:57:33 compute-0 nova_compute[356901]: 2025-10-11 02:57:33.924 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:57:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:57:34 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3072773695' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:57:34 compute-0 nova_compute[356901]: 2025-10-11 02:57:34.427 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.503s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:57:34 compute-0 nova_compute[356901]: 2025-10-11 02:57:34.534 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:57:34 compute-0 nova_compute[356901]: 2025-10-11 02:57:34.534 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:57:34 compute-0 nova_compute[356901]: 2025-10-11 02:57:34.534 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:57:34 compute-0 ceph-mon[191930]: pgmap v2403: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:34 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3072773695' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:57:35 compute-0 nova_compute[356901]: 2025-10-11 02:57:35.033 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:57:35 compute-0 nova_compute[356901]: 2025-10-11 02:57:35.035 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3611MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:57:35 compute-0 nova_compute[356901]: 2025-10-11 02:57:35.036 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:57:35 compute-0 nova_compute[356901]: 2025-10-11 02:57:35.037 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:57:35 compute-0 nova_compute[356901]: 2025-10-11 02:57:35.154 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:57:35 compute-0 nova_compute[356901]: 2025-10-11 02:57:35.155 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:57:35 compute-0 nova_compute[356901]: 2025-10-11 02:57:35.155 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:57:35 compute-0 nova_compute[356901]: 2025-10-11 02:57:35.207 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:57:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2404: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:57:35 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3757174418' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:57:35 compute-0 nova_compute[356901]: 2025-10-11 02:57:35.712 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.505s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:57:35 compute-0 nova_compute[356901]: 2025-10-11 02:57:35.725 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:57:35 compute-0 nova_compute[356901]: 2025-10-11 02:57:35.745 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:57:35 compute-0 nova_compute[356901]: 2025-10-11 02:57:35.749 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:57:35 compute-0 nova_compute[356901]: 2025-10-11 02:57:35.749 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.713s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:57:35 compute-0 nova_compute[356901]: 2025-10-11 02:57:35.750 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:35 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15541 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:36 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15543 -' entity='client.admin' cmd=[{"prefix": "crash ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:36 compute-0 ceph-mon[191930]: pgmap v2404: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:36 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3757174418' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:57:36 compute-0 ceph-mon[191930]: from='client.15541 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #117. Immutable memtables: 0.
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:57:36.819328) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 69] Flushing memtable with next log file: 117
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151456819372, "job": 69, "event": "flush_started", "num_memtables": 1, "num_entries": 887, "num_deletes": 252, "total_data_size": 1180286, "memory_usage": 1200856, "flush_reason": "Manual Compaction"}
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 69] Level-0 flush table #118: started
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151456831481, "cf_name": "default", "job": 69, "event": "table_file_creation", "file_number": 118, "file_size": 735014, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 48619, "largest_seqno": 49505, "table_properties": {"data_size": 731403, "index_size": 1327, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1221, "raw_key_size": 9856, "raw_average_key_size": 20, "raw_value_size": 723543, "raw_average_value_size": 1536, "num_data_blocks": 60, "num_entries": 471, "num_filter_entries": 471, "num_deletions": 252, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760151379, "oldest_key_time": 1760151379, "file_creation_time": 1760151456, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 118, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 69] Flush lasted 12284 microseconds, and 6324 cpu microseconds.
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:57:36.831610) [db/flush_job.cc:967] [default] [JOB 69] Level-0 flush table #118: 735014 bytes OK
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:57:36.831640) [db/memtable_list.cc:519] [default] Level-0 commit table #118 started
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:57:36.834617) [db/memtable_list.cc:722] [default] Level-0 commit table #118: memtable #1 done
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:57:36.834645) EVENT_LOG_v1 {"time_micros": 1760151456834635, "job": 69, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:57:36.834669) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 69] Try to delete WAL files size 1175949, prev total WAL file size 1175949, number of live WAL files 2.
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000114.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:57:36.836083) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6D6772737461740032303034' seq:72057594037927935, type:22 .. '6D6772737461740032323535' seq:0, type:0; will stop at (end)
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 70] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 69 Base level 0, inputs: [118(717KB)], [116(9161KB)]
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151456836156, "job": 70, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [118], "files_L6": [116], "score": -1, "input_data_size": 10116578, "oldest_snapshot_seqno": -1}
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 70] Generated table #119: 6223 keys, 7250525 bytes, temperature: kUnknown
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151456893382, "cf_name": "default", "job": 70, "event": "table_file_creation", "file_number": 119, "file_size": 7250525, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 7213177, "index_size": 20662, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 15621, "raw_key_size": 162924, "raw_average_key_size": 26, "raw_value_size": 7104741, "raw_average_value_size": 1141, "num_data_blocks": 816, "num_entries": 6223, "num_filter_entries": 6223, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760151456, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 119, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:57:36.893749) [db/compaction/compaction_job.cc:1663] [default] [JOB 70] Compacted 1@0 + 1@6 files to L6 => 7250525 bytes
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:57:36.899267) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 176.5 rd, 126.5 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(0.7, 8.9 +0.0 blob) out(6.9 +0.0 blob), read-write-amplify(23.6) write-amplify(9.9) OK, records in: 6711, records dropped: 488 output_compression: NoCompression
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:57:36.899293) EVENT_LOG_v1 {"time_micros": 1760151456899280, "job": 70, "event": "compaction_finished", "compaction_time_micros": 57332, "compaction_time_cpu_micros": 29343, "output_level": 6, "num_output_files": 1, "total_output_size": 7250525, "num_input_records": 6711, "num_output_records": 6223, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000118.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151456899586, "job": 70, "event": "table_file_deletion", "file_number": 118}
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000116.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151456901895, "job": 70, "event": "table_file_deletion", "file_number": 116}
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:57:36.835889) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:57:36.902063) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:57:36.902069) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:57:36.902072) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:57:36.902075) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:57:36 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:57:36.902078) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:57:37 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status"} v 0) v1
Oct 11 02:57:37 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/538051887' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
Oct 11 02:57:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2405: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:37 compute-0 nova_compute[356901]: 2025-10-11 02:57:37.801 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:37 compute-0 ceph-mon[191930]: from='client.15543 -' entity='client.admin' cmd=[{"prefix": "crash ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:37 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/538051887' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
Oct 11 02:57:38 compute-0 podman[482036]: 2025-10-11 02:57:38.091567621 +0000 UTC m=+0.110414669 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=edpm, maintainer=OpenStack Kubernetes Operator team)
Oct 11 02:57:38 compute-0 podman[482040]: 2025-10-11 02:57:38.112324604 +0000 UTC m=+0.112452053 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.buildah.version=1.33.7, config_id=edpm, name=ubi9-minimal, url=https://catalog.redhat.com/en/search?searchType=containers, io.openshift.tags=minimal rhel9, architecture=x86_64, container_name=openstack_network_exporter, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vcs-type=git, version=9.6, build-date=2025-08-20T13:12:41, vendor=Red Hat, Inc., maintainer=Red Hat, Inc., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.expose-services=, distribution-scope=public, com.redhat.component=ubi9-minimal-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, release=1755695350)
Oct 11 02:57:38 compute-0 podman[482041]: 2025-10-11 02:57:38.118114514 +0000 UTC m=+0.125743826 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:57:38 compute-0 ceph-mon[191930]: pgmap v2405: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2406: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:40 compute-0 nova_compute[356901]: 2025-10-11 02:57:40.751 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:40 compute-0 ceph-mon[191930]: pgmap v2406: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:41 compute-0 ovs-vsctl[482140]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Oct 11 02:57:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2407: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:57:42 compute-0 virtqemud[153560]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Oct 11 02:57:42 compute-0 virtqemud[153560]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Oct 11 02:57:42 compute-0 virtqemud[153560]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Oct 11 02:57:42 compute-0 nova_compute[356901]: 2025-10-11 02:57:42.806 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:42 compute-0 ceph-mon[191930]: pgmap v2407: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:43 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: cache status {prefix=cache status} (starting...)
Oct 11 02:57:43 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: client ls {prefix=client ls} (starting...)
Oct 11 02:57:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2408: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:43 compute-0 lvm[482498]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Oct 11 02:57:43 compute-0 lvm[482498]: VG ceph_vg1 finished
Oct 11 02:57:43 compute-0 lvm[482504]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Oct 11 02:57:43 compute-0 lvm[482504]: VG ceph_vg2 finished
Oct 11 02:57:43 compute-0 lvm[482517]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Oct 11 02:57:43 compute-0 lvm[482517]: VG ceph_vg0 finished
Oct 11 02:57:44 compute-0 kernel: block dm-1: the capability attribute has been deprecated.
Oct 11 02:57:44 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: damage ls {prefix=damage ls} (starting...)
Oct 11 02:57:44 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: dump loads {prefix=dump loads} (starting...)
Oct 11 02:57:44 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15547 -' entity='client.admin' cmd=[{"prefix": "balancer eval", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:44 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: dump tree {prefix=dump tree,root=/} (starting...)
Oct 11 02:57:44 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: dump_blocked_ops {prefix=dump_blocked_ops} (starting...)
Oct 11 02:57:44 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: dump_historic_ops {prefix=dump_historic_ops} (starting...)
Oct 11 02:57:44 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15549 -' entity='client.admin' cmd=[{"prefix": "balancer status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:44 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: dump_historic_ops_by_duration {prefix=dump_historic_ops_by_duration} (starting...)
Oct 11 02:57:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "report"} v 0) v1
Oct 11 02:57:45 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3880602206' entity='client.admin' cmd=[{"prefix": "report"}]: dispatch
Oct 11 02:57:45 compute-0 ceph-mon[191930]: pgmap v2408: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:45 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: dump_ops_in_flight {prefix=dump_ops_in_flight} (starting...)
Oct 11 02:57:45 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: get subtrees {prefix=get subtrees} (starting...)
Oct 11 02:57:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2409: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:57:45 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1923816738' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:57:45 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: ops {prefix=ops} (starting...)
Oct 11 02:57:45 compute-0 nova_compute[356901]: 2025-10-11 02:57:45.753 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:45 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15557 -' entity='client.admin' cmd=[{"prefix": "healthcheck history ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:45 compute-0 ceph-mgr[192233]: mgr.server reply reply (95) Operation not supported Module 'prometheus' is not enabled/loaded (required by command 'healthcheck history ls'): use `ceph mgr module enable prometheus` to enable it
Oct 11 02:57:45 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T02:57:45.775+0000 7fe891be6640 -1 mgr.server reply reply (95) Operation not supported Module 'prometheus' is not enabled/loaded (required by command 'healthcheck history ls'): use `ceph mgr module enable prometheus` to enable it
Oct 11 02:57:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config log"} v 0) v1
Oct 11 02:57:45 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/489067154' entity='client.admin' cmd=[{"prefix": "config log"}]: dispatch
Oct 11 02:57:46 compute-0 ceph-mon[191930]: from='client.15547 -' entity='client.admin' cmd=[{"prefix": "balancer eval", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:46 compute-0 ceph-mon[191930]: from='client.15549 -' entity='client.admin' cmd=[{"prefix": "balancer status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:46 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3880602206' entity='client.admin' cmd=[{"prefix": "report"}]: dispatch
Oct 11 02:57:46 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1923816738' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:57:46 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/489067154' entity='client.admin' cmd=[{"prefix": "config log"}]: dispatch
Oct 11 02:57:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "log last", "channel": "cephadm"} v 0) v1
Oct 11 02:57:46 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/179041352' entity='client.admin' cmd=[{"prefix": "log last", "channel": "cephadm"}]: dispatch
Oct 11 02:57:46 compute-0 podman[482856]: 2025-10-11 02:57:46.253541296 +0000 UTC m=+0.143681323 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vendor=Red Hat, Inc., io.openshift.tags=base rhel9, maintainer=Red Hat, Inc., architecture=x86_64, build-date=2024-09-18T21:23:30, io.buildah.version=1.29.0, io.openshift.expose-services=, release=1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, version=9.4, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, distribution-scope=public, release-0.7.12=, summary=Provides the latest release of Red Hat Universal Base Image 9., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-container, io.k8s.display-name=Red Hat Universal Base Image 9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, vcs-type=git, container_name=kepler, managed_by=edpm_ansible, config_id=edpm, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9)
Oct 11 02:57:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config-key dump"} v 0) v1
Oct 11 02:57:46 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/261012368' entity='client.admin' cmd=[{"prefix": "config-key dump"}]: dispatch
Oct 11 02:57:46 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: session ls {prefix=session ls} (starting...)
Oct 11 02:57:46 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: status {prefix=status} (starting...)
Oct 11 02:57:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr dump"} v 0) v1
Oct 11 02:57:46 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/4172573539' entity='client.admin' cmd=[{"prefix": "mgr dump"}]: dispatch
Oct 11 02:57:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:57:46 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15567 -' entity='client.admin' cmd=[{"prefix": "crash ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:47 compute-0 ceph-mon[191930]: pgmap v2409: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:47 compute-0 ceph-mon[191930]: from='client.15557 -' entity='client.admin' cmd=[{"prefix": "healthcheck history ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:47 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/179041352' entity='client.admin' cmd=[{"prefix": "log last", "channel": "cephadm"}]: dispatch
Oct 11 02:57:47 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/261012368' entity='client.admin' cmd=[{"prefix": "config-key dump"}]: dispatch
Oct 11 02:57:47 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/4172573539' entity='client.admin' cmd=[{"prefix": "mgr dump"}]: dispatch
Oct 11 02:57:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr metadata"} v 0) v1
Oct 11 02:57:47 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/42632023' entity='client.admin' cmd=[{"prefix": "mgr metadata"}]: dispatch
Oct 11 02:57:47 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15571 -' entity='client.admin' cmd=[{"prefix": "crash stat", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2410: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr module ls"} v 0) v1
Oct 11 02:57:47 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/178529867' entity='client.admin' cmd=[{"prefix": "mgr module ls"}]: dispatch
Oct 11 02:57:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "features"} v 0) v1
Oct 11 02:57:47 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1521839415' entity='client.admin' cmd=[{"prefix": "features"}]: dispatch
Oct 11 02:57:47 compute-0 nova_compute[356901]: 2025-10-11 02:57:47.812 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr services"} v 0) v1
Oct 11 02:57:48 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1512380716' entity='client.admin' cmd=[{"prefix": "mgr services"}]: dispatch
Oct 11 02:57:48 compute-0 ceph-mon[191930]: from='client.15567 -' entity='client.admin' cmd=[{"prefix": "crash ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:48 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/42632023' entity='client.admin' cmd=[{"prefix": "mgr metadata"}]: dispatch
Oct 11 02:57:48 compute-0 ceph-mon[191930]: from='client.15571 -' entity='client.admin' cmd=[{"prefix": "crash stat", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:48 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/178529867' entity='client.admin' cmd=[{"prefix": "mgr module ls"}]: dispatch
Oct 11 02:57:48 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1521839415' entity='client.admin' cmd=[{"prefix": "features"}]: dispatch
Oct 11 02:57:48 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1512380716' entity='client.admin' cmd=[{"prefix": "mgr services"}]: dispatch
Oct 11 02:57:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "health", "detail": "detail"} v 0) v1
Oct 11 02:57:48 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2085166778' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
Oct 11 02:57:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr stat"} v 0) v1
Oct 11 02:57:48 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1027593039' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
Oct 11 02:57:48 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15583 -' entity='client.admin' cmd=[{"prefix": "insights", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:48 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T02:57:48.688+0000 7fe891be6640 -1 mgr.server reply reply (95) Operation not supported Module 'insights' is not enabled/loaded (required by command 'insights'): use `ceph mgr module enable insights` to enable it
Oct 11 02:57:48 compute-0 ceph-mgr[192233]: mgr.server reply reply (95) Operation not supported Module 'insights' is not enabled/loaded (required by command 'insights'): use `ceph mgr module enable insights` to enable it
Oct 11 02:57:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr versions"} v 0) v1
Oct 11 02:57:48 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2582309682' entity='client.admin' cmd=[{"prefix": "mgr versions"}]: dispatch
Oct 11 02:57:49 compute-0 ceph-mon[191930]: pgmap v2410: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:49 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2085166778' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
Oct 11 02:57:49 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1027593039' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
Oct 11 02:57:49 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2582309682' entity='client.admin' cmd=[{"prefix": "mgr versions"}]: dispatch
Oct 11 02:57:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "log last", "num": 10000, "level": "debug", "channel": "audit"} v 0) v1
Oct 11 02:57:49 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1783926262' entity='client.admin' cmd=[{"prefix": "log last", "num": 10000, "level": "debug", "channel": "audit"}]: dispatch
Oct 11 02:57:49 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15589 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2411: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "log last", "num": 10000, "level": "debug", "channel": "cluster"} v 0) v1
Oct 11 02:57:49 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3824960419' entity='client.admin' cmd=[{"prefix": "log last", "num": 10000, "level": "debug", "channel": "cluster"}]: dispatch
Oct 11 02:57:49 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15593 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:50 compute-0 ceph-mon[191930]: from='client.15583 -' entity='client.admin' cmd=[{"prefix": "insights", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:50 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1783926262' entity='client.admin' cmd=[{"prefix": "log last", "num": 10000, "level": "debug", "channel": "audit"}]: dispatch
Oct 11 02:57:50 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3824960419' entity='client.admin' cmd=[{"prefix": "log last", "num": 10000, "level": "debug", "channel": "cluster"}]: dispatch
Oct 11 02:57:50 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15597 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr dump"} v 0) v1
Oct 11 02:57:50 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2431724796' entity='client.admin' cmd=[{"prefix": "mgr dump"}]: dispatch
Oct 11 02:57:50 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15599 -' entity='client.admin' cmd=[{"prefix": "orch ls", "export": true, "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr metadata"} v 0) v1
Oct 11 02:57:50 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1363707742' entity='client.admin' cmd=[{"prefix": "mgr metadata"}]: dispatch
Oct 11 02:57:50 compute-0 nova_compute[356901]: 2025-10-11 02:57:50.757 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:03.147427+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:04.147694+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1360638 data_alloc: 234881024 data_used: 25800704
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9159000/0x0/0x4ffc00000, data 0x285a17d/0x2925000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:05.147985+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:06.148421+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:07.148763+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9159000/0x0/0x4ffc00000, data 0x285a17d/0x2925000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:08.149084+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:09.149413+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1360638 data_alloc: 234881024 data_used: 25800704
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:10.149814+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9159000/0x0/0x4ffc00000, data 0x285a17d/0x2925000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:11.150128+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:12.150482+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:13.150718+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9159000/0x0/0x4ffc00000, data 0x285a17d/0x2925000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:14.150997+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1360638 data_alloc: 234881024 data_used: 25800704
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:15.151306+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:16.151537+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9159000/0x0/0x4ffc00000, data 0x285a17d/0x2925000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:17.151732+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9159000/0x0/0x4ffc00000, data 0x285a17d/0x2925000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:18.152025+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:19.152393+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1360638 data_alloc: 234881024 data_used: 25800704
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:20.152682+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9159000/0x0/0x4ffc00000, data 0x285a17d/0x2925000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:21.153138+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:22.153465+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9159000/0x0/0x4ffc00000, data 0x285a17d/0x2925000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:23.153811+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:24.154148+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1360638 data_alloc: 234881024 data_used: 25800704
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:25.154523+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9159000/0x0/0x4ffc00000, data 0x285a17d/0x2925000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:26.154734+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108142592 unmapped: 3661824 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:27.155036+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108150784 unmapped: 3653632 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9159000/0x0/0x4ffc00000, data 0x285a17d/0x2925000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:28.155596+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108150784 unmapped: 3653632 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:29.155833+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108150784 unmapped: 3653632 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1360638 data_alloc: 234881024 data_used: 25800704
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5817c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b5817c00 session 0x5626b3c830e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b7299800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b7299800 session 0x5626b5802d20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b7299c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b7299c00 session 0x5626b58023c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:30.156146+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108158976 unmapped: 3645440 heap: 111804416 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aae400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b3aae400 session 0x5626b56b6b40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 69.279853821s of 69.972686768s, submitted: 90
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b57fb800 session 0x5626b30cbc20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aae400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b3aae400 session 0x5626b58305a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b57fb800 session 0x5626b3aa8f00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5817c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b5817c00 session 0x5626b58ca960
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b7299c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b7299c00 session 0x5626b49870e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:31.156640+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108535808 unmapped: 6471680 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b7299800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b7299800 session 0x5626b593c780
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aae400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b3aae400 session 0x5626b5803c20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b57fb800 session 0x5626b2d2ba40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5817c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b5817c00 session 0x5626b58cba40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:32.157069+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108609536 unmapped: 6397952 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:33.157482+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 6332416 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8d44000/0x0/0x4ffc00000, data 0x2c6e18d/0x2d3a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:34.157869+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 6332416 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1403397 data_alloc: 234881024 data_used: 25800704
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8d44000/0x0/0x4ffc00000, data 0x2c6e18d/0x2d3a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:35.158133+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 6332416 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:36.158527+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 6332416 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b7299c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b7299c00 session 0x5626b7eb7e00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:37.158906+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 6332416 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f4800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53be000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:38.159120+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108740608 unmapped: 6266880 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8d43000/0x0/0x4ffc00000, data 0x2c6e1b0/0x2d3b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:39.159446+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108740608 unmapped: 6266880 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1403566 data_alloc: 234881024 data_used: 25808896
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:40.159744+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108986368 unmapped: 6021120 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:41.159988+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 110264320 unmapped: 4743168 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:42.160224+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112582656 unmapped: 2424832 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8d43000/0x0/0x4ffc00000, data 0x2c6e1b0/0x2d3b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:43.160639+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112615424 unmapped: 2392064 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:44.160912+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112615424 unmapped: 2392064 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1433326 data_alloc: 251658240 data_used: 29880320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:45.161323+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112615424 unmapped: 2392064 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8d43000/0x0/0x4ffc00000, data 0x2c6e1b0/0x2d3b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:46.161544+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112615424 unmapped: 2392064 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:47.161890+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112615424 unmapped: 2392064 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:48.162197+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112615424 unmapped: 2392064 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:49.162556+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112615424 unmapped: 2392064 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1433326 data_alloc: 251658240 data_used: 29880320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8d43000/0x0/0x4ffc00000, data 0x2c6e1b0/0x2d3b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:50.162906+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112623616 unmapped: 2383872 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:51.163148+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112623616 unmapped: 2383872 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:52.163415+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112623616 unmapped: 2383872 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:53.163808+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112623616 unmapped: 2383872 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:54.164149+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112631808 unmapped: 2375680 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8d43000/0x0/0x4ffc00000, data 0x2c6e1b0/0x2d3b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1433326 data_alloc: 251658240 data_used: 29880320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:55.164547+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112672768 unmapped: 2334720 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:56.164966+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112672768 unmapped: 2334720 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:57.165342+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112672768 unmapped: 2334720 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:58.165600+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112672768 unmapped: 2334720 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8d43000/0x0/0x4ffc00000, data 0x2c6e1b0/0x2d3b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:59.165821+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112705536 unmapped: 2301952 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1433326 data_alloc: 251658240 data_used: 29880320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:00.166162+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8d43000/0x0/0x4ffc00000, data 0x2c6e1b0/0x2d3b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112705536 unmapped: 2301952 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:01.166513+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8d43000/0x0/0x4ffc00000, data 0x2c6e1b0/0x2d3b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112705536 unmapped: 2301952 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:02.166709+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112705536 unmapped: 2301952 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:03.166928+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112705536 unmapped: 2301952 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:04.167122+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8d43000/0x0/0x4ffc00000, data 0x2c6e1b0/0x2d3b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112689152 unmapped: 2318336 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1433326 data_alloc: 251658240 data_used: 29880320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:05.167414+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8d43000/0x0/0x4ffc00000, data 0x2c6e1b0/0x2d3b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112689152 unmapped: 2318336 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:06.167769+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112689152 unmapped: 2318336 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:07.168114+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112689152 unmapped: 2318336 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8d43000/0x0/0x4ffc00000, data 0x2c6e1b0/0x2d3b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:08.168463+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112697344 unmapped: 2310144 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:09.168690+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112697344 unmapped: 2310144 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1433326 data_alloc: 251658240 data_used: 29880320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:10.169041+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112697344 unmapped: 2310144 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:11.169405+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112697344 unmapped: 2310144 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:12.169660+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8d43000/0x0/0x4ffc00000, data 0x2c6e1b0/0x2d3b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112697344 unmapped: 2310144 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8d43000/0x0/0x4ffc00000, data 0x2c6e1b0/0x2d3b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:13.169842+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112738304 unmapped: 2269184 heap: 115007488 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:14.170341+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 43.579425812s of 43.773803711s, submitted: 34
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113139712 unmapped: 3964928 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1460612 data_alloc: 251658240 data_used: 29884416
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:15.170694+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115326976 unmapped: 1777664 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:16.170928+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114761728 unmapped: 2342912 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:17.171133+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115236864 unmapped: 1867776 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8980000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:18.171363+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115343360 unmapped: 1761280 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:19.171695+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115343360 unmapped: 1761280 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1471292 data_alloc: 251658240 data_used: 29962240
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:20.171979+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115343360 unmapped: 1761280 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:21.172363+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115343360 unmapped: 1761280 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:22.172575+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115343360 unmapped: 1761280 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:23.172812+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8980000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115376128 unmapped: 1728512 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:24.173105+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115376128 unmapped: 1728512 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1471292 data_alloc: 251658240 data_used: 29962240
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:25.173539+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115376128 unmapped: 1728512 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:26.173895+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115376128 unmapped: 1728512 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:27.174135+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115376128 unmapped: 1728512 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8980000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:28.174386+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:29.174815+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1471292 data_alloc: 251658240 data_used: 29962240
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:30.175035+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:31.175351+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:32.175743+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:33.175965+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8980000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:34.176481+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1471292 data_alloc: 251658240 data_used: 29962240
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:35.176763+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:36.176963+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:37.177212+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8980000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:38.177666+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:39.178031+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1471292 data_alloc: 251658240 data_used: 29962240
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:40.178266+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:41.178762+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8980000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:42.179492+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8980000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:43.179867+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:44.180150+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1471292 data_alloc: 251658240 data_used: 29962240
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8980000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:45.180588+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:46.180954+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115417088 unmapped: 1687552 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 32.717578888s of 32.968685150s, submitted: 69
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:47.181309+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115654656 unmapped: 1449984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:48.181711+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115654656 unmapped: 1449984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:49.182078+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:50.182479+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:51.182993+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:52.183362+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:53.183634+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:54.184016+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:55.184297+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:56.184654+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:57.185072+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:58.185424+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:59.185742+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:00.186550+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:01.186978+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:02.187598+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:03.188192+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:04.188563+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:05.188974+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:06.189201+0000)
Oct 11 02:57:50 compute-0 rsyslogd[187706]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:07.189496+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:08.189948+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:09.190439+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:10.190822+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:11.191358+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:12.191738+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:13.191950+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:14.192284+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:15.192725+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:16.193100+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:17.193547+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:18.193871+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:19.194432+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:20.194688+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:21.195130+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:22.195830+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:23.196077+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:24.196395+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115662848 unmapped: 1441792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:25.196669+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115679232 unmapped: 1425408 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:26.197054+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115679232 unmapped: 1425408 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:27.197309+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115679232 unmapped: 1425408 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:28.197903+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115679232 unmapped: 1425408 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:29.198374+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115679232 unmapped: 1425408 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:30.198577+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115490816 unmapped: 1613824 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:31.198838+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115490816 unmapped: 1613824 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:32.199105+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115490816 unmapped: 1613824 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:33.199505+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115490816 unmapped: 1613824 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:34.199720+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115490816 unmapped: 1613824 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:35.199919+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115490816 unmapped: 1613824 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:36.200310+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115490816 unmapped: 1613824 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:37.200513+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115499008 unmapped: 1605632 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:38.200711+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115499008 unmapped: 1605632 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:39.201050+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115499008 unmapped: 1605632 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:40.201410+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115499008 unmapped: 1605632 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:41.201708+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115499008 unmapped: 1605632 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:42.202055+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115499008 unmapped: 1605632 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:43.202449+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115499008 unmapped: 1605632 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:44.202828+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115507200 unmapped: 1597440 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:45.203189+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115507200 unmapped: 1597440 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:46.203841+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115507200 unmapped: 1597440 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:47.204392+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115507200 unmapped: 1597440 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:48.204831+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115507200 unmapped: 1597440 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:49.205164+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115507200 unmapped: 1597440 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:50.205601+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115507200 unmapped: 1597440 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:51.206040+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115515392 unmapped: 1589248 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:52.206351+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115515392 unmapped: 1589248 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:53.206553+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115515392 unmapped: 1589248 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:54.206943+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115515392 unmapped: 1589248 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:55.207369+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115515392 unmapped: 1589248 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets getting new tickets!
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:56.208099+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _finish_auth 0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:56.210790+0000)
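This run of lines is one complete rotating-key renewal: the periodic tick decides the service tickets want refreshing ("getting new tickets!"), sends the request to mon.compute-0 over its msgr2 address, and "_finish_auth 0" reports success, after which the rotating secrets again check out as up to date. The sketch below is a hedged illustration of that kind of cutoff-based check; the 30-second margin and the check_auth_rotating() helper are assumptions for illustration, not the daemon's actual constants:

    import time

    RENEW_MARGIN = 30.0   # seconds; assumed safety margin for the cutoff

    def check_auth_rotating(secrets_expiry, now=None):
        # Secrets count as up to date while they expire after the cutoff
        # (now minus the margin); otherwise trigger a renewal round trip.
        now = time.time() if now is None else now
        cutoff = now - RENEW_MARGIN
        if secrets_expiry > cutoff:
            print(f"have uptodate secrets (they expire after {cutoff:.0f})")
            return True
        print("getting new tickets!")   # would send the request to a monitor
        return False

    now = time.time()
    check_auth_rotating(now + 3600, now)   # fresh tickets: up to date
    check_auth_rotating(now - 60, now)     # stale tickets: renewal path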
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115523584 unmapped: 1581056 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:57.208500+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115523584 unmapped: 1581056 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:58.208823+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115523584 unmapped: 1581056 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:59.209165+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115523584 unmapped: 1581056 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:00.209594+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115523584 unmapped: 1581056 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:01.209961+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115523584 unmapped: 1581056 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:02.210447+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115523584 unmapped: 1581056 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:03.210777+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115523584 unmapped: 1581056 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:04.211086+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115523584 unmapped: 1581056 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:05.211479+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115523584 unmapped: 1581056 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:06.211777+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115539968 unmapped: 1564672 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:07.212136+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:08.212419+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115539968 unmapped: 1564672 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:09.212858+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115539968 unmapped: 1564672 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:10.213371+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115539968 unmapped: 1564672 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:11.213728+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115539968 unmapped: 1564672 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:12.214093+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115539968 unmapped: 1564672 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:13.214475+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115539968 unmapped: 1564672 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:14.214881+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115539968 unmapped: 1564672 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:15.215353+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115539968 unmapped: 1564672 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:16.215716+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115539968 unmapped: 1564672 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:17.216083+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115539968 unmapped: 1564672 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:18.216457+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115539968 unmapped: 1564672 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:19.216829+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115539968 unmapped: 1564672 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:20.217219+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115539968 unmapped: 1564672 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:21.217899+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115539968 unmapped: 1564672 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:22.218116+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115539968 unmapped: 1564672 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:23.218368+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115548160 unmapped: 1556480 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:24.218595+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115548160 unmapped: 1556480 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:25.218929+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115548160 unmapped: 1556480 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:26.219390+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115548160 unmapped: 1556480 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:27.219792+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115548160 unmapped: 1556480 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:28.220116+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115548160 unmapped: 1556480 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:29.220546+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115548160 unmapped: 1556480 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:30.220962+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115548160 unmapped: 1556480 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:31.221397+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115548160 unmapped: 1556480 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:32.221626+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115556352 unmapped: 1548288 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:33.221890+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115556352 unmapped: 1548288 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:34.222170+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115556352 unmapped: 1548288 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:35.222472+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115556352 unmapped: 1548288 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:36.222716+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115556352 unmapped: 1548288 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:37.222932+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115556352 unmapped: 1548288 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:38.223197+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115564544 unmapped: 1540096 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:39.223370+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115564544 unmapped: 1540096 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:40.223577+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115564544 unmapped: 1540096 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:41.223876+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115564544 unmapped: 1540096 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:42.224342+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115564544 unmapped: 1540096 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:43.224720+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115564544 unmapped: 1540096 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:44.224965+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115564544 unmapped: 1540096 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:45.225192+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115564544 unmapped: 1540096 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:46.225423+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115564544 unmapped: 1540096 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:47.225815+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115564544 unmapped: 1540096 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:48.226055+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115564544 unmapped: 1540096 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:49.226355+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115564544 unmapped: 1540096 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:50.226731+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115572736 unmapped: 1531904 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:51.227223+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115572736 unmapped: 1531904 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:52.227601+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115572736 unmapped: 1531904 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:53.227830+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115572736 unmapped: 1531904 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:54.228066+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115572736 unmapped: 1531904 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:55.228503+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115572736 unmapped: 1531904 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:56.228829+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115572736 unmapped: 1531904 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:57.229168+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115572736 unmapped: 1531904 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:58.229766+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115572736 unmapped: 1531904 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:59.230039+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115572736 unmapped: 1531904 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:00.230643+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115572736 unmapped: 1531904 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:01.231146+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115572736 unmapped: 1531904 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:02.231632+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115572736 unmapped: 1531904 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:03.232038+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115580928 unmapped: 1523712 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:04.232499+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115580928 unmapped: 1523712 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:05.232880+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115580928 unmapped: 1523712 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:06.233335+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115580928 unmapped: 1523712 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:07.233737+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115580928 unmapped: 1523712 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:08.234008+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115580928 unmapped: 1523712 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:09.234258+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115580928 unmapped: 1523712 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:10.234515+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115589120 unmapped: 1515520 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:11.234977+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115589120 unmapped: 1515520 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:12.235225+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115589120 unmapped: 1515520 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:13.235580+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115589120 unmapped: 1515520 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b5422800 session 0x5626b3aa81e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aae400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:14.236327+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115589120 unmapped: 1515520 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:15.236684+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115597312 unmapped: 1507328 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1468364 data_alloc: 251658240 data_used: 29954048
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:16.237132+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8985000/0x0/0x4ffc00000, data 0x30271b0/0x30f4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115597312 unmapped: 1507328 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:17.237378+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115597312 unmapped: 1507328 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:18.237653+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115597312 unmapped: 1507328 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b5994c00 session 0x5626b49872c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b7298000 session 0x5626b2da94a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b7298400 session 0x5626b339a780
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:19.237933+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115597312 unmapped: 1507328 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 152.578491211s of 152.763153076s, submitted: 12
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:20.238216+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1400684 data_alloc: 234881024 data_used: 26468352
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:21.238925+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8efb000/0x0/0x4ffc00000, data 0x2ab71a0/0x2b83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [0,0,0,0,0,0,1])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b57fb800 session 0x5626b4f9c780
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:22.239155+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:23.239438+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:24.239798+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:25.240094+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:26.240412+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:27.240849+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:28.241405+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:29.241860+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:30.242342+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:31.242840+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:32.243180+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:33.243792+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:34.244252+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:35.244626+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:36.245040+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:37.245407+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:38.245721+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:39.245973+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:40.246201+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:41.246731+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:42.246993+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:43.247357+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:44.247566+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:45.247777+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:46.248169+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:47.248425+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:48.248608+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:49.249016+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 3530752 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:50.249269+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:51.249689+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:52.249969+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:53.250511+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:54.250851+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:55.251393+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:56.251622+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:57.251839+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:58.252122+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:59.252542+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:00.253033+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:01.253348+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:02.253684+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:03.253944+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:04.254272+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:05.254634+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:06.255188+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:07.255462+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:08.255693+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:09.256025+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:10.256434+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:11.256708+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:12.256892+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:13.257207+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:14.257620+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:15.257858+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:16.258172+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:17.258432+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:18.258827+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:19.259223+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:20.259620+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:21.260040+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 3522560 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:22.260452+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 3506176 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:23.260789+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 3506176 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:24.261060+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 3506176 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:25.261318+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 3506176 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:26.261550+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 3506176 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:27.261784+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 3506176 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:28.261983+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 3506176 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:29.262541+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 3506176 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:30.262915+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 3506176 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:31.263442+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 3506176 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:32.263661+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 3506176 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:33.263936+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 3506176 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:34.264213+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 3506176 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:35.264480+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 3506176 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:36.264721+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 3506176 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:37.265061+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 3506176 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:38.265311+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 3506176 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:39.265649+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:40.266125+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:41.266516+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:42.266731+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:43.267119+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:44.267423+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:45.267771+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:46.267963+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:47.268170+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:48.268499+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:49.268746+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:50.269148+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:51.269538+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:52.269851+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:53.270122+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:54.270341+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:55.270744+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:56.271050+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:57.271463+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113606656 unmapped: 3497984 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:58.271675+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113614848 unmapped: 3489792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:59.272051+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113614848 unmapped: 3489792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:00.272436+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113614848 unmapped: 3489792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:01.272842+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113614848 unmapped: 3489792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:02.273116+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113614848 unmapped: 3489792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:03.273324+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113614848 unmapped: 3489792 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:04.273760+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113623040 unmapped: 3481600 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:05.273984+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113623040 unmapped: 3481600 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:06.274344+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113623040 unmapped: 3481600 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:07.274521+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113623040 unmapped: 3481600 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:08.274755+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113623040 unmapped: 3481600 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:09.275029+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113623040 unmapped: 3481600 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:10.275301+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113623040 unmapped: 3481600 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:11.275569+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113623040 unmapped: 3481600 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:12.275923+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113623040 unmapped: 3481600 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:13.276323+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113623040 unmapped: 3481600 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:14.276668+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113623040 unmapped: 3481600 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:15.277092+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113631232 unmapped: 3473408 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:16.277383+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113631232 unmapped: 3473408 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1398443 data_alloc: 234881024 data_used: 26460160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:17.277666+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113631232 unmapped: 3473408 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:18.278077+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113631232 unmapped: 3473408 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:19.278439+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113631232 unmapped: 3473408 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b3aaf000 session 0x5626b53f7e00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 119.615127563s of 119.691223145s, submitted: 5
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b3aafc00 session 0x5626b3a3ef00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b5816000 session 0x5626b339a5a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:20.278884+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 8413184 heap: 117104640 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f8eff000/0x0/0x4ffc00000, data 0x2ab31a0/0x2b7f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9911000/0x0/0x4ffc00000, data 0x20a11a0/0x216d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [0,0,0,0,1])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:21.279327+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b57fb800 session 0x5626b4fc6d20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9911000/0x0/0x4ffc00000, data 0x20a11a0/0x216d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1266404 data_alloc: 218103808 data_used: 19369984
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:22.279761+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9912000/0x0/0x4ffc00000, data 0x20a112e/0x216b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:23.280006+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:24.280461+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9912000/0x0/0x4ffc00000, data 0x20a112e/0x216b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:25.280963+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:26.281523+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1266404 data_alloc: 218103808 data_used: 19369984
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9912000/0x0/0x4ffc00000, data 0x20a112e/0x216b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:27.281982+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:28.282346+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:29.282721+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:30.283137+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:31.283708+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1266404 data_alloc: 218103808 data_used: 19369984
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:32.284069+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9912000/0x0/0x4ffc00000, data 0x20a112e/0x216b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:33.284342+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:34.287851+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:35.288073+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:36.288335+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1266404 data_alloc: 218103808 data_used: 19369984
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:37.288675+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9912000/0x0/0x4ffc00000, data 0x20a112e/0x216b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:38.289069+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:39.289458+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:40.289842+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9912000/0x0/0x4ffc00000, data 0x20a112e/0x216b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:41.290415+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1266404 data_alloc: 218103808 data_used: 19369984
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:42.291480+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:43.291718+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9912000/0x0/0x4ffc00000, data 0x20a112e/0x216b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4f9912000/0x0/0x4ffc00000, data 0x20a112e/0x216b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:44.292097+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:45.292515+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:46.292949+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109772800 unmapped: 9428992 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1266404 data_alloc: 218103808 data_used: 19369984
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:47.293468+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 27.548189163s of 27.785129547s, submitted: 32
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b57f5000 session 0x5626b6f53c20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b57f4800 session 0x5626b58023c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b53be000 session 0x5626b817e1e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 109789184 unmapped: 9412608 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:48.293713+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106643456 unmapped: 12558336 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4fa0dd000/0x0/0x4ffc00000, data 0x18d811e/0x19a1000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b3aafc00 session 0x5626b3a3eb40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:49.294014+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:50.294478+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4fa0e1000/0x0/0x4ffc00000, data 0x18d40fb/0x199c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:51.294978+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1171166 data_alloc: 218103808 data_used: 15216640
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:52.295482+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4fa0e1000/0x0/0x4ffc00000, data 0x18d40fb/0x199c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:53.295869+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:54.296372+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:55.296949+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:56.297343+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1171166 data_alloc: 218103808 data_used: 15216640
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:57.297831+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4fa0e1000/0x0/0x4ffc00000, data 0x18d40fb/0x199c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:58.298329+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:59.298580+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:00.298819+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4fa0e1000/0x0/0x4ffc00000, data 0x18d40fb/0x199c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:01.299305+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1171166 data_alloc: 218103808 data_used: 15216640
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4fa0e1000/0x0/0x4ffc00000, data 0x18d40fb/0x199c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:02.299654+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:03.300018+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4fa0e1000/0x0/0x4ffc00000, data 0x18d40fb/0x199c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:04.300353+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:05.300580+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:06.300938+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1171166 data_alloc: 218103808 data_used: 15216640
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:07.301319+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4fa0e1000/0x0/0x4ffc00000, data 0x18d40fb/0x199c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:08.301569+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:09.302056+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4fa0e1000/0x0/0x4ffc00000, data 0x18d40fb/0x199c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:10.302457+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:11.302862+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4fa0e1000/0x0/0x4ffc00000, data 0x18d40fb/0x199c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1171166 data_alloc: 218103808 data_used: 15216640
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:12.303305+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4fa0e1000/0x0/0x4ffc00000, data 0x18d40fb/0x199c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:13.303651+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:14.303897+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 12550144 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:15.304374+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106659840 unmapped: 12541952 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:16.304770+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106659840 unmapped: 12541952 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1171166 data_alloc: 218103808 data_used: 15216640
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:17.305140+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106659840 unmapped: 12541952 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:18.305346+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4fa0e1000/0x0/0x4ffc00000, data 0x18d40fb/0x199c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106659840 unmapped: 12541952 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:19.305588+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4fa0e1000/0x0/0x4ffc00000, data 0x18d40fb/0x199c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106659840 unmapped: 12541952 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:20.305834+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106659840 unmapped: 12541952 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:21.306361+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4fa0e1000/0x0/0x4ffc00000, data 0x18d40fb/0x199c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106659840 unmapped: 12541952 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1171166 data_alloc: 218103808 data_used: 15216640
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:22.306746+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106659840 unmapped: 12541952 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 35.406749725s of 35.619831085s, submitted: 34
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:23.307020+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106659840 unmapped: 12541952 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:24.307875+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106659840 unmapped: 12541952 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:25.308573+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106659840 unmapped: 12541952 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:26.309675+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106659840 unmapped: 12541952 heap: 119201792 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 heartbeat osd_stat(store_statfs(0x4fa0e1000/0x0/0x4ffc00000, data 0x18d410b/0x199d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1172804 data_alloc: 218103808 data_used: 15216640
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:27.309930+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106692608 unmapped: 28344320 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:28.310430+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 ms_handle_reset con 0x5626b57f5000 session 0x5626b3c80d20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106692608 unmapped: 28344320 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:29.310822+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 129 handle_osd_map epochs [130,130], i have 129, src has [1,130]
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106692608 unmapped: 28344320 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:30.311104+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106692608 unmapped: 28344320 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:31.311595+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f9951000/0x0/0x4ffc00000, data 0x2061688/0x212c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106692608 unmapped: 28344320 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1229082 data_alloc: 218103808 data_used: 15224832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f9951000/0x0/0x4ffc00000, data 0x2061688/0x212c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:32.311955+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106692608 unmapped: 28344320 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:33.312408+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106692608 unmapped: 28344320 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:34.312906+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106692608 unmapped: 28344320 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:35.313165+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106692608 unmapped: 28344320 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:36.313634+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57fb800 session 0x5626b30350e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5816000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b5816000 session 0x5626b3035c20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f9951000/0x0/0x4ffc00000, data 0x2061688/0x212c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b3aafc00 session 0x5626b5831860
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106659840 unmapped: 28377088 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1229082 data_alloc: 218103808 data_used: 15224832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:37.314055+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53be000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b53be000 session 0x5626b5831e00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114581504 unmapped: 20455424 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57f5000 session 0x5626b3035e00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:38.314354+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114294784 unmapped: 20742144 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57fb800 session 0x5626b3a3ed20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f9951000/0x0/0x4ffc00000, data 0x2061688/0x212c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5994c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:39.314699+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 16.210241318s of 16.285335541s, submitted: 6
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b5994c00 session 0x5626b3a3e5a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b3aafc00 session 0x5626b35921e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53be000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b53be000 session 0x5626b49614a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 21463040 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57f5000 session 0x5626b54b8f00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:40.314979+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57fb800 session 0x5626b3a3fa40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b7298000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b7298000 session 0x5626b4fc81e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b3aafc00 session 0x5626b54b8d20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53be000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b53be000 session 0x5626b4fc72c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113631232 unmapped: 21405696 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57f5000 session 0x5626b49b30e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57fb800 session 0x5626b593d2c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b7298400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b7298400 session 0x5626b58cab40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:41.315516+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b3aafc00 session 0x5626b4fc8d20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113664000 unmapped: 21372928 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1290692 data_alloc: 218103808 data_used: 22028288
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:42.315943+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53be000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b53be000 session 0x5626b5868d20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113737728 unmapped: 21299200 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57f5000 session 0x5626b30e2b40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:43.316408+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57fb800 session 0x5626b4cd14a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5817c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b5817c00 session 0x5626b3a93e00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113737728 unmapped: 21299200 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:44.316806+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113745920 unmapped: 21291008 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53be000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:45.317326+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113745920 unmapped: 21291008 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:46.317700+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113745920 unmapped: 21291008 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1290824 data_alloc: 218103808 data_used: 22028288
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:47.318129+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:48.318370+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:49.318629+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:50.319004+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:51.319496+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:52.319858+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1323944 data_alloc: 234881024 data_used: 25653248
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:53.320153+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:54.320637+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:55.321048+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3000.2 total, 600.0 interval
                                            Cumulative writes: 7165 writes, 28K keys, 7165 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.01 MB/s
                                            Cumulative WAL: 7165 writes, 1536 syncs, 4.66 writes per sync, written: 0.02 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 676 writes, 2157 keys, 676 commit groups, 1.0 writes per commit group, ingest: 2.05 MB, 0.00 MB/s
                                            Interval WAL: 676 writes, 300 syncs, 2.25 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:56.321439+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:57.321657+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1323944 data_alloc: 234881024 data_used: 25653248
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:58.322015+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:59.322285+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:00.322577+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:01.322987+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:02.323509+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1323944 data_alloc: 234881024 data_used: 25653248
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:03.323929+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:04.324431+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:05.324785+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:06.325131+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b3aafc00 session 0x5626b53f63c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 27.680980682s of 27.859249115s, submitted: 20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b53be000 session 0x5626b59a8960
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57f5000 session 0x5626b817e3c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:07.325398+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1322884 data_alloc: 234881024 data_used: 25657344
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112541696 unmapped: 22495232 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:08.325829+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57fb800 session 0x5626b3c961e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112541696 unmapped: 22495232 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:09.326174+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b7299c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f9951000/0x0/0x4ffc00000, data 0x2061688/0x212c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112541696 unmapped: 22495232 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:10.326575+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 130 handle_osd_map epochs [131,131], i have 130, src has [1,131]
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 131 ms_handle_reset con 0x5626b7299c00 session 0x5626b4f9d680
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 28770304 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:11.327054+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 28770304 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:12.328508+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1187286 data_alloc: 218103808 data_used: 14143488
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 28770304 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:13.328900+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 131 heartbeat osd_stat(store_statfs(0x4fa0db000/0x0/0x4ffc00000, data 0x18d7849/0x19a2000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 28770304 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:14.329966+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 28770304 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:15.330189+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 28770304 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:16.331488+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 131 heartbeat osd_stat(store_statfs(0x4fa0db000/0x0/0x4ffc00000, data 0x18d7849/0x19a2000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 131 handle_osd_map epochs [132,132], i have 131, src has [1,132]
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:17.331947+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106299392 unmapped: 28737536 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1190260 data_alloc: 218103808 data_used: 14143488
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:18.332583+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106299392 unmapped: 28737536 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:19.333057+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106299392 unmapped: 28737536 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:20.333496+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106299392 unmapped: 28737536 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 13.701698303s of 13.948337555s, submitted: 60
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:21.333923+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106299392 unmapped: 28737536 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:22.334478+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106307584 unmapped: 28729344 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1192976 data_alloc: 218103808 data_used: 14143488
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 132 heartbeat osd_stat(store_statfs(0x4fa0d7000/0x0/0x4ffc00000, data 0x18d92da/0x19a7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:23.334862+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106414080 unmapped: 37019648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:24.335384+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106414080 unmapped: 37019648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:25.335796+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106438656 unmapped: 36995072 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 132 handle_osd_map epochs [133,133], i have 132, src has [1,133]
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 133 ms_handle_reset con 0x5626b3aafc00 session 0x5626b30e1860
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:26.336210+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 133 ms_handle_reset con 0x5626b5817000 session 0x5626b59a81e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53be000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:27.336575+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106471424 unmapped: 36962304 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1251901 data_alloc: 218103808 data_used: 14151680
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:28.336925+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106471424 unmapped: 36962304 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 133 heartbeat osd_stat(store_statfs(0x4f98d3000/0x0/0x4ffc00000, data 0x20dae5c/0x21aa000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:29.337443+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106471424 unmapped: 36962304 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:30.337849+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106471424 unmapped: 36962304 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:31.338175+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106471424 unmapped: 36962304 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:32.338415+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106471424 unmapped: 36962304 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1251901 data_alloc: 218103808 data_used: 14151680
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5817000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 11.585621834s of 11.708389282s, submitted: 13
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:33.338667+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106471424 unmapped: 36962304 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 133 handle_osd_map epochs [133,134], i have 133, src has [1,134]
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 134 ms_handle_reset con 0x5626b5817000 session 0x5626b3c80000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 134 heartbeat osd_stat(store_statfs(0x4f98d0000/0x0/0x4ffc00000, data 0x20dca2d/0x21ad000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:34.339129+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:35.339370+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:36.339739+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:37.339989+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1200130 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 134 heartbeat osd_stat(store_statfs(0x4fa0d1000/0x0/0x4ffc00000, data 0x18dc9fa/0x19ab000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:38.340335+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 134 heartbeat osd_stat(store_statfs(0x4fa0d1000/0x0/0x4ffc00000, data 0x18dc9fa/0x19ab000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:39.340721+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:40.340953+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 134 handle_osd_map epochs [135,135], i have 134, src has [1,135]
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:41.341439+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:42.341734+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202928 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:43.342087+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:44.342457+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:45.342929+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:46.343225+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:47.343581+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202928 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:48.343984+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:49.344363+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:50.344631+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:51.344913+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:52.345152+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202928 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:53.345558+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:54.345882+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:55.346383+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:56.346737+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:57.347094+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202928 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:58.347347+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:59.347727+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:00.348025+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:01.348606+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:02.349627+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202928 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:03.350000+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:04.350455+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:05.350970+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:06.351213+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:07.351644+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202928 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:08.351873+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:09.352184+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:10.352558+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:11.352834+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:12.353205+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202928 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:13.353538+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:14.353907+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:15.354407+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:16.354745+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:17.355039+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202928 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:18.355494+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:19.355906+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:20.356396+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 47.721370697s of 47.877182007s, submitted: 43
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:21.356811+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106586112 unmapped: 36847616 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:22.357059+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106602496 unmapped: 36831232 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:23.357361+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:24.357771+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:25.358162+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:26.358554+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:27.358966+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:28.359214+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:29.359673+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:30.359944+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:31.360497+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:32.360782+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106643456 unmapped: 36790272 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:33.361160+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106643456 unmapped: 36790272 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:34.361501+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106643456 unmapped: 36790272 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:35.361917+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:36.362410+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:37.362781+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:38.363089+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:39.363506+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:40.363915+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:41.364392+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:42.364960+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:43.365560+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:44.366488+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:45.366917+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:46.367132+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:47.367499+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:48.367760+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:49.367991+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:50.368326+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:51.368637+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:52.368973+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:53.369183+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:54.369505+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:55.369729+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:56.369949+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:57.370193+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:58.370411+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:59.370778+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:00.371096+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:01.371476+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:02.371854+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:03.372203+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:04.372592+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:05.372801+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:06.373041+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:07.373427+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:08.373748+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:09.373952+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:10.374193+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:11.375354+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:12.375711+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:13.376064+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:14.376499+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:15.376844+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:16.377371+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:17.377651+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:18.377867+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:19.378376+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:20.378755+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:21.379333+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:22.379782+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:23.380164+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:24.380483+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:25.380743+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:26.381018+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:27.381436+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:28.381731+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:29.382112+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:30.382489+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:31.382906+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:32.383353+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:33.383681+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:34.384127+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:35.384340+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:36.384701+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:37.384923+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:38.385204+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:39.385618+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:40.385971+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:41.386458+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:42.386705+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:43.387013+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:44.387454+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:45.387781+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:46.388059+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:47.388493+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:48.388709+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:49.388936+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:50.389294+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:51.389715+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:52.389939+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:53.390370+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:54.390580+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:55.390916+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:56.391302+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:57.391530+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:58.391779+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:59.392153+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:00.392394+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:01.392865+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:02.393295+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:03.393548+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:04.393962+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:05.394439+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:06.394647+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:07.394970+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:08.395317+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:09.395702+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:10.395962+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:11.396487+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:12.397118+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:13.397552+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:14.398016+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:15.398502+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:16.398897+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:17.399381+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:18.399802+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:19.400095+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:20.400684+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:21.401340+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:22.401796+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:23.402332+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:24.402664+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:25.402991+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:26.403443+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:27.403943+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:28.404526+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:29.404845+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:30.405176+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:31.405634+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:32.406033+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:33.406596+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:34.407030+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:35.407552+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:36.407871+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:37.408616+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:38.409173+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:39.409657+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:40.409872+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:41.410370+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:42.410751+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:43.411157+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:44.411384+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:45.411808+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:46.412198+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:47.412549+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:48.412817+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:49.413082+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:50.413339+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:51.413713+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:52.414376+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:53.414681+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:54.415083+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:55.415474+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:56.415814+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:57.416143+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:58.416578+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:59.417051+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:00.417480+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:01.417796+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:02.418127+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:03.418654+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:04.418902+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:05.419347+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:06.419690+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:07.419954+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:08.420464+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:09.421191+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:10.421452+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:11.421741+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:12.422068+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:13.422505+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:14.422866+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:15.423220+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:16.423642+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:17.423965+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:18.424362+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:19.424580+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:20.424861+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:21.425101+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:22.425491+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:23.425893+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:24.426433+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:25.426762+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:26.426994+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:27.427218+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:28.427551+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:29.427781+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:30.428174+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:31.428718+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:32.428975+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:33.429351+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:34.429666+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 193.622985840s of 194.207275391s, submitted: 90
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106463232 unmapped: 36970496 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:35.430010+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 handle_osd_map epochs [135,136], i have 135, src has [1,136]
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 135 handle_osd_map epochs [136,136], i have 136, src has [1,136]
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 136 ms_handle_reset con 0x5626b57f5000 session 0x5626b3034780
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106512384 unmapped: 36921344 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:36.430512+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106528768 unmapped: 36904960 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:37.430778+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 136 handle_osd_map epochs [137,137], i have 136, src has [1,137]
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b35925a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106528768 unmapped: 36904960 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:38.431057+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106528768 unmapped: 36904960 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:39.431638+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106528768 unmapped: 36904960 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:40.431895+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:41.432382+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:42.432769+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:43.433167+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:44.433635+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:45.433995+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:46.434430+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:47.434666+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:48.435028+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:49.435429+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:50.435729+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:51.436142+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:52.436401+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:53.436838+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:54.437142+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:55.437380+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:56.437703+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:57.437923+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:58.438748+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:59.439122+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:00.439456+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:01.439801+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:02.440041+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:03.440356+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:04.440676+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:05.440904+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:06.441493+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:07.441925+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:08.442387+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:09.442747+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:10.443055+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:11.443350+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:12.443689+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:13.444101+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:14.444342+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106553344 unmapped: 36880384 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:15.444534+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106553344 unmapped: 36880384 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:16.444765+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106553344 unmapped: 36880384 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:17.445030+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106553344 unmapped: 36880384 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:18.445424+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106553344 unmapped: 36880384 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:19.445805+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:20.446362+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106553344 unmapped: 36880384 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:21.446701+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:22.447108+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:23.447492+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:24.447825+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:25.448439+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:26.448642+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:27.448879+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:28.449148+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:29.449534+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:30.449829+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b59cb000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b59cb000 session 0x5626b3a3ed20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b3aafc00 session 0x5626b56b6780
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b584a3c0
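This burst of handle_auth_request/ms_handle_reset pairs is new inbound connections being issued cephx challenges and then having their sessions reset shortly afterwards; the same few con pointers recur, most likely because freed Connection objects' addresses are being reused. A throwaway counter for spotting which connection pointers churn the most (the function and its name are hypothetical):

import re
from collections import Counter

RESET_RE = re.compile(r"ms_handle_reset con (0x[0-9a-f]+)")

def reset_counts(lines):
    # Tally connection resets per messenger Connection pointer.
    return Counter(m.group(1) for line in lines
                   if (m := RESET_RE.search(line)) is not None)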
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:31.450112+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 56.384044647s of 56.646003723s, submitted: 30
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b4f9d0e0
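The _kv_sync_thread line just above is BlueStore's KV commit thread reporting that it sat idle for 56.384 s of a 56.646 s window (about 99.5%) while flushing 30 transactions, i.e. roughly one commit every two seconds; a later sample in this capture (idle 7.996 s of 10.312 s, 42 submitted) shows a brief write burst. A sketch computing those figures from the log text:

import re

KV_RE = re.compile(
    r"_kv_sync_thread utilization: idle ([0-9.]+)s of ([0-9.]+)s, submitted: (\d+)")

def kv_sync_stats(line):
    m = KV_RE.search(line)
    if m is None:
        return None
    idle, window, submitted = float(m.group(1)), float(m.group(2)), int(m.group(3))
    return {
        "idle_pct": round(100.0 * idle / window, 2),
        "busy_sec": round(window - idle, 3),
        "commits_per_sec": round(submitted / window, 3),
    }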
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5817000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:32.450414+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113516544 unmapped: 29917184 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5817000 session 0x5626b4fc70e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b59cb000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b59cb000 session 0x5626b30e2d20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:33.450710+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b3aafc00 session 0x5626b59a83c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 29859840 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b3027a40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:34.451106+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 29859840 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1375902 data_alloc: 218103808 data_used: 20987904
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b817f0e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8848000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:35.451505+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 29859840 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5817000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5817000 session 0x5626b53f7e00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539e400 session 0x5626b5831c20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:36.451776+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 29859840 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:37.452080+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 29859840 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:38.452467+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 29851648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8848000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:39.452760+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 29851648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1375902 data_alloc: 218103808 data_used: 20987904
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8848000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:40.453060+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 29851648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8848000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:41.453380+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 29851648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:42.453837+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b3aafc00 session 0x5626b30ca780
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 29851648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b49b3680
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5817000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539f000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539f000 session 0x5626b817f680
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 10.559719086s of 11.683396339s, submitted: 4
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fd400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:43.454490+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 29835264 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8848000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:44.454723+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 29835264 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1377726 data_alloc: 218103808 data_used: 20987904
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:45.455067+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 121847808 unmapped: 21585920 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:46.455436+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 121241600 unmapped: 22192128 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539e400 session 0x5626b339b2c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b59cf000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b59cf000 session 0x5626b4f843c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:47.456027+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b3aafc00 session 0x5626b6f52f00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5817000 session 0x5626b6f53a40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115449856 unmapped: 27983872 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539f000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539f000 session 0x5626b6f52b40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b584be00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b3c823c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fd400 session 0x5626b5803680
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539f000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f7516000/0x0/0x4ffc00000, data 0x4082bad/0x4158000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:48.456366+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539f000 session 0x5626b3c82780
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b584a1e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5817000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b3c82000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115458048 unmapped: 27975680 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b59cf000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5817000 session 0x5626b339a780
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539f000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:49.456746+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115458048 unmapped: 27975680 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1546360 data_alloc: 218103808 data_used: 20992000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539e400 session 0x5626b584b680
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b817e1e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:50.457140+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b3aafc00 session 0x5626b3a3eb40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115466240 unmapped: 27967488 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fd400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fd400 session 0x5626b339b680
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b6f532c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b59cf000 session 0x5626b3c830e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:51.457582+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539f000 session 0x5626b339ab40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115466240 unmapped: 27967488 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b6f525a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b3aafc00 session 0x5626b3a3ef00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539e400 session 0x5626b3035e00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:52.457798+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f7516000/0x0/0x4ffc00000, data 0x4082bad/0x4158000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b3aafc00 session 0x5626b3f58960
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115466240 unmapped: 27967488 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539e400 session 0x5626b3372b40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:53.458210+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539f000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b3c6cf00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 7.995737553s of 10.312167168s, submitted: 42
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115433472 unmapped: 28000256 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b59cf000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b3c82b40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:54.458660+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fd400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b59ca800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114819072 unmapped: 28614656 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550106 data_alloc: 218103808 data_used: 20992000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539f000 session 0x5626b59a9680
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b59cf000 session 0x5626b5830000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f7513000/0x0/0x4ffc00000, data 0x4082bf0/0x415b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:55.458925+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fd400 session 0x5626b6f53a40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b59ca800 session 0x5626b59a9860
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114827264 unmapped: 28606464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f7400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f9000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:56.459130+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5874800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e2800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114843648 unmapped: 28590080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f7513000/0x0/0x4ffc00000, data 0x4082bf0/0x415b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:57.459488+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114843648 unmapped: 28590080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:58.459688+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114712576 unmapped: 28721152 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:59.459874+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114712576 unmapped: 28721152 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1566614 data_alloc: 218103808 data_used: 23199744
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:00.460075+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114720768 unmapped: 28712960 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:01.460328+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115113984 unmapped: 28319744 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f7513000/0x0/0x4ffc00000, data 0x4082bf0/0x415b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:02.460494+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 117366784 unmapped: 26066944 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:03.460680+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 119152640 unmapped: 24281088 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:04.460952+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 119152640 unmapped: 24281088 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1620214 data_alloc: 234881024 data_used: 30789632
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:05.461143+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 119152640 unmapped: 24281088 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:06.461424+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 120389632 unmapped: 23044096 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:07.461624+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f7513000/0x0/0x4ffc00000, data 0x4082bf0/0x415b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 126394368 unmapped: 17039360 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:08.461829+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 126574592 unmapped: 16859136 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:09.462087+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 126623744 unmapped: 16809984 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1674134 data_alloc: 234881024 data_used: 38350848
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5874800 session 0x5626b3372000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b53e2800 session 0x5626b53f7e00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:10.464448+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fd400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 15.605598450s of 17.215515137s, submitted: 12
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 126599168 unmapped: 16834560 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fd400 session 0x5626b4cd1680
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:11.464825+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125960192 unmapped: 17473536 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:12.465144+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125960192 unmapped: 17473536 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b3c823c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f9000 session 0x5626b3c82780
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5874800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:13.465354+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f78df000/0x0/0x4ffc00000, data 0x3cb7be0/0x3d8f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123494400 unmapped: 19939328 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5874800 session 0x5626b53f74a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:14.465562+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b59ca800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b59ca800 session 0x5626b3c825a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b56b65a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f9000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f9000 session 0x5626b56b6b40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123510784 unmapped: 19922944 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fd400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fd400 session 0x5626b56b74a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1558501 data_alloc: 234881024 data_used: 30789632
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5874800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5874800 session 0x5626b56b7860
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b59cf000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b59cf000 session 0x5626b30cbe00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b4fc83c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f9000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:15.465755+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f9000 session 0x5626b4fc8000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fd400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fd400 session 0x5626b3a921e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123494400 unmapped: 19939328 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:16.465982+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123494400 unmapped: 19939328 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f74aa000/0x0/0x4ffc00000, data 0x40ecbe0/0x41c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:17.466203+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123494400 unmapped: 19939328 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:18.466426+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123494400 unmapped: 19939328 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:19.467269+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123494400 unmapped: 19939328 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1624982 data_alloc: 234881024 data_used: 30789632
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:20.467646+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f74aa000/0x0/0x4ffc00000, data 0x40ecbe0/0x41c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123387904 unmapped: 20045824 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:21.468091+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123387904 unmapped: 20045824 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f74aa000/0x0/0x4ffc00000, data 0x40ecbe0/0x41c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:22.468444+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123387904 unmapped: 20045824 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5874800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 12.408591270s of 12.638334274s, submitted: 29
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:23.468730+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5874800 session 0x5626b30cb4a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123731968 unmapped: 19701760 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e2c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:24.468936+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123740160 unmapped: 19693568 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1629553 data_alloc: 234881024 data_used: 30793728
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:25.469169+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123740160 unmapped: 19693568 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:26.469392+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123740160 unmapped: 19693568 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:27.469581+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 124067840 unmapped: 19365888 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f7480000/0x0/0x4ffc00000, data 0x4116be0/0x41ee000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:28.469806+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125206528 unmapped: 18227200 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f7480000/0x0/0x4ffc00000, data 0x4116be0/0x41ee000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:29.470152+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125206528 unmapped: 18227200 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1685393 data_alloc: 234881024 data_used: 36040704
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:30.470402+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125206528 unmapped: 18227200 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:31.470670+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125206528 unmapped: 18227200 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:32.470880+0000)
Oct 11 02:57:50 compute-0 rsyslogd[187706]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
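This rsyslogd line is the likely key to an oddity running through the whole stretch: every ceph-osd line carries the same receive timestamp (Oct 11 02:57:50) while the timestamps embedded in the messages march steadily from about 02:38:53 to 02:40:17. That is consistent with the imjournal module re-reading rotated journal files and delivering buffered entries in one burst, rather than with any clock problem on the OSD, though the log alone cannot prove the mechanism. A sketch that measures the gap; note the embedded time here is a key expiry, i.e. later than the tick that logged it, so the true delivery lag is even larger than the printed difference:

```python
# Minimal sketch: compare the syslog receive time with a timestamp embedded
# in the message body. A large, uniform gap across many lines suggests
# buffered/replayed delivery rather than clock skew. The year (2025) is
# taken from the embedded ISO timestamps, since syslog omits it.
import re
from datetime import datetime, timezone

line = ("Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: "
        "_check_auth_rotating have uptodate secrets "
        "(they expire after 2025-10-11T02:39:33.471590+0000)")

recv = datetime.strptime("2025 " + line[:15],
                         "%Y %b %d %H:%M:%S").replace(tzinfo=timezone.utc)
body = datetime.strptime(re.search(r"expire after (\S+)\)", line).group(1),
                         "%Y-%m-%dT%H:%M:%S.%f%z")
print(f"receive - embedded = {(recv - body).total_seconds():.0f}s")  # ~1097s
```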
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125222912 unmapped: 18210816 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 9.958163261s of 10.014116287s, submitted: 8
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:33.471590+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132775936 unmapped: 10657792 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:34.471974+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129392640 unmapped: 14041088 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1773203 data_alloc: 234881024 data_used: 36089856
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f6a32000/0x0/0x4ffc00000, data 0x4b64be0/0x4c3c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:35.472297+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136060928 unmapped: 7372800 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:36.472528+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136347648 unmapped: 7086080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:37.472816+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136060928 unmapped: 7372800 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:38.472996+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136110080 unmapped: 7323648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:39.473450+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136110080 unmapped: 7323648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1862199 data_alloc: 234881024 data_used: 38940672
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:40.473658+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136110080 unmapped: 7323648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f62e1000/0x0/0x4ffc00000, data 0x52b3be0/0x538b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:41.473884+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f62e1000/0x0/0x4ffc00000, data 0x52b3be0/0x538b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136151040 unmapped: 7282688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:42.474101+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136200192 unmapped: 7233536 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:43.474317+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 7208960 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:44.474635+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 7208960 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1857875 data_alloc: 234881024 data_used: 38940672
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:45.475019+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 7208960 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f3000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f3000 session 0x5626b6f52b40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b817e000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f9000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f9000 session 0x5626b3f58b40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fd400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fd400 session 0x5626b817ed20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:46.475295+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5874800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 11.096207619s of 13.221765518s, submitted: 164
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5874800 session 0x5626b3c832c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d55c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b2d55c00 session 0x5626b817e5a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b3c6c000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f9000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136298496 unmapped: 15540224 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f9000 session 0x5626b5868b40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fd400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fd400 session 0x5626b49601e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:47.475754+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5a4a000/0x0/0x4ffc00000, data 0x5b4cbe0/0x5c24000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
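Across the burst the heartbeat's data-stored field climbs steadily, from 0x4082bad near the top of this stretch to 0x5b4cbe0 here, and later falls back (0x58d7be0 further down), so the OSD is absorbing writes and then reclaiming space. Diffing consecutive heartbeats makes the trend concrete:

```python
# Minimal sketch: diff the data-stored field across consecutive heartbeat
# lines (hex values copied from this burst) to see write ingest over time.
import re

heartbeats = [
    "store_statfs(0x4f7516000/0x0/0x4ffc00000, data 0x4082bad/0x4158000",
    "store_statfs(0x4f74aa000/0x0/0x4ffc00000, data 0x40ecbe0/0x41c4000",
    "store_statfs(0x4f62e1000/0x0/0x4ffc00000, data 0x52b3be0/0x538b000",
    "store_statfs(0x4f5a4a000/0x0/0x4ffc00000, data 0x5b4cbe0/0x5c24000",
]
stored = [int(re.search(r"data 0x([0-9a-f]+)/", h).group(1), 16)
          for h in heartbeats]
for prev, cur in zip(stored, stored[1:]):
    print(f"{(cur - prev) / 2**20:+.1f} MiB")  # +0.4, +17.8, +8.6 MiB
```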
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136298496 unmapped: 15540224 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:48.476116+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136298496 unmapped: 15540224 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:49.476482+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136298496 unmapped: 15540224 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1921949 data_alloc: 234881024 data_used: 38940672
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:50.476690+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136298496 unmapped: 15540224 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:51.477121+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5a4a000/0x0/0x4ffc00000, data 0x5b4cbe0/0x5c24000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136306688 unmapped: 15532032 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5874800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:52.477303+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5874800 session 0x5626b2da9c20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136306688 unmapped: 15532032 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:53.477515+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136314880 unmapped: 15523840 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:54.477736+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5a4a000/0x0/0x4ffc00000, data 0x5b4cbe0/0x5c24000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136314880 unmapped: 15523840 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1921949 data_alloc: 234881024 data_used: 38940672
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:55.478148+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136314880 unmapped: 15523840 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:56.478349+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5a4a000/0x0/0x4ffc00000, data 0x5b4cbe0/0x5c24000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [1])
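Nearly every heartbeat in this stretch reports an empty op histogram, "op hist []"; the line above is the only one with an entry, "op hist [1]". Assuming the bracket holds counts of recent client ops (an assumption; the log does not define it), a short filter surfaces such outliers:

```python
# Minimal sketch: surface the rare heartbeats whose op histogram is
# non-empty. Sample lines abbreviated with "..." for brevity.
import re

lines = [
    "osd.2 137 heartbeat osd_stat(... peers [0,1] op hist [])",
    "osd.2 137 heartbeat osd_stat(... peers [0,1] op hist [1])",
]
for ln in lines:
    m = re.search(r"op hist \[([^\]]*)\]", ln)
    if m and m.group(1):
        print("non-empty op hist:", m.group(1), "->", ln)
```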
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141860864 unmapped: 9977856 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:57.478561+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b2d72000 session 0x5626b3c6d0e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b2d72400 session 0x5626b3c83a40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5a4a000/0x0/0x4ffc00000, data 0x5b4cbe0/0x5c24000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 143032320 unmapped: 8806400 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 11.342640877s of 11.475764275s, submitted: 8
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:58.478816+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b56b6f00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137682944 unmapped: 14155776 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:59.479151+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137682944 unmapped: 14155776 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1860859 data_alloc: 234881024 data_used: 38952960
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:00.479511+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 138248192 unmapped: 13590528 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:01.479778+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137666560 unmapped: 14172160 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:02.480168+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137666560 unmapped: 14172160 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:03.480445+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5cbf000/0x0/0x4ffc00000, data 0x58d7be0/0x59af000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137707520 unmapped: 14131200 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:04.480885+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137707520 unmapped: 14131200 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1916679 data_alloc: 234881024 data_used: 39317504
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:05.481365+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137707520 unmapped: 14131200 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:06.481727+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5cbd000/0x0/0x4ffc00000, data 0x58d9be0/0x59b1000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137707520 unmapped: 14131200 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:07.482135+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137134080 unmapped: 14704640 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:08.482508+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137134080 unmapped: 14704640 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:09.482843+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539e400 session 0x5626b3c801e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 11.344632149s of 11.581441879s, submitted: 51
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f7400 session 0x5626b5830960
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137134080 unmapped: 14704640 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1914803 data_alloc: 234881024 data_used: 39317504
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:10.483024+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f9000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f9000 session 0x5626b5803a40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137134080 unmapped: 14704640 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:11.483341+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137134080 unmapped: 14704640 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:12.483562+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5cbd000/0x0/0x4ffc00000, data 0x58d9be0/0x59b1000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137142272 unmapped: 14696448 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:13.484006+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137158656 unmapped: 14680064 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5cbd000/0x0/0x4ffc00000, data 0x58d9be0/0x59b1000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:14.484201+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136921088 unmapped: 14917632 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1915495 data_alloc: 234881024 data_used: 39432192
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5cbd000/0x0/0x4ffc00000, data 0x58d9be0/0x59b1000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:15.484436+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136921088 unmapped: 14917632 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:16.484740+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136921088 unmapped: 14917632 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:17.484973+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136921088 unmapped: 14917632 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:18.485464+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b3aafc00 session 0x5626b59a8b40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b53f72c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5cbd000/0x0/0x4ffc00000, data 0x58d9be0/0x59b1000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136921088 unmapped: 14917632 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:19.485977+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b2d72c00 session 0x5626b584ba40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 137 handle_osd_map epochs [138,138], i have 137, src has [1,138]
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 10.110903740s of 10.199507713s, submitted: 21
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 138 ms_handle_reset con 0x5626b2d73000 session 0x5626b30e1e00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133758976 unmapped: 18079744 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1782651 data_alloc: 234881024 data_used: 34406400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 138 ms_handle_reset con 0x5626b2d73400 session 0x5626b593d0e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:20.486175+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 138 ms_handle_reset con 0x5626b2d72c00 session 0x5626b593dc20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133775360 unmapped: 18063360 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:21.486436+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 138 heartbeat osd_stat(store_statfs(0x4f69f3000/0x0/0x4ffc00000, data 0x4b9fb6d/0x4c7a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 138 handle_osd_map epochs [139,139], i have 138, src has [1,139]
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133799936 unmapped: 18038784 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 139 ms_handle_reset con 0x5626b2d73000 session 0x5626b59a83c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:22.486669+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:23.487337+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:24.487861+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 139 heartbeat osd_stat(store_statfs(0x4f69f2000/0x0/0x4ffc00000, data 0x4ba132e/0x4c7b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1783260 data_alloc: 234881024 data_used: 34406400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:25.488161+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 139 heartbeat osd_stat(store_statfs(0x4f69f2000/0x0/0x4ffc00000, data 0x4ba132e/0x4c7b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:26.488553+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:27.488790+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:28.489189+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 139 heartbeat osd_stat(store_statfs(0x4f69f2000/0x0/0x4ffc00000, data 0x4ba132e/0x4c7b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:29.489626+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 139 heartbeat osd_stat(store_statfs(0x4f69f2000/0x0/0x4ffc00000, data 0x4ba132e/0x4c7b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 139 heartbeat osd_stat(store_statfs(0x4f69f2000/0x0/0x4ffc00000, data 0x4ba132e/0x4c7b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1783260 data_alloc: 234881024 data_used: 34406400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:30.490011+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:31.490430+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 139 handle_osd_map epochs [140,140], i have 139, src has [1,140]
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 11.670613289s of 12.001146317s, submitted: 50
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133816320 unmapped: 18022400 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:32.490808+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f69ef000/0x0/0x4ffc00000, data 0x4ba2d91/0x4c7e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133816320 unmapped: 18022400 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:33.491469+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133824512 unmapped: 18014208 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:34.491801+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f69ef000/0x0/0x4ffc00000, data 0x4ba2d91/0x4c7e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133824512 unmapped: 18014208 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1786234 data_alloc: 234881024 data_used: 34406400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:35.492224+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133824512 unmapped: 18014208 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:36.492629+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133832704 unmapped: 18006016 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:37.492897+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f69ef000/0x0/0x4ffc00000, data 0x4ba2d91/0x4c7e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b53e2c00 session 0x5626b56b72c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b53e3800 session 0x5626b56b7a40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133840896 unmapped: 17997824 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:38.493095+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f69f0000/0x0/0x4ffc00000, data 0x4ba2d91/0x4c7e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 128294912 unmapped: 23543808 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:39.493982+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b3aafc00 session 0x5626b404fe00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7816000/0x0/0x4ffc00000, data 0x3d7dd81/0x3e58000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 23887872 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:40.494414+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1610862 data_alloc: 234881024 data_used: 25632768
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7840000/0x0/0x4ffc00000, data 0x3d53d81/0x3e2e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 23887872 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:41.494639+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 23887872 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:42.495020+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 23887872 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:43.495422+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 23887872 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:44.495613+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 23887872 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:45.495863+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1610862 data_alloc: 234881024 data_used: 25632768
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 23887872 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:46.496122+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 14.696439743s of 14.932935715s, submitted: 57
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7840000/0x0/0x4ffc00000, data 0x3d53d81/0x3e2e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 23846912 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:47.496431+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7840000/0x0/0x4ffc00000, data 0x3d53d81/0x3e2e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 128212992 unmapped: 23625728 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:48.496597+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 128212992 unmapped: 23625728 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:49.496800+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7840000/0x0/0x4ffc00000, data 0x3d53d81/0x3e2e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 128212992 unmapped: 23625728 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:50.497177+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1620174 data_alloc: 234881024 data_used: 26468352
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 128212992 unmapped: 23625728 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:51.497545+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d72c00 session 0x5626b4fc81e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d73000 session 0x5626b3a3e5a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e2c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b53e2c00 session 0x5626b58694a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b53e3800 session 0x5626b6f530e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b57fb800 session 0x5626b3c6c960
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 24084480 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:52.497812+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7554000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 24084480 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:53.498140+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7554000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7554000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:54.498385+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 24084480 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:55.498582+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 24084480 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1649054 data_alloc: 234881024 data_used: 26468352
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:56.498840+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 24084480 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d72c00 session 0x5626b7eb7c20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:57.499058+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 24084480 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e2c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7554000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:58.499285+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 24084480 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:59.499603+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 24084480 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:00.499862+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 24281088 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1649054 data_alloc: 234881024 data_used: 26468352
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:01.500105+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127492096 unmapped: 24346624 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 15.217692375s of 15.319958687s, submitted: 17
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:02.500351+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:03.500682+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7553000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:04.500976+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:05.501192+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1658814 data_alloc: 234881024 data_used: 28086272
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7553000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:06.501625+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:07.501945+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:08.502216+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:09.502570+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:10.502761+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1658814 data_alloc: 234881024 data_used: 28086272
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7553000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:11.503287+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7553000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:12.503808+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:13.504344+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:14.504797+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:15.505355+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1658814 data_alloc: 234881024 data_used: 28086272
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:16.505618+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7553000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:17.505968+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d72400 session 0x5626b4961680
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 16.142997742s of 16.154426575s, submitted: 3
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b539e400 session 0x5626b4cd1680
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:18.506357+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b53e3800 session 0x5626b56b6f00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:19.506623+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:20.507019+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1466909 data_alloc: 218103808 data_used: 20537344
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:21.507311+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:22.507568+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f8558000/0x0/0x4ffc00000, data 0x303cd4e/0x3115000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:23.507805+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:24.508379+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f8558000/0x0/0x4ffc00000, data 0x303cd4e/0x3115000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:25.508775+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1466909 data_alloc: 218103808 data_used: 20537344
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:26.509378+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:27.509648+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f8558000/0x0/0x4ffc00000, data 0x303cd4e/0x3115000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:28.509945+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:29.510170+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:30.510552+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123142144 unmapped: 28696576 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1467229 data_alloc: 218103808 data_used: 20545536
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 12.610569954s of 12.732493401s, submitted: 38
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:31.511379+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125263872 unmapped: 26574848 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7b36000/0x0/0x4ffc00000, data 0x3a5fd4e/0x3b38000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:32.511749+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125444096 unmapped: 26394624 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:33.512086+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125386752 unmapped: 26451968 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:34.512483+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125386752 unmapped: 26451968 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:35.512887+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125394944 unmapped: 26443776 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7a90000/0x0/0x4ffc00000, data 0x3b04d4e/0x3bdd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1563015 data_alloc: 218103808 data_used: 21139456
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:36.513292+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125394944 unmapped: 26443776 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:37.513705+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125394944 unmapped: 26443776 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:38.513965+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125108224 unmapped: 26730496 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:39.514311+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125108224 unmapped: 26730496 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:40.514753+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125108224 unmapped: 26730496 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1558391 data_alloc: 218103808 data_used: 21139456
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:41.515062+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7a71000/0x0/0x4ffc00000, data 0x3b24d4e/0x3bfd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125108224 unmapped: 26730496 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:42.515562+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125108224 unmapped: 26730496 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7a71000/0x0/0x4ffc00000, data 0x3b24d4e/0x3bfd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:43.515932+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125108224 unmapped: 26730496 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:44.516205+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125108224 unmapped: 26730496 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 13.959811211s of 14.333621025s, submitted: 88
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:45.516477+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1559019 data_alloc: 218103808 data_used: 21200896
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:46.516768+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:47.517099+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7a61000/0x0/0x4ffc00000, data 0x3b34d4e/0x3c0d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7a61000/0x0/0x4ffc00000, data 0x3b34d4e/0x3c0d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:48.517471+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7a61000/0x0/0x4ffc00000, data 0x3b34d4e/0x3c0d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:49.517834+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7a61000/0x0/0x4ffc00000, data 0x3b34d4e/0x3c0d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:50.518159+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1559019 data_alloc: 218103808 data_used: 21200896
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:51.518692+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:52.519050+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d73800 session 0x5626b53f65a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:53.519526+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d73800 session 0x5626b339b860
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d72400 session 0x5626b339ad20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d72c00 session 0x5626b58032c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b539e400 session 0x5626b584a3c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:54.519818+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:55.520058+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1593799 data_alloc: 218103808 data_used: 21200896
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f6000/0x0/0x4ffc00000, data 0x3e9edb0/0x3f78000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:56.520514+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:57.520717+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f6000/0x0/0x4ffc00000, data 0x3e9edb0/0x3f78000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b53e3800 session 0x5626b5831c20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:58.521120+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b53e3800 session 0x5626b49614a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d72400 session 0x5626b3c82960
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 13.765749931s of 14.004361153s, submitted: 43
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d72c00 session 0x5626b30e3a40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:59.521665+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f6000/0x0/0x4ffc00000, data 0x3e9edb0/0x3f78000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:00.521897+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1594944 data_alloc: 218103808 data_used: 21200896
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:01.522155+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f5000/0x0/0x4ffc00000, data 0x3e9edd3/0x3f79000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:02.522370+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125345792 unmapped: 26492928 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:03.522607+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:04.523045+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:05.523401+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1620848 data_alloc: 234881024 data_used: 24686592
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:06.523742+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:07.524155+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f5000/0x0/0x4ffc00000, data 0x3e9edd3/0x3f79000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:08.524402+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:09.524677+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:10.525066+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1620848 data_alloc: 234881024 data_used: 24686592
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:11.525382+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:12.525623+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125362176 unmapped: 26476544 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:13.525965+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125362176 unmapped: 26476544 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f5000/0x0/0x4ffc00000, data 0x3e9edd3/0x3f79000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:14.526353+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125362176 unmapped: 26476544 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:15.526718+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125362176 unmapped: 26476544 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1620848 data_alloc: 234881024 data_used: 24686592
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:16.527024+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125362176 unmapped: 26476544 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f5000/0x0/0x4ffc00000, data 0x3e9edd3/0x3f79000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:17.527317+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125362176 unmapped: 26476544 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:18.527524+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f5000/0x0/0x4ffc00000, data 0x3e9edd3/0x3f79000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:19.527731+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:20.527999+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1620848 data_alloc: 234881024 data_used: 24686592
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:21.528465+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:22.528837+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f5000/0x0/0x4ffc00000, data 0x3e9edd3/0x3f79000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:23.529138+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:24.529724+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:25.530054+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1620848 data_alloc: 234881024 data_used: 24686592
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f5000/0x0/0x4ffc00000, data 0x3e9edd3/0x3f79000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:26.530465+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:27.530846+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 28.517086029s of 28.619983673s, submitted: 9
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125378560 unmapped: 26460160 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:28.531086+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 140 handle_osd_map epochs [140,141], i have 140, src has [1,141]
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d73c00 session 0x5626b58030e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125394944 unmapped: 31105024 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:29.531406+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125394944 unmapped: 31105024 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:30.531779+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f7281000/0x0/0x4ffc00000, data 0x4310950/0x43ec000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125403136 unmapped: 31096832 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1655644 data_alloc: 234881024 data_used: 24694784
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:31.532028+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125403136 unmapped: 31096832 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:32.532312+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125403136 unmapped: 31096832 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:33.532616+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125403136 unmapped: 31096832 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:34.533003+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125403136 unmapped: 31096832 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:35.533548+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f7282000/0x0/0x4ffc00000, data 0x4310950/0x43ec000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130400256 unmapped: 26099712 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1717042 data_alloc: 234881024 data_used: 25780224
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f7282000/0x0/0x4ffc00000, data 0x4310950/0x43ec000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:36.533766+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130441216 unmapped: 26058752 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:37.534068+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 9.699342728s of 10.029482841s, submitted: 96
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129261568 unmapped: 27238400 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:38.534469+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b9795000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b9795000 session 0x5626b49b3680
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133783552 unmapped: 22716416 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:39.534737+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133881856 unmapped: 22618112 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72400 session 0x5626b4fc92c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72c00 session 0x5626b56b7c20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d73c00 session 0x5626b53f7680
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:40.534928+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e3800 session 0x5626b30e3c20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b9795400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b9795400 session 0x5626b56b6000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f69a3000/0x0/0x4ffc00000, data 0x4bef950/0x4ccb000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133906432 unmapped: 22593536 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1742321 data_alloc: 234881024 data_used: 30715904
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:41.535400+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133906432 unmapped: 22593536 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:42.535743+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133906432 unmapped: 22593536 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:43.536018+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72400 session 0x5626b58021e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133906432 unmapped: 22593536 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:44.536285+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f699f000/0x0/0x4ffc00000, data 0x4bf2973/0x4ccf000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d73800 session 0x5626b6f52960
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b539e400 session 0x5626b404fe00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133906432 unmapped: 22593536 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:45.536469+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e3800 session 0x5626b3372b40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129998848 unmapped: 26501120 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1627004 data_alloc: 234881024 data_used: 25866240
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:46.536692+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129998848 unmapped: 26501120 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:47.536949+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130007040 unmapped: 26492928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:48.537283+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130007040 unmapped: 26492928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:49.537685+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6f9a000/0x0/0x4ffc00000, data 0x40bd8ee/0x4198000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130007040 unmapped: 26492928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6f9a000/0x0/0x4ffc00000, data 0x40bd8ee/0x4198000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:50.538117+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130007040 unmapped: 26492928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1627004 data_alloc: 234881024 data_used: 25866240
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:51.538368+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d73000 session 0x5626b5803a40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e2c00 session 0x5626b5868d20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130007040 unmapped: 26492928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 14.008026123s of 14.405908585s, submitted: 75
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:52.538677+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f82b7000/0x0/0x4ffc00000, data 0x32dc8ee/0x33b7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72400 session 0x5626b58034a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:53.538969+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:54.539416+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:55.539691+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3600.2 total, 600.0 interval
                                            Cumulative writes: 8926 writes, 35K keys, 8926 commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 8926 writes, 2237 syncs, 3.99 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 1761 writes, 6906 keys, 1761 commit groups, 1.0 writes per commit group, ingest: 6.83 MB, 0.01 MB/s
                                            Interval WAL: 1761 writes, 701 syncs, 2.51 writes per sync, written: 0.01 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1506240 data_alloc: 234881024 data_used: 23580672
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:56.540112+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:57.540437+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:58.540706+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f82b7000/0x0/0x4ffc00000, data 0x32dc8ee/0x33b7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:59.541096+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:00.541380+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1506240 data_alloc: 234881024 data_used: 23580672
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:01.541789+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:02.542036+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: mgrc ms_handle_reset ms_handle_reset con 0x5626b539e000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: mgrc reconnect Terminating session with v2:192.168.122.100:6800/1088804496
Oct 11 02:57:50 compute-0 ceph-osd[207831]: mgrc reconnect Starting new session with [v2:192.168.122.100:6800/1088804496,v1:192.168.122.100:6801/1088804496]
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: get_auth_request con 0x5626b9795400 auth_method 0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: mgrc handle_mgr_configure stats_period=5
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f82b7000/0x0/0x4ffc00000, data 0x32dc8ee/0x33b7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:03.542896+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:04.543375+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:05.543746+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:06.544141+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1506240 data_alloc: 234881024 data_used: 23580672
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:07.544346+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f82b7000/0x0/0x4ffc00000, data 0x32dc8ee/0x33b7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:08.544684+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f82b7000/0x0/0x4ffc00000, data 0x32dc8ee/0x33b7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:09.545070+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:10.545431+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:11.545680+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1506240 data_alloc: 234881024 data_used: 23580672
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:12.545888+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f82b7000/0x0/0x4ffc00000, data 0x32dc8ee/0x33b7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:13.546094+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:14.546707+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:15.546976+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:16.547158+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1506240 data_alloc: 234881024 data_used: 23580672
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f82b7000/0x0/0x4ffc00000, data 0x32dc8ee/0x33b7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:17.547439+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f82b7000/0x0/0x4ffc00000, data 0x32dc8ee/0x33b7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:18.547757+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 26.793706894s of 26.818393707s, submitted: 9
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:19.547999+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 128401408 unmapped: 28098560 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:20.548265+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 128401408 unmapped: 28098560 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f8068000/0x0/0x4ffc00000, data 0x352b8ee/0x3606000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f8063000/0x0/0x4ffc00000, data 0x352f8ee/0x360a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:21.548744+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f8063000/0x0/0x4ffc00000, data 0x352f8ee/0x360a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:22.549135+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:23.549384+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:24.549611+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:25.549887+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:26.550087+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:27.550496+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:28.550736+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:29.551278+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:30.551489+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:31.551862+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:32.552220+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:33.552802+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:34.553143+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:35.553382+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:36.553666+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:37.553953+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:38.554165+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:39.554533+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:40.554989+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:41.555465+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:42.555736+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:43.555971+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:44.556214+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:45.556564+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:46.557049+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:47.557457+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:48.557659+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:49.557983+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:50.558331+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:51.558589+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 32.365455627s of 32.435802460s, submitted: 14
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:52.558841+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:53.559112+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:54.559496+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:55.559916+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:56.560191+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:57.560524+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:58.560848+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:59.561113+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:00.561347+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:01.561841+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:02.562223+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:03.562686+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:04.563075+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:05.563356+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:06.563991+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:07.564275+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:08.564506+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:09.564716+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:10.564913+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:11.565263+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:12.565503+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:13.565736+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b3aae400 session 0x5626b5869680
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:14.565961+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:15.566269+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:16.566483+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:17.566686+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:18.566900+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:19.567177+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 28.230127335s of 28.241071701s, submitted: 1
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129507328 unmapped: 26992640 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:20.567525+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129523712 unmapped: 26976256 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:21.567826+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129597440 unmapped: 26902528 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:22.568023+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129638400 unmapped: 26861568 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:23.568214+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:24.568458+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:25.568694+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:26.568899+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:27.569149+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:28.569390+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:29.569678+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:30.569900+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:31.570185+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:32.570545+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:33.570925+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:34.571360+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:35.571616+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:36.571851+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:37.572171+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:38.572560+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:39.572797+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:40.573162+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:41.573671+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:42.574034+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:43.574361+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:44.574734+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:45.574982+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:46.575359+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:47.575544+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:48.575742+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:49.575995+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:50.576380+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:51.576798+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:52.577150+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:53.577513+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:54.577970+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:55.578392+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:56.578648+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:57.579028+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:58.579431+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:59.579861+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:00.580131+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:01.580591+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:02.580941+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:03.581320+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:04.581712+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:05.581981+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:06.582203+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:07.582590+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:08.582838+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:09.583084+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:10.583351+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:11.583849+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:12.584303+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:13.584724+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:14.585113+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:15.585486+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:16.585711+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:17.586019+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:18.586300+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:19.586566+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129712128 unmapped: 26787840 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:20.586933+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129712128 unmapped: 26787840 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:21.587354+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129712128 unmapped: 26787840 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:22.587758+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129712128 unmapped: 26787840 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:23.587984+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b539e400 session 0x5626b339a1e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e3800 session 0x5626b404f4a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b9795800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b9795800 session 0x5626b53f6d20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72400 session 0x5626b4fc9e00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 63.668666840s of 64.198707581s, submitted: 108
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b539e400 session 0x5626b4fc94a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e2c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e2c00 session 0x5626b49b30e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130580480 unmapped: 29073408 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e3800 session 0x5626b817e1e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b9795c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:24.588183+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b9795c00 session 0x5626b4cd01e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72400 session 0x5626b59a8d20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130580480 unmapped: 29073408 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:25.588389+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130588672 unmapped: 29065216 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:26.588795+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130588672 unmapped: 29065216 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1638123 data_alloc: 234881024 data_used: 23650304
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:27.589094+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130654208 unmapped: 28999680 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:28.589375+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b539e400 session 0x5626b59a8b40
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130654208 unmapped: 28999680 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:29.589718+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e2c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e2c00 session 0x5626b59a83c0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130662400 unmapped: 28991488 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:30.590097+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e3800 session 0x5626b59a9e00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b4060000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b4060000 session 0x5626b59a85a0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130670592 unmapped: 28983296 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:31.590351+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b4060000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130670592 unmapped: 28983296 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1638123 data_alloc: 234881024 data_used: 23650304
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:32.590522+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130670592 unmapped: 28983296 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:33.590766+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130891776 unmapped: 28762112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:34.590973+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132702208 unmapped: 26951680 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:35.592140+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136151040 unmapped: 23502848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:36.592405+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136151040 unmapped: 23502848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1734603 data_alloc: 234881024 data_used: 36290560
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:37.592641+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:38.592864+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:39.593177+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:40.593511+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:41.593801+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:42.594480+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1734603 data_alloc: 234881024 data_used: 36290560
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:43.594771+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:44.594989+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:45.595187+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:46.595374+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:47.595613+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1734603 data_alloc: 234881024 data_used: 36290560
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:48.595817+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:49.596022+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:50.596218+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:51.596558+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:52.596820+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1734603 data_alloc: 234881024 data_used: 36290560
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:53.597373+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:54.597873+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:55.598276+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:56.598714+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:57.599174+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1734603 data_alloc: 234881024 data_used: 36290560
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:58.599466+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:59.599692+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:00.599905+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136232960 unmapped: 23420928 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:01.600193+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136232960 unmapped: 23420928 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:02.600402+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136232960 unmapped: 23420928 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1734603 data_alloc: 234881024 data_used: 36290560
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:03.600847+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136232960 unmapped: 23420928 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:04.601286+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136232960 unmapped: 23420928 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:05.601533+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136232960 unmapped: 23420928 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 42.054290771s of 42.212093353s, submitted: 35
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f724e000/0x0/0x4ffc00000, data 0x4344950/0x4420000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [0,0,0,0,0,1])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:06.601795+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 139223040 unmapped: 20430848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:07.602006+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 139264000 unmapped: 20389888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1809071 data_alloc: 234881024 data_used: 36700160
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:08.602503+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f69b0000/0x0/0x4ffc00000, data 0x4bda950/0x4cb6000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142024704 unmapped: 17629184 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:09.602931+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142024704 unmapped: 17629184 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:10.603373+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142024704 unmapped: 17629184 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:11.603799+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142024704 unmapped: 17629184 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:12.604295+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1821195 data_alloc: 234881024 data_used: 36532224
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f696b000/0x0/0x4ffc00000, data 0x4c19950/0x4cf5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:13.604607+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f696b000/0x0/0x4ffc00000, data 0x4c19950/0x4cf5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:14.604996+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:15.605323+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f696a000/0x0/0x4ffc00000, data 0x4c28950/0x4d04000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:16.605758+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:17.606191+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812327 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:18.606770+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:19.607213+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f696a000/0x0/0x4ffc00000, data 0x4c28950/0x4d04000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:20.607687+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:21.608140+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:22.608341+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f696a000/0x0/0x4ffc00000, data 0x4c28950/0x4d04000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812327 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:23.608632+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:24.608959+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:25.609179+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:26.609397+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 19.895868301s of 20.259223938s, submitted: 107
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f696a000/0x0/0x4ffc00000, data 0x4c28950/0x4d04000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:27.609619+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812195 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:28.609832+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:29.610076+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:30.610370+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:31.610785+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:32.611137+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812195 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:33.611518+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:34.611777+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:35.612173+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:36.612693+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:37.613044+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812195 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:38.613440+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 12.529780388s of 12.544162750s, submitted: 2
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:39.613678+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:40.614013+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:41.614348+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:42.614573+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812371 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:43.614808+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:44.615039+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:45.615375+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:46.615716+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:47.616099+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812371 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:48.616590+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:49.616883+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:50.617099+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:51.617386+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:52.617724+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812371 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:53.617971+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:54.618161+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:55.618545+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:56.618976+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:57.619364+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812371 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:58.619738+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:59.620148+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:00.620527+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:01.620966+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:02.621417+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812371 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:03.621707+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:04.622116+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:05.622474+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:06.622685+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:07.622895+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812371 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:08.623117+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:09.623361+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 30.708982468s of 30.720699310s, submitted: 1
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:10.623584+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:11.623863+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:12.624099+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:13.624371+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:14.624614+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141705216 unmapped: 17948672 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:15.624830+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141705216 unmapped: 17948672 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:16.625061+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141705216 unmapped: 17948672 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:17.625330+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:18.625582+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:19.625790+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:20.626018+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:21.626332+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:22.626543+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:23.626744+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:24.626934+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:25.627329+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:26.627570+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:27.627969+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:28.628452+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:29.628870+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:30.629198+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:31.629669+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:32.630027+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:33.630495+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:34.630832+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:35.631206+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:36.631562+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:37.632001+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:38.632327+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:39.632752+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:40.633038+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:41.633346+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:42.633737+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:43.634093+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:44.634380+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:45.634669+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:46.635033+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:47.635349+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:48.635694+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:49.636038+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:50.636288+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:51.636553+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:52.636769+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:53.636969+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:54.637214+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:55.637500+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:56.637727+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:57.637934+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:58.638138+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:59.638397+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:00.638603+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:01.638854+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:02.639516+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:03.640061+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:04.640360+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:05.640571+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:06.640787+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:07.641179+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:08.641648+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:09.644011+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:10.645706+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:11.647167+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:12.648858+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:13.650507+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:14.651012+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:15.651511+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:16.652123+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:17.652479+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:18.652982+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:19.653506+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:20.653857+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 71.278442383s of 71.294609070s, submitted: 7
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:21.654485+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:22.654802+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:23.655018+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:24.655318+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:25.655526+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:26.655749+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b5817400 session 0x5626b58ca780
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53be000 session 0x5626b3a92d20
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e2c00
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b5816c00 session 0x5626b58310e0
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53be000
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:27.656383+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:28.656927+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:29.657222+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:30.657487+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:31.657730+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:32.657958+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:33.658193+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:34.658392+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:35.658883+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:36.659312+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:37.659662+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:38.660060+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:39.660468+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:40.660888+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:41.661793+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:42.662051+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:43.662940+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:44.663300+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:45.663536+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:46.663809+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:47.664125+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:48.664485+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:49.664978+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:50.665379+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:51.665759+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:52.666352+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:53.666795+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:54.667053+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:55.667499+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:56.667915+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:57.668394+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:58.668766+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:59.669160+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:00.669669+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:01.670026+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:02.670536+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:03.670972+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:04.671287+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:05.671968+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:06.672405+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:07.672654+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:08.672918+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:09.673313+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141762560 unmapped: 17891328 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:10.673520+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:11.673830+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:12.674176+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:13.674510+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:14.674892+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:15.675288+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:16.675629+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:17.675910+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:18.676592+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:19.677014+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:20.677406+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:21.677761+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:22.678111+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:23.678459+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:24.678719+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:25.679046+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:26.679343+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:27.679617+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:28.679932+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:29.680328+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:30.680526+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:31.680822+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:32.681037+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:33.681548+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:34.681836+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:35.682205+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:36.682593+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:37.682907+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:38.683395+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:39.683794+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:40.684224+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:41.684830+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:42.685217+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:43.685717+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:44.686080+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:45.686461+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:46.686834+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:47.687139+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:48.687407+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:49.687711+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:50.688053+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:51.688420+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:52.688696+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:53.688922+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:50 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:50 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:54.689154+0000)
Oct 11 02:57:50 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:50 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:55.689395+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:56.689854+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:57.690328+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:58.690726+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:59.691020+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:00.691455+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:01.691937+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:02.692393+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:03.692638+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:04.692897+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:05.693120+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:06.693524+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:07.693916+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:08.694369+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:09.694770+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:10.695154+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:11.695781+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:12.696146+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:13.696394+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:14.696599+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:15.696829+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:16.697145+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:17.697359+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:18.697566+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:19.697899+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:20.698098+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:21.698384+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:22.698760+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141803520 unmapped: 17850368 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:23.699028+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141803520 unmapped: 17850368 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:24.699419+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141803520 unmapped: 17850368 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:25.699605+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141803520 unmapped: 17850368 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:26.699909+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141803520 unmapped: 17850368 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:27.700325+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141803520 unmapped: 17850368 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:28.700747+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141803520 unmapped: 17850368 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:29.701204+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141803520 unmapped: 17850368 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:30.701641+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141811712 unmapped: 17842176 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:31.701938+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141811712 unmapped: 17842176 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:32.702209+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141811712 unmapped: 17842176 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:33.702575+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:34.702837+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:35.703044+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:36.703488+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:37.703881+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:38.704365+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:39.704707+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:40.705035+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:41.705479+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:42.705813+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:43.706088+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:44.706334+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:45.706544+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:46.706823+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:47.707223+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:48.707819+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:49.708207+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:50.708657+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:51.709171+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:52.709392+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:53.709595+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:54.709831+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:55.710071+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:56.710319+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:57.710698+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:58.711210+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:59.711635+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:00.711993+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:01.712378+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:02.712612+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141836288 unmapped: 17817600 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:03.712794+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141836288 unmapped: 17817600 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:04.713105+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141836288 unmapped: 17817600 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:05.713443+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141836288 unmapped: 17817600 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:06.713897+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141836288 unmapped: 17817600 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:07.714216+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141836288 unmapped: 17817600 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:08.714652+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141869056 unmapped: 17784832 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:09.714871+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812963 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141885440 unmapped: 17768448 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:10.715363+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141885440 unmapped: 17768448 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:11.715847+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141885440 unmapped: 17768448 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:12.716215+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141885440 unmapped: 17768448 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:13.716552+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141893632 unmapped: 17760256 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 173.439102173s of 173.452651978s, submitted: 2
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:14.716830+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813919 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:15.717186+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6950000/0x0/0x4ffc00000, data 0x4c42950/0x4d1e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:16.717405+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:17.717747+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:18.718188+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:19.718579+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813919 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6950000/0x0/0x4ffc00000, data 0x4c42950/0x4d1e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:20.718836+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6950000/0x0/0x4ffc00000, data 0x4c42950/0x4d1e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:21.719323+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:22.719704+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:23.720094+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:24.720423+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813919 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6950000/0x0/0x4ffc00000, data 0x4c42950/0x4d1e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:25.720640+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:26.720850+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:27.721303+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:28.721538+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6950000/0x0/0x4ffc00000, data 0x4c42950/0x4d1e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:29.721893+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814399 data_alloc: 234881024 data_used: 36552704
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 15.848360062s of 15.867633820s, submitted: 2
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:30.722336+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142016512 unmapped: 17637376 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:31.722648+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142016512 unmapped: 17637376 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:32.723038+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142016512 unmapped: 17637376 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:33.723344+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142016512 unmapped: 17637376 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:34.723702+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814819 data_alloc: 234881024 data_used: 36552704
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142016512 unmapped: 17637376 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:35.723986+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142024704 unmapped: 17629184 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:36.724341+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142024704 unmapped: 17629184 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:37.724658+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:38.725141+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:39.725370+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814819 data_alloc: 234881024 data_used: 36552704
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:40.725702+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:41.726132+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:42.726448+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:43.726682+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:44.727042+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814819 data_alloc: 234881024 data_used: 36552704
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:45.727414+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:46.727752+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:47.728101+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:48.728484+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 19.181091309s of 19.198316574s, submitted: 2
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:49.728849+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142041088 unmapped: 17612800 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:50.729326+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142041088 unmapped: 17612800 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:51.729807+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142041088 unmapped: 17612800 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:52.730149+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:53.730481+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:54.730865+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:55.731196+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:56.731845+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:57.732069+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:58.732534+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:59.732898+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:00.733309+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:01.733580+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:02.734000+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:03.734300+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:04.734737+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:05.735125+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:06.735382+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:07.735852+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142065664 unmapped: 17588224 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:08.736322+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142065664 unmapped: 17588224 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:09.736689+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142065664 unmapped: 17588224 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:10.736925+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142065664 unmapped: 17588224 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:11.737432+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142065664 unmapped: 17588224 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:12.737673+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142065664 unmapped: 17588224 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:13.737929+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142065664 unmapped: 17588224 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:14.738180+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142065664 unmapped: 17588224 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:15.738667+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142073856 unmapped: 17580032 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:16.739136+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142073856 unmapped: 17580032 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:17.739534+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142073856 unmapped: 17580032 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:18.739850+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142073856 unmapped: 17580032 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:19.740095+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142073856 unmapped: 17580032 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:20.740487+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142073856 unmapped: 17580032 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:21.740801+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142073856 unmapped: 17580032 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:22.741161+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:23.741437+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:24.741674+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:25.742043+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:26.742827+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:27.743198+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:28.743421+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:29.743763+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:30.744090+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:31.744534+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:32.744743+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:33.744963+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:34.745283+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:35.745607+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:36.746009+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:37.746458+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:38.746846+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142090240 unmapped: 17563648 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:39.747190+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142090240 unmapped: 17563648 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:40.747581+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142090240 unmapped: 17563648 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:41.747864+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142090240 unmapped: 17563648 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:42.748324+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142098432 unmapped: 17555456 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:43.748781+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142098432 unmapped: 17555456 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:44.749133+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142098432 unmapped: 17555456 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:45.749417+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142098432 unmapped: 17555456 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:46.749695+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142106624 unmapped: 17547264 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:47.750013+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142106624 unmapped: 17547264 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:48.750465+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142106624 unmapped: 17547264 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:49.750836+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142106624 unmapped: 17547264 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:50.751324+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142106624 unmapped: 17547264 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:51.751735+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142106624 unmapped: 17547264 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:52.752204+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142106624 unmapped: 17547264 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:53.752725+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142106624 unmapped: 17547264 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:54.753034+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:55.753406+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 4200.2 total, 600.0 interval
                                            Cumulative writes: 9424 writes, 37K keys, 9424 commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 9424 writes, 2439 syncs, 3.86 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 498 writes, 1584 keys, 498 commit groups, 1.0 writes per commit group, ingest: 2.24 MB, 0.00 MB/s
                                            Interval WAL: 498 writes, 202 syncs, 2.47 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:56.753774+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:57.754142+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:58.754461+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:59.755010+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:00.755502+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:01.756037+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:02.756405+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:03.756645+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:04.756934+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:05.757152+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:06.757666+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:07.757946+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:08.758217+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:09.758596+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142123008 unmapped: 17530880 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:10.758865+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142123008 unmapped: 17530880 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:11.759315+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:12.759767+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:13.760158+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:14.760391+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:15.760781+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:16.761107+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:17.761354+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:18.761614+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:19.761937+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:20.762358+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:21.762781+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:22.763186+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:23.763662+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:24.763888+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:25.764143+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:26.764482+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:27.764864+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:28.765456+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:29.765759+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:30.766064+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:31.766560+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:32.767870+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:33.769319+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:34.769552+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:35.769855+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:36.770467+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:37.770986+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142147584 unmapped: 17506304 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:38.771313+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142147584 unmapped: 17506304 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:39.772342+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142147584 unmapped: 17506304 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:40.772874+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142147584 unmapped: 17506304 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:41.773504+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142147584 unmapped: 17506304 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:42.773746+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:43.774451+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:44.774748+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:45.775087+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:46.775661+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:47.775873+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:48.776394+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:49.777725+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:50.778079+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:51.778493+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:52.778809+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:53.779453+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:54.779764+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:55.780023+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:56.780417+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:57.780669+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:58.781055+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:59.781401+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:00.781644+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:01.781945+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:02.782319+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:03.782551+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:04.782782+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:05.783395+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:06.784704+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:07.785094+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:08.785374+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:09.785686+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:10.786195+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:11.787018+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:12.787757+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:13.788141+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:14.788490+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142180352 unmapped: 17473536 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:15.788793+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142180352 unmapped: 17473536 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:16.789079+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142180352 unmapped: 17473536 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:17.789558+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142180352 unmapped: 17473536 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:18.789897+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142180352 unmapped: 17473536 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:19.790386+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142180352 unmapped: 17473536 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:20.791031+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142180352 unmapped: 17473536 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 151.628067017s of 151.660583496s, submitted: 15
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:21.791508+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142180352 unmapped: 17473536 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:22.791834+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142196736 unmapped: 17457152 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:23.792125+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142270464 unmapped: 17383424 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:24.792508+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:25.792947+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:26.793428+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:27.793934+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:28.794514+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:29.794898+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:30.795202+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:31.795787+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:32.796201+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:33.796641+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:34.797098+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:35.797577+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:36.797899+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:37.798218+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:38.798681+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:39.799079+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:40.799407+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:41.799973+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:42.800592+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:43.800982+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:44.801386+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:45.801769+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:46.802056+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:47.802440+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:48.802833+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:49.803548+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:50.803955+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:51.804567+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:52.804971+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:53.805517+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:54.805880+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:55.806406+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:56.806865+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:57.807114+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:58.807506+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:59.807956+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:00.808475+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:01.808945+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:02.809178+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:03.809468+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:04.809880+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:05.810118+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:06.810495+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:07.811039+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:08.811484+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:09.811912+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:10.812167+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:11.812464+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142303232 unmapped: 17350656 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:12.812673+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142303232 unmapped: 17350656 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:13.812873+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142303232 unmapped: 17350656 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:14.813222+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142303232 unmapped: 17350656 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:15.813664+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142303232 unmapped: 17350656 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:16.814643+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142303232 unmapped: 17350656 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:17.814861+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142303232 unmapped: 17350656 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:18.815069+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 57.462314606s of 58.026130676s, submitted: 90
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72c00 session 0x5626b2da83c0
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d73c00 session 0x5626b5868780
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142319616 unmapped: 17334272 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:19.815296+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e3800 session 0x5626b339bc20
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:20.815485+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6caa000/0x0/0x4ffc00000, data 0x48e892d/0x49c3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1784517 data_alloc: 234881024 data_used: 36499456
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:21.815741+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:22.816087+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:23.816359+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:24.816579+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:25.816934+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1784517 data_alloc: 234881024 data_used: 36499456
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6caa000/0x0/0x4ffc00000, data 0x48e892d/0x49c3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:26.817161+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:27.817371+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:28.817786+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:29.818168+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:30.818562+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6caa000/0x0/0x4ffc00000, data 0x48e892d/0x49c3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1784517 data_alloc: 234881024 data_used: 36499456
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:31.818923+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 12.665335655s of 12.825592041s, submitted: 30
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72400 session 0x5626b59a8960
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b4060000 session 0x5626b59a8000
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:32.819329+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72400 session 0x5626b58025a0
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f7915000/0x0/0x4ffc00000, data 0x31c88cb/0x32a2000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:33.819726+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:34.820132+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:35.820437+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f7915000/0x0/0x4ffc00000, data 0x31c88cb/0x32a2000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1507173 data_alloc: 218103808 data_used: 23609344
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:36.820707+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:37.821085+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:38.821458+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:39.821841+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:40.822215+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1507173 data_alloc: 218103808 data_used: 23609344
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:41.822792+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f7915000/0x0/0x4ffc00000, data 0x31c88cb/0x32a2000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 141 handle_osd_map epochs [142,142], i have 141, src has [1,142]
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 9.965906143s of 10.049147606s, submitted: 20
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 142 ms_handle_reset con 0x5626b2d72c00 session 0x5626b54b85a0
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:42.823185+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129794048 unmapped: 29859840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73c00
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 142 heartbeat osd_stat(store_statfs(0x4f8838000/0x0/0x4ffc00000, data 0x2d5a49c/0x2e35000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:43.823601+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129982464 unmapped: 38068224 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 142 handle_osd_map epochs [143,143], i have 142, src has [1,143]
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 143 ms_handle_reset con 0x5626b2d73c00 session 0x5626b6f521e0
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:44.824024+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b4060000
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130031616 unmapped: 38019072 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 143 handle_osd_map epochs [143,144], i have 143, src has [1,144]
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 144 handle_osd_map epochs [144,144], i have 144, src has [1,144]
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 144 ms_handle_reset con 0x5626b4060000 session 0x5626b3c83680
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:45.824554+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480342 data_alloc: 218103808 data_used: 18972672
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:46.824957+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:47.825545+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:48.825815+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 144 heartbeat osd_stat(store_statfs(0x4f8830000/0x0/0x4ffc00000, data 0x2d5dc16/0x2e3b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:49.826120+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:50.826446+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:51.826745+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480342 data_alloc: 218103808 data_used: 18972672
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:52.827131+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 144 heartbeat osd_stat(store_statfs(0x4f8830000/0x0/0x4ffc00000, data 0x2d5dc16/0x2e3b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 144 handle_osd_map epochs [145,145], i have 144, src has [1,145]
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 10.107682228s of 10.716229439s, submitted: 93
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:53.827561+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130113536 unmapped: 37937152 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:54.827837+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130113536 unmapped: 37937152 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:55.828211+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:56.828711+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481367 data_alloc: 218103808 data_used: 18972672
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:57.829015+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:58.829506+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:59.829966+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:00.830387+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:01.830866+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481367 data_alloc: 218103808 data_used: 18972672
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:02.831676+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:03.831922+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:04.832364+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:05.832778+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:06.833216+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481367 data_alloc: 218103808 data_used: 18972672
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:07.833788+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:08.834184+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:09.834610+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:10.835061+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:11.835446+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481367 data_alloc: 218103808 data_used: 18972672
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:12.835958+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:13.836345+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:14.836687+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:15.836876+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:16.837186+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481367 data_alloc: 218103808 data_used: 18972672
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:17.837548+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:18.837829+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:19.838047+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:20.838517+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:21.838921+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481367 data_alloc: 218103808 data_used: 18972672
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:22.839081+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:23.839448+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:24.839753+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:25.840096+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:26.840422+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481367 data_alloc: 218103808 data_used: 18972672
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:27.840684+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:28.841017+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:29.841499+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:30.841861+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:31.842328+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:32.842673+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:33.843037+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:34.843482+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:35.843831+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:36.844050+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:37.844549+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:38.844939+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:39.845406+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:40.845792+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:41.846197+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:42.846540+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:43.846810+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:44.847165+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:45.847571+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:46.847886+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:47.848353+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:48.848557+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:49.848863+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:50.849332+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:51.849733+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:52.850112+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:53.850354+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:54.850827+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:55.851215+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:56.851762+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:57.852129+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:58.852507+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:59.852945+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:00.853431+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:01.853737+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:02.853961+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:03.854334+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:04.854602+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:05.854887+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:06.855165+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:07.855567+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:08.855824+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:09.856045+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:10.856401+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:11.856737+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:12.857059+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:13.857345+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:14.857541+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:15.857866+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:16.858104+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:51 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:51 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: do_command 'config diff' '{prefix=config diff}'
Oct 11 02:57:51 compute-0 ceph-osd[207831]: do_command 'config diff' '{prefix=config diff}' result is 0 bytes
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:17.858501+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: do_command 'config show' '{prefix=config show}'
Oct 11 02:57:51 compute-0 ceph-osd[207831]: do_command 'config show' '{prefix=config show}' result is 0 bytes
Oct 11 02:57:51 compute-0 ceph-osd[207831]: do_command 'counter dump' '{prefix=counter dump}'
Oct 11 02:57:51 compute-0 ceph-osd[207831]: do_command 'counter dump' '{prefix=counter dump}' result is 0 bytes
Oct 11 02:57:51 compute-0 ceph-osd[207831]: do_command 'counter schema' '{prefix=counter schema}'
Oct 11 02:57:51 compute-0 ceph-osd[207831]: do_command 'counter schema' '{prefix=counter schema}' result is 0 bytes
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130375680 unmapped: 37675008 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:18.858727+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 02:57:51 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:19.858912+0000)
Oct 11 02:57:51 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 02:57:51 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130383872 unmapped: 37666816 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:51 compute-0 ceph-osd[207831]: do_command 'log dump' '{prefix=log dump}'
Oct 11 02:57:51 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15603 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:51 compute-0 ceph-mon[191930]: from='client.15589 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:51 compute-0 ceph-mon[191930]: pgmap v2411: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:51 compute-0 ceph-mon[191930]: from='client.15593 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:51 compute-0 ceph-mon[191930]: from='client.15597 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:51 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2431724796' entity='client.admin' cmd=[{"prefix": "mgr dump"}]: dispatch
Oct 11 02:57:51 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1363707742' entity='client.admin' cmd=[{"prefix": "mgr metadata"}]: dispatch
Oct 11 02:57:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr module ls"} v 0) v1
Oct 11 02:57:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3194682983' entity='client.admin' cmd=[{"prefix": "mgr module ls"}]: dispatch
Oct 11 02:57:51 compute-0 podman[483620]: 2025-10-11 02:57:51.228627722 +0000 UTC m=+0.110835237 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, container_name=ovn_metadata_agent)
Oct 11 02:57:51 compute-0 podman[483615]: 2025-10-11 02:57:51.248924715 +0000 UTC m=+0.145616541 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:57:51 compute-0 podman[483618]: 2025-10-11 02:57:51.285592112 +0000 UTC m=+0.165381268 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007)
Oct 11 02:57:51 compute-0 podman[483617]: 2025-10-11 02:57:51.332056232 +0000 UTC m=+0.216201704 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, config_id=ovn_controller, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:57:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2412: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:51 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15607 -' entity='client.admin' cmd=[{"prefix": "orch status", "detail": true, "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr services"} v 0) v1
Oct 11 02:57:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2279772991' entity='client.admin' cmd=[{"prefix": "mgr services"}]: dispatch
Oct 11 02:57:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:57:52 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15611 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr versions"} v 0) v1
Oct 11 02:57:52 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1677549315' entity='client.admin' cmd=[{"prefix": "mgr versions"}]: dispatch
Oct 11 02:57:52 compute-0 ceph-mon[191930]: from='client.15599 -' entity='client.admin' cmd=[{"prefix": "orch ls", "export": true, "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:52 compute-0 ceph-mon[191930]: from='client.15603 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:52 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3194682983' entity='client.admin' cmd=[{"prefix": "mgr module ls"}]: dispatch
Oct 11 02:57:52 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2279772991' entity='client.admin' cmd=[{"prefix": "mgr services"}]: dispatch
Oct 11 02:57:52 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1677549315' entity='client.admin' cmd=[{"prefix": "mgr versions"}]: dispatch
Oct 11 02:57:52 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15615 -' entity='client.admin' cmd=[{"prefix": "balancer eval", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:57:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon stat"} v 0) v1
Oct 11 02:57:52 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2289855235' entity='client.admin' cmd=[{"prefix": "mon stat"}]: dispatch
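The mon_command payloads in these handle_command/dispatch pairs are plain JSON, and the same shape can be sent from the python-rados binding. A minimal sketch, assuming a local ceph.conf and an admin keyring (both assumptions, not shown in the log):

    import json
    import rados

    cluster = rados.Rados(conffile="/etc/ceph/ceph.conf", name="client.admin")
    cluster.connect()
    try:
        # Same prefix as the "mon stat" dispatch logged above.
        cmd = json.dumps({"prefix": "mon stat", "format": "json"})
        ret, outbuf, outs = cluster.mon_command(cmd, b"")
        print(ret, outbuf.decode() or outs)
    finally:
        cluster.shutdown()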
Oct 11 02:57:52 compute-0 nova_compute[356901]: 2025-10-11 02:57:52.815 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:52 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15619 -' entity='client.admin' cmd=[{"prefix": "balancer status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:57:53 compute-0 ceph-mon[191930]: pgmap v2412: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:53 compute-0 ceph-mon[191930]: from='client.15607 -' entity='client.admin' cmd=[{"prefix": "orch status", "detail": true, "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:53 compute-0 ceph-mon[191930]: from='client.15611 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:53 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2289855235' entity='client.admin' cmd=[{"prefix": "mon stat"}]: dispatch
Oct 11 02:57:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2413: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "node ls"} v 0) v1
Oct 11 02:57:53 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2133508161' entity='client.admin' cmd=[{"prefix": "node ls"}]: dispatch
Oct 11 02:57:53 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15627 -' entity='client.admin' cmd=[{"prefix": "healthcheck history ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:57:53 compute-0 ceph-mgr[192233]: mgr.server reply reply (95) Operation not supported Module 'prometheus' is not enabled/loaded (required by command 'healthcheck history ls'): use `ceph mgr module enable prometheus` to enable it
Oct 11 02:57:53 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T02:57:53.710+0000 7fe891be6640 -1 mgr.server reply reply (95) Operation not supported Module 'prometheus' is not enabled/loaded (required by command 'healthcheck history ls'): use `ceph mgr module enable prometheus` to enable it
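The two lines above are the one real failure in this window: 'healthcheck history ls' requires the prometheus mgr module, and the mgr replies EOPNOTSUPP (95) with the fix spelled out. A sketch of applying that suggested remedy and verifying it, assuming the ceph CLI and an admin keyring are available on this host:

    import json
    import subprocess

    # Verbatim remedy from the log: ceph mgr module enable prometheus
    subprocess.run(["ceph", "mgr", "module", "enable", "prometheus"], check=True)

    out = subprocess.run(["ceph", "mgr", "module", "ls", "--format", "json"],
                         check=True, capture_output=True, text=True).stdout
    print("prometheus" in json.loads(out).get("enabled_modules", []))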
Oct 11 02:57:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd crush class ls"} v 0) v1
Oct 11 02:57:54 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/741836184' entity='client.admin' cmd=[{"prefix": "osd crush class ls"}]: dispatch
Oct 11 02:57:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "log last", "channel": "cephadm", "format": "json-pretty"} v 0) v1
Oct 11 02:57:54 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3303605460' entity='client.admin' cmd=[{"prefix": "log last", "channel": "cephadm", "format": "json-pretty"}]: dispatch
Oct 11 02:57:54 compute-0 ceph-mon[191930]: from='client.15615 -' entity='client.admin' cmd=[{"prefix": "balancer eval", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:57:54 compute-0 ceph-mon[191930]: from='client.15619 -' entity='client.admin' cmd=[{"prefix": "balancer status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:57:54 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2133508161' entity='client.admin' cmd=[{"prefix": "node ls"}]: dispatch
Oct 11 02:57:54 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/741836184' entity='client.admin' cmd=[{"prefix": "osd crush class ls"}]: dispatch
Oct 11 02:57:54 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3303605460' entity='client.admin' cmd=[{"prefix": "log last", "channel": "cephadm", "format": "json-pretty"}]: dispatch
Oct 11 02:57:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd crush dump"} v 0) v1
Oct 11 02:57:54 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2416311519' entity='client.admin' cmd=[{"prefix": "osd crush dump"}]: dispatch
Oct 11 02:57:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr dump", "format": "json-pretty"} v 0) v1
Oct 11 02:57:54 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2073942079' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json-pretty"}]: dispatch
Oct 11 02:57:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:57:54.892 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:57:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:57:54.892 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:57:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:57:54.893 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
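These three ovn_metadata_agent lines are oslo.concurrency's standard acquire/wait/held trace around ProcessMonitor._check_child_processes. A minimal sketch of the decorator that produces that pattern, with the lock name taken from the log and the body invented:

    from oslo_concurrency import lockutils

    @lockutils.synchronized("_check_child_processes")
    def _check_child_processes():
        # Concurrent callers queue on the lock; the 'waited'/'held' times
        # in the log are measured around exactly this critical section.
        pass

    _check_child_processes()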
Oct 11 02:57:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd crush rule ls"} v 0) v1
Oct 11 02:57:54 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1929031201' entity='client.admin' cmd=[{"prefix": "osd crush rule ls"}]: dispatch
Oct 11 02:57:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr metadata", "format": "json-pretty"} v 0) v1
Oct 11 02:57:55 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1669724990' entity='client.admin' cmd=[{"prefix": "mgr metadata", "format": "json-pretty"}]: dispatch
Oct 11 02:57:55 compute-0 ceph-mon[191930]: pgmap v2413: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:55 compute-0 ceph-mon[191930]: from='client.15627 -' entity='client.admin' cmd=[{"prefix": "healthcheck history ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:57:55 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2416311519' entity='client.admin' cmd=[{"prefix": "osd crush dump"}]: dispatch
Oct 11 02:57:55 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2073942079' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json-pretty"}]: dispatch
Oct 11 02:57:55 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1929031201' entity='client.admin' cmd=[{"prefix": "osd crush rule ls"}]: dispatch
Oct 11 02:57:55 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1669724990' entity='client.admin' cmd=[{"prefix": "mgr metadata", "format": "json-pretty"}]: dispatch
Oct 11 02:57:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd crush show-tunables"} v 0) v1
Oct 11 02:57:55 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1437057721' entity='client.admin' cmd=[{"prefix": "osd crush show-tunables"}]: dispatch
Oct 11 02:57:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2414: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr module ls", "format": "json-pretty"} v 0) v1
Oct 11 02:57:55 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1880438321' entity='client.admin' cmd=[{"prefix": "mgr module ls", "format": "json-pretty"}]: dispatch
Oct 11 02:57:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd crush tree", "show_shadow": true} v 0) v1
Oct 11 02:57:55 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/743982802' entity='client.admin' cmd=[{"prefix": "osd crush tree", "show_shadow": true}]: dispatch
Oct 11 02:57:55 compute-0 nova_compute[356901]: 2025-10-11 02:57:55.759 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
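The [POLLIN] __log_wakeup entries from nova_compute are the ovs poller (the ovs/poller.py the line cites) reporting that the OVSDB connection's fd became readable. A stripped-down illustration of that poll loop, offered as an assumption-level sketch rather than nova's actual code:

    import ovs.poller

    poller = ovs.poller.Poller()
    poller.timer_wait(1000)  # wake after at most 1000 ms even if no fd is ready
    poller.block()           # with vlog at debug, wakeups are logged like above
    print("woke up")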
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:01.996935+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1250972 data_alloc: 234881024 data_used: 14913536
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f959d000/0x0/0x4ffc00000, data 0x20087d4/0x20d1000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
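The osd_stat heartbeat lines carry store_statfs in hex. Decoding the first triple as available / internally reserved / total (field order per the store_statfs_t printer in the Ceph source; treat that ordering as an assumption) ties them back to the pgmap totals:

    GiB, MiB = 1 << 30, 1 << 20
    available, reserved, total = 0x4f959d000, 0x0, 0x4ffc00000
    used = total - available - reserved
    print(f"total {total / GiB:.1f} GiB, used {used / MiB:.0f} MiB")
    # -> total 20.0 GiB, used 102 MiB; osd.1 plus peers [0,2] makes three
    #    20 GiB OSDs, i.e. the "60 GiB / 60 GiB avail" in the pgmap lines.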
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99590144 unmapped: 3735552 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:02.997406+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f959d000/0x0/0x4ffc00000, data 0x20087d4/0x20d1000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99590144 unmapped: 3735552 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:03.997914+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:04.998451+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99590144 unmapped: 3735552 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:05.998767+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99590144 unmapped: 3735552 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f959d000/0x0/0x4ffc00000, data 0x20087d4/0x20d1000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:06.999102+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99590144 unmapped: 3735552 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1250972 data_alloc: 234881024 data_used: 14913536
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:07.999402+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99590144 unmapped: 3735552 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:08.999632+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99590144 unmapped: 3735552 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:09.999968+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99590144 unmapped: 3735552 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:11.000269+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99590144 unmapped: 3735552 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:12.000682+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99590144 unmapped: 3735552 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f959d000/0x0/0x4ffc00000, data 0x20087d4/0x20d1000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1250972 data_alloc: 234881024 data_used: 14913536
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:13.001148+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99590144 unmapped: 3735552 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 30.709411621s of 30.718933105s, submitted: 2
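The _kv_sync_thread line above reports idle time within a sampling window; turning it into a busy fraction shows this BlueStore kv sync thread is essentially idle:

    idle, window, submitted = 30.709411621, 30.718933105, 2
    print(f"busy {1 - idle / window:.4%} over {window:.1f}s, "
          f"{submitted} kv commits")   # -> busy ~0.031%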
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:14.001525+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99647488 unmapped: 3678208 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:15.001828+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99647488 unmapped: 3678208 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f9598000/0x0/0x4ffc00000, data 0x20087d4/0x20d1000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:16.002540+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99647488 unmapped: 3678208 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:17.002997+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99647488 unmapped: 3678208 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1251884 data_alloc: 234881024 data_used: 14905344
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:18.003346+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99647488 unmapped: 3678208 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:19.003804+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99647488 unmapped: 3678208 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f9598000/0x0/0x4ffc00000, data 0x20087d4/0x20d1000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:20.004090+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99647488 unmapped: 3678208 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f9598000/0x0/0x4ffc00000, data 0x20087d4/0x20d1000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:21.004480+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99647488 unmapped: 3678208 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:22.004842+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99647488 unmapped: 3678208 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1251884 data_alloc: 234881024 data_used: 14905344
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:23.005306+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99647488 unmapped: 3678208 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f9598000/0x0/0x4ffc00000, data 0x20087d4/0x20d1000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:24.005742+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99647488 unmapped: 3678208 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:25.006129+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99647488 unmapped: 3678208 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:26.006573+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99647488 unmapped: 3678208 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:27.006748+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99647488 unmapped: 3678208 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1251884 data_alloc: 234881024 data_used: 14905344
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:28.007019+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99647488 unmapped: 3678208 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:29.007315+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99647488 unmapped: 3678208 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f9598000/0x0/0x4ffc00000, data 0x20087d4/0x20d1000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:30.007539+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99647488 unmapped: 3678208 heap: 103325696 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649800
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 17.177772522s of 17.202590942s, submitted: 8
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca2649800 session 0x559ca1c93680
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:31.008024+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648000
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca2648000 session 0x559ca1c8f860
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca266f000
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca266f000 session 0x559ca1cab2c0
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99942400 unmapped: 12296192 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca266f000
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca266f000 session 0x559ca1c94d20
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca400
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca14ca400 session 0x559ca1c95860
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca14ca800 session 0x559c9f7114a0
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648000
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca2648000 session 0x559ca2208960
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:32.008282+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649800
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca2649800 session 0x559ca231ad20
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99942400 unmapped: 12296192 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1297934 data_alloc: 234881024 data_used: 14905344
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:33.008525+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8f3b000/0x0/0x4ffc00000, data 0x26697e4/0x2733000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99942400 unmapped: 12296192 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:34.008787+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99942400 unmapped: 12296192 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:35.010839+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99942400 unmapped: 12296192 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:36.011048+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99942400 unmapped: 12296192 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649800
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca2649800 session 0x559ca1c8d680
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:37.011558+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99942400 unmapped: 12296192 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1299255 data_alloc: 234881024 data_used: 14905344
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca400
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8f3b000/0x0/0x4ffc00000, data 0x26697e4/0x2733000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [1])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:38.011811+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99942400 unmapped: 12296192 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8f3b000/0x0/0x4ffc00000, data 0x26697e4/0x2733000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:39.012181+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99942400 unmapped: 12296192 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:40.012376+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 99942400 unmapped: 12296192 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648000
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:41.012596+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8f3b000/0x0/0x4ffc00000, data 0x26697e4/0x2733000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101761024 unmapped: 10477568 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 10.919919014s of 11.040625572s, submitted: 12
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:42.012950+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105562112 unmapped: 6676480 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1347783 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:43.013275+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105562112 unmapped: 6676480 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:44.013960+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105562112 unmapped: 6676480 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:45.014432+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105562112 unmapped: 6676480 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:46.014822+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105562112 unmapped: 6676480 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8f38000/0x0/0x4ffc00000, data 0x266c7e4/0x2736000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:47.015114+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105594880 unmapped: 6643712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1347783 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8f38000/0x0/0x4ffc00000, data 0x266c7e4/0x2736000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:48.015440+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105594880 unmapped: 6643712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:49.015801+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105594880 unmapped: 6643712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:50.016188+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105594880 unmapped: 6643712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:51.016544+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8f38000/0x0/0x4ffc00000, data 0x266c7e4/0x2736000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105594880 unmapped: 6643712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:52.016842+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105594880 unmapped: 6643712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1347783 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8f38000/0x0/0x4ffc00000, data 0x266c7e4/0x2736000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:53.017556+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105594880 unmapped: 6643712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:54.018020+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105594880 unmapped: 6643712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:55.018413+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105594880 unmapped: 6643712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8f38000/0x0/0x4ffc00000, data 0x266c7e4/0x2736000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:56.018776+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105594880 unmapped: 6643712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8f38000/0x0/0x4ffc00000, data 0x266c7e4/0x2736000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:57.019190+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105603072 unmapped: 6635520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1347783 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:58.019572+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105603072 unmapped: 6635520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:59.019924+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105603072 unmapped: 6635520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:00.020126+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105603072 unmapped: 6635520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:01.020446+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105603072 unmapped: 6635520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8f38000/0x0/0x4ffc00000, data 0x266c7e4/0x2736000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:02.020906+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105603072 unmapped: 6635520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1347783 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:03.021732+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105603072 unmapped: 6635520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8f38000/0x0/0x4ffc00000, data 0x266c7e4/0x2736000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:04.022102+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105603072 unmapped: 6635520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:05.022424+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105611264 unmapped: 6627328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:06.022852+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105611264 unmapped: 6627328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:07.023307+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105611264 unmapped: 6627328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1347783 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:08.023629+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105611264 unmapped: 6627328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:09.024116+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8f38000/0x0/0x4ffc00000, data 0x266c7e4/0x2736000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105611264 unmapped: 6627328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:10.024407+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105611264 unmapped: 6627328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:11.024798+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105611264 unmapped: 6627328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:12.025081+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105611264 unmapped: 6627328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1347783 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:13.026496+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105611264 unmapped: 6627328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:14.026971+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 32.768882751s of 32.784095764s, submitted: 2
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 107945984 unmapped: 4292608 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:15.027181+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b84000/0x0/0x4ffc00000, data 0x2a207e4/0x2aea000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108609536 unmapped: 3629056 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:16.027978+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:17.028166+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:18.028384+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:19.028634+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:20.028930+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:21.029202+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:22.029486+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:23.029767+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:24.030053+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:25.030417+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:26.030724+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108716032 unmapped: 3522560 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:27.031109+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108716032 unmapped: 3522560 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:28.031402+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108716032 unmapped: 3522560 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:29.031787+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108716032 unmapped: 3522560 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:30.032114+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108716032 unmapped: 3522560 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:31.032388+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108716032 unmapped: 3522560 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:32.032813+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108716032 unmapped: 3522560 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:55 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:55 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:55 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:33.033099+0000)
Oct 11 02:57:55 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:55 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:34.033384+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:35.033665+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:36.034009+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:37.034404+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:38.034793+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:39.035136+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:40.035460+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:41.035711+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:42.035928+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:43.036593+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:44.037083+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:45.040986+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:46.041481+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:47.041816+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:48.042210+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:49.042739+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:50.043192+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:51.043655+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:52.044092+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:53.044534+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:54.044995+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108724224 unmapped: 3514368 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:55.045454+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108732416 unmapped: 3506176 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:56.045909+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108732416 unmapped: 3506176 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:57.046445+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108732416 unmapped: 3506176 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:58.047024+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108732416 unmapped: 3506176 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:59.047466+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108732416 unmapped: 3506176 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:00.047903+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108732416 unmapped: 3506176 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:01.048165+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108732416 unmapped: 3506176 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:02.048817+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108732416 unmapped: 3506176 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:03.049454+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108732416 unmapped: 3506176 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:04.049948+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108740608 unmapped: 3497984 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:05.050183+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108740608 unmapped: 3497984 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:06.050583+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108748800 unmapped: 3489792 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:07.050974+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108748800 unmapped: 3489792 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:08.051448+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108748800 unmapped: 3489792 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:09.051849+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108748800 unmapped: 3489792 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:10.052380+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108748800 unmapped: 3489792 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:11.052643+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108748800 unmapped: 3489792 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:12.052930+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108642304 unmapped: 3596288 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:13.053384+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108642304 unmapped: 3596288 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:14.053882+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108642304 unmapped: 3596288 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:15.054392+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108642304 unmapped: 3596288 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:16.054820+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108642304 unmapped: 3596288 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:17.055334+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108642304 unmapped: 3596288 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:18.055685+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108642304 unmapped: 3596288 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:19.056068+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108642304 unmapped: 3596288 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:20.056497+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108642304 unmapped: 3596288 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:21.056764+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108642304 unmapped: 3596288 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:22.057163+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108642304 unmapped: 3596288 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:23.057624+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108642304 unmapped: 3596288 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:24.058138+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108642304 unmapped: 3596288 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:25.058634+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108642304 unmapped: 3596288 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:26.058987+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108642304 unmapped: 3596288 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:27.059523+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:28.059754+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:29.059936+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:30.060566+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:31.060791+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:32.061009+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:33.061489+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:34.061842+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:35.062210+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:36.062478+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:37.062798+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:38.063131+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:39.063467+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:40.063782+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:41.064543+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:42.064934+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108650496 unmapped: 3588096 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:43.065411+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 3579904 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:44.065751+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 3579904 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:45.065957+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 3579904 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:46.066182+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 3579904 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:47.066415+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 3579904 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:48.066722+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 3579904 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets getting new tickets!
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:49.067665+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _finish_auth 0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:49.069925+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 3579904 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:50.068073+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 3579904 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:51.068472+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 3579904 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:52.068917+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 3579904 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:53.069183+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 3579904 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:54.069708+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 3579904 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559c9f599000 session 0x559c9ff12000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca266f000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:55.070024+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 3579904 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca00bf000 session 0x559ca1c921e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca00bf400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559c9f599800 session 0x559ca22094a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca00bf000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:56.070448+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 3579904 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:57.072418+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108666880 unmapped: 3571712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:58.072751+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108666880 unmapped: 3571712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:59.072971+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108666880 unmapped: 3571712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:00.073396+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108666880 unmapped: 3571712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:01.073780+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108666880 unmapped: 3571712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:02.074142+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108666880 unmapped: 3571712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:03.074534+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108666880 unmapped: 3571712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:04.074899+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108666880 unmapped: 3571712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:05.075372+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108666880 unmapped: 3571712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:06.075685+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108666880 unmapped: 3571712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:07.075889+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108666880 unmapped: 3571712 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:08.076145+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 3563520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:09.076358+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 3563520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:10.076767+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 3563520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:11.077025+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 3563520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:12.077401+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 3563520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:13.077807+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 3563520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:14.078337+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 3563520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:15.078709+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 3563520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:16.079103+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 3563520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:17.079502+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 3563520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:18.079876+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 3563520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:19.080134+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 3563520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:20.080512+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 3563520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:21.080882+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 3563520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:22.081131+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 3563520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:23.081380+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108675072 unmapped: 3563520 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:24.081670+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:25.081901+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:26.082177+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:27.082566+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:28.082806+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:29.083155+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:30.083536+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:31.083781+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:32.084082+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:33.084566+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:34.084852+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:35.085076+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:36.085403+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:37.085855+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:38.086211+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:39.086587+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 3555328 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:40.086807+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 3547136 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:41.087111+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 3547136 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:42.087373+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 3547136 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:43.087792+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 3547136 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:44.089879+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 3547136 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:45.090100+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 3547136 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:46.090422+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 3547136 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:47.090769+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 3547136 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:48.091218+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 3547136 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:49.091610+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 3547136 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:50.091861+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 3547136 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:51.092412+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 3547136 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:52.092632+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 3547136 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:53.093160+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 3547136 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:54.093590+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 3547136 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:55.093925+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 3547136 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:56.094187+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:57.094530+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:58.094848+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:59.095133+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:00.095597+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:01.095927+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:02.096395+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:03.097743+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:04.098174+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:05.098569+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:06.098937+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:07.099443+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:08.099791+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:09.100599+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:10.100995+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:11.101404+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:12.101782+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:13.102197+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1380013 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca262f400 session 0x559c9f5d1e00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca266e800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:14.102717+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:15.103141+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:16.103439+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:17.103669+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:18.104003+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f8b7e000/0x0/0x4ffc00000, data 0x2a267e4/0x2af0000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 3538944 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 184.237762451s of 184.379837036s, submitted: 24
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca2635c00 session 0x559ca2056960
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1378721 data_alloc: 234881024 data_used: 21598208
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca2649400 session 0x559ca12fc1e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca2648800 session 0x559c9f5d70e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:19.104421+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f7b400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 3547136 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:20.104667+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105160704 unmapped: 7077888 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:21.104948+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106250240 unmapped: 5988352 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:22.105284+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca1f7b400 session 0x559ca2057a40
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:23.105510+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 234881024 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:24.105949+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:25.106214+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:26.106528+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:27.106787+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:28.107371+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 234881024 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:29.107600+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:30.107884+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:31.108149+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:32.108598+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:33.108803+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 234881024 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:34.109159+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:35.109388+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:36.109776+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:37.110016+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:38.110299+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 234881024 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:39.110640+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:40.110903+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:41.111154+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:42.111567+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:43.111979+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 234881024 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:44.112304+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:45.112612+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:46.112987+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:47.113348+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:48.113550+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 234881024 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:49.113795+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:50.114167+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:51.114433+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:52.114724+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:53.114962+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 234881024 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:54.115418+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:55.115681+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:56.116189+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:57.116568+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:58.116844+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 234881024 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:59.117089+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:00.117392+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:01.117644+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:02.117990+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:03.118211+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 234881024 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:04.118645+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 5971968 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:05.118885+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106274816 unmapped: 5963776 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:06.119174+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106274816 unmapped: 5963776 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:07.119367+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106274816 unmapped: 5963776 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:08.119578+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106274816 unmapped: 5963776 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:09.119900+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 234881024 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106274816 unmapped: 5963776 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:10.120050+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106274816 unmapped: 5963776 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:11.120301+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106274816 unmapped: 5963776 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:12.120764+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106274816 unmapped: 5963776 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:13.121158+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106274816 unmapped: 5963776 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:14.121695+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 234881024 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106274816 unmapped: 5963776 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:15.121966+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106274816 unmapped: 5963776 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:16.122218+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106274816 unmapped: 5963776 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:17.122631+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:18.122912+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:19.123195+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 234881024 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:20.123677+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:21.123988+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:22.124435+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:23.124678+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:24.125144+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 218103808 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:25.125551+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:26.125789+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:27.126036+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:28.126497+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:29.127001+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 218103808 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:30.127417+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:31.127716+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:32.127981+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:33.128187+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:34.128504+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 218103808 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:35.128901+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:36.129496+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:37.129874+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:38.130174+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:39.130394+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 218103808 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:40.130777+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:41.131056+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:42.131453+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:43.131686+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:44.131973+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 218103808 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:45.132430+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:46.132667+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:47.133062+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:48.133437+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:49.133716+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 218103808 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:50.134091+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:51.134561+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:52.134842+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:53.135326+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:54.135759+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 218103808 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:55.136187+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:56.136503+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:57.136939+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:58.137617+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:59.138047+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 218103808 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:00.138528+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:01.139003+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:02.139396+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:03.139776+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:04.140134+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 218103808 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:05.140659+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106283008 unmapped: 5955584 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:06.140944+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106291200 unmapped: 5947392 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:07.141128+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106291200 unmapped: 5947392 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:08.141389+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106291200 unmapped: 5947392 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:09.141763+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106291200 unmapped: 5947392 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 218103808 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:10.142134+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106291200 unmapped: 5947392 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:11.142443+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106291200 unmapped: 5947392 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:12.142688+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106291200 unmapped: 5947392 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:13.143015+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106291200 unmapped: 5947392 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:14.143311+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106291200 unmapped: 5947392 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235113 data_alloc: 218103808 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:15.143696+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106291200 unmapped: 5947392 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:16.143963+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106291200 unmapped: 5947392 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:17.144267+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106291200 unmapped: 5947392 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:18.144576+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f97ba000/0x0/0x4ffc00000, data 0x1ded701/0x1eb3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106291200 unmapped: 5947392 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:19.144910+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106291200 unmapped: 5947392 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 119.179214478s of 120.799133301s, submitted: 78
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1235041 data_alloc: 218103808 data_used: 15917056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca14cb400 session 0x559ca1c950e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca1520800 session 0x559ca1287c20
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2635c00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:20.145136+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105144320 unmapped: 7094272 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:21.145432+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca2635c00 session 0x559ca1cf10e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:22.145837+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f9b64000/0x0/0x4ffc00000, data 0x1a1e68f/0x1ae2000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:23.146180+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:24.146597+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1192186 data_alloc: 218103808 data_used: 14819328
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f9b64000/0x0/0x4ffc00000, data 0x1a1e68f/0x1ae2000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:25.146970+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:26.147195+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f9b64000/0x0/0x4ffc00000, data 0x1a1e68f/0x1ae2000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:27.147562+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:28.147779+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:29.148196+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1192186 data_alloc: 218103808 data_used: 14819328
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:30.148519+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f9b64000/0x0/0x4ffc00000, data 0x1a1e68f/0x1ae2000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:31.148966+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:32.149472+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:33.149858+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:34.150116+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1192186 data_alloc: 218103808 data_used: 14819328
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:35.150582+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:36.150890+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f9b64000/0x0/0x4ffc00000, data 0x1a1e68f/0x1ae2000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:37.151330+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f9b64000/0x0/0x4ffc00000, data 0x1a1e68f/0x1ae2000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:38.151706+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:39.152082+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1192186 data_alloc: 218103808 data_used: 14819328
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f9b64000/0x0/0x4ffc00000, data 0x1a1e68f/0x1ae2000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:40.152848+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:41.153442+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:42.153795+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:43.154018+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:44.154462+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f9b64000/0x0/0x4ffc00000, data 0x1a1e68f/0x1ae2000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1192186 data_alloc: 218103808 data_used: 14819328
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:45.154903+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f9b64000/0x0/0x4ffc00000, data 0x1a1e68f/0x1ae2000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:46.155383+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f9b64000/0x0/0x4ffc00000, data 0x1a1e68f/0x1ae2000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105168896 unmapped: 7069696 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:47.155791+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca2648000 session 0x559ca1c96000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca14ca400 session 0x559ca12a0f00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 27.672309875s of 27.930002213s, submitted: 46
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca14ca800 session 0x559c9f795a40
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106217472 unmapped: 6021120 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:48.156088+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:49.156466+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 ms_handle_reset con 0x559ca14cb400 session 0x559ca1c921e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1070205 data_alloc: 218103808 data_used: 8126464
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:50.156943+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4fa5a8000/0x0/0x4ffc00000, data 0x100367f/0x10c6000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:51.157268+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4fa5a8000/0x0/0x4ffc00000, data 0x100367f/0x10c6000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:52.157824+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:53.158197+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4fa5a8000/0x0/0x4ffc00000, data 0x100367f/0x10c6000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:54.158724+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1070205 data_alloc: 218103808 data_used: 8126464
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:55.159364+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:56.159652+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4fa5a8000/0x0/0x4ffc00000, data 0x100367f/0x10c6000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:57.159899+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4fa5a8000/0x0/0x4ffc00000, data 0x100367f/0x10c6000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:58.160182+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:59.160495+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1070205 data_alloc: 218103808 data_used: 8126464
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:00.160890+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4fa5a8000/0x0/0x4ffc00000, data 0x100367f/0x10c6000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:01.161103+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:02.161368+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:03.161749+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:04.162431+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1070205 data_alloc: 218103808 data_used: 8126464
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:05.162841+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4fa5a8000/0x0/0x4ffc00000, data 0x100367f/0x10c6000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:06.163161+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:07.163614+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:08.163897+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4fa5a8000/0x0/0x4ffc00000, data 0x100367f/0x10c6000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:09.164123+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1070205 data_alloc: 218103808 data_used: 8126464
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:10.164474+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4fa5a8000/0x0/0x4ffc00000, data 0x100367f/0x10c6000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:11.164859+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4fa5a8000/0x0/0x4ffc00000, data 0x100367f/0x10c6000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:12.165309+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:13.165750+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:14.166108+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1070205 data_alloc: 218103808 data_used: 8126464
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:15.166455+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:16.166775+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4fa5a8000/0x0/0x4ffc00000, data 0x100367f/0x10c6000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:17.167025+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:18.167299+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:19.167711+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1070205 data_alloc: 218103808 data_used: 8126464
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:20.167929+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4fa5a8000/0x0/0x4ffc00000, data 0x100367f/0x10c6000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:21.168683+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:22.168960+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1520800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 35.297542572s of 35.547161102s, submitted: 36
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:23.169276+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4fa5a8000/0x0/0x4ffc00000, data 0x100367f/0x10c6000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:24.169765+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100671488 unmapped: 11567104 heap: 112238592 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1071971 data_alloc: 218103808 data_used: 8130560
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:25.170184+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100687872 unmapped: 28336128 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:26.170533+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100687872 unmapped: 28336128 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:27.170849+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f9da8000/0x0/0x4ffc00000, data 0x180367f/0x18c6000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100687872 unmapped: 28336128 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:28.171331+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 heartbeat osd_stat(store_statfs(0x4f9da8000/0x0/0x4ffc00000, data 0x180367f/0x18c6000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 129 handle_osd_map epochs [130,130], i have 129, src has [1,130]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca1520800 session 0x559c9f48a1e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100696064 unmapped: 28327936 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:29.171785+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100655104 unmapped: 28368896 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1132700 data_alloc: 218103808 data_used: 8138752
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:30.171997+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100655104 unmapped: 28368896 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:31.172333+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100655104 unmapped: 28368896 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:32.172752+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100655104 unmapped: 28368896 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:33.172997+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f9da3000/0x0/0x4ffc00000, data 0x180521f/0x18ca000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100655104 unmapped: 28368896 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:34.173402+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100655104 unmapped: 28368896 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1132700 data_alloc: 218103808 data_used: 8138752
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:35.173773+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f9da3000/0x0/0x4ffc00000, data 0x180521f/0x18ca000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100409344 unmapped: 28614656 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:36.174201+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100409344 unmapped: 28614656 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:37.174795+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2635c00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 14.540469170s of 14.669846535s, submitted: 15
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca2635c00 session 0x559ca231b860
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108421120 unmapped: 20602880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca2648000 session 0x559ca21bb4a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:38.175120+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108421120 unmapped: 20602880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:39.175473+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca14ca800 session 0x559ca01d9c20
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 20332544 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:40.175865+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1185450 data_alloc: 218103808 data_used: 14958592
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99af000/0x0/0x4ffc00000, data 0x1bfa21f/0x1cbf000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108740608 unmapped: 20283392 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca14cb400 session 0x559ca1c96d20
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:41.176066+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99af000/0x0/0x4ffc00000, data 0x1bfa21f/0x1cbf000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108740608 unmapped: 20283392 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:42.176455+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108740608 unmapped: 20283392 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:43.177337+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1520800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca1520800 session 0x559ca44ae5a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108748800 unmapped: 20275200 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:44.177581+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2635c00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108748800 unmapped: 20275200 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:45.177815+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1189229 data_alloc: 218103808 data_used: 14966784
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108765184 unmapped: 20258816 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:46.178670+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108765184 unmapped: 20258816 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:47.179001+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 20324352 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:48.179300+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3000.1 total, 600.0 interval
                                            Cumulative writes: 8598 writes, 33K keys, 8598 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.01 MB/s
                                            Cumulative WAL: 8598 writes, 1979 syncs, 4.34 writes per sync, written: 0.02 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 783 writes, 2196 keys, 783 commit groups, 1.0 writes per commit group, ingest: 1.52 MB, 0.00 MB/s
                                            Interval WAL: 783 writes, 356 syncs, 2.20 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:49.179723+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:50.180132+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1212241 data_alloc: 234881024 data_used: 18141184
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:51.180498+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:52.180910+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:53.181179+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:54.181662+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:55.181946+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1212241 data_alloc: 234881024 data_used: 18141184
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:56.182155+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:57.182569+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:58.182797+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:59.183326+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:00.183671+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1212241 data_alloc: 234881024 data_used: 18141184
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:01.184029+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:02.184499+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:03.185016+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:04.185416+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:05.185691+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1212241 data_alloc: 234881024 data_used: 18141184
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:06.186384+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca2635c00 session 0x559ca1caa5a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:07.187131+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca2648800 session 0x559ca44ae000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 29.282367706s of 29.483385086s, submitted: 24
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca2649400 session 0x559ca1c93c20
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:08.187539+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106881024 unmapped: 22142976 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca14ca800 session 0x559ca126cb40
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:09.187832+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106881024 unmapped: 22142976 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:10.188379+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106881024 unmapped: 22142976 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1156347 data_alloc: 218103808 data_used: 14954496
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 130 handle_osd_map epochs [131,131], i have 130, src has [1,131]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 131 heartbeat osd_stat(store_statfs(0x4f9da5000/0x0/0x4ffc00000, data 0x18051fc/0x18c9000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:11.188761+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 131 ms_handle_reset con 0x559ca14cb400 session 0x559c9ff105a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101343232 unmapped: 27680768 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:12.189324+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101343232 unmapped: 27680768 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:13.189797+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101343232 unmapped: 27680768 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 131 heartbeat osd_stat(store_statfs(0x4fa5a1000/0x0/0x4ffc00000, data 0x1006dcd/0x10cc000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:14.190467+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101343232 unmapped: 27680768 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:15.190726+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101343232 unmapped: 27680768 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1085649 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 131 handle_osd_map epochs [131,132], i have 131, src has [1,132]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:16.191528+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:17.192434+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 132 heartbeat osd_stat(store_statfs(0x4fa59e000/0x0/0x4ffc00000, data 0x1008830/0x10cf000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:18.193112+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:19.193798+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:20.194398+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1088623 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 132 heartbeat osd_stat(store_statfs(0x4fa59e000/0x0/0x4ffc00000, data 0x1008830/0x10cf000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1520800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:21.194651+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:22.195096+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:23.195357+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:24.195838+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:25.196432+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1088623 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 132 handle_osd_map epochs [133,133], i have 132, src has [1,133]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 17.981964111s of 18.491548538s, submitted: 87
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 132 heartbeat osd_stat(store_statfs(0x4fa59e000/0x0/0x4ffc00000, data 0x1008830/0x10cf000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [0,0,0,0,1])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 132 handle_osd_map epochs [133,133], i have 133, src has [1,133]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 133 ms_handle_reset con 0x559ca1520800 session 0x559ca21bbc20
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:26.197054+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:27.197529+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:28.197812+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:29.198107+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:30.198369+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 133 heartbeat osd_stat(store_statfs(0x4fa59b000/0x0/0x4ffc00000, data 0x100a3ad/0x10d2000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1091597 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:31.198793+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:32.200203+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2635c00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:33.200413+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 133 handle_osd_map epochs [134,134], i have 133, src has [1,134]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 134 ms_handle_reset con 0x559ca2635c00 session 0x559ca21e92c0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:34.200783+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:35.200996+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1094571 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 134 heartbeat osd_stat(store_statfs(0x4fa598000/0x0/0x4ffc00000, data 0x100bf7e/0x10d5000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:36.201397+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:37.201732+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 134 heartbeat osd_stat(store_statfs(0x4fa598000/0x0/0x4ffc00000, data 0x100bf7e/0x10d5000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:38.202148+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 134 heartbeat osd_stat(store_statfs(0x4fa598000/0x0/0x4ffc00000, data 0x100bf7e/0x10d5000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:39.202514+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:40.202968+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1094571 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:41.203502+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 134 handle_osd_map epochs [135,135], i have 134, src has [1,135]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 15.483499527s of 15.565405846s, submitted: 15
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:42.203924+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:43.204297+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:44.204556+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:45.204898+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1097545 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:46.205276+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:47.205642+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:48.206036+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:49.206698+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:50.207140+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1097545 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:51.207386+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:52.207727+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:53.208141+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:54.208908+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:55.209392+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1097545 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:56.209719+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:57.210079+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:58.210410+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:59.210760+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:00.211223+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1097545 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:01.211794+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:02.212128+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:03.212508+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:04.212843+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:05.213070+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1097545 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:06.213406+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:07.213769+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:08.214133+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:09.214365+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:10.214837+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1097545 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:11.215046+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:12.215434+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:13.215777+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:14.216212+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:15.216571+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1097545 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:16.216929+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:17.217216+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:18.217637+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:19.217998+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:20.218437+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 39.451190948s of 39.471805573s, submitted: 19
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:21.218901+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100974592 unmapped: 28049408 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:22.219398+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101031936 unmapped: 27992064 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:23.219834+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:24.220343+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:25.220782+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:26.221336+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:27.221695+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:28.221966+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:29.222413+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:30.222821+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:31.223338+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:32.223772+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:33.224453+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:34.225057+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:35.225529+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:36.225901+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:37.226195+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:38.226430+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:39.226947+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:40.227342+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:41.227674+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:42.227910+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:43.228147+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:44.228919+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:45.229161+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:46.229504+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:47.229771+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:48.229995+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:49.230420+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:50.230867+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:51.231364+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:52.231761+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:53.231962+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:54.232496+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:55.232887+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:56.233360+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:57.233759+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:58.233988+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:59.234213+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:00.234611+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:01.235401+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:02.235724+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:03.236081+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:04.236523+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:05.236999+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:06.237479+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:07.237889+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:08.238396+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:09.239004+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:10.239619+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:11.240062+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:12.240461+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:13.240893+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:14.241360+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:15.241853+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:16.242391+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:17.242817+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:18.243090+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:19.243618+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:20.244134+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:21.244517+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:22.244930+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:23.245472+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:24.245869+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:25.246362+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:26.246749+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:27.247158+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:28.247516+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:29.247844+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:30.248165+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:31.248425+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:32.248742+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:33.249156+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:34.249686+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:35.249974+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:36.250206+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:37.250651+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:38.250957+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:39.251182+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:40.251765+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:41.251989+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:42.252332+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:43.252771+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:44.253210+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:45.253699+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:46.254086+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:47.254383+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:48.254612+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:49.254864+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:50.255140+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:51.255495+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:52.255707+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:53.255969+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:54.256482+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:55.256839+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:56.257120+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:57.257531+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:58.257875+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:59.258209+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:00.258688+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:01.258941+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:02.259191+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:03.259478+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:04.259773+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:05.260213+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:06.260538+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:07.260748+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:08.261152+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:09.261568+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:10.262007+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:11.262468+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:12.262810+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:13.263096+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:14.263424+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:15.263794+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:16.264147+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:17.264521+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:18.264725+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
[... the prioritycache/monclient cycle above repeats once per second (rotating-secret expiry advancing from 02:36:19 to 02:37:34), interleaved with identical osd.1 135 heartbeats and periodic rocksdb/bluestore cache reports; duplicate entries omitted ...]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 193.554534912s of 194.195724487s, submitted: 90
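The _kv_sync_thread line is BlueStore's RocksDB commit thread reporting how busy it was over its sampling window: idle 193.55 s of 194.20 s while flushing 90 submitted transactions, i.e. far below 1% busy. The same arithmetic as a sketch, with the numbers copied from the line above:

```python
idle_s, window_s, submitted = 193.554534912, 194.195724487, 90

busy_s = window_s - idle_s                # time actually spent in commits
busy_pct = 100.0 * busy_s / window_s
per_txn_ms = 1000.0 * busy_s / submitted  # rough average cost per transaction

print(f"busy {busy_s:.3f}s ({busy_pct:.2f}%), ~{per_txn_ms:.1f} ms per commit")
```

This works out to roughly 0.64 s busy (0.33%), ~7.1 ms per commit: an essentially idle OSD, consistent with the empty op histograms in the heartbeats.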
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101146624 unmapped: 27877376 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:35.291957+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 135 handle_osd_map epochs [136,136], i have 135, src has [1,136]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 136 ms_handle_reset con 0x559ca14ca800 session 0x559ca21e90e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101154816 unmapped: 27869184 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:36.292497+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101220352 unmapped: 27803648 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 136 heartbeat osd_stat(store_statfs(0x4f991e000/0x0/0x4ffc00000, data 0x1c7f5c4/0x1d4f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1194261 data_alloc: 218103808 data_used: 8155136
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:37.292865+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 136 handle_osd_map epochs [137,137], i have 136, src has [1,137]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14cb400 session 0x559ca21e8d20
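The two handle_osd_map sequences above show the OSD consuming newly published cluster maps: the monitor delivers the inclusive epoch range [137,137], the OSD reports "i have 136", the sender holds the full history [1,137], and subsequent lines log under the new epoch. A sketch of watching a journal stream for such epoch advances; the regex is inferred from these lines and the script name is hypothetical:

```python
import re
import sys

# Matches e.g. "osd.1 136 handle_osd_map epochs [137,137], i have 136"
MAP_RE = re.compile(
    r"osd\.(?P<osd>\d+) \d+ handle_osd_map epochs "
    r"\[(?P<first>\d+),(?P<last>\d+)\], i have (?P<have>\d+)"
)

# e.g.  journalctl -f | python watch_epochs.py   (hypothetical usage)
for line in sys.stdin:
    m = MAP_RE.search(line)
    if m and int(m["last"]) > int(m["have"]):
        print(f"osd.{m['osd']}: osdmap advanced {m['have']} -> {m['last']}")
```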
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101244928 unmapped: 27779072 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:38.293146+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
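Each heartbeat embeds a store_statfs summary of the OSD's backing store. Reading the hex fields in printed order as available / internally reserved / total, then data stored / allocated (my reading of the format, inferred from these lines), the numbers decode to a nearly empty 20 GiB device:

```python
import re

# Hex fields in printed order: available / internally-reserved / total,
# then data stored / allocated (assumed layout, per the lead-in).
STATFS_RE = re.compile(
    r"store_statfs\(0x(?P<avail>[0-9a-f]+)/0x(?P<reserved>[0-9a-f]+)"
    r"/0x(?P<total>[0-9a-f]+), data 0x(?P<stored>[0-9a-f]+)/0x(?P<alloc>[0-9a-f]+)"
)

line = ("osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, "
        "data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, "
        "meta 0x458f9c7), peers [0,2] op hist [])")

f = {k: int(v, 16) for k, v in STATFS_RE.search(line).groupdict().items()}
print(f"total {f['total'] / 2**30:.1f} GiB, free {f['avail'] / 2**30:.1f} GiB, "
      f"data {f['stored'] / 2**20:.1f} MiB stored in "
      f"{f['alloc'] / 2**20:.1f} MiB allocated")
```

That prints: total 20.0 GiB, free 19.9 GiB, data 28.5 MiB stored in 29.3 MiB allocated; the small stored/allocated gap is allocation-granularity (min_alloc_size) rounding.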
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:40.293983+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
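The _resize_shards line shows how the autotuned budget (cache_size, the same 2845415832 bytes the tune_memory lines report) is carved into per-purpose caches: RocksDB block cache (kv), onode cache (kv_onode), BlueStore metadata (meta) and object data (data), each with an allocation and its current usage; the paired rocksdb lines are high-priority pool ratios recomputed in the same pass (0.285714 and 0.0555556 are consistent with 2/7 and 1/18). A quick check of the split, using the numbers from the line above:

```python
cache_size = 2_845_415_832          # budget from the tune_memory/_resize lines
alloc = {                           # *_alloc fields from _resize_shards
    "kv": 1_207_959_552,
    "kv_onode": 234_881_024,
    "meta": 1_140_850_688,
    "data": 218_103_808,
}

for name, nbytes in alloc.items():
    print(f"{name:9s} {nbytes / 2**20:6.0f} MiB ({nbytes / cache_size:5.1%})")

# The shards are chunk-aligned (each lands on a whole MiB), so they sum to a
# little under the full budget:
print(f"assigned {sum(alloc.values()) / cache_size:.1%} of cache_size")
```

Every allocation is a whole number of MiB (1152, 224, 1088, 208), together covering about 98.5% of the budget; the tiny *_used values confirm the caches are nearly empty on this idle OSD.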
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
[... the same per-second prioritycache/monclient cycle continues (rotating-secret expiry advancing from 02:37:42 to 02:37:55), with identical epoch-137 heartbeats and periodic rocksdb/bluestore cache reports; duplicate entries omitted ...]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:57.300552+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:58.300771+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:59.301108+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:00.301433+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:01.301692+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:02.301939+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:03.302468+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:04.302823+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:05.303102+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:06.303550+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:07.303926+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:08.304441+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:09.304765+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:10.305404+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:11.305816+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:12.306037+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:13.306358+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:14.306636+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:15.306894+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:16.307302+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:17.307825+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:18.308030+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:19.308312+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:20.308687+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:21.308948+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:22.309323+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:23.309651+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:24.310100+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:25.310352+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:26.310647+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:27.310870+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:28.311070+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:29.311690+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:30.311926+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1520800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1520800 session 0x559ca11c05a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2649400 session 0x559c9f416000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2648800 session 0x559ca1c961e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:31.312288+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14cb400 session 0x559ca2208000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1520800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1520800 session 0x559ca2209680
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2648800 session 0x559ca12a05a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14ca800 session 0x559ca21e8960
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:32.312569+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1207867 data_alloc: 218103808 data_used: 12812288
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2649400 session 0x559ca21e83c0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:33.312959+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:34.313448+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:35.313815+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:36.314076+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:37.314452+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1207867 data_alloc: 218103808 data_used: 12812288
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:38.314755+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:39.315147+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:40.315386+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:41.315672+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:42.316400+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14ca800 session 0x559c9ff10000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14cb400 session 0x559c9f5dc960
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1520800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1207867 data_alloc: 218103808 data_used: 12812288
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 67.697204590s of 67.921241760s, submitted: 29
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:43.316736+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca00bfc00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f9918000/0x0/0x4ffc00000, data 0x1c811b3/0x1d56000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [0,0,0,0,0,0,0,3,1])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:44.317109+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105930752 unmapped: 23093248 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:45.317356+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131252224 unmapped: 9363456 heap: 140615680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:46.317827+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f832a000/0x0/0x4ffc00000, data 0x326d205/0x3344000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [0,0,0,0,0,0,0,0,0,20,0,14])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 123445248 unmapped: 20848640 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1520800 session 0x559c9f48a5a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:47.318202+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca00bec00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca00bec00 session 0x559ca157a3c0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0800 session 0x559ca1caab40
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca00bec00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109428736 unmapped: 34865152 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2649800 session 0x559ca44af0e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1564012 data_alloc: 218103808 data_used: 12832768
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14ca800 session 0x559ca098d0e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2648800 session 0x559ca12a12c0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14cb400 session 0x559ca01d92c0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1520800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca00bfc00 session 0x559ca21e8000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:48.318695+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14ca800 session 0x559ca11c1680
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14cb400 session 0x559ca1cf01e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2648800 session 0x559ca1d13e00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109486080 unmapped: 34807808 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2649800 session 0x559ca1d130e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:49.319047+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109510656 unmapped: 34783232 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca00bec00 session 0x559ca1c8c3c0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14ca800 session 0x559ca1261c20
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:50.319317+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6bf0000/0x0/0x4ffc00000, data 0x49a72e9/0x4a7e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [0,0,0,0,0,0,0,0,2,2])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109199360 unmapped: 35094528 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1520800 session 0x559ca25e54a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14cb400 session 0x559c9f6b81e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:51.319646+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559ca1261860
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a000 session 0x559ca25e5c20
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109199360 unmapped: 35094528 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14ca800 session 0x559ca1cac3c0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2648800 session 0x559ca0123680
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559c9f5d0780
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:52.320064+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14cb400 session 0x559c9f5d6f00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109199360 unmapped: 35094528 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1520800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1520800 session 0x559ca1d12b40
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1562696 data_alloc: 218103808 data_used: 12820480
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:53.320493+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6bf0000/0x0/0x4ffc00000, data 0x49a72e9/0x4a7e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559ca1c94b40
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109199360 unmapped: 35094528 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14ca800 session 0x559ca1c8cf00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:54.320818+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2648800 session 0x559ca126d4a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109199360 unmapped: 35094528 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 3.315246582s of 11.861621857s, submitted: 155
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:55.321368+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a400 session 0x559c9ebfa780
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14cb400 session 0x559c9f7105a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559ca1c95e00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2649800 session 0x559c9ff92780
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109477888 unmapped: 34816000 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3ac00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:56.321588+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6b71000/0x0/0x4ffc00000, data 0x4a2530c/0x4afd000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109527040 unmapped: 34766848 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:57.321837+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109527040 unmapped: 34766848 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1576340 data_alloc: 218103808 data_used: 12824576
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:58.322074+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109527040 unmapped: 34766848 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:59.322326+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 111394816 unmapped: 32899072 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:00.322534+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 113188864 unmapped: 31105024 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:01.322810+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 113549312 unmapped: 30744576 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6b71000/0x0/0x4ffc00000, data 0x4a2530c/0x4afd000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:02.323015+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 115482624 unmapped: 28811264 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6b71000/0x0/0x4ffc00000, data 0x4a2530c/0x4afd000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1658580 data_alloc: 234881024 data_used: 24391680
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:03.323218+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 119177216 unmapped: 25116672 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6b71000/0x0/0x4ffc00000, data 0x4a2530c/0x4afd000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:04.323555+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 119185408 unmapped: 25108480 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:05.323757+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 119185408 unmapped: 25108480 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:06.323963+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 21479424 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:07.324206+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 130637824 unmapped: 13656064 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1793940 data_alloc: 251658240 data_used: 40927232
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:08.324397+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 142221312 unmapped: 2072576 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:09.324601+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 143204352 unmapped: 1089536 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6b71000/0x0/0x4ffc00000, data 0x4a2530c/0x4afd000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 15.136783600s of 15.585795403s, submitted: 16
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b400 session 0x559ca21e8780
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:10.324840+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b800 session 0x559ca1d130e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3bc00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 143253504 unmapped: 1040384 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3bc00 session 0x559ca21e85a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:11.325180+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137125888 unmapped: 7168000 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:12.325477+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f7a7f000/0x0/0x4ffc00000, data 0x3a95287/0x3b6b000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137125888 unmapped: 7168000 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1674346 data_alloc: 251658240 data_used: 39333888
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a800 session 0x559c9f5d5c20
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b000 session 0x559ca157a960
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:13.325668+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127270912 unmapped: 17022976 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559ca11c0780
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:14.325980+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f8a34000/0x0/0x4ffc00000, data 0x2b66215/0x2c3a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127270912 unmapped: 17022976 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b400 session 0x559ca21e8960
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:15.326395+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f84fb000/0x0/0x4ffc00000, data 0x2d9f215/0x2e73000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f84fb000/0x0/0x4ffc00000, data 0x2d9f215/0x2e73000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:16.326612+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:17.326879+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1535170 data_alloc: 234881024 data_used: 24551424
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:18.327321+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:19.327561+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f7ff1000/0x0/0x4ffc00000, data 0x35a9215/0x367d000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:20.327831+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b800 session 0x559c9ff11a40
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:21.328071+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559c9ff10f00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:22.328469+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1535170 data_alloc: 234881024 data_used: 24551424
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f7ff1000/0x0/0x4ffc00000, data 0x35a9215/0x367d000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a800 session 0x559ca157b680
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:23.328730+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 12.523505211s of 13.102807045s, submitted: 111
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b000 session 0x559ca126c1e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:24.328966+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:25.329382+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f7ff0000/0x0/0x4ffc00000, data 0x35a9225/0x367e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:26.329584+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f7ff0000/0x0/0x4ffc00000, data 0x35a9225/0x367e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126328832 unmapped: 32661504 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:27.329782+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126263296 unmapped: 32727040 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1564048 data_alloc: 234881024 data_used: 28315648
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:28.329992+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127082496 unmapped: 31907840 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:29.330209+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127344640 unmapped: 31645696 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:30.330463+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127344640 unmapped: 31645696 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:31.331730+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127344640 unmapped: 31645696 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f7ff0000/0x0/0x4ffc00000, data 0x35a9225/0x367e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:32.332190+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127344640 unmapped: 31645696 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1605328 data_alloc: 234881024 data_used: 34054144
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:33.332472+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 9.957438469s of 10.036822319s, submitted: 2
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131047424 unmapped: 27942912 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:34.332738+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132907008 unmapped: 26083328 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f7408000/0x0/0x4ffc00000, data 0x4191225/0x4266000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [0,0,0,0,0,0,0,0,3])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:35.332960+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134840320 unmapped: 24150016 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:36.333143+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134553600 unmapped: 24436736 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:37.333332+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 23412736 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1764484 data_alloc: 251658240 data_used: 34795520
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:38.333543+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 23412736 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:39.333748+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 23412736 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:40.334175+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6d08000/0x0/0x4ffc00000, data 0x4883225/0x4958000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 23412736 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6d08000/0x0/0x4ffc00000, data 0x4883225/0x4958000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:41.334420+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135643136 unmapped: 23347200 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:42.334852+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133816320 unmapped: 25174016 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1756532 data_alloc: 251658240 data_used: 34807808
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:43.335132+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133816320 unmapped: 25174016 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:44.335383+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133824512 unmapped: 25165824 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6cf6000/0x0/0x4ffc00000, data 0x48a3225/0x4978000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:45.335602+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133824512 unmapped: 25165824 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2649800 session 0x559c9f5d6960
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca0492c00 session 0x559c9f5d70e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca0492c00 session 0x559c9f5d65a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:46.335897+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559ca1cad2c0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 10.866591454s of 13.093039513s, submitted: 163
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a800 session 0x559ca127bc20
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b000 session 0x559c9f3ec5a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2649800 session 0x559c9ff93a40
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133971968 unmapped: 25018368 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca0492c00 session 0x559c9f701680
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559c9f7001e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:47.336143+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133971968 unmapped: 25018368 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1796118 data_alloc: 251658240 data_used: 34807808
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:48.336520+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133971968 unmapped: 25018368 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:49.336865+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133971968 unmapped: 25018368 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:50.337103+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6855000/0x0/0x4ffc00000, data 0x4d43235/0x4e19000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6855000/0x0/0x4ffc00000, data 0x4d43235/0x4e19000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a800 session 0x559ca12fc1e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133971968 unmapped: 25018368 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:51.337352+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b000 session 0x559c9ff11860
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133980160 unmapped: 25010176 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0493c00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca0493c00 session 0x559c9f3ed680
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:52.337747+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca0492c00 session 0x559c9ff92f00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134291456 unmapped: 24698880 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1802017 data_alloc: 251658240 data_used: 34807808
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:53.337966+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6830000/0x0/0x4ffc00000, data 0x4d67245/0x4e3e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134291456 unmapped: 24698880 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:54.338220+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134291456 unmapped: 24698880 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:55.338643+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134291456 unmapped: 24698880 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:56.338837+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135061504 unmapped: 23928832 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:57.339061+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559ca098c3c0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 10.916957855s of 11.087557793s, submitted: 16
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a800 session 0x559ca126de00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 136241152 unmapped: 22749184 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1766757 data_alloc: 251658240 data_used: 34811904
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:58.339276+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f682d000/0x0/0x4ffc00000, data 0x4d6a245/0x4e41000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [0,0,0,1])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b000 session 0x559ca259b860
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134742016 unmapped: 24248320 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:59.339485+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134742016 unmapped: 24248320 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:00.339750+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 23420928 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:01.339972+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 138387456 unmapped: 20602880 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:02.340182+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137797632 unmapped: 21192704 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1838016 data_alloc: 251658240 data_used: 36421632
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:03.340605+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 138289152 unmapped: 20701184 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:04.340899+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6551000/0x0/0x4ffc00000, data 0x503f225/0x5114000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 138289152 unmapped: 20701184 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:05.341371+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 138297344 unmapped: 20692992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:06.341742+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 138297344 unmapped: 20692992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:07.342124+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 9.573361397s of 10.074029922s, submitted: 121
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137592832 unmapped: 21397504 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1841876 data_alloc: 251658240 data_used: 36651008
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:08.342499+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6556000/0x0/0x4ffc00000, data 0x5041225/0x5116000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137592832 unmapped: 21397504 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:09.342826+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a400 session 0x559ca05663c0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3ac00 session 0x559ca1d13e00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137568256 unmapped: 21422080 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:10.343030+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137568256 unmapped: 21422080 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:11.343638+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137568256 unmapped: 21422080 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:12.343925+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137576448 unmapped: 21413888 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1841696 data_alloc: 251658240 data_used: 36655104
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:13.344165+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6553000/0x0/0x4ffc00000, data 0x5046225/0x511b000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137576448 unmapped: 21413888 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:14.344536+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137674752 unmapped: 21315584 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:15.344759+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137674752 unmapped: 21315584 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:16.345196+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6553000/0x0/0x4ffc00000, data 0x5046225/0x511b000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137691136 unmapped: 21299200 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:17.345528+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137691136 unmapped: 21299200 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:18.345905+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1841856 data_alloc: 251658240 data_used: 37851136
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 11.048368454s of 11.090108871s, submitted: 7
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14ca800 session 0x559ca231af00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2648800 session 0x559c9f794000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6553000/0x0/0x4ffc00000, data 0x5046225/0x511b000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 136364032 unmapped: 22626304 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:19.346107+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6553000/0x0/0x4ffc00000, data 0x5046225/0x511b000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [0,1,1])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a400 session 0x559ca22090e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 137 handle_osd_map epochs [137,138], i have 137, src has [1,138]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 138 ms_handle_reset con 0x559ca1f3a800 session 0x559c9f6b85a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134152192 unmapped: 24838144 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:20.346847+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 138 ms_handle_reset con 0x559ca14ca800 session 0x559ca1cade00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 138 ms_handle_reset con 0x559ca1f3a400 session 0x559ca1cad680
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3ac00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134160384 unmapped: 24829952 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:21.347323+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 138 handle_osd_map epochs [139,139], i have 138, src has [1,139]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134160384 unmapped: 24829952 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:22.347749+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 139 ms_handle_reset con 0x559ca1f3ac00 session 0x559ca21e8780
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 139 heartbeat osd_stat(store_statfs(0x4f730e000/0x0/0x4ffc00000, data 0x4287934/0x435f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134168576 unmapped: 24821760 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:23.348146+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1697050 data_alloc: 234881024 data_used: 32272384
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 139 heartbeat osd_stat(store_statfs(0x4f730e000/0x0/0x4ffc00000, data 0x4287911/0x435e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134176768 unmapped: 24813568 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:24.348551+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134176768 unmapped: 24813568 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:25.348747+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 139 heartbeat osd_stat(store_statfs(0x4f730e000/0x0/0x4ffc00000, data 0x4287911/0x435e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134176768 unmapped: 24813568 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:26.349147+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134176768 unmapped: 24813568 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:27.349650+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 139 heartbeat osd_stat(store_statfs(0x4f730e000/0x0/0x4ffc00000, data 0x4287911/0x435e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134176768 unmapped: 24813568 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:28.350359+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1697050 data_alloc: 234881024 data_used: 32272384
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134176768 unmapped: 24813568 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:29.350737+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134176768 unmapped: 24813568 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:30.351370+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 139 handle_osd_map epochs [139,140], i have 139, src has [1,140]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 11.991467476s of 12.484093666s, submitted: 97
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134193152 unmapped: 24797184 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:31.351818+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134201344 unmapped: 24788992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:32.352352+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:33.352782+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134201344 unmapped: 24788992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1699243 data_alloc: 234881024 data_used: 32280576
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f730c000/0x0/0x4ffc00000, data 0x4289374/0x4361000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:34.353321+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134201344 unmapped: 24788992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:35.353706+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134201344 unmapped: 24788992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:36.354113+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134201344 unmapped: 24788992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:37.354377+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134201344 unmapped: 24788992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f730d000/0x0/0x4ffc00000, data 0x4289374/0x4361000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3b400 session 0x559ca126d860
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:38.354583+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3b800 session 0x559c9f795680
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134201344 unmapped: 24788992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1699515 data_alloc: 234881024 data_used: 32301056
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f730d000/0x0/0x4ffc00000, data 0x4289374/0x4361000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:39.354867+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127000576 unmapped: 31989760 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3b800 session 0x559ca1cada40
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:40.355092+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126181376 unmapped: 32808960 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:41.355358+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126181376 unmapped: 32808960 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:42.355603+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126181376 unmapped: 32808960 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:43.356005+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126181376 unmapped: 32808960 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1482020 data_alloc: 234881024 data_used: 20946944
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:44.356270+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126181376 unmapped: 32808960 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f84ea000/0x0/0x4ffc00000, data 0x30ad364/0x3184000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:45.356462+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126181376 unmapped: 32808960 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f84ea000/0x0/0x4ffc00000, data 0x30ad364/0x3184000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:46.356817+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126181376 unmapped: 32808960 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 15.414695740s of 15.540205956s, submitted: 39
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:47.357216+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126197760 unmapped: 32792576 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:48.359739+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126214144 unmapped: 32776192 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1489084 data_alloc: 234881024 data_used: 21655552
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:49.360015+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126214144 unmapped: 32776192 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:50.360293+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126214144 unmapped: 32776192 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:51.360477+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126214144 unmapped: 32776192 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca14ca800 session 0x559ca25e5680
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3a400 session 0x559ca259ab40
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3ac00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3ac00 session 0x559c9f5dc5a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3b400 session 0x559ca1cacf00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca14ca800 session 0x559c9f5dc1e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f838c000/0x0/0x4ffc00000, data 0x320a374/0x32e2000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3a400 session 0x559c9ff92d20
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3ac00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3ac00 session 0x559ca157af00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3b800 session 0x559c9f5d70e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:52.361529+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca2648800 session 0x559ca157ab40
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127098880 unmapped: 43442176 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:53.361723+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127098880 unmapped: 43442176 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1609851 data_alloc: 234881024 data_used: 21659648
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:54.362086+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127098880 unmapped: 43442176 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca14ca800 session 0x559c9ff13860
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:55.362356+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127098880 unmapped: 43442176 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3a400 session 0x559ca051be00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:56.362606+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127098880 unmapped: 43442176 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3ac00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3ac00 session 0x559c9f416000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 10.025475502s of 10.205513954s, submitted: 31
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3b800 session 0x559c9f6b9860
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:57.362843+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127123456 unmapped: 43417600 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0493800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f762d000/0x0/0x4ffc00000, data 0x3f673a7/0x4041000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:58.363089+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f762d000/0x0/0x4ffc00000, data 0x3f673a7/0x4041000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127123456 unmapped: 43417600 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1612578 data_alloc: 234881024 data_used: 21663744
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:59.363468+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127123456 unmapped: 43417600 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:00.363693+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126853120 unmapped: 43687936 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:01.363872+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f762d000/0x0/0x4ffc00000, data 0x3f673a7/0x4041000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 129507328 unmapped: 41033728 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:02.364128+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:03.364370+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1722604 data_alloc: 251658240 data_used: 36622336
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:04.364739+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:05.364931+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:06.365387+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7624000/0x0/0x4ffc00000, data 0x3f6d3a7/0x4047000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:07.365776+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:08.366130+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1722604 data_alloc: 251658240 data_used: 36622336
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:09.366389+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:10.366699+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7624000/0x0/0x4ffc00000, data 0x3f6d3a7/0x4047000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:11.366950+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:12.367341+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:13.367818+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1722604 data_alloc: 251658240 data_used: 36622336
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:14.368480+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:15.368883+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7624000/0x0/0x4ffc00000, data 0x3f6d3a7/0x4047000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:16.369166+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:17.369495+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 21.206089020s of 21.246303558s, submitted: 7
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca0492c00 session 0x559ca21e94a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca11b0400 session 0x559ca21e90e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:18.369803+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1722308 data_alloc: 251658240 data_used: 36626432
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:19.369973+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca0492c00 session 0x559c9ff92f00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 42786816 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f8a2b000/0x0/0x4ffc00000, data 0x2b40335/0x2c18000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:20.370424+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 42786816 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:21.370655+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 42786816 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:22.370909+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 42786816 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:23.371196+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 42786816 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1504244 data_alloc: 234881024 data_used: 27107328
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:24.371750+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 42786816 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:25.372147+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f8a2b000/0x0/0x4ffc00000, data 0x2b40335/0x2c18000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 42778624 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:26.372507+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f8a2b000/0x0/0x4ffc00000, data 0x2b40335/0x2c18000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 42778624 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:27.372747+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 42778624 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:28.372948+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 42778624 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1504244 data_alloc: 234881024 data_used: 27107328
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:29.373205+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 42778624 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:30.373677+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 42778624 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 12.612641335s of 12.727919579s, submitted: 29
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:31.373922+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132325376 unmapped: 38215680 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:32.374110+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f839d000/0x0/0x4ffc00000, data 0x31f9335/0x32d1000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132374528 unmapped: 38166528 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:33.374493+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1563814 data_alloc: 234881024 data_used: 27361280
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:34.374838+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:35.375220+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:36.375846+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f837c000/0x0/0x4ffc00000, data 0x3212335/0x32ea000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:37.376137+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:38.376571+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1563814 data_alloc: 234881024 data_used: 27361280
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:39.376996+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:40.377446+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f837c000/0x0/0x4ffc00000, data 0x3212335/0x32ea000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:41.377795+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:42.378013+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:43.378403+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1563814 data_alloc: 234881024 data_used: 27361280
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:44.378717+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f837c000/0x0/0x4ffc00000, data 0x3212335/0x32ea000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:45.379052+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:46.379399+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:47.379736+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:48.380149+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1563814 data_alloc: 234881024 data_used: 27361280
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:49.380579+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f837c000/0x0/0x4ffc00000, data 0x3212335/0x32ea000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:50.380883+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:51.381365+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:52.381638+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca14ca800 session 0x559ca051b680
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3a400 session 0x559c9f5dda40
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3ac00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3ac00 session 0x559c9ff12f00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca0492c00 session 0x559c9f5d4f00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 21.964023590s of 22.214895248s, submitted: 90
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:53.381878+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca11b0400 session 0x559ca12a1860
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca14ca800 session 0x559ca12a0780
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3a400 session 0x559ca1cf0b40
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3b800 session 0x559ca1cf05a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134004736 unmapped: 36536320 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca0492c00 session 0x559ca1cf1c20
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1607568 data_alloc: 234881024 data_used: 27361280
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:54.382338+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134004736 unmapped: 36536320 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:55.382693+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134004736 unmapped: 36536320 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:56.383000+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134086656 unmapped: 36454400 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:57.383295+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134086656 unmapped: 36454400 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:58.383472+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134086656 unmapped: 36454400 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1607568 data_alloc: 234881024 data_used: 27361280
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:59.383962+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134094848 unmapped: 36446208 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:00.384403+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134094848 unmapped: 36446208 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:01.384772+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134094848 unmapped: 36446208 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:02.385001+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134062080 unmapped: 36478976 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:03.385378+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 10.491098404s of 10.610158920s, submitted: 9
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134520832 unmapped: 36020224 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1632976 data_alloc: 234881024 data_used: 31027200
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:04.385693+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:05.386048+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:06.386448+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:07.386826+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:08.387184+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1644336 data_alloc: 234881024 data_used: 32641024
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:09.387379+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:10.387626+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:11.387987+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:12.388509+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd erasure-code-profile ls"} v 0) v1
Oct 11 02:57:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/143006115' entity='client.admin' cmd=[{"prefix": "osd erasure-code-profile ls"}]: dispatch
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:13.388937+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr services", "format": "json-pretty"} v 0) v1
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1644336 data_alloc: 234881024 data_used: 32641024
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:14.389477+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:15.389888+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 34963456 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1678805676' entity='client.admin' cmd=[{"prefix": "mgr services", "format": "json-pretty"}]: dispatch
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:16.390414+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 34963456 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:17.390979+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 34963456 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:18.391499+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 34963456 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1644336 data_alloc: 234881024 data_used: 32641024
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:19.391998+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 34963456 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:20.392436+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 34963456 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:21.392910+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 34963456 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:22.393268+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135585792 unmapped: 34955264 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:23.393723+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135585792 unmapped: 34955264 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1644336 data_alloc: 234881024 data_used: 32641024
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:24.394435+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135585792 unmapped: 34955264 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:25.394875+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135593984 unmapped: 34947072 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:26.395340+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135593984 unmapped: 34947072 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:27.395698+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 24.194051743s of 24.209526062s, submitted: 3
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135610368 unmapped: 34930688 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:28.395997+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 140 handle_osd_map epochs [141,141], i have 140, src has [1,141]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca14ca800 session 0x559ca11c1a40
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135692288 unmapped: 34848768 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1763114 data_alloc: 234881024 data_used: 32649216
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:29.396350+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135692288 unmapped: 34848768 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:30.396732+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135700480 unmapped: 34840576 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:31.397126+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6d5f000/0x0/0x4ffc00000, data 0x4831f08/0x490e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135700480 unmapped: 34840576 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:32.397382+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135700480 unmapped: 34840576 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6d5f000/0x0/0x4ffc00000, data 0x4831f08/0x490e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:33.397809+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135700480 unmapped: 34840576 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1763114 data_alloc: 234881024 data_used: 32649216
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:34.398344+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135774208 unmapped: 34766848 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:35.398687+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 138289152 unmapped: 32251904 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:36.399012+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 138297344 unmapped: 32243712 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:37.399468+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6614000/0x0/0x4ffc00000, data 0x4f7cf08/0x5059000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca1f3a400 session 0x559c9f5d7a40
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0493000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0493000 session 0x559ca051ad20
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 9.656514168s of 10.073102951s, submitted: 76
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0493400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0493400 session 0x559ca157ad20
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 138362880 unmapped: 32178176 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:38.399855+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0492c00 session 0x559c9f794d20
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0493000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0493000 session 0x559ca098c3c0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 153591808 unmapped: 16949248 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1871926 data_alloc: 251658240 data_used: 48066560
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:39.400167+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca14ca800 session 0x559ca127b860
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157507584 unmapped: 13033472 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca1f3a400 session 0x559c9ff92780
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca266fc00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:40.400336+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca266fc00 session 0x559ca231a3c0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0492c00 session 0x559c9f48b0e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0493000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0493000 session 0x559ca231af00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca14ca800 session 0x559ca1d12f00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f53d9000/0x0/0x4ffc00000, data 0x5da6f7a/0x5e85000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 154574848 unmapped: 23838720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:41.400684+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 154574848 unmapped: 23838720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:42.401085+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f53d9000/0x0/0x4ffc00000, data 0x5da6f7a/0x5e85000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 154607616 unmapped: 23805952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:43.401388+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca1f3a400 session 0x559c9f5d1a40
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 154624000 unmapped: 23789568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1994344 data_alloc: 251658240 data_used: 48074752
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:44.401726+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca266e400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3bc00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca11b0400 session 0x559ca1cf01e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 154664960 unmapped: 23748608 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:45.401916+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0492c00 session 0x559ca11c03c0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 151674880 unmapped: 26738688 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:46.402142+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 151674880 unmapped: 26738688 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:47.402370+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6149000/0x0/0x4ffc00000, data 0x5037f6a/0x5115000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 152780800 unmapped: 25632768 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:48.402572+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3600.1 total, 600.0 interval
                                            Cumulative writes: 10K writes, 41K keys, 10K commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 10K writes, 2796 syncs, 3.83 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 2104 writes, 7839 keys, 2104 commit groups, 1.0 writes per commit group, ingest: 8.42 MB, 0.01 MB/s
                                            Interval WAL: 2104 writes, 817 syncs, 2.58 writes per sync, written: 0.01 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6149000/0x0/0x4ffc00000, data 0x5037f6a/0x5115000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155754496 unmapped: 22659072 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1920008 data_alloc: 251658240 data_used: 52142080
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:49.402746+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6149000/0x0/0x4ffc00000, data 0x5037f6a/0x5115000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160505856 unmapped: 17907712 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:50.402961+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6149000/0x0/0x4ffc00000, data 0x5037f6a/0x5115000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160505856 unmapped: 17907712 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:51.403157+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 13.493240356s of 13.828572273s, submitted: 75
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca1f3b000 session 0x559ca2209860
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0493800 session 0x559ca08370e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0493000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160505856 unmapped: 17907712 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:52.403381+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0493000 session 0x559ca01223c0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150175744 unmapped: 28237824 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:53.403638+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150175744 unmapped: 28237824 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:54.403879+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1679329 data_alloc: 251658240 data_used: 40886272
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca266f000 session 0x559ca44ae780
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f76d7000/0x0/0x4ffc00000, data 0x3aabf27/0x3b86000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150175744 unmapped: 28237824 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:55.404090+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: mgrc ms_handle_reset ms_handle_reset con 0x559c9f7abc00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: mgrc reconnect Terminating session with v2:192.168.122.100:6800/1088804496
Oct 11 02:57:56 compute-0 ceph-osd[206800]: mgrc reconnect Starting new session with [v2:192.168.122.100:6800/1088804496,v1:192.168.122.100:6801/1088804496]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: get_auth_request con 0x559ca266f000 auth_method 0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: mgrc handle_mgr_configure stats_period=5
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca00bf400 session 0x559ca051a780
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca266ec00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca00bf000 session 0x559c9f5d14a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca00bf400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f76d7000/0x0/0x4ffc00000, data 0x3aabf27/0x3b86000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:56.404472+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:57.404867+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:58.405352+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:59.405726+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1679329 data_alloc: 251658240 data_used: 40886272
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:00.406119+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f76d7000/0x0/0x4ffc00000, data 0x3aabf27/0x3b86000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:01.406521+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:02.406833+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:03.407075+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:04.407368+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1679329 data_alloc: 251658240 data_used: 40886272
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f76d7000/0x0/0x4ffc00000, data 0x3aabf27/0x3b86000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:05.407834+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:06.408441+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:07.408668+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:08.408901+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f76d7000/0x0/0x4ffc00000, data 0x3aabf27/0x3b86000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:09.409132+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1679329 data_alloc: 251658240 data_used: 40886272
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:10.409432+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:11.409745+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:12.409971+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:13.410318+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f76d7000/0x0/0x4ffc00000, data 0x3aabf27/0x3b86000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:14.410645+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1679329 data_alloc: 251658240 data_used: 40886272
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f76d7000/0x0/0x4ffc00000, data 0x3aabf27/0x3b86000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:15.411175+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:16.411549+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:17.411950+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:18.412328+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 27.144214630s of 27.347295761s, submitted: 47
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:19.412613+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155402240 unmapped: 23011328 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1800615 data_alloc: 251658240 data_used: 40894464
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:20.412814+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 158810112 unmapped: 19603456 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f65f2000/0x0/0x4ffc00000, data 0x4b89f27/0x4c64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:21.413438+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155615232 unmapped: 22798336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:22.413722+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155615232 unmapped: 22798336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:23.414141+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155615232 unmapped: 22798336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:24.415307+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155615232 unmapped: 22798336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812281 data_alloc: 251658240 data_used: 41115648
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:25.415651+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155582464 unmapped: 22831104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f655d000/0x0/0x4ffc00000, data 0x4c26f27/0x4d01000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:26.416095+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155582464 unmapped: 22831104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:27.416538+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:28.416768+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6552000/0x0/0x4ffc00000, data 0x4c31f27/0x4d0c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:29.416992+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6552000/0x0/0x4ffc00000, data 0x4c31f27/0x4d0c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1809173 data_alloc: 251658240 data_used: 41119744
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6552000/0x0/0x4ffc00000, data 0x4c31f27/0x4d0c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:30.417224+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:31.417492+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:32.417939+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6552000/0x0/0x4ffc00000, data 0x4c31f27/0x4d0c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:33.418206+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:34.418694+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1809173 data_alloc: 251658240 data_used: 41119744
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:35.419108+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:36.419465+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:37.419746+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6552000/0x0/0x4ffc00000, data 0x4c31f27/0x4d0c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:38.420140+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 19.423469543s of 19.880949020s, submitted: 173
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:39.420363+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6552000/0x0/0x4ffc00000, data 0x4c31f27/0x4d0c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155475968 unmapped: 22937600 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808865 data_alloc: 251658240 data_used: 41119744
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6552000/0x0/0x4ffc00000, data 0x4c31f27/0x4d0c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:40.420892+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155475968 unmapped: 22937600 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:41.421136+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155475968 unmapped: 22937600 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:42.421480+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155475968 unmapped: 22937600 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:43.421831+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155484160 unmapped: 22929408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:44.422359+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155484160 unmapped: 22929408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808865 data_alloc: 251658240 data_used: 41119744
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:45.422592+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155484160 unmapped: 22929408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:46.422811+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155484160 unmapped: 22929408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:47.423162+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155484160 unmapped: 22929408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:48.423454+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155484160 unmapped: 22929408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:49.423925+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155484160 unmapped: 22929408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808865 data_alloc: 251658240 data_used: 41119744
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:50.424323+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155492352 unmapped: 22921216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:51.424827+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 12.526916504s of 12.543886185s, submitted: 2
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155500544 unmapped: 22913024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:52.425379+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155500544 unmapped: 22913024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:53.425803+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155500544 unmapped: 22913024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:54.426462+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155500544 unmapped: 22913024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808865 data_alloc: 251658240 data_used: 41119744
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:55.426687+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155508736 unmapped: 22904832 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:56.427143+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155508736 unmapped: 22904832 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:57.427403+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155508736 unmapped: 22904832 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:58.427667+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155508736 unmapped: 22904832 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:59.427915+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155508736 unmapped: 22904832 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808865 data_alloc: 251658240 data_used: 41119744
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:00.428221+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155516928 unmapped: 22896640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:01.428504+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155516928 unmapped: 22896640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:02.428936+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155516928 unmapped: 22896640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:03.429126+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155516928 unmapped: 22896640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:04.429400+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155516928 unmapped: 22896640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808865 data_alloc: 251658240 data_used: 41119744
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:05.429626+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155516928 unmapped: 22896640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:06.430094+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155516928 unmapped: 22896640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:07.430539+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155516928 unmapped: 22896640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:08.430798+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155525120 unmapped: 22888448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:09.431168+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155525120 unmapped: 22888448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808865 data_alloc: 251658240 data_used: 41119744
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:10.431444+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155525120 unmapped: 22888448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:11.431871+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155525120 unmapped: 22888448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:12.432300+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155533312 unmapped: 22880256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:13.432679+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155533312 unmapped: 22880256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca266e800 session 0x559ca44af4a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0493800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:14.433157+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155533312 unmapped: 22880256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808865 data_alloc: 251658240 data_used: 41119744
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:15.433441+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155533312 unmapped: 22880256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:16.440546+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155533312 unmapped: 22880256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:17.440816+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155533312 unmapped: 22880256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:18.441042+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155533312 unmapped: 22880256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:19.441512+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155533312 unmapped: 22880256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 28.290304184s of 28.305244446s, submitted: 2
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808513 data_alloc: 251658240 data_used: 41123840
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:20.441933+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155582464 unmapped: 22831104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:21.442150+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155582464 unmapped: 22831104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:22.442394+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155607040 unmapped: 22806528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:23.443015+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155623424 unmapped: 22790144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:24.443332+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155623424 unmapped: 22790144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:25.443582+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155623424 unmapped: 22790144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:26.443929+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155623424 unmapped: 22790144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:27.444171+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155623424 unmapped: 22790144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:28.444376+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155623424 unmapped: 22790144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:29.444584+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155623424 unmapped: 22790144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:30.444972+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155623424 unmapped: 22790144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:31.445704+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155631616 unmapped: 22781952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:32.445904+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155631616 unmapped: 22781952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:33.446352+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155631616 unmapped: 22781952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:34.446652+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155631616 unmapped: 22781952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:35.446925+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155631616 unmapped: 22781952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:36.447570+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155631616 unmapped: 22781952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:37.447864+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155631616 unmapped: 22781952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:38.448140+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:39.448366+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155648000 unmapped: 22765568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:40.448596+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155648000 unmapped: 22765568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:41.448960+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155648000 unmapped: 22765568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:42.449343+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155648000 unmapped: 22765568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:43.449688+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155648000 unmapped: 22765568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:44.450138+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155648000 unmapped: 22765568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:45.450513+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155648000 unmapped: 22765568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:46.450931+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155648000 unmapped: 22765568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:47.451169+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155656192 unmapped: 22757376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:48.451385+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155656192 unmapped: 22757376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:49.451738+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155656192 unmapped: 22757376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:50.452111+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155656192 unmapped: 22757376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:51.452343+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155656192 unmapped: 22757376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:52.452612+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155656192 unmapped: 22757376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:53.452970+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155656192 unmapped: 22757376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:54.453509+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155656192 unmapped: 22757376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:55.453878+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:56.454208+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:57.454643+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:58.455029+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:59.455436+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:00.455758+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:01.456096+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:02.456383+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:03.456710+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:04.457032+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:05.457433+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:06.457745+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:07.458308+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:08.458637+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:09.458970+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:10.459290+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:11.459652+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155672576 unmapped: 22740992 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:12.459866+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155672576 unmapped: 22740992 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:13.460101+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155672576 unmapped: 22740992 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:14.460378+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155672576 unmapped: 22740992 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:15.460575+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155672576 unmapped: 22740992 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:16.460825+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155672576 unmapped: 22740992 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:17.461061+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155672576 unmapped: 22740992 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:18.461479+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155672576 unmapped: 22740992 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:19.461815+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155680768 unmapped: 22732800 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:20.462060+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155680768 unmapped: 22732800 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:21.462757+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155680768 unmapped: 22732800 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:22.463029+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155680768 unmapped: 22732800 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:23.463253+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 63.620548248s of 64.190055847s, submitted: 110
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca1f3b000 session 0x559ca1cad860
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155926528 unmapped: 22487040 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:24.463528+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155926528 unmapped: 22487040 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:25.463890+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1873141 data_alloc: 251658240 data_used: 41144320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e43000/0x0/0x4ffc00000, data 0x5340f27/0x541b000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155926528 unmapped: 22487040 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:26.464207+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155926528 unmapped: 22487040 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:27.464723+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:28.465202+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155926528 unmapped: 22487040 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:29.465483+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155926528 unmapped: 22487040 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e43000/0x0/0x4ffc00000, data 0x5340f27/0x541b000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:30.465841+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155926528 unmapped: 22487040 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1873141 data_alloc: 251658240 data_used: 41144320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca11b0400 session 0x559ca1c8d0e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:31.466093+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155934720 unmapped: 22478848 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:32.466365+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155934720 unmapped: 22478848 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:33.466607+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155934720 unmapped: 22478848 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:34.466855+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155934720 unmapped: 22478848 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:35.467085+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 156688384 unmapped: 21725184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1910234 data_alloc: 251658240 data_used: 46198784
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:36.467310+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157319168 unmapped: 21094400 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:37.467711+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157319168 unmapped: 21094400 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:38.467925+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157319168 unmapped: 21094400 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:39.468162+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157319168 unmapped: 21094400 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:40.468370+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157319168 unmapped: 21094400 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1917754 data_alloc: 251658240 data_used: 47218688
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:41.468632+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157319168 unmapped: 21094400 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:42.468835+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157319168 unmapped: 21094400 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:43.469041+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157319168 unmapped: 21094400 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:44.469390+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157327360 unmapped: 21086208 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:45.469667+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157327360 unmapped: 21086208 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1917754 data_alloc: 251658240 data_used: 47218688
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:46.469922+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157327360 unmapped: 21086208 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:47.470284+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157327360 unmapped: 21086208 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:48.470844+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157327360 unmapped: 21086208 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:49.471096+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157327360 unmapped: 21086208 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:50.471764+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157327360 unmapped: 21086208 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1917754 data_alloc: 251658240 data_used: 47218688
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:51.471998+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157327360 unmapped: 21086208 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:52.472305+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157335552 unmapped: 21078016 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:53.472546+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157335552 unmapped: 21078016 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:54.472915+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157335552 unmapped: 21078016 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:55.473186+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157335552 unmapped: 21078016 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1917754 data_alloc: 251658240 data_used: 47218688
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:56.473532+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157335552 unmapped: 21078016 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:57.473783+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157343744 unmapped: 21069824 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:58.474071+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157343744 unmapped: 21069824 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:59.474350+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157343744 unmapped: 21069824 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:00.474581+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157351936 unmapped: 21061632 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1917754 data_alloc: 251658240 data_used: 47218688
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:01.474824+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157351936 unmapped: 21061632 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:02.475047+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157351936 unmapped: 21061632 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:03.475316+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157351936 unmapped: 21061632 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:04.475566+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157351936 unmapped: 21061632 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:05.475763+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157351936 unmapped: 21061632 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1918234 data_alloc: 251658240 data_used: 47230976
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 42.103767395s of 42.208259583s, submitted: 21
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:06.476015+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:07.476288+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160964608 unmapped: 17448960 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:08.476655+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 161062912 unmapped: 17350656 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:09.477013+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 161062912 unmapped: 17350656 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fb000/0x0/0x4ffc00000, data 0x5a86f4a/0x5b62000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:10.477346+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 161062912 unmapped: 17350656 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990890 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:11.477641+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 161062912 unmapped: 17350656 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:12.477916+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 161062912 unmapped: 17350656 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:13.478367+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 161062912 unmapped: 17350656 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:14.478808+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160260096 unmapped: 18153472 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:15.479047+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160260096 unmapped: 18153472 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:16.479313+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:17.479686+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:18.480050+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:19.480299+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:20.480638+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:21.480981+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:22.481207+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:23.481554+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:24.481869+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:25.482106+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:26.482451+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:27.482668+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:28.482961+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:29.483378+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:30.483660+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:31.484008+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:32.484430+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:33.484684+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:34.484994+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:35.485409+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:36.485714+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160276480 unmapped: 18137088 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:37.485914+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160276480 unmapped: 18137088 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:38.486203+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160276480 unmapped: 18137088 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 32.356250763s of 32.752002716s, submitted: 100
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:39.486559+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:40.486912+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985218 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:41.487185+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:42.487361+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:43.487630+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:44.487886+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:45.488170+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:46.488354+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:47.488587+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:48.489037+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:49.489277+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:50.489610+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:51.489910+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:52.490112+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:53.490365+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:54.490690+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:55.491064+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:56.491430+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:57.491761+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:58.491965+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:59.492411+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:00.492743+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:01.492967+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:02.493292+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:03.493511+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:04.493755+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:05.493987+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:06.494368+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:07.494732+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:08.495103+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:09.495506+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:10.495889+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:11.496212+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:12.496541+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:13.496920+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:14.497476+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:15.497877+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:16.498206+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:17.498503+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:18.498859+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:19.499208+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:20.499639+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:21.500009+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:22.500304+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:23.500711+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:24.501200+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:25.501490+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:26.501725+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:27.502098+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:28.502374+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:29.502821+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:30.503189+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:31.503412+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:32.503815+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:33.504154+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:34.504691+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:35.505022+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:36.505433+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:37.505747+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:38.506087+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:39.506470+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:40.506904+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:41.507192+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:42.507576+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:43.508075+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:44.509111+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:45.509446+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:46.509785+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:47.510112+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:48.510334+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:49.510778+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:50.511123+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:51.511496+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:52.511674+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:53.512037+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:54.516077+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:55.516443+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:56.516635+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:57.516828+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:58.517115+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:59.517449+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:00.517712+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:01.517980+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:02.518370+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:03.518734+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:04.519092+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:05.519364+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:06.519649+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:07.519854+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:08.520185+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:09.520624+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:10.521076+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:11.521490+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:12.521811+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160350208 unmapped: 18063360 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:13.522149+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160350208 unmapped: 18063360 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:14.523002+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160350208 unmapped: 18063360 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:15.523327+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160350208 unmapped: 18063360 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:16.523512+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160350208 unmapped: 18063360 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:17.523782+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160350208 unmapped: 18063360 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:18.524035+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160350208 unmapped: 18063360 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:19.524267+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160358400 unmapped: 18055168 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:20.524520+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160358400 unmapped: 18055168 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 102.055480957s of 102.074127197s, submitted: 2
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:21.524722+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160374784 unmapped: 18038784 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1987746 data_alloc: 251658240 data_used: 48824320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:22.525003+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160374784 unmapped: 18038784 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:23.525272+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160374784 unmapped: 18038784 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:24.525666+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160374784 unmapped: 18038784 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:25.526021+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160374784 unmapped: 18038784 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:26.526408+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160374784 unmapped: 18038784 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1987746 data_alloc: 251658240 data_used: 48824320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:27.526639+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160464896 unmapped: 17948672 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56e1000/0x0/0x4ffc00000, data 0x5aa1f4a/0x5b7d000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:28.526813+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160464896 unmapped: 17948672 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:29.526994+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160464896 unmapped: 17948672 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:30.527433+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160464896 unmapped: 17948672 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:31.527629+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160464896 unmapped: 17948672 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1988774 data_alloc: 251658240 data_used: 48824320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56e1000/0x0/0x4ffc00000, data 0x5aa1f4a/0x5b7d000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:32.527907+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160473088 unmapped: 17940480 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56e1000/0x0/0x4ffc00000, data 0x5aa1f4a/0x5b7d000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:33.528378+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160473088 unmapped: 17940480 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:34.528801+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160473088 unmapped: 17940480 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56e1000/0x0/0x4ffc00000, data 0x5aa1f4a/0x5b7d000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:35.529122+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160473088 unmapped: 17940480 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:36.529500+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160473088 unmapped: 17940480 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1988774 data_alloc: 251658240 data_used: 48824320
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:37.529811+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160481280 unmapped: 17932288 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:38.530043+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160481280 unmapped: 17932288 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:39.530330+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160481280 unmapped: 17932288 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56e1000/0x0/0x4ffc00000, data 0x5aa1f4a/0x5b7d000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:40.530501+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160481280 unmapped: 17932288 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:41.530840+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160481280 unmapped: 17932288 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989094 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:42.531514+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160481280 unmapped: 17932288 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56e1000/0x0/0x4ffc00000, data 0x5aa1f4a/0x5b7d000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 21.782728195s of 21.812334061s, submitted: 4
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:43.531866+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:44.532180+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:45.532459+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:46.532704+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989582 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:47.532953+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:48.533208+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:49.533652+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:50.533887+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:51.534291+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989582 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:52.534727+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:53.534963+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:54.535179+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:55.535370+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:56.535691+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160571392 unmapped: 17842176 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989582 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:57.535931+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160571392 unmapped: 17842176 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:58.536532+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160571392 unmapped: 17842176 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:59.536871+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160571392 unmapped: 17842176 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:00.537110+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160571392 unmapped: 17842176 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca00be800 session 0x559c9f5d50e0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559c9f7ab800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:01.537408+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989582 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 19.184776306s of 19.200614929s, submitted: 3
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:02.537648+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:03.537896+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:04.538156+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:05.538415+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:06.538766+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989758 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:07.539099+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:08.539326+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:09.539595+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:10.539778+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:11.540102+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989758 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 10.225237846s of 10.240203857s, submitted: 3
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:12.540391+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160587776 unmapped: 17825792 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:13.540760+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160587776 unmapped: 17825792 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:14.541215+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160587776 unmapped: 17825792 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:15.541743+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160587776 unmapped: 17825792 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:16.542151+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160587776 unmapped: 17825792 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:17.542676+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160587776 unmapped: 17825792 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:18.543108+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160587776 unmapped: 17825792 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:19.543504+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160595968 unmapped: 17817600 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:20.543958+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:21.544418+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:22.544738+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:23.545190+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:24.545648+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:25.546023+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:26.546462+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:27.546867+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:28.547215+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:29.547641+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:30.547882+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:31.548177+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:32.548595+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:33.548997+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:34.549349+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:35.549737+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:36.550876+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:37.552533+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:38.554099+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:39.555742+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:40.557212+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:41.558811+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:42.560413+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:43.562045+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:44.562810+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:45.563113+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:46.563495+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:47.563943+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:48.564577+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:49.564895+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:50.565457+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:51.565889+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:52.566529+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:53.567043+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:54.567513+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:55.567918+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:56.568575+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:57.569044+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:58.569766+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:59.570195+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:00.570782+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:01.571192+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:02.571382+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:03.571645+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:04.572197+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:05.572437+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:06.573001+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:07.573385+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:08.573751+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:09.574157+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:10.574406+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:11.574722+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:12.575117+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:13.575383+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:14.575670+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:15.575923+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:16.576424+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:17.576666+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:18.576896+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:19.577193+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:20.577405+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:21.577815+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:22.578074+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:23.578338+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:24.578673+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:25.578906+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:26.579119+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:27.579369+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:28.579627+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:29.579916+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:30.580153+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:31.580521+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:32.580806+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:33.581413+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:34.582457+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:35.582810+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:36.583350+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:37.583767+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:38.584155+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:39.584433+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:40.584804+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:41.585148+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:42.585481+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:43.585887+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:44.586294+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:45.587011+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:46.587470+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:47.587809+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:48.588024+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:49.588440+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:50.588646+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:51.588866+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:52.589198+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:53.589404+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:54.589598+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:55.589808+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:56.590081+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:57.590440+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:58.590799+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:59.591030+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:00.591280+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:01.591512+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:02.591722+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:03.591945+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:04.592208+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:05.592519+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:06.592759+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:07.593156+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:08.593418+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:09.593799+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:10.594144+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:11.594524+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:12.594922+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990894 data_alloc: 251658240 data_used: 48922624
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:13.595277+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:14.595698+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 122.132949829s of 122.139366150s, submitted: 1
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:15.596082+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:16.596451+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c9000/0x0/0x4ffc00000, data 0x5ab9f4a/0x5b95000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:17.596892+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990550 data_alloc: 251658240 data_used: 48922624
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:18.597386+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c9000/0x0/0x4ffc00000, data 0x5ab9f4a/0x5b95000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:19.597779+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c9000/0x0/0x4ffc00000, data 0x5ab9f4a/0x5b95000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c9000/0x0/0x4ffc00000, data 0x5ab9f4a/0x5b95000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:20.598147+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:21.598575+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:22.598921+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990550 data_alloc: 251658240 data_used: 48922624
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:23.599382+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:24.600027+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:25.600331+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c9000/0x0/0x4ffc00000, data 0x5ab9f4a/0x5b95000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:26.600667+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:27.601083+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c9000/0x0/0x4ffc00000, data 0x5ab9f4a/0x5b95000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990710 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:28.601322+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:29.601701+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 15.860550880s of 15.867979050s, submitted: 1
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:30.602003+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:31.602328+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c8000/0x0/0x4ffc00000, data 0x5abaf4a/0x5b96000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:32.602584+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990778 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:33.602902+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:34.603166+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:35.603375+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:36.603594+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:37.604025+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c8000/0x0/0x4ffc00000, data 0x5abaf4a/0x5b96000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990778 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:38.604525+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:39.604927+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:40.605159+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c8000/0x0/0x4ffc00000, data 0x5abaf4a/0x5b96000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:41.605569+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:42.606134+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990778 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:43.606367+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:44.606785+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c8000/0x0/0x4ffc00000, data 0x5abaf4a/0x5b96000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:45.607044+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:46.607539+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:47.607865+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c8000/0x0/0x4ffc00000, data 0x5abaf4a/0x5b96000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990778 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:48.608411+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c8000/0x0/0x4ffc00000, data 0x5abaf4a/0x5b96000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:49.608900+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 19.193056107s of 19.199436188s, submitted: 1
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:50.609513+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:51.609883+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:52.610365+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990910 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:53.610680+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:54.611164+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:55.611574+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:56.611921+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:57.612465+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990910 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:58.612782+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:59.613182+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 10.222181320s of 10.239167213s, submitted: 2
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:00.613506+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:01.613897+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:02.614159+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:03.614583+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:04.614875+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:05.615106+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:06.615561+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:07.615770+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:08.615980+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:09.616467+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:10.616739+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:11.616978+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:12.617455+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:13.617719+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:14.618016+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:15.618541+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:16.619069+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:17.619461+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:18.619863+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:19.620381+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:20.620755+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:21.621209+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:22.621719+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:23.621934+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:24.622410+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:25.622786+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:26.623335+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:27.623799+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:28.624079+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:29.624451+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:30.624740+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:31.625117+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:32.625542+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:33.625804+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:34.626369+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:35.626686+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:36.626938+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:37.627422+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:38.627634+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:39.627956+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:40.628195+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:41.628510+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:42.628733+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:43.629432+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:44.629741+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:45.631405+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:46.631700+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:47.632037+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:48.632539+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 4200.1 total, 600.0 interval
                                            Cumulative writes: 11K writes, 44K keys, 11K commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 11K writes, 3098 syncs, 3.70 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 775 writes, 2877 keys, 775 commit groups, 1.0 writes per commit group, ingest: 3.62 MB, 0.01 MB/s
                                            Interval WAL: 775 writes, 302 syncs, 2.57 writes per sync, written: 0.00 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:49.632907+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:50.633334+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:51.633733+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:52.634366+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:53.634669+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:54.635162+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:55.635509+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:56.635880+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:57.636178+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:58.636423+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:59.636691+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:00.637178+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:01.637803+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:02.638045+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:03.638485+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:04.638960+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:05.639509+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:06.640043+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:07.640481+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:08.641020+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:09.641421+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:10.641652+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:11.642003+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:12.642492+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:13.642858+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:14.643414+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:15.643890+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:16.644134+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:17.644385+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:18.644691+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:19.645004+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:20.645184+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:21.645597+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:22.645978+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:23.646439+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:24.646918+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:25.647342+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:26.648133+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:27.648374+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:28.648647+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:29.649991+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:30.650486+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:31.651073+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:32.651385+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:33.652034+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:34.652347+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:35.652735+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:36.653385+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:37.653850+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:38.654489+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:39.654778+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:40.656515+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:41.656757+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:42.657067+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:43.657548+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:44.658463+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:45.658768+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:46.659016+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:47.659301+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:48.659730+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:49.660091+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:50.660417+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:51.660685+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:52.661019+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:53.661342+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:54.661933+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:55.662469+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:56.662845+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:57.663098+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:58.663476+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:59.664072+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:00.664433+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:01.664630+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:02.665159+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:03.665433+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:04.665808+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:05.666031+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:06.666693+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:07.668196+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:08.668594+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:09.668792+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:10.669402+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:11.669921+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:12.671582+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:13.672846+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:14.673611+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160776192 unmapped: 17637376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:15.673869+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160776192 unmapped: 17637376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:16.674376+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160776192 unmapped: 17637376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:17.674788+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160776192 unmapped: 17637376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:18.675091+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160776192 unmapped: 17637376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:19.675433+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160776192 unmapped: 17637376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:20.675790+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160776192 unmapped: 17637376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 141.434371948s of 141.442596436s, submitted: 1
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:21.676293+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160784384 unmapped: 17629184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:22.676716+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160784384 unmapped: 17629184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:23.677022+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160817152 unmapped: 17596416 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:24.677342+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:25.677796+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:26.678176+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:27.678571+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:28.678814+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:29.679098+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:30.679442+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:31.679747+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:32.680094+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:33.680361+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:34.680909+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:35.681606+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:36.682024+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:37.682381+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:38.682855+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 251658240 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:39.683195+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:40.683675+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:41.684150+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:42.684433+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:43.684849+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 234881024 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:44.685168+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:45.685550+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:46.685928+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:47.686402+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:48.686788+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 234881024 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:49.687325+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:50.687734+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:51.688143+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:52.688556+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:53.688969+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 234881024 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:54.689485+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:55.689898+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:56.690451+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:57.690840+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:58.691365+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 218103808 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:59.691742+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:00.691970+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:01.692201+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:02.692594+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:03.692867+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 218103808 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:04.693374+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:05.693573+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:06.694064+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:07.694453+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:08.694787+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 218103808 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:09.695175+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:10.695412+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:11.695634+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:12.695844+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:13.696078+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 218103808 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:14.696423+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:15.696685+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160858112 unmapped: 17555456 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:16.697060+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160858112 unmapped: 17555456 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:17.697382+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160858112 unmapped: 17555456 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:18.697595+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 57.406566620s of 57.994510651s, submitted: 90
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca266e400 session 0x559c9f5dc5a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca1f3bc00 session 0x559c9ff123c0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990354 data_alloc: 218103808 data_used: 48926720
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160858112 unmapped: 17555456 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca00be800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:19.697810+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca00be800 session 0x559ca25e4960
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:20.698010+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:21.698250+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:22.698764+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f7426000/0x0/0x4ffc00000, data 0x3adfed8/0x3bb9000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:23.699404+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647764 data_alloc: 218103808 data_used: 33910784
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:24.700175+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:25.700373+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:26.700607+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f7426000/0x0/0x4ffc00000, data 0x3adfed8/0x3bb9000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:27.700860+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:28.701170+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647764 data_alloc: 218103808 data_used: 33910784
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:29.701566+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:30.701927+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:31.702404+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f7426000/0x0/0x4ffc00000, data 0x3adfed8/0x3bb9000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca14ca800 session 0x559ca1c8c3c0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:32.702763+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca00be800
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 13.333739281s of 13.553236961s, submitted: 50
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f76a5000/0x0/0x4ffc00000, data 0x2f88ed8/0x3062000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca00be800 session 0x559ca25e5a40
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:33.703145+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146096128 unmapped: 32317440 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1482456 data_alloc: 218103808 data_used: 26468352
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:34.703629+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146096128 unmapped: 32317440 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f84fd000/0x0/0x4ffc00000, data 0x2c87eb5/0x2d60000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:35.704002+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146104320 unmapped: 32309248 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:36.704560+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146104320 unmapped: 32309248 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:37.705008+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146104320 unmapped: 32309248 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:38.705591+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146104320 unmapped: 32309248 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f84fd000/0x0/0x4ffc00000, data 0x2c87eb5/0x2d60000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1482456 data_alloc: 218103808 data_used: 26468352
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:39.706028+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146104320 unmapped: 32309248 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:40.706487+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146104320 unmapped: 32309248 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:41.706932+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146096128 unmapped: 32317440 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 141 handle_osd_map epochs [142,142], i have 141, src has [1,142]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:42.707361+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 142 ms_handle_reset con 0x559ca11b0400 session 0x559c9f5d63c0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132194304 unmapped: 46219264 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 142 heartbeat osd_stat(store_statfs(0x4f84ff000/0x0/0x4ffc00000, data 0x2c87e92/0x2d5f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b000
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:43.707722+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132194304 unmapped: 46219264 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 10.512029648s of 10.966043472s, submitted: 80
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 142 handle_osd_map epochs [143,143], i have 142, src has [1,143]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 143 ms_handle_reset con 0x559ca1f3b000 session 0x559ca126d4a0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1372255 data_alloc: 218103808 data_used: 11800576
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:44.708167+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133292032 unmapped: 49782784 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3bc00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 143 handle_osd_map epochs [144,144], i have 143, src has [1,144]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1437057721' entity='client.admin' cmd=[{"prefix": "osd crush show-tunables"}]: dispatch
Oct 11 02:57:56 compute-0 ceph-mon[191930]: pgmap v2414: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:56 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1880438321' entity='client.admin' cmd=[{"prefix": "mgr module ls", "format": "json-pretty"}]: dispatch
Oct 11 02:57:56 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/743982802' entity='client.admin' cmd=[{"prefix": "osd crush tree", "show_shadow": true}]: dispatch
Oct 11 02:57:56 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/143006115' entity='client.admin' cmd=[{"prefix": "osd erasure-code-profile ls"}]: dispatch
Oct 11 02:57:56 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1678805676' entity='client.admin' cmd=[{"prefix": "mgr services", "format": "json-pretty"}]: dispatch
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:45.708478+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 144 ms_handle_reset con 0x559ca1f3bc00 session 0x559ca126de00
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133324800 unmapped: 49750016 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:46.708930+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133324800 unmapped: 49750016 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:47.709441+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133324800 unmapped: 49750016 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:48.709785+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 144 heartbeat osd_stat(store_statfs(0x4f94f4000/0x0/0x4ffc00000, data 0x1c8d1ba/0x1d67000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133324800 unmapped: 49750016 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345275 data_alloc: 218103808 data_used: 11800576
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:49.710137+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133324800 unmapped: 49750016 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 144 heartbeat osd_stat(store_statfs(0x4f94f4000/0x0/0x4ffc00000, data 0x1c8d1ba/0x1d67000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:50.710546+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133324800 unmapped: 49750016 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:51.711022+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 144 handle_osd_map epochs [144,145], i have 144, src has [1,145]
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:52.711499+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:53.711971+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:54.712364+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:55.713003+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:56.713501+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:57.713891+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:58.714539+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:59.714931+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:00.715487+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:01.715837+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:02.716156+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:03.716374+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:04.716818+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:05.717171+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:06.717574+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:07.718020+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:08.718552+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:09.718865+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:10.719350+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:11.719643+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:12.720082+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:13.720413+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:14.720682+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:57:56 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:57:56 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133365760 unmapped: 49709056 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:22.749054+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: do_command 'config diff' '{prefix=config diff}'
Oct 11 02:57:56 compute-0 ceph-osd[206800]: do_command 'config diff' '{prefix=config diff}' result is 0 bytes
Oct 11 02:57:56 compute-0 ceph-osd[206800]: do_command 'config show' '{prefix=config show}'
Oct 11 02:57:56 compute-0 ceph-osd[206800]: do_command 'config show' '{prefix=config show}' result is 0 bytes
Oct 11 02:57:56 compute-0 ceph-osd[206800]: do_command 'counter dump' '{prefix=counter dump}'
Oct 11 02:57:56 compute-0 ceph-osd[206800]: do_command 'counter dump' '{prefix=counter dump}' result is 0 bytes
Oct 11 02:57:56 compute-0 ceph-osd[206800]: do_command 'counter schema' '{prefix=counter schema}'
Oct 11 02:57:56 compute-0 ceph-osd[206800]: do_command 'counter schema' '{prefix=counter schema}' result is 0 bytes
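
The do_command pairs record requests arriving on the OSD's admin socket ('config diff', 'config show', 'counter dump', 'counter schema'), most likely from a metrics collector making its rounds. The same commands can be replayed by hand through the ceph daemon CLI, which speaks to the daemon's .asok socket; a small wrapper, assuming the CLI is installed and the caller can reach osd.1's socket on this host:

    #!/usr/bin/env python3
    # Replay one of the admin-socket commands from the log and pretty-print
    # the JSON reply. Assumes `ceph` is installed and the caller may read
    # the osd.1 admin socket (typically root on the OSD host).
    import json
    import subprocess

    def daemon_cmd(daemon, *cmd):
        out = subprocess.check_output(["ceph", "daemon", daemon, *cmd])
        return json.loads(out)

    # Settings that differ from defaults, as in the 'config diff' above.
    diff = daemon_cmd("osd.1", "config", "diff")
    print(json.dumps(diff, indent=2))
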
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133423104 unmapped: 49651712 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:23.749290+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132972544 unmapped: 50102272 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:24.749749+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 02:57:56 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132784128 unmapped: 50290688 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 02:57:56 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:25.750078+0000)
Oct 11 02:57:56 compute-0 ceph-osd[206800]: do_command 'log dump' '{prefix=log dump}'
Oct 11 02:57:56 compute-0 rsyslogd[187706]: imjournal from <compute-0:ceph-osd>: begin to drop messages due to rate-limiting
Oct 11 02:57:56 compute-0 rsyslogd[187706]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
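
This is rsyslog's imjournal input giving up on the ceph-osd flood: once a source exceeds the rate-limit budget (by default 20,000 messages per 600-second interval), further messages are dropped until the interval rolls over, which is exactly what the per-second debug spam above provoked. If the full stream is actually wanted, the budget can be raised where imjournal is loaded; parameter names as documented for the imjournal module, values here purely illustrative:

    # rsyslog.conf: raise imjournal rate-limiting (Ratelimit.Interval="0"
    # disables it entirely, per the imjournal documentation).
    module(load="imjournal"
           StateFile="imjournal.state"
           Ratelimit.Interval="600"
           Ratelimit.Burst="100000")

The more direct fix is usually to lower the OSD's debug levels so the burst never happens.
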
Oct 11 02:57:56 compute-0 crontab[484397]: (root) LIST (root)
Oct 11 02:57:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata"} v 0) v1
Oct 11 02:57:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3427841102' entity='client.admin' cmd=[{"prefix": "osd metadata"}]: dispatch
Oct 11 02:57:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:57:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:57:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr stat", "format": "json-pretty"} v 0) v1
Oct 11 02:57:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2045422719' entity='client.admin' cmd=[{"prefix": "mgr stat", "format": "json-pretty"}]: dispatch
Oct 11 02:57:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:57:56
Oct 11 02:57:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:57:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:57:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.control', 'cephfs.cephfs.meta', 'cephfs.cephfs.data', 'volumes', 'default.rgw.meta', 'default.rgw.log', '.mgr', '.rgw.root', 'images', 'backups', 'vms']
Oct 11 02:57:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
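
Each balancer pass logs its mode (upmap), the allowed misplaced ratio (5%), the pools it considered, and how many pg-upmap changes it prepared; 0/10 means the PGs are already evenly placed, so the plan is empty. The same state can be pulled on demand; a quick check, assuming admin CLI access from this node:

    #!/usr/bin/env python3
    # Ask the mgr balancer module for its current state, as logged above.
    import json
    import subprocess

    status = json.loads(subprocess.check_output(
        ["ceph", "balancer", "status", "--format", "json"]))
    print("active:", status.get("active"), "mode:", status.get("mode"))
    print("last optimization:", status.get("optimize_result"))
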
Oct 11 02:57:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:57:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd utilization"} v 0) v1
Oct 11 02:57:57 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3711407299' entity='client.admin' cmd=[{"prefix": "osd utilization"}]: dispatch
Oct 11 02:57:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr versions", "format": "json-pretty"} v 0) v1
Oct 11 02:57:57 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3834832610' entity='client.admin' cmd=[{"prefix": "mgr versions", "format": "json-pretty"}]: dispatch
Oct 11 02:57:57 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3427841102' entity='client.admin' cmd=[{"prefix": "osd metadata"}]: dispatch
Oct 11 02:57:57 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2045422719' entity='client.admin' cmd=[{"prefix": "mgr stat", "format": "json-pretty"}]: dispatch
Oct 11 02:57:57 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3711407299' entity='client.admin' cmd=[{"prefix": "osd utilization"}]: dispatch
Oct 11 02:57:57 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3834832610' entity='client.admin' cmd=[{"prefix": "mgr versions", "format": "json-pretty"}]: dispatch
Oct 11 02:57:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2415: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:57:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:57:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:57:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:57:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:57:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:57:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:57:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:57:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:57:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:57:57 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15661 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:57:57 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15659 -' entity='client.admin' cmd=[{"prefix": "telemetry channel ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:57 compute-0 nova_compute[356901]: 2025-10-11 02:57:57.819 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:57:58 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15663 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:57:58 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15665 -' entity='client.admin' cmd=[{"prefix": "telemetry collection ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:58 compute-0 ceph-mon[191930]: pgmap v2415: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:58 compute-0 ceph-mon[191930]: from='client.15661 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:57:58 compute-0 ceph-mon[191930]: from='client.15659 -' entity='client.admin' cmd=[{"prefix": "telemetry channel ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:58 compute-0 ceph-mon[191930]: from='client.15663 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:57:58 compute-0 ceph-mon[191930]: from='client.15665 -' entity='client.admin' cmd=[{"prefix": "telemetry collection ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:57:58 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15667 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:57:58 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15671 -' entity='client.admin' cmd=[{"prefix": "orch ls", "export": true, "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:57:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "quorum_status"} v 0) v1
Oct 11 02:57:59 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1683545019' entity='client.admin' cmd=[{"prefix": "quorum_status"}]: dispatch
Oct 11 02:57:59 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15675 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:57:59 compute-0 podman[484730]: 2025-10-11 02:57:59.228470595 +0000 UTC m=+0.111664582 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, container_name=multipathd, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Oct 11 02:57:59 compute-0 podman[484733]: 2025-10-11 02:57:59.236879417 +0000 UTC m=+0.118790880 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_managed=true, container_name=iscsid)
Oct 11 02:57:59 compute-0 ceph-mon[191930]: from='client.15667 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:57:59 compute-0 ceph-mon[191930]: from='client.15671 -' entity='client.admin' cmd=[{"prefix": "orch ls", "export": true, "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:57:59 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1683545019' entity='client.admin' cmd=[{"prefix": "quorum_status"}]: dispatch
Oct 11 02:57:59 compute-0 ceph-mon[191930]: from='client.15675 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
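
[note] These audit entries show cephadm orchestrator queries (orch ls, orch ps, quorum_status) arriving as client.admin commands and being dispatched to the mgr through the mon-mgr forwarding target. The same data can be pulled programmatically; a sketch assuming client.admin credentials on the node, which is what the log itself records:

    import json
    import subprocess

    def ceph_json(*args: str):
        # Mirrors the dispatched commands above; '--format json' asks the
        # CLI for machine-readable output instead of json-pretty.
        out = subprocess.run(
            ["ceph", *args, "--format", "json"],
            capture_output=True, text=True, check=True,
        ).stdout
        return json.loads(out)

    services = ceph_json("orch", "ls")    # service specs, as in 'orch ls' above
    daemons = ceph_json("orch", "ps")     # per-daemon placement and status
    quorum = ceph_json("quorum_status")   # mon quorum view
    print(f"{len(services)} services, {len(daemons)} daemons, "
          f"leader={quorum['quorum_leader_name']}")
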
Oct 11 02:57:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2416: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:57:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "versions"} v 0) v1
Oct 11 02:57:59 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3884453306' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
Oct 11 02:57:59 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15679 -' entity='client.admin' cmd=[{"prefix": "orch status", "detail": true, "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:57:59 compute-0 podman[157119]: time="2025-10-11T02:57:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:57:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:57:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:57:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:57:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9092 "" "Go-http-client/1.1"
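
[note] The two access-log lines come from the podman system service answering libpod REST calls (a container list, then a one-shot stats read) over its local socket; only the HTTP side is logged, not the socket path. A sketch of issuing the same list request from Python, assuming the default rootful socket /run/podman/podman.sock (an assumption; the log does not show it):

    import http.client
    import json
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        """http.client over AF_UNIX; the libpod API here has no TCP port."""
        def __init__(self, path: str):
            super().__init__("localhost")
            self.unix_path = path
        def connect(self):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(self.unix_path)
            self.sock = sock

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    containers = json.loads(conn.getresponse().read())
    print(len(containers), "containers")
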
Oct 11 02:58:00 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15683 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "health", "detail": "detail", "format": "json-pretty"} v 0) v1
Oct 11 02:58:00 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/213475179' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail", "format": "json-pretty"}]: dispatch
Oct 11 02:58:00 compute-0 ceph-mon[191930]: pgmap v2416: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:00 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3884453306' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
Oct 11 02:58:00 compute-0 ceph-mon[191930]: from='client.15679 -' entity='client.admin' cmd=[{"prefix": "orch status", "detail": true, "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:00 compute-0 ceph-mon[191930]: from='client.15683 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:00 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/213475179' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail", "format": "json-pretty"}]: dispatch
Oct 11 02:58:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "format": "json-pretty"} v 0) v1
Oct 11 02:58:00 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2106558264' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json-pretty"}]: dispatch
Oct 11 02:58:00 compute-0 nova_compute[356901]: 2025-10-11 02:58:00.760 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:00 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
Oct 11 02:58:00 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
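
[note] Unlike the network clients above, the from='admin socket' pair is a local query against the mon's Unix admin socket; the dispatch/finished lines bracket one mon_status call. A hedged equivalent from a shell that can reach that socket (the mon id comes from the log; the socket path does not appear in it, and in this containerized deployment it may only be reachable from inside the mon container):

    import json
    import subprocess

    # 'ceph daemon <name> <cmd>' talks to the local admin socket, which is
    # exactly what the from='admin socket' audit entries record.
    out = subprocess.run(
        ["ceph", "daemon", "mon.compute-0", "mon_status"],
        capture_output=True, text=True, check=True,
    ).stdout
    print(json.loads(out)["state"])   # 'leader' here, per the handle_command lines
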
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:24:59.561108+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1465811 data_alloc: 234881024 data_used: 21749760
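
[note] The prioritycache tune_memory lines are the OSD memory autotuner comparing its 4 GiB osd_memory_target (4294967296) against what the heap has actually mapped, and the _resize_shards line shows how the resulting cache budget is divided among the kv, onode, meta, and data shards. A quick check of the printed numbers, assuming they are plain bytes as they appear:

    target = 4_294_967_296   # osd_memory_target from the tune_memory line
    mapped = 106_618_880     # heap bytes currently mapped
    cache = 2_845_415_832    # cache_size chosen by the tuner

    print(f"target {target / 2**30:.1f} GiB")   # 4.0 GiB
    print(f"mapped {mapped / 2**20:.1f} MiB")   # ~101.7 MiB
    print(f"cache  {cache / 2**30:.2f} GiB")    # ~2.65 GiB
    # mapped is far below target, so 'new mem' stays equal to 'old mem'.
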
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:00.561316+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:01.561638+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:02.561974+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:03.562485+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a27000/0x0/0x4ffc00000, data 0x2f8ea88/0x3057000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
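
[note] The heartbeat's store_statfs fields are hex byte counts; the first triple reads available / internally reserved / total, followed by stored / allocated for object data. Decoding them shows each OSD here is backed by a ~20 GiB device, consistent with the pgmap line above reporting 60 GiB total across osd.0 and its peers [1,2]:

    avail, reserved, total = 0x4f8a27000, 0x0, 0x4ffc00000
    data_stored, data_alloc = 0x2f8ea88, 0x3057000

    GiB, MiB = 2**30, 2**20
    print(f"total {total / GiB:.1f} GiB, avail {avail / GiB:.1f} GiB")  # 20.0 / 19.9
    print(f"data  {data_stored / MiB:.1f} MiB stored, "
          f"{data_alloc / MiB:.1f} MiB allocated")                      # ~47.6 / ~48.3
    # Three OSDs of this size account for the 60 GiB in the pgmap entry.
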
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:04.562839+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1465811 data_alloc: 234881024 data_used: 21749760
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:05.563328+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:06.563710+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:07.564046+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:08.564432+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:09.564754+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a27000/0x0/0x4ffc00000, data 0x2f8ea88/0x3057000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1465811 data_alloc: 234881024 data_used: 21749760
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:10.564982+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:11.565219+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:12.565525+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:13.565739+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a27000/0x0/0x4ffc00000, data 0x2f8ea88/0x3057000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:14.565984+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1465811 data_alloc: 234881024 data_used: 21749760
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:15.566193+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:16.566826+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106618880 unmapped: 7151616 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:17.567346+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106627072 unmapped: 7143424 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:18.567810+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106627072 unmapped: 7143424 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:19.568220+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106627072 unmapped: 7143424 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1465811 data_alloc: 234881024 data_used: 21749760
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a27000/0x0/0x4ffc00000, data 0x2f8ea88/0x3057000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:20.568611+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106627072 unmapped: 7143424 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:21.568986+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106627072 unmapped: 7143424 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a27000/0x0/0x4ffc00000, data 0x2f8ea88/0x3057000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:22.569359+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106627072 unmapped: 7143424 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a27000/0x0/0x4ffc00000, data 0x2f8ea88/0x3057000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:23.569695+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106627072 unmapped: 7143424 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:24.569967+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 7135232 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1465811 data_alloc: 234881024 data_used: 21749760
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:25.570381+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 7135232 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:26.570667+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 7135232 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:27.571021+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 7135232 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:28.571481+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 7135232 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a27000/0x0/0x4ffc00000, data 0x2f8ea88/0x3057000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:29.571899+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 7135232 heap: 113770496 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1465811 data_alloc: 234881024 data_used: 21749760
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025ba3c400 session 0x56025be8f860
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025ba3c000 session 0x56025c221e00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b4b4000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025b4b4000 session 0x56025c924960
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x5602594adc00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x5602594adc00 session 0x56025db3af00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 47.747955322s of 47.771453857s, submitted: 3
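
[note] The _kv_sync_thread utilization line is BlueStore reporting how much of the measured interval its KV sync thread spent idle versus committing transactions; here it was idle 47.75 s of 47.77 s with 3 submitted batches, i.e. the OSD is nearly quiescent. The implied duty cycle:

    idle, total, submitted = 47.747955322, 47.771453857, 3

    busy = total - idle
    print(f"busy {busy * 1000:.1f} ms ({busy / total:.4%} of the interval), "
          f"~{busy / submitted * 1000:.1f} ms per submitted batch")
    # ~23.5 ms busy (~0.05%), roughly 7.8 ms per commit batch
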
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:30.572333+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025ba3c000 session 0x56025db3ad20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107569152 unmapped: 12500992 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025ba3c400 session 0x56025a79b2c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22cc00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025c22cc00 session 0x56025c2081e0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025eb95c00 session 0x560258ea9e00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x5602594adc00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x5602594adc00 session 0x56025a229e00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:31.572535+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107585536 unmapped: 12484608 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025ba3c000 session 0x56025c0e70e0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025ba3c400 session 0x56025cc1fa40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22cc00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025c22cc00 session 0x56025a79b680
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025eb95800 session 0x56025be8e960
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:32.572896+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107585536 unmapped: 12484608 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:33.573322+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107585536 unmapped: 12484608 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:34.573570+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107585536 unmapped: 12484608 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8458000/0x0/0x4ffc00000, data 0x355bb4c/0x3626000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1519280 data_alloc: 234881024 data_used: 21749760
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x5602594adc00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x5602594adc00 session 0x56025c0672c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:35.573789+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025ba3c000 session 0x56025b96d2c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107585536 unmapped: 12484608 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025ba3c400 session 0x56025c925e00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22cc00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:36.574314+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025c22cc00 session 0x56025c925a40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107806720 unmapped: 12263424 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:37.574584+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107806720 unmapped: 12263424 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:38.575099+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107806720 unmapped: 12263424 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f841b000/0x0/0x4ffc00000, data 0x3597b5c/0x3663000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:39.575337+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107806720 unmapped: 12263424 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1525282 data_alloc: 234881024 data_used: 21762048
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f841b000/0x0/0x4ffc00000, data 0x3597b5c/0x3663000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:40.575579+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 109166592 unmapped: 10903552 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:41.575947+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 10.932176590s of 11.190330505s, submitted: 45
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112353280 unmapped: 7716864 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:42.576534+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112410624 unmapped: 7659520 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f841b000/0x0/0x4ffc00000, data 0x3597b5c/0x3663000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:43.576954+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112410624 unmapped: 7659520 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:44.577455+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112418816 unmapped: 7651328 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1560146 data_alloc: 234881024 data_used: 26701824
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:45.577843+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112418816 unmapped: 7651328 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:46.578380+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112418816 unmapped: 7651328 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:47.578801+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f841b000/0x0/0x4ffc00000, data 0x3597b5c/0x3663000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112451584 unmapped: 7618560 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:48.579112+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112451584 unmapped: 7618560 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:49.579460+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112451584 unmapped: 7618560 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1560146 data_alloc: 234881024 data_used: 26701824
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:50.579812+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112459776 unmapped: 7610368 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:51.579978+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112459776 unmapped: 7610368 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:52.580375+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112459776 unmapped: 7610368 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f841b000/0x0/0x4ffc00000, data 0x3597b5c/0x3663000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:53.580721+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112492544 unmapped: 7577600 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:54.580927+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112492544 unmapped: 7577600 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1560146 data_alloc: 234881024 data_used: 26701824
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:55.581567+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112492544 unmapped: 7577600 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:56.581912+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112492544 unmapped: 7577600 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:57.582166+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112492544 unmapped: 7577600 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:58.582371+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f841b000/0x0/0x4ffc00000, data 0x3597b5c/0x3663000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112492544 unmapped: 7577600 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:25:59.582580+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112500736 unmapped: 7569408 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1560146 data_alloc: 234881024 data_used: 26701824
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:00.582828+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112508928 unmapped: 7561216 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:01.583042+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112508928 unmapped: 7561216 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:02.583455+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112508928 unmapped: 7561216 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:03.583888+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112508928 unmapped: 7561216 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f841b000/0x0/0x4ffc00000, data 0x3597b5c/0x3663000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:04.584165+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112508928 unmapped: 7561216 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1560146 data_alloc: 234881024 data_used: 26701824
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:05.584418+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112508928 unmapped: 7561216 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:06.584812+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f841b000/0x0/0x4ffc00000, data 0x3597b5c/0x3663000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112541696 unmapped: 7528448 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f841b000/0x0/0x4ffc00000, data 0x3597b5c/0x3663000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:07.585218+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112541696 unmapped: 7528448 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:08.585710+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112541696 unmapped: 7528448 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:09.586157+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112541696 unmapped: 7528448 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1560146 data_alloc: 234881024 data_used: 26701824
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f841b000/0x0/0x4ffc00000, data 0x3597b5c/0x3663000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:10.586603+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f841b000/0x0/0x4ffc00000, data 0x3597b5c/0x3663000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112541696 unmapped: 7528448 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:11.587033+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112541696 unmapped: 7528448 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:12.587491+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112541696 unmapped: 7528448 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:13.587734+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112566272 unmapped: 7503872 heap: 120070144 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 32.596218109s of 32.601860046s, submitted: 1
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:14.588150+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8390000/0x0/0x4ffc00000, data 0x3622b5c/0x36ee000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 116482048 unmapped: 6111232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1638178 data_alloc: 234881024 data_used: 26763264
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7ad3000/0x0/0x4ffc00000, data 0x3ed9b5c/0x3fa5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:15.588391+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115712000 unmapped: 6881280 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:16.588700+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114745344 unmapped: 7847936 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:17.589103+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114745344 unmapped: 7847936 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a33000/0x0/0x4ffc00000, data 0x3f7eb5c/0x404a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:18.589404+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114761728 unmapped: 7831552 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:19.589666+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114761728 unmapped: 7831552 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1650600 data_alloc: 234881024 data_used: 26951680
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:20.589904+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114761728 unmapped: 7831552 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:21.590155+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a33000/0x0/0x4ffc00000, data 0x3f7eb5c/0x404a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114950144 unmapped: 7643136 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a33000/0x0/0x4ffc00000, data 0x3f7eb5c/0x404a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:22.590392+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114950144 unmapped: 7643136 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:23.590607+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114950144 unmapped: 7643136 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:24.590845+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114950144 unmapped: 7643136 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647696 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:25.591222+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114950144 unmapped: 7643136 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:26.591630+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114950144 unmapped: 7643136 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:27.592075+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114950144 unmapped: 7643136 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:28.592386+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114950144 unmapped: 7643136 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:29.592686+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114950144 unmapped: 7643136 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647696 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:30.593072+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114950144 unmapped: 7643136 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:31.593414+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114950144 unmapped: 7643136 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:32.593797+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114950144 unmapped: 7643136 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:33.594166+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114950144 unmapped: 7643136 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:34.594625+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114950144 unmapped: 7643136 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647696 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:35.594963+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114950144 unmapped: 7643136 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:36.595371+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114950144 unmapped: 7643136 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:37.595598+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114958336 unmapped: 7634944 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:38.596037+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114991104 unmapped: 7602176 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:39.596373+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114999296 unmapped: 7593984 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647696 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:40.596834+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114999296 unmapped: 7593984 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:41.597077+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115007488 unmapped: 7585792 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:42.597401+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115007488 unmapped: 7585792 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:43.597821+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115007488 unmapped: 7585792 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:44.598196+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115007488 unmapped: 7585792 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647696 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:45.598606+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115007488 unmapped: 7585792 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:46.599025+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 32.504028320s of 32.961372375s, submitted: 105
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115007488 unmapped: 7585792 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:47.599473+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115007488 unmapped: 7585792 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:48.600083+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115015680 unmapped: 7577600 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:49.600442+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115015680 unmapped: 7577600 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:50.600801+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115015680 unmapped: 7577600 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:51.601185+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115015680 unmapped: 7577600 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:52.601538+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115015680 unmapped: 7577600 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:53.602058+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115015680 unmapped: 7577600 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:54.602382+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115023872 unmapped: 7569408 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:55.602767+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115032064 unmapped: 7561216 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:56.603027+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115032064 unmapped: 7561216 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:57.604006+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115032064 unmapped: 7561216 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:58.604345+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115032064 unmapped: 7561216 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:26:59.604827+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115032064 unmapped: 7561216 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:00.605061+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115032064 unmapped: 7561216 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:01.605532+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115032064 unmapped: 7561216 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config dump"} v 0) v1
Oct 11 02:58:01 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3370820490' entity='client.admin' cmd=[{"prefix": "config dump"}]: dispatch
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:02.605941+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115032064 unmapped: 7561216 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:03.606409+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115032064 unmapped: 7561216 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:04.606804+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115032064 unmapped: 7561216 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:05.607151+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115032064 unmapped: 7561216 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:06.607472+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115032064 unmapped: 7561216 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:07.607757+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115032064 unmapped: 7561216 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:08.608222+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115040256 unmapped: 7553024 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:09.608582+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115040256 unmapped: 7553024 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:10.608982+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115040256 unmapped: 7553024 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:11.609488+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115040256 unmapped: 7553024 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:12.609836+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115040256 unmapped: 7553024 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:13.610141+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115040256 unmapped: 7553024 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:14.610647+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115040256 unmapped: 7553024 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:15.610984+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115040256 unmapped: 7553024 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:16.611670+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115040256 unmapped: 7553024 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:17.612195+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115040256 unmapped: 7553024 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:18.612512+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115040256 unmapped: 7553024 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:19.612973+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115048448 unmapped: 7544832 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:20.613450+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115048448 unmapped: 7544832 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:21.613851+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115048448 unmapped: 7544832 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:22.614332+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115048448 unmapped: 7544832 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:23.614949+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115048448 unmapped: 7544832 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:24.615632+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:25.615984+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115048448 unmapped: 7544832 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:26.616476+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115056640 unmapped: 7536640 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:27.616836+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115056640 unmapped: 7536640 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:28.617206+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115056640 unmapped: 7536640 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:29.617455+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115056640 unmapped: 7536640 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:30.617797+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115056640 unmapped: 7536640 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:31.618093+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115056640 unmapped: 7536640 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:32.618386+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115056640 unmapped: 7536640 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:33.618608+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115056640 unmapped: 7536640 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:34.618849+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115064832 unmapped: 7528448 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:35.619021+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115064832 unmapped: 7528448 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:36.619473+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115064832 unmapped: 7528448 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:37.619806+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115064832 unmapped: 7528448 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:38.620003+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115064832 unmapped: 7528448 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:39.620248+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115064832 unmapped: 7528448 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:40.620596+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115064832 unmapped: 7528448 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets getting new tickets!
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:41.621118+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _finish_auth 0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:41.623389+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115089408 unmapped: 7503872 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:42.621502+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115089408 unmapped: 7503872 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:43.621832+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115089408 unmapped: 7503872 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:44.622071+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115097600 unmapped: 7495680 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:45.622427+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115097600 unmapped: 7495680 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:46.622718+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115097600 unmapped: 7495680 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:47.623098+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115097600 unmapped: 7495680 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:48.623399+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115097600 unmapped: 7495680 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:49.623796+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115097600 unmapped: 7495680 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:50.624136+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115097600 unmapped: 7495680 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:51.624443+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115097600 unmapped: 7495680 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:52.624818+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115105792 unmapped: 7487488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:53.625070+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115105792 unmapped: 7487488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:54.625363+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115105792 unmapped: 7487488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:55.625617+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115105792 unmapped: 7487488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025c045c00 session 0x56025c0d3a40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x5602594adc00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:56.626069+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115105792 unmapped: 7487488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:57.626507+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115105792 unmapped: 7487488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:58.626891+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115113984 unmapped: 7479296 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:27:59.627319+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115113984 unmapped: 7479296 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:00.627749+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115113984 unmapped: 7479296 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:01.628100+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115113984 unmapped: 7479296 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:02.628499+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115113984 unmapped: 7479296 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:03.628963+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115113984 unmapped: 7479296 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:04.629419+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115113984 unmapped: 7479296 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:05.629664+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115113984 unmapped: 7479296 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:06.630040+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115113984 unmapped: 7479296 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:07.630424+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115113984 unmapped: 7479296 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:08.630949+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115122176 unmapped: 7471104 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:09.631325+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115122176 unmapped: 7471104 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:10.631595+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115122176 unmapped: 7471104 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:11.631883+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115122176 unmapped: 7471104 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:12.632484+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115122176 unmapped: 7471104 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:13.632857+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115122176 unmapped: 7471104 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:14.633069+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115122176 unmapped: 7471104 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:15.633445+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115122176 unmapped: 7471104 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:16.633821+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115122176 unmapped: 7471104 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:17.634205+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:18.634673+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:19.634906+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:20.635311+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:21.635669+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:22.635902+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:23.636438+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:24.636652+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:25.636946+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:26.637362+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:27.637802+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:28.638208+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:29.638807+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:30.639382+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:31.639813+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:32.640223+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:33.640545+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:34.640747+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:35.641119+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:36.641424+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:37.641758+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115130368 unmapped: 7462912 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:38.641944+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115138560 unmapped: 7454720 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:39.642196+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115138560 unmapped: 7454720 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:40.642429+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115138560 unmapped: 7454720 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:41.642637+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115138560 unmapped: 7454720 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:42.642997+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115138560 unmapped: 7454720 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:43.643372+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115138560 unmapped: 7454720 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:44.643634+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115138560 unmapped: 7454720 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:45.643919+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115138560 unmapped: 7454720 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:46.644206+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115138560 unmapped: 7454720 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:47.644494+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115138560 unmapped: 7454720 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:48.644731+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115138560 unmapped: 7454720 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:49.645065+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115146752 unmapped: 7446528 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:50.645425+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115146752 unmapped: 7446528 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:51.645679+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115146752 unmapped: 7446528 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:52.646019+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115146752 unmapped: 7446528 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:53.646335+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115146752 unmapped: 7446528 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:54.646624+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115146752 unmapped: 7446528 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:55.646976+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115146752 unmapped: 7446528 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:56.647548+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115146752 unmapped: 7446528 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:57.648089+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115146752 unmapped: 7446528 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:58.648426+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 115146752 unmapped: 7446528 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:28:59.648794+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114769920 unmapped: 7823360 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:00.649215+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114769920 unmapped: 7823360 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:01.649708+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114778112 unmapped: 7815168 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:02.650022+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114778112 unmapped: 7815168 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:03.650392+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114778112 unmapped: 7815168 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:04.650613+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114786304 unmapped: 7806976 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:05.651027+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114786304 unmapped: 7806976 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:06.651527+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114786304 unmapped: 7806976 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:07.651912+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114786304 unmapped: 7806976 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:08.652179+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114786304 unmapped: 7806976 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:09.652618+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114786304 unmapped: 7806976 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:10.652947+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114786304 unmapped: 7806976 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:11.653200+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114786304 unmapped: 7806976 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:12.653449+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114786304 unmapped: 7806976 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:13.653739+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025c128000 session 0x56025c220000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114794496 unmapped: 7798784 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:14.654084+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114794496 unmapped: 7798784 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:15.654410+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114794496 unmapped: 7798784 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647872 data_alloc: 234881024 data_used: 26955776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:16.654792+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114794496 unmapped: 7798784 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:17.655119+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f7a1b000/0x0/0x4ffc00000, data 0x3f97b5c/0x4063000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114794496 unmapped: 7798784 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:18.655414+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 151.443557739s of 151.610015869s, submitted: 1
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025c22c000 session 0x56025c0d21e0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025c22c400 session 0x56025b96d860
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 114802688 unmapped: 7790592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:19.655737+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 111099904 unmapped: 11493376 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:20.656113+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b5c/0x3014000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 111116288 unmapped: 11476992 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:21.656406+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025ba3c400 session 0x56025c924f00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 111116288 unmapped: 11476992 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:22.656828+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 111116288 unmapped: 11476992 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:23.657152+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 111116288 unmapped: 11476992 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:24.657387+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 111116288 unmapped: 11476992 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:25.657617+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 111116288 unmapped: 11476992 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:26.657909+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 111116288 unmapped: 11476992 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:27.658131+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 111116288 unmapped: 11476992 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:28.658531+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 111116288 unmapped: 11476992 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:29.658808+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 111116288 unmapped: 11476992 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:30.659038+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 111116288 unmapped: 11476992 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:31.659406+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:32.659756+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:33.659969+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:34.660214+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:35.660572+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:36.660858+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:37.661130+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:38.661357+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:39.661586+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:40.661868+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:41.662309+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:42.662729+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:43.663076+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:44.663473+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:45.663715+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:46.664013+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:47.664330+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:48.664673+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:49.664913+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:50.665106+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:51.665520+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:52.665751+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:53.665950+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:54.666177+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:55.666613+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:56.666911+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:57.667156+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:58.668169+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:29:59.668460+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:00.668779+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:01.669063+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:02.669379+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:03.669586+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:04.669810+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:05.670029+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:06.670548+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:07.670896+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:08.671095+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:09.671474+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:10.671744+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:11.672022+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:12.672359+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:13.672556+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:14.672803+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:15.673073+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:16.673520+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:17.673785+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:18.674220+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:19.674659+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:20.675048+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:21.675472+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:22.675865+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:23.676129+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:24.676543+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:25.676901+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:26.677348+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:27.677763+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:28.678157+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:29.678468+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:30.678708+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:31.679076+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:32.679440+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:33.679780+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:34.680029+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:35.680491+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:36.681068+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:37.681494+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:38.681657+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:39.682032+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:40.682512+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:41.682920+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:42.683657+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:43.683864+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:44.684401+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:45.684786+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:46.685340+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:47.685601+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:48.685950+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:49.686217+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110706688 unmapped: 11886592 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:50.686671+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110714880 unmapped: 11878400 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:51.687057+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110714880 unmapped: 11878400 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:52.687516+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110714880 unmapped: 11878400 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:53.687764+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110714880 unmapped: 11878400 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:54.687966+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110714880 unmapped: 11878400 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:55.688415+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110714880 unmapped: 11878400 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:56.688748+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110714880 unmapped: 11878400 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:57.689110+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110714880 unmapped: 11878400 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:58.689875+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110714880 unmapped: 11878400 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:30:59.690905+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110714880 unmapped: 11878400 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:00.691354+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110714880 unmapped: 11878400 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:01.691769+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110723072 unmapped: 11870208 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:02.692140+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110723072 unmapped: 11870208 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:03.692577+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110723072 unmapped: 11870208 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:04.692983+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110723072 unmapped: 11870208 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:05.693220+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110723072 unmapped: 11870208 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:06.693699+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110723072 unmapped: 11870208 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:07.694078+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110723072 unmapped: 11870208 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:08.694433+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110723072 unmapped: 11870208 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:09.694708+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110723072 unmapped: 11870208 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:10.695004+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110723072 unmapped: 11870208 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:11.695380+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110723072 unmapped: 11870208 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:12.695577+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110723072 unmapped: 11870208 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:13.695806+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110723072 unmapped: 11870208 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:14.696206+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110723072 unmapped: 11870208 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:15.696659+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110723072 unmapped: 11870208 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:16.696980+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f8a6a000/0x0/0x4ffc00000, data 0x2f48b39/0x3013000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110723072 unmapped: 11870208 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1475398 data_alloc: 234881024 data_used: 20066304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:17.697360+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110723072 unmapped: 11870208 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:18.697705+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110731264 unmapped: 11862016 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 120.138801575s of 120.801856995s, submitted: 38
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025c22d800 session 0x56025b974780
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025c251800 session 0x56025db3a000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:19.698046+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 110731264 unmapped: 11862016 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:20.698327+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025ba3c400 session 0x56025a2bc3c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:21.698795+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f9e78000/0x0/0x4ffc00000, data 0x1b3cb06/0x1c05000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1266544 data_alloc: 218103808 data_used: 12206080
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:22.699148+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:23.699579+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:24.700017+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f9e7c000/0x0/0x4ffc00000, data 0x1b38b06/0x1c01000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:25.700496+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:26.700815+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1266544 data_alloc: 218103808 data_used: 12206080
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:27.701203+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f9e7c000/0x0/0x4ffc00000, data 0x1b38b06/0x1c01000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:28.701457+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:29.701738+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:30.702025+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:31.702605+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1266544 data_alloc: 218103808 data_used: 12206080
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f9e7c000/0x0/0x4ffc00000, data 0x1b38b06/0x1c01000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:32.703021+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f9e7c000/0x0/0x4ffc00000, data 0x1b38b06/0x1c01000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:33.703510+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:34.703770+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f9e7c000/0x0/0x4ffc00000, data 0x1b38b06/0x1c01000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:35.704059+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:36.704368+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1266544 data_alloc: 218103808 data_used: 12206080
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:37.704770+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:38.705339+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:39.705563+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:40.705886+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f9e7c000/0x0/0x4ffc00000, data 0x1b38b06/0x1c01000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:41.706417+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1266544 data_alloc: 218103808 data_used: 12206080
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:42.706739+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f9e7c000/0x0/0x4ffc00000, data 0x1b38b06/0x1c01000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:43.707111+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:44.707511+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:45.707838+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:46.708440+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 105218048 unmapped: 17375232 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 27.589385986s of 27.813987732s, submitted: 43
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025eb94000 session 0x56025c208b40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1266296 data_alloc: 218103808 data_used: 12206080
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025eb94400 session 0x56025c208000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025eb94800 session 0x56025c925680
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:47.708715+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4f9e7c000/0x0/0x4ffc00000, data 0x1b38b06/0x1c01000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101736448 unmapped: 20856832 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:48.709077+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4fae4a000/0x0/0x4ffc00000, data 0xb6baa4/0xc33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [0,0,2])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 ms_handle_reset con 0x56025c22c000 session 0x56025c0e25a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4fae88000/0x0/0x4ffc00000, data 0xb2fa32/0xbf5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101416960 unmapped: 21176320 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:49.709435+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101416960 unmapped: 21176320 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:50.709834+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101416960 unmapped: 21176320 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:51.710208+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101416960 unmapped: 21176320 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1099335 data_alloc: 218103808 data_used: 7000064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:52.710600+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101416960 unmapped: 21176320 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4fae88000/0x0/0x4ffc00000, data 0xb2fa32/0xbf5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:53.710926+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101416960 unmapped: 21176320 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:54.711340+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101416960 unmapped: 21176320 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:55.711621+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101416960 unmapped: 21176320 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:56.712047+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4fae88000/0x0/0x4ffc00000, data 0xb2fa32/0xbf5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101416960 unmapped: 21176320 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1099335 data_alloc: 218103808 data_used: 7000064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:57.712425+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101416960 unmapped: 21176320 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:58.712767+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101416960 unmapped: 21176320 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:31:59.713341+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101416960 unmapped: 21176320 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:00.713728+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101416960 unmapped: 21176320 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:01.714071+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101416960 unmapped: 21176320 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1099335 data_alloc: 218103808 data_used: 7000064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:02.714426+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4fae88000/0x0/0x4ffc00000, data 0xb2fa32/0xbf5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101416960 unmapped: 21176320 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:03.714637+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101416960 unmapped: 21176320 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:04.714990+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101425152 unmapped: 21168128 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:05.715344+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4fae88000/0x0/0x4ffc00000, data 0xb2fa32/0xbf5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101425152 unmapped: 21168128 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:06.715826+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101425152 unmapped: 21168128 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1099335 data_alloc: 218103808 data_used: 7000064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:07.716060+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101425152 unmapped: 21168128 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:08.716331+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101425152 unmapped: 21168128 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:09.716699+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4fae88000/0x0/0x4ffc00000, data 0xb2fa32/0xbf5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101425152 unmapped: 21168128 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:10.716925+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101425152 unmapped: 21168128 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:11.718191+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101433344 unmapped: 21159936 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1099335 data_alloc: 218103808 data_used: 7000064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:12.718499+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4fae88000/0x0/0x4ffc00000, data 0xb2fa32/0xbf5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101433344 unmapped: 21159936 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:13.718776+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101433344 unmapped: 21159936 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:14.719025+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4fae88000/0x0/0x4ffc00000, data 0xb2fa32/0xbf5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101433344 unmapped: 21159936 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:15.719461+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101433344 unmapped: 21159936 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:16.719717+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101433344 unmapped: 21159936 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1099335 data_alloc: 218103808 data_used: 7000064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:17.719961+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101433344 unmapped: 21159936 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:18.720307+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101433344 unmapped: 21159936 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:19.720676+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101433344 unmapped: 21159936 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:20.720983+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4fae88000/0x0/0x4ffc00000, data 0xb2fa32/0xbf5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101433344 unmapped: 21159936 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:21.721408+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101433344 unmapped: 21159936 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1099335 data_alloc: 218103808 data_used: 7000064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:22.721784+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 35.305160522s of 35.668144226s, submitted: 51
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101457920 unmapped: 21135360 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:23.722206+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101457920 unmapped: 21135360 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:24.722653+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101474304 unmapped: 21118976 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:25.722886+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101474304 unmapped: 21118976 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:26.723424+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4fae88000/0x0/0x4ffc00000, data 0xb2fa55/0xbf6000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100769792 unmapped: 21823488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:27.723670+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1101124 data_alloc: 218103808 data_used: 7000064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 heartbeat osd_stat(store_statfs(0x4fae88000/0x0/0x4ffc00000, data 0xb2fa55/0xbf6000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 129 handle_osd_map epochs [129,130], i have 129, src has [1,130]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100769792 unmapped: 21823488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025c22c000 session 0x56025d75f4a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:28.723857+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fae84000/0x0/0x4ffc00000, data 0xb315d2/0xbf9000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100769792 unmapped: 21823488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:29.724344+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100769792 unmapped: 21823488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:30.724611+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100769792 unmapped: 21823488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:31.724955+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fae84000/0x0/0x4ffc00000, data 0xb315d2/0xbf9000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100769792 unmapped: 21823488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:32.725474+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1105130 data_alloc: 218103808 data_used: 7008256
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fae84000/0x0/0x4ffc00000, data 0xb315d2/0xbf9000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100769792 unmapped: 21823488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:33.725794+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100769792 unmapped: 21823488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:34.725980+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fae84000/0x0/0x4ffc00000, data 0xb315d2/0xbf9000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100769792 unmapped: 21823488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:35.726499+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100769792 unmapped: 21823488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:36.726952+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025ba3c400 session 0x56025d75f680
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94000 session 0x56025a297860
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94400 session 0x56025a296960
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100769792 unmapped: 21823488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:37.727430+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1105130 data_alloc: 218103808 data_used: 7008256
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100769792 unmapped: 21823488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:38.727639+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94800 session 0x560259c2f0e0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 16.167192459s of 16.237197876s, submitted: 10
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94800 session 0x56025c222b40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:39.727902+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 102735872 unmapped: 26673152 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025ba3c400 session 0x56025c0e2b40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025c22c000 session 0x56025a4741e0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94000 session 0x56025a2bc3c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94400 session 0x56025c0d23c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025ba3c400 session 0x56025c209e00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025c22c000 session 0x56025a297e00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94000 session 0x56025a2963c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:40.728198+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 102670336 unmapped: 26738688 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94800 session 0x56025a2972c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa6c1000/0x0/0x4ffc00000, data 0x12f4634/0x13bd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3000.1 total, 600.0 interval
                                            Cumulative writes: 7474 writes, 29K keys, 7474 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.01 MB/s
                                            Cumulative WAL: 7474 writes, 1672 syncs, 4.47 writes per sync, written: 0.02 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 991 writes, 3371 keys, 991 commit groups, 1.0 writes per commit group, ingest: 2.86 MB, 0.00 MB/s
                                            Interval WAL: 991 writes, 421 syncs, 2.35 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:41.728616+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 102670336 unmapped: 26738688 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa6c1000/0x0/0x4ffc00000, data 0x12f4634/0x13bd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025c22c400 session 0x56025a296d20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:42.728860+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 102670336 unmapped: 26738688 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1168958 data_alloc: 218103808 data_used: 7008256
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025c22c400 session 0x56025a79b860
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025ba3c400 session 0x56025a79ab40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:43.729328+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 102686720 unmapped: 26722304 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025c22c000 session 0x56025c07c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa69b000/0x0/0x4ffc00000, data 0x1318667/0x13e3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:44.729630+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101965824 unmapped: 27443200 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:45.730059+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa69b000/0x0/0x4ffc00000, data 0x1318667/0x13e3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101974016 unmapped: 27435008 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:46.730434+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101974016 unmapped: 27435008 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:47.730710+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101974016 unmapped: 27435008 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1177097 data_alloc: 218103808 data_used: 7208960
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa69b000/0x0/0x4ffc00000, data 0x1318667/0x13e3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:48.730905+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101998592 unmapped: 27410432 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:49.731500+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:50.731946+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:51.732420+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa69b000/0x0/0x4ffc00000, data 0x1318667/0x13e3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:52.732827+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1231817 data_alloc: 218103808 data_used: 14794752
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:53.733060+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:54.733309+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:55.733551+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:56.733986+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:57.734458+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1231817 data_alloc: 218103808 data_used: 14794752
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa69b000/0x0/0x4ffc00000, data 0x1318667/0x13e3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:58.734719+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:59.735084+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:00.735364+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:01.735760+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:02.736171+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1231817 data_alloc: 218103808 data_used: 14794752
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa69b000/0x0/0x4ffc00000, data 0x1318667/0x13e3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:03.736448+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa69b000/0x0/0x4ffc00000, data 0x1318667/0x13e3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:04.736913+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:05.737510+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:06.737835+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 27.275114059s of 27.844760895s, submitted: 51
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94000 session 0x5602598d4780
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94800 session 0x56025c2201e0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:07.738792+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100073472 unmapped: 29335552 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1114758 data_alloc: 218103808 data_used: 7008256
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025ba3c400 session 0x56025b97fe00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:08.739163+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100114432 unmapped: 29294592 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fae5f000/0x0/0x4ffc00000, data 0xb315d2/0xbf9000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:09.739407+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100114432 unmapped: 29294592 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 130 handle_osd_map epochs [130,131], i have 130, src has [1,131]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:10.739911+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100106240 unmapped: 29302784 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 131 ms_handle_reset con 0x56025c22c000 session 0x56025b6d1860
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:11.740970+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100106240 unmapped: 29302784 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:12.741964+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100106240 unmapped: 29302784 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1118674 data_alloc: 218103808 data_used: 7016448
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:13.742635+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 131 heartbeat osd_stat(store_statfs(0x4faa71000/0x0/0x4ffc00000, data 0xb33180/0xbfb000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100106240 unmapped: 29302784 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:14.742845+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100106240 unmapped: 29302784 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:15.743637+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100106240 unmapped: 29302784 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:16.744653+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100106240 unmapped: 29302784 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _renew_subs
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 131 handle_osd_map epochs [132,132], i have 131, src has [1,132]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 9.644417763s of 10.028837204s, submitted: 71
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:17.745458+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 132 heartbeat osd_stat(store_statfs(0x4faa6f000/0x0/0x4ffc00000, data 0xb34be3/0xbfe000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100089856 unmapped: 29319168 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1121616 data_alloc: 218103808 data_used: 7020544
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 132 heartbeat osd_stat(store_statfs(0x4faa6f000/0x0/0x4ffc00000, data 0xb34be3/0xbfe000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:18.746521+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100089856 unmapped: 29319168 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:19.746907+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100089856 unmapped: 29319168 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:20.747547+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100089856 unmapped: 29319168 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 132 heartbeat osd_stat(store_statfs(0x4faa6f000/0x0/0x4ffc00000, data 0xb34be3/0xbfe000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:21.748046+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100089856 unmapped: 29319168 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:22.748748+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100089856 unmapped: 29319168 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1122457 data_alloc: 218103808 data_used: 7020544
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:23.749378+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100081664 unmapped: 29327360 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:24.749756+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100081664 unmapped: 29327360 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 132 handle_osd_map epochs [132,133], i have 132, src has [1,133]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:25.750015+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 133 ms_handle_reset con 0x56025c22c400 session 0x56025c209680
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100147200 unmapped: 45088768 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:26.750531+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100147200 unmapped: 45088768 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 133 heartbeat osd_stat(store_statfs(0x4fa2df000/0x0/0x4ffc00000, data 0x12c2183/0x138e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:27.750888+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100147200 unmapped: 45088768 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1180503 data_alloc: 218103808 data_used: 7028736
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:28.751421+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100147200 unmapped: 45088768 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:29.751714+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100147200 unmapped: 45088768 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:30.752052+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100147200 unmapped: 45088768 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:31.752351+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100147200 unmapped: 45088768 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:32.752607+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 15.445683479s of 15.610335350s, submitted: 25
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 133 heartbeat osd_stat(store_statfs(0x4fa2e1000/0x0/0x4ffc00000, data 0x12c2160/0x138d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100163584 unmapped: 45072384 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1178957 data_alloc: 218103808 data_used: 7028736
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _renew_subs
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 133 handle_osd_map epochs [134,134], i have 133, src has [1,134]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:33.752780+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 134 ms_handle_reset con 0x56025eb94000 session 0x56025c924780
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100196352 unmapped: 45039616 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:34.753001+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100196352 unmapped: 45039616 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:35.753274+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100196352 unmapped: 45039616 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 134 heartbeat osd_stat(store_statfs(0x4faa69000/0x0/0x4ffc00000, data 0xb38331/0xc04000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:36.753821+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100204544 unmapped: 45031424 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:37.754327+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100204544 unmapped: 45031424 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1131291 data_alloc: 218103808 data_used: 7036928
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:38.754801+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100204544 unmapped: 45031424 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 134 heartbeat osd_stat(store_statfs(0x4faa69000/0x0/0x4ffc00000, data 0xb38331/0xc04000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:39.755082+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:40.755356+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 134 handle_osd_map epochs [134,135], i have 134, src has [1,135]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:41.755732+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:42.756729+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1134265 data_alloc: 218103808 data_used: 7036928
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:43.757051+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:44.757421+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:45.757729+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:46.758407+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:47.758701+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1134425 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:48.759000+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:49.759328+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:50.759686+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:51.760041+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:52.760361+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1134425 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:53.760740+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:54.761108+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:55.761494+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:56.761995+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:57.762208+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1134425 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:58.762661+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:59.763061+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:00.763400+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:01.763603+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:02.763961+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1134425 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:03.764203+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:04.764555+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:05.764797+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:06.765208+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:07.765620+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1134425 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:08.765832+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:09.766187+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:10.766657+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:11.766846+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:12.767540+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1134425 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:13.767954+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:14.768177+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:15.768543+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:16.768848+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:17.769341+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1134425 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:18.769647+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:19.769994+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 47.773204803s of 47.967689514s, submitted: 49
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:20.770561+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100220928 unmapped: 45015040 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:21.771038+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100253696 unmapped: 44982272 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:22.771527+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100294656 unmapped: 44941312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133617 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:23.771769+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:24.772492+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:25.772931+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:26.773275+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:27.773727+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:28.774051+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:29.774322+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:30.774630+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:31.774969+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:32.775535+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:33.775803+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:34.776160+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:35.776447+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:36.776920+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:37.777433+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:38.777667+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:39.778186+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:40.778487+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:41.778699+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:42.778976+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:43.779378+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:44.779591+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:45.779970+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:46.780637+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:47.781102+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:48.781527+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:49.781842+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:50.782374+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:51.782770+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:52.783125+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:53.783424+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:54.783646+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:55.783984+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:56.784458+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:57.784688+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:58.785065+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:59.785449+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:00.785876+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:01.786348+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:02.786765+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:03.787027+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:04.787487+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:05.787646+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:06.787961+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:07.788414+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:08.788799+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
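
Each heartbeat line carries the OSD's store_statfs counters as hex byte counts. Reading the first triple as available/internally-reserved/total is an assumption about ceph's store_statfs_t printer, but the magnitudes fit that reading: 0x4ffc00000 is just under 20 GiB (a plausible OSD volume size), and the ~81.6 MiB gap between it and 0x4faa67000 matches the allocated-data, meta, and omap figures on the same line to within 8 KiB. A hedged sketch decoding one of these lines:

    import re

    LINE = ("osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, "
            "data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, "
            "omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])")

    vals = [int(h, 16) for h in re.findall(r"0x[0-9a-f]+", LINE)]
    avail, _internal, total = vals[0:3]   # first store_statfs triple (assumed order)
    data_stored, data_alloc = vals[3:5]   # "data stored/allocated" pair
    omap, meta = vals[8], vals[9]         # trailing omap and meta counters

    gib = 2**30
    print(f"total {total / gib:.2f} GiB, available {avail / gib:.2f} GiB")
    print(f"gap {total - avail} B vs data_alloc+meta+omap "
          f"{data_alloc + meta + omap} B")

That prints total 20.00 GiB, available 19.92 GiB, and a gap of 85561344 B against 85553152 B of accounted allocations, so the heartbeat is internally consistent across this whole stretch.
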
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:09.789358+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:10.789621+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:11.789938+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:12.790394+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:13.790792+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:14.791453+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:15.791875+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:16.792364+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:17.792643+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:18.793054+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:19.793532+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:20.793902+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:21.794328+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:22.794681+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:23.795078+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:24.795526+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:25.795922+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:26.796221+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:27.796538+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:28.796819+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:29.797197+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:30.797867+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:31.798101+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:32.798407+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:33.798766+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:34.799139+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:35.799496+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:36.799979+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:37.800449+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:38.800700+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:39.801129+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:40.801389+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:41.801772+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:42.802000+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:43.802364+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:44.802609+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:45.802838+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:46.803196+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:47.803597+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:48.803794+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:49.804325+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:50.804623+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:51.804863+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:52.805102+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:53.805359+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:54.805567+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:55.805827+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:56.806462+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:57.806687+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:58.806898+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:59.807395+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:00.807791+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:01.808069+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:02.808314+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:03.808694+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:04.809051+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:05.809388+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:06.810023+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:07.810419+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:08.810665+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:09.810892+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:10.811107+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:11.811487+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:12.811892+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:13.812348+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:14.812743+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:15.813138+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:16.813691+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:17.813993+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:18.814422+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:19.814822+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:20.815206+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:21.815657+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:22.816008+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:23.816597+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:24.817021+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:25.817499+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:26.817891+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:27.818365+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:28.818796+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:29.819145+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:30.819562+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:31.820032+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:32.820444+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:33.820834+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:34.821195+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:35.821630+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:36.822112+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:37.822496+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:38.822908+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:39.823357+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:40.823548+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:41.823940+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:42.824407+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:43.824895+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:44.825332+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:45.825717+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:46.826218+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:47.826656+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:48.826960+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:49.827400+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:50.827783+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:51.828184+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:52.828511+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:53.828869+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:54.829374+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:55.829828+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:56.830182+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:57.830590+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:58.830805+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:59.831034+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:00.831356+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:01.831756+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:02.832100+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:03.832383+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:04.832580+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:05.832832+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:06.833316+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:07.833585+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:08.833786+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:09.834029+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:10.834383+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:11.834566+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:12.834960+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:13.835465+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:14.835749+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:15.835995+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:16.836538+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:17.836916+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:18.837727+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:19.838123+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:20.838602+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:21.838984+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:22.839334+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:23.839706+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:24.840024+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:25.840347+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:26.840673+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:27.841029+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:28.841299+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:29.841489+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:30.841885+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:31.842330+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:32.842808+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:33.843174+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:34.843592+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22d800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 193.742782593s of 194.405731201s, submitted: 106
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100352000 unmapped: 44883968 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:35.843964+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 ms_handle_reset con 0x56025c22d800 session 0x56025db3be00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100352000 unmapped: 44883968 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:36.844446+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 135 handle_osd_map epochs [136,136], i have 135, src has [1,136]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 136 heartbeat osd_stat(store_statfs(0x4fa267000/0x0/0x4ffc00000, data 0x1339d94/0x1407000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 136 handle_osd_map epochs [136,137], i have 136, src has [1,137]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100376576 unmapped: 44859392 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:37.844899+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100376576 unmapped: 44859392 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:38.845374+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:39.845811+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:40.846018+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:41.846436+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:42.846663+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:43.846962+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:44.847182+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:45.847534+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:46.848009+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:47.848475+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:48.848814+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:49.849215+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:50.849528+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:51.849897+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:52.850141+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:53.850565+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:54.851009+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:55.851514+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:56.851941+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:57.852178+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:58.852420+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:59.852797+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:00.853339+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:01.853720+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:02.854042+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:03.854331+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:04.854727+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:05.855325+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:06.855847+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:07.856401+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:08.856794+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:09.857158+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:10.857374+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:11.857674+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:12.858142+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:13.858381+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:14.858888+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:15.859121+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:16.859696+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:17.859974+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:18.860469+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:19.864997+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:20.865375+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:21.865819+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:22.866164+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:23.866459+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:24.866849+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:25.867212+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:26.867599+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:27.867870+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100204544 unmapped: 45031424 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:28.868107+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100204544 unmapped: 45031424 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:29.868487+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100204544 unmapped: 45031424 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:30.868878+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100204544 unmapped: 45031424 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:31.869106+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107479040 unmapped: 37756928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:32.869620+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025ba3c400 session 0x56025c220780
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107479040 unmapped: 37756928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025eb94c00 session 0x56025c276d20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025eb95000 session 0x56025a297e00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7b000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7b000 session 0x56025a2972c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:33.869969+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107479040 unmapped: 37756928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7ac00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7ac00 session 0x56025a296d20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:34.870460+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107479040 unmapped: 37756928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1216693 data_alloc: 218103808 data_used: 13864960
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025ba3c400 session 0x56025a297860
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7b000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7b000 session 0x56025d75f0e0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:35.870851+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107462656 unmapped: 37773312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:36.871503+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107462656 unmapped: 37773312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:37.871904+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107462656 unmapped: 37773312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:38.872158+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107462656 unmapped: 37773312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:39.872617+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107462656 unmapped: 37773312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1216693 data_alloc: 218103808 data_used: 13864960
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:40.873604+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107462656 unmapped: 37773312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:41.874090+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107462656 unmapped: 37773312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 67.453536987s of 67.516914368s, submitted: 5
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025eb95000 session 0x56025c063680
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7a800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723c00 session 0x560259c025a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:42.874489+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107479040 unmapped: 37756928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:43.874956+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108576768 unmapped: 36659200 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:44.875393+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 118153216 unmapped: 27082752 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1331760 data_alloc: 218103808 data_used: 13864960
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:45.875715+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112107520 unmapped: 33128448 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f981d000/0x0/0x4ffc00000, data 0x1d8049d/0x1e51000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [0,0,0,0,0,0,0,0,0,14])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:46.876156+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025eb94c00 session 0x56025d75e1e0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 36577280 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723c00 session 0x56025c07de00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025ba3c400 session 0x56025b96d2c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7b000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7a800 session 0x56025c2083c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:47.876507+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723800 session 0x56025d75eb40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723400 session 0x560259c03860
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108290048 unmapped: 36945920 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723400 session 0x56025c0d2d20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722800 session 0x56025d75fe00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:48.876807+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108290048 unmapped: 36945920 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8dbf000/0x0/0x4ffc00000, data 0x27de49d/0x28af000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [0,0,0,0,0,0,0,0,1])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:49.877365+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7b000 session 0x56025a2b34a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723000 session 0x56025b6d1e00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108298240 unmapped: 36937728 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1398318 data_alloc: 218103808 data_used: 13869056
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723c00 session 0x56025a788b40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:50.877817+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722c00 session 0x56025c0e6f00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108298240 unmapped: 36937728 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722800 session 0x56025a4754a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723c00 session 0x560259b52000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:51.878047+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723000 session 0x5602598d4780
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108265472 unmapped: 36970496 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723400 session 0x56025a79b860
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723800 session 0x56025a79ab40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723c00 session 0x56025999ab40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:52.878333+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723000 session 0x56025c2774a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8dbf000/0x0/0x4ffc00000, data 0x27de49d/0x28af000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108265472 unmapped: 36970496 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722800 session 0x56025d90e1e0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 6.248156548s of 10.819179535s, submitted: 63
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723400 session 0x56025c07c5a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8dbf000/0x0/0x4ffc00000, data 0x27de49d/0x28af000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722800 session 0x56025c2234a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:53.878557+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8dbf000/0x0/0x4ffc00000, data 0x27de49d/0x28af000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [0,0,0,0,0,0,1])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723800 session 0x560259b1fc20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108265472 unmapped: 36970496 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723c00 session 0x560259b1e960
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7a800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7b000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:54.878968+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108281856 unmapped: 36954112 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722c00 session 0x56025c222960
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723000 session 0x56025a79b2c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1408982 data_alloc: 218103808 data_used: 13869056
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7a800 session 0x56025a8723c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7b000 session 0x560259b22960
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:55.879399+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108634112 unmapped: 36601856 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:56.879639+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 36577280 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:57.880002+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 36552704 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:58.880219+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8d91000/0x0/0x4ffc00000, data 0x2808503/0x28dd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 36552704 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:59.880450+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8d91000/0x0/0x4ffc00000, data 0x2808503/0x28dd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 109608960 unmapped: 35627008 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1443276 data_alloc: 234881024 data_used: 18096128
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:00.880766+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112582656 unmapped: 32653312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8d91000/0x0/0x4ffc00000, data 0x2808503/0x28dd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:01.880978+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8d91000/0x0/0x4ffc00000, data 0x2808503/0x28dd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 116989952 unmapped: 28246016 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:02.881205+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 119250944 unmapped: 25985024 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:03.881463+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 119250944 unmapped: 25985024 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:04.881703+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8d91000/0x0/0x4ffc00000, data 0x2808503/0x28dd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 119250944 unmapped: 25985024 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1531276 data_alloc: 234881024 data_used: 29814784
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:05.881899+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121102336 unmapped: 24133632 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:06.882127+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8d91000/0x0/0x4ffc00000, data 0x2808503/0x28dd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121135104 unmapped: 24100864 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:07.882337+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121659392 unmapped: 23576576 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:08.882512+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121659392 unmapped: 23576576 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:09.882718+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025eb95000 session 0x56025a6e1a40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 15.274926186s of 16.816574097s, submitted: 28
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025eb94000 session 0x5602598d5a40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121659392 unmapped: 23576576 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1547784 data_alloc: 234881024 data_used: 31969280
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cda1000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:10.882933+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cda1000 session 0x56025c0e2780
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121675776 unmapped: 23560192 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:11.883349+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8f9f000/0x0/0x4ffc00000, data 0x25fb4f3/0x26cf000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121716736 unmapped: 23519232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:12.883707+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723c00 session 0x56025bfba960
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025ba3c400 session 0x56025c07c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7a800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121716736 unmapped: 23519232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:13.883901+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7a800 session 0x56025c0623c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120741888 unmapped: 24494080 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7b000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7b000 session 0x560259b1ed20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cda1000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cda1000 session 0x56025c0e2f00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723c00 session 0x56025c0e23c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025ba3c400 session 0x56025a2b2780
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:14.884109+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7a800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7a800 session 0x56025c063c20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7b000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7b000 session 0x5602598bcf00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121167872 unmapped: 24068096 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1516696 data_alloc: 234881024 data_used: 29810688
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:15.884356+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121167872 unmapped: 24068096 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:16.884600+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121167872 unmapped: 24068096 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:17.884915+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8f11000/0x0/0x4ffc00000, data 0x2688532/0x275c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121200640 unmapped: 24035328 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:18.885167+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121200640 unmapped: 24035328 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:19.885415+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121208832 unmapped: 24027136 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1516696 data_alloc: 234881024 data_used: 29810688
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:20.885625+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121208832 unmapped: 24027136 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:21.885955+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121208832 unmapped: 24027136 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:22.886186+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 12.641468048s of 13.075722694s, submitted: 79
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8f11000/0x0/0x4ffc00000, data 0x2688532/0x275c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025eb94000 session 0x56025a6e12c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121217024 unmapped: 24018944 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:23.886512+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121217024 unmapped: 24018944 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:24.886714+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121217024 unmapped: 24018944 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1517309 data_alloc: 234881024 data_used: 29810688
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:25.887028+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121241600 unmapped: 23994368 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:26.887345+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8f11000/0x0/0x4ffc00000, data 0x2688555/0x275d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122454016 unmapped: 22781952 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:27.887598+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122527744 unmapped: 22708224 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:28.887823+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122552320 unmapped: 22683648 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:29.888006+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8f11000/0x0/0x4ffc00000, data 0x2688555/0x275d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122585088 unmapped: 22650880 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1537789 data_alloc: 234881024 data_used: 32591872
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:30.888264+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122585088 unmapped: 22650880 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:31.888504+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122585088 unmapped: 22650880 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:32.888750+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 9.404064178s of 10.049774170s, submitted: 7
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122667008 unmapped: 22568960 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:33.888947+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123805696 unmapped: 21430272 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:34.889156+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8e36000/0x0/0x4ffc00000, data 0x275b555/0x2830000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 129884160 unmapped: 15351808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1611137 data_alloc: 234881024 data_used: 32829440
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:35.889407+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 130719744 unmapped: 14516224 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:36.889711+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 132194304 unmapped: 13041664 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:37.889911+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 132268032 unmapped: 12967936 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:38.890127+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 132268032 unmapped: 12967936 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:39.890576+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8152000/0x0/0x4ffc00000, data 0x343f555/0x3514000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 132276224 unmapped: 12959744 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1662761 data_alloc: 234881024 data_used: 33730560
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:40.890758+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 132284416 unmapped: 12951552 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:41.891003+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 132284416 unmapped: 12951552 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:42.891209+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131956736 unmapped: 13279232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:43.900793+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131956736 unmapped: 13279232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:44.901025+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131956736 unmapped: 13279232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8138000/0x0/0x4ffc00000, data 0x3461555/0x3536000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1654897 data_alloc: 234881024 data_used: 33730560
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:45.901404+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7b000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 11.470405579s of 13.082652092s, submitted: 158
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 138895360 unmapped: 10018816 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7b000 session 0x56025c0621e0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:46.901695+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025eb95000 session 0x56025a2bdc20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cda0c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cda0c00 session 0x56025c2225a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22cc00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025c22cc00 session 0x5602598bd4a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025c22c400 session 0x56025c924d20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 15556608 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:47.902020+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 15556608 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:48.902341+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133373952 unmapped: 15540224 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:49.902591+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133382144 unmapped: 15532032 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1729517 data_alloc: 234881024 data_used: 33730560
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:50.902803+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f792c000/0x0/0x4ffc00000, data 0x3c6c5b7/0x3d42000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133382144 unmapped: 15532032 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:51.903029+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025c22c400 session 0x56025c0e6b40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 15556608 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:52.903440+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f792b000/0x0/0x4ffc00000, data 0x3c6c5da/0x3d43000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 15556608 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22cc00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:53.903683+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 15556608 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:54.903890+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 15556608 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1729922 data_alloc: 234881024 data_used: 33742848
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:55.904093+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 132521984 unmapped: 16392192 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:56.904367+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 10.772935867s of 11.063573837s, submitted: 61
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025c22cc00 session 0x56025b97ed20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 132898816 unmapped: 16015360 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:57.904587+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7b000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7b000 session 0x560259b1ed20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8133000/0x0/0x4ffc00000, data 0x3464578/0x353a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131457024 unmapped: 17457152 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:58.906139+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8133000/0x0/0x4ffc00000, data 0x3464578/0x353a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8133000/0x0/0x4ffc00000, data 0x3464555/0x3539000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131457024 unmapped: 17457152 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:59.906406+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8133000/0x0/0x4ffc00000, data 0x3464555/0x3539000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131465216 unmapped: 17448960 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1664360 data_alloc: 234881024 data_used: 32956416
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:00.906799+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 134193152 unmapped: 14721024 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:01.907634+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f7676000/0x0/0x4ffc00000, data 0x3f1b555/0x3ff0000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 134201344 unmapped: 14712832 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:02.908005+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 134389760 unmapped: 14524416 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:03.908322+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f75cf000/0x0/0x4ffc00000, data 0x3fc2555/0x4097000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 134389760 unmapped: 14524416 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:04.908590+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 134389760 unmapped: 14524416 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1762668 data_alloc: 234881024 data_used: 33112064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:05.909005+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 134397952 unmapped: 14516224 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:06.909602+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 9.508942604s of 10.104267120s, submitted: 143
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133242880 unmapped: 15671296 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:07.909948+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f75d1000/0x0/0x4ffc00000, data 0x3fc8555/0x409d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133382144 unmapped: 15532032 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:08.910438+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722c00 session 0x56025c07c960
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133390336 unmapped: 15523840 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:09.910670+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133390336 unmapped: 15523840 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:10.910893+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1760452 data_alloc: 234881024 data_used: 33099776
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f75ac000/0x0/0x4ffc00000, data 0x3fed555/0x40c2000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cda0c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133398528 unmapped: 15515648 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:11.911148+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133398528 unmapped: 15515648 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:12.911456+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:13.911735+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133398528 unmapped: 15515648 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:14.911966+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133611520 unmapped: 15302656 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:15.912200+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131956736 unmapped: 16957440 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1758984 data_alloc: 234881024 data_used: 33935360
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:16.912529+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131956736 unmapped: 16957440 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f75ac000/0x0/0x4ffc00000, data 0x3fed555/0x40c2000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:17.912755+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131956736 unmapped: 16957440 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722800 session 0x56025c0ec000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723800 session 0x56025b6d0f00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:18.913034+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131915776 unmapped: 16998400 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 11.391749382s of 11.534832001s, submitted: 32
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722800 session 0x56025c0e25a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:19.914336+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f75ac000/0x0/0x4ffc00000, data 0x3fed555/0x40c2000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 handle_osd_map epochs [138,138], i have 137, src has [1,138]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 137 handle_osd_map epochs [138,138], i have 138, src has [1,138]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125747200 unmapped: 23166976 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:20.914603+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125747200 unmapped: 23166976 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1541034 data_alloc: 234881024 data_used: 24788992
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:21.914878+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 138 handle_osd_map epochs [138,139], i have 138, src has [1,139]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125812736 unmapped: 23101440 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 139 heartbeat osd_stat(store_statfs(0x4f8954000/0x0/0x4ffc00000, data 0x2c45c61/0x2d1a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 139 ms_handle_reset con 0x56025b722c00 session 0x56025b97e780
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 139 heartbeat osd_stat(store_statfs(0x4f8954000/0x0/0x4ffc00000, data 0x2c45c61/0x2d1a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:22.915146+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125812736 unmapped: 23101440 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:23.915408+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125812736 unmapped: 23101440 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:24.915652+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125820928 unmapped: 23093248 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:25.915969+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125820928 unmapped: 23093248 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1543156 data_alloc: 234881024 data_used: 24793088
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:26.916486+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125820928 unmapped: 23093248 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:27.916908+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125820928 unmapped: 23093248 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 139 heartbeat osd_stat(store_statfs(0x4f8954000/0x0/0x4ffc00000, data 0x2c45c61/0x2d1a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 139 heartbeat osd_stat(store_statfs(0x4f8954000/0x0/0x4ffc00000, data 0x2c45c61/0x2d1a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:28.917409+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125820928 unmapped: 23093248 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 139 heartbeat osd_stat(store_statfs(0x4f8954000/0x0/0x4ffc00000, data 0x2c45c61/0x2d1a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:29.917711+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125820928 unmapped: 23093248 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:30.918075+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125829120 unmapped: 23085056 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1543156 data_alloc: 234881024 data_used: 24793088
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _renew_subs
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 139 handle_osd_map epochs [140,140], i have 139, src has [1,140]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 12.436845779s of 12.615279198s, submitted: 41
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:31.918405+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125181952 unmapped: 23732224 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:32.918836+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125181952 unmapped: 23732224 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:33.919214+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125181952 unmapped: 23732224 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:34.919533+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125181952 unmapped: 23732224 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f894f000/0x0/0x4ffc00000, data 0x2c486c4/0x2d1e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:35.919842+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125181952 unmapped: 23732224 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1547110 data_alloc: 234881024 data_used: 24801280
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:36.920187+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125181952 unmapped: 23732224 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:37.920397+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125272064 unmapped: 23642112 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b723c00 session 0x56025c062b40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:38.921483+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125296640 unmapped: 23617536 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025c22c400 session 0x56025d75f860
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:39.921691+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 26025984 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:40.922029+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 26025984 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1415394 data_alloc: 234881024 data_used: 21823488
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f9129000/0x0/0x4ffc00000, data 0x1dcb63f/0x1e9f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:41.922431+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 26025984 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:42.922664+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 26025984 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f9129000/0x0/0x4ffc00000, data 0x1dcb63f/0x1e9f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:43.922844+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 26025984 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:44.923031+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 26025984 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:45.923254+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f9129000/0x0/0x4ffc00000, data 0x1dcb63f/0x1e9f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 26025984 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1415394 data_alloc: 234881024 data_used: 21823488
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 14.772927284s of 14.955487251s, submitted: 47
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:46.923490+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 26001408 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:47.923889+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122929152 unmapped: 25985024 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:48.924134+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122929152 unmapped: 25985024 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:49.924479+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122929152 unmapped: 25985024 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:50.924750+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122929152 unmapped: 25985024 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1420990 data_alloc: 234881024 data_used: 22102016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f97cf000/0x0/0x4ffc00000, data 0x1dcb63f/0x1e9f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:51.924951+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025c22c400 session 0x56025b97f4a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124108800 unmapped: 24805376 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b722800 session 0x56025a7bda40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:52.925387+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124436480 unmapped: 24477696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f9430000/0x0/0x4ffc00000, data 0x21696a1/0x223e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:53.925875+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124436480 unmapped: 24477696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:54.926320+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b722c00 session 0x560259b1e960
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124436480 unmapped: 24477696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b723800 session 0x56025d75e3c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:55.926715+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124436480 unmapped: 24477696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1450013 data_alloc: 234881024 data_used: 22102016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b723c00 session 0x56025d75f4a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:56.927065+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 9.964710236s of 10.208685875s, submitted: 51
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b723c00 session 0x56025d75fe00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:57.927305+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:58.927615+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f940c000/0x0/0x4ffc00000, data 0x218d6a1/0x2262000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:59.928089+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:00.928314+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f940c000/0x0/0x4ffc00000, data 0x218d6a1/0x2262000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1465806 data_alloc: 234881024 data_used: 23711744
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:01.928529+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:02.928822+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:03.929309+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f940c000/0x0/0x4ffc00000, data 0x218d6a1/0x2262000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:04.929608+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:05.929986+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481950 data_alloc: 234881024 data_used: 25804800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:06.930496+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 10.207981110s of 10.273481369s, submitted: 18
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f940c000/0x0/0x4ffc00000, data 0x218d6a1/0x2262000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:07.930949+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:08.931425+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124788736 unmapped: 24125440 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:09.931675+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124796928 unmapped: 24117248 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:10.932047+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124796928 unmapped: 24117248 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480718 data_alloc: 234881024 data_used: 25804800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:11.932427+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124805120 unmapped: 24109056 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f940c000/0x0/0x4ffc00000, data 0x218d6a1/0x2262000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:12.932800+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124805120 unmapped: 24109056 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:13.933196+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124805120 unmapped: 24109056 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:14.933431+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124805120 unmapped: 24109056 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:15.933707+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124805120 unmapped: 24109056 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480718 data_alloc: 234881024 data_used: 25804800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:16.934176+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124805120 unmapped: 24109056 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:17.934467+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f940c000/0x0/0x4ffc00000, data 0x218d6a1/0x2262000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 10.966763496s of 10.988698959s, submitted: 3
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025cda0c00 session 0x56025be8ef00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124805120 unmapped: 24109056 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:18.934678+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120258560 unmapped: 28655616 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b723800 session 0x56025c066960
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:19.934958+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:20.935156+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1340116 data_alloc: 234881024 data_used: 17596416
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f9e95000/0x0/0x4ffc00000, data 0x17046a1/0x17d9000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:21.935516+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:22.935908+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:23.936442+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:24.936735+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:25.937129+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1340116 data_alloc: 234881024 data_used: 17596416
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:26.937529+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f9e95000/0x0/0x4ffc00000, data 0x17046a1/0x17d9000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:27.937779+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:28.938089+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:29.938361+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f9e95000/0x0/0x4ffc00000, data 0x17046a1/0x17d9000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:30.938590+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 12.631653786s of 12.728900909s, submitted: 25
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122511360 unmapped: 26402816 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1399472 data_alloc: 234881024 data_used: 17620992
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:31.938838+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122560512 unmapped: 26353664 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:32.939087+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121290752 unmapped: 27623424 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f971b000/0x0/0x4ffc00000, data 0x1e7e6a1/0x1f53000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:33.939478+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:34.939694+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f971b000/0x0/0x4ffc00000, data 0x1e7e6a1/0x1f53000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:35.940052+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1411456 data_alloc: 234881024 data_used: 18411520
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:36.940461+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:37.940863+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:38.941312+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:39.941596+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:40.942126+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1411472 data_alloc: 234881024 data_used: 18411520
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f971b000/0x0/0x4ffc00000, data 0x1e7e6a1/0x1f53000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:41.942508+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f971b000/0x0/0x4ffc00000, data 0x1e7e6a1/0x1f53000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:42.942927+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:43.943213+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121364480 unmapped: 27549696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:44.943639+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121364480 unmapped: 27549696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:45.943889+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121364480 unmapped: 27549696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1411472 data_alloc: 234881024 data_used: 18411520
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:46.944382+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f971b000/0x0/0x4ffc00000, data 0x1e7e6a1/0x1f53000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121364480 unmapped: 27549696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:47.944662+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121364480 unmapped: 27549696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:48.945018+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121364480 unmapped: 27549696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:49.945495+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121364480 unmapped: 27549696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:50.945808+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121372672 unmapped: 27541504 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1411472 data_alloc: 234881024 data_used: 18411520
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:51.946128+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121372672 unmapped: 27541504 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:52.946361+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 21.937910080s of 22.197715759s, submitted: 64
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f971b000/0x0/0x4ffc00000, data 0x1e7e6a1/0x1f53000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [1,0,0,0,1,3,1])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025c22c400 session 0x56025a2b25a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122314752 unmapped: 34480128 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:53.946694+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122314752 unmapped: 34480128 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:54.946890+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122314752 unmapped: 34480128 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:55.947166+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b62000/0x0/0x4ffc00000, data 0x2a376a1/0x2b0c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122314752 unmapped: 34480128 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1503518 data_alloc: 234881024 data_used: 18415616
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:56.947525+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122314752 unmapped: 34480128 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22cc00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025c22cc00 session 0x560259b1e1e0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:57.947773+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b723800 session 0x56025c062780
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122322944 unmapped: 34471936 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:58.948117+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b723c00 session 0x56025a79ab40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025c22c400 session 0x56025a297860
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122478592 unmapped: 34316288 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:59.948398+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cda0c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122478592 unmapped: 34316288 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:00.948616+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122478592 unmapped: 34316288 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1508493 data_alloc: 234881024 data_used: 18415616
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:01.948861+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b3d000/0x0/0x4ffc00000, data 0x2a5b6b1/0x2b31000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122085376 unmapped: 34709504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:02.949060+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123609088 unmapped: 33185792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 10.425390244s of 10.618241310s, submitted: 27
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:03.949298+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:04.949588+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:05.949933+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1594241 data_alloc: 234881024 data_used: 30023680
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:06.950198+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:07.950367+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b3b000/0x0/0x4ffc00000, data 0x2a5c6b1/0x2b32000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:08.950626+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:09.950892+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:10.951221+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b3b000/0x0/0x4ffc00000, data 0x2a5c6b1/0x2b32000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1594241 data_alloc: 234881024 data_used: 30023680
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:11.951485+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:12.951751+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:13.952120+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:14.952464+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:15.952787+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b3b000/0x0/0x4ffc00000, data 0x2a5c6b1/0x2b32000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1594241 data_alloc: 234881024 data_used: 30023680
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:16.953122+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b3b000/0x0/0x4ffc00000, data 0x2a5c6b1/0x2b32000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:17.953495+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:18.953714+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:19.954148+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:20.954495+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1594241 data_alloc: 234881024 data_used: 30023680
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:21.954695+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b3b000/0x0/0x4ffc00000, data 0x2a5c6b1/0x2b32000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128589824 unmapped: 28205056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:22.955053+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b3b000/0x0/0x4ffc00000, data 0x2a5c6b1/0x2b32000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128589824 unmapped: 28205056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:23.955432+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128589824 unmapped: 28205056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:24.955735+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128589824 unmapped: 28205056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:25.956004+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128589824 unmapped: 28205056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1594241 data_alloc: 234881024 data_used: 30023680
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:26.956310+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128589824 unmapped: 28205056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:27.956506+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 24.193414688s of 24.200784683s, submitted: 1
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b3b000/0x0/0x4ffc00000, data 0x2a5c6b1/0x2b32000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128589824 unmapped: 28205056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025c22c000 session 0x56025a229c20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:28.956759+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128589824 unmapped: 28205056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:29.957063+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _renew_subs
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 140 handle_osd_map epochs [141,141], i have 140, src has [1,141]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128622592 unmapped: 28172288 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:30.957507+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128622592 unmapped: 28172288 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1599701 data_alloc: 234881024 data_used: 30031872
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:31.957840+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8b37000/0x0/0x4ffc00000, data 0x2a5e23d/0x2b36000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128622592 unmapped: 28172288 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:32.958098+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128622592 unmapped: 28172288 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:33.958408+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128630784 unmapped: 28164096 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:34.958715+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 134488064 unmapped: 22306816 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:35.959046+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8b37000/0x0/0x4ffc00000, data 0x2a5e23d/0x2b36000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [0,1])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 134979584 unmapped: 21815296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1679465 data_alloc: 234881024 data_used: 30416896
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:36.959456+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 135323648 unmapped: 21471232 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:37.959703+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b728c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b728c00 session 0x56025c07dc20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 9.719120979s of 10.028226852s, submitted: 74
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b723800 session 0x56025c07d0e0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b723c00 session 0x56025c07c5a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c000 session 0x56025a2b3c20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 136052736 unmapped: 20742144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:38.959938+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c400 session 0x56025a2b2780
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 136052736 unmapped: 20742144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b728800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b728800 session 0x56025a2b34a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:39.960173+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b723800 session 0x56025a2b21e0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 136077312 unmapped: 20717568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:40.960409+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f7b32000/0x0/0x4ffc00000, data 0x3a5c23d/0x3b34000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3600.1 total, 600.0 interval
                                            Cumulative writes: 9523 writes, 36K keys, 9523 commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 9523 writes, 2506 syncs, 3.80 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 2049 writes, 7169 keys, 2049 commit groups, 1.0 writes per commit group, ingest: 7.34 MB, 0.01 MB/s
                                            Interval WAL: 2049 writes, 834 syncs, 2.46 writes per sync, written: 0.01 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 136077312 unmapped: 20717568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:41.960944+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1736163 data_alloc: 234881024 data_used: 30232576
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b723c00 session 0x56025a7883c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 136077312 unmapped: 20717568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:42.961370+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c000 session 0x56025a7885a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f7b32000/0x0/0x4ffc00000, data 0x3a5c23d/0x3b34000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c400 session 0x5602598bcf00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 136110080 unmapped: 20684800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:43.961836+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7a800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025cd7a800 session 0x5602598bd4a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 135733248 unmapped: 21061632 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:44.962040+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025eb95000 session 0x56025c062b40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025cda0c00 session 0x56025a4741e0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:45.962281+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125075456 unmapped: 31719424 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b723c00 session 0x56025b974d20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:46.962543+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125075456 unmapped: 31719424 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480820 data_alloc: 234881024 data_used: 18042880
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8d8d000/0x0/0x4ffc00000, data 0x24b123d/0x2589000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:47.962888+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125116416 unmapped: 31678464 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: mgrc ms_handle_reset ms_handle_reset con 0x56025c048800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: mgrc reconnect Terminating session with v2:192.168.122.100:6800/1088804496
Oct 11 02:58:01 compute-0 ceph-osd[205667]: mgrc reconnect Starting new session with [v2:192.168.122.100:6800/1088804496,v1:192.168.122.100:6801/1088804496]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: get_auth_request con 0x56025cd7a800 auth_method 0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: mgrc handle_mgr_configure stats_period=5
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:48.963323+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 31440896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:49.963636+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 126304256 unmapped: 30490624 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:50.964066+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 126304256 unmapped: 30490624 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8d8d000/0x0/0x4ffc00000, data 0x24b123d/0x2589000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 13.589024544s of 13.861538887s, submitted: 67
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b722c00 session 0x56025c07cb40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:51.964442+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b722800 session 0x56025c0e7860
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 126304256 unmapped: 30490624 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1525144 data_alloc: 234881024 data_used: 24367104
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b722c00 session 0x56025a6e1a40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:52.964692+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9bfe000/0x0/0x4ffc00000, data 0x19981db/0x1a6f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:53.964973+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:54.965292+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x5602594adc00 session 0x56025c07cf00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:55.965707+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9c22000/0x0/0x4ffc00000, data 0x19741db/0x1a4b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:56.966123+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1403522 data_alloc: 234881024 data_used: 19841024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:57.966489+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:58.966897+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:59.967412+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:00.967783+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9c22000/0x0/0x4ffc00000, data 0x19741db/0x1a4b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:01.968197+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1403522 data_alloc: 234881024 data_used: 19841024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:02.968541+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:03.968830+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:04.969120+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9c22000/0x0/0x4ffc00000, data 0x19741db/0x1a4b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:05.969469+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9c22000/0x0/0x4ffc00000, data 0x19741db/0x1a4b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:06.969739+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1403522 data_alloc: 234881024 data_used: 19841024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9c22000/0x0/0x4ffc00000, data 0x19741db/0x1a4b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:07.970043+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:08.970357+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:09.970594+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:10.970901+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:11.971336+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9c22000/0x0/0x4ffc00000, data 0x19741db/0x1a4b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1403522 data_alloc: 234881024 data_used: 19841024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:12.971671+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:13.971908+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:14.972168+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:15.973956+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:16.974362+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1403522 data_alloc: 234881024 data_used: 19841024
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9c22000/0x0/0x4ffc00000, data 0x19741db/0x1a4b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:17.974706+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:18.974968+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 27.218790054s of 27.380781174s, submitted: 36
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:19.975199+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 126967808 unmapped: 29827072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:20.975443+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127369216 unmapped: 29425664 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:21.975825+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127369216 unmapped: 29425664 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:22.976035+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127369216 unmapped: 29425664 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:23.976402+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127369216 unmapped: 29425664 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:24.976937+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127369216 unmapped: 29425664 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:25.977254+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127369216 unmapped: 29425664 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:26.977801+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:27.978518+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:28.978848+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:29.979078+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:30.979391+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:31.979935+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:32.980660+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:33.981141+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:34.981826+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:35.982569+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:36.983354+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:37.983633+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:38.983952+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:39.984427+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:40.985016+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:41.985527+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:42.985936+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:43.986368+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:44.986841+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:45.987339+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:46.987643+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:47.987944+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:48.988421+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:49.988933+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:50.989327+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:51.989580+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:52.989996+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:53.990445+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:54.990748+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:55.991383+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:56.991884+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:57.992723+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:58.993170+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:59.993603+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:00.994096+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:01.994559+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:02.994836+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:03.995135+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:04.995418+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2106558264' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json-pretty"}]: dispatch
Oct 11 02:58:01 compute-0 ceph-mon[191930]: from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
Oct 11 02:58:01 compute-0 ceph-mon[191930]: from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
Oct 11 02:58:01 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3370820490' entity='client.admin' cmd=[{"prefix": "config dump"}]: dispatch
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:05.995732+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:06.996151+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:07.996462+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:08.997084+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:09.997497+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:10.998089+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:11.998495+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:12.998811+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025ba3c000 session 0x56025c0ec3c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cda0c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:13.999323+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:14.999736+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:16.000138+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:17.000543+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:18.000879+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:19.001308+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 60.488044739s of 60.699516296s, submitted: 57
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:20.001541+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127410176 unmapped: 29384704 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:21.001883+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127426560 unmapped: 29368320 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:22.002094+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127492096 unmapped: 29302784 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458868 data_alloc: 234881024 data_used: 20365312
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:23.002292+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:24.002703+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:25.003032+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:26.003303+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:27.003552+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:28.003861+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:29.004327+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:30.004708+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:31.004910+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:32.005319+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:33.005734+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:34.006186+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:35.006607+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:36.007036+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:37.007423+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:38.007826+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:39.008185+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:40.008529+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:41.008887+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:42.009168+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:43.009582+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:44.009916+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:45.010350+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:46.010742+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:47.011206+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:48.011770+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:49.011969+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:50.012392+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:51.012581+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:52.012736+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:53.012946+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:54.013604+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:55.014014+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:56.014436+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:57.014965+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:58.015695+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:59.016073+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:00.016423+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:01.016758+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:02.017120+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:03.017441+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:04.017850+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:05.018305+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:06.018645+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:07.019065+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:08.019501+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:09.019900+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:10.020303+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:11.020615+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:12.020925+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:13.021347+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:14.021638+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:15.021986+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:16.022173+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:17.022566+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:18.023004+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:19.023478+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:20.023717+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:21.023938+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:22.024190+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:23.024431+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025eb95000 session 0x56025c2223c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c400 session 0x56025c222b40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025d73c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025d73c000 session 0x56025c0e2d20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025d73c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025d73c400 session 0x56025c0e3a40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:24.024656+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127565824 unmapped: 29229056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 63.423805237s of 64.131790161s, submitted: 136
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b722c00 session 0x56025c0e21e0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c400 session 0x56025c0e34a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025d73c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025d73c000 session 0x56025cc1e1e0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025eb95000 session 0x56025b97fa40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025d73c800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025d73c800 session 0x56025c0e7680
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:25.025044+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127000576 unmapped: 29794304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f966a000/0x0/0x4ffc00000, data 0x1f2c1eb/0x2004000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:26.025396+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127000576 unmapped: 29794304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:27.025879+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127000576 unmapped: 29794304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:28.026442+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f966a000/0x0/0x4ffc00000, data 0x1f2c1eb/0x2004000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127000576 unmapped: 29794304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1466955 data_alloc: 234881024 data_used: 20365312
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:29.026861+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127000576 unmapped: 29794304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b722c00 session 0x56025b96cd20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c400 session 0x56025b6d03c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:30.027333+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127016960 unmapped: 29777920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025d73c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025d73c000 session 0x56025d75f680
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:31.027649+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127393792 unmapped: 29401088 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025eb95000 session 0x56025b6d1c20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025d73cc00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025d73d000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:32.027875+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127393792 unmapped: 29401088 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:33.028093+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127393792 unmapped: 29401088 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1473892 data_alloc: 234881024 data_used: 20369408
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:34.028376+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127393792 unmapped: 29401088 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:35.028572+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127401984 unmapped: 29392896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:36.028761+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127401984 unmapped: 29392896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:37.029026+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127401984 unmapped: 29392896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:38.029271+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127401984 unmapped: 29392896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1477892 data_alloc: 234881024 data_used: 20897792
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:39.029562+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127401984 unmapped: 29392896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:40.029865+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127401984 unmapped: 29392896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:41.030133+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127401984 unmapped: 29392896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:42.030313+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127401984 unmapped: 29392896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:43.030500+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127410176 unmapped: 29384704 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1477892 data_alloc: 234881024 data_used: 20897792
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:44.030733+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127410176 unmapped: 29384704 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:45.031312+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127410176 unmapped: 29384704 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:46.031643+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127410176 unmapped: 29384704 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:47.032305+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127410176 unmapped: 29384704 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:48.032687+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127410176 unmapped: 29384704 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1477892 data_alloc: 234881024 data_used: 20897792
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:49.033073+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127410176 unmapped: 29384704 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:50.033329+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:51.033657+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:52.034071+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:53.034347+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1477892 data_alloc: 234881024 data_used: 20897792
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:54.034712+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:55.035052+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:56.035428+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:57.035866+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:58.036101+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1477892 data_alloc: 234881024 data_used: 20897792
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:59.036467+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:00.036772+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127426560 unmapped: 29368320 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:01.037009+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127426560 unmapped: 29368320 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:02.037453+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127426560 unmapped: 29368320 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:03.037723+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127426560 unmapped: 29368320 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1477892 data_alloc: 234881024 data_used: 20897792
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:04.037976+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127426560 unmapped: 29368320 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:05.038601+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127426560 unmapped: 29368320 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:06.038973+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 42.074275970s of 42.221923828s, submitted: 23
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127426560 unmapped: 29368320 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:07.039352+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:08.039722+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1545754 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:09.040219+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:10.040560+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:11.040868+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:12.041312+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:13.041609+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:14.041864+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:15.042053+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:16.042316+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:17.042724+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:18.042993+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:19.043432+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:20.043643+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:21.043991+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:22.044457+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:23.044681+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127664128 unmapped: 29130752 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:24.044947+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127664128 unmapped: 29130752 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:25.045366+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127664128 unmapped: 29130752 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:26.045844+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127664128 unmapped: 29130752 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 20.101493835s of 20.269266129s, submitted: 29
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:27.046630+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:28.046975+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:29.047417+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:30.047751+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:31.048003+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:32.048387+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:33.048795+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:34.049535+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:35.049857+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:36.050416+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:37.050815+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:38.051219+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:39.051765+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:40.052147+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:41.052467+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:42.052676+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:43.052909+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:44.053104+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:45.053682+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:46.053888+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:47.054294+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:48.054565+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:49.054779+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:50.055205+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:51.055474+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:52.055652+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:53.055866+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:54.056143+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:55.056623+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:56.056944+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:57.057348+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:58.057521+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127688704 unmapped: 29106176 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:59.057760+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127688704 unmapped: 29106176 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:00.058137+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127688704 unmapped: 29106176 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:01.058502+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127688704 unmapped: 29106176 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:02.058707+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:03.058971+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:04.059337+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:05.059575+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:06.059782+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:07.060127+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:08.060357+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:09.060754+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:10.060993+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:11.061201+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:12.061389+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:13.061682+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:14.061991+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:15.062455+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:16.062719+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:17.063388+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:18.063727+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:19.063983+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:20.064302+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:21.064722+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:22.065053+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:23.065427+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:24.065726+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:25.066124+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:26.066354+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127713280 unmapped: 29081600 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:27.066691+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127713280 unmapped: 29081600 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:28.067014+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:29.067431+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:30.067652+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:31.067917+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:32.068368+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:33.068634+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:34.068961+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:35.069390+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:36.069706+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:37.070118+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:38.070485+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:39.070851+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:40.071104+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:41.071447+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:42.071839+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:43.072193+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:44.072676+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:45.073122+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:46.073501+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:47.073956+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:48.074219+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:49.074694+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:50.074978+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:51.075321+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:52.075544+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127737856 unmapped: 29057024 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:53.075782+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127737856 unmapped: 29057024 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:54.076096+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127737856 unmapped: 29057024 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:55.076363+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127737856 unmapped: 29057024 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:56.076563+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127737856 unmapped: 29057024 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:57.076878+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:58.077095+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:59.077461+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:00.077729+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:01.078147+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:02.078429+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:03.078849+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:04.079283+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:05.079661+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:06.080054+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:07.080415+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:08.080724+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:09.081140+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:10.081551+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:11.081890+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:12.082431+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:13.082862+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:14.083329+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:15.083567+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:16.083864+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:17.084188+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:18.084619+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:19.084810+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:20.085130+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:21.085376+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:22.085623+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:23.085816+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:24.086340+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550512 data_alloc: 234881024 data_used: 21090304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:25.086724+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:26.087145+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22dc00 session 0x56025c276780
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:27.087663+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:28.087850+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:29.088038+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550512 data_alloc: 234881024 data_used: 21090304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:30.088451+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127770624 unmapped: 29024256 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:31.088835+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:32.089084+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:33.089325+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:34.089709+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550512 data_alloc: 234881024 data_used: 21090304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:35.089955+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:36.090358+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:37.090635+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:38.090976+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:39.091174+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550512 data_alloc: 234881024 data_used: 21090304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:40.091478+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:41.093102+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:42.093664+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:43.094943+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:44.095197+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550512 data_alloc: 234881024 data_used: 21090304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:45.095491+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:46.095771+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:47.096679+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:48.097025+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:49.097632+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550512 data_alloc: 234881024 data_used: 21090304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:50.097866+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:51.098147+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:52.098391+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:53.098686+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:54.099005+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550512 data_alloc: 234881024 data_used: 21090304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:55.099651+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:56.099993+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127795200 unmapped: 28999680 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:57.100422+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127795200 unmapped: 28999680 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:58.100777+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127795200 unmapped: 28999680 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:59.101199+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550512 data_alloc: 234881024 data_used: 21090304
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127795200 unmapped: 28999680 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:00.101615+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28991488 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:01.102067+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28991488 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:02.102432+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 155.622192383s of 155.630996704s, submitted: 1
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:03.102804+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:04.103006+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:05.103320+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:06.103634+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:07.104059+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:08.104420+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:09.104704+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:10.105014+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:11.105338+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:12.105594+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:13.105917+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:14.106314+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:15.106550+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:16.108660+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:17.109099+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:18.109538+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:19.109927+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:20.110338+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:21.110717+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:22.111078+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:23.111407+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:24.111646+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:25.111978+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:26.112439+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:27.112759+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:28.113056+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:29.113466+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:30.113648+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:31.113969+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:32.114420+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:33.115061+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:34.115376+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:35.115721+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:36.116019+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:37.116416+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:38.116747+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:39.122893+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:40.123395+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:41.123719+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:42.124102+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127836160 unmapped: 28958720 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:43.124389+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127836160 unmapped: 28958720 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:44.124785+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127836160 unmapped: 28958720 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:45.125200+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127836160 unmapped: 28958720 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:46.125726+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127836160 unmapped: 28958720 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:47.126107+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127836160 unmapped: 28958720 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:48.126709+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127836160 unmapped: 28958720 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:49.127031+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:50.127607+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127844352 unmapped: 28950528 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:51.127976+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:52.128391+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:53.128960+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:54.129173+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:55.129439+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:56.129690+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:57.130209+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:58.130731+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:59.131081+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:00.131292+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:01.131685+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:02.132117+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:03.132335+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:04.132647+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:05.132866+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:06.133308+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:07.133708+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:08.134081+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:09.134594+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:10.134862+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:11.135130+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:12.135560+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:13.135892+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:14.136156+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:15.136388+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:16.136753+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:17.137341+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:18.137739+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:19.138081+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:20.138465+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:21.138692+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:22.138952+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:23.139335+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:24.139696+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:25.139910+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:26.140141+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:27.140504+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:28.140693+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:29.141036+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:30.141280+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:31.141480+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:32.141650+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:33.141856+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:34.142064+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:35.142309+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:36.142663+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:37.143609+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:38.144157+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:39.144371+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127877120 unmapped: 28917760 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:40.144768+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:41.145041+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:42.145390+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:43.145633+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:44.145908+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:45.146391+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:46.146694+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:47.147376+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:48.148345+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:49.148749+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:50.149039+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:51.149472+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:52.149840+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:53.150705+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:54.151060+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:55.151348+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:56.151620+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:57.151960+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:58.152220+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:59.152722+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:00.153099+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:01.153374+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:02.153710+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:03.154178+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:04.154346+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:05.154569+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:06.155023+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:07.155446+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:08.155843+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 126.354400635s of 126.374946594s, submitted: 14
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:09.156207+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:10.156575+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:11.156957+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:12.157730+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:13.158103+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:14.158433+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:15.158919+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:16.159401+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:17.159789+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:18.160203+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:19.160582+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:20.160934+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:21.161433+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:22.161768+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:23.162131+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:24.162524+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:25.162891+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:26.163181+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:27.163597+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:28.163845+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:29.164073+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:30.164409+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127909888 unmapped: 28884992 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:31.164594+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:32.164934+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:33.165167+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:34.165429+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:35.165889+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:36.166337+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:37.166809+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:38.167157+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:39.167524+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:40.167999+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:41.168482+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:42.168802+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:43.169045+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:44.169501+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:45.169853+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:46.170347+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:47.170692+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:48.171038+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:49.171536+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:50.171759+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:51.172139+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:52.172491+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127934464 unmapped: 28860416 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:53.172861+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127934464 unmapped: 28860416 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:54.173213+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127934464 unmapped: 28860416 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:55.173514+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127934464 unmapped: 28860416 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:56.173793+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127934464 unmapped: 28860416 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:57.174157+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127934464 unmapped: 28860416 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:58.174630+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127934464 unmapped: 28860416 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:59.174910+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127934464 unmapped: 28860416 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:00.175339+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127942656 unmapped: 28852224 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:01.175701+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:02.175893+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:03.176703+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:04.177069+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:05.177412+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:06.177763+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:07.178079+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:08.178372+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:09.178748+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:10.179143+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:11.179656+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:12.180006+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:13.180295+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:14.180620+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:15.180982+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:16.181629+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:17.182072+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:18.182399+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:19.182654+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:20.182905+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:21.183138+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:22.183594+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:23.183885+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:24.184395+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127967232 unmapped: 28827648 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:25.184812+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127967232 unmapped: 28827648 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:26.185171+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127967232 unmapped: 28827648 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:27.185554+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127967232 unmapped: 28827648 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:28.185937+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127967232 unmapped: 28827648 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:29.186352+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127967232 unmapped: 28827648 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:30.187711+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127967232 unmapped: 28827648 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:31.187973+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127975424 unmapped: 28819456 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:32.188297+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127975424 unmapped: 28819456 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:33.188509+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127975424 unmapped: 28819456 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:34.194349+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127975424 unmapped: 28819456 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:35.194593+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127975424 unmapped: 28819456 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:36.194957+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127975424 unmapped: 28819456 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:37.195311+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127975424 unmapped: 28819456 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:38.195546+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127975424 unmapped: 28819456 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:39.195848+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:40.196077+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:41.196507+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 4200.1 total, 600.0 interval
                                            Cumulative writes: 10K writes, 38K keys, 10K commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 10K writes, 2784 syncs, 3.64 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 615 writes, 1777 keys, 615 commit groups, 1.0 writes per commit group, ingest: 1.43 MB, 0.00 MB/s
                                            Interval WAL: 615 writes, 278 syncs, 2.21 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:42.196813+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:43.197193+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:44.197456+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:45.197651+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:46.198122+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:47.198546+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:48.198941+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:49.199262+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:50.199694+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:51.199977+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:52.200266+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:53.200653+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:54.200933+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:55.201428+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:56.201727+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:57.202075+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:58.202445+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:59.202685+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:00.203066+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:01.203573+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:02.203789+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:03.204135+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:04.204719+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:05.204963+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:06.205214+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:07.205694+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:08.205974+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128000000 unmapped: 28794880 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:09.206336+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128000000 unmapped: 28794880 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:10.206556+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:11.206917+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:12.207199+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:13.207427+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:14.207800+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:15.208324+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:16.208777+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:17.209303+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:18.209763+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:19.210035+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:20.210330+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:21.210644+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:22.211004+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:23.211510+0000)
Oct 11 02:58:01 compute-0 openstack_network_exporter[374316]: ERROR   02:58:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:58:01 compute-0 openstack_network_exporter[374316]: ERROR   02:58:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:58:01 compute-0 openstack_network_exporter[374316]: ERROR   02:58:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:58:01 compute-0 openstack_network_exporter[374316]: ERROR   02:58:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:58:01 compute-0 openstack_network_exporter[374316]: ERROR   02:58:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:24.211967+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:25.212383+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:26.212777+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:27.213332+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:28.213542+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:29.214035+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:30.214492+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:31.214681+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:32.214938+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:33.215641+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:34.216032+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:35.216410+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:36.216966+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:37.217344+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:38.217569+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:39.217861+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:40.218148+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:41.218540+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:42.218843+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:43.219452+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:44.219878+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:45.220161+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128032768 unmapped: 28762112 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:46.220398+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128032768 unmapped: 28762112 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:47.220844+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128032768 unmapped: 28762112 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:48.221098+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128032768 unmapped: 28762112 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:49.221492+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128032768 unmapped: 28762112 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:50.221837+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:51.222344+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:52.222827+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:53.223023+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:54.223407+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:55.223788+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:56.224135+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:57.224556+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:58.224979+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:59.225417+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:00.226087+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:01.226379+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:02.226595+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:03.228951+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:04.229168+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:05.229362+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:06.229566+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:07.230032+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:08.230343+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:09.230869+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:10.231382+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:11.231700+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:12.232559+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:13.232950+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:14.234046+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128057344 unmapped: 28737536 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:15.234413+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128057344 unmapped: 28737536 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:16.234635+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128057344 unmapped: 28737536 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:17.235145+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128057344 unmapped: 28737536 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:18.235391+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128057344 unmapped: 28737536 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:19.235846+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128057344 unmapped: 28737536 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:20.236369+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128057344 unmapped: 28737536 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:21.236706+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 192.754379272s of 192.776382446s, submitted: 3
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128073728 unmapped: 28721152 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:22.237023+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128106496 unmapped: 28688384 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:23.237290+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128163840 unmapped: 28631040 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:24.237483+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128221184 unmapped: 28573696 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:25.237835+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128221184 unmapped: 28573696 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:26.238185+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128221184 unmapped: 28573696 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:27.238632+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128221184 unmapped: 28573696 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:28.238897+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128221184 unmapped: 28573696 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:29.239328+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128221184 unmapped: 28573696 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:30.239684+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128221184 unmapped: 28573696 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:31.240094+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128221184 unmapped: 28573696 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:32.240412+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:33.240744+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:34.241216+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:35.241558+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:36.242071+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:37.242443+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:38.242819+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:39.243376+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:40.243693+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:41.244056+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:42.244459+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:43.244913+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:44.245578+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:45.246021+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:46.246537+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:47.247060+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:48.247374+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:49.247785+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:50.248058+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:51.248427+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:52.248801+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:53.249128+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:54.249444+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:55.249679+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:56.250069+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:57.250537+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:58.250881+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:59.251377+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:00.251677+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:01.252154+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:02.252564+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:03.252846+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:04.253139+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:05.253407+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:06.253697+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:07.254143+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:08.254365+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:09.254759+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:10.255092+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:11.255599+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:12.255855+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:13.256091+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:14.256452+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:15.256931+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:16.257187+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:17.257519+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128245760 unmapped: 28549120 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:18.257960+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128245760 unmapped: 28549120 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:19.258400+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c000 session 0x56025bfba3c0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b723800 session 0x5602598bda40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 57.812805176s of 58.417026520s, submitted: 106
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:20.258611+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c400 session 0x56025c222960
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f94d8000/0x0/0x4ffc00000, data 0x1cad1fb/0x1d86000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:21.258822+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:22.259159+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1411485 data_alloc: 218103808 data_used: 14237696
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:23.259440+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9503000/0x0/0x4ffc00000, data 0x1c831eb/0x1d5b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:24.259824+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9503000/0x0/0x4ffc00000, data 0x1c831eb/0x1d5b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:25.260318+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9503000/0x0/0x4ffc00000, data 0x1c831eb/0x1d5b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:26.260803+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:27.261477+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1411485 data_alloc: 218103808 data_used: 14237696
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:28.261724+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9503000/0x0/0x4ffc00000, data 0x1c831eb/0x1d5b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:29.262201+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9503000/0x0/0x4ffc00000, data 0x1c831eb/0x1d5b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:30.262624+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:31.262967+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 12.343958855s of 12.403041840s, submitted: 12
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025d73cc00 session 0x56025a2b25a0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:32.263423+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025d73d000 session 0x56025b9a21e0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1411129 data_alloc: 218103808 data_used: 14237696
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123904000 unmapped: 32890880 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:33.263807+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b723800 session 0x56025b6a5c20
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:34.264386+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9e44000/0x0/0x4ffc00000, data 0x13441cb/0x141a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:35.264638+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9e44000/0x0/0x4ffc00000, data 0x13441cb/0x141a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:36.264896+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:37.265503+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1331102 data_alloc: 218103808 data_used: 13574144
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:38.265759+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:39.266068+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:40.266436+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9e44000/0x0/0x4ffc00000, data 0x13441cb/0x141a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:41.266757+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 141 handle_osd_map epochs [141,142], i have 141, src has [1,142]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 9.774773598s of 10.002117157s, submitted: 51
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:42.267031+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1334403 data_alloc: 218103808 data_used: 13578240
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 33570816 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 142 ms_handle_reset con 0x56025c22c000 session 0x56025b97e780
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:43.267459+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 142 heartbeat osd_stat(store_statfs(0x4f9e41000/0x0/0x4ffc00000, data 0x1345d8d/0x141c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131612672 unmapped: 33579008 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 142 handle_osd_map epochs [142,143], i have 142, src has [1,143]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:44.267826+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 143 ms_handle_reset con 0x56025c22c400 session 0x56025b6d1680
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025d73cc00
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:45.268096+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _renew_subs
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 143 handle_osd_map epochs [144,144], i have 143, src has [1,144]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 144 ms_handle_reset con 0x56025d73cc00 session 0x56025a297a40
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:46.268511+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:47.269047+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1343540 data_alloc: 218103808 data_used: 13586432
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:48.269397+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:49.269809+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 144 heartbeat osd_stat(store_statfs(0x4f9e3a000/0x0/0x4ffc00000, data 0x1349507/0x1422000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:50.270012+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 144 heartbeat osd_stat(store_statfs(0x4f9e3a000/0x0/0x4ffc00000, data 0x1349507/0x1422000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:51.270615+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:52.271015+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1343540 data_alloc: 218103808 data_used: 13586432
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:53.271289+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:54.271727+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 144 heartbeat osd_stat(store_statfs(0x4f9e3a000/0x0/0x4ffc00000, data 0x1349507/0x1422000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 144 handle_osd_map epochs [145,145], i have 144, src has [1,145]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 144 handle_osd_map epochs [145,145], i have 145, src has [1,145]
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 12.784080505s of 12.961950302s, submitted: 25
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:55.272093+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:56.272490+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:57.272812+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345842 data_alloc: 218103808 data_used: 13586432
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:58.273106+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:59.273580+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:00.273963+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:01.274447+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:02.274759+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:03.275170+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:04.275562+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:05.275800+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:06.276066+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:07.276513+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:08.276986+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:09.277486+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:10.278211+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:11.278764+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:12.279028+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:13.279404+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:14.279685+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:15.279953+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:16.280433+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:17.280895+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:18.281374+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:19.281600+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:20.281859+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:21.282202+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:22.282619+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:23.282913+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:24.283298+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:25.283567+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:26.283937+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 02:58:01 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:27.284433+0000)
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 02:58:01 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 02:58:01 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 02:58:01 compute-0 ceph-osd[205667]: do_command 'config diff' '{prefix=config diff}'
Oct 11 02:58:01 compute-0 ceph-osd[205667]: do_command 'config diff' '{prefix=config diff}' result is 0 bytes
Oct 11 02:58:01 compute-0 ceph-osd[205667]: do_command 'config show' '{prefix=config show}'
Oct 11 02:58:01 compute-0 ceph-osd[205667]: do_command 'config show' '{prefix=config show}' result is 0 bytes
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123617280 unmapped: 41574400 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: do_command 'counter dump' '{prefix=counter dump}'
Oct 11 02:58:01 compute-0 ceph-osd[205667]: do_command 'counter dump' '{prefix=counter dump}' result is 0 bytes
Oct 11 02:58:01 compute-0 ceph-osd[205667]: do_command 'counter schema' '{prefix=counter schema}'
Oct 11 02:58:01 compute-0 ceph-osd[205667]: do_command 'counter schema' '{prefix=counter schema}' result is 0 bytes
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 42008576 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122552320 unmapped: 42639360 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 02:58:01 compute-0 ceph-osd[205667]: do_command 'log dump' '{prefix=log dump}'
Oct 11 02:58:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2417: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:01 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15693 -' entity='client.admin' cmd=[{"prefix": "device ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:58:01 compute-0 rsyslogd[187706]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 02:58:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "detail": "detail"} v 0) v1
Oct 11 02:58:02 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1694769215' entity='client.admin' cmd=[{"prefix": "df", "detail": "detail"}]: dispatch
Oct 11 02:58:02 compute-0 ceph-mon[191930]: pgmap v2417: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:02 compute-0 ceph-mon[191930]: from='client.15693 -' entity='client.admin' cmd=[{"prefix": "device ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:02 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1694769215' entity='client.admin' cmd=[{"prefix": "df", "detail": "detail"}]: dispatch
Oct 11 02:58:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df"} v 0) v1
Oct 11 02:58:02 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/791841407' entity='client.admin' cmd=[{"prefix": "df"}]: dispatch
Oct 11 02:58:02 compute-0 nova_compute[356901]: 2025-10-11 02:58:02.822 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "fs dump"} v 0) v1
Oct 11 02:58:03 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1811530889' entity='client.admin' cmd=[{"prefix": "fs dump"}]: dispatch
Oct 11 02:58:03 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/791841407' entity='client.admin' cmd=[{"prefix": "df"}]: dispatch
Oct 11 02:58:03 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1811530889' entity='client.admin' cmd=[{"prefix": "fs dump"}]: dispatch
Oct 11 02:58:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2418: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "fs ls"} v 0) v1
Oct 11 02:58:03 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/186035080' entity='client.admin' cmd=[{"prefix": "fs ls"}]: dispatch
Oct 11 02:58:03 compute-0 sudo[485175]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:58:03 compute-0 sudo[485175]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:03 compute-0 sudo[485175]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:03 compute-0 sudo[485208]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:58:03 compute-0 sudo[485208]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:03 compute-0 sudo[485208]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:03 compute-0 sudo[485254]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:58:03 compute-0 sudo[485254]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:03 compute-0 sudo[485254]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:04 compute-0 sudo[485281]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host
Oct 11 02:58:04 compute-0 sudo[485281]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:04 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15703 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:04 compute-0 sudo[485281]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:58:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:58:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:58:04 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:58:04 compute-0 ceph-mon[191930]: pgmap v2418: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:04 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/186035080' entity='client.admin' cmd=[{"prefix": "fs ls"}]: dispatch
Oct 11 02:58:04 compute-0 ceph-mon[191930]: from='client.15703 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:04 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:58:04 compute-0 sudo[485356]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:58:04 compute-0 sudo[485356]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:04 compute-0 sudo[485356]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:04 compute-0 sudo[485387]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:58:04 compute-0 sudo[485387]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:04 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mds stat"} v 0) v1
Oct 11 02:58:04 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3162672592' entity='client.admin' cmd=[{"prefix": "mds stat"}]: dispatch
Oct 11 02:58:04 compute-0 sudo[485387]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:04 compute-0 systemd[1]: Starting Hostname Service...
Oct 11 02:58:04 compute-0 sudo[485427]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:58:04 compute-0 sudo[485427]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:04 compute-0 sudo[485427]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:04 compute-0 sudo[485466]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:58:04 compute-0 sudo[485466]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:04 compute-0 systemd[1]: Started Hostname Service.
Oct 11 02:58:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump"} v 0) v1
Oct 11 02:58:05 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/639971961' entity='client.admin' cmd=[{"prefix": "mon dump"}]: dispatch
Oct 11 02:58:05 compute-0 sudo[485466]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:05 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:58:05 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3162672592' entity='client.admin' cmd=[{"prefix": "mds stat"}]: dispatch
Oct 11 02:58:05 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/639971961' entity='client.admin' cmd=[{"prefix": "mon dump"}]: dispatch
Oct 11 02:58:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:58:05 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:58:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:58:05 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:58:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:58:05 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:58:05 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 0d76b46b-4999-4bf7-9dec-c6339cd5d971 does not exist
Oct 11 02:58:05 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 640b62f3-1849-47ee-b8ca-aa5c0b525be2 does not exist
Oct 11 02:58:05 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 7531f1cc-eb92-48e2-8792-02ca180c16e8 does not exist
Oct 11 02:58:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:58:05 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:58:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:58:05 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:58:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:58:05 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:58:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2419: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:05 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15709 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:05 compute-0 sudo[485606]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:58:05 compute-0 sudo[485606]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:05 compute-0 sudo[485606]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:05 compute-0 sudo[485633]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:58:05 compute-0 sudo[485633]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:05 compute-0 sudo[485633]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:05 compute-0 sudo[485660]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:58:05 compute-0 sudo[485660]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:05 compute-0 sudo[485660]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:05 compute-0 nova_compute[356901]: 2025-10-11 02:58:05.762 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:05 compute-0 sudo[485704]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:58:05 compute-0 sudo[485704]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:05 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd blocklist ls"} v 0) v1
Oct 11 02:58:05 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3533385019' entity='client.admin' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
Oct 11 02:58:06 compute-0 podman[485779]: 2025-10-11 02:58:06.250214284 +0000 UTC m=+0.065573616 container create 2b4a74a3cc39d50200f728b20f54124f2c33f5e8ed853589d1f3be5fe33b2f7d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_yonath, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:58:06 compute-0 podman[485779]: 2025-10-11 02:58:06.223877995 +0000 UTC m=+0.039237357 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:58:06 compute-0 systemd[1]: Started libpod-conmon-2b4a74a3cc39d50200f728b20f54124f2c33f5e8ed853589d1f3be5fe33b2f7d.scope.
Oct 11 02:58:06 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:58:06 compute-0 podman[485779]: 2025-10-11 02:58:06.37247443 +0000 UTC m=+0.187833772 container init 2b4a74a3cc39d50200f728b20f54124f2c33f5e8ed853589d1f3be5fe33b2f7d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_yonath, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:58:06 compute-0 podman[485779]: 2025-10-11 02:58:06.385988757 +0000 UTC m=+0.201348099 container start 2b4a74a3cc39d50200f728b20f54124f2c33f5e8ed853589d1f3be5fe33b2f7d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_yonath, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 02:58:06 compute-0 mystifying_yonath[485810]: 167 167
Oct 11 02:58:06 compute-0 podman[485779]: 2025-10-11 02:58:06.399527476 +0000 UTC m=+0.214886828 container attach 2b4a74a3cc39d50200f728b20f54124f2c33f5e8ed853589d1f3be5fe33b2f7d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_yonath, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef)
Oct 11 02:58:06 compute-0 systemd[1]: libpod-2b4a74a3cc39d50200f728b20f54124f2c33f5e8ed853589d1f3be5fe33b2f7d.scope: Deactivated successfully.
Oct 11 02:58:06 compute-0 podman[485779]: 2025-10-11 02:58:06.401388738 +0000 UTC m=+0.216748080 container died 2b4a74a3cc39d50200f728b20f54124f2c33f5e8ed853589d1f3be5fe33b2f7d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_yonath, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0)
Oct 11 02:58:06 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:58:06 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:58:06 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:58:06 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:58:06 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:58:06 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:58:06 compute-0 ceph-mon[191930]: pgmap v2419: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:06 compute-0 ceph-mon[191930]: from='client.15709 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:06 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3533385019' entity='client.admin' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
Oct 11 02:58:06 compute-0 systemd[1]: var-lib-containers-storage-overlay-b40ce9d22b16118d80ab14d98fb64c94df1351b56ca56052bd966b058f0e6547-merged.mount: Deactivated successfully.
Oct 11 02:58:06 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15713 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:06 compute-0 podman[485779]: 2025-10-11 02:58:06.487743057 +0000 UTC m=+0.303102389 container remove 2b4a74a3cc39d50200f728b20f54124f2c33f5e8ed853589d1f3be5fe33b2f7d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=mystifying_yonath, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS)
Oct 11 02:58:06 compute-0 systemd[1]: libpod-conmon-2b4a74a3cc39d50200f728b20f54124f2c33f5e8ed853589d1f3be5fe33b2f7d.scope: Deactivated successfully.
Oct 11 02:58:06 compute-0 podman[485840]: 2025-10-11 02:58:06.702919544 +0000 UTC m=+0.073347436 container create f9381e7cf11c1f3ee368795d5fc1c2938a4fd74bd15a4db884d2e2af48533ca3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_liskov, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 02:58:06 compute-0 systemd[1]: Started libpod-conmon-f9381e7cf11c1f3ee368795d5fc1c2938a4fd74bd15a4db884d2e2af48533ca3.scope.
Oct 11 02:58:06 compute-0 podman[485840]: 2025-10-11 02:58:06.682411918 +0000 UTC m=+0.052839840 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:58:06 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:58:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/560610f032518988e6ce1d4dc9bcca09c4a4d943556fcce2bf0f25725bbdde21/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:58:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/560610f032518988e6ce1d4dc9bcca09c4a4d943556fcce2bf0f25725bbdde21/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:58:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/560610f032518988e6ce1d4dc9bcca09c4a4d943556fcce2bf0f25725bbdde21/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:58:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/560610f032518988e6ce1d4dc9bcca09c4a4d943556fcce2bf0f25725bbdde21/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:58:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/560610f032518988e6ce1d4dc9bcca09c4a4d943556fcce2bf0f25725bbdde21/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:58:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:58:06 compute-0 podman[485840]: 2025-10-11 02:58:06.856670348 +0000 UTC m=+0.227098270 container init f9381e7cf11c1f3ee368795d5fc1c2938a4fd74bd15a4db884d2e2af48533ca3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_liskov, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3)
Oct 11 02:58:06 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15715 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:06 compute-0 podman[485840]: 2025-10-11 02:58:06.870484945 +0000 UTC m=+0.240912847 container start f9381e7cf11c1f3ee368795d5fc1c2938a4fd74bd15a4db884d2e2af48533ca3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_liskov, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:58:06 compute-0 podman[485840]: 2025-10-11 02:58:06.887128047 +0000 UTC m=+0.257555999 container attach f9381e7cf11c1f3ee368795d5fc1c2938a4fd74bd15a4db884d2e2af48533ca3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_liskov, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:58:07 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd dump"} v 0) v1
Oct 11 02:58:07 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3619486833' entity='client.admin' cmd=[{"prefix": "osd dump"}]: dispatch
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0009191400908380543 of space, bias 1.0, pg target 0.2757420272514163 quantized to 32 (current 32)
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:58:07 compute-0 ceph-mon[191930]: from='client.15713 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:07 compute-0 ceph-mon[191930]: from='client.15715 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:07 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3619486833' entity='client.admin' cmd=[{"prefix": "osd dump"}]: dispatch
Oct 11 02:58:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2420: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:07 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd numa-status"} v 0) v1
Oct 11 02:58:07 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3568366846' entity='client.admin' cmd=[{"prefix": "osd numa-status"}]: dispatch
Oct 11 02:58:07 compute-0 nova_compute[356901]: 2025-10-11 02:58:07.827 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:08 compute-0 compassionate_liskov[485882]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:58:08 compute-0 compassionate_liskov[485882]: --> relative data size: 1.0
Oct 11 02:58:08 compute-0 compassionate_liskov[485882]: --> All data devices are unavailable
Oct 11 02:58:08 compute-0 podman[485840]: 2025-10-11 02:58:08.15903999 +0000 UTC m=+1.529467892 container died f9381e7cf11c1f3ee368795d5fc1c2938a4fd74bd15a4db884d2e2af48533ca3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_liskov, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default)
Oct 11 02:58:08 compute-0 systemd[1]: libpod-f9381e7cf11c1f3ee368795d5fc1c2938a4fd74bd15a4db884d2e2af48533ca3.scope: Deactivated successfully.
Oct 11 02:58:08 compute-0 systemd[1]: libpod-f9381e7cf11c1f3ee368795d5fc1c2938a4fd74bd15a4db884d2e2af48533ca3.scope: Consumed 1.112s CPU time.
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15721 -' entity='client.admin' cmd=[{"prefix": "osd perf", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:08 compute-0 systemd[1]: var-lib-containers-storage-overlay-560610f032518988e6ce1d4dc9bcca09c4a4d943556fcce2bf0f25725bbdde21-merged.mount: Deactivated successfully.
Oct 11 02:58:08 compute-0 podman[485840]: 2025-10-11 02:58:08.296598693 +0000 UTC m=+1.667026595 container remove f9381e7cf11c1f3ee368795d5fc1c2938a4fd74bd15a4db884d2e2af48533ca3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_liskov, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 02:58:08 compute-0 systemd[1]: libpod-conmon-f9381e7cf11c1f3ee368795d5fc1c2938a4fd74bd15a4db884d2e2af48533ca3.scope: Deactivated successfully.
Oct 11 02:58:08 compute-0 sudo[485704]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:08 compute-0 podman[486032]: 2025-10-11 02:58:08.352373958 +0000 UTC m=+0.136603635 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
Oct 11 02:58:08 compute-0 podman[486027]: 2025-10-11 02:58:08.372883337 +0000 UTC m=+0.159793766 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=edpm, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 02:58:08 compute-0 podman[486031]: 2025-10-11 02:58:08.393967397 +0000 UTC m=+0.180138154 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-type=git, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1755695350, config_id=edpm, maintainer=Red Hat, Inc., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., managed_by=edpm_ansible, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, version=9.6, vendor=Red Hat, Inc., distribution-scope=public, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.33.7, io.openshift.expose-services=, build-date=2025-08-20T13:12:41, io.openshift.tags=minimal rhel9, url=https://catalog.redhat.com/en/search?searchType=containers, architecture=x86_64, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, name=ubi9-minimal, com.redhat.component=ubi9-minimal-container, container_name=openstack_network_exporter)
Oct 11 02:58:08 compute-0 sudo[486105]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:58:08 compute-0 sudo[486105]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:08 compute-0 sudo[486105]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:08 compute-0 ceph-mon[191930]: pgmap v2420: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:08 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3568366846' entity='client.admin' cmd=[{"prefix": "osd numa-status"}]: dispatch
Oct 11 02:58:08 compute-0 ceph-mon[191930]: from='client.15721 -' entity='client.admin' cmd=[{"prefix": "osd perf", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:08 compute-0 sudo[486153]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:58:08 compute-0 sudo[486153]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:08 compute-0 sudo[486153]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:08 compute-0 sudo[486181]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:58:08 compute-0 sudo[486181]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:08 compute-0 sudo[486181]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15723 -' entity='client.admin' cmd=[{"prefix": "osd pool autoscale-status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0009191400908380543 of space, bias 1.0, pg target 0.2757420272514163 quantized to 32 (current 32)
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:08 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 02:58:08 compute-0 sudo[486211]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:58:08 compute-0 sudo[486211]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool ls", "detail": "detail"} v 0) v1
Oct 11 02:58:09 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1649755828' entity='client.admin' cmd=[{"prefix": "osd pool ls", "detail": "detail"}]: dispatch
Oct 11 02:58:09 compute-0 podman[486324]: 2025-10-11 02:58:09.221507279 +0000 UTC m=+0.069701784 container create 1a7c7e076bceb9a2bdba41b5ce2f6a9031cd03a34a21e475978f7df5e98ebf67 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_fermi, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:58:09 compute-0 podman[486324]: 2025-10-11 02:58:09.195761605 +0000 UTC m=+0.043956140 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:58:09 compute-0 systemd[1]: Started libpod-conmon-1a7c7e076bceb9a2bdba41b5ce2f6a9031cd03a34a21e475978f7df5e98ebf67.scope.
Oct 11 02:58:09 compute-0 auditd[699]: Audit daemon rotating log files
Oct 11 02:58:09 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:58:09 compute-0 podman[486324]: 2025-10-11 02:58:09.377592388 +0000 UTC m=+0.225786913 container init 1a7c7e076bceb9a2bdba41b5ce2f6a9031cd03a34a21e475978f7df5e98ebf67 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_fermi, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef)
Oct 11 02:58:09 compute-0 podman[486324]: 2025-10-11 02:58:09.389165417 +0000 UTC m=+0.237359922 container start 1a7c7e076bceb9a2bdba41b5ce2f6a9031cd03a34a21e475978f7df5e98ebf67 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_fermi, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:58:09 compute-0 podman[486324]: 2025-10-11 02:58:09.395164557 +0000 UTC m=+0.243359052 container attach 1a7c7e076bceb9a2bdba41b5ce2f6a9031cd03a34a21e475978f7df5e98ebf67 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_fermi, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:58:09 compute-0 keen_fermi[486376]: 167 167
Oct 11 02:58:09 compute-0 systemd[1]: libpod-1a7c7e076bceb9a2bdba41b5ce2f6a9031cd03a34a21e475978f7df5e98ebf67.scope: Deactivated successfully.
Oct 11 02:58:09 compute-0 podman[486324]: 2025-10-11 02:58:09.402980636 +0000 UTC m=+0.251175131 container died 1a7c7e076bceb9a2bdba41b5ce2f6a9031cd03a34a21e475978f7df5e98ebf67 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_fermi, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:58:09 compute-0 systemd[1]: var-lib-containers-storage-overlay-47d734cd76e7545871c1cb00da373dfe83d335960e39091050a62fd3fd8ed091-merged.mount: Deactivated successfully.
Oct 11 02:58:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2421: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:09 compute-0 podman[486324]: 2025-10-11 02:58:09.478866102 +0000 UTC m=+0.327060597 container remove 1a7c7e076bceb9a2bdba41b5ce2f6a9031cd03a34a21e475978f7df5e98ebf67 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_fermi, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:58:09 compute-0 ceph-mon[191930]: from='client.15723 -' entity='client.admin' cmd=[{"prefix": "osd pool autoscale-status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:09 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1649755828' entity='client.admin' cmd=[{"prefix": "osd pool ls", "detail": "detail"}]: dispatch
Oct 11 02:58:09 compute-0 systemd[1]: libpod-conmon-1a7c7e076bceb9a2bdba41b5ce2f6a9031cd03a34a21e475978f7df5e98ebf67.scope: Deactivated successfully.
Oct 11 02:58:09 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd stat"} v 0) v1
Oct 11 02:58:09 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/33920501' entity='client.admin' cmd=[{"prefix": "osd stat"}]: dispatch
Oct 11 02:58:09 compute-0 podman[486437]: 2025-10-11 02:58:09.724442081 +0000 UTC m=+0.090310600 container create ce2403e5a4091f254fca29fc574e5034c44b3e207c3404b631c285c9ead424c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_payne, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:58:09 compute-0 systemd[1]: Started libpod-conmon-ce2403e5a4091f254fca29fc574e5034c44b3e207c3404b631c285c9ead424c3.scope.
Oct 11 02:58:09 compute-0 podman[486437]: 2025-10-11 02:58:09.681793826 +0000 UTC m=+0.047662395 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:58:09 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:58:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b136bf53b9ede2c755cac7b976fd35e4039d66e4229315dfd1a3e08769cedc0b/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:58:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b136bf53b9ede2c755cac7b976fd35e4039d66e4229315dfd1a3e08769cedc0b/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:58:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b136bf53b9ede2c755cac7b976fd35e4039d66e4229315dfd1a3e08769cedc0b/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:58:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b136bf53b9ede2c755cac7b976fd35e4039d66e4229315dfd1a3e08769cedc0b/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:58:09 compute-0 podman[486437]: 2025-10-11 02:58:09.85497725 +0000 UTC m=+0.220845799 container init ce2403e5a4091f254fca29fc574e5034c44b3e207c3404b631c285c9ead424c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_payne, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0)
Oct 11 02:58:09 compute-0 podman[486437]: 2025-10-11 02:58:09.865993189 +0000 UTC m=+0.231861728 container start ce2403e5a4091f254fca29fc574e5034c44b3e207c3404b631c285c9ead424c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_payne, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:58:09 compute-0 podman[486437]: 2025-10-11 02:58:09.872714261 +0000 UTC m=+0.238582810 container attach ce2403e5a4091f254fca29fc574e5034c44b3e207c3404b631c285c9ead424c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_payne, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507)
Oct 11 02:58:09 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15729 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:10 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15731 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:10 compute-0 ceph-mon[191930]: pgmap v2421: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:10 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/33920501' entity='client.admin' cmd=[{"prefix": "osd stat"}]: dispatch
Oct 11 02:58:10 compute-0 ceph-mon[191930]: from='client.15729 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:10 compute-0 magical_payne[486489]: {
Oct 11 02:58:10 compute-0 magical_payne[486489]:     "0": [
Oct 11 02:58:10 compute-0 magical_payne[486489]:         {
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "devices": [
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "/dev/loop3"
Oct 11 02:58:10 compute-0 magical_payne[486489]:             ],
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "lv_name": "ceph_lv0",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "lv_size": "21470642176",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "name": "ceph_lv0",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "tags": {
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.cluster_name": "ceph",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.crush_device_class": "",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.encrypted": "0",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.osd_id": "0",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.type": "block",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.vdo": "0"
Oct 11 02:58:10 compute-0 magical_payne[486489]:             },
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "type": "block",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "vg_name": "ceph_vg0"
Oct 11 02:58:10 compute-0 magical_payne[486489]:         }
Oct 11 02:58:10 compute-0 magical_payne[486489]:     ],
Oct 11 02:58:10 compute-0 magical_payne[486489]:     "1": [
Oct 11 02:58:10 compute-0 magical_payne[486489]:         {
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "devices": [
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "/dev/loop4"
Oct 11 02:58:10 compute-0 magical_payne[486489]:             ],
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "lv_name": "ceph_lv1",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "lv_size": "21470642176",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "name": "ceph_lv1",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "tags": {
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.cluster_name": "ceph",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.crush_device_class": "",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.encrypted": "0",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.osd_id": "1",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.type": "block",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.vdo": "0"
Oct 11 02:58:10 compute-0 magical_payne[486489]:             },
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "type": "block",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "vg_name": "ceph_vg1"
Oct 11 02:58:10 compute-0 magical_payne[486489]:         }
Oct 11 02:58:10 compute-0 magical_payne[486489]:     ],
Oct 11 02:58:10 compute-0 magical_payne[486489]:     "2": [
Oct 11 02:58:10 compute-0 magical_payne[486489]:         {
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "devices": [
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "/dev/loop5"
Oct 11 02:58:10 compute-0 magical_payne[486489]:             ],
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "lv_name": "ceph_lv2",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "lv_size": "21470642176",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "name": "ceph_lv2",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "tags": {
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.cluster_name": "ceph",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.crush_device_class": "",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.encrypted": "0",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.osd_id": "2",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.type": "block",
Oct 11 02:58:10 compute-0 magical_payne[486489]:                 "ceph.vdo": "0"
Oct 11 02:58:10 compute-0 magical_payne[486489]:             },
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "type": "block",
Oct 11 02:58:10 compute-0 magical_payne[486489]:             "vg_name": "ceph_vg2"
Oct 11 02:58:10 compute-0 magical_payne[486489]:         }
Oct 11 02:58:10 compute-0 magical_payne[486489]:     ]
Oct 11 02:58:10 compute-0 magical_payne[486489]: }
Oct 11 02:58:10 compute-0 systemd[1]: libpod-ce2403e5a4091f254fca29fc574e5034c44b3e207c3404b631c285c9ead424c3.scope: Deactivated successfully.
Oct 11 02:58:10 compute-0 podman[486437]: 2025-10-11 02:58:10.669715075 +0000 UTC m=+1.035583614 container died ce2403e5a4091f254fca29fc574e5034c44b3e207c3404b631c285c9ead424c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_payne, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.license=GPLv2)
Oct 11 02:58:10 compute-0 systemd[1]: var-lib-containers-storage-overlay-b136bf53b9ede2c755cac7b976fd35e4039d66e4229315dfd1a3e08769cedc0b-merged.mount: Deactivated successfully.
Oct 11 02:58:10 compute-0 nova_compute[356901]: 2025-10-11 02:58:10.765 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:10 compute-0 podman[486437]: 2025-10-11 02:58:10.780657201 +0000 UTC m=+1.146525730 container remove ce2403e5a4091f254fca29fc574e5034c44b3e207c3404b631c285c9ead424c3 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_payne, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 02:58:10 compute-0 systemd[1]: libpod-conmon-ce2403e5a4091f254fca29fc574e5034c44b3e207c3404b631c285c9ead424c3.scope: Deactivated successfully.
Oct 11 02:58:10 compute-0 sudo[486211]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:10 compute-0 sudo[486620]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:58:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status"} v 0) v1
Oct 11 02:58:10 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/270017503' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
Oct 11 02:58:10 compute-0 sudo[486620]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:10 compute-0 sudo[486620]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:11 compute-0 sudo[486658]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:58:11 compute-0 sudo[486658]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:11 compute-0 sudo[486658]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:11 compute-0 sudo[486703]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:58:11 compute-0 sudo[486703]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:11 compute-0 sudo[486703]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:11 compute-0 sudo[486761]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:58:11 compute-0 sudo[486761]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "time-sync-status"} v 0) v1
Oct 11 02:58:11 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2060527392' entity='client.admin' cmd=[{"prefix": "time-sync-status"}]: dispatch
Oct 11 02:58:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2422: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:11 compute-0 ceph-mon[191930]: from='client.15731 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 02:58:11 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/270017503' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
Oct 11 02:58:11 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2060527392' entity='client.admin' cmd=[{"prefix": "time-sync-status"}]: dispatch
Oct 11 02:58:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config dump", "format": "json-pretty"} v 0) v1
Oct 11 02:58:11 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3130158615' entity='client.admin' cmd=[{"prefix": "config dump", "format": "json-pretty"}]: dispatch
Oct 11 02:58:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
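The mon_command entries above show the monitor's command protocol: every `ceph` CLI invocation is marshalled into a JSON object such as {"prefix": "status", "format": "json-pretty"}, dispatched to the leader mon, and audit-logged. A minimal sketch of driving that same path from Python, assuming a reachable cluster and the `ceph` CLI on PATH (the helper name ceph_json is illustrative, not something from this host):

    import json
    import subprocess

    def ceph_json(*args: str) -> dict:
        """Run a ceph CLI command and decode its JSON output."""
        out = subprocess.run(
            ["ceph", *args, "--format", "json"],
            check=True, capture_output=True, text=True,
        ).stdout
        return json.loads(out)

    # Each call below is what the mon audit log records as
    # cmd=[{"prefix": "...", "format": "json"}]: dispatch
    status = ceph_json("status")
    tsync = ceph_json("time-sync-status")
    print(status.get("health", {}).get("status"))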
Oct 11 02:58:11 compute-0 podman[486924]: 2025-10-11 02:58:11.831394798 +0000 UTC m=+0.075637118 container create e9a28755d63d54da555dccfcffed3939e75a7b04888197c1ec3398182d9258ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_buck, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Oct 11 02:58:11 compute-0 podman[486924]: 2025-10-11 02:58:11.802890837 +0000 UTC m=+0.047133197 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:58:11 compute-0 systemd[1]: Started libpod-conmon-e9a28755d63d54da555dccfcffed3939e75a7b04888197c1ec3398182d9258ac.scope.
Oct 11 02:58:11 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:58:11 compute-0 podman[486924]: 2025-10-11 02:58:11.966882743 +0000 UTC m=+0.211125083 container init e9a28755d63d54da555dccfcffed3939e75a7b04888197c1ec3398182d9258ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_buck, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS)
Oct 11 02:58:11 compute-0 podman[486924]: 2025-10-11 02:58:11.984633214 +0000 UTC m=+0.228875554 container start e9a28755d63d54da555dccfcffed3939e75a7b04888197c1ec3398182d9258ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_buck, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 02:58:11 compute-0 podman[486924]: 2025-10-11 02:58:11.989717408 +0000 UTC m=+0.233959738 container attach e9a28755d63d54da555dccfcffed3939e75a7b04888197c1ec3398182d9258ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_buck, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 02:58:11 compute-0 heuristic_buck[486967]: 167 167
Oct 11 02:58:11 compute-0 systemd[1]: libpod-e9a28755d63d54da555dccfcffed3939e75a7b04888197c1ec3398182d9258ac.scope: Deactivated successfully.
Oct 11 02:58:11 compute-0 podman[486924]: 2025-10-11 02:58:11.995833446 +0000 UTC m=+0.240075766 container died e9a28755d63d54da555dccfcffed3939e75a7b04888197c1ec3398182d9258ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_buck, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:58:12 compute-0 systemd[1]: var-lib-containers-storage-overlay-4d991477fcc86549fcd0e4575f2fdab256ed133dc3b4f627b2b524dcf1efa69e-merged.mount: Deactivated successfully.
Oct 11 02:58:12 compute-0 podman[486924]: 2025-10-11 02:58:12.05257788 +0000 UTC m=+0.296820200 container remove e9a28755d63d54da555dccfcffed3939e75a7b04888197c1ec3398182d9258ac (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_buck, io.buildah.version=1.39.3, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2)
Oct 11 02:58:12 compute-0 systemd[1]: libpod-conmon-e9a28755d63d54da555dccfcffed3939e75a7b04888197c1ec3398182d9258ac.scope: Deactivated successfully.
Oct 11 02:58:12 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15739 -' entity='client.admin' cmd=[{"prefix": "device ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:12 compute-0 podman[487058]: 2025-10-11 02:58:12.31421444 +0000 UTC m=+0.076316267 container create 55a41cb7a8d01d856c0205a945f6c3faa7e6ee31a1e1477c23e260d010cdb968 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_faraday, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:58:12 compute-0 podman[487058]: 2025-10-11 02:58:12.285368164 +0000 UTC m=+0.047469991 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:58:12 compute-0 systemd[1]: Started libpod-conmon-55a41cb7a8d01d856c0205a945f6c3faa7e6ee31a1e1477c23e260d010cdb968.scope.
Oct 11 02:58:12 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:58:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f3c80cd7c9416bdd89b0fec8387f135d9effae9b4727e0b20d51b51df38e16a4/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:58:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f3c80cd7c9416bdd89b0fec8387f135d9effae9b4727e0b20d51b51df38e16a4/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:58:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f3c80cd7c9416bdd89b0fec8387f135d9effae9b4727e0b20d51b51df38e16a4/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:58:12 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/f3c80cd7c9416bdd89b0fec8387f135d9effae9b4727e0b20d51b51df38e16a4/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:58:12 compute-0 podman[487058]: 2025-10-11 02:58:12.469209371 +0000 UTC m=+0.231311198 container init 55a41cb7a8d01d856c0205a945f6c3faa7e6ee31a1e1477c23e260d010cdb968 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_faraday, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 02:58:12 compute-0 podman[487058]: 2025-10-11 02:58:12.484208266 +0000 UTC m=+0.246310073 container start 55a41cb7a8d01d856c0205a945f6c3faa7e6ee31a1e1477c23e260d010cdb968 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_faraday, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:58:12 compute-0 podman[487058]: 2025-10-11 02:58:12.501088765 +0000 UTC m=+0.263190602 container attach 55a41cb7a8d01d856c0205a945f6c3faa7e6ee31a1e1477c23e260d010cdb968 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_faraday, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:58:12 compute-0 ceph-mon[191930]: pgmap v2422: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:12 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3130158615' entity='client.admin' cmd=[{"prefix": "config dump", "format": "json-pretty"}]: dispatch
Oct 11 02:58:12 compute-0 ceph-mon[191930]: from='client.15739 -' entity='client.admin' cmd=[{"prefix": "device ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "detail": "detail", "format": "json-pretty"} v 0) v1
Oct 11 02:58:12 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/4288756459' entity='client.admin' cmd=[{"prefix": "df", "detail": "detail", "format": "json-pretty"}]: dispatch
Oct 11 02:58:12 compute-0 nova_compute[356901]: 2025-10-11 02:58:12.832 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json-pretty"} v 0) v1
Oct 11 02:58:13 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3191334523' entity='client.admin' cmd=[{"prefix": "df", "format": "json-pretty"}]: dispatch
Oct 11 02:58:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2423: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:13 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/4288756459' entity='client.admin' cmd=[{"prefix": "df", "detail": "detail", "format": "json-pretty"}]: dispatch
Oct 11 02:58:13 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3191334523' entity='client.admin' cmd=[{"prefix": "df", "format": "json-pretty"}]: dispatch
Oct 11 02:58:13 compute-0 sharp_faraday[487102]: {
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:         "osd_id": 1,
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:         "type": "bluestore"
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:     },
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:         "osd_id": 2,
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:         "type": "bluestore"
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:     },
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:         "osd_id": 0,
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:         "type": "bluestore"
Oct 11 02:58:13 compute-0 sharp_faraday[487102]:     }
Oct 11 02:58:13 compute-0 sharp_faraday[487102]: }
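The sharp_faraday container above is cephadm's `ceph-volume raw list --format json` probe; its payload maps each OSD UUID to the backing LV device, osd_id, and objectstore type. A small sketch (not cephadm's own code) of consuming that shape, with one entry copied from the output above:

    import json

    # One entry from the payload above; the full output has three
    # (osd.0, osd.1, osd.2), keyed by OSD UUID.
    payload = '''{
      "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
        "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
        "device": "/dev/mapper/ceph_vg0-ceph_lv0",
        "osd_id": 0,
        "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
        "type": "bluestore"
      }
    }'''

    for osd_uuid, osd in json.loads(payload).items():
        print(f"osd.{osd['osd_id']}: {osd['device']} ({osd['type']})")
    # -> osd.0: /dev/mapper/ceph_vg0-ceph_lv0 (bluestore)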
Oct 11 02:58:13 compute-0 podman[487058]: 2025-10-11 02:58:13.614411015 +0000 UTC m=+1.376512822 container died 55a41cb7a8d01d856c0205a945f6c3faa7e6ee31a1e1477c23e260d010cdb968 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_faraday, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:58:13 compute-0 systemd[1]: libpod-55a41cb7a8d01d856c0205a945f6c3faa7e6ee31a1e1477c23e260d010cdb968.scope: Deactivated successfully.
Oct 11 02:58:13 compute-0 systemd[1]: libpod-55a41cb7a8d01d856c0205a945f6c3faa7e6ee31a1e1477c23e260d010cdb968.scope: Consumed 1.082s CPU time.
Oct 11 02:58:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "fs dump", "format": "json-pretty"} v 0) v1
Oct 11 02:58:13 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1524352026' entity='client.admin' cmd=[{"prefix": "fs dump", "format": "json-pretty"}]: dispatch
Oct 11 02:58:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-f3c80cd7c9416bdd89b0fec8387f135d9effae9b4727e0b20d51b51df38e16a4-merged.mount: Deactivated successfully.
Oct 11 02:58:13 compute-0 podman[487058]: 2025-10-11 02:58:13.779742777 +0000 UTC m=+1.541844584 container remove 55a41cb7a8d01d856c0205a945f6c3faa7e6ee31a1e1477c23e260d010cdb968 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=sharp_faraday, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:58:13 compute-0 systemd[1]: libpod-conmon-55a41cb7a8d01d856c0205a945f6c3faa7e6ee31a1e1477c23e260d010cdb968.scope: Deactivated successfully.
Oct 11 02:58:13 compute-0 sudo[486761]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:58:13 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:58:13 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:58:13 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:58:13 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev a45ac36d-f7e8-47b3-b448-4de452bf32f5 does not exist
Oct 11 02:58:13 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 988fb35c-e16a-4325-924a-85bc2f546b76 does not exist
Oct 11 02:58:13 compute-0 sudo[487571]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:58:13 compute-0 sudo[487571]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:13 compute-0 sudo[487571]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:14 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "fs ls", "format": "json-pretty"} v 0) v1
Oct 11 02:58:14 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3051949057' entity='client.admin' cmd=[{"prefix": "fs ls", "format": "json-pretty"}]: dispatch
Oct 11 02:58:14 compute-0 sudo[487610]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:58:14 compute-0 sudo[487610]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:58:14 compute-0 sudo[487610]: pam_unix(sudo:session): session closed for user root
Oct 11 02:58:14 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15749 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:14 compute-0 ovs-appctl[487745]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Oct 11 02:58:14 compute-0 ovs-appctl[487749]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Oct 11 02:58:14 compute-0 ovs-appctl[487759]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Oct 11 02:58:14 compute-0 ceph-mon[191930]: pgmap v2423: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:14 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1524352026' entity='client.admin' cmd=[{"prefix": "fs dump", "format": "json-pretty"}]: dispatch
Oct 11 02:58:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:58:14 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:58:14 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3051949057' entity='client.admin' cmd=[{"prefix": "fs ls", "format": "json-pretty"}]: dispatch
Oct 11 02:58:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mds stat", "format": "json-pretty"} v 0) v1
Oct 11 02:58:15 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/883174776' entity='client.admin' cmd=[{"prefix": "mds stat", "format": "json-pretty"}]: dispatch
Oct 11 02:58:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2424: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json-pretty"} v 0) v1
Oct 11 02:58:15 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1379200778' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json-pretty"}]: dispatch
Oct 11 02:58:15 compute-0 nova_compute[356901]: 2025-10-11 02:58:15.772 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:15 compute-0 ceph-mon[191930]: from='client.15749 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:15 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/883174776' entity='client.admin' cmd=[{"prefix": "mds stat", "format": "json-pretty"}]: dispatch
Oct 11 02:58:15 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1379200778' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json-pretty"}]: dispatch
Oct 11 02:58:15 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15755 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd blocklist ls", "format": "json-pretty"} v 0) v1
Oct 11 02:58:16 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2281677987' entity='client.admin' cmd=[{"prefix": "osd blocklist ls", "format": "json-pretty"}]: dispatch
Oct 11 02:58:16 compute-0 nova_compute[356901]: 2025-10-11 02:58:16.753 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:58:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:58:16 compute-0 ceph-mon[191930]: pgmap v2424: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:16 compute-0 ceph-mon[191930]: from='client.15755 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:16 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2281677987' entity='client.admin' cmd=[{"prefix": "osd blocklist ls", "format": "json-pretty"}]: dispatch
Oct 11 02:58:16 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15759 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:17 compute-0 podman[488175]: 2025-10-11 02:58:17.267844958 +0000 UTC m=+0.149401711 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_id=edpm, io.buildah.version=1.29.0, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, version=9.4, io.k8s.display-name=Red Hat Universal Base Image 9, name=ubi9, release-0.7.12=, io.openshift.expose-services=, release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2024-09-18T21:23:30, maintainer=Red Hat, Inc., vcs-type=git, io.openshift.tags=base rhel9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, architecture=x86_64, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, com.redhat.component=ubi9-container, container_name=kepler, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 02:58:17 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15761 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2425: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:17 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd dump", "format": "json-pretty"} v 0) v1
Oct 11 02:58:17 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2158534784' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json-pretty"}]: dispatch
Oct 11 02:58:17 compute-0 nova_compute[356901]: 2025-10-11 02:58:17.838 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:17 compute-0 ceph-mon[191930]: from='client.15759 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:17 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2158534784' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json-pretty"}]: dispatch
Oct 11 02:58:17 compute-0 nova_compute[356901]: 2025-10-11 02:58:17.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:58:18 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd numa-status", "format": "json-pretty"} v 0) v1
Oct 11 02:58:18 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3487470610' entity='client.admin' cmd=[{"prefix": "osd numa-status", "format": "json-pretty"}]: dispatch
Oct 11 02:58:18 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15767 -' entity='client.admin' cmd=[{"prefix": "osd perf", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:18 compute-0 ceph-mon[191930]: from='client.15761 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:18 compute-0 ceph-mon[191930]: pgmap v2425: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:18 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3487470610' entity='client.admin' cmd=[{"prefix": "osd numa-status", "format": "json-pretty"}]: dispatch
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15769 -' entity='client.admin' cmd=[{"prefix": "osd pool autoscale-status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0009191400908380543 of space, bias 1.0, pg target 0.2757420272514163 quantized to 32 (current 32)
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
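The pg_autoscaler lines above follow a visible arithmetic: the raw pg target is the pool's share of raw capacity, times its bias, times the root's PG budget, which works out to 300 here (consistent with 3 OSDs at the default mon_target_pg_per_osd of 100; an inference from the numbers, not read from config). The raw value is then quantized to a power of two subject to per-pool minimums. A sketch that reproduces the logged figures:

    # PG budget inferred from the log above: every pool satisfies
    # pg_target = capacity_ratio * bias * 300.
    PG_BUDGET = 300

    def pg_target(capacity_ratio: float, bias: float = 1.0) -> float:
        return capacity_ratio * bias * PG_BUDGET

    print(pg_target(0.0005513950275118838))       # 'vms' -> ~0.1654185..., as logged
    print(pg_target(5.087256625643029e-07, 4.0))  # 'cephfs.cephfs.meta' -> ~0.000610470..., as logged
    # The autoscaler then rounds to a power of two and applies pool
    # minimums, hence "quantized to 32" / "quantized to 16" above.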
Oct 11 02:58:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2426: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool ls", "detail": "detail", "format": "json-pretty"} v 0) v1
Oct 11 02:58:19 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3556609822' entity='client.admin' cmd=[{"prefix": "osd pool ls", "detail": "detail", "format": "json-pretty"}]: dispatch
Oct 11 02:58:19 compute-0 ceph-mon[191930]: from='client.15767 -' entity='client.admin' cmd=[{"prefix": "osd perf", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:19 compute-0 ceph-mon[191930]: from='client.15769 -' entity='client.admin' cmd=[{"prefix": "osd pool autoscale-status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:19 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3556609822' entity='client.admin' cmd=[{"prefix": "osd pool ls", "detail": "detail", "format": "json-pretty"}]: dispatch
Oct 11 02:58:19 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd stat", "format": "json-pretty"} v 0) v1
Oct 11 02:58:19 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3557282388' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json-pretty"}]: dispatch
Oct 11 02:58:20 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15775 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:20 compute-0 nova_compute[356901]: 2025-10-11 02:58:20.774 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:20 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15777 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:20 compute-0 nova_compute[356901]: 2025-10-11 02:58:20.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:58:20 compute-0 ceph-mon[191930]: pgmap v2426: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:20 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3557282388' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json-pretty"}]: dispatch
Oct 11 02:58:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json-pretty"} v 0) v1
Oct 11 02:58:21 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1013493414' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 02:58:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2427: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:21 compute-0 podman[489222]: 2025-10-11 02:58:21.689994969 +0000 UTC m=+0.100424114 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, config_id=ovn_metadata_agent)
Oct 11 02:58:21 compute-0 podman[489212]: 2025-10-11 02:58:21.698623437 +0000 UTC m=+0.123923827 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 02:58:21 compute-0 podman[489219]: 2025-10-11 02:58:21.748902788 +0000 UTC m=+0.165720540 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, tcib_managed=true, config_id=ovn_controller, managed_by=edpm_ansible)
Oct 11 02:58:21 compute-0 podman[489220]: 2025-10-11 02:58:21.823643681 +0000 UTC m=+0.237162927 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 10 Base Image, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, container_name=ceilometer_agent_compute, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 02:58:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:58:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "time-sync-status", "format": "json-pretty"} v 0) v1
Oct 11 02:58:21 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/120738989' entity='client.admin' cmd=[{"prefix": "time-sync-status", "format": "json-pretty"}]: dispatch
Oct 11 02:58:22 compute-0 ceph-mon[191930]: from='client.15775 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:22 compute-0 ceph-mon[191930]: from='client.15777 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 02:58:22 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1013493414' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 02:58:22 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/120738989' entity='client.admin' cmd=[{"prefix": "time-sync-status", "format": "json-pretty"}]: dispatch
Oct 11 02:58:22 compute-0 nova_compute[356901]: 2025-10-11 02:58:22.840 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:23 compute-0 ceph-mon[191930]: pgmap v2427: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2428: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:24 compute-0 ceph-mon[191930]: pgmap v2428: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:24 compute-0 nova_compute[356901]: 2025-10-11 02:58:24.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:58:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2429: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:25 compute-0 nova_compute[356901]: 2025-10-11 02:58:25.778 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:25 compute-0 nova_compute[356901]: 2025-10-11 02:58:25.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:58:25 compute-0 nova_compute[356901]: 2025-10-11 02:58:25.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:58:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:58:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:58:26 compute-0 ceph-mon[191930]: pgmap v2429: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:58:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:58:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:58:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:58:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:58:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2430: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:58:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1264692340' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:58:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:58:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1264692340' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:58:27 compute-0 nova_compute[356901]: 2025-10-11 02:58:27.844 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:28 compute-0 ceph-mon[191930]: pgmap v2430: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1264692340' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:58:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1264692340' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:58:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2431: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:29 compute-0 podman[157119]: time="2025-10-11T02:58:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:58:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:58:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:58:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:58:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9104 "" "Go-http-client/1.1"
Oct 11 02:58:30 compute-0 podman[489811]: 2025-10-11 02:58:30.262839799 +0000 UTC m=+0.141272479 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
Oct 11 02:58:30 compute-0 podman[489810]: 2025-10-11 02:58:30.263639936 +0000 UTC m=+0.154486746 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_managed=true, config_id=multipathd, container_name=multipathd, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']})
Oct 11 02:58:30 compute-0 nova_compute[356901]: 2025-10-11 02:58:30.780 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:30 compute-0 ceph-mon[191930]: pgmap v2431: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:30 compute-0 nova_compute[356901]: 2025-10-11 02:58:30.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:58:30 compute-0 nova_compute[356901]: 2025-10-11 02:58:30.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:58:30 compute-0 nova_compute[356901]: 2025-10-11 02:58:30.899 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:58:31 compute-0 openstack_network_exporter[374316]: ERROR   02:58:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:58:31 compute-0 openstack_network_exporter[374316]: ERROR   02:58:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:58:31 compute-0 openstack_network_exporter[374316]: ERROR   02:58:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:58:31 compute-0 openstack_network_exporter[374316]: ERROR   02:58:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:58:31 compute-0 openstack_network_exporter[374316]: ERROR   02:58:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:58:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2432: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:58:32 compute-0 nova_compute[356901]: 2025-10-11 02:58:32.015 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:58:32 compute-0 nova_compute[356901]: 2025-10-11 02:58:32.015 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:58:32 compute-0 nova_compute[356901]: 2025-10-11 02:58:32.015 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:58:32 compute-0 nova_compute[356901]: 2025-10-11 02:58:32.016 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:58:32 compute-0 nova_compute[356901]: 2025-10-11 02:58:32.848 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:32 compute-0 ceph-mon[191930]: pgmap v2432: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2433: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:33 compute-0 virtqemud[153560]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Oct 11 02:58:33 compute-0 nova_compute[356901]: 2025-10-11 02:58:33.817 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:58:33 compute-0 nova_compute[356901]: 2025-10-11 02:58:33.843 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:58:33 compute-0 nova_compute[356901]: 2025-10-11 02:58:33.843 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:58:33 compute-0 nova_compute[356901]: 2025-10-11 02:58:33.843 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:58:33 compute-0 nova_compute[356901]: 2025-10-11 02:58:33.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:58:33 compute-0 nova_compute[356901]: 2025-10-11 02:58:33.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:58:33 compute-0 nova_compute[356901]: 2025-10-11 02:58:33.926 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:58:33 compute-0 nova_compute[356901]: 2025-10-11 02:58:33.926 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:58:33 compute-0 nova_compute[356901]: 2025-10-11 02:58:33.926 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:58:33 compute-0 nova_compute[356901]: 2025-10-11 02:58:33.927 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:58:33 compute-0 nova_compute[356901]: 2025-10-11 02:58:33.927 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:58:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:58:34 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1966147378' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:58:34 compute-0 nova_compute[356901]: 2025-10-11 02:58:34.453 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.526s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:58:34 compute-0 nova_compute[356901]: 2025-10-11 02:58:34.545 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:58:34 compute-0 nova_compute[356901]: 2025-10-11 02:58:34.545 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:58:34 compute-0 nova_compute[356901]: 2025-10-11 02:58:34.545 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:58:34 compute-0 unix_chkpwd[490271]: password check failed for user (root)
Oct 11 02:58:34 compute-0 sshd-session[490184]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.33  user=root
Oct 11 02:58:34 compute-0 systemd[1]: systemd-hostnamed.service: Deactivated successfully.
Oct 11 02:58:34 compute-0 ceph-mon[191930]: pgmap v2433: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:34 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1966147378' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:58:34 compute-0 nova_compute[356901]: 2025-10-11 02:58:34.980 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:58:34 compute-0 nova_compute[356901]: 2025-10-11 02:58:34.981 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3463MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:58:34 compute-0 nova_compute[356901]: 2025-10-11 02:58:34.981 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:58:34 compute-0 nova_compute[356901]: 2025-10-11 02:58:34.982 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:58:35 compute-0 nova_compute[356901]: 2025-10-11 02:58:35.075 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:58:35 compute-0 nova_compute[356901]: 2025-10-11 02:58:35.076 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:58:35 compute-0 nova_compute[356901]: 2025-10-11 02:58:35.076 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:58:35 compute-0 nova_compute[356901]: 2025-10-11 02:58:35.124 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:58:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2434: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:58:35 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3394435827' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:58:35 compute-0 nova_compute[356901]: 2025-10-11 02:58:35.596 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.472s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:58:35 compute-0 nova_compute[356901]: 2025-10-11 02:58:35.607 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:58:35 compute-0 nova_compute[356901]: 2025-10-11 02:58:35.628 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:58:35 compute-0 nova_compute[356901]: 2025-10-11 02:58:35.630 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:58:35 compute-0 nova_compute[356901]: 2025-10-11 02:58:35.630 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.648s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:58:35 compute-0 nova_compute[356901]: 2025-10-11 02:58:35.783 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:35 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3394435827' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:58:36 compute-0 systemd[1]: Starting Time & Date Service...
Oct 11 02:58:36 compute-0 systemd[1]: Started Time & Date Service.
Oct 11 02:58:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:58:36 compute-0 systemd[1]: Starting Hostname Service...
Oct 11 02:58:36 compute-0 ceph-mon[191930]: pgmap v2434: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:37 compute-0 systemd[1]: Started Hostname Service.
Oct 11 02:58:37 compute-0 sshd-session[490184]: Failed password for root from 193.46.255.33 port 15840 ssh2
Oct 11 02:58:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2435: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:37 compute-0 nova_compute[356901]: 2025-10-11 02:58:37.852 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:38 compute-0 unix_chkpwd[490400]: password check failed for user (root)
Oct 11 02:58:38 compute-0 ceph-mon[191930]: pgmap v2435: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:39 compute-0 podman[490401]: 2025-10-11 02:58:39.240090145 +0000 UTC m=+0.120023608 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_ipmi)
Oct 11 02:58:39 compute-0 podman[490403]: 2025-10-11 02:58:39.243611457 +0000 UTC m=+0.110808887 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:58:39 compute-0 podman[490402]: 2025-10-11 02:58:39.262706425 +0000 UTC m=+0.150010055 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., name=ubi9-minimal, vcs-type=git, config_id=edpm, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, architecture=x86_64, com.redhat.component=ubi9-minimal-container, io.buildah.version=1.33.7, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., managed_by=edpm_ansible, container_name=openstack_network_exporter, release=1755695350, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc., version=9.6, io.openshift.expose-services=, io.openshift.tags=minimal rhel9, build-date=2025-08-20T13:12:41, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:58:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2436: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:39 compute-0 sshd-session[490184]: Failed password for root from 193.46.255.33 port 15840 ssh2
Oct 11 02:58:40 compute-0 nova_compute[356901]: 2025-10-11 02:58:40.785 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:40 compute-0 ceph-mon[191930]: pgmap v2436: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:41 compute-0 unix_chkpwd[490461]: password check failed for user (root)
Oct 11 02:58:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2437: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:58:42 compute-0 nova_compute[356901]: 2025-10-11 02:58:42.857 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:42 compute-0 ceph-mon[191930]: pgmap v2437: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:43 compute-0 sshd-session[490184]: Failed password for root from 193.46.255.33 port 15840 ssh2
Oct 11 02:58:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2438: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:44 compute-0 sshd-session[490184]: Received disconnect from 193.46.255.33 port 15840:11:  [preauth]
Oct 11 02:58:44 compute-0 sshd-session[490184]: Disconnected from authenticating user root 193.46.255.33 port 15840 [preauth]
Oct 11 02:58:44 compute-0 sshd-session[490184]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.33  user=root
Oct 11 02:58:45 compute-0 ceph-mon[191930]: pgmap v2438: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:45 compute-0 unix_chkpwd[490464]: password check failed for user (root)
Oct 11 02:58:45 compute-0 sshd-session[490462]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.33  user=root
Oct 11 02:58:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2439: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:45 compute-0 nova_compute[356901]: 2025-10-11 02:58:45.788 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:58:47 compute-0 ceph-mon[191930]: pgmap v2439: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:47 compute-0 podman[490465]: 2025-10-11 02:58:47.482219907 +0000 UTC m=+0.122009680 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=kepler, release=1214.1726694543, managed_by=edpm_ansible, name=ubi9, config_id=edpm, com.redhat.component=ubi9-container, release-0.7.12=, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-type=git, vendor=Red Hat, Inc., architecture=x86_64, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, version=9.4, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=base rhel9, build-date=2024-09-18T21:23:30, maintainer=Red Hat, Inc., io.buildah.version=1.29.0, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 02:58:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2440: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:47 compute-0 sshd-session[490462]: Failed password for root from 193.46.255.33 port 25028 ssh2
Oct 11 02:58:47 compute-0 nova_compute[356901]: 2025-10-11 02:58:47.861 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:48 compute-0 unix_chkpwd[490485]: password check failed for user (root)
Oct 11 02:58:49 compute-0 ceph-mon[191930]: pgmap v2440: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2441: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:49 compute-0 sshd-session[490462]: Failed password for root from 193.46.255.33 port 25028 ssh2
Oct 11 02:58:50 compute-0 unix_chkpwd[490487]: password check failed for user (root)
Oct 11 02:58:50 compute-0 nova_compute[356901]: 2025-10-11 02:58:50.626 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:58:50 compute-0 nova_compute[356901]: 2025-10-11 02:58:50.791 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:51 compute-0 ceph-mon[191930]: pgmap v2441: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2442: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:58:52 compute-0 podman[490488]: 2025-10-11 02:58:52.136078375 +0000 UTC m=+0.130247800 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:58:52 compute-0 podman[490491]: 2025-10-11 02:58:52.136766634 +0000 UTC m=+0.119106382 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 02:58:52 compute-0 podman[490490]: 2025-10-11 02:58:52.15221314 +0000 UTC m=+0.124334126 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251007, config_id=edpm)
Oct 11 02:58:52 compute-0 podman[490489]: 2025-10-11 02:58:52.173961128 +0000 UTC m=+0.146556988 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, config_id=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 02:58:52 compute-0 sshd-session[490462]: Failed password for root from 193.46.255.33 port 25028 ssh2
Oct 11 02:58:52 compute-0 nova_compute[356901]: 2025-10-11 02:58:52.865 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:53 compute-0 ceph-mon[191930]: pgmap v2442: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:53 compute-0 sshd-session[490462]: Received disconnect from 193.46.255.33 port 25028:11:  [preauth]
Oct 11 02:58:53 compute-0 sshd-session[490462]: Disconnected from authenticating user root 193.46.255.33 port 25028 [preauth]
Oct 11 02:58:53 compute-0 sshd-session[490462]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.33  user=root
Oct 11 02:58:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2443: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:54 compute-0 unix_chkpwd[490570]: password check failed for user (root)
Oct 11 02:58:54 compute-0 sshd-session[490568]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.33  user=root
Oct 11 02:58:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:58:54.893 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:58:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:58:54.893 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:58:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:58:54.893 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:58:55 compute-0 ceph-mon[191930]: pgmap v2443: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2444: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:55 compute-0 nova_compute[356901]: 2025-10-11 02:58:55.794 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:56 compute-0 sshd-session[490568]: Failed password for root from 193.46.255.33 port 43258 ssh2
Oct 11 02:58:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:58:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:58:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:58:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:58:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:58:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:58:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:58:56
Oct 11 02:58:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:58:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:58:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['vms', '.rgw.root', 'cephfs.cephfs.meta', 'default.rgw.log', '.mgr', 'backups', 'images', 'cephfs.cephfs.data', 'default.rgw.control', 'default.rgw.meta', 'volumes']
Oct 11 02:58:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:58:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:58:57 compute-0 ceph-mon[191930]: pgmap v2444: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2445: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:57 compute-0 unix_chkpwd[490571]: password check failed for user (root)
Oct 11 02:58:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:58:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:58:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:58:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:58:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:58:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:58:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:58:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:58:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:58:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:58:57 compute-0 nova_compute[356901]: 2025-10-11 02:58:57.871 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:58:59 compute-0 ceph-mon[191930]: pgmap v2445: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2446: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:58:59 compute-0 podman[157119]: time="2025-10-11T02:58:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:58:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:58:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:58:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:58:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9108 "" "Go-http-client/1.1"
Oct 11 02:58:59 compute-0 sshd-session[490568]: Failed password for root from 193.46.255.33 port 43258 ssh2
Oct 11 02:59:00 compute-0 unix_chkpwd[490572]: password check failed for user (root)
Oct 11 02:59:00 compute-0 nova_compute[356901]: 2025-10-11 02:59:00.797 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:01 compute-0 podman[490574]: 2025-10-11 02:59:01.210909858 +0000 UTC m=+0.094112761 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, container_name=iscsid, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_id=iscsid, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']})
Oct 11 02:59:01 compute-0 podman[490573]: 2025-10-11 02:59:01.220947007 +0000 UTC m=+0.106231499 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, container_name=multipathd, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd)
Oct 11 02:59:01 compute-0 ceph-mon[191930]: pgmap v2446: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:01 compute-0 openstack_network_exporter[374316]: ERROR   02:59:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:59:01 compute-0 openstack_network_exporter[374316]: ERROR   02:59:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:59:01 compute-0 openstack_network_exporter[374316]: ERROR   02:59:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:59:01 compute-0 openstack_network_exporter[374316]: ERROR   02:59:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:59:01 compute-0 openstack_network_exporter[374316]: ERROR   02:59:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:59:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2447: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:59:01 compute-0 sshd-session[490568]: Failed password for root from 193.46.255.33 port 43258 ssh2
Oct 11 02:59:02 compute-0 sshd-session[490568]: Received disconnect from 193.46.255.33 port 43258:11:  [preauth]
Oct 11 02:59:02 compute-0 sshd-session[490568]: Disconnected from authenticating user root 193.46.255.33 port 43258 [preauth]
Oct 11 02:59:02 compute-0 sshd-session[490568]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.33  user=root
Oct 11 02:59:02 compute-0 nova_compute[356901]: 2025-10-11 02:59:02.878 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:03 compute-0 ceph-mon[191930]: pgmap v2447: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2448: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:04 compute-0 ceph-mon[191930]: pgmap v2448: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2449: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:05 compute-0 nova_compute[356901]: 2025-10-11 02:59:05.801 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:06 compute-0 ceph-mon[191930]: pgmap v2449: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:06 compute-0 systemd[1]: systemd-timedated.service: Deactivated successfully.
Oct 11 02:59:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:59:07 compute-0 systemd[1]: systemd-hostnamed.service: Deactivated successfully.
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0009191400908380543 of space, bias 1.0, pg target 0.2757420272514163 quantized to 32 (current 32)
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
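The pg_autoscaler block above is reproducible arithmetic: for each pool, pg target = (fraction of raw space the pool uses) x bias x a cluster-wide PG budget, after which the result is quantized to a power of two with a per-pool floor (pg_num_min), and pools keep their current pg_num unless the target differs by the autoscaler's threshold factor. The budget implied by every line above is 300, consistent with the default mon_target_pg_per_osd = 100 on a 3-OSD cluster; that is an inference from the logged numbers, not something the log states. A sketch:

    def pg_target(usage_ratio, bias, osds=3, target_pg_per_osd=100):
        """Reproduce the autoscaler's logged 'pg target' value.

        usage_ratio and bias are copied verbatim from the log lines above;
        osds=3 and target_pg_per_osd=100 are inferred, since every logged
        target equals usage_ratio * bias * 300.
        """
        return usage_ratio * bias * osds * target_pg_per_osd

    def quantize(target, pg_num_min=1):
        """Round up to the nearest power of two, never below pg_num_min."""
        n = pg_num_min
        while n < target:
            n *= 2
        return n

    # Spot-checks against the log:
    assert abs(pg_target(0.0005513950275118838, 1.0) - 0.16541850825356513) < 1e-12   # 'vms'
    assert abs(pg_target(5.087256625643029e-07, 4.0) - 0.0006104707950771635) < 1e-12  # 'cephfs.cephfs.meta'
    print(pg_target(0.0009191400908380543, 1.0))  # 'images' -> ~0.2757, quantized stays at 32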
Oct 11 02:59:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2450: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:07 compute-0 nova_compute[356901]: 2025-10-11 02:59:07.882 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:08 compute-0 ceph-mon[191930]: pgmap v2450: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2451: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:09 compute-0 podman[490616]: 2025-10-11 02:59:09.909661446 +0000 UTC m=+0.081333766 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 02:59:09 compute-0 podman[490615]: 2025-10-11 02:59:09.929626526 +0000 UTC m=+0.107416214 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., url=https://catalog.redhat.com/en/search?searchType=containers, build-date=2025-08-20T13:12:41, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.buildah.version=1.33.7, com.redhat.component=ubi9-minimal-container, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., version=9.6, vcs-type=git, managed_by=edpm_ansible, config_id=edpm, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., container_name=openstack_network_exporter, release=1755695350, name=ubi9-minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, architecture=x86_64, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, distribution-scope=public, io.openshift.expose-services=)
Oct 11 02:59:09 compute-0 podman[490614]: 2025-10-11 02:59:09.942893807 +0000 UTC m=+0.123254419 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=edpm, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true)
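The three podman records above are periodic health_status events: each container's configured healthcheck (the 'test' command in config_data, run against the mounted /openstack healthcheck script) returned healthy with a failing streak of 0. The same check can be triggered on demand via podman's healthcheck subcommand; a small sketch, with the container names taken from the log:

    import subprocess

    def run_healthcheck(container):
        """Invoke the container's configured healthcheck once.

        'podman healthcheck run' executes the healthcheck command stored in
        the container config (e.g. '/openstack/healthcheck node_exporter'
        above) and exits 0 when healthy, non-zero otherwise.
        """
        result = subprocess.run(
            ["podman", "healthcheck", "run", container],
            capture_output=True, text=True,
        )
        return result.returncode == 0

    for name in ("node_exporter", "openstack_network_exporter", "ceilometer_agent_ipmi"):
        print(name, "healthy" if run_healthcheck(name) else "unhealthy")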
Oct 11 02:59:10 compute-0 ceph-mon[191930]: pgmap v2451: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:10 compute-0 nova_compute[356901]: 2025-10-11 02:59:10.806 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2452: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:59:12 compute-0 ceph-mon[191930]: pgmap v2452: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:12 compute-0 nova_compute[356901]: 2025-10-11 02:59:12.885 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2453: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.876 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is larger than the number of worker threads available to execute them. Therefore, the polling process can be expected to take longer than expected. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.877 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
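The two manager lines above explain the burst of DEBUG output that follows: this agent has more pollsters in the [pollsters] source than worker threads, so every pollster is registered onto a single-threaded executor and they effectively run one after another. A minimal sketch of that serialization, assuming a ThreadPoolExecutor like the one the manager logs (the pollster names are taken from the log; the work function is a stand-in):

    from concurrent.futures import ThreadPoolExecutor

    def poll(name):
        # Stand-in for one pollster's work (discovery plus sampling).
        return f"polled {name}"

    pollsters = ["network.incoming.bytes", "network.outgoing.packets",
                 "disk.device.capacity", "disk.device.read.bytes", "power.state"]

    # One worker for many pollsters: tasks queue up and run sequentially,
    # which is why a full polling pass "can be expected to take longer".
    with ThreadPoolExecutor(max_workers=1) as executor:
        for result in executor.map(poll, pollsters):
            print(result)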
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.877 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.878 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.879 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.879 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.879 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.879 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.879 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4f710>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.886 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.887 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.887 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.887 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.887 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.888 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T02:59:13.887475) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.896 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2856 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.897 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.897 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.897 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.897 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.897 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.897 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.898 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 25 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.898 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.898 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.898 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.899 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.899 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.899 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.899 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.899 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.899 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.900 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.900 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.900 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.900 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.900 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.901 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.901 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.901 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.901 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.901 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.901 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.902 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T02:59:13.897935) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.902 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T02:59:13.899321) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.903 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T02:59:13.900493) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.903 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T02:59:13.901649) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.930 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.931 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.931 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.932 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.932 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.932 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.932 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.932 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.933 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.934 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T02:59:13.933019) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.991 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.992 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.993 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.993 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.993 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.993 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.993 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.994 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.994 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.994 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.994 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T02:59:13.994152) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.994 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.995 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.995 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.995 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.996 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.996 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.996 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.996 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.996 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.996 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.997 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T02:59:13.996397) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.997 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.997 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.997 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.998 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.998 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.998 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.998 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.998 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.998 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.999 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T02:59:13.998399) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:13.999 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.000 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.000 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.000 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.000 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.000 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.000 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.001 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.001 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.001 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T02:59:14.000817) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.001 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.002 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.002 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.002 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.002 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.002 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.002 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.003 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.003 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.003 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.003 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T02:59:14.002892) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.004 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.004 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.004 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.004 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.005 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.005 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.005 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T02:59:14.005080) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.035 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.036 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.036 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.036 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.037 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.037 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.037 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.037 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.037 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.038 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T02:59:14.037305) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.038 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.038 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
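
The three disk.device.write.requests samples line up one-to-one with the three disk.device.write.latency samples earlier in the cycle (one entry per device on this instance), and latency is a cumulative nanosecond counter, so a mean per-write latency falls out directly. A worked sketch using this cycle's values (device ordering is assumed consistent between the two meters):

    # Values copied from the two sample bursts above (one entry per device).
    lat_ns = [6143855966, 32496400, 0]   # cumulative write latency, ns
    reqs   = [233, 1, 0]                 # cumulative write request counts

    for dev, (ns, n) in enumerate(zip(lat_ns, reqs)):
        mean_ms = ns / n / 1e6 if n else 0.0
        print(f"device {dev}: mean write latency {mean_ms:.1f} ms")
    # device 0: ~26.4 ms, device 1: ~32.5 ms, device 2: idle
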
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.038 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.038 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.039 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.039 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.039 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.039 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.039 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T02:59:14.039319) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.040 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.040 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.040 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.040 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.040 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.040 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.040 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.041 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.041 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.041 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T02:59:14.040948) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.041 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.041 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.041 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.042 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.042 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.042 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.042 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T02:59:14.042123) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.042 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.043 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.043 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.043 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.043 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.043 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.043 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 70 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.044 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.044 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T02:59:14.043492) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.044 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.044 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.044 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.044 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.044 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.045 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T02:59:14.044788) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.045 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.045 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.045 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.045 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.045 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.045 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.046 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T02:59:14.045912) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.046 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.046 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.046 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.046 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.046 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.047 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.047 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.047 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.047 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T02:59:14.047171) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.047 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.048 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.048 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.048 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.048 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.048 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.048 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.049 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.049 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T02:59:14.049084) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.049 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.049 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.049 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.050 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.050 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.050 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.050 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.050 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 70360000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.050 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T02:59:14.050405) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.051 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
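
The cpu meter is a cumulative counter of guest CPU time in nanoseconds, so the 70360000000 sampled above is roughly 70.4 s of CPU consumed since the instance booted; utilisation comes from differencing two polls. A worked sketch (the second sample, the 300 s interval, and the single-vCPU count are assumptions for illustration):

    # First value from the log above; second sample and interval assumed.
    t0_ns, t1_ns = 70_360_000_000, 70_660_000_000
    interval_s, vcpus = 300, 1
    util_pct = (t1_ns - t0_ns) / 1e9 / (interval_s * vcpus) * 100
    print(f"cpu_util ~= {util_pct:.2f}%")   # 0.10%
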
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.051 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.051 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.051 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.051 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.051 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.052 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2482 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.052 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.052 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T02:59:14.051919) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.053 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.053 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.053 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.053 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.053 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.053 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T02:59:14.053568) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.053 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.80859375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.054 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.054 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.054 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.055 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.055 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.055 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.055 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.055 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.055 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.056 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.056 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.056 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.056 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.056 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.056 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.056 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.056 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.057 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.057 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.057 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.057 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.057 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.057 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.057 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.057 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.058 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.058 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.058 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 02:59:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 02:59:14.058 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
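
That run of "Finished processing pollster [...]" lines is the manager draining one polling task: every pollster in the task reports completion, including the two that were skipped for lack of new resources. A small cross-check that nothing started without finishing (input assumed to be the journal lines for one polling interval; the helper is illustrative):

    import re
    from typing import Iterable

    START_RE = re.compile(r"Polling pollster (\S+) in the context")
    DONE_RE = re.compile(r"Finished processing pollster \[([^\]]+)\]")

    def unfinished(lines: Iterable[str]) -> set[str]:
        """Pollsters that logged a start but no completion in this cycle."""
        started: set[str] = set()
        finished: set[str] = set()
        for line in lines:
            if m := START_RE.search(line):
                started.add(m.group(1))
            elif m := DONE_RE.search(line):
                finished.add(m.group(1))
        return started - finished
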
Oct 11 02:59:14 compute-0 sudo[490677]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:59:14 compute-0 sudo[490677]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:59:14 compute-0 sudo[490677]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:14 compute-0 sudo[490702]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:59:14 compute-0 sudo[490702]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:59:14 compute-0 sudo[490702]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:14 compute-0 sudo[490727]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:59:14 compute-0 sudo[490727]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:59:14 compute-0 sudo[490727]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:14 compute-0 sudo[490752]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 02:59:14 compute-0 sudo[490752]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:59:14 compute-0 ceph-mon[191930]: pgmap v2453: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:15 compute-0 sudo[490752]: pam_unix(sudo:session): session closed for user root
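
The sudo triplets above are cephadm's ssh orchestrator probing the host before each real step: a /bin/true no-op to confirm passwordless sudo, a /bin/which lookup for python3, then the payload itself (here the gather-facts call). A simplified local sketch of that handshake (hypothetical; the real mgr module drives it over ssh as ceph-admin):

    import subprocess

    def run_cephadm_step(payload_argv: list[str]) -> str:
        # probe 1: confirm passwordless sudo works at all
        subprocess.run(["sudo", "/bin/true"], check=True)
        # probe 2: find the interpreter the payload needs
        which = subprocess.run(["sudo", "/bin/which", "python3"],
                               check=True, capture_output=True, text=True)
        python3 = which.stdout.strip()
        # real step, e.g. the gather-facts call seen above
        result = subprocess.run(["sudo", python3, *payload_argv],
                                check=True, capture_output=True, text=True)
        return result.stdout
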
Oct 11 02:59:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"} v 0) v1
Oct 11 02:59:15 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"}]: dispatch
Oct 11 02:59:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:59:15 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:59:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 02:59:15 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:59:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 02:59:15 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:59:15 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 12c9f0fb-3f2a-4b16-b4a0-a0392ddaaca2 does not exist
Oct 11 02:59:15 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 93ca66f7-fcba-4fac-bcf3-0f63d48e476b does not exist
Oct 11 02:59:15 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 0f0e4dfb-e1a6-498e-abf0-4bd2e5cdadca does not exist
Oct 11 02:59:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 02:59:15 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:59:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 02:59:15 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:59:15 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 02:59:15 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
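
The mgr's mon_command traffic above ("config generate-minimal-conf", "auth get", "osd tree", ...) is ordinary librados JSON dispatch, and the same requests can be reproduced from Python. A sketch, assuming python3-rados is installed and an admin keyring is readable at the default paths:

    import json
    import rados

    cluster = rados.Rados(conffile="/etc/ceph/ceph.conf", name="client.admin")
    cluster.connect()
    try:
        # Same request the mgr dispatches in the audit log above.
        cmd = json.dumps({"prefix": "config generate-minimal-conf"})
        ret, out, errs = cluster.mon_command(cmd, b"")
        print(out.decode())
    finally:
        cluster.shutdown()
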
Oct 11 02:59:15 compute-0 sudo[490807]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:59:15 compute-0 sudo[490807]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:59:15 compute-0 sudo[490807]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:15 compute-0 sudo[490832]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:59:15 compute-0 sudo[490832]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:59:15 compute-0 sudo[490832]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2454: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:15 compute-0 sudo[490857]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:59:15 compute-0 sudo[490857]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:59:15 compute-0 sudo[490857]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:15 compute-0 sudo[490882]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 02:59:15 compute-0 sudo[490882]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
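
The ceph-volume "lvm batch" call just launched is generated from the OSD service spec named in CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group: pre-built LVs passed explicitly, --no-auto, one OSD per LV. A hypothetical spec that would yield this exact batch (Python rendering of the YAML one would feed to `ceph orch apply -i`; field names per the drive-group format, assumed here):

    import json

    spec = {
        "service_type": "osd",
        "service_id": "default_drive_group",
        "placement": {"hosts": ["compute-0"]},
        "spec": {
            "data_devices": {
                "paths": [
                    "/dev/ceph_vg0/ceph_lv0",
                    "/dev/ceph_vg1/ceph_lv1",
                    "/dev/ceph_vg2/ceph_lv2",
                ],
            },
        },
    }
    print(json.dumps(spec, indent=2))
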
Oct 11 02:59:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config rm", "who": "osd/host:compute-0", "name": "osd_memory_target"}]: dispatch
Oct 11 02:59:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:59:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 02:59:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:59:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 02:59:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 02:59:15 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 02:59:15 compute-0 nova_compute[356901]: 2025-10-11 02:59:15.808 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:16 compute-0 podman[490947]: 2025-10-11 02:59:16.202986274 +0000 UTC m=+0.085158233 container create 6f73912880a97b09620bf5dfe2d5ca105f19f5a5bfb9a2cca46219def952772d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mclean, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:59:16 compute-0 podman[490947]: 2025-10-11 02:59:16.154836714 +0000 UTC m=+0.037008713 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:59:16 compute-0 systemd[1]: Started libpod-conmon-6f73912880a97b09620bf5dfe2d5ca105f19f5a5bfb9a2cca46219def952772d.scope.
Oct 11 02:59:16 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:59:16 compute-0 podman[490947]: 2025-10-11 02:59:16.33450952 +0000 UTC m=+0.216681499 container init 6f73912880a97b09620bf5dfe2d5ca105f19f5a5bfb9a2cca46219def952772d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mclean, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 02:59:16 compute-0 podman[490947]: 2025-10-11 02:59:16.353835577 +0000 UTC m=+0.236007526 container start 6f73912880a97b09620bf5dfe2d5ca105f19f5a5bfb9a2cca46219def952772d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mclean, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:59:16 compute-0 podman[490947]: 2025-10-11 02:59:16.359213479 +0000 UTC m=+0.241385428 container attach 6f73912880a97b09620bf5dfe2d5ca105f19f5a5bfb9a2cca46219def952772d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mclean, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, ceph=True)
Oct 11 02:59:16 compute-0 elastic_mclean[490963]: 167 167
Oct 11 02:59:16 compute-0 podman[490947]: 2025-10-11 02:59:16.36934894 +0000 UTC m=+0.251520889 container died 6f73912880a97b09620bf5dfe2d5ca105f19f5a5bfb9a2cca46219def952772d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mclean, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS)
Oct 11 02:59:16 compute-0 systemd[1]: libpod-6f73912880a97b09620bf5dfe2d5ca105f19f5a5bfb9a2cca46219def952772d.scope: Deactivated successfully.
Oct 11 02:59:16 compute-0 systemd[1]: var-lib-containers-storage-overlay-e258a085ab9490e5253e6dbab793e61f99c581219057d421ad4217cf531dbb10-merged.mount: Deactivated successfully.
Oct 11 02:59:16 compute-0 podman[490947]: 2025-10-11 02:59:16.472413206 +0000 UTC m=+0.354585175 container remove 6f73912880a97b09620bf5dfe2d5ca105f19f5a5bfb9a2cca46219def952772d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=elastic_mclean, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3)
Oct 11 02:59:16 compute-0 systemd[1]: libpod-conmon-6f73912880a97b09620bf5dfe2d5ca105f19f5a5bfb9a2cca46219def952772d.scope: Deactivated successfully.
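
A create/init/start/attach/died/remove sequence inside ~0.3 s, with "167 167" as the only output (very likely cephadm's probe for the ceph uid/gid baked into the image), is the signature of cephadm's short-lived helper containers: effectively a one-shot `podman run --rm`. A sketch that reproduces the same event sequence against the same image:

    import subprocess

    IMAGE = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")

    # One-shot, auto-removed container: journald captures the same
    # create/init/start/attach/died/remove lifecycle seen above.
    subprocess.run(["podman", "run", "--rm", IMAGE, "/bin/true"], check=True)
    # To watch the events live from another shell: podman events
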
Oct 11 02:59:16 compute-0 podman[490988]: 2025-10-11 02:59:16.699752764 +0000 UTC m=+0.080435215 container create e8244bfc5d51cde3ea1573f8ee2ff0cbdd344d5e970dc5aa4664fd334740aacc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_colden, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507)
Oct 11 02:59:16 compute-0 podman[490988]: 2025-10-11 02:59:16.658516763 +0000 UTC m=+0.039199254 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:59:16 compute-0 systemd[1]: Started libpod-conmon-e8244bfc5d51cde3ea1573f8ee2ff0cbdd344d5e970dc5aa4664fd334740aacc.scope.
Oct 11 02:59:16 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:59:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bec8c3582c7c77c5bcdeb97965d8a944be3a4b6a5eb97731e592d2f56ec34ca3/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:59:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bec8c3582c7c77c5bcdeb97965d8a944be3a4b6a5eb97731e592d2f56ec34ca3/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:59:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bec8c3582c7c77c5bcdeb97965d8a944be3a4b6a5eb97731e592d2f56ec34ca3/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:59:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bec8c3582c7c77c5bcdeb97965d8a944be3a4b6a5eb97731e592d2f56ec34ca3/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:59:16 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/bec8c3582c7c77c5bcdeb97965d8a944be3a4b6a5eb97731e592d2f56ec34ca3/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 02:59:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:59:16 compute-0 nova_compute[356901]: 2025-10-11 02:59:16.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:59:16 compute-0 podman[490988]: 2025-10-11 02:59:16.918362124 +0000 UTC m=+0.299044595 container init e8244bfc5d51cde3ea1573f8ee2ff0cbdd344d5e970dc5aa4664fd334740aacc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_colden, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, OSD_FLAVOR=default)
Oct 11 02:59:16 compute-0 podman[490988]: 2025-10-11 02:59:16.931349113 +0000 UTC m=+0.312031574 container start e8244bfc5d51cde3ea1573f8ee2ff0cbdd344d5e970dc5aa4664fd334740aacc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_colden, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:59:16 compute-0 podman[490988]: 2025-10-11 02:59:16.940707364 +0000 UTC m=+0.321389835 container attach e8244bfc5d51cde3ea1573f8ee2ff0cbdd344d5e970dc5aa4664fd334740aacc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_colden, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 02:59:17 compute-0 ceph-mon[191930]: pgmap v2454: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2455: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:17 compute-0 nova_compute[356901]: 2025-10-11 02:59:17.891 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:17 compute-0 sudo[481742]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:17 compute-0 sshd-session[481741]: Received disconnect from 192.168.122.10 port 59922:11: disconnected by user
Oct 11 02:59:17 compute-0 sshd-session[481741]: Disconnected from user zuul 192.168.122.10 port 59922
Oct 11 02:59:17 compute-0 sshd-session[481738]: pam_unix(sshd:session): session closed for user zuul
Oct 11 02:59:17 compute-0 systemd[1]: session-65.scope: Deactivated successfully.
Oct 11 02:59:17 compute-0 systemd[1]: session-65.scope: Consumed 3min 29.590s CPU time, 967.4M memory peak, read 495.7M from disk, written 283.9M to disk.
Oct 11 02:59:17 compute-0 systemd-logind[804]: Session 65 logged out. Waiting for processes to exit.
Oct 11 02:59:17 compute-0 systemd-logind[804]: Removed session 65.
Oct 11 02:59:18 compute-0 podman[491023]: 2025-10-11 02:59:18.151752603 +0000 UTC m=+0.136033617 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, architecture=x86_64, container_name=kepler, io.k8s.display-name=Red Hat Universal Base Image 9, release=1214.1726694543, build-date=2024-09-18T21:23:30, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, name=ubi9, vcs-type=git, version=9.4, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, config_id=edpm, managed_by=edpm_ansible, com.redhat.component=ubi9-container, release-0.7.12=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, summary=Provides the latest release of Red Hat Universal Base Image 9., distribution-scope=public, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, maintainer=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.29.0, io.openshift.tags=base rhel9, vendor=Red Hat, Inc.)
Oct 11 02:59:18 compute-0 sshd-session[491034]: Accepted publickey for zuul from 192.168.122.10 port 56938 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 02:59:18 compute-0 systemd-logind[804]: New session 66 of user zuul.
Oct 11 02:59:18 compute-0 systemd[1]: Started Session 66 of User zuul.
Oct 11 02:59:18 compute-0 sshd-session[491034]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 02:59:18 compute-0 sudo[491053]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/cat /var/tmp/sos-osp/sosreport-compute-0-2025-10-11-berpmwy.tar.xz
Oct 11 02:59:18 compute-0 sudo[491053]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:59:18 compute-0 brave_colden[491004]: --> passed data devices: 0 physical, 3 LVM
Oct 11 02:59:18 compute-0 brave_colden[491004]: --> relative data size: 1.0
Oct 11 02:59:18 compute-0 brave_colden[491004]: --> All data devices are unavailable
Oct 11 02:59:18 compute-0 systemd[1]: libpod-e8244bfc5d51cde3ea1573f8ee2ff0cbdd344d5e970dc5aa4664fd334740aacc.scope: Deactivated successfully.
Oct 11 02:59:18 compute-0 podman[490988]: 2025-10-11 02:59:18.37054661 +0000 UTC m=+1.751229051 container died e8244bfc5d51cde3ea1573f8ee2ff0cbdd344d5e970dc5aa4664fd334740aacc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_colden, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3)
Oct 11 02:59:18 compute-0 systemd[1]: libpod-e8244bfc5d51cde3ea1573f8ee2ff0cbdd344d5e970dc5aa4664fd334740aacc.scope: Consumed 1.238s CPU time.
Oct 11 02:59:18 compute-0 ceph-mon[191930]: pgmap v2455: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:18 compute-0 systemd[1]: var-lib-containers-storage-overlay-bec8c3582c7c77c5bcdeb97965d8a944be3a4b6a5eb97731e592d2f56ec34ca3-merged.mount: Deactivated successfully.
Oct 11 02:59:18 compute-0 sudo[491053]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:18 compute-0 sshd-session[491052]: Received disconnect from 192.168.122.10 port 56938:11: disconnected by user
Oct 11 02:59:18 compute-0 sshd-session[491052]: Disconnected from user zuul 192.168.122.10 port 56938
Oct 11 02:59:18 compute-0 sshd-session[491034]: pam_unix(sshd:session): session closed for user zuul
Oct 11 02:59:18 compute-0 systemd[1]: session-66.scope: Deactivated successfully.
Oct 11 02:59:18 compute-0 systemd-logind[804]: Session 66 logged out. Waiting for processes to exit.
Oct 11 02:59:18 compute-0 systemd-logind[804]: Removed session 66.
Oct 11 02:59:18 compute-0 podman[490988]: 2025-10-11 02:59:18.532064423 +0000 UTC m=+1.912746884 container remove e8244bfc5d51cde3ea1573f8ee2ff0cbdd344d5e970dc5aa4664fd334740aacc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_colden, ceph=True, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 02:59:18 compute-0 systemd[1]: libpod-conmon-e8244bfc5d51cde3ea1573f8ee2ff0cbdd344d5e970dc5aa4664fd334740aacc.scope: Deactivated successfully.
Oct 11 02:59:18 compute-0 sudo[490882]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:18 compute-0 sshd-session[491093]: Accepted publickey for zuul from 192.168.122.10 port 56946 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 02:59:18 compute-0 systemd-logind[804]: New session 67 of user zuul.
Oct 11 02:59:18 compute-0 systemd[1]: Started Session 67 of User zuul.
Oct 11 02:59:18 compute-0 sshd-session[491093]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 02:59:18 compute-0 sudo[491097]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:59:18 compute-0 sudo[491097]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:59:18 compute-0 sudo[491097]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:18 compute-0 sudo[491124]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/rm -rf /var/tmp/sos-osp
Oct 11 02:59:18 compute-0 sudo[491124]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:59:18 compute-0 sudo[491130]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:59:18 compute-0 sudo[491124]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:18 compute-0 sudo[491130]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:59:18 compute-0 sshd-session[491119]: Received disconnect from 192.168.122.10 port 56946:11: disconnected by user
Oct 11 02:59:18 compute-0 sshd-session[491119]: Disconnected from user zuul 192.168.122.10 port 56946
Oct 11 02:59:18 compute-0 sudo[491130]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:18 compute-0 sshd-session[491093]: pam_unix(sshd:session): session closed for user zuul
Oct 11 02:59:18 compute-0 systemd[1]: session-67.scope: Deactivated successfully.
Oct 11 02:59:18 compute-0 systemd-logind[804]: Session 67 logged out. Waiting for processes to exit.
Oct 11 02:59:18 compute-0 systemd-logind[804]: Removed session 67.
Oct 11 02:59:18 compute-0 sudo[491174]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:59:18 compute-0 sudo[491174]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:59:18 compute-0 sudo[491174]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:18 compute-0 sudo[491199]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 02:59:18 compute-0 sudo[491199]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:59:19 compute-0 podman[491266]: 2025-10-11 02:59:19.48540465 +0000 UTC m=+0.117257810 container create bfcbdbda462d94e08d3d340161e229892827ba5d0175c06eb237cb4161615c58 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_allen, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:59:19 compute-0 podman[491266]: 2025-10-11 02:59:19.405315728 +0000 UTC m=+0.037168908 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:59:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2456: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:19 compute-0 systemd[1]: Started libpod-conmon-bfcbdbda462d94e08d3d340161e229892827ba5d0175c06eb237cb4161615c58.scope.
Oct 11 02:59:19 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:59:19 compute-0 podman[491266]: 2025-10-11 02:59:19.667496467 +0000 UTC m=+0.299349667 container init bfcbdbda462d94e08d3d340161e229892827ba5d0175c06eb237cb4161615c58 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_allen, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:59:19 compute-0 podman[491266]: 2025-10-11 02:59:19.679041891 +0000 UTC m=+0.310895091 container start bfcbdbda462d94e08d3d340161e229892827ba5d0175c06eb237cb4161615c58 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_allen, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:59:19 compute-0 podman[491266]: 2025-10-11 02:59:19.685302777 +0000 UTC m=+0.317155977 container attach bfcbdbda462d94e08d3d340161e229892827ba5d0175c06eb237cb4161615c58 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_allen, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:59:19 compute-0 keen_allen[491283]: 167 167
Oct 11 02:59:19 compute-0 systemd[1]: libpod-bfcbdbda462d94e08d3d340161e229892827ba5d0175c06eb237cb4161615c58.scope: Deactivated successfully.
Oct 11 02:59:19 compute-0 podman[491266]: 2025-10-11 02:59:19.689881599 +0000 UTC m=+0.321734759 container died bfcbdbda462d94e08d3d340161e229892827ba5d0175c06eb237cb4161615c58 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_allen, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:59:19 compute-0 systemd[1]: var-lib-containers-storage-overlay-52eb581d212975753753ca29820d3f97790cd5cd17f6f5d0c7e6385b59a9c9a0-merged.mount: Deactivated successfully.
Oct 11 02:59:19 compute-0 podman[491266]: 2025-10-11 02:59:19.749960388 +0000 UTC m=+0.381813548 container remove bfcbdbda462d94e08d3d340161e229892827ba5d0175c06eb237cb4161615c58 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=keen_allen, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef)
Oct 11 02:59:19 compute-0 systemd[1]: libpod-conmon-bfcbdbda462d94e08d3d340161e229892827ba5d0175c06eb237cb4161615c58.scope: Deactivated successfully.
Oct 11 02:59:19 compute-0 nova_compute[356901]: 2025-10-11 02:59:19.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:59:20 compute-0 podman[491306]: 2025-10-11 02:59:20.017841751 +0000 UTC m=+0.080495718 container create 55d39c2ea69028bad6bedbf34ead7968bef6b15d81b0870bbfa030f1f301be1c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_lederberg, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef)
Oct 11 02:59:20 compute-0 podman[491306]: 2025-10-11 02:59:19.991010752 +0000 UTC m=+0.053664769 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:59:20 compute-0 systemd[1]: Started libpod-conmon-55d39c2ea69028bad6bedbf34ead7968bef6b15d81b0870bbfa030f1f301be1c.scope.
Oct 11 02:59:20 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:59:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/44b7796dfd3c1722f221c1f9e4ddf87bcf7f03de567533d96ac56f7dc88ee78b/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:59:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/44b7796dfd3c1722f221c1f9e4ddf87bcf7f03de567533d96ac56f7dc88ee78b/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:59:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/44b7796dfd3c1722f221c1f9e4ddf87bcf7f03de567533d96ac56f7dc88ee78b/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:59:20 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/44b7796dfd3c1722f221c1f9e4ddf87bcf7f03de567533d96ac56f7dc88ee78b/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:59:20 compute-0 podman[491306]: 2025-10-11 02:59:20.165578086 +0000 UTC m=+0.228232153 container init 55d39c2ea69028bad6bedbf34ead7968bef6b15d81b0870bbfa030f1f301be1c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_lederberg, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 02:59:20 compute-0 podman[491306]: 2025-10-11 02:59:20.184179896 +0000 UTC m=+0.246833863 container start 55d39c2ea69028bad6bedbf34ead7968bef6b15d81b0870bbfa030f1f301be1c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_lederberg, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS)
Oct 11 02:59:20 compute-0 podman[491306]: 2025-10-11 02:59:20.189715884 +0000 UTC m=+0.252369871 container attach 55d39c2ea69028bad6bedbf34ead7968bef6b15d81b0870bbfa030f1f301be1c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_lederberg, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:59:20 compute-0 ceph-mon[191930]: pgmap v2456: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:20 compute-0 nova_compute[356901]: 2025-10-11 02:59:20.810 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]: {
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:     "0": [
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:         {
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "devices": [
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "/dev/loop3"
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             ],
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "lv_name": "ceph_lv0",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "lv_size": "21470642176",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "name": "ceph_lv0",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "tags": {
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.cluster_name": "ceph",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.crush_device_class": "",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.encrypted": "0",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.osd_id": "0",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.type": "block",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.vdo": "0"
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             },
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "type": "block",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "vg_name": "ceph_vg0"
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:         }
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:     ],
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:     "1": [
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:         {
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "devices": [
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "/dev/loop4"
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             ],
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "lv_name": "ceph_lv1",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "lv_size": "21470642176",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "name": "ceph_lv1",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "tags": {
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.cluster_name": "ceph",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.crush_device_class": "",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.encrypted": "0",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.osd_id": "1",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.type": "block",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.vdo": "0"
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             },
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "type": "block",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "vg_name": "ceph_vg1"
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:         }
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:     ],
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:     "2": [
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:         {
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "devices": [
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "/dev/loop5"
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             ],
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "lv_name": "ceph_lv2",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "lv_size": "21470642176",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "name": "ceph_lv2",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "tags": {
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.cluster_name": "ceph",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.crush_device_class": "",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.encrypted": "0",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.osd_id": "2",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.type": "block",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:                 "ceph.vdo": "0"
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             },
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "type": "block",
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:             "vg_name": "ceph_vg2"
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:         }
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]:     ]
Oct 11 02:59:21 compute-0 dazzling_lederberg[491322]: }
Oct 11 02:59:21 compute-0 systemd[1]: libpod-55d39c2ea69028bad6bedbf34ead7968bef6b15d81b0870bbfa030f1f301be1c.scope: Deactivated successfully.
Oct 11 02:59:21 compute-0 podman[491306]: 2025-10-11 02:59:21.123515477 +0000 UTC m=+1.186169484 container died 55d39c2ea69028bad6bedbf34ead7968bef6b15d81b0870bbfa030f1f301be1c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_lederberg, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 02:59:21 compute-0 systemd[1]: var-lib-containers-storage-overlay-44b7796dfd3c1722f221c1f9e4ddf87bcf7f03de567533d96ac56f7dc88ee78b-merged.mount: Deactivated successfully.
Oct 11 02:59:21 compute-0 podman[491306]: 2025-10-11 02:59:21.208737341 +0000 UTC m=+1.271391308 container remove 55d39c2ea69028bad6bedbf34ead7968bef6b15d81b0870bbfa030f1f301be1c (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_lederberg, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Oct 11 02:59:21 compute-0 systemd[1]: libpod-conmon-55d39c2ea69028bad6bedbf34ead7968bef6b15d81b0870bbfa030f1f301be1c.scope: Deactivated successfully.
Oct 11 02:59:21 compute-0 sudo[491199]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:21 compute-0 sudo[491343]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:59:21 compute-0 sudo[491343]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:59:21 compute-0 sudo[491343]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:21 compute-0 sudo[491368]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 02:59:21 compute-0 sudo[491368]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:59:21 compute-0 sudo[491368]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2457: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:21 compute-0 sudo[491393]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:59:21 compute-0 sudo[491393]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:59:21 compute-0 sudo[491393]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:21 compute-0 sudo[491418]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 02:59:21 compute-0 sudo[491418]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:59:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:59:21 compute-0 nova_compute[356901]: 2025-10-11 02:59:21.898 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:59:22 compute-0 podman[491481]: 2025-10-11 02:59:22.207890312 +0000 UTC m=+0.039222306 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:59:22 compute-0 podman[491481]: 2025-10-11 02:59:22.32698351 +0000 UTC m=+0.158315524 container create 8845466459dde76e7668fa9382be753e8f8651cf92e36c65f143cb030f062c14 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_jang, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 02:59:22 compute-0 systemd[1]: Started libpod-conmon-8845466459dde76e7668fa9382be753e8f8651cf92e36c65f143cb030f062c14.scope.
Oct 11 02:59:22 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:59:22 compute-0 podman[491481]: 2025-10-11 02:59:22.468409188 +0000 UTC m=+0.299741212 container init 8845466459dde76e7668fa9382be753e8f8651cf92e36c65f143cb030f062c14 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_jang, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:59:22 compute-0 podman[491481]: 2025-10-11 02:59:22.486198057 +0000 UTC m=+0.317530031 container start 8845466459dde76e7668fa9382be753e8f8651cf92e36c65f143cb030f062c14 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_jang, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:59:22 compute-0 zealous_jang[491506]: 167 167
Oct 11 02:59:22 compute-0 systemd[1]: libpod-8845466459dde76e7668fa9382be753e8f8651cf92e36c65f143cb030f062c14.scope: Deactivated successfully.
Oct 11 02:59:22 compute-0 podman[491481]: 2025-10-11 02:59:22.509452142 +0000 UTC m=+0.340784156 container attach 8845466459dde76e7668fa9382be753e8f8651cf92e36c65f143cb030f062c14 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_jang, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Oct 11 02:59:22 compute-0 podman[491481]: 2025-10-11 02:59:22.509984182 +0000 UTC m=+0.341316166 container died 8845466459dde76e7668fa9382be753e8f8651cf92e36c65f143cb030f062c14 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_jang, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 02:59:22 compute-0 podman[491495]: 2025-10-11 02:59:22.535500811 +0000 UTC m=+0.149823025 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 02:59:22 compute-0 podman[491503]: 2025-10-11 02:59:22.544163597 +0000 UTC m=+0.144493395 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, config_id=ovn_metadata_agent)
Oct 11 02:59:22 compute-0 systemd[1]: var-lib-containers-storage-overlay-db7344665cab672a9ade19fd44cf34f8a0a31c45bab8038d3b764eb36efbea6a-merged.mount: Deactivated successfully.
Oct 11 02:59:22 compute-0 ceph-mon[191930]: pgmap v2457: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:22 compute-0 podman[491481]: 2025-10-11 02:59:22.615902024 +0000 UTC m=+0.447233998 container remove 8845466459dde76e7668fa9382be753e8f8651cf92e36c65f143cb030f062c14 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=zealous_jang, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0)
Oct 11 02:59:22 compute-0 podman[491498]: 2025-10-11 02:59:22.621724823 +0000 UTC m=+0.219723833 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_managed=true, container_name=ovn_controller, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, org.label-schema.build-date=20251009)
Oct 11 02:59:22 compute-0 podman[491499]: 2025-10-11 02:59:22.627617955 +0000 UTC m=+0.227059049 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_compute, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']})
Oct 11 02:59:22 compute-0 systemd[1]: libpod-conmon-8845466459dde76e7668fa9382be753e8f8651cf92e36c65f143cb030f062c14.scope: Deactivated successfully.
Oct 11 02:59:22 compute-0 podman[491603]: 2025-10-11 02:59:22.867211333 +0000 UTC m=+0.107038086 container create f36e807170e5779d1c95038a667ffc35b6e65dd52f9ea9bda178e21c74453caa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_kowalevski, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True)
Oct 11 02:59:22 compute-0 podman[491603]: 2025-10-11 02:59:22.801990251 +0000 UTC m=+0.041817084 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 02:59:22 compute-0 nova_compute[356901]: 2025-10-11 02:59:22.895 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:22 compute-0 systemd[1]: Started libpod-conmon-f36e807170e5779d1c95038a667ffc35b6e65dd52f9ea9bda178e21c74453caa.scope.
Oct 11 02:59:22 compute-0 systemd[1]: Started libcrun container.
Oct 11 02:59:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3b08dff7010cc32c82ea1ccbc62a083a08e8c67dab074cda31ce6326994c0534/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 02:59:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3b08dff7010cc32c82ea1ccbc62a083a08e8c67dab074cda31ce6326994c0534/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 02:59:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3b08dff7010cc32c82ea1ccbc62a083a08e8c67dab074cda31ce6326994c0534/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 02:59:22 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/3b08dff7010cc32c82ea1ccbc62a083a08e8c67dab074cda31ce6326994c0534/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 02:59:23 compute-0 podman[491603]: 2025-10-11 02:59:23.050138442 +0000 UTC m=+0.289965225 container init f36e807170e5779d1c95038a667ffc35b6e65dd52f9ea9bda178e21c74453caa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_kowalevski, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 02:59:23 compute-0 podman[491603]: 2025-10-11 02:59:23.068782083 +0000 UTC m=+0.308608836 container start f36e807170e5779d1c95038a667ffc35b6e65dd52f9ea9bda178e21c74453caa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_kowalevski, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default)
Oct 11 02:59:23 compute-0 podman[491603]: 2025-10-11 02:59:23.129809227 +0000 UTC m=+0.369636290 container attach f36e807170e5779d1c95038a667ffc35b6e65dd52f9ea9bda178e21c74453caa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_kowalevski, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 02:59:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2458: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]: {
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:         "osd_id": 1,
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:         "type": "bluestore"
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:     },
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:         "osd_id": 2,
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:         "type": "bluestore"
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:     },
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:         "osd_id": 0,
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:         "type": "bluestore"
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]:     }
Oct 11 02:59:24 compute-0 amazing_kowalevski[491619]: }
Oct 11 02:59:24 compute-0 systemd[1]: libpod-f36e807170e5779d1c95038a667ffc35b6e65dd52f9ea9bda178e21c74453caa.scope: Deactivated successfully.
Oct 11 02:59:24 compute-0 systemd[1]: libpod-f36e807170e5779d1c95038a667ffc35b6e65dd52f9ea9bda178e21c74453caa.scope: Consumed 1.088s CPU time.
Oct 11 02:59:24 compute-0 podman[491652]: 2025-10-11 02:59:24.227828246 +0000 UTC m=+0.045897397 container died f36e807170e5779d1c95038a667ffc35b6e65dd52f9ea9bda178e21c74453caa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_kowalevski, org.label-schema.build-date=20250507, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 02:59:24 compute-0 systemd[1]: var-lib-containers-storage-overlay-3b08dff7010cc32c82ea1ccbc62a083a08e8c67dab074cda31ce6326994c0534-merged.mount: Deactivated successfully.
Oct 11 02:59:24 compute-0 podman[491652]: 2025-10-11 02:59:24.298752272 +0000 UTC m=+0.116821403 container remove f36e807170e5779d1c95038a667ffc35b6e65dd52f9ea9bda178e21c74453caa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_kowalevski, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 02:59:24 compute-0 systemd[1]: libpod-conmon-f36e807170e5779d1c95038a667ffc35b6e65dd52f9ea9bda178e21c74453caa.scope: Deactivated successfully.
Oct 11 02:59:24 compute-0 sudo[491418]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 02:59:24 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:59:24 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 02:59:24 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:59:24 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 0d9173c9-2ff7-425d-a71c-9073fcec9401 does not exist
Oct 11 02:59:24 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 5866105e-8c64-4033-9ea6-83b5cf55afab does not exist
Oct 11 02:59:24 compute-0 sudo[491666]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 02:59:24 compute-0 sudo[491666]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:59:24 compute-0 sudo[491666]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:24 compute-0 sudo[491691]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 02:59:24 compute-0 sudo[491691]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 02:59:24 compute-0 sudo[491691]: pam_unix(sudo:session): session closed for user root
Oct 11 02:59:24 compute-0 ceph-mon[191930]: pgmap v2458: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:59:24 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 02:59:24 compute-0 nova_compute[356901]: 2025-10-11 02:59:24.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:59:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2459: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:25 compute-0 nova_compute[356901]: 2025-10-11 02:59:25.813 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:59:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:59:26 compute-0 ceph-mon[191930]: pgmap v2459: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:59:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:59:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:59:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:59:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:59:26 compute-0 nova_compute[356901]: 2025-10-11 02:59:26.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:59:26 compute-0 nova_compute[356901]: 2025-10-11 02:59:26.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 02:59:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2460: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 02:59:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/34344169' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:59:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 02:59:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/34344169' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:59:27 compute-0 nova_compute[356901]: 2025-10-11 02:59:27.899 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:28 compute-0 ceph-mon[191930]: pgmap v2460: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/34344169' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 02:59:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/34344169' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 02:59:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2461: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:29 compute-0 podman[157119]: time="2025-10-11T02:59:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:59:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:59:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:59:29 compute-0 podman[157119]: @ - - [11/Oct/2025:02:59:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9107 "" "Go-http-client/1.1"
Oct 11 02:59:30 compute-0 ceph-mon[191930]: pgmap v2461: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:30 compute-0 nova_compute[356901]: 2025-10-11 02:59:30.816 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:31 compute-0 openstack_network_exporter[374316]: ERROR   02:59:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 02:59:31 compute-0 openstack_network_exporter[374316]: ERROR   02:59:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:59:31 compute-0 openstack_network_exporter[374316]: ERROR   02:59:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 02:59:31 compute-0 openstack_network_exporter[374316]: ERROR   02:59:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 02:59:31 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:59:31 compute-0 openstack_network_exporter[374316]: ERROR   02:59:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 02:59:31 compute-0 openstack_network_exporter[374316]: 
Oct 11 02:59:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2462: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:59:32 compute-0 podman[491717]: 2025-10-11 02:59:32.214560465 +0000 UTC m=+0.097910093 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=iscsid, container_name=iscsid, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2)
Oct 11 02:59:32 compute-0 podman[491716]: 2025-10-11 02:59:32.219376976 +0000 UTC m=+0.106663732 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, tcib_managed=true, config_id=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, org.label-schema.build-date=20251009, io.buildah.version=1.41.3)
Oct 11 02:59:32 compute-0 ceph-mon[191930]: pgmap v2462: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:32 compute-0 nova_compute[356901]: 2025-10-11 02:59:32.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:59:32 compute-0 nova_compute[356901]: 2025-10-11 02:59:32.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 02:59:32 compute-0 nova_compute[356901]: 2025-10-11 02:59:32.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 02:59:32 compute-0 nova_compute[356901]: 2025-10-11 02:59:32.903 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:33 compute-0 nova_compute[356901]: 2025-10-11 02:59:33.303 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 02:59:33 compute-0 nova_compute[356901]: 2025-10-11 02:59:33.303 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 02:59:33 compute-0 nova_compute[356901]: 2025-10-11 02:59:33.304 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 02:59:33 compute-0 nova_compute[356901]: 2025-10-11 02:59:33.304 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 02:59:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2463: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:34 compute-0 ceph-mon[191930]: pgmap v2463: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2464: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:35 compute-0 nova_compute[356901]: 2025-10-11 02:59:35.821 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:36 compute-0 ceph-mon[191930]: pgmap v2464: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:59:37 compute-0 nova_compute[356901]: 2025-10-11 02:59:37.091 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 02:59:37 compute-0 nova_compute[356901]: 2025-10-11 02:59:37.110 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 02:59:37 compute-0 nova_compute[356901]: 2025-10-11 02:59:37.111 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 02:59:37 compute-0 nova_compute[356901]: 2025-10-11 02:59:37.112 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:59:37 compute-0 nova_compute[356901]: 2025-10-11 02:59:37.113 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:59:37 compute-0 nova_compute[356901]: 2025-10-11 02:59:37.148 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:59:37 compute-0 nova_compute[356901]: 2025-10-11 02:59:37.149 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:59:37 compute-0 nova_compute[356901]: 2025-10-11 02:59:37.150 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:59:37 compute-0 nova_compute[356901]: 2025-10-11 02:59:37.150 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 02:59:37 compute-0 nova_compute[356901]: 2025-10-11 02:59:37.150 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:59:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2465: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:37 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:59:37 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2031912173' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:59:37 compute-0 nova_compute[356901]: 2025-10-11 02:59:37.647 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.496s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:59:37 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2031912173' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:59:37 compute-0 nova_compute[356901]: 2025-10-11 02:59:37.726 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:59:37 compute-0 nova_compute[356901]: 2025-10-11 02:59:37.727 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:59:37 compute-0 nova_compute[356901]: 2025-10-11 02:59:37.728 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 02:59:37 compute-0 nova_compute[356901]: 2025-10-11 02:59:37.907 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.061 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.062 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3657MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.062 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.063 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.150 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.150 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.151 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.167 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing inventories for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:804
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.189 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating ProviderTree inventory for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 from _refresh_and_get_inventory using data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} _refresh_and_get_inventory /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:768
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.189 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating inventory in ProviderTree for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with inventory: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:176
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.208 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing aggregate associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, aggregates: None _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:813
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.227 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing trait associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, traits: COMPUTE_VOLUME_EXTEND,COMPUTE_NET_VIF_MODEL_VMXNET3,HW_CPU_X86_SSSE3,COMPUTE_RESCUE_BFV,COMPUTE_SOCKET_PCI_NUMA_AFFINITY,COMPUTE_NODE,HW_CPU_X86_SVM,COMPUTE_STORAGE_BUS_SCSI,HW_CPU_X86_FMA3,COMPUTE_GRAPHICS_MODEL_NONE,COMPUTE_NET_VIF_MODEL_RTL8139,HW_CPU_X86_SSE4A,COMPUTE_IMAGE_TYPE_QCOW2,HW_CPU_X86_BMI2,HW_CPU_X86_SSE42,HW_CPU_X86_AVX2,COMPUTE_IMAGE_TYPE_RAW,COMPUTE_VIOMMU_MODEL_VIRTIO,HW_CPU_X86_AESNI,COMPUTE_STORAGE_BUS_FDC,COMPUTE_GRAPHICS_MODEL_VIRTIO,HW_CPU_X86_AMD_SVM,COMPUTE_NET_VIF_MODEL_NE2K_PCI,COMPUTE_ACCELERATORS,HW_CPU_X86_SSE2,COMPUTE_GRAPHICS_MODEL_VGA,HW_CPU_X86_ABM,HW_CPU_X86_AVX,COMPUTE_NET_VIF_MODEL_E1000,COMPUTE_STORAGE_BUS_USB,COMPUTE_NET_ATTACH_INTERFACE,HW_CPU_X86_MMX,COMPUTE_SECURITY_TPM_2_0,COMPUTE_IMAGE_TYPE_ISO,HW_CPU_X86_SSE41,COMPUTE_IMAGE_TYPE_AKI,COMPUTE_IMAGE_TYPE_AMI,COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG,COMPUTE_DEVICE_TAGGING,COMPUTE_SECURITY_UEFI_SECURE_BOOT,COMPUTE_TRUSTED_CERTS,COMPUTE_NET_VIF_MODEL_VIRTIO,COMPUTE_VIOMMU_MODEL_INTEL,COMPUTE_STORAGE_BUS_SATA,HW_CPU_X86_SSE,COMPUTE_STORAGE_BUS_VIRTIO,COMPUTE_NET_VIF_MODEL_PCNET,COMPUTE_GRAPHICS_MODEL_CIRRUS,HW_CPU_X86_SHA,HW_CPU_X86_BMI,COMPUTE_NET_VIF_MODEL_E1000E,COMPUTE_NET_VIF_MODEL_SPAPR_VLAN,COMPUTE_VOLUME_ATTACH_WITH_TAG,COMPUTE_GRAPHICS_MODEL_BOCHS,COMPUTE_VIOMMU_MODEL_AUTO,COMPUTE_IMAGE_TYPE_ARI,HW_CPU_X86_CLMUL,COMPUTE_STORAGE_BUS_IDE,COMPUTE_VOLUME_MULTI_ATTACH,HW_CPU_X86_F16C,COMPUTE_SECURITY_TPM_1_2 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:825
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.269 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 02:59:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 02:59:38 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/11105105' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:59:38 compute-0 ceph-mon[191930]: pgmap v2465: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:38 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/11105105' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.731 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.462s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.743 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.763 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.765 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 02:59:38 compute-0 nova_compute[356901]: 2025-10-11 02:59:38.766 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.703s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 02:59:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2466: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:39 compute-0 nova_compute[356901]: 2025-10-11 02:59:39.760 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 02:59:40 compute-0 podman[491798]: 2025-10-11 02:59:40.240986874 +0000 UTC m=+0.109922664 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 02:59:40 compute-0 podman[491797]: 2025-10-11 02:59:40.255003621 +0000 UTC m=+0.132166080 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., name=ubi9-minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, version=9.6, vendor=Red Hat, Inc., io.openshift.expose-services=, managed_by=edpm_ansible, distribution-scope=public, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.component=ubi9-minimal-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., release=1755695350, build-date=2025-08-20T13:12:41, container_name=openstack_network_exporter, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.buildah.version=1.33.7, vcs-type=git, config_id=edpm)
Oct 11 02:59:40 compute-0 podman[491796]: 2025-10-11 02:59:40.268158106 +0000 UTC m=+0.150990488 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 02:59:40 compute-0 ceph-mon[191930]: pgmap v2466: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:40 compute-0 nova_compute[356901]: 2025-10-11 02:59:40.825 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2467: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:59:42 compute-0 ceph-mon[191930]: pgmap v2467: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:42 compute-0 nova_compute[356901]: 2025-10-11 02:59:42.912 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2468: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:44 compute-0 ceph-mon[191930]: pgmap v2468: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2469: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:45 compute-0 nova_compute[356901]: 2025-10-11 02:59:45.828 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:46 compute-0 ceph-mon[191930]: pgmap v2469: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:59:47 compute-0 rsyslogd[187706]: imjournal: 17198 messages lost due to rate-limiting (20000 allowed within 600 seconds)
Oct 11 02:59:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2470: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:47 compute-0 nova_compute[356901]: 2025-10-11 02:59:47.918 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:48 compute-0 ceph-mon[191930]: pgmap v2470: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #120. Immutable memtables: 0.
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:59:48.890555) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 71] Flushing memtable with next log file: 120
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151588890617, "job": 71, "event": "flush_started", "num_memtables": 1, "num_entries": 1513, "num_deletes": 251, "total_data_size": 2141033, "memory_usage": 2187184, "flush_reason": "Manual Compaction"}
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 71] Level-0 flush table #121: started
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151588915959, "cf_name": "default", "job": 71, "event": "table_file_creation", "file_number": 121, "file_size": 2096667, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 49506, "largest_seqno": 51018, "table_properties": {"data_size": 2089519, "index_size": 4030, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 2053, "raw_key_size": 16568, "raw_average_key_size": 20, "raw_value_size": 2074652, "raw_average_value_size": 2599, "num_data_blocks": 180, "num_entries": 798, "num_filter_entries": 798, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760151457, "oldest_key_time": 1760151457, "file_creation_time": 1760151588, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 121, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 71] Flush lasted 25533 microseconds, and 13926 cpu microseconds.
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:59:48.916091) [db/flush_job.cc:967] [default] [JOB 71] Level-0 flush table #121: 2096667 bytes OK
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:59:48.916129) [db/memtable_list.cc:519] [default] Level-0 commit table #121 started
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:59:48.919582) [db/memtable_list.cc:722] [default] Level-0 commit table #121: memtable #1 done
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:59:48.919615) EVENT_LOG_v1 {"time_micros": 1760151588919603, "job": 71, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:59:48.919649) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 71] Try to delete WAL files size 2134103, prev total WAL file size 2134103, number of live WAL files 2.
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000117.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:59:48.922361) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730034373639' seq:72057594037927935, type:22 .. '7061786F730035303231' seq:0, type:0; will stop at (end)
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 72] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 71 Base level 0, inputs: [121(2047KB)], [119(7080KB)]
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151588922466, "job": 72, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [121], "files_L6": [119], "score": -1, "input_data_size": 9347192, "oldest_snapshot_seqno": -1}
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 72] Generated table #122: 6507 keys, 7587264 bytes, temperature: kUnknown
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151588976764, "cf_name": "default", "job": 72, "event": "table_file_creation", "file_number": 122, "file_size": 7587264, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 7547852, "index_size": 22022, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 16325, "raw_key_size": 170065, "raw_average_key_size": 26, "raw_value_size": 7434244, "raw_average_value_size": 1142, "num_data_blocks": 869, "num_entries": 6507, "num_filter_entries": 6507, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760151588, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 122, "seqno_to_time_mapping": "N/A"}}
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:59:48.977118) [db/compaction/compaction_job.cc:1663] [default] [JOB 72] Compacted 1@0 + 1@6 files to L6 => 7587264 bytes
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:59:48.979745) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 171.8 rd, 139.5 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(2.0, 6.9 +0.0 blob) out(7.2 +0.0 blob), read-write-amplify(8.1) write-amplify(3.6) OK, records in: 7021, records dropped: 514 output_compression: NoCompression
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:59:48.979773) EVENT_LOG_v1 {"time_micros": 1760151588979759, "job": 72, "event": "compaction_finished", "compaction_time_micros": 54402, "compaction_time_cpu_micros": 39313, "output_level": 6, "num_output_files": 1, "total_output_size": 7587264, "num_input_records": 7021, "num_output_records": 6507, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000121.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151588980693, "job": 72, "event": "table_file_deletion", "file_number": 121}
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000119.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151588983287, "job": 72, "event": "table_file_deletion", "file_number": 119}
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:59:48.921711) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:59:48.983657) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:59:48.983666) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:59:48.983669) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:59:48.983672) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 02:59:48 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-02:59:48.983675) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
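
The run above is one complete monitor-store maintenance cycle: ceph-mon asks its embedded RocksDB for a manual compaction, JOB 71 flushes the active memtable to L0 table #121 (about 2.0 MiB), JOB 72 merges that file with L6 table #119 (about 6.9 MiB) into table #122, and the obsolete WAL segment plus both input tables are deleted. The amplification figures in the JOB 72 summary follow directly from the EVENT_LOG_v1 payloads: write-amplify = output bytes / new L0 bytes = 7587264 / 2096667 = 3.6, and read-write-amplify = (all inputs + output) / new L0 bytes = (9347192 + 7587264) / 2096667 = 8.1. A minimal Python sketch, using only field names and numbers present in the lines above (the parsing helper and the abridged sample line are illustrative):

    import json
    import re

    # Pull the JSON payload out of a rocksdb EVENT_LOG_v1 journal line.
    def event_payload(line: str) -> dict:
        return json.loads(re.search(r"EVENT_LOG_v1 ({.*})", line).group(1))

    # Abridged copy of the JOB 72 compaction_finished line above.
    sample = 'rocksdb: EVENT_LOG_v1 {"job": 72, "event": "compaction_finished", "total_output_size": 7587264}'
    assert event_payload(sample)["total_output_size"] == 7587264

    l0_in    = 2096667   # table #121, the freshly flushed L0 input
    total_in = 9347192   # "input_data_size" in compaction_started
    out      = 7587264   # "total_output_size" in compaction_finished
    print(round(out / l0_in, 1))               # 3.6 -> write-amplify
    print(round((total_in + out) / l0_in, 1))  # 8.1 -> read-write-amplify
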
Oct 11 02:59:49 compute-0 podman[491857]: 2025-10-11 02:59:49.238998059 +0000 UTC m=+0.120582985 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.29.0, build-date=2024-09-18T21:23:30, io.k8s.display-name=Red Hat Universal Base Image 9, com.redhat.component=ubi9-container, config_id=edpm, release-0.7.12=, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, name=ubi9, summary=Provides the latest release of Red Hat Universal Base Image 9., vendor=Red Hat, Inc., container_name=kepler, managed_by=edpm_ansible, io.openshift.expose-services=, version=9.4, vcs-type=git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., release=1214.1726694543, io.openshift.tags=base rhel9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']})
Oct 11 02:59:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2471: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:50 compute-0 nova_compute[356901]: 2025-10-11 02:59:50.831 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:50 compute-0 ceph-mon[191930]: pgmap v2471: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2472: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:59:52 compute-0 ceph-mon[191930]: pgmap v2472: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:52 compute-0 nova_compute[356901]: 2025-10-11 02:59:52.923 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:53 compute-0 podman[491879]: 2025-10-11 02:59:53.212065207 +0000 UTC m=+0.096361675 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 02:59:53 compute-0 podman[491882]: 2025-10-11 02:59:53.218558841 +0000 UTC m=+0.084482928 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, container_name=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, managed_by=edpm_ansible)
Oct 11 02:59:53 compute-0 podman[491881]: 2025-10-11 02:59:53.243848452 +0000 UTC m=+0.127899121 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_compute)
Oct 11 02:59:53 compute-0 podman[491880]: 2025-10-11 02:59:53.253086179 +0000 UTC m=+0.134145395 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, io.buildah.version=1.41.3)
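
Each podman health_status line above is the journal copy of a podman container event: timestamp, container id, then a parenthesized dump of image labels and edpm-ansible config_data, with the check outcome in health_status= and the consecutive-failure count in health_failing_streak=. Those two fields, plus container_name=, are the only parts that vary while everything stays healthy, so a log scan can key on them. A minimal sketch (the regexes assume the key=value layout shown above):

    import re

    # Reduce a podman health_status event line to (container, status, streak).
    def health_summary(line: str):
        name   = re.search(r"container_name=([^,)]+)", line)
        status = re.search(r"health_status=([^,)]+)", line)
        streak = re.search(r"health_failing_streak=(\d+)", line)
        if name and status and streak:
            return name.group(1), status.group(1), int(streak.group(1))
        return None  # not a health event line
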
Oct 11 02:59:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2473: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:59:54.894 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 02:59:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:59:54.895 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 02:59:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 02:59:54.896 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
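
The three ovn_metadata_agent lines above are one pass of neutron's ProcessMonitor: oslo.concurrency's lock decorator logs "Acquiring lock", "acquired ... waited 0.001s", and "released ... held 0.001s" around _check_child_processes, which is why this trio repeats on a timer with millisecond hold times. A minimal sketch of the pattern that emits the trio (the lock name and function name come from the log; the body is illustrative):

    from oslo_concurrency import lockutils

    # Calling this logs the acquire / acquired / released trio seen above.
    @lockutils.synchronized("_check_child_processes")
    def _check_child_processes():
        pass  # neutron checks its monitored external processes here
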
Oct 11 02:59:54 compute-0 ceph-mon[191930]: pgmap v2473: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2474: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:55 compute-0 nova_compute[356901]: 2025-10-11 02:59:55.835 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:59:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:59:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:59:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:59:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 02:59:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 02:59:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_02:59:56
Oct 11 02:59:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 02:59:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 02:59:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['cephfs.cephfs.meta', 'volumes', 'default.rgw.log', 'cephfs.cephfs.data', 'backups', '.rgw.root', 'images', 'default.rgw.meta', 'default.rgw.control', '.mgr', 'vms']
Oct 11 02:59:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 02:59:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 02:59:56 compute-0 ceph-mon[191930]: pgmap v2474: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2475: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 02:59:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:59:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:59:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:59:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:59:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 02:59:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 02:59:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 02:59:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 02:59:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 02:59:57 compute-0 nova_compute[356901]: 2025-10-11 02:59:57.927 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 02:59:58 compute-0 ceph-mon[191930]: pgmap v2475: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2476: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 02:59:59 compute-0 podman[157119]: time="2025-10-11T02:59:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 02:59:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:59:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 02:59:59 compute-0 podman[157119]: @ - - [11/Oct/2025:02:59:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9108 "" "Go-http-client/1.1"
Oct 11 03:00:00 compute-0 nova_compute[356901]: 2025-10-11 03:00:00.839 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:00 compute-0 ceph-mon[191930]: pgmap v2476: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:01 compute-0 openstack_network_exporter[374316]: ERROR   03:00:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 03:00:01 compute-0 openstack_network_exporter[374316]: ERROR   03:00:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:00:01 compute-0 openstack_network_exporter[374316]: ERROR   03:00:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 03:00:01 compute-0 openstack_network_exporter[374316]: ERROR   03:00:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:00:01 compute-0 openstack_network_exporter[374316]: ERROR   03:00:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
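
The openstack_network_exporter errors above come from its ovs-appctl-style collectors: each call needs the target daemon's unixctl control socket, and none was found for ovsdb-server or ovn-northd. On a compute node that is expected: ovn-northd runs on the control plane rather than here, and the dpif-netdev commands ("please specify an existing datapath") only apply to a userspace datapath, which this host does not run. A minimal check, with socket paths assumed from OVS/OVN packaging conventions (rundir/daemon.pid.ctl) and the /run/openvswitch and /run/ovn mounts visible in the container configs elsewhere in this log:

    import glob

    # Look for the unixctl control sockets the failing appctl calls need.
    # The path layout is an assumption, not something this log states.
    for pattern in ("/run/openvswitch/ovsdb-server.*.ctl",
                    "/run/ovn/ovn-northd.*.ctl"):
        hits = glob.glob(pattern)
        print(pattern, "->", hits if hits else "missing, matches the ERRORs above")
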
Oct 11 03:00:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2477: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:00:02 compute-0 nova_compute[356901]: 2025-10-11 03:00:02.931 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:02 compute-0 ceph-mon[191930]: pgmap v2477: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:03 compute-0 podman[491963]: 2025-10-11 03:00:03.226775752 +0000 UTC m=+0.128421680 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=multipathd, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, container_name=multipathd, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 03:00:03 compute-0 podman[491964]: 2025-10-11 03:00:03.269783239 +0000 UTC m=+0.155739197 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, config_id=iscsid, container_name=iscsid, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']})
Oct 11 03:00:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2478: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:05 compute-0 ceph-mon[191930]: pgmap v2478: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2479: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:05 compute-0 nova_compute[356901]: 2025-10-11 03:00:05.841 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:00:07 compute-0 ceph-mon[191930]: pgmap v2479: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0009191400908380543 of space, bias 1.0, pg target 0.2757420272514163 quantized to 32 (current 32)
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
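
Each pg_autoscaler pair above is one pool evaluation: the effective_target_ratio line carries the ratios plus the raw root capacity (64411926528 bytes, the cluster's 60 GiB), and the Pool line reports that pool's share of the capacity, its bias, and a pg target that is then quantized to a power of two and compared with the current pg_num. In this excerpt every target is exactly usage_ratio * bias * 300; a factor of 300 is consistent with the default mon_target_pg_per_osd of 100 across 3 OSDs, but the OSD count is not visible here, so treat that reading as an assumption. A short check against the logged values:

    # Reproduce the logged pg targets: usage_ratio * bias * 300.
    # The 300 (assumed mon_target_pg_per_osd 100 x 3 OSDs) is a guess;
    # ratios and biases are copied from the pg_autoscaler lines above.
    pools = {
        ".mgr":               (7.185749983720779e-06, 1.0),
        "vms":                (0.0005513950275118838, 1.0),
        "images":             (0.0009191400908380543, 1.0),
        "cephfs.cephfs.meta": (5.087256625643029e-07, 4.0),
    }
    for name, (ratio, bias) in pools.items():
        print(name, ratio * bias * 300)   # matches each "pg target" above
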
Oct 11 03:00:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2480: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:07 compute-0 nova_compute[356901]: 2025-10-11 03:00:07.936 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:09 compute-0 ceph-mon[191930]: pgmap v2480: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2481: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:10 compute-0 nova_compute[356901]: 2025-10-11 03:00:10.845 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:11 compute-0 ceph-mon[191930]: pgmap v2481: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:11 compute-0 podman[492001]: 2025-10-11 03:00:11.231921402 +0000 UTC m=+0.121587333 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, tcib_managed=true, config_id=edpm, org.label-schema.build-date=20251009)
Oct 11 03:00:11 compute-0 podman[492008]: 2025-10-11 03:00:11.250673477 +0000 UTC m=+0.113957276 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 03:00:11 compute-0 podman[492002]: 2025-10-11 03:00:11.268110083 +0000 UTC m=+0.136906019 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, build-date=2025-08-20T13:12:41, managed_by=edpm_ansible, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, com.redhat.component=ubi9-minimal-container, io.openshift.expose-services=, name=ubi9-minimal, release=1755695350, version=9.6, maintainer=Red Hat, Inc., container_name=openstack_network_exporter, url=https://catalog.redhat.com/en/search?searchType=containers, config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vendor=Red Hat, Inc., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, vcs-type=git, architecture=x86_64, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.buildah.version=1.33.7, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 03:00:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2482: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:00:12 compute-0 nova_compute[356901]: 2025-10-11 03:00:12.943 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:13 compute-0 ceph-mon[191930]: pgmap v2482: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2483: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:15 compute-0 ceph-mon[191930]: pgmap v2483: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2484: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:15 compute-0 nova_compute[356901]: 2025-10-11 03:00:15.849 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:00:17 compute-0 ceph-mon[191930]: pgmap v2484: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2485: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:17 compute-0 nova_compute[356901]: 2025-10-11 03:00:17.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:00:17 compute-0 nova_compute[356901]: 2025-10-11 03:00:17.950 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:19 compute-0 ceph-mon[191930]: pgmap v2485: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2486: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:20 compute-0 podman[492064]: 2025-10-11 03:00:20.232049488 +0000 UTC m=+0.114641842 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.openshift.tags=base rhel9, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.buildah.version=1.29.0, com.redhat.component=ubi9-container, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.expose-services=, summary=Provides the latest release of Red Hat Universal Base Image 9., maintainer=Red Hat, Inc., config_id=edpm, name=ubi9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1214.1726694543, vcs-type=git, build-date=2024-09-18T21:23:30, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, architecture=x86_64, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., version=9.4, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=kepler, managed_by=edpm_ansible, release-0.7.12=)
Oct 11 03:00:20 compute-0 nova_compute[356901]: 2025-10-11 03:00:20.851 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:20 compute-0 nova_compute[356901]: 2025-10-11 03:00:20.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:00:21 compute-0 ceph-mon[191930]: pgmap v2486: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2487: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:00:21 compute-0 nova_compute[356901]: 2025-10-11 03:00:21.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:00:22 compute-0 nova_compute[356901]: 2025-10-11 03:00:22.954 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:23 compute-0 ceph-mon[191930]: pgmap v2487: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2488: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:24 compute-0 podman[492085]: 2025-10-11 03:00:24.204309455 +0000 UTC m=+0.094747491 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 03:00:24 compute-0 podman[492087]: 2025-10-11 03:00:24.269449127 +0000 UTC m=+0.133403200 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 10 Base Image, io.buildah.version=1.41.4, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, org.label-schema.vendor=CentOS, config_id=edpm, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 03:00:24 compute-0 podman[492094]: 2025-10-11 03:00:24.271153376 +0000 UTC m=+0.128133917 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true)
Oct 11 03:00:24 compute-0 podman[492086]: 2025-10-11 03:00:24.297041353 +0000 UTC m=+0.168978254 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']})
Oct 11 03:00:24 compute-0 sudo[492167]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:00:24 compute-0 sudo[492167]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:24 compute-0 sudo[492167]: pam_unix(sudo:session): session closed for user root
Oct 11 03:00:24 compute-0 sudo[492192]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:00:24 compute-0 sudo[492192]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:24 compute-0 sudo[492192]: pam_unix(sudo:session): session closed for user root
Oct 11 03:00:25 compute-0 sudo[492217]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:00:25 compute-0 sudo[492217]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:25 compute-0 sudo[492217]: pam_unix(sudo:session): session closed for user root
Oct 11 03:00:25 compute-0 sudo[492242]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 03:00:25 compute-0 sudo[492242]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:25 compute-0 ceph-mon[191930]: pgmap v2488: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2489: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:25 compute-0 sudo[492242]: pam_unix(sudo:session): session closed for user root
Oct 11 03:00:25 compute-0 nova_compute[356901]: 2025-10-11 03:00:25.855 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:25 compute-0 nova_compute[356901]: 2025-10-11 03:00:25.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:00:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 03:00:25 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:00:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 03:00:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 03:00:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 03:00:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:00:25 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 3aaa5d72-280c-4156-9691-7199b4d5ff34 does not exist
Oct 11 03:00:25 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 35bd8d77-571d-4da3-acbe-9e2a386b649f does not exist
Oct 11 03:00:25 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev d9e724e1-83cf-4e6a-b85b-f1d0afe7c78b does not exist
Oct 11 03:00:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 03:00:25 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 03:00:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 03:00:25 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 03:00:25 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 03:00:25 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
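The mon_command payloads the mgr dispatches in this audit block ("config generate-minimal-conf", "auth get", "osd tree" filtered to destroyed OSDs) have direct ceph CLI equivalents. A hedged sketch of two of them (assumes a host with an admin keyring, as on this co-located mon/mgr node):

```python
# Sketch: CLI equivalents of the mon_command audit entries above.
import subprocess

def ceph(*args: str) -> str:
    """Run a ceph CLI command and return its stdout."""
    return subprocess.run(["ceph", *args], check=True,
                          capture_output=True, text=True).stdout

print(ceph("config", "generate-minimal-conf"))               # minimal ceph.conf
print(ceph("osd", "tree", "destroyed", "--format", "json"))  # destroyed OSDs only
```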
Oct 11 03:00:26 compute-0 sudo[492300]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:00:26 compute-0 sudo[492300]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:26 compute-0 sudo[492300]: pam_unix(sudo:session): session closed for user root
Oct 11 03:00:26 compute-0 sudo[492325]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:00:26 compute-0 sudo[492325]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:26 compute-0 sudo[492325]: pam_unix(sudo:session): session closed for user root
Oct 11 03:00:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:00:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 03:00:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:00:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 03:00:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 03:00:26 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:00:26 compute-0 sudo[492350]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:00:26 compute-0 sudo[492350]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:26 compute-0 sudo[492350]: pam_unix(sudo:session): session closed for user root
Oct 11 03:00:26 compute-0 sudo[492375]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 03:00:26 compute-0 sudo[492375]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:00:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:00:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:00:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:00:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:00:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:00:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:00:26 compute-0 nova_compute[356901]: 2025-10-11 03:00:26.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:00:26 compute-0 nova_compute[356901]: 2025-10-11 03:00:26.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
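The pair of nova debug lines above show a periodic task firing and returning immediately: with reclaim_instance_interval unset or non-positive, queued soft-deletes are never reclaimed. A simplified sketch of that guard (stand-in CONF/LOG objects, not nova's real oslo.config plumbing):

```python
# Sketch of the early return behind the "skipping..." debug line above.
import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger("nova.compute.manager")

class CONF:                        # stand-in for nova's oslo.config CONF
    reclaim_instance_interval = 0  # default: never reclaim soft deletes

def reclaim_queued_deletes() -> None:
    if CONF.reclaim_instance_interval <= 0:
        LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...")
        return
    # otherwise: find SOFT_DELETED instances older than the interval
    # and delete them for real.

reclaim_queued_deletes()
```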
Oct 11 03:00:27 compute-0 podman[492439]: 2025-10-11 03:00:27.20009012 +0000 UTC m=+0.087989143 container create b86c0ea17e2a25b1e510dd97ac79839c7fb1a2c5d7e234c7d5cdf772191904f4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_feistel, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default)
Oct 11 03:00:27 compute-0 systemd[1]: Started libpod-conmon-b86c0ea17e2a25b1e510dd97ac79839c7fb1a2c5d7e234c7d5cdf772191904f4.scope.
Oct 11 03:00:27 compute-0 podman[492439]: 2025-10-11 03:00:27.163401293 +0000 UTC m=+0.051300326 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:00:27 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:00:27 compute-0 ceph-mon[191930]: pgmap v2489: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:27 compute-0 podman[492439]: 2025-10-11 03:00:27.329684712 +0000 UTC m=+0.217583755 container init b86c0ea17e2a25b1e510dd97ac79839c7fb1a2c5d7e234c7d5cdf772191904f4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_feistel, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default)
Oct 11 03:00:27 compute-0 podman[492439]: 2025-10-11 03:00:27.351724981 +0000 UTC m=+0.239624034 container start b86c0ea17e2a25b1e510dd97ac79839c7fb1a2c5d7e234c7d5cdf772191904f4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_feistel, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True)
Oct 11 03:00:27 compute-0 podman[492439]: 2025-10-11 03:00:27.358814601 +0000 UTC m=+0.246713724 container attach b86c0ea17e2a25b1e510dd97ac79839c7fb1a2c5d7e234c7d5cdf772191904f4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_feistel, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True, org.label-schema.license=GPLv2)
Oct 11 03:00:27 compute-0 amazing_feistel[492453]: 167 167
Oct 11 03:00:27 compute-0 systemd[1]: libpod-b86c0ea17e2a25b1e510dd97ac79839c7fb1a2c5d7e234c7d5cdf772191904f4.scope: Deactivated successfully.
Oct 11 03:00:27 compute-0 conmon[492453]: conmon b86c0ea17e2a25b1e510 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-b86c0ea17e2a25b1e510dd97ac79839c7fb1a2c5d7e234c7d5cdf772191904f4.scope/container/memory.events
Oct 11 03:00:27 compute-0 podman[492439]: 2025-10-11 03:00:27.366537735 +0000 UTC m=+0.254436748 container died b86c0ea17e2a25b1e510dd97ac79839c7fb1a2c5d7e234c7d5cdf772191904f4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_feistel, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, CEPH_REF=reef, ceph=True, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 03:00:27 compute-0 systemd[1]: var-lib-containers-storage-overlay-349b4f2ac129d0f18e6c2448385a44fb81541e6d6f0b1ff55d3449507abd7428-merged.mount: Deactivated successfully.
Oct 11 03:00:27 compute-0 podman[492439]: 2025-10-11 03:00:27.431182469 +0000 UTC m=+0.319081482 container remove b86c0ea17e2a25b1e510dd97ac79839c7fb1a2c5d7e234c7d5cdf772191904f4 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=amazing_feistel, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 03:00:27 compute-0 systemd[1]: libpod-conmon-b86c0ea17e2a25b1e510dd97ac79839c7fb1a2c5d7e234c7d5cdf772191904f4.scope: Deactivated successfully.
Oct 11 03:00:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2490: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:27 compute-0 podman[492476]: 2025-10-11 03:00:27.66341133 +0000 UTC m=+0.087741369 container create 5107acc731258304d55114e03e71454d43cc499e5b2564903dbdfc42804c1b49 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_sammet, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 03:00:27 compute-0 podman[492476]: 2025-10-11 03:00:27.620560532 +0000 UTC m=+0.044890621 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:00:27 compute-0 systemd[1]: Started libpod-conmon-5107acc731258304d55114e03e71454d43cc499e5b2564903dbdfc42804c1b49.scope.
Oct 11 03:00:27 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:00:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0d3fcad0ea2ff1b2b3248cd9b185ffbedb9c64b47a3db620d8c7c5f806b86c2e/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 03:00:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0d3fcad0ea2ff1b2b3248cd9b185ffbedb9c64b47a3db620d8c7c5f806b86c2e/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 03:00:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0d3fcad0ea2ff1b2b3248cd9b185ffbedb9c64b47a3db620d8c7c5f806b86c2e/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 03:00:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0d3fcad0ea2ff1b2b3248cd9b185ffbedb9c64b47a3db620d8c7c5f806b86c2e/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 03:00:27 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0d3fcad0ea2ff1b2b3248cd9b185ffbedb9c64b47a3db620d8c7c5f806b86c2e/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 03:00:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 03:00:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2278309877' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 03:00:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 03:00:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/2278309877' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 03:00:27 compute-0 podman[492476]: 2025-10-11 03:00:27.828192985 +0000 UTC m=+0.252523064 container init 5107acc731258304d55114e03e71454d43cc499e5b2564903dbdfc42804c1b49 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_sammet, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 03:00:27 compute-0 podman[492476]: 2025-10-11 03:00:27.85002796 +0000 UTC m=+0.274357999 container start 5107acc731258304d55114e03e71454d43cc499e5b2564903dbdfc42804c1b49 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_sammet, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 03:00:27 compute-0 podman[492476]: 2025-10-11 03:00:27.856616581 +0000 UTC m=+0.280946680 container attach 5107acc731258304d55114e03e71454d43cc499e5b2564903dbdfc42804c1b49 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_sammet, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507)
Oct 11 03:00:27 compute-0 nova_compute[356901]: 2025-10-11 03:00:27.958 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2278309877' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 03:00:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/2278309877' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 03:00:29 compute-0 vibrant_sammet[492493]: --> passed data devices: 0 physical, 3 LVM
Oct 11 03:00:29 compute-0 vibrant_sammet[492493]: --> relative data size: 1.0
Oct 11 03:00:29 compute-0 vibrant_sammet[492493]: --> All data devices are unavailable
Oct 11 03:00:29 compute-0 systemd[1]: libpod-5107acc731258304d55114e03e71454d43cc499e5b2564903dbdfc42804c1b49.scope: Deactivated successfully.
Oct 11 03:00:29 compute-0 podman[492476]: 2025-10-11 03:00:29.223666153 +0000 UTC m=+1.647996192 container died 5107acc731258304d55114e03e71454d43cc499e5b2564903dbdfc42804c1b49 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_sammet, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507)
Oct 11 03:00:29 compute-0 systemd[1]: libpod-5107acc731258304d55114e03e71454d43cc499e5b2564903dbdfc42804c1b49.scope: Consumed 1.296s CPU time.
Oct 11 03:00:29 compute-0 systemd[1]: var-lib-containers-storage-overlay-0d3fcad0ea2ff1b2b3248cd9b185ffbedb9c64b47a3db620d8c7c5f806b86c2e-merged.mount: Deactivated successfully.
Oct 11 03:00:29 compute-0 ceph-mon[191930]: pgmap v2490: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:29 compute-0 podman[492476]: 2025-10-11 03:00:29.338401582 +0000 UTC m=+1.762731581 container remove 5107acc731258304d55114e03e71454d43cc499e5b2564903dbdfc42804c1b49 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_sammet, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 03:00:29 compute-0 systemd[1]: libpod-conmon-5107acc731258304d55114e03e71454d43cc499e5b2564903dbdfc42804c1b49.scope: Deactivated successfully.
Oct 11 03:00:29 compute-0 sudo[492375]: pam_unix(sudo:session): session closed for user root
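The batch run above bails out with "All data devices are unavailable", which is consistent with the lvm list output further down: all three LVs already carry ceph.osd_id tags, so ceph-volume has nothing new to consume. A sketch of the same availability check via LVM tags (assumes an LVM2 with JSON report output; LV paths copied from the command line above):

```python
# Sketch: ask LVM whether each LV already belongs to a Ceph OSD, which is
# the condition behind ceph-volume reporting the devices unavailable here.
import json
import subprocess

def lv_tags(lv_path: str) -> dict:
    out = subprocess.run(
        ["lvs", "--reportformat", "json", "-o", "lv_tags", lv_path],
        check=True, capture_output=True, text=True,
    ).stdout
    tags = json.loads(out)["report"][0]["lv"][0]["lv_tags"]
    return dict(t.split("=", 1) for t in tags.split(",") if "=" in t)

for lv in ("/dev/ceph_vg0/ceph_lv0", "/dev/ceph_vg1/ceph_lv1",
           "/dev/ceph_vg2/ceph_lv2"):
    osd_id = lv_tags(lv).get("ceph.osd_id")
    print(f"{lv}: {'already osd.' + osd_id if osd_id else 'free'}")
```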
Oct 11 03:00:29 compute-0 sudo[492536]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:00:29 compute-0 sudo[492536]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:29 compute-0 sudo[492536]: pam_unix(sudo:session): session closed for user root
Oct 11 03:00:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2491: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:29 compute-0 sudo[492561]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:00:29 compute-0 sudo[492561]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:29 compute-0 sudo[492561]: pam_unix(sudo:session): session closed for user root
Oct 11 03:00:29 compute-0 podman[157119]: time="2025-10-11T03:00:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 03:00:29 compute-0 podman[157119]: @ - - [11/Oct/2025:03:00:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 03:00:29 compute-0 sudo[492586]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:00:29 compute-0 sudo[492586]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:29 compute-0 sudo[492586]: pam_unix(sudo:session): session closed for user root
Oct 11 03:00:29 compute-0 podman[157119]: @ - - [11/Oct/2025:03:00:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9113 "" "Go-http-client/1.1"
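These two access-log lines are a client (here, cephadm's refresh loop) polling podman's REST API over its local socket. A sketch issuing the same containers/json call with only the standard library (the socket path /run/podman/podman.sock is the usual rootful default and is an assumption; it does not appear in the log):

```python
# Sketch: GET /v4.9.3/libpod/containers/json?all=true over podman's
# Unix socket, mirroring the request logged above.
import http.client
import json
import socket

class UnixHTTPConnection(http.client.HTTPConnection):
    def __init__(self, path: str):
        super().__init__("localhost")  # host value is unused over AF_UNIX
        self._path = path

    def connect(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.sock.connect(self._path)

conn = UnixHTTPConnection("/run/podman/podman.sock")
conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
for c in json.loads(conn.getresponse().read()):
    print(c["Names"], c["State"])
```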
Oct 11 03:00:29 compute-0 sudo[492611]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 03:00:29 compute-0 sudo[492611]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:30 compute-0 ceph-mon[191930]: pgmap v2491: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:30 compute-0 podman[492674]: 2025-10-11 03:00:30.617823582 +0000 UTC m=+0.108028241 container create f3fc0efeb4c233cb2ae8f87a8494b03d38662222af04aa9379bcbbe13590714f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_wiles, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True)
Oct 11 03:00:30 compute-0 podman[492674]: 2025-10-11 03:00:30.573951478 +0000 UTC m=+0.064156187 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:00:30 compute-0 systemd[1]: Started libpod-conmon-f3fc0efeb4c233cb2ae8f87a8494b03d38662222af04aa9379bcbbe13590714f.scope.
Oct 11 03:00:30 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:00:30 compute-0 podman[492674]: 2025-10-11 03:00:30.807577435 +0000 UTC m=+0.297782094 container init f3fc0efeb4c233cb2ae8f87a8494b03d38662222af04aa9379bcbbe13590714f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_wiles, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 03:00:30 compute-0 podman[492674]: 2025-10-11 03:00:30.820954033 +0000 UTC m=+0.311158662 container start f3fc0efeb4c233cb2ae8f87a8494b03d38662222af04aa9379bcbbe13590714f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_wiles, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, OSD_FLAVOR=default, ceph=True)
Oct 11 03:00:30 compute-0 podman[492674]: 2025-10-11 03:00:30.825763627 +0000 UTC m=+0.315968256 container attach f3fc0efeb4c233cb2ae8f87a8494b03d38662222af04aa9379bcbbe13590714f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_wiles, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 03:00:30 compute-0 quirky_wiles[492690]: 167 167
Oct 11 03:00:30 compute-0 systemd[1]: libpod-f3fc0efeb4c233cb2ae8f87a8494b03d38662222af04aa9379bcbbe13590714f.scope: Deactivated successfully.
Oct 11 03:00:30 compute-0 conmon[492690]: conmon f3fc0efeb4c233cb2ae8 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-f3fc0efeb4c233cb2ae8f87a8494b03d38662222af04aa9379bcbbe13590714f.scope/container/memory.events
Oct 11 03:00:30 compute-0 podman[492674]: 2025-10-11 03:00:30.834958976 +0000 UTC m=+0.325163605 container died f3fc0efeb4c233cb2ae8f87a8494b03d38662222af04aa9379bcbbe13590714f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_wiles, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 03:00:30 compute-0 nova_compute[356901]: 2025-10-11 03:00:30.858 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:30 compute-0 systemd[1]: var-lib-containers-storage-overlay-324e0b6eef1de192dbd8e22e7d5f0803b5c0109cdb364496a4237c13e7ea3406-merged.mount: Deactivated successfully.
Oct 11 03:00:30 compute-0 podman[492674]: 2025-10-11 03:00:30.913170084 +0000 UTC m=+0.403374713 container remove f3fc0efeb4c233cb2ae8f87a8494b03d38662222af04aa9379bcbbe13590714f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_wiles, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 03:00:30 compute-0 systemd[1]: libpod-conmon-f3fc0efeb4c233cb2ae8f87a8494b03d38662222af04aa9379bcbbe13590714f.scope: Deactivated successfully.
Oct 11 03:00:31 compute-0 podman[492712]: 2025-10-11 03:00:31.189295309 +0000 UTC m=+0.101563847 container create 8e64f07dbf257df4d1ce54f044a6e84b37601d15e082f5780aa2eb3d1ee51452 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_shannon, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_REF=reef)
Oct 11 03:00:31 compute-0 podman[492712]: 2025-10-11 03:00:31.151126798 +0000 UTC m=+0.063395376 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:00:31 compute-0 systemd[1]: Started libpod-conmon-8e64f07dbf257df4d1ce54f044a6e84b37601d15e082f5780aa2eb3d1ee51452.scope.
Oct 11 03:00:31 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:00:31 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b7d9d795545a6ea1057d664c5bca8982f867e4fbfbd79e52b96c85fa3cd34f94/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 03:00:31 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b7d9d795545a6ea1057d664c5bca8982f867e4fbfbd79e52b96c85fa3cd34f94/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 03:00:31 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b7d9d795545a6ea1057d664c5bca8982f867e4fbfbd79e52b96c85fa3cd34f94/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 03:00:31 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/b7d9d795545a6ea1057d664c5bca8982f867e4fbfbd79e52b96c85fa3cd34f94/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 03:00:31 compute-0 podman[492712]: 2025-10-11 03:00:31.359660329 +0000 UTC m=+0.271928897 container init 8e64f07dbf257df4d1ce54f044a6e84b37601d15e082f5780aa2eb3d1ee51452 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_shannon, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3)
Oct 11 03:00:31 compute-0 podman[492712]: 2025-10-11 03:00:31.380334552 +0000 UTC m=+0.292603090 container start 8e64f07dbf257df4d1ce54f044a6e84b37601d15e082f5780aa2eb3d1ee51452 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_shannon, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0)
Oct 11 03:00:31 compute-0 podman[492712]: 2025-10-11 03:00:31.385189085 +0000 UTC m=+0.297457633 container attach 8e64f07dbf257df4d1ce54f044a6e84b37601d15e082f5780aa2eb3d1ee51452 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_shannon, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 03:00:31 compute-0 openstack_network_exporter[374316]: ERROR   03:00:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:00:31 compute-0 openstack_network_exporter[374316]: ERROR   03:00:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:00:31 compute-0 openstack_network_exporter[374316]: ERROR   03:00:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 03:00:31 compute-0 openstack_network_exporter[374316]: ERROR   03:00:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 03:00:31 compute-0 openstack_network_exporter[374316]: ERROR   03:00:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 03:00:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2492: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:00:32 compute-0 silly_shannon[492728]: {
Oct 11 03:00:32 compute-0 silly_shannon[492728]:     "0": [
Oct 11 03:00:32 compute-0 silly_shannon[492728]:         {
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "devices": [
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "/dev/loop3"
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             ],
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "lv_name": "ceph_lv0",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "lv_size": "21470642176",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "name": "ceph_lv0",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "tags": {
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.cluster_name": "ceph",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.crush_device_class": "",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.encrypted": "0",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.osd_id": "0",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.type": "block",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.vdo": "0"
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             },
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "type": "block",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "vg_name": "ceph_vg0"
Oct 11 03:00:32 compute-0 silly_shannon[492728]:         }
Oct 11 03:00:32 compute-0 silly_shannon[492728]:     ],
Oct 11 03:00:32 compute-0 silly_shannon[492728]:     "1": [
Oct 11 03:00:32 compute-0 silly_shannon[492728]:         {
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "devices": [
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "/dev/loop4"
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             ],
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "lv_name": "ceph_lv1",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "lv_size": "21470642176",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "name": "ceph_lv1",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "tags": {
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.cluster_name": "ceph",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.crush_device_class": "",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.encrypted": "0",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.osd_id": "1",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.type": "block",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.vdo": "0"
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             },
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "type": "block",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "vg_name": "ceph_vg1"
Oct 11 03:00:32 compute-0 silly_shannon[492728]:         }
Oct 11 03:00:32 compute-0 silly_shannon[492728]:     ],
Oct 11 03:00:32 compute-0 silly_shannon[492728]:     "2": [
Oct 11 03:00:32 compute-0 silly_shannon[492728]:         {
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "devices": [
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "/dev/loop5"
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             ],
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "lv_name": "ceph_lv2",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "lv_size": "21470642176",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "name": "ceph_lv2",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "tags": {
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.cluster_name": "ceph",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.crush_device_class": "",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.encrypted": "0",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.osd_id": "2",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.type": "block",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:                 "ceph.vdo": "0"
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             },
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "type": "block",
Oct 11 03:00:32 compute-0 silly_shannon[492728]:             "vg_name": "ceph_vg2"
Oct 11 03:00:32 compute-0 silly_shannon[492728]:         }
Oct 11 03:00:32 compute-0 silly_shannon[492728]:     ]
Oct 11 03:00:32 compute-0 silly_shannon[492728]: }
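The JSON block this lvm list run prints is keyed by OSD id, with one LV record per OSD. A short sketch reducing it to an osd -> device summary (the sample entry is copied from osd "0" above; with the real output you would pass json.loads of the captured stdout):

```python
# Sketch: summarize ceph-volume `lvm list --format json` output like the
# block above into one line per OSD.
sample = {
    "0": [{
        "devices": ["/dev/loop3"],
        "lv_path": "/dev/ceph_vg0/ceph_lv0",
        "tags": {"ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6"},
    }],
}

def summarize(report: dict) -> None:
    for osd_id, lvs in sorted(report.items(), key=lambda kv: int(kv[0])):
        for lv in lvs:
            print(f"osd.{osd_id}: {lv['lv_path']} "
                  f"(backing {', '.join(lv['devices'])}, "
                  f"fsid {lv['tags']['ceph.osd_fsid']})")

summarize(sample)  # real use: summarize(json.loads(ceph_volume_stdout))
```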
Oct 11 03:00:32 compute-0 systemd[1]: libpod-8e64f07dbf257df4d1ce54f044a6e84b37601d15e082f5780aa2eb3d1ee51452.scope: Deactivated successfully.
Oct 11 03:00:32 compute-0 podman[492712]: 2025-10-11 03:00:32.295701823 +0000 UTC m=+1.207970391 container died 8e64f07dbf257df4d1ce54f044a6e84b37601d15e082f5780aa2eb3d1ee51452 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_shannon, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef)
Oct 11 03:00:32 compute-0 systemd[1]: var-lib-containers-storage-overlay-b7d9d795545a6ea1057d664c5bca8982f867e4fbfbd79e52b96c85fa3cd34f94-merged.mount: Deactivated successfully.
Oct 11 03:00:32 compute-0 podman[492712]: 2025-10-11 03:00:32.400074032 +0000 UTC m=+1.312342570 container remove 8e64f07dbf257df4d1ce54f044a6e84b37601d15e082f5780aa2eb3d1ee51452 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=silly_shannon, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 03:00:32 compute-0 systemd[1]: libpod-conmon-8e64f07dbf257df4d1ce54f044a6e84b37601d15e082f5780aa2eb3d1ee51452.scope: Deactivated successfully.
Oct 11 03:00:32 compute-0 sudo[492611]: pam_unix(sudo:session): session closed for user root
Oct 11 03:00:32 compute-0 sudo[492750]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:00:32 compute-0 sudo[492750]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:32 compute-0 sudo[492750]: pam_unix(sudo:session): session closed for user root
Oct 11 03:00:32 compute-0 ceph-mon[191930]: pgmap v2492: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:32 compute-0 sudo[492775]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:00:32 compute-0 sudo[492775]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:32 compute-0 sudo[492775]: pam_unix(sudo:session): session closed for user root
Oct 11 03:00:32 compute-0 sudo[492800]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:00:32 compute-0 sudo[492800]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:32 compute-0 sudo[492800]: pam_unix(sudo:session): session closed for user root
Oct 11 03:00:32 compute-0 nova_compute[356901]: 2025-10-11 03:00:32.898 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:00:32 compute-0 sudo[492825]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 03:00:32 compute-0 sudo[492825]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:32 compute-0 nova_compute[356901]: 2025-10-11 03:00:32.962 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:33 compute-0 podman[492889]: 2025-10-11 03:00:33.481308792 +0000 UTC m=+0.062367780 container create 297541641c52caca5b286d4560322bdbc69a33cc8063ddf6d472d7b46823d801 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_swartz, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 03:00:33 compute-0 systemd[1]: Started libpod-conmon-297541641c52caca5b286d4560322bdbc69a33cc8063ddf6d472d7b46823d801.scope.
Oct 11 03:00:33 compute-0 podman[492889]: 2025-10-11 03:00:33.46268822 +0000 UTC m=+0.043747218 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:00:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2493: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:33 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:00:33 compute-0 podman[492889]: 2025-10-11 03:00:33.594200975 +0000 UTC m=+0.175259963 container init 297541641c52caca5b286d4560322bdbc69a33cc8063ddf6d472d7b46823d801 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_swartz, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 03:00:33 compute-0 podman[492889]: 2025-10-11 03:00:33.607859366 +0000 UTC m=+0.188918344 container start 297541641c52caca5b286d4560322bdbc69a33cc8063ddf6d472d7b46823d801 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_swartz, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507)
Oct 11 03:00:33 compute-0 podman[492889]: 2025-10-11 03:00:33.61351018 +0000 UTC m=+0.194569158 container attach 297541641c52caca5b286d4560322bdbc69a33cc8063ddf6d472d7b46823d801 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_swartz, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 03:00:33 compute-0 quirky_swartz[492906]: 167 167
Oct 11 03:00:33 compute-0 systemd[1]: libpod-297541641c52caca5b286d4560322bdbc69a33cc8063ddf6d472d7b46823d801.scope: Deactivated successfully.
Oct 11 03:00:33 compute-0 podman[492889]: 2025-10-11 03:00:33.619081196 +0000 UTC m=+0.200140194 container died 297541641c52caca5b286d4560322bdbc69a33cc8063ddf6d472d7b46823d801 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_swartz, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 03:00:33 compute-0 systemd[1]: var-lib-containers-storage-overlay-cc452de5f10a0649604654000b99f26eeabfb841e51722f0927a4f2640d42592-merged.mount: Deactivated successfully.
Oct 11 03:00:33 compute-0 podman[492889]: 2025-10-11 03:00:33.680493298 +0000 UTC m=+0.261552296 container remove 297541641c52caca5b286d4560322bdbc69a33cc8063ddf6d472d7b46823d801 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quirky_swartz, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507)
Oct 11 03:00:33 compute-0 podman[492902]: 2025-10-11 03:00:33.688784559 +0000 UTC m=+0.138759012 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_id=multipathd, container_name=multipathd, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 03:00:33 compute-0 systemd[1]: libpod-conmon-297541641c52caca5b286d4560322bdbc69a33cc8063ddf6d472d7b46823d801.scope: Deactivated successfully.
Oct 11 03:00:33 compute-0 podman[492905]: 2025-10-11 03:00:33.716420004 +0000 UTC m=+0.163906577 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image)
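Each edpm-managed container above carries a podman healthcheck (the `test` and `mount` keys inside `config_data`); podman runs it on a timer and emits the `health_status=healthy` events seen here. A minimal sketch of triggering the same check by hand, assuming the container names from the log and podman CLI access on the host:

```python
import subprocess

# Run the same healthcheck podman executes periodically for the
# containers logged above (names from the log: multipathd, iscsid).
# Exit code 0 means healthy, anything else unhealthy.
for name in ("multipathd", "iscsid"):
    result = subprocess.run(["podman", "healthcheck", "run", name])
    print(name, "healthy" if result.returncode == 0 else "unhealthy")
```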
Oct 11 03:00:33 compute-0 nova_compute[356901]: 2025-10-11 03:00:33.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:00:33 compute-0 nova_compute[356901]: 2025-10-11 03:00:33.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 03:00:33 compute-0 nova_compute[356901]: 2025-10-11 03:00:33.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 03:00:33 compute-0 podman[492964]: 2025-10-11 03:00:33.974441595 +0000 UTC m=+0.116618215 container create 7f1011602d60198ce0552e0cde232273924ac6961f5be25717d412fb468b01ed (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_albattani, ceph=True, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 03:00:34 compute-0 podman[492964]: 2025-10-11 03:00:33.931388871 +0000 UTC m=+0.073565591 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:00:34 compute-0 systemd[1]: Started libpod-conmon-7f1011602d60198ce0552e0cde232273924ac6961f5be25717d412fb468b01ed.scope.
Oct 11 03:00:34 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:00:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/31f4d9435633f388c40e2c81bb54f4a530bc8d32485acf48d46f97f53317c601/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 03:00:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/31f4d9435633f388c40e2c81bb54f4a530bc8d32485acf48d46f97f53317c601/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 03:00:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/31f4d9435633f388c40e2c81bb54f4a530bc8d32485acf48d46f97f53317c601/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 03:00:34 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/31f4d9435633f388c40e2c81bb54f4a530bc8d32485acf48d46f97f53317c601/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
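The kernel notes that this XFS overlay (apparently created without the `bigtime` feature) stores 32-bit timestamps, so it can only represent times up to 0x7fffffff seconds after the Unix epoch. A quick check of where that limit falls, assuming the printed value is epoch seconds:

```python
from datetime import datetime, timezone

# 0x7fffffff = 2147483647, the largest signed 32-bit epoch-second count.
limit = 0x7FFFFFFF
print(datetime.fromtimestamp(limit, tz=timezone.utc))
# -> 2038-01-19 03:14:07+00:00, the "year 2038" boundary the kernel
#    message warns about; XFS bigtime filesystems extend far past it.
```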
Oct 11 03:00:34 compute-0 podman[492964]: 2025-10-11 03:00:34.151850955 +0000 UTC m=+0.294027615 container init 7f1011602d60198ce0552e0cde232273924ac6961f5be25717d412fb468b01ed (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_albattani, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS)
Oct 11 03:00:34 compute-0 podman[492964]: 2025-10-11 03:00:34.167683794 +0000 UTC m=+0.309860414 container start 7f1011602d60198ce0552e0cde232273924ac6961f5be25717d412fb468b01ed (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_albattani, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 03:00:34 compute-0 podman[492964]: 2025-10-11 03:00:34.17409104 +0000 UTC m=+0.316267730 container attach 7f1011602d60198ce0552e0cde232273924ac6961f5be25717d412fb468b01ed (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_albattani, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Oct 11 03:00:34 compute-0 ceph-mon[191930]: pgmap v2493: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:35 compute-0 nova_compute[356901]: 2025-10-11 03:00:35.107 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 03:00:35 compute-0 nova_compute[356901]: 2025-10-11 03:00:35.108 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 03:00:35 compute-0 nova_compute[356901]: 2025-10-11 03:00:35.108 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 03:00:35 compute-0 nova_compute[356901]: 2025-10-11 03:00:35.108 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]: {
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:         "osd_id": 1,
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:         "type": "bluestore"
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:     },
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:         "osd_id": 2,
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:         "type": "bluestore"
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:     },
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:         "osd_id": 0,
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:         "type": "bluestore"
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]:     }
Oct 11 03:00:35 compute-0 heuristic_albattani[492980]: }
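The JSON emitted by the short-lived `heuristic_albattani` container is the output of the `ceph-volume ... raw list --format json` call that cephadm started via sudo a few seconds earlier: a map keyed by OSD UUID with one entry per BlueStore OSD on this host. A sketch of consuming that structure, assuming the stdout above has been captured to a file (the path is hypothetical):

```python
import json

# Hypothetical capture of the `ceph-volume raw list --format json`
# output shown above.
with open("raw_list.json") as f:
    osds = json.load(f)

# Keys are osd_uuid values; each entry describes one BlueStore OSD.
for osd_uuid, info in sorted(osds.items(), key=lambda kv: kv[1]["osd_id"]):
    print(f"osd.{info['osd_id']}: device={info['device']} "
          f"fsid={info['ceph_fsid']} type={info['type']}")
# -> osd.0 on /dev/mapper/ceph_vg0-ceph_lv0, osd.1 on ceph_vg1-ceph_lv1,
#    osd.2 on ceph_vg2-ceph_lv2, all in cluster 3c7617c3-....
```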
Oct 11 03:00:35 compute-0 systemd[1]: libpod-7f1011602d60198ce0552e0cde232273924ac6961f5be25717d412fb468b01ed.scope: Deactivated successfully.
Oct 11 03:00:35 compute-0 podman[492964]: 2025-10-11 03:00:35.349683739 +0000 UTC m=+1.491860349 container died 7f1011602d60198ce0552e0cde232273924ac6961f5be25717d412fb468b01ed (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_albattani, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, ceph=True, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 03:00:35 compute-0 systemd[1]: libpod-7f1011602d60198ce0552e0cde232273924ac6961f5be25717d412fb468b01ed.scope: Consumed 1.185s CPU time.
Oct 11 03:00:35 compute-0 systemd[1]: var-lib-containers-storage-overlay-31f4d9435633f388c40e2c81bb54f4a530bc8d32485acf48d46f97f53317c601-merged.mount: Deactivated successfully.
Oct 11 03:00:35 compute-0 podman[492964]: 2025-10-11 03:00:35.427318851 +0000 UTC m=+1.569495471 container remove 7f1011602d60198ce0552e0cde232273924ac6961f5be25717d412fb468b01ed (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=heuristic_albattani, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 03:00:35 compute-0 systemd[1]: libpod-conmon-7f1011602d60198ce0552e0cde232273924ac6961f5be25717d412fb468b01ed.scope: Deactivated successfully.
Oct 11 03:00:35 compute-0 sudo[492825]: pam_unix(sudo:session): session closed for user root
Oct 11 03:00:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 03:00:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:00:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 03:00:35 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:00:35 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev ff26fc48-db98-42d3-b2c5-bb0ede4ab6d7 does not exist
Oct 11 03:00:35 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 88c5c646-011f-4b30-b7f4-622349d9e836 does not exist
Oct 11 03:00:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2494: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:35 compute-0 sudo[493025]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:00:35 compute-0 sudo[493025]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:35 compute-0 sudo[493025]: pam_unix(sudo:session): session closed for user root
Oct 11 03:00:35 compute-0 sudo[493050]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 03:00:35 compute-0 sudo[493050]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:00:35 compute-0 sudo[493050]: pam_unix(sudo:session): session closed for user root
Oct 11 03:00:35 compute-0 nova_compute[356901]: 2025-10-11 03:00:35.862 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:00:36 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:00:36 compute-0 ceph-mon[191930]: pgmap v2494: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:00:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2495: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:37 compute-0 nova_compute[356901]: 2025-10-11 03:00:37.967 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:38 compute-0 nova_compute[356901]: 2025-10-11 03:00:38.275 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 03:00:38 compute-0 nova_compute[356901]: 2025-10-11 03:00:38.298 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 03:00:38 compute-0 nova_compute[356901]: 2025-10-11 03:00:38.299 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
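The `network_info` blob nova just cached for instance 0cc56d17-ec3a-4408-bccb-91b29427379e is a JSON list with one VIF entry per port. A sketch of extracting the addresses from such a structure, with field names taken from the entry logged above:

```python
# Assumes `network_info` is the JSON list logged above (one dict per
# VIF, as produced by nova.network.neutron).
def addresses(network_info):
    for vif in network_info:
        for subnet in vif["network"]["subnets"]:
            for ip in subnet["ips"]:
                yield vif["id"], "fixed", ip["address"]
                for fip in ip.get("floating_ips", []):
                    yield vif["id"], "floating", fip["address"]

# For the cached entry above this yields the fixed 192.168.0.236 and the
# floating 192.168.122.201 on port 64dfc81b-528a-4adc-9787-66719d2f9f93.
```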
Oct 11 03:00:38 compute-0 nova_compute[356901]: 2025-10-11 03:00:38.301 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:00:38 compute-0 nova_compute[356901]: 2025-10-11 03:00:38.332 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:00:38 compute-0 nova_compute[356901]: 2025-10-11 03:00:38.333 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:00:38 compute-0 nova_compute[356901]: 2025-10-11 03:00:38.334 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 03:00:38 compute-0 nova_compute[356901]: 2025-10-11 03:00:38.335 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 03:00:38 compute-0 nova_compute[356901]: 2025-10-11 03:00:38.336 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 03:00:38 compute-0 ceph-mon[191930]: pgmap v2495: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 03:00:38 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/921815266' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:00:38 compute-0 nova_compute[356901]: 2025-10-11 03:00:38.831 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.495s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
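nova's resource tracker sizes RBD-backed storage by shelling out to `ceph df`, exactly as the two log lines above show. A minimal standalone reproduction of that call, reusing the `--id openstack` credentials and conf path from the log (requires the same client keyring on the host):

```python
import json
import subprocess

# The same command nova_compute logs above, run standalone.
out = subprocess.run(
    ["ceph", "df", "--format=json",
     "--id", "openstack", "--conf", "/etc/ceph/ceph.conf"],
    check=True, capture_output=True, text=True,
).stdout

stats = json.loads(out)["stats"]
print(f"total={stats['total_bytes']} avail={stats['total_avail_bytes']}")
```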
Oct 11 03:00:38 compute-0 nova_compute[356901]: 2025-10-11 03:00:38.930 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:00:38 compute-0 nova_compute[356901]: 2025-10-11 03:00:38.931 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:00:38 compute-0 nova_compute[356901]: 2025-10-11 03:00:38.931 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:00:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2496: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:39 compute-0 nova_compute[356901]: 2025-10-11 03:00:39.631 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 03:00:39 compute-0 nova_compute[356901]: 2025-10-11 03:00:39.634 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3577MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 03:00:39 compute-0 nova_compute[356901]: 2025-10-11 03:00:39.635 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:00:39 compute-0 nova_compute[356901]: 2025-10-11 03:00:39.637 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:00:39 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/921815266' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:00:39 compute-0 nova_compute[356901]: 2025-10-11 03:00:39.747 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 03:00:39 compute-0 nova_compute[356901]: 2025-10-11 03:00:39.748 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 03:00:39 compute-0 nova_compute[356901]: 2025-10-11 03:00:39.749 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 03:00:39 compute-0 nova_compute[356901]: 2025-10-11 03:00:39.807 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 03:00:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 03:00:40 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3285776909' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:00:40 compute-0 nova_compute[356901]: 2025-10-11 03:00:40.321 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.514s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 03:00:40 compute-0 nova_compute[356901]: 2025-10-11 03:00:40.333 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 03:00:40 compute-0 nova_compute[356901]: 2025-10-11 03:00:40.382 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 03:00:40 compute-0 nova_compute[356901]: 2025-10-11 03:00:40.386 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 03:00:40 compute-0 nova_compute[356901]: 2025-10-11 03:00:40.387 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.751s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
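The inventory nova reported to placement follows the usual placement capacity rule, schedulable = (total - reserved) * allocation_ratio. Checking the numbers from the inventory logged above:

```python
# Placement capacity: schedulable = (total - reserved) * allocation_ratio.
# Values are the ones nova_compute reported for provider 256b11da-....
inventory = {
    "VCPU":      {"total": 8,    "reserved": 0,   "allocation_ratio": 4.0},
    "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
    "DISK_GB":   {"total": 59,   "reserved": 1,   "allocation_ratio": 0.9},
}
for rc, inv in inventory.items():
    cap = (inv["total"] - inv["reserved"]) * inv["allocation_ratio"]
    print(f"{rc}: {cap}")
# -> VCPU 32.0, MEMORY_MB 7168.0, DISK_GB 52.2; the one running instance
#    consumes 1 VCPU, 512 MB and 2 GB of that, matching the allocation
#    logged for instance 0cc56d17-....
```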
Oct 11 03:00:40 compute-0 ceph-mon[191930]: pgmap v2496: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:40 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3285776909' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:00:40 compute-0 nova_compute[356901]: 2025-10-11 03:00:40.865 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:41 compute-0 nova_compute[356901]: 2025-10-11 03:00:41.383 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:00:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2497: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:00:42 compute-0 podman[493121]: 2025-10-11 03:00:42.243905198 +0000 UTC m=+0.109863307 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 03:00:42 compute-0 podman[493120]: 2025-10-11 03:00:42.265758842 +0000 UTC m=+0.140240786 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, distribution-scope=public, build-date=2025-08-20T13:12:41, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.buildah.version=1.33.7, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.component=ubi9-minimal-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.expose-services=, managed_by=edpm_ansible, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, config_id=edpm, version=9.6, container_name=openstack_network_exporter, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, release=1755695350, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., architecture=x86_64, name=ubi9-minimal)
Oct 11 03:00:42 compute-0 podman[493119]: 2025-10-11 03:00:42.267334464 +0000 UTC m=+0.140467060 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_ipmi, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.build-date=20251009)
Oct 11 03:00:42 compute-0 ceph-mon[191930]: pgmap v2497: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:42 compute-0 nova_compute[356901]: 2025-10-11 03:00:42.972 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2498: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:44 compute-0 ceph-mon[191930]: pgmap v2498: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2499: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:45 compute-0 nova_compute[356901]: 2025-10-11 03:00:45.868 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:46 compute-0 ceph-mon[191930]: pgmap v2499: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:00:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2500: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:47 compute-0 nova_compute[356901]: 2025-10-11 03:00:47.977 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:48 compute-0 ceph-mon[191930]: pgmap v2500: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2501: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:50 compute-0 ceph-mon[191930]: pgmap v2501: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:50 compute-0 nova_compute[356901]: 2025-10-11 03:00:50.870 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:51 compute-0 podman[493184]: 2025-10-11 03:00:51.269225073 +0000 UTC m=+0.155469090 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, release-0.7.12=, version=9.4, name=ubi9, maintainer=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, build-date=2024-09-18T21:23:30, container_name=kepler, distribution-scope=public, io.buildah.version=1.29.0, release=1214.1726694543, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vendor=Red Hat, Inc., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 9, com.redhat.component=ubi9-container, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, config_id=edpm, io.openshift.expose-services=, managed_by=edpm_ansible, architecture=x86_64, io.openshift.tags=base rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Oct 11 03:00:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2502: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:00:52 compute-0 ceph-mon[191930]: pgmap v2502: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:52 compute-0 nova_compute[356901]: 2025-10-11 03:00:52.983 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2503: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:53 compute-0 nova_compute[356901]: 2025-10-11 03:00:53.892 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:00:54 compute-0 ceph-mon[191930]: pgmap v2503: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:00:54.895 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:00:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:00:54.896 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:00:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:00:54.897 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 03:00:55 compute-0 podman[493204]: 2025-10-11 03:00:55.242607954 +0000 UTC m=+0.126258322 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 03:00:55 compute-0 podman[493206]: 2025-10-11 03:00:55.248389765 +0000 UTC m=+0.127380836 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, container_name=ceilometer_agent_compute, org.label-schema.vendor=CentOS, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']})
Oct 11 03:00:55 compute-0 podman[493207]: 2025-10-11 03:00:55.266672415 +0000 UTC m=+0.126655813 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251009, container_name=ovn_metadata_agent, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_id=ovn_metadata_agent, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 03:00:55 compute-0 podman[493205]: 2025-10-11 03:00:55.294693561 +0000 UTC m=+0.176071784 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_id=ovn_controller, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
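The three entries above are podman health_status events, emitted each time a container's periodic healthcheck (the 'test' command plus 'mount' in config_data) runs. A minimal sketch that watches for the same events via the podman CLI; the JSON field names are assumptions and vary between podman versions:

    # Watch for the same health_status events these log lines record.
    # Requires podman 4.x; JSON field names may differ by version.
    import json
    import subprocess

    proc = subprocess.Popen(
        ["podman", "events", "--filter", "event=health_status", "--format", "json"],
        stdout=subprocess.PIPE, text=True)
    for line in proc.stdout:
        ev = json.loads(line)
        print(ev.get("Name"), ev.get("Status"), ev.get("health_status"))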
Oct 11 03:00:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2504: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:55 compute-0 nova_compute[356901]: 2025-10-11 03:00:55.874 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
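The recurring "[POLLIN] on fd 24" lines come from python-ovs: nova-compute's ovsdbapp connection registers its OVSDB socket with an ovs Poller, which logs a wakeup whenever the fd becomes readable. A minimal sketch of that wait pattern, assuming the python-ovs package is installed:

    # Same wait pattern that produces the "[POLLIN] on fd N" debug lines
    # (ovs.poller wraps select.poll and logs wakeups via its vlog module).
    import select
    import socket
    from ovs import poller

    a, b = socket.socketpair()
    b.send(b"x")                            # make fd `a` readable
    p = poller.Poller()
    p.fd_wait(a.fileno(), select.POLLIN)    # register interest, as the IDL does
    p.block()                               # returns once the fd is readable
    print("woke up on fd", a.fileno())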
Oct 11 03:00:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:00:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:00:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:00:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:00:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:00:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:00:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_03:00:56
Oct 11 03:00:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 03:00:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 03:00:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['volumes', 'cephfs.cephfs.meta', 'images', 'cephfs.cephfs.data', 'default.rgw.log', 'backups', 'default.rgw.control', 'vms', '.mgr', '.rgw.root', 'default.rgw.meta']
Oct 11 03:00:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
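This balancer block is the mgr module building its periodic upmap optimization plan; "max misplaced 0.050000" is the default target_max_misplaced_ratio, and "prepared 0/10 changes" means no PG moves were needed. A sketch that inspects the same state from the ceph CLI (assumes a reachable cluster and client keyring):

    # Inspect the balancer state described in the log above.
    import subprocess

    for cmd in (["ceph", "balancer", "status"],
                ["ceph", "config", "get", "mgr", "target_max_misplaced_ratio"]):
        out = subprocess.run(cmd, capture_output=True, text=True)
        print(" ".join(cmd), "->", out.stdout.strip())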
Oct 11 03:00:56 compute-0 ceph-mon[191930]: pgmap v2504: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:00:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2505: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 03:00:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 03:00:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 03:00:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 03:00:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 03:00:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 03:00:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 03:00:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 03:00:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 03:00:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
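The rbd_support handlers above reload per-pool mirror-snapshot and trash-purge schedules; an empty start_after simply means the scan starts from the beginning. A sketch listing whatever schedules are configured for the same pools:

    # List schedules for the pools named above. Both subcommands are
    # standard `rbd` CLI; output is empty when nothing is scheduled.
    import subprocess

    for pool in ("vms", "volumes", "backups", "images"):
        for sub in (["mirror", "snapshot", "schedule", "ls"],
                    ["trash", "purge", "schedule", "ls"]):
            out = subprocess.run(["rbd", *sub, "--pool", pool],
                                 capture_output=True, text=True).stdout.strip()
            print(pool, " ".join(sub), "->", out or "(none)")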
Oct 11 03:00:57 compute-0 nova_compute[356901]: 2025-10-11 03:00:57.989 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:00:58 compute-0 ceph-mon[191930]: pgmap v2505: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2506: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:00:59 compute-0 podman[157119]: time="2025-10-11T03:00:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 03:00:59 compute-0 podman[157119]: @ - - [11/Oct/2025:03:00:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 03:00:59 compute-0 podman[157119]: @ - - [11/Oct/2025:03:00:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9115 "" "Go-http-client/1.1"
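These two access-log lines are a metrics collector calling podman's libpod REST API over its service socket, the same data `podman ps --all` and `podman stats` return. A minimal sketch reproducing the first request; the socket path /run/podman/podman.sock is an assumption, adjust to your service socket:

    # Reproduce the logged GET against the libpod API socket.
    import http.client
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        def __init__(self, sock_path):
            super().__init__("localhost")
            self.sock_path = sock_path
        def connect(self):
            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            s.connect(self.sock_path)
            self.sock = s

    conn = UnixHTTPConnection("/run/podman/podman.sock")
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true&external=false")
    resp = conn.getresponse()
    print(resp.status, resp.read(200))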
Oct 11 03:01:00 compute-0 nova_compute[356901]: 2025-10-11 03:01:00.877 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:00 compute-0 ceph-mon[191930]: pgmap v2506: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:01 compute-0 openstack_network_exporter[374316]: ERROR   03:01:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 03:01:01 compute-0 openstack_network_exporter[374316]: ERROR   03:01:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:01:01 compute-0 openstack_network_exporter[374316]: ERROR   03:01:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:01:01 compute-0 openstack_network_exporter[374316]: ERROR   03:01:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 03:01:01 compute-0 openstack_network_exporter[374316]: ERROR   03:01:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
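The exporter errors above mean no unixctl control sockets were found where it looked, which is expected on a compute node (ovn-northd only runs on controllers), and the pmd-perf/pmd-rxq calls fail because this host has no userspace (netdev) datapath. A quick check for the sockets, with glob patterns assumed from the default OVS/OVN runtime directories:

    # Look for the unixctl control sockets the exporter probes.
    import glob

    for pattern in ("/run/openvswitch/ovs-vswitchd.*.ctl",
                    "/run/openvswitch/ovsdb-server.*.ctl",
                    "/run/ovn/ovn-northd.*.ctl"):
        print(pattern, "->", glob.glob(pattern) or "no control socket")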
Oct 11 03:01:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2507: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:01 compute-0 CROND[493288]: (root) CMD (run-parts /etc/cron.hourly)
Oct 11 03:01:01 compute-0 run-parts[493291]: (/etc/cron.hourly) starting 0anacron
Oct 11 03:01:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:01:01 compute-0 run-parts[493297]: (/etc/cron.hourly) finished 0anacron
Oct 11 03:01:01 compute-0 CROND[493287]: (root) CMDEND (run-parts /etc/cron.hourly)
Oct 11 03:01:02 compute-0 ceph-mon[191930]: pgmap v2507: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:02 compute-0 nova_compute[356901]: 2025-10-11 03:01:02.993 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2508: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:04 compute-0 podman[493298]: 2025-10-11 03:01:04.256802289 +0000 UTC m=+0.135905177 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, container_name=multipathd)
Oct 11 03:01:04 compute-0 podman[493299]: 2025-10-11 03:01:04.281724599 +0000 UTC m=+0.154046765 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.vendor=CentOS, config_id=iscsid, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009)
Oct 11 03:01:04 compute-0 ceph-mon[191930]: pgmap v2508: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2509: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:05 compute-0 nova_compute[356901]: 2025-10-11 03:01:05.880 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:01:06 compute-0 ceph-mon[191930]: pgmap v2509: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0009191400908380543 of space, bias 1.0, pg target 0.2757420272514163 quantized to 32 (current 32)
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
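Each pg_autoscaler row computes capacity_ratio times bias times a PG budget, then quantizes to a power of two (the "16 (current 32)" row stays at 32 because the difference is below the adjustment threshold). The logged values are consistent with a budget of 300, i.e. 3 OSDs at the default mon_target_pg_per_osd of 100; a worked check:

    # Reproduce two rows from the pg_autoscaler output above.
    # pg_target = capacity_ratio * bias * pg_budget, with pg_budget = 300
    # (consistent with 3 OSDs at the default mon_target_pg_per_osd = 100).
    def pg_target(capacity_ratio, bias, pg_budget=300):
        return capacity_ratio * bias * pg_budget

    print(pg_target(7.185749983720779e-06, 1.0))  # ~0.0021557249951 ('.mgr')
    print(pg_target(5.087256625643029e-07, 4.0))  # ~0.0006104707951 ('cephfs.cephfs.meta')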
Oct 11 03:01:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2510: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:08 compute-0 nova_compute[356901]: 2025-10-11 03:01:07.999 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:08 compute-0 ceph-mon[191930]: pgmap v2510: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 03:01:09 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 4800.0 total, 600.0 interval
                                            Cumulative writes: 11K writes, 51K keys, 11K commit groups, 1.0 writes per commit group, ingest: 0.07 GB, 0.01 MB/s
                                            Cumulative WAL: 11K writes, 11K syncs, 1.00 writes per sync, written: 0.07 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 1348 writes, 6179 keys, 1348 commit groups, 1.0 writes per commit group, ingest: 8.68 MB, 0.01 MB/s
                                            Interval WAL: 1348 writes, 1348 syncs, 1.00 writes per sync, written: 0.01 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
                                            
                                            ** Compaction Stats [default] **
                                            Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                              L0      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.1      0.1       0.0   1.0      0.0     89.3      0.71              0.28        36    0.020       0      0       0.0       0.0
                                              L6      1/0    7.24 MB   0.0      0.3     0.1      0.3       0.3      0.0       0.0   4.2    142.0    117.1      2.26              1.14        35    0.064    193K    19K       0.0       0.0
                                             Sum      1/0    7.24 MB   0.0      0.3     0.1      0.3       0.3      0.1       0.0   5.2    108.0    110.4      2.97              1.42        71    0.042    193K    19K       0.0       0.0
                                             Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   5.6    126.8    129.1      0.37              0.18        10    0.037     33K   2558       0.0       0.0
                                            
                                            ** Compaction Stats [default] **
                                            Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
                                            ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
                                             Low      0/0    0.00 KB   0.0      0.3     0.1      0.3       0.3      0.0       0.0   0.0    142.0    117.1      2.26              1.14        35    0.064    193K    19K       0.0       0.0
                                            High      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.1      0.1       0.0   0.0      0.0     89.8      0.71              0.28        35    0.020       0      0       0.0       0.0
                                            User      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0     12.0      0.00              0.00         1    0.004       0      0       0.0       0.0
                                            
                                            Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
                                            
                                            Uptime(secs): 4800.0 total, 600.0 interval
                                            Flush(GB): cumulative 0.062, interval 0.008
                                            AddFile(GB): cumulative 0.000, interval 0.000
                                            AddFile(Total Files): cumulative 0, interval 0
                                            AddFile(L0 Files): cumulative 0, interval 0
                                            AddFile(Keys): cumulative 0, interval 0
                                            Cumulative compaction: 0.32 GB write, 0.07 MB/s write, 0.31 GB read, 0.07 MB/s read, 3.0 seconds
                                            Interval compaction: 0.05 GB write, 0.08 MB/s write, 0.05 GB read, 0.08 MB/s read, 0.4 seconds
                                            Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
                                            Block cache BinnedLRUCache@0x55816e47f1f0#2 capacity: 304.00 MB usage: 40.41 MB table_size: 0 occupancy: 18446744073709551615 collections: 9 last_copies: 0 last_secs: 0.000401 secs_since: 0
                                            Block cache entry stats(count,size,portion): DataBlock(2781,39.02 MB,12.8343%) FilterBlock(72,541.36 KB,0.173905%) IndexBlock(72,888.05 KB,0.285274%) Misc(1,0.00 KB,0%)
                                            
                                            ** File Read Latency Histogram By Level [default] **
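Two figures in the DB Stats block above can be checked directly: 8.68 MB ingested over the 600 s interval rounds to the logged 0.01 MB/s, and 11K WAL writes against 11K syncs gives the logged 1.00 writes per sync:

    # Sanity-check two rates from the RocksDB stats dump.
    interval_mb, interval_secs = 8.68, 600.0
    print(round(interval_mb / interval_secs, 2))  # 0.01 MB/s, as logged

    wal_writes, wal_syncs = 11_000, 11_000
    print(wal_writes / wal_syncs)                 # 1.0 writes per sync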
Oct 11 03:01:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2511: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:10 compute-0 nova_compute[356901]: 2025-10-11 03:01:10.883 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:10 compute-0 ceph-mon[191930]: pgmap v2511: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2512: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:01:12 compute-0 ceph-mon[191930]: pgmap v2512: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:13 compute-0 nova_compute[356901]: 2025-10-11 03:01:13.005 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:13 compute-0 podman[493337]: 2025-10-11 03:01:13.229212777 +0000 UTC m=+0.098744068 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 03:01:13 compute-0 podman[493336]: 2025-10-11 03:01:13.245901222 +0000 UTC m=+0.125578399 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.expose-services=, io.openshift.tags=minimal rhel9, distribution-scope=public, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, com.redhat.component=ubi9-minimal-container, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., managed_by=edpm_ansible, version=9.6, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., io.buildah.version=1.33.7, release=1755695350, vcs-type=git, name=ubi9-minimal, config_id=edpm, container_name=openstack_network_exporter, url=https://catalog.redhat.com/en/search?searchType=containers, build-date=2025-08-20T13:12:41, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']})
Oct 11 03:01:13 compute-0 podman[493335]: 2025-10-11 03:01:13.258983938 +0000 UTC m=+0.144405257 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
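Each of these containers is probed by the command in its healthcheck 'test' key, mounted at /openstack inside the container. The same check can be triggered on demand; a sketch using the podman CLI, with the container name taken from the log:

    # Run the node_exporter healthcheck on demand, outside its timer.
    import subprocess

    r = subprocess.run(["podman", "healthcheck", "run", "node_exporter"],
                       capture_output=True, text=True)
    print("healthy" if r.returncode == 0 else f"unhealthy: {r.stdout or r.stderr}")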
Oct 11 03:01:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2513: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.877 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads available to execute them. Therefore, the polling process can be expected to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.878 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.878 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.879 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.882 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.882 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.882 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.882 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.882 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.885 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.885 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.885 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.885 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.886 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc4dbe0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.890 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
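Discovery found one libvirt-backed instance; OS-EXT-SRV-ATTR:instance_name (instance-00000001) is the libvirt domain name the pollsters query for stats. A sketch enumerating local domains the same way, assuming python3-libvirt and read access to /run/libvirt (which the container mounts read-only):

    # Enumerate local libvirt domains, as compute discovery ultimately does.
    import libvirt

    conn = libvirt.openReadOnly("qemu:///system")
    try:
        for dom in conn.listAllDomains():
            print(dom.name(), dom.UUIDString(), bool(dom.isActive()))
    finally:
        conn.close()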
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.890 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.891 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.891 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.891 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.892 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T03:01:13.891611) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.900 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2856 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.902 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
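The seven lines above form one complete pollster cycle: check whether the pollster's source needs hashring coordination, update its heartbeat, convert the libvirt counters into a sample, and log completion. An illustrative, self-contained sketch of that cycle; every name in it is a hypothetical stand-in, not ceilometer's real API:

    # Hypothetical sketch of the per-pollster cycle visible in the log above.
    import time

    heartbeats = {}

    def needs_coordination(pollster):
        # Mirrors "Checking if we need coordination ... group name [None]".
        return pollster["coordination_group"] is not None

    def run_cycle(pollsters, instances):
        for p in pollsters:
            if needs_coordination(p):
                continue                          # only the hashring owner polls
            heartbeats[p["name"]] = time.time()   # "Pollster heartbeat update"
            for inst in instances:
                volume = p["read"](inst)          # e.g. cumulative rx bytes
                print(f"{inst['id']}/{p['name']} volume: {volume}")

    run_cycle(
        [{"name": "network.incoming.bytes", "coordination_group": None,
          "read": lambda inst: 2856}],            # value taken from the log
        [{"id": "0cc56d17-ec3a-4408-bccb-91b29427379e"}],
    )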
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.902 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.902 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.902 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.902 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.903 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.903 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T03:01:13.903140) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.903 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 25 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.904 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.904 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.905 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.905 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.905 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.905 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.905 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.906 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.907 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.907 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.907 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.908 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.908 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T03:01:13.905737) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.908 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.908 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.909 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.909 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.910 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.910 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T03:01:13.908222) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.910 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.910 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.910 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.911 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T03:01:13.910537) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.941 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.942 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.943 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.943 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
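The three _stats_to_sample lines above carry one volume per attached disk device for the same instance (0cc56d17-...). A quick way to group such readings for inspection, written against the exact DEBUG format shown in this journal (the regex targets only these lines):

import re
from collections import defaultdict

# Matches the "<instance>/<meter> volume: <n>" DEBUG lines emitted by
# ceilometer.compute.pollsters._stats_to_sample in this log.
SAMPLE_RE = re.compile(
    r"ceilometer\.compute\.pollsters \[-\] "
    r"(?P<instance>[0-9a-f-]+)/(?P<meter>[\w.]+) volume: (?P<volume>[\d.]+)"
)

def collect_samples(lines):
    samples = defaultdict(list)          # (instance, meter) -> list of volumes
    for line in lines:
        m = SAMPLE_RE.search(line)
        if m:
            samples[(m["instance"], m["meter"])].append(float(m["volume"]))
    return samples

# Applied to the lines above this yields, e.g.,
# ('0cc56d17-...', 'disk.device.capacity') -> [1073741824.0, 1073741824.0, 485376.0]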
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.944 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.944 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.944 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.944 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.944 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:13.945 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T03:01:13.944749) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.009 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.010 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.010 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.011 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.011 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.012 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.012 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.012 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.012 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.012 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.013 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T03:01:14.012714) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.013 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.014 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.015 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
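The disk.device.read.latency volumes above (1873916781, 320672168, 185829476) are cumulative counters that the libvirt-based compute pollster derives from per-device total-read-time statistics, reported in nanoseconds. Assuming those units hold for this deployment, mean latency per read follows from dividing by the matching disk.device.read.requests counters logged just below (840, 173, 109):

# Hedged arithmetic: cumulative read time (ns, assumed) / cumulative reads.
read_time_ns = [1873916781, 320672168, 185829476]   # disk.device.read.latency
read_reqs    = [840, 173, 109]                      # disk.device.read.requests

for dev, (t, n) in enumerate(zip(read_time_ns, read_reqs)):
    print(f"device {dev}: {t / n / 1e6:.2f} ms per read on average")
# device 0 works out to about 2.23 ms per read, devices 1 and 2 slightly less.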
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.015 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.016 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.016 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.016 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.016 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.016 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.017 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.017 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.018 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.018 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.018 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.019 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.019 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.019 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.019 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.020 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.020 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T03:01:14.016576) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.021 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T03:01:14.019590) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.021 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.021 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.021 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.022 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.022 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.022 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.022 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.022 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.023 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.023 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.024 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.024 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T03:01:14.022521) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.025 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.025 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.025 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.025 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.026 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T03:01:14.025716) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.025 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.026 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.026 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.027 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.027 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.028 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.028 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.028 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.028 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.028 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.028 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T03:01:14.028471) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.053 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.054 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
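The power.state volume of 1 above corresponds to a running domain: the compute pollster reports the numeric libvirt domain state. The state codes below are the standard libvirt virDomainState values; that this deployment's pollster passes them through unchanged is the assumption here:

# libvirt virDomainState codes, handy for decoding power.state samples.
LIBVIRT_POWER_STATE = {
    0: "nostate",
    1: "running",      # the value sampled above
    2: "blocked",
    3: "paused",
    4: "shutdown",     # domain is being shut down
    5: "shutoff",
    6: "crashed",
    7: "pmsuspended",
}

print(LIBVIRT_POWER_STATE[1])   # -> running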
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.054 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.054 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.054 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.054 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.054 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.055 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.055 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.055 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.055 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T03:01:14.054866) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.056 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.056 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.056 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.056 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.056 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.056 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.056 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.057 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.057 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.057 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
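Unlike the pollsters above, network.incoming.bytes.rate is skipped outright because discovery returned no resources this pollster has not already handled in the current cycle. A rough sketch of that short-circuit, with invented names (discover_resources, polled_this_cycle) standing in for the manager's internal resource cache:

# Hedged sketch of the per-cycle skip seen above: a pollster only runs when
# discovery yields resources it has not yet polled this cycle.
def run_pollster(pollster, discover_resources, polled_this_cycle):
    resources = [r for r in discover_resources()
                 if r not in polled_this_cycle]
    if not resources:
        # Corresponds to "Skip pollster ..., no new resources found this cycle".
        return []
    polled_this_cycle.update(resources)
    return pollster.get_samples(resources)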
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.057 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.057 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.058 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.058 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.058 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.058 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.058 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.059 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.059 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.059 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.059 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T03:01:14.056766) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.059 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.059 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T03:01:14.058375) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.059 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T03:01:14.059537) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.059 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.060 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.060 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.060 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.060 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.060 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.060 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.060 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.061 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.061 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.061 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.061 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.061 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.061 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.062 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.062 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.062 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.062 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.062 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T03:01:14.060699) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.063 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.063 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T03:01:14.061911) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.063 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.063 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.063 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.063 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T03:01:14.063102) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.063 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.064 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.064 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.064 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.064 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.064 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.064 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.065 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.065 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.065 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.065 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.066 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.066 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.066 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.066 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.066 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.066 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.067 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.067 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.067 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T03:01:14.064411) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.067 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.067 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T03:01:14.066272) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.067 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.067 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 72230000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.068 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
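The cpu sample above (72230000000) is cumulative CPU time in nanoseconds, so a single reading is not a utilization figure; only the delta between two polling cycles is. A worked example of that conversion, where the previous reading, the 300-second polling interval, and the single-vCPU flavor are all assumptions not visible in these lines:

# cpu samples are cumulative nanoseconds of CPU time; utilization needs two.
prev_ns, curr_ns = 70_000_000_000, 72_230_000_000   # prev value is invented
interval_s = 300                                    # assumed polling interval
vcpus = 1                                           # assumed flavor size

util = (curr_ns - prev_ns) / (interval_s * 1e9 * vcpus)
print(f"{util:.1%} average CPU utilization over the interval")  # -> 0.7%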
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.068 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.068 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.068 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T03:01:14.067613) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.068 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.068 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.068 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.069 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T03:01:14.068943) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.069 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2482 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.069 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.069 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.069 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.069 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.070 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.070 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.070 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.80859375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.070 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.071 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T03:01:14.070151) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
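The interleaved "Updated heartbeat" lines come from a separate worker (thread 12) that records the last successful poll time per meter, which is why they sometimes land a line or two after the poll they describe. Comparing those timestamps against the wall clock is a cheap liveness check; a sketch matched to the exact format above, assuming the journal timestamps are UTC:

import re
from datetime import datetime, timezone

HEARTBEAT_RE = re.compile(
    r"Updated heartbeat for (?P<meter>[\w.]+) "
    r"\((?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+)\)"
)

def stale_meters(lines, max_age_s=600, now=None):
    now = now or datetime.now(timezone.utc)
    last_seen = {}
    for line in lines:
        m = HEARTBEAT_RE.search(line)
        if m:
            # Log timestamps carry no zone; UTC is assumed here.
            ts = datetime.fromisoformat(m["ts"]).replace(tzinfo=timezone.utc)
            last_seen[m["meter"]] = ts
    return {meter: ts for meter, ts in last_seen.items()
            if (now - ts).total_seconds() > max_age_s}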
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.071 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.071 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.071 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.072 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.072 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.072 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.073 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.073 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.073 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.073 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.073 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.073 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.074 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.074 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.074 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.074 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.074 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.075 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.075 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.075 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.075 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.075 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.076 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.076 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.076 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.076 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.076 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:01:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:01:14.076 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
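The ceilometer_agent_compute lines above trace one polling cycle: the manager runs a discovery method (here `local_instances`) once, skips any pollster whose discovery returned no new resources (network.outgoing.bytes.rate), and logs "Finished processing pollster [...]" per meter. A minimal sketch of that loop, with hypothetical names rather than the real ceilometer.polling.manager API:

```python
# Sketch of a ceilometer-style polling cycle (hypothetical Pollster/run_cycle
# names; not the actual ceilometer.polling API).
class Pollster:
    def __init__(self, name):
        self.name = name

    def get_samples(self, resources):
        # A real pollster would query libvirt for each instance here.
        return [{"meter": self.name, "resource": r} for r in resources]

def run_cycle(pollsters, discover):
    resources = discover()  # e.g. the local_instances discovery above
    for p in pollsters:
        if not resources:
            print(f"Skip pollster {p.name}, no new resources found this cycle")
            continue
        for sample in p.get_samples(resources):
            print("publish", sample)
        print(f"Finished processing pollster [{p.name}].")

run_cycle([Pollster("disk.device.usage"), Pollster("memory.usage")],
          discover=lambda: ["instance-00000001"])
```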
Oct 11 03:01:14 compute-0 ceph-mon[191930]: pgmap v2513: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2514: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
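The recurring pgmap lines report placement-group state per map epoch: all 321 PGs active+clean, 313 MiB of 60 GiB raw capacity used. The same numbers can be read programmatically; a sketch using `ceph -s --format json` (an illustration, not a command from this log; key names such as pgs_by_state and bytes_avail match recent Ceph releases but should be verified against your cluster):

```python
import json
import subprocess

# Read cluster PG state, the data behind the pgmap summary lines above.
out = subprocess.run(
    ["ceph", "-s", "--format", "json"],
    capture_output=True, text=True, check=True,
).stdout
pgmap = json.loads(out)["pgmap"]
states = {s["state_name"]: s["count"] for s in pgmap["pgs_by_state"]}
print(f'{pgmap["num_pgs"]} pgs: {states}')
print(f'{pgmap["bytes_avail"] / 2**30:.0f} GiB / '
      f'{pgmap["bytes_total"] / 2**30:.0f} GiB avail')
```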
Oct 11 03:01:15 compute-0 nova_compute[356901]: 2025-10-11 03:01:15.886 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:01:16 compute-0 ceph-mon[191930]: pgmap v2514: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2515: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:18 compute-0 nova_compute[356901]: 2025-10-11 03:01:18.010 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:18 compute-0 nova_compute[356901]: 2025-10-11 03:01:18.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:01:19 compute-0 ceph-mon[191930]: pgmap v2515: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2516: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:20 compute-0 nova_compute[356901]: 2025-10-11 03:01:20.889 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:20 compute-0 nova_compute[356901]: 2025-10-11 03:01:20.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:01:21 compute-0 ceph-mon[191930]: pgmap v2516: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2517: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:01:21 compute-0 nova_compute[356901]: 2025-10-11 03:01:21.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:01:22 compute-0 podman[493395]: 2025-10-11 03:01:22.272882668 +0000 UTC m=+0.157398386 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.29.0, vendor=Red Hat, Inc., name=ubi9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, container_name=kepler, config_id=edpm, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, io.openshift.tags=base rhel9, architecture=x86_64, release-0.7.12=, summary=Provides the latest release of Red Hat Universal Base Image 9., com.redhat.component=ubi9-container, version=9.4, build-date=2024-09-18T21:23:30, io.k8s.display-name=Red Hat Universal Base Image 9, maintainer=Red Hat, Inc., release=1214.1726694543, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, managed_by=edpm_ansible, vcs-type=git, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f)
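The podman health_status events like the one above fire each time the configured healthcheck (`/openstack/healthcheck kepler`, per the config_data shown) runs and passes. A sketch for reading the recorded health state back; older podman stores it under State.Healthcheck and newer under State.Health, so the code checks both (an assumption to verify on your podman version):

```python
import json
import subprocess

# Query the recorded health state matching health_status=healthy above.
raw = subprocess.run(["podman", "inspect", "kepler"],
                     capture_output=True, text=True, check=True).stdout
state = json.loads(raw)[0]["State"]
health = state.get("Health") or state.get("Healthcheck") or {}
print(health.get("Status", "no healthcheck configured"),
      "failing streak:", health.get("FailingStreak"))
```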
Oct 11 03:01:23 compute-0 nova_compute[356901]: 2025-10-11 03:01:23.015 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:23 compute-0 ceph-mon[191930]: pgmap v2517: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2518: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:25 compute-0 ceph-mon[191930]: pgmap v2518: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2519: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:25 compute-0 nova_compute[356901]: 2025-10-11 03:01:25.893 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:26 compute-0 podman[493416]: 2025-10-11 03:01:26.24665446 +0000 UTC m=+0.113816082 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, managed_by=edpm_ansible, tcib_managed=true, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007)
Oct 11 03:01:26 compute-0 podman[493414]: 2025-10-11 03:01:26.251648328 +0000 UTC m=+0.127691056 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 03:01:26 compute-0 podman[493417]: 2025-10-11 03:01:26.264071241 +0000 UTC m=+0.117980040 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true)
Oct 11 03:01:26 compute-0 podman[493415]: 2025-10-11 03:01:26.307044385 +0000 UTC m=+0.180236232 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, config_id=ovn_controller, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, org.label-schema.license=GPLv2)
Oct 11 03:01:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:01:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:01:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:01:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:01:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:01:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:01:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:01:26 compute-0 nova_compute[356901]: 2025-10-11 03:01:26.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:01:27 compute-0 ceph-mon[191930]: pgmap v2519: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2520: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 03:01:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1544244874' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 03:01:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 03:01:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1544244874' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 03:01:28 compute-0 nova_compute[356901]: 2025-10-11 03:01:28.019 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1544244874' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 03:01:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1544244874' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 03:01:28 compute-0 nova_compute[356901]: 2025-10-11 03:01:28.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:01:28 compute-0 nova_compute[356901]: 2025-10-11 03:01:28.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
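_reclaim_queued_deletes bails out here because reclaim_instance_interval is not set to a positive value, so soft-deleted instances are never purged by this periodic task. A sketch of that guard (simplified, not the actual nova.compute.manager code):

```python
# Guard behind the "skipping..." line above: reclaim only runs when
# reclaim_instance_interval > 0.
def reclaim_queued_deletes(reclaim_instance_interval, soft_deleted, destroy):
    if reclaim_instance_interval <= 0:
        print("CONF.reclaim_instance_interval <= 0, skipping...")
        return
    for instance in soft_deleted:
        destroy(instance)

reclaim_queued_deletes(0, ["instance-00000001"], destroy=print)
```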
Oct 11 03:01:29 compute-0 ceph-mon[191930]: pgmap v2520: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2521: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:29 compute-0 podman[157119]: time="2025-10-11T03:01:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 03:01:29 compute-0 podman[157119]: @ - - [11/Oct/2025:03:01:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 03:01:29 compute-0 podman[157119]: @ - - [11/Oct/2025:03:01:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9104 "" "Go-http-client/1.1"
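These access-log entries are the prometheus-podman-exporter scraping the libpod REST API over /run/podman/podman.sock (the CONTAINER_HOST value in the podman_exporter config logged earlier). A sketch issuing the same containers/json call over the unix socket with only the standard library; the Names/State field names follow libpod's list-containers response:

```python
import http.client
import json
import socket

class UnixHTTPConnection(http.client.HTTPConnection):
    """HTTPConnection over an AF_UNIX socket (stdlib only)."""
    def __init__(self, socket_path):
        super().__init__("localhost")
        self.socket_path = socket_path

    def connect(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.sock.connect(self.socket_path)

conn = UnixHTTPConnection("/run/podman/podman.sock")
conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
for c in json.loads(conn.getresponse().read()):
    print(c["Names"][0], c["State"])
```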
Oct 11 03:01:30 compute-0 nova_compute[356901]: 2025-10-11 03:01:30.896 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:31 compute-0 ceph-mon[191930]: pgmap v2521: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:31 compute-0 openstack_network_exporter[374316]: ERROR   03:01:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:01:31 compute-0 openstack_network_exporter[374316]: ERROR   03:01:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:01:31 compute-0 openstack_network_exporter[374316]: ERROR   03:01:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 03:01:31 compute-0 openstack_network_exporter[374316]: ERROR   03:01:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 03:01:31 compute-0 openstack_network_exporter[374316]: ERROR   03:01:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 03:01:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2522: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:01:32 compute-0 nova_compute[356901]: 2025-10-11 03:01:32.899 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:01:33 compute-0 nova_compute[356901]: 2025-10-11 03:01:33.023 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:33 compute-0 ceph-mon[191930]: pgmap v2522: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2523: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:34 compute-0 nova_compute[356901]: 2025-10-11 03:01:34.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:01:34 compute-0 nova_compute[356901]: 2025-10-11 03:01:34.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 03:01:34 compute-0 nova_compute[356901]: 2025-10-11 03:01:34.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 03:01:35 compute-0 podman[493497]: 2025-10-11 03:01:35.208959256 +0000 UTC m=+0.094198002 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, container_name=iscsid, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true)
Oct 11 03:01:35 compute-0 nova_compute[356901]: 2025-10-11 03:01:35.220 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 03:01:35 compute-0 nova_compute[356901]: 2025-10-11 03:01:35.221 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 03:01:35 compute-0 nova_compute[356901]: 2025-10-11 03:01:35.221 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 03:01:35 compute-0 nova_compute[356901]: 2025-10-11 03:01:35.221 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 03:01:35 compute-0 podman[493496]: 2025-10-11 03:01:35.236558721 +0000 UTC m=+0.118687586 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_managed=true, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 03:01:35 compute-0 ceph-mon[191930]: pgmap v2523: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2524: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:35 compute-0 nova_compute[356901]: 2025-10-11 03:01:35.900 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:35 compute-0 sudo[493534]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:01:35 compute-0 sudo[493534]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:35 compute-0 sudo[493534]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:36 compute-0 sudo[493559]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:01:36 compute-0 sudo[493559]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:36 compute-0 sudo[493559]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:36 compute-0 sudo[493584]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:01:36 compute-0 sudo[493584]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:36 compute-0 sudo[493584]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:36 compute-0 sudo[493609]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ls
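The sudo bursts from ceph-admin are cephadm's host probes: `/bin/true` to validate sudo access, `which python3` to locate an interpreter, then the copied cephadm binary itself, here with `ls` to inventory the daemons on this host. A sketch invoking the same `ls` and reading its JSON, to be run as root; the name/state field names follow cephadm's output and should be verified per release:

```python
import json
import subprocess

# Re-run the cephadm daemon inventory seen in the sudo line above (as root).
out = subprocess.run(
    ["python3",
     "/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/"
     "cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d",
     "--timeout", "895", "ls"],
    capture_output=True, text=True, check=True,
).stdout
for daemon in json.loads(out):
    print(daemon.get("name"), daemon.get("state"))
```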
Oct 11 03:01:36 compute-0 sudo[493609]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:01:37 compute-0 ceph-mon[191930]: pgmap v2524: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:37 compute-0 podman[493706]: 2025-10-11 03:01:37.360512398 +0000 UTC m=+0.151365241 container exec ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default)
Oct 11 03:01:37 compute-0 podman[493706]: 2025-10-11 03:01:37.511353569 +0000 UTC m=+0.302206432 container exec_died ab2a7db9f9dd741d4d1ddef00b4d16d967a81a97e3a84d9480a92e895b6dd51e (image=quay.io/ceph/ceph:v18, name=ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mon-compute-0, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.license=GPLv2)
Oct 11 03:01:37 compute-0 nova_compute[356901]: 2025-10-11 03:01:37.588 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 03:01:37 compute-0 nova_compute[356901]: 2025-10-11 03:01:37.606 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 03:01:37 compute-0 nova_compute[356901]: 2025-10-11 03:01:37.607 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
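The info-cache refresh above stores one OVS VIF with fixed IP 192.168.0.236 and floating IP 192.168.122.201 on tap64dfc81b-52. A sketch walking that network_info structure, trimmed to the fields actually used:

```python
# network_info reduced to the relevant fields from the log line above.
network_info = [{
    "devname": "tap64dfc81b-52",
    "network": {"subnets": [{
        "ips": [{"address": "192.168.0.236",
                 "floating_ips": [{"address": "192.168.122.201"}]}],
    }]},
}]

def addresses(network_info):
    # Walk VIF -> subnet -> fixed IP -> attached floating IPs.
    for vif in network_info:
        for subnet in vif["network"]["subnets"]:
            for ip in subnet["ips"]:
                yield (vif["devname"], ip["address"],
                       [f["address"] for f in ip.get("floating_ips", [])])

for dev, fixed, floating in addresses(network_info):
    print(dev, fixed, floating)  # tap64dfc81b-52 192.168.0.236 ['192.168.122.201']
```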
Oct 11 03:01:37 compute-0 nova_compute[356901]: 2025-10-11 03:01:37.608 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:01:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2525: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:37 compute-0 nova_compute[356901]: 2025-10-11 03:01:37.634 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:01:37 compute-0 nova_compute[356901]: 2025-10-11 03:01:37.635 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:01:37 compute-0 nova_compute[356901]: 2025-10-11 03:01:37.636 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 03:01:37 compute-0 nova_compute[356901]: 2025-10-11 03:01:37.637 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 03:01:37 compute-0 nova_compute[356901]: 2025-10-11 03:01:37.638 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 03:01:38 compute-0 nova_compute[356901]: 2025-10-11 03:01:38.026 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 03:01:38 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/455582862' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:01:38 compute-0 nova_compute[356901]: 2025-10-11 03:01:38.143 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.505s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
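For RBD-backed storage the resource tracker shells out to `ceph df` exactly as logged above to size the pool. A sketch of the same call and of reading the result (the command is verbatim from the log; the stats/max_avail JSON keys match recent Ceph releases but verify on your cluster):

```python
import json
import subprocess

# The call nova logs above, run as client.openstack.
out = subprocess.run(
    ["ceph", "df", "--format=json", "--id", "openstack",
     "--conf", "/etc/ceph/ceph.conf"],
    capture_output=True, text=True, check=True,
).stdout
df = json.loads(out)
print("cluster total:", df["stats"]["total_bytes"] / 2**30, "GiB")
for pool in df["pools"]:
    print(pool["name"], "max_avail:", pool["stats"]["max_avail"] / 2**30, "GiB")
```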
Oct 11 03:01:38 compute-0 nova_compute[356901]: 2025-10-11 03:01:38.226 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:01:38 compute-0 nova_compute[356901]: 2025-10-11 03:01:38.227 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:01:38 compute-0 nova_compute[356901]: 2025-10-11 03:01:38.227 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:01:38 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/455582862' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:01:38 compute-0 sudo[493609]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 03:01:38 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:01:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 03:01:38 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:01:38 compute-0 nova_compute[356901]: 2025-10-11 03:01:38.588 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 03:01:38 compute-0 nova_compute[356901]: 2025-10-11 03:01:38.589 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3582MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 03:01:38 compute-0 nova_compute[356901]: 2025-10-11 03:01:38.590 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:01:38 compute-0 nova_compute[356901]: 2025-10-11 03:01:38.590 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:01:38 compute-0 sudo[493882]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:01:38 compute-0 sudo[493882]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:38 compute-0 sudo[493882]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:38 compute-0 nova_compute[356901]: 2025-10-11 03:01:38.755 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 03:01:38 compute-0 nova_compute[356901]: 2025-10-11 03:01:38.756 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 03:01:38 compute-0 nova_compute[356901]: 2025-10-11 03:01:38.756 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 03:01:38 compute-0 sudo[493907]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:01:38 compute-0 sudo[493907]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:38 compute-0 sudo[493907]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:38 compute-0 sudo[493932]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:01:38 compute-0 sudo[493932]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:38 compute-0 sudo[493932]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:38 compute-0 nova_compute[356901]: 2025-10-11 03:01:38.912 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 03:01:38 compute-0 sudo[493957]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 03:01:38 compute-0 sudo[493957]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:39 compute-0 ceph-mon[191930]: pgmap v2525: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:39 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:01:39 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:01:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 03:01:39 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2332816335' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:01:39 compute-0 nova_compute[356901]: 2025-10-11 03:01:39.446 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.534s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 03:01:39 compute-0 nova_compute[356901]: 2025-10-11 03:01:39.460 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 03:01:39 compute-0 nova_compute[356901]: 2025-10-11 03:01:39.485 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
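Placement derives schedulable capacity from this inventory as (total - reserved) * allocation_ratio, which is how 8 physical vCPUs back 32 schedulable VCPUs here. The arithmetic, using the values from the inventory line above:

```python
# Capacity implied by the logged inventory: (total - reserved) * allocation_ratio.
inventory = {
    "VCPU":      {"total": 8,    "reserved": 0,   "allocation_ratio": 4.0},
    "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
    "DISK_GB":   {"total": 59,   "reserved": 1,   "allocation_ratio": 0.9},
}
for rc, inv in inventory.items():
    cap = (inv["total"] - inv["reserved"]) * inv["allocation_ratio"]
    print(rc, cap)  # VCPU 32.0, MEMORY_MB 7168.0, DISK_GB 52.2
```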
Oct 11 03:01:39 compute-0 nova_compute[356901]: 2025-10-11 03:01:39.488 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 03:01:39 compute-0 nova_compute[356901]: 2025-10-11 03:01:39.488 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.898s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 03:01:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2526: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:39 compute-0 sudo[493957]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 03:01:39 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:01:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 03:01:39 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 03:01:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 03:01:39 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:01:39 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 8a632cf5-ed32-4fc5-a98c-1c04cceba5cb does not exist
Oct 11 03:01:39 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev edd77962-d341-4618-bc99-91f35d06e719 does not exist
Oct 11 03:01:39 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 8694efe3-fc55-420a-b27d-7fcec9c8ca72 does not exist
Oct 11 03:01:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 03:01:39 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 03:01:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 03:01:39 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 03:01:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 03:01:39 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:01:39 compute-0 sudo[494035]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:01:39 compute-0 sudo[494035]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:39 compute-0 sudo[494035]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:40 compute-0 sudo[494060]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:01:40 compute-0 sudo[494060]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:40 compute-0 sudo[494060]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:40 compute-0 sudo[494085]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:01:40 compute-0 sudo[494085]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:40 compute-0 sudo[494085]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:40 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2332816335' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:01:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:01:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 03:01:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:01:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 03:01:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 03:01:40 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:01:40 compute-0 sudo[494110]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
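cephadm then drives ceph-volume inside a one-shot ceph container: `lvm batch --no-auto` over three pre-created LVs, `--yes` to skip the confirmation prompt, and `--no-systemd` because cephadm manages the daemons itself. Stripped of the container wrapper, the underlying call is roughly the following sketch (run as root on a host with ceph-volume installed; LV paths are the ones from the log line):

```python
import subprocess

# The ceph-volume invocation cephadm wraps above, minus the container layer.
lvs = ["/dev/ceph_vg0/ceph_lv0", "/dev/ceph_vg1/ceph_lv1",
       "/dev/ceph_vg2/ceph_lv2"]
subprocess.run(
    ["ceph-volume", "lvm", "batch", "--no-auto", *lvs,
     "--yes", "--no-systemd"],
    check=True,
)
```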
Oct 11 03:01:40 compute-0 sudo[494110]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:40 compute-0 nova_compute[356901]: 2025-10-11 03:01:40.901 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:40 compute-0 podman[494173]: 2025-10-11 03:01:40.947155837 +0000 UTC m=+0.086066511 container create 5e7733bbb71035cd4bb09fab30d03c4404b383f5d26bd4c25ffab8289856fc15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_swirles, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0)
Oct 11 03:01:40 compute-0 podman[494173]: 2025-10-11 03:01:40.905925517 +0000 UTC m=+0.044836201 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:01:41 compute-0 systemd[1]: Started libpod-conmon-5e7733bbb71035cd4bb09fab30d03c4404b383f5d26bd4c25ffab8289856fc15.scope.
Oct 11 03:01:41 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:01:41 compute-0 podman[494173]: 2025-10-11 03:01:41.108655929 +0000 UTC m=+0.247566623 container init 5e7733bbb71035cd4bb09fab30d03c4404b383f5d26bd4c25ffab8289856fc15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_swirles, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS)
Oct 11 03:01:41 compute-0 podman[494173]: 2025-10-11 03:01:41.124037578 +0000 UTC m=+0.262948252 container start 5e7733bbb71035cd4bb09fab30d03c4404b383f5d26bd4c25ffab8289856fc15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_swirles, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 03:01:41 compute-0 gallant_swirles[494189]: 167 167
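Note: the short-lived containers that print "167 167" (gallant_swirles here, nice_cori later) look like cephadm's uid/gid probe: 167 is the ceph user and group id baked into the official images, and cephadm needs it to chown the files it writes on the host. The exact probe command is an assumption; stat'ing a ceph-owned path inside the image is one plausible equivalent:

    import subprocess

    IMAGE = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")

    # Assumption: the probe amounts to stat'ing /var/lib/ceph inside the
    # image; "167 167" in the log matches the ceph uid/gid in these images.
    out = subprocess.run(
        ["podman", "run", "--rm", "--entrypoint", "stat", IMAGE,
         "-c", "%u %g", "/var/lib/ceph"],
        check=True, capture_output=True, text=True,
    ).stdout.split()
    ceph_uid, ceph_gid = map(int, out)
    print(ceph_uid, ceph_gid)  # expected: 167 167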
Oct 11 03:01:41 compute-0 systemd[1]: libpod-5e7733bbb71035cd4bb09fab30d03c4404b383f5d26bd4c25ffab8289856fc15.scope: Deactivated successfully.
Oct 11 03:01:41 compute-0 podman[494173]: 2025-10-11 03:01:41.145977581 +0000 UTC m=+0.284888255 container attach 5e7733bbb71035cd4bb09fab30d03c4404b383f5d26bd4c25ffab8289856fc15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_swirles, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 03:01:41 compute-0 podman[494173]: 2025-10-11 03:01:41.148080826 +0000 UTC m=+0.286991530 container died 5e7733bbb71035cd4bb09fab30d03c4404b383f5d26bd4c25ffab8289856fc15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_swirles, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 03:01:41 compute-0 systemd[1]: var-lib-containers-storage-overlay-840fab528e8a5063aa00d64ac03b00b45f6fc345d50021d39d4d3f1537ad077a-merged.mount: Deactivated successfully.
Oct 11 03:01:41 compute-0 podman[494173]: 2025-10-11 03:01:41.290471966 +0000 UTC m=+0.429382630 container remove 5e7733bbb71035cd4bb09fab30d03c4404b383f5d26bd4c25ffab8289856fc15 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=gallant_swirles, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 03:01:41 compute-0 systemd[1]: libpod-conmon-5e7733bbb71035cd4bb09fab30d03c4404b383f5d26bd4c25ffab8289856fc15.scope: Deactivated successfully.
Oct 11 03:01:41 compute-0 ceph-mon[191930]: pgmap v2526: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:41 compute-0 nova_compute[356901]: 2025-10-11 03:01:41.484 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:01:41 compute-0 podman[494213]: 2025-10-11 03:01:41.570440484 +0000 UTC m=+0.097677175 container create 4d639dd943c99f17c689c31312d401c6ecacb15e5f84bb1a6d01cc2d1fbf817b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_tharp, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 03:01:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2527: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:41 compute-0 podman[494213]: 2025-10-11 03:01:41.529260095 +0000 UTC m=+0.056496756 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:01:41 compute-0 systemd[1]: Started libpod-conmon-4d639dd943c99f17c689c31312d401c6ecacb15e5f84bb1a6d01cc2d1fbf817b.scope.
Oct 11 03:01:41 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:01:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/90acca535e97bda7fe3be17f736474854772c179e50102621f658aa0405d974c/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 03:01:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/90acca535e97bda7fe3be17f736474854772c179e50102621f658aa0405d974c/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 03:01:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/90acca535e97bda7fe3be17f736474854772c179e50102621f658aa0405d974c/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 03:01:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/90acca535e97bda7fe3be17f736474854772c179e50102621f658aa0405d974c/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 03:01:41 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/90acca535e97bda7fe3be17f736474854772c179e50102621f658aa0405d974c/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
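Note: the xfs "supports timestamps until 2038" messages above are informational, not errors: the overlay bind mounts sit on an XFS filesystem formatted without the bigtime feature, so inode timestamps cap at the classic signed 32-bit epoch limit the kernel reports as 0x7fffffff. A quick check of what that hex value means:

    from datetime import datetime, timezone

    # 0x7fffffff is the signed 32-bit time_t ceiling from the kernel message.
    limit = 0x7FFFFFFF
    print(limit)                                           # 2147483647
    print(datetime.fromtimestamp(limit, tz=timezone.utc))  # 2038-01-19 03:14:07+00:00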
Oct 11 03:01:41 compute-0 podman[494213]: 2025-10-11 03:01:41.769650362 +0000 UTC m=+0.296887023 container init 4d639dd943c99f17c689c31312d401c6ecacb15e5f84bb1a6d01cc2d1fbf817b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_tharp, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507)
Oct 11 03:01:41 compute-0 podman[494213]: 2025-10-11 03:01:41.798255472 +0000 UTC m=+0.325492123 container start 4d639dd943c99f17c689c31312d401c6ecacb15e5f84bb1a6d01cc2d1fbf817b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_tharp, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2)
Oct 11 03:01:41 compute-0 podman[494213]: 2025-10-11 03:01:41.816832845 +0000 UTC m=+0.344069536 container attach 4d639dd943c99f17c689c31312d401c6ecacb15e5f84bb1a6d01cc2d1fbf817b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_tharp, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3)
Oct 11 03:01:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:01:42 compute-0 ceph-mon[191930]: pgmap v2527: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:43 compute-0 nova_compute[356901]: 2025-10-11 03:01:43.039 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:43 compute-0 loving_tharp[494229]: --> passed data devices: 0 physical, 3 LVM
Oct 11 03:01:43 compute-0 loving_tharp[494229]: --> relative data size: 1.0
Oct 11 03:01:43 compute-0 loving_tharp[494229]: --> All data devices are unavailable
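Note: loving_tharp is the ceph-volume lvm batch run requested at 03:01:40; "passed data devices: 0 physical, 3 LVM" and "All data devices are unavailable" mean the three pre-created LVs are already consumed by OSDs, so the batch is an idempotent no-op rather than a failure, and the orchestrator re-inventories with lvm list shortly after (JSON below). A hypothetical manual re-run of the same batch as a dry-run report, assuming the cephadm CLI is on PATH:

    import subprocess

    FSID = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"
    LVS = ["/dev/ceph_vg0/ceph_lv0", "/dev/ceph_vg1/ceph_lv1", "/dev/ceph_vg2/ceph_lv2"]

    # With the LVs already holding OSDs, the report lists them as unavailable
    # instead of planning new OSDs, matching the container output above.
    subprocess.run(
        ["cephadm", "ceph-volume", "--fsid", FSID, "--",
         "lvm", "batch", "--no-auto", *LVS, "--report"],
        check=False,
    )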
Oct 11 03:01:43 compute-0 systemd[1]: libpod-4d639dd943c99f17c689c31312d401c6ecacb15e5f84bb1a6d01cc2d1fbf817b.scope: Deactivated successfully.
Oct 11 03:01:43 compute-0 systemd[1]: libpod-4d639dd943c99f17c689c31312d401c6ecacb15e5f84bb1a6d01cc2d1fbf817b.scope: Consumed 1.386s CPU time.
Oct 11 03:01:43 compute-0 podman[494213]: 2025-10-11 03:01:43.243668629 +0000 UTC m=+1.770905300 container died 4d639dd943c99f17c689c31312d401c6ecacb15e5f84bb1a6d01cc2d1fbf817b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_tharp, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 03:01:43 compute-0 systemd[1]: var-lib-containers-storage-overlay-90acca535e97bda7fe3be17f736474854772c179e50102621f658aa0405d974c-merged.mount: Deactivated successfully.
Oct 11 03:01:43 compute-0 podman[494213]: 2025-10-11 03:01:43.405896087 +0000 UTC m=+1.933132738 container remove 4d639dd943c99f17c689c31312d401c6ecacb15e5f84bb1a6d01cc2d1fbf817b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_tharp, ceph=True, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 03:01:43 compute-0 systemd[1]: libpod-conmon-4d639dd943c99f17c689c31312d401c6ecacb15e5f84bb1a6d01cc2d1fbf817b.scope: Deactivated successfully.
Oct 11 03:01:43 compute-0 sudo[494110]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:43 compute-0 podman[494265]: 2025-10-11 03:01:43.465064418 +0000 UTC m=+0.157534802 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.openshift.expose-services=, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, managed_by=edpm_ansible, io.openshift.tags=minimal rhel9, name=ubi9-minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, maintainer=Red Hat, Inc., io.buildah.version=1.33.7, config_id=edpm, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, vcs-type=git, com.redhat.component=ubi9-minimal-container, url=https://catalog.redhat.com/en/search?searchType=containers, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, version=9.6, build-date=2025-08-20T13:12:41, release=1755695350, vendor=Red Hat, Inc., container_name=openstack_network_exporter)
Oct 11 03:01:43 compute-0 podman[494259]: 2025-10-11 03:01:43.468597974 +0000 UTC m=+0.180116627 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>)
Oct 11 03:01:43 compute-0 podman[494286]: 2025-10-11 03:01:43.517778538 +0000 UTC m=+0.164964916 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2)
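Note: each health_status=healthy line above is podman running the container's configured healthcheck (the 'test' command under config_data, e.g. "/openstack/healthcheck node_exporter") on its timer; health_failing_streak resets to 0 on success. The same check can be triggered on demand and the recorded state inspected, a sketch assuming the container name from the log:

    import json
    import subprocess

    NAME = "node_exporter"  # container name taken from the log line above

    # Run the configured healthcheck once, outside the timer.
    subprocess.run(["podman", "healthcheck", "run", NAME], check=False)

    # Inspect the recorded health state and failing streak.
    raw = subprocess.run(
        ["podman", "inspect", "--format", "{{json .State.Health}}", NAME],
        check=True, capture_output=True, text=True,
    ).stdout
    print(json.dumps(json.loads(raw), indent=2))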
Oct 11 03:01:43 compute-0 sudo[494326]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:01:43 compute-0 sudo[494326]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:43 compute-0 sudo[494326]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2528: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:43 compute-0 sudo[494352]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:01:43 compute-0 sudo[494352]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:43 compute-0 sudo[494352]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:43 compute-0 sudo[494377]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:01:43 compute-0 sudo[494377]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:43 compute-0 sudo[494377]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:43 compute-0 sudo[494402]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 03:01:43 compute-0 sudo[494402]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:44 compute-0 podman[494464]: 2025-10-11 03:01:44.481708978 +0000 UTC m=+0.085759180 container create 3d6150fca174d8f5b63583a4c56f496a4b91d1702e9829b5cab99ef1938a420d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_cori, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 03:01:44 compute-0 podman[494464]: 2025-10-11 03:01:44.451723148 +0000 UTC m=+0.055773360 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:01:44 compute-0 systemd[1]: Started libpod-conmon-3d6150fca174d8f5b63583a4c56f496a4b91d1702e9829b5cab99ef1938a420d.scope.
Oct 11 03:01:44 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:01:44 compute-0 podman[494464]: 2025-10-11 03:01:44.629672067 +0000 UTC m=+0.233722269 container init 3d6150fca174d8f5b63583a4c56f496a4b91d1702e9829b5cab99ef1938a420d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_cori, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 03:01:44 compute-0 podman[494464]: 2025-10-11 03:01:44.651925651 +0000 UTC m=+0.255975873 container start 3d6150fca174d8f5b63583a4c56f496a4b91d1702e9829b5cab99ef1938a420d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_cori, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 03:01:44 compute-0 podman[494464]: 2025-10-11 03:01:44.662394965 +0000 UTC m=+0.266445197 container attach 3d6150fca174d8f5b63583a4c56f496a4b91d1702e9829b5cab99ef1938a420d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_cori, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 03:01:44 compute-0 nice_cori[494480]: 167 167
Oct 11 03:01:44 compute-0 systemd[1]: libpod-3d6150fca174d8f5b63583a4c56f496a4b91d1702e9829b5cab99ef1938a420d.scope: Deactivated successfully.
Oct 11 03:01:44 compute-0 podman[494464]: 2025-10-11 03:01:44.67012079 +0000 UTC m=+0.274170972 container died 3d6150fca174d8f5b63583a4c56f496a4b91d1702e9829b5cab99ef1938a420d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_cori, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=reef, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 03:01:44 compute-0 ceph-mon[191930]: pgmap v2528: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:44 compute-0 systemd[1]: var-lib-containers-storage-overlay-0daf114d25b285fb26fc0d3cc3a68bd49c1b85d094d43ac15120ad82748cf271-merged.mount: Deactivated successfully.
Oct 11 03:01:44 compute-0 podman[494464]: 2025-10-11 03:01:44.743010741 +0000 UTC m=+0.347060903 container remove 3d6150fca174d8f5b63583a4c56f496a4b91d1702e9829b5cab99ef1938a420d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_cori, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, ceph=True, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 03:01:44 compute-0 systemd[1]: libpod-conmon-3d6150fca174d8f5b63583a4c56f496a4b91d1702e9829b5cab99ef1938a420d.scope: Deactivated successfully.
Oct 11 03:01:45 compute-0 podman[494504]: 2025-10-11 03:01:45.009113434 +0000 UTC m=+0.106322034 container create a1941c676114700ce58eb3b57a51787438862f5e4fab791108e55332cb4d62f5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_carver, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 03:01:45 compute-0 podman[494504]: 2025-10-11 03:01:44.96443668 +0000 UTC m=+0.061645330 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:01:45 compute-0 systemd[1]: Started libpod-conmon-a1941c676114700ce58eb3b57a51787438862f5e4fab791108e55332cb4d62f5.scope.
Oct 11 03:01:45 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:01:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/26edf8d66b01789ae56f6b45e18e081c174571ba67468937a93624500833f76f/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 03:01:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/26edf8d66b01789ae56f6b45e18e081c174571ba67468937a93624500833f76f/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 03:01:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/26edf8d66b01789ae56f6b45e18e081c174571ba67468937a93624500833f76f/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 03:01:45 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/26edf8d66b01789ae56f6b45e18e081c174571ba67468937a93624500833f76f/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 03:01:45 compute-0 podman[494504]: 2025-10-11 03:01:45.156790153 +0000 UTC m=+0.253998743 container init a1941c676114700ce58eb3b57a51787438862f5e4fab791108e55332cb4d62f5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_carver, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True)
Oct 11 03:01:45 compute-0 podman[494504]: 2025-10-11 03:01:45.170034465 +0000 UTC m=+0.267243035 container start a1941c676114700ce58eb3b57a51787438862f5e4fab791108e55332cb4d62f5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_carver, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS)
Oct 11 03:01:45 compute-0 podman[494504]: 2025-10-11 03:01:45.178450986 +0000 UTC m=+0.275659556 container attach a1941c676114700ce58eb3b57a51787438862f5e4fab791108e55332cb4d62f5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_carver, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507)
Oct 11 03:01:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2529: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:45 compute-0 nova_compute[356901]: 2025-10-11 03:01:45.904 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:45 compute-0 musing_carver[494520]: {
Oct 11 03:01:45 compute-0 musing_carver[494520]:     "0": [
Oct 11 03:01:45 compute-0 musing_carver[494520]:         {
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "devices": [
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "/dev/loop3"
Oct 11 03:01:45 compute-0 musing_carver[494520]:             ],
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "lv_name": "ceph_lv0",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "lv_size": "21470642176",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "name": "ceph_lv0",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "tags": {
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.cluster_name": "ceph",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.crush_device_class": "",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.encrypted": "0",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.osd_id": "0",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.type": "block",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.vdo": "0"
Oct 11 03:01:45 compute-0 musing_carver[494520]:             },
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "type": "block",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "vg_name": "ceph_vg0"
Oct 11 03:01:45 compute-0 musing_carver[494520]:         }
Oct 11 03:01:45 compute-0 musing_carver[494520]:     ],
Oct 11 03:01:45 compute-0 musing_carver[494520]:     "1": [
Oct 11 03:01:45 compute-0 musing_carver[494520]:         {
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "devices": [
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "/dev/loop4"
Oct 11 03:01:45 compute-0 musing_carver[494520]:             ],
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "lv_name": "ceph_lv1",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "lv_size": "21470642176",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "name": "ceph_lv1",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "tags": {
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.cluster_name": "ceph",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.crush_device_class": "",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.encrypted": "0",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.osd_id": "1",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.type": "block",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.vdo": "0"
Oct 11 03:01:45 compute-0 musing_carver[494520]:             },
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "type": "block",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "vg_name": "ceph_vg1"
Oct 11 03:01:45 compute-0 musing_carver[494520]:         }
Oct 11 03:01:45 compute-0 musing_carver[494520]:     ],
Oct 11 03:01:45 compute-0 musing_carver[494520]:     "2": [
Oct 11 03:01:45 compute-0 musing_carver[494520]:         {
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "devices": [
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "/dev/loop5"
Oct 11 03:01:45 compute-0 musing_carver[494520]:             ],
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "lv_name": "ceph_lv2",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "lv_size": "21470642176",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "name": "ceph_lv2",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "tags": {
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.cluster_name": "ceph",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.crush_device_class": "",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.encrypted": "0",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.osd_id": "2",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.type": "block",
Oct 11 03:01:45 compute-0 musing_carver[494520]:                 "ceph.vdo": "0"
Oct 11 03:01:45 compute-0 musing_carver[494520]:             },
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "type": "block",
Oct 11 03:01:45 compute-0 musing_carver[494520]:             "vg_name": "ceph_vg2"
Oct 11 03:01:45 compute-0 musing_carver[494520]:         }
Oct 11 03:01:45 compute-0 musing_carver[494520]:     ]
Oct 11 03:01:45 compute-0 musing_carver[494520]: }
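Note: the JSON musing_carver just printed is the payload of the "lvm list --format json" command dispatched at 03:01:43: top-level keys are OSD ids, each mapping to a list of LV records whose ceph.* tags carry the cluster fsid, the OSD fsid, and the drive-group affinity. A small sketch summarising it, assuming the JSON above has been saved to a file named lvm_list.json:

    import json

    with open("lvm_list.json") as fh:   # assumed: the JSON above, saved locally
        inventory = json.load(fh)

    # Map each OSD id to its LV path, backing device(s) and identifying tags.
    for osd_id, lvs in sorted(inventory.items(), key=lambda kv: int(kv[0])):
        for lv in lvs:
            tags = lv["tags"]
            print(f"osd.{osd_id}: {lv['lv_path']} "
                  f"(backing {', '.join(lv['devices'])}, "
                  f"osd_fsid {tags['ceph.osd_fsid']}, "
                  f"spec {tags['ceph.osdspec_affinity']})")

Against the output above this yields osd.0 on /dev/ceph_vg0/ceph_lv0 (backing /dev/loop3), osd.1 on /dev/ceph_vg1/ceph_lv1 (/dev/loop4), and osd.2 on /dev/ceph_vg2/ceph_lv2 (/dev/loop5).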
Oct 11 03:01:45 compute-0 systemd[1]: libpod-a1941c676114700ce58eb3b57a51787438862f5e4fab791108e55332cb4d62f5.scope: Deactivated successfully.
Oct 11 03:01:46 compute-0 podman[494529]: 2025-10-11 03:01:46.085203925 +0000 UTC m=+0.056449355 container died a1941c676114700ce58eb3b57a51787438862f5e4fab791108e55332cb4d62f5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_carver, OSD_FLAVOR=default, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 03:01:46 compute-0 systemd[1]: var-lib-containers-storage-overlay-26edf8d66b01789ae56f6b45e18e081c174571ba67468937a93624500833f76f-merged.mount: Deactivated successfully.
Oct 11 03:01:46 compute-0 podman[494529]: 2025-10-11 03:01:46.21176189 +0000 UTC m=+0.183007240 container remove a1941c676114700ce58eb3b57a51787438862f5e4fab791108e55332cb4d62f5 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=musing_carver, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3)
Oct 11 03:01:46 compute-0 systemd[1]: libpod-conmon-a1941c676114700ce58eb3b57a51787438862f5e4fab791108e55332cb4d62f5.scope: Deactivated successfully.
Oct 11 03:01:46 compute-0 sudo[494402]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:46 compute-0 sudo[494543]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:01:46 compute-0 sudo[494543]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:46 compute-0 sudo[494543]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:46 compute-0 sudo[494568]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:01:46 compute-0 sudo[494568]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:46 compute-0 sudo[494568]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:46 compute-0 sudo[494593]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:01:46 compute-0 sudo[494593]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:46 compute-0 sudo[494593]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:46 compute-0 ceph-mon[191930]: pgmap v2529: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:46 compute-0 sudo[494618]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 03:01:46 compute-0 sudo[494618]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
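Note: the follow-up "raw list --format json" pass complements the earlier "lvm list": lvm list reads LVM tags off the volume groups, while raw list scans block devices for BlueStore labels and so also catches OSDs created without LVM. A sketch mirroring the logged inventory call through the cephadm CLI (image and timeout omitted; cephadm can infer the image):

    import json
    import subprocess

    FSID = "3c7617c3-7a20-523e-a9de-20c0d6ba41da"

    # Read BlueStore labels straight off the block devices.
    raw = subprocess.run(
        ["cephadm", "ceph-volume", "--fsid", FSID, "--",
         "raw", "list", "--format", "json"],
        check=True, capture_output=True, text=True,
    ).stdout
    print(json.dumps(json.loads(raw), indent=2))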
Oct 11 03:01:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #123. Immutable memtables: 0.
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:01:46.914318) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 73] Flushing memtable with next log file: 123
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151706914463, "job": 73, "event": "flush_started", "num_memtables": 1, "num_entries": 1193, "num_deletes": 256, "total_data_size": 1772985, "memory_usage": 1802496, "flush_reason": "Manual Compaction"}
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 73] Level-0 flush table #124: started
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151706933840, "cf_name": "default", "job": 73, "event": "table_file_creation", "file_number": 124, "file_size": 1744873, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 51019, "largest_seqno": 52211, "table_properties": {"data_size": 1739213, "index_size": 3055, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1541, "raw_key_size": 11897, "raw_average_key_size": 19, "raw_value_size": 1727768, "raw_average_value_size": 2813, "num_data_blocks": 137, "num_entries": 614, "num_filter_entries": 614, "num_deletions": 256, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760151589, "oldest_key_time": 1760151589, "file_creation_time": 1760151706, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 124, "seqno_to_time_mapping": "N/A"}}
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 73] Flush lasted 19626 microseconds, and 9740 cpu microseconds.
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:01:46.933960) [db/flush_job.cc:967] [default] [JOB 73] Level-0 flush table #124: 1744873 bytes OK
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:01:46.933991) [db/memtable_list.cc:519] [default] Level-0 commit table #124 started
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:01:46.936752) [db/memtable_list.cc:722] [default] Level-0 commit table #124: memtable #1 done
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:01:46.936777) EVENT_LOG_v1 {"time_micros": 1760151706936770, "job": 73, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:01:46.936806) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 73] Try to delete WAL files size 1767522, prev total WAL file size 1767522, number of live WAL files 2.
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000120.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
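Note: the rocksdb EVENT_LOG_v1 lines in this flush/compaction sequence embed machine-readable JSON after the "EVENT_LOG_v1 " marker, which makes them easy to mine out of the journal. A minimal extractor, with a sample line abbreviated from the flush event above:

    import json

    def parse_event_logs(lines):
        """Yield the JSON payloads of rocksdb EVENT_LOG_v1 journal lines."""
        marker = "EVENT_LOG_v1 "
        for line in lines:
            _, found, payload = line.partition(marker)
            if found:
                yield json.loads(payload)

    sample = ('Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 '
              '{"time_micros": 1760151706914463, "job": 73, '
              '"event": "flush_started", "num_entries": 1193}')
    for event in parse_event_logs([sample]):
        print(event["job"], event["event"], event["num_entries"])  # 73 flush_started 1193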
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:01:46.938313) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6C6F676D0032303134' seq:72057594037927935, type:22 .. '6C6F676D0032323636' seq:0, type:0; will stop at (end)
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 74] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 73 Base level 0, inputs: [124(1703KB)], [122(7409KB)]
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151706938397, "job": 74, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [124], "files_L6": [122], "score": -1, "input_data_size": 9332137, "oldest_snapshot_seqno": -1}
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 74] Generated table #125: 6597 keys, 9221793 bytes, temperature: kUnknown
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151706995321, "cf_name": "default", "job": 74, "event": "table_file_creation", "file_number": 125, "file_size": 9221793, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 9179566, "index_size": 24643, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 16517, "raw_key_size": 172818, "raw_average_key_size": 26, "raw_value_size": 9061994, "raw_average_value_size": 1373, "num_data_blocks": 981, "num_entries": 6597, "num_filter_entries": 6597, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760151706, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 125, "seqno_to_time_mapping": "N/A"}}
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:01:46.995656) [db/compaction/compaction_job.cc:1663] [default] [JOB 74] Compacted 1@0 + 1@6 files to L6 => 9221793 bytes
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:01:46.998283) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 163.6 rd, 161.7 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(1.7, 7.2 +0.0 blob) out(8.8 +0.0 blob), read-write-amplify(10.6) write-amplify(5.3) OK, records in: 7121, records dropped: 524 output_compression: NoCompression
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:01:46.998311) EVENT_LOG_v1 {"time_micros": 1760151706998298, "job": 74, "event": "compaction_finished", "compaction_time_micros": 57029, "compaction_time_cpu_micros": 26251, "output_level": 6, "num_output_files": 1, "total_output_size": 9221793, "num_input_records": 7121, "num_output_records": 6597, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000124.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 03:01:46 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151706998956, "job": 74, "event": "table_file_deletion", "file_number": 124}
Oct 11 03:01:47 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000122.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 03:01:47 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151707000965, "job": 74, "event": "table_file_deletion", "file_number": 122}
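
The EVENT_LOG_v1 entries above carry a single-line JSON payload after a fixed prefix, so a saved copy of this journal can be mined for the mon store's flush/compaction history mechanically. A minimal sketch in Python, assuming the journal has been exported to a plain-text file (the path "journal.txt" is hypothetical):

    import json
    import re

    # RocksDB appends the JSON payload after the literal "EVENT_LOG_v1 ".
    EVENT_RE = re.compile(r"EVENT_LOG_v1 (\{.*\})\s*$")

    def iter_events(path):
        with open(path, encoding="utf-8", errors="replace") as fh:
            for line in fh:
                m = EVENT_RE.search(line)
                if m:
                    yield json.loads(m.group(1))

    # Job 74 above: 7121 records in, 6597 out, 9221793 bytes written in ~57 ms.
    for ev in iter_events("journal.txt"):
        if ev.get("event") == "compaction_finished":
            print(ev["job"], ev["num_input_records"], ev["num_output_records"],
                  ev["total_output_size"], ev["compaction_time_micros"])
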
Oct 11 03:01:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:01:46.938065) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 03:01:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:01:47.001100) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 03:01:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:01:47.001108) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 03:01:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:01:47.001111) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 03:01:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:01:47.001113) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 03:01:47 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:01:47.001114) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 03:01:47 compute-0 podman[494682]: 2025-10-11 03:01:47.346819775 +0000 UTC m=+0.069263722 container create 50ed48354389a145a2d9c5a459d6851be5b7671ab863116ab830a06d74482a93 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_ellis, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=reef)
Oct 11 03:01:47 compute-0 podman[494682]: 2025-10-11 03:01:47.319126427 +0000 UTC m=+0.041570364 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:01:47 compute-0 systemd[1]: Started libpod-conmon-50ed48354389a145a2d9c5a459d6851be5b7671ab863116ab830a06d74482a93.scope.
Oct 11 03:01:47 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:01:47 compute-0 podman[494682]: 2025-10-11 03:01:47.488949196 +0000 UTC m=+0.211393123 container init 50ed48354389a145a2d9c5a459d6851be5b7671ab863116ab830a06d74482a93 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_ellis, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507)
Oct 11 03:01:47 compute-0 podman[494682]: 2025-10-11 03:01:47.499906097 +0000 UTC m=+0.222350044 container start 50ed48354389a145a2d9c5a459d6851be5b7671ab863116ab830a06d74482a93 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_ellis, org.label-schema.vendor=CentOS, CEPH_REF=reef, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0)
Oct 11 03:01:47 compute-0 podman[494682]: 2025-10-11 03:01:47.507764487 +0000 UTC m=+0.230208414 container attach 50ed48354389a145a2d9c5a459d6851be5b7671ab863116ab830a06d74482a93 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_ellis, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_REF=reef, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 03:01:47 compute-0 vibrant_ellis[494698]: 167 167
Oct 11 03:01:47 compute-0 systemd[1]: libpod-50ed48354389a145a2d9c5a459d6851be5b7671ab863116ab830a06d74482a93.scope: Deactivated successfully.
Oct 11 03:01:47 compute-0 podman[494682]: 2025-10-11 03:01:47.513703049 +0000 UTC m=+0.236146956 container died 50ed48354389a145a2d9c5a459d6851be5b7671ab863116ab830a06d74482a93 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_ellis, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0)
Oct 11 03:01:47 compute-0 systemd[1]: var-lib-containers-storage-overlay-707709b1f88ccd7e6bd18f05821e4a8f4fd3b0a0e4595ed5a39df7f0f1c5ca18-merged.mount: Deactivated successfully.
Oct 11 03:01:47 compute-0 podman[494682]: 2025-10-11 03:01:47.588124024 +0000 UTC m=+0.310567971 container remove 50ed48354389a145a2d9c5a459d6851be5b7671ab863116ab830a06d74482a93 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=vibrant_ellis, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Oct 11 03:01:47 compute-0 systemd[1]: libpod-conmon-50ed48354389a145a2d9c5a459d6851be5b7671ab863116ab830a06d74482a93.scope: Deactivated successfully.
Oct 11 03:01:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2530: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:47 compute-0 podman[494720]: 2025-10-11 03:01:47.858076845 +0000 UTC m=+0.063572539 container create b48918931b1b71805f2ebda3f20b3b69dc6d0d9887c913e0a6759d7ff353dd9e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_dhawan, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 03:01:47 compute-0 podman[494720]: 2025-10-11 03:01:47.840421475 +0000 UTC m=+0.045917179 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:01:47 compute-0 systemd[1]: Started libpod-conmon-b48918931b1b71805f2ebda3f20b3b69dc6d0d9887c913e0a6759d7ff353dd9e.scope.
Oct 11 03:01:47 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:01:47 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fe55dc6d73e70bd50ff003db5daa95de11313dd41adb25757e200370c9e387a5/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 03:01:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fe55dc6d73e70bd50ff003db5daa95de11313dd41adb25757e200370c9e387a5/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 03:01:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fe55dc6d73e70bd50ff003db5daa95de11313dd41adb25757e200370c9e387a5/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 03:01:48 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/fe55dc6d73e70bd50ff003db5daa95de11313dd41adb25757e200370c9e387a5/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
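
The hex value the kernel prints in these xfs messages is the largest 32-bit signed time_t, i.e. the year-2038 limit; a one-line Python check recovers the exact date being warned about:

    from datetime import datetime, timezone

    # 0x7fffffff = 2147483647, the 32-bit signed epoch maximum.
    print(datetime.fromtimestamp(0x7fffffff, tz=timezone.utc))
    # -> 2038-01-19 03:14:07+00:00
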
Oct 11 03:01:48 compute-0 podman[494720]: 2025-10-11 03:01:48.043605844 +0000 UTC m=+0.249101568 container init b48918931b1b71805f2ebda3f20b3b69dc6d0d9887c913e0a6759d7ff353dd9e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_dhawan, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef)
Oct 11 03:01:48 compute-0 nova_compute[356901]: 2025-10-11 03:01:48.046 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:48 compute-0 podman[494720]: 2025-10-11 03:01:48.070137621 +0000 UTC m=+0.275633355 container start b48918931b1b71805f2ebda3f20b3b69dc6d0d9887c913e0a6759d7ff353dd9e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_dhawan, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 03:01:48 compute-0 podman[494720]: 2025-10-11 03:01:48.080673407 +0000 UTC m=+0.286169201 container attach b48918931b1b71805f2ebda3f20b3b69dc6d0d9887c913e0a6759d7ff353dd9e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_dhawan, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0)
Oct 11 03:01:48 compute-0 ceph-mon[191930]: pgmap v2530: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]: {
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:         "osd_id": 1,
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:         "type": "bluestore"
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:     },
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:         "osd_id": 2,
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:         "type": "bluestore"
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:     },
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:         "osd_id": 0,
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:         "type": "bluestore"
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]:     }
Oct 11 03:01:49 compute-0 dazzling_dhawan[494735]: }
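
The dazzling_dhawan container prints a single JSON object mapping each OSD's fsid to its device metadata (three BlueStore OSDs, ids 0-2, on the ceph_vg0-2 logical volumes), evidently the host inventory behind the mgr/cephadm/host.compute-0.devices.0 config-key write just below. Because journald prefixes every line, the object must be reassembled before parsing; a minimal sketch, again against a hypothetical "journal.txt" export:

    import json
    import re

    # Strip the "Oct 11 ... dazzling_dhawan[494735]: " journal prefix.
    PREFIX = re.compile(r"^.*?dazzling_dhawan\[\d+\]: ")

    def parse_osd_inventory(path):
        with open(path, encoding="utf-8") as fh:
            body = "".join(PREFIX.sub("", l) for l in fh if "dazzling_dhawan[" in l)
        return json.loads(body)

    for osd_uuid, meta in parse_osd_inventory("journal.txt").items():
        print(meta["osd_id"], meta["device"], meta["type"])
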
Oct 11 03:01:49 compute-0 systemd[1]: libpod-b48918931b1b71805f2ebda3f20b3b69dc6d0d9887c913e0a6759d7ff353dd9e.scope: Deactivated successfully.
Oct 11 03:01:49 compute-0 podman[494720]: 2025-10-11 03:01:49.23556727 +0000 UTC m=+1.441063004 container died b48918931b1b71805f2ebda3f20b3b69dc6d0d9887c913e0a6759d7ff353dd9e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_dhawan, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS)
Oct 11 03:01:49 compute-0 systemd[1]: libpod-b48918931b1b71805f2ebda3f20b3b69dc6d0d9887c913e0a6759d7ff353dd9e.scope: Consumed 1.171s CPU time.
Oct 11 03:01:49 compute-0 systemd[1]: var-lib-containers-storage-overlay-fe55dc6d73e70bd50ff003db5daa95de11313dd41adb25757e200370c9e387a5-merged.mount: Deactivated successfully.
Oct 11 03:01:49 compute-0 podman[494720]: 2025-10-11 03:01:49.335889819 +0000 UTC m=+1.541385513 container remove b48918931b1b71805f2ebda3f20b3b69dc6d0d9887c913e0a6759d7ff353dd9e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_dhawan, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef)
Oct 11 03:01:49 compute-0 systemd[1]: libpod-conmon-b48918931b1b71805f2ebda3f20b3b69dc6d0d9887c913e0a6759d7ff353dd9e.scope: Deactivated successfully.
Oct 11 03:01:49 compute-0 sudo[494618]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 03:01:49 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:01:49 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 03:01:49 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:01:49 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 5d35a670-698e-4c22-857a-8062964f1d49 does not exist
Oct 11 03:01:49 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 250c4d81-193d-46c1-bd45-38e0c6f2741b does not exist
Oct 11 03:01:49 compute-0 sudo[494779]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:01:49 compute-0 sudo[494779]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:49 compute-0 sudo[494779]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2531: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:49 compute-0 sudo[494805]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 03:01:49 compute-0 sudo[494805]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:01:49 compute-0 sudo[494805]: pam_unix(sudo:session): session closed for user root
Oct 11 03:01:50 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:01:50 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:01:50 compute-0 ceph-mon[191930]: pgmap v2531: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:50 compute-0 nova_compute[356901]: 2025-10-11 03:01:50.909 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2532: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:01:52 compute-0 ceph-mon[191930]: pgmap v2532: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:53 compute-0 nova_compute[356901]: 2025-10-11 03:01:53.051 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:53 compute-0 podman[494830]: 2025-10-11 03:01:53.196606399 +0000 UTC m=+0.086737375 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, name=ubi9, container_name=kepler, distribution-scope=public, managed_by=edpm_ansible, io.openshift.expose-services=, vendor=Red Hat, Inc., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, architecture=x86_64, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, summary=Provides the latest release of Red Hat Universal Base Image 9., io.buildah.version=1.29.0, version=9.4, build-date=2024-09-18T21:23:30, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=1214.1726694543, io.k8s.display-name=Red Hat Universal Base Image 9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_id=edpm, release-0.7.12=, com.redhat.component=ubi9-container, io.openshift.tags=base rhel9, maintainer=Red Hat, Inc.)
Oct 11 03:01:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2533: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:54 compute-0 ceph-mon[191930]: pgmap v2533: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:01:54.897 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:01:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:01:54.898 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:01:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:01:54.899 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 03:01:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2534: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:55 compute-0 nova_compute[356901]: 2025-10-11 03:01:55.912 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:01:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:01:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:01:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:01:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:01:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:01:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_03:01:56
Oct 11 03:01:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 03:01:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 03:01:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.control', 'vms', 'images', '.rgw.root', 'default.rgw.meta', 'cephfs.cephfs.data', 'cephfs.cephfs.meta', 'default.rgw.log', 'volumes', 'backups', '.mgr']
Oct 11 03:01:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 03:01:56 compute-0 ceph-mon[191930]: pgmap v2534: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:01:57 compute-0 podman[494847]: 2025-10-11 03:01:57.209503617 +0000 UTC m=+0.092565123 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 03:01:57 compute-0 podman[494850]: 2025-10-11 03:01:57.220468568 +0000 UTC m=+0.089785264 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, config_id=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 03:01:57 compute-0 podman[494849]: 2025-10-11 03:01:57.243047064 +0000 UTC m=+0.109104404 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, tcib_managed=true, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 10 Base Image, config_id=edpm, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 03:01:57 compute-0 podman[494848]: 2025-10-11 03:01:57.28273452 +0000 UTC m=+0.155312462 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, config_id=ovn_controller, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 03:01:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 03:01:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 03:01:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 03:01:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 03:01:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 03:01:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 03:01:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 03:01:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 03:01:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 03:01:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 03:01:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2535: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:57 compute-0 nova_compute[356901]: 2025-10-11 03:01:57.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._run_pending_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:01:57 compute-0 nova_compute[356901]: 2025-10-11 03:01:57.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11145
Oct 11 03:01:57 compute-0 nova_compute[356901]: 2025-10-11 03:01:57.922 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] There are 0 instances to clean _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11154
Oct 11 03:01:58 compute-0 nova_compute[356901]: 2025-10-11 03:01:58.055 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:01:58 compute-0 ceph-mon[191930]: pgmap v2535: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2536: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:01:59 compute-0 podman[157119]: time="2025-10-11T03:01:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 03:01:59 compute-0 podman[157119]: @ - - [11/Oct/2025:03:01:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 03:01:59 compute-0 podman[157119]: @ - - [11/Oct/2025:03:01:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9108 "" "Go-http-client/1.1"
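
These two GETs are libpod REST calls logged by the Podman API service; the access pattern is consistent with the podman_exporter container above, whose config sets CONTAINER_HOST=unix:///run/podman/podman.sock. The same query can be reproduced with only the Python standard library; a sketch, assuming root access to that socket:

    import http.client
    import json
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        # HTTPConnection variant that dials a Unix socket instead of TCP.
        def __init__(self, sock_path):
            super().__init__("localhost")
            self.sock_path = sock_path

        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self.sock_path)

    conn = UnixHTTPConnection("/run/podman/podman.sock")  # path from the config above
    conn.request("GET", "/v4.9.3/libpod/containers/json?all=true")
    print(len(json.loads(conn.getresponse().read())), "containers")
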
Oct 11 03:02:00 compute-0 nova_compute[356901]: 2025-10-11 03:02:00.916 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:01 compute-0 ceph-mon[191930]: pgmap v2536: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:01 compute-0 openstack_network_exporter[374316]: ERROR   03:02:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 03:02:01 compute-0 openstack_network_exporter[374316]: ERROR   03:02:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:02:01 compute-0 openstack_network_exporter[374316]: ERROR   03:02:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:02:01 compute-0 openstack_network_exporter[374316]: ERROR   03:02:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 03:02:01 compute-0 openstack_network_exporter[374316]: ERROR   03:02:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 03:02:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2537: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:02:02 compute-0 nova_compute[356901]: 2025-10-11 03:02:02.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_incomplete_migrations run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:02:02 compute-0 nova_compute[356901]: 2025-10-11 03:02:02.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Cleaning up deleted instances with incomplete migration  _cleanup_incomplete_migrations /usr/lib/python3.9/site-packages/nova/compute/manager.py:11183
Oct 11 03:02:03 compute-0 nova_compute[356901]: 2025-10-11 03:02:03.058 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:03 compute-0 ceph-mon[191930]: pgmap v2537: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2538: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:05 compute-0 ceph-mon[191930]: pgmap v2538: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2539: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:05 compute-0 nova_compute[356901]: 2025-10-11 03:02:05.919 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:06 compute-0 podman[494929]: 2025-10-11 03:02:06.236078746 +0000 UTC m=+0.111162287 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.license=GPLv2, container_name=iscsid, io.buildah.version=1.41.3, org.label-schema.build-date=20251009)
Oct 11 03:02:06 compute-0 podman[494928]: 2025-10-11 03:02:06.23757792 +0000 UTC m=+0.119382270 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_id=multipathd, container_name=multipathd, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 03:02:06 compute-0 ceph-mon[191930]: pgmap v2539: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0009191400908380543 of space, bias 1.0, pg target 0.2757420272514163 quantized to 32 (current 32)
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
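
Each "pg target" above is reproducible arithmetic: used-space ratio × pool bias × the autoscaler's PG budget for this root. With the three OSDs inventoried earlier and the default mon_target_pg_per_osd of 100, that budget is 300 — an assumption, since the budget itself is never printed here. A quick Python check against three of the logged lines:

    # Assumed budget: mon_target_pg_per_osd (default 100) * 3 OSDs = 300.
    PG_BUDGET = 100 * 3

    for pool, used_ratio, bias in [
        (".mgr",               7.185749983720779e-06, 1.0),
        ("vms",                0.0005513950275118838, 1.0),
        ("cephfs.cephfs.meta", 5.087256625643029e-07, 4.0),
    ]:
        print(pool, used_ratio * bias * PG_BUDGET)
    # -> 0.0021557..., 0.1654185..., 0.0006104..., matching the logged targets,
    #    which are then quantized to a power of two (hence "16 (current 32)"
    #    for the cephfs metadata pool).
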
Oct 11 03:02:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2540: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:07 compute-0 nova_compute[356901]: 2025-10-11 03:02:07.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._cleanup_expired_console_auth_tokens run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:02:08 compute-0 nova_compute[356901]: 2025-10-11 03:02:08.061 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:08 compute-0 ceph-mon[191930]: pgmap v2540: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2541: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:10 compute-0 nova_compute[356901]: 2025-10-11 03:02:10.922 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:10 compute-0 ceph-mon[191930]: pgmap v2541: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2542: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:02:12 compute-0 ceph-mon[191930]: pgmap v2542: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:13 compute-0 nova_compute[356901]: 2025-10-11 03:02:13.066 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2543: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:14 compute-0 podman[494969]: 2025-10-11 03:02:14.22973296 +0000 UTC m=+0.116129804 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, managed_by=edpm_ansible)
Oct 11 03:02:14 compute-0 podman[494970]: 2025-10-11 03:02:14.233898988 +0000 UTC m=+0.117078288 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, io.openshift.tags=minimal rhel9, config_id=edpm, container_name=openstack_network_exporter, name=ubi9-minimal, com.redhat.component=ubi9-minimal-container, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, release=1755695350, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, managed_by=edpm_ansible, url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.openshift.expose-services=, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., maintainer=Red Hat, Inc., build-date=2025-08-20T13:12:41, version=9.6, architecture=x86_64, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public)
Oct 11 03:02:14 compute-0 podman[494971]: 2025-10-11 03:02:14.272580548 +0000 UTC m=+0.143618114 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter)
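The three health_status records above are emitted by podman's own healthcheck timer; each runs the 'test' command from the container's config_data against the mounted /openstack healthcheck script. A minimal sketch of re-running those checks on demand, assuming the container names from the log exist on this host and podman is on PATH:

    # Minimal sketch: re-run the healthchecks podman just logged. Assumes the
    # container names from the log exist locally and `podman` is on PATH.
    import subprocess

    for name in ("ceilometer_agent_ipmi", "openstack_network_exporter", "node_exporter"):
        # `podman healthcheck run` executes the container's configured
        # 'healthcheck: test' command and exits 0 when healthy.
        result = subprocess.run(["podman", "healthcheck", "run", name],
                                capture_output=True, text=True)
        print(name, "healthy" if result.returncode == 0 else "unhealthy")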
Oct 11 03:02:15 compute-0 ceph-mon[191930]: pgmap v2543: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2544: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:15 compute-0 nova_compute[356901]: 2025-10-11 03:02:15.924 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:02:17 compute-0 ceph-mon[191930]: pgmap v2544: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2545: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:18 compute-0 nova_compute[356901]: 2025-10-11 03:02:18.070 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:18 compute-0 nova_compute[356901]: 2025-10-11 03:02:18.924 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:02:19 compute-0 ceph-mon[191930]: pgmap v2545: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2546: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:20 compute-0 nova_compute[356901]: 2025-10-11 03:02:20.089 2 DEBUG oslo_concurrency.processutils [None req-8af0aaf5-9b39-4d45-be6d-50a915b8edf6 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] Running cmd (subprocess): env LANG=C uptime execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 03:02:20 compute-0 nova_compute[356901]: 2025-10-11 03:02:20.148 2 DEBUG oslo_concurrency.processutils [None req-8af0aaf5-9b39-4d45-be6d-50a915b8edf6 d215f3ebbc07435493ccd666fc80109d 97026531b3404a11869cb85a059c4a0d - - default default] CMD "env LANG=C uptime" returned: 0 in 0.059s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
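The "Running cmd (subprocess)" / "returned: 0" pair above is oslo.concurrency's processutils at work: nova shells out, then logs the command, exit code, and wall time. A minimal sketch of the same call pattern, assuming oslo.concurrency is installed:

    # Minimal sketch of the call pattern behind the "Running cmd (subprocess):
    # env LANG=C uptime" / "returned: 0" pair. Assumes oslo.concurrency is installed.
    from oslo_concurrency import processutils

    # execute() returns (stdout, stderr) and raises ProcessExecutionError
    # on a non-zero exit code by default.
    stdout, _stderr = processutils.execute("env", "LANG=C", "uptime")
    print(stdout.strip())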
Oct 11 03:02:20 compute-0 ceph-mon[191930]: pgmap v2546: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:20 compute-0 nova_compute[356901]: 2025-10-11 03:02:20.928 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2547: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:02:22 compute-0 ceph-mon[191930]: pgmap v2547: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:22 compute-0 nova_compute[356901]: 2025-10-11 03:02:22.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:02:23 compute-0 nova_compute[356901]: 2025-10-11 03:02:23.075 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2548: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:23 compute-0 nova_compute[356901]: 2025-10-11 03:02:23.898 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:02:24 compute-0 podman[495031]: 2025-10-11 03:02:24.238122144 +0000 UTC m=+0.128608540 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, vendor=Red Hat, Inc., managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., container_name=kepler, version=9.4, io.buildah.version=1.29.0, architecture=x86_64, config_id=edpm, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9, com.redhat.component=ubi9-container, name=ubi9, io.openshift.expose-services=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., build-date=2024-09-18T21:23:30, release=1214.1726694543, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, vcs-type=git, io.openshift.tags=base rhel9, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, distribution-scope=public, release-0.7.12=)
Oct 11 03:02:24 compute-0 ceph-mon[191930]: pgmap v2548: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2549: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:25 compute-0 nova_compute[356901]: 2025-10-11 03:02:25.930 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:02:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:02:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:02:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:02:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:02:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:02:26 compute-0 ceph-mon[191930]: pgmap v2549: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:26 compute-0 nova_compute[356901]: 2025-10-11 03:02:26.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:02:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:02:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2550: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 03:02:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/4292353538' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 03:02:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 03:02:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/4292353538' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
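The two audit dispatches above are librados mon commands issued as client.openstack (the RBD drivers polling pool capacity and quota). A minimal sketch replaying them with the rados Python binding, assuming python3-rados, a readable /etc/ceph/ceph.conf, and the client.openstack keyring:

    # Minimal sketch: replay the audited mon commands as client.openstack.
    # Assumes python3-rados, a readable /etc/ceph/ceph.conf, and the
    # client.openstack keyring.
    import json
    import rados

    cluster = rados.Rados(conffile="/etc/ceph/ceph.conf", name="client.openstack")
    cluster.connect()
    try:
        for cmd in ({"prefix": "df", "format": "json"},
                    {"prefix": "osd pool get-quota", "pool": "volumes", "format": "json"}):
            # mon_command() takes the JSON command and an input buffer and
            # returns (return code, output bytes, status string).
            ret, outbuf, errs = cluster.mon_command(json.dumps(cmd), b"")
            print(cmd["prefix"], "-> rc", ret, "payload", len(outbuf), "bytes")
    finally:
        cluster.shutdown()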
Oct 11 03:02:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:02:28.077 286362 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=18, options={'arp_ns_explicit_output': 'true', 'mac_prefix': 'fe:55:97', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'ce:9c:4f:b4:85:9b'}, ipsec=False) old=SB_Global(nb_cfg=17) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43
Oct 11 03:02:28 compute-0 nova_compute[356901]: 2025-10-11 03:02:28.078 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:28 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:02:28.079 286362 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 5 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274
Oct 11 03:02:28 compute-0 podman[495049]: 2025-10-11 03:02:28.212410344 +0000 UTC m=+0.112956541 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 03:02:28 compute-0 podman[495052]: 2025-10-11 03:02:28.215662779 +0000 UTC m=+0.088828375 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, container_name=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, config_id=ovn_metadata_agent, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3)
Oct 11 03:02:28 compute-0 podman[495051]: 2025-10-11 03:02:28.250681131 +0000 UTC m=+0.126460039 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.build-date=20251007, io.buildah.version=1.41.4, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.license=GPLv2, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=edpm, maintainer=OpenStack Kubernetes Operator team)
Oct 11 03:02:28 compute-0 podman[495050]: 2025-10-11 03:02:28.275802423 +0000 UTC m=+0.169996689 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, container_name=ovn_controller, org.label-schema.build-date=20251009, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller)
Oct 11 03:02:28 compute-0 ceph-mon[191930]: pgmap v2550: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/4292353538' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 03:02:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/4292353538' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 03:02:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2551: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:29 compute-0 podman[157119]: time="2025-10-11T03:02:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 03:02:29 compute-0 podman[157119]: @ - - [11/Oct/2025:03:02:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 03:02:29 compute-0 podman[157119]: @ - - [11/Oct/2025:03:02:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9114 "" "Go-http-client/1.1"
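The "@ - -" access-log lines come from the podman system service REST API being scraped over its unix socket (the CONTAINER_HOST configured for podman_exporter). A minimal sketch of the same libpod query, assuming the service listens on /run/podman/podman.sock and curl is available:

    # Minimal sketch: the same libpod query seen in the access log, over the
    # unix socket podman_exporter uses (CONTAINER_HOST=unix:///run/podman/podman.sock).
    # Assumes the podman system service is running and curl is available.
    import json
    import subprocess

    raw = subprocess.run(
        ["curl", "--silent", "--unix-socket", "/run/podman/podman.sock",
         "http://d/v4.9.3/libpod/containers/json?all=true"],
        capture_output=True, text=True, check=True).stdout
    for ctr in json.loads(raw):
        print(ctr["Names"][0], ctr["State"])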
Oct 11 03:02:30 compute-0 nova_compute[356901]: 2025-10-11 03:02:30.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:02:30 compute-0 nova_compute[356901]: 2025-10-11 03:02:30.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 03:02:30 compute-0 nova_compute[356901]: 2025-10-11 03:02:30.931 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:31 compute-0 ceph-mon[191930]: pgmap v2551: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:31 compute-0 openstack_network_exporter[374316]: ERROR   03:02:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 03:02:31 compute-0 openstack_network_exporter[374316]: ERROR   03:02:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:02:31 compute-0 openstack_network_exporter[374316]: ERROR   03:02:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:02:31 compute-0 openstack_network_exporter[374316]: ERROR   03:02:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 03:02:31 compute-0 openstack_network_exporter[374316]: ERROR   03:02:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
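The appctl.go errors above are expected on a compute node: openstack_network_exporter probes control sockets for ovsdb-server and ovn-northd that only exist on controller/gateway hosts, and the dpif-netdev/pmd-* commands apply only to a userspace (netdev) datapath. A minimal sketch of the same probes, assuming ovs-appctl is installed and OVS runs with the kernel datapath as here:

    # Minimal sketch of the probes behind the appctl.go errors. The
    # dpif-netdev/* commands only work with a userspace (netdev) datapath,
    # so on a kernel-datapath compute node they fail exactly as logged.
    # Assumes ovs-appctl is installed and ovs-vswitchd is running.
    import subprocess

    for command in ("dpif-netdev/pmd-perf-show", "dpif-netdev/pmd-rxq-show"):
        proc = subprocess.run(["ovs-appctl", command],
                              capture_output=True, text=True)
        print(command, "-> rc", proc.returncode,
              (proc.stderr or proc.stdout).strip())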
Oct 11 03:02:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2552: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:02:33 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:02:33.081 286362 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '18'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89
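The DbSetCommand above is the metadata agent acknowledging the earlier SB_Global nb_cfg bump (17 -> 18, logged at 03:02:28) by writing neutron:ovn-metadata-sb-cfg into its Chassis_Private row. A minimal sketch to read the value back, assuming ovn-sbctl can reach the southbound DB and using the record UUID from the log:

    # Minimal sketch: read back the key the DbSetCommand wrote. Assumes
    # ovn-sbctl can reach the southbound DB; the record UUID is taken from
    # the transaction log line above.
    import subprocess

    record = "47bfc2f2-df38-4ab3-85ce-8b3a50eea8f6"
    out = subprocess.run(
        ["ovn-sbctl", "--bare", "--columns=external_ids",
         "list", "Chassis_Private", record],
        capture_output=True, text=True, check=True).stdout
    print(out.strip())  # expect neutron:ovn-metadata-sb-cfg=18 per the log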
Oct 11 03:02:33 compute-0 nova_compute[356901]: 2025-10-11 03:02:33.083 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:33 compute-0 ceph-mon[191930]: pgmap v2552: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2553: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:34 compute-0 nova_compute[356901]: 2025-10-11 03:02:34.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:02:35 compute-0 ceph-mon[191930]: pgmap v2553: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2554: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:35 compute-0 nova_compute[356901]: 2025-10-11 03:02:35.936 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:36 compute-0 nova_compute[356901]: 2025-10-11 03:02:36.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:02:36 compute-0 nova_compute[356901]: 2025-10-11 03:02:36.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 03:02:36 compute-0 nova_compute[356901]: 2025-10-11 03:02:36.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 03:02:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:02:37 compute-0 podman[495135]: 2025-10-11 03:02:37.22817611 +0000 UTC m=+0.116136394 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=iscsid, managed_by=edpm_ansible, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Oct 11 03:02:37 compute-0 podman[495134]: 2025-10-11 03:02:37.234726115 +0000 UTC m=+0.128987856 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=multipathd, managed_by=edpm_ansible, org.label-schema.license=GPLv2)
Oct 11 03:02:37 compute-0 nova_compute[356901]: 2025-10-11 03:02:37.236 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 03:02:37 compute-0 nova_compute[356901]: 2025-10-11 03:02:37.237 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 03:02:37 compute-0 nova_compute[356901]: 2025-10-11 03:02:37.237 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 03:02:37 compute-0 nova_compute[356901]: 2025-10-11 03:02:37.237 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 03:02:37 compute-0 ceph-mon[191930]: pgmap v2554: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2555: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:38 compute-0 nova_compute[356901]: 2025-10-11 03:02:38.089 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:38 compute-0 nova_compute[356901]: 2025-10-11 03:02:38.456 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 03:02:38 compute-0 nova_compute[356901]: 2025-10-11 03:02:38.478 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 03:02:38 compute-0 nova_compute[356901]: 2025-10-11 03:02:38.479 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
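The info_cache JSON above pins instance 0cc56d17-ec3a-4408-bccb-91b29427379e to Neutron port 64dfc81b-528a-4adc-9787-66719d2f9f93 (fixed 192.168.0.236, floating 192.168.122.201). A minimal sketch to cross-check the port from the API side, assuming python-openstackclient and credentials for the owning project:

    # Minimal sketch: cross-check the cached port from the Neutron side.
    # Assumes python-openstackclient and credentials for the owning project;
    # the port UUID comes from the info_cache entry above.
    import subprocess

    subprocess.run(
        ["openstack", "port", "show", "64dfc81b-528a-4adc-9787-66719d2f9f93",
         "-c", "fixed_ips", "-c", "mac_address", "-c", "status"],
        check=True)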
Oct 11 03:02:38 compute-0 nova_compute[356901]: 2025-10-11 03:02:38.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:02:38 compute-0 nova_compute[356901]: 2025-10-11 03:02:38.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:02:38 compute-0 nova_compute[356901]: 2025-10-11 03:02:38.938 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:02:38 compute-0 nova_compute[356901]: 2025-10-11 03:02:38.938 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:02:38 compute-0 nova_compute[356901]: 2025-10-11 03:02:38.939 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 03:02:38 compute-0 nova_compute[356901]: 2025-10-11 03:02:38.939 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 03:02:38 compute-0 nova_compute[356901]: 2025-10-11 03:02:38.940 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 03:02:39 compute-0 ceph-mon[191930]: pgmap v2555: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 03:02:39 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/4092001259' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:02:39 compute-0 nova_compute[356901]: 2025-10-11 03:02:39.482 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.542s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 03:02:39 compute-0 nova_compute[356901]: 2025-10-11 03:02:39.633 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:02:39 compute-0 nova_compute[356901]: 2025-10-11 03:02:39.634 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:02:39 compute-0 nova_compute[356901]: 2025-10-11 03:02:39.634 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:02:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2556: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:40 compute-0 nova_compute[356901]: 2025-10-11 03:02:40.042 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 03:02:40 compute-0 nova_compute[356901]: 2025-10-11 03:02:40.045 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3642MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 03:02:40 compute-0 nova_compute[356901]: 2025-10-11 03:02:40.046 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:02:40 compute-0 nova_compute[356901]: 2025-10-11 03:02:40.046 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:02:40 compute-0 nova_compute[356901]: 2025-10-11 03:02:40.147 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 03:02:40 compute-0 nova_compute[356901]: 2025-10-11 03:02:40.148 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 03:02:40 compute-0 nova_compute[356901]: 2025-10-11 03:02:40.148 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 03:02:40 compute-0 nova_compute[356901]: 2025-10-11 03:02:40.196 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 03:02:40 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/4092001259' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:02:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 03:02:40 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2456996253' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:02:40 compute-0 nova_compute[356901]: 2025-10-11 03:02:40.738 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.542s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 03:02:40 compute-0 nova_compute[356901]: 2025-10-11 03:02:40.751 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 03:02:40 compute-0 nova_compute[356901]: 2025-10-11 03:02:40.805 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
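The inventory dict above is what the resource tracker reconciled against placement for provider 256b11da-7f71-42c0-941c-ea1e909a35f8. A minimal sketch listing the same inventory, assuming python-openstackclient with the osc-placement plugin and admin credentials:

    # Minimal sketch: list the placement inventory nova reconciled against.
    # Assumes python-openstackclient with the osc-placement plugin and admin
    # credentials; the provider UUID comes from the log.
    import subprocess

    provider = "256b11da-7f71-42c0-941c-ea1e909a35f8"
    subprocess.run(
        ["openstack", "resource", "provider", "inventory", "list", provider],
        check=True)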
Oct 11 03:02:40 compute-0 nova_compute[356901]: 2025-10-11 03:02:40.808 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 03:02:40 compute-0 nova_compute[356901]: 2025-10-11 03:02:40.808 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.762s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 03:02:40 compute-0 nova_compute[356901]: 2025-10-11 03:02:40.939 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:41 compute-0 ceph-mon[191930]: pgmap v2556: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:41 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2456996253' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:02:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2557: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:02:42 compute-0 ceph-mon[191930]: pgmap v2557: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:43 compute-0 nova_compute[356901]: 2025-10-11 03:02:43.093 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2558: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:44 compute-0 podman[495220]: 2025-10-11 03:02:44.818638184 +0000 UTC m=+0.101965892 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 03:02:44 compute-0 podman[495218]: 2025-10-11 03:02:44.850626342 +0000 UTC m=+0.135766460 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, config_id=edpm, container_name=ceilometer_agent_ipmi, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.schema-version=1.0)
Oct 11 03:02:44 compute-0 podman[495219]: 2025-10-11 03:02:44.856571747 +0000 UTC m=+0.121990838 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, name=ubi9-minimal, io.openshift.expose-services=, com.redhat.component=ubi9-minimal-container, release=1755695350, container_name=openstack_network_exporter, vendor=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vcs-type=git, config_id=edpm, distribution-scope=public, io.buildah.version=1.33.7, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, url=https://catalog.redhat.com/en/search?searchType=containers, io.openshift.tags=minimal rhel9, architecture=x86_64, managed_by=edpm_ansible, maintainer=Red Hat, Inc., version=9.6, build-date=2025-08-20T13:12:41, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 03:02:44 compute-0 ceph-mon[191930]: pgmap v2558: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2559: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:45 compute-0 nova_compute[356901]: 2025-10-11 03:02:45.941 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:02:46 compute-0 ceph-mon[191930]: pgmap v2559: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2560: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:48 compute-0 nova_compute[356901]: 2025-10-11 03:02:48.099 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:48 compute-0 ceph-mon[191930]: pgmap v2560: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2561: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:49 compute-0 sudo[495281]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:02:49 compute-0 sudo[495281]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:02:49 compute-0 sudo[495281]: pam_unix(sudo:session): session closed for user root
Oct 11 03:02:50 compute-0 sudo[495306]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:02:50 compute-0 sudo[495306]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:02:50 compute-0 sudo[495306]: pam_unix(sudo:session): session closed for user root
Oct 11 03:02:50 compute-0 sudo[495331]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:02:50 compute-0 sudo[495331]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:02:50 compute-0 sudo[495331]: pam_unix(sudo:session): session closed for user root
Oct 11 03:02:50 compute-0 sudo[495356]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 03:02:50 compute-0 sudo[495356]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:02:50 compute-0 nova_compute[356901]: 2025-10-11 03:02:50.946 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:50 compute-0 sudo[495356]: pam_unix(sudo:session): session closed for user root
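The sudo lines above are the cephadm orchestrator's ssh user (ceph-admin) probing the host and then running the copied cephadm binary with gather-facts. A minimal sketch of the same fact collection, assuming a cephadm binary on PATH and sudo rights:

    # Minimal sketch of the fact collection the orchestrator just ran.
    # Assumes a cephadm binary on PATH and sudo rights; gather-facts prints
    # one JSON object describing the host.
    import json
    import subprocess

    facts = json.loads(subprocess.run(
        ["sudo", "cephadm", "gather-facts"],
        capture_output=True, text=True, check=True).stdout)
    print(facts.get("hostname"), facts.get("kernel", ""))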
Oct 11 03:02:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 03:02:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:02:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 03:02:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 03:02:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 03:02:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:02:51 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev be227570-64c1-4274-9c33-dcfaf8f2fc4a does not exist
Oct 11 03:02:51 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 2bcd380c-23d5-4265-b776-357113cf7e95 does not exist
Oct 11 03:02:51 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 8b3a3e2e-10a3-4f5a-8bab-5857924fc4de does not exist
Oct 11 03:02:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 03:02:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 03:02:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 03:02:51 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 03:02:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 03:02:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:02:51 compute-0 ceph-mon[191930]: pgmap v2561: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:51 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:02:51 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 03:02:51 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:02:51 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 03:02:51 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 03:02:51 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
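Before touching the OSDs, the mgr (mgr.compute-0.bzgmgr) fetches a minimal client config, the admin and bootstrap-osd keyrings, and the list of destroyed OSDs; that is what the audit dispatch lines above record. The equivalent queries from the ceph CLI (a sketch, assuming client.admin is available):

    # the same mon commands the mgr dispatched, issued manually
    ceph config generate-minimal-conf
    ceph auth get client.bootstrap-osd
    ceph osd tree destroyed --format json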
Oct 11 03:02:51 compute-0 sudo[495412]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:02:51 compute-0 sudo[495412]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:02:51 compute-0 sudo[495412]: pam_unix(sudo:session): session closed for user root
Oct 11 03:02:51 compute-0 sudo[495437]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:02:51 compute-0 sudo[495437]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:02:51 compute-0 sudo[495437]: pam_unix(sudo:session): session closed for user root
Oct 11 03:02:51 compute-0 sudo[495462]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:02:51 compute-0 sudo[495462]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:02:51 compute-0 sudo[495462]: pam_unix(sudo:session): session closed for user root
Oct 11 03:02:51 compute-0 sudo[495487]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 03:02:51 compute-0 sudo[495487]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:02:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2562: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:02:52 compute-0 podman[495552]: 2025-10-11 03:02:52.242742725 +0000 UTC m=+0.072998403 container create cba44ca3f5bba8c51d68152b8d737dc2d0986684b9e9b45d189b1a58ada3853f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_mestorf, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 03:02:52 compute-0 podman[495552]: 2025-10-11 03:02:52.220324921 +0000 UTC m=+0.050580639 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:02:52 compute-0 systemd[1]: Started libpod-conmon-cba44ca3f5bba8c51d68152b8d737dc2d0986684b9e9b45d189b1a58ada3853f.scope.
Oct 11 03:02:52 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:02:52 compute-0 podman[495552]: 2025-10-11 03:02:52.398704515 +0000 UTC m=+0.228960213 container init cba44ca3f5bba8c51d68152b8d737dc2d0986684b9e9b45d189b1a58ada3853f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_mestorf, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS)
Oct 11 03:02:52 compute-0 podman[495552]: 2025-10-11 03:02:52.416997884 +0000 UTC m=+0.247253572 container start cba44ca3f5bba8c51d68152b8d737dc2d0986684b9e9b45d189b1a58ada3853f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_mestorf, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, ceph=True)
Oct 11 03:02:52 compute-0 podman[495552]: 2025-10-11 03:02:52.423469056 +0000 UTC m=+0.253724754 container attach cba44ca3f5bba8c51d68152b8d737dc2d0986684b9e9b45d189b1a58ada3853f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_mestorf, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 03:02:52 compute-0 suspicious_mestorf[495568]: 167 167
Oct 11 03:02:52 compute-0 systemd[1]: libpod-cba44ca3f5bba8c51d68152b8d737dc2d0986684b9e9b45d189b1a58ada3853f.scope: Deactivated successfully.
Oct 11 03:02:52 compute-0 podman[495552]: 2025-10-11 03:02:52.432139391 +0000 UTC m=+0.262395109 container died cba44ca3f5bba8c51d68152b8d737dc2d0986684b9e9b45d189b1a58ada3853f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_mestorf, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, ceph=True, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 03:02:52 compute-0 systemd[1]: var-lib-containers-storage-overlay-39a6674fba01e493c8694c24a45fbba2d57ca3a9b816d60326f6c2ef0cdc6955-merged.mount: Deactivated successfully.
Oct 11 03:02:52 compute-0 podman[495552]: 2025-10-11 03:02:52.522050396 +0000 UTC m=+0.352306114 container remove cba44ca3f5bba8c51d68152b8d737dc2d0986684b9e9b45d189b1a58ada3853f (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_mestorf, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.39.3, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 03:02:52 compute-0 systemd[1]: libpod-conmon-cba44ca3f5bba8c51d68152b8d737dc2d0986684b9e9b45d189b1a58ada3853f.scope: Deactivated successfully.
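The short-lived suspicious_mestorf container that printed "167 167" is cephadm's uid/gid probe: before running ceph-volume it stats /var/lib/ceph inside the image to learn the ceph user and group (both 167 in this image). A rough manual equivalent (an assumption about the probe's exact form, not taken from the log):

    # ask the ceph image which uid/gid owns /var/lib/ceph; expect "167 167"
    podman run --rm --entrypoint stat \
      quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 \
      -c '%u %g' /var/lib/ceph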
Oct 11 03:02:52 compute-0 podman[495591]: 2025-10-11 03:02:52.811618484 +0000 UTC m=+0.083670682 container create e15b1aea7bca6fa4d41ff673ec4e44cb7bd8a222a892d882293b92f5ed80653b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_dubinsky, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 03:02:52 compute-0 podman[495591]: 2025-10-11 03:02:52.774390904 +0000 UTC m=+0.046443182 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:02:52 compute-0 systemd[1]: Started libpod-conmon-e15b1aea7bca6fa4d41ff673ec4e44cb7bd8a222a892d882293b92f5ed80653b.scope.
Oct 11 03:02:52 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:02:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c84454b3db6fb91ebfcb1fe9abb7a9e4744bd72538f9ab41f69bbd227de84a93/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 03:02:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c84454b3db6fb91ebfcb1fe9abb7a9e4744bd72538f9ab41f69bbd227de84a93/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 03:02:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c84454b3db6fb91ebfcb1fe9abb7a9e4744bd72538f9ab41f69bbd227de84a93/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 03:02:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c84454b3db6fb91ebfcb1fe9abb7a9e4744bd72538f9ab41f69bbd227de84a93/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 03:02:52 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/c84454b3db6fb91ebfcb1fe9abb7a9e4744bd72538f9ab41f69bbd227de84a93/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 03:02:52 compute-0 podman[495591]: 2025-10-11 03:02:52.981059886 +0000 UTC m=+0.253112104 container init e15b1aea7bca6fa4d41ff673ec4e44cb7bd8a222a892d882293b92f5ed80653b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_dubinsky, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Oct 11 03:02:52 compute-0 podman[495591]: 2025-10-11 03:02:52.997712311 +0000 UTC m=+0.269764539 container start e15b1aea7bca6fa4d41ff673ec4e44cb7bd8a222a892d882293b92f5ed80653b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_dubinsky, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 03:02:53 compute-0 podman[495591]: 2025-10-11 03:02:53.007926586 +0000 UTC m=+0.279978874 container attach e15b1aea7bca6fa4d41ff673ec4e44cb7bd8a222a892d882293b92f5ed80653b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_dubinsky, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Oct 11 03:02:53 compute-0 nova_compute[356901]: 2025-10-11 03:02:53.104 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:53 compute-0 ceph-mon[191930]: pgmap v2562: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2563: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:54 compute-0 hungry_dubinsky[495606]: --> passed data devices: 0 physical, 3 LVM
Oct 11 03:02:54 compute-0 hungry_dubinsky[495606]: --> relative data size: 1.0
Oct 11 03:02:54 compute-0 hungry_dubinsky[495606]: --> All data devices are unavailable
Oct 11 03:02:54 compute-0 systemd[1]: libpod-e15b1aea7bca6fa4d41ff673ec4e44cb7bd8a222a892d882293b92f5ed80653b.scope: Deactivated successfully.
Oct 11 03:02:54 compute-0 systemd[1]: libpod-e15b1aea7bca6fa4d41ff673ec4e44cb7bd8a222a892d882293b92f5ed80653b.scope: Consumed 1.206s CPU time.
Oct 11 03:02:54 compute-0 podman[495591]: 2025-10-11 03:02:54.279862679 +0000 UTC m=+1.551914937 container died e15b1aea7bca6fa4d41ff673ec4e44cb7bd8a222a892d882293b92f5ed80653b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_dubinsky, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 03:02:54 compute-0 systemd[1]: var-lib-containers-storage-overlay-c84454b3db6fb91ebfcb1fe9abb7a9e4744bd72538f9ab41f69bbd227de84a93-merged.mount: Deactivated successfully.
Oct 11 03:02:54 compute-0 podman[495591]: 2025-10-11 03:02:54.36931673 +0000 UTC m=+1.641368928 container remove e15b1aea7bca6fa4d41ff673ec4e44cb7bd8a222a892d882293b92f5ed80653b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=hungry_dubinsky, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 03:02:54 compute-0 systemd[1]: libpod-conmon-e15b1aea7bca6fa4d41ff673ec4e44cb7bd8a222a892d882293b92f5ed80653b.scope: Deactivated successfully.
Oct 11 03:02:54 compute-0 sudo[495487]: pam_unix(sudo:session): session closed for user root
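hungry_dubinsky is the actual ceph-volume lvm batch run from the sudo command above, and its "All data devices are unavailable" means batch had nothing to create: the three LVs already carry OSDs 0-2, as the lvm list output below confirms, so the call exits without changes. A dry run reaches the same conclusion without side effects (a sketch, assuming the cephadm CLI is installed on the host):

    # preview what lvm batch would do with these LVs; expect no proposed OSDs
    cephadm ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- \
      lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --report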
Oct 11 03:02:54 compute-0 podman[495636]: 2025-10-11 03:02:54.453065544 +0000 UTC m=+0.122002188 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, version=9.4, container_name=kepler, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, release=1214.1726694543, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, config_id=edpm, com.redhat.component=ubi9-container, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel9, summary=Provides the latest release of Red Hat Universal Base Image 9., io.k8s.display-name=Red Hat Universal Base Image 9, maintainer=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.29.0, release-0.7.12=, distribution-scope=public, architecture=x86_64, managed_by=edpm_ansible, io.openshift.expose-services=, name=ubi9, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, build-date=2024-09-18T21:23:30, vendor=Red Hat, Inc.)
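The kepler health_status=healthy event above comes from podman's periodic healthcheck, defined in the container's config_data ('/openstack/healthcheck kepler'). The same check can be triggered outside the timer (a sketch):

    # run the kepler container's healthcheck once; exit status 0 means healthy
    podman healthcheck run kepler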
Oct 11 03:02:54 compute-0 sudo[495668]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:02:54 compute-0 sudo[495668]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:02:54 compute-0 sudo[495668]: pam_unix(sudo:session): session closed for user root
Oct 11 03:02:54 compute-0 sudo[495694]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:02:54 compute-0 sudo[495694]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:02:54 compute-0 sudo[495694]: pam_unix(sudo:session): session closed for user root
Oct 11 03:02:54 compute-0 sudo[495719]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:02:54 compute-0 sudo[495719]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:02:54 compute-0 sudo[495719]: pam_unix(sudo:session): session closed for user root
Oct 11 03:02:54 compute-0 sudo[495744]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 03:02:54 compute-0 sudo[495744]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:02:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:02:54.899 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:02:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:02:54.899 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:02:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:02:54.901 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 03:02:55 compute-0 ceph-mon[191930]: pgmap v2563: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:55 compute-0 podman[495808]: 2025-10-11 03:02:55.500073799 +0000 UTC m=+0.081676777 container create e54fe753f6dcffd03f0e44571fe2a29228303cced02db279b3333c99e6d81612 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_kowalevski, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0)
Oct 11 03:02:55 compute-0 podman[495808]: 2025-10-11 03:02:55.471783292 +0000 UTC m=+0.053386290 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:02:55 compute-0 systemd[1]: Started libpod-conmon-e54fe753f6dcffd03f0e44571fe2a29228303cced02db279b3333c99e6d81612.scope.
Oct 11 03:02:55 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:02:55 compute-0 podman[495808]: 2025-10-11 03:02:55.661135266 +0000 UTC m=+0.242738294 container init e54fe753f6dcffd03f0e44571fe2a29228303cced02db279b3333c99e6d81612 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_kowalevski, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507)
Oct 11 03:02:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2564: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:55 compute-0 podman[495808]: 2025-10-11 03:02:55.68202646 +0000 UTC m=+0.263629448 container start e54fe753f6dcffd03f0e44571fe2a29228303cced02db279b3333c99e6d81612 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_kowalevski, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS)
Oct 11 03:02:55 compute-0 podman[495808]: 2025-10-11 03:02:55.688755211 +0000 UTC m=+0.270358189 container attach e54fe753f6dcffd03f0e44571fe2a29228303cced02db279b3333c99e6d81612 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_kowalevski, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef)
Oct 11 03:02:55 compute-0 jovial_kowalevski[495823]: 167 167
Oct 11 03:02:55 compute-0 systemd[1]: libpod-e54fe753f6dcffd03f0e44571fe2a29228303cced02db279b3333c99e6d81612.scope: Deactivated successfully.
Oct 11 03:02:55 compute-0 podman[495808]: 2025-10-11 03:02:55.695168281 +0000 UTC m=+0.276771249 container died e54fe753f6dcffd03f0e44571fe2a29228303cced02db279b3333c99e6d81612 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_kowalevski, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 03:02:55 compute-0 systemd[1]: var-lib-containers-storage-overlay-d9f04a66d64e50d0c6bfe0f38f402a1cbea9ab080119a4012a483da795bb344f-merged.mount: Deactivated successfully.
Oct 11 03:02:55 compute-0 podman[495808]: 2025-10-11 03:02:55.771020126 +0000 UTC m=+0.352623114 container remove e54fe753f6dcffd03f0e44571fe2a29228303cced02db279b3333c99e6d81612 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=jovial_kowalevski, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=reef)
Oct 11 03:02:55 compute-0 systemd[1]: libpod-conmon-e54fe753f6dcffd03f0e44571fe2a29228303cced02db279b3333c99e6d81612.scope: Deactivated successfully.
Oct 11 03:02:55 compute-0 nova_compute[356901]: 2025-10-11 03:02:55.949 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:56 compute-0 podman[495846]: 2025-10-11 03:02:56.050436271 +0000 UTC m=+0.069769697 container create df4e251b028da885b20ac33f7a6e820f3037e7bc53b3b2914d96c4445f8e4716 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_bell, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 03:02:56 compute-0 systemd[1]: Started libpod-conmon-df4e251b028da885b20ac33f7a6e820f3037e7bc53b3b2914d96c4445f8e4716.scope.
Oct 11 03:02:56 compute-0 podman[495846]: 2025-10-11 03:02:56.03177433 +0000 UTC m=+0.051107806 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:02:56 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:02:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4a39c7b0458f37f66fccd828cb774603beaef0ddde5d4d98abc576200bdb94d8/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 03:02:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4a39c7b0458f37f66fccd828cb774603beaef0ddde5d4d98abc576200bdb94d8/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 03:02:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4a39c7b0458f37f66fccd828cb774603beaef0ddde5d4d98abc576200bdb94d8/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 03:02:56 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/4a39c7b0458f37f66fccd828cb774603beaef0ddde5d4d98abc576200bdb94d8/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 03:02:56 compute-0 podman[495846]: 2025-10-11 03:02:56.187218463 +0000 UTC m=+0.206551939 container init df4e251b028da885b20ac33f7a6e820f3037e7bc53b3b2914d96c4445f8e4716 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_bell, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_REF=reef)
Oct 11 03:02:56 compute-0 podman[495846]: 2025-10-11 03:02:56.207135645 +0000 UTC m=+0.226469091 container start df4e251b028da885b20ac33f7a6e820f3037e7bc53b3b2914d96c4445f8e4716 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_bell, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 03:02:56 compute-0 podman[495846]: 2025-10-11 03:02:56.213921788 +0000 UTC m=+0.233255224 container attach df4e251b028da885b20ac33f7a6e820f3037e7bc53b3b2914d96c4445f8e4716 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_bell, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Oct 11 03:02:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:02:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:02:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:02:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:02:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:02:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:02:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_03:02:56
Oct 11 03:02:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 03:02:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 03:02:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.log', 'cephfs.cephfs.data', '.rgw.root', 'volumes', 'default.rgw.control', 'cephfs.cephfs.meta', '.mgr', 'backups', 'images', 'default.rgw.meta', 'vms']
Oct 11 03:02:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
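The balancer block above is the mgr's periodic upmap pass: with all 321 PGs active+clean and data spread evenly, "prepared 0/10 changes" means no upmap entries were worth creating. The knobs it logged can be inspected directly (a sketch):

    # balancer mode, current distribution score, and the 0.05 misplaced ceiling from the log
    ceph balancer status
    ceph balancer eval
    ceph config get mgr target_max_misplaced_ratio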
Oct 11 03:02:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:02:57 compute-0 brave_bell[495862]: {
Oct 11 03:02:57 compute-0 brave_bell[495862]:     "0": [
Oct 11 03:02:57 compute-0 brave_bell[495862]:         {
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "devices": [
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "/dev/loop3"
Oct 11 03:02:57 compute-0 brave_bell[495862]:             ],
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "lv_name": "ceph_lv0",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "lv_size": "21470642176",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "name": "ceph_lv0",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "tags": {
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.cluster_name": "ceph",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.crush_device_class": "",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.encrypted": "0",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.osd_id": "0",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.type": "block",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.vdo": "0"
Oct 11 03:02:57 compute-0 brave_bell[495862]:             },
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "type": "block",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "vg_name": "ceph_vg0"
Oct 11 03:02:57 compute-0 brave_bell[495862]:         }
Oct 11 03:02:57 compute-0 brave_bell[495862]:     ],
Oct 11 03:02:57 compute-0 brave_bell[495862]:     "1": [
Oct 11 03:02:57 compute-0 brave_bell[495862]:         {
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "devices": [
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "/dev/loop4"
Oct 11 03:02:57 compute-0 brave_bell[495862]:             ],
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "lv_name": "ceph_lv1",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "lv_size": "21470642176",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "name": "ceph_lv1",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "tags": {
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.cluster_name": "ceph",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.crush_device_class": "",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.encrypted": "0",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.osd_id": "1",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.type": "block",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.vdo": "0"
Oct 11 03:02:57 compute-0 brave_bell[495862]:             },
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "type": "block",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "vg_name": "ceph_vg1"
Oct 11 03:02:57 compute-0 brave_bell[495862]:         }
Oct 11 03:02:57 compute-0 brave_bell[495862]:     ],
Oct 11 03:02:57 compute-0 brave_bell[495862]:     "2": [
Oct 11 03:02:57 compute-0 brave_bell[495862]:         {
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "devices": [
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "/dev/loop5"
Oct 11 03:02:57 compute-0 brave_bell[495862]:             ],
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "lv_name": "ceph_lv2",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "lv_size": "21470642176",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "name": "ceph_lv2",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "tags": {
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.cluster_name": "ceph",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.crush_device_class": "",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.encrypted": "0",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.osd_id": "2",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.type": "block",
Oct 11 03:02:57 compute-0 brave_bell[495862]:                 "ceph.vdo": "0"
Oct 11 03:02:57 compute-0 brave_bell[495862]:             },
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "type": "block",
Oct 11 03:02:57 compute-0 brave_bell[495862]:             "vg_name": "ceph_vg2"
Oct 11 03:02:57 compute-0 brave_bell[495862]:         }
Oct 11 03:02:57 compute-0 brave_bell[495862]:     ]
Oct 11 03:02:57 compute-0 brave_bell[495862]: }
Oct 11 03:02:57 compute-0 systemd[1]: libpod-df4e251b028da885b20ac33f7a6e820f3037e7bc53b3b2914d96c4445f8e4716.scope: Deactivated successfully.
Oct 11 03:02:57 compute-0 podman[495846]: 2025-10-11 03:02:57.099689539 +0000 UTC m=+1.119022975 container died df4e251b028da885b20ac33f7a6e820f3037e7bc53b3b2914d96c4445f8e4716 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_bell, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 03:02:57 compute-0 systemd[1]: var-lib-containers-storage-overlay-4a39c7b0458f37f66fccd828cb774603beaef0ddde5d4d98abc576200bdb94d8-merged.mount: Deactivated successfully.
Oct 11 03:02:57 compute-0 podman[495846]: 2025-10-11 03:02:57.211595156 +0000 UTC m=+1.230928582 container remove df4e251b028da885b20ac33f7a6e820f3037e7bc53b3b2914d96c4445f8e4716 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=brave_bell, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 03:02:57 compute-0 systemd[1]: libpod-conmon-df4e251b028da885b20ac33f7a6e820f3037e7bc53b3b2914d96c4445f8e4716.scope: Deactivated successfully.
Oct 11 03:02:57 compute-0 sudo[495744]: pam_unix(sudo:session): session closed for user root
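The brave_bell JSON above is the output of ceph-volume lvm list --format json: one entry per OSD id, each carrying the LV path, the backing loop device, and the ceph.* LV tags that tie the LV to this cluster's fsid. Useful fields can be extracted with jq (a sketch, assuming jq is installed):

    # osd id, LV path, and osd_fsid for each OSD found on this host
    cephadm ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json \
      | jq -r 'to_entries[] | "\(.key) \(.value[0].lv_path) \(.value[0].tags["ceph.osd_fsid"])"'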
Oct 11 03:02:57 compute-0 ceph-mon[191930]: pgmap v2564: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:57 compute-0 sudo[495883]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:02:57 compute-0 sudo[495883]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:02:57 compute-0 sudo[495883]: pam_unix(sudo:session): session closed for user root
Oct 11 03:02:57 compute-0 sudo[495908]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:02:57 compute-0 sudo[495908]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:02:57 compute-0 sudo[495908]: pam_unix(sudo:session): session closed for user root
Oct 11 03:02:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 03:02:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 03:02:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 03:02:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 03:02:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 03:02:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 03:02:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 03:02:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 03:02:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 03:02:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
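The rbd_support module reloads its mirror-snapshot and trash-purge schedules for every rbd pool (vms, volumes, backups, images) each time it refreshes. Any loaded schedules are visible from the rbd CLI (a sketch):

    # list schedules across all pools; empty output means none are configured
    rbd mirror snapshot schedule ls --recursive
    rbd trash purge schedule ls --recursive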
Oct 11 03:02:57 compute-0 sudo[495933]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:02:57 compute-0 sudo[495933]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:02:57 compute-0 sudo[495933]: pam_unix(sudo:session): session closed for user root
Oct 11 03:02:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2565: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:57 compute-0 sudo[495958]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 03:02:57 compute-0 sudo[495958]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:02:57 compute-0 nova_compute[356901]: 2025-10-11 03:02:57.804 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:02:58 compute-0 nova_compute[356901]: 2025-10-11 03:02:58.109 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:02:58 compute-0 podman[496021]: 2025-10-11 03:02:58.239679499 +0000 UTC m=+0.056063557 container create b5cef72e16f1e999d6643c38fd37b069eb022679cb80d0e5678698cbfb809510 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_ellis, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True)
Oct 11 03:02:58 compute-0 systemd[1]: Started libpod-conmon-b5cef72e16f1e999d6643c38fd37b069eb022679cb80d0e5678698cbfb809510.scope.
Oct 11 03:02:58 compute-0 podman[496021]: 2025-10-11 03:02:58.218828646 +0000 UTC m=+0.035212734 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:02:58 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:02:58 compute-0 podman[496021]: 2025-10-11 03:02:58.357367095 +0000 UTC m=+0.173751173 container init b5cef72e16f1e999d6643c38fd37b069eb022679cb80d0e5678698cbfb809510 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_ellis, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 03:02:58 compute-0 podman[496021]: 2025-10-11 03:02:58.366879787 +0000 UTC m=+0.183263845 container start b5cef72e16f1e999d6643c38fd37b069eb022679cb80d0e5678698cbfb809510 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_ellis, ceph=True, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 03:02:58 compute-0 podman[496021]: 2025-10-11 03:02:58.37247289 +0000 UTC m=+0.188857008 container attach b5cef72e16f1e999d6643c38fd37b069eb022679cb80d0e5678698cbfb809510 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_ellis, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, io.buildah.version=1.39.3, ceph=True)
Oct 11 03:02:58 compute-0 lucid_ellis[496051]: 167 167
Oct 11 03:02:58 compute-0 systemd[1]: libpod-b5cef72e16f1e999d6643c38fd37b069eb022679cb80d0e5678698cbfb809510.scope: Deactivated successfully.
Oct 11 03:02:58 compute-0 podman[496021]: 2025-10-11 03:02:58.375353945 +0000 UTC m=+0.191738003 container died b5cef72e16f1e999d6643c38fd37b069eb022679cb80d0e5678698cbfb809510 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_ellis, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 03:02:58 compute-0 systemd[1]: var-lib-containers-storage-overlay-c619b95559aed4ff69a066de0b5934ddd289c7f4ac4bb1354c62530a407a0ec8-merged.mount: Deactivated successfully.
Oct 11 03:02:58 compute-0 podman[496021]: 2025-10-11 03:02:58.432669713 +0000 UTC m=+0.249053771 container remove b5cef72e16f1e999d6643c38fd37b069eb022679cb80d0e5678698cbfb809510 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=lucid_ellis, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 03:02:58 compute-0 podman[496038]: 2025-10-11 03:02:58.457058092 +0000 UTC m=+0.153546352 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_id=ovn_metadata_agent, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3)
Oct 11 03:02:58 compute-0 podman[496035]: 2025-10-11 03:02:58.464613169 +0000 UTC m=+0.158169563 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>)
Oct 11 03:02:58 compute-0 systemd[1]: libpod-conmon-b5cef72e16f1e999d6643c38fd37b069eb022679cb80d0e5678698cbfb809510.scope: Deactivated successfully.
Oct 11 03:02:58 compute-0 podman[496040]: 2025-10-11 03:02:58.468128534 +0000 UTC m=+0.147274316 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true, org.label-schema.build-date=20251007, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.41.4, managed_by=edpm_ansible, config_id=edpm, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team)
Oct 11 03:02:58 compute-0 podman[496039]: 2025-10-11 03:02:58.513421908 +0000 UTC m=+0.196560581 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, config_id=ovn_controller, container_name=ovn_controller, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.schema-version=1.0)
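The four health_status events above carry the full edpm_ansible-rendered container definition in their config_data field, serialized as a Python-literal dict (single quotes, bare True). Below is a minimal sketch of pulling the healthcheck command and mount out of such a label, assuming the dict has been copied out of the journal; the abridged literal mirrors the ovn_metadata_agent event and is illustrative only.

    # Minimal sketch: extract the healthcheck definition from a config_data
    # label as logged by podman above. The dict uses Python literal syntax,
    # so ast.literal_eval fits; the literal here is abridged from the
    # ovn_metadata_agent event and is an illustration, not an API.
    import ast

    config_data = ast.literal_eval(
        "{'image': 'quay.io/podified-antelope-centos9/"
        "openstack-neutron-metadata-agent-ovn:current-podified', "
        "'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', "
        "'test': '/openstack/healthcheck'}, 'restart': 'always'}"
    )

    hc = config_data.get("healthcheck", {})
    print(f"healthcheck command: {hc.get('test')}")   # -> /openstack/healthcheck
    print(f"healthcheck mount:   {hc.get('mount')}")  # host dir bind-mounted at /openstack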
Oct 11 03:02:58 compute-0 podman[496138]: 2025-10-11 03:02:58.641584548 +0000 UTC m=+0.068511126 container create 1d7597b6569cdd07db476941add54422f6cd6f8b275b1d9e3cb8c14fe4af7e71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_kowalevski, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS)
Oct 11 03:02:58 compute-0 podman[496138]: 2025-10-11 03:02:58.607455859 +0000 UTC m=+0.034382497 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:02:58 compute-0 systemd[1]: Started libpod-conmon-1d7597b6569cdd07db476941add54422f6cd6f8b275b1d9e3cb8c14fe4af7e71.scope.
Oct 11 03:02:58 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:02:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0e5cf0fc78265e9ca52d8f6cb92b3b93705ab9dd3f6d9e59009f78a16928e6d2/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 03:02:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0e5cf0fc78265e9ca52d8f6cb92b3b93705ab9dd3f6d9e59009f78a16928e6d2/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 03:02:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0e5cf0fc78265e9ca52d8f6cb92b3b93705ab9dd3f6d9e59009f78a16928e6d2/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 03:02:58 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/0e5cf0fc78265e9ca52d8f6cb92b3b93705ab9dd3f6d9e59009f78a16928e6d2/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 03:02:58 compute-0 podman[496138]: 2025-10-11 03:02:58.770166061 +0000 UTC m=+0.197092669 container init 1d7597b6569cdd07db476941add54422f6cd6f8b275b1d9e3cb8c14fe4af7e71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_kowalevski, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 03:02:58 compute-0 podman[496138]: 2025-10-11 03:02:58.78939 +0000 UTC m=+0.216316588 container start 1d7597b6569cdd07db476941add54422f6cd6f8b275b1d9e3cb8c14fe4af7e71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_kowalevski, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, ceph=True)
Oct 11 03:02:58 compute-0 podman[496138]: 2025-10-11 03:02:58.794403115 +0000 UTC m=+0.221329733 container attach 1d7597b6569cdd07db476941add54422f6cd6f8b275b1d9e3cb8c14fe4af7e71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_kowalevski, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 03:02:59 compute-0 ceph-mon[191930]: pgmap v2565: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2566: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:02:59 compute-0 podman[157119]: time="2025-10-11T03:02:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 03:02:59 compute-0 podman[157119]: @ - - [11/Oct/2025:03:02:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 47839 "" "Go-http-client/1.1"
Oct 11 03:02:59 compute-0 podman[157119]: @ - - [11/Oct/2025:03:02:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9535 "" "Go-http-client/1.1"
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]: {
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:         "osd_id": 1,
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:         "type": "bluestore"
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:     },
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:         "osd_id": 2,
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:         "type": "bluestore"
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:     },
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:         "osd_id": 0,
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:         "type": "bluestore"
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]:     }
Oct 11 03:02:59 compute-0 crazy_kowalevski[496154]: }
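The crazy_kowalevski container prints a ceph-volume-style OSD inventory as JSON keyed by osd_uuid, which cephadm stores via the config-key commands a moment later. A minimal sketch of parsing that output, assuming it has been captured to a file named osd_list.json (the file name is hypothetical):

    # Minimal sketch: reassemble and parse the OSD inventory JSON printed
    # above, mapping each OSD id to its backing device.
    import json

    with open("osd_list.json") as f:  # hypothetical capture of the JSON above
        osds = json.load(f)

    for osd_uuid, info in sorted(osds.items(), key=lambda kv: kv[1]["osd_id"]):
        print(f"osd.{info['osd_id']}: {info['device']} "
              f"(type={info['type']}, fsid={info['ceph_fsid']})")
    # -> osd.0: /dev/mapper/ceph_vg0-ceph_lv0 (type=bluestore, fsid=3c7617c3-...)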
Oct 11 03:02:59 compute-0 systemd[1]: libpod-1d7597b6569cdd07db476941add54422f6cd6f8b275b1d9e3cb8c14fe4af7e71.scope: Deactivated successfully.
Oct 11 03:02:59 compute-0 systemd[1]: libpod-1d7597b6569cdd07db476941add54422f6cd6f8b275b1d9e3cb8c14fe4af7e71.scope: Consumed 1.114s CPU time.
Oct 11 03:02:59 compute-0 podman[496187]: 2025-10-11 03:02:59.97817467 +0000 UTC m=+0.051290031 container died 1d7597b6569cdd07db476941add54422f6cd6f8b275b1d9e3cb8c14fe4af7e71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_kowalevski, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 03:03:00 compute-0 systemd[1]: var-lib-containers-storage-overlay-0e5cf0fc78265e9ca52d8f6cb92b3b93705ab9dd3f6d9e59009f78a16928e6d2-merged.mount: Deactivated successfully.
Oct 11 03:03:00 compute-0 podman[496187]: 2025-10-11 03:03:00.069975868 +0000 UTC m=+0.143091129 container remove 1d7597b6569cdd07db476941add54422f6cd6f8b275b1d9e3cb8c14fe4af7e71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_kowalevski, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 03:03:00 compute-0 systemd[1]: libpod-conmon-1d7597b6569cdd07db476941add54422f6cd6f8b275b1d9e3cb8c14fe4af7e71.scope: Deactivated successfully.
Oct 11 03:03:00 compute-0 sudo[495958]: pam_unix(sudo:session): session closed for user root
Oct 11 03:03:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 03:03:00 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:03:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 03:03:00 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:03:00 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 3247307e-85aa-45aa-9614-3d09e0bfdd11 does not exist
Oct 11 03:03:00 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 08f099cf-bfcf-435c-a341-c947fa19b60a does not exist
Oct 11 03:03:00 compute-0 sudo[496202]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:03:00 compute-0 sudo[496202]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:03:00 compute-0 sudo[496202]: pam_unix(sudo:session): session closed for user root
Oct 11 03:03:00 compute-0 sudo[496227]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 03:03:00 compute-0 sudo[496227]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:03:00 compute-0 sudo[496227]: pam_unix(sudo:session): session closed for user root
Oct 11 03:03:00 compute-0 nova_compute[356901]: 2025-10-11 03:03:00.952 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:01 compute-0 ceph-mon[191930]: pgmap v2566: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:01 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:03:01 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:03:01 compute-0 openstack_network_exporter[374316]: ERROR   03:03:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 03:03:01 compute-0 openstack_network_exporter[374316]: ERROR   03:03:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:03:01 compute-0 openstack_network_exporter[374316]: ERROR   03:03:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:03:01 compute-0 openstack_network_exporter[374316]: ERROR   03:03:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 03:03:01 compute-0 openstack_network_exporter[374316]: 
Oct 11 03:03:01 compute-0 openstack_network_exporter[374316]: ERROR   03:03:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 03:03:01 compute-0 openstack_network_exporter[374316]: 
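The exporter errors above all reduce to one condition: no *.ctl control socket for ovs-vswitchd, ovsdb-server, or ovn-northd under the OVS run directory (unsurprising for ovn-northd, which does not run on a compute node). A minimal sketch of the same lookup, assuming the conventional /run/openvswitch location that the containers here bind-mount:

    # Minimal sketch of what the exporter's failing lookup amounts to:
    # scan the OVS run directory for daemon control sockets (*.ctl).
    # OVS_RUNDIR is the usual default; adjust for the deployment at hand.
    import glob
    import os

    OVS_RUNDIR = os.environ.get("OVS_RUNDIR", "/run/openvswitch")

    sockets = glob.glob(os.path.join(OVS_RUNDIR, "*.ctl"))
    if not sockets:
        # This is the condition behind "no control socket files found" above.
        print(f"no control socket files found under {OVS_RUNDIR}")
    else:
        for s in sockets:
            print(f"found control socket: {s}")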
Oct 11 03:03:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2567: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:03:03 compute-0 nova_compute[356901]: 2025-10-11 03:03:03.114 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:03 compute-0 ceph-mon[191930]: pgmap v2567: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2568: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:05 compute-0 ceph-mon[191930]: pgmap v2568: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2569: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:05 compute-0 nova_compute[356901]: 2025-10-11 03:03:05.956 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:03:07 compute-0 ceph-mon[191930]: pgmap v2569: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0009191400908380543 of space, bias 1.0, pg target 0.2757420272514163 quantized to 32 (current 32)
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
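The autoscaler figures above are self-consistent if the raw pg target is usage_ratio x mon_target_pg_per_osd x OSD count x bias, with mon_target_pg_per_osd at its default of 100 and the three OSDs listed earlier: 7.19e-06 x 300 gives the 0.00216 logged for '.mgr', and 5.09e-07 x 300 x 4.0 gives the 0.00061 for cephfs.cephfs.meta. A sketch under those assumptions follows; the real module additionally applies per-pool minimums and only acts on large deviations, which is why most pools stay quantized at 32 despite tiny raw targets.

    # Minimal sketch of the arithmetic visible in the pg_autoscaler lines,
    # assuming mon_target_pg_per_osd=100 and the 3 OSDs inventoried earlier.
    def raw_pg_target(usage_ratio, bias, pg_per_osd=100, num_osds=3):
        return usage_ratio * pg_per_osd * num_osds * bias

    def quantize(target):
        # Round up to the nearest power of two, floor of 1. The mgr module
        # layers extra floors/hysteresis on top, not reproduced here.
        n = 1
        while n < target:
            n *= 2
        return n

    for pool, ratio, bias in [
        (".mgr", 7.185749983720779e-06, 1.0),
        ("vms", 0.0005513950275118838, 1.0),
        ("cephfs.cephfs.meta", 5.087256625643029e-07, 4.0),
    ]:
        t = raw_pg_target(ratio, bias)
        print(f"{pool}: raw target {t:.6f} -> quantized {quantize(t)}")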
Oct 11 03:03:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2570: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:08 compute-0 nova_compute[356901]: 2025-10-11 03:03:08.118 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:08 compute-0 podman[496252]: 2025-10-11 03:03:08.260741201 +0000 UTC m=+0.146012524 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, container_name=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, config_id=multipathd, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true)
Oct 11 03:03:08 compute-0 podman[496253]: 2025-10-11 03:03:08.281096178 +0000 UTC m=+0.152566839 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_managed=true, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, config_id=iscsid)
Oct 11 03:03:09 compute-0 ceph-mon[191930]: pgmap v2570: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2571: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:10 compute-0 nova_compute[356901]: 2025-10-11 03:03:10.959 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:11 compute-0 ceph-mon[191930]: pgmap v2571: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 03:03:11 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 4800.1 total, 600.0 interval
                                            Cumulative writes: 10K writes, 39K keys, 10K commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 10K writes, 2968 syncs, 3.55 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 387 writes, 929 keys, 387 commit groups, 1.0 writes per commit group, ingest: 0.30 MB, 0.00 MB/s
                                            Interval WAL: 387 writes, 184 syncs, 2.10 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
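The interval WAL line above reports 387 writes over 184 syncs, which is the 2.10 writes per sync RocksDB prints. A small sketch recomputing that ratio from the logged counters, with the regex tied to the exact line shape shown:

    # Minimal sketch: parse the WAL counters from the stats dump above and
    # recompute the writes-per-sync ratio the OSD reports.
    import re

    line = ("Interval WAL: 387 writes, 184 syncs, 2.10 writes per sync, "
            "written: 0.00 GB, 0.00 MB/s")
    m = re.search(r"(\d+)\s+writes,\s+(\d+)\s+syncs", line)
    if m:
        writes, syncs = int(m.group(1)), int(m.group(2))
        print(f"writes per sync: {writes / syncs:.2f}")  # -> 2.10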
Oct 11 03:03:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2572: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:03:13 compute-0 nova_compute[356901]: 2025-10-11 03:03:13.120 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:13 compute-0 ceph-mon[191930]: pgmap v2572: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2573: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.878 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads available to execute them, so the polling process can be expected to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.879 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.879 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.880 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.881 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.882 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.882 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.882 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.882 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.882 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2dc2b9e0>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.889 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.890 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.890 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.890 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.891 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.892 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T03:03:13.891048) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.899 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2856 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.901 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
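The lines from 03:03:13.880 through .901 trace one complete pollster pass: discovery via local_instances, a coordination check that finds no hashring, a heartbeat update, one sample (volume: 2856), and the finish message. A hedged sketch of that control flow, using the single worker thread the manager reported; the names here are illustrative stand-ins, not the ceilometer API.

    # Minimal sketch of the per-pollster cycle visible above: discover
    # instances, record a heartbeat, then turn stats into samples. Names
    # (run_pollster, discover, poll) are hypothetical stand-ins.
    from concurrent.futures import ThreadPoolExecutor
    from datetime import datetime, timezone

    def run_pollster(name, discover, poll):
        instances = discover()                  # e.g. local_instances via libvirt
        heartbeat = datetime.now(timezone.utc)  # "Updated heartbeat for <name>"
        samples = [poll(i) for i in instances]  # "<uuid>/<name> volume: N"
        return name, heartbeat, samples

    with ThreadPoolExecutor(max_workers=1) as executor:  # "[1] threads" per the log
        future = executor.submit(
            run_pollster,
            "network.incoming.bytes",
            lambda: ["0cc56d17-ec3a-4408-bccb-91b29427379e"],
            lambda inst: (inst, 2856),
        )
        print(future.result())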
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.901 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.902 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.902 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.902 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.902 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.903 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T03:03:13.902715) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.903 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 25 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.903 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.904 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.904 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.904 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.904 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.905 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.905 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.905 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.906 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.906 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.906 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.907 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.907 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.907 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.908 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.908 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.908 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.909 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T03:03:13.904926) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.909 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.909 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T03:03:13.907401) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.909 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.909 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.910 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T03:03:13.909847) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.934 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.935 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.935 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.936 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
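disk.device.capacity emitted three samples for the same instance, one per attached block device: two 1073741824-byte (1 GiB) volumes and one 485376-byte device, plausibly a config drive. The sketch below only checks that arithmetic; the device names are assumptions, since the log does not show them.

    GIB = 1024 ** 3

    # Hypothetical device names; only the byte counts come from the log.
    capacities = {"vda": 1073741824, "vdb": 1073741824, "hda": 485376}
    for dev, nbytes in capacities.items():
        print(f"disk.device.capacity {dev}: {nbytes} B ({nbytes / GIB:.6f} GiB)")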
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.936 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.937 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.937 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.937 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.937 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.938 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T03:03:13.937624) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.994 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.995 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.995 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.996 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.996 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.996 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.996 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.996 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.996 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.996 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.997 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.997 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.997 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T03:03:13.996625) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.997 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
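The disk.device.read.latency volumes above (1873916781 and so on) are cumulative counters; libvirt reports total read time in nanoseconds, so a meaningful per-request figure comes from differencing two polls. A sketch under that assumption, pairing the logged latency with the 840 read requests logged for the first device below; the second reading is invented for illustration.

    # First reading taken from the log; the later reading is hypothetical.
    prev_ns, prev_reqs = 1_873_916_781, 840
    curr_ns, curr_reqs = 1_874_916_781, 845

    d_reqs = curr_reqs - prev_reqs
    if d_reqs > 0:
        avg_ms = (curr_ns - prev_ns) / d_reqs / 1e6   # ns -> ms per request
        print(f"average read latency over the interval: {avg_ms:.2f} ms/request")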
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.997 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.997 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.997 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.998 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.998 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.998 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.998 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T03:03:13.998122) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.998 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.998 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.999 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.999 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.999 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.999 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.999 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.999 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.999 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:13.999 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.000 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.000 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.000 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.000 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.000 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.000 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.000 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.000 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.001 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.001 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.001 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.001 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.002 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.002 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.002 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.002 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.002 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.002 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.002 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.003 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.003 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T03:03:13.999594) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.003 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.003 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.003 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T03:03:14.000840) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.003 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.003 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.003 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T03:03:14.002298) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.003 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.004 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T03:03:14.003705) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.026 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.027 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
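power.state reports volume: 1, which matches libvirt's virDomainState enum, where 1 is VIR_DOMAIN_RUNNING. The mapping below mirrors libvirt's documented states; that ceilometer passes the raw enum value through is an inference from the logged number, not something the log itself proves.

    # libvirt virDomainState values (see libvirt's libvirt-domain.h).
    VIR_DOMAIN_STATES = {
        0: "nostate", 1: "running", 2: "blocked", 3: "paused",
        4: "shutdown", 5: "shutoff", 6: "crashed", 7: "pmsuspended",
    }
    print(VIR_DOMAIN_STATES[1])   # -> "running", the logged power.state of 1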
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.027 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.027 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.027 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.027 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.027 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.027 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.027 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.028 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.028 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.028 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.028 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.028 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.028 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T03:03:14.027549) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.028 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.028 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.029 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.029 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
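The *.delta meters report the change in a cumulative counter since the previous poll, so the logged volume of 0 just means the instance's rx-byte counter did not move between cycles. A minimal sketch, with the counter values invented:

    def counter_delta(previous, current):
        # Counters can reset (e.g. instance reboot); clamp rather than go negative.
        return max(current - previous, 0)

    print(counter_delta(previous=448, current=448))   # -> 0, as logged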
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.029 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.029 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
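The skip above is the manager's fast path: when discovery hands the rate pollster nothing that still needs processing this cycle, it logs the skip and moves on without polling. One plausible reading of that guard, not ceilometer's actual code:

    def maybe_poll(name, pending, poll):
        if not pending:
            print(f"Skip pollster {name}, no new resources found this cycle")
            return []
        return [poll(r) for r in pending]

    maybe_poll("network.incoming.bytes.rate", pending=[], poll=lambda r: r)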
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.029 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.029 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.029 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T03:03:14.028899) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.029 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.029 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.030 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.030 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.030 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.030 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.030 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.030 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.030 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.030 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.031 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T03:03:14.029971) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.031 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.031 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T03:03:14.030737) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.031 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.031 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.031 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.031 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.031 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.031 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.032 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.032 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.032 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.032 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.032 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.032 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.032 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T03:03:14.031642) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.033 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.033 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.033 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.033 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T03:03:14.032824) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.033 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.033 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.033 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.033 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.033 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T03:03:14.033652) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.034 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.034 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.034 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.034 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.034 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.034 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.034 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.034 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T03:03:14.034543) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.034 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.035 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.035 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.035 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.035 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.035 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.035 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.035 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.035 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.036 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.036 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.036 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.036 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T03:03:14.035755) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.036 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.036 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.036 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.036 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 74190000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.036 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T03:03:14.036631) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.037 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
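The cpu meter's volume of 74190000000 is cumulative guest CPU time in nanoseconds, i.e. roughly 74.19 s consumed so far. Utilisation has to be derived by differencing two polls; in the sketch below the second reading, the 300 s interval, and the single vCPU are all assumptions.

    prev_ns = 74_190_000_000          # from the log
    curr_ns = 74_490_000_000          # hypothetical next poll: +300 ms of CPU time
    interval_s, vcpus = 300, 1        # assumed polling interval and vCPU count

    util_pct = (curr_ns - prev_ns) / 1e9 / (interval_s * vcpus) * 100
    print(f"cpu utilisation over the interval: {util_pct:.3f}%")   # -> 0.100%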
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.037 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.037 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.037 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.037 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.037 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.037 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2482 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.037 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.037 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.038 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.038 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.038 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.038 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T03:03:14.037482) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.038 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.038 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.80859375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.038 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T03:03:14.038367) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.038 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.038 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.038 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.039 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.039 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.039 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.039 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.039 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.039 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.039 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.039 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.039 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.039 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.039 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.039 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.040 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.040 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.040 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.040 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.040 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.040 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.040 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.040 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.040 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.040 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.040 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.040 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.040 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:03:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:03:14.040 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
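The ceilometer lines above trace one complete polling cycle: per-pollster discovery, a coordination check, a heartbeat update, sample generation, and a closing "Finished processing" sweep over every pollster. A minimal sketch of that control flow, with stand-in names rather than ceilometer's real classes (a reading aid under those assumptions, not the actual implementation):

import datetime
import logging

LOG = logging.getLogger(__name__)


def run_polling_task(pollsters, discover, publish, heartbeats):
    # Shape of the cycle logged above; 'discover', 'publish' and the
    # pollster objects are hypothetical stand-ins.
    for pollster in pollsters:
        resources = discover(pollster)  # "Executing discovery process ..."
        if not resources:
            LOG.debug("Skip pollster %s, no new resources found this cycle",
                      pollster.name)
            continue
        # "Pollster heartbeat update: <name>" / "Updated heartbeat for <name>"
        heartbeats[pollster.name] = datetime.datetime.utcnow().isoformat()
        for sample in pollster.get_samples(resources):
            publish(sample)  # e.g. ".../network.outgoing.bytes volume: 2482"
        LOG.info("Finished polling pollster %s", pollster.name)
    for pollster in pollsters:
        LOG.debug("Finished processing pollster [%s]", pollster.name)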
Oct 11 03:03:15 compute-0 podman[496294]: 2025-10-11 03:03:15.200982311 +0000 UTC m=+0.067586606 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm)
Oct 11 03:03:15 compute-0 podman[496292]: 2025-10-11 03:03:15.199149541 +0000 UTC m=+0.089694030 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, managed_by=edpm_ansible)
Oct 11 03:03:15 compute-0 podman[496293]: 2025-10-11 03:03:15.218857797 +0000 UTC m=+0.105969683 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, build-date=2025-08-20T13:12:41, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., url=https://catalog.redhat.com/en/search?searchType=containers, vcs-type=git, distribution-scope=public, maintainer=Red Hat, Inc., vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, com.redhat.component=ubi9-minimal-container, container_name=openstack_network_exporter, io.openshift.expose-services=, io.openshift.tags=minimal rhel9, architecture=x86_64, config_id=edpm, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, managed_by=edpm_ansible, name=ubi9-minimal, release=1755695350, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.6, io.buildah.version=1.33.7)
Oct 11 03:03:15 compute-0 ceph-mon[191930]: pgmap v2573: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2574: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:15 compute-0 nova_compute[356901]: 2025-10-11 03:03:15.963 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:03:17 compute-0 ceph-mon[191930]: pgmap v2574: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2575: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:18 compute-0 nova_compute[356901]: 2025-10-11 03:03:18.126 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 03:03:18 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 4800.1 total, 600.0 interval
                                            Cumulative writes: 11K writes, 45K keys, 11K commit groups, 1.0 writes per commit group, ingest: 0.04 GB, 0.01 MB/s
                                            Cumulative WAL: 11K writes, 3324 syncs, 3.60 writes per sync, written: 0.04 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 478 writes, 1385 keys, 478 commit groups, 1.0 writes per commit group, ingest: 0.40 MB, 0.00 MB/s
                                            Interval WAL: 478 writes, 226 syncs, 2.12 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
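The per-sync ratio in that dump is a straight division of the counters above it; the interval figures are exact, while the cumulative "11K" is rounded, which is why only the interval ratio reproduces cleanly:

# "Interval WAL: 478 writes, 226 syncs, 2.12 writes per sync"
writes, syncs = 478, 226
print(round(writes / syncs, 2))  # 2.12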
Oct 11 03:03:18 compute-0 nova_compute[356901]: 2025-10-11 03:03:18.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
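The recurring "Running periodic task ComputeManager._*" lines all come from oslo.service's periodic-task machinery (run_periodic_tasks in oslo_service/periodic_task.py, per the paths in the log). A minimal sketch of how such a task is declared; the class, the task body, and the spacing value are illustrative assumptions:

from oslo_config import cfg
from oslo_service import periodic_task


class DemoManager(periodic_task.PeriodicTasks):
    def __init__(self, conf):
        super().__init__(conf)

    @periodic_task.periodic_task(spacing=60)
    def _instance_usage_audit(self, context):
        pass  # nova's real task lives in ComputeManager


manager = DemoManager(cfg.CONF)
manager.run_periodic_tasks(context=None)  # the loop that logs "Running periodic task ..."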
Oct 11 03:03:19 compute-0 ceph-mon[191930]: pgmap v2575: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2576: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:20 compute-0 nova_compute[356901]: 2025-10-11 03:03:20.968 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:21 compute-0 ceph-mon[191930]: pgmap v2576: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2577: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:03:22 compute-0 nova_compute[356901]: 2025-10-11 03:03:22.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:03:23 compute-0 nova_compute[356901]: 2025-10-11 03:03:23.129 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:23 compute-0 ceph-mon[191930]: pgmap v2577: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2578: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:25 compute-0 podman[496354]: 2025-10-11 03:03:25.246524361 +0000 UTC m=+0.135455139 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9, maintainer=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, version=9.4, config_id=edpm, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, managed_by=edpm_ansible, summary=Provides the latest release of Red Hat Universal Base Image 9., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, container_name=kepler, vendor=Red Hat, Inc., build-date=2024-09-18T21:23:30, com.redhat.component=ubi9-container, io.buildah.version=1.29.0, name=ubi9, io.openshift.expose-services=, io.openshift.tags=base rhel9, release=1214.1726694543, release-0.7.12=, vcs-type=git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64)
Oct 11 03:03:25 compute-0 ceph-mon[191930]: pgmap v2578: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2579: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:25 compute-0 nova_compute[356901]: 2025-10-11 03:03:25.898 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:03:25 compute-0 nova_compute[356901]: 2025-10-11 03:03:25.972 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 03:03:26 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 4800.2 total, 600.0 interval
                                            Cumulative writes: 9892 writes, 38K keys, 9892 commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 9892 writes, 2660 syncs, 3.72 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 468 writes, 1210 keys, 468 commit groups, 1.0 writes per commit group, ingest: 0.45 MB, 0.00 MB/s
                                            Interval WAL: 468 writes, 221 syncs, 2.12 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 03:03:26 compute-0 ceph-mgr[192233]: [devicehealth INFO root] Check health
Oct 11 03:03:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:03:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:03:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:03:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:03:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:03:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:03:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:03:27 compute-0 ceph-mon[191930]: pgmap v2579: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2580: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 03:03:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1514179024' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 03:03:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 03:03:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/1514179024' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
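The df and "osd pool get-quota" commands audited above are ordinary monitor commands issued by client.openstack. A sketch of sending the same two through the librados Python binding; the conffile path and the presence of a client.openstack keyring are assumptions:

import json

import rados

# Assumes a reachable cluster and a keyring for client.openstack.
with rados.Rados(conffile='/etc/ceph/ceph.conf', name='client.openstack') as cluster:
    ret, out, errs = cluster.mon_command(
        json.dumps({"prefix": "df", "format": "json"}), b'')
    df = json.loads(out)
    ret, out, errs = cluster.mon_command(
        json.dumps({"prefix": "osd pool get-quota",
                    "pool": "volumes", "format": "json"}), b'')
    quota = json.loads(out)
    print(quota)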
Oct 11 03:03:27 compute-0 nova_compute[356901]: 2025-10-11 03:03:27.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:03:28 compute-0 nova_compute[356901]: 2025-10-11 03:03:28.135 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1514179024' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 03:03:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/1514179024' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 03:03:29 compute-0 podman[496373]: 2025-10-11 03:03:29.227160563 +0000 UTC m=+0.112132075 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 03:03:29 compute-0 podman[496376]: 2025-10-11 03:03:29.254602952 +0000 UTC m=+0.118298557 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Oct 11 03:03:29 compute-0 podman[496375]: 2025-10-11 03:03:29.294696656 +0000 UTC m=+0.168659677 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, container_name=ceilometer_agent_compute, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, config_id=edpm, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 03:03:29 compute-0 podman[496374]: 2025-10-11 03:03:29.30705074 +0000 UTC m=+0.183353238 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_controller, org.label-schema.vendor=CentOS, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 03:03:29 compute-0 ceph-mon[191930]: pgmap v2580: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2581: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:29 compute-0 podman[157119]: time="2025-10-11T03:03:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 03:03:29 compute-0 podman[157119]: @ - - [11/Oct/2025:03:03:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 03:03:29 compute-0 podman[157119]: @ - - [11/Oct/2025:03:03:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9108 "" "Go-http-client/1.1"
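Those two GETs are podman's libpod REST API answering over its unix socket, the same socket the podman_exporter config above sets as CONTAINER_HOST (unix:///run/podman/podman.sock). A standard-library sketch of the containers/json call from the first line:

import http.client
import json
import socket


class UnixHTTPConnection(http.client.HTTPConnection):
    """http.client over an AF_UNIX socket."""

    def __init__(self, path):
        super().__init__('localhost')
        self._path = path

    def connect(self):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(self._path)
        self.sock = sock


conn = UnixHTTPConnection('/run/podman/podman.sock')
conn.request('GET', '/v4.9.3/libpod/containers/json?all=true')
print(len(json.loads(conn.getresponse().read())))  # number of containers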
Oct 11 03:03:30 compute-0 nova_compute[356901]: 2025-10-11 03:03:30.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:03:30 compute-0 nova_compute[356901]: 2025-10-11 03:03:30.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 03:03:30 compute-0 nova_compute[356901]: 2025-10-11 03:03:30.976 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:31 compute-0 ceph-mon[191930]: pgmap v2581: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:31 compute-0 openstack_network_exporter[374316]: ERROR   03:03:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:03:31 compute-0 openstack_network_exporter[374316]: ERROR   03:03:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:03:31 compute-0 openstack_network_exporter[374316]: ERROR   03:03:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 03:03:31 compute-0 openstack_network_exporter[374316]: ERROR   03:03:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 03:03:31 compute-0 openstack_network_exporter[374316]: ERROR   03:03:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
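Those appctl failures mean the exporter found no *.ctl control sockets where it expected them; its volumes (logged earlier) map /var/run/openvswitch and /var/lib/openvswitch/ovn into the container as /run/openvswitch and /run/ovn. A quick diagnostic sketch over those host paths (paths taken from that config, not verified here):

import glob

for pattern in ('/var/run/openvswitch/*.ctl', '/var/lib/openvswitch/ovn/*.ctl'):
    hits = glob.glob(pattern)
    print(pattern, '->', hits or 'no control sockets')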
Oct 11 03:03:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2582: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:03:33 compute-0 nova_compute[356901]: 2025-10-11 03:03:33.140 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:33 compute-0 ceph-mon[191930]: pgmap v2582: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2583: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:35 compute-0 ceph-mon[191930]: pgmap v2583: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2584: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:35 compute-0 nova_compute[356901]: 2025-10-11 03:03:35.980 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:36 compute-0 nova_compute[356901]: 2025-10-11 03:03:36.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:03:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:03:37 compute-0 ceph-mon[191930]: pgmap v2584: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2585: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:37 compute-0 nova_compute[356901]: 2025-10-11 03:03:37.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:03:37 compute-0 nova_compute[356901]: 2025-10-11 03:03:37.899 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 03:03:37 compute-0 nova_compute[356901]: 2025-10-11 03:03:37.899 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 03:03:38 compute-0 nova_compute[356901]: 2025-10-11 03:03:38.145 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:38 compute-0 nova_compute[356901]: 2025-10-11 03:03:38.257 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 03:03:38 compute-0 nova_compute[356901]: 2025-10-11 03:03:38.258 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 03:03:38 compute-0 nova_compute[356901]: 2025-10-11 03:03:38.259 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 03:03:38 compute-0 nova_compute[356901]: 2025-10-11 03:03:38.260 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 03:03:39 compute-0 podman[496456]: 2025-10-11 03:03:39.266493025 +0000 UTC m=+0.145886027 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, container_name=multipathd, org.label-schema.build-date=20251009, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 03:03:39 compute-0 podman[496457]: 2025-10-11 03:03:39.284170465 +0000 UTC m=+0.158278996 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, config_id=iscsid, container_name=iscsid, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.build-date=20251009)
Oct 11 03:03:39 compute-0 ceph-mon[191930]: pgmap v2585: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #126. Immutable memtables: 0.
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:03:39.449829) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 75] Flushing memtable with next log file: 126
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151819449891, "job": 75, "event": "flush_started", "num_memtables": 1, "num_entries": 1105, "num_deletes": 251, "total_data_size": 1676355, "memory_usage": 1708048, "flush_reason": "Manual Compaction"}
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 75] Level-0 flush table #127: started
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151819467197, "cf_name": "default", "job": 75, "event": "table_file_creation", "file_number": 127, "file_size": 1649762, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 52212, "largest_seqno": 53316, "table_properties": {"data_size": 1644345, "index_size": 2876, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1477, "raw_key_size": 11323, "raw_average_key_size": 19, "raw_value_size": 1633581, "raw_average_value_size": 2841, "num_data_blocks": 129, "num_entries": 575, "num_filter_entries": 575, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760151707, "oldest_key_time": 1760151707, "file_creation_time": 1760151819, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 127, "seqno_to_time_mapping": "N/A"}}
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 75] Flush lasted 17714 microseconds, and 9350 cpu microseconds.
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:03:39.467545) [db/flush_job.cc:967] [default] [JOB 75] Level-0 flush table #127: 1649762 bytes OK
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:03:39.467571) [db/memtable_list.cc:519] [default] Level-0 commit table #127 started
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:03:39.469987) [db/memtable_list.cc:722] [default] Level-0 commit table #127: memtable #1 done
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:03:39.470009) EVENT_LOG_v1 {"time_micros": 1760151819470002, "job": 75, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:03:39.470032) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 75] Try to delete WAL files size 1671249, prev total WAL file size 1671249, number of live WAL files 2.
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000123.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:03:39.471586) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '7061786F730035303230' seq:72057594037927935, type:22 .. '7061786F730035323732' seq:0, type:0; will stop at (end)
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 76] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 75 Base level 0, inputs: [127(1611KB)], [125(9005KB)]
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151819471665, "job": 76, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [127], "files_L6": [125], "score": -1, "input_data_size": 10871555, "oldest_snapshot_seqno": -1}
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 76] Generated table #128: 6658 keys, 9107843 bytes, temperature: kUnknown
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151819555640, "cf_name": "default", "job": 76, "event": "table_file_creation", "file_number": 128, "file_size": 9107843, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 9065307, "index_size": 24759, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 16709, "raw_key_size": 174714, "raw_average_key_size": 26, "raw_value_size": 8946830, "raw_average_value_size": 1343, "num_data_blocks": 979, "num_entries": 6658, "num_filter_entries": 6658, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760151819, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 128, "seqno_to_time_mapping": "N/A"}}
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:03:39.556114) [db/compaction/compaction_job.cc:1663] [default] [JOB 76] Compacted 1@0 + 1@6 files to L6 => 9107843 bytes
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:03:39.564790) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 129.3 rd, 108.3 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(1.6, 8.8 +0.0 blob) out(8.7 +0.0 blob), read-write-amplify(12.1) write-amplify(5.5) OK, records in: 7172, records dropped: 514 output_compression: NoCompression
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:03:39.564826) EVENT_LOG_v1 {"time_micros": 1760151819564810, "job": 76, "event": "compaction_finished", "compaction_time_micros": 84110, "compaction_time_cpu_micros": 39275, "output_level": 6, "num_output_files": 1, "total_output_size": 9107843, "num_input_records": 7172, "num_output_records": 6658, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000127.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151819565591, "job": 76, "event": "table_file_deletion", "file_number": 127}
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000125.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151819569546, "job": 76, "event": "table_file_deletion", "file_number": 125}
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:03:39.471197) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:03:39.569849) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:03:39.569858) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:03:39.569862) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:03:39.569865) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 03:03:39 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:03:39.569870) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
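Each EVENT_LOG_v1 payload above is a JSON object after a fixed marker, so the flush/compaction history can be scraped straight out of the journal. The sketch below does that, then re-derives the amplification figures the job-76 summary reports (the level-6 input size is inferred as input_data_size minus the L0 file):

import json


def event_payloads(lines):
    # Lines look like: '... rocksdb: EVENT_LOG_v1 {"time_micros": ...}'
    marker = 'EVENT_LOG_v1 '
    for line in lines:
        i = line.find(marker)
        if i != -1:
            yield json.loads(line[i + len(marker):])


# Byte counts from job 76 above: L0 input 1649762, input_data_size 10871555,
# output 9107843; level-6 input = 10871555 - 1649762 = 9221793.
l0_in, l6_in, out = 1649762, 9221793, 9107843
print(round(out / l0_in, 1))                    # 5.5  -> "write-amplify(5.5)"
print(round((l0_in + l6_in + out) / l0_in, 1))  # 12.1 -> "read-write-amplify(12.1)"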
Oct 11 03:03:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2586: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:40 compute-0 ceph-mon[191930]: pgmap v2586: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:40 compute-0 nova_compute[356901]: 2025-10-11 03:03:40.984 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:41 compute-0 nova_compute[356901]: 2025-10-11 03:03:41.345 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 03:03:41 compute-0 nova_compute[356901]: 2025-10-11 03:03:41.363 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 03:03:41 compute-0 nova_compute[356901]: 2025-10-11 03:03:41.363 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 03:03:41 compute-0 nova_compute[356901]: 2025-10-11 03:03:41.364 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:03:41 compute-0 nova_compute[356901]: 2025-10-11 03:03:41.391 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:03:41 compute-0 nova_compute[356901]: 2025-10-11 03:03:41.392 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:03:41 compute-0 nova_compute[356901]: 2025-10-11 03:03:41.392 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 03:03:41 compute-0 nova_compute[356901]: 2025-10-11 03:03:41.393 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 03:03:41 compute-0 nova_compute[356901]: 2025-10-11 03:03:41.393 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 03:03:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2587: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 03:03:41 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3579391539' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:03:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:03:41 compute-0 nova_compute[356901]: 2025-10-11 03:03:41.953 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.559s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 03:03:42 compute-0 nova_compute[356901]: 2025-10-11 03:03:42.049 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:03:42 compute-0 nova_compute[356901]: 2025-10-11 03:03:42.049 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:03:42 compute-0 nova_compute[356901]: 2025-10-11 03:03:42.049 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:03:42 compute-0 nova_compute[356901]: 2025-10-11 03:03:42.494 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 03:03:42 compute-0 nova_compute[356901]: 2025-10-11 03:03:42.495 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3660MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 03:03:42 compute-0 nova_compute[356901]: 2025-10-11 03:03:42.495 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:03:42 compute-0 nova_compute[356901]: 2025-10-11 03:03:42.495 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:03:42 compute-0 ceph-mon[191930]: pgmap v2587: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:42 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3579391539' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:03:43 compute-0 nova_compute[356901]: 2025-10-11 03:03:43.152 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:43 compute-0 nova_compute[356901]: 2025-10-11 03:03:43.481 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 03:03:43 compute-0 nova_compute[356901]: 2025-10-11 03:03:43.483 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 03:03:43 compute-0 nova_compute[356901]: 2025-10-11 03:03:43.484 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 03:03:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2588: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:44 compute-0 nova_compute[356901]: 2025-10-11 03:03:44.387 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 03:03:44 compute-0 ceph-mon[191930]: pgmap v2588: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 03:03:44 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2709264080' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:03:44 compute-0 nova_compute[356901]: 2025-10-11 03:03:44.909 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.522s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 03:03:44 compute-0 nova_compute[356901]: 2025-10-11 03:03:44.919 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 03:03:44 compute-0 nova_compute[356901]: 2025-10-11 03:03:44.946 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 03:03:44 compute-0 nova_compute[356901]: 2025-10-11 03:03:44.948 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 03:03:44 compute-0 nova_compute[356901]: 2025-10-11 03:03:44.948 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 2.452s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 03:03:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2589: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:45 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2709264080' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:03:45 compute-0 nova_compute[356901]: 2025-10-11 03:03:45.986 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:46 compute-0 podman[496541]: 2025-10-11 03:03:46.220590653 +0000 UTC m=+0.096516371 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 03:03:46 compute-0 podman[496540]: 2025-10-11 03:03:46.24442138 +0000 UTC m=+0.138401643 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=minimal rhel9, vendor=Red Hat, Inc., io.buildah.version=1.33.7, config_id=edpm, name=ubi9-minimal, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, build-date=2025-08-20T13:12:41, url=https://catalog.redhat.com/en/search?searchType=containers, container_name=openstack_network_exporter, com.redhat.component=ubi9-minimal-container, version=9.6, io.openshift.expose-services=, managed_by=edpm_ansible, vcs-type=git, architecture=x86_64, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., release=1755695350, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b)
Oct 11 03:03:46 compute-0 podman[496539]: 2025-10-11 03:03:46.247899172 +0000 UTC m=+0.135821765 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_managed=true, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 03:03:46 compute-0 ceph-mon[191930]: pgmap v2589: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:46 compute-0 nova_compute[356901]: 2025-10-11 03:03:46.942 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:03:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:03:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2590: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:48 compute-0 nova_compute[356901]: 2025-10-11 03:03:48.157 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:48 compute-0 ceph-mon[191930]: pgmap v2590: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2591: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:50 compute-0 ceph-mon[191930]: pgmap v2591: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:50 compute-0 nova_compute[356901]: 2025-10-11 03:03:50.990 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2592: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:03:52 compute-0 ceph-mon[191930]: pgmap v2592: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:53 compute-0 nova_compute[356901]: 2025-10-11 03:03:53.164 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2593: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:54 compute-0 ceph-mon[191930]: pgmap v2593: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:03:54.900 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:03:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:03:54.901 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:03:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:03:54.902 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 03:03:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2594: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:55 compute-0 nova_compute[356901]: 2025-10-11 03:03:55.995 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:56 compute-0 podman[496604]: 2025-10-11 03:03:56.198783848 +0000 UTC m=+0.090933302 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Red Hat, Inc., com.redhat.component=ubi9-container, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vendor=Red Hat, Inc., container_name=kepler, version=9.4, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.29.0, release=1214.1726694543, io.openshift.expose-services=, name=ubi9, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.openshift.tags=base rhel9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, managed_by=edpm_ansible, io.k8s.display-name=Red Hat Universal Base Image 9, release-0.7.12=, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-type=git, config_id=edpm, build-date=2024-09-18T21:23:30, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 03:03:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:03:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:03:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:03:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:03:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:03:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:03:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_03:03:56
Oct 11 03:03:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 03:03:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 03:03:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['default.rgw.control', 'default.rgw.meta', 'volumes', 'vms', 'backups', 'images', 'default.rgw.log', 'cephfs.cephfs.data', '.mgr', '.rgw.root', 'cephfs.cephfs.meta']
Oct 11 03:03:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 03:03:56 compute-0 ceph-mon[191930]: pgmap v2594: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:03:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 03:03:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 03:03:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 03:03:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 03:03:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 03:03:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 03:03:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 03:03:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 03:03:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 03:03:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 03:03:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2595: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:58 compute-0 nova_compute[356901]: 2025-10-11 03:03:58.175 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:03:58 compute-0 ceph-mon[191930]: pgmap v2595: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2596: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:03:59 compute-0 podman[157119]: time="2025-10-11T03:03:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 03:03:59 compute-0 podman[157119]: @ - - [11/Oct/2025:03:03:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 03:03:59 compute-0 podman[157119]: @ - - [11/Oct/2025:03:03:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9112 "" "Go-http-client/1.1"
Oct 11 03:04:00 compute-0 podman[496625]: 2025-10-11 03:04:00.217145262 +0000 UTC m=+0.091107935 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, io.buildah.version=1.41.4, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, container_name=ceilometer_agent_compute, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.schema-version=1.0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 10 Base Image, tcib_managed=true)
Oct 11 03:04:00 compute-0 podman[496626]: 2025-10-11 03:04:00.239569065 +0000 UTC m=+0.093044628 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_metadata_agent, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 03:04:00 compute-0 podman[496623]: 2025-10-11 03:04:00.26217206 +0000 UTC m=+0.136988159 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 03:04:00 compute-0 podman[496624]: 2025-10-11 03:04:00.275223234 +0000 UTC m=+0.149066741 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=ovn_controller, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0)
Oct 11 03:04:00 compute-0 sudo[496711]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:04:00 compute-0 sudo[496711]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:04:00 compute-0 sudo[496711]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:00 compute-0 sudo[496736]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:04:00 compute-0 sudo[496736]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:04:00 compute-0 sudo[496736]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:00 compute-0 sudo[496761]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:04:00 compute-0 sudo[496761]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:04:00 compute-0 sudo[496761]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:00 compute-0 sudo[496786]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 03:04:00 compute-0 sudo[496786]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:04:00 compute-0 ceph-mon[191930]: pgmap v2596: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:01 compute-0 nova_compute[356901]: 2025-10-11 03:04:00.997 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:01 compute-0 openstack_network_exporter[374316]: ERROR   03:04:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:04:01 compute-0 openstack_network_exporter[374316]: ERROR   03:04:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:04:01 compute-0 openstack_network_exporter[374316]: ERROR   03:04:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 03:04:01 compute-0 openstack_network_exporter[374316]: ERROR   03:04:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 03:04:01 compute-0 openstack_network_exporter[374316]: ERROR   03:04:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 03:04:01 compute-0 sudo[496786]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 03:04:01 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:04:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 03:04:01 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 03:04:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 03:04:01 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:04:01 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 7db66d44-8548-412c-809f-1f297d575f08 does not exist
Oct 11 03:04:01 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev ee87da3c-6909-4a93-ae3a-73bbdf5f4a0a does not exist
Oct 11 03:04:01 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev c7f32e8f-1349-42aa-8657-10c5c4fdc9a1 does not exist
Oct 11 03:04:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 03:04:01 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 03:04:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 03:04:01 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 03:04:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 03:04:01 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:04:01 compute-0 sudo[496842]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:04:01 compute-0 sudo[496842]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:04:01 compute-0 sudo[496842]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2597: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:01 compute-0 sudo[496867]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:04:01 compute-0 sudo[496867]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:04:01 compute-0 sudo[496867]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:01 compute-0 sudo[496892]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:04:01 compute-0 sudo[496892]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:04:01 compute-0 sudo[496892]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:01 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:04:01 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 03:04:01 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:04:01 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 03:04:01 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 03:04:01 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:04:01 compute-0 sudo[496917]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
Oct 11 03:04:01 compute-0 sudo[496917]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:04:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:04:02 compute-0 podman[496981]: 2025-10-11 03:04:02.503904803 +0000 UTC m=+0.067535738 container create 132d272429b1d9f3ba5157f095b574336a20b0eb42fd171dcb4992b0f1882e76 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_wozniak, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True)
Oct 11 03:04:02 compute-0 podman[496981]: 2025-10-11 03:04:02.478510218 +0000 UTC m=+0.042141243 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:04:02 compute-0 systemd[1]: Started libpod-conmon-132d272429b1d9f3ba5157f095b574336a20b0eb42fd171dcb4992b0f1882e76.scope.
Oct 11 03:04:02 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:04:02 compute-0 podman[496981]: 2025-10-11 03:04:02.639736587 +0000 UTC m=+0.203367622 container init 132d272429b1d9f3ba5157f095b574336a20b0eb42fd171dcb4992b0f1882e76 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_wozniak, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 03:04:02 compute-0 podman[496981]: 2025-10-11 03:04:02.651117466 +0000 UTC m=+0.214748411 container start 132d272429b1d9f3ba5157f095b574336a20b0eb42fd171dcb4992b0f1882e76 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_wozniak, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 03:04:02 compute-0 podman[496981]: 2025-10-11 03:04:02.657424476 +0000 UTC m=+0.221055471 container attach 132d272429b1d9f3ba5157f095b574336a20b0eb42fd171dcb4992b0f1882e76 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_wozniak, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 03:04:02 compute-0 competent_wozniak[496997]: 167 167
Oct 11 03:04:02 compute-0 systemd[1]: libpod-132d272429b1d9f3ba5157f095b574336a20b0eb42fd171dcb4992b0f1882e76.scope: Deactivated successfully.
Oct 11 03:04:02 compute-0 podman[496981]: 2025-10-11 03:04:02.663039434 +0000 UTC m=+0.226670409 container died 132d272429b1d9f3ba5157f095b574336a20b0eb42fd171dcb4992b0f1882e76 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_wozniak, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 03:04:02 compute-0 systemd[1]: var-lib-containers-storage-overlay-723884d38f64acb6489208c3421b9ed5c3218ae5288d1454f380c3858673168a-merged.mount: Deactivated successfully.
Oct 11 03:04:02 compute-0 podman[496981]: 2025-10-11 03:04:02.751440905 +0000 UTC m=+0.315071850 container remove 132d272429b1d9f3ba5157f095b574336a20b0eb42fd171dcb4992b0f1882e76 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=competent_wozniak, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 03:04:02 compute-0 systemd[1]: libpod-conmon-132d272429b1d9f3ba5157f095b574336a20b0eb42fd171dcb4992b0f1882e76.scope: Deactivated successfully.
Oct 11 03:04:02 compute-0 ceph-mon[191930]: pgmap v2597: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:02 compute-0 podman[497025]: 2025-10-11 03:04:02.99664384 +0000 UTC m=+0.087941570 container create 36c735dde7784745ee76b5fb291c8cf7206204fd28b8b3186cced4edab1c62fa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_swartz, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True)
Oct 11 03:04:03 compute-0 podman[497025]: 2025-10-11 03:04:02.965344569 +0000 UTC m=+0.056642309 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:04:03 compute-0 systemd[1]: Started libpod-conmon-36c735dde7784745ee76b5fb291c8cf7206204fd28b8b3186cced4edab1c62fa.scope.
Oct 11 03:04:03 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:04:03 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d1b6438244b0cbe67b164bb262b601133df435654630e0fca8d449652c303c01/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 03:04:03 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d1b6438244b0cbe67b164bb262b601133df435654630e0fca8d449652c303c01/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 03:04:03 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d1b6438244b0cbe67b164bb262b601133df435654630e0fca8d449652c303c01/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 03:04:03 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d1b6438244b0cbe67b164bb262b601133df435654630e0fca8d449652c303c01/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 03:04:03 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d1b6438244b0cbe67b164bb262b601133df435654630e0fca8d449652c303c01/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
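
The kernel prints these xfs lines whenever podman bind-mounts paths from an xfs filesystem formatted without the bigtime feature: such filesystems store 32-bit timestamps, so 0x7fffffff is the largest representable time_t. A quick check of that limit (standard-library Python only):

    from datetime import datetime, timezone

    # 0x7fffffff is the largest 32-bit signed time_t, hence "until 2038".
    limit = 0x7fffffff
    print(datetime.fromtimestamp(limit, tz=timezone.utc))
    # -> 2038-01-19 03:14:07+00:00
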
Oct 11 03:04:03 compute-0 podman[497025]: 2025-10-11 03:04:03.171603286 +0000 UTC m=+0.262901036 container init 36c735dde7784745ee76b5fb291c8cf7206204fd28b8b3186cced4edab1c62fa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_swartz, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 03:04:03 compute-0 nova_compute[356901]: 2025-10-11 03:04:03.178 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:03 compute-0 podman[497025]: 2025-10-11 03:04:03.189574074 +0000 UTC m=+0.280871764 container start 36c735dde7784745ee76b5fb291c8cf7206204fd28b8b3186cced4edab1c62fa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_swartz, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 03:04:03 compute-0 podman[497025]: 2025-10-11 03:04:03.194396683 +0000 UTC m=+0.285694443 container attach 36c735dde7784745ee76b5fb291c8cf7206204fd28b8b3186cced4edab1c62fa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_swartz, CEPH_REF=reef, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0)
Oct 11 03:04:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2598: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:04 compute-0 cranky_swartz[497041]: --> passed data devices: 0 physical, 3 LVM
Oct 11 03:04:04 compute-0 cranky_swartz[497041]: --> relative data size: 1.0
Oct 11 03:04:04 compute-0 cranky_swartz[497041]: --> All data devices are unavailable
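
The three "-->" lines above are the report from the ceph-volume batch run that cephadm launched in the cranky_swartz container: three LVM data devices were passed in and all three were rejected as unavailable, which here means each logical volume already carries an OSD (see the lvm list output further down), so no new OSDs are created on this pass. A minimal sketch of reading such a report, assuming the text is captured exactly as logged:

    report = """\
    --> passed data devices: 0 physical, 3 LVM
    --> relative data size: 1.0
    --> All data devices are unavailable
    """

    def would_create_osds(report_text: str) -> bool:
        """False when ceph-volume declares every data device unavailable."""
        return all(line.lstrip("-> ").strip() != "All data devices are unavailable"
                   for line in report_text.splitlines())

    print(would_create_osds(report))  # False: nothing to deploy on this pass
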
Oct 11 03:04:04 compute-0 systemd[1]: libpod-36c735dde7784745ee76b5fb291c8cf7206204fd28b8b3186cced4edab1c62fa.scope: Deactivated successfully.
Oct 11 03:04:04 compute-0 podman[497025]: 2025-10-11 03:04:04.518389691 +0000 UTC m=+1.609687431 container died 36c735dde7784745ee76b5fb291c8cf7206204fd28b8b3186cced4edab1c62fa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_swartz, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.build-date=20250507)
Oct 11 03:04:04 compute-0 systemd[1]: libpod-36c735dde7784745ee76b5fb291c8cf7206204fd28b8b3186cced4edab1c62fa.scope: Consumed 1.263s CPU time.
Oct 11 03:04:04 compute-0 systemd[1]: var-lib-containers-storage-overlay-d1b6438244b0cbe67b164bb262b601133df435654630e0fca8d449652c303c01-merged.mount: Deactivated successfully.
Oct 11 03:04:04 compute-0 podman[497025]: 2025-10-11 03:04:04.624928024 +0000 UTC m=+1.716225724 container remove 36c735dde7784745ee76b5fb291c8cf7206204fd28b8b3186cced4edab1c62fa (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=cranky_swartz, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 03:04:04 compute-0 systemd[1]: libpod-conmon-36c735dde7784745ee76b5fb291c8cf7206204fd28b8b3186cced4edab1c62fa.scope: Deactivated successfully.
Oct 11 03:04:04 compute-0 sudo[496917]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:04 compute-0 sudo[497080]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:04:04 compute-0 sudo[497080]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:04:04 compute-0 sudo[497080]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:04 compute-0 ceph-mon[191930]: pgmap v2598: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:04 compute-0 sudo[497105]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:04:04 compute-0 sudo[497105]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:04:04 compute-0 sudo[497105]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:05 compute-0 sudo[497130]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:04:05 compute-0 sudo[497130]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:04:05 compute-0 sudo[497130]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:05 compute-0 sudo[497155]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 03:04:05 compute-0 sudo[497155]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
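
The sudo triple just before this (/bin/true, /bin/which python3, /bin/true) appears to be the connection preflight of cephadm's remote-execution layer; the real command then invokes a content-addressed copy of cephadm (the checksum is part of the filename under /var/lib/ceph/<fsid>/), which in turn wraps ceph-volume in a container. Split into its parts, with every value copied from the log line above:

    cmd = [
        "/bin/python3",
        "/var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/"
        "cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d",
        "--image", "quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0",
        "--timeout", "895",   # seconds before cephadm gives up on the call
        "ceph-volume",        # cephadm subcommand
        "--fsid", "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
        "--",                 # everything after this is passed to ceph-volume itself
        "lvm", "list", "--format", "json",
    ]
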
Oct 11 03:04:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2599: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:05 compute-0 podman[497217]: 2025-10-11 03:04:05.857631353 +0000 UTC m=+0.088947631 container create cb8ae891914947c003f173883135db0f023e24edeff0c1f625d3ffd48a43e32b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_hofstadter, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS)
Oct 11 03:04:05 compute-0 podman[497217]: 2025-10-11 03:04:05.823443336 +0000 UTC m=+0.054759654 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:04:05 compute-0 systemd[1]: Started libpod-conmon-cb8ae891914947c003f173883135db0f023e24edeff0c1f625d3ffd48a43e32b.scope.
Oct 11 03:04:05 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:04:06 compute-0 nova_compute[356901]: 2025-10-11 03:04:06.002 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:06 compute-0 podman[497217]: 2025-10-11 03:04:06.016542383 +0000 UTC m=+0.247858721 container init cb8ae891914947c003f173883135db0f023e24edeff0c1f625d3ffd48a43e32b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_hofstadter, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20250507, io.buildah.version=1.39.3)
Oct 11 03:04:06 compute-0 podman[497217]: 2025-10-11 03:04:06.034866378 +0000 UTC m=+0.266182666 container start cb8ae891914947c003f173883135db0f023e24edeff0c1f625d3ffd48a43e32b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_hofstadter, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, OSD_FLAVOR=default, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 03:04:06 compute-0 podman[497217]: 2025-10-11 03:04:06.041716657 +0000 UTC m=+0.273032985 container attach cb8ae891914947c003f173883135db0f023e24edeff0c1f625d3ffd48a43e32b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_hofstadter, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 03:04:06 compute-0 crazy_hofstadter[497233]: 167 167
Oct 11 03:04:06 compute-0 systemd[1]: libpod-cb8ae891914947c003f173883135db0f023e24edeff0c1f625d3ffd48a43e32b.scope: Deactivated successfully.
Oct 11 03:04:06 compute-0 podman[497217]: 2025-10-11 03:04:06.047651582 +0000 UTC m=+0.278967830 container died cb8ae891914947c003f173883135db0f023e24edeff0c1f625d3ffd48a43e32b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_hofstadter, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True)
Oct 11 03:04:06 compute-0 systemd[1]: var-lib-containers-storage-overlay-4b782e07ede58cde76cb5b3086401efa06f6d246e148479f53b1bcdc909bb8c1-merged.mount: Deactivated successfully.
Oct 11 03:04:06 compute-0 podman[497217]: 2025-10-11 03:04:06.118145152 +0000 UTC m=+0.349461440 container remove cb8ae891914947c003f173883135db0f023e24edeff0c1f625d3ffd48a43e32b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=crazy_hofstadter, CEPH_REF=reef, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 03:04:06 compute-0 systemd[1]: libpod-conmon-cb8ae891914947c003f173883135db0f023e24edeff0c1f625d3ffd48a43e32b.scope: Deactivated successfully.
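
The crazy_hofstadter container's only output was "167 167": cephadm runs short-lived containers like this to discover the uid and gid of the ceph account baked into the image (167:167 is the conventional ceph uid/gid on these images) so host directories can be chowned to match. A hypothetical probe in the same spirit — the exact command cephadm uses may differ — assuming podman is on PATH and using the image digest from the log:

    import subprocess

    IMAGE = ("quay.io/ceph/ceph@sha256:"
             "1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0")

    # Ask the image what uid/gid its "ceph" account has (hypothetical probe).
    out = subprocess.run(
        ["podman", "run", "--rm", IMAGE, "python3", "-c",
         "import pwd; u = pwd.getpwnam('ceph'); print(u.pw_uid, u.pw_gid)"],
        capture_output=True, text=True, check=True,
    ).stdout.split()
    uid, gid = map(int, out)  # expected: 167 167
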
Oct 11 03:04:06 compute-0 podman[497256]: 2025-10-11 03:04:06.395137059 +0000 UTC m=+0.103152339 container create 158eb5cd58f692986df7e28a8ed476c79b84a5442ad8c37c594c7105dbbc2b71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_swartz, CEPH_REF=reef, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 03:04:06 compute-0 podman[497256]: 2025-10-11 03:04:06.367376858 +0000 UTC m=+0.075392228 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:04:06 compute-0 systemd[1]: Started libpod-conmon-158eb5cd58f692986df7e28a8ed476c79b84a5442ad8c37c594c7105dbbc2b71.scope.
Oct 11 03:04:06 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:04:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d294c33cb0f027575397dfc9aaea76bd9b54138c365b033139da85d88f2f90c4/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 03:04:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d294c33cb0f027575397dfc9aaea76bd9b54138c365b033139da85d88f2f90c4/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 03:04:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d294c33cb0f027575397dfc9aaea76bd9b54138c365b033139da85d88f2f90c4/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 03:04:06 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/d294c33cb0f027575397dfc9aaea76bd9b54138c365b033139da85d88f2f90c4/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 03:04:06 compute-0 podman[497256]: 2025-10-11 03:04:06.526942222 +0000 UTC m=+0.234957542 container init 158eb5cd58f692986df7e28a8ed476c79b84a5442ad8c37c594c7105dbbc2b71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_swartz, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 03:04:06 compute-0 podman[497256]: 2025-10-11 03:04:06.541315933 +0000 UTC m=+0.249331213 container start 158eb5cd58f692986df7e28a8ed476c79b84a5442ad8c37c594c7105dbbc2b71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_swartz, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 03:04:06 compute-0 podman[497256]: 2025-10-11 03:04:06.547183151 +0000 UTC m=+0.255198431 container attach 158eb5cd58f692986df7e28a8ed476c79b84a5442ad8c37c594c7105dbbc2b71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_swartz, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 03:04:06 compute-0 ceph-mon[191930]: pgmap v2599: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]: {
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:     "0": [
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:         {
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "devices": [
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "/dev/loop3"
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             ],
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "lv_name": "ceph_lv0",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "lv_size": "21470642176",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "name": "ceph_lv0",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "tags": {
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.cluster_name": "ceph",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.crush_device_class": "",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.encrypted": "0",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.osd_id": "0",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.type": "block",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.vdo": "0"
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             },
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "type": "block",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "vg_name": "ceph_vg0"
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:         }
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:     ],
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:     "1": [
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:         {
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "devices": [
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "/dev/loop4"
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             ],
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "lv_name": "ceph_lv1",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "lv_size": "21470642176",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "name": "ceph_lv1",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "tags": {
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.cluster_name": "ceph",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.crush_device_class": "",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.encrypted": "0",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.osd_id": "1",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.type": "block",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.vdo": "0"
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             },
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "type": "block",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "vg_name": "ceph_vg1"
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:         }
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:     ],
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:     "2": [
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:         {
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "devices": [
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "/dev/loop5"
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             ],
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "lv_name": "ceph_lv2",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "lv_size": "21470642176",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "name": "ceph_lv2",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "tags": {
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.cluster_name": "ceph",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.crush_device_class": "",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.encrypted": "0",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.osd_id": "2",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.type": "block",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:                 "ceph.vdo": "0"
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             },
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "type": "block",
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:             "vg_name": "ceph_vg2"
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:         }
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]:     ]
Oct 11 03:04:07 compute-0 suspicious_swartz[497271]: }
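
This JSON document is the answer to the `ceph-volume ... lvm list --format json` call issued at 03:04:05: the top-level keys are OSD ids, each mapping to the logical volume backing that OSD, with the metadata (cluster fsid, osd fsid, type=block, encrypted=0, osdspec affinity) duplicated between the flat lv_tags string and the parsed tags object. A minimal sketch of consuming it, assuming the JSON above were saved to lvm_list.json (a filename chosen here for illustration):

    import json

    with open("lvm_list.json") as fh:
        lvm = json.load(fh)

    # One line per OSD: id, backing LV, osd fsid, and data type.
    for osd_id, volumes in sorted(lvm.items(), key=lambda kv: int(kv[0])):
        for vol in volumes:
            tags = vol["tags"]
            print(osd_id, vol["lv_path"], tags["ceph.osd_fsid"], tags["ceph.type"])
    # 0 /dev/ceph_vg0/ceph_lv0 a9c7940d-c154-46ef-9c18-8ba55dddd3d6 block
    # ...
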
Oct 11 03:04:07 compute-0 systemd[1]: libpod-158eb5cd58f692986df7e28a8ed476c79b84a5442ad8c37c594c7105dbbc2b71.scope: Deactivated successfully.
Oct 11 03:04:07 compute-0 podman[497256]: 2025-10-11 03:04:07.449530766 +0000 UTC m=+1.157546106 container died 158eb5cd58f692986df7e28a8ed476c79b84a5442ad8c37c594c7105dbbc2b71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_swartz, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 03:04:07 compute-0 systemd[1]: var-lib-containers-storage-overlay-d294c33cb0f027575397dfc9aaea76bd9b54138c365b033139da85d88f2f90c4-merged.mount: Deactivated successfully.
Oct 11 03:04:07 compute-0 podman[497256]: 2025-10-11 03:04:07.534437986 +0000 UTC m=+1.242453256 container remove 158eb5cd58f692986df7e28a8ed476c79b84a5442ad8c37c594c7105dbbc2b71 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_swartz, OSD_FLAVOR=default, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 03:04:07 compute-0 systemd[1]: libpod-conmon-158eb5cd58f692986df7e28a8ed476c79b84a5442ad8c37c594c7105dbbc2b71.scope: Deactivated successfully.
Oct 11 03:04:07 compute-0 sudo[497155]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0009191400908380543 of space, bias 1.0, pg target 0.2757420272514163 quantized to 32 (current 32)
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
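
The pg_autoscaler lines above all follow one formula: raw pg target = (fraction of cluster space the pool uses) × bias × (target PGs per OSD × number of OSDs). On this 3-OSD cluster the logged ratios are consistent with the default of 100 PGs per OSD, i.e. a factor of 300; the raw value is then quantized to a power of two, keeping the current pg_num when the change is too small to act on (hence 'vms' staying at 32 while 'cephfs.cephfs.meta', with bias 4.0, quantizes to 16 against a current 32). Reproducing two of the logged values, with every input copied from the log:

    # Assumed from the logged ratios: mon_target_pg_per_osd=100, 3 OSDs.
    TARGET_PGS = 100 * 3

    def raw_pg_target(space_fraction: float, bias: float) -> float:
        return space_fraction * bias * TARGET_PGS

    print(raw_pg_target(0.0005513950275118838, 1.0))  # ~0.165418508 ('vms')
    print(raw_pg_target(5.087256625643029e-07, 4.0))  # ~0.000610470 ('cephfs.cephfs.meta')
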
Oct 11 03:04:07 compute-0 sudo[497291]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:04:07 compute-0 sudo[497291]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:04:07 compute-0 sudo[497291]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2600: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:07 compute-0 sudo[497316]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:04:07 compute-0 sudo[497316]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:04:07 compute-0 sudo[497316]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:07 compute-0 sudo[497341]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:04:07 compute-0 sudo[497341]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:04:07 compute-0 sudo[497341]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:07 compute-0 sudo[497366]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 03:04:07 compute-0 sudo[497366]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:04:08 compute-0 nova_compute[356901]: 2025-10-11 03:04:08.184 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:08 compute-0 podman[497430]: 2025-10-11 03:04:08.509990384 +0000 UTC m=+0.083279165 container create d23618c3792fa1332e645d1938baec8cbfbe39d1007f3837c87e1f0d23d85a63 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_davinci, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 03:04:08 compute-0 podman[497430]: 2025-10-11 03:04:08.472617023 +0000 UTC m=+0.045905884 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:04:08 compute-0 systemd[1]: Started libpod-conmon-d23618c3792fa1332e645d1938baec8cbfbe39d1007f3837c87e1f0d23d85a63.scope.
Oct 11 03:04:08 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:04:08 compute-0 podman[497430]: 2025-10-11 03:04:08.663546816 +0000 UTC m=+0.236835637 container init d23618c3792fa1332e645d1938baec8cbfbe39d1007f3837c87e1f0d23d85a63 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_davinci, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 03:04:08 compute-0 podman[497430]: 2025-10-11 03:04:08.682875731 +0000 UTC m=+0.256164512 container start d23618c3792fa1332e645d1938baec8cbfbe39d1007f3837c87e1f0d23d85a63 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_davinci, ceph=True, org.label-schema.build-date=20250507, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 03:04:08 compute-0 nice_davinci[497445]: 167 167
Oct 11 03:04:08 compute-0 systemd[1]: libpod-d23618c3792fa1332e645d1938baec8cbfbe39d1007f3837c87e1f0d23d85a63.scope: Deactivated successfully.
Oct 11 03:04:08 compute-0 podman[497430]: 2025-10-11 03:04:08.696368307 +0000 UTC m=+0.269657108 container attach d23618c3792fa1332e645d1938baec8cbfbe39d1007f3837c87e1f0d23d85a63 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_davinci, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.39.3, ceph=True, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 03:04:08 compute-0 conmon[497445]: conmon d23618c3792fa1332e64 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-d23618c3792fa1332e645d1938baec8cbfbe39d1007f3837c87e1f0d23d85a63.scope/container/memory.events
Oct 11 03:04:08 compute-0 podman[497430]: 2025-10-11 03:04:08.698650707 +0000 UTC m=+0.271939488 container died d23618c3792fa1332e645d1938baec8cbfbe39d1007f3837c87e1f0d23d85a63 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_davinci, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 03:04:08 compute-0 systemd[1]: var-lib-containers-storage-overlay-b3d10e02fd85b4ab68294857b76fe2ac828ea5c8baa95d0c9ed15cad416ecab7-merged.mount: Deactivated successfully.
Oct 11 03:04:08 compute-0 podman[497430]: 2025-10-11 03:04:08.857473201 +0000 UTC m=+0.430762012 container remove d23618c3792fa1332e645d1938baec8cbfbe39d1007f3837c87e1f0d23d85a63 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=nice_davinci, CEPH_REF=reef, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 03:04:08 compute-0 systemd[1]: libpod-conmon-d23618c3792fa1332e645d1938baec8cbfbe39d1007f3837c87e1f0d23d85a63.scope: Deactivated successfully.
Oct 11 03:04:08 compute-0 ceph-mon[191930]: pgmap v2600: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:09 compute-0 podman[497472]: 2025-10-11 03:04:09.163088784 +0000 UTC m=+0.080196627 container create 2de3d555266a629288f7900208230f6f26dc8bb8e6fbadafdfb7795e80de3e78 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_tesla, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 03:04:09 compute-0 podman[497472]: 2025-10-11 03:04:09.136428499 +0000 UTC m=+0.053536382 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:04:09 compute-0 systemd[1]: Started libpod-conmon-2de3d555266a629288f7900208230f6f26dc8bb8e6fbadafdfb7795e80de3e78.scope.
Oct 11 03:04:09 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:04:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/81b4a2caae5bc8c758a0676f9ac2bde233d182960d26e308c73aec1531bbfe03/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 03:04:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/81b4a2caae5bc8c758a0676f9ac2bde233d182960d26e308c73aec1531bbfe03/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 03:04:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/81b4a2caae5bc8c758a0676f9ac2bde233d182960d26e308c73aec1531bbfe03/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 03:04:09 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/81b4a2caae5bc8c758a0676f9ac2bde233d182960d26e308c73aec1531bbfe03/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 03:04:09 compute-0 podman[497472]: 2025-10-11 03:04:09.344803641 +0000 UTC m=+0.261911574 container init 2de3d555266a629288f7900208230f6f26dc8bb8e6fbadafdfb7795e80de3e78 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_tesla, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 03:04:09 compute-0 podman[497472]: 2025-10-11 03:04:09.363634946 +0000 UTC m=+0.280742789 container start 2de3d555266a629288f7900208230f6f26dc8bb8e6fbadafdfb7795e80de3e78 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_tesla, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 03:04:09 compute-0 podman[497472]: 2025-10-11 03:04:09.417141598 +0000 UTC m=+0.334249541 container attach 2de3d555266a629288f7900208230f6f26dc8bb8e6fbadafdfb7795e80de3e78 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_tesla, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 03:04:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2601: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:10 compute-0 podman[497495]: 2025-10-11 03:04:10.234870033 +0000 UTC m=+0.108396120 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, org.label-schema.build-date=20251009, config_id=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 03:04:10 compute-0 podman[497499]: 2025-10-11 03:04:10.243888436 +0000 UTC m=+0.117058828 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, container_name=iscsid, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, tcib_managed=true, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
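
The two health_status events above come from podman's periodic healthchecks on the edpm-managed multipathd and iscsid containers. Note that their config_data label is a Python dict literal (single quotes, bare True), not JSON, so json.loads would reject it; ast.literal_eval from the standard library recovers it safely. A sketch, using a shortened sample standing in for the full label text:

    import ast

    # Shortened stand-in for the multipathd config_data label logged above.
    config_data = ("{'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, "
                   "'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', "
                   "'test': '/openstack/healthcheck'}, 'privileged': True}")

    cfg = ast.literal_eval(config_data)  # parses Python literals only, no code execution
    print(cfg["healthcheck"]["test"])    # -> /openstack/healthcheck
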
Oct 11 03:04:10 compute-0 interesting_tesla[497488]: {
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:         "osd_id": 1,
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:         "type": "bluestore"
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:     },
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:         "osd_id": 2,
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:         "type": "bluestore"
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:     },
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:         "osd_id": 0,
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:         "type": "bluestore"
Oct 11 03:04:10 compute-0 interesting_tesla[497488]:     }
Oct 11 03:04:10 compute-0 interesting_tesla[497488]: }
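
This second JSON document answers the `ceph-volume ... raw list --format json` call from 03:04:07: it is keyed by osd_uuid rather than OSD id and reports the device-mapper path of each bluestore data device. It describes the same three OSDs as the lvm inventory above, and the two can be joined on the OSD fsid; a sketch, assuming both documents were saved as raw_list.json and lvm_list.json (illustrative filenames):

    import json

    with open("raw_list.json") as fh:
        raw = json.load(fh)
    with open("lvm_list.json") as fh:
        lvm = json.load(fh)

    # Map osd fsid -> logical volume path from the lvm inventory.
    lv_by_fsid = {vol["tags"]["ceph.osd_fsid"]: vol["lv_path"]
                  for vols in lvm.values() for vol in vols}

    for osd_uuid, entry in sorted(raw.items(), key=lambda kv: kv[1]["osd_id"]):
        print(entry["osd_id"], entry["device"], "<-", lv_by_fsid[osd_uuid])
    # 0 /dev/mapper/ceph_vg0-ceph_lv0 <- /dev/ceph_vg0/ceph_lv0
    # ...
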
Oct 11 03:04:10 compute-0 systemd[1]: libpod-2de3d555266a629288f7900208230f6f26dc8bb8e6fbadafdfb7795e80de3e78.scope: Deactivated successfully.
Oct 11 03:04:10 compute-0 systemd[1]: libpod-2de3d555266a629288f7900208230f6f26dc8bb8e6fbadafdfb7795e80de3e78.scope: Consumed 1.148s CPU time.
Oct 11 03:04:10 compute-0 podman[497560]: 2025-10-11 03:04:10.582411588 +0000 UTC m=+0.048231672 container died 2de3d555266a629288f7900208230f6f26dc8bb8e6fbadafdfb7795e80de3e78 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_tesla, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS)
Oct 11 03:04:10 compute-0 systemd[1]: var-lib-containers-storage-overlay-81b4a2caae5bc8c758a0676f9ac2bde233d182960d26e308c73aec1531bbfe03-merged.mount: Deactivated successfully.
Oct 11 03:04:10 compute-0 podman[497560]: 2025-10-11 03:04:10.687842485 +0000 UTC m=+0.153662529 container remove 2de3d555266a629288f7900208230f6f26dc8bb8e6fbadafdfb7795e80de3e78 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=interesting_tesla, org.label-schema.schema-version=1.0, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, OSD_FLAVOR=default, ceph=True)
Oct 11 03:04:10 compute-0 systemd[1]: libpod-conmon-2de3d555266a629288f7900208230f6f26dc8bb8e6fbadafdfb7795e80de3e78.scope: Deactivated successfully.
Oct 11 03:04:10 compute-0 sudo[497366]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 03:04:10 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:04:10 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 03:04:10 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:04:10 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev e4d1c8df-74c1-4647-87da-3aa5d4aaad1a does not exist
Oct 11 03:04:10 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 4d2ab316-ca09-403c-9701-e6b5cb69e0ee does not exist
Oct 11 03:04:10 compute-0 sudo[497575]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:04:10 compute-0 sudo[497575]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:04:10 compute-0 sudo[497575]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:10 compute-0 ceph-mon[191930]: pgmap v2601: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:10 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:04:10 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:04:11 compute-0 nova_compute[356901]: 2025-10-11 03:04:11.007 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:11 compute-0 sudo[497600]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 03:04:11 compute-0 sudo[497600]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:04:11 compute-0 sudo[497600]: pam_unix(sudo:session): session closed for user root
Oct 11 03:04:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2602: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:04:13 compute-0 ceph-mon[191930]: pgmap v2602: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:13 compute-0 nova_compute[356901]: 2025-10-11 03:04:13.191 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2603: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:15 compute-0 ceph-mon[191930]: pgmap v2603: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2604: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:16 compute-0 nova_compute[356901]: 2025-10-11 03:04:16.010 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:04:17 compute-0 ceph-mon[191930]: pgmap v2604: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:17 compute-0 podman[497625]: 2025-10-11 03:04:17.197209125 +0000 UTC m=+0.093763210 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']})
Oct 11 03:04:17 compute-0 podman[497627]: 2025-10-11 03:04:17.19708143 +0000 UTC m=+0.080975006 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 03:04:17 compute-0 podman[497626]: 2025-10-11 03:04:17.223928698 +0000 UTC m=+0.114975181 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, version=9.6, distribution-scope=public, name=ubi9-minimal, io.openshift.expose-services=, com.redhat.component=ubi9-minimal-container, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.buildah.version=1.33.7, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, managed_by=edpm_ansible, vcs-type=git, build-date=2025-08-20T13:12:41, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., config_id=edpm, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://catalog.redhat.com/en/search?searchType=containers, release=1755695350, container_name=openstack_network_exporter, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, vendor=Red Hat, Inc.)
Oct 11 03:04:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2605: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:18 compute-0 nova_compute[356901]: 2025-10-11 03:04:18.196 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:19 compute-0 ceph-mon[191930]: pgmap v2605: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2606: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:19 compute-0 nova_compute[356901]: 2025-10-11 03:04:19.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:04:21 compute-0 nova_compute[356901]: 2025-10-11 03:04:21.014 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:21 compute-0 ceph-mon[191930]: pgmap v2606: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2607: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:04:22 compute-0 nova_compute[356901]: 2025-10-11 03:04:22.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:04:23 compute-0 ceph-mon[191930]: pgmap v2607: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:23 compute-0 nova_compute[356901]: 2025-10-11 03:04:23.199 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2608: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:25 compute-0 ceph-mon[191930]: pgmap v2608: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2609: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:26 compute-0 nova_compute[356901]: 2025-10-11 03:04:26.018 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:04:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:04:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:04:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:04:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:04:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:04:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:04:27 compute-0 ceph-mon[191930]: pgmap v2609: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:27 compute-0 podman[497690]: 2025-10-11 03:04:27.255552415 +0000 UTC m=+0.135354258 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, name=ubi9, summary=Provides the latest release of Red Hat Universal Base Image 9., com.redhat.component=ubi9-container, config_id=edpm, io.buildah.version=1.29.0, release=1214.1726694543, architecture=x86_64, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, vcs-type=git, io.openshift.expose-services=, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., vendor=Red Hat, Inc., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, build-date=2024-09-18T21:23:30, release-0.7.12=, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=kepler, io.openshift.tags=base rhel9, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=9.4)
Oct 11 03:04:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2610: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 03:04:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/123501876' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 03:04:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 03:04:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/123501876' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 03:04:27 compute-0 nova_compute[356901]: 2025-10-11 03:04:27.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:04:27 compute-0 nova_compute[356901]: 2025-10-11 03:04:27.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:04:28 compute-0 nova_compute[356901]: 2025-10-11 03:04:28.204 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/123501876' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 03:04:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/123501876' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 03:04:29 compute-0 ceph-mon[191930]: pgmap v2610: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2611: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:29 compute-0 podman[157119]: time="2025-10-11T03:04:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 03:04:29 compute-0 podman[157119]: @ - - [11/Oct/2025:03:04:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 03:04:29 compute-0 podman[157119]: @ - - [11/Oct/2025:03:04:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9113 "" "Go-http-client/1.1"
Oct 11 03:04:31 compute-0 nova_compute[356901]: 2025-10-11 03:04:31.022 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:31 compute-0 podman[497713]: 2025-10-11 03:04:31.230730502 +0000 UTC m=+0.107008126 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, tcib_managed=true, config_id=edpm, org.label-schema.license=GPLv2, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, io.buildah.version=1.41.4, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, container_name=ceilometer_agent_compute, maintainer=OpenStack Kubernetes Operator team)
Oct 11 03:04:31 compute-0 podman[497711]: 2025-10-11 03:04:31.236499355 +0000 UTC m=+0.129053098 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible)
Oct 11 03:04:31 compute-0 podman[497720]: 2025-10-11 03:04:31.253210388 +0000 UTC m=+0.115471678 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251009, tcib_managed=true, org.label-schema.license=GPLv2, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Oct 11 03:04:31 compute-0 podman[497712]: 2025-10-11 03:04:31.257045941 +0000 UTC m=+0.144398257 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=ovn_controller, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, managed_by=edpm_ansible, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 03:04:31 compute-0 ceph-mon[191930]: pgmap v2611: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:31 compute-0 openstack_network_exporter[374316]: ERROR   03:04:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:04:31 compute-0 openstack_network_exporter[374316]: ERROR   03:04:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:04:31 compute-0 openstack_network_exporter[374316]: ERROR   03:04:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 03:04:31 compute-0 openstack_network_exporter[374316]: ERROR   03:04:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 03:04:31 compute-0 openstack_network_exporter[374316]: ERROR   03:04:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 03:04:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2612: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:04:32 compute-0 nova_compute[356901]: 2025-10-11 03:04:32.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:04:32 compute-0 nova_compute[356901]: 2025-10-11 03:04:32.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 03:04:33 compute-0 nova_compute[356901]: 2025-10-11 03:04:33.207 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:33 compute-0 ceph-mon[191930]: pgmap v2612: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2613: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:35 compute-0 ceph-mon[191930]: pgmap v2613: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2614: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:36 compute-0 nova_compute[356901]: 2025-10-11 03:04:36.026 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:04:37 compute-0 ceph-mon[191930]: pgmap v2614: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2615: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:37 compute-0 nova_compute[356901]: 2025-10-11 03:04:37.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:04:38 compute-0 nova_compute[356901]: 2025-10-11 03:04:38.213 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:38 compute-0 nova_compute[356901]: 2025-10-11 03:04:38.898 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:04:38 compute-0 nova_compute[356901]: 2025-10-11 03:04:38.899 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 03:04:38 compute-0 nova_compute[356901]: 2025-10-11 03:04:38.899 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 03:04:39 compute-0 ceph-mon[191930]: pgmap v2615: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:39 compute-0 nova_compute[356901]: 2025-10-11 03:04:39.386 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 03:04:39 compute-0 nova_compute[356901]: 2025-10-11 03:04:39.386 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 03:04:39 compute-0 nova_compute[356901]: 2025-10-11 03:04:39.387 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 03:04:39 compute-0 nova_compute[356901]: 2025-10-11 03:04:39.387 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 03:04:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2616: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:40 compute-0 nova_compute[356901]: 2025-10-11 03:04:40.643 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 03:04:40 compute-0 nova_compute[356901]: 2025-10-11 03:04:40.659 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 03:04:40 compute-0 nova_compute[356901]: 2025-10-11 03:04:40.660 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
Oct 11 03:04:40 compute-0 nova_compute[356901]: 2025-10-11 03:04:40.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:04:40 compute-0 nova_compute[356901]: 2025-10-11 03:04:40.936 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:04:40 compute-0 nova_compute[356901]: 2025-10-11 03:04:40.937 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:04:40 compute-0 nova_compute[356901]: 2025-10-11 03:04:40.938 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 03:04:40 compute-0 nova_compute[356901]: 2025-10-11 03:04:40.939 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 03:04:40 compute-0 nova_compute[356901]: 2025-10-11 03:04:40.940 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 03:04:41 compute-0 nova_compute[356901]: 2025-10-11 03:04:41.030 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:41 compute-0 podman[497790]: 2025-10-11 03:04:41.238622046 +0000 UTC m=+0.122837837 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3, managed_by=edpm_ansible, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_id=multipathd, container_name=multipathd)
Oct 11 03:04:41 compute-0 podman[497791]: 2025-10-11 03:04:41.242673989 +0000 UTC m=+0.131115582 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_id=iscsid, container_name=iscsid)
Oct 11 03:04:41 compute-0 ceph-mon[191930]: pgmap v2616: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 03:04:41 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3061879381' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:04:41 compute-0 nova_compute[356901]: 2025-10-11 03:04:41.528 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.588s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 03:04:41 compute-0 nova_compute[356901]: 2025-10-11 03:04:41.627 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:04:41 compute-0 nova_compute[356901]: 2025-10-11 03:04:41.627 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:04:41 compute-0 nova_compute[356901]: 2025-10-11 03:04:41.628 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:04:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2617: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.042 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.044 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3648MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.045 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.045 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.116 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.117 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.117 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.138 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing inventories for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:804
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.156 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating ProviderTree inventory for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 from _refresh_and_get_inventory using data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} _refresh_and_get_inventory /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:768
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.157 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Updating inventory in ProviderTree for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 with inventory: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:176
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.173 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing aggregate associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, aggregates: None _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:813
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.205 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Refreshing trait associations for resource provider 256b11da-7f71-42c0-941c-ea1e909a35f8, traits: COMPUTE_VOLUME_EXTEND,COMPUTE_NET_VIF_MODEL_VMXNET3,HW_CPU_X86_SSSE3,COMPUTE_RESCUE_BFV,COMPUTE_SOCKET_PCI_NUMA_AFFINITY,COMPUTE_NODE,HW_CPU_X86_SVM,COMPUTE_STORAGE_BUS_SCSI,HW_CPU_X86_FMA3,COMPUTE_GRAPHICS_MODEL_NONE,COMPUTE_NET_VIF_MODEL_RTL8139,HW_CPU_X86_SSE4A,COMPUTE_IMAGE_TYPE_QCOW2,HW_CPU_X86_BMI2,HW_CPU_X86_SSE42,HW_CPU_X86_AVX2,COMPUTE_IMAGE_TYPE_RAW,COMPUTE_VIOMMU_MODEL_VIRTIO,HW_CPU_X86_AESNI,COMPUTE_STORAGE_BUS_FDC,COMPUTE_GRAPHICS_MODEL_VIRTIO,HW_CPU_X86_AMD_SVM,COMPUTE_NET_VIF_MODEL_NE2K_PCI,COMPUTE_ACCELERATORS,HW_CPU_X86_SSE2,COMPUTE_GRAPHICS_MODEL_VGA,HW_CPU_X86_ABM,HW_CPU_X86_AVX,COMPUTE_NET_VIF_MODEL_E1000,COMPUTE_STORAGE_BUS_USB,COMPUTE_NET_ATTACH_INTERFACE,HW_CPU_X86_MMX,COMPUTE_SECURITY_TPM_2_0,COMPUTE_IMAGE_TYPE_ISO,HW_CPU_X86_SSE41,COMPUTE_IMAGE_TYPE_AKI,COMPUTE_IMAGE_TYPE_AMI,COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG,COMPUTE_DEVICE_TAGGING,COMPUTE_SECURITY_UEFI_SECURE_BOOT,COMPUTE_TRUSTED_CERTS,COMPUTE_NET_VIF_MODEL_VIRTIO,COMPUTE_VIOMMU_MODEL_INTEL,COMPUTE_STORAGE_BUS_SATA,HW_CPU_X86_SSE,COMPUTE_STORAGE_BUS_VIRTIO,COMPUTE_NET_VIF_MODEL_PCNET,COMPUTE_GRAPHICS_MODEL_CIRRUS,HW_CPU_X86_SHA,HW_CPU_X86_BMI,COMPUTE_NET_VIF_MODEL_E1000E,COMPUTE_NET_VIF_MODEL_SPAPR_VLAN,COMPUTE_VOLUME_ATTACH_WITH_TAG,COMPUTE_GRAPHICS_MODEL_BOCHS,COMPUTE_VIOMMU_MODEL_AUTO,COMPUTE_IMAGE_TYPE_ARI,HW_CPU_X86_CLMUL,COMPUTE_STORAGE_BUS_IDE,COMPUTE_VOLUME_MULTI_ATTACH,HW_CPU_X86_F16C,COMPUTE_SECURITY_TPM_1_2 _refresh_associations /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:825
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.257 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 03:04:42 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3061879381' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:04:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 03:04:42 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1746143787' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.854 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.597s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.868 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.902 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.906 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 03:04:42 compute-0 nova_compute[356901]: 2025-10-11 03:04:42.907 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.862s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 03:04:43 compute-0 nova_compute[356901]: 2025-10-11 03:04:43.218 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:43 compute-0 ceph-mon[191930]: pgmap v2617: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:43 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1746143787' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:04:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2618: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
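[editor's note] The pgmap status line that recurs through this whole window (from both ceph-mon and ceph-mgr) has a fixed shape; a small parser makes the fields explicit. The regex below is fitted to the lines in this log only, and ignores the optional "; N KiB/s rd, ..." throughput tail that appears on some of them.

# Parse the recurring ceph pgmap line into named fields.
import re

LINE = ("pgmap v2617: 321 pgs: 321 active+clean; 118 MiB data, "
        "313 MiB used, 60 GiB / 60 GiB avail")

m = re.search(
    r'pgmap v(?P<ver>\d+): (?P<pgs>\d+) pgs: (?P<clean>\d+) active\+clean; '
    r'(?P<data>[\d.]+ \w+) data, (?P<used>[\d.]+ \w+) used, '
    r'(?P<avail>[\d.]+ \w+) / (?P<total>[\d.]+ \w+) avail', LINE)
print(m.groupdict())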
Oct 11 03:04:43 compute-0 nova_compute[356901]: 2025-10-11 03:04:43.904 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:04:45 compute-0 ceph-mon[191930]: pgmap v2618: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2619: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:46 compute-0 nova_compute[356901]: 2025-10-11 03:04:46.035 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:04:47 compute-0 ceph-mon[191930]: pgmap v2619: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2620: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:48 compute-0 podman[497873]: 2025-10-11 03:04:48.217989943 +0000 UTC m=+0.104245869 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 03:04:48 compute-0 nova_compute[356901]: 2025-10-11 03:04:48.224 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:48 compute-0 podman[497872]: 2025-10-11 03:04:48.265282604 +0000 UTC m=+0.147246609 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vendor=Red Hat, Inc., version=9.6, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, architecture=x86_64, com.redhat.component=ubi9-minimal-container, config_id=edpm, io.openshift.expose-services=, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, io.buildah.version=1.33.7, build-date=2025-08-20T13:12:41, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=ubi9-minimal, vcs-type=git, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., release=1755695350, maintainer=Red Hat, Inc., container_name=openstack_network_exporter, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, io.openshift.tags=minimal rhel9, managed_by=edpm_ansible)
Oct 11 03:04:48 compute-0 podman[497871]: 2025-10-11 03:04:48.271032686 +0000 UTC m=+0.159441568 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_ipmi, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Oct 11 03:04:49 compute-0 ceph-mon[191930]: pgmap v2620: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2621: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:51 compute-0 nova_compute[356901]: 2025-10-11 03:04:51.038 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:51 compute-0 ceph-mon[191930]: pgmap v2621: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2622: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:04:53 compute-0 nova_compute[356901]: 2025-10-11 03:04:53.228 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:53 compute-0 ceph-mon[191930]: pgmap v2622: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:04:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2623: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 7.0 KiB/s rd, 0 B/s wr, 11 op/s
Oct 11 03:04:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:04:54.903 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:04:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:04:54.904 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:04:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:04:54.905 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
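[editor's note] The acquire/waited/held bookkeeping in the three ovn_metadata_agent lines above is what oslo.concurrency's lock decorator logs at DEBUG around a guarded call. A minimal sketch of the same pattern; the function body is a stand-in, not neutron's ProcessMonitor code.

# Sketch of the lock pattern logged above: oslo.concurrency emits
# "Acquiring lock", "acquired ... waited Ns" and "released ... held Ns"
# around the decorated call.
from oslo_concurrency import lockutils

@lockutils.synchronized('_check_child_processes')
def check_child_processes():
    pass  # stand-in; neutron checks its monitored external processes here

check_child_processes()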
Oct 11 03:04:55 compute-0 ceph-mon[191930]: pgmap v2623: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 7.0 KiB/s rd, 0 B/s wr, 11 op/s
Oct 11 03:04:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2624: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 7.0 KiB/s rd, 0 B/s wr, 11 op/s
Oct 11 03:04:56 compute-0 nova_compute[356901]: 2025-10-11 03:04:56.044 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:04:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:04:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:04:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:04:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:04:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:04:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_03:04:56
Oct 11 03:04:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 03:04:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 03:04:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['backups', 'default.rgw.log', '.mgr', 'images', 'cephfs.cephfs.meta', 'default.rgw.control', 'vms', '.rgw.root', 'cephfs.cephfs.data', 'default.rgw.meta', 'volumes']
Oct 11 03:04:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 03:04:56 compute-0 nova_compute[356901]: 2025-10-11 03:04:56.891 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:04:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:04:57 compute-0 ceph-mon[191930]: pgmap v2624: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 7.0 KiB/s rd, 0 B/s wr, 11 op/s
Oct 11 03:04:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 03:04:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 03:04:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 03:04:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 03:04:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 03:04:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 03:04:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 03:04:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 03:04:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 03:04:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 03:04:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2625: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 18 KiB/s rd, 0 B/s wr, 30 op/s
Oct 11 03:04:58 compute-0 podman[497933]: 2025-10-11 03:04:58.206673012 +0000 UTC m=+0.100778981 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.29.0, com.redhat.component=ubi9-container, vcs-type=git, version=9.4, build-date=2024-09-18T21:23:30, io.openshift.tags=base rhel9, release-0.7.12=, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, container_name=kepler, release=1214.1726694543, io.k8s.display-name=Red Hat Universal Base Image 9, io.openshift.expose-services=, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_id=edpm, managed_by=edpm_ansible, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, name=ubi9, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Red Hat, Inc., vendor=Red Hat, Inc.)
Oct 11 03:04:58 compute-0 nova_compute[356901]: 2025-10-11 03:04:58.232 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:04:59 compute-0 ceph-mon[191930]: pgmap v2625: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 18 KiB/s rd, 0 B/s wr, 30 op/s
Oct 11 03:04:59 compute-0 podman[157119]: time="2025-10-11T03:04:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 03:04:59 compute-0 podman[157119]: @ - - [11/Oct/2025:03:04:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 03:04:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2626: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 03:04:59 compute-0 podman[157119]: @ - - [11/Oct/2025:03:04:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9114 "" "Go-http-client/1.1"
Oct 11 03:05:00 compute-0 ceph-mon[191930]: pgmap v2626: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 03:05:01 compute-0 nova_compute[356901]: 2025-10-11 03:05:01.047 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:01 compute-0 openstack_network_exporter[374316]: ERROR   03:05:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:05:01 compute-0 openstack_network_exporter[374316]: ERROR   03:05:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:05:01 compute-0 openstack_network_exporter[374316]: ERROR   03:05:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 03:05:01 compute-0 openstack_network_exporter[374316]: ERROR   03:05:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 03:05:01 compute-0 openstack_network_exporter[374316]: ERROR   03:05:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 03:05:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2627: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 03:05:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:05:02 compute-0 podman[497953]: 2025-10-11 03:05:02.214465255 +0000 UTC m=+0.104927424 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 03:05:02 compute-0 podman[497956]: 2025-10-11 03:05:02.243288397 +0000 UTC m=+0.119805098 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, container_name=ovn_metadata_agent, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.build-date=20251009, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=ovn_metadata_agent, io.buildah.version=1.41.3)
Oct 11 03:05:02 compute-0 podman[497955]: 2025-10-11 03:05:02.246199608 +0000 UTC m=+0.117030556 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, container_name=ceilometer_agent_compute, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 10 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.4, org.label-schema.license=GPLv2, config_id=edpm, tcib_build_tag=d674bdc5502e72c153d04cef014162b0, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251007, org.label-schema.vendor=CentOS)
Oct 11 03:05:02 compute-0 podman[497954]: 2025-10-11 03:05:02.308970657 +0000 UTC m=+0.186922888 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 03:05:02 compute-0 ceph-mon[191930]: pgmap v2627: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 03:05:03 compute-0 nova_compute[356901]: 2025-10-11 03:05:03.235 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2628: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 03:05:04 compute-0 ceph-mon[191930]: pgmap v2628: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 36 KiB/s rd, 0 B/s wr, 59 op/s
Oct 11 03:05:05 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2629: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 29 KiB/s rd, 0 B/s wr, 47 op/s
Oct 11 03:05:06 compute-0 nova_compute[356901]: 2025-10-11 03:05:06.050 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:06 compute-0 ceph-mon[191930]: pgmap v2629: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 29 KiB/s rd, 0 B/s wr, 47 op/s
Oct 11 03:05:06 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] _maybe_adjust
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0009191400908380543 of space, bias 1.0, pg target 0.2757420272514163 quantized to 32 (current 32)
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
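[editor's note] The pg_autoscaler lines above are a reproducible computation. Assuming the upstream formula pg_target = usage_ratio * bias * (mon_target_pg_per_osd * n_osds) with the default mon_target_pg_per_osd = 100, the multiplier 300 that exactly reproduces the logged targets implies 3 OSDs (consistent with the three LVs in the ceph-volume call later in this window). The final "quantized to" values also reflect per-pool pg_num_min floors and the autoscaler's no-change threshold, which the sketch deliberately omits.

# Reproduce the logged pg_autoscaler targets (formula and OSD count are
# inferred as described above; floors and thresholds omitted).
import math

TARGET_PG_PER_OSD = 100
N_OSDS = 3

def pg_target(usage_ratio, bias):
    return usage_ratio * bias * TARGET_PG_PER_OSD * N_OSDS

def quantize(target):
    # round up to the nearest power of two at or above target (min 1)
    return 1 << max(0, math.ceil(math.log2(target))) if target >= 1 else 1

print(pg_target(0.0005513950275118838, 1.0))  # 0.165418508... ('vms')
print(pg_target(5.087256625643029e-07, 4.0))  # 0.000610470... ('cephfs.cephfs.meta')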
Oct 11 03:05:07 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2630: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 29 KiB/s rd, 0 B/s wr, 47 op/s
Oct 11 03:05:08 compute-0 nova_compute[356901]: 2025-10-11 03:05:08.241 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:08 compute-0 ceph-mon[191930]: pgmap v2630: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 29 KiB/s rd, 0 B/s wr, 47 op/s
Oct 11 03:05:09 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2631: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 17 KiB/s rd, 0 B/s wr, 29 op/s
Oct 11 03:05:10 compute-0 ceph-mon[191930]: pgmap v2631: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail; 17 KiB/s rd, 0 B/s wr, 29 op/s
Oct 11 03:05:11 compute-0 nova_compute[356901]: 2025-10-11 03:05:11.053 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:11 compute-0 sudo[498035]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:05:11 compute-0 sudo[498035]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:11 compute-0 sudo[498035]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:11 compute-0 sudo[498060]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:05:11 compute-0 sudo[498060]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:11 compute-0 sudo[498060]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:11 compute-0 sudo[498095]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:05:11 compute-0 sudo[498095]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:11 compute-0 sudo[498095]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:11 compute-0 podman[498085]: 2025-10-11 03:05:11.537920994 +0000 UTC m=+0.139072241 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, managed_by=edpm_ansible, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, config_id=iscsid, container_name=iscsid, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 03:05:11 compute-0 podman[498084]: 2025-10-11 03:05:11.547127854 +0000 UTC m=+0.153781555 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=multipathd, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, config_id=multipathd, managed_by=edpm_ansible)
Oct 11 03:05:11 compute-0 sudo[498149]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Oct 11 03:05:11 compute-0 sudo[498149]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:11 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2632: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:11 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:05:12 compute-0 sudo[498149]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 03:05:12 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:05:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.admin"} v 0) v1
Oct 11 03:05:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 03:05:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/osd_remove_queue}] v 0) v1
Oct 11 03:05:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:05:12 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 0a6218ff-b684-4ffa-973e-5fedc775779a does not exist
Oct 11 03:05:12 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 86b7fd60-1e63-47b9-82d9-a403805c5be4 does not exist
Oct 11 03:05:12 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 328ab60b-3207-47f5-91bb-4801e74382d6 does not exist
Oct 11 03:05:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "states": ["destroyed"], "format": "json"} v 0) v1
Oct 11 03:05:12 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 03:05:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "auth get", "entity": "client.bootstrap-osd"} v 0) v1
Oct 11 03:05:12 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 03:05:12 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 03:05:12 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:05:12 compute-0 sudo[498205]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:05:12 compute-0 sudo[498205]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:12 compute-0 sudo[498205]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:12 compute-0 sudo[498230]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:05:12 compute-0 sudo[498230]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:12 compute-0 sudo[498230]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:12 compute-0 sudo[498255]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:05:12 compute-0 sudo[498255]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:12 compute-0 sudo[498255]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:12 compute-0 ceph-mon[191930]: pgmap v2632: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:05:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
Oct 11 03:05:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:05:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
Oct 11 03:05:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
Oct 11 03:05:12 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:05:13 compute-0 sudo[498280]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --yes --no-systemd
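[editor's note] The sudo audit line above is cephadm invoking ceph-volume to batch-build OSDs out of three pre-created logical volumes. The fsid and device list can be pulled straight out of such a line; a small sketch, with the regex fitted to this one line shape and the cephadm digest path shortened in the sample string:

# Extract the fsid and target devices from the cephadm/ceph-volume
# sudo audit line above (sample command shortened).
import re

CMD = ("/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/"
       "cephadm.31206ab --timeout 895 ceph-volume "
       "--fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da --config-json - -- "
       "lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 "
       "/dev/ceph_vg2/ceph_lv2 --yes --no-systemd")

fsid = re.search(r'--fsid ([0-9a-f-]+)', CMD).group(1)
devices = re.findall(r'/dev/\S+', CMD)
print(fsid, devices)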
Oct 11 03:05:13 compute-0 sudo[498280]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:13 compute-0 nova_compute[356901]: 2025-10-11 03:05:13.247 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:13 compute-0 podman[498345]: 2025-10-11 03:05:13.652557671 +0000 UTC m=+0.110570188 container create c60feef64ee6df6027987cd20d22e8c6f4dd632db7d75ac393632b263d74851a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_vaughan, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 03:05:13 compute-0 podman[498345]: 2025-10-11 03:05:13.604000378 +0000 UTC m=+0.062012995 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:05:13 compute-0 systemd[1]: Started libpod-conmon-c60feef64ee6df6027987cd20d22e8c6f4dd632db7d75ac393632b263d74851a.scope.
Oct 11 03:05:13 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2633: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:13 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:05:13 compute-0 podman[498345]: 2025-10-11 03:05:13.833480477 +0000 UTC m=+0.291493004 container init c60feef64ee6df6027987cd20d22e8c6f4dd632db7d75ac393632b263d74851a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_vaughan, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 03:05:13 compute-0 podman[498345]: 2025-10-11 03:05:13.844207747 +0000 UTC m=+0.302220274 container start c60feef64ee6df6027987cd20d22e8c6f4dd632db7d75ac393632b263d74851a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_vaughan, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 03:05:13 compute-0 podman[498345]: 2025-10-11 03:05:13.850541586 +0000 UTC m=+0.308554103 container attach c60feef64ee6df6027987cd20d22e8c6f4dd632db7d75ac393632b263d74851a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_vaughan, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 03:05:13 compute-0 frosty_vaughan[498361]: 167 167
Oct 11 03:05:13 compute-0 systemd[1]: libpod-c60feef64ee6df6027987cd20d22e8c6f4dd632db7d75ac393632b263d74851a.scope: Deactivated successfully.
Oct 11 03:05:13 compute-0 conmon[498361]: conmon c60feef64ee6df602798 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-c60feef64ee6df6027987cd20d22e8c6f4dd632db7d75ac393632b263d74851a.scope/container/memory.events
Oct 11 03:05:13 compute-0 podman[498345]: 2025-10-11 03:05:13.857041743 +0000 UTC m=+0.315054260 container died c60feef64ee6df6027987cd20d22e8c6f4dd632db7d75ac393632b263d74851a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_vaughan, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3)
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.879 14 DEBUG ceilometer.polling.manager [-] The number of pollsters in source [pollsters] is bigger than the number of worker threads available to execute them; the polling process can therefore be expected to take longer than usual. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:253
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.880 14 DEBUG ceilometer.polling.manager [-] Processing pollsters for [pollsters] with [1] threads. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:262
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.880 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.882 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesPollster object at 0x7f5b2e0db800>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.883 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.884 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.885 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.885 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.885 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.885 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.886 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.886 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.887 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.887 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.887 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd40>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.887 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.888 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.888 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.888 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.888 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.888 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.889 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.889 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.890 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.891 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.891 14 DEBUG ceilometer.polling.manager [-] Registering pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbfe0>] from source [pollsters] to be executed via executor [<concurrent.futures.thread.ThreadPoolExecutor object at 0x7f5b2df21190>] with cache [{}], pollster history [{}], and discovery cache [{}]. register_pollster_execution /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:276
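The burst of near-identical "Registering pollster" lines above is the agent walking its plugin catalogue: each meter is a stevedore Extension loaded from a setuptools entry-point namespace, and every one is bound to the same shared ThreadPoolExecutor, as the repeated "via executor [<...ThreadPoolExecutor...>]" text shows. A minimal sketch of that loading pattern, assuming only stevedore's public API (the max_workers value and the dict shape are illustrative, not ceilometer's internals):

    from concurrent.futures import ThreadPoolExecutor
    from stevedore import extension

    # Load the plugins advertised under an entry-point namespace;
    # 'ceilometer.poll.compute' is the namespace the compute agent's
    # pollsters are published under. With ceilometer absent this simply
    # yields no plugins rather than failing.
    mgr = extension.ExtensionManager(
        namespace='ceilometer.poll.compute',
        invoke_on_load=False,
    )

    # One shared executor for every pollster, as in the log above.
    executor = ThreadPoolExecutor(max_workers=4)

    registered = {ext.name: (ext.plugin, executor) for ext in mgr}
    print(sorted(registered))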
Oct 11 03:05:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-69a81dee430eadead3a8307a39b1eb54c111e6d5aff0ee8b22b5586966c5e94b-merged.mount: Deactivated successfully.
Oct 11 03:05:13 compute-0 podman[498345]: 2025-10-11 03:05:13.94459442 +0000 UTC m=+0.402606937 container remove c60feef64ee6df6027987cd20d22e8c6f4dd632db7d75ac393632b263d74851a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=frosty_vaughan, org.label-schema.license=GPLv2, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 03:05:13 compute-0 systemd[1]: libpod-conmon-c60feef64ee6df6027987cd20d22e8c6f4dd632db7d75ac393632b263d74851a.scope: Deactivated successfully.
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.992 14 DEBUG ceilometer.compute.discovery [-] instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', 'name': 'test_0', 'flavor': {'id': '486e1451-345c-45d6-b075-f4717e759025', 'name': 'm1.small', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 1, 'swap': 0}, 'image': {'id': 'a7e4c8c4-d723-4c5b-b8f5-5568fde4bdb7'}, 'os_type': 'hvm', 'architecture': 'x86_64', 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', 'OS-EXT-SRV-ATTR:host': 'compute-0.ctlplane.example.com', 'OS-EXT-STS:vm_state': 'running', 'tenant_id': '97026531b3404a11869cb85a059c4a0d', 'user_id': 'd215f3ebbc07435493ccd666fc80109d', 'hostId': '2b4fa4499c2c57b15bb8caf80f2914a25f73987c47d26eab431cb736', 'status': 'active', 'metadata': {}} discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315
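The discovery line above dumps the complete instance record that subsequent samples are attributed to (instance id, flavor m1.small, libvirt domain instance-00000001, tenant and user ids). Because the payload is printed as a Python dict literal, it can be lifted out of the journal and parsed directly when troubleshooting; a small sketch, with the message truncated to a few fields for brevity:

    import ast

    # Shape of the journal message: "... instance data: {<dict>} <function> <path>"
    line = ("2025-10-11 03:05:13.992 14 DEBUG ceilometer.compute.discovery [-] "
            "instance data: {'id': '0cc56d17-ec3a-4408-bccb-91b29427379e', "
            "'name': 'test_0', 'flavor': {'name': 'm1.small', 'vcpus': 1, 'ram': 512}} "
            "discover_libvirt_polling /usr/lib/python3.12/site-packages/ceilometer/compute/discovery.py:315")

    start = line.index("instance data: ") + len("instance data: ")
    end = line.rindex("} ") + 1          # last closing brace of the dict literal
    info = ast.literal_eval(line[start:end])
    print(info["name"], info["flavor"]["vcpus"], "vCPU,", info["flavor"]["ram"], "MB RAM")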
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.993 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes in the context of pollsters
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.993 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.993 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db830>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.993 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:13 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:13.994 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes (2025-10-11T03:05:13.993499) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.000 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes volume: 2856 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.001 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes in the context of pollsters
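The seven lines above are one complete pollster pass: discovery of local instances, a coordination check (no hash ring is configured, so this agent keeps everything it discovered), a heartbeat update recorded by a sibling worker, the counter read itself (volume: 2856 cumulative bytes received), and a closing "Finished polling" line. A control-flow sketch of that pass; the function names and signatures are illustrative, not ceilometer's actual ones:

    import time

    def filter_by_hashring(resources):
        # Placeholder: with tooz coordination enabled, only the resources
        # this agent owns in the hash ring would survive. Not taken in this
        # log, where the hashrings are [None].
        return resources

    def run_pollster(name, discover, needs_coordination, poll, heartbeat):
        resources = discover()                 # "Executing discovery process ..."
        if needs_coordination(name):           # "Checking if we need coordination ..."
            resources = filter_by_hashring(resources)
        heartbeat(name, time.time())           # "Pollster heartbeat update: <name>"
        return [poll(r) for r in resources]    # "<uuid>/<name> volume: <n>"

    samples = run_pollster(
        "network.incoming.bytes",
        discover=lambda: ["0cc56d17-ec3a-4408-bccb-91b29427379e"],
        needs_coordination=lambda name: False,
        poll=lambda inst: (inst, 2856),
        heartbeat=lambda name, ts: None,
    )
    print(samples)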
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.001 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingPacketsPollster object at 0x7f5b2e168080>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.002 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.002 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.002 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1680b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.002 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.003 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets volume: 25 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.003 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets (2025-10-11T03:05:14.002876) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.003 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.004 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingDropPollster object at 0x7f5b2e168110>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.004 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.004 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.004 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e168140>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.005 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.005 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.005 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.drop (2025-10-11T03:05:14.005044) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.005 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.drop in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.006 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingErrorsPollster object at 0x7f5b2e1681a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.006 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.006 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.006 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1681d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.007 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.007 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.007 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.packets.error (2025-10-11T03:05:14.007100) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.007 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.packets.error in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.008 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceCapacityPollster object at 0x7f5b2e0d97f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.008 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.capacity in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.008 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.008 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db290>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.009 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.capacity heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.009 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.capacity (2025-10-11T03:05:14.009138) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.054 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.056 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.057 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.capacity volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.058 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.capacity in the context of pollsters
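disk.device.capacity produces one sample per attached block device, which is why three volumes show up for a single instance here (two 1 GiB devices and a much smaller third one). libvirt exposes those figures through its block-info call; a sketch with the libvirt Python binding, using the domain name from the discovery record above (needs read access to the local libvirt socket):

    import libvirt
    from xml.etree import ElementTree as ET

    conn = libvirt.openReadOnly("qemu:///system")
    dom = conn.lookupByName("instance-00000001")

    # Enumerate the target device names from the domain XML, then query each.
    xml = ET.fromstring(dom.XMLDesc())
    for target in xml.findall("./devices/disk/target"):
        dev = target.get("dev")
        capacity, allocation, physical = dom.blockInfo(dev)
        print(dev, "capacity:", capacity)   # the per-device "volume:" figures

    conn.close()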
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.058 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadBytesPollster object at 0x7f5b2e0db1d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.059 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.059 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.059 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db2c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.059 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.061 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.bytes (2025-10-11T03:05:14.059765) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.128 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 23308800 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.128 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 3227648 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.129 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.bytes volume: 274786 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.129 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.bytes in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.129 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskReadLatencyPollster object at 0x7f5b2e0db2f0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.129 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.latency in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.129 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.130 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db320>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.130 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.130 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 1873916781 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.130 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 320672168 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.131 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.latency volume: 185829476 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.131 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.latency in the context of pollsters
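The disk.device.read.latency volumes (1873916781 and friends) are cumulative nanoseconds spent servicing reads per device since the domain started, not an instantaneous latency; they come from libvirt's extended block statistics. Reading the same counters directly, as a sketch (key names per libvirt's blockStatsFlags):

    import libvirt

    conn = libvirt.openReadOnly("qemu:///system")
    dom = conn.lookupByName("instance-00000001")

    stats = dom.blockStatsFlags("vda")               # per-device cumulative counters
    print("read bytes:   ", stats["rd_bytes"])       # -> disk.device.read.bytes
    print("read requests:", stats["rd_operations"])  # -> disk.device.read.requests
    print("read time, ns:", stats["rd_total_times"]) # -> disk.device.read.latency

    conn.close()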
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.131 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceReadRequestsPollster object at 0x7f5b2e0db350>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.131 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.read.requests in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.131 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.131 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.latency (2025-10-11T03:05:14.130096) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.131 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db380>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.131 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.read.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.131 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 840 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.132 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 173 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.132 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.read.requests volume: 109 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.132 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.read.requests in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.132 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDevicePhysicalPollster object at 0x7f5b2e0db3b0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.133 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.usage in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.133 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.133 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db3e0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.133 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.133 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.133 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.read.requests (2025-10-11T03:05:14.131795) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.133 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.133 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.usage volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.134 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.usage in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.134 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.usage (2025-10-11T03:05:14.133396) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.134 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteBytesPollster object at 0x7f5b2e0db410>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.134 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.134 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.134 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db440>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.134 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.134 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 41771008 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.135 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 512 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.135 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.bytes (2025-10-11T03:05:14.134849) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.135 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.bytes volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.135 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.bytes in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.136 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceDiskWriteLatencyPollster object at 0x7f5b2e0db470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.136 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.latency in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.136 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.136 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db4a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.136 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.latency heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.136 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 6143855966 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.136 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 32496400 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.136 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.latency volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.137 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.latency in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.137 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.PowerStatePollster object at 0x7f5b2e168470>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.137 14 INFO ceilometer.polling.manager [-] Polling pollster power.state in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.137 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.137 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e1684a0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.137 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: power.state heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.138 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.latency (2025-10-11T03:05:14.136412) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.138 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for power.state (2025-10-11T03:05:14.137771) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.168 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/power.state volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.169 14 INFO ceilometer.polling.manager [-] Finished polling pollster power.state in the context of pollsters
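power.state volume: 1 is the raw numeric libvirt domain state, and 1 is VIR_DOMAIN_RUNNING, consistent with the 'OS-EXT-STS:vm_state': 'running' field in the discovery payload. Checking the same value directly through the libvirt binding:

    import libvirt

    conn = libvirt.openReadOnly("qemu:///system")
    dom = conn.lookupByName("instance-00000001")

    state, reason = dom.state()
    print(state, state == libvirt.VIR_DOMAIN_RUNNING)   # 1 True for this instance

    conn.close()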
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.170 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceWriteRequestsPollster object at 0x7f5b2e0db4d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.170 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.write.requests in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.170 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.170 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db500>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.170 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.write.requests heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.170 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 233 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.170 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 1 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.170 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.write.requests volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.171 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.write.requests in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.171 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesDeltaPollster object at 0x7f5b2e0db860>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.171 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.bytes.delta in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.171 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.171 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbd10>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.171 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.171 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.172 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.bytes.delta in the context of pollsters
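network.incoming.bytes.delta reports the change in the cumulative counter since the previous poll, so a quiet interface yields volume: 0 even while the cumulative meter stands at 2856. The arithmetic, and its per-second cousin used by the .rate meters, as a sketch:

    def delta(prev_value, cur_value):
        """Change in a cumulative counter between two polls."""
        return cur_value - prev_value

    def rate(prev_value, prev_ts, cur_value, cur_ts):
        """Per-second rate over the same interval."""
        return (cur_value - prev_value) / (cur_ts - prev_ts)

    # Counter unchanged between two 300 s cycles -> delta 0, rate 0.0:
    print(delta(2856, 2856), rate(2856, 0.0, 2856, 300.0))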
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.172 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingBytesRatePollster object at 0x7f5b2e0dbb00>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.172 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.172 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.EphemeralSizePollster object at 0x7f5b2e0db530>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.172 14 INFO ceilometer.polling.manager [-] Polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.172 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.172 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db560>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.172 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.ephemeral.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.172 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.ephemeral.size in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.173 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingPacketsPollster object at 0x7f5b2e0dbd70>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.173 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.173 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.173 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbda0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.173 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.173 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets volume: 33 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.173 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.173 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesDeltaPollster object at 0x7f5b2e0dbf50>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.173 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.173 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.174 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b320115b0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.174 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes.delta heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.174 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes.delta volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.174 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes.delta in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.174 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.RootSizePollster object at 0x7f5b2e0db590>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.174 14 INFO ceilometer.polling.manager [-] Polling pollster disk.root.size in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.174 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.174 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db5c0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.174 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.root.size heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.175 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.root.size in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.175 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingDropPollster object at 0x7f5b2e0dbdd0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.175 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.175 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.175 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe00>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.175 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.drop heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.175 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.drop volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.175 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.drop in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.176 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.disk.PerDeviceAllocationPollster object at 0x7f5b3207d9d0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.176 14 INFO ceilometer.polling.manager [-] Polling pollster disk.device.allocation in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.176 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.176 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31b21e20>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.176 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: disk.device.allocation heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.176 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.176 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 1073741824 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.177 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/disk.device.allocation volume: 485376 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.177 14 INFO ceilometer.polling.manager [-] Finished polling pollster disk.device.allocation in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.177 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.write.requests (2025-10-11T03:05:14.170413) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.177 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.IncomingErrorsPollster object at 0x7f5b2e0dbe30>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.177 14 INFO ceilometer.polling.manager [-] Polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.178 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.177 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.bytes.delta (2025-10-11T03:05:14.171859) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.178 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbe60>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.178 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.incoming.packets.error heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.178 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.incoming.packets.error volume: 0 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.178 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.ephemeral.size (2025-10-11T03:05:14.172761) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.178 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets (2025-10-11T03:05:14.173355) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.178 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.incoming.packets.error in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.178 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes.delta (2025-10-11T03:05:14.174109) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.178 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.CPUPollster object at 0x7f5b2e0d97c0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.178 14 INFO ceilometer.polling.manager [-] Polling pollster cpu in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.178 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.root.size (2025-10-11T03:05:14.174824) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.178 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.178 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b31a94ef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.178 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.drop (2025-10-11T03:05:14.175578) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.178 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for disk.device.allocation (2025-10-11T03:05:14.176505) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.178 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: cpu heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.179 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/cpu volume: 76110000000 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.179 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.incoming.packets.error (2025-10-11T03:05:14.178131) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.179 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for cpu (2025-10-11T03:05:14.178901) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.179 14 INFO ceilometer.polling.manager [-] Finished polling pollster cpu in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.179 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesPollster object at 0x7f5b2e0dbec0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.179 14 INFO ceilometer.polling.manager [-] Polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.179 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.179 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0dbef0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.179 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: network.outgoing.bytes heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.179 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/network.outgoing.bytes volume: 2482 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.180 14 INFO ceilometer.polling.manager [-] Finished polling pollster network.outgoing.bytes in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.180 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.instance_stats.MemoryUsagePollster object at 0x7f5b2e0db7a0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.180 14 INFO ceilometer.polling.manager [-] Polling pollster memory.usage in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.180 14 DEBUG ceilometer.polling.manager [-] Checking if we need coordination for pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] with coordination group name [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:333
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.180 14 DEBUG ceilometer.polling.manager [-] The pollster [<stevedore.extension.Extension object at 0x7f5b2e0db7d0>] is not configured in a source for polling that requires coordination. The current hashrings are the following [None]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:355
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.180 14 DEBUG ceilometer.polling.manager [-] Pollster heartbeat update: memory.usage heartbeat /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:636
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.180 14 DEBUG ceilometer.compute.pollsters [-] 0cc56d17-ec3a-4408-bccb-91b29427379e/memory.usage volume: 48.80859375 _stats_to_sample /usr/lib/python3.12/site-packages/ceilometer/compute/pollsters/__init__.py:108
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.181 14 INFO ceilometer.polling.manager [-] Finished polling pollster memory.usage in the context of pollsters
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.181 14 DEBUG ceilometer.polling.manager [-] Executing discovery process for pollsters [<ceilometer.compute.pollsters.net.OutgoingBytesRatePollster object at 0x7f5b2e0dbfb0>] and discovery method [local_instances] via process [<bound method AgentManager.discover of <ceilometer.polling.manager.AgentManager object at 0x7f5b2f287b30>>]. _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:294
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.181 14 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes.rate, no new resources found this cycle _internal_pollster_run /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:321
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.181 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.182 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.182 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for network.outgoing.bytes (2025-10-11T03:05:14.179815) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.182 12 DEBUG ceilometer.polling.manager [-] Updated heartbeat for memory.usage (2025-10-11T03:05:14.180702) _update_status /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:502
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.182 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.182 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.183 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.capacity]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.183 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.183 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.183 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.read.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.183 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.184 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.184 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.latency]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.185 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [power.state]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.185 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.write.requests]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.186 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.187 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.187 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.ephemeral.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.187 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.187 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.delta]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.187 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.root.size]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.187 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.drop]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.187 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [disk.device.allocation]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.188 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.incoming.packets.error]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.188 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [cpu]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.188 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.188 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [memory.usage]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 ceilometer_agent_compute[367788]: 2025-10-11 03:05:14.188 14 DEBUG ceilometer.polling.manager [-] Finished processing pollster [network.outgoing.bytes.rate]. execute_polling_task_processing /usr/lib/python3.12/site-packages/ceilometer/polling/manager.py:272
Oct 11 03:05:14 compute-0 podman[498385]: 2025-10-11 03:05:14.199318734 +0000 UTC m=+0.081200980 container create 489d65d54d199e591649f012f11078d23c0fe4da07120493bcf7c564206a66df (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_hodgkin, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20250507)
Oct 11 03:05:14 compute-0 podman[498385]: 2025-10-11 03:05:14.179529384 +0000 UTC m=+0.061411640 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:05:14 compute-0 systemd[1]: Started libpod-conmon-489d65d54d199e591649f012f11078d23c0fe4da07120493bcf7c564206a66df.scope.
Oct 11 03:05:14 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:05:14 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1be9b8b89602f2d7ad5514b91abf0b6acde315913ae42ae4248ed970d071331d/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 03:05:14 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1be9b8b89602f2d7ad5514b91abf0b6acde315913ae42ae4248ed970d071331d/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 03:05:14 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1be9b8b89602f2d7ad5514b91abf0b6acde315913ae42ae4248ed970d071331d/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 03:05:14 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1be9b8b89602f2d7ad5514b91abf0b6acde315913ae42ae4248ed970d071331d/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 03:05:14 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/1be9b8b89602f2d7ad5514b91abf0b6acde315913ae42ae4248ed970d071331d/merged/var/lib/ceph/bootstrap-osd/ceph.keyring supports timestamps until 2038 (0x7fffffff)
Oct 11 03:05:14 compute-0 podman[498385]: 2025-10-11 03:05:14.37426916 +0000 UTC m=+0.256151436 container init 489d65d54d199e591649f012f11078d23c0fe4da07120493bcf7c564206a66df (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_hodgkin, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 03:05:14 compute-0 podman[498385]: 2025-10-11 03:05:14.396394055 +0000 UTC m=+0.278276351 container start 489d65d54d199e591649f012f11078d23c0fe4da07120493bcf7c564206a66df (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_hodgkin, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Oct 11 03:05:14 compute-0 podman[498385]: 2025-10-11 03:05:14.403364159 +0000 UTC m=+0.285246445 container attach 489d65d54d199e591649f012f11078d23c0fe4da07120493bcf7c564206a66df (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_hodgkin, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 03:05:14 compute-0 ceph-mon[191930]: pgmap v2633: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:15 compute-0 sshd-session[498420]: Accepted publickey for zuul from 192.168.122.10 port 36360 ssh2: ECDSA SHA256:0vbtJs26HLL/qqa+KGG1tXbm5W4qBuoqkhT/swdtB6w
Oct 11 03:05:15 compute-0 systemd-logind[804]: New session 68 of user zuul.
Oct 11 03:05:15 compute-0 systemd[1]: Started Session 68 of User zuul.
Oct 11 03:05:15 compute-0 sshd-session[498420]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Oct 11 03:05:15 compute-0 intelligent_hodgkin[498401]: --> passed data devices: 0 physical, 3 LVM
Oct 11 03:05:15 compute-0 intelligent_hodgkin[498401]: --> relative data size: 1.0
Oct 11 03:05:15 compute-0 intelligent_hodgkin[498401]: --> All data devices are unavailable
Oct 11 03:05:15 compute-0 systemd[1]: libpod-489d65d54d199e591649f012f11078d23c0fe4da07120493bcf7c564206a66df.scope: Deactivated successfully.
Oct 11 03:05:15 compute-0 systemd[1]: libpod-489d65d54d199e591649f012f11078d23c0fe4da07120493bcf7c564206a66df.scope: Consumed 1.254s CPU time.
Oct 11 03:05:15 compute-0 podman[498385]: 2025-10-11 03:05:15.705459992 +0000 UTC m=+1.587342248 container died 489d65d54d199e591649f012f11078d23c0fe4da07120493bcf7c564206a66df (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_hodgkin, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 03:05:15 compute-0 sudo[498432]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt'
Oct 11 03:05:15 compute-0 systemd[1]: var-lib-containers-storage-overlay-1be9b8b89602f2d7ad5514b91abf0b6acde315913ae42ae4248ed970d071331d-merged.mount: Deactivated successfully.
Oct 11 03:05:15 compute-0 sudo[498432]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 03:05:15 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2634: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:15 compute-0 podman[498385]: 2025-10-11 03:05:15.797649879 +0000 UTC m=+1.679532135 container remove 489d65d54d199e591649f012f11078d23c0fe4da07120493bcf7c564206a66df (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=intelligent_hodgkin, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=reef, OSD_FLAVOR=default)
Oct 11 03:05:15 compute-0 systemd[1]: libpod-conmon-489d65d54d199e591649f012f11078d23c0fe4da07120493bcf7c564206a66df.scope: Deactivated successfully.
Oct 11 03:05:15 compute-0 sudo[498280]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:15 compute-0 sudo[498472]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:05:15 compute-0 sudo[498472]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:15 compute-0 sudo[498472]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:16 compute-0 nova_compute[356901]: 2025-10-11 03:05:16.055 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:16 compute-0 sudo[498500]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:05:16 compute-0 sudo[498500]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:16 compute-0 sudo[498500]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:16 compute-0 ceph-mon[191930]: pgmap v2634: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:16 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:05:17 compute-0 sudo[498528]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:05:17 compute-0 sudo[498528]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:17 compute-0 sudo[498528]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:17 compute-0 sudo[498568]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- lvm list --format json
Oct 11 03:05:17 compute-0 sudo[498568]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:17 compute-0 podman[498664]: 2025-10-11 03:05:17.769263581 +0000 UTC m=+0.064192861 container create 59ff37b98172fe49ede14fc26056f9c8f068eedb648c09c9c4fc878669e58927 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_jemison, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 03:05:17 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2635: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:17 compute-0 systemd[1]: Started libpod-conmon-59ff37b98172fe49ede14fc26056f9c8f068eedb648c09c9c4fc878669e58927.scope.
Oct 11 03:05:17 compute-0 podman[498664]: 2025-10-11 03:05:17.742484396 +0000 UTC m=+0.037413726 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:05:17 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:05:17 compute-0 podman[498664]: 2025-10-11 03:05:17.902190331 +0000 UTC m=+0.197119651 container init 59ff37b98172fe49ede14fc26056f9c8f068eedb648c09c9c4fc878669e58927 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_jemison, io.buildah.version=1.39.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, ceph=True, CEPH_REF=reef)
Oct 11 03:05:17 compute-0 podman[498664]: 2025-10-11 03:05:17.915292936 +0000 UTC m=+0.210222236 container start 59ff37b98172fe49ede14fc26056f9c8f068eedb648c09c9c4fc878669e58927 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_jemison, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Oct 11 03:05:17 compute-0 podman[498664]: 2025-10-11 03:05:17.921222697 +0000 UTC m=+0.216151987 container attach 59ff37b98172fe49ede14fc26056f9c8f068eedb648c09c9c4fc878669e58927 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_jemison, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 03:05:17 compute-0 suspicious_jemison[498697]: 167 167
Oct 11 03:05:17 compute-0 systemd[1]: libpod-59ff37b98172fe49ede14fc26056f9c8f068eedb648c09c9c4fc878669e58927.scope: Deactivated successfully.
Oct 11 03:05:17 compute-0 conmon[498697]: conmon 59ff37b98172fe49ede1 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-59ff37b98172fe49ede14fc26056f9c8f068eedb648c09c9c4fc878669e58927.scope/container/memory.events
Oct 11 03:05:17 compute-0 podman[498664]: 2025-10-11 03:05:17.927523236 +0000 UTC m=+0.222452526 container died 59ff37b98172fe49ede14fc26056f9c8f068eedb648c09c9c4fc878669e58927 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_jemison, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, OSD_FLAVOR=default, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 03:05:17 compute-0 systemd[1]: var-lib-containers-storage-overlay-501a772504eeec4a9846ac3a7eafbb5f5e3f771216c28c5d7e5fba2062abfb14-merged.mount: Deactivated successfully.
Oct 11 03:05:17 compute-0 podman[498664]: 2025-10-11 03:05:17.993019547 +0000 UTC m=+0.287948877 container remove 59ff37b98172fe49ede14fc26056f9c8f068eedb648c09c9c4fc878669e58927 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=suspicious_jemison, CEPH_REF=reef, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0)
Oct 11 03:05:18 compute-0 systemd[1]: libpod-conmon-59ff37b98172fe49ede14fc26056f9c8f068eedb648c09c9c4fc878669e58927.scope: Deactivated successfully.
Oct 11 03:05:18 compute-0 nova_compute[356901]: 2025-10-11 03:05:18.254 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:18 compute-0 podman[498729]: 2025-10-11 03:05:18.289802346 +0000 UTC m=+0.078875575 container create 472ba46350cd6a2d24693d2169f5a875ab7120a26418a613aae82ecbb572d89b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_nash, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20250507)
Oct 11 03:05:18 compute-0 systemd[1]: Started libpod-conmon-472ba46350cd6a2d24693d2169f5a875ab7120a26418a613aae82ecbb572d89b.scope.
Oct 11 03:05:18 compute-0 podman[498729]: 2025-10-11 03:05:18.258081183 +0000 UTC m=+0.047154432 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:05:18 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:05:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/157241a86884caa17868dfce84c6d1f8531441b27e739bbe90bca21c86789a82/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 03:05:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/157241a86884caa17868dfce84c6d1f8531441b27e739bbe90bca21c86789a82/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 03:05:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/157241a86884caa17868dfce84c6d1f8531441b27e739bbe90bca21c86789a82/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 03:05:18 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/157241a86884caa17868dfce84c6d1f8531441b27e739bbe90bca21c86789a82/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 03:05:18 compute-0 podman[498729]: 2025-10-11 03:05:18.41752858 +0000 UTC m=+0.206601819 container init 472ba46350cd6a2d24693d2169f5a875ab7120a26418a613aae82ecbb572d89b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_nash, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, CEPH_REF=reef)
Oct 11 03:05:18 compute-0 podman[498729]: 2025-10-11 03:05:18.433739674 +0000 UTC m=+0.222812893 container start 472ba46350cd6a2d24693d2169f5a875ab7120a26418a613aae82ecbb572d89b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_nash, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, ceph=True, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)
Oct 11 03:05:18 compute-0 podman[498729]: 2025-10-11 03:05:18.438324304 +0000 UTC m=+0.227397523 container attach 472ba46350cd6a2d24693d2169f5a875ab7120a26418a613aae82ecbb572d89b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_nash, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2)
Oct 11 03:05:18 compute-0 podman[498744]: 2025-10-11 03:05:18.468965914 +0000 UTC m=+0.115130348 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, architecture=x86_64, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-type=git, name=ubi9-minimal, url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., build-date=2025-08-20T13:12:41, io.openshift.expose-services=, managed_by=edpm_ansible, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, config_id=edpm, version=9.6, io.buildah.version=1.33.7, com.redhat.component=ubi9-minimal-container, release=1755695350, container_name=openstack_network_exporter, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.)
Oct 11 03:05:18 compute-0 podman[498745]: 2025-10-11 03:05:18.488771514 +0000 UTC m=+0.137301363 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']})
Oct 11 03:05:18 compute-0 podman[498743]: 2025-10-11 03:05:18.4970502 +0000 UTC m=+0.142466209 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']}, container_name=ceilometer_agent_ipmi, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, config_id=edpm, managed_by=edpm_ansible, org.label-schema.build-date=20251009)
Oct 11 03:05:18 compute-0 ceph-mon[191930]: pgmap v2635: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:19 compute-0 funny_nash[498760]: {
Oct 11 03:05:19 compute-0 funny_nash[498760]:     "0": [
Oct 11 03:05:19 compute-0 funny_nash[498760]:         {
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "devices": [
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "/dev/loop3"
Oct 11 03:05:19 compute-0 funny_nash[498760]:             ],
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "lv_name": "ceph_lv0",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "lv_path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "lv_size": "21470642176",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "lv_tags": "ceph.block_device=/dev/ceph_vg0/ceph_lv0,ceph.block_uuid=rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a9c7940d-c154-46ef-9c18-8ba55dddd3d6,ceph.osd_id=0,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "lv_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "name": "ceph_lv0",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "path": "/dev/ceph_vg0/ceph_lv0",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "tags": {
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.block_device": "/dev/ceph_vg0/ceph_lv0",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.block_uuid": "rTODFn-h40M-YJfM-rluY-oarn-xriK-MMzByw",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.cluster_name": "ceph",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.crush_device_class": "",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.encrypted": "0",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.osd_fsid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.osd_id": "0",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.type": "block",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.vdo": "0"
Oct 11 03:05:19 compute-0 funny_nash[498760]:             },
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "type": "block",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "vg_name": "ceph_vg0"
Oct 11 03:05:19 compute-0 funny_nash[498760]:         }
Oct 11 03:05:19 compute-0 funny_nash[498760]:     ],
Oct 11 03:05:19 compute-0 funny_nash[498760]:     "1": [
Oct 11 03:05:19 compute-0 funny_nash[498760]:         {
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "devices": [
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "/dev/loop4"
Oct 11 03:05:19 compute-0 funny_nash[498760]:             ],
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "lv_name": "ceph_lv1",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "lv_path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "lv_size": "21470642176",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "lv_tags": "ceph.block_device=/dev/ceph_vg1/ceph_lv1,ceph.block_uuid=4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=6af45214-b1a1-4565-9175-30c80d9ec207,ceph.osd_id=1,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "lv_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "name": "ceph_lv1",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "path": "/dev/ceph_vg1/ceph_lv1",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "tags": {
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.block_device": "/dev/ceph_vg1/ceph_lv1",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.block_uuid": "4xZCE9-KurM-jT9v-1MW0-2F9l-dk1h-ypuwSp",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.cluster_name": "ceph",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.crush_device_class": "",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.encrypted": "0",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.osd_fsid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.osd_id": "1",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.type": "block",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.vdo": "0"
Oct 11 03:05:19 compute-0 funny_nash[498760]:             },
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "type": "block",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "vg_name": "ceph_vg1"
Oct 11 03:05:19 compute-0 funny_nash[498760]:         }
Oct 11 03:05:19 compute-0 funny_nash[498760]:     ],
Oct 11 03:05:19 compute-0 funny_nash[498760]:     "2": [
Oct 11 03:05:19 compute-0 funny_nash[498760]:         {
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "devices": [
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "/dev/loop5"
Oct 11 03:05:19 compute-0 funny_nash[498760]:             ],
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "lv_name": "ceph_lv2",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "lv_path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "lv_size": "21470642176",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "lv_tags": "ceph.block_device=/dev/ceph_vg2/ceph_lv2,ceph.block_uuid=jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=3c7617c3-7a20-523e-a9de-20c0d6ba41da,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=8fabd243-1f3b-4c55-a0cd-bf4f8313cb83,ceph.osd_id=2,ceph.osdspec_affinity=default_drive_group,ceph.type=block,ceph.vdo=0",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "lv_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "name": "ceph_lv2",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "path": "/dev/ceph_vg2/ceph_lv2",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "tags": {
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.block_device": "/dev/ceph_vg2/ceph_lv2",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.block_uuid": "jb6sJT-rPTr-6yv2-yoKT-eIzt-y5ve-xmTusX",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.cephx_lockbox_secret": "",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.cluster_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.cluster_name": "ceph",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.crush_device_class": "",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.encrypted": "0",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.osd_fsid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.osd_id": "2",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.osdspec_affinity": "default_drive_group",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.type": "block",
Oct 11 03:05:19 compute-0 funny_nash[498760]:                 "ceph.vdo": "0"
Oct 11 03:05:19 compute-0 funny_nash[498760]:             },
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "type": "block",
Oct 11 03:05:19 compute-0 funny_nash[498760]:             "vg_name": "ceph_vg2"
Oct 11 03:05:19 compute-0 funny_nash[498760]:         }
Oct 11 03:05:19 compute-0 funny_nash[498760]:     ]
Oct 11 03:05:19 compute-0 funny_nash[498760]: }
Oct 11 03:05:19 compute-0 podman[498729]: 2025-10-11 03:05:19.244858258 +0000 UTC m=+1.033931487 container died 472ba46350cd6a2d24693d2169f5a875ab7120a26418a613aae82ecbb572d89b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_nash, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20250507, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 03:05:19 compute-0 systemd[1]: libpod-472ba46350cd6a2d24693d2169f5a875ab7120a26418a613aae82ecbb572d89b.scope: Deactivated successfully.
Oct 11 03:05:19 compute-0 systemd[1]: var-lib-containers-storage-overlay-157241a86884caa17868dfce84c6d1f8531441b27e739bbe90bca21c86789a82-merged.mount: Deactivated successfully.
Oct 11 03:05:19 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15839 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:19 compute-0 podman[498729]: 2025-10-11 03:05:19.35308244 +0000 UTC m=+1.142155669 container remove 472ba46350cd6a2d24693d2169f5a875ab7120a26418a613aae82ecbb572d89b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=funny_nash, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 03:05:19 compute-0 systemd[1]: libpod-conmon-472ba46350cd6a2d24693d2169f5a875ab7120a26418a613aae82ecbb572d89b.scope: Deactivated successfully.
Oct 11 03:05:19 compute-0 sudo[498568]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:19 compute-0 sudo[498892]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:05:19 compute-0 sudo[498892]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:19 compute-0 sudo[498892]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:19 compute-0 sudo[498921]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:05:19 compute-0 sudo[498921]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:19 compute-0 sudo[498921]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:19 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2636: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:19 compute-0 sudo[498950]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:05:19 compute-0 sudo[498950]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:19 compute-0 sudo[498950]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:19 compute-0 nova_compute[356901]: 2025-10-11 03:05:19.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:05:19 compute-0 sudo[498993]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/3c7617c3-7a20-523e-a9de-20c0d6ba41da/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --image quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0 --timeout 895 ceph-volume --fsid 3c7617c3-7a20-523e-a9de-20c0d6ba41da -- raw list --format json
Oct 11 03:05:19 compute-0 sudo[498993]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:19 compute-0 ceph-mon[191930]: from='client.15839 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:20 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15841 -' entity='client.admin' cmd=[{"prefix": "crash ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:20 compute-0 podman[499078]: 2025-10-11 03:05:20.478548148 +0000 UTC m=+0.082710820 container create 51a43f005bb47cb2d447c937aced4df2162ba593b6595aacaf3cbdeebde4dd8e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_kepler, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0)
Oct 11 03:05:20 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status"} v 0) v1
Oct 11 03:05:20 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3104657848' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
Oct 11 03:05:20 compute-0 podman[499078]: 2025-10-11 03:05:20.44314511 +0000 UTC m=+0.047307842 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:05:20 compute-0 systemd[1]: Started libpod-conmon-51a43f005bb47cb2d447c937aced4df2162ba593b6595aacaf3cbdeebde4dd8e.scope.
Oct 11 03:05:20 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:05:20 compute-0 podman[499078]: 2025-10-11 03:05:20.635502111 +0000 UTC m=+0.239664773 container init 51a43f005bb47cb2d447c937aced4df2162ba593b6595aacaf3cbdeebde4dd8e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_kepler, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef)
Oct 11 03:05:20 compute-0 podman[499078]: 2025-10-11 03:05:20.646507569 +0000 UTC m=+0.250670251 container start 51a43f005bb47cb2d447c937aced4df2162ba593b6595aacaf3cbdeebde4dd8e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_kepler, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3)
Oct 11 03:05:20 compute-0 dazzling_kepler[499096]: 167 167
Oct 11 03:05:20 compute-0 systemd[1]: libpod-51a43f005bb47cb2d447c937aced4df2162ba593b6595aacaf3cbdeebde4dd8e.scope: Deactivated successfully.
Oct 11 03:05:20 compute-0 podman[499078]: 2025-10-11 03:05:20.657624356 +0000 UTC m=+0.261787038 container attach 51a43f005bb47cb2d447c937aced4df2162ba593b6595aacaf3cbdeebde4dd8e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_kepler, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3)
Oct 11 03:05:20 compute-0 podman[499078]: 2025-10-11 03:05:20.658800108 +0000 UTC m=+0.262962780 container died 51a43f005bb47cb2d447c937aced4df2162ba593b6595aacaf3cbdeebde4dd8e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_kepler, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20250507, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3)
Oct 11 03:05:20 compute-0 systemd[1]: var-lib-containers-storage-overlay-ac7415e6a7bec06eb69f1872dcb163cf41d30b7931aac7fcbe489e07911ade12-merged.mount: Deactivated successfully.
Oct 11 03:05:20 compute-0 podman[499078]: 2025-10-11 03:05:20.727480479 +0000 UTC m=+0.331643121 container remove 51a43f005bb47cb2d447c937aced4df2162ba593b6595aacaf3cbdeebde4dd8e (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=dazzling_kepler, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, io.buildah.version=1.39.3, CEPH_REF=reef, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Oct 11 03:05:20 compute-0 systemd[1]: libpod-conmon-51a43f005bb47cb2d447c937aced4df2162ba593b6595aacaf3cbdeebde4dd8e.scope: Deactivated successfully.
Oct 11 03:05:20 compute-0 ceph-mon[191930]: pgmap v2636: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:20 compute-0 ceph-mon[191930]: from='client.15841 -' entity='client.admin' cmd=[{"prefix": "crash ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:20 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3104657848' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
Oct 11 03:05:21 compute-0 podman[499140]: 2025-10-11 03:05:21.015654874 +0000 UTC m=+0.087078661 container create 65e3be9e9cd5dc6841862b2c5fd3daed99e199d269e5fd338daabe54f189518b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_rubin, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Oct 11 03:05:21 compute-0 nova_compute[356901]: 2025-10-11 03:05:21.057 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:21 compute-0 podman[499140]: 2025-10-11 03:05:20.981835855 +0000 UTC m=+0.053259712 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Oct 11 03:05:21 compute-0 systemd[1]: Started libpod-conmon-65e3be9e9cd5dc6841862b2c5fd3daed99e199d269e5fd338daabe54f189518b.scope.
Oct 11 03:05:21 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:05:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6f9a1239dc6eb40ddbe121ad083347ec4cb893959e33a3f575bce4c9347832b5/merged/rootfs supports timestamps until 2038 (0x7fffffff)
Oct 11 03:05:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6f9a1239dc6eb40ddbe121ad083347ec4cb893959e33a3f575bce4c9347832b5/merged/var/log/ceph supports timestamps until 2038 (0x7fffffff)
Oct 11 03:05:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6f9a1239dc6eb40ddbe121ad083347ec4cb893959e33a3f575bce4c9347832b5/merged/etc/ceph/ceph.conf supports timestamps until 2038 (0x7fffffff)
Oct 11 03:05:21 compute-0 kernel: xfs filesystem being remounted at /var/lib/containers/storage/overlay/6f9a1239dc6eb40ddbe121ad083347ec4cb893959e33a3f575bce4c9347832b5/merged/var/lib/ceph/crash supports timestamps until 2038 (0x7fffffff)
Oct 11 03:05:21 compute-0 podman[499140]: 2025-10-11 03:05:21.153654151 +0000 UTC m=+0.225077968 container init 65e3be9e9cd5dc6841862b2c5fd3daed99e199d269e5fd338daabe54f189518b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_rubin, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, CEPH_REF=reef, io.buildah.version=1.39.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 03:05:21 compute-0 podman[499140]: 2025-10-11 03:05:21.180170068 +0000 UTC m=+0.251593885 container start 65e3be9e9cd5dc6841862b2c5fd3daed99e199d269e5fd338daabe54f189518b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_rubin, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=reef)
Oct 11 03:05:21 compute-0 podman[499140]: 2025-10-11 03:05:21.186678925 +0000 UTC m=+0.258102802 container attach 65e3be9e9cd5dc6841862b2c5fd3daed99e199d269e5fd338daabe54f189518b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_rubin, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9)
Oct 11 03:05:21 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2637: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:21 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:05:22 compute-0 blissful_rubin[499156]: {
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:     "6af45214-b1a1-4565-9175-30c80d9ec207": {
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:         "osd_id": 1,
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:         "osd_uuid": "6af45214-b1a1-4565-9175-30c80d9ec207",
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:         "type": "bluestore"
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:     },
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:     "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83": {
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:         "osd_id": 2,
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:         "osd_uuid": "8fabd243-1f3b-4c55-a0cd-bf4f8313cb83",
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:         "type": "bluestore"
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:     },
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:     "a9c7940d-c154-46ef-9c18-8ba55dddd3d6": {
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:         "ceph_fsid": "3c7617c3-7a20-523e-a9de-20c0d6ba41da",
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:         "osd_id": 0,
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:         "osd_uuid": "a9c7940d-c154-46ef-9c18-8ba55dddd3d6",
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:         "type": "bluestore"
Oct 11 03:05:22 compute-0 blissful_rubin[499156]:     }
Oct 11 03:05:22 compute-0 blissful_rubin[499156]: }
Oct 11 03:05:22 compute-0 systemd[1]: libpod-65e3be9e9cd5dc6841862b2c5fd3daed99e199d269e5fd338daabe54f189518b.scope: Deactivated successfully.
Oct 11 03:05:22 compute-0 systemd[1]: libpod-65e3be9e9cd5dc6841862b2c5fd3daed99e199d269e5fd338daabe54f189518b.scope: Consumed 1.178s CPU time.
Oct 11 03:05:22 compute-0 podman[499140]: 2025-10-11 03:05:22.378988146 +0000 UTC m=+1.450411933 container died 65e3be9e9cd5dc6841862b2c5fd3daed99e199d269e5fd338daabe54f189518b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_rubin, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=reef, org.label-schema.build-date=20250507)
Oct 11 03:05:22 compute-0 systemd[1]: var-lib-containers-storage-overlay-6f9a1239dc6eb40ddbe121ad083347ec4cb893959e33a3f575bce4c9347832b5-merged.mount: Deactivated successfully.
Oct 11 03:05:22 compute-0 podman[499140]: 2025-10-11 03:05:22.458835094 +0000 UTC m=+1.530258871 container remove 65e3be9e9cd5dc6841862b2c5fd3daed99e199d269e5fd338daabe54f189518b (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=blissful_rubin, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3)
Oct 11 03:05:22 compute-0 systemd[1]: libpod-conmon-65e3be9e9cd5dc6841862b2c5fd3daed99e199d269e5fd338daabe54f189518b.scope: Deactivated successfully.
Oct 11 03:05:22 compute-0 sudo[498993]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0.devices.0}] v 0) v1
Oct 11 03:05:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:05:22 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command([{prefix=config-key set, key=mgr/cephadm/host.compute-0}] v 0) v1
Oct 11 03:05:22 compute-0 ceph-mon[191930]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:05:22 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev 256aa2db-b008-47ff-9c71-87da2c692c49 does not exist
Oct 11 03:05:22 compute-0 ceph-mgr[192233]: [progress WARNING root] complete: ev dafcdf2c-4cd2-43a2-a8a1-0dc1d71e3c59 does not exist
Oct 11 03:05:22 compute-0 sudo[499206]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:05:22 compute-0 sudo[499206]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:22 compute-0 sudo[499206]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:22 compute-0 sudo[499231]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 03:05:22 compute-0 sudo[499231]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:05:22 compute-0 sudo[499231]: pam_unix(sudo:session): session closed for user root
Oct 11 03:05:22 compute-0 nova_compute[356901]: 2025-10-11 03:05:22.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:05:22 compute-0 ceph-mon[191930]: pgmap v2637: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:22 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:05:22 compute-0 ceph-mon[191930]: from='mgr.14130 192.168.122.100:0/3963629127' entity='mgr.compute-0.bzgmgr' 
Oct 11 03:05:23 compute-0 nova_compute[356901]: 2025-10-11 03:05:23.260 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:23 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2638: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:24 compute-0 ovs-vsctl[499291]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Oct 11 03:05:25 compute-0 ceph-mon[191930]: pgmap v2638: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:25 compute-0 virtqemud[153560]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Oct 11 03:05:25 compute-0 virtqemud[153560]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Oct 11 03:05:25 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2639: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:25 compute-0 virtqemud[153560]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Oct 11 03:05:26 compute-0 nova_compute[356901]: 2025-10-11 03:05:26.061 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:05:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:05:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:05:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:05:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:05:26 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:05:26 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: cache status {prefix=cache status} (starting...)
Oct 11 03:05:26 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: client ls {prefix=client ls} (starting...)
Oct 11 03:05:26 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:05:27 compute-0 ceph-mon[191930]: pgmap v2639: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:27 compute-0 lvm[499638]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Oct 11 03:05:27 compute-0 lvm[499638]: VG ceph_vg2 finished
Oct 11 03:05:27 compute-0 lvm[499656]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Oct 11 03:05:27 compute-0 lvm[499656]: VG ceph_vg0 finished
Oct 11 03:05:27 compute-0 lvm[499667]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Oct 11 03:05:27 compute-0 lvm[499667]: VG ceph_vg1 finished
Oct 11 03:05:27 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: damage ls {prefix=damage ls} (starting...)
Oct 11 03:05:27 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2640: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:27 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: dump loads {prefix=dump loads} (starting...)
Oct 11 03:05:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"df", "format":"json"} v 0) v1
Oct 11 03:05:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/644219709' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 03:05:27 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"} v 0) v1
Oct 11 03:05:27 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.10:0/644219709' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 03:05:27 compute-0 nova_compute[356901]: 2025-10-11 03:05:27.898 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:05:27 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: dump tree {prefix=dump tree,root=/} (starting...)
Oct 11 03:05:27 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15845 -' entity='client.admin' cmd=[{"prefix": "balancer eval", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/644219709' entity='client.openstack' cmd=[{"prefix":"df", "format":"json"}]: dispatch
Oct 11 03:05:28 compute-0 ceph-mon[191930]: from='client.? 192.168.122.10:0/644219709' entity='client.openstack' cmd=[{"prefix":"osd pool get-quota", "pool": "volumes", "format":"json"}]: dispatch
Oct 11 03:05:28 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: dump_blocked_ops {prefix=dump_blocked_ops} (starting...)
Oct 11 03:05:28 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: dump_historic_ops {prefix=dump_historic_ops} (starting...)
Oct 11 03:05:28 compute-0 nova_compute[356901]: 2025-10-11 03:05:28.264 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:28 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: dump_historic_ops_by_duration {prefix=dump_historic_ops_by_duration} (starting...)
Oct 11 03:05:28 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15851 -' entity='client.admin' cmd=[{"prefix": "balancer status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:28 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "report"} v 0) v1
Oct 11 03:05:28 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2798832336' entity='client.admin' cmd=[{"prefix": "report"}]: dispatch
Oct 11 03:05:28 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: dump_ops_in_flight {prefix=dump_ops_in_flight} (starting...)
Oct 11 03:05:28 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: get subtrees {prefix=get subtrees} (starting...)
Oct 11 03:05:28 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config generate-minimal-conf"} v 0) v1
Oct 11 03:05:28 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2132245564' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:05:29 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: ops {prefix=ops} (starting...)
Oct 11 03:05:29 compute-0 ceph-mon[191930]: pgmap v2640: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:29 compute-0 ceph-mon[191930]: from='client.15845 -' entity='client.admin' cmd=[{"prefix": "balancer eval", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:29 compute-0 ceph-mon[191930]: from='client.15851 -' entity='client.admin' cmd=[{"prefix": "balancer status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:29 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2798832336' entity='client.admin' cmd=[{"prefix": "report"}]: dispatch
Oct 11 03:05:29 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2132245564' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
Oct 11 03:05:29 compute-0 podman[499938]: 2025-10-11 03:05:29.212405624 +0000 UTC m=+0.099566240 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, version=9.4, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543, vcs-type=git, io.openshift.tags=base rhel9, build-date=2024-09-18T21:23:30, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, vendor=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, io.k8s.display-name=Red Hat Universal Base Image 9, name=ubi9, summary=Provides the latest release of Red Hat Universal Base Image 9., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release-0.7.12=, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, com.redhat.component=ubi9-container, io.buildah.version=1.29.0, maintainer=Red Hat, Inc., release=1214.1726694543, architecture=x86_64, container_name=kepler, config_id=edpm, managed_by=edpm_ansible)
Oct 11 03:05:29 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15859 -' entity='client.admin' cmd=[{"prefix": "healthcheck history ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:29 compute-0 ceph-mgr[192233]: mgr.server reply reply (95) Operation not supported Module 'prometheus' is not enabled/loaded (required by command 'healthcheck history ls'): use `ceph mgr module enable prometheus` to enable it
Oct 11 03:05:29 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T03:05:29.235+0000 7fe891be6640 -1 mgr.server reply reply (95) Operation not supported Module 'prometheus' is not enabled/loaded (required by command 'healthcheck history ls'): use `ceph mgr module enable prometheus` to enable it
Oct 11 03:05:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config log"} v 0) v1
Oct 11 03:05:29 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2302553291' entity='client.admin' cmd=[{"prefix": "config log"}]: dispatch
Oct 11 03:05:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "log last", "channel": "cephadm"} v 0) v1
Oct 11 03:05:29 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/315180206' entity='client.admin' cmd=[{"prefix": "log last", "channel": "cephadm"}]: dispatch
Oct 11 03:05:29 compute-0 podman[157119]: time="2025-10-11T03:05:29Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 03:05:29 compute-0 podman[157119]: @ - - [11/Oct/2025:03:05:29 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 03:05:29 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: session ls {prefix=session ls} (starting...)
Oct 11 03:05:29 compute-0 podman[157119]: @ - - [11/Oct/2025:03:05:29 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9105 "" "Go-http-client/1.1"
Oct 11 03:05:29 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2641: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:29 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config-key dump"} v 0) v1
Oct 11 03:05:29 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2349256958' entity='client.admin' cmd=[{"prefix": "config-key dump"}]: dispatch
Oct 11 03:05:29 compute-0 nova_compute[356901]: 2025-10-11 03:05:29.895 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:05:29 compute-0 ceph-mds[219472]: mds.cephfs.compute-0.mxkspn asok_command: status {prefix=status} (starting...)
Oct 11 03:05:30 compute-0 ceph-mon[191930]: from='client.15859 -' entity='client.admin' cmd=[{"prefix": "healthcheck history ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:30 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2302553291' entity='client.admin' cmd=[{"prefix": "config log"}]: dispatch
Oct 11 03:05:30 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/315180206' entity='client.admin' cmd=[{"prefix": "log last", "channel": "cephadm"}]: dispatch
Oct 11 03:05:30 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2349256958' entity='client.admin' cmd=[{"prefix": "config-key dump"}]: dispatch
Oct 11 03:05:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr dump"} v 0) v1
Oct 11 03:05:30 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3632022600' entity='client.admin' cmd=[{"prefix": "mgr dump"}]: dispatch
Oct 11 03:05:30 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15869 -' entity='client.admin' cmd=[{"prefix": "crash ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr metadata"} v 0) v1
Oct 11 03:05:30 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2661643455' entity='client.admin' cmd=[{"prefix": "mgr metadata"}]: dispatch
Oct 11 03:05:30 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15873 -' entity='client.admin' cmd=[{"prefix": "crash stat", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:30 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr module ls"} v 0) v1
Oct 11 03:05:30 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3915743378' entity='client.admin' cmd=[{"prefix": "mgr module ls"}]: dispatch
Oct 11 03:05:31 compute-0 ceph-mon[191930]: pgmap v2641: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:31 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3632022600' entity='client.admin' cmd=[{"prefix": "mgr dump"}]: dispatch
Oct 11 03:05:31 compute-0 ceph-mon[191930]: from='client.15869 -' entity='client.admin' cmd=[{"prefix": "crash ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:31 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2661643455' entity='client.admin' cmd=[{"prefix": "mgr metadata"}]: dispatch
Oct 11 03:05:31 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3915743378' entity='client.admin' cmd=[{"prefix": "mgr module ls"}]: dispatch
Oct 11 03:05:31 compute-0 nova_compute[356901]: 2025-10-11 03:05:31.063 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "features"} v 0) v1
Oct 11 03:05:31 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1049520232' entity='client.admin' cmd=[{"prefix": "features"}]: dispatch
Oct 11 03:05:31 compute-0 openstack_network_exporter[374316]: ERROR   03:05:31 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 03:05:31 compute-0 openstack_network_exporter[374316]: ERROR   03:05:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:05:31 compute-0 openstack_network_exporter[374316]: ERROR   03:05:31 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:05:31 compute-0 openstack_network_exporter[374316]: ERROR   03:05:31 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 03:05:31 compute-0 openstack_network_exporter[374316]: ERROR   03:05:31 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 03:05:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr services"} v 0) v1
Oct 11 03:05:31 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1720836489' entity='client.admin' cmd=[{"prefix": "mgr services"}]: dispatch
Oct 11 03:05:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "health", "detail": "detail"} v 0) v1
Oct 11 03:05:31 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1183913964' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
Oct 11 03:05:31 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2642: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr stat"} v 0) v1
Oct 11 03:05:31 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2944518741' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
Oct 11 03:05:31 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:05:32 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15885 -' entity='client.admin' cmd=[{"prefix": "insights", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:32 compute-0 ceph-mgr[192233]: mgr.server reply reply (95) Operation not supported Module 'insights' is not enabled/loaded (required by command 'insights'): use `ceph mgr module enable insights` to enable it
Oct 11 03:05:32 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T03:05:32.060+0000 7fe891be6640 -1 mgr.server reply reply (95) Operation not supported Module 'insights' is not enabled/loaded (required by command 'insights'): use `ceph mgr module enable insights` to enable it
Oct 11 03:05:32 compute-0 ceph-mon[191930]: from='client.15873 -' entity='client.admin' cmd=[{"prefix": "crash stat", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:32 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1049520232' entity='client.admin' cmd=[{"prefix": "features"}]: dispatch
Oct 11 03:05:32 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1720836489' entity='client.admin' cmd=[{"prefix": "mgr services"}]: dispatch
Oct 11 03:05:32 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1183913964' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
Oct 11 03:05:32 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2944518741' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
Oct 11 03:05:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr versions"} v 0) v1
Oct 11 03:05:32 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2867630325' entity='client.admin' cmd=[{"prefix": "mgr versions"}]: dispatch
Oct 11 03:05:32 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "log last", "num": 10000, "level": "debug", "channel": "audit"} v 0) v1
Oct 11 03:05:32 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3571683844' entity='client.admin' cmd=[{"prefix": "log last", "num": 10000, "level": "debug", "channel": "audit"}]: dispatch
Oct 11 03:05:32 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15891 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:32 compute-0 nova_compute[356901]: 2025-10-11 03:05:32.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:05:32 compute-0 nova_compute[356901]: 2025-10-11 03:05:32.896 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477
Oct 11 03:05:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "log last", "num": 10000, "level": "debug", "channel": "cluster"} v 0) v1
Oct 11 03:05:33 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1087046045' entity='client.admin' cmd=[{"prefix": "log last", "num": 10000, "level": "debug", "channel": "cluster"}]: dispatch
Oct 11 03:05:33 compute-0 ceph-mon[191930]: pgmap v2642: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:33 compute-0 ceph-mon[191930]: from='client.15885 -' entity='client.admin' cmd=[{"prefix": "insights", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:33 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2867630325' entity='client.admin' cmd=[{"prefix": "mgr versions"}]: dispatch
Oct 11 03:05:33 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3571683844' entity='client.admin' cmd=[{"prefix": "log last", "num": 10000, "level": "debug", "channel": "audit"}]: dispatch
Oct 11 03:05:33 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1087046045' entity='client.admin' cmd=[{"prefix": "log last", "num": 10000, "level": "debug", "channel": "cluster"}]: dispatch
Oct 11 03:05:33 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15895 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:33 compute-0 podman[500478]: 2025-10-11 03:05:33.215175419 +0000 UTC m=+0.100832251 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0)
Oct 11 03:05:33 compute-0 podman[500476]: 2025-10-11 03:05:33.21505157 +0000 UTC m=+0.102908428 container health_status c2b551c53987c38f980f140bc405476fffa266c855ecd60b6d89c724f43ec5d6 (image=quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested, name=ceilometer_agent_compute, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.4, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_data={'image': 'quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-compute:current-tested', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck compute', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute'}, 'volumes': ['/var/lib/openstack/config/telemetry:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, container_name=ceilometer_agent_compute, org.label-schema.build-date=20251007, org.label-schema.name=CentOS Stream 10 Base Image, tcib_build_tag=d674bdc5502e72c153d04cef014162b0)
Oct 11 03:05:33 compute-0 podman[500466]: 2025-10-11 03:05:33.228923389 +0000 UTC m=+0.127572086 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=edpm, container_name=podman_exporter)
Oct 11 03:05:33 compute-0 podman[500474]: 2025-10-11 03:05:33.241810575 +0000 UTC m=+0.135446865 container health_status 861aca443fe0529d844f3accee81755dd165c9e9ae49d55d9cd77ccac3b2d112 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 03:05:33 compute-0 nova_compute[356901]: 2025-10-11 03:05:33.266 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:33 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15899 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr dump"} v 0) v1
Oct 11 03:05:33 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3098009507' entity='client.admin' cmd=[{"prefix": "mgr dump"}]: dispatch
Oct 11 03:05:33 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2643: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1229082 data_alloc: 218103808 data_used: 15224832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:37.314055+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53be000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b53be000 session 0x5626b5831e00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114581504 unmapped: 20455424 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57f5000 session 0x5626b3035e00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:38.314354+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114294784 unmapped: 20742144 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57fb800 session 0x5626b3a3ed20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f9951000/0x0/0x4ffc00000, data 0x2061688/0x212c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5994c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:39.314699+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 16.210241318s of 16.285335541s, submitted: 6
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b5994c00 session 0x5626b3a3e5a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b3aafc00 session 0x5626b35921e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53be000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b53be000 session 0x5626b49614a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 21463040 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57f5000 session 0x5626b54b8f00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:40.314979+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57fb800 session 0x5626b3a3fa40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b7298000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b7298000 session 0x5626b4fc81e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b3aafc00 session 0x5626b54b8d20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53be000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b53be000 session 0x5626b4fc72c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113631232 unmapped: 21405696 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57f5000 session 0x5626b49b30e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57fb800 session 0x5626b593d2c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b7298400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b7298400 session 0x5626b58cab40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:41.315516+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b3aafc00 session 0x5626b4fc8d20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113664000 unmapped: 21372928 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1290692 data_alloc: 218103808 data_used: 22028288
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:42.315943+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53be000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b53be000 session 0x5626b5868d20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113737728 unmapped: 21299200 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57f5000 session 0x5626b30e2b40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:43.316408+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57fb800 session 0x5626b4cd14a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5817c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b5817c00 session 0x5626b3a93e00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113737728 unmapped: 21299200 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:44.316806+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113745920 unmapped: 21291008 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53be000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:45.317326+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113745920 unmapped: 21291008 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:46.317700+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113745920 unmapped: 21291008 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1290824 data_alloc: 218103808 data_used: 22028288
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:47.318129+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:48.318370+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:49.318629+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:50.319004+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:51.319496+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:52.319858+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1323944 data_alloc: 234881024 data_used: 25653248
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:53.320153+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:54.320637+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:55.321048+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3000.2 total, 600.0 interval
                                            Cumulative writes: 7165 writes, 28K keys, 7165 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.01 MB/s
                                            Cumulative WAL: 7165 writes, 1536 syncs, 4.66 writes per sync, written: 0.02 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 676 writes, 2157 keys, 676 commit groups, 1.0 writes per commit group, ingest: 2.05 MB, 0.00 MB/s
                                            Interval WAL: 676 writes, 300 syncs, 2.25 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:56.321439+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:57.321657+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1323944 data_alloc: 234881024 data_used: 25653248
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:58.322015+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:59.322285+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:00.322577+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:01.322987+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:02.323509+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1323944 data_alloc: 234881024 data_used: 25653248
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:03.323929+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f94c9000/0x0/0x4ffc00000, data 0x24e770a/0x25b5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:04.324431+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:05.324785+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:06.325131+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b3aafc00 session 0x5626b53f63c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 27.680980682s of 27.859249115s, submitted: 20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b53be000 session 0x5626b59a8960
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57f5000 session 0x5626b817e3c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113991680 unmapped: 21045248 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:07.325398+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1322884 data_alloc: 234881024 data_used: 25657344
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112541696 unmapped: 22495232 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:08.325829+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 ms_handle_reset con 0x5626b57fb800 session 0x5626b3c961e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112541696 unmapped: 22495232 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:09.326174+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b7299c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 heartbeat osd_stat(store_statfs(0x4f9951000/0x0/0x4ffc00000, data 0x2061688/0x212c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 112541696 unmapped: 22495232 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:10.326575+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 130 handle_osd_map epochs [131,131], i have 130, src has [1,131]
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 131 ms_handle_reset con 0x5626b7299c00 session 0x5626b4f9d680
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 28770304 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:11.327054+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 28770304 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:12.328508+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1187286 data_alloc: 218103808 data_used: 14143488
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 28770304 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:13.328900+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 131 heartbeat osd_stat(store_statfs(0x4fa0db000/0x0/0x4ffc00000, data 0x18d7849/0x19a2000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 28770304 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:14.329966+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 28770304 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:15.330189+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106266624 unmapped: 28770304 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:16.331488+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 131 heartbeat osd_stat(store_statfs(0x4fa0db000/0x0/0x4ffc00000, data 0x18d7849/0x19a2000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 131 handle_osd_map epochs [132,132], i have 131, src has [1,132]
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:17.331947+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106299392 unmapped: 28737536 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1190260 data_alloc: 218103808 data_used: 14143488
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:18.332583+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106299392 unmapped: 28737536 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:19.333057+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106299392 unmapped: 28737536 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:20.333496+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106299392 unmapped: 28737536 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 13.701698303s of 13.948337555s, submitted: 60
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:21.333923+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106299392 unmapped: 28737536 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:22.334478+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106307584 unmapped: 28729344 heap: 135036928 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1192976 data_alloc: 218103808 data_used: 14143488
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 132 heartbeat osd_stat(store_statfs(0x4fa0d7000/0x0/0x4ffc00000, data 0x18d92da/0x19a7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:23.334862+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106414080 unmapped: 37019648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:24.335384+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106414080 unmapped: 37019648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:25.335796+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106438656 unmapped: 36995072 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 132 handle_osd_map epochs [133,133], i have 132, src has [1,133]
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 133 ms_handle_reset con 0x5626b3aafc00 session 0x5626b30e1860
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:26.336210+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 133 ms_handle_reset con 0x5626b5817000 session 0x5626b59a81e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53be000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:27.336575+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106471424 unmapped: 36962304 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1251901 data_alloc: 218103808 data_used: 14151680
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:28.336925+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106471424 unmapped: 36962304 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 133 heartbeat osd_stat(store_statfs(0x4f98d3000/0x0/0x4ffc00000, data 0x20dae5c/0x21aa000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:29.337443+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106471424 unmapped: 36962304 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:30.337849+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106471424 unmapped: 36962304 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:31.338175+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106471424 unmapped: 36962304 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:32.338415+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106471424 unmapped: 36962304 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1251901 data_alloc: 218103808 data_used: 14151680
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5817000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 11.585621834s of 11.708389282s, submitted: 13
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:33.338667+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106471424 unmapped: 36962304 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 133 handle_osd_map epochs [133,134], i have 133, src has [1,134]
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 134 ms_handle_reset con 0x5626b5817000 session 0x5626b3c80000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 134 heartbeat osd_stat(store_statfs(0x4f98d0000/0x0/0x4ffc00000, data 0x20dca2d/0x21ad000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:34.339129+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:35.339370+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:36.339739+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:37.339989+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1200130 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 134 heartbeat osd_stat(store_statfs(0x4fa0d1000/0x0/0x4ffc00000, data 0x18dc9fa/0x19ab000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:38.340335+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 134 heartbeat osd_stat(store_statfs(0x4fa0d1000/0x0/0x4ffc00000, data 0x18dc9fa/0x19ab000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:39.340721+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:40.340953+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 134 handle_osd_map epochs [135,135], i have 134, src has [1,135]
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:41.341439+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:42.341734+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202928 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:43.342087+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:44.342457+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:45.342929+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:46.343225+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:47.343581+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202928 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:48.343984+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:49.344363+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:50.344631+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:51.344913+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:52.345152+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202928 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:53.345558+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:54.345882+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:55.346383+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:56.346737+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:57.347094+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202928 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:58.347347+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:59.347727+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:00.348025+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:01.348606+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:02.349627+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202928 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:03.350000+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:04.350455+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:05.350970+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:06.351213+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:07.351644+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202928 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:08.351873+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:09.352184+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:10.352558+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:11.352834+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:12.353205+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202928 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:13.353538+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:14.353907+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:15.354407+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:16.354745+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:17.355039+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202928 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:18.355494+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:19.355906+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0cf000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:20.356396+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 47.721370697s of 47.877182007s, submitted: 43
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:21.356811+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106586112 unmapped: 36847616 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:22.357059+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106602496 unmapped: 36831232 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:23.357361+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:24.357771+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:25.358162+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:26.358554+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:27.358966+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:28.359214+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:29.359673+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:30.359944+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:31.360497+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106635264 unmapped: 36798464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:32.360782+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106643456 unmapped: 36790272 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:33.361160+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106643456 unmapped: 36790272 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:34.361501+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106643456 unmapped: 36790272 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:35.361917+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:36.362410+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:37.362781+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:38.363089+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:39.363506+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:40.363915+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:41.364392+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:42.364960+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:43.365560+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:44.366488+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:45.366917+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:46.367132+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:47.367499+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:48.367760+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:49.367991+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:50.368326+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:51.368637+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:52.368973+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:53.369183+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:54.369505+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:55.369729+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:56.369949+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:57.370193+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:58.370411+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:59.370778+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:00.371096+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:01.371476+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:02.371854+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:03.372203+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:04.372592+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:05.372801+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:06.373041+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:07.373427+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:08.373748+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:09.373952+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:10.374193+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:11.375354+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:12.375711+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:13.376064+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:14.376499+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:15.376844+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:16.377371+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:17.377651+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:18.377867+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:19.378376+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:20.378755+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:21.379333+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:22.379782+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:23.380164+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:24.380483+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:25.380743+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:26.381018+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:27.381436+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:28.381731+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:29.382112+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:30.382489+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:31.382906+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:32.383353+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:33.383681+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:34.384127+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:35.384340+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:36.384701+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:37.384923+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:38.385204+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:39.385618+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:40.385971+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:41.386458+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:42.386705+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:43.387013+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:44.387454+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:45.387781+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:46.388059+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:47.388493+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:48.388709+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:49.388936+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:50.389294+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:51.389715+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:52.389939+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:53.390370+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106651648 unmapped: 36782080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:54.390580+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:55.390916+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:56.391302+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:57.391530+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:58.391779+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:59.392153+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:00.392394+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:01.392865+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:02.393295+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:03.393548+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:04.393962+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:05.394439+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:06.394647+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:07.394970+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:08.395317+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:09.395702+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:10.395962+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:11.396487+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:12.397118+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:13.397552+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:14.398016+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:15.398502+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:16.398897+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:17.399381+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:18.399802+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:19.400095+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:20.400684+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:21.401340+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:22.401796+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:23.402332+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:24.402664+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:25.402991+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:26.403443+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:27.403943+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:28.404526+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:29.404845+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:30.405176+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:31.405634+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:32.406033+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:33.406596+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:34.407030+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:35.407552+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:36.407871+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:37.408616+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:38.409173+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:39.409657+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:40.409872+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:41.410370+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:42.410751+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:43.411157+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:44.411384+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:45.411808+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:46.412198+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:47.412549+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:48.412817+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:49.413082+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:50.413339+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:51.413713+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:52.414376+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:53.414681+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:54.415083+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:55.415474+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:56.415814+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:57.416143+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:58.416578+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:59.417051+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:00.417480+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:01.417796+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:02.418127+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:03.418654+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:04.418902+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:05.419347+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:06.419690+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:07.419954+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:08.420464+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:09.421191+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:10.421452+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:11.421741+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:12.422068+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:13.422505+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:14.422866+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:15.423220+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:16.423642+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:17.423965+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:18.424362+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:19.424580+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:20.424861+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:21.425101+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:22.425491+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:23.425893+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:24.426433+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:25.426762+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:26.426994+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:27.427218+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:28.427551+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:29.427781+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:30.428174+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:31.428718+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:32.428975+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:33.429351+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106455040 unmapped: 36978688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1202048 data_alloc: 218103808 data_used: 14159872
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 heartbeat osd_stat(store_statfs(0x4fa0d0000/0x0/0x4ffc00000, data 0x18de45d/0x19ae000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:34.429666+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 193.622985840s of 194.207275391s, submitted: 90
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106463232 unmapped: 36970496 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:35.430010+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 handle_osd_map epochs [135,136], i have 135, src has [1,136]
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 135 handle_osd_map epochs [136,136], i have 136, src has [1,136]
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 136 ms_handle_reset con 0x5626b57f5000 session 0x5626b3034780
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106512384 unmapped: 36921344 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:36.430512+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106528768 unmapped: 36904960 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:37.430778+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 136 handle_osd_map epochs [137,137], i have 136, src has [1,137]
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b35925a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106528768 unmapped: 36904960 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:38.431057+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106528768 unmapped: 36904960 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:39.431638+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106528768 unmapped: 36904960 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:40.431895+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:41.432382+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:42.432769+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:43.433167+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:44.433635+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:45.433995+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:46.434430+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:47.434666+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:48.435028+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:49.435429+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:50.435729+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:51.436142+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:52.436401+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:53.436838+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:54.437142+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:55.437380+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:56.437703+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:57.437923+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106536960 unmapped: 36896768 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:58.438748+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:59.439122+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:00.439456+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:01.439801+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:02.440041+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:03.440356+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:04.440676+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:05.440904+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:06.441493+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:07.441925+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:08.442387+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:09.442747+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:10.443055+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:11.443350+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:12.443689+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:13.444101+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106545152 unmapped: 36888576 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:14.444342+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106553344 unmapped: 36880384 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:15.444534+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106553344 unmapped: 36880384 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:16.444765+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106553344 unmapped: 36880384 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:17.445030+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106553344 unmapped: 36880384 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:18.445424+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106553344 unmapped: 36880384 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:19.445805+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:20.446362+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106553344 unmapped: 36880384 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:21.446701+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:22.447108+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:23.447492+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:24.447825+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:25.448439+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:26.448642+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:27.448879+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:28.449148+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:29.449534+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1357166 data_alloc: 218103808 data_used: 14168064
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8846000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:30.449829+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b59cb000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b59cb000 session 0x5626b3a3ed20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b3aafc00 session 0x5626b56b6780
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b584a3c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:31.450112+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 106569728 unmapped: 36864000 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 56.384044647s of 56.646003723s, submitted: 30
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b4f9d0e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5817000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:32.450414+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113516544 unmapped: 29917184 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5817000 session 0x5626b4fc70e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b59cb000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b59cb000 session 0x5626b30e2d20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:33.450710+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b3aafc00 session 0x5626b59a83c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 29859840 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b3027a40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:34.451106+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 29859840 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1375902 data_alloc: 218103808 data_used: 20987904
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b817f0e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8848000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:35.451505+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 29859840 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5817000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5817000 session 0x5626b53f7e00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539e400 session 0x5626b5831c20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:36.451776+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 29859840 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:37.452080+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113573888 unmapped: 29859840 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:38.452467+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 29851648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8848000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:39.452760+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 29851648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1375902 data_alloc: 218103808 data_used: 20987904
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8848000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:40.453060+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 29851648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8848000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:41.453380+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 29851648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:42.453837+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b3aafc00 session 0x5626b30ca780
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113582080 unmapped: 29851648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b49b3680
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5817000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539f000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539f000 session 0x5626b817f680
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 10.559719086s of 11.683396339s, submitted: 4
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fd400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:43.454490+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 29835264 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f8848000/0x0/0x4ffc00000, data 0x2d51b9d/0x2e26000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:44.454723+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 113598464 unmapped: 29835264 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1377726 data_alloc: 218103808 data_used: 20987904
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:45.455067+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 121847808 unmapped: 21585920 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:46.455436+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 121241600 unmapped: 22192128 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539e400 session 0x5626b339b2c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b59cf000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b59cf000 session 0x5626b4f843c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:47.456027+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b3aafc00 session 0x5626b6f52f00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5817000 session 0x5626b6f53a40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115449856 unmapped: 27983872 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539f000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539f000 session 0x5626b6f52b40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b584be00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b3c823c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fd400 session 0x5626b5803680
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539f000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f7516000/0x0/0x4ffc00000, data 0x4082bad/0x4158000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:48.456366+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539f000 session 0x5626b3c82780
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b584a1e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5817000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b3c82000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115458048 unmapped: 27975680 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b59cf000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5817000 session 0x5626b339a780
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539f000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:49.456746+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115458048 unmapped: 27975680 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1546360 data_alloc: 218103808 data_used: 20992000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539e400 session 0x5626b584b680
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b817e1e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:50.457140+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b3aafc00 session 0x5626b3a3eb40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115466240 unmapped: 27967488 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fd400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fd400 session 0x5626b339b680
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b6f532c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b59cf000 session 0x5626b3c830e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:51.457582+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539f000 session 0x5626b339ab40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115466240 unmapped: 27967488 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b6f525a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b3aafc00 session 0x5626b3a3ef00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539e400 session 0x5626b3035e00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:52.457798+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f7516000/0x0/0x4ffc00000, data 0x4082bad/0x4158000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b3aafc00 session 0x5626b3f58960
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115466240 unmapped: 27967488 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539e400 session 0x5626b3372b40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:53.458210+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539f000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b3c6cf00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 7.995737553s of 10.312167168s, submitted: 42
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115433472 unmapped: 28000256 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b59cf000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b3c82b40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:54.458660+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fd400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b59ca800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114819072 unmapped: 28614656 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550106 data_alloc: 218103808 data_used: 20992000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539f000 session 0x5626b59a9680
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b59cf000 session 0x5626b5830000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f7513000/0x0/0x4ffc00000, data 0x4082bf0/0x415b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:55.458925+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fd400 session 0x5626b6f53a40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b59ca800 session 0x5626b59a9860
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114827264 unmapped: 28606464 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f7400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f9000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:56.459130+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5874800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e2800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114843648 unmapped: 28590080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f7513000/0x0/0x4ffc00000, data 0x4082bf0/0x415b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:57.459488+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114843648 unmapped: 28590080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:58.459688+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114712576 unmapped: 28721152 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:59.459874+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114712576 unmapped: 28721152 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1566614 data_alloc: 218103808 data_used: 23199744
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:00.460075+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 114720768 unmapped: 28712960 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:01.460328+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 115113984 unmapped: 28319744 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f7513000/0x0/0x4ffc00000, data 0x4082bf0/0x415b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:02.460494+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 117366784 unmapped: 26066944 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:03.460680+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 119152640 unmapped: 24281088 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:04.460952+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 119152640 unmapped: 24281088 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1620214 data_alloc: 234881024 data_used: 30789632
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:05.461143+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 119152640 unmapped: 24281088 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:06.461424+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 120389632 unmapped: 23044096 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:07.461624+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f7513000/0x0/0x4ffc00000, data 0x4082bf0/0x415b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 126394368 unmapped: 17039360 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:08.461829+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 126574592 unmapped: 16859136 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:09.462087+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 126623744 unmapped: 16809984 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1674134 data_alloc: 234881024 data_used: 38350848
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5874800 session 0x5626b3372000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b53e2800 session 0x5626b53f7e00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:10.464448+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fd400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 15.605598450s of 17.215515137s, submitted: 12
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 126599168 unmapped: 16834560 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fd400 session 0x5626b4cd1680
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:11.464825+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125960192 unmapped: 17473536 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:12.465144+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125960192 unmapped: 17473536 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b3c823c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f9000 session 0x5626b3c82780
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5874800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:13.465354+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f78df000/0x0/0x4ffc00000, data 0x3cb7be0/0x3d8f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123494400 unmapped: 19939328 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5874800 session 0x5626b53f74a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:14.465562+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b59ca800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b59ca800 session 0x5626b3c825a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b56b65a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f9000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f9000 session 0x5626b56b6b40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123510784 unmapped: 19922944 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fd400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fd400 session 0x5626b56b74a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1558501 data_alloc: 234881024 data_used: 30789632
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5874800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5874800 session 0x5626b56b7860
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b59cf000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b59cf000 session 0x5626b30cbe00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b4fc83c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f9000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:15.465755+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f9000 session 0x5626b4fc8000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fd400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fd400 session 0x5626b3a921e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123494400 unmapped: 19939328 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:16.465982+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123494400 unmapped: 19939328 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f74aa000/0x0/0x4ffc00000, data 0x40ecbe0/0x41c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:17.466203+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123494400 unmapped: 19939328 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:18.466426+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123494400 unmapped: 19939328 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:19.467269+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123494400 unmapped: 19939328 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1624982 data_alloc: 234881024 data_used: 30789632
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:20.467646+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f74aa000/0x0/0x4ffc00000, data 0x40ecbe0/0x41c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123387904 unmapped: 20045824 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:21.468091+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123387904 unmapped: 20045824 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f74aa000/0x0/0x4ffc00000, data 0x40ecbe0/0x41c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:22.468444+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123387904 unmapped: 20045824 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5874800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 12.408591270s of 12.638334274s, submitted: 29
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:23.468730+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5874800 session 0x5626b30cb4a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123731968 unmapped: 19701760 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e2c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:24.468936+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123740160 unmapped: 19693568 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1629553 data_alloc: 234881024 data_used: 30793728
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:25.469169+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123740160 unmapped: 19693568 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:26.469392+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123740160 unmapped: 19693568 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:27.469581+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 124067840 unmapped: 19365888 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f7480000/0x0/0x4ffc00000, data 0x4116be0/0x41ee000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:28.469806+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125206528 unmapped: 18227200 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f7480000/0x0/0x4ffc00000, data 0x4116be0/0x41ee000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:29.470152+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125206528 unmapped: 18227200 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1685393 data_alloc: 234881024 data_used: 36040704
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:30.470402+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125206528 unmapped: 18227200 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:31.470670+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125206528 unmapped: 18227200 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:32.470880+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125222912 unmapped: 18210816 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 9.958163261s of 10.014116287s, submitted: 8
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:33.471590+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132775936 unmapped: 10657792 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:34.471974+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129392640 unmapped: 14041088 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1773203 data_alloc: 234881024 data_used: 36089856
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f6a32000/0x0/0x4ffc00000, data 0x4b64be0/0x4c3c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:35.472297+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136060928 unmapped: 7372800 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:36.472528+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136347648 unmapped: 7086080 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:37.472816+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136060928 unmapped: 7372800 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:38.472996+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136110080 unmapped: 7323648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:39.473450+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136110080 unmapped: 7323648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1862199 data_alloc: 234881024 data_used: 38940672
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:40.473658+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136110080 unmapped: 7323648 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f62e1000/0x0/0x4ffc00000, data 0x52b3be0/0x538b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:41.473884+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f62e1000/0x0/0x4ffc00000, data 0x52b3be0/0x538b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136151040 unmapped: 7282688 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:42.474101+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136200192 unmapped: 7233536 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:43.474317+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 7208960 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:44.474635+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 7208960 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1857875 data_alloc: 234881024 data_used: 38940672
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:45.475019+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 7208960 heap: 143433728 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f3000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f3000 session 0x5626b6f52b40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b817e000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f9000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f9000 session 0x5626b3f58b40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fd400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fd400 session 0x5626b817ed20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:46.475295+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5874800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 11.096207619s of 13.221765518s, submitted: 164
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5874800 session 0x5626b3c832c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d55c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b2d55c00 session 0x5626b817e5a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b3c6c000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f9000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136298496 unmapped: 15540224 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f9000 session 0x5626b5868b40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fd400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fd400 session 0x5626b49601e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:47.475754+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5a4a000/0x0/0x4ffc00000, data 0x5b4cbe0/0x5c24000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136298496 unmapped: 15540224 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:48.476116+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136298496 unmapped: 15540224 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:49.476482+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136298496 unmapped: 15540224 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1921949 data_alloc: 234881024 data_used: 38940672
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:50.476690+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136298496 unmapped: 15540224 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:51.477121+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5a4a000/0x0/0x4ffc00000, data 0x5b4cbe0/0x5c24000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136306688 unmapped: 15532032 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b5874800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:52.477303+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b5874800 session 0x5626b2da9c20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136306688 unmapped: 15532032 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:53.477515+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136314880 unmapped: 15523840 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:54.477736+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5a4a000/0x0/0x4ffc00000, data 0x5b4cbe0/0x5c24000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136314880 unmapped: 15523840 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1921949 data_alloc: 234881024 data_used: 38940672
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:55.478148+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136314880 unmapped: 15523840 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:56.478349+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5a4a000/0x0/0x4ffc00000, data 0x5b4cbe0/0x5c24000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [1])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141860864 unmapped: 9977856 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:57.478561+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b2d72000 session 0x5626b3c6d0e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b2d72400 session 0x5626b3c83a40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f5000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5a4a000/0x0/0x4ffc00000, data 0x5b4cbe0/0x5c24000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 143032320 unmapped: 8806400 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 11.342640877s of 11.475764275s, submitted: 8
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:58.478816+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f5000 session 0x5626b56b6f00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137682944 unmapped: 14155776 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:59.479151+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137682944 unmapped: 14155776 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1860859 data_alloc: 234881024 data_used: 38952960
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:00.479511+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 138248192 unmapped: 13590528 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:01.479778+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137666560 unmapped: 14172160 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:02.480168+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137666560 unmapped: 14172160 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:03.480445+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5cbf000/0x0/0x4ffc00000, data 0x58d7be0/0x59af000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137707520 unmapped: 14131200 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:04.480885+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137707520 unmapped: 14131200 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1916679 data_alloc: 234881024 data_used: 39317504
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:05.481365+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137707520 unmapped: 14131200 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:06.481727+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5cbd000/0x0/0x4ffc00000, data 0x58d9be0/0x59b1000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137707520 unmapped: 14131200 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:07.482135+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137134080 unmapped: 14704640 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:08.482508+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137134080 unmapped: 14704640 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:09.482843+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b539e400 session 0x5626b3c801e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 11.344632149s of 11.581441879s, submitted: 51
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f7400 session 0x5626b5830960
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137134080 unmapped: 14704640 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1914803 data_alloc: 234881024 data_used: 39317504
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:10.483024+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57f9000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57f9000 session 0x5626b5803a40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137134080 unmapped: 14704640 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:11.483341+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137134080 unmapped: 14704640 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:12.483562+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5cbd000/0x0/0x4ffc00000, data 0x58d9be0/0x59b1000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137142272 unmapped: 14696448 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:13.484006+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 137158656 unmapped: 14680064 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5cbd000/0x0/0x4ffc00000, data 0x58d9be0/0x59b1000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:14.484201+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136921088 unmapped: 14917632 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1915495 data_alloc: 234881024 data_used: 39432192
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5cbd000/0x0/0x4ffc00000, data 0x58d9be0/0x59b1000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:15.484436+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136921088 unmapped: 14917632 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:16.484740+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136921088 unmapped: 14917632 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:17.484973+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136921088 unmapped: 14917632 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:18.485464+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b3aafc00 session 0x5626b59a8b40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b57fb800 session 0x5626b53f72c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 heartbeat osd_stat(store_statfs(0x4f5cbd000/0x0/0x4ffc00000, data 0x58d9be0/0x59b1000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136921088 unmapped: 14917632 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:19.485977+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 ms_handle_reset con 0x5626b2d72c00 session 0x5626b584ba40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 137 handle_osd_map epochs [138,138], i have 137, src has [1,138]
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 10.110903740s of 10.199507713s, submitted: 21
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 138 ms_handle_reset con 0x5626b2d73000 session 0x5626b30e1e00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133758976 unmapped: 18079744 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1782651 data_alloc: 234881024 data_used: 34406400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 138 ms_handle_reset con 0x5626b2d73400 session 0x5626b593d0e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:20.486175+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 138 ms_handle_reset con 0x5626b2d72c00 session 0x5626b593dc20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133775360 unmapped: 18063360 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:21.486436+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 138 heartbeat osd_stat(store_statfs(0x4f69f3000/0x0/0x4ffc00000, data 0x4b9fb6d/0x4c7a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 138 handle_osd_map epochs [139,139], i have 138, src has [1,139]
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133799936 unmapped: 18038784 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 139 ms_handle_reset con 0x5626b2d73000 session 0x5626b59a83c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:22.486669+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:23.487337+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:24.487861+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 139 heartbeat osd_stat(store_statfs(0x4f69f2000/0x0/0x4ffc00000, data 0x4ba132e/0x4c7b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1783260 data_alloc: 234881024 data_used: 34406400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:25.488161+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 139 heartbeat osd_stat(store_statfs(0x4f69f2000/0x0/0x4ffc00000, data 0x4ba132e/0x4c7b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:26.488553+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:27.488790+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:28.489189+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 139 heartbeat osd_stat(store_statfs(0x4f69f2000/0x0/0x4ffc00000, data 0x4ba132e/0x4c7b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:29.489626+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 139 heartbeat osd_stat(store_statfs(0x4f69f2000/0x0/0x4ffc00000, data 0x4ba132e/0x4c7b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 139 heartbeat osd_stat(store_statfs(0x4f69f2000/0x0/0x4ffc00000, data 0x4ba132e/0x4c7b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1783260 data_alloc: 234881024 data_used: 34406400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:30.490011+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133808128 unmapped: 18030592 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:31.490430+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 139 handle_osd_map epochs [140,140], i have 139, src has [1,140]
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 11.670613289s of 12.001146317s, submitted: 50
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133816320 unmapped: 18022400 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:32.490808+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f69ef000/0x0/0x4ffc00000, data 0x4ba2d91/0x4c7e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133816320 unmapped: 18022400 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:33.491469+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133824512 unmapped: 18014208 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:34.491801+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f69ef000/0x0/0x4ffc00000, data 0x4ba2d91/0x4c7e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133824512 unmapped: 18014208 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1786234 data_alloc: 234881024 data_used: 34406400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:35.492224+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133824512 unmapped: 18014208 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:36.492629+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133832704 unmapped: 18006016 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:37.492897+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f69ef000/0x0/0x4ffc00000, data 0x4ba2d91/0x4c7e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b53e2c00 session 0x5626b56b72c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b53e3800 session 0x5626b56b7a40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133840896 unmapped: 17997824 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:38.493095+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b3aafc00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f69f0000/0x0/0x4ffc00000, data 0x4ba2d91/0x4c7e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 128294912 unmapped: 23543808 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:39.493982+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b3aafc00 session 0x5626b404fe00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7816000/0x0/0x4ffc00000, data 0x3d7dd81/0x3e58000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 23887872 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:40.494414+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1610862 data_alloc: 234881024 data_used: 25632768
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7840000/0x0/0x4ffc00000, data 0x3d53d81/0x3e2e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 23887872 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:41.494639+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 23887872 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:42.495020+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 23887872 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:43.495422+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 23887872 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:44.495613+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 23887872 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:45.495863+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1610862 data_alloc: 234881024 data_used: 25632768
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 23887872 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:46.496122+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 14.696439743s of 14.932935715s, submitted: 57
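[Annotation] The periodic _kv_sync_thread utilization message reports how much of the sampling window the BlueStore key/value sync thread spent idle, plus the number of transactions it flushed. For the line above that works out to an idle fraction of about 98.4% at under 4 transactions per second; a small sketch of the arithmetic (the regex just encodes the wording visible in this log):

```python
import re

# Minimal sketch: extract the idle fraction from a "_kv_sync_thread
# utilization" line like the one above.
UTIL_RE = re.compile(r"idle (?P<idle>[\d.]+)s of (?P<span>[\d.]+)s, submitted: (?P<txns>\d+)")

line = "_kv_sync_thread utilization: idle 14.696439743s of 14.932935715s, submitted: 57"
m = UTIL_RE.search(line)
idle, span, txns = float(m["idle"]), float(m["span"]), int(m["txns"])
print(f"idle {idle / span:.1%}, {txns / span:.1f} txn/s")  # idle 98.4%, 3.8 txn/s
```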
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7840000/0x0/0x4ffc00000, data 0x3d53d81/0x3e2e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 23846912 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:47.496431+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7840000/0x0/0x4ffc00000, data 0x3d53d81/0x3e2e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 128212992 unmapped: 23625728 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:48.496597+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 128212992 unmapped: 23625728 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:49.496800+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7840000/0x0/0x4ffc00000, data 0x3d53d81/0x3e2e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 128212992 unmapped: 23625728 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:50.497177+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1620174 data_alloc: 234881024 data_used: 26468352
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 128212992 unmapped: 23625728 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:51.497545+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d72c00 session 0x5626b4fc81e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d73000 session 0x5626b3a3e5a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e2c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b53e2c00 session 0x5626b58694a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b53e3800 session 0x5626b6f530e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b57fb800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b57fb800 session 0x5626b3c6c960
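[Annotation] The handle_auth_request / ms_handle_reset pairs in this run record short-lived inbound connections: each new connection is issued an auth challenge, and the reset message marks the peer closing it again, which is consistent with periodic probe traffic rather than errors. A hedged sketch for tallying that churn per connection pointer from a journal excerpt piped on stdin (the invocation is an assumption, e.g. `journalctl | python tally_resets.py`):

```python
import re
import sys
from collections import Counter

# Hypothetical churn tally: count ms_handle_reset events per connection
# pointer from a log stream read on stdin.
RESET_RE = re.compile(r"ms_handle_reset con (0x[0-9a-f]+)")

resets = Counter(m.group(1)
                 for line in sys.stdin
                 if (m := RESET_RE.search(line)))

# In this section pointers such as 0x5626b53e3800 recur because the
# messenger reuses connection slots for successive short-lived sessions.
for con, n in resets.most_common():
    print(con, n)
```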
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 24084480 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:52.497812+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7554000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 24084480 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:53.498140+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7554000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7554000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:54.498385+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 24084480 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:55.498582+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 24084480 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1649054 data_alloc: 234881024 data_used: 26468352
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:56.498840+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 24084480 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d72c00 session 0x5626b7eb7c20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:57.499058+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 24084480 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e2c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7554000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:58.499285+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 24084480 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:59.499603+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 24084480 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:00.499862+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 24281088 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1649054 data_alloc: 234881024 data_used: 26468352
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:01.500105+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127492096 unmapped: 24346624 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 15.217692375s of 15.319958687s, submitted: 17
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:02.500351+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:03.500682+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7553000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:04.500976+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:05.501192+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1658814 data_alloc: 234881024 data_used: 28086272
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7553000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:06.501625+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:07.501945+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:08.502216+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:09.502570+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:10.502761+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1658814 data_alloc: 234881024 data_used: 28086272
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7553000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:11.503287+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7553000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:12.503808+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:13.504344+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:14.504797+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:15.505355+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1658814 data_alloc: 234881024 data_used: 28086272
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:16.505618+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7553000/0x0/0x4ffc00000, data 0x403fd81/0x411a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:17.505968+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d72400 session 0x5626b4961680
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 16.142997742s of 16.154426575s, submitted: 3
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b539e400 session 0x5626b4cd1680
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:18.506357+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127508480 unmapped: 24330240 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b53e3800 session 0x5626b56b6f00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:19.506623+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:20.507019+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1466909 data_alloc: 218103808 data_used: 20537344
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:21.507311+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:22.507568+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f8558000/0x0/0x4ffc00000, data 0x303cd4e/0x3115000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:23.507805+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:24.508379+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f8558000/0x0/0x4ffc00000, data 0x303cd4e/0x3115000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:25.508775+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1466909 data_alloc: 218103808 data_used: 20537344
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:26.509378+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:27.509648+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f8558000/0x0/0x4ffc00000, data 0x303cd4e/0x3115000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:28.509945+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:29.510170+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123133952 unmapped: 28704768 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:30.510552+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 123142144 unmapped: 28696576 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1467229 data_alloc: 218103808 data_used: 20545536
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 12.610569954s of 12.732493401s, submitted: 38
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:31.511379+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125263872 unmapped: 26574848 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7b36000/0x0/0x4ffc00000, data 0x3a5fd4e/0x3b38000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:32.511749+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125444096 unmapped: 26394624 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:33.512086+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125386752 unmapped: 26451968 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:34.512483+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125386752 unmapped: 26451968 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:35.512887+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125394944 unmapped: 26443776 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7a90000/0x0/0x4ffc00000, data 0x3b04d4e/0x3bdd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1563015 data_alloc: 218103808 data_used: 21139456
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:36.513292+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125394944 unmapped: 26443776 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:37.513705+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125394944 unmapped: 26443776 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:38.513965+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125108224 unmapped: 26730496 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:39.514311+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125108224 unmapped: 26730496 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:40.514753+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125108224 unmapped: 26730496 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1558391 data_alloc: 218103808 data_used: 21139456
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:41.515062+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7a71000/0x0/0x4ffc00000, data 0x3b24d4e/0x3bfd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125108224 unmapped: 26730496 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:42.515562+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125108224 unmapped: 26730496 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7a71000/0x0/0x4ffc00000, data 0x3b24d4e/0x3bfd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:43.515932+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125108224 unmapped: 26730496 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:44.516205+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125108224 unmapped: 26730496 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 13.959811211s of 14.333621025s, submitted: 88
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:45.516477+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1559019 data_alloc: 218103808 data_used: 21200896
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:46.516768+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:47.517099+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7a61000/0x0/0x4ffc00000, data 0x3b34d4e/0x3c0d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7a61000/0x0/0x4ffc00000, data 0x3b34d4e/0x3c0d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:48.517471+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7a61000/0x0/0x4ffc00000, data 0x3b34d4e/0x3c0d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:49.517834+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f7a61000/0x0/0x4ffc00000, data 0x3b34d4e/0x3c0d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:50.518159+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1559019 data_alloc: 218103808 data_used: 21200896
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:51.518692+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:52.519050+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d73800 session 0x5626b53f65a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:53.519526+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d73800 session 0x5626b339b860
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d72400 session 0x5626b339ad20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d72c00 session 0x5626b58032c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b539e400 session 0x5626b584a3c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:54.519818+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:55.520058+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1593799 data_alloc: 218103808 data_used: 21200896
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f6000/0x0/0x4ffc00000, data 0x3e9edb0/0x3f78000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:56.520514+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:57.520717+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f6000/0x0/0x4ffc00000, data 0x3e9edb0/0x3f78000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b53e3800 session 0x5626b5831c20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:58.521120+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b53e3800 session 0x5626b49614a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d72400 session 0x5626b3c82960
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 13.765749931s of 14.004361153s, submitted: 43
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 ms_handle_reset con 0x5626b2d72c00 session 0x5626b30e3a40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:59.521665+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f6000/0x0/0x4ffc00000, data 0x3e9edb0/0x3f78000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:00.521897+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1594944 data_alloc: 218103808 data_used: 21200896
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:01.522155+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125337600 unmapped: 26501120 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f5000/0x0/0x4ffc00000, data 0x3e9edd3/0x3f79000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:02.522370+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125345792 unmapped: 26492928 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:03.522607+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:04.523045+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:05.523401+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1620848 data_alloc: 234881024 data_used: 24686592
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:06.523742+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:07.524155+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f5000/0x0/0x4ffc00000, data 0x3e9edd3/0x3f79000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:08.524402+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:09.524677+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:10.525066+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1620848 data_alloc: 234881024 data_used: 24686592
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:11.525382+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 26484736 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:12.525623+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125362176 unmapped: 26476544 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:13.525965+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125362176 unmapped: 26476544 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f5000/0x0/0x4ffc00000, data 0x3e9edd3/0x3f79000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:14.526353+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125362176 unmapped: 26476544 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:15.526718+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125362176 unmapped: 26476544 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1620848 data_alloc: 234881024 data_used: 24686592
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:16.527024+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125362176 unmapped: 26476544 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f5000/0x0/0x4ffc00000, data 0x3e9edd3/0x3f79000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:17.527317+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125362176 unmapped: 26476544 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:18.527524+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f5000/0x0/0x4ffc00000, data 0x3e9edd3/0x3f79000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:19.527731+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:20.527999+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1620848 data_alloc: 234881024 data_used: 24686592
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:21.528465+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:22.528837+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f5000/0x0/0x4ffc00000, data 0x3e9edd3/0x3f79000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:23.529138+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:24.529724+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:25.530054+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1620848 data_alloc: 234881024 data_used: 24686592
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 heartbeat osd_stat(store_statfs(0x4f76f5000/0x0/0x4ffc00000, data 0x3e9edd3/0x3f79000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:26.530465+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125370368 unmapped: 26468352 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:27.530846+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 28.517086029s of 28.619983673s, submitted: 9
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125378560 unmapped: 26460160 heap: 151838720 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:28.531086+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 140 handle_osd_map epochs [140,141], i have 140, src has [1,141]
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d73c00 session 0x5626b58030e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125394944 unmapped: 31105024 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:29.531406+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125394944 unmapped: 31105024 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:30.531779+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f7281000/0x0/0x4ffc00000, data 0x4310950/0x43ec000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125403136 unmapped: 31096832 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1655644 data_alloc: 234881024 data_used: 24694784
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:31.532028+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125403136 unmapped: 31096832 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:32.532312+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125403136 unmapped: 31096832 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:33.532616+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125403136 unmapped: 31096832 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:34.533003+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 125403136 unmapped: 31096832 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:35.533548+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f7282000/0x0/0x4ffc00000, data 0x4310950/0x43ec000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130400256 unmapped: 26099712 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1717042 data_alloc: 234881024 data_used: 25780224
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f7282000/0x0/0x4ffc00000, data 0x4310950/0x43ec000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:36.533766+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130441216 unmapped: 26058752 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:37.534068+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 9.699342728s of 10.029482841s, submitted: 96
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129261568 unmapped: 27238400 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:38.534469+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b9795000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b9795000 session 0x5626b49b3680
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133783552 unmapped: 22716416 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:39.534737+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133881856 unmapped: 22618112 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72400 session 0x5626b4fc92c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72c00 session 0x5626b56b7c20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d73c00 session 0x5626b53f7680
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:40.534928+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e3800 session 0x5626b30e3c20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b9795400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b9795400 session 0x5626b56b6000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f69a3000/0x0/0x4ffc00000, data 0x4bef950/0x4ccb000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133906432 unmapped: 22593536 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1742321 data_alloc: 234881024 data_used: 30715904
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:41.535400+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133906432 unmapped: 22593536 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:42.535743+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133906432 unmapped: 22593536 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:43.536018+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72400 session 0x5626b58021e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133906432 unmapped: 22593536 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:44.536285+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f699f000/0x0/0x4ffc00000, data 0x4bf2973/0x4ccf000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d73800 session 0x5626b6f52960
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b539e400 session 0x5626b404fe00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 133906432 unmapped: 22593536 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:45.536469+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e3800 session 0x5626b3372b40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129998848 unmapped: 26501120 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1627004 data_alloc: 234881024 data_used: 25866240
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:46.536692+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129998848 unmapped: 26501120 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:47.536949+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130007040 unmapped: 26492928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:48.537283+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130007040 unmapped: 26492928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:49.537685+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6f9a000/0x0/0x4ffc00000, data 0x40bd8ee/0x4198000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130007040 unmapped: 26492928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6f9a000/0x0/0x4ffc00000, data 0x40bd8ee/0x4198000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:50.538117+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130007040 unmapped: 26492928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1627004 data_alloc: 234881024 data_used: 25866240
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:51.538368+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d73000 session 0x5626b5803a40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e2c00 session 0x5626b5868d20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130007040 unmapped: 26492928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 14.008026123s of 14.405908585s, submitted: 75
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:52.538677+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f82b7000/0x0/0x4ffc00000, data 0x32dc8ee/0x33b7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72400 session 0x5626b58034a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:53.538969+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:54.539416+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:55.539691+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3600.2 total, 600.0 interval
                                            Cumulative writes: 8926 writes, 35K keys, 8926 commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 8926 writes, 2237 syncs, 3.99 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 1761 writes, 6906 keys, 1761 commit groups, 1.0 writes per commit group, ingest: 6.83 MB, 0.01 MB/s
                                            Interval WAL: 1761 writes, 701 syncs, 2.51 writes per sync, written: 0.01 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1506240 data_alloc: 234881024 data_used: 23580672
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:56.540112+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:57.540437+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:58.540706+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f82b7000/0x0/0x4ffc00000, data 0x32dc8ee/0x33b7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:59.541096+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:00.541380+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1506240 data_alloc: 234881024 data_used: 23580672
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:01.541789+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28696576 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:02.542036+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: mgrc ms_handle_reset ms_handle_reset con 0x5626b539e000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: mgrc reconnect Terminating session with v2:192.168.122.100:6800/1088804496
Oct 11 03:05:33 compute-0 ceph-osd[207831]: mgrc reconnect Starting new session with [v2:192.168.122.100:6800/1088804496,v1:192.168.122.100:6801/1088804496]
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: get_auth_request con 0x5626b9795400 auth_method 0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: mgrc handle_mgr_configure stats_period=5
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f82b7000/0x0/0x4ffc00000, data 0x32dc8ee/0x33b7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:03.542896+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:04.543375+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:05.543746+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:06.544141+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1506240 data_alloc: 234881024 data_used: 23580672
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:07.544346+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f82b7000/0x0/0x4ffc00000, data 0x32dc8ee/0x33b7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:08.544684+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f82b7000/0x0/0x4ffc00000, data 0x32dc8ee/0x33b7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:09.545070+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:10.545431+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:11.545680+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1506240 data_alloc: 234881024 data_used: 23580672
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:12.545888+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f82b7000/0x0/0x4ffc00000, data 0x32dc8ee/0x33b7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:13.546094+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:14.546707+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:15.546976+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:16.547158+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1506240 data_alloc: 234881024 data_used: 23580672
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f82b7000/0x0/0x4ffc00000, data 0x32dc8ee/0x33b7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:17.547439+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f82b7000/0x0/0x4ffc00000, data 0x32dc8ee/0x33b7000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:18.547757+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28540928 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 26.793706894s of 26.818393707s, submitted: 9
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:19.547999+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 128401408 unmapped: 28098560 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:20.548265+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 128401408 unmapped: 28098560 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f8068000/0x0/0x4ffc00000, data 0x352b8ee/0x3606000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f8063000/0x0/0x4ffc00000, data 0x352f8ee/0x360a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:21.548744+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f8063000/0x0/0x4ffc00000, data 0x352f8ee/0x360a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:22.549135+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:23.549384+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:24.549611+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:25.549887+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:26.550087+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:27.550496+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:28.550736+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:29.551278+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:30.551489+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:31.551862+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:32.552220+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:33.552802+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:34.553143+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:35.553382+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:36.553666+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:37.553953+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:38.554165+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:39.554533+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:40.554989+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:41.555465+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:42.555736+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:43.555971+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:44.556214+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:45.556564+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:46.557049+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:47.557457+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:48.557659+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:49.557983+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:50.558331+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:51.558589+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 32.365455627s of 32.435802460s, submitted: 14
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:52.558841+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:53.559112+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:54.559496+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:55.559916+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:56.560191+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:57.560524+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:58.560848+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:59.561113+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:00.561347+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:01.561841+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:02.562223+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:03.562686+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:04.563075+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:05.563356+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:06.563991+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:07.564275+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:08.564506+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:09.564716+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:10.564913+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:11.565263+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:12.565503+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:13.565736+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b3aae400 session 0x5626b5869680
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:14.565961+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:15.566269+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:16.566483+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531382 data_alloc: 234881024 data_used: 23613440
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:17.566686+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:18.566900+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805b000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129466368 unmapped: 27033600 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:19.567177+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 28.230127335s of 28.241071701s, submitted: 1
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129507328 unmapped: 26992640 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:20.567525+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129523712 unmapped: 26976256 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:21.567826+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129597440 unmapped: 26902528 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:22.568023+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129638400 unmapped: 26861568 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:23.568214+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:24.568458+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:25.568694+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:26.568899+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:27.569149+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:28.569390+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:29.569678+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:30.569900+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:31.570185+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:32.570545+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:33.570925+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:34.571360+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:35.571616+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:36.571851+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:37.572171+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129695744 unmapped: 26804224 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:38.572560+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:39.572797+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:40.573162+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:41.573671+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:42.574034+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:43.574361+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:44.574734+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:45.574982+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:46.575359+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:47.575544+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:48.575742+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:49.575995+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:50.576380+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:51.576798+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:52.577150+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:53.577513+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:54.577970+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:55.578392+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:56.578648+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:57.579028+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:58.579431+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:59.579861+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:00.580131+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:01.580591+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:02.580941+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:03.581320+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:04.581712+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:05.581981+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:06.582203+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:07.582590+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:08.582838+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:09.583084+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:10.583351+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:11.583849+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:12.584303+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:13.584724+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:14.585113+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:15.585486+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:16.585711+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:17.586019+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:18.586300+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129703936 unmapped: 26796032 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:19.586566+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129712128 unmapped: 26787840 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:20.586933+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129712128 unmapped: 26787840 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:21.587354+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f805c000/0x0/0x4ffc00000, data 0x35378ee/0x3612000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129712128 unmapped: 26787840 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1531750 data_alloc: 234881024 data_used: 23650304
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:22.587758+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129712128 unmapped: 26787840 heap: 156499968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:23.587984+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b539e400 session 0x5626b339a1e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e3800 session 0x5626b404f4a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b9795800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b9795800 session 0x5626b53f6d20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72400 session 0x5626b4fc9e00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 63.668666840s of 64.198707581s, submitted: 108
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b539e400 session 0x5626b4fc94a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e2c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e2c00 session 0x5626b49b30e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130580480 unmapped: 29073408 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e3800 session 0x5626b817e1e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b9795c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:24.588183+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b9795c00 session 0x5626b4cd01e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72400 session 0x5626b59a8d20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130580480 unmapped: 29073408 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:25.588389+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130588672 unmapped: 29065216 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:26.588795+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130588672 unmapped: 29065216 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1638123 data_alloc: 234881024 data_used: 23650304
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:27.589094+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130654208 unmapped: 28999680 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:28.589375+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b539e400 session 0x5626b59a8b40
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130654208 unmapped: 28999680 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:29.589718+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e2c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e2c00 session 0x5626b59a83c0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130662400 unmapped: 28991488 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:30.590097+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e3800 session 0x5626b59a9e00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b4060000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b4060000 session 0x5626b59a85a0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130670592 unmapped: 28983296 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:31.590351+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b4060000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130670592 unmapped: 28983296 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1638123 data_alloc: 234881024 data_used: 23650304
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:32.590522+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130670592 unmapped: 28983296 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:33.590766+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130891776 unmapped: 28762112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:34.590973+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132702208 unmapped: 26951680 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:35.592140+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136151040 unmapped: 23502848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:36.592405+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136151040 unmapped: 23502848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1734603 data_alloc: 234881024 data_used: 36290560
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:37.592641+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:38.592864+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:39.593177+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:40.593511+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:41.593801+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:42.594480+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1734603 data_alloc: 234881024 data_used: 36290560
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:43.594771+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:44.594989+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:45.595187+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:46.595374+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:47.595613+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1734603 data_alloc: 234881024 data_used: 36290560
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:48.595817+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:49.596022+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136216576 unmapped: 23437312 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:50.596218+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:51.596558+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:52.596820+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1734603 data_alloc: 234881024 data_used: 36290560
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:53.597373+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:54.597873+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:55.598276+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:56.598714+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:57.599174+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1734603 data_alloc: 234881024 data_used: 36290560
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:58.599466+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:59.599692+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136224768 unmapped: 23429120 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:00.599905+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136232960 unmapped: 23420928 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:01.600193+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136232960 unmapped: 23420928 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f72a9000/0x0/0x4ffc00000, data 0x42e9950/0x43c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:02.600402+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136232960 unmapped: 23420928 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1734603 data_alloc: 234881024 data_used: 36290560
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:03.600847+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136232960 unmapped: 23420928 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:04.601286+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136232960 unmapped: 23420928 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:05.601533+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 136232960 unmapped: 23420928 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 42.054290771s of 42.212093353s, submitted: 35
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f724e000/0x0/0x4ffc00000, data 0x4344950/0x4420000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [0,0,0,0,0,1])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:06.601795+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 139223040 unmapped: 20430848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:07.602006+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 139264000 unmapped: 20389888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1809071 data_alloc: 234881024 data_used: 36700160
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:08.602503+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f69b0000/0x0/0x4ffc00000, data 0x4bda950/0x4cb6000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142024704 unmapped: 17629184 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:09.602931+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142024704 unmapped: 17629184 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:10.603373+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142024704 unmapped: 17629184 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:11.603799+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142024704 unmapped: 17629184 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:12.604295+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1821195 data_alloc: 234881024 data_used: 36532224
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f696b000/0x0/0x4ffc00000, data 0x4c19950/0x4cf5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:13.604607+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f696b000/0x0/0x4ffc00000, data 0x4c19950/0x4cf5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:14.604996+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:15.605323+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f696a000/0x0/0x4ffc00000, data 0x4c28950/0x4d04000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:16.605758+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:17.606191+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812327 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:18.606770+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:19.607213+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f696a000/0x0/0x4ffc00000, data 0x4c28950/0x4d04000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:20.607687+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:21.608140+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:22.608341+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f696a000/0x0/0x4ffc00000, data 0x4c28950/0x4d04000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812327 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:23.608632+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:24.608959+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:25.609179+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:26.609397+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 19.895868301s of 20.259223938s, submitted: 107
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f696a000/0x0/0x4ffc00000, data 0x4c28950/0x4d04000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:27.609619+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812195 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:28.609832+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:29.610076+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:30.610370+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:31.610785+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:32.611137+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812195 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:33.611518+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:34.611777+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:35.612173+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:36.612693+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:37.613044+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812195 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:38.613440+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 12.529780388s of 12.544162750s, submitted: 2
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:39.613678+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141680640 unmapped: 17973248 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:40.614013+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:41.614348+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:42.614573+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812371 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:43.614808+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:44.615039+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:45.615375+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:46.615716+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:47.616099+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812371 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:48.616590+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:49.616883+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:50.617099+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:51.617386+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:52.617724+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812371 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:53.617971+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:54.618161+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141688832 unmapped: 17965056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:55.618545+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:56.618976+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:57.619364+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812371 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:58.619738+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:59.620148+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:00.620527+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:01.620966+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:02.621417+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812371 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:03.621707+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:04.622116+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:05.622474+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:06.622685+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:07.622895+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812371 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:08.623117+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:09.623361+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 30.708982468s of 30.720699310s, submitted: 1
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:10.623584+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:11.623863+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:12.624099+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:13.624371+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141697024 unmapped: 17956864 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:14.624614+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141705216 unmapped: 17948672 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:15.624830+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141705216 unmapped: 17948672 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:16.625061+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141705216 unmapped: 17948672 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:17.625330+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:18.625582+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:19.625790+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:20.626018+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:21.626332+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:22.626543+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:23.626744+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:24.626934+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:25.627329+0000)
Oct 11 03:05:33 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15901 -' entity='client.admin' cmd=[{"prefix": "orch ls", "export": true, "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:26.627570+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:27.627969+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:28.628452+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:29.628870+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:30.629198+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:31.629669+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:32.630027+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141713408 unmapped: 17940480 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:33.630495+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:34.630832+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:35.631206+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:36.631562+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:37.632001+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:38.632327+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:39.632752+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:40.633038+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:41.633346+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:42.633737+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:43.634093+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:44.634380+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:45.634669+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:46.635033+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:47.635349+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:48.635694+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:49.636038+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:50.636288+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141721600 unmapped: 17932288 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:51.636553+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:52.636769+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:53.636969+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:54.637214+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:55.637500+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:56.637727+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:57.637934+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:58.638138+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:59.638397+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:00.638603+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:01.638854+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:02.639516+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:03.640061+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:04.640360+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:05.640571+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:06.640787+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:07.641179+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:08.641648+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:09.644011+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:10.645706+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:11.647167+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:12.648858+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141729792 unmapped: 17924096 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:13.650507+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:14.651012+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:15.651511+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:16.652123+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:17.652479+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:18.652982+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:19.653506+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:20.653857+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 71.278442383s of 71.294609070s, submitted: 7
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:21.654485+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:22.654802+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:23.655018+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813251 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:24.655318+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:25.655526+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:26.655749+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b5817400 session 0x5626b58ca780
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b539e400
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53be000 session 0x5626b3a92d20
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e2c00
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b5816c00 session 0x5626b58310e0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53be000
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:27.656383+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:28.656927+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:29.657222+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:30.657487+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:31.657730+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:32.657958+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:33.658193+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:34.658392+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:35.658883+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:36.659312+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:37.659662+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:38.660060+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:39.660468+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141737984 unmapped: 17915904 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:40.660888+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:41.661793+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:42.662051+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:43.662940+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:44.663300+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:45.663536+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:46.663809+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:47.664125+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:48.664485+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:49.664978+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:50.665379+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:51.665759+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:52.666352+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:53.666795+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:54.667053+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141746176 unmapped: 17907712 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:55.667499+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:56.667915+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:57.668394+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:58.668766+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:59.669160+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:00.669669+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:01.670026+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:02.670536+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:03.670972+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:04.671287+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:05.671968+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:06.672405+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:07.672654+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:08.672918+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141754368 unmapped: 17899520 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:09.673313+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141762560 unmapped: 17891328 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:10.673520+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:11.673830+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:12.674176+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:13.674510+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:14.674892+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:15.675288+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:16.675629+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:17.675910+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:18.676592+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:19.677014+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:20.677406+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:21.677761+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:22.678111+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:23.678459+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:24.678719+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:25.679046+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:26.679343+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141770752 unmapped: 17883136 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:27.679617+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:28.679932+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:29.680328+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:30.680526+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:31.680822+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:32.681037+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:33.681548+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:34.681836+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:35.682205+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:36.682593+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:37.682907+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:38.683395+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:39.683794+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:40.684224+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:41.684830+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:42.685217+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:43.685717+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:44.686080+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:45.686461+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:46.686834+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:47.687139+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:48.687407+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:49.687711+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141778944 unmapped: 17874944 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:50.688053+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:51.688420+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:52.688696+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:53.688922+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:54.689154+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:55.689395+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:56.689854+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:57.690328+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:58.690726+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:59.691020+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:00.691455+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:01.691937+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:02.692393+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:03.692638+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:04.692897+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:05.693120+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:06.693524+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141787136 unmapped: 17866752 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:07.693916+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:08.694369+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:09.694770+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:10.695154+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:11.695781+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:12.696146+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:13.696394+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:14.696599+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:15.696829+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:16.697145+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:17.697359+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:18.697566+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:19.697899+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:20.698098+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:21.698384+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141795328 unmapped: 17858560 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:22.698760+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141803520 unmapped: 17850368 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:23.699028+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141803520 unmapped: 17850368 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:24.699419+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141803520 unmapped: 17850368 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:25.699605+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141803520 unmapped: 17850368 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:26.699909+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141803520 unmapped: 17850368 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:27.700325+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141803520 unmapped: 17850368 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:28.700747+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141803520 unmapped: 17850368 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:29.701204+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141803520 unmapped: 17850368 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:30.701641+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141811712 unmapped: 17842176 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:31.701938+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141811712 unmapped: 17842176 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:32.702209+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141811712 unmapped: 17842176 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:33.702575+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:34.702837+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:35.703044+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:36.703488+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:37.703881+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:38.704365+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:39.704707+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:40.705035+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:41.705479+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:42.705813+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:43.706088+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:44.706334+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:45.706544+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:46.706823+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:47.707223+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:48.707819+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:49.708207+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141819904 unmapped: 17833984 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:50.708657+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:51.709171+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:52.709392+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:53.709595+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:54.709831+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:55.710071+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:56.710319+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:57.710698+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:58.711210+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:59.711635+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:00.711993+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:01.712378+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141828096 unmapped: 17825792 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:02.712612+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141836288 unmapped: 17817600 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:03.712794+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141836288 unmapped: 17817600 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:04.713105+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1811843 data_alloc: 234881024 data_used: 36536320
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141836288 unmapped: 17817600 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:05.713443+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141836288 unmapped: 17817600 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:06.713897+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141836288 unmapped: 17817600 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:07.714216+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141836288 unmapped: 17817600 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:08.714652+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141869056 unmapped: 17784832 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:09.714871+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812963 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141885440 unmapped: 17768448 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:10.715363+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141885440 unmapped: 17768448 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:11.715847+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141885440 unmapped: 17768448 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:12.716215+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141885440 unmapped: 17768448 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:13.716552+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 141893632 unmapped: 17760256 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 173.439102173s of 173.452651978s, submitted: 2
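_kv_sync_thread is the thread that commits and syncs RocksDB transactions; its utilization line says it was idle for 173.439 s of the 173.453 s window and submitted only 2 transaction batches, i.e. the commit path was busy for about 14 ms, under 0.01% of the interval. The derivation:

```python
idle, total, submitted = 173.439102173, 173.452651978, 2

busy = total - idle
print(f"busy {busy * 1000:.1f} ms ({busy / total:.4%}) "
      f"for {submitted} txn batches over {total:.1f} s")
# -> busy 13.5 ms (0.0078%) for 2 txn batches over 173.5 s
```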
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:14.716830+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813919 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6967000/0x0/0x4ffc00000, data 0x4c2b950/0x4d07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:15.717186+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6950000/0x0/0x4ffc00000, data 0x4c42950/0x4d1e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
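This heartbeat is the first in a long stretch where the statfs actually moves: compared with the osd_stat a few lines above, available fell and both data stored and data allocated grew by the same 0x17000 bytes, plausibly the small batch of writes behind the 2 submitted transactions the kv sync thread just reported. The deltas:

```python
stored_prev, stored_now = 0x4c2b950, 0x4c42950
alloc_prev,  alloc_now  = 0x4d07000, 0x4d1e000
avail_prev,  avail_now  = 0x4f6967000, 0x4f6950000

print(f"stored +{stored_now - stored_prev} B, "
      f"allocated +{alloc_now - alloc_prev} B, "
      f"available -{avail_prev - avail_now} B")
# -> all three move by 0x17000 = 94208 B (92 KiB)
```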
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:16.717405+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:17.717747+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:18.718188+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:19.718579+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813919 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6950000/0x0/0x4ffc00000, data 0x4c42950/0x4d1e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:20.718836+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6950000/0x0/0x4ffc00000, data 0x4c42950/0x4d1e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:21.719323+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:22.719704+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:23.720094+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:24.720423+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1813919 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6950000/0x0/0x4ffc00000, data 0x4c42950/0x4d1e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:25.720640+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:26.720850+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:27.721303+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:28.721538+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6950000/0x0/0x4ffc00000, data 0x4c42950/0x4d1e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:29.721893+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814399 data_alloc: 234881024 data_used: 36552704
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 15.848360062s of 15.867633820s, submitted: 2
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:30.722336+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142016512 unmapped: 17637376 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:31.722648+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142016512 unmapped: 17637376 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:32.723038+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142016512 unmapped: 17637376 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:33.723344+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142016512 unmapped: 17637376 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:34.723702+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814819 data_alloc: 234881024 data_used: 36552704
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142016512 unmapped: 17637376 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:35.723986+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142024704 unmapped: 17629184 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:36.724341+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142024704 unmapped: 17629184 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:37.724658+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:38.725141+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:39.725370+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814819 data_alloc: 234881024 data_used: 36552704
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:40.725702+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:41.726132+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:42.726448+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:43.726682+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:44.727042+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814819 data_alloc: 234881024 data_used: 36552704
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:45.727414+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:46.727752+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:47.728101+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:48.728484+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142032896 unmapped: 17620992 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 19.181091309s of 19.198316574s, submitted: 2
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:49.728849+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142041088 unmapped: 17612800 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:50.729326+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142041088 unmapped: 17612800 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:51.729807+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142041088 unmapped: 17612800 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:52.730149+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:53.730481+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:54.730865+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:55.731196+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:56.731845+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:57.732069+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:58.732534+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:59.732898+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:00.733309+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:01.733580+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:02.734000+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:03.734300+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:04.734737+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:05.735125+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:06.735382+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142057472 unmapped: 17596416 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:07.735852+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142065664 unmapped: 17588224 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:08.736322+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142065664 unmapped: 17588224 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:09.736689+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142065664 unmapped: 17588224 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:10.736925+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142065664 unmapped: 17588224 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:11.737432+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142065664 unmapped: 17588224 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:12.737673+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142065664 unmapped: 17588224 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:13.737929+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142065664 unmapped: 17588224 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:14.738180+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142065664 unmapped: 17588224 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:15.738667+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142073856 unmapped: 17580032 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:16.739136+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142073856 unmapped: 17580032 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:17.739534+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142073856 unmapped: 17580032 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:18.739850+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142073856 unmapped: 17580032 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:19.740095+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142073856 unmapped: 17580032 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:20.740487+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142073856 unmapped: 17580032 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:21.740801+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142073856 unmapped: 17580032 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:22.741161+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:23.741437+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:24.741674+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:25.742043+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:26.742827+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:27.743198+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:28.743421+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:29.743763+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:30.744090+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:31.744534+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:32.744743+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:33.744963+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:34.745283+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:35.745607+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:36.746009+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:37.746458+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142082048 unmapped: 17571840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:38.746846+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142090240 unmapped: 17563648 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:39.747190+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142090240 unmapped: 17563648 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:40.747581+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142090240 unmapped: 17563648 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:41.747864+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142090240 unmapped: 17563648 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:42.748324+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142098432 unmapped: 17555456 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:43.748781+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142098432 unmapped: 17555456 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:44.749133+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142098432 unmapped: 17555456 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:45.749417+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142098432 unmapped: 17555456 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:46.749695+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142106624 unmapped: 17547264 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:47.750013+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142106624 unmapped: 17547264 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:48.750465+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142106624 unmapped: 17547264 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:49.750836+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142106624 unmapped: 17547264 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:50.751324+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142106624 unmapped: 17547264 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:51.751735+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142106624 unmapped: 17547264 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:52.752204+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142106624 unmapped: 17547264 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:53.752725+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142106624 unmapped: 17547264 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:54.753034+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:55.753406+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 4200.2 total, 600.0 interval
                                            Cumulative writes: 9424 writes, 37K keys, 9424 commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 9424 writes, 2439 syncs, 3.86 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 498 writes, 1584 keys, 498 commit groups, 1.0 writes per commit group, ingest: 2.24 MB, 0.00 MB/s
                                            Interval WAL: 498 writes, 202 syncs, 2.47 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
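
Analysis note: the derived figures in the DB Stats block above follow directly from its raw counters; a quick check using only numbers taken verbatim from the dump:

    # All inputs copied from the "** DB Stats **" dump above.
    uptime_s       = 4200.2
    cum_wal_writes = 9424
    cum_wal_syncs  = 2439
    cum_ingest_gb  = 0.03
    int_wal_writes = 498
    int_wal_syncs  = 202

    print(f"cumulative WAL writes per sync: {cum_wal_writes / cum_wal_syncs:.2f}")  # ~3.86
    print(f"interval WAL writes per sync:   {int_wal_writes / int_wal_syncs:.2f}")  # ~2.47
    print(f"cumulative ingest rate: {cum_ingest_gb * 1024 / uptime_s:.2f} MB/s")    # ~0.01
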
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:56.753774+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:57.754142+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:58.754461+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:59.755010+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:00.755502+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:01.756037+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:02.756405+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:03.756645+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:04.756934+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:05.757152+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:06.757666+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:07.757946+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:08.758217+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142114816 unmapped: 17539072 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:09.758596+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142123008 unmapped: 17530880 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:10.758865+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142123008 unmapped: 17530880 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:11.759315+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:12.759767+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:13.760158+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:14.760391+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:15.760781+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:16.761107+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:17.761354+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:18.761614+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:19.761937+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:20.762358+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:21.762781+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:22.763186+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:23.763662+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:24.763888+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:25.764143+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142131200 unmapped: 17522688 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:26.764482+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:27.764864+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:28.765456+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:29.765759+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:30.766064+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:31.766560+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:32.767870+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:33.769319+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:34.769552+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:35.769855+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:36.770467+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142139392 unmapped: 17514496 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:37.770986+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142147584 unmapped: 17506304 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:38.771313+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142147584 unmapped: 17506304 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:39.772342+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142147584 unmapped: 17506304 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:40.772874+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142147584 unmapped: 17506304 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:41.773504+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142147584 unmapped: 17506304 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:42.773746+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:43.774451+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:44.774748+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:45.775087+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:46.775661+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:47.775873+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:48.776394+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:49.777725+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:50.778079+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:51.778493+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142155776 unmapped: 17498112 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:52.778809+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:53.779453+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:54.779764+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:55.780023+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:56.780417+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:57.780669+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:58.781055+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:59.781401+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:00.781644+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142163968 unmapped: 17489920 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:01.781945+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:02.782319+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:03.782551+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:04.782782+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:05.783395+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:06.784704+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:07.785094+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:08.785374+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:09.785686+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:10.786195+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:11.787018+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:12.787757+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:13.788141+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142172160 unmapped: 17481728 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:14.788490+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142180352 unmapped: 17473536 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:15.788793+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:33 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:33 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:33 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142180352 unmapped: 17473536 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:33 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:16.789079+0000)
Oct 11 03:05:33 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:33 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr metadata"} v 0) v1
Oct 11 03:05:33 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1895804544' entity='client.admin' cmd=[{"prefix": "mgr metadata"}]: dispatch
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142180352 unmapped: 17473536 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:17.789558+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142180352 unmapped: 17473536 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:18.789897+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142180352 unmapped: 17473536 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:19.790386+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142180352 unmapped: 17473536 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:20.791031+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1817619 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142180352 unmapped: 17473536 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 151.628067017s of 151.660583496s, submitted: 15
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:21.791508+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142180352 unmapped: 17473536 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:22.791834+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142196736 unmapped: 17457152 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:23.792125+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142270464 unmapped: 17383424 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:24.792508+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:25.792947+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:26.793428+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:27.793934+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:28.794514+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:29.794898+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:30.795202+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:31.795787+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:32.796201+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:33.796641+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:34.797098+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:35.797577+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:36.797899+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:37.798218+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:38.798681+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:39.799079+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:40.799407+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:41.799973+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:42.800592+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:43.800982+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:44.801386+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:45.801769+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:46.802056+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:47.802440+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:48.802833+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:49.803548+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:50.803955+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:51.804567+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:52.804971+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:53.805517+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:54.805880+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:55.806406+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:56.806865+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:57.807114+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:58.807506+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:59.807956+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:00.808475+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:01.808945+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:02.809178+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:03.809468+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:04.809880+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:05.810118+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:06.810495+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:07.811039+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:08.811484+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:09.811912+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:10.812167+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142295040 unmapped: 17358848 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:11.812464+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142303232 unmapped: 17350656 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:12.812673+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142303232 unmapped: 17350656 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:13.812873+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142303232 unmapped: 17350656 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:14.813222+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f693b000/0x0/0x4ffc00000, data 0x4c57950/0x4d33000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142303232 unmapped: 17350656 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:15.813664+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1814803 data_alloc: 234881024 data_used: 36540416
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142303232 unmapped: 17350656 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:16.814643+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142303232 unmapped: 17350656 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:17.814861+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142303232 unmapped: 17350656 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:18.815069+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 57.462314606s of 58.026130676s, submitted: 90
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72c00 session 0x5626b2da83c0
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d73c00 session 0x5626b5868780
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e3800
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142319616 unmapped: 17334272 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:19.815296+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b53e3800 session 0x5626b339bc20
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:20.815485+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6caa000/0x0/0x4ffc00000, data 0x48e892d/0x49c3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1784517 data_alloc: 234881024 data_used: 36499456
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:21.815741+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:22.816087+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:23.816359+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:24.816579+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:25.816934+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1784517 data_alloc: 234881024 data_used: 36499456
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6caa000/0x0/0x4ffc00000, data 0x48e892d/0x49c3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:26.817161+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:27.817371+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:28.817786+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:29.818168+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:30.818562+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f6caa000/0x0/0x4ffc00000, data 0x48e892d/0x49c3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1784517 data_alloc: 234881024 data_used: 36499456
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:31.818923+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 12.665335655s of 12.825592041s, submitted: 30
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72400 session 0x5626b59a8960
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b4060000 session 0x5626b59a8000
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 142336000 unmapped: 17317888 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:32.819329+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 ms_handle_reset con 0x5626b2d72400 session 0x5626b58025a0
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f7915000/0x0/0x4ffc00000, data 0x31c88cb/0x32a2000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:33.819726+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:34.820132+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:35.820437+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f7915000/0x0/0x4ffc00000, data 0x31c88cb/0x32a2000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1507173 data_alloc: 218103808 data_used: 23609344
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:36.820707+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:37.821085+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:38.821458+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:39.821841+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:40.822215+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1507173 data_alloc: 218103808 data_used: 23609344
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:41.822792+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 132472832 unmapped: 27181056 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 heartbeat osd_stat(store_statfs(0x4f7915000/0x0/0x4ffc00000, data 0x31c88cb/0x32a2000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 141 handle_osd_map epochs [142,142], i have 141, src has [1,142]
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 9.965906143s of 10.049147606s, submitted: 20
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 142 ms_handle_reset con 0x5626b2d72c00 session 0x5626b54b85a0
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:42.823185+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129794048 unmapped: 29859840 heap: 159653888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73c00
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 142 heartbeat osd_stat(store_statfs(0x4f8838000/0x0/0x4ffc00000, data 0x2d5a49c/0x2e35000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:43.823601+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129982464 unmapped: 38068224 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 142 handle_osd_map epochs [143,143], i have 142, src has [1,143]
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 143 ms_handle_reset con 0x5626b2d73c00 session 0x5626b6f521e0
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:44.824024+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b4060000
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130031616 unmapped: 38019072 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 143 handle_osd_map epochs [143,144], i have 143, src has [1,144]
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _renew_subs
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 144 handle_osd_map epochs [144,144], i have 144, src has [1,144]
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 144 ms_handle_reset con 0x5626b4060000 session 0x5626b3c83680
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:45.824554+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480342 data_alloc: 218103808 data_used: 18972672
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:46.824957+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:47.825545+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:48.825815+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 144 heartbeat osd_stat(store_statfs(0x4f8830000/0x0/0x4ffc00000, data 0x2d5dc16/0x2e3b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:49.826120+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:50.826446+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:51.826745+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480342 data_alloc: 218103808 data_used: 18972672
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:52.827131+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 144 heartbeat osd_stat(store_statfs(0x4f8830000/0x0/0x4ffc00000, data 0x2d5dc16/0x2e3b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 144 handle_osd_map epochs [145,145], i have 144, src has [1,145]
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 10.107682228s of 10.716229439s, submitted: 93
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:53.827561+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130113536 unmapped: 37937152 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:54.827837+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130113536 unmapped: 37937152 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:55.828211+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:56.828711+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481367 data_alloc: 218103808 data_used: 18972672
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:57.829015+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:58.829506+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:59.829966+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:00.830387+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:01.830866+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481367 data_alloc: 218103808 data_used: 18972672
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:02.831676+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:03.831922+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:04.832364+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:05.832778+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:06.833216+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481367 data_alloc: 218103808 data_used: 18972672
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:07.833788+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:08.834184+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:09.834610+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:10.835061+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:11.835446+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481367 data_alloc: 218103808 data_used: 18972672
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:12.835958+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:13.836345+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:14.836687+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:15.836876+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:16.837186+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481367 data_alloc: 218103808 data_used: 18972672
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:17.837548+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:18.837829+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:19.838047+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:20.838517+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:21.838921+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481367 data_alloc: 218103808 data_used: 18972672
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:22.839081+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:23.839448+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:24.839753+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:25.840096+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:26.840422+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481367 data_alloc: 218103808 data_used: 18972672
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:27.840684+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:28.841017+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:29.841499+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:30.841861+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:31.842328+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:32.842673+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:33.843037+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:34.843482+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:35.843831+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:36.844050+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:37.844549+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:38.844939+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:39.845406+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:40.845792+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:41.846197+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:42.846540+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:43.846810+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:44.847165+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:45.847571+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:46.847886+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:47.848353+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:48.848557+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:49.848863+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:50.849332+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:51.849733+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:52.850112+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:53.850354+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:54.850827+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:55.851215+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:56.851762+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:57.852129+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:58.852507+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:59.852945+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:00.853431+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:01.853737+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:02.853961+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:03.854334+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:04.854602+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:05.854887+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:06.855165+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:07.855567+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:08.855824+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:09.856045+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:10.856401+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:11.856737+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:12.857059+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:13.857345+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:14.857541+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:15.857866+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130121728 unmapped: 37928960 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:16.858104+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'config diff' '{prefix=config diff}'
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'config diff' '{prefix=config diff}' result is 0 bytes
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:17.858501+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'config show' '{prefix=config show}'
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'config show' '{prefix=config show}' result is 0 bytes
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'counter dump' '{prefix=counter dump}'
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'counter dump' '{prefix=counter dump}' result is 0 bytes
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'counter schema' '{prefix=counter schema}'
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'counter schema' '{prefix=counter schema}' result is 0 bytes
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130375680 unmapped: 37675008 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:18.858727+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130088960 unmapped: 37961728 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:19.858912+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130383872 unmapped: 37666816 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'log dump' '{prefix=log dump}'
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:20.859160+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'log dump' '{prefix=log dump}' result is 0 bytes
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'perf dump' '{prefix=perf dump}'
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'perf dump' '{prefix=perf dump}' result is 0 bytes
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130465792 unmapped: 37584896 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'perf histogram dump' '{prefix=perf histogram dump}'
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'perf histogram dump' '{prefix=perf histogram dump}' result is 0 bytes
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'perf schema' '{prefix=perf schema}'
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'perf schema' '{prefix=perf schema}' result is 0 bytes
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:21.859509+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:22.859727+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:23.859999+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:24.860274+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:25.860477+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:26.860682+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:27.860919+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:28.861090+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:29.861323+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:30.861640+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:31.861901+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:32.862348+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:33.862543+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:34.862747+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:35.863207+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:36.863705+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:37.863912+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:38.864121+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:39.864418+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:40.864610+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:41.864943+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:42.865149+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:43.865352+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:44.865566+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:45.865939+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:46.866587+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:47.866847+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:48.867091+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:49.867405+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:50.867613+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:51.867883+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:52.868117+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:53.868383+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130277376 unmapped: 37773312 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:54.868783+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:55.869045+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:56.869385+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:57.869737+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:58.870212+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:59.870690+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:00.870926+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:01.871317+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:02.871577+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:03.874617+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:04.874834+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:05.875201+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:06.875432+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:07.875682+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:08.875979+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:09.876215+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:10.876615+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:11.877082+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:12.877432+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:13.877671+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:14.877990+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:15.878420+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:16.878782+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:17.879046+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:18.879374+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:19.879635+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:20.880046+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:21.880562+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:22.880818+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:23.881309+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:24.881751+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:25.882174+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:26.882518+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:27.882895+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:28.883073+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:29.883440+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:30.883774+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:31.884173+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:32.884615+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:33.884847+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:34.885289+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:35.885490+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:36.885898+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:37.886112+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:38.886530+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:39.886760+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:40.887403+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:41.887794+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:42.888165+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:43.888667+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:44.889070+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:45.889326+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:46.889660+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:47.891421+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:48.891652+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:49.892001+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:50.892483+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:51.892825+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:52.893052+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:53.893337+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:54.893710+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:55.895059+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:56.895553+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:57.895829+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:58.896358+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:59.896743+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:00.897102+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:01.897402+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:02.897801+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:03.898157+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:04.898540+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:05.898919+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:06.899566+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:07.899789+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:08.900130+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:09.900522+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:10.900822+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:11.901377+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:12.901840+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:13.902117+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 ms_handle_reset con 0x5626b2d73800 session 0x5626b3c82000
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72400
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:14.902431+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:15.902827+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:16.903094+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:17.903373+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:18.903573+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:19.903955+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:20.904421+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:21.904815+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:22.905065+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:23.905563+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:24.905803+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:25.906078+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:26.906372+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:27.906737+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:28.907095+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:29.907449+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:30.907864+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:31.908133+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:32.908490+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:33.908907+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:34.909194+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:35.909505+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:36.909883+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:37.910109+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:38.910405+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:39.910873+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:40.911128+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:41.911532+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:42.911787+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:43.912214+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:44.912557+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:45.913016+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:46.913334+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:47.913726+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:48.914116+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:49.914513+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:50.914918+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:51.915425+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:52.915784+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:53.916064+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:54.916450+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:55.916793+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:56.917062+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:57.917480+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:58.917752+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:59.918019+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:00.918463+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:01.918967+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:02.919216+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:03.919568+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:04.919925+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:05.920404+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:06.920686+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:07.920926+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:08.921385+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130285568 unmapped: 37765120 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:09.921821+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:10.922168+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:11.922816+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:12.923037+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:13.923509+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:14.923883+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:15.924284+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:16.924724+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:17.924968+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:18.925441+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:19.925847+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:20.926378+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:21.926787+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:22.927182+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:23.927572+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:24.927920+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:25.928508+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:26.928984+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:27.929514+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:28.929946+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:29.930450+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:30.932010+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:31.932711+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:32.933331+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:33.933692+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:34.934850+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:35.935753+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:36.936146+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:37.936529+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:38.937008+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:39.937418+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:40.937752+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:41.938504+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:42.939206+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:43.939715+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:44.940380+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:45.940715+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:46.941188+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:47.941577+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:48.941926+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:49.942563+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:50.943039+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:51.943406+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:52.943807+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:53.944162+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:54.944545+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:55.944883+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:56.945425+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:57.945691+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:58.946106+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:59.946631+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:00.946988+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:01.947459+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:02.947881+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:03.948348+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
[The ceph-osd[207831] debug burst above repeats verbatim through the rest of this interval, every entry stamped Oct 11 03:05:34 on compute-0: each one-second cycle emits the identical "prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832" line followed by "monclient: tick", "monclient: _check_auth_tickets", and a "monclient: _check_auth_rotating have uptodate secrets" line whose ticket-expiry timestamp advances one second per cycle, from 2025-10-11T03:01:04.948859+0000 through 2025-10-11T03:02:40.998910+0000. The unchanged "rocksdb: commit_cache_size High Pri Pool Ratio" pair (set to 0.285714, then 0.0555556) and the unchanged "bluestore.MempoolThread(0x5626b1935b60) _resize_shards" line recur every fifth cycle, and the byte-identical "osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, ...), peers [0,1] op hist [])" line recurs at irregular two-to-six-cycle intervals, occasionally twice in succession. No reported values change anywhere in the run.]
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:41.999461+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:42.999871+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:44.000155+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:45.000573+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:46.000793+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:47.001112+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:48.001515+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:49.001865+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:50.002379+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:51.002808+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:52.003168+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:53.003470+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 130293760 unmapped: 37756928 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:54.003877+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:55.004580+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:56.004986+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 4800.2 total, 600.0 interval
                                            Cumulative writes: 9892 writes, 38K keys, 9892 commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 9892 writes, 2660 syncs, 3.72 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 468 writes, 1210 keys, 468 commit groups, 1.0 writes per commit group, ingest: 0.45 MB, 0.00 MB/s
                                            Interval WAL: 468 writes, 221 syncs, 2.12 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:57.005504+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:58.005899+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:59.006393+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:00.006628+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:01.007081+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:02.007657+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:03.008219+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:04.009182+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:05.009481+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:06.009899+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:07.010527+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:08.011043+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:09.011516+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:10.011936+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:11.012595+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:12.013029+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:13.013499+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:14.013749+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:15.014084+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:16.014529+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:17.014810+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:18.015575+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:19.016006+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:20.016320+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129474560 unmapped: 38576128 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:21.016547+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:22.016777+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:23.017431+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:24.017660+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:25.018139+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:26.018589+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:27.019044+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 ms_handle_reset con 0x5626b539e400 session 0x5626b3592d20
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d72c00
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 ms_handle_reset con 0x5626b53e2c00 session 0x5626b3027a40
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b2d73c00
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 ms_handle_reset con 0x5626b53be000 session 0x5626b6f530e0
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: handle_auth_request added challenge on 0x5626b53e2c00
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:28.019327+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:29.019701+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:30.020067+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:31.020330+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:32.020653+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:33.020990+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:34.021300+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:35.021701+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:36.022017+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:37.022552+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:38.022798+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:39.023194+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:40.023495+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:41.023869+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:42.024390+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129482752 unmapped: 38567936 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:43.024649+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129490944 unmapped: 38559744 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:44.025699+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129490944 unmapped: 38559744 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:45.026093+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129490944 unmapped: 38559744 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:46.026371+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129490944 unmapped: 38559744 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:47.026707+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129490944 unmapped: 38559744 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:48.027100+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129490944 unmapped: 38559744 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:49.027499+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129490944 unmapped: 38559744 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:50.027873+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129490944 unmapped: 38559744 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:51.028192+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129490944 unmapped: 38559744 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:52.028861+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129490944 unmapped: 38559744 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:53.029699+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129490944 unmapped: 38559744 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:54.030121+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129490944 unmapped: 38559744 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:55.030373+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129490944 unmapped: 38559744 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:56.030661+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129490944 unmapped: 38559744 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:57.030971+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:58.031370+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:59.031767+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:00.032729+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:01.033005+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:02.033463+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:03.033720+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:04.034133+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:05.034363+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:06.034655+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:07.035068+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:08.035519+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:09.035822+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:10.036435+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:11.036867+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:12.037156+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:13.037724+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:14.038075+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129499136 unmapped: 38551552 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:15.038456+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129507328 unmapped: 38543360 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:16.038826+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129515520 unmapped: 38535168 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:17.039662+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129515520 unmapped: 38535168 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:18.040063+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f882f000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129515520 unmapped: 38535168 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:19.040443+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129515520 unmapped: 38535168 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:20.040837+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481527 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129515520 unmapped: 38535168 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:21.041195+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129515520 unmapped: 38535168 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore(/var/lib/ceph/osd/ceph-2) _kv_sync_thread utilization: idle 508.765411377s of 508.789550781s, submitted: 14
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:22.041495+0000)
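The _kv_sync_thread utilization line a few entries up quantifies how busy BlueStore's RocksDB commit thread was over the sampling window; subtracting idle from wall time shows this OSD is effectively quiescent:

    # Busy fraction from the _kv_sync_thread line above:
    #   idle 508.765411377s of 508.789550781s, submitted: 14
    idle, wall = 508.765411377, 508.789550781
    print(f"busy {100 * (1 - idle / wall):.4f}% over {wall:.0f}s, 14 submits")
    # -> busy 0.0047%: only 14 transactions committed in ~8.5 minutes.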
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129515520 unmapped: 38535168 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:23.042042+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129572864 unmapped: 38477824 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f8830000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:24.042638+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129605632 unmapped: 38445056 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:25.043328+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480647 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f8830000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:26.043687+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:27.044092+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:28.044452+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:29.045435+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f8830000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:30.045730+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480647 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:31.046196+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:32.046704+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:33.047054+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f8830000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:34.047524+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f8830000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:35.047868+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480647 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:36.048310+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:37.048671+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:38.048972+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:39.049369+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:40.049812+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f8830000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480647 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:41.050120+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:42.050662+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:43.050902+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:44.051441+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f8830000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:45.051701+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480647 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:46.052075+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:47.052558+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:48.052807+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:49.053016+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f8830000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:50.053288+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480647 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:51.053614+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:52.054073+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:53.054311+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f8830000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:54.054496+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f8830000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129671168 unmapped: 38379520 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:55.054698+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480647 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129679360 unmapped: 38371328 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:56.055056+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129679360 unmapped: 38371328 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:57.055314+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129679360 unmapped: 38371328 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:58.055530+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: osd.2 145 heartbeat osd_stat(store_statfs(0x4f8830000/0x0/0x4ffc00000, data 0x2d5f699/0x2e3e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [0,1] op hist [])
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129679360 unmapped: 38371328 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:59.055774+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129679360 unmapped: 38371328 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:00.056006+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:34 compute-0 ceph-osd[207831]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:34 compute-0 ceph-osd[207831]: bluestore.MempoolThread(0x5626b1935b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480647 data_alloc: 218103808 data_used: 18976768
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129679360 unmapped: 38371328 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'config diff' '{prefix=config diff}'
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'config diff' '{prefix=config diff}' result is 0 bytes
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:01.056209+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'config show' '{prefix=config show}'
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'config show' '{prefix=config show}' result is 0 bytes
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'counter dump' '{prefix=counter dump}'
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'counter dump' '{prefix=counter dump}' result is 0 bytes
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'counter schema' '{prefix=counter schema}'
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'counter schema' '{prefix=counter schema}' result is 0 bytes
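The do_command entries above ('config diff', 'config show', 'counter dump', 'counter schema') are the OSD servicing its local admin socket, i.e. the daemon-side view of `ceph daemon osd.2 <command>`. For reference, a minimal client sketch; the NUL-terminated JSON request and the 4-byte big-endian length prefix on the reply follow the convention of Ceph's own ceph_daemon.py and should be treated as assumptions, as should the socket path:

    import json
    import socket
    import struct

    def admin_socket(asok_path: str, prefix: str) -> bytes:
        """Send one admin-socket command and return the raw JSON reply."""
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.connect(asok_path)
            s.sendall(json.dumps({"prefix": prefix}).encode() + b"\0")
            (length,) = struct.unpack(">I", s.recv(4))  # reply length prefix
            buf = b""
            while len(buf) < length:
                buf += s.recv(length - len(buf))
            return buf

    # The same command osd.2 logs above as: do_command 'config diff'
    # admin_socket("/var/run/ceph/ceph-osd.2.asok", "config diff")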
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129875968 unmapped: 38174720 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:02.056478+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: prioritycache tune_memory target: 4294967296 mapped: 129597440 unmapped: 38453248 heap: 168050688 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: tick
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_tickets
Oct 11 03:05:34 compute-0 ceph-osd[207831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:03.056655+0000)
Oct 11 03:05:34 compute-0 ceph-osd[207831]: do_command 'log dump' '{prefix=log dump}'
Oct 11 03:05:34 compute-0 ceph-mon[191930]: from='client.15891 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:34 compute-0 ceph-mon[191930]: from='client.15895 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:34 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3098009507' entity='client.admin' cmd=[{"prefix": "mgr dump"}]: dispatch
Oct 11 03:05:34 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1895804544' entity='client.admin' cmd=[{"prefix": "mgr metadata"}]: dispatch
Oct 11 03:05:34 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15905 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:34 compute-0 rsyslogd[187706]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 03:05:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr module ls"} v 0) v1
Oct 11 03:05:34 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1949488491' entity='client.admin' cmd=[{"prefix": "mgr module ls"}]: dispatch
Oct 11 03:05:34 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15909 -' entity='client.admin' cmd=[{"prefix": "orch status", "detail": true, "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:34 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr services"} v 0) v1
Oct 11 03:05:34 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2550814161' entity='client.admin' cmd=[{"prefix": "mgr services"}]: dispatch
Oct 11 03:05:35 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15913 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:35 compute-0 ceph-mon[191930]: from='client.15899 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:35 compute-0 ceph-mon[191930]: pgmap v2643: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:35 compute-0 ceph-mon[191930]: from='client.15901 -' entity='client.admin' cmd=[{"prefix": "orch ls", "export": true, "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:35 compute-0 ceph-mon[191930]: from='client.15905 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:35 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1949488491' entity='client.admin' cmd=[{"prefix": "mgr module ls"}]: dispatch
Oct 11 03:05:35 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2550814161' entity='client.admin' cmd=[{"prefix": "mgr services"}]: dispatch
Oct 11 03:05:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr versions"} v 0) v1
Oct 11 03:05:35 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2415930292' entity='client.admin' cmd=[{"prefix": "mgr versions"}]: dispatch
Oct 11 03:05:35 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15917 -' entity='client.admin' cmd=[{"prefix": "balancer eval", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:35 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2644: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:35 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon stat"} v 0) v1
Oct 11 03:05:35 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1695295698' entity='client.admin' cmd=[{"prefix": "mon stat"}]: dispatch
Oct 11 03:05:35 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15921 -' entity='client.admin' cmd=[{"prefix": "balancer status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:36 compute-0 nova_compute[356901]: 2025-10-11 03:05:36.068 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:36 compute-0 ceph-mon[191930]: from='client.15909 -' entity='client.admin' cmd=[{"prefix": "orch status", "detail": true, "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:36 compute-0 ceph-mon[191930]: from='client.15913 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:36 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2415930292' entity='client.admin' cmd=[{"prefix": "mgr versions"}]: dispatch
Oct 11 03:05:36 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1695295698' entity='client.admin' cmd=[{"prefix": "mon stat"}]: dispatch
Oct 11 03:05:36 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15927 -' entity='client.admin' cmd=[{"prefix": "healthcheck history ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:36 compute-0 ceph-mgr[192233]: mgr.server reply reply (95) Operation not supported Module 'prometheus' is not enabled/loaded (required by command 'healthcheck history ls'): use `ceph mgr module enable prometheus` to enable it
Oct 11 03:05:36 compute-0 ceph-3c7617c3-7a20-523e-a9de-20c0d6ba41da-mgr-compute-0-bzgmgr[192216]: 2025-10-11T03:05:36.872+0000 7fe891be6640 -1 mgr.server reply reply (95) Operation not supported Module 'prometheus' is not enabled/loaded (required by command 'healthcheck history ls'): use `ceph mgr module enable prometheus` to enable it
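The mgr and its containerized unit log the same (95) Operation not supported here: 'healthcheck history ls' depends on the prometheus mgr module, and the error text itself carries the remediation. A sketch that applies it and retries the failing query (command strings taken verbatim from the log lines above):

    import subprocess

    # Enable the module named in the error, then re-run the query that failed.
    subprocess.run(["ceph", "mgr", "module", "enable", "prometheus"], check=True)
    subprocess.run(
        ["ceph", "healthcheck", "history", "ls", "--format", "json-pretty"],
        check=True,
    )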
Oct 11 03:05:36 compute-0 nova_compute[356901]: 2025-10-11 03:05:36.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_shelved_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:05:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "node ls"} v 0) v1
Oct 11 03:05:36 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/20142445' entity='client.admin' cmd=[{"prefix": "node ls"}]: dispatch
Oct 11 03:05:36 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:05:37 compute-0 ceph-mon[191930]: from='client.15917 -' entity='client.admin' cmd=[{"prefix": "balancer eval", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:37 compute-0 ceph-mon[191930]: pgmap v2644: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:37 compute-0 ceph-mon[191930]: from='client.15921 -' entity='client.admin' cmd=[{"prefix": "balancer status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:37 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/20142445' entity='client.admin' cmd=[{"prefix": "node ls"}]: dispatch
Oct 11 03:05:37 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "log last", "channel": "cephadm", "format": "json-pretty"} v 0) v1
Oct 11 03:05:37 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/979346624' entity='client.admin' cmd=[{"prefix": "log last", "channel": "cephadm", "format": "json-pretty"}]: dispatch
Oct 11 03:05:37 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd crush class ls"} v 0) v1
Oct 11 03:05:37 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2264332952' entity='client.admin' cmd=[{"prefix": "osd crush class ls"}]: dispatch
Oct 11 03:05:37 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2645: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:37 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd crush dump"} v 0) v1
Oct 11 03:05:37 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3409527812' entity='client.admin' cmd=[{"prefix": "osd crush dump"}]: dispatch
Oct 11 03:05:37 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr dump", "format": "json-pretty"} v 0) v1
Oct 11 03:05:37 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/154609841' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json-pretty"}]: dispatch
Oct 11 03:05:37 compute-0 nova_compute[356901]: 2025-10-11 03:05:37.897 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:05:38 compute-0 ceph-mon[191930]: from='client.15927 -' entity='client.admin' cmd=[{"prefix": "healthcheck history ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:38 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/979346624' entity='client.admin' cmd=[{"prefix": "log last", "channel": "cephadm", "format": "json-pretty"}]: dispatch
Oct 11 03:05:38 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2264332952' entity='client.admin' cmd=[{"prefix": "osd crush class ls"}]: dispatch
Oct 11 03:05:38 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3409527812' entity='client.admin' cmd=[{"prefix": "osd crush dump"}]: dispatch
Oct 11 03:05:38 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/154609841' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json-pretty"}]: dispatch
Oct 11 03:05:38 compute-0 nova_compute[356901]: 2025-10-11 03:05:38.271 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd crush rule ls"} v 0) v1
Oct 11 03:05:38 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1557775261' entity='client.admin' cmd=[{"prefix": "osd crush rule ls"}]: dispatch
Oct 11 03:05:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr metadata", "format": "json-pretty"} v 0) v1
Oct 11 03:05:38 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2679158688' entity='client.admin' cmd=[{"prefix": "mgr metadata", "format": "json-pretty"}]: dispatch
Oct 11 03:05:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd crush show-tunables"} v 0) v1
Oct 11 03:05:38 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3814040216' entity='client.admin' cmd=[{"prefix": "osd crush show-tunables"}]: dispatch
Oct 11 03:05:38 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr module ls", "format": "json-pretty"} v 0) v1
Oct 11 03:05:38 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1135641234' entity='client.admin' cmd=[{"prefix": "mgr module ls", "format": "json-pretty"}]: dispatch
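The burst of handle_command/audit lines above all share one shape — from='...' entity='...' cmd=[{...}]: dispatch — with the command encoded as a JSON array. When triaging a capture like this, it helps to tally which admin commands are hitting the monitor; a small log-reading convenience (the regex targets exactly the shape above, and the input path is a placeholder):

    import json
    import re
    from collections import Counter

    # Matches the audit lines above, e.g.
    #   from='client.15891 -' entity='client.admin' cmd=[{"prefix": ...}]: dispatch
    pat = re.compile(r"entity='([^']*)' cmd=(\[.*\]): dispatch")

    counts = Counter()
    with open("messages") as f:  # placeholder path for this captured log
        for line in f:
            m = pat.search(line)
            if m:
                prefix = json.loads(m.group(2))[0].get("prefix", "?")
                counts[(m.group(1), prefix)] += 1

    for (entity, prefix), n in counts.most_common(10):
        print(f"{n:4d}  {entity:16s}  {prefix}")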
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:37.174795+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2635c00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 14.540469170s of 14.669846535s, submitted: 15
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca2635c00 session 0x559ca231b860
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108421120 unmapped: 20602880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca2648000 session 0x559ca21bb4a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:38.175120+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108421120 unmapped: 20602880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:39.175473+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca14ca800 session 0x559ca01d9c20
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108691456 unmapped: 20332544 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:40.175865+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1185450 data_alloc: 218103808 data_used: 14958592
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99af000/0x0/0x4ffc00000, data 0x1bfa21f/0x1cbf000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108740608 unmapped: 20283392 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca14cb400 session 0x559ca1c96d20
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:41.176066+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99af000/0x0/0x4ffc00000, data 0x1bfa21f/0x1cbf000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108740608 unmapped: 20283392 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:42.176455+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108740608 unmapped: 20283392 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:43.177337+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1520800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca1520800 session 0x559ca44ae5a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108748800 unmapped: 20275200 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:44.177581+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2635c00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108748800 unmapped: 20275200 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:45.177815+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1189229 data_alloc: 218103808 data_used: 14966784
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108765184 unmapped: 20258816 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:46.178670+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108765184 unmapped: 20258816 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:47.179001+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108699648 unmapped: 20324352 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:48.179300+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3000.1 total, 600.0 interval
                                            Cumulative writes: 8598 writes, 33K keys, 8598 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.01 MB/s
                                            Cumulative WAL: 8598 writes, 1979 syncs, 4.34 writes per sync, written: 0.02 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 783 writes, 2196 keys, 783 commit groups, 1.0 writes per commit group, ingest: 1.52 MB, 0.00 MB/s
                                            Interval WAL: 783 writes, 356 syncs, 2.20 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:49.179723+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:50.180132+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1212241 data_alloc: 234881024 data_used: 18141184
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:51.180498+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:52.180910+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:53.181179+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:54.181662+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:55.181946+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1212241 data_alloc: 234881024 data_used: 18141184
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:56.182155+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:57.182569+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:58.182797+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:59.183326+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:00.183671+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1212241 data_alloc: 234881024 data_used: 18141184
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:01.184029+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:02.184499+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:03.185016+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:04.185416+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:05.185691+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 heartbeat osd_stat(store_statfs(0x4f99ab000/0x0/0x4ffc00000, data 0x1bfe21f/0x1cc3000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1212241 data_alloc: 234881024 data_used: 18141184
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:06.186384+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca2635c00 session 0x559ca1caa5a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:07.187131+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca2648800 session 0x559ca44ae000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 29.282367706s of 29.483385086s, submitted: 24
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca2649400 session 0x559ca1c93c20
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 108568576 unmapped: 20455424 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:08.187539+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106881024 unmapped: 22142976 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 ms_handle_reset con 0x559ca14ca800 session 0x559ca126cb40
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:09.187832+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106881024 unmapped: 22142976 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:10.188379+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 106881024 unmapped: 22142976 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1156347 data_alloc: 218103808 data_used: 14954496
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 130 handle_osd_map epochs [131,131], i have 130, src has [1,131]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 131 heartbeat osd_stat(store_statfs(0x4f9da5000/0x0/0x4ffc00000, data 0x18051fc/0x18c9000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:11.188761+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 131 ms_handle_reset con 0x559ca14cb400 session 0x559c9ff105a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101343232 unmapped: 27680768 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:12.189324+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101343232 unmapped: 27680768 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:13.189797+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101343232 unmapped: 27680768 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 131 heartbeat osd_stat(store_statfs(0x4fa5a1000/0x0/0x4ffc00000, data 0x1006dcd/0x10cc000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:14.190467+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101343232 unmapped: 27680768 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:15.190726+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101343232 unmapped: 27680768 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1085649 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 131 handle_osd_map epochs [131,132], i have 131, src has [1,132]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:16.191528+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:17.192434+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 132 heartbeat osd_stat(store_statfs(0x4fa59e000/0x0/0x4ffc00000, data 0x1008830/0x10cf000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:18.193112+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:19.193798+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:20.194398+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1088623 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 132 heartbeat osd_stat(store_statfs(0x4fa59e000/0x0/0x4ffc00000, data 0x1008830/0x10cf000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1520800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:21.194651+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:22.195096+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:23.195357+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:24.195838+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:25.196432+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1088623 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 132 handle_osd_map epochs [133,133], i have 132, src has [1,133]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 17.981964111s of 18.491548538s, submitted: 87
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 132 heartbeat osd_stat(store_statfs(0x4fa59e000/0x0/0x4ffc00000, data 0x1008830/0x10cf000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [0,0,0,0,1])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 132 handle_osd_map epochs [133,133], i have 133, src has [1,133]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 132 handle_osd_map epochs [133,133], i have 133, src has [1,133]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 133 ms_handle_reset con 0x559ca1520800 session 0x559ca21bbc20
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:26.197054+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:27.197529+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:28.197812+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:29.198107+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:30.198369+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 133 heartbeat osd_stat(store_statfs(0x4fa59b000/0x0/0x4ffc00000, data 0x100a3ad/0x10d2000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1091597 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:31.198793+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:32.200203+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2635c00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:33.200413+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101359616 unmapped: 27664384 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 133 handle_osd_map epochs [134,134], i have 133, src has [1,134]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 134 ms_handle_reset con 0x559ca2635c00 session 0x559ca21e92c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:34.200783+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:35.200996+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1094571 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 134 heartbeat osd_stat(store_statfs(0x4fa598000/0x0/0x4ffc00000, data 0x100bf7e/0x10d5000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:36.201397+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:37.201732+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 134 heartbeat osd_stat(store_statfs(0x4fa598000/0x0/0x4ffc00000, data 0x100bf7e/0x10d5000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:38.202148+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 134 heartbeat osd_stat(store_statfs(0x4fa598000/0x0/0x4ffc00000, data 0x100bf7e/0x10d5000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:39.202514+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:40.202968+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1094571 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:41.203502+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 134 handle_osd_map epochs [135,135], i have 134, src has [1,135]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 15.483499527s of 15.565405846s, submitted: 15
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:42.203924+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:43.204297+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:44.204556+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:45.204898+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1097545 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:46.205276+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:47.205642+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:48.206036+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:49.206698+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:50.207140+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1097545 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:51.207386+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:52.207727+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:53.208141+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:54.208908+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:55.209392+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1097545 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:56.209719+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:57.210079+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:58.210410+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:59.210760+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:00.211223+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1097545 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:01.211794+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:02.212128+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:03.212508+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:04.212843+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100941824 unmapped: 28082176 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:05.213070+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1097545 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:06.213406+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:07.213769+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:08.214133+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:09.214365+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:10.214837+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1097545 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:11.215046+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:12.215434+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:13.215777+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:14.216212+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:15.216571+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1097545 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:16.216929+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:17.217216+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:18.217637+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:19.217998+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:20.218437+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa595000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 39.451190948s of 39.471805573s, submitted: 19
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100950016 unmapped: 28073984 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:21.218901+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 100974592 unmapped: 28049408 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:22.219398+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101031936 unmapped: 27992064 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:23.219834+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:24.220343+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:25.220782+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:26.221336+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:27.221695+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:28.221966+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:29.222413+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:30.222821+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:31.223338+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:32.223772+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:33.224453+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:34.225057+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:35.225529+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:36.225901+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:37.226195+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:38.226430+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:39.226947+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101081088 unmapped: 27942912 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:40.227342+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:41.227674+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:42.227910+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:43.228147+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:44.228919+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:45.229161+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:46.229504+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:47.229771+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:48.229995+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:49.230420+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:50.230867+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:51.231364+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:52.231761+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:53.231962+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:54.232496+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:55.232887+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:56.233360+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:57.233759+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:58.233988+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:59.234213+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:00.234611+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:01.235401+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:02.235724+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:03.236081+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:04.236523+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:05.236999+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:06.237479+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:07.237889+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:08.238396+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:09.239004+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:10.239619+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:11.240062+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:12.240461+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:13.240893+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:14.241360+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:15.241853+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:16.242391+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:17.242817+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:18.243090+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:19.243618+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:20.244134+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:21.244517+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:22.244930+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:23.245472+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:24.245869+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:25.246362+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:26.246749+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:27.247158+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:28.247516+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:29.247844+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:30.248165+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:31.248425+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:32.248742+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:33.249156+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:34.249686+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:35.249974+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:36.250206+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:37.250651+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:38.250957+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:39.251182+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:40.251765+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:41.251989+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:42.252332+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:43.252771+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:44.253210+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:45.253699+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:46.254086+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:47.254383+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:48.254612+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:49.254864+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:50.255140+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:51.255495+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:52.255707+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:53.255969+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:54.256482+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101089280 unmapped: 27934720 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:55.256839+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:56.257120+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:57.257531+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:58.257875+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:59.258209+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:00.258688+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:01.258941+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:02.259191+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:03.259478+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:04.259773+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:05.260213+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:06.260538+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:07.260748+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:08.261152+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:09.261568+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:10.262007+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:11.262468+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:12.262810+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:13.263096+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:14.263424+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101097472 unmapped: 27926528 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:15.263794+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:16.264147+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:17.264521+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:18.264725+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:19.264981+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:20.265455+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:21.265663+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:22.266046+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:23.266445+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:24.266859+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:25.267423+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:26.267692+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:27.268027+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:28.268399+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:29.268753+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:30.269078+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:31.269430+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:32.269899+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:33.270294+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:34.270606+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:35.270940+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:36.271373+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:37.271603+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:38.271906+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:39.272288+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:40.272642+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:41.273014+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:42.273412+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:43.273758+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:44.274033+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:45.274648+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:46.274992+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:47.275405+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:48.275690+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:49.276076+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:50.276332+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:51.276723+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:52.277640+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:53.278105+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:54.278457+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:55.278679+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:56.278886+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:57.279307+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:58.279526+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:59.279983+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:00.280331+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:01.280756+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:02.281141+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:03.281415+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:04.281712+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:05.282142+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:06.282359+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:07.282611+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:08.283062+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:09.283305+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:10.283527+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:11.283826+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:12.284149+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:13.284478+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:14.285083+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:15.285503+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:16.285951+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:17.286398+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:18.286632+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:19.286969+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:20.287203+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:21.287458+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:22.287840+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:23.288063+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:24.288485+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:25.288783+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:26.289052+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:27.289283+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:28.289590+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:29.290036+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:30.290535+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:31.290810+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1096665 data_alloc: 218103808 data_used: 8146944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:32.291015+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:33.291353+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101105664 unmapped: 27918336 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 heartbeat osd_stat(store_statfs(0x4fa596000/0x0/0x4ffc00000, data 0x100d9e1/0x10d8000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:34.291733+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 193.554534912s of 194.195724487s, submitted: 90
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101146624 unmapped: 27877376 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:35.291957+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 135 handle_osd_map epochs [136,136], i have 135, src has [1,136]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 136 ms_handle_reset con 0x559ca14ca800 session 0x559ca21e90e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101154816 unmapped: 27869184 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:36.292497+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101220352 unmapped: 27803648 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 136 heartbeat osd_stat(store_statfs(0x4f991e000/0x0/0x4ffc00000, data 0x1c7f5c4/0x1d4f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1194261 data_alloc: 218103808 data_used: 8155136
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:37.292865+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 136 handle_osd_map epochs [137,137], i have 136, src has [1,137]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14cb400 session 0x559ca21e8d20
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101244928 unmapped: 27779072 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:38.293146+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101244928 unmapped: 27779072 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:39.293547+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:40.293983+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:41.294475+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:42.294811+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:43.295401+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:44.295960+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:45.296316+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:46.296717+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:47.297013+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:48.297270+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:49.297557+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:50.297773+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:51.298045+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:52.298372+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:53.298890+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:54.299288+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:55.299745+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:56.300163+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:57.300552+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:58.300771+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:59.301108+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:00.301433+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:01.301692+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:02.301939+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:03.302468+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:04.302823+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:05.303102+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:06.303550+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:07.303926+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:08.304441+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:09.304765+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:10.305404+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:11.305816+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:12.306037+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:13.306358+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:14.306636+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:15.306894+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:16.307302+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:17.307825+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:18.308030+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:19.308312+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:20.308687+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:21.308948+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:22.309323+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:23.309651+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:24.310100+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:25.310352+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:26.310647+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:27.310870+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196347 data_alloc: 218103808 data_used: 8155136
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:28.311070+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:29.311690+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:30.311926+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1520800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1520800 session 0x559ca11c05a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2649400 session 0x559c9f416000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2648800 session 0x559ca1c961e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:31.312288+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 101253120 unmapped: 27770880 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14cb400 session 0x559ca2208000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1520800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1520800 session 0x559ca2209680
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2648800 session 0x559ca12a05a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14ca800 session 0x559ca21e8960
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:32.312569+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1207867 data_alloc: 218103808 data_used: 12812288
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2649400 session 0x559ca21e83c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:33.312959+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:34.313448+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:35.313815+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:36.314076+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:37.314452+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1207867 data_alloc: 218103808 data_used: 12812288
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:38.314755+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:39.315147+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f991b000/0x0/0x4ffc00000, data 0x1c81141/0x1d52000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:40.315386+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:41.315672+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:42.316400+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14ca800 session 0x559c9ff10000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14cb400 session 0x559c9f5dc960
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1520800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1207867 data_alloc: 218103808 data_used: 12812288
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 67.697204590s of 67.921241760s, submitted: 29
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:43.316736+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 104873984 unmapped: 24150016 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca00bfc00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f9918000/0x0/0x4ffc00000, data 0x1c811b3/0x1d56000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [0,0,0,0,0,0,0,3,1])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:44.317109+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 105930752 unmapped: 23093248 heap: 129024000 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:45.317356+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131252224 unmapped: 9363456 heap: 140615680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:46.317827+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f832a000/0x0/0x4ffc00000, data 0x326d205/0x3344000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [0,0,0,0,0,0,0,0,0,20,0,14])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 123445248 unmapped: 20848640 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1520800 session 0x559c9f48a5a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:47.318202+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca00bec00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca00bec00 session 0x559ca157a3c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0800 session 0x559ca1caab40
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca00bec00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109428736 unmapped: 34865152 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2649800 session 0x559ca44af0e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1564012 data_alloc: 218103808 data_used: 12832768
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14ca800 session 0x559ca098d0e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2648800 session 0x559ca12a12c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14cb400 session 0x559ca01d92c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1520800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca00bfc00 session 0x559ca21e8000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:48.318695+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14ca800 session 0x559ca11c1680
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14cb400 session 0x559ca1cf01e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2648800 session 0x559ca1d13e00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109486080 unmapped: 34807808 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2649800 session 0x559ca1d130e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:49.319047+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109510656 unmapped: 34783232 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca00bec00 session 0x559ca1c8c3c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14ca800 session 0x559ca1261c20
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:50.319317+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6bf0000/0x0/0x4ffc00000, data 0x49a72e9/0x4a7e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [0,0,0,0,0,0,0,0,2,2])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109199360 unmapped: 35094528 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1520800 session 0x559ca25e54a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14cb400 session 0x559c9f6b81e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:51.319646+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559ca1261860
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a000 session 0x559ca25e5c20
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109199360 unmapped: 35094528 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14ca800 session 0x559ca1cac3c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2648800 session 0x559ca0123680
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559c9f5d0780
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:52.320064+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14cb400 session 0x559c9f5d6f00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109199360 unmapped: 35094528 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1520800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1520800 session 0x559ca1d12b40
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1562696 data_alloc: 218103808 data_used: 12820480
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:53.320493+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6bf0000/0x0/0x4ffc00000, data 0x49a72e9/0x4a7e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559ca1c94b40
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109199360 unmapped: 35094528 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14ca800 session 0x559ca1c8cf00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14cb400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:54.320818+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2648800 session 0x559ca126d4a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109199360 unmapped: 35094528 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 3.315246582s of 11.861621857s, submitted: 155
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:55.321368+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a400 session 0x559c9ebfa780
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14cb400 session 0x559c9f7105a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559ca1c95e00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2649800 session 0x559c9ff92780
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109477888 unmapped: 34816000 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3ac00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:56.321588+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6b71000/0x0/0x4ffc00000, data 0x4a2530c/0x4afd000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109527040 unmapped: 34766848 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:57.321837+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109527040 unmapped: 34766848 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1576340 data_alloc: 218103808 data_used: 12824576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:58.322074+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 109527040 unmapped: 34766848 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:59.322326+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 111394816 unmapped: 32899072 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:00.322534+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 113188864 unmapped: 31105024 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:01.322810+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 113549312 unmapped: 30744576 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6b71000/0x0/0x4ffc00000, data 0x4a2530c/0x4afd000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:02.323015+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 115482624 unmapped: 28811264 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6b71000/0x0/0x4ffc00000, data 0x4a2530c/0x4afd000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1658580 data_alloc: 234881024 data_used: 24391680
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:03.323218+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 119177216 unmapped: 25116672 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6b71000/0x0/0x4ffc00000, data 0x4a2530c/0x4afd000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:04.323555+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 119185408 unmapped: 25108480 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:05.323757+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 119185408 unmapped: 25108480 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:06.323963+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 21479424 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:07.324206+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 130637824 unmapped: 13656064 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1793940 data_alloc: 251658240 data_used: 40927232
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:08.324397+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 142221312 unmapped: 2072576 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:09.324601+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 143204352 unmapped: 1089536 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6b71000/0x0/0x4ffc00000, data 0x4a2530c/0x4afd000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 15.136783600s of 15.585795403s, submitted: 16
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b400 session 0x559ca21e8780
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:10.324840+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b800 session 0x559ca1d130e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3bc00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 143253504 unmapped: 1040384 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3bc00 session 0x559ca21e85a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:11.325180+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137125888 unmapped: 7168000 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:12.325477+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f7a7f000/0x0/0x4ffc00000, data 0x3a95287/0x3b6b000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137125888 unmapped: 7168000 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1674346 data_alloc: 251658240 data_used: 39333888
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a800 session 0x559c9f5d5c20
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b000 session 0x559ca157a960
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:13.325668+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127270912 unmapped: 17022976 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559ca11c0780
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:14.325980+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f8a34000/0x0/0x4ffc00000, data 0x2b66215/0x2c3a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127270912 unmapped: 17022976 heap: 144293888 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b400 session 0x559ca21e8960
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:15.326395+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f84fb000/0x0/0x4ffc00000, data 0x2d9f215/0x2e73000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f84fb000/0x0/0x4ffc00000, data 0x2d9f215/0x2e73000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:16.326612+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:17.326879+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1535170 data_alloc: 234881024 data_used: 24551424
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:18.327321+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:19.327561+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f7ff1000/0x0/0x4ffc00000, data 0x35a9215/0x367d000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:20.327831+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b800 session 0x559c9ff11a40
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:21.328071+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559c9ff10f00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:22.328469+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1535170 data_alloc: 234881024 data_used: 24551424
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f7ff1000/0x0/0x4ffc00000, data 0x35a9215/0x367d000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a800 session 0x559ca157b680
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:23.328730+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 12.523505211s of 13.102807045s, submitted: 111
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b000 session 0x559ca126c1e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:24.328966+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:25.329382+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126730240 unmapped: 32260096 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f7ff0000/0x0/0x4ffc00000, data 0x35a9225/0x367e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:26.329584+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f7ff0000/0x0/0x4ffc00000, data 0x35a9225/0x367e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126328832 unmapped: 32661504 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:27.329782+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126263296 unmapped: 32727040 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1564048 data_alloc: 234881024 data_used: 28315648
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:28.329992+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127082496 unmapped: 31907840 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:29.330209+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127344640 unmapped: 31645696 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:30.330463+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127344640 unmapped: 31645696 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:31.331730+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127344640 unmapped: 31645696 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f7ff0000/0x0/0x4ffc00000, data 0x35a9225/0x367e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:32.332190+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127344640 unmapped: 31645696 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1605328 data_alloc: 234881024 data_used: 34054144
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:33.332472+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 9.957438469s of 10.036822319s, submitted: 2
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131047424 unmapped: 27942912 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:34.332738+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132907008 unmapped: 26083328 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f7408000/0x0/0x4ffc00000, data 0x4191225/0x4266000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [0,0,0,0,0,0,0,0,3])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:35.332960+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134840320 unmapped: 24150016 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:36.333143+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134553600 unmapped: 24436736 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:37.333332+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 23412736 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1764484 data_alloc: 251658240 data_used: 34795520
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:38.333543+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 23412736 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:39.333748+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 23412736 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:40.334175+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6d08000/0x0/0x4ffc00000, data 0x4883225/0x4958000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 23412736 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6d08000/0x0/0x4ffc00000, data 0x4883225/0x4958000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:41.334420+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135643136 unmapped: 23347200 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:42.334852+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133816320 unmapped: 25174016 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1756532 data_alloc: 251658240 data_used: 34807808
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:43.335132+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133816320 unmapped: 25174016 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:44.335383+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133824512 unmapped: 25165824 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6cf6000/0x0/0x4ffc00000, data 0x48a3225/0x4978000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:45.335602+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133824512 unmapped: 25165824 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2649800 session 0x559c9f5d6960
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca0492c00 session 0x559c9f5d70e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca0492c00 session 0x559c9f5d65a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:46.335897+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559ca1cad2c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 10.866591454s of 13.093039513s, submitted: 163
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a800 session 0x559ca127bc20
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b000 session 0x559c9f3ec5a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2649800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2649800 session 0x559c9ff93a40
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133971968 unmapped: 25018368 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca0492c00 session 0x559c9f701680
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559c9f7001e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:47.336143+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133971968 unmapped: 25018368 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1796118 data_alloc: 251658240 data_used: 34807808
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:48.336520+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133971968 unmapped: 25018368 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:49.336865+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133971968 unmapped: 25018368 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:50.337103+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6855000/0x0/0x4ffc00000, data 0x4d43235/0x4e19000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6855000/0x0/0x4ffc00000, data 0x4d43235/0x4e19000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a800 session 0x559ca12fc1e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133971968 unmapped: 25018368 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:51.337352+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b000 session 0x559c9ff11860
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133980160 unmapped: 25010176 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0493c00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca0493c00 session 0x559c9f3ed680
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:52.337747+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca0492c00 session 0x559c9ff92f00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134291456 unmapped: 24698880 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1802017 data_alloc: 251658240 data_used: 34807808
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:53.337966+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6830000/0x0/0x4ffc00000, data 0x4d67245/0x4e3e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134291456 unmapped: 24698880 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:54.338220+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134291456 unmapped: 24698880 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:55.338643+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134291456 unmapped: 24698880 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:56.338837+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135061504 unmapped: 23928832 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:57.339061+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca11b0400 session 0x559ca098c3c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 10.916957855s of 11.087557793s, submitted: 16
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a800 session 0x559ca126de00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 136241152 unmapped: 22749184 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1766757 data_alloc: 251658240 data_used: 34811904
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:58.339276+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f682d000/0x0/0x4ffc00000, data 0x4d6a245/0x4e41000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [0,0,0,1])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3b000 session 0x559ca259b860
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134742016 unmapped: 24248320 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:59.339485+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134742016 unmapped: 24248320 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:00.339750+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 23420928 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:01.339972+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 138387456 unmapped: 20602880 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:02.340182+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137797632 unmapped: 21192704 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1838016 data_alloc: 251658240 data_used: 36421632
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:03.340605+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 138289152 unmapped: 20701184 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:04.340899+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6551000/0x0/0x4ffc00000, data 0x503f225/0x5114000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 138289152 unmapped: 20701184 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:05.341371+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 138297344 unmapped: 20692992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:06.341742+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 138297344 unmapped: 20692992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:07.342124+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 9.573361397s of 10.074029922s, submitted: 121
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137592832 unmapped: 21397504 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1841876 data_alloc: 251658240 data_used: 36651008
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:08.342499+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6556000/0x0/0x4ffc00000, data 0x5041225/0x5116000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137592832 unmapped: 21397504 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:09.342826+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a400 session 0x559ca05663c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3ac00 session 0x559ca1d13e00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137568256 unmapped: 21422080 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:10.343030+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137568256 unmapped: 21422080 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:11.343638+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137568256 unmapped: 21422080 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:12.343925+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137576448 unmapped: 21413888 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1841696 data_alloc: 251658240 data_used: 36655104
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:13.344165+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6553000/0x0/0x4ffc00000, data 0x5046225/0x511b000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137576448 unmapped: 21413888 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:14.344536+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137674752 unmapped: 21315584 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:15.344759+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137674752 unmapped: 21315584 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:16.345196+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6553000/0x0/0x4ffc00000, data 0x5046225/0x511b000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137691136 unmapped: 21299200 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:17.345528+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 137691136 unmapped: 21299200 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:18.345905+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1841856 data_alloc: 251658240 data_used: 37851136
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 11.048368454s of 11.090108871s, submitted: 7
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca14ca800 session 0x559ca231af00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca2648800 session 0x559c9f794000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6553000/0x0/0x4ffc00000, data 0x5046225/0x511b000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 136364032 unmapped: 22626304 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:19.346107+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 heartbeat osd_stat(store_statfs(0x4f6553000/0x0/0x4ffc00000, data 0x5046225/0x511b000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [0,1,1])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 ms_handle_reset con 0x559ca1f3a400 session 0x559ca22090e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 137 handle_osd_map epochs [137,138], i have 137, src has [1,138]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 138 ms_handle_reset con 0x559ca1f3a800 session 0x559c9f6b85a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134152192 unmapped: 24838144 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:20.346847+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 138 ms_handle_reset con 0x559ca14ca800 session 0x559ca1cade00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 138 ms_handle_reset con 0x559ca1f3a400 session 0x559ca1cad680
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3ac00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134160384 unmapped: 24829952 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:21.347323+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 138 handle_osd_map epochs [139,139], i have 138, src has [1,139]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134160384 unmapped: 24829952 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:22.347749+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 139 ms_handle_reset con 0x559ca1f3ac00 session 0x559ca21e8780
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 139 heartbeat osd_stat(store_statfs(0x4f730e000/0x0/0x4ffc00000, data 0x4287934/0x435f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134168576 unmapped: 24821760 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:23.348146+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1697050 data_alloc: 234881024 data_used: 32272384
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 139 heartbeat osd_stat(store_statfs(0x4f730e000/0x0/0x4ffc00000, data 0x4287911/0x435e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134176768 unmapped: 24813568 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:24.348551+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134176768 unmapped: 24813568 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:25.348747+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 139 heartbeat osd_stat(store_statfs(0x4f730e000/0x0/0x4ffc00000, data 0x4287911/0x435e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134176768 unmapped: 24813568 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:26.349147+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134176768 unmapped: 24813568 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:27.349650+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 139 heartbeat osd_stat(store_statfs(0x4f730e000/0x0/0x4ffc00000, data 0x4287911/0x435e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134176768 unmapped: 24813568 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:28.350359+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1697050 data_alloc: 234881024 data_used: 32272384
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134176768 unmapped: 24813568 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:29.350737+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134176768 unmapped: 24813568 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:30.351370+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 139 handle_osd_map epochs [139,140], i have 139, src has [1,140]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 11.991467476s of 12.484093666s, submitted: 97
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134193152 unmapped: 24797184 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:31.351818+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134201344 unmapped: 24788992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:32.352352+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:33.352782+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134201344 unmapped: 24788992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1699243 data_alloc: 234881024 data_used: 32280576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f730c000/0x0/0x4ffc00000, data 0x4289374/0x4361000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:34.353321+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134201344 unmapped: 24788992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:35.353706+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134201344 unmapped: 24788992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:36.354113+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134201344 unmapped: 24788992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:37.354377+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134201344 unmapped: 24788992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f730d000/0x0/0x4ffc00000, data 0x4289374/0x4361000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3b400 session 0x559ca126d860
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:38.354583+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3b800 session 0x559c9f795680
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134201344 unmapped: 24788992 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1699515 data_alloc: 234881024 data_used: 32301056
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f730d000/0x0/0x4ffc00000, data 0x4289374/0x4361000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:39.354867+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127000576 unmapped: 31989760 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3b800 session 0x559ca1cada40
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:40.355092+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126181376 unmapped: 32808960 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:41.355358+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126181376 unmapped: 32808960 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:42.355603+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126181376 unmapped: 32808960 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:43.356005+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126181376 unmapped: 32808960 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1482020 data_alloc: 234881024 data_used: 20946944
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:44.356270+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126181376 unmapped: 32808960 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f84ea000/0x0/0x4ffc00000, data 0x30ad364/0x3184000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:45.356462+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126181376 unmapped: 32808960 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f84ea000/0x0/0x4ffc00000, data 0x30ad364/0x3184000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:46.356817+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126181376 unmapped: 32808960 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 15.414695740s of 15.540205956s, submitted: 39
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:47.357216+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126197760 unmapped: 32792576 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:48.359739+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126214144 unmapped: 32776192 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1489084 data_alloc: 234881024 data_used: 21655552
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:49.360015+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126214144 unmapped: 32776192 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:50.360293+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126214144 unmapped: 32776192 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:51.360477+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126214144 unmapped: 32776192 heap: 158990336 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca14ca800 session 0x559ca25e5680
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3a400 session 0x559ca259ab40
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3ac00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3ac00 session 0x559c9f5dc5a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3b400 session 0x559ca1cacf00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca14ca800 session 0x559c9f5dc1e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f838c000/0x0/0x4ffc00000, data 0x320a374/0x32e2000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3a400 session 0x559c9ff92d20
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3ac00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3ac00 session 0x559ca157af00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3b800 session 0x559c9f5d70e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca2648800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:52.361529+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca2648800 session 0x559ca157ab40
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127098880 unmapped: 43442176 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:53.361723+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127098880 unmapped: 43442176 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1609851 data_alloc: 234881024 data_used: 21659648
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:54.362086+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127098880 unmapped: 43442176 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca14ca800 session 0x559c9ff13860
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:55.362356+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127098880 unmapped: 43442176 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3a400 session 0x559ca051be00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:56.362606+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127098880 unmapped: 43442176 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3ac00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3ac00 session 0x559c9f416000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 10.025475502s of 10.205513954s, submitted: 31
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3b800 session 0x559c9f6b9860
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:57.362843+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127123456 unmapped: 43417600 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0493800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f762d000/0x0/0x4ffc00000, data 0x3f673a7/0x4041000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:58.363089+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f762d000/0x0/0x4ffc00000, data 0x3f673a7/0x4041000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127123456 unmapped: 43417600 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1612578 data_alloc: 234881024 data_used: 21663744
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:59.363468+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127123456 unmapped: 43417600 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:00.363693+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 126853120 unmapped: 43687936 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:01.363872+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f762d000/0x0/0x4ffc00000, data 0x3f673a7/0x4041000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 129507328 unmapped: 41033728 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:02.364128+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:03.364370+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1722604 data_alloc: 251658240 data_used: 36622336
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:04.364739+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:05.364931+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:06.365387+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7624000/0x0/0x4ffc00000, data 0x3f6d3a7/0x4047000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:07.365776+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:08.366130+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1722604 data_alloc: 251658240 data_used: 36622336
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:09.366389+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:10.366699+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7624000/0x0/0x4ffc00000, data 0x3f6d3a7/0x4047000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:11.366950+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:12.367341+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:13.367818+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1722604 data_alloc: 251658240 data_used: 36622336
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:14.368480+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:15.368883+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7624000/0x0/0x4ffc00000, data 0x3f6d3a7/0x4047000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:16.369166+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:17.369495+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 21.206089020s of 21.246303558s, submitted: 7
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca0492c00 session 0x559ca21e94a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca11b0400 session 0x559ca21e90e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:18.369803+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132308992 unmapped: 38232064 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1722308 data_alloc: 251658240 data_used: 36626432
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:19.369973+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca0492c00 session 0x559c9ff92f00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 42786816 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f8a2b000/0x0/0x4ffc00000, data 0x2b40335/0x2c18000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:20.370424+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 42786816 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:21.370655+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 42786816 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:22.370909+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 42786816 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:23.371196+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 42786816 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1504244 data_alloc: 234881024 data_used: 27107328
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:24.371750+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 42786816 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:25.372147+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f8a2b000/0x0/0x4ffc00000, data 0x2b40335/0x2c18000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 42778624 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:26.372507+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f8a2b000/0x0/0x4ffc00000, data 0x2b40335/0x2c18000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 42778624 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:27.372747+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 42778624 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:28.372948+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 42778624 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1504244 data_alloc: 234881024 data_used: 27107328
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:29.373205+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 42778624 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:30.373677+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 42778624 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 12.612641335s of 12.727919579s, submitted: 29
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:31.373922+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132325376 unmapped: 38215680 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:32.374110+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f839d000/0x0/0x4ffc00000, data 0x31f9335/0x32d1000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132374528 unmapped: 38166528 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:33.374493+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1563814 data_alloc: 234881024 data_used: 27361280
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:34.374838+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:35.375220+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:36.375846+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f837c000/0x0/0x4ffc00000, data 0x3212335/0x32ea000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:37.376137+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:38.376571+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1563814 data_alloc: 234881024 data_used: 27361280
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:39.376996+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:40.377446+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f837c000/0x0/0x4ffc00000, data 0x3212335/0x32ea000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:41.377795+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:42.378013+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:43.378403+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1563814 data_alloc: 234881024 data_used: 27361280
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:44.378717+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f837c000/0x0/0x4ffc00000, data 0x3212335/0x32ea000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:45.379052+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:46.379399+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:47.379736+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:48.380149+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1563814 data_alloc: 234881024 data_used: 27361280
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:49.380579+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f837c000/0x0/0x4ffc00000, data 0x3212335/0x32ea000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:50.380883+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:51.381365+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:52.381638+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134029312 unmapped: 36511744 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca14ca800 session 0x559ca051b680
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3a400 session 0x559c9f5dda40
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3ac00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3ac00 session 0x559c9ff12f00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca0492c00 session 0x559c9f5d4f00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 21.964023590s of 22.214895248s, submitted: 90
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:53.381878+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca11b0400 session 0x559ca12a1860
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca14ca800 session 0x559ca12a0780
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3a400 session 0x559ca1cf0b40
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca1f3b800 session 0x559ca1cf05a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134004736 unmapped: 36536320 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 ms_handle_reset con 0x559ca0492c00 session 0x559ca1cf1c20
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1607568 data_alloc: 234881024 data_used: 27361280
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:54.382338+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134004736 unmapped: 36536320 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:55.382693+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134004736 unmapped: 36536320 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:56.383000+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134086656 unmapped: 36454400 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:57.383295+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134086656 unmapped: 36454400 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:58.383472+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134086656 unmapped: 36454400 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1607568 data_alloc: 234881024 data_used: 27361280
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:59.383962+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134094848 unmapped: 36446208 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:00.384403+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134094848 unmapped: 36446208 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:01.384772+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134094848 unmapped: 36446208 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:02.385001+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134062080 unmapped: 36478976 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:03.385378+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 10.491098404s of 10.610158920s, submitted: 9
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 134520832 unmapped: 36020224 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1632976 data_alloc: 234881024 data_used: 31027200
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:04.385693+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:05.386048+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:06.386448+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:07.386826+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:08.387184+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1644336 data_alloc: 234881024 data_used: 32641024
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:09.387379+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:10.387626+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:11.387987+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:12.388509+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:13.388937+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1644336 data_alloc: 234881024 data_used: 32641024
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:14.389477+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135569408 unmapped: 34971648 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:15.389888+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 34963456 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:16.390414+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 34963456 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:17.390979+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 34963456 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:18.391499+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 34963456 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1644336 data_alloc: 234881024 data_used: 32641024
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:19.391998+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 34963456 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:20.392436+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 34963456 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:21.392910+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135577600 unmapped: 34963456 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:22.393268+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135585792 unmapped: 34955264 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:23.393723+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135585792 unmapped: 34955264 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1644336 data_alloc: 234881024 data_used: 32641024
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:24.394435+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135585792 unmapped: 34955264 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:25.394875+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135593984 unmapped: 34947072 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:26.395340+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135593984 unmapped: 34947072 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:27.395698+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 24.194051743s of 24.209526062s, submitted: 3
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135610368 unmapped: 34930688 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:28.395997+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 heartbeat osd_stat(store_statfs(0x4f7d65000/0x0/0x4ffc00000, data 0x3830345/0x3909000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 140 handle_osd_map epochs [141,141], i have 140, src has [1,141]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca14ca800 session 0x559ca11c1a40
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135692288 unmapped: 34848768 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1763114 data_alloc: 234881024 data_used: 32649216
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:29.396350+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135692288 unmapped: 34848768 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:30.396732+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135700480 unmapped: 34840576 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:31.397126+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6d5f000/0x0/0x4ffc00000, data 0x4831f08/0x490e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135700480 unmapped: 34840576 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:32.397382+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135700480 unmapped: 34840576 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6d5f000/0x0/0x4ffc00000, data 0x4831f08/0x490e000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:33.397809+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135700480 unmapped: 34840576 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1763114 data_alloc: 234881024 data_used: 32649216
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:34.398344+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 135774208 unmapped: 34766848 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:35.398687+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 138289152 unmapped: 32251904 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:36.399012+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 138297344 unmapped: 32243712 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:37.399468+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6614000/0x0/0x4ffc00000, data 0x4f7cf08/0x5059000, compress 0x0/0x0/0x0, omap 0x639, meta 0x458f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca1f3a400 session 0x559c9f5d7a40
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0493000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0493000 session 0x559ca051ad20
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 9.656514168s of 10.073102951s, submitted: 76
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0493400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0493400 session 0x559ca157ad20
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 138362880 unmapped: 32178176 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:38.399855+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0492c00 session 0x559c9f794d20
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0493000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0493000 session 0x559ca098c3c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 153591808 unmapped: 16949248 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1871926 data_alloc: 251658240 data_used: 48066560
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:39.400167+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca14ca800 session 0x559ca127b860
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157507584 unmapped: 13033472 heap: 170541056 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca1f3a400 session 0x559c9ff92780
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca266fc00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:40.400336+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca266fc00 session 0x559ca231a3c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0492c00 session 0x559c9f48b0e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0493000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0493000 session 0x559ca231af00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca14ca800 session 0x559ca1d12f00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f53d9000/0x0/0x4ffc00000, data 0x5da6f7a/0x5e85000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 154574848 unmapped: 23838720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:41.400684+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 154574848 unmapped: 23838720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:42.401085+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f53d9000/0x0/0x4ffc00000, data 0x5da6f7a/0x5e85000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 154607616 unmapped: 23805952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:43.401388+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3a400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca1f3a400 session 0x559c9f5d1a40
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 154624000 unmapped: 23789568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1994344 data_alloc: 251658240 data_used: 48074752
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:44.401726+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca266e400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3bc00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca11b0400 session 0x559ca1cf01e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 154664960 unmapped: 23748608 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:45.401916+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0492c00 session 0x559ca11c03c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 151674880 unmapped: 26738688 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:46.402142+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 151674880 unmapped: 26738688 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:47.402370+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6149000/0x0/0x4ffc00000, data 0x5037f6a/0x5115000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 152780800 unmapped: 25632768 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:48.402572+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3600.1 total, 600.0 interval
                                            Cumulative writes: 10K writes, 41K keys, 10K commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 10K writes, 2796 syncs, 3.83 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 2104 writes, 7839 keys, 2104 commit groups, 1.0 writes per commit group, ingest: 8.42 MB, 0.01 MB/s
                                            Interval WAL: 2104 writes, 817 syncs, 2.58 writes per sync, written: 0.01 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6149000/0x0/0x4ffc00000, data 0x5037f6a/0x5115000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155754496 unmapped: 22659072 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1920008 data_alloc: 251658240 data_used: 52142080
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:49.402746+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6149000/0x0/0x4ffc00000, data 0x5037f6a/0x5115000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160505856 unmapped: 17907712 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:50.402961+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6149000/0x0/0x4ffc00000, data 0x5037f6a/0x5115000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160505856 unmapped: 17907712 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:51.403157+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 13.493240356s of 13.828572273s, submitted: 75
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca1f3b000 session 0x559ca2209860
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0493800 session 0x559ca08370e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0493000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160505856 unmapped: 17907712 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:52.403381+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca0493000 session 0x559ca01223c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150175744 unmapped: 28237824 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:53.403638+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150175744 unmapped: 28237824 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:54.403879+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1679329 data_alloc: 251658240 data_used: 40886272
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca266f000 session 0x559ca44ae780
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0492c00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f76d7000/0x0/0x4ffc00000, data 0x3aabf27/0x3b86000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150175744 unmapped: 28237824 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:55.404090+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: mgrc ms_handle_reset ms_handle_reset con 0x559c9f7abc00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: mgrc reconnect Terminating session with v2:192.168.122.100:6800/1088804496
Oct 11 03:05:39 compute-0 ceph-osd[206800]: mgrc reconnect Starting new session with [v2:192.168.122.100:6800/1088804496,v1:192.168.122.100:6801/1088804496]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: get_auth_request con 0x559ca266f000 auth_method 0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: mgrc handle_mgr_configure stats_period=5
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca00bf400 session 0x559ca051a780
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca266ec00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca00bf000 session 0x559c9f5d14a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca00bf400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f76d7000/0x0/0x4ffc00000, data 0x3aabf27/0x3b86000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:56.404472+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:57.404867+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:58.405352+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:59.405726+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1679329 data_alloc: 251658240 data_used: 40886272
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:00.406119+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f76d7000/0x0/0x4ffc00000, data 0x3aabf27/0x3b86000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:01.406521+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:02.406833+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:03.407075+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:04.407368+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1679329 data_alloc: 251658240 data_used: 40886272
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f76d7000/0x0/0x4ffc00000, data 0x3aabf27/0x3b86000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:05.407834+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:06.408441+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:07.408668+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:08.408901+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f76d7000/0x0/0x4ffc00000, data 0x3aabf27/0x3b86000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:09.409132+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1679329 data_alloc: 251658240 data_used: 40886272
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:10.409432+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:11.409745+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:12.409971+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:13.410318+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f76d7000/0x0/0x4ffc00000, data 0x3aabf27/0x3b86000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:14.410645+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1679329 data_alloc: 251658240 data_used: 40886272
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f76d7000/0x0/0x4ffc00000, data 0x3aabf27/0x3b86000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:15.411175+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:16.411549+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:17.411950+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:18.412328+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150257664 unmapped: 28155904 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 27.144214630s of 27.347295761s, submitted: 47
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:19.412613+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155402240 unmapped: 23011328 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1800615 data_alloc: 251658240 data_used: 40894464
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:20.412814+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 158810112 unmapped: 19603456 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f65f2000/0x0/0x4ffc00000, data 0x4b89f27/0x4c64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:21.413438+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155615232 unmapped: 22798336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:22.413722+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155615232 unmapped: 22798336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:23.414141+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155615232 unmapped: 22798336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:24.415307+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155615232 unmapped: 22798336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1812281 data_alloc: 251658240 data_used: 41115648
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:25.415651+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155582464 unmapped: 22831104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f655d000/0x0/0x4ffc00000, data 0x4c26f27/0x4d01000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:26.416095+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155582464 unmapped: 22831104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:27.416538+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:28.416768+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6552000/0x0/0x4ffc00000, data 0x4c31f27/0x4d0c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:29.416992+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6552000/0x0/0x4ffc00000, data 0x4c31f27/0x4d0c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1809173 data_alloc: 251658240 data_used: 41119744
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6552000/0x0/0x4ffc00000, data 0x4c31f27/0x4d0c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:30.417224+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:31.417492+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:32.417939+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6552000/0x0/0x4ffc00000, data 0x4c31f27/0x4d0c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:33.418206+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:34.418694+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1809173 data_alloc: 251658240 data_used: 41119744
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 crontab[501471]: (root) LIST (root)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:35.419108+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:36.419465+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:37.419746+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6552000/0x0/0x4ffc00000, data 0x4c31f27/0x4d0c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:38.420140+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 19.423469543s of 19.880949020s, submitted: 173
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:39.420363+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6552000/0x0/0x4ffc00000, data 0x4c31f27/0x4d0c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155475968 unmapped: 22937600 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808865 data_alloc: 251658240 data_used: 41119744
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f6552000/0x0/0x4ffc00000, data 0x4c31f27/0x4d0c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:40.420892+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155475968 unmapped: 22937600 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:41.421136+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155475968 unmapped: 22937600 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:42.421480+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155475968 unmapped: 22937600 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:43.421831+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155484160 unmapped: 22929408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:44.422359+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155484160 unmapped: 22929408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808865 data_alloc: 251658240 data_used: 41119744
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:45.422592+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155484160 unmapped: 22929408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:46.422811+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155484160 unmapped: 22929408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:47.423162+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155484160 unmapped: 22929408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:48.423454+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155484160 unmapped: 22929408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:49.423925+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155484160 unmapped: 22929408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808865 data_alloc: 251658240 data_used: 41119744
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:50.424323+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155492352 unmapped: 22921216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:51.424827+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 12.526916504s of 12.543886185s, submitted: 2
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155500544 unmapped: 22913024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:52.425379+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155500544 unmapped: 22913024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:53.425803+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155500544 unmapped: 22913024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:54.426462+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155500544 unmapped: 22913024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808865 data_alloc: 251658240 data_used: 41119744
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:55.426687+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155508736 unmapped: 22904832 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:56.427143+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155508736 unmapped: 22904832 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:57.427403+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155508736 unmapped: 22904832 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:58.427667+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155508736 unmapped: 22904832 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:59.427915+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155508736 unmapped: 22904832 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808865 data_alloc: 251658240 data_used: 41119744
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:00.428221+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155516928 unmapped: 22896640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:01.428504+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155516928 unmapped: 22896640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:02.428936+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155516928 unmapped: 22896640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:03.429126+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155516928 unmapped: 22896640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:04.429400+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155516928 unmapped: 22896640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808865 data_alloc: 251658240 data_used: 41119744
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:05.429626+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155516928 unmapped: 22896640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:06.430094+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155516928 unmapped: 22896640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:07.430539+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155516928 unmapped: 22896640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:08.430798+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155525120 unmapped: 22888448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:09.431168+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155525120 unmapped: 22888448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808865 data_alloc: 251658240 data_used: 41119744
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:10.431444+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155525120 unmapped: 22888448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:11.431871+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155525120 unmapped: 22888448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:12.432300+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155533312 unmapped: 22880256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:13.432679+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155533312 unmapped: 22880256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca266e800 session 0x559ca44af4a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0493800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:14.433157+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155533312 unmapped: 22880256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808865 data_alloc: 251658240 data_used: 41119744
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:15.433441+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155533312 unmapped: 22880256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:16.440546+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155533312 unmapped: 22880256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:17.440816+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155533312 unmapped: 22880256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:18.441042+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155533312 unmapped: 22880256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:19.441512+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155533312 unmapped: 22880256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 28.290304184s of 28.305244446s, submitted: 2
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808513 data_alloc: 251658240 data_used: 41123840
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:20.441933+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155582464 unmapped: 22831104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:21.442150+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155582464 unmapped: 22831104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:22.442394+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155607040 unmapped: 22806528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:23.443015+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155623424 unmapped: 22790144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:24.443332+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155623424 unmapped: 22790144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:25.443582+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155623424 unmapped: 22790144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:26.443929+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155623424 unmapped: 22790144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:27.444171+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155623424 unmapped: 22790144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:28.444376+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155623424 unmapped: 22790144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:29.444584+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155623424 unmapped: 22790144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:30.444972+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155623424 unmapped: 22790144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:31.445704+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155631616 unmapped: 22781952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:32.445904+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155631616 unmapped: 22781952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:33.446352+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155631616 unmapped: 22781952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:34.446652+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155631616 unmapped: 22781952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:35.446925+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155631616 unmapped: 22781952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:36.447570+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155631616 unmapped: 22781952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:37.447864+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155631616 unmapped: 22781952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:38.448140+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155639808 unmapped: 22773760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:39.448366+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155648000 unmapped: 22765568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:40.448596+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155648000 unmapped: 22765568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:41.448960+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155648000 unmapped: 22765568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:42.449343+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155648000 unmapped: 22765568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:43.449688+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155648000 unmapped: 22765568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:44.450138+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155648000 unmapped: 22765568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:45.450513+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155648000 unmapped: 22765568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:46.450931+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155648000 unmapped: 22765568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:47.451169+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155656192 unmapped: 22757376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:48.451385+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155656192 unmapped: 22757376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:49.451738+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155656192 unmapped: 22757376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:50.452111+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155656192 unmapped: 22757376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:51.452343+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155656192 unmapped: 22757376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:52.452612+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155656192 unmapped: 22757376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:53.452970+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155656192 unmapped: 22757376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:54.453509+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155656192 unmapped: 22757376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:55.453878+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:56.454208+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:57.454643+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:58.455029+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:59.455436+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:00.455758+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:01.456096+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:02.456383+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:03.456710+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:04.457032+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:05.457433+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:06.457745+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:07.458308+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:08.458637+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:09.458970+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:10.459290+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155664384 unmapped: 22749184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:11.459652+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155672576 unmapped: 22740992 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:12.459866+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155672576 unmapped: 22740992 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:13.460101+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155672576 unmapped: 22740992 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:14.460378+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155672576 unmapped: 22740992 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:15.460575+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155672576 unmapped: 22740992 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:16.460825+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155672576 unmapped: 22740992 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:17.461061+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155672576 unmapped: 22740992 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:18.461479+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155672576 unmapped: 22740992 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:19.461815+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f654f000/0x0/0x4ffc00000, data 0x4c34f27/0x4d0f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155680768 unmapped: 22732800 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:20.462060+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1808993 data_alloc: 251658240 data_used: 41144320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155680768 unmapped: 22732800 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:21.462757+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155680768 unmapped: 22732800 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:22.463029+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155680768 unmapped: 22732800 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:23.463253+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 63.620548248s of 64.190055847s, submitted: 110
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca1f3b000 session 0x559ca1cad860
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155926528 unmapped: 22487040 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:24.463528+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155926528 unmapped: 22487040 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:25.463890+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1873141 data_alloc: 251658240 data_used: 41144320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e43000/0x0/0x4ffc00000, data 0x5340f27/0x541b000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155926528 unmapped: 22487040 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:26.464207+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155926528 unmapped: 22487040 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:27.464723+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:28.465202+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155926528 unmapped: 22487040 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:29.465483+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155926528 unmapped: 22487040 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e43000/0x0/0x4ffc00000, data 0x5340f27/0x541b000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:30.465841+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155926528 unmapped: 22487040 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1873141 data_alloc: 251658240 data_used: 41144320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca11b0400 session 0x559ca1c8d0e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:31.466093+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155934720 unmapped: 22478848 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:32.466365+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155934720 unmapped: 22478848 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:33.466607+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155934720 unmapped: 22478848 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:34.466855+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 155934720 unmapped: 22478848 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:35.467085+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 156688384 unmapped: 21725184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1910234 data_alloc: 251658240 data_used: 46198784
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:36.467310+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157319168 unmapped: 21094400 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:37.467711+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157319168 unmapped: 21094400 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:38.467925+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157319168 unmapped: 21094400 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:39.468162+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157319168 unmapped: 21094400 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:40.468370+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157319168 unmapped: 21094400 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1917754 data_alloc: 251658240 data_used: 47218688
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:41.468632+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157319168 unmapped: 21094400 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:42.468835+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157319168 unmapped: 21094400 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:43.469041+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157319168 unmapped: 21094400 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:44.469390+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157327360 unmapped: 21086208 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:45.469667+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157327360 unmapped: 21086208 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1917754 data_alloc: 251658240 data_used: 47218688
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:46.469922+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157327360 unmapped: 21086208 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:47.470284+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157327360 unmapped: 21086208 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:48.470844+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157327360 unmapped: 21086208 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:49.471096+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157327360 unmapped: 21086208 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:50.471764+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157327360 unmapped: 21086208 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1917754 data_alloc: 251658240 data_used: 47218688
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:51.471998+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157327360 unmapped: 21086208 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:52.472305+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157335552 unmapped: 21078016 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:53.472546+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157335552 unmapped: 21078016 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:54.472915+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157335552 unmapped: 21078016 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:55.473186+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157335552 unmapped: 21078016 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1917754 data_alloc: 251658240 data_used: 47218688
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:56.473532+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157335552 unmapped: 21078016 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:57.473783+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157343744 unmapped: 21069824 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:58.474071+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157343744 unmapped: 21069824 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:59.474350+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157343744 unmapped: 21069824 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:00.474581+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157351936 unmapped: 21061632 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1917754 data_alloc: 251658240 data_used: 47218688
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:01.474824+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157351936 unmapped: 21061632 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:02.475047+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157351936 unmapped: 21061632 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:03.475316+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157351936 unmapped: 21061632 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:04.475566+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157351936 unmapped: 21061632 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f5e42000/0x0/0x4ffc00000, data 0x5340f4a/0x541c000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:05.475763+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 157351936 unmapped: 21061632 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1918234 data_alloc: 251658240 data_used: 47230976
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 42.103767395s of 42.208259583s, submitted: 21
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:06.476015+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:07.476288+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160964608 unmapped: 17448960 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:08.476655+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 161062912 unmapped: 17350656 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:09.477013+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 161062912 unmapped: 17350656 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fb000/0x0/0x4ffc00000, data 0x5a86f4a/0x5b62000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:10.477346+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 161062912 unmapped: 17350656 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990890 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:11.477641+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 161062912 unmapped: 17350656 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:12.477916+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 161062912 unmapped: 17350656 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:13.478367+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 161062912 unmapped: 17350656 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:14.478808+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160260096 unmapped: 18153472 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:15.479047+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160260096 unmapped: 18153472 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:16.479313+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:17.479686+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:18.480050+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:19.480299+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:20.480638+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:21.480981+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:22.481207+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:23.481554+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:24.481869+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:25.482106+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:26.482451+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:27.482668+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:28.482961+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:29.483378+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:30.483660+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:31.484008+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:32.484430+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:33.484684+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:34.484994+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:35.485409+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160268288 unmapped: 18145280 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:36.485714+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160276480 unmapped: 18137088 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:37.485914+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160276480 unmapped: 18137088 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:38.486203+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160276480 unmapped: 18137088 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 32.356250763s of 32.752002716s, submitted: 100
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:39.486559+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:40.486912+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985218 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:41.487185+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:42.487361+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:43.487630+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:44.487886+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:45.488170+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:46.488354+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:47.488587+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160284672 unmapped: 18128896 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:48.489037+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:49.489277+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:50.489610+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:51.489910+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:52.490112+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:53.490365+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:54.490690+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:55.491064+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:56.491430+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:57.491761+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:58.491965+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:59.492411+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160292864 unmapped: 18120704 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:00.492743+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:01.492967+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:02.493292+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:03.493511+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:04.493755+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:05.493987+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:06.494368+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:07.494732+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:08.495103+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:09.495506+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:10.495889+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160301056 unmapped: 18112512 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:11.496212+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:12.496541+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:13.496920+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:14.497476+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:15.497877+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:16.498206+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:17.498503+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:18.498859+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:19.499208+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:20.499639+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:21.500009+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:22.500304+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:23.500711+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:24.501200+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:25.501490+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160309248 unmapped: 18104320 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:26.501725+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:27.502098+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:28.502374+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:29.502821+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:30.503189+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:31.503412+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:32.503815+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:33.504154+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:34.504691+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160317440 unmapped: 18096128 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:35.505022+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:36.505433+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:37.505747+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:38.506087+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:39.506470+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:40.506904+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:41.507192+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:42.507576+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:43.508075+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:44.509111+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:45.509446+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:46.509785+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:47.510112+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:48.510334+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:49.510778+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:50.511123+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160325632 unmapped: 18087936 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:51.511496+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:52.511674+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:53.512037+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:54.516077+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:55.516443+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:56.516635+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:57.516828+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:58.517115+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160333824 unmapped: 18079744 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:59.517449+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:00.517712+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:01.517980+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:02.518370+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:03.518734+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:04.519092+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:05.519364+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:06.519649+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:07.519854+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:08.520185+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:09.520624+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:10.521076+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160342016 unmapped: 18071552 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:11.521490+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:12.521811+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160350208 unmapped: 18063360 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:13.522149+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160350208 unmapped: 18063360 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:14.523002+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160350208 unmapped: 18063360 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:15.523327+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160350208 unmapped: 18063360 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:16.523512+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160350208 unmapped: 18063360 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1985042 data_alloc: 251658240 data_used: 48492544
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:17.523782+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160350208 unmapped: 18063360 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:18.524035+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160350208 unmapped: 18063360 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:19.524267+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160358400 unmapped: 18055168 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:20.524520+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160358400 unmapped: 18055168 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 102.055480957s of 102.074127197s, submitted: 2
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:21.524722+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160374784 unmapped: 18038784 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1987746 data_alloc: 251658240 data_used: 48824320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56fa000/0x0/0x4ffc00000, data 0x5a88f4a/0x5b64000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:22.525003+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160374784 unmapped: 18038784 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:23.525272+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160374784 unmapped: 18038784 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:24.525666+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160374784 unmapped: 18038784 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:25.526021+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160374784 unmapped: 18038784 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:26.526408+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160374784 unmapped: 18038784 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1987746 data_alloc: 251658240 data_used: 48824320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:27.526639+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160464896 unmapped: 17948672 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56e1000/0x0/0x4ffc00000, data 0x5aa1f4a/0x5b7d000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:28.526813+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160464896 unmapped: 17948672 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:29.526994+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160464896 unmapped: 17948672 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:30.527433+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160464896 unmapped: 17948672 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:31.527629+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160464896 unmapped: 17948672 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1988774 data_alloc: 251658240 data_used: 48824320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56e1000/0x0/0x4ffc00000, data 0x5aa1f4a/0x5b7d000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:32.527907+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160473088 unmapped: 17940480 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56e1000/0x0/0x4ffc00000, data 0x5aa1f4a/0x5b7d000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:33.528378+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160473088 unmapped: 17940480 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:34.528801+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160473088 unmapped: 17940480 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56e1000/0x0/0x4ffc00000, data 0x5aa1f4a/0x5b7d000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:35.529122+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160473088 unmapped: 17940480 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:36.529500+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160473088 unmapped: 17940480 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1988774 data_alloc: 251658240 data_used: 48824320
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:37.529811+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160481280 unmapped: 17932288 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:38.530043+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160481280 unmapped: 17932288 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:39.530330+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160481280 unmapped: 17932288 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56e1000/0x0/0x4ffc00000, data 0x5aa1f4a/0x5b7d000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:40.530501+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160481280 unmapped: 17932288 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:41.530840+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160481280 unmapped: 17932288 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989094 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:42.531514+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160481280 unmapped: 17932288 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56e1000/0x0/0x4ffc00000, data 0x5aa1f4a/0x5b7d000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 21.782728195s of 21.812334061s, submitted: 4
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:43.531866+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:44.532180+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:45.532459+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:46.532704+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989582 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:47.532953+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:48.533208+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:49.533652+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:50.533887+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:51.534291+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989582 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:52.534727+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:53.534963+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:54.535179+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:55.535370+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160563200 unmapped: 17850368 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:56.535691+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160571392 unmapped: 17842176 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989582 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:57.535931+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160571392 unmapped: 17842176 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:58.536532+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160571392 unmapped: 17842176 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:59.536871+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160571392 unmapped: 17842176 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:00.537110+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160571392 unmapped: 17842176 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca00be800 session 0x559c9f5d50e0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559c9f7ab800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:01.537408+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989582 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 19.184776306s of 19.200614929s, submitted: 3
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:02.537648+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:03.537896+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:04.538156+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:05.538415+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:06.538766+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989758 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:07.539099+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:08.539326+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:09.539595+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:10.539778+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:11.540102+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160579584 unmapped: 17833984 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989758 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 10.225237846s of 10.240203857s, submitted: 3
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:12.540391+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160587776 unmapped: 17825792 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:13.540760+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160587776 unmapped: 17825792 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:14.541215+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160587776 unmapped: 17825792 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:15.541743+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160587776 unmapped: 17825792 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:16.542151+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160587776 unmapped: 17825792 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:17.542676+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160587776 unmapped: 17825792 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:18.543108+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160587776 unmapped: 17825792 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:19.543504+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160595968 unmapped: 17817600 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:20.543958+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:21.544418+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:22.544738+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:23.545190+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:24.545648+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:25.546023+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:26.546462+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:27.546867+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:28.547215+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:29.547641+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:30.547882+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:31.548177+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160604160 unmapped: 17809408 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:32.548595+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:33.548997+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:34.549349+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:35.549737+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:36.550876+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:37.552533+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:38.554099+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:39.555742+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:40.557212+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:41.558811+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:42.560413+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:43.562045+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:44.562810+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:45.563113+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:46.563495+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:47.563943+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:48.564577+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:49.564895+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:50.565457+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160612352 unmapped: 17801216 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:51.565889+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:52.566529+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:53.567043+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:54.567513+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:55.567918+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:56.568575+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:57.569044+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:58.569766+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:59.570195+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:00.570782+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:01.571192+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:02.571382+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:03.571645+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:04.572197+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:05.572437+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:06.573001+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160620544 unmapped: 17793024 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:07.573385+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:08.573751+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:09.574157+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:10.574406+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:11.574722+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:12.575117+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:13.575383+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:14.575670+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:15.575923+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:16.576424+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:17.576666+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:18.576896+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-mon[191930]: pgmap v2645: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
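NOTE: the mon's pgmap summary is the only cluster-wide health line in this stretch: 321 PGs, all active+clean, 118 MiB of logical data against 313 MiB raw used out of 60 GiB. A naive 3x replication estimate would predict about 354 MiB raw; the reported figure landing below that usually just means not every pool is size 3 or the two stats were sampled at slightly different instants, so the check below is about orders of magnitude, not equality (the replica count is an assumption, not stated in the log):

    data_mib, used_mib, total_gib = 118, 313, 60
    replicas = 3  # assumption: default replicated pools, size=3
    print(f"naive raw estimate ~{data_mib * replicas} MiB, reported {used_mib} MiB")
    print(f"used fraction: {used_mib / (total_gib * 1024):.4%} of {total_gib} GiB")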
Oct 11 03:05:39 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1557775261' entity='client.admin' cmd=[{"prefix": "osd crush rule ls"}]: dispatch
Oct 11 03:05:39 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2679158688' entity='client.admin' cmd=[{"prefix": "mgr metadata", "format": "json-pretty"}]: dispatch
Oct 11 03:05:39 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3814040216' entity='client.admin' cmd=[{"prefix": "osd crush show-tunables"}]: dispatch
Oct 11 03:05:39 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1135641234' entity='client.admin' cmd=[{"prefix": "mgr module ls", "format": "json-pretty"}]: dispatch
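NOTE: the four dispatch lines record an admin client at 192.168.122.100 opening four short-lived sessions (the digits after the final slash are per-connection nonces) and issuing read-only queries; each cmd prefix corresponds one-for-one to a ceph CLI invocation (osd crush rule ls, mgr metadata, osd crush show-tunables, mgr module ls) - the polling pattern of a monitoring or inventory tool. A heuristic sketch that recovers the CLI form from such lines; string handling only, not a Ceph API:

    import json
    import re

    lines = [
        '''from='client.? 192.168.122.100:0/1557775261' entity='client.admin' cmd=[{"prefix": "osd crush rule ls"}]: dispatch''',
        '''from='client.? 192.168.122.100:0/2679158688' entity='client.admin' cmd=[{"prefix": "mgr metadata", "format": "json-pretty"}]: dispatch''',
    ]

    CMD = re.compile(r"cmd=(\[.*\]): dispatch")
    for line in lines:
        args = json.loads(CMD.search(line).group(1))[0]
        cli = "ceph " + args.pop("prefix")
        cli += "".join(f" --{k.replace('_', '-')} {v}" for k, v in sorted(args.items()))
        print(cli)
    # -> ceph osd crush rule ls
    # -> ceph mgr metadata --format json-pretty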
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:19.577193+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:20.577405+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:21.577815+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:22.578074+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160636928 unmapped: 17776640 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:23.578338+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:24.578673+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:25.578906+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:26.579119+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:27.579369+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:28.579627+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:29.579916+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:30.580153+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:31.580521+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:32.580806+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:33.581413+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:34.582457+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:35.582810+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:36.583350+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:37.583767+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:38.584155+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:39.584433+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160645120 unmapped: 17768448 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:40.584804+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:41.585148+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:42.585481+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:43.585887+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:44.586294+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:45.587011+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:46.587470+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:47.587809+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:48.588024+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:49.588440+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:50.588646+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:51.588866+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:52.589198+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:53.589404+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:54.589598+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:55.589808+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160653312 unmapped: 17760256 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:56.590081+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:57.590440+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:58.590799+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:59.591030+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:00.591280+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:01.591512+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:02.591722+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:03.591945+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:04.592208+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:05.592519+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:06.592759+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:07.593156+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1989934 data_alloc: 251658240 data_used: 48832512
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:08.593418+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:09.593799+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:10.594144+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:11.594524+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160661504 unmapped: 17752064 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:12.594922+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990894 data_alloc: 251658240 data_used: 48922624
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:13.595277+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56cb000/0x0/0x4ffc00000, data 0x5ab7f4a/0x5b93000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:14.595698+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 122.132949829s of 122.139366150s, submitted: 1
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:15.596082+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:16.596451+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c9000/0x0/0x4ffc00000, data 0x5ab9f4a/0x5b95000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:17.596892+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990550 data_alloc: 251658240 data_used: 48922624
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:18.597386+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c9000/0x0/0x4ffc00000, data 0x5ab9f4a/0x5b95000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:19.597779+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c9000/0x0/0x4ffc00000, data 0x5ab9f4a/0x5b95000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c9000/0x0/0x4ffc00000, data 0x5ab9f4a/0x5b95000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:20.598147+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:21.598575+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:22.598921+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990550 data_alloc: 251658240 data_used: 48922624
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:23.599382+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:24.600027+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:25.600331+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c9000/0x0/0x4ffc00000, data 0x5ab9f4a/0x5b95000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:26.600667+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:27.601083+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c9000/0x0/0x4ffc00000, data 0x5ab9f4a/0x5b95000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990710 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:28.601322+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:29.601701+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 15.860550880s of 15.867979050s, submitted: 1
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:30.602003+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:31.602328+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c8000/0x0/0x4ffc00000, data 0x5abaf4a/0x5b96000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:32.602584+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990778 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:33.602902+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:34.603166+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160669696 unmapped: 17743872 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:35.603375+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:36.603594+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:37.604025+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c8000/0x0/0x4ffc00000, data 0x5abaf4a/0x5b96000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990778 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:38.604525+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:39.604927+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:40.605159+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c8000/0x0/0x4ffc00000, data 0x5abaf4a/0x5b96000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:41.605569+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:42.606134+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990778 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:43.606367+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:44.606785+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c8000/0x0/0x4ffc00000, data 0x5abaf4a/0x5b96000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:45.607044+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:46.607539+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:47.607865+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c8000/0x0/0x4ffc00000, data 0x5abaf4a/0x5b96000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990778 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:48.608411+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160677888 unmapped: 17735680 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c8000/0x0/0x4ffc00000, data 0x5abaf4a/0x5b96000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:49.608900+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 19.193056107s of 19.199436188s, submitted: 1
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:50.609513+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:51.609883+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:52.610365+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990910 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:53.610680+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:54.611164+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:55.611574+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:56.611921+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:57.612465+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990910 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:58.612782+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160694272 unmapped: 17719296 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:59.613182+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 10.222181320s of 10.239167213s, submitted: 2
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:00.613506+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:01.613897+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:02.614159+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:03.614583+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:04.614875+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:05.615106+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:06.615561+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:07.615770+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:08.615980+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:09.616467+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:10.616739+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:11.616978+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160702464 unmapped: 17711104 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:12.617455+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:13.617719+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:14.618016+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:15.618541+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:16.619069+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:17.619461+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:18.619863+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:19.620381+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:20.620755+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:21.621209+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:22.621719+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:23.621934+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:24.622410+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:25.622786+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:26.623335+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:27.623799+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160710656 unmapped: 17702912 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:28.624079+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:29.624451+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:30.624740+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:31.625117+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:32.625542+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:33.625804+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:34.626369+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:35.626686+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:36.626938+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:37.627422+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:38.627634+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160718848 unmapped: 17694720 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:39.627956+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:40.628195+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:41.628510+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:42.628733+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:43.629432+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:44.629741+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:45.631405+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:46.631700+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:47.632037+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:48.632539+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 4200.1 total, 600.0 interval
                                            Cumulative writes: 11K writes, 44K keys, 11K commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 11K writes, 3098 syncs, 3.70 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 775 writes, 2877 keys, 775 commit groups, 1.0 writes per commit group, ingest: 3.62 MB, 0.01 MB/s
                                            Interval WAL: 775 writes, 302 syncs, 2.57 writes per sync, written: 0.00 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:49.632907+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:50.633334+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:51.633733+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:52.634366+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:53.634669+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:54.635162+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:55.635509+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:56.635880+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:57.636178+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:58.636423+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:59.636691+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:00.637178+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:01.637803+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160727040 unmapped: 17686528 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:02.638045+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:03.638485+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:04.638960+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:05.639509+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:06.640043+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:07.640481+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:08.641020+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:09.641421+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:10.641652+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:11.642003+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:12.642492+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:13.642858+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:14.643414+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160735232 unmapped: 17678336 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:15.643890+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:16.644134+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:17.644385+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:18.644691+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:19.645004+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:20.645184+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:21.645597+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:22.645978+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:23.646439+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:24.646918+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:25.647342+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:26.648133+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:27.648374+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:28.648647+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:29.649991+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:30.650486+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:31.651073+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160743424 unmapped: 17670144 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:32.651385+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:33.652034+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:34.652347+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:35.652735+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:36.653385+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:37.653850+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:38.654489+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:39.654778+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:40.656515+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:41.656757+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:42.657067+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:43.657548+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:44.658463+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:45.658768+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:46.659016+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:47.659301+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160751616 unmapped: 17661952 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:48.659730+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:49.660091+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:50.660417+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:51.660685+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:52.661019+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:53.661342+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:54.661933+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:55.662469+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:56.662845+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:57.663098+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:58.663476+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160759808 unmapped: 17653760 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:59.664072+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:00.664433+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:01.664630+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:02.665159+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:03.665433+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:04.665808+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:05.666031+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:06.666693+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:07.668196+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:08.668594+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:09.668792+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:10.669402+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:11.669921+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:12.671582+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:13.672846+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160768000 unmapped: 17645568 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:14.673611+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160776192 unmapped: 17637376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:15.673869+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160776192 unmapped: 17637376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:16.674376+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160776192 unmapped: 17637376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:17.674788+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160776192 unmapped: 17637376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c6000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:18.675091+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1991086 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160776192 unmapped: 17637376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:19.675433+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160776192 unmapped: 17637376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:20.675790+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160776192 unmapped: 17637376 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 141.434371948s of 141.442596436s, submitted: 1
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:21.676293+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160784384 unmapped: 17629184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:22.676716+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160784384 unmapped: 17629184 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:23.677022+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160817152 unmapped: 17596416 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:24.677342+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:25.677796+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:26.678176+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:27.678571+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:28.678814+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:29.679098+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:30.679442+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:31.679747+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:32.680094+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:33.680361+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:34.680909+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:35.681606+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:36.682024+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:37.682381+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:38.682855+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 251658240 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:39.683195+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:40.683675+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:41.684150+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:42.684433+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:43.684849+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 234881024 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160841728 unmapped: 17571840 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:44.685168+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:45.685550+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:46.685928+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:47.686402+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:48.686788+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 234881024 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:49.687325+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:50.687734+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:51.688143+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:52.688556+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:53.688969+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 234881024 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:54.689485+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:55.689898+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:56.690451+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:57.690840+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:58.691365+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 218103808 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:59.691742+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:00.691970+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:01.692201+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:02.692594+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:03.692867+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 218103808 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:04.693374+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:05.693573+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:06.694064+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:07.694453+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:08.694787+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 218103808 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:09.695175+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:10.695412+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:11.695634+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:12.695844+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:13.696078+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990558 data_alloc: 218103808 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:14.696423+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160849920 unmapped: 17563648 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:15.696685+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160858112 unmapped: 17555456 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:16.697060+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160858112 unmapped: 17555456 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:17.697382+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f56c7000/0x0/0x4ffc00000, data 0x5abbf4a/0x5b97000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160858112 unmapped: 17555456 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:18.697595+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 57.406566620s of 57.994510651s, submitted: 90
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca266e400 session 0x559c9f5dc5a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca1f3bc00 session 0x559c9ff123c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1990354 data_alloc: 218103808 data_used: 48926720
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 160858112 unmapped: 17555456 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca00be800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:19.697810+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca00be800 session 0x559ca25e4960
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:20.698010+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:21.698250+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:22.698764+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f7426000/0x0/0x4ffc00000, data 0x3adfed8/0x3bb9000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:23.699404+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647764 data_alloc: 218103808 data_used: 33910784
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:24.700175+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:25.700373+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:26.700607+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f7426000/0x0/0x4ffc00000, data 0x3adfed8/0x3bb9000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:27.700860+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:28.701170+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1647764 data_alloc: 218103808 data_used: 33910784
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:29.701566+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:30.701927+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:31.702404+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f7426000/0x0/0x4ffc00000, data 0x3adfed8/0x3bb9000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca14ca800 session 0x559ca1c8c3c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:32.702763+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 150700032 unmapped: 27713536 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca00be800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 13.333739281s of 13.553236961s, submitted: 50
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f76a5000/0x0/0x4ffc00000, data 0x2f88ed8/0x3062000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 ms_handle_reset con 0x559ca00be800 session 0x559ca25e5a40
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:33.703145+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146096128 unmapped: 32317440 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1482456 data_alloc: 218103808 data_used: 26468352
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:34.703629+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146096128 unmapped: 32317440 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f84fd000/0x0/0x4ffc00000, data 0x2c87eb5/0x2d60000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:35.704002+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146104320 unmapped: 32309248 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:36.704560+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146104320 unmapped: 32309248 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:37.705008+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146104320 unmapped: 32309248 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:38.705591+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146104320 unmapped: 32309248 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 heartbeat osd_stat(store_statfs(0x4f84fd000/0x0/0x4ffc00000, data 0x2c87eb5/0x2d60000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1482456 data_alloc: 218103808 data_used: 26468352
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:39.706028+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146104320 unmapped: 32309248 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:40.706487+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146104320 unmapped: 32309248 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca11b0400
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:41.706932+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 146096128 unmapped: 32317440 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 141 handle_osd_map epochs [142,142], i have 141, src has [1,142]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:42.707361+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 142 ms_handle_reset con 0x559ca11b0400 session 0x559c9f5d63c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132194304 unmapped: 46219264 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 142 heartbeat osd_stat(store_statfs(0x4f84ff000/0x0/0x4ffc00000, data 0x2c87e92/0x2d5f000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:43.707722+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132194304 unmapped: 46219264 heap: 178413568 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 10.512029648s of 10.966043472s, submitted: 80
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 142 handle_osd_map epochs [143,143], i have 142, src has [1,143]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 143 ms_handle_reset con 0x559ca1f3b000 session 0x559ca126d4a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1372255 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:44.708167+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133292032 unmapped: 49782784 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3bc00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _renew_subs
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 143 handle_osd_map epochs [144,144], i have 143, src has [1,144]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:45.708478+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 144 ms_handle_reset con 0x559ca1f3bc00 session 0x559ca126de00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133324800 unmapped: 49750016 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:46.708930+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133324800 unmapped: 49750016 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:47.709441+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133324800 unmapped: 49750016 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:48.709785+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 144 heartbeat osd_stat(store_statfs(0x4f94f4000/0x0/0x4ffc00000, data 0x1c8d1ba/0x1d67000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133324800 unmapped: 49750016 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345275 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:49.710137+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133324800 unmapped: 49750016 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 144 heartbeat osd_stat(store_statfs(0x4f94f4000/0x0/0x4ffc00000, data 0x1c8d1ba/0x1d67000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:50.710546+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133324800 unmapped: 49750016 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:51.711022+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 144 handle_osd_map epochs [144,145], i have 144, src has [1,145]
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:52.711499+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:53.711971+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:54.712364+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:55.713003+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:56.713501+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:57.713891+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:58.714539+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:59.714931+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:00.715487+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:01.715837+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:02.716156+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:03.716374+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:04.716818+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:05.717171+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:06.717574+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:07.718020+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:08.718552+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:09.718865+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:10.719350+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:11.719643+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:12.720082+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:13.720413+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:14.720682+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:15.720906+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:16.721217+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:17.722563+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:18.723786+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:19.724341+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:20.724676+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:21.725029+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:22.725337+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:23.725645+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:24.726037+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:25.726492+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:26.726828+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:27.727104+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:28.727412+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:29.727759+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:30.728105+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:31.728372+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:32.728720+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:33.729051+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:34.729536+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133341184 unmapped: 49733632 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:35.730077+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:36.730529+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:37.731022+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:38.731414+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:39.731785+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:40.732169+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:41.732462+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:42.732866+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:43.733454+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:44.733929+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:45.734810+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:46.735361+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:47.735735+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:48.736465+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:49.736883+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:50.737146+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:51.737535+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:52.737923+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:53.738387+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133349376 unmapped: 49725440 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:54.738736+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:55.739201+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:56.739729+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:57.740113+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:58.740535+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:59.740946+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:00.741508+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:01.741802+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:02.742187+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:03.742376+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:04.742630+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:05.743099+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:06.743462+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:07.743765+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:08.744046+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:09.744375+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:10.744567+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:11.744808+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:12.745054+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:13.745361+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:14.745614+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:15.745817+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:16.746040+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:17.747783+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:18.748010+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:19.748271+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:20.748606+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 49717248 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:21.748839+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133365760 unmapped: 49709056 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:22.749054+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'config diff' '{prefix=config diff}'
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'config diff' '{prefix=config diff}' result is 0 bytes
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'config show' '{prefix=config show}'
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'config show' '{prefix=config show}' result is 0 bytes
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'counter dump' '{prefix=counter dump}'
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'counter dump' '{prefix=counter dump}' result is 0 bytes
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'counter schema' '{prefix=counter schema}'
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'counter schema' '{prefix=counter schema}' result is 0 bytes
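[Editor's note: the do_command pairs above are the OSD's admin-socket handler servicing `config diff`, `config show`, `counter dump`, and `counter schema`, the usual polling pattern of a metrics exporter scraping the daemon. The same endpoints can be exercised by hand through the `ceph daemon` wrapper; a sketch (the daemon name osd.1 matches this host, everything else is an illustrative assumption):]

    # Poll the same admin-socket endpoints the collector is hitting.
    import json
    import subprocess

    def osd_admin(*cmd: str) -> dict:
        # `ceph daemon osd.1 <cmd...>` talks to the daemon's local admin socket
        # and returns JSON on stdout.
        out = subprocess.check_output(["ceph", "daemon", "osd.1", *cmd])
        return json.loads(out)

    diff = osd_admin("config", "diff")       # options changed from defaults
    counters = osd_admin("counter", "dump")  # labeled perf counters
    print(list(diff)[:5], list(counters)[:5])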
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 133423104 unmapped: 49651712 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:23.749290+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132972544 unmapped: 50102272 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:24.749749+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132784128 unmapped: 50290688 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:25.750078+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'log dump' '{prefix=log dump}'
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'log dump' '{prefix=log dump}' result is 0 bytes
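[Editor's note: `log dump` asks the daemon to flush its in-memory debug log ring to the log output, which would also explain the shape of this whole section: minutes of per-second debug lines all landing in the journal under the single flush timestamp 03:05:39. This reading of the buffering behavior is an inference, not something the log states. Reproducing the flush by hand, as a sketch:]

    # Trigger the same flush by hand.
    import subprocess

    subprocess.run(["ceph", "daemon", "osd.1", "log", "dump"], check=True)
    # journald then stamps the whole flushed burst with the flush time,
    # while fields inside each message (e.g. the _check_auth_rotating
    # cutoffs above) still carry the original per-second clock.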
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132784128 unmapped: 50290688 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:26.750414+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'perf dump' '{prefix=perf dump}'
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'perf dump' '{prefix=perf dump}' result is 0 bytes
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'perf histogram dump' '{prefix=perf histogram dump}'
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'perf histogram dump' '{prefix=perf histogram dump}' result is 0 bytes
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'perf schema' '{prefix=perf schema}'
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'perf schema' '{prefix=perf schema}' result is 0 bytes
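[Editor's note: `perf dump`, `perf histogram dump`, and `perf schema` are the classic unlabeled perf-counter endpoints, polled here alongside the newer `counter dump`/`counter schema` pair seen earlier. Pulling a single latency metric out of the dump, as a sketch (osd.op_latency with its avgcount/sum pair is the conventional counter path, but treat the exact key names as assumptions for your release):]

    # Extract average op latency from `ceph daemon osd.1 perf dump` output.
    import json
    import subprocess

    raw = subprocess.check_output(["ceph", "daemon", "osd.1", "perf", "dump"])
    perf = json.loads(raw)

    op = perf["osd"]["op_latency"]   # {"avgcount": N, "sum": seconds, ...}
    if op["avgcount"]:
        print(f"avg op latency: {op['sum'] / op['avgcount'] * 1000:.2f} ms")
    else:
        print("no ops recorded yet")   # plausible: this OSD is idle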
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:27.750629+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:28.750833+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:29.751028+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:30.751301+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:31.761399+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:32.761602+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:33.761984+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd crush tree", "show_shadow": true} v 0) v1
Oct 11 03:05:39 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1806847038' entity='client.admin' cmd=[{"prefix": "osd crush tree", "show_shadow": true}]: dispatch
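[Editor's note: the two ceph-mon lines are the only non-OSD traffic in this window: the monitor leader dispatches an `osd crush tree --show-shadow` query from client.admin at 192.168.122.100 and records it on the audit channel at DBG, the usual level for read-only commands. The same JSON command frame can be sent programmatically; a sketch with the python-rados binding (conffile path and the "nodes" result key are assumptions):]

    # Issue the exact command frame seen in the mon audit log via librados.
    import json
    import rados

    cluster = rados.Rados(conffile="/etc/ceph/ceph.conf", name="client.admin")
    cluster.connect()
    try:
        cmd = json.dumps({"prefix": "osd crush tree",
                          "show_shadow": True, "format": "json"})
        ret, outbuf, outs = cluster.mon_command(cmd, b"")
        assert ret == 0, outs
        tree = json.loads(outbuf)
        print(len(tree.get("nodes", [])), "CRUSH nodes (shadow trees included)")
    finally:
        cluster.shutdown()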
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:34.762884+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:35.763078+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:36.763344+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:37.763541+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:38.763721+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:39.763922+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:40.764116+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:41.764366+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:42.764609+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:43.764826+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:44.765170+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:45.765378+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:46.765668+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:47.766000+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:48.766411+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:49.766661+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:50.766854+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:51.767093+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:52.767360+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:53.767566+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 ms_handle_reset con 0x559ca0492c00 session 0x559c9f5d0780
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca00be800
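[Editor's note: ms_handle_reset fires when a peer drops an established messenger session, and each reconnect starts a fresh cephx exchange; "handle_auth_request added challenge" is the server side issuing the anti-replay challenge for that handshake. A few reset/challenge pairs from a polling client are normal, as here; a steady stream would suggest a flapping client or network. A sketch for tallying them from a saved journal excerpt (filename is an assumption):]

    # Tally session resets and auth challenges in an exported log file.
    from collections import Counter

    events = Counter()
    with open("ceph-osd.log") as fh:   # assumed export of this journal
        for line in fh:
            if "ms_handle_reset" in line:
                events["reset"] += 1
            elif "handle_auth_request added challenge" in line:
                events["challenge"] += 1
    print(events)   # resets and challenges should roughly track each other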
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:54.767827+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:55.768024+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 ms_handle_reset con 0x559ca266ec00 session 0x559ca1c92780
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca0493000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 ms_handle_reset con 0x559ca00bf400 session 0x559c9f7952c0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca266ec00
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:56.768337+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:57.768569+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:58.768868+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:59.769315+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:00.769729+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:01.770053+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:02.770400+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:03.770654+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:04.771637+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:05.771933+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:06.772178+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:07.772526+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:08.772730+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:09.772918+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:10.773355+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:11.773552+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:12.773788+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:13.774016+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:14.774442+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:15.774794+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:16.774992+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
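[Editor's note: the target address v2:192.168.122.100:3300/0 confirms the OSD speaks msgr2 to the monitor (the v2 protocol listens on 3300; legacy v1 uses 6789), and the trailing /0 is the address nonce. A sketch that splits such entity addresses:]

    # Split a Ceph entity address like "v2:192.168.122.100:3300/0".
    import re

    def parse_entity_addr(addr: str) -> dict:
        m = re.fullmatch(r"(v[12]):([\d.]+):(\d+)/(\d+)", addr)
        proto, ip, port, nonce = m.groups()
        return {"proto": proto, "ip": ip, "port": int(port), "nonce": int(nonce)}

    print(parse_entity_addr("v2:192.168.122.100:3300/0"))
    # {'proto': 'v2', 'ip': '192.168.122.100', 'port': 3300, 'nonce': 0}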
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:17.775182+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:18.775587+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:19.775813+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:20.776201+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:21.776512+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:22.777179+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:23.777646+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:24.778113+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 ms_handle_reset con 0x559ca0493800 session 0x559c9f3ec5a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca14ca800
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:14.795378+0000)
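Apart from the connection reset and the authentication challenge recorded just above, the loop carries no state changes: every prioritycache, tick and _check_auth_tickets line is byte-for-byte identical, the heartbeat and bluestore lines repeat unchanged, and the _check_auth_rotating lines differ only in an expiry stamp that advances one second per tick. When reviewing a dump like this, a small filter that masks timestamps before comparing lines makes the genuinely new events stand out. A minimal sketch under those assumptions (all names hypothetical; it merges only immediately repeated lines, so a repeating multi-line cycle would still need a window comparison):

    import re
    import sys

    # Mask ISO-8601 stamps such as 2025-10-11T02:59:14.795378+0000 so that
    # lines differing only in the stamp compare as equal.
    TS = re.compile(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+\+\d{4}")

    def collapse(lines):
        """Yield (count, first_line) for runs of consecutive near-duplicates."""
        prev_key = prev_line = None
        count = 0
        for line in lines:
            key = TS.sub("<ts>", line)
            if key == prev_key:
                count += 1
                continue
            if prev_line is not None:
                yield count, prev_line
            prev_key, prev_line, count = key, line, 1
        if prev_line is not None:
            yield count, prev_line

    if __name__ == "__main__":
        for n, line in collapse(sys.stdin):
            marker = f"[x{n}] " if n > 1 else "      "
            sys.stdout.write(marker + line)

Fed this journal on stdin, the filter would flag the back-to-back duplicate heartbeat lines with a repeat count while passing one-off events such as the ms_handle_reset and handle_auth_request messages through unchanged.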
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:15.795717+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:16.796070+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:17.796423+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:18.796819+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:19.797097+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:20.797424+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:21.797836+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:22.798221+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:23.798767+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:24.799415+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:25.799877+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:26.800419+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:27.800834+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:28.801107+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:29.801708+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:30.802609+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:31.803628+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:32.805302+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:33.805801+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:34.806189+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:35.806573+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:36.806903+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:37.807486+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:38.807826+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:39.808169+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:40.808628+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:41.808920+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:42.809421+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:43.809902+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:44.810535+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:45.810975+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:46.811346+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:47.811745+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:48.812157+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:49.812678+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:50.813079+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:51.813422+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:52.813791+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:53.814205+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:54.814740+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:55.814992+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:56.815355+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:57.828352+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:58.828606+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:59.830532+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:00.830985+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:01.831342+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:02.831675+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:03.831976+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:04.832354+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:05.832687+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:06.832935+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:07.833375+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:08.833752+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:09.834033+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:10.834468+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:11.834808+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:12.835141+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:13.835451+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:14.835960+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:15.836443+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:16.836836+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:17.837333+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:18.837738+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:19.838021+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:20.838806+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:21.839224+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:22.839586+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:23.839959+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:24.840584+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:25.841020+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:26.841499+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:27.841696+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:28.842048+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:29.842482+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:30.843145+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:31.843613+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:32.844186+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:33.844768+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:34.845122+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:35.845540+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:36.846040+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:37.846420+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:38.846949+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:39.847209+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:40.847627+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:41.848355+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:42.848719+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:43.849096+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:44.849558+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:45.849883+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:46.850439+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:47.850884+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:48.851385+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:49.851664+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:50.852047+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:51.852409+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:52.852804+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:53.853188+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:54.853559+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:55.853963+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:56.854509+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:57.854737+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:58.855140+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:59.855535+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:00.856048+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:01.856451+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:02.856812+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:03.857029+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:04.857459+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132841472 unmapped: 50233344 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:05.857812+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:06.858111+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:07.858389+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:08.858587+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:09.858912+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:10.859105+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:11.859379+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:12.859681+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:13.859923+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:14.860181+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:15.860546+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:16.860900+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:17.861156+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:18.861519+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:19.861851+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:20.862084+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:21.862495+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:22.862892+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:23.863159+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:24.863716+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:25.864355+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:26.864600+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:27.864978+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:28.865398+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:29.865719+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:30.866121+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:31.866511+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:32.866818+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:33.867174+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:34.867555+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:35.867979+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:36.868439+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:37.868729+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:38.869131+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:39.869549+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:40.869941+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:41.870208+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:42.870751+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:43.871136+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:44.871700+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:45.871970+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:46.872385+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:47.872798+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:48.873150+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:49.873613+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:50.873885+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:51.874400+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:52.874873+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:53.875345+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:54.875718+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:55.875947+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:56.876181+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:57.876573+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:58.876918+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:59.877198+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:00.877411+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:01.877800+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:02.878179+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:03.878548+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:04.878845+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:05.879167+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:06.879511+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:07.879866+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:08.880207+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:09.880579+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:10.880968+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:11.881426+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:12.881794+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:13.882172+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:14.882702+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:15.882959+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:16.883465+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:17.883726+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:18.884164+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:19.884622+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:20.884934+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:21.885305+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:22.885616+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:23.885899+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:24.886947+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:25.887290+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:26.887497+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:27.887702+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:28.888037+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:29.888419+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:30.888817+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:31.889197+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:32.889619+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:33.889873+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:34.890217+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:35.890660+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:36.891042+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:37.891462+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:38.891756+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:39.892072+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:40.892382+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:41.892855+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:42.893173+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:43.893616+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:44.894072+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:45.894703+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:46.895084+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:47.895495+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 4800.1 total, 600.0 interval
                                            Cumulative writes: 11K writes, 45K keys, 11K commit groups, 1.0 writes per commit group, ingest: 0.04 GB, 0.01 MB/s
                                            Cumulative WAL: 11K writes, 3324 syncs, 3.60 writes per sync, written: 0.04 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 478 writes, 1385 keys, 478 commit groups, 1.0 writes per commit group, ingest: 0.40 MB, 0.00 MB/s
                                            Interval WAL: 478 writes, 226 syncs, 2.12 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:48.895891+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:49.896159+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:50.896548+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:51.896912+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:52.897194+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:53.897689+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:54.898145+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:55.898539+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:56.898940+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:57.899289+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:58.899631+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:59.899861+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:00.900384+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:01.900746+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:02.901145+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:03.901502+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:04.901876+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132849664 unmapped: 50225152 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:05.902341+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:06.902849+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:07.903339+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:08.903721+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:09.904081+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:10.904568+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:11.904936+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:12.905493+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:13.905910+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:14.906418+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:15.906825+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:16.907098+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:17.907544+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:18.908009+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:19.908497+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:20.908760+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:21.909028+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:22.909531+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:23.909804+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:24.910389+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:25.910795+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:26.911470+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:27.911930+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:28.912475+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:29.913156+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:30.913421+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:31.913645+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:32.913913+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:33.914421+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:34.914753+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:35.915185+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:36.915601+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:37.915823+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:38.916148+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:39.916651+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:40.916882+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:41.917414+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:42.917728+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:43.918030+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:44.918405+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:45.918792+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:46.919202+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:47.919607+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:48.919874+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:49.920838+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:50.921469+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:51.921872+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:52.922197+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:53.922611+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:54.923092+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:55.923489+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:56.923761+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 132366336 unmapped: 50708480 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:57.924075+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:58.924533+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:59.924923+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:00.925285+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 ms_handle_reset con 0x559c9f7ab800 session 0x559ca1cf05a0
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: handle_auth_request added challenge on 0x559ca1f3b000
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:01.925677+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:02.926162+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:03.926535+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:04.926896+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:05.927433+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:06.927741+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:07.928180+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:08.928502+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:09.929134+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:10.929692+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:11.929912+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:12.930347+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:13.930698+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:14.931114+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:15.931381+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:16.931612+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:17.931936+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346523 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:18.932618+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:19.933032+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:20.933500+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131661824 unmapped: 51412992 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f3000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore(/var/lib/ceph/osd/ceph-1) _kv_sync_thread utilization: idle 517.731079102s of 517.891113281s, submitted: 32
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:21.934011+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131670016 unmapped: 51404800 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:22.934458+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345643 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f4000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131670016 unmapped: 51404800 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:23.934871+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131670016 unmapped: 51404800 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:24.935202+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:25.935712+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr services", "format": "json-pretty"} v 0) v1
Oct 11 03:05:39 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2637097341' entity='client.admin' cmd=[{"prefix": "mgr services", "format": "json-pretty"}]: dispatch
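
Each handle_command/audit pair like the one above is the monitor receiving a JSON-framed command from a client at 192.168.122.100 (the volume and mix of commands here look like an automated report collection). The same call can be made directly through the python-rados binding; a sketch assuming /etc/ceph/ceph.conf and the client.admin keyring are readable:

    import json
    import rados

    # Issue the same "mgr services" mon command seen in the audit log.
    cluster = rados.Rados(conffile="/etc/ceph/ceph.conf")
    cluster.connect()
    ret, out, errs = cluster.mon_command(
        json.dumps({"prefix": "mgr services", "format": "json-pretty"}), b"")
    print(ret, out.decode() or errs)
    cluster.shutdown()
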
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:26.936093+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:27.936510+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345643 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f4000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:28.936898+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f4000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:29.937395+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:30.937846+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f4000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:31.938511+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:32.938976+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f4000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345643 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:33.939485+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:34.939895+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:35.940392+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:36.940754+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f4000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:37.941034+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345643 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:38.941515+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:39.941842+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:40.942220+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f4000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:41.942655+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:42.943059+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345643 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:43.943355+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:44.943812+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:45.943953+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:46.944381+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f4000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:47.944635+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345643 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:48.945006+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:49.945298+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:50.945489+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f4000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:51.945769+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:52.946133+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345643 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:53.946479+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:54.946763+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:55.946949+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f4000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:56.947136+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:57.947387+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f4000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345643 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:58.947596+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:59.948037+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:00.948255+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:01.948447+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:02.948667+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345643 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131702784 unmapped: 51372032 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:03.949005+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: osd.1 145 heartbeat osd_stat(store_statfs(0x4f94f4000/0x0/0x4ffc00000, data 0x1c8ec3d/0x1d6a000, compress 0x0/0x0/0x0, omap 0x639, meta 0x499f9c7), peers [0,2] op hist [])
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131710976 unmapped: 51363840 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:04.949285+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131710976 unmapped: 51363840 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:05.949721+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'config diff' '{prefix=config diff}'
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'config diff' '{prefix=config diff}' result is 0 bytes
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'config show' '{prefix=config show}'
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'config show' '{prefix=config show}' result is 0 bytes
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'counter dump' '{prefix=counter dump}'
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'counter dump' '{prefix=counter dump}' result is 0 bytes
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'counter schema' '{prefix=counter schema}'
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'counter schema' '{prefix=counter schema}' result is 0 bytes
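
The do_command lines record osd.1's admin socket servicing config and perf-counter queries from a local collector. The same endpoints can be queried with the ceph CLI; a sketch assuming the CLI and the osd.1 asok are present on this host:

    import json
    import subprocess

    # Query the same admin-socket endpoints osd.1 is servicing above.
    def daemon_cmd(daemon, *args):
        return json.loads(subprocess.check_output(["ceph", "daemon", daemon, *args]))

    show = daemon_cmd("osd.1", "config", "show")
    diff = daemon_cmd("osd.1", "config", "diff")
    print(len(show), "options,", len(diff.get("diff", {})), "non-default")
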
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131899392 unmapped: 51175424 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:06.949992+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131432448 unmapped: 51642368 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:07.950201+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:39 compute-0 ceph-osd[206800]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:39 compute-0 ceph-osd[206800]: bluestore.MempoolThread(0x559c9def5b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345643 data_alloc: 218103808 data_used: 11800576
Oct 11 03:05:39 compute-0 ceph-osd[206800]: prioritycache tune_memory target: 4294967296 mapped: 131555328 unmapped: 51519488 heap: 183074816 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: tick
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_tickets
Oct 11 03:05:39 compute-0 ceph-osd[206800]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:08.950361+0000)
Oct 11 03:05:39 compute-0 ceph-osd[206800]: do_command 'log dump' '{prefix=log dump}'
Oct 11 03:05:39 compute-0 rsyslogd[187706]: imjournal from <compute-0:ceph-osd>: begin to drop messages due to rate-limiting
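
Here rsyslog's imjournal input starts dropping ceph-osd messages: the 'log dump' immediately above pushed the OSD's buffered debug stream into the journal faster than the rate limit allows, so the relay to syslog is no longer complete from this point. The relevant knobs live in the imjournal stanza of rsyslog.conf; the values below are illustrative, not this host's:

    # rsyslog.conf -- imjournal rate-limit knobs (illustrative values)
    module(load="imjournal"
           StateFile="imjournal.state"
           Ratelimit.Interval="600"    # seconds per accounting window
           Ratelimit.Burst="20000")    # messages allowed per window
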
Oct 11 03:05:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd erasure-code-profile ls"} v 0) v1
Oct 11 03:05:39 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2482983629' entity='client.admin' cmd=[{"prefix": "osd erasure-code-profile ls"}]: dispatch
Oct 11 03:05:39 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr stat", "format": "json-pretty"} v 0) v1
Oct 11 03:05:39 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3308444790' entity='client.admin' cmd=[{"prefix": "mgr stat", "format": "json-pretty"}]: dispatch
Oct 11 03:05:39 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2646: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:39 compute-0 nova_compute[356901]: 2025-10-11 03:05:39.896 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:05:39 compute-0 nova_compute[356901]: 2025-10-11 03:05:39.897 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858
Oct 11 03:05:39 compute-0 nova_compute[356901]: 2025-10-11 03:05:39.898 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862
Oct 11 03:05:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd metadata"} v 0) v1
Oct 11 03:05:40 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3638502392' entity='client.admin' cmd=[{"prefix": "osd metadata"}]: dispatch
Oct 11 03:05:40 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1806847038' entity='client.admin' cmd=[{"prefix": "osd crush tree", "show_shadow": true}]: dispatch
Oct 11 03:05:40 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2637097341' entity='client.admin' cmd=[{"prefix": "mgr services", "format": "json-pretty"}]: dispatch
Oct 11 03:05:40 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2482983629' entity='client.admin' cmd=[{"prefix": "osd erasure-code-profile ls"}]: dispatch
Oct 11 03:05:40 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3308444790' entity='client.admin' cmd=[{"prefix": "mgr stat", "format": "json-pretty"}]: dispatch
Oct 11 03:05:40 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3638502392' entity='client.admin' cmd=[{"prefix": "osd metadata"}]: dispatch
Oct 11 03:05:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mgr versions", "format": "json-pretty"} v 0) v1
Oct 11 03:05:40 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/201991363' entity='client.admin' cmd=[{"prefix": "mgr versions", "format": "json-pretty"}]: dispatch
Oct 11 03:05:40 compute-0 nova_compute[356901]: 2025-10-11 03:05:40.416 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:312
Oct 11 03:05:40 compute-0 nova_compute[356901]: 2025-10-11 03:05:40.417 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquired lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:315
Oct 11 03:05:40 compute-0 nova_compute[356901]: 2025-10-11 03:05:40.417 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Forcefully refreshing network info cache for instance _get_instance_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:2004
Oct 11 03:05:40 compute-0 nova_compute[356901]: 2025-10-11 03:05:40.417 2 DEBUG nova.objects.instance [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lazy-loading 'info_cache' on Instance uuid 0cc56d17-ec3a-4408-bccb-91b29427379e obj_load_attr /usr/lib/python3.9/site-packages/nova/objects/instance.py:1105
Oct 11 03:05:40 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd utilization"} v 0) v1
Oct 11 03:05:40 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2855282270' entity='client.admin' cmd=[{"prefix": "osd utilization"}]: dispatch
Oct 11 03:05:40 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15961 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:40 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15963 -' entity='client.admin' cmd=[{"prefix": "telemetry channel ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:41 compute-0 nova_compute[356901]: 2025-10-11 03:05:41.071 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:41 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15965 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:41 compute-0 ceph-mon[191930]: pgmap v2646: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:41 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/201991363' entity='client.admin' cmd=[{"prefix": "mgr versions", "format": "json-pretty"}]: dispatch
Oct 11 03:05:41 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2855282270' entity='client.admin' cmd=[{"prefix": "osd utilization"}]: dispatch
Oct 11 03:05:41 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15967 -' entity='client.admin' cmd=[{"prefix": "telemetry collection ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:41 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15969 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:41 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2647: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:41 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15973 -' entity='client.admin' cmd=[{"prefix": "orch ls", "export": true, "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:41 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:05:42 compute-0 podman[501855]: 2025-10-11 03:05:42.19788259 +0000 UTC m=+0.089325572 container health_status 1ac8f540dad8e72eb80e73a81f84927520f6e5ca697dc9cacf15f9faf30fc03c (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=multipathd, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, org.label-schema.build-date=20251009, org.label-schema.schema-version=1.0, container_name=multipathd, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Oct 11 03:05:42 compute-0 podman[501865]: 2025-10-11 03:05:42.216879103 +0000 UTC m=+0.100877502 container health_status b49da8f51acda801d30cc5d0e368cbb99630014bab5d02adbc9252d064ecdb9e (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, org.label-schema.build-date=20251009, config_id=iscsid, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
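
The two podman events above are periodic container healthchecks for the multipathd and iscsid service containers, both passing (health_status=healthy, failing streak 0). The same probe can be triggered on demand; a sketch assuming the podman CLI on this host:

    import subprocess

    # Run the configured healthcheck for the two containers logged above.
    for name in ("multipathd", "iscsid"):
        rc = subprocess.run(["podman", "healthcheck", "run", name],
                            capture_output=True).returncode
        print(name, "healthy" if rc == 0 else f"unhealthy (rc={rc})")
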
Oct 11 03:05:42 compute-0 ceph-mon[191930]: from='client.15961 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:42 compute-0 ceph-mon[191930]: from='client.15963 -' entity='client.admin' cmd=[{"prefix": "telemetry channel ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:42 compute-0 ceph-mon[191930]: from='client.15965 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:42 compute-0 ceph-mon[191930]: from='client.15967 -' entity='client.admin' cmd=[{"prefix": "telemetry collection ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:42 compute-0 nova_compute[356901]: 2025-10-11 03:05:42.259 2 DEBUG nova.network.neutron [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updating instance_info_cache with network_info: [{"id": "64dfc81b-528a-4adc-9787-66719d2f9f93", "address": "fa:16:3e:cc:aa:96", "network": {"id": "d4dded16-3268-4cf9-bb6b-aa5200d5e4ec", "bridge": "br-int", "label": "private", "subnets": [{"cidr": "192.168.0.0/24", "dns": [], "gateway": {"address": "192.168.0.1", "type": "gateway", "version": 4, "meta": {}}, "ips": [{"address": "192.168.0.236", "type": "fixed", "version": 4, "meta": {}, "floating_ips": [{"address": "192.168.122.201", "type": "floating", "version": 4, "meta": {}}]}], "routes": [], "version": 4, "meta": {"enable_dhcp": true}}], "meta": {"injected": false, "tenant_id": "97026531b3404a11869cb85a059c4a0d", "mtu": 1442, "physical_network": null, "tunneled": true}}, "type": "ovs", "details": {"port_filter": true, "connectivity": "l2", "bridge_name": "br-int", "datapath_type": "system", "bound_drivers": {"0": "ovn"}}, "devname": "tap64dfc81b-52", "ovs_interfaceid": "64dfc81b-528a-4adc-9787-66719d2f9f93", "qbh_params": null, "qbg_params": null, "active": true, "vnic_type": "normal", "profile": {}, "preserve_on_delete": false, "delegate_create": true, "meta": {}}] update_instance_cache_with_nw_info /usr/lib/python3.9/site-packages/nova/network/neutron.py:116
Oct 11 03:05:42 compute-0 nova_compute[356901]: 2025-10-11 03:05:42.277 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Releasing lock "refresh_cache-0cc56d17-ec3a-4408-bccb-91b29427379e" lock /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:333
Oct 11 03:05:42 compute-0 nova_compute[356901]: 2025-10-11 03:05:42.278 2 DEBUG nova.compute.manager [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] [instance: 0cc56d17-ec3a-4408-bccb-91b29427379e] Updated the network info_cache for instance _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9929
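
The heal task's network_info payload above is plain JSON: one OVN-bound OVS port with fixed IP 192.168.0.236 and floating IP 192.168.122.201. Pulling the addresses out of a trimmed copy of that structure:

    # Trimmed from the network_info JSON logged above.
    vif = {
        "id": "64dfc81b-528a-4adc-9787-66719d2f9f93",
        "address": "fa:16:3e:cc:aa:96",
        "network": {"subnets": [{"ips": [{
            "address": "192.168.0.236",
            "floating_ips": [{"address": "192.168.122.201"}],
        }]}]},
    }
    for subnet in vif["network"]["subnets"]:
        for ip in subnet["ips"]:
            floats = [f["address"] for f in ip["floating_ips"]]
            print(vif["address"], ip["address"], "->", ", ".join(floats))
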
Oct 11 03:05:42 compute-0 nova_compute[356901]: 2025-10-11 03:05:42.278 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:05:42 compute-0 nova_compute[356901]: 2025-10-11 03:05:42.308 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:05:42 compute-0 nova_compute[356901]: 2025-10-11 03:05:42.309 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:05:42 compute-0 nova_compute[356901]: 2025-10-11 03:05:42.310 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker.clean_compute_node_cache" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
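
The acquire/release pairs around "compute_resources" come from oslo.concurrency's lockutils, which nova uses to serialize resource-tracker work; the waited/held durations are logged on each transition. The pattern, sketched (the decorator arguments match the log's lock name, the function body is illustrative):

    from oslo_concurrency import lockutils

    # Sketch of the locking pattern behind the "compute_resources" messages:
    # resource-tracker methods serialize on one named lock.
    @lockutils.synchronized("compute_resources", lock_file_prefix="nova-")
    def clean_compute_node_cache():
        ...  # prune cached compute-node records under the lock
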
Oct 11 03:05:42 compute-0 nova_compute[356901]: 2025-10-11 03:05:42.310 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Auditing locally available compute resources for compute-0.ctlplane.example.com (node: compute-0.ctlplane.example.com) update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:861
Oct 11 03:05:42 compute-0 nova_compute[356901]: 2025-10-11 03:05:42.310 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
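
For RBD-backed storage the resource audit shells out to ceph df as the client.openstack user; the df dispatch from client.openstack in the mon audit lines below is the monitor's side of this same call. Reproducing the probe and reading the cluster-wide totals, assuming the same conf and keyring:

    import json
    import subprocess

    # Replay nova's storage probe from the log line above.
    raw = subprocess.check_output([
        "ceph", "df", "--format=json",
        "--id", "openstack", "--conf", "/etc/ceph/ceph.conf",
    ])
    stats = json.loads(raw)["stats"]
    free_gib = stats["total_avail_bytes"] / 2**30
    total_gib = stats["total_bytes"] / 2**30
    print(f"{free_gib:.1f} GiB free of {total_gib:.1f} GiB")
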
Oct 11 03:05:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "quorum_status"} v 0) v1
Oct 11 03:05:42 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1749676999' entity='client.admin' cmd=[{"prefix": "quorum_status"}]: dispatch
Oct 11 03:05:42 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15977 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 03:05:42 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3319086728' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:05:42 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15983 -' entity='client.admin' cmd=[{"prefix": "orch status", "detail": true, "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:42 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "versions"} v 0) v1
Oct 11 03:05:42 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1041145345' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
Oct 11 03:05:42 compute-0 nova_compute[356901]: 2025-10-11 03:05:42.807 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.497s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 03:05:42 compute-0 nova_compute[356901]: 2025-10-11 03:05:42.912 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:05:42 compute-0 nova_compute[356901]: 2025-10-11 03:05:42.912 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:05:42 compute-0 nova_compute[356901]: 2025-10-11 03:05:42.912 2 DEBUG nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] skipping disk for instance-00000001 as it does not have a path _get_instance_disk_info_from_config /usr/lib/python3.9/site-packages/nova/virt/libvirt/driver.py:11231
Oct 11 03:05:43 compute-0 nova_compute[356901]: 2025-10-11 03:05:43.243 2 WARNING nova.virt.libvirt.driver [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] This host appears to have multiple sockets per NUMA node. The `socket` PCI NUMA affinity will not be supported.
Oct 11 03:05:43 compute-0 nova_compute[356901]: 2025-10-11 03:05:43.245 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Hypervisor/Node resource view: name=compute-0.ctlplane.example.com free_ram=3487MB free_disk=59.955204010009766GB free_vcpus=7 pci_devices=[{"dev_id": "pci_0000_00_00_0", "address": "0000:00:00.0", "product_id": "1237", "vendor_id": "8086", "numa_node": null, "label": "label_8086_1237", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_1", "address": "0000:00:01.1", "product_id": "7010", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7010", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_05_0", "address": "0000:00:05.0", "product_id": "1002", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1002", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_2", "address": "0000:00:01.2", "product_id": "7020", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7020", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_06_0", "address": "0000:00:06.0", "product_id": "1005", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1005", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_04_0", "address": "0000:00:04.0", "product_id": "1001", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1001", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_0", "address": "0000:00:01.0", "product_id": "7000", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_03_0", "address": "0000:00:03.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_01_3", "address": "0000:00:01.3", "product_id": "7113", "vendor_id": "8086", "numa_node": null, "label": "label_8086_7113", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_02_0", "address": "0000:00:02.0", "product_id": "1050", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1050", "dev_type": "type-PCI"}, {"dev_id": "pci_0000_00_07_0", "address": "0000:00:07.0", "product_id": "1000", "vendor_id": "1af4", "numa_node": null, "label": "label_1af4_1000", "dev_type": "type-PCI"}] _report_hypervisor_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1034
Oct 11 03:05:43 compute-0 nova_compute[356901]: 2025-10-11 03:05:43.245 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Acquiring lock "compute_resources" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:05:43 compute-0 nova_compute[356901]: 2025-10-11 03:05:43.246 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" acquired by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:05:43 compute-0 ceph-mon[191930]: from='client.15969 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:43 compute-0 ceph-mon[191930]: pgmap v2647: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:43 compute-0 ceph-mon[191930]: from='client.15973 -' entity='client.admin' cmd=[{"prefix": "orch ls", "export": true, "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:43 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1749676999' entity='client.admin' cmd=[{"prefix": "quorum_status"}]: dispatch
Oct 11 03:05:43 compute-0 ceph-mon[191930]: from='client.15977 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:43 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3319086728' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:05:43 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1041145345' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
Oct 11 03:05:43 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15985 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:43 compute-0 nova_compute[356901]: 2025-10-11 03:05:43.274 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:43 compute-0 nova_compute[356901]: 2025-10-11 03:05:43.361 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Instance 0cc56d17-ec3a-4408-bccb-91b29427379e actively managed on this compute host and has allocations in placement: {'resources': {'DISK_GB': 2, 'MEMORY_MB': 512, 'VCPU': 1}}. _remove_deleted_instances_allocations /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1635
Oct 11 03:05:43 compute-0 nova_compute[356901]: 2025-10-11 03:05:43.362 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Total usable vcpus: 8, total allocated vcpus: 1 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057
Oct 11 03:05:43 compute-0 nova_compute[356901]: 2025-10-11 03:05:43.363 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7680MB used_ram=1024MB phys_disk=59GB used_disk=2GB total_vcpus=8 used_vcpus=1 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066
Oct 11 03:05:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "health", "detail": "detail", "format": "json-pretty"} v 0) v1
Oct 11 03:05:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2931757092' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail", "format": "json-pretty"}]: dispatch
Oct 11 03:05:43 compute-0 nova_compute[356901]: 2025-10-11 03:05:43.440 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384
Oct 11 03:05:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd tree", "format": "json-pretty"} v 0) v1
Oct 11 03:05:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1213736420' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json-pretty"}]: dispatch
Oct 11 03:05:43 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2648: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:43 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json"} v 0) v1
Oct 11 03:05:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2619818995' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:05:43 compute-0 nova_compute[356901]: 2025-10-11 03:05:43.936 2 DEBUG oslo_concurrency.processutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.496s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422
Oct 11 03:05:43 compute-0 nova_compute[356901]: 2025-10-11 03:05:43.945 2 DEBUG nova.compute.provider_tree [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed in ProviderTree for provider: 256b11da-7f71-42c0-941c-ea1e909a35f8 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180
Oct 11 03:05:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
Oct 11 03:05:43 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
Oct 11 03:05:43 compute-0 nova_compute[356901]: 2025-10-11 03:05:43.973 2 DEBUG nova.scheduler.client.report [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Inventory has not changed for provider 256b11da-7f71-42c0-941c-ea1e909a35f8 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7680, 'reserved': 512, 'min_unit': 1, 'max_unit': 7680, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940
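
The inventory nova reports to placement turns into schedulable capacity via (total - reserved) * allocation_ratio, which is why this 8-vCPU host can still accept far more than the 7 physically free vCPUs in the hypervisor view above. Worked from the logged inventory:

    inventory = {  # copied from the inventory data in the log line above
        "VCPU": {"total": 8, "reserved": 0, "allocation_ratio": 4.0},
        "MEMORY_MB": {"total": 7680, "reserved": 512, "allocation_ratio": 1.0},
        "DISK_GB": {"total": 59, "reserved": 1, "allocation_ratio": 0.9},
    }
    # Placement's capacity formula: (total - reserved) * allocation_ratio
    for rc, inv in inventory.items():
        cap = (inv["total"] - inv["reserved"]) * inv["allocation_ratio"]
        print(f"{rc}: {cap:g} schedulable")
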
Oct 11 03:05:43 compute-0 nova_compute[356901]: 2025-10-11 03:05:43.977 2 DEBUG nova.compute.resource_tracker [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995
Oct 11 03:05:43 compute-0 nova_compute[356901]: 2025-10-11 03:05:43.977 2 DEBUG oslo_concurrency.lockutils [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.732s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 03:05:44 compute-0 ceph-mon[191930]: from='client.15983 -' entity='client.admin' cmd=[{"prefix": "orch status", "detail": true, "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:44 compute-0 ceph-mon[191930]: from='client.15985 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:44 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2931757092' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail", "format": "json-pretty"}]: dispatch
Oct 11 03:05:44 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1213736420' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json-pretty"}]: dispatch
Oct 11 03:05:44 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2619818995' entity='client.openstack' cmd=[{"prefix": "df", "format": "json"}]: dispatch
Oct 11 03:05:44 compute-0 ceph-mon[191930]: from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
Oct 11 03:05:44 compute-0 ceph-mon[191930]: from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fae84000/0x0/0x4ffc00000, data 0xb315d2/0xbf9000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100769792 unmapped: 21823488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:35.726499+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100769792 unmapped: 21823488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:36.726952+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025ba3c400 session 0x56025d75f680
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94000 session 0x56025a297860
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94400 session 0x56025a296960
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100769792 unmapped: 21823488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:37.727430+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1105130 data_alloc: 218103808 data_used: 7008256
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100769792 unmapped: 21823488 heap: 122593280 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:38.727639+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94800 session 0x560259c2f0e0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 16.167192459s of 16.237197876s, submitted: 10
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94800 session 0x56025c222b40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:39.727902+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 102735872 unmapped: 26673152 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025ba3c400 session 0x56025c0e2b40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025c22c000 session 0x56025a4741e0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94000 session 0x56025a2bc3c0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94400 session 0x56025c0d23c0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025ba3c400 session 0x56025c209e00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025c22c000 session 0x56025a297e00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94000 session 0x56025a2963c0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:40.728198+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 102670336 unmapped: 26738688 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94800 session 0x56025a2972c0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa6c1000/0x0/0x4ffc00000, data 0x12f4634/0x13bd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3000.1 total, 600.0 interval
                                            Cumulative writes: 7474 writes, 29K keys, 7474 commit groups, 1.0 writes per commit group, ingest: 0.02 GB, 0.01 MB/s
                                            Cumulative WAL: 7474 writes, 1672 syncs, 4.47 writes per sync, written: 0.02 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 991 writes, 3371 keys, 991 commit groups, 1.0 writes per commit group, ingest: 2.86 MB, 0.00 MB/s
                                            Interval WAL: 991 writes, 421 syncs, 2.35 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:41.728616+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 102670336 unmapped: 26738688 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa6c1000/0x0/0x4ffc00000, data 0x12f4634/0x13bd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025c22c400 session 0x56025a296d20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:42.728860+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 102670336 unmapped: 26738688 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1168958 data_alloc: 218103808 data_used: 7008256
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025c22c400 session 0x56025a79b860
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025ba3c400 session 0x56025a79ab40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:43.729328+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 102686720 unmapped: 26722304 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025c22c000 session 0x56025c07c000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa69b000/0x0/0x4ffc00000, data 0x1318667/0x13e3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:44.729630+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101965824 unmapped: 27443200 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:45.730059+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa69b000/0x0/0x4ffc00000, data 0x1318667/0x13e3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101974016 unmapped: 27435008 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:46.730434+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101974016 unmapped: 27435008 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:47.730710+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101974016 unmapped: 27435008 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1177097 data_alloc: 218103808 data_used: 7208960
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa69b000/0x0/0x4ffc00000, data 0x1318667/0x13e3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:48.730905+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 101998592 unmapped: 27410432 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:49.731500+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:50.731946+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:51.732420+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa69b000/0x0/0x4ffc00000, data 0x1318667/0x13e3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:52.732827+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1231817 data_alloc: 218103808 data_used: 14794752
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:53.733060+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:54.733309+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:55.733551+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:56.733986+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:57.734458+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1231817 data_alloc: 218103808 data_used: 14794752
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa69b000/0x0/0x4ffc00000, data 0x1318667/0x13e3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:58.734719+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:32:59.735084+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:00.735364+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:01.735760+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:02.736171+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1231817 data_alloc: 218103808 data_used: 14794752
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa69b000/0x0/0x4ffc00000, data 0x1318667/0x13e3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:03.736448+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fa69b000/0x0/0x4ffc00000, data 0x1318667/0x13e3000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:04.736913+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:05.737510+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:06.737835+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 103669760 unmapped: 25739264 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 27.275114059s of 27.844760895s, submitted: 51
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94000 session 0x5602598d4780
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025eb94800 session 0x56025c2201e0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:07.738792+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100073472 unmapped: 29335552 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1114758 data_alloc: 218103808 data_used: 7008256
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 ms_handle_reset con 0x56025ba3c400 session 0x56025b97fe00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:08.739163+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100114432 unmapped: 29294592 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 heartbeat osd_stat(store_statfs(0x4fae5f000/0x0/0x4ffc00000, data 0xb315d2/0xbf9000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x417f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:09.739407+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100114432 unmapped: 29294592 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 130 handle_osd_map epochs [130,131], i have 130, src has [1,131]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:10.739911+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100106240 unmapped: 29302784 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 131 ms_handle_reset con 0x56025c22c000 session 0x56025b6d1860
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:11.740970+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100106240 unmapped: 29302784 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:12.741964+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100106240 unmapped: 29302784 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1118674 data_alloc: 218103808 data_used: 7016448
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:13.742635+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 131 heartbeat osd_stat(store_statfs(0x4faa71000/0x0/0x4ffc00000, data 0xb33180/0xbfb000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100106240 unmapped: 29302784 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:14.742845+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100106240 unmapped: 29302784 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:15.743637+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100106240 unmapped: 29302784 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:16.744653+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100106240 unmapped: 29302784 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _renew_subs
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 131 handle_osd_map epochs [132,132], i have 131, src has [1,132]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 9.644417763s of 10.028837204s, submitted: 71
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:17.745458+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 132 heartbeat osd_stat(store_statfs(0x4faa6f000/0x0/0x4ffc00000, data 0xb34be3/0xbfe000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100089856 unmapped: 29319168 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1121616 data_alloc: 218103808 data_used: 7020544
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 132 heartbeat osd_stat(store_statfs(0x4faa6f000/0x0/0x4ffc00000, data 0xb34be3/0xbfe000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:18.746521+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100089856 unmapped: 29319168 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:19.746907+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100089856 unmapped: 29319168 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:20.747547+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100089856 unmapped: 29319168 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 132 heartbeat osd_stat(store_statfs(0x4faa6f000/0x0/0x4ffc00000, data 0xb34be3/0xbfe000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:21.748046+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100089856 unmapped: 29319168 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:22.748748+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100089856 unmapped: 29319168 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1122457 data_alloc: 218103808 data_used: 7020544
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:23.749378+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100081664 unmapped: 29327360 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:24.749756+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100081664 unmapped: 29327360 heap: 129409024 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 132 handle_osd_map epochs [132,133], i have 132, src has [1,133]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:25.750015+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 133 ms_handle_reset con 0x56025c22c400 session 0x56025c209680
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100147200 unmapped: 45088768 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:26.750531+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100147200 unmapped: 45088768 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 133 heartbeat osd_stat(store_statfs(0x4fa2df000/0x0/0x4ffc00000, data 0x12c2183/0x138e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:27.750888+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100147200 unmapped: 45088768 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1180503 data_alloc: 218103808 data_used: 7028736
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:28.751421+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100147200 unmapped: 45088768 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:29.751714+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100147200 unmapped: 45088768 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:30.752052+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100147200 unmapped: 45088768 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:31.752351+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100147200 unmapped: 45088768 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:32.752607+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 15.445683479s of 15.610335350s, submitted: 25
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 133 heartbeat osd_stat(store_statfs(0x4fa2e1000/0x0/0x4ffc00000, data 0x12c2160/0x138d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100163584 unmapped: 45072384 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1178957 data_alloc: 218103808 data_used: 7028736
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _renew_subs
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 133 handle_osd_map epochs [134,134], i have 133, src has [1,134]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:33.752780+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 134 ms_handle_reset con 0x56025eb94000 session 0x56025c924780
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100196352 unmapped: 45039616 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:34.753001+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100196352 unmapped: 45039616 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:35.753274+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100196352 unmapped: 45039616 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 134 heartbeat osd_stat(store_statfs(0x4faa69000/0x0/0x4ffc00000, data 0xb38331/0xc04000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:36.753821+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100204544 unmapped: 45031424 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:37.754327+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100204544 unmapped: 45031424 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1131291 data_alloc: 218103808 data_used: 7036928
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:38.754801+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100204544 unmapped: 45031424 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 134 heartbeat osd_stat(store_statfs(0x4faa69000/0x0/0x4ffc00000, data 0xb38331/0xc04000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:39.755082+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:40.755356+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 134 handle_osd_map epochs [134,135], i have 134, src has [1,135]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:41.755732+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:42.756729+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1134265 data_alloc: 218103808 data_used: 7036928
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:43.757051+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:44.757421+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:45.757729+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:46.758407+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:47.758701+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1134425 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:48.759000+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:49.759328+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:50.759686+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:51.760041+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:52.760361+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1134425 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:53.760740+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:54.761108+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:55.761494+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:56.761995+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:57.762208+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1134425 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:58.762661+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:33:59.763061+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:00.763400+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:01.763603+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:02.763961+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1134425 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:03.764203+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:04.764555+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:05.764797+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:06.765208+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:07.765620+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1134425 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:08.765832+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:09.766187+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:10.766657+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:11.766846+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:12.767540+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1134425 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:13.767954+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:14.768177+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:15.768543+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:16.768848+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:17.769341+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1134425 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:18.769647+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa66000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:19.769994+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100212736 unmapped: 45023232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 47.773204803s of 47.967689514s, submitted: 49
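
This lone _kv_sync_thread utilization line quantifies how idle the OSD is: over a ~48 s window the kv sync thread slept for all but ~0.19 s and submitted only 49 transactions. The arithmetic, straight from the logged numbers:

    idle, window, submitted = 47.773204803, 47.967689514, 49

    busy = window - idle
    print(f"idle {idle/window:.2%} of the window; busy {busy*1000:.0f} ms total")
    print(f"{submitted} txns -> {submitted/window:.2f} txn/s, "
          f"~{busy/submitted*1000:.1f} ms of sync work per txn on average")
    # -> idle 99.59% of the window; busy 194 ms total
    # -> 49 txns -> 1.02 txn/s, ~4.0 ms of sync work per txn on average

Roughly one transaction per second matches the once-per-second monclient tick cadence seen throughout this stretch of the log.
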
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:20.770561+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100220928 unmapped: 45015040 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:21.771038+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100253696 unmapped: 44982272 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:22.771527+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100294656 unmapped: 44941312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133617 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:23.771769+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:24.772492+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:25.772931+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 rsyslogd[187706]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:26.773275+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:27.773727+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:28.774051+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:29.774322+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:30.774630+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:31.774969+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:32.775535+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:33.775803+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:34.776160+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:35.776447+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:36.776920+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:37.777433+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:38.777667+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:39.778186+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:40.778487+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:41.778699+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:42.778976+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:43.779378+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:44.779591+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:45.779970+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:46.780637+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:47.781102+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:48.781527+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:49.781842+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:50.782374+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:51.782770+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:52.783125+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:53.783424+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:54.783646+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:55.783984+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:56.784458+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:57.784688+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:58.785065+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:34:59.785449+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config dump"} v 0) v1
Oct 11 03:05:44 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2742660004' entity='client.admin' cmd=[{"prefix": "config dump"}]: dispatch
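
The two ceph-mon lines above show the monitor receiving and audit-logging a {"prefix": "config dump"} mon_command from client.admin at 192.168.122.100 — i.e. someone, or some tooling, ran the equivalent of `ceph config dump`. The same request can be issued programmatically; a minimal sketch with the python-rados binding (the conf path and client name are illustrative, and the JSON keys printed are what the JSON formatter typically emits):

    import json
    import rados

    # Paths/IDs are illustrative; point at your cluster's conf and keyring.
    cluster = rados.Rados(conffile="/etc/ceph/ceph.conf", name="client.admin")
    cluster.connect()
    try:
        # Should produce the same kind of handle_command/audit pair
        # seen in the mon log above.
        ret, outbuf, outs = cluster.mon_command(
            json.dumps({"prefix": "config dump", "format": "json"}), b"")
        if ret == 0:
            for opt in json.loads(outbuf):
                print(opt.get("section"), opt.get("name"), opt.get("value"))
    finally:
        cluster.shutdown()
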
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:00.785876+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:01.786348+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:02.786765+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:03.787027+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:04.787487+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:05.787646+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:06.787961+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100319232 unmapped: 44916736 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:07.788414+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:08.788799+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:09.789358+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:10.789621+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:11.789938+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:12.790394+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:13.790792+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:14.791453+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:15.791875+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:16.792364+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:17.792643+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:18.793054+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:19.793532+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:20.793902+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:21.794328+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:22.794681+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:23.795078+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:24.795526+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:25.795922+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:26.796221+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:27.796538+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:28.796819+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:29.797197+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:30.797867+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:31.798101+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:32.798407+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:33.798766+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:34.799139+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:35.799496+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:36.799979+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:37.800449+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:38.800700+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:39.801129+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:40.801389+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:41.801772+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:42.802000+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:43.802364+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:44.802609+0000)
Oct 11 03:05:44 compute-0 rsyslogd[187706]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:45.802838+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:46.803196+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:47.803597+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:48.803794+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:49.804325+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:50.804623+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:51.804863+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:52.805102+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:53.805359+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:54.805567+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:55.805827+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:56.806462+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:57.806687+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:58.806898+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:35:59.807395+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:00.807791+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:01.808069+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:02.808314+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:03.808694+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:04.809051+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:05.809388+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:06.810023+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:07.810419+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:08.810665+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:09.810892+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:10.811107+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:11.811487+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:12.811892+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:13.812348+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:14.812743+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:15.813138+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:16.813691+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:17.813993+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:18.814422+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:19.814822+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:20.815206+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:21.815657+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:22.816008+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:23.816597+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:24.817021+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:25.817499+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:26.817891+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:27.818365+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:28.818796+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:29.819145+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:30.819562+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:31.820032+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:32.820444+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:33.820834+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:34.821195+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:35.821630+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:36.822112+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:37.822496+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:38.822908+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:39.823357+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:40.823548+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:41.823940+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:42.824407+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:43.824895+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100327424 unmapped: 44908544 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:44.825332+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:45.825717+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:46.826218+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:47.826656+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:48.826960+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:49.827400+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:50.827783+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:51.828184+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:52.828511+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:53.828869+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:54.829374+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:55.829828+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:56.830182+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:57.830590+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:58.830805+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:36:59.831034+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:00.831356+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:01.831756+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:02.832100+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:03.832383+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:04.832580+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:05.832832+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:06.833316+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:07.833585+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:08.833786+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:09.834029+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:10.834383+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:11.834566+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:12.834960+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:13.835465+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:14.835749+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:15.835995+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:16.836538+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:17.836916+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:18.837727+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100335616 unmapped: 44900352 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:19.838123+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:20.838602+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:21.838984+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:22.839334+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:23.839706+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:24.840024+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:25.840347+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:26.840673+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:27.841029+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:28.841299+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:29.841489+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:30.841885+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:31.842330+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:32.842808+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:33.843174+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 heartbeat osd_stat(store_statfs(0x4faa67000/0x0/0x4ffc00000, data 0xb39d94/0xc07000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100343808 unmapped: 44892160 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:34.843592+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1133545 data_alloc: 218103808 data_used: 7041024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22d800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 193.742782593s of 194.405731201s, submitted: 106
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100352000 unmapped: 44883968 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:35.843964+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 ms_handle_reset con 0x56025c22d800 session 0x56025db3be00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100352000 unmapped: 44883968 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:36.844446+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 135 handle_osd_map epochs [136,136], i have 135, src has [1,136]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 136 heartbeat osd_stat(store_statfs(0x4fa267000/0x0/0x4ffc00000, data 0x1339d94/0x1407000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 136 handle_osd_map epochs [136,137], i have 136, src has [1,137]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100376576 unmapped: 44859392 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:37.844899+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100376576 unmapped: 44859392 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:38.845374+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:39.845811+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:40.846018+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:41.846436+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:42.846663+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:43.846962+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:44.847182+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:45.847534+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:46.848009+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:47.848475+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:48.848814+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:49.849215+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100286464 unmapped: 44949504 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:50.849528+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:51.849897+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:52.850141+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:53.850565+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:54.851009+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100311040 unmapped: 44924928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:55.851514+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:56.851941+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:57.852178+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:58.852420+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:37:59.852797+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:00.853339+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:01.853720+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:02.854042+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:03.854331+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:04.854727+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:05.855325+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:06.855847+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:07.856401+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:08.856794+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:09.857158+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:10.857374+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:11.857674+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:12.858142+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:13.858381+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:14.858888+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:15.859121+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:16.859696+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:17.859974+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:18.860469+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:19.864997+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:20.865375+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:21.865819+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:22.866164+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:23.866459+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:24.866849+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:25.867212+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:26.867599+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100188160 unmapped: 45047808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:27.867870+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100204544 unmapped: 45031424 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:28.868107+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100204544 unmapped: 45031424 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:29.868487+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100204544 unmapped: 45031424 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1196213 data_alloc: 218103808 data_used: 7049216
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:30.868878+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 100204544 unmapped: 45031424 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:31.869106+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107479040 unmapped: 37756928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:32.869620+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025ba3c400 session 0x56025c220780
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107479040 unmapped: 37756928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025eb94c00 session 0x56025c276d20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025eb95000 session 0x56025a297e00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7b000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7b000 session 0x56025a2972c0
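The handle_auth_request / ms_handle_reset pairs are connection churn, not failures: a peer (another OSD, a mgr module, or a client) connects, the messenger issues an authentication challenge, and when the peer hangs up the OSD tears the session down; the same connection pointers recur because connection objects are reused. A sketch for pairing the two events by pointer when triaging a journal dump (the input path and the pairing approach are assumptions, not something the log prescribes):

    import re
    from collections import Counter

    challenge = re.compile(r"handle_auth_request added challenge on (0x[0-9a-f]+)")
    reset = re.compile(r"ms_handle_reset con (0x[0-9a-f]+)")

    events = Counter()
    with open("ceph-osd.log") as log:  # hypothetical journalctl dump
        for line in log:
            if m := challenge.search(line):
                events["challenge", m.group(1)] += 1
            elif m := reset.search(line):
                events["reset", m.group(1)] += 1

    for (kind, con), count in events.most_common(10):
        print(f"{kind:9s} {con}: {count}")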
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:33.869969+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107479040 unmapped: 37756928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7ac00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7ac00 session 0x56025a296d20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:34.870460+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107479040 unmapped: 37756928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1216693 data_alloc: 218103808 data_used: 13864960
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025ba3c400 session 0x56025a297860
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7b000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7b000 session 0x56025d75f0e0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:35.870851+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107462656 unmapped: 37773312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:36.871503+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107462656 unmapped: 37773312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:37.871904+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107462656 unmapped: 37773312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:38.872158+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107462656 unmapped: 37773312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:39.872617+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107462656 unmapped: 37773312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1216693 data_alloc: 218103808 data_used: 13864960
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:40.873604+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4fa25f000/0x0/0x4ffc00000, data 0x133d48e/0x140d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107462656 unmapped: 37773312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:41.874090+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107462656 unmapped: 37773312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 67.453536987s of 67.516914368s, submitted: 5
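The _kv_sync_thread utilization line measures how busy BlueStore's RocksDB commit thread was over its reporting window: here it sat idle for 67.45 s of 67.52 s with only 5 batches submitted — effectively no write load — while later lines in this stretch (6.2 s idle of 10.8 s, 63 submitted) record a short write burst. The arithmetic, using the figures as printed:

    # (idle seconds, window seconds, batches submitted), taken from the
    # utilization lines in this stretch of the log.
    samples = [
        (67.453536987, 67.516914368, 5),
        (6.248156548, 10.819179535, 63),
        (15.274926186, 16.816574097, 28),
    ]
    for idle, window, submitted in samples:
        busy = 100 * (1 - idle / window)
        print(f"busy {busy:5.1f}% over {window:5.1f} s, "
              f"{submitted / window:5.2f} batches/s")
    # 0.1% busy -> idle; 42.2% during the burst; 9.2% as it drains.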
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025eb95000 session 0x56025c063680
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7a800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723c00 session 0x560259c025a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:42.874489+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 107479040 unmapped: 37756928 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:43.874956+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108576768 unmapped: 36659200 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:44.875393+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 118153216 unmapped: 27082752 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1331760 data_alloc: 218103808 data_used: 13864960
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:45.875715+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112107520 unmapped: 33128448 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f981d000/0x0/0x4ffc00000, data 0x1d8049d/0x1e51000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [0,0,0,0,0,0,0,0,0,14])
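This heartbeat is the first in the burst whose op hist is non-empty ([0,...,0,14] — read here as a power-of-two histogram of queued-op ages) and whose statfs fields have moved, matching the write burst the kv-sync thread reported: stored data and allocated extents each grow by ~10 MiB while available space drops by the same amount. The deltas against the earlier heartbeats:

    MiB = 1024 ** 2
    # Hex fields copied from the heartbeat lines before and after the burst.
    before = {"available": 0x4fa25f000, "stored": 0x133d48e, "alloc": 0x140d000}
    after = {"available": 0x4f981d000, "stored": 0x1d8049d, "alloc": 0x1e51000}
    for key in before:
        print(f"{key:9s}: {(after[key] - before[key]) / MiB:+6.1f} MiB")
    # available: -10.3, stored: +10.3, alloc: +10.3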
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:46.876156+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025eb94c00 session 0x56025d75e1e0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 36577280 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723c00 session 0x56025c07de00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025ba3c400 session 0x56025b96d2c0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7b000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7a800 session 0x56025c2083c0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:47.876507+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723800 session 0x56025d75eb40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723400 session 0x560259c03860
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108290048 unmapped: 36945920 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723400 session 0x56025c0d2d20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722800 session 0x56025d75fe00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:48.876807+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108290048 unmapped: 36945920 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8dbf000/0x0/0x4ffc00000, data 0x27de49d/0x28af000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [0,0,0,0,0,0,0,0,1])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:49.877365+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7b000 session 0x56025a2b34a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723000 session 0x56025b6d1e00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108298240 unmapped: 36937728 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1398318 data_alloc: 218103808 data_used: 13869056
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723c00 session 0x56025a788b40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:50.877817+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722c00 session 0x56025c0e6f00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108298240 unmapped: 36937728 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722800 session 0x56025a4754a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723c00 session 0x560259b52000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:51.878047+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723000 session 0x5602598d4780
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108265472 unmapped: 36970496 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723400 session 0x56025a79b860
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723800 session 0x56025a79ab40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723c00 session 0x56025999ab40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:52.878333+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723000 session 0x56025c2774a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8dbf000/0x0/0x4ffc00000, data 0x27de49d/0x28af000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108265472 unmapped: 36970496 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722800 session 0x56025d90e1e0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 6.248156548s of 10.819179535s, submitted: 63
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723400 session 0x56025c07c5a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8dbf000/0x0/0x4ffc00000, data 0x27de49d/0x28af000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722800 session 0x56025c2234a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:53.878557+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8dbf000/0x0/0x4ffc00000, data 0x27de49d/0x28af000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [0,0,0,0,0,0,1])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723800 session 0x560259b1fc20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108265472 unmapped: 36970496 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723c00 session 0x560259b1e960
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7a800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7b000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:54.878968+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108281856 unmapped: 36954112 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722c00 session 0x56025c222960
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723000 session 0x56025a79b2c0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1408982 data_alloc: 218103808 data_used: 13869056
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7a800 session 0x56025a8723c0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7b000 session 0x560259b22960
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:55.879399+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108634112 unmapped: 36601856 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:56.879639+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108658688 unmapped: 36577280 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:57.880002+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 36552704 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:58.880219+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8d91000/0x0/0x4ffc00000, data 0x2808503/0x28dd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 108683264 unmapped: 36552704 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:38:59.880450+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8d91000/0x0/0x4ffc00000, data 0x2808503/0x28dd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 109608960 unmapped: 35627008 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1443276 data_alloc: 234881024 data_used: 18096128
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:00.880766+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 112582656 unmapped: 32653312 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8d91000/0x0/0x4ffc00000, data 0x2808503/0x28dd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:01.880978+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8d91000/0x0/0x4ffc00000, data 0x2808503/0x28dd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 116989952 unmapped: 28246016 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:02.881205+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 119250944 unmapped: 25985024 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:03.881463+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 119250944 unmapped: 25985024 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:04.881703+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8d91000/0x0/0x4ffc00000, data 0x2808503/0x28dd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 119250944 unmapped: 25985024 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1531276 data_alloc: 234881024 data_used: 29814784
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:05.881899+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121102336 unmapped: 24133632 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:06.882127+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8d91000/0x0/0x4ffc00000, data 0x2808503/0x28dd000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121135104 unmapped: 24100864 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:07.882337+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121659392 unmapped: 23576576 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:08.882512+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121659392 unmapped: 23576576 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:09.882718+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025eb95000 session 0x56025a6e1a40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 15.274926186s of 16.816574097s, submitted: 28
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025eb94000 session 0x5602598d5a40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121659392 unmapped: 23576576 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1547784 data_alloc: 234881024 data_used: 31969280
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cda1000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:10.882933+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cda1000 session 0x56025c0e2780
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121675776 unmapped: 23560192 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:11.883349+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8f9f000/0x0/0x4ffc00000, data 0x25fb4f3/0x26cf000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121716736 unmapped: 23519232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:12.883707+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723c00 session 0x56025bfba960
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025ba3c400 session 0x56025c07c000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7a800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121716736 unmapped: 23519232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:13.883901+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7a800 session 0x56025c0623c0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120741888 unmapped: 24494080 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7b000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7b000 session 0x560259b1ed20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cda1000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cda1000 session 0x56025c0e2f00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723c00 session 0x56025c0e23c0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025ba3c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025ba3c400 session 0x56025a2b2780
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:14.884109+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7a800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7a800 session 0x56025c063c20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7b000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7b000 session 0x5602598bcf00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121167872 unmapped: 24068096 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1516696 data_alloc: 234881024 data_used: 29810688
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:15.884356+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121167872 unmapped: 24068096 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:16.884600+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121167872 unmapped: 24068096 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:17.884915+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8f11000/0x0/0x4ffc00000, data 0x2688532/0x275c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121200640 unmapped: 24035328 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:18.885167+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121200640 unmapped: 24035328 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:19.885415+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121208832 unmapped: 24027136 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1516696 data_alloc: 234881024 data_used: 29810688
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:20.885625+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121208832 unmapped: 24027136 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:21.885955+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121208832 unmapped: 24027136 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:22.886186+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb94000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 12.641468048s of 13.075722694s, submitted: 79
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8f11000/0x0/0x4ffc00000, data 0x2688532/0x275c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025eb94000 session 0x56025a6e12c0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121217024 unmapped: 24018944 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:23.886512+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121217024 unmapped: 24018944 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:24.886714+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121217024 unmapped: 24018944 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1517309 data_alloc: 234881024 data_used: 29810688
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:25.887028+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121241600 unmapped: 23994368 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:26.887345+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8f11000/0x0/0x4ffc00000, data 0x2688555/0x275d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122454016 unmapped: 22781952 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:27.887598+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122527744 unmapped: 22708224 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:28.887823+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122552320 unmapped: 22683648 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:29.888006+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8f11000/0x0/0x4ffc00000, data 0x2688555/0x275d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122585088 unmapped: 22650880 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1537789 data_alloc: 234881024 data_used: 32591872
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:30.888264+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122585088 unmapped: 22650880 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:31.888504+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122585088 unmapped: 22650880 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:32.888750+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 9.404064178s of 10.049774170s, submitted: 7
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122667008 unmapped: 22568960 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:33.888947+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123805696 unmapped: 21430272 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:34.889156+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8e36000/0x0/0x4ffc00000, data 0x275b555/0x2830000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 129884160 unmapped: 15351808 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1611137 data_alloc: 234881024 data_used: 32829440
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:35.889407+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 130719744 unmapped: 14516224 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:36.889711+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 132194304 unmapped: 13041664 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:37.889911+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 132268032 unmapped: 12967936 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:38.890127+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 132268032 unmapped: 12967936 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:39.890576+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8152000/0x0/0x4ffc00000, data 0x343f555/0x3514000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 132276224 unmapped: 12959744 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1662761 data_alloc: 234881024 data_used: 33730560
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:40.890758+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 132284416 unmapped: 12951552 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:41.891003+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 132284416 unmapped: 12951552 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:42.891209+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131956736 unmapped: 13279232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:43.900793+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131956736 unmapped: 13279232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:44.901025+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131956736 unmapped: 13279232 heap: 145235968 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8138000/0x0/0x4ffc00000, data 0x3461555/0x3536000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1654897 data_alloc: 234881024 data_used: 33730560
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:45.901404+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7b000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 11.470405579s of 13.082652092s, submitted: 158
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 138895360 unmapped: 10018816 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7b000 session 0x56025c0621e0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:46.901695+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025eb95000 session 0x56025a2bdc20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cda0c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cda0c00 session 0x56025c2225a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22cc00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025c22cc00 session 0x5602598bd4a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025c22c400 session 0x56025c924d20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 15556608 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:47.902020+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 15556608 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:48.902341+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133373952 unmapped: 15540224 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:49.902591+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133382144 unmapped: 15532032 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1729517 data_alloc: 234881024 data_used: 33730560
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:50.902803+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f792c000/0x0/0x4ffc00000, data 0x3c6c5b7/0x3d42000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133382144 unmapped: 15532032 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:51.903029+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025c22c400 session 0x56025c0e6b40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 15556608 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:52.903440+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f792b000/0x0/0x4ffc00000, data 0x3c6c5da/0x3d43000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 15556608 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22cc00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:53.903683+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 15556608 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:54.903890+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133357568 unmapped: 15556608 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1729922 data_alloc: 234881024 data_used: 33742848
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:55.904093+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 132521984 unmapped: 16392192 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:56.904367+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 10.772935867s of 11.063573837s, submitted: 61
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025c22cc00 session 0x56025b97ed20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 132898816 unmapped: 16015360 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:57.904587+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7b000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025cd7b000 session 0x560259b1ed20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8133000/0x0/0x4ffc00000, data 0x3464578/0x353a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131457024 unmapped: 17457152 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:58.906139+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8133000/0x0/0x4ffc00000, data 0x3464578/0x353a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8133000/0x0/0x4ffc00000, data 0x3464555/0x3539000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131457024 unmapped: 17457152 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:39:59.906406+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f8133000/0x0/0x4ffc00000, data 0x3464555/0x3539000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131465216 unmapped: 17448960 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1664360 data_alloc: 234881024 data_used: 32956416
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:00.906799+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 134193152 unmapped: 14721024 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:01.907634+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f7676000/0x0/0x4ffc00000, data 0x3f1b555/0x3ff0000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 134201344 unmapped: 14712832 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:02.908005+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 134389760 unmapped: 14524416 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:03.908322+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f75cf000/0x0/0x4ffc00000, data 0x3fc2555/0x4097000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 134389760 unmapped: 14524416 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:04.908590+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 134389760 unmapped: 14524416 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1762668 data_alloc: 234881024 data_used: 33112064
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:05.909005+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 134397952 unmapped: 14516224 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:06.909602+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 9.508942604s of 10.104267120s, submitted: 143
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133242880 unmapped: 15671296 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:07.909948+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f75d1000/0x0/0x4ffc00000, data 0x3fc8555/0x409d000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133382144 unmapped: 15532032 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:08.910438+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722c00 session 0x56025c07c960
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133390336 unmapped: 15523840 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:09.910670+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133390336 unmapped: 15523840 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:10.910893+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1760452 data_alloc: 234881024 data_used: 33099776
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f75ac000/0x0/0x4ffc00000, data 0x3fed555/0x40c2000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cda0c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133398528 unmapped: 15515648 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:11.911148+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133398528 unmapped: 15515648 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:12.911456+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:13.911735+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133398528 unmapped: 15515648 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:14.911966+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 133611520 unmapped: 15302656 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:15.912200+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131956736 unmapped: 16957440 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1758984 data_alloc: 234881024 data_used: 33935360
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:16.912529+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131956736 unmapped: 16957440 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f75ac000/0x0/0x4ffc00000, data 0x3fed555/0x40c2000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:17.912755+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131956736 unmapped: 16957440 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722800 session 0x56025c0ec000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b723800 session 0x56025b6d0f00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:18.913034+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131915776 unmapped: 16998400 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 11.391749382s of 11.534832001s, submitted: 32
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 ms_handle_reset con 0x56025b722800 session 0x56025c0e25a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:19.914336+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 heartbeat osd_stat(store_statfs(0x4f75ac000/0x0/0x4ffc00000, data 0x3fed555/0x40c2000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 handle_osd_map epochs [138,138], i have 137, src has [1,138]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 137 handle_osd_map epochs [138,138], i have 138, src has [1,138]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125747200 unmapped: 23166976 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:20.914603+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125747200 unmapped: 23166976 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1541034 data_alloc: 234881024 data_used: 24788992
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:21.914878+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 138 handle_osd_map epochs [138,139], i have 138, src has [1,139]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125812736 unmapped: 23101440 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 139 heartbeat osd_stat(store_statfs(0x4f8954000/0x0/0x4ffc00000, data 0x2c45c61/0x2d1a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 139 ms_handle_reset con 0x56025b722c00 session 0x56025b97e780
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 139 heartbeat osd_stat(store_statfs(0x4f8954000/0x0/0x4ffc00000, data 0x2c45c61/0x2d1a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:22.915146+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125812736 unmapped: 23101440 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:23.915408+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125812736 unmapped: 23101440 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:24.915652+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125820928 unmapped: 23093248 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:25.915969+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125820928 unmapped: 23093248 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1543156 data_alloc: 234881024 data_used: 24793088
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:26.916486+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125820928 unmapped: 23093248 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:27.916908+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125820928 unmapped: 23093248 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 139 heartbeat osd_stat(store_statfs(0x4f8954000/0x0/0x4ffc00000, data 0x2c45c61/0x2d1a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 139 heartbeat osd_stat(store_statfs(0x4f8954000/0x0/0x4ffc00000, data 0x2c45c61/0x2d1a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:28.917409+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125820928 unmapped: 23093248 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 139 heartbeat osd_stat(store_statfs(0x4f8954000/0x0/0x4ffc00000, data 0x2c45c61/0x2d1a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:29.917711+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125820928 unmapped: 23093248 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:30.918075+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125829120 unmapped: 23085056 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1543156 data_alloc: 234881024 data_used: 24793088
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _renew_subs
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 139 handle_osd_map epochs [140,140], i have 139, src has [1,140]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 12.436845779s of 12.615279198s, submitted: 41
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:31.918405+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125181952 unmapped: 23732224 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:32.918836+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125181952 unmapped: 23732224 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:33.919214+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125181952 unmapped: 23732224 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:34.919533+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125181952 unmapped: 23732224 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f894f000/0x0/0x4ffc00000, data 0x2c486c4/0x2d1e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:35.919842+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125181952 unmapped: 23732224 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1547110 data_alloc: 234881024 data_used: 24801280
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:36.920187+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125181952 unmapped: 23732224 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:37.920397+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125272064 unmapped: 23642112 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b723c00 session 0x56025c062b40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:38.921483+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125296640 unmapped: 23617536 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025c22c400 session 0x56025d75f860
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:39.921691+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 26025984 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:40.922029+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 26025984 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1415394 data_alloc: 234881024 data_used: 21823488
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f9129000/0x0/0x4ffc00000, data 0x1dcb63f/0x1e9f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:41.922431+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 26025984 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:42.922664+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 26025984 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f9129000/0x0/0x4ffc00000, data 0x1dcb63f/0x1e9f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:43.922844+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 26025984 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:44.923031+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 26025984 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:45.923254+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f9129000/0x0/0x4ffc00000, data 0x1dcb63f/0x1e9f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 26025984 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1415394 data_alloc: 234881024 data_used: 21823488
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 14.772927284s of 14.955487251s, submitted: 47
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:46.923490+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 26001408 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:47.923889+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122929152 unmapped: 25985024 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:48.924134+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122929152 unmapped: 25985024 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:49.924479+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122929152 unmapped: 25985024 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:50.924750+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122929152 unmapped: 25985024 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1420990 data_alloc: 234881024 data_used: 22102016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f97cf000/0x0/0x4ffc00000, data 0x1dcb63f/0x1e9f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:51.924951+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025c22c400 session 0x56025b97f4a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124108800 unmapped: 24805376 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b722800 session 0x56025a7bda40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:52.925387+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124436480 unmapped: 24477696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f9430000/0x0/0x4ffc00000, data 0x21696a1/0x223e000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
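
Each heartbeat line embeds a store_statfs(...) snapshot of the OSD's backing store. A decoding sketch for the heartbeat above; the field order (available / internally reserved / total, then data stored / allocated) is assumed from the shape of the message rather than confirmed against the daemon source:

    import re

    stat = ("store_statfs(0x4f9430000/0x0/0x4ffc00000, data 0x21696a1/0x223e000, "
            "compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6)")

    nums = [int(x, 16) for x in re.findall(r"0x([0-9a-f]+)", stat)]
    available, reserved, total, stored, allocated = nums[:5]   # assumed order

    print(f"device: {total / 2**30:.1f} GiB, {1 - available / total:.2%} used")
    print(f"data  : {stored / 2**20:.1f} MiB stored in "
          f"{allocated / 2**20:.1f} MiB of allocations")
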
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:53.925875+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124436480 unmapped: 24477696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:54.926320+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b722c00 session 0x560259b1e960
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124436480 unmapped: 24477696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b723800 session 0x56025d75e3c0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:55.926715+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124436480 unmapped: 24477696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1450013 data_alloc: 234881024 data_used: 22102016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b723c00 session 0x56025d75f4a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:56.927065+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 9.964710236s of 10.208685875s, submitted: 51
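
The _kv_sync_thread utilization line gives the idle time, the measurement window and the number of submitted transactions, which is enough to back out an average commit cost. Worked from the figures above:

    idle, window, submitted = 9.964710236, 10.208685875, 51

    busy = window - idle    # time the kv sync thread actually spent committing
    print(f"busy {busy:.3f}s of {window:.3f}s -> {busy / window:.1%} utilization")
    print(f"~{busy / submitted * 1e3:.1f} ms per submitted transaction")
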
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b723c00 session 0x56025d75fe00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:57.927305+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:58.927615+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f940c000/0x0/0x4ffc00000, data 0x218d6a1/0x2262000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:40:59.928089+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:00.928314+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f940c000/0x0/0x4ffc00000, data 0x218d6a1/0x2262000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1465806 data_alloc: 234881024 data_used: 23711744
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:01.928529+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:02.928822+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:03.929309+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f940c000/0x0/0x4ffc00000, data 0x218d6a1/0x2262000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:04.929608+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:05.929986+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1481950 data_alloc: 234881024 data_used: 25804800
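
The paired rocksdb commit_cache_size lines print the high-priority pool ratio for two block caches after each resize. The printed values reduce to small rationals (2/7, 1/18, 4/71), which suggests simple shard-count ratios, though that is an inference from the printed digits only:

    from fractions import Fraction

    for r in ("0.285714", "0.0555556", "0.056338"):
        print(r, "=", Fraction(r).limit_denominator(100))
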
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:06.930496+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 10.207981110s of 10.273481369s, submitted: 18
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f940c000/0x0/0x4ffc00000, data 0x218d6a1/0x2262000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:07.930949+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124780544 unmapped: 24133632 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:08.931425+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124788736 unmapped: 24125440 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:09.931675+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124796928 unmapped: 24117248 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:10.932047+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124796928 unmapped: 24117248 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480718 data_alloc: 234881024 data_used: 25804800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:11.932427+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124805120 unmapped: 24109056 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f940c000/0x0/0x4ffc00000, data 0x218d6a1/0x2262000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:12.932800+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124805120 unmapped: 24109056 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:13.933196+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124805120 unmapped: 24109056 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:14.933431+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124805120 unmapped: 24109056 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:15.933707+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124805120 unmapped: 24109056 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480718 data_alloc: 234881024 data_used: 25804800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:16.934176+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124805120 unmapped: 24109056 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:17.934467+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f940c000/0x0/0x4ffc00000, data 0x218d6a1/0x2262000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 10.966763496s of 10.988698959s, submitted: 3
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025cda0c00 session 0x56025be8ef00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 124805120 unmapped: 24109056 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:18.934678+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120258560 unmapped: 28655616 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b723800 session 0x56025c066960
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:19.934958+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:20.935156+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1340116 data_alloc: 234881024 data_used: 17596416
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f9e95000/0x0/0x4ffc00000, data 0x17046a1/0x17d9000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:21.935516+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:22.935908+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:23.936442+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:24.936735+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:25.937129+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1340116 data_alloc: 234881024 data_used: 17596416
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:26.937529+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f9e95000/0x0/0x4ffc00000, data 0x17046a1/0x17d9000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:27.937779+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:28.938089+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:29.938361+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 120242176 unmapped: 28672000 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f9e95000/0x0/0x4ffc00000, data 0x17046a1/0x17d9000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:30.938590+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 12.631653786s of 12.728900909s, submitted: 25
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122511360 unmapped: 26402816 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1399472 data_alloc: 234881024 data_used: 17620992
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:31.938838+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122560512 unmapped: 26353664 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:32.939087+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121290752 unmapped: 27623424 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f971b000/0x0/0x4ffc00000, data 0x1e7e6a1/0x1f53000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:33.939478+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:34.939694+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f971b000/0x0/0x4ffc00000, data 0x1e7e6a1/0x1f53000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:35.940052+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1411456 data_alloc: 234881024 data_used: 18411520
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:36.940461+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:37.940863+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:38.941312+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:39.941596+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:40.942126+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1411472 data_alloc: 234881024 data_used: 18411520
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f971b000/0x0/0x4ffc00000, data 0x1e7e6a1/0x1f53000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:41.942508+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f971b000/0x0/0x4ffc00000, data 0x1e7e6a1/0x1f53000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:42.942927+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121356288 unmapped: 27557888 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:43.943213+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121364480 unmapped: 27549696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:44.943639+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121364480 unmapped: 27549696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:45.943889+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121364480 unmapped: 27549696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1411472 data_alloc: 234881024 data_used: 18411520
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:46.944382+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f971b000/0x0/0x4ffc00000, data 0x1e7e6a1/0x1f53000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121364480 unmapped: 27549696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:47.944662+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121364480 unmapped: 27549696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:48.945018+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121364480 unmapped: 27549696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:49.945495+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121364480 unmapped: 27549696 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:50.945808+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121372672 unmapped: 27541504 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1411472 data_alloc: 234881024 data_used: 18411520
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:51.946128+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 121372672 unmapped: 27541504 heap: 148914176 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:52.946361+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 21.937910080s of 22.197715759s, submitted: 64
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f971b000/0x0/0x4ffc00000, data 0x1e7e6a1/0x1f53000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [1,0,0,0,1,3,1])
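
This is the only heartbeat in the window whose op hist is non-empty. The line does not print the bucket boundaries, so only the total is recoverable with certainty; the sketch below just sums the counts:

    hist = [1, 0, 0, 0, 1, 3, 1]   # op hist from the heartbeat above
    # Bucket widths are not part of the message (assumed latency buckets),
    # so treat anything beyond the total as an assumption.
    print(f"{sum(hist)} ops recorded in the histogram window")
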
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025c22c400 session 0x56025a2b25a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122314752 unmapped: 34480128 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:53.946694+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122314752 unmapped: 34480128 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:54.946890+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122314752 unmapped: 34480128 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:55.947166+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b62000/0x0/0x4ffc00000, data 0x2a376a1/0x2b0c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122314752 unmapped: 34480128 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1503518 data_alloc: 234881024 data_used: 18415616
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:56.947525+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122314752 unmapped: 34480128 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22cc00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025c22cc00 session 0x560259b1e1e0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:57.947773+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b723800 session 0x56025c062780
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122322944 unmapped: 34471936 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:58.948117+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025b723c00 session 0x56025a79ab40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025c22c400 session 0x56025a297860
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122478592 unmapped: 34316288 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:41:59.948398+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cda0c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122478592 unmapped: 34316288 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:00.948616+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122478592 unmapped: 34316288 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1508493 data_alloc: 234881024 data_used: 18415616
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:01.948861+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b3d000/0x0/0x4ffc00000, data 0x2a5b6b1/0x2b31000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122085376 unmapped: 34709504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:02.949060+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123609088 unmapped: 33185792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 10.425390244s of 10.618241310s, submitted: 27
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:03.949298+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:04.949588+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:05.949933+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1594241 data_alloc: 234881024 data_used: 30023680
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:06.950198+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:07.950367+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b3b000/0x0/0x4ffc00000, data 0x2a5c6b1/0x2b32000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:08.950626+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:09.950892+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:10.951221+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b3b000/0x0/0x4ffc00000, data 0x2a5c6b1/0x2b32000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b3b000/0x0/0x4ffc00000, data 0x2a5c6b1/0x2b32000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1594241 data_alloc: 234881024 data_used: 30023680
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:11.951485+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:12.951751+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:13.952120+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:14.952464+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:15.952787+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b3b000/0x0/0x4ffc00000, data 0x2a5c6b1/0x2b32000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1594241 data_alloc: 234881024 data_used: 30023680
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:16.953122+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b3b000/0x0/0x4ffc00000, data 0x2a5c6b1/0x2b32000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:17.953495+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:18.953714+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:19.954148+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:20.954495+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128581632 unmapped: 28213248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1594241 data_alloc: 234881024 data_used: 30023680
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:21.954695+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b3b000/0x0/0x4ffc00000, data 0x2a5c6b1/0x2b32000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128589824 unmapped: 28205056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:22.955053+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b3b000/0x0/0x4ffc00000, data 0x2a5c6b1/0x2b32000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128589824 unmapped: 28205056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:23.955432+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128589824 unmapped: 28205056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:24.955735+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128589824 unmapped: 28205056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:25.956004+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128589824 unmapped: 28205056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1594241 data_alloc: 234881024 data_used: 30023680
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:26.956310+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128589824 unmapped: 28205056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:27.956506+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 24.193414688s of 24.200784683s, submitted: 1
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 heartbeat osd_stat(store_statfs(0x4f8b3b000/0x0/0x4ffc00000, data 0x2a5c6b1/0x2b32000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128589824 unmapped: 28205056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 ms_handle_reset con 0x56025c22c000 session 0x56025a229c20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:28.956759+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128589824 unmapped: 28205056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:29.957063+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _renew_subs
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 140 handle_osd_map epochs [141,141], i have 140, src has [1,141]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128622592 unmapped: 28172288 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:30.957507+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128622592 unmapped: 28172288 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1599701 data_alloc: 234881024 data_used: 30031872
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:31.957840+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8b37000/0x0/0x4ffc00000, data 0x2a5e23d/0x2b36000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128622592 unmapped: 28172288 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:32.958098+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128622592 unmapped: 28172288 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:33.958408+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128630784 unmapped: 28164096 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:34.958715+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 134488064 unmapped: 22306816 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:35.959046+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8b37000/0x0/0x4ffc00000, data 0x2a5e23d/0x2b36000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [0,1])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 134979584 unmapped: 21815296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1679465 data_alloc: 234881024 data_used: 30416896
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:36.959456+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 135323648 unmapped: 21471232 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:37.959703+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b728c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b728c00 session 0x56025c07dc20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 9.719120979s of 10.028226852s, submitted: 74
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b723800 session 0x56025c07d0e0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b723c00 session 0x56025c07c5a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c000 session 0x56025a2b3c20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 136052736 unmapped: 20742144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:38.959938+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c400 session 0x56025a2b2780
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 136052736 unmapped: 20742144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b728800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b728800 session 0x56025a2b34a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:39.960173+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b723800 session 0x56025a2b21e0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 136077312 unmapped: 20717568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:40.960409+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f7b32000/0x0/0x4ffc00000, data 0x3a5c23d/0x3b34000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 3600.1 total, 600.0 interval
                                            Cumulative writes: 9523 writes, 36K keys, 9523 commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 9523 writes, 2506 syncs, 3.80 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 2049 writes, 7169 keys, 2049 commit groups, 1.0 writes per commit group, ingest: 7.34 MB, 0.01 MB/s
                                            Interval WAL: 2049 writes, 834 syncs, 2.46 writes per sync, written: 0.01 GB, 0.01 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 136077312 unmapped: 20717568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:41.960944+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.056338
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 1736163 data_alloc: 234881024 data_used: 30232576
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b723c00 session 0x56025a7883c0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 136077312 unmapped: 20717568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:42.961370+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c000 session 0x56025a7885a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f7b32000/0x0/0x4ffc00000, data 0x3a5c23d/0x3b34000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c400 session 0x5602598bcf00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 136110080 unmapped: 20684800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:43.961836+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cd7a800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025cd7a800 session 0x5602598bd4a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 135733248 unmapped: 21061632 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:44.962040+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025eb95000 session 0x56025c062b40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025cda0c00 session 0x56025a4741e0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:45.962281+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125075456 unmapped: 31719424 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b723c00 session 0x56025b974d20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:46.962543+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125075456 unmapped: 31719424 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1480820 data_alloc: 234881024 data_used: 18042880
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8d8d000/0x0/0x4ffc00000, data 0x24b123d/0x2589000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:47.962888+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125116416 unmapped: 31678464 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: mgrc ms_handle_reset ms_handle_reset con 0x56025c048800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: mgrc reconnect Terminating session with v2:192.168.122.100:6800/1088804496
Oct 11 03:05:44 compute-0 ceph-osd[205667]: mgrc reconnect Starting new session with [v2:192.168.122.100:6800/1088804496,v1:192.168.122.100:6801/1088804496]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: get_auth_request con 0x56025cd7a800 auth_method 0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: mgrc handle_mgr_configure stats_period=5
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:48.963323+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125353984 unmapped: 31440896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:49.963636+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 126304256 unmapped: 30490624 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:50.964066+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 126304256 unmapped: 30490624 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8d8d000/0x0/0x4ffc00000, data 0x24b123d/0x2589000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 13.589024544s of 13.861538887s, submitted: 67
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b722c00 session 0x56025c07cb40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:51.964442+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b722800 session 0x56025c0e7860
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 126304256 unmapped: 30490624 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1525144 data_alloc: 234881024 data_used: 24367104
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b722c00 session 0x56025a6e1a40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:52.964692+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9bfe000/0x0/0x4ffc00000, data 0x19981db/0x1a6f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:53.964973+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:54.965292+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x5602594adc00 session 0x56025c07cf00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:55.965707+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9c22000/0x0/0x4ffc00000, data 0x19741db/0x1a4b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:56.966123+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1403522 data_alloc: 234881024 data_used: 19841024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:57.966489+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:58.966897+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:42:59.967412+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:00.967783+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9c22000/0x0/0x4ffc00000, data 0x19741db/0x1a4b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:01.968197+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1403522 data_alloc: 234881024 data_used: 19841024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:02.968541+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:03.968830+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:04.969120+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9c22000/0x0/0x4ffc00000, data 0x19741db/0x1a4b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:05.969469+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9c22000/0x0/0x4ffc00000, data 0x19741db/0x1a4b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:06.969739+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1403522 data_alloc: 234881024 data_used: 19841024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9c22000/0x0/0x4ffc00000, data 0x19741db/0x1a4b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:07.970043+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:08.970357+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:09.970594+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:10.970901+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:11.971336+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9c22000/0x0/0x4ffc00000, data 0x19741db/0x1a4b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1403522 data_alloc: 234881024 data_used: 19841024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:12.971671+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:13.971908+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:14.972168+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:15.973956+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:16.974362+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1403522 data_alloc: 234881024 data_used: 19841024
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9c22000/0x0/0x4ffc00000, data 0x19741db/0x1a4b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:17.974706+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:18.974968+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 125624320 unmapped: 31170560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 27.218790054s of 27.380781174s, submitted: 36
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:19.975199+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 126967808 unmapped: 29827072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:20.975443+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127369216 unmapped: 29425664 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:21.975825+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127369216 unmapped: 29425664 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:22.976035+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127369216 unmapped: 29425664 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:23.976402+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127369216 unmapped: 29425664 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:24.976937+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127369216 unmapped: 29425664 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:25.977254+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127369216 unmapped: 29425664 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:26.977801+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:27.978518+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:28.978848+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:29.979078+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:30.979391+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:31.979935+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:32.980660+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:33.981141+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:34.981826+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:35.982569+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:36.983354+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:37.983633+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:38.983952+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:39.984427+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:40.985016+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:41.985527+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:42.985936+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:43.986368+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:44.986841+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:45.987339+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:46.987643+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:47.987944+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:48.988421+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:49.988933+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:50.989327+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:51.989580+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:52.989996+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:53.990445+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:54.990748+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:55.991383+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:56.991884+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:57.992723+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:58.993170+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:43:59.993603+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:00.994096+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:01.994559+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:02.994836+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:03.995135+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:04.995418+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:05.995732+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:06.996151+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:07.996462+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:08.997084+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:09.997497+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:10.998089+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:11.998495+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:12.998811+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025ba3c000 session 0x56025c0ec3c0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025cda0c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:13.999323+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:14.999736+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96e3000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:16.000138+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:17.000543+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1461068 data_alloc: 234881024 data_used: 20312064
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:18.000879+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:19.001308+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127377408 unmapped: 29417472 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 60.488044739s of 60.699516296s, submitted: 57
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:20.001541+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127410176 unmapped: 29384704 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:21.001883+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127426560 unmapped: 29368320 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:22.002094+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127492096 unmapped: 29302784 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458868 data_alloc: 234881024 data_used: 20365312
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:23.002292+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:24.002703+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:25.003032+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:26.003303+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:27.003552+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:28.003861+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:29.004327+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:30.004708+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:31.004910+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:32.005319+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:33.005734+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:34.006186+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:35.006607+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:36.007036+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:37.007423+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:38.007826+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:39.008185+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:40.008529+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:41.008887+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:42.009168+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:43.009582+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:44.009916+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:45.010350+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:46.010742+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:47.011206+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127549440 unmapped: 29245440 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:48.011770+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:49.011969+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:50.012392+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:51.012581+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:52.012736+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:53.012946+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:54.013604+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:55.014014+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:56.014436+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:57.014965+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:58.015695+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:44:59.016073+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:00.016423+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:01.016758+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:02.017120+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:03.017441+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:04.017850+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:05.018305+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:06.018645+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:07.019065+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:08.019501+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:09.019900+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:10.020303+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:11.020615+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
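
The recurring osd.0 141 heartbeat osd_stat(...) lines carry a store_statfs snapshot in hex. Reading the first triple as available / internally reserved / total bytes and "data" as stored / allocated (this field order is an assumption based on how store_statfs_t is usually printed), this is a ~20 GiB OSD holding ~31 MiB of object data and heartbeating peers osd.1 and osd.2. A parsing sketch:

    import re

    line = ("osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, "
            "data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, "
            "omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])")
    m = re.search(r"store_statfs\((0x[0-9a-f]+)/(0x[0-9a-f]+)/(0x[0-9a-f]+), "
                  r"data (0x[0-9a-f]+)/(0x[0-9a-f]+)", line)
    avail, reserved, total, stored, allocated = (int(x, 16) for x in m.groups())
    print(f"total {total / 2**30:.1f} GiB, available {avail / 2**30:.1f} GiB")
    print(f"data: {stored / 2**20:.1f} MiB stored, {allocated / 2**20:.1f} MiB allocated")
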
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:12.020925+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:13.021347+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
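
The paired rocksdb "commit_cache_size High Pri Pool Ratio" lines that precede each _resize_shards report are the resize pass committing a high-priority slice for each cache it manages, presumably one line per cache. The ratios themselves land on small fractions, as a slice of a few fixed-size buckets would:

    from fractions import Fraction
    for ratio in (0.285714, 0.0555556):
        print(ratio, "~", Fraction(ratio).limit_denominator(100))
    # 0.285714 ~ 2/7, 0.0555556 ~ 1/18
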
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:14.021638+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:15.021986+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:16.022173+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:17.022566+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:18.023004+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:19.023478+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f96eb000/0x0/0x4ffc00000, data 0x1eac1db/0x1f83000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:20.023717+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:21.023938+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:22.024190+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:23.024431+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127557632 unmapped: 29237248 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1458796 data_alloc: 234881024 data_used: 20365312
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025eb95000 session 0x56025c2223c0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c400 session 0x56025c222b40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025d73c000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025d73c000 session 0x56025c0e2d20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025d73c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025d73c400 session 0x56025c0e3a40
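
The handle_auth_request / ms_handle_reset pairs mark short-lived inbound connections: the messenger answers a new connection with an auth challenge, and ms_handle_reset fires when the peer drops it again. Matching the two by connection pointer shows each challenge followed by a reset on the same con (the pointers are recycled allocations, e.g. 0x56025c22c400 reappears later with a different session). A pairing sketch, with the addresses copied from the lines above:

    import re

    events = [
        "monclient: handle_auth_request added challenge on 0x56025eb95000",
        "osd.0 141 ms_handle_reset con 0x56025eb95000 session 0x56025c2223c0",
        "monclient: handle_auth_request added challenge on 0x56025c22c400",
        "osd.0 141 ms_handle_reset con 0x56025c22c400 session 0x56025c222b40",
    ]
    pending = {}
    for ev in events:
        if (m := re.search(r"challenge on (0x[0-9a-f]+)", ev)):
            pending[m.group(1)] = True
        elif (m := re.search(r"ms_handle_reset con (0x[0-9a-f]+)", ev)):
            pending.pop(m.group(1), None)
            print(f"{m.group(1)}: challenged, then reset by the peer")
    print("connections still pending:", list(pending) or "none")
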
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:24.024656+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127565824 unmapped: 29229056 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 63.423805237s of 64.131790161s, submitted: 136
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b722c00 session 0x56025c0e21e0
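
The bluestore _kv_sync_thread utilization line a few lines above is the thread that batches transaction commits into RocksDB reporting on itself: idle 63.42 s of a 64.13 s window with 136 submissions, i.e. about 98.9% idle. Restated:

    idle, window, submitted = 63.423805237, 64.131790161, 136
    print(f"idle: {100 * idle / window:.1f}%")           # ~98.9% idle
    print(f"rate: {submitted / window:.2f} submits/s")   # ~2.1 per second
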
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c400 session 0x56025c0e34a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025d73c000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025d73c000 session 0x56025cc1e1e0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025eb95000 session 0x56025b97fa40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025d73c800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025d73c800 session 0x56025c0e7680
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:25.025044+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127000576 unmapped: 29794304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f966a000/0x0/0x4ffc00000, data 0x1f2c1eb/0x2004000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:26.025396+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127000576 unmapped: 29794304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:27.025879+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127000576 unmapped: 29794304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:28.026442+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f966a000/0x0/0x4ffc00000, data 0x1f2c1eb/0x2004000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127000576 unmapped: 29794304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1466955 data_alloc: 234881024 data_used: 20365312
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:29.026861+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127000576 unmapped: 29794304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b722c00 session 0x56025b96cd20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c400 session 0x56025b6d03c0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:30.027333+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127016960 unmapped: 29777920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025d73c000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025d73c000 session 0x56025d75f680
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025eb95000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:31.027649+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127393792 unmapped: 29401088 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025eb95000 session 0x56025b6d1c20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025d73cc00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025d73d000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:32.027875+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127393792 unmapped: 29401088 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:33.028093+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127393792 unmapped: 29401088 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1473892 data_alloc: 234881024 data_used: 20369408
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:34.028376+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127393792 unmapped: 29401088 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:35.028572+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127401984 unmapped: 29392896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:36.028761+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127401984 unmapped: 29392896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:37.029026+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127401984 unmapped: 29392896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:38.029271+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127401984 unmapped: 29392896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1477892 data_alloc: 234881024 data_used: 20897792
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:39.029562+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127401984 unmapped: 29392896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:40.029865+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127401984 unmapped: 29392896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:41.030133+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127401984 unmapped: 29392896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:42.030313+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127401984 unmapped: 29392896 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:43.030500+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127410176 unmapped: 29384704 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1477892 data_alloc: 234881024 data_used: 20897792
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:44.030733+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127410176 unmapped: 29384704 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:45.031312+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127410176 unmapped: 29384704 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:46.031643+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127410176 unmapped: 29384704 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:47.032305+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127410176 unmapped: 29384704 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:48.032687+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127410176 unmapped: 29384704 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1477892 data_alloc: 234881024 data_used: 20897792
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:49.033073+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127410176 unmapped: 29384704 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:50.033329+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:51.033657+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:52.034071+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:53.034347+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1477892 data_alloc: 234881024 data_used: 20897792
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:54.034712+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:55.035052+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:56.035428+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:57.035866+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:58.036101+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1477892 data_alloc: 234881024 data_used: 20897792
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:45:59.036467+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127418368 unmapped: 29376512 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:00.036772+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127426560 unmapped: 29368320 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:01.037009+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127426560 unmapped: 29368320 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:02.037453+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127426560 unmapped: 29368320 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:03.037723+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127426560 unmapped: 29368320 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1477892 data_alloc: 234881024 data_used: 20897792
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:04.037976+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127426560 unmapped: 29368320 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:05.038601+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127426560 unmapped: 29368320 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f963f000/0x0/0x4ffc00000, data 0x1f561fb/0x202f000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x458f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:06.038973+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 42.074275970s of 42.221923828s, submitted: 23
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127426560 unmapped: 29368320 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:07.039352+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:08.039722+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1545754 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:09.040219+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:10.040560+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:11.040868+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
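
This heartbeat carries a visibly different statfs payload from the 0x4f963f000 samples above: available space shrank while stored data and metadata grew, so the OSD absorbed a burst of writes during this window. Differencing the two samples (same field-order assumption as in the earlier parsing sketch):

    MiB = 2 ** 20
    before = {"avail": 0x4f963f000, "stored": 0x1f561fb,
              "alloc": 0x202f000, "meta": 0x458f9c6}
    after  = {"avail": 0x4f899a000, "stored": 0x27eb1fb,
              "alloc": 0x28c4000, "meta": 0x499f9c6}
    for key in before:
        print(f"{key:>6}: {(after[key] - before[key]) / MiB:+7.2f} MiB")
    # available drops by exactly the allocated-data growth plus the
    # metadata growth: 0x895000 + 0x410000 == 0xca5000 (~12.6 MiB).
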
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:12.041312+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:13.041609+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:14.041864+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:15.042053+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:16.042316+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:17.042724+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:18.042993+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:19.043432+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:20.043643+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:21.043991+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:22.044457+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:23.044681+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127664128 unmapped: 29130752 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:24.044947+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127664128 unmapped: 29130752 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:25.045366+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127664128 unmapped: 29130752 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:26.045844+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127664128 unmapped: 29130752 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 20.101493835s of 20.269266129s, submitted: 29
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:27.046630+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:28.046975+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:29.047417+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:30.047751+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:31.048003+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:32.048387+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:33.048795+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:34.049535+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:35.049857+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:36.050416+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:37.050815+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:38.051219+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:39.051765+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:40.052147+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:41.052467+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:42.052676+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:43.052909+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:44.053104+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:45.053682+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127672320 unmapped: 29122560 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:46.053888+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:47.054294+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:48.054565+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:49.054779+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:50.055205+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:51.055474+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:52.055652+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:53.055866+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:54.056143+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:55.056623+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:56.056944+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:57.057348+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127680512 unmapped: 29114368 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:58.057521+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127688704 unmapped: 29106176 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:46:59.057760+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127688704 unmapped: 29106176 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:00.058137+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127688704 unmapped: 29106176 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:01.058502+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127688704 unmapped: 29106176 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:02.058707+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:03.058971+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:04.059337+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:05.059575+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:06.059782+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:07.060127+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:08.060357+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:09.060754+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:10.060993+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:11.061201+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:12.061389+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:13.061682+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:14.061991+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:15.062455+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127696896 unmapped: 29097984 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:16.062719+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:17.063388+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:18.063727+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:19.063983+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:20.064302+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:21.064722+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:22.065053+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:23.065427+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:24.065726+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:25.066124+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127705088 unmapped: 29089792 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:26.066354+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127713280 unmapped: 29081600 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:27.066691+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127713280 unmapped: 29081600 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:28.067014+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:29.067431+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:30.067652+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:31.067917+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:32.068368+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:33.068634+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:34.068961+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:35.069390+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:36.069706+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:37.070118+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:38.070485+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:39.070851+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:40.071104+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:41.071447+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127721472 unmapped: 29073408 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:42.071839+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:43.072193+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:44.072676+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:45.073122+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:46.073501+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:47.073956+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:48.074219+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:49.074694+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:50.074978+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:51.075321+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127729664 unmapped: 29065216 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:52.075544+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127737856 unmapped: 29057024 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:53.075782+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127737856 unmapped: 29057024 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:54.076096+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127737856 unmapped: 29057024 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:55.076363+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127737856 unmapped: 29057024 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:56.076563+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127737856 unmapped: 29057024 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:57.076878+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:58.077095+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:47:59.077461+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:00.077729+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:01.078147+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:02.078429+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:03.078849+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:04.079283+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:05.079661+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:06.080054+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127746048 unmapped: 29048832 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:07.080415+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:08.080724+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:09.081140+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:10.081551+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:11.081890+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:12.082431+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:13.082862+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:14.083329+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:15.083567+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127754240 unmapped: 29040640 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:16.083864+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:17.084188+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:18.084619+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:19.084810+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550032 data_alloc: 234881024 data_used: 21028864
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:20.085130+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:21.085376+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:22.085623+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:23.085816+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:24.086340+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550512 data_alloc: 234881024 data_used: 21090304
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:25.086724+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:26.087145+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22dc00 session 0x56025c276780
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b722c00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:27.087663+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:28.087850+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:29.088038+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127762432 unmapped: 29032448 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550512 data_alloc: 234881024 data_used: 21090304
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:30.088451+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127770624 unmapped: 29024256 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:31.088835+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:32.089084+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:33.089325+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:34.089709+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550512 data_alloc: 234881024 data_used: 21090304
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:35.089955+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:36.090358+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:37.090635+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:38.090976+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:39.091174+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550512 data_alloc: 234881024 data_used: 21090304
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:40.091478+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:41.093102+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127778816 unmapped: 29016064 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:42.093664+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:43.094943+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:44.095197+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550512 data_alloc: 234881024 data_used: 21090304
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:45.095491+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:46.095771+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:47.096679+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:48.097025+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:49.097632+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550512 data_alloc: 234881024 data_used: 21090304
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:50.097866+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:51.098147+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:52.098391+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:53.098686+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:54.099005+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550512 data_alloc: 234881024 data_used: 21090304
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:55.099651+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127787008 unmapped: 29007872 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:56.099993+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127795200 unmapped: 28999680 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:57.100422+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127795200 unmapped: 28999680 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:58.100777+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127795200 unmapped: 28999680 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:48:59.101199+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1550512 data_alloc: 234881024 data_used: 21090304
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127795200 unmapped: 28999680 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:00.101615+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28991488 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:01.102067+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127803392 unmapped: 28991488 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f899a000/0x0/0x4ffc00000, data 0x27eb1fb/0x28c4000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:02.102432+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 155.622192383s of 155.630996704s, submitted: 1
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:03.102804+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:04.103006+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:05.103320+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:06.103634+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:07.104059+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:08.104420+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:09.104704+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:10.105014+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:11.105338+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:12.105594+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:13.105917+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:14.106314+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:15.106550+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:16.108660+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:17.109099+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:18.109538+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:19.109927+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:20.110338+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:21.110717+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127811584 unmapped: 28983296 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:22.111078+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:23.111407+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:24.111646+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:25.111978+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:26.112439+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:27.112759+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:28.113056+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:29.113466+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:30.113648+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:31.113969+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:32.114420+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127819776 unmapped: 28975104 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:33.115061+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
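Annotation: in each tune_memory line, mapped + unmapped equals heap exactly (here 127827968 + 28966912 = 156794880), and old mem == new mem shows the priority-cache autotuner holding a steady ~2.65 GiB budget well under the 4 GiB target. A quick check of that invariant, with the field order taken from the lines themselves:

```python
# Sketch: sanity-check "prioritycache tune_memory" lines. In this
# capture mapped + unmapped always equals heap, and old mem == new mem,
# i.e. the autotuner holds a steady budget far below the 4 GiB target.
import re

TUNE = re.compile(
    r"tune_memory target: (\d+) mapped: (\d+) unmapped: (\d+) "
    r"heap: (\d+) old mem: (\d+) new mem: (\d+)"
)

def check(line):
    m = TUNE.search(line)
    if not m:
        return None
    target, mapped, unmapped, heap, old, new = map(int, m.groups())
    assert mapped + unmapped == heap, "heap accounting mismatch"
    return {
        "headroom_bytes": target - mapped,   # how far under target
        "budget_stable": old == new,         # tuner left cache alone
        "budget_gib": new / 2**30,
    }

print(check(
    "prioritycache tune_memory target: 4294967296 mapped: 127827968 "
    "unmapped: 28966912 heap: 156794880 old mem: 2845415832 "
    "new mem: 2845415832"
))
# headroom ~3.88 GiB below target; budget stable at ~2.65 GiB.
```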
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:34.115376+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
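Annotation: the paired "High Pri Pool Ratio" values that precede each mempool resize are exact fractions (0.285714 ≈ 2/7, 0.0555556 ≈ 1/18), recomputed every ~5 s alongside _resize_shards; reading them as high-priority shares of two rocksdb block-cache pools is our inference. A tiny sketch recovering the fractions:

```python
# Sketch: the two "High Pri Pool Ratio" values printed each resize pass
# are suspiciously exact fractions; Fraction recovers them directly.
# Interpreting them as hipri/(hipri+lopri) pool splits is our inference.
from fractions import Fraction

for ratio in (0.285714, 0.0555556):
    approx = Fraction(ratio).limit_denominator(100)
    print(f"{ratio} ~ {approx} = {float(approx):.6f}")
# 0.285714 ~ 2/7 = 0.285714
# 0.0555556 ~ 1/18 = 0.055556
```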
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:35.115721+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:36.116019+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:37.116416+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:38.116747+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:39.122893+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:40.123395+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:41.123719+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127827968 unmapped: 28966912 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:42.124102+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127836160 unmapped: 28958720 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:43.124389+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127836160 unmapped: 28958720 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:44.124785+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127836160 unmapped: 28958720 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:45.125200+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127836160 unmapped: 28958720 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:46.125726+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127836160 unmapped: 28958720 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:47.126107+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127836160 unmapped: 28958720 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:48.126709+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127836160 unmapped: 28958720 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:49.127031+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:50.127607+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127844352 unmapped: 28950528 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:51.127976+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:52.128391+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:53.128960+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:54.129173+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:55.129439+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:56.129690+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:57.130209+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:58.130731+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:49:59.131081+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:00.131292+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:01.131685+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:02.132117+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:03.132335+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:04.132647+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:05.132866+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127852544 unmapped: 28942336 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:06.133308+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:07.133708+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:08.134081+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:09.134594+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:10.134862+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:11.135130+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:12.135560+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:13.135892+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:14.136156+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:15.136388+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:16.136753+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127860736 unmapped: 28934144 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:17.137341+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:18.137739+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:19.138081+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:20.138465+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:21.138692+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:22.138952+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:23.139335+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:24.139696+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:25.139910+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:26.140141+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:27.140504+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:28.140693+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:29.141036+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:30.141280+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:31.141480+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:32.141650+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:33.141856+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:34.142064+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:35.142309+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 234881024 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:36.142663+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:37.143609+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:38.144157+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127868928 unmapped: 28925952 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:39.144371+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127877120 unmapped: 28917760 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:40.144768+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:41.145041+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:42.145390+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:43.145633+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:44.145908+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:45.146391+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:46.146694+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:47.147376+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:48.148345+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:49.148749+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:50.149039+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:51.149472+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:52.149840+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:53.150705+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:54.151060+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127885312 unmapped: 28909568 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:55.151348+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:56.151620+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:57.151960+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:58.152220+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:50:59.152722+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:00.153099+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:01.153374+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:02.153710+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:03.154178+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:04.154346+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:05.154569+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1552212 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:06.155023+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:07.155446+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8998000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:08.155843+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 126.354400635s of 126.374946594s, submitted: 14
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:09.156207+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:10.156575+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:11.156957+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:12.157730+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127893504 unmapped: 28901376 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:13.158103+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:14.158433+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:15.158919+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:16.159401+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:17.159789+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:18.160203+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:19.160582+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:20.160934+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:21.161433+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:22.161768+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:23.162131+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:24.162524+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:25.162891+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:26.163181+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:27.163597+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:28.163845+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:29.164073+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127901696 unmapped: 28893184 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:30.164409+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127909888 unmapped: 28884992 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:31.164594+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:32.164934+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:33.165167+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:34.165429+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:35.165889+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:36.166337+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:37.166809+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:38.167157+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:39.167524+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:40.167999+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:41.168482+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127918080 unmapped: 28876800 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:42.168802+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:43.169045+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:44.169501+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:45.169853+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:46.170347+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:47.170692+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:48.171038+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:49.171536+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:50.171759+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:51.172139+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127926272 unmapped: 28868608 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:52.172491+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127934464 unmapped: 28860416 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:53.172861+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127934464 unmapped: 28860416 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:54.173213+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127934464 unmapped: 28860416 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:55.173514+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127934464 unmapped: 28860416 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:56.173793+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127934464 unmapped: 28860416 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:57.174157+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127934464 unmapped: 28860416 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:58.174630+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127934464 unmapped: 28860416 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:51:59.174910+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127934464 unmapped: 28860416 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:00.175339+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127942656 unmapped: 28852224 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:01.175701+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:02.175893+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:03.176703+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:04.177069+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:05.177412+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:06.177763+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:07.178079+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:08.178372+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:09.178748+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:10.179143+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:11.179656+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:12.180006+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127950848 unmapped: 28844032 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:13.180295+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:14.180620+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:15.180982+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:16.181629+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:17.182072+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:18.182399+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:19.182654+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:20.182905+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:21.183138+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:22.183594+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:23.183885+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127959040 unmapped: 28835840 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:24.184395+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127967232 unmapped: 28827648 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:25.184812+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127967232 unmapped: 28827648 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:26.185171+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127967232 unmapped: 28827648 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:27.185554+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127967232 unmapped: 28827648 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:28.185937+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127967232 unmapped: 28827648 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:29.186352+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127967232 unmapped: 28827648 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:30.187711+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127967232 unmapped: 28827648 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:31.187973+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127975424 unmapped: 28819456 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:32.188297+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127975424 unmapped: 28819456 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:33.188509+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127975424 unmapped: 28819456 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:34.194349+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127975424 unmapped: 28819456 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:35.194593+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127975424 unmapped: 28819456 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:36.194957+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127975424 unmapped: 28819456 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:37.195311+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127975424 unmapped: 28819456 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:38.195546+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127975424 unmapped: 28819456 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:39.195848+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:40.196077+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:41.196507+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 4200.1 total, 600.0 interval
                                            Cumulative writes: 10K writes, 38K keys, 10K commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 10K writes, 2784 syncs, 3.64 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 615 writes, 1777 keys, 615 commit groups, 1.0 writes per commit group, ingest: 1.43 MB, 0.00 MB/s
                                            Interval WAL: 615 writes, 278 syncs, 2.21 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:42.196813+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:43.197193+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:44.197456+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:45.197651+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:46.198122+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:47.198546+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:48.198941+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:49.199262+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:50.199694+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:51.199977+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:52.200266+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:53.200653+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:54.200933+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127983616 unmapped: 28811264 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:55.201428+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:56.201727+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:57.202075+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:58.202445+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:52:59.202685+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:00.203066+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:01.203573+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:02.203789+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:03.204135+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:04.204719+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:05.204963+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:06.205214+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:07.205694+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 127991808 unmapped: 28803072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:08.205974+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128000000 unmapped: 28794880 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:09.206336+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128000000 unmapped: 28794880 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:10.206556+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:11.206917+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:12.207199+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:13.207427+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:14.207800+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:15.208324+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:16.208777+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:17.209303+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:18.209763+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:19.210035+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:20.210330+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:21.210644+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128008192 unmapped: 28786688 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:22.211004+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:23.211510+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:24.211967+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:25.212383+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:26.212777+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:27.213332+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:28.213542+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:29.214035+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:30.214492+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:31.214681+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:32.214938+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:33.215641+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:34.216032+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:35.216410+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128016384 unmapped: 28778496 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:36.216966+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:37.217344+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:38.217569+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:39.217861+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:40.218148+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:41.218540+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:42.218843+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:43.219452+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:44.219878+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128024576 unmapped: 28770304 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:45.220161+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128032768 unmapped: 28762112 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:46.220398+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128032768 unmapped: 28762112 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:47.220844+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128032768 unmapped: 28762112 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:48.221098+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128032768 unmapped: 28762112 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:49.221492+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128032768 unmapped: 28762112 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:50.221837+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:51.222344+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:52.222827+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:53.223023+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:54.223407+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:55.223788+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:56.224135+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:57.224556+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:58.224979+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:53:59.225417+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128040960 unmapped: 28753920 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:00.226087+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:01.226379+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:02.226595+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:03.228951+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:04.229168+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:05.229362+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:06.229566+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:07.230032+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:08.230343+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:09.230869+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:10.231382+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:11.231700+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:12.232559+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:13.232950+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128049152 unmapped: 28745728 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:14.234046+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128057344 unmapped: 28737536 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:15.234413+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128057344 unmapped: 28737536 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:16.234635+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128057344 unmapped: 28737536 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:17.235145+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128057344 unmapped: 28737536 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:18.235391+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128057344 unmapped: 28737536 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:19.235846+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128057344 unmapped: 28737536 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:20.236369+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128057344 unmapped: 28737536 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:21.236706+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 192.754379272s of 192.776382446s, submitted: 3
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128073728 unmapped: 28721152 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:22.237023+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128106496 unmapped: 28688384 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:23.237290+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128163840 unmapped: 28631040 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:24.237483+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128221184 unmapped: 28573696 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:25.237835+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128221184 unmapped: 28573696 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:26.238185+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128221184 unmapped: 28573696 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:27.238632+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128221184 unmapped: 28573696 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:28.238897+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128221184 unmapped: 28573696 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:29.239328+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128221184 unmapped: 28573696 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:30.239684+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128221184 unmapped: 28573696 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:31.240094+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128221184 unmapped: 28573696 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:32.240412+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:33.240744+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:34.241216+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:35.241558+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:36.242071+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:37.242443+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:38.242819+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:39.243376+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:40.243693+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:41.244056+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:42.244459+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:43.244913+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:44.245578+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:45.246021+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:46.246537+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:47.247060+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:48.247374+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:49.247785+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:50.248058+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:51.248427+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:52.248801+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:53.249128+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:54.249444+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:55.249679+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:56.250069+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:57.250537+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:58.250881+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:54:59.251377+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:00.251677+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:01.252154+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:02.252564+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:03.252846+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128229376 unmapped: 28565504 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:04.253139+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:05.253407+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:06.253697+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:07.254143+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:08.254365+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:09.254759+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:10.255092+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:11.255599+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:12.255855+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:13.256091+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:14.256452+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:15.256931+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f8999000/0x0/0x4ffc00000, data 0x27ec1fb/0x28c5000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:16.257187+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:17.257519+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1549572 data_alloc: 218103808 data_used: 21078016
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128245760 unmapped: 28549120 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:18.257960+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128245760 unmapped: 28549120 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:19.258400+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c000 session 0x56025bfba3c0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b723800 session 0x5602598bda40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 128237568 unmapped: 28557312 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 57.812805176s of 58.417026520s, submitted: 106
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:20.258611+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025c22c400 session 0x56025c222960
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f94d8000/0x0/0x4ffc00000, data 0x1cad1fb/0x1d86000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:21.258822+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:22.259159+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1411485 data_alloc: 218103808 data_used: 14237696
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:23.259440+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9503000/0x0/0x4ffc00000, data 0x1c831eb/0x1d5b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:24.259824+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9503000/0x0/0x4ffc00000, data 0x1c831eb/0x1d5b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:25.260318+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9503000/0x0/0x4ffc00000, data 0x1c831eb/0x1d5b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:26.260803+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:27.261477+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1411485 data_alloc: 218103808 data_used: 14237696
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:28.261724+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9503000/0x0/0x4ffc00000, data 0x1c831eb/0x1d5b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:29.262201+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9503000/0x0/0x4ffc00000, data 0x1c831eb/0x1d5b000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:30.262624+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:31.262967+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123895808 unmapped: 32899072 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 12.343958855s of 12.403041840s, submitted: 12
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025d73cc00 session 0x56025a2b25a0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:32.263423+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025d73d000 session 0x56025b9a21e0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1411129 data_alloc: 218103808 data_used: 14237696
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123904000 unmapped: 32890880 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:33.263807+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 ms_handle_reset con 0x56025b723800 session 0x56025b6a5c20
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:34.264386+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9e44000/0x0/0x4ffc00000, data 0x13441cb/0x141a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:35.264638+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9e44000/0x0/0x4ffc00000, data 0x13441cb/0x141a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:36.264896+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:37.265503+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1331102 data_alloc: 218103808 data_used: 13574144
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:38.265759+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:39.266068+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:40.266436+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 heartbeat osd_stat(store_statfs(0x4f9e44000/0x0/0x4ffc00000, data 0x13441cb/0x141a000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:41.266757+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123174912 unmapped: 33619968 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 141 handle_osd_map epochs [141,142], i have 141, src has [1,142]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 9.774773598s of 10.002117157s, submitted: 51
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:42.267031+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1334403 data_alloc: 218103808 data_used: 13578240
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 33570816 heap: 156794880 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 142 ms_handle_reset con 0x56025c22c000 session 0x56025b97e780
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:43.267459+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 142 heartbeat osd_stat(store_statfs(0x4f9e41000/0x0/0x4ffc00000, data 0x1345d8d/0x141c000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 131612672 unmapped: 33579008 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 142 handle_osd_map epochs [142,143], i have 142, src has [1,143]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:44.267826+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 143 ms_handle_reset con 0x56025c22c400 session 0x56025b6d1680
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025d73cc00
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:45.268096+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _renew_subs
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 143 handle_osd_map epochs [144,144], i have 143, src has [1,144]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 144 ms_handle_reset con 0x56025d73cc00 session 0x56025a297a40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:46.268511+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:47.269047+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1343540 data_alloc: 218103808 data_used: 13586432
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:48.269397+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:49.269809+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 144 heartbeat osd_stat(store_statfs(0x4f9e3a000/0x0/0x4ffc00000, data 0x1349507/0x1422000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:50.270012+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 144 heartbeat osd_stat(store_statfs(0x4f9e3a000/0x0/0x4ffc00000, data 0x1349507/0x1422000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:51.270615+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:52.271015+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1343540 data_alloc: 218103808 data_used: 13586432
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:53.271289+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:54.271727+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123224064 unmapped: 41967616 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 144 heartbeat osd_stat(store_statfs(0x4f9e3a000/0x0/0x4ffc00000, data 0x1349507/0x1422000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 144 handle_osd_map epochs [145,145], i have 144, src has [1,145]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 144 handle_osd_map epochs [145,145], i have 145, src has [1,145]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 12.784080505s of 12.961950302s, submitted: 25
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:55.272093+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:56.272490+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:57.272812+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345842 data_alloc: 218103808 data_used: 13586432
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:58.273106+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:55:59.273580+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:00.273963+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:01.274447+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:02.274759+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:03.275170+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:04.275562+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:05.275800+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:06.276066+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:07.276513+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:08.276986+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:09.277486+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:10.278211+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:11.278764+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:12.279028+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:13.279404+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:14.279685+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:15.279953+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:16.280433+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:17.280895+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:18.281374+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:19.281600+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:20.281859+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:21.282202+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:22.282619+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:23.282913+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:24.283298+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:25.283567+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:26.283937+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:27.284433+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:28.284682+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:29.285004+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:30.285448+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:31.285844+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:32.286201+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:33.286529+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:34.286904+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:35.287143+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:36.287521+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:37.287766+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:38.288156+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:39.288651+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:40.289037+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:41.289445+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:42.289715+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:43.290095+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:44.290521+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:45.290837+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:46.291461+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:47.292048+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:48.292307+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:49.292714+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:50.292971+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:51.293431+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:52.293729+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:53.294076+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:54.294408+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:55.294854+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:56.295428+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:57.296051+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:58.296406+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:56:59.296765+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:00.297173+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:01.297558+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:02.297998+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:03.298972+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:04.299185+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:05.299427+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:06.299758+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:07.300199+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:08.300420+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:09.300782+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:10.301024+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:11.301458+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:12.301764+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:13.301947+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:14.302138+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:15.302336+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:16.302609+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:17.302868+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:18.303114+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:19.303317+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:20.303526+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:21.303909+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:22.304107+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:23.304357+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:24.304565+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:25.304747+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:26.306393+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:27.306658+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123273216 unmapped: 41918464 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'config diff' '{prefix=config diff}'
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'config diff' '{prefix=config diff}' result is 0 bytes
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:28.306869+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'config show' '{prefix=config show}'
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'config show' '{prefix=config show}' result is 0 bytes
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123617280 unmapped: 41574400 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'counter dump' '{prefix=counter dump}'
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'counter dump' '{prefix=counter dump}' result is 0 bytes
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'counter schema' '{prefix=counter schema}'
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'counter schema' '{prefix=counter schema}' result is 0 bytes
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:29.307062+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 42008576 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:30.307376+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122552320 unmapped: 42639360 heap: 165191680 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'log dump' '{prefix=log dump}'
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:31.307615+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'log dump' '{prefix=log dump}' result is 0 bytes
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122552320 unmapped: 53682176 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'perf dump' '{prefix=perf dump}'
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'perf dump' '{prefix=perf dump}' result is 0 bytes
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'perf histogram dump' '{prefix=perf histogram dump}'
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'perf histogram dump' '{prefix=perf histogram dump}' result is 0 bytes
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'perf schema' '{prefix=perf schema}'
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'perf schema' '{prefix=perf schema}' result is 0 bytes
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:32.307968+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:33.308218+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:34.308446+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:35.308663+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:36.308945+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:37.309210+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:38.309584+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:39.309798+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:40.310008+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:41.310221+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:42.310441+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:43.310687+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:44.310887+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:45.311095+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:46.311347+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:47.311614+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:48.311877+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: mgrc ms_handle_reset ms_handle_reset con 0x56025cd7a800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: mgrc reconnect Terminating session with v2:192.168.122.100:6800/1088804496
Oct 11 03:05:44 compute-0 ceph-osd[205667]: mgrc reconnect Starting new session with [v2:192.168.122.100:6800/1088804496,v1:192.168.122.100:6801/1088804496]
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: get_auth_request con 0x56025d73d000 auth_method 0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: mgrc handle_mgr_configure stats_period=5
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:49.312105+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:50.312355+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:51.312598+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:52.313049+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:53.313634+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:54.313883+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:55.314530+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 ms_handle_reset con 0x56025b723c00 session 0x56025a2bc1e0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025b723800
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:56.314820+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:57.315168+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:58.315380+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:57:59.315594+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:00.315794+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:01.316201+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:02.316337+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:03.316574+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:04.316765+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:05.317022+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:06.317274+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:07.317645+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:08.317847+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:09.318088+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:10.318334+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:11.318849+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:12.319208+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:13.319612+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:14.320069+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:15.320338+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:16.320574+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:17.320900+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:18.321376+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:19.321792+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:20.322307+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:21.322716+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:22.322939+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:23.323421+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:24.323667+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:25.323968+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:26.324280+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:27.324540+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:28.324745+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:29.325106+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:30.325377+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:31.325597+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:32.325861+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:33.326075+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:34.326339+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:35.326716+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:36.327109+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:37.327348+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:38.327619+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:39.327822+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:40.328214+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:41.328665+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:42.329054+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:43.329446+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:44.329735+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:45.330085+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:46.330433+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:47.330812+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:48.331118+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:49.331520+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:50.331810+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:51.331999+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:52.332358+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:53.332838+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:54.333074+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:55.333429+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:56.333658+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:57.334098+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:58.334437+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:58:59.334990+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:00.335453+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:01.335889+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:02.336070+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:03.336452+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:04.336774+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:05.337028+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:06.337393+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:07.337832+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:08.338218+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:09.338650+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:10.338931+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:11.339330+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:12.339719+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:13.340118+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 ms_handle_reset con 0x56025cda0c00 session 0x56025b6a41e0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c000
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:14.340464+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:15.340695+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:16.341055+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:17.341413+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:18.341826+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:19.342115+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:20.342481+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:21.342770+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:22.343022+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:23.343320+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:24.343593+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:25.343964+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:26.344221+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:27.344771+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:28.344991+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:29.345216+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:30.345518+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:31.345743+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:32.346377+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:33.346742+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:34.347191+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:35.347582+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:36.347929+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:37.348461+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:38.348689+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:39.349076+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:40.349333+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:41.349730+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:42.350100+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122798080 unmapped: 53436416 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:43.350426+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:44.350833+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:45.351351+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:46.351647+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:47.352118+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:48.352638+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:49.353110+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:50.353400+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:51.353714+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:52.354088+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:53.354449+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:54.354911+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:55.355129+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:56.355530+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:57.355825+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:58.356033+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T02:59:59.356355+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:00.356781+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:01.357305+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:02.357573+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:03.357950+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:04.358164+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:05.358445+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:06.358829+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:07.359374+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:08.359840+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:09.360046+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:10.360471+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:11.360803+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:12.361039+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:13.361486+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122806272 unmapped: 53428224 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:14.361969+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 53420032 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:15.362407+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 53420032 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:16.362660+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 53420032 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:17.363078+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 53420032 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:18.363430+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 53420032 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:19.363771+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 53420032 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:20.364106+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 53420032 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:21.364368+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 53420032 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:22.364675+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 53420032 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:23.365133+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 53420032 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:24.365495+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 53420032 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:25.365881+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 53420032 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:26.366294+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 53420032 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:27.366612+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 53420032 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:28.366956+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 53420032 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:29.367507+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122814464 unmapped: 53420032 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:30.367889+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:31.369650+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:32.370224+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:33.370771+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:34.371119+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:35.371722+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:36.372177+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:37.372656+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:38.372936+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:39.373493+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:40.373902+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:41.374178+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:42.374534+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:43.374862+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:44.375477+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:45.376020+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:46.376378+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:47.376820+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:48.377342+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:49.377841+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122822656 unmapped: 53411840 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:50.378422+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:51.378904+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:52.379303+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:53.379606+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:54.380002+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:55.380339+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:56.380564+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:57.381003+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:58.381513+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:00:59.381964+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:00.382549+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:01.382958+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:02.383514+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:03.383781+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:04.384176+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:05.384611+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:06.385169+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:07.385699+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:08.385925+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:09.386140+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:10.386488+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:11.386720+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122830848 unmapped: 53403648 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:12.386957+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122855424 unmapped: 53379072 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:13.387317+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122855424 unmapped: 53379072 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:14.387532+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122855424 unmapped: 53379072 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:15.387872+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122855424 unmapped: 53379072 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:16.388817+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122855424 unmapped: 53379072 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:17.389102+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122855424 unmapped: 53379072 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:18.389378+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122855424 unmapped: 53379072 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:19.389753+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122855424 unmapped: 53379072 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:20.390124+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122855424 unmapped: 53379072 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:21.390513+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122855424 unmapped: 53379072 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:22.390871+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122855424 unmapped: 53379072 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:23.391112+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122855424 unmapped: 53379072 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:24.399615+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122855424 unmapped: 53379072 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:25.399864+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122855424 unmapped: 53379072 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:26.400179+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:27.400540+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:28.400868+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:29.401342+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:30.401708+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:31.402073+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:32.402455+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:33.402718+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:34.403006+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:35.403402+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:36.403868+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:37.404189+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:38.404540+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:39.404935+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:40.405350+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:41.405720+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:42.406200+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:43.406710+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:44.407144+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:45.407422+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:46.407826+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:47.408332+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:48.408690+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:49.409073+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:50.409442+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:51.409961+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122863616 unmapped: 53370880 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:52.410581+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122880000 unmapped: 53354496 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:53.410921+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122880000 unmapped: 53354496 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:54.411174+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122880000 unmapped: 53354496 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:55.411555+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122880000 unmapped: 53354496 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:56.411929+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122880000 unmapped: 53354496 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:57.412597+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122880000 unmapped: 53354496 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:58.412817+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122880000 unmapped: 53354496 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:01:59.413207+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122880000 unmapped: 53354496 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:00.413565+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122880000 unmapped: 53354496 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:01.413992+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122880000 unmapped: 53354496 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:02.414354+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122880000 unmapped: 53354496 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:03.414881+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122880000 unmapped: 53354496 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:04.415171+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122880000 unmapped: 53354496 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:05.415557+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122880000 unmapped: 53354496 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:06.416014+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122880000 unmapped: 53354496 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:07.416520+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122880000 unmapped: 53354496 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:08.416741+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 53346304 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:09.417081+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 53346304 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:10.417511+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 53346304 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:11.417899+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 53346304 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:12.418193+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 53346304 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:13.418470+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 53346304 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:14.418876+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 53346304 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:15.419136+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122888192 unmapped: 53346304 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:16.419420+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122896384 unmapped: 53338112 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:17.419760+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122896384 unmapped: 53338112 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:18.420012+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122896384 unmapped: 53338112 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:19.420529+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:20.421081+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:21.421494+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:22.421742+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:23.422000+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:24.422193+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:25.422476+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:26.422849+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:27.423139+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:28.423355+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:29.423603+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:30.424003+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:31.424409+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:32.424994+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:33.425698+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:34.426102+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:35.426495+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:36.426740+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:37.427126+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122904576 unmapped: 53329920 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:38.427592+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:39.428076+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:40.428341+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:41.428595+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                            ** DB Stats **
                                            Uptime(secs): 4800.1 total, 600.0 interval
                                            Cumulative writes: 10K writes, 39K keys, 10K commit groups, 1.0 writes per commit group, ingest: 0.03 GB, 0.01 MB/s
                                            Cumulative WAL: 10K writes, 2968 syncs, 3.55 writes per sync, written: 0.03 GB, 0.01 MB/s
                                            Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                            Interval writes: 387 writes, 929 keys, 387 commit groups, 1.0 writes per commit group, ingest: 0.30 MB, 0.00 MB/s
                                            Interval WAL: 387 writes, 184 syncs, 2.10 writes per sync, written: 0.00 GB, 0.00 MB/s
                                            Interval stall: 00:00:0.000 H:M:S, 0.0 percent
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:42.428808+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:43.429197+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:44.429506+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:45.429719+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:46.429951+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:47.430337+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:48.430618+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:49.430976+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:50.431500+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:51.431814+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:52.432190+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:53.432421+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:54.432667+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:55.433030+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122912768 unmapped: 53321728 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:56.433402+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122920960 unmapped: 53313536 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:57.433828+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122920960 unmapped: 53313536 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:58.434030+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122937344 unmapped: 53297152 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:02:59.434496+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122937344 unmapped: 53297152 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:00.434658+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122937344 unmapped: 53297152 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:01.434883+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122937344 unmapped: 53297152 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:02.435172+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122937344 unmapped: 53297152 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:03.435395+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122937344 unmapped: 53297152 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:04.435851+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122937344 unmapped: 53297152 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:05.436079+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122937344 unmapped: 53297152 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:06.436443+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122937344 unmapped: 53297152 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:07.436899+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122937344 unmapped: 53297152 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:08.437518+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122937344 unmapped: 53297152 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:09.437879+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122937344 unmapped: 53297152 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:10.438405+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122937344 unmapped: 53297152 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:11.438661+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122937344 unmapped: 53297152 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:12.438866+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _send_mon_message to mon.compute-0 at v2:192.168.122.100:3300/0
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122937344 unmapped: 53297152 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:13.439562+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122945536 unmapped: 53288960 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:14.439960+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122945536 unmapped: 53288960 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:15.440439+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122945536 unmapped: 53288960 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:16.440680+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122945536 unmapped: 53288960 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:17.441128+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122945536 unmapped: 53288960 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:18.441455+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122945536 unmapped: 53288960 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:19.441716+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122945536 unmapped: 53288960 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:20.442010+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122953728 unmapped: 53280768 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:21.442470+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122953728 unmapped: 53280768 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:22.442815+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122953728 unmapped: 53280768 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:23.443163+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122953728 unmapped: 53280768 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:24.443554+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122953728 unmapped: 53280768 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:25.443875+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122953728 unmapped: 53280768 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:26.444454+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 ms_handle_reset con 0x56025b722c00 session 0x56025a79ab40
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: handle_auth_request added challenge on 0x56025c22c400
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122953728 unmapped: 53280768 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:27.444905+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122953728 unmapped: 53280768 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:28.445449+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122953728 unmapped: 53280768 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:29.445834+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122953728 unmapped: 53280768 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:30.447380+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122953728 unmapped: 53280768 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:31.448109+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122953728 unmapped: 53280768 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:32.448357+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122953728 unmapped: 53280768 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:33.448663+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122953728 unmapped: 53280768 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:34.449051+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122961920 unmapped: 53272576 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:35.449422+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122961920 unmapped: 53272576 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:36.449663+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122961920 unmapped: 53272576 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:37.450188+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122961920 unmapped: 53272576 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:38.450548+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122961920 unmapped: 53272576 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:39.450884+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122961920 unmapped: 53272576 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:40.451287+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122961920 unmapped: 53272576 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:41.451517+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122961920 unmapped: 53272576 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:42.451815+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122961920 unmapped: 53272576 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:43.452392+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122961920 unmapped: 53272576 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:44.452864+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122961920 unmapped: 53272576 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:45.453368+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122970112 unmapped: 53264384 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:46.453650+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122970112 unmapped: 53264384 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:47.454061+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122970112 unmapped: 53264384 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:48.454463+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122970112 unmapped: 53264384 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:49.454878+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122970112 unmapped: 53264384 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:50.455350+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:51.455582+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:52.455942+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:53.456345+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:54.456828+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:55.457380+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:56.457869+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:57.458509+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:58.458945+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:03:59.459301+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:00.459620+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:01.460002+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:02.460562+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:03.460790+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:04.461206+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:05.461740+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:06.462137+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:07.462607+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:08.463561+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:09.463988+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122978304 unmapped: 53256192 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:10.464326+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122986496 unmapped: 53248000 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:11.464527+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122986496 unmapped: 53248000 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:12.465530+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122986496 unmapped: 53248000 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:13.465760+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122994688 unmapped: 53239808 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:14.466347+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122994688 unmapped: 53239808 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:15.466592+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122994688 unmapped: 53239808 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:16.466892+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:17.467197+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122994688 unmapped: 53239808 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:18.467634+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122994688 unmapped: 53239808 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:19.468024+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122994688 unmapped: 53239808 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:20.468451+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122994688 unmapped: 53239808 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1346002 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:21.468741+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 122994688 unmapped: 53239808 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore(/var/lib/ceph/osd/ceph-0) _kv_sync_thread utilization: idle 506.597503662s of 506.617523193s, submitted: 9
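
The _kv_sync_thread line above is easy to misread; the arithmetic says the KV sync thread was idle for all but ~20 ms of the ~506 s window, across its 9 submits:

    # Utilization arithmetic for the _kv_sync_thread line above.
    idle_s, window_s = 506.597503662, 506.617523193
    busy_s = window_s - idle_s   # ~0.020 s over the whole window
    print(f"idle {100 * idle_s / window_s:.4f}% ({busy_s * 1000:.1f} ms busy, 9 submits)")
    # -> idle 99.9960% (20.0 ms busy, 9 submits)
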
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e38000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [1])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:22.469120+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123019264 unmapped: 53215232 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e39000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:23.469341+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123052032 unmapped: 53182464 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:24.469679+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123125760 unmapped: 53108736 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:25.469984+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e39000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345122 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:26.470337+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:27.470798+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:28.471040+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:29.471504+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:30.471880+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345122 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:31.472317+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e39000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:32.472715+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:33.473082+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:34.473460+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:35.473839+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345122 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:36.474325+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e39000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:37.474897+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:38.475370+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:39.475774+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:40.476127+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345122 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:41.476396+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:42.476882+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e39000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:43.477327+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e39000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:44.477612+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:45.477833+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345122 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:46.478085+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:47.478390+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e39000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:48.478594+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:49.478866+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:50.479297+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345122 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:51.479653+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:52.481154+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:53.481364+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e39000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:54.481647+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:55.482055+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345122 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:56.482398+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:57.482720+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:58.482963+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e39000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:04:59.483182+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:00.483572+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345122 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:01.483809+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:02.484151+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:03.484378+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:04.484557+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e39000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:05.484769+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345122 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:06.484999+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:07.485478+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e39000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:08.485789+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:09.485998+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:10.486204+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123183104 unmapped: 53051392 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e39000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Oct 11 03:05:44 compute-0 ceph-osd[205667]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Oct 11 03:05:44 compute-0 ceph-osd[205667]: bluestore.MempoolThread(0x5602581a3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1345122 data_alloc: 218103808 data_used: 13590528
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'config diff' '{prefix=config diff}'
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'config diff' '{prefix=config diff}' result is 0 bytes
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:11.486466+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'config show' '{prefix=config show}'
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'config show' '{prefix=config show}' result is 0 bytes
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'counter dump' '{prefix=counter dump}'
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'counter dump' '{prefix=counter dump}' result is 0 bytes
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'counter schema' '{prefix=counter schema}'
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'counter schema' '{prefix=counter schema}' result is 0 bytes
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123191296 unmapped: 53043200 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:12.486660+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123346944 unmapped: 52887552 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: tick
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_tickets
Oct 11 03:05:44 compute-0 ceph-osd[205667]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T03:05:13.486847+0000)
Oct 11 03:05:44 compute-0 ceph-osd[205667]: prioritycache tune_memory target: 4294967296 mapped: 123248640 unmapped: 52985856 heap: 176234496 old mem: 2845415832 new mem: 2845415832
Oct 11 03:05:44 compute-0 ceph-osd[205667]: osd.0 145 heartbeat osd_stat(store_statfs(0x4f9e39000/0x0/0x4ffc00000, data 0x134af8a/0x1425000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x499f9c6), peers [1,2] op hist [])
Oct 11 03:05:44 compute-0 ceph-osd[205667]: do_command 'log dump' '{prefix=log dump}'
Oct 11 03:05:44 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.15999 -' entity='client.admin' cmd=[{"prefix": "device ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "detail": "detail"} v 0) v1
Oct 11 03:05:45 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2681782851' entity='client.admin' cmd=[{"prefix": "df", "detail": "detail"}]: dispatch
Oct 11 03:05:45 compute-0 ceph-mon[191930]: pgmap v2648: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:45 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2742660004' entity='client.admin' cmd=[{"prefix": "config dump"}]: dispatch
Oct 11 03:05:45 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2681782851' entity='client.admin' cmd=[{"prefix": "df", "detail": "detail"}]: dispatch
Oct 11 03:05:45 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df"} v 0) v1
Oct 11 03:05:45 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2666631825' entity='client.admin' cmd=[{"prefix": "df"}]: dispatch
Oct 11 03:05:45 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2649: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
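
The pgmap lines' "60 GiB / 60 GiB avail" is consistent with osd.0's store_statfs total above, if its peers [1,2] are the same size (an assumption); the same product, 64411926528 bytes, reappears verbatim in the pg_autoscaler lines below:

    # Cross-check: three OSDs of osd.0's size reproduce both the pgmap capacity
    # ("60 GiB / 60 GiB avail") and the byte count the pg_autoscaler logs below.
    per_osd_total = 0x4ffc00000          # from the osd.0 store_statfs lines
    cluster_bytes = 3 * per_osd_total    # assumes osd.1 and osd.2 are the same size
    assert cluster_bytes == 64411926528  # matches "effective_target_ratio ... 64411926528"
    assert round(cluster_bytes / 2**30) == 60
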
Oct 11 03:05:46 compute-0 nova_compute[356901]: 2025-10-11 03:05:46.073 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "fs dump"} v 0) v1
Oct 11 03:05:46 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3969184369' entity='client.admin' cmd=[{"prefix": "fs dump"}]: dispatch
Oct 11 03:05:46 compute-0 ceph-mon[191930]: from='client.15999 -' entity='client.admin' cmd=[{"prefix": "device ls", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:46 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2666631825' entity='client.admin' cmd=[{"prefix": "df"}]: dispatch
Oct 11 03:05:46 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3969184369' entity='client.admin' cmd=[{"prefix": "fs dump"}]: dispatch
Oct 11 03:05:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "fs ls"} v 0) v1
Oct 11 03:05:46 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/784954238' entity='client.admin' cmd=[{"prefix": "fs ls"}]: dispatch
Oct 11 03:05:46 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.16009 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:46 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:05:47 compute-0 ceph-mon[191930]: pgmap v2649: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:47 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/784954238' entity='client.admin' cmd=[{"prefix": "fs ls"}]: dispatch
Oct 11 03:05:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mds stat"} v 0) v1
Oct 11 03:05:47 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1422819360' entity='client.admin' cmd=[{"prefix": "mds stat"}]: dispatch
Oct 11 03:05:47 compute-0 systemd[1]: Starting Hostname Service...
Oct 11 03:05:47 compute-0 systemd[1]: Started Hostname Service.
Oct 11 03:05:47 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2650: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:47 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump"} v 0) v1
Oct 11 03:05:47 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3799854016' entity='client.admin' cmd=[{"prefix": "mon dump"}]: dispatch
Oct 11 03:05:47 compute-0 nova_compute[356901]: 2025-10-11 03:05:47.973 2 DEBUG oslo_service.periodic_task [None req-40d8b8a0-0d98-4a25-9d29-4da616e43fa9 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210
Oct 11 03:05:48 compute-0 nova_compute[356901]: 2025-10-11 03:05:48.279 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:48 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.16015 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:48 compute-0 ceph-mon[191930]: from='client.16009 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:48 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1422819360' entity='client.admin' cmd=[{"prefix": "mds stat"}]: dispatch
Oct 11 03:05:48 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3799854016' entity='client.admin' cmd=[{"prefix": "mon dump"}]: dispatch
Oct 11 03:05:48 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd blocklist ls"} v 0) v1
Oct 11 03:05:48 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1841295084' entity='client.admin' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
Oct 11 03:05:49 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.16019 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:49 compute-0 podman[502603]: 2025-10-11 03:05:49.19075259 +0000 UTC m=+0.081045496 container health_status 7ab5da42c653c03f032ee1c844ed4635fd8ac494431f1d84b580a7c3398b99ce (image=quay.io/prometheus/node-exporter:v1.5.0, name=node_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_data={'image': 'quay.io/prometheus/node-exporter:v1.5.0', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9100:9100'], 'command': ['--web.config.file=/etc/node_exporter/node_exporter.yaml', '--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl'], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck node_exporter', 'mount': '/var/lib/openstack/healthchecks/node_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/node_exporter.yaml:/etc/node_exporter/node_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/node_exporter/tls:z', '/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=edpm, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Oct 11 03:05:49 compute-0 podman[502600]: 2025-10-11 03:05:49.220800238 +0000 UTC m=+0.115073444 container health_status 47cfb34da43d90c74b596cf80e443f8f29f5efdc75c73d9ef1586457289ff35c (image=quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified, name=ceilometer_agent_ipmi, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_id=edpm, container_name=ceilometer_agent_ipmi, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified', 'user': 'ceilometer', 'restart': 'always', 'command': 'kolla_start', 'security_opt': 'label:type:ceilometer_polling_t', 'privileged': 'true', 'net': 'host', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'test': '/openstack/healthcheck ipmi', 'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi'}, 'volumes': ['/var/lib/openstack/config/telemetry-power-monitoring:/var/lib/openstack/config/:z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:z', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry-power-monitoring/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/config/telemetry-power-monitoring/ceilometer_prom_exporter.yaml:/etc/ceilometer/ceilometer_prom_exporter.yaml:z', '/var/lib/openstack/certs/telemetry-power-monitoring/default:/etc/ceilometer/tls:z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_ipmi:/openstack:ro,z']})
Oct 11 03:05:49 compute-0 podman[502602]: 2025-10-11 03:05:49.222213039 +0000 UTC m=+0.116801220 container health_status 6beb01b4376c87bfe8b8fe0c0b793e27775de3bcd0dd2b56099bec6ef443e27c (image=quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified, name=openstack_network_exporter, health_status=healthy, health_failing_streak=0, health_log=, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=1755695350, io.openshift.tags=minimal rhel9, name=ubi9-minimal, vcs-ref=f4b088292653bbf5ca8188a5e59ffd06a8671d4b, version=9.6, architecture=x86_64, config_data={'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified', 'restart': 'always', 'recreate': True, 'privileged': True, 'ports': ['9105:9105'], 'command': [], 'net': 'host', 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml'}, 'healthcheck': {'test': '/openstack/healthcheck openstack-netwo', 'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/openstack_network_exporter/tls:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, config_id=edpm, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., managed_by=edpm_ansible, com.redhat.component=ubi9-minimal-container, container_name=openstack_network_exporter, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, io.buildah.version=1.33.7, io.openshift.expose-services=, url=https://catalog.redhat.com/en/search?searchType=containers, maintainer=Red Hat, Inc., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., build-date=2025-08-20T13:12:41, distribution-scope=public)
Oct 11 03:05:49 compute-0 ceph-mon[191930]: pgmap v2650: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:49 compute-0 ceph-mon[191930]: from='client.16015 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:49 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1841295084' entity='client.admin' cmd=[{"prefix": "osd blocklist ls"}]: dispatch
Oct 11 03:05:49 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.16021 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:49 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2651: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd dump"} v 0) v1
Oct 11 03:05:50 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1683851121' entity='client.admin' cmd=[{"prefix": "osd dump"}]: dispatch
Oct 11 03:05:50 compute-0 ceph-mon[191930]: from='client.16019 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:50 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1683851121' entity='client.admin' cmd=[{"prefix": "osd dump"}]: dispatch
Oct 11 03:05:50 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd numa-status"} v 0) v1
Oct 11 03:05:50 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3339135919' entity='client.admin' cmd=[{"prefix": "osd numa-status"}]: dispatch
Oct 11 03:05:50 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.16027 -' entity='client.admin' cmd=[{"prefix": "osd perf", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:51 compute-0 nova_compute[356901]: 2025-10-11 03:05:51.077 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.16029 -' entity='client.admin' cmd=[{"prefix": "osd pool autoscale-status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0009191400908380543 of space, bias 1.0, pg target 0.2757420272514163 quantized to 32 (current 32)
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
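
The autoscaler's "pg target" values above are reproducible as usage ratio x bias x 300; the factor 300 is inferred purely from the numbers (presumably mon_target_pg_per_osd, default 100, times the 3 OSDs — the log itself does not state it). A sketch over four of the pools:

    # Reproducing the pg_autoscaler "pg target" values from the lines above.
    # The factor 300 is an inference that happens to fit every pool; it is not
    # printed anywhere in this excerpt.
    pools = [
        (".mgr",               7.185749983720779e-06, 1.0, 0.0021557249951162337),
        ("vms",                0.0005513950275118838, 1.0, 0.16541850825356513),
        ("images",             0.0009191400908380543, 1.0, 0.2757420272514163),
        ("cephfs.cephfs.meta", 5.087256625643029e-07, 4.0, 0.0006104707950771635),
    ]
    for name, ratio, bias, target in pools:
        assert abs(ratio * bias * 300 - target) < 1e-12, name
    # The logged "quantized to" values (16 or 32 above) are the autoscaler's
    # rounding of these targets; the exact rounding rule is not derivable from
    # this excerpt alone.
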
Oct 11 03:05:51 compute-0 ceph-mon[191930]: from='client.16021 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:51 compute-0 ceph-mon[191930]: pgmap v2651: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:51 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3339135919' entity='client.admin' cmd=[{"prefix": "osd numa-status"}]: dispatch
Oct 11 03:05:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool ls", "detail": "detail"} v 0) v1
Oct 11 03:05:51 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/423493374' entity='client.admin' cmd=[{"prefix": "osd pool ls", "detail": "detail"}]: dispatch
Oct 11 03:05:51 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2652: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:51 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:05:52 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd stat"} v 0) v1
Oct 11 03:05:52 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/788610639' entity='client.admin' cmd=[{"prefix": "osd stat"}]: dispatch
Oct 11 03:05:52 compute-0 ceph-mon[191930]: from='client.16027 -' entity='client.admin' cmd=[{"prefix": "osd perf", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:52 compute-0 ceph-mon[191930]: from='client.16029 -' entity='client.admin' cmd=[{"prefix": "osd pool autoscale-status", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:52 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/423493374' entity='client.admin' cmd=[{"prefix": "osd pool ls", "detail": "detail"}]: dispatch
Oct 11 03:05:52 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/788610639' entity='client.admin' cmd=[{"prefix": "osd stat"}]: dispatch
Oct 11 03:05:52 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.16035 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:52 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.16037 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:53 compute-0 nova_compute[356901]: 2025-10-11 03:05:53.281 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:53 compute-0 ceph-mon[191930]: pgmap v2652: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status"} v 0) v1
Oct 11 03:05:53 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/878622323' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
Oct 11 03:05:53 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2653: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:53 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "time-sync-status"} v 0) v1
Oct 11 03:05:53 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3502259810' entity='client.admin' cmd=[{"prefix": "time-sync-status"}]: dispatch
Oct 11 03:05:54 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "config dump", "format": "json-pretty"} v 0) v1
Oct 11 03:05:54 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2430839446' entity='client.admin' cmd=[{"prefix": "config dump", "format": "json-pretty"}]: dispatch
Oct 11 03:05:54 compute-0 ceph-mon[191930]: from='client.16035 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:54 compute-0 ceph-mon[191930]: from='client.16037 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""]}]: dispatch
Oct 11 03:05:54 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/878622323' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
Oct 11 03:05:54 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3502259810' entity='client.admin' cmd=[{"prefix": "time-sync-status"}]: dispatch
Oct 11 03:05:54 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/2430839446' entity='client.admin' cmd=[{"prefix": "config dump", "format": "json-pretty"}]: dispatch
Oct 11 03:05:54 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.16045 -' entity='client.admin' cmd=[{"prefix": "device ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:05:54.906 286362 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Oct 11 03:05:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:05:54.907 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Oct 11 03:05:54 compute-0 ovn_metadata_agent[286344]: 2025-10-11 03:05:54.908 286362 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Oct 11 03:05:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "detail": "detail", "format": "json-pretty"} v 0) v1
Oct 11 03:05:55 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/127555434' entity='client.admin' cmd=[{"prefix": "df", "detail": "detail", "format": "json-pretty"}]: dispatch
Oct 11 03:05:55 compute-0 ceph-mon[191930]: pgmap v2653: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:55 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/127555434' entity='client.admin' cmd=[{"prefix": "df", "detail": "detail", "format": "json-pretty"}]: dispatch
Oct 11 03:05:55 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "df", "format": "json-pretty"} v 0) v1
Oct 11 03:05:55 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3935332007' entity='client.admin' cmd=[{"prefix": "df", "format": "json-pretty"}]: dispatch
Oct 11 03:05:55 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2654: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:56 compute-0 nova_compute[356901]: 2025-10-11 03:05:56.080 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "fs dump", "format": "json-pretty"} v 0) v1
Oct 11 03:05:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1095071775' entity='client.admin' cmd=[{"prefix": "fs dump", "format": "json-pretty"}]: dispatch
Oct 11 03:05:56 compute-0 ceph-mon[191930]: from='client.16045 -' entity='client.admin' cmd=[{"prefix": "device ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:56 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3935332007' entity='client.admin' cmd=[{"prefix": "df", "format": "json-pretty"}]: dispatch
Oct 11 03:05:56 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1095071775' entity='client.admin' cmd=[{"prefix": "fs dump", "format": "json-pretty"}]: dispatch
Oct 11 03:05:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "fs ls", "format": "json-pretty"} v 0) v1
Oct 11 03:05:56 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/39604074' entity='client.admin' cmd=[{"prefix": "fs ls", "format": "json-pretty"}]: dispatch
Oct 11 03:05:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:05:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:05:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:05:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:05:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] scanning for idle connections..
Oct 11 03:05:56 compute-0 ceph-mgr[192233]: [volumes INFO mgr_util] cleaning up connections: []
Oct 11 03:05:56 compute-0 ovs-appctl[503883]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Oct 11 03:05:56 compute-0 ovs-appctl[503898]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Oct 11 03:05:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Optimize plan auto_2025-10-11_03:05:56
Oct 11 03:05:56 compute-0 ceph-mgr[192233]: [balancer INFO root] Mode upmap, max misplaced 0.050000
Oct 11 03:05:56 compute-0 ceph-mgr[192233]: [balancer INFO root] do_upmap
Oct 11 03:05:56 compute-0 ceph-mgr[192233]: [balancer INFO root] pools ['.mgr', 'cephfs.cephfs.data', 'default.rgw.control', 'vms', 'default.rgw.meta', 'backups', 'default.rgw.log', 'volumes', 'images', '.rgw.root', 'cephfs.cephfs.meta']
Oct 11 03:05:56 compute-0 ceph-mgr[192233]: [balancer INFO root] prepared 0/10 changes
Oct 11 03:05:56 compute-0 ovs-appctl[503912]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Oct 11 03:05:56 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.16055 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:56 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:05:56 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_write.cc:2098] [default] New memtable created with log file: #129. Immutable memtables: 0.
Oct 11 03:05:56 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:05:56.998089) [db/db_impl/db_impl_compaction_flush.cc:2832] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
Oct 11 03:05:56 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:856] [default] [JOB 77] Flushing memtable with next log file: 129
Oct 11 03:05:56 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151956998158, "job": 77, "event": "flush_started", "num_memtables": 1, "num_entries": 1505, "num_deletes": 250, "total_data_size": 2162064, "memory_usage": 2204440, "flush_reason": "Manual Compaction"}
Oct 11 03:05:56 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:885] [default] [JOB 77] Level-0 flush table #130: started
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151957009524, "cf_name": "default", "job": 77, "event": "table_file_creation", "file_number": 130, "file_size": 1301253, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 53317, "largest_seqno": 54821, "table_properties": {"data_size": 1295744, "index_size": 2584, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1861, "raw_key_size": 15436, "raw_average_key_size": 21, "raw_value_size": 1283318, "raw_average_value_size": 1779, "num_data_blocks": 117, "num_entries": 721, "num_filter_entries": 721, "num_deletions": 250, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760151820, "oldest_key_time": 1760151820, "file_creation_time": 1760151956, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 130, "seqno_to_time_mapping": "N/A"}}
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: [db/flush_job.cc:1019] [default] [JOB 77] Flush lasted 11515 microseconds, and 5916 cpu microseconds.
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:05:57.009605) [db/flush_job.cc:967] [default] [JOB 77] Level-0 flush table #130: 1301253 bytes OK
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:05:57.009626) [db/memtable_list.cc:519] [default] Level-0 commit table #130 started
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:05:57.011757) [db/memtable_list.cc:722] [default] Level-0 commit table #130: memtable #1 done
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:05:57.011771) EVENT_LOG_v1 {"time_micros": 1760151957011767, "job": 77, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:05:57.011792) [db/db_impl/db_impl_compaction_flush.cc:299] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: [db/db_impl/db_impl_files.cc:463] [JOB 77] Try to delete WAL files size 2155215, prev total WAL file size 2155215, number of live WAL files 2.
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000126.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:05:57.012589) [db/db_impl/db_impl_compaction_flush.cc:3165] [default] Manual compaction from level-0 to level-6 from '6D6772737461740032323534' seq:72057594037927935, type:22 .. '6D6772737461740032353035' seq:0, type:0; will stop at (end)
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1995] [default] [JOB 78] Compacting 1@0 + 1@6 files to L6, score -1.00
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:2001] [default]: Compaction start summary: Base version 77 Base level 0, inputs: [130(1270KB)], [128(8894KB)]
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151957012638, "job": 78, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [130], "files_L6": [128], "score": -1, "input_data_size": 10409096, "oldest_snapshot_seqno": -1}
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: [db/compaction/compaction_job.cc:1588] [default] [JOB 78] Generated table #131: 6930 keys, 7937327 bytes, temperature: kUnknown
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151957054790, "cf_name": "default", "job": 78, "event": "table_file_creation", "file_number": 131, "file_size": 7937327, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 0, "largest_seqno": 0, "table_properties": {"data_size": 7896011, "index_size": 22899, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 17349, "raw_key_size": 181017, "raw_average_key_size": 26, "raw_value_size": 7775674, "raw_average_value_size": 1122, "num_data_blocks": 908, "num_entries": 6930, "num_filter_entries": 6930, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1760146865, "oldest_key_time": 0, "file_creation_time": 1760151957, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "dcdf6145-9d9a-452f-b56e-35ebdfe48072", "db_session_id": "7YDO48KWFK8QJTXVXJNU", "orig_file_number": 131, "seqno_to_time_mapping": "N/A"}}
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed.
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:05:57.055082) [db/compaction/compaction_job.cc:1663] [default] [JOB 78] Compacted 1@0 + 1@6 files to L6 => 7937327 bytes
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:05:57.057077) [db/compaction/compaction_job.cc:865] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 246.4 rd, 187.9 wr, level 6, files in(1, 1) out(1 +0 blob) MB in(1.2, 8.7 +0.0 blob) out(7.6 +0.0 blob), read-write-amplify(14.1) write-amplify(6.1) OK, records in: 7379, records dropped: 449 output_compression: NoCompression
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:05:57.057094) EVENT_LOG_v1 {"time_micros": 1760151957057084, "job": 78, "event": "compaction_finished", "compaction_time_micros": 42239, "compaction_time_cpu_micros": 23304, "output_level": 6, "num_output_files": 1, "total_output_size": 7937327, "num_input_records": 7379, "num_output_records": 6930, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000130.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151957057519, "job": 78, "event": "table_file_deletion", "file_number": 130}
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-compute-0/store.db/000128.sst immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: EVENT_LOG_v1 {"time_micros": 1760151957060163, "job": 78, "event": "table_file_deletion", "file_number": 128}
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:05:57.012483) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:05:57.060320) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:05:57.060328) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:05:57.060330) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:05:57.060331) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 03:05:57 compute-0 ceph-mon[191930]: rocksdb: (Original Log Time 2025/10/11-03:05:57.060333) [db/db_impl/db_impl_compaction_flush.cc:1903] [default] Manual compaction starting
Oct 11 03:05:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mds stat", "format": "json-pretty"} v 0) v1
Oct 11 03:05:57 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/4131878606' entity='client.admin' cmd=[{"prefix": "mds stat", "format": "json-pretty"}]: dispatch
Oct 11 03:05:57 compute-0 ceph-mon[191930]: pgmap v2654: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:57 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/39604074' entity='client.admin' cmd=[{"prefix": "fs ls", "format": "json-pretty"}]: dispatch
Oct 11 03:05:57 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/4131878606' entity='client.admin' cmd=[{"prefix": "mds stat", "format": "json-pretty"}]: dispatch
Oct 11 03:05:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
Oct 11 03:05:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 03:05:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Oct 11 03:05:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: vms, start_after=
Oct 11 03:05:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 03:05:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: volumes, start_after=
Oct 11 03:05:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 03:05:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: backups, start_after=
Oct 11 03:05:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 03:05:57 compute-0 ceph-mgr[192233]: [rbd_support INFO root] load_schedules: images, start_after=
Oct 11 03:05:57 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2655: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:57 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json-pretty"} v 0) v1
Oct 11 03:05:57 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1751613708' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json-pretty"}]: dispatch
Oct 11 03:05:58 compute-0 nova_compute[356901]: 2025-10-11 03:05:58.285 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:05:58 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.16061 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:58 compute-0 ceph-mon[191930]: from='client.16055 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:58 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1751613708' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json-pretty"}]: dispatch
Oct 11 03:05:58 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd blocklist ls", "format": "json-pretty"} v 0) v1
Oct 11 03:05:58 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3287370429' entity='client.admin' cmd=[{"prefix": "osd blocklist ls", "format": "json-pretty"}]: dispatch
Oct 11 03:05:59 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.16065 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:59 compute-0 ceph-mon[191930]: pgmap v2655: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:59 compute-0 ceph-mon[191930]: from='client.16061 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:59 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/3287370429' entity='client.admin' cmd=[{"prefix": "osd blocklist ls", "format": "json-pretty"}]: dispatch
Oct 11 03:05:59 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.16067 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:05:59 compute-0 podman[157119]: time="2025-10-11T03:05:59Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Oct 11 03:05:59 compute-0 podman[157119]: @ - - [11/Oct/2025:03:05:59 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 46268 "" "Go-http-client/1.1"
Oct 11 03:05:59 compute-0 podman[157119]: @ - - [11/Oct/2025:03:05:59 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 9111 "" "Go-http-client/1.1"
Oct 11 03:05:59 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2656: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:05:59 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd dump", "format": "json-pretty"} v 0) v1
Oct 11 03:05:59 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1501115461' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json-pretty"}]: dispatch
Oct 11 03:06:00 compute-0 podman[504552]: 2025-10-11 03:06:00.217380323 +0000 UTC m=+0.115383920 container health_status ffd3ae2f504a16fb2d0cf5b81102149bbd001edf68a653ca2ef2e828330b8603 (image=quay.io/sustainable_computing_io/kepler:release-0.7.12, name=kepler, health_status=healthy, health_failing_streak=0, health_log=, release-0.7.12=, io.openshift.expose-services=, io.openshift.tags=base rhel9, architecture=x86_64, managed_by=edpm_ansible, container_name=kepler, summary=Provides the latest release of Red Hat Universal Base Image 9., vcs-type=git, version=9.4, config_id=edpm, build-date=2024-09-18T21:23:30, io.buildah.version=1.29.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi9-container, description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, maintainer=Red Hat, Inc., release=1214.1726694543, vendor=Red Hat, Inc., config_data={'image': 'quay.io/sustainable_computing_io/kepler:release-0.7.12', 'privileged': 'true', 'restart': 'always', 'ports': ['8888:8888'], 'net': 'host', 'command': '-v=2', 'recreate': True, 'environment': {'ENABLE_GPU': 'true', 'EXPOSE_CONTAINER_METRICS': 'true', 'ENABLE_PROCESS_METRICS': 'true', 'EXPOSE_VM_METRICS': 'true', 'EXPOSE_ESTIMATED_IDLE_POWER_METRICS': 'false', 'LIBVIRT_METADATA_URI': 'http://openstack.org/xmlns/libvirt/nova/1.1'}, 'healthcheck': {'test': '/openstack/healthcheck kepler', 'mount': '/var/lib/openstack/healthchecks/kepler'}, 'volumes': ['/lib/modules:/lib/modules:ro', '/run/libvirt:/run/libvirt:shared,ro', '/sys:/sys', '/proc:/proc', '/var/lib/openstack/healthchecks/kepler:/openstack:ro,z']}, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 9, vcs-ref=e309397d02fc53f7fa99db1371b8700eb49f268f, name=ubi9, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi9/images/9.4-1214.1726694543)
Oct 11 03:06:00 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd numa-status", "format": "json-pretty"} v 0) v1
Oct 11 03:06:00 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1010757162' entity='client.admin' cmd=[{"prefix": "osd numa-status", "format": "json-pretty"}]: dispatch
Oct 11 03:06:00 compute-0 ceph-mon[191930]: from='client.16065 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:06:00 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1501115461' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json-pretty"}]: dispatch
Oct 11 03:06:00 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1010757162' entity='client.admin' cmd=[{"prefix": "osd numa-status", "format": "json-pretty"}]: dispatch
Oct 11 03:06:00 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.16073 -' entity='client.admin' cmd=[{"prefix": "osd perf", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:06:01 compute-0 nova_compute[356901]: 2025-10-11 03:06:01.081 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.16075 -' entity='client.admin' cmd=[{"prefix": "osd pool autoscale-status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.mgr' root_id -1 using 7.185749983720779e-06 of space, bias 1.0, pg target 0.0021557249951162337 quantized to 1 (current 1)
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'vms' root_id -1 using 0.0005513950275118838 of space, bias 1.0, pg target 0.16541850825356513 quantized to 32 (current 32)
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'volumes' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'backups' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'images' root_id -1 using 0.0009191400908380543 of space, bias 1.0, pg target 0.2757420272514163 quantized to 32 (current 32)
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.meta' root_id -1 using 5.087256625643029e-07 of space, bias 4.0, pg target 0.0006104707950771635 quantized to 16 (current 32)
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 03:06:01 compute-0 openstack_network_exporter[374316]: ERROR   03:06:01 appctl.go:131: Failed to prepare call to ovsdb-server: no control socket files found for the ovs db server
Oct 11 03:06:01 compute-0 openstack_network_exporter[374316]: ERROR   03:06:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:06:01 compute-0 openstack_network_exporter[374316]: ERROR   03:06:01 appctl.go:144: Failed to get PID for ovn-northd: no control socket files found for ovn-northd
Oct 11 03:06:01 compute-0 openstack_network_exporter[374316]: ERROR   03:06:01 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Oct 11 03:06:01 compute-0 openstack_network_exporter[374316]: ERROR   03:06:01 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Oct 11 03:06:01 compute-0 ceph-mon[191930]: from='client.16067 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:06:01 compute-0 ceph-mon[191930]: pgmap v2656: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:06:01 compute-0 ceph-mon[191930]: from='client.16073 -' entity='client.admin' cmd=[{"prefix": "osd perf", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:06:01 compute-0 ceph-mon[191930]: from='client.16075 -' entity='client.admin' cmd=[{"prefix": "osd pool autoscale-status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:06:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool ls", "detail": "detail", "format": "json-pretty"} v 0) v1
Oct 11 03:06:01 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1433278196' entity='client.admin' cmd=[{"prefix": "osd pool ls", "detail": "detail", "format": "json-pretty"}]: dispatch
Oct 11 03:06:01 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2657: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:06:01 compute-0 ceph-mon[191930]: mon.compute-0@0(leader).osd e145 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 03:06:02 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd stat", "format": "json-pretty"} v 0) v1
Oct 11 03:06:02 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/782301090' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json-pretty"}]: dispatch
Oct 11 03:06:02 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.16081 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:06:02 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1433278196' entity='client.admin' cmd=[{"prefix": "osd pool ls", "detail": "detail", "format": "json-pretty"}]: dispatch
Oct 11 03:06:02 compute-0 ceph-mon[191930]: pgmap v2657: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:06:02 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/782301090' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json-pretty"}]: dispatch
Oct 11 03:06:02 compute-0 ceph-mon[191930]: from='client.16081 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:06:02 compute-0 ceph-mgr[192233]: log_channel(audit) log [DBG] : from='client.16083 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:06:03 compute-0 nova_compute[356901]: 2025-10-11 03:06:03.288 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263
Oct 11 03:06:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json-pretty"} v 0) v1
Oct 11 03:06:03 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1702855748' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 03:06:03 compute-0 ceph-mon[191930]: from='client.16083 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 03:06:03 compute-0 ceph-mon[191930]: from='client.? 192.168.122.100:0/1702855748' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 03:06:03 compute-0 ceph-mgr[192233]: log_channel(cluster) log [DBG] : pgmap v2658: 321 pgs: 321 active+clean; 118 MiB data, 313 MiB used, 60 GiB / 60 GiB avail
Oct 11 03:06:03 compute-0 ceph-mon[191930]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "time-sync-status", "format": "json-pretty"} v 0) v1
Oct 11 03:06:03 compute-0 ceph-mon[191930]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1211979281' entity='client.admin' cmd=[{"prefix": "time-sync-status", "format": "json-pretty"}]: dispatch
Oct 11 03:06:04 compute-0 podman[505620]: 2025-10-11 03:06:04.214729978 +0000 UTC m=+0.098865217 container health_status 31dece240beccf800a742e4debe6f80897b0548550324adc2e4a84bcd3b99028 (image=quay.io/navidys/prometheus-podman-exporter:v1.10.1, name=podman_exporter, health_status=healthy, health_failing_streak=0, health_log=, config_id=edpm, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'image': 'quay.io/navidys/prometheus-podman-exporter:v1.10.1', 'restart': 'always', 'recreate': True, 'user': 'root', 'privileged': True, 'ports': ['9882:9882'], 'net': 'host', 'command': ['--web.config.file=/etc/podman_exporter/podman_exporter.yaml'], 'environment': {'OS_ENDPOINT_TYPE': 'internal', 'CONTAINER_HOST': 'unix:///run/podman/podman.sock'}, 'healthcheck': {'test': '/openstack/healthcheck podman_exporter', 'mount': '/var/lib/openstack/healthchecks/podman_exporter'}, 'volumes': ['/var/lib/openstack/config/telemetry/podman_exporter.yaml:/etc/podman_exporter/podman_exporter.yaml:z', '/var/lib/openstack/certs/telemetry/default:/etc/podman_exporter/tls:z', '/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Oct 11 03:06:04 compute-0 podman[505626]: 2025-10-11 03:06:04.236880221 +0000 UTC m=+0.112242143 container health_status c8d2618d6efbdb5c0425c33fe90da8b81a121da7edb3ddbb5093eaae21995ed3 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent)